diff --git a/core/block_validator.go b/core/block_validator.go
index a75eeb01a1..3a5b42f443 100644
--- a/core/block_validator.go
+++ b/core/block_validator.go
@@ -35,6 +35,7 @@ import (
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
"github.com/ava-labs/coreth/trie"
+ "github.com/ethereum/go-ethereum/common"
)
// BlockValidator is responsible for validating block headers, uncles and
@@ -45,14 +46,17 @@ type BlockValidator struct {
config *params.ChainConfig // Chain configuration options
bc *BlockChain // Canonical block chain
engine consensus.Engine // Consensus engine used for validating
+
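+	// CheckRoot compares the state root expected by the block header with the
+	// locally computed root; it can be overridden for state backends whose
+	// roots differ from the header's merkle root.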
+ CheckRoot func(expected, got common.Hash) bool
}
// NewBlockValidator returns a new block validator which is safe for re-use
func NewBlockValidator(config *params.ChainConfig, blockchain *BlockChain, engine consensus.Engine) *BlockValidator {
validator := &BlockValidator{
- config: config,
- engine: engine,
- bc: blockchain,
+ config: config,
+ engine: engine,
+ bc: blockchain,
+ CheckRoot: func(expected, got common.Hash) bool { return expected == got },
}
return validator
}
@@ -106,12 +110,12 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
}
// Ancestor block must be known.
- if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) {
- if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) {
- return consensus.ErrUnknownAncestor
- }
- return consensus.ErrPrunedAncestor
- }
+ // if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) {
+ // if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) {
+ // return consensus.ErrUnknownAncestor
+ // }
+ // return consensus.ErrPrunedAncestor
+ // }
return nil
}
@@ -135,7 +139,7 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD
}
// Validate the state root against the received state root and throw
// an error if they don't match.
- if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root {
+ if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); !v.CheckRoot(header.Root, root) {
return fmt.Errorf("invalid merkle root (remote: %x local: %x) dberr: %w", header.Root, root, statedb.Error())
}
return nil
diff --git a/core/blockchain.go b/core/blockchain.go
index 0199ceaf8a..d1cd15258f 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -33,6 +33,7 @@ import (
"fmt"
"io"
"math/big"
+ "os"
"runtime"
"strings"
"sync"
@@ -60,6 +61,12 @@ import (
"github.com/ethereum/go-ethereum/log"
)
+var tapeDir string
+
+func init() {
+ tapeDir = os.Getenv("TAPE_DIR")
+}
+
var (
accountReadTimer = metrics.NewRegisteredCounter("chain/account/reads", nil)
accountHashTimer = metrics.NewRegisteredCounter("chain/account/hashes", nil)
@@ -72,6 +79,8 @@ var (
snapshotAccountReadTimer = metrics.NewRegisteredCounter("chain/snapshot/account/reads", nil)
snapshotStorageReadTimer = metrics.NewRegisteredCounter("chain/snapshot/storage/reads", nil)
snapshotCommitTimer = metrics.NewRegisteredCounter("chain/snapshot/commits", nil)
+ snapshotCacheMissAccount = metrics.NewRegisteredCounter("chain/snapshot/cache/miss/account", nil)
+ snapshotCacheMissStorage = metrics.NewRegisteredCounter("chain/snapshot/cache/miss/storage", nil)
triedbCommitTimer = metrics.NewRegisteredCounter("chain/triedb/commits", nil)
@@ -170,10 +179,16 @@ type CacheConfig struct {
StateHistory uint64 // Number of blocks from head whose state histories are reserved.
StateScheme string // Scheme used to store ethereum states and merkle tree nodes on top
+	KeyValueDB *triedb.KeyValueConfig // Configuration for the key-value state backend
+
SnapshotNoBuild bool // Whether the background generation is allowed
SnapshotWait bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it
}
+func (c *CacheConfig) TrieDBConfig() *triedb.Config {
+ return c.triedbConfig()
+}
+
-// triedbConfig derives the configures for trie database.
+// triedbConfig derives the configuration for the trie database.
func (c *CacheConfig) triedbConfig() *triedb.Config {
config := &triedb.Config{Preimages: c.Preimages}
@@ -191,6 +206,7 @@ func (c *CacheConfig) triedbConfig() *triedb.Config {
DirtyCacheSize: c.TrieDirtyLimit * 1024 * 1024,
}
}
+ config.KeyValueDB = c.KeyValueDB
return config
}
@@ -224,6 +240,13 @@ type txLookup struct {
transaction *types.Transaction
}
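+// blockRoot pairs a block with the state root to use for it, which may differ
+// from the header root when an alternate state backend is in use.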
+type blockRoot struct {
+ *types.Block
+ root common.Hash
+}
+
+func (b *blockRoot) Root() common.Hash { return b.root }
+
// BlockChain represents the canonical chain given a database with a genesis
// block. The Blockchain manages chain imports, reverts, chain reorganisations.
//
@@ -241,6 +264,7 @@ type txLookup struct {
type BlockChain struct {
chainConfig *params.ChainConfig // Chain & network configuration
cacheConfig *CacheConfig // Cache configuration for pruning
+ genesis *Genesis
db ethdb.Database // Low level persistent database to store final content in
snaps *snapshot.Tree // Snapshot tree for fast trie leaf access
@@ -289,7 +313,7 @@ type BlockChain struct {
// different than [chainAcceptedFeed], which is sent an event after an accepted
// block is processed (after each loop of the accepted worker). If there is a
// clean shutdown, all items inserted into the [acceptorQueue] will be processed.
- acceptorQueue chan *types.Block
+ acceptorQueue chan *blockRoot
// [acceptorClosingLock], and [acceptorClosed] are used
// to synchronize the closing of the [acceptorQueue] channel.
@@ -327,6 +351,17 @@ type BlockChain struct {
// [txIndexTailLock] is used to synchronize the updating of the tx index tail.
txIndexTailLock sync.Mutex
+
+	// snapWriter, if set, records every key/value read from the snapshot
+	// while processing blocks.
+	snapWriter writer
+}
+
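+// SetSnapWriter registers a writer that will receive every key/value read
+// from the snapshot during block processing.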
+func (bc *BlockChain) SetSnapWriter(w writer) {
+ bc.snapWriter = w
+}
+
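+// Opts carries optional overrides for NewBlockChain; LastAcceptedRoot
+// substitutes the state root to load when it differs from the header root.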
+type Opts struct {
+ LastAcceptedRoot common.Hash
}
// NewBlockChain returns a fully initialised block chain using information
@@ -335,6 +370,7 @@ type BlockChain struct {
func NewBlockChain(
db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis, engine consensus.Engine,
vmConfig vm.Config, lastAcceptedHash common.Hash, skipChainConfigCheckCompatible bool,
+ opts ...Opts,
) (*BlockChain, error) {
if cacheConfig == nil {
return nil, errCacheConfigNotSpecified
@@ -373,9 +409,10 @@ func NewBlockChain(
engine: engine,
vmConfig: vmConfig,
senderCacher: NewTxSenderCacher(runtime.NumCPU()),
- acceptorQueue: make(chan *types.Block, cacheConfig.AcceptorQueueLimit),
+ acceptorQueue: make(chan *blockRoot, cacheConfig.AcceptorQueueLimit),
quit: make(chan struct{}),
acceptedLogsCache: NewFIFOCache[common.Hash, [][]*types.Log](cacheConfig.AcceptedCacheSize),
+ genesis: genesis,
}
bc.stateCache = state.NewDatabaseWithNodeDB(bc.db, bc.triedb)
bc.validator = NewBlockValidator(chainConfig, bc, engine)
@@ -393,10 +430,22 @@ func NewBlockChain(
bc.currentBlock.Store(nil)
// Create the state manager
- bc.stateManager = NewTrieWriter(bc.triedb, cacheConfig)
+ var tdb TrieDB = bc.triedb
+ if cacheConfig.StateScheme == rawdb.PathScheme {
+ tdb = &NoDerefTrieDB{tdb}
+ }
+ bc.stateManager = NewTrieWriter(tdb, cacheConfig)
+
+ // if err := bc.reprocessFromGenesis(); err != nil {
+ // return nil, err
+ // }
// Re-generate current block state if it is missing
- if err := bc.loadLastState(lastAcceptedHash); err != nil {
+ lastAcceptedRoot := common.Hash{}
+ if len(opts) > 0 {
+ lastAcceptedRoot = opts[0].LastAcceptedRoot
+ }
+ if err := bc.loadLastState(lastAcceptedHash, lastAcceptedRoot); err != nil {
return nil, err
}
@@ -409,7 +458,11 @@ func NewBlockChain(
// Make sure the state associated with the block is available
head := bc.CurrentBlock()
- if !bc.HasState(head.Root) {
+ headRoot := head.Root
+ if len(opts) > 0 {
+ headRoot = opts[0].LastAcceptedRoot
+ }
+ if !bc.HasState(headRoot) {
return nil, fmt.Errorf("head state missing %d:%s", head.Number, head.Hash())
}
@@ -550,11 +603,16 @@ func (bc *BlockChain) startAcceptor() {
acceptorQueueGauge.Dec(1)
if err := bc.flattenSnapshot(func() error {
+ parent := bc.GetHeaderByHash(next.ParentHash())
+ if parent.Root == next.Block.Root() {
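+				// The state root is unchanged from the parent, so there is no new trie to accept.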
+ return nil
+ }
return bc.stateManager.AcceptTrie(next)
}, next.Hash()); err != nil {
log.Crit("unable to flatten snapshot from acceptor", "blockHash", next.Hash(), "err", err)
}
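+	// Only the raw block is needed for the index updates below.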
+ next := next.Block
// Update last processed and transaction lookup index
if err := bc.writeBlockAcceptedIndices(next); err != nil {
log.Crit("failed to write accepted block effects", "err", err)
@@ -593,19 +651,20 @@ func (bc *BlockChain) startAcceptor() {
-// addAcceptorQueue adds a new *types.Block to the [acceptorQueue]. This will
-// block if there are [AcceptorQueueLimit] items in [acceptorQueue].
-func (bc *BlockChain) addAcceptorQueue(b *types.Block) {
+// addAcceptorQueue adds a new *blockRoot to the [acceptorQueue] and reports
+// whether it was enqueued. This will block if there are [AcceptorQueueLimit]
+// items in [acceptorQueue]; it returns false once the acceptor is closed.
+func (bc *BlockChain) addAcceptorQueue(b *blockRoot) bool {
// We only acquire a read lock here because it is ok to add items to the
// [acceptorQueue] concurrently.
bc.acceptorClosingLock.RLock()
defer bc.acceptorClosingLock.RUnlock()
if bc.acceptorClosed {
- return
+ return false
}
acceptorQueueGauge.Inc(1)
bc.acceptorWg.Add(1)
bc.acceptorQueue <- b
+ return true
}
// DrainAcceptorQueue blocks until all items in [acceptorQueue] have been
@@ -646,12 +705,12 @@ func (bc *BlockChain) stopAcceptor() {
close(bc.acceptorQueue)
}
-func (bc *BlockChain) InitializeSnapshots() {
+func (bc *BlockChain) InitializeSnapshots(opts ...*Opts) {
bc.chainmu.Lock()
defer bc.chainmu.Unlock()
head := bc.CurrentBlock()
- bc.initSnapshot(head)
+ bc.initSnapshot(head, opts...)
}
// SenderCacher returns the *TxSenderCacher used within the core package.
@@ -661,7 +720,7 @@ func (bc *BlockChain) SenderCacher() *TxSenderCacher {
// loadLastState loads the last known chain state from the database. This method
// assumes that the chain manager mutex is held.
-func (bc *BlockChain) loadLastState(lastAcceptedHash common.Hash) error {
+func (bc *BlockChain) loadLastState(lastAcceptedHash common.Hash, lastAcceptedRoot common.Hash) error {
// Initialize genesis state
if lastAcceptedHash == (common.Hash{}) {
return bc.loadGenesisState()
@@ -706,7 +765,12 @@ func (bc *BlockChain) loadLastState(lastAcceptedHash common.Hash) error {
// reprocessState is necessary to ensure that the last accepted state is
// available. The state may not be available if it was not committed due
// to an unclean shutdown.
- return bc.reprocessState(bc.lastAccepted, 2*bc.cacheConfig.CommitInterval)
+ return bc.reprocessState(bc.lastAccepted, lastAcceptedRoot, 2*bc.cacheConfig.CommitInterval)
+}
+
+func (bc *BlockChain) LoadGenesisState(block *types.Block) error {
+ bc.genesisBlock = block
+ return bc.loadGenesisState()
}
func (bc *BlockChain) loadGenesisState() error {
@@ -770,6 +834,13 @@ func (bc *BlockChain) ExportCallback(callback func(block *types.Block) error, fi
return nil
}
+func (bc *BlockChain) WriteHeadBlock(block *types.Block) {
+ bc.chainmu.Lock()
+ defer bc.chainmu.Unlock()
+
+ bc.writeHeadBlock(block)
+}
+
// writeHeadBlock injects a new head block into the current block chain. This method
// assumes that the block is indeed a true head. It will also reset the head
// header to this very same block if they are older or if they are on a different side chain.
@@ -1031,6 +1102,14 @@ func (bc *BlockChain) LastAcceptedBlock() *types.Block {
//
// Assumes [bc.chainmu] is not held by the caller.
func (bc *BlockChain) Accept(block *types.Block) error {
+	// In normal operation, don't error when the acceptor is closed: we can
+	// recover on restart.
+ errorOnClosed := false
+ return bc.AcceptWithRoot(block, block.Root(), errorOnClosed)
+}
+
+var ErrAcceptorClosed = errors.New("acceptor closed")
+
+func (bc *BlockChain) AcceptWithRoot(block *types.Block, root common.Hash, errorOnClosed bool) error {
bc.chainmu.Lock()
defer bc.chainmu.Unlock()
@@ -1057,7 +1136,11 @@ func (bc *BlockChain) Accept(block *types.Block) error {
// Enqueue block in the acceptor
bc.lastAccepted = block
- bc.addAcceptorQueue(block)
+ added := bc.addAcceptorQueue(&blockRoot{Block: block, root: root})
+ if !added && errorOnClosed {
+ return ErrAcceptorClosed
+ }
+
acceptedBlockGasUsedCounter.Inc(int64(block.GasUsed()))
acceptedTxsCounter.Inc(int64(len(block.Transactions())))
return nil
@@ -1226,7 +1309,7 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
bc.chainmu.Lock()
defer bc.chainmu.Unlock()
for n, block := range chain {
- if err := bc.insertBlock(block, true); err != nil {
+ if err := bc.insertBlock(block, nil, true); err != nil {
return n, err
}
}
@@ -1239,17 +1322,21 @@ func (bc *BlockChain) InsertBlock(block *types.Block) error {
}
func (bc *BlockChain) InsertBlockManual(block *types.Block, writes bool) error {
+ return bc.InsertBlockManualWithParent(block, nil, writes)
+}
+
+func (bc *BlockChain) InsertBlockManualWithParent(block *types.Block, parent *types.Header, writes bool) error {
bc.blockProcFeed.Send(true)
defer bc.blockProcFeed.Send(false)
bc.chainmu.Lock()
- err := bc.insertBlock(block, writes)
+ err := bc.insertBlock(block, parent, writes)
bc.chainmu.Unlock()
return err
}
-func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error {
+func (bc *BlockChain) insertBlock(block *types.Block, parent *types.Header, writes bool) error {
start := time.Now()
bc.senderCacher.Recover(types.MakeSigner(bc.chainConfig, block.Number(), block.Time()), block.Transactions())
@@ -1289,9 +1376,10 @@ func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error {
// No validation errors for the block
- // Retrieve the parent block to determine which root to build state on
- substart = time.Now()
- parent := bc.GetHeader(block.ParentHash(), block.NumberU64()-1)
+ if parent == nil {
+ // Retrieve the parent block to determine which root to build state on
+ parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1)
+ }
// Instantiate the statedb to use for processing transactions
//
@@ -1299,7 +1387,24 @@ func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error {
// entries directly from the trie (much slower).
bc.flattenLock.Lock()
defer bc.flattenLock.Unlock()
- statedb, err := state.New(parent.Root, bc.stateCache, bc.snaps)
+	// The commented-out prefetcher below would pre-execute the block's txs to
+	// record a tape of snapshot reads, avoiding snapshot misses during the
+	// main execution.
+ // sp := newStatePrefetcher(bc.chainConfig, bc, bc.engine)
+ // tape := new(tape)
+ // sp.Prefetch(block, parent.Root, bc.vmConfig, tape)
+
+ substart = time.Now()
+ var statedb *state.StateDB
+ if bc.snaps != nil {
+ snap := bc.snaps.Snapshot(parent.Root)
+ withRecording := snap
+ if bc.snapWriter != nil {
+ withRecording = &snapRecorder{snap, bc.snapWriter}
+ }
+ statedb, err = state.NewWithSnapshot(parent.Root, bc.stateCache, withRecording)
+ } else {
+ statedb, err = state.New(parent.Root, bc.stateCache, nil)
+ }
if err != nil {
return err
}
@@ -1310,8 +1415,14 @@ func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error {
defer statedb.StopPrefetcher()
// Process block using the parent state as reference point
+ accountMissStart := snapshot.SnapshotCleanAccountMissMeter.Snapshot().Count()
+ storageMissStart := snapshot.SnapshotCleanStorageMissMeter.Snapshot().Count()
pstart := time.Now()
- receipts, logs, usedGas, err := bc.processor.Process(block, parent, statedb, bc.vmConfig)
+ var writers []writer
+ if bc.snapWriter != nil {
+ writers = append(writers, bc.snapWriter)
+ }
+ receipts, logs, usedGas, err := bc.processor.Process(block, parent, statedb, bc.vmConfig, writers...)
if serr := statedb.Error(); serr != nil {
log.Error("statedb error encountered", "err", serr, "number", block.Number(), "hash", block.Hash())
}
@@ -1320,6 +1431,10 @@ func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error {
return err
}
ptime := time.Since(pstart)
+ accountMissEnd := snapshot.SnapshotCleanAccountMissMeter.Snapshot().Count()
+ storageMissEnd := snapshot.SnapshotCleanStorageMissMeter.Snapshot().Count()
+ snapshotCacheMissAccount.Inc(accountMissEnd - accountMissStart)
+ snapshotCacheMissStorage.Inc(storageMissEnd - storageMissStart)
// Validate the state using the default validator
vstart := time.Now()
@@ -1640,57 +1755,184 @@ func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, e
log.Debug(reason.String())
}
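+// extraStats accumulates per-phase timings and counters across reprocessed
+// blocks for periodic progress logging.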
+type extraStats struct {
+ spTime time.Duration
+ pTime time.Duration
+ vTime time.Duration
+ cTime time.Duration
+ readTime time.Duration
+ tapeLen uint64
+ blocks uint64
+ txs uint64
+}
+
// reprocessBlock reprocesses a previously accepted block. This is often used
// to regenerate previously pruned state tries.
-func (bc *BlockChain) reprocessBlock(parent *types.Block, current *types.Block) (common.Hash, error) {
+func (bc *BlockChain) reprocessBlock(parent *types.Block, current *types.Block, extras ...*extraStats) (common.Hash, error) {
+ var stats *extraStats
+ if len(extras) > 0 {
+ stats = extras[0]
+ }
+
+ bc.senderCacher.Recover(types.MakeSigner(bc.chainConfig, current.Number(), current.Time()), current.Transactions())
+
// Retrieve the parent block and its state to execute block
var (
statedb *state.StateDB
err error
parentRoot = parent.Root()
)
+
+	// Pre-execute the block's txs to record a tape of snapshot reads; the
+	// tape is replayed below so the main execution avoids snapshot misses.
+ tape := new(tape)
+ spStart := time.Now()
+ sp := newStatePrefetcher(bc.chainConfig, bc, bc.engine)
+ sp.Prefetch(current, parent.Root(), bc.vmConfig, tape)
+ spTime := time.Since(spStart)
+
// We don't simply use [NewWithSnapshot] here because it doesn't return an
// error if [bc.snaps != nil] and [bc.snaps.Snapshot(parentRoot) == nil].
if bc.snaps == nil {
statedb, err = state.New(parentRoot, bc.stateCache, nil)
} else {
		snap := bc.snaps.Snapshot(parentRoot)
		if snap == nil {
			return common.Hash{}, fmt.Errorf("failed to get snapshot for parent root: %s", parentRoot)
		}
+		withReplay := &snapReplay{snap, *tape}
- statedb, err = state.NewWithSnapshot(parentRoot, bc.stateCache, snap)
+ statedb, err = state.NewWithSnapshot(parentRoot, bc.stateCache, withReplay)
}
if err != nil {
return common.Hash{}, fmt.Errorf("could not fetch state for (%s: %d): %v", parent.Hash().Hex(), parent.NumberU64(), err)
}
-
// Enable prefetching to pull in trie node paths while processing transactions
statedb.StartPrefetcher("chain", state.WithConcurrentWorkers(bc.cacheConfig.TriePrefetcherParallelism))
defer statedb.StopPrefetcher()
// Process previously stored block
+	trieReadStart := statedb.SnapshotAccountReads + statedb.AccountReads // time spent on account reads so far
+	trieReadStart += statedb.SnapshotStorageReads + statedb.StorageReads // plus time spent on storage reads
+ accountMissStart := snapshot.SnapshotCleanAccountMissMeter.Snapshot().Count()
+ storageMissStart := snapshot.SnapshotCleanStorageMissMeter.Snapshot().Count()
+ pstart := time.Now()
receipts, _, usedGas, err := bc.processor.Process(current, parent.Header(), statedb, vm.Config{})
if err != nil {
return common.Hash{}, fmt.Errorf("failed to re-process block (%s: %d): %v", current.Hash().Hex(), current.NumberU64(), err)
}
+ ptime := time.Since(pstart)
+ accountMissEnd := snapshot.SnapshotCleanAccountMissMeter.Snapshot().Count()
+ storageMissEnd := snapshot.SnapshotCleanStorageMissMeter.Snapshot().Count()
+ trieReadEnd := statedb.SnapshotAccountReads + statedb.AccountReads
+ trieReadEnd += statedb.SnapshotStorageReads + statedb.StorageReads
+ snapshotCacheMissAccount.Inc(accountMissEnd - accountMissStart)
+ snapshotCacheMissStorage.Inc(storageMissEnd - storageMissStart)
// Validate the state using the default validator
+ vstart := time.Now()
if err := bc.validator.ValidateState(current, statedb, receipts, usedGas); err != nil {
return common.Hash{}, fmt.Errorf("failed to validate state while re-processing block (%s: %d): %v", current.Hash().Hex(), current.NumberU64(), err)
}
+ vtime := time.Since(vstart)
log.Debug("Processed block", "block", current.Hash(), "number", current.NumberU64())
// Commit all cached state changes into underlying memory database.
// If snapshots are enabled, call CommitWithSnaps to explicitly create a snapshot
// diff layer for the block.
+ var root common.Hash
+ cstart := time.Now()
if bc.snaps == nil {
- return statedb.Commit(current.NumberU64(), bc.chainConfig.IsEIP158(current.Number()))
+ root, err = statedb.Commit(current.NumberU64(), bc.chainConfig.IsEIP158(current.Number()))
+ } else {
+ root, err = statedb.CommitWithSnap(current.NumberU64(), bc.chainConfig.IsEIP158(current.Number()), bc.snaps, current.Hash(), current.ParentHash())
+ }
+ ctime := time.Since(cstart)
+
+ if stats != nil {
+ stats.spTime += spTime
+ stats.pTime += ptime
+ stats.vTime += vtime
+ stats.cTime += ctime
+ stats.tapeLen += uint64(tape.Len())
+ stats.txs += uint64(len(current.Transactions()))
+ stats.readTime += trieReadEnd - trieReadStart
+ stats.blocks++
+ }
+
+ return root, err
+}
+
+// reprocessFromGenesis destroys the current snapshot (overwriting it with the
+// genesis state) and reprocesses the chain from the genesis block up to the
+// hard-coded target block, rebuilding state along the way.
+func (bc *BlockChain) reprocessFromGenesis() error {
+ metrics.EnabledExpensive = false
+	target := uint64(30_000_000) // hard-coded reprocessing target height
+ log.Warn("Reprocessing chain from genesis", "target", target)
+
+ if err := bc.loadGenesisState(); err != nil {
+ return err
+ }
+ bc.initSnapshot(bc.hc.genesisHeader)
+ log.Warn("Snapshot initialized with genesis state")
+
+ parent := bc.genesisBlock
+
+ stats := &extraStats{}
+ var (
+ start = time.Now()
+ logged time.Time
+ totalFlatTime time.Duration
+ )
+ for i := uint64(1); i <= target; i++ {
+ current := bc.GetBlockByNumber(i)
+
+ _, err := bc.reprocessBlock(parent, current, stats)
+ if err != nil {
+ return err
+ }
+
+ // Flatten snapshot if initialized, holding a reference to the state root until the next block
+ // is processed.
+ fstart := time.Now()
+ if err := bc.flattenSnapshot(func() error {
+ return bc.stateManager.AcceptTrie(current)
+ }, current.Hash()); err != nil {
+ return err
+ }
+ flatTime := time.Since(fstart)
+ totalFlatTime += flatTime
+
+ // Print progress logs if long enough time elapsed
+ accountMiss := snapshotCacheMissAccount.Snapshot().Count()
+ storageMiss := snapshotCacheMissStorage.Snapshot().Count()
+ if time.Since(logged) > 8*time.Second {
+ log.Info(
+ "Reprocessing chain",
+ "block", i,
+ "elapsed", common.PrettyDuration(time.Since(start).Truncate(time.Second)),
+ "flat", common.PrettyDuration(totalFlatTime.Truncate(time.Second)),
+ "spTime", stats.spTime.Truncate(time.Second),
+ "pTime", stats.pTime.Truncate(time.Second),
+ "vTime", stats.vTime.Truncate(time.Second),
+ "cTime", stats.cTime.Truncate(time.Second),
+ "readTime", stats.readTime.Truncate(time.Millisecond),
+ "accountMiss", accountMiss,
+ "storageMiss", storageMiss,
+ "tapeLen", stats.tapeLen,
+ "txs", stats.txs,
+ "blocks", stats.blocks,
+ )
+ logged = time.Now()
+ }
+
+ parent = current
}
- return statedb.CommitWithSnap(current.NumberU64(), bc.chainConfig.IsEIP158(current.Number()), bc.snaps, current.Hash(), current.ParentHash())
+
+ return nil
}
// initSnapshot instantiates a Snapshot instance and adds it to [bc]
-func (bc *BlockChain) initSnapshot(b *types.Header) {
+func (bc *BlockChain) initSnapshot(b *types.Header, opts ...*Opts) {
if bc.cacheConfig.SnapshotLimit <= 0 || bc.snaps != nil {
return
}
@@ -1710,10 +1952,44 @@ func (bc *BlockChain) initSnapshot(b *types.Header) {
AsyncBuild: asyncBuild,
SkipVerify: !bc.cacheConfig.SnapshotVerify,
}
+ kvConfig := bc.cacheConfig.KeyValueDB
+ if b.Number.Uint64() == 0 && kvConfig != nil && kvConfig.KVBackend != nil && !bc.cacheConfig.SnapshotNoBuild {
+ var err error
+ bc.snaps, err = snapshot.New(snapconfig, bc.db, bc.triedb, common.Hash{}, types.EmptyRootHash)
+ if err != nil {
+ log.Error("failed to initialize snapshots", "headRoot", b.Root, "err", err, "async", asyncBuild)
+ }
+ tmpDiskDB := rawdb.NewMemoryDatabase()
+ tmpTrieDB := triedb.NewDatabase(tmpDiskDB, nil)
+ tmpStateDatabase := state.NewDatabaseWithNodeDB(tmpDiskDB, tmpTrieDB)
+ statedb, err := state.New(types.EmptyRootHash, tmpStateDatabase, bc.snaps)
+ if err != nil {
+ log.Error("failed to initialize genesis state", "err", err)
+ }
+ bc.genesis.toBlockWithState(tmpDiskDB, statedb)
+ _, err = statedb.CommitWithSnap(0, false, bc.snaps, bc.genesisBlock.Hash(), common.Hash{})
+ if err != nil {
+ log.Error("failed to commit genesis state", "err", err)
+ }
+ if err := bc.snaps.Flatten(bc.genesisBlock.Hash()); err != nil {
+ log.Error("failed to flatten genesis snapshot", "err", err)
+ }
+ root := bc.genesisBlock.Root()
+ if len(opts) > 0 {
+ root = opts[0].LastAcceptedRoot
+ }
+ rawdb.WriteSnapshotRoot(bc.db, root)
+	// Mark the snapshot generation as completed.
+ snapshot.ResetSnapshotGeneration(bc.db)
+ }
var err error
- bc.snaps, err = snapshot.New(snapconfig, bc.db, bc.triedb, b.Hash(), b.Root)
+ root := b.Root
+ if len(opts) > 0 {
+ root = opts[0].LastAcceptedRoot
+ }
+ bc.snaps, err = snapshot.New(snapconfig, bc.db, bc.triedb, b.Hash(), root)
if err != nil {
- log.Error("failed to initialize snapshots", "headHash", b.Hash(), "headRoot", b.Root, "err", err, "async", asyncBuild)
+ log.Error("failed to initialize snapshots", "headHash", b.Hash(), "headRoot", root, "err", err, "async", asyncBuild)
}
}
@@ -1721,7 +1997,7 @@ func (bc *BlockChain) initSnapshot(b *types.Header) {
// it reaches a block with a state committed to the database. reprocessState does not use
// snapshots since the disk layer for snapshots will most likely be above the last committed
// state that reprocessing will start from.
-func (bc *BlockChain) reprocessState(current *types.Block, reexec uint64) error {
+func (bc *BlockChain) reprocessState(current *types.Block, currentRoot common.Hash, reexec uint64) error {
origin := current.NumberU64()
acceptorTip, err := rawdb.ReadAcceptorTip(bc.db)
if err != nil {
@@ -1734,7 +2010,12 @@ func (bc *BlockChain) reprocessState(current *types.Block, reexec uint64) error
acceptorTipUpToDate := acceptorTip == (common.Hash{}) || acceptorTip == current.Hash()
// If the state is already available and the acceptor tip is up to date, skip re-processing.
- if bc.HasState(current.Root()) && acceptorTipUpToDate {
+ root := current.Root()
+ if currentRoot != (common.Hash{}) {
+ root = currentRoot
+ }
+ log.Info("Looking for state", "root", root, "acceptorTip", acceptorTip, "acceptorTipUpToDate", acceptorTipUpToDate)
+ if bc.HasState(root) && acceptorTipUpToDate {
log.Info("Skipping state reprocessing", "root", current.Root())
return nil
}
@@ -2063,7 +2344,7 @@ func (bc *BlockChain) ResetToStateSyncedBlock(block *types.Block) error {
lastAcceptedHash := block.Hash()
bc.stateCache = state.NewDatabaseWithNodeDB(bc.db, bc.triedb)
- if err := bc.loadLastState(lastAcceptedHash); err != nil {
+ if err := bc.loadLastState(lastAcceptedHash, common.Hash{}); err != nil {
return err
}
// Create the state manager
diff --git a/core/genesis.go b/core/genesis.go
index 07d3ba072d..c889ce6920 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -230,7 +230,18 @@ func (g *Genesis) toBlock(db ethdb.Database, triedb *triedb.Database) *types.Blo
if err != nil {
panic(err)
}
+ head, root := g.toBlockWithState(db, statedb)
+ statedb.Commit(0, false)
+ // Commit newly generated states into disk if it's not empty.
+ if root != types.EmptyRootHash {
+ if err := triedb.Commit(root, true); err != nil {
+ panic(fmt.Sprintf("unable to commit genesis block: %v", err))
+ }
+ }
+ return types.NewBlock(head, nil, nil, nil, trie.NewStackTrie(nil))
+}
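+
+// toBlockWithState applies the genesis allocation to statedb and returns the
+// genesis header together with the resulting state root.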
+func (g *Genesis) toBlockWithState(db ethdb.Database, statedb *state.StateDB) (*types.Header, common.Hash) {
head := &types.Header{
Number: new(big.Int).SetUint64(g.Number),
Nonce: types.EncodeNonce(g.Nonce),
@@ -246,7 +257,7 @@ func (g *Genesis) toBlock(db ethdb.Database, triedb *triedb.Database) *types.Blo
}
// Configure any stateful precompiles that should be enabled in the genesis.
- err = ApplyPrecompileActivations(g.Config, nil, types.NewBlockWithHeader(head), statedb)
+ err := ApplyPrecompileActivations(g.Config, nil, types.NewBlockWithHeader(head), statedb)
if err != nil {
panic(fmt.Sprintf("unable to configure precompiles in genesis block: %v", err))
}
@@ -298,15 +309,7 @@ func (g *Genesis) toBlock(db ethdb.Database, triedb *triedb.Database) *types.Blo
}
}
}
-
- statedb.Commit(0, false)
- // Commit newly generated states into disk if it's not empty.
- if root != types.EmptyRootHash {
- if err := triedb.Commit(root, true); err != nil {
- panic(fmt.Sprintf("unable to commit genesis block: %v", err))
- }
- }
- return types.NewBlock(head, nil, nil, nil, trie.NewStackTrie(nil))
+ return head, root
}
// Commit writes the block and state of a genesis specification to the database.
diff --git a/core/state/database.go b/core/state/database.go
index e29e9b8d78..8ce59c169e 100644
--- a/core/state/database.go
+++ b/core/state/database.go
@@ -32,6 +32,7 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
+ "github.com/ava-labs/coreth/shim"
"github.com/ava-labs/coreth/trie"
"github.com/ava-labs/coreth/trie/trienode"
"github.com/ava-labs/coreth/trie/utils"
@@ -81,8 +82,26 @@ type Database interface {
TrieDB() *triedb.Database
}
-// Trie is a Ethereum Merkle Patricia trie.
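+// Trie extends Itrie with prefetch-specific reads so key-value backends can
+// serve prefetches without mutating trie state.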
type Trie interface {
+ Itrie
+ PrefetchAccount(address common.Address) (*types.StateAccount, error)
+ PrefetchStorage(addr common.Address, key []byte) ([]byte, error)
+}
+
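+// LegacyAdapter lets existing Itrie implementations satisfy Trie by routing
+// prefetch reads to the ordinary accessors.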
+type LegacyAdapter struct {
+ Itrie
+}
+
+func (a LegacyAdapter) PrefetchAccount(address common.Address) (*types.StateAccount, error) {
+ return a.Itrie.GetAccount(address)
+}
+
+func (a LegacyAdapter) PrefetchStorage(addr common.Address, key []byte) ([]byte, error) {
+ return a.Itrie.GetStorage(addr, key)
+}
+
+// Itrie is an Ethereum Merkle Patricia trie.
+type Itrie interface {
// GetKey returns the sha3 preimage of a hashed key that was previously used
// to store a value.
//
@@ -189,18 +208,47 @@ type cachingDB struct {
// OpenTrie opens the main account trie at a specific root hash.
func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) {
+ if kvConfig := db.triedb.Config().KeyValueDB; kvConfig != nil {
+ if kvConfig.KVBackend != nil {
+ return shim.NewAccountTrieKV(root, kvConfig.KVBackend, db.triedb)
+ }
+ // Legacy backend maintains hash compatibility with geth
+ // to test the shim layer.
+ backend, err := shim.NewLegacyBackend(root, common.Hash{}, root, db.triedb, kvConfig.Writer)
+ if err != nil {
+ return nil, err
+ }
+ return shim.NewStateTrie(backend, db.triedb), nil
+ }
+
if db.triedb.IsVerkle() {
- return trie.NewVerkleTrie(root, db.triedb, utils.NewPointCache(commitmentCacheItems))
+ tr, err := trie.NewVerkleTrie(root, db.triedb, utils.NewPointCache(commitmentCacheItems))
+ return LegacyAdapter{tr}, err
}
tr, err := trie.NewStateTrie(trie.StateTrieID(root), db.triedb)
if err != nil {
return nil, err
}
- return tr, nil
+ return LegacyAdapter{tr}, nil
}
// OpenStorageTrie opens the storage trie of an account.
func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash, self Trie) (Trie, error) {
+ if kvConfig := db.triedb.Config().KeyValueDB; kvConfig != nil {
+ addrHash := crypto.Keccak256Hash(address.Bytes())
+
+ if kvConfig.KVBackend != nil {
+ accountTrie := self.(*shim.StateTrie)
+ return shim.NewStorageTrieKV(stateRoot, addrHash, accountTrie)
+ }
+ // Legacy backend maintains hash compatibility with geth
+ // to test the shim layer.
+ backend, err := shim.NewLegacyBackend(stateRoot, addrHash, root, db.triedb, kvConfig.Writer)
+ if err != nil {
+ return nil, err
+ }
+ return shim.NewStateTrie(backend, db.triedb), nil
+ }
// In the verkle case, there is only one tree. But the two-tree structure
// is hardcoded in the codebase. So we need to return the same trie in this
// case.
@@ -211,14 +259,16 @@ func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Addre
if err != nil {
return nil, err
}
- return tr, nil
+ return LegacyAdapter{tr}, nil
}
// CopyTrie returns an independent copy of the given trie.
func (db *cachingDB) CopyTrie(t Trie) Trie {
switch t := t.(type) {
- case *trie.StateTrie:
+ case *shim.StateTrie:
return t.Copy()
+ case LegacyAdapter:
+ return LegacyAdapter{t.Itrie.(*trie.StateTrie).Copy()}
default:
panic(fmt.Errorf("unknown trie type %T", t))
}
diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go
index abb3051762..2c2d617559 100644
--- a/core/state/snapshot/snapshot.go
+++ b/core/state/snapshot/snapshot.go
@@ -73,6 +73,9 @@ var (
snapshotDirtyAccountReadMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/account/read", nil)
snapshotDirtyAccountWriteMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/account/write", nil)
+ SnapshotCleanStorageMissMeter = snapshotCleanStorageMissMeter
+ SnapshotCleanAccountMissMeter = snapshotCleanAccountMissMeter
+
snapshotDirtyStorageHitMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/hit", nil)
snapshotDirtyStorageMissMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/miss", nil)
snapshotDirtyStorageInexMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/inex", nil)
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 0dccd28bf8..4a6d328c25 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -46,6 +46,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/rlp"
"github.com/holiman/uint256"
)
@@ -213,6 +214,9 @@ func (wp *workerPool) Done() {
}
func WithConcurrentWorkers(prefetchers int) PrefetcherOption {
+ if prefetchers <= 0 {
+ return nil
+ }
pool := &workerPool{
BoundedWorkers: utils.NewBoundedWorkers(prefetchers),
}
@@ -227,8 +231,13 @@ func (s *StateDB) StartPrefetcher(namespace string, opts ...PrefetcherOption) {
s.prefetcher.close()
s.prefetcher = nil
}
- if s.snap != nil {
- s.prefetcher = newTriePrefetcher(s.db, s.originalRoot, namespace, opts...)
+ if len(opts) == 0 || opts[0] == nil {
+ return // No prefetching
+ }
+ s.prefetcher = newTriePrefetcher(s.db, s.originalRoot, namespace, opts...)
+ kvConfig := s.db.TrieDB().Config().KeyValueDB
+ if kvConfig != nil && kvConfig.KVBackend != nil {
+ s.prefetcher.rootTrie = s.trie
}
}
@@ -720,6 +729,15 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
if data == nil {
return nil
}
+		if s.snap != nil {
+			enc, err := rlp.EncodeToBytes(data)
+			if err != nil {
+				s.setError(fmt.Errorf("getDeletedStateObject (%x) error: %w", addr.Bytes(), err))
+				return nil
+			}
+			log.Warn("Account found in trie but missing from snapshot", "addr", addr, "account", common.Bytes2Hex(enc))
+		}
}
// Insert into the live set
obj := newObject(s, addr, data)
@@ -1250,6 +1268,16 @@ func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) (map[common.A
if prev.Root == types.EmptyRootHash {
continue
}
+ tdbConfig := s.db.TrieDB().Config()
+ if tdbConfig.KeyValueDB != nil && tdbConfig.KeyValueDB.KVBackend != nil {
+ deleted, err := tdbConfig.KeyValueDB.KVBackend.PrefixDelete(addrHash[:])
+ if err != nil {
+ return nil, fmt.Errorf("failed to delete storage from kv backend, err: %w", err)
+ }
+ if deleted > 0 {
+ log.Info("Deleted storage from kv backend", "addrHash", addrHash, "deleted", deleted)
+ }
+ }
// Remove storage slots belong to the account.
aborted, slots, set, err := s.deleteStorage(addr, addrHash, prev.Root)
if err != nil {
diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go
index fc7be65b4a..bdaf71ad28 100644
--- a/core/state/trie_prefetcher.go
+++ b/core/state/trie_prefetcher.go
@@ -37,6 +37,7 @@ var (
// Note, the prefetcher's API is not thread safe.
type triePrefetcher struct {
db Database // Database to fetch trie nodes through
+	rootTrie Trie // Shared account trie at the state root, reused across subfetchers
root common.Hash // Root hash of the account trie for metrics
fetches map[string]Trie // Partially or fully fetched tries. Only populated for inactive copies.
fetchers map[string]*subfetcher // Subfetchers for each trie
@@ -158,12 +159,24 @@ func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, addr comm
id := p.trieID(owner, root)
fetcher := p.fetchers[id]
if fetcher == nil {
- fetcher = newSubfetcher(p.db, p.root, owner, root, addr, p.options...)
+ fetcher = newSubfetcher(p, p.db, p.root, owner, root, addr, p.options...)
p.fetchers[id] = fetcher
}
fetcher.schedule(keys)
}
+func (p *triePrefetcher) getRootTrie() Trie {
+ if p.rootTrie != nil {
+ return p.rootTrie
+ }
+ rootTrie, err := p.db.OpenTrie(p.root)
+ if err != nil {
+ log.Warn("Trie prefetcher failed opening root trie", "root", p.root, "err", err)
+ return nil
+ }
+ return rootTrie
+}
+
// trie returns the trie matching the root hash, or nil if the prefetcher doesn't
// have it.
func (p *triePrefetcher) trie(owner common.Hash, root common.Hash) Trie {
@@ -216,6 +229,8 @@ func (p *triePrefetcher) trieID(owner common.Hash, root common.Hash) string {
// main prefetcher is paused and either all requested items are processed or if
// the trie being worked on is retrieved from the prefetcher.
type subfetcher struct {
+ p *triePrefetcher
+
db Database // Database to load trie nodes through
state common.Hash // Root hash of the state to prefetch
owner common.Hash // Owner of the trie, usually account hash
@@ -240,8 +255,9 @@ type subfetcher struct {
// newSubfetcher creates a goroutine to prefetch state items belonging to a
// particular root hash.
-func newSubfetcher(db Database, state common.Hash, owner common.Hash, root common.Hash, addr common.Address, opts ...PrefetcherOption) *subfetcher {
+func newSubfetcher(p *triePrefetcher, db Database, state common.Hash, owner common.Hash, root common.Hash, addr common.Address, opts ...PrefetcherOption) *subfetcher {
sf := &subfetcher{
+ p: p,
db: db,
state: state,
owner: owner,
@@ -312,16 +328,12 @@ func (sf *subfetcher) loop() {
// Start by opening the trie and stop processing if it fails
if sf.owner == (common.Hash{}) {
- trie, err := sf.db.OpenTrie(sf.root)
- if err != nil {
- log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err)
- return
- }
+ trie := sf.p.getRootTrie()
sf.trie = trie
} else {
-		// The trie argument can be nil as verkle doesn't support prefetching
-		// yet. TODO FIX IT(rjl493456442), otherwise code will panic here.
-		trie, err := sf.db.OpenStorageTrie(sf.state, sf.addr, sf.root, nil)
+		// Pass the shared root trie so storage tries opened through the KV
+		// shim can resolve accounts via the account trie.
+		trie, err := sf.db.OpenStorageTrie(sf.state, sf.addr, sf.root, sf.p.getRootTrie())
if err != nil {
log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err)
return
diff --git a/core/state/trie_prefetcher.libevm.go b/core/state/trie_prefetcher.libevm.go
index 7a2a35ddc8..b2eb9deee6 100644
--- a/core/state/trie_prefetcher.libevm.go
+++ b/core/state/trie_prefetcher.libevm.go
@@ -110,7 +110,7 @@ func (p *subfetcherPool) execute(fn func(Trie)) {
// and logging errors. See [subfetcherPool.execute] re worker pools.
func (p *subfetcherPool) GetAccount(addr common.Address) {
p.execute(func(t Trie) {
- if _, err := t.GetAccount(addr); err != nil {
+ if _, err := t.PrefetchAccount(addr); err != nil {
log.Error("account prefetching failed", "address", addr, "err", err)
}
})
@@ -119,7 +119,7 @@ func (p *subfetcherPool) GetAccount(addr common.Address) {
// GetStorage is the storage equivalent of [subfetcherPool.GetAccount].
func (p *subfetcherPool) GetStorage(addr common.Address, key []byte) {
p.execute(func(t Trie) {
- if _, err := t.GetStorage(addr, key); err != nil {
+ if _, err := t.PrefetchStorage(addr, key); err != nil {
log.Error("storage prefetching failed", "address", addr, "key", key, "err", err)
}
})
diff --git a/core/state_manager.go b/core/state_manager.go
index 59447055b7..f28278100c 100644
--- a/core/state_manager.go
+++ b/core/state_manager.go
@@ -31,9 +31,9 @@ import (
"math/rand"
"time"
- "github.com/ava-labs/coreth/core/types"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
)
func init() {
@@ -58,9 +58,9 @@ const (
)
type TrieWriter interface {
- InsertTrie(block *types.Block) error // Handle inserted trie reference of [root]
- AcceptTrie(block *types.Block) error // Mark [root] as part of an accepted block
- RejectTrie(block *types.Block) error // Notify TrieWriter that the block containing [root] has been rejected
+ InsertTrie(block Block) error // Handle inserted trie reference of [root]
+ AcceptTrie(block Block) error // Mark [root] as part of an accepted block
+ RejectTrie(block Block) error // Notify TrieWriter that the block containing [root] has been rejected
Shutdown() error
}
@@ -71,6 +71,13 @@ type TrieDB interface {
Cap(limit common.StorageSize) error
}
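+// NoDerefTrieDB wraps a TrieDB, turning Dereference and Cap into no-ops for
+// schemes (e.g. path-based) that manage trie memory themselves.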
+type NoDerefTrieDB struct {
+ TrieDB
+}
+
+func (nd *NoDerefTrieDB) Dereference(root common.Hash) error { return nil }
+func (nd *NoDerefTrieDB) Cap(limit common.StorageSize) error { return nil }
+
func NewTrieWriter(db TrieDB, config *CacheConfig) TrieWriter {
if config.Pruning {
cm := &cappedMemoryTrieWriter{
@@ -90,23 +97,29 @@ func NewTrieWriter(db TrieDB, config *CacheConfig) TrieWriter {
}
}
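+// Block is the minimal subset of *types.Block required by TrieWriter,
+// allowing callers to substitute blocks carrying an alternate state root.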
+type Block interface {
+ Root() common.Hash
+ Hash() common.Hash
+ NumberU64() uint64
+}
+
type noPruningTrieWriter struct {
TrieDB
}
-func (np *noPruningTrieWriter) InsertTrie(block *types.Block) error {
+func (np *noPruningTrieWriter) InsertTrie(block Block) error {
// We don't attempt to [Cap] here because we should never have
// a significant amount of [TrieDB.Dirties] (we commit each block).
return nil
}
-func (np *noPruningTrieWriter) AcceptTrie(block *types.Block) error {
+func (np *noPruningTrieWriter) AcceptTrie(block Block) error {
// We don't need to call [Dereference] on the block root at the end of this
// function because it is removed from the [TrieDB.Dirties] map in [Commit].
return np.TrieDB.Commit(block.Root(), false)
}
-func (np *noPruningTrieWriter) RejectTrie(block *types.Block) error {
+func (np *noPruningTrieWriter) RejectTrie(block Block) error {
return np.TrieDB.Dereference(block.Root())
}
@@ -123,13 +136,14 @@ type cappedMemoryTrieWriter struct {
tipBuffer *BoundedBuffer[common.Hash]
}
-func (cm *cappedMemoryTrieWriter) InsertTrie(block *types.Block) error {
+func (cm *cappedMemoryTrieWriter) InsertTrie(block Block) error {
// The use of [Cap] in [InsertTrie] prevents exceeding the configured memory
// limit (and OOM) in case there is a large backlog of processing (unaccepted) blocks.
_, nodes, imgs := cm.TrieDB.Size() // all memory is contained within the nodes return for hashdb
if nodes <= cm.memoryCap && imgs <= cm.imageCap {
return nil
}
+ log.Warn("Trie memory cap exceeded, capping trie", "block", block.Hash().Hex(), "nodes", nodes, "images", imgs)
if err := cm.TrieDB.Cap(cm.memoryCap - ethdb.IdealBatchSize); err != nil {
return fmt.Errorf("failed to cap trie for block %s: %w", block.Hash().Hex(), err)
}
@@ -137,7 +151,7 @@ func (cm *cappedMemoryTrieWriter) InsertTrie(block *types.Block) error {
return nil
}
-func (cm *cappedMemoryTrieWriter) AcceptTrie(block *types.Block) error {
+func (cm *cappedMemoryTrieWriter) AcceptTrie(block Block) error {
root := block.Root()
// Attempt to dereference roots at least [tipBufferSize] old (so queries at tip
@@ -152,7 +166,7 @@ func (cm *cappedMemoryTrieWriter) AcceptTrie(block *types.Block) error {
// Commit this root if we have reached the [commitInterval].
modCommitInterval := block.NumberU64() % cm.commitInterval
if modCommitInterval == 0 {
- if err := cm.TrieDB.Commit(root, true); err != nil {
+ if err := cm.TrieDB.Commit(root, false); err != nil {
return fmt.Errorf("failed to commit trie for block %s: %w", block.Hash().Hex(), err)
}
return nil
@@ -185,7 +199,7 @@ func (cm *cappedMemoryTrieWriter) AcceptTrie(block *types.Block) error {
return nil
}
-func (cm *cappedMemoryTrieWriter) RejectTrie(block *types.Block) error {
+func (cm *cappedMemoryTrieWriter) RejectTrie(block Block) error {
cm.TrieDB.Dereference(block.Root())
return nil
}
diff --git a/core/state_prefetcher.go b/core/state_prefetcher.go
new file mode 100644
index 0000000000..c33bbae7a2
--- /dev/null
+++ b/core/state_prefetcher.go
@@ -0,0 +1,254 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+ "github.com/ava-labs/coreth/consensus"
+ "github.com/ava-labs/coreth/core/state"
+ "github.com/ava-labs/coreth/core/state/snapshot"
+ "github.com/ava-labs/coreth/core/types"
+ "github.com/ava-labs/coreth/core/vm"
+ "github.com/ava-labs/coreth/params"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/rlp"
+ "golang.org/x/sync/errgroup"
+)
+
+// statePrefetcher is a basic Prefetcher, which blindly executes a block on top
+// of an arbitrary state with the goal of prefetching potentially useful state
+// data from disk before the main block processor start executing.
+type statePrefetcher struct {
+ config *params.ChainConfig // Chain configuration options
+ bc *BlockChain // Canonical block chain
+ engine consensus.Engine // Consensus engine used for block rewards
+}
+
+// newStatePrefetcher initialises a new statePrefetcher.
+func newStatePrefetcher(config *params.ChainConfig, bc *BlockChain, engine consensus.Engine) *statePrefetcher {
+ return &statePrefetcher{
+ config: config,
+ bc: bc,
+ engine: engine,
+ }
+}
+
+// tape is a flat record of snapshot reads: each account or storage value is
+// stored as a one-byte length followed by the value bytes.
+type tape []byte
+
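+// RecordAccountRead and RecordStorageRead append a value to the tape as a
+// one-byte length prefix followed by the bytes; this assumes every recorded
+// value (slim account RLP, storage RLP) is shorter than 256 bytes.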
+func (t *tape) RecordAccountRead(_ common.Hash, val []byte) error {
+ *t = append(*t, byte(len(val)))
+ *t = append(*t, val...)
+ return nil
+}
+
+func (t *tape) RecordStorageRead(_, _ common.Hash, val []byte) error {
+ *t = append(*t, byte(len(val)))
+ *t = append(*t, val...)
+ return nil
+}
+
+func (t *tape) RecordTransactionEnd() error {
+ return nil
+}
+
+func (t tape) Len() int { return len(t) }
+
+// Prefetch processes the state changes according to the Ethereum rules by running
+// the transaction messages using the statedb, but any changes are discarded. The
+// only goal is to pre-cache transaction signatures and snapshot entries, while
+// recording every snapshot read into [recorded] for later replay.
+func (p *statePrefetcher) Prefetch(block *types.Block, parentRoot common.Hash, cfg vm.Config, recorded *tape) {
+ if p.bc.snaps == nil {
+ log.Warn("Skipping prefetching transactions without snapshot cache")
+ return
+ }
+ snap := p.bc.snaps.Snapshot(parentRoot)
+ if snap == nil {
+ log.Warn("Skipping prefetching transactions without snapshot cache")
+ return
+ }
+
+ var (
+ header = block.Header()
+ gaspool = new(GasPool).AddGas(block.GasLimit())
+ blockContext = NewEVMBlockContext(header, p.bc, nil)
+ signer = types.MakeSigner(p.config, header.Number, header.Time)
+ )
+ recorder := &snapRecorder{Snapshot: snap, writer: recorded}
+ statedb, err := state.NewWithSnapshot(parentRoot, p.bc.stateCache, recorder)
+ if err != nil {
+ return
+ }
+
+ // Configure any upgrades that should go into effect during this block.
+ parent := p.bc.GetHeader(block.ParentHash(), block.NumberU64()-1)
+ err = ApplyUpgrades(p.config, &parent.Time, block, statedb)
+ if err != nil {
+ log.Error("failed to configure precompiles processing block", "hash", block.Hash(), "number", block.NumberU64(), "timestamp", block.Time(), "err", err)
+ }
+
+ if beaconRoot := block.BeaconRoot(); beaconRoot != nil {
+ vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, p.config, cfg)
+ ProcessBeaconBlockRoot(*beaconRoot, vmenv, statedb)
+ }
+ var eg errgroup.Group
+	eg.SetLimit(1) // Run transactions serially for now; statedb is not safe for concurrent use.
+ // Iterate over and process the individual transactions
+ results := make([]*ExecutionResult, len(block.Transactions()))
+ for i, tx := range block.Transactions() {
+ evm := vm.NewEVM(blockContext, vm.TxContext{}, statedb, p.config, cfg)
+ eg.Go(func() error {
+ // Convert the transaction into an executable message and pre-cache its sender
+ msg, err := TransactionToMessage(tx, signer, header.BaseFee)
+ if err != nil {
+ return err // Also invalid block, bail out
+ }
+ statedb.SetTxContext(tx.Hash(), i)
+ if results[i], err = precacheTransaction(msg, p.config, gaspool, statedb, header, evm); err != nil {
+				// NOTE: We don't care that the transaction failed, we just want to pre-cache
+ return err // Ugh, something went horribly wrong, bail out
+ }
+ return nil
+ })
+ // If we're pre-byzantium, pre-load trie nodes for the intermediate root
+ // if !byzantium {
+ // statedb.IntermediateRoot(true)
+ // }
+ }
+ // NOTE: For now I don't want to deal with trie nodes, just snapshot cache.
+ // If were post-byzantium, pre-load trie nodes for the final root hash
+ // if byzantium {
+ // statedb.IntermediateRoot(true)
+ // }
+
+ // Wait for all transactions to be processed
+ if err := eg.Wait(); err != nil {
+ log.Error("Unexpected failure in pre-caching transactions", "err", err)
+ }
+
+	// Hack: build minimal receipts with only the gas used populated, which is
+	// all Finalize needs here.
+ receipts := make(types.Receipts, len(block.Transactions()))
+ for i, tx := range block.Transactions() {
+ receipts[i] = &types.Receipt{
+ TxHash: tx.Hash(),
+ }
+ if results[i] != nil {
+ receipts[i].GasUsed = results[i].UsedGas
+ }
+ }
+ if err := p.engine.Finalize(p.bc, block, parent, statedb, receipts); err != nil {
+ log.Error("Failed to finalize block", "err", err)
+ }
+ *recorded = *recorder.writer.(*tape)
+}
+
+// precacheTransaction attempts to apply a transaction to the given state database
+// and uses the input parameters for its environment. The goal is not to execute
+// the transaction successfully, rather to warm up touched data slots.
+func precacheTransaction(msg *Message, config *params.ChainConfig, gaspool *GasPool, statedb vmStateDB, header *types.Header, evm *vm.EVM) (*ExecutionResult, error) {
+ // Update the evm with the new transaction context.
+ evm.Reset(NewEVMTxContext(msg), statedb)
+ // Add addresses to access list if applicable
+ er, err := ApplyMessage(evm, msg, gaspool)
+ if err != nil {
+ return nil, err
+ }
+ statedb.Finalise(true)
+ return er, err
+}
+
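+// vmStateDB is the subset of *state.StateDB used when applying transactions,
+// abstracted so alternate statedb implementations can be substituted.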
+type vmStateDB interface {
+ vm.StateDB
+ Finalise(bool)
+ IntermediateRoot(bool) common.Hash
+ SetTxContext(common.Hash, int)
+ TxIndex() int
+ GetLogs(common.Hash, uint64, common.Hash) []*types.Log
+}
+
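+// writer receives the value of every snapshot read, in order, so reads can be
+// logged or replayed later.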
+type writer interface {
+ RecordStorageRead(common.Hash, common.Hash, []byte) error
+ RecordAccountRead(common.Hash, []byte) error
+ RecordTransactionEnd() error
+}
+
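+// snapRecorder wraps a snapshot and forwards each account/storage read to the
+// configured writer.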
+type snapRecorder struct {
+ snapshot.Snapshot
+ writer writer
+}
+
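+// snapReplay serves account and storage reads from a previously recorded
+// tape; reads must occur in exactly the order they were recorded.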
+type snapReplay struct {
+ snapshot.Snapshot
+
+ tape tape
+}
+
+func (s *snapRecorder) Account(accHash common.Hash) (*types.SlimAccount, error) {
+ acc, err := s.Snapshot.Account(accHash)
+ if err != nil {
+ return nil, err
+ }
+ if acc == nil {
+ // fmt.Println("nil account added")
+ err := s.writer.RecordAccountRead(accHash, nil)
+ return nil, err
+ }
+
+ rlp, err := rlp.EncodeToBytes(acc)
+ if err != nil {
+ return nil, err
+ }
+ // fmt.Println("account added", len(rlp))
+ err = s.writer.RecordAccountRead(accHash, rlp)
+ return acc, err
+}
+
+func (s *snapRecorder) Storage(accHash common.Hash, hash common.Hash) ([]byte, error) {
+ val, err := s.Snapshot.Storage(accHash, hash)
+ if err != nil {
+ return nil, err
+ }
+ // fmt.Println("storage added", len(val))
+ err = s.writer.RecordStorageRead(accHash, hash, val)
+ return val, err
+}
+
+func (s *snapReplay) Account(accHash common.Hash) (*types.SlimAccount, error) {
+ length := int(s.tape[0])
+ s.tape = s.tape[1:]
+ if length == 0 {
+ // fmt.Println("nil account replayed")
+ return nil, nil
+ }
+
+ // fmt.Println("account replayed", length)
+ acc := new(types.SlimAccount)
+ if err := rlp.DecodeBytes(s.tape[:length], acc); err != nil {
+ return nil, err
+ }
+ s.tape = s.tape[length:]
+ return acc, nil
+}
+
+func (s *snapReplay) Storage(accHash common.Hash, hash common.Hash) ([]byte, error) {
+ length := int(s.tape[0])
+ s.tape = s.tape[1:]
+ // fmt.Println("storage replayed", length)
+
+ val := s.tape[:length]
+ s.tape = s.tape[length:]
+ return val, nil
+}
diff --git a/core/state_processor.go b/core/state_processor.go
index af5b888686..eab72b0568 100644
--- a/core/state_processor.go
+++ b/core/state_processor.go
@@ -69,7 +69,7 @@ func NewStateProcessor(config *params.ChainConfig, bc *BlockChain, engine consen
// Process returns the receipts and logs accumulated during the process and
// returns the amount of gas that was used in the process. If any of the
// transactions failed to execute due to insufficient gas it will return an error.
-func (p *StateProcessor) Process(block *types.Block, parent *types.Header, statedb *state.StateDB, cfg vm.Config) (types.Receipts, []*types.Log, uint64, error) {
+func (p *StateProcessor) Process(block *types.Block, parent *types.Header, statedb *state.StateDB, cfg vm.Config, notify ...writer) (types.Receipts, []*types.Log, uint64, error) {
var (
receipts types.Receipts
usedGas = new(uint64)
@@ -108,6 +108,11 @@ func (p *StateProcessor) Process(block *types.Block, parent *types.Header, state
}
receipts = append(receipts, receipt)
allLogs = append(allLogs, receipt.Logs...)
+ for _, w := range notify {
+ if err := w.RecordTransactionEnd(); err != nil {
+ return nil, nil, 0, fmt.Errorf("could not record transaction end: %w", err)
+ }
+ }
}
// Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
if err := p.engine.Finalize(p.bc, block, parent, statedb, receipts); err != nil {
@@ -117,7 +122,7 @@ func (p *StateProcessor) Process(block *types.Block, parent *types.Header, state
return receipts, allLogs, *usedGas, nil
}
-func applyTransaction(msg *Message, config *params.ChainConfig, gp *GasPool, statedb *state.StateDB, blockNumber *big.Int, blockHash common.Hash, tx *types.Transaction, usedGas *uint64, evm *vm.EVM) (*types.Receipt, error) {
+func applyTransaction(msg *Message, config *params.ChainConfig, gp *GasPool, statedb vmStateDB, blockNumber *big.Int, blockHash common.Hash, tx *types.Transaction, usedGas *uint64, evm *vm.EVM) (*types.Receipt, error) {
// Create a new context to be used in the EVM environment.
txContext := NewEVMTxContext(msg)
evm.Reset(txContext, statedb)
diff --git a/core/types.go b/core/types.go
index 77e6dd4d2b..8c6baea6d9 100644
--- a/core/types.go
+++ b/core/types.go
@@ -49,5 +49,5 @@ type Processor interface {
// Process processes the state changes according to the Ethereum rules by running
// the transaction messages using the statedb and applying any rewards to both
-	// the processor (coinbase) and any included uncles.
-	Process(block *types.Block, parent *types.Header, statedb *state.StateDB, cfg vm.Config) (types.Receipts, []*types.Log, uint64, error)
+	// the processor (coinbase) and any included uncles. Optional writers are
+	// notified at the end of each processed transaction.
+	Process(block *types.Block, parent *types.Header, statedb *state.StateDB, cfg vm.Config, notify ...writer) (types.Receipts, []*types.Log, uint64, error)
}
diff --git a/eth/backend.go b/eth/backend.go
index 79dce86db3..1494d6fc11 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -55,6 +55,7 @@ import (
"github.com/ava-labs/coreth/node"
"github.com/ava-labs/coreth/params"
"github.com/ava-labs/coreth/rpc"
+ "github.com/ava-labs/coreth/triedb"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
@@ -228,6 +229,9 @@ func New(
StateScheme: scheme,
}
)
+ if config.KVBackend != nil {
+ cacheConfig.KeyValueDB = &triedb.KeyValueConfig{KVBackend: config.KVBackend}
+ }
if err := eth.precheckPopulateMissingTries(); err != nil {
return nil, err
diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go
index f7697112ef..25f465d508 100644
--- a/eth/ethconfig/config.go
+++ b/eth/ethconfig/config.go
@@ -35,6 +35,7 @@ import (
"github.com/ava-labs/coreth/eth/gasprice"
"github.com/ava-labs/coreth/miner"
"github.com/ava-labs/coreth/params"
+ "github.com/ava-labs/coreth/triedb"
"github.com/ethereum/go-ethereum/common"
)
@@ -107,6 +108,9 @@ type Config struct {
SnapshotCache int
Preimages bool
+	// KVBackend optionally provides a key-value backend for the state trie
+	// shim in place of the default trie database.
+	KVBackend triedb.KVBackend
+
// AcceptedCacheSize is the depth of accepted headers cache and accepted
// logs cache at the accepted tip.
AcceptedCacheSize int
diff --git a/go.mod b/go.mod
index 0b095d2f54..9a23febbee 100644
--- a/go.mod
+++ b/go.mod
@@ -4,7 +4,9 @@ go 1.22.8
require (
github.com/VictoriaMetrics/fastcache v1.12.1
+ github.com/Yiling-J/theine-go v0.6.0
github.com/ava-labs/avalanchego v1.12.2-0.20250106102004-902377d447ba
+ github.com/ava-labs/firewood/ffi/v2 v2.0.0-20250115224253-5544080dfc47
github.com/cespare/cp v0.1.0
github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233
github.com/davecgh/go-spew v1.1.1
@@ -19,12 +21,14 @@ require (
github.com/gorilla/websocket v1.5.0
github.com/hashicorp/go-bexpr v0.1.10
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d
+ github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4
github.com/holiman/bloomfilter/v2 v2.0.3
github.com/holiman/uint256 v1.2.4
github.com/kylelemons/godebug v1.1.0
github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-isatty v0.0.17
+ github.com/maypok86/otter v1.2.4
github.com/olekukonko/tablewriter v0.0.5
github.com/prometheus/client_golang v1.16.0
github.com/prometheus/client_model v0.3.0
@@ -36,6 +40,7 @@ require (
github.com/stretchr/testify v1.9.0
github.com/tyler-smith/go-bip39 v1.1.0
github.com/urfave/cli/v2 v2.25.7
+ github.com/valyala/histogram v1.2.0
go.uber.org/goleak v1.3.0
go.uber.org/mock v0.5.0
golang.org/x/crypto v0.31.0
@@ -68,7 +73,9 @@ require (
github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect
github.com/dlclark/regexp2 v1.7.0 // indirect
+ github.com/dolthub/maphash v0.1.0 // indirect
github.com/ethereum/c-kzg-4844 v0.4.0 // indirect
+ github.com/gammazero/deque v1.0.0 // indirect
github.com/getsentry/sentry-go v0.18.0 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
@@ -83,6 +90,7 @@ require (
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/klauspost/compress v1.15.15 // indirect
+ github.com/klauspost/cpuid/v2 v2.0.9 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/magiconair/properties v1.8.6 // indirect
@@ -109,8 +117,10 @@ require (
github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
+ github.com/valyala/fastrand v1.1.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
+ github.com/zeebo/xxh3 v1.0.2 // indirect
go.opentelemetry.io/otel v1.22.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 // indirect
@@ -133,3 +143,5 @@ require (
gopkg.in/yaml.v3 v3.0.1 // indirect
rsc.io/tmplfunc v0.0.3 // indirect
)
diff --git a/go.sum b/go.sum
index 9c19c061be..622f64ab0a 100644
--- a/go.sum
+++ b/go.sum
@@ -49,6 +49,8 @@ github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKz
github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40=
github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o=
+github.com/Yiling-J/theine-go v0.6.0 h1:jv7V/tcD6ijL0T4kfbJDKP81TCZBkoriNTPSqwivWuY=
+github.com/Yiling-J/theine-go v0.6.0/go.mod h1:mdch1vjgGWd7s3rWKvY+MF5InRLfRv/CWVI9RVNQ8wY=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
@@ -56,6 +58,8 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/ava-labs/avalanchego v1.12.2-0.20250106102004-902377d447ba h1:7t2ORGM53sqdsczNZGFQIK99of9yeetCld90keJ47Os=
github.com/ava-labs/avalanchego v1.12.2-0.20250106102004-902377d447ba/go.mod h1:oK/C7ZGo5cAEayBKBoawh2EpOo3E9gD1rpd9NAM0RkQ=
+github.com/ava-labs/firewood/ffi/v2 v2.0.0-20250115224253-5544080dfc47 h1:us8sfppAqnSwNLnvG8AKhxHkQijYkoZMlg79Q3fyU3k=
+github.com/ava-labs/firewood/ffi/v2 v2.0.0-20250115224253-5544080dfc47/go.mod h1:BvKhmb9EeMKscnTWGQFqnT5vdqEMneG/hR6Sns7GBxg=
github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
@@ -150,6 +154,8 @@ github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUn
github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
github.com/dlclark/regexp2 v1.7.0 h1:7lJfhqlPssTb1WQx4yvTHN0uElPEv52sbaECrAQxjAo=
github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
+github.com/dolthub/maphash v0.1.0 h1:bsQ7JsF4FkkWyrP3oCnFJgrCUAFbFf3kOl4L/QxPDyQ=
+github.com/dolthub/maphash v0.1.0/go.mod h1:gkg4Ch4CdCDu5h6PMriVLawB7koZ+5ijb9puGMV50a4=
github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk=
github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 h1:qwcF+vdFrvPSEUDSX5RVoRccG8a5DhOdWdQ4zN62zzo=
github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4=
@@ -178,6 +184,8 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
+github.com/gammazero/deque v1.0.0 h1:LTmimT8H7bXkkCy6gZX7zNLtkbz4NdS2z8LZuor3j34=
+github.com/gammazero/deque v1.0.0/go.mod h1:iflpYvtGfM3U8S8j+sZEKIak3SAKYpA5/SQewgfXDKo=
github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc=
github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 h1:f6D9Hr8xV8uYKlyuj8XIruxlh9WjVjdh1gIicAS7ays=
github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
@@ -313,6 +321,8 @@ github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
+github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 h1:X4egAf/gcS1zATw6wn4Ej8vjuVGxeHdan+bRb2ebyv4=
@@ -355,6 +365,8 @@ github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0
github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw=
github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4=
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
+github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4=
+github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
@@ -393,6 +405,8 @@ github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/maypok86/otter v1.2.4 h1:HhW1Pq6VdJkmWwcZZq19BlEQkHtI8xgsQzBVXJU0nfc=
+github.com/maypok86/otter v1.2.4/go.mod h1:mKLfoI7v1HOmQMwFgX4QkRk23mX6ge3RDvjdHOWG4R4=
github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8=
github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
@@ -537,8 +551,12 @@ github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6S
github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w=
+github.com/valyala/fastrand v1.1.0 h1:f+5HkLW4rsgzdNoleUOB69hyT9IlD2ZQh9GyDMfb5G8=
+github.com/valyala/fastrand v1.1.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002oeRzjapQ=
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
+github.com/valyala/histogram v1.2.0 h1:wyYGAZZt3CpwUiIb9AU/Zbllg1llXyrtApRS815OLoQ=
+github.com/valyala/histogram v1.2.0/go.mod h1:Hb4kBwb4UxsaNbbbh+RRz8ZR6pdodR57tzWUS3BUzXY=
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
@@ -558,6 +576,10 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
+github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ=
+github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
+github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0=
+github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index 9e7ad54dde..8360702559 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -712,7 +712,7 @@ func (s *BlockChainAPI) GetProof(ctx context.Context, address common.Address, st
if err != nil {
return nil, err
}
- storageTrie = st
+ storageTrie = state.LegacyAdapter{Itrie: st}
}
// Create the proofs for the storageKeys.
for i, key := range keys {
diff --git a/plugin/evm/cache_test.go b/plugin/evm/cache_test.go
new file mode 100644
index 0000000000..b9ad4b128e
--- /dev/null
+++ b/plugin/evm/cache_test.go
@@ -0,0 +1,21 @@
+package evm
+
+import (
+ "testing"
+
+ lru "github.com/hashicorp/golang-lru/v2"
+ "github.com/stretchr/testify/require"
+)
+
+// TestCacheEvictionPolicy exercises the LRU eviction callback by repeatedly
+// re-adding a small hot set (keys 0-31) while streaming in new keys.
+func TestCacheEvictionPolicy(t *testing.T) {
+ onEvict := func(k uint64, v string) {
+ t.Logf("evicting key: %v, value: %v", k, v)
+ }
+ cache, err := lru.NewWithEvict(1024, onEvict)
+ require.NoError(t, err)
+
+ for i := 0; i < 2048; i++ {
+ cache.Add(uint64(i%32), "value2")
+ cache.Add(uint64(i), "value")
+ }
+}
diff --git a/plugin/evm/config/config.go b/plugin/evm/config/config.go
index 3104b0faa9..0d812961d0 100644
--- a/plugin/evm/config/config.go
+++ b/plugin/evm/config/config.go
@@ -227,6 +227,9 @@ type Config struct {
// RPC settings
HttpBodyLimit uint64 `json:"http-body-limit"`
+
+ // Firewood settings
+ FirewoodDBFile string `json:"firewood-db-file"`
}
// TxPoolConfig contains the transaction pool config to be passed
diff --git a/plugin/evm/post_processing_test.go b/plugin/evm/post_processing_test.go
new file mode 100644
index 0000000000..6a253b7734
--- /dev/null
+++ b/plugin/evm/post_processing_test.go
@@ -0,0 +1,697 @@
+package evm
+
+import (
+ "fmt"
+ "io"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/VictoriaMetrics/fastcache"
+ "github.com/Yiling-J/theine-go"
+ "github.com/ava-labs/avalanchego/database/prefixdb"
+ "github.com/ava-labs/avalanchego/utils/units"
+ "github.com/ava-labs/coreth/core/rawdb"
+ "github.com/ava-labs/coreth/core/state/snapshot"
+ "github.com/ava-labs/coreth/plugin/evm/database"
+ "github.com/ava-labs/coreth/shim/legacy"
+ "github.com/ava-labs/coreth/triedb"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
+ lru "github.com/hashicorp/golang-lru/v2"
+ "github.com/maypok86/otter"
+ "github.com/stretchr/testify/require"
+ "github.com/valyala/histogram"
+ "golang.org/x/crypto/sha3"
+)
+
+type totals struct {
+ blocks uint64
+ txs uint64
+ atomicTxs uint64
+ accountReads uint64
+ storageReads uint64
+ accountWrites uint64
+ storageWrites uint64
+ accountUpdates uint64
+ storageUpdates uint64
+ accountDeletes uint64
+ storageDeletes uint64
+
+ // These are int64 as we want to compute the difference (since last log),
+ // and state may be deleted.
+ accounts int64
+ storage int64
+
+ // cache stats
+ accountReadHits uint64
+ storageReadHits uint64
+ accountWriteHits uint64
+ storageWriteHits uint64
+ writeCacheEvictAccount uint64
+ writeCacheEvictStorage uint64
+
+ // eviction (historical state storage update) time
+ writeCacheEvictTime time.Duration
+
+ // update time (historical state state commitment + persistence)
+ storageUpdateTime time.Duration
+ storageUpdateCount uint64
+ storagePersistTime time.Duration
+ storagePersistCount uint64
+}
+
+type cacheIntf interface {
+ GetAndSet(k string, v []byte) bool
+ Delete(k string)
+ Len() int
+ EstimatedSize() int
+}
+
+type fastCache struct {
+ cache *fastcache.Cache
+}
+
+func (c *fastCache) GetAndSet(k string, v []byte) bool {
+ found := c.cache.Has([]byte(k))
+ c.cache.Set([]byte(k), v)
+ return found
+}
+
+func (c *fastCache) Len() int {
+ var stats fastcache.Stats
+ c.cache.UpdateStats(&stats)
+ return int(stats.EntriesCount)
+}
+
+func (c *fastCache) EstimatedSize() int {
+ var stats fastcache.Stats
+ c.cache.UpdateStats(&stats)
+ return int(stats.BytesSize)
+}
+
+func (c *fastCache) Delete(k string) {
+ c.cache.Del([]byte(k))
+}
+
+type theineCache struct {
+ *theine.Cache[string, []byte]
+}
+
+func (c *theineCache) GetAndSet(k string, v []byte) bool {
+ _, found := c.Cache.Get(k)
+ c.Cache.Set(k, v, int64(len(k)+len(v)))
+ return found
+}
+
+type otterCache struct {
+ otter.Cache[string, []byte]
+}
+
+func (c *otterCache) GetAndSet(k string, v []byte) bool {
+ _, found := c.Cache.Get(k)
+ c.Cache.Set(k, v)
+ return found
+}
+
+func (c *otterCache) EstimatedSize() int {
+ return c.Cache.Capacity()
+}
+
+func (c *otterCache) Len() int {
+ return c.Cache.Size()
+}
+
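+// noCache satisfies both cache interfaces without retaining anything: Get
+// always misses and Add hands every entry straight to onEvict, modelling a
+// zero-capacity buffer.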
+type noCache[K, V any] struct {
+ onEvict func(k K, v V)
+}
+
+func (c *noCache[K, V]) Get(k K) (v V, ok bool) { return }
+func (c *noCache[K, V]) GetAndSet(k K, v V) bool { return false }
+func (c *noCache[K, V]) Delete(k K) {}
+func (c *noCache[K, V]) Len() int { return 0 }
+func (c *noCache[K, V]) EstimatedSize() int { return 0 }
+func (c *noCache[K, V]) GetOldest() (k K, v V, ok bool) { return }
+
+func (c *noCache[K, V]) Add(k K, v V) bool {
+ if c.onEvict != nil {
+ c.onEvict(k, v)
+ }
+ return false
+}
+
+type withUpdatedAt struct {
+ val []byte
+ updatedAt uint64
+}
+
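+// writeCache is the subset of the LRU API used to buffer state writes before
+// they are flushed to the storage backend; GetOldest only feeds the age
+// statistic in the periodic log line.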
+type writeCache[K, V any] interface {
+ Get(k K) (V, bool)
+ Add(k K, v V) bool
+ GetOldest() (K, V, bool)
+ Len() int
+}
+
+func TestPostProcess(t *testing.T) {
+ if tapeDir == "" {
+ t.Skip("No tape directory provided")
+ }
+ start, end := startBlock, endBlock
+ if start == 0 {
+ start = 1 // TODO: Verify whether genesis outs were recorded in the first block
+ }
+
+ var cache cacheIntf
+ cacheBytes := readCacheSize * units.MiB
+ switch readCacheBackend {
+ case "fastcache":
+ cache = &fastCache{cache: fastcache.New(int(cacheBytes))}
+ case "theine":
+ impl, err := theine.NewBuilder[string, []byte](cacheBytes).Build()
+ require.NoError(t, err)
+ cache = &theineCache{Cache: impl}
+ case "otter":
+ impl, err := otter.MustBuilder[string, []byte](int(cacheBytes)).
+ CollectStats().
+ Cost(func(key string, value []byte) uint32 {
+ return uint32(len(key) + len(value))
+ }).Build()
+ require.NoError(t, err)
+ cache = &otterCache{Cache: impl}
+ case "none":
+ cache = &noCache[string, []byte]{}
+ default:
+ t.Fatalf("Unknown cache backend: %s", readCacheBackend)
+ }
+
+ var (
+ dbs dbs
+ sourceDb ethdb.Database
+ sum totals
+ blockNumber uint64
+ storageRoot common.Hash
+ storage triedb.KVBackend
+ evictedKs [][]byte
+ evictedVs [][]byte
+ lastCommit struct {
+ txs uint64
+ number uint64
+ }
+ commitLock sync.Mutex
+ )
+ if sourceDbDir != "" {
+ sourceDb = openSourceDB(t)
+ defer sourceDb.Close()
+ }
+
+ if storageBackend != "none" {
+ dbs = openDBs(t)
+ defer dbs.Close()
+ CleanupOnInterrupt(func() {
+ commitLock.Lock()
+ dbs.Close()
+ commitLock.Unlock()
+ })
+
+ lastHash, lastRoot, lastHeight := getMetadata(dbs.metadata)
+ t.Logf("Persisted metadata: Last hash: %x, Last root: %x, Last height: %d", lastHash, lastRoot, lastHeight)
+ lastCommit.number = lastHeight
+
+ if usePersistedStartBlock {
+ start = lastHeight + 1
+ }
+ require.Equal(t, lastHeight+1, start, "Last height does not match start block")
+
+ storage = getKVBackend(t, storageBackend, dbs.merkledb)
+ if storageBackend == "legacy" {
+ cacheConfig := getCacheConfig(t, storageBackend, storage)
+ tdbConfig := cacheConfig.TrieDBConfig()
+ tdb := triedb.NewDatabase(dbs.chain, tdbConfig)
+ legacyStore := legacy.New(tdb, lastRoot, lastHeight, true)
+ // install selfdestruct re-use detection if requested
+ if trackDeletedTries {
+ store := prefixdb.New([]byte("trackDeletedTries"), dbs.metadata)
+ legacyStore.TrackDeletedTries(rawdb.NewDatabase(database.WrapDatabase(store)))
+ t.Logf("Enabled trackDeletedTries")
+
+ it := store.NewIterator()
+ for it.Next() {
+ t.Logf("starting with trackDeletedTries: %x", it.Key())
+ }
+ require.NoError(t, it.Error())
+ it.Release()
+ }
+ if writeCacheSize > 0 {
+ // Account roots will differ when writes are buffered: the store tracks the
+ // actual roots, which differ from what the tape puts in the trie.
+ legacyStore.DisableAccountRootCheck()
+ }
+ storage = legacyStore
+ }
+ require.Equal(t, lastRoot, common.BytesToHash(storage.Root()), "Root mismatch")
+ storageRoot = lastRoot
+ t.Logf("Storage backend initialized: %s", storageBackend)
+ }
+
+ hst := histogram.NewFast()
+ hstWithReset := histogram.NewFast()
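+ // inf is a sentinel age recorded for write-cache misses so that misses
+ // surface as "inf" entries in the quantile reports below.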
+ inf := float64(1_000_000_000)
+ onEvict := func(k string, v withUpdatedAt) {
+ now := time.Now()
+ if len(k) == 32 {
+ sum.writeCacheEvictAccount++
+ } else {
+ sum.writeCacheEvictStorage++
+ }
+ // t.Logf("evicting key: %x @ block %d, updatedAt: %d (%d blocks ago)", short(k), blockNumber, v.updatedAt, blockNumber-v.updatedAt)
+ if storage != nil {
+ evictedKs = append(evictedKs, []byte(k))
+ evictedVs = append(evictedVs, v.val)
+ }
+ sum.writeCacheEvictTime += time.Since(now)
+ }
+
+ var writeCache writeCache[string, withUpdatedAt] = &noCache[string, withUpdatedAt]{
+ onEvict: onEvict,
+ }
+ if writeCacheSize > 0 {
+ var err error
+ writeCache, err = lru.NewWithEvict(int(writeCacheSize), onEvict)
+ require.NoError(t, err)
+ }
+
+ fm := &fileManager{dir: tapeDir, newEach: 10_000}
+
+ var lastReported totals
+ for i := start; i <= end; i++ {
+ r := fm.GetReaderFor(i)
+
+ var err error
+ blockNumber, err = readUint64(r)
+ require.NoError(t, err)
+ require.LessOrEqual(t, blockNumber, i)
+
+ blockHash, err := readHash(r)
+ require.NoError(t, err)
+
+ txs, err := readUint16(r)
+ require.NoError(t, err)
+
+ atomicTxs, err := readUint16(r)
+ require.NoError(t, err)
+
+ tapeResult := &tapeResult{
+ accountReads: make(map[string][]byte),
+ storageReads: make(map[string][]byte),
+ }
+ tapeTxs := processTape(t, r, tapeResult, cache.GetAndSet, &sum, tapeVerbose && blockNumber >= i)
+ require.Equal(t, txs, tapeTxs)
+
+ accountWrites, err := readUint16(r)
+ require.NoError(t, err)
+
+ storageWrites, err := readUint16(r)
+ require.NoError(t, err)
+
+ if blockNumber < i {
+ // Finish reading this block's record without processing it.
+ for j := 0; j < int(accountWrites); j++ {
+ _, _, err := readKV(r, 32)
+ require.NoError(t, err)
+ }
+ for j := 0; j < int(storageWrites); j++ {
+ _, _, err := readKV(r, 64)
+ require.NoError(t, err)
+ }
+ if blockNumber%uint64(logEach) == 0 {
+ t.Logf("Skipping block %d", blockNumber)
+ }
+ i--
+ continue
+ }
+
+ accountUpdates, storageUpdates := 0, 0
+ accountDeletes, storageDeletes := 0, 0
+
+ // 1. Read account writes from the tape as they come first
+ accountWritesBatch := make([]KV, accountWrites)
+ for j := 0; j < int(accountWrites); j++ {
+ k, v, err := readKV(r, 32)
+ require.NoError(t, err)
+ accountWritesBatch[j] = KV{Key: k, Value: v}
+ }
+
+ // 2. Process storage writes
+ for j := 0; j < int(storageWrites); j++ {
+ k, v, err := readKV(r, 64)
+ require.NoError(t, err)
+ if prev, ok := tapeResult.storageReads[string(k)]; ok {
+ if len(prev) > 0 && len(v) == 0 {
+ storageDeletes++
+ } else if len(prev) > 0 || (len(prev) == 0 && len(v) == 0) {
+ storageUpdates++
+ }
+ } else if tapeVerbose {
+ t.Logf("storage write without read: %x -> %x", k, v)
+ }
+ got, found := writeCache.Get(string(k))
+ if found {
+ hst.Update(float64(blockNumber - got.updatedAt))
+ hstWithReset.Update(float64(blockNumber - got.updatedAt))
+ } else {
+ hst.Update(inf)
+ hstWithReset.Update(inf)
+ }
+ writeCache.Add(string(k), withUpdatedAt{val: v, updatedAt: blockNumber})
+ if found {
+ sum.storageWriteHits++
+ }
+
+ if tapeVerbose {
+ t.Logf("storage write: %x -> %x", k, v)
+ }
+ }
+
+ // 3. Process account writes
+ for j := 0; j < int(accountWrites); j++ {
+ k, v := accountWritesBatch[j].Key, accountWritesBatch[j].Value
+ if prev, ok := tapeResult.accountReads[string(k)]; ok {
+ if len(prev) > 0 && len(v) == 0 {
+ accountDeletes++
+ } else if len(prev) > 0 || (len(prev) == 0 && len(v) == 0) {
+ accountUpdates++
+ }
+ } else if tapeVerbose {
+ t.Logf("account write without read: %x -> %x", k, v)
+ }
+ got, found := writeCache.Get(string(k))
+ if found {
+ hst.Update(float64(blockNumber - got.updatedAt))
+ hstWithReset.Update(float64(blockNumber - got.updatedAt))
+ } else {
+ hst.Update(inf)
+ hstWithReset.Update(inf)
+ }
+ writeCache.Add(string(k), withUpdatedAt{val: v, updatedAt: blockNumber})
+ if found {
+ sum.accountWriteHits++
+ }
+
+ if tapeVerbose {
+ t.Logf("account write: %x -> %x", k, v)
+ }
+ }
+
+ sum.blocks++
+ sum.txs += uint64(txs)
+ sum.atomicTxs += uint64(atomicTxs)
+
+ if storage != nil {
+ commitLock.Lock()
+
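+ // Flush the buffered (evicted) writes to the storage backend once either
+ // the block cadence or the transaction cadence threshold is reached.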
+ shouldCommitBlocks := commitEachBlocks > 0 && blockNumber-lastCommit.number >= uint64(commitEachBlocks)
+ shouldCommitTxs := commitEachTxs > 0 && sum.txs+sum.atomicTxs-lastCommit.txs >= uint64(commitEachTxs)
+ if len(evictedKs) > 0 && (shouldCommitBlocks || shouldCommitTxs) {
+ if tapeVerbose {
+ for i, k := range evictedKs {
+ t.Logf("storing: %x -> %x", k, evictedVs[i])
+ }
+ }
+
+ var accsDeleted map[string]int
+ evictedKs, evictedVs, accsDeleted = processAccountDeletes(t, evictedKs, evictedVs)
+ for k := range accsDeleted {
+ deleted, err := storage.PrefixDelete([]byte(k))
+ require.NoError(t, err)
+ if deleted > 0 {
+ t.Logf("Deleted %d keys with prefix %x from storage", deleted, k)
+ }
+ }
+
+ now := time.Now()
+ // Verify that the key value lengths are within reason
+ for _, k := range evictedKs {
+ if len(k) != 32 && len(k) != 64 {
+ panic(fmt.Sprintf("Invalid key length: %d", len(k)))
+ }
+ }
+ for _, v := range evictedVs {
+ if len(v) > 256 {
+ panic(fmt.Sprintf("Invalid value length: %d", len(v)))
+ }
+ }
+
+ // Get state commitment from storage backend
+ storageRootBytes, err := storage.Update(evictedKs, evictedVs)
+ require.NoError(t, err)
+ storageRoot = common.BytesToHash(storageRootBytes)
+ updateTime := time.Since(now)
+
+ // Request storage backend to persist the state
+ err = storage.Commit(storageRootBytes)
+ require.NoError(t, err)
+
+ sum.storagePersistTime += time.Since(now) - updateTime
+ sum.storageUpdateTime += updateTime
+ sum.storagePersistCount++
+ sum.storageUpdateCount++
+
+ if writeSnapshot {
+ snapBackend := legacy.NewSnapshot(dbs.chain)
+ for acc := range accsDeleted {
+ deleted, err := snapBackend.PrefixDelete([]byte(acc))
+ require.NoError(t, err)
+ if deleted > 0 {
+ t.Logf("Deleted %d keys with prefix %x from snapshot", deleted, acc)
+ }
+ }
+
+ _, err = snapBackend.Update(evictedKs, evictedVs)
+ require.NoError(t, err)
+ }
+
+ // Reset evicted batch
+ evictedKs, evictedVs = evictedKs[:0], evictedVs[:0]
+
+ if sourceDb != nil {
+ // update block and metadata from source db
+ hash := rawdb.ReadCanonicalHash(sourceDb, blockNumber)
+ require.Equal(t, blockHash, hash, "Block hash mismatch")
+
+ block := rawdb.ReadBlock(sourceDb, hash, blockNumber)
+ require.NotNil(t, block, "Block not found in source db")
+
+ b := dbs.chain.NewBatch()
+ rawdb.WriteCanonicalHash(b, hash, blockNumber)
+ rawdb.WriteBlock(b, block)
+
+ // update metadata
+ rawdb.WriteAcceptorTip(b, blockHash)
+ rawdb.WriteHeadBlockHash(b, blockHash)
+ rawdb.WriteHeadHeaderHash(b, blockHash)
+ rawdb.WriteSnapshotBlockHash(b, blockHash)
+ rawdb.WriteSnapshotRoot(b, block.Root()) // TODO: unsure if this should be block.Root() or storageRoot
+
+ // handle genesis
+ if lastCommit.number == 0 {
+ genesis := getMainnetGenesis(t)
+ genesisHash := genesis.ToBlock().Hash()
+ rawdb.WriteCanonicalHash(b, genesisHash, 0)
+ rawdb.WriteBlock(b, genesis.ToBlock())
+ snapshot.ResetSnapshotGeneration(b)
+ t.Logf("Updating genesis hash: %s", genesisHash.TerminalString())
+ }
+
+ require.NoError(t, b.Write())
+
+ if storageBackend == "legacy" && writeCacheSize == 0 {
+ require.Equal(t, storageRoot, block.Root(), "Root mismatch")
+ }
+ }
+
+ updateMetadata(t, dbs.metadata, blockHash, storageRoot, blockNumber)
+ lastCommit.number = blockNumber
+ lastCommit.txs = sum.txs + sum.atomicTxs
+ }
+ commitLock.Unlock()
+ }
+
+ sum.accountReads += uint64(len(tapeResult.accountReads))
+ sum.storageReads += uint64(len(tapeResult.storageReads))
+ sum.accountWrites += uint64(accountWrites)
+ sum.storageWrites += uint64(storageWrites)
+ sum.accountUpdates += uint64(accountUpdates)
+ sum.storageUpdates += uint64(storageUpdates)
+ sum.accountDeletes += uint64(accountDeletes)
+ sum.storageDeletes += uint64(storageDeletes)
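+ // Net state growth: updates were already counted as writes without adding
+ // entries, and each delete both cancels a write and removes an existing
+ // entry, hence the 2x factor.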
+ sum.accounts += int64(int(accountWrites) - accountUpdates - 2*accountDeletes)
+ sum.storage += int64(int(storageWrites) - storageUpdates - 2*storageDeletes)
+
+ if blockNumber%uint64(logEach) == 0 {
+ storageRootStr := ""
+ if storageRoot != (common.Hash{}) {
+ storageRootStr = "/" + storageRoot.TerminalString()
+ }
+ t.Logf("Block[%s%s]: (%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d)",
+ blockHash.TerminalString(), storageRootStr, blockNumber,
+ sum.txs-lastReported.txs, sum.atomicTxs-lastReported.atomicTxs,
+ sum.accountReads-lastReported.accountReads, sum.storageReads-lastReported.storageReads,
+ sum.accountReadHits-lastReported.accountReadHits, sum.storageReadHits-lastReported.storageReadHits,
+ sum.accountWrites-lastReported.accountWrites, sum.storageWrites-lastReported.storageWrites,
+ sum.accountWriteHits-lastReported.accountWriteHits, sum.storageWriteHits-lastReported.storageWriteHits,
+ sum.accountUpdates-lastReported.accountUpdates, sum.storageUpdates-lastReported.storageUpdates,
+ sum.accountDeletes-lastReported.accountDeletes, sum.storageDeletes-lastReported.storageDeletes,
+ sum.accounts-lastReported.accounts, sum.storage-lastReported.storage,
+ )
+ if readCacheBackend != "none" {
+ hits := sum.accountReadHits - lastReported.accountReadHits + sum.storageReadHits - lastReported.storageReadHits
+ total := sum.accountReads - lastReported.accountReads + sum.storageReads - lastReported.storageReads
+ t.Logf(
+ "Cache stats: %d hits, %d misses, %.2f hit rate, %d entries (= %.4f of state), %d MiB",
+ hits, total-hits, float64(hits)/float64(total),
+ cache.Len(), float64(cache.Len())/float64(sum.accounts+sum.storage),
+ cache.EstimatedSize()/(units.MiB),
+ )
+ }
+ writeHits := sum.accountWriteHits + sum.storageWriteHits - lastReported.accountWriteHits - lastReported.storageWriteHits
+ writeTotal := sum.accountWrites + sum.storageWrites - lastReported.accountWrites - lastReported.storageWrites
+ _, oldest, found := writeCache.GetOldest()
+ if !found {
+ oldest.updatedAt = blockNumber // so displays as 0
+ }
+ txs := sum.txs + sum.atomicTxs - lastReported.txs - lastReported.atomicTxs
+ storageUpdateCount := sum.storageUpdateCount - lastReported.storageUpdateCount
+ storageUpdateTime := sum.storageUpdateTime - lastReported.storageUpdateTime
+ storageUpdateAvg := int64(0)
+ if storageUpdateCount > 0 {
+ storageUpdateAvg = storageUpdateTime.Milliseconds() / int64(storageUpdateCount)
+ }
+ storagePersistCount := sum.storagePersistCount - lastReported.storagePersistCount
+ storagePersistTime := sum.storagePersistTime - lastReported.storagePersistTime
+ storagePersistAvg := int64(0)
+ if storagePersistCount > 0 {
+ storagePersistAvg = storagePersistTime.Milliseconds() / int64(storagePersistCount)
+ }
+ t.Logf(
+ "Write cache stats: %d hits, %d misses, %.2f hit rate, %d entries (= %.4f of state), evicted/tx: %.1f acc, %.1f storage (total: %dk) (time: %d, total: %d ms) (updates: %d, time: %d, avg: %d, total: %d ms) (commits: %d, time: %d, avg: %d, total %d ms) (oldest age: %d)",
+ writeHits, writeTotal-writeHits, float64(writeHits)/float64(writeTotal),
+ writeCache.Len(), float64(writeCache.Len())/float64(sum.accounts+sum.storage),
+ float64(sum.writeCacheEvictAccount-lastReported.writeCacheEvictAccount)/float64(txs),
+ float64(sum.writeCacheEvictStorage-lastReported.writeCacheEvictStorage)/float64(txs),
+ (sum.writeCacheEvictAccount+sum.writeCacheEvictStorage)/1000,
+ (sum.writeCacheEvictTime - lastReported.writeCacheEvictTime).Milliseconds(), sum.writeCacheEvictTime.Milliseconds(),
+ storageUpdateCount, storageUpdateTime.Milliseconds(), storageUpdateAvg, sum.storageUpdateTime.Milliseconds(),
+ storagePersistCount, storagePersistTime.Milliseconds(), storagePersistAvg, sum.storagePersistTime.Milliseconds(),
+ blockNumber-oldest.updatedAt,
+ )
+ quants := []float64{0.05, 0.1, 0.25, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95}
+ var outString string
+ for _, q := range quants {
+ val := hst.Quantile(q)
+ if val == inf {
+ outString = fmt.Sprintf("%s [%.2f inf]", outString, q)
+ continue
+ }
+ outString = fmt.Sprintf("%s [%.2f %d]", outString, q, int(val))
+ }
+ t.Logf("Write cache quantiles: %s", outString)
+ outString = ""
+ for _, q := range quants {
+ val := hstWithReset.Quantile(q)
+ if val == inf {
+ outString = fmt.Sprintf("%s [%.2f inf]", outString, q)
+ continue
+ }
+ outString = fmt.Sprintf("%s [%.2f %d]", outString, q, int(val))
+ }
+ t.Logf("Reset cache quantiles: %s", outString)
+ hstWithReset.Reset()
+ lastReported = sum
+ }
+ }
+}
+
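+// Example: given ks = [storage(A,k1), delete(A), storage(A,k2)], the account
+// delete at index 1 drops storage(A,k1) (index 0 < 1) from the batch, while
+// the delete itself and the later storage(A,k2) write are kept.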
+func processAccountDeletes(t *testing.T, ks, vs [][]byte) ([][]byte, [][]byte, map[string]int) {
+ accsDeleted := make(map[string]int)
+ for i, k := range ks {
+ if len(k) == 32 && len(vs[i]) == 0 {
+ accsDeleted[string(k)] = i // all updates with prefix k that occur before i should be omitted from the return value
+ }
+ }
+ outIdx := 0
+ for i, k := range ks {
+ prefix := k[:32]
+ if idx, found := accsDeleted[string(prefix)]; found && i < idx {
+ continue
+ }
+ ks[outIdx] = k
+ vs[outIdx] = vs[i]
+ outIdx++
+ }
+ //if outIdx < len(ks) {
+ // t.Logf("Removed %d updates from pending batch", len(ks)-outIdx)
+ //}
+ return ks[:outIdx], vs[:outIdx], accsDeleted
+}
+
+type tapeResult struct {
+ accountReads, storageReads map[string][]byte
+}
+
+// The cache callback should return true when the value was already present (a read hit).
+func processTape(t *testing.T, r io.Reader, tapeResult *tapeResult, cache func(k string, v []byte) bool, sum *totals, tapeVerbose bool) uint16 {
+ length, err := readUint32(r)
+ require.NoError(t, err)
+
+ pos := 0
+ txCount := uint16(0)
+ for pos < int(length) {
+ typ, err := readByte(r)
+ require.NoError(t, err)
+ pos++
+
+ switch typ {
+ case typeAccount:
+ key, val, err := readKV(r, 32)
+ require.NoError(t, err)
+ pos += 32 + 1 + len(val)
+ k := string(key)
+ if _, ok := tapeResult.accountReads[k]; !ok {
+ tapeResult.accountReads[k] = val
+ if cache(k, val) {
+ sum.accountReadHits++
+ }
+ }
+ if tapeVerbose {
+ t.Logf("account read: %x -> %x", key, val)
+ }
+ case typeStorage:
+ key, val, err := readKV(r, 64)
+ require.NoError(t, err)
+ pos += 64 + 1 + len(val)
+ k := string(key)
+ if _, ok := tapeResult.storageReads[k]; !ok {
+ tapeResult.storageReads[k] = val
+ if cache(k, val) {
+ sum.storageReadHits++
+ }
+ }
+ if tapeVerbose {
+ t.Logf("storage read: %x -> %x", key, val)
+ }
+ case typeEndTx:
+ txCount++
+ if tapeVerbose {
+ t.Logf("end tx")
+ }
+ }
+ }
+ return txCount
+}
+
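+// TestXxx is a scratch test: it prints the keccak256 of a hardcoded hex preimage.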
+func TestXxx(t *testing.T) {
+ h := sha3.NewLegacyKeccak256()
+ h.Write(common.Hex2Bytes("ac7bbff258e5ff67efcbac43331033010676e22cbf7a9515a31e4e2b661b9b96"))
+ fmt.Printf("%x\n", h.Sum(nil))
+}
diff --git a/plugin/evm/reprocess_backend_test.go b/plugin/evm/reprocess_backend_test.go
new file mode 100644
index 0000000000..c47493d4d8
--- /dev/null
+++ b/plugin/evm/reprocess_backend_test.go
@@ -0,0 +1,216 @@
+package evm
+
+import (
+ "context"
+ "encoding/binary"
+ "encoding/json"
+ "math/big"
+ "net"
+ "testing"
+
+ "github.com/ava-labs/avalanchego/database"
+ "github.com/ava-labs/avalanchego/ids"
+ "github.com/ava-labs/avalanchego/snow"
+ "github.com/ava-labs/avalanchego/trace"
+ "github.com/ava-labs/avalanchego/upgrade"
+ "github.com/ava-labs/avalanchego/utils/constants"
+ "github.com/ava-labs/avalanchego/utils/units"
+ xmerkledb "github.com/ava-labs/avalanchego/x/merkledb"
+ "github.com/ava-labs/coreth/consensus"
+ "github.com/ava-labs/coreth/consensus/dummy"
+ "github.com/ava-labs/coreth/core"
+ "github.com/ava-labs/coreth/core/rawdb"
+ "github.com/ava-labs/coreth/core/state"
+ "github.com/ava-labs/coreth/core/types"
+ "github.com/ava-labs/coreth/params"
+ warpcontract "github.com/ava-labs/coreth/precompile/contracts/warp"
+ "github.com/ava-labs/coreth/shim/fw"
+ "github.com/ava-labs/coreth/shim/merkledb"
+ "github.com/ava-labs/coreth/shim/nomt"
+ "github.com/ava-labs/coreth/triedb"
+ firewood "github.com/ava-labs/firewood/ffi/v2"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/stretchr/testify/require"
+)
+
+type reprocessBackend struct {
+ Genesis *core.Genesis
+ Engine consensus.Engine
+ GetBlock func(uint64) *types.Block
+ CacheConfig core.CacheConfig
+ VerifyRoot bool
+ Disk ethdb.Database
+ Metadata database.Database
+ Name string
+}
+
+func getMerkleDB(t *testing.T, mdbKVStore database.Database) xmerkledb.MerkleDB {
+ ctx := context.Background()
+ mdb, err := xmerkledb.New(ctx, mdbKVStore, xmerkledb.Config{
+ BranchFactor: xmerkledb.BranchFactor(merkleDBBranchFactor),
+ Hasher: xmerkledb.DefaultHasher,
+ HistoryLength: 1,
+ RootGenConcurrency: 0,
+ ValueNodeCacheSize: uint(valueNodeCacheSizeMB) * units.MiB,
+ IntermediateNodeCacheSize: uint(intermediateNodeCacheSizeMB) * units.MiB,
+ IntermediateWriteBufferSize: uint(intermediateWriteBufferSizeKB) * units.KiB,
+ IntermediateWriteBatchSize: uint(intermediateWriteBatchSizeKB) * units.KiB,
+ Reg: prometheus.NewRegistry(),
+ TraceLevel: xmerkledb.InfoTrace,
+ Tracer: trace.Noop,
+ })
+ require.NoError(t, err)
+
+ return mdb
+}
+
+func getCacheConfig(t *testing.T, name string, backend triedb.KVBackend) core.CacheConfig {
+ cacheConfig := *core.DefaultCacheConfig
+ cacheConfig.StateScheme = legacyScheme
+ cacheConfig.KeyValueDB = &triedb.KeyValueConfig{KVBackend: backend}
+ cacheConfig.TriePrefetcherParallelism = prefetchers
+ cacheConfig.SnapshotLimit = 0
+ if useSnapshot {
+ cacheConfig.SnapshotLimit = 256
+ }
+ if trieCleanCacheMBs > 0 {
+ cacheConfig.TrieCleanLimit = trieCleanCacheMBs
+ }
+ cacheConfig.Pruning = pruning
+ return cacheConfig
+}
+
+func getBackend(t *testing.T, name string, blocksCount int, dbs dbs) *reprocessBackend {
+ chainConfig := params.TestChainConfig
+ key1, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ addr1 := crypto.PubkeyToAddress(key1.PublicKey)
+ g := &core.Genesis{
+ Config: chainConfig,
+ Alloc: types.GenesisAlloc{addr1: {Balance: big.NewInt(1000000000000000000)}},
+ }
+ testVM := &VM{
+ chainConfig: chainConfig,
+ ctx: &snow.Context{AVAXAssetID: ids.ID{1}},
+ }
+ someAddr := common.Address{1}
+
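+ // Inject a deterministic end-of-block state change so that every generated
+ // block touches state and yields a distinct state root.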
+ endOfBlockStateTransition := func(block *types.Header, statedb *state.StateDB) {
+ i := block.Number.Uint64()
+ statedb.SetNonce(someAddr, i)
+ iBytes := binary.BigEndian.AppendUint64(nil, i)
+ asHash := common.BytesToHash(iBytes)
+ statedb.SetState(someAddr, asHash, asHash)
+ }
+
+ cbs := dummy.ConsensusCallbacks{
+ OnExtraStateChange: func(block *types.Block, statedb *state.StateDB) (*big.Int, *big.Int, error) {
+ endOfBlockStateTransition(block.Header(), statedb)
+ return testVM.onExtraStateChange(block, statedb)
+ },
+ OnFinalizeAndAssemble: func(header *types.Header, state *state.StateDB, txs []*types.Transaction) (extraData []byte, blockFeeContribution *big.Int, extDataGasUsed *big.Int, err error) {
+ endOfBlockStateTransition(header, state)
+ return nil, nil, nil, nil
+ },
+ }
+
+ engine := dummy.NewFakerWithMode(cbs, dummy.Mode{
+ ModeSkipHeader: true,
+ })
+
+ signer := types.LatestSigner(chainConfig)
+ _, blocks, _, err := core.GenerateChainWithGenesis(g, engine, blocksCount, 2, func(i int, b *core.BlockGen) {
+ tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{
+ Nonce: uint64(i),
+ GasPrice: b.BaseFee(),
+ Gas: 21000,
+ To: &addr1,
+ }), signer, key1)
+ b.AddTx(tx)
+ })
+ require.NoError(t, err)
+ require.Len(t, blocks, blocksCount)
+
+ kvBackend := getKVBackend(t, name, dbs.merkledb)
+ return &reprocessBackend{
+ Genesis: g,
+ Engine: engine,
+ GetBlock: func(i uint64) *types.Block { return blocks[i-1] },
+ CacheConfig: getCacheConfig(t, name, kvBackend),
+ Disk: dbs.chain,
+ Metadata: dbs.metadata,
+ Name: name,
+ VerifyRoot: name == "legacy",
+ }
+}
+
+func getMainnetGenesis(t *testing.T) core.Genesis {
+ var g core.Genesis
+ require.NoError(t, json.Unmarshal([]byte(cChainGenesisMainnet), &g))
+ // Update the chain config with mainnet upgrades
+ g.Config = params.GetChainConfig(upgrade.Mainnet, g.Config.ChainID)
+ // If Durango is activated, activate the Warp Precompile at the same time.
+ if g.Config.DurangoBlockTimestamp != nil {
+ g.Config.PrecompileUpgrades = append(g.Config.PrecompileUpgrades, params.PrecompileUpgrade{
+ Config: warpcontract.NewDefaultConfig(g.Config.DurangoBlockTimestamp),
+ })
+ }
+ g.Config.SnowCtx = &snow.Context{
+ AVAXAssetID: mainnetAvaxAssetID,
+ ChainID: mainnetCChainID,
+ NetworkID: constants.MainnetID,
+ }
+
+ t.Logf("Mainnet chain config: %v", g.Config)
+ return g
+}
+
+func getMainnetBackend(t *testing.T, name string, source ethdb.Database, dbs dbs) *reprocessBackend {
+ g := getMainnetGenesis(t)
+ testVM := &VM{
+ chainConfig: g.Config,
+ ctx: g.Config.SnowCtx,
+ }
+ cbs := dummy.ConsensusCallbacks{OnExtraStateChange: testVM.onExtraStateChange}
+ engine := dummy.NewFakerWithMode(cbs, dummy.Mode{ModeSkipHeader: true})
+
+ kvBackend := getKVBackend(t, name, dbs.merkledb)
+ return &reprocessBackend{
+ Genesis: &g,
+ Engine: engine,
+ GetBlock: func(i uint64) *types.Block {
+ hash := rawdb.ReadCanonicalHash(source, i)
+ block := rawdb.ReadBlock(source, hash, i)
+ require.NotNil(t, block)
+ return block
+ },
+ CacheConfig: getCacheConfig(t, name, kvBackend),
+ Disk: dbs.chain,
+ Metadata: dbs.metadata,
+ Name: name,
+ VerifyRoot: name == "legacy",
+ }
+}
+
+func getKVBackend(t *testing.T, name string, merkleKVStore database.Database) triedb.KVBackend {
+ if name == "merkledb" {
+ return merkledb.NewMerkleDB(getMerkleDB(t, merkleKVStore))
+ }
+ if name == "nomt" {
+ conn, err := net.Dial("unix", socketPath)
+ require.NoError(t, err)
+ return nomt.New(conn)
+ }
+ if name == "firewood" {
+ var fwdb firewood.Firewood
+ if fileExists(firewoodDBFile) {
+ fwdb = firewood.OpenDatabase(firewoodDBFile)
+ } else {
+ fwdb = firewood.CreateDatabase(firewoodDBFile)
+ }
+ return &fw.Firewood{Firewood: fwdb}
+ }
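+ // "legacy" (and any unrecognized name) falls through to nil; the legacy
+ // scheme is wrapped separately by the caller.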
+ return nil
+}
diff --git a/plugin/evm/reprocess_recording_test.go b/plugin/evm/reprocess_recording_test.go
new file mode 100644
index 0000000000..427f63a949
--- /dev/null
+++ b/plugin/evm/reprocess_recording_test.go
@@ -0,0 +1,323 @@
+package evm
+
+import (
+ "bufio"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/ava-labs/coreth/core/types"
+ "github.com/ethereum/go-ethereum/common"
+)
+
+// Every 10,000 blocks a new file is started.
+// Each file contains a sequence of block records, each with:
+// - Block number (8 bytes)
+// - Block hash (32 bytes)
+// - Transactions (uint16)
+// - Atomic transactions (uint16)
+// - Length of read tape (uint32)
+// - Read tape (variable), a sequence of entries:
+//   - type (1 byte)
+//   - type = account:
+//     - Account address hash (32 bytes)
+//     - Value len (byte)
+//     - Value (variable)
+//   - type = storage:
+//     - Account address hash (32 bytes)
+//     - Key hash (32 bytes)
+//     - Value len (byte)
+//     - Value (variable)
+//   - type = end tx: no payload
+// - Accounts Written (uint16)
+// - Storages Written (uint16)
+// - For each account written:
+//   - Account address hash (32 bytes)
+//   - Value len (byte)
+//   - Value (variable)
+// - For each storage written:
+//   - Account address hash (32 bytes)
+//   - Key hash (32 bytes)
+//   - Value len (byte)
+//   - Value (variable)
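+//
+// Example: a one-byte storage value 0xff under account hash A and slot hash K
+// appears on the read tape as 0x01 | A (32 bytes) | K (32 bytes) | 0x01 | 0xff:
+// type byte, key material, single-byte value length, then the value.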
+
+type KV struct {
+ Key []byte
+ Value []byte
+}
+type blockRecorder struct {
+ accountReads int
+ storageReads int
+ txEnds int
+ readTape []byte
+
+ accountWrites []KV
+ storageWrites []KV
+
+ fileManager *fileManager
+}
+
+func (b *blockRecorder) MustUpdate(key, value []byte) {
+ switch len(key) {
+ case 32:
+ b.accountWrites = append(b.accountWrites, KV{Key: key, Value: value})
+ case 64:
+ b.storageWrites = append(b.storageWrites, KV{Key: key, Value: value})
+ default:
+ panic("unexpected key length")
+ }
+}
+
+// Tape record type tags.
+const (
+ typeAccount = 0
+ typeStorage = 1
+ typeEndTx = 2
+)
+
+func (b *blockRecorder) RecordAccountRead(key common.Hash, value []byte) error {
+ b.accountReads++
+ b.readTape = append(b.readTape, typeAccount)
+ b.readTape = append(b.readTape, key[:]...)
+ b.readTape = append(b.readTape, byte(len(value)))
+ b.readTape = append(b.readTape, value...)
+ return nil
+}
+
+func (b *blockRecorder) RecordStorageRead(account common.Hash, key common.Hash, value []byte) error {
+ b.storageReads++
+ b.readTape = append(b.readTape, typeStorage)
+ b.readTape = append(b.readTape, account[:]...)
+ b.readTape = append(b.readTape, key[:]...)
+ b.readTape = append(b.readTape, byte(len(value)))
+ b.readTape = append(b.readTape, value...)
+ return nil
+}
+
+func (b *blockRecorder) RecordTransactionEnd() error {
+ b.txEnds++
+ b.readTape = append(b.readTape, typeEndTx)
+ return nil
+}
+
+func (b *blockRecorder) WriteToDisk(block *types.Block, atomicTxs uint16) {
+ if b.fileManager == nil {
+ return
+ }
+ w := b.fileManager.GetWriterFor(block.NumberU64())
+ if err := b.Write(block, atomicTxs, w); err != nil {
+ panic(fmt.Sprintf("failed to write block %d: %v", block.NumberU64(), err))
+ }
+}
+
+func (b *blockRecorder) Close() {
+ if b.fileManager == nil {
+ return
+ }
+ b.fileManager.Close()
+}
+
+func (b *blockRecorder) Summary(block *types.Block, atomicTxs uint16) {
+ fmt.Printf("Block %d: %s (%d txs + %d atomic)\tReads (acc, storage, tape KBs): %d, %d, %d\t Writes: %d, %d\n",
+ block.NumberU64(),
+ block.Hash().TerminalString(),
+ len(block.Transactions()),
+ atomicTxs,
+ b.accountReads,
+ b.storageReads,
+ len(b.readTape)/1024,
+ len(b.accountWrites),
+ len(b.storageWrites),
+ )
+
+ if !tapeVerbose {
+ return
+ }
+ fmt.Printf("Read Tape: %x\n", b.readTape)
+
+ fmt.Printf("Account Writes: %d\n", len(b.accountWrites))
+ for _, kv := range b.accountWrites {
+ fmt.Printf(" %x: %x\n", kv.Key, kv.Value)
+ }
+
+ fmt.Printf("Storage Writes: %d\n", len(b.storageWrites))
+ for _, kv := range b.storageWrites {
+ fmt.Printf(" %x: %x\n", kv.Key, kv.Value)
+ }
+}
+
+func writeByte(w io.Writer, b byte) error {
+ _, err := w.Write([]byte{b})
+ return err
+}
+
+func writeUint16(w io.Writer, i uint16) error {
+ _, err := w.Write(binary.BigEndian.AppendUint16(nil, i))
+ return err
+}
+
+func writeUint32(w io.Writer, i uint32) error {
+ _, err := w.Write(binary.BigEndian.AppendUint32(nil, i))
+ return err
+}
+
+func writeUint64(w io.Writer, i uint64) error {
+ _, err := w.Write(binary.BigEndian.AppendUint64(nil, i))
+ return err
+}
+
+func readByte(r io.Reader) (byte, error) {
+ buf := make([]byte, 1)
+ _, err := io.ReadFull(r, buf)
+ return buf[0], err
+}
+
+func readUint16(r io.Reader) (uint16, error) {
+ buf := make([]byte, 2)
+ _, err := io.ReadFull(r, buf)
+ return binary.BigEndian.Uint16(buf), err
+}
+
+func readUint32(r io.Reader) (uint32, error) {
+ buf := make([]byte, 4)
+ _, err := io.ReadFull(r, buf)
+ return binary.BigEndian.Uint32(buf), err
+}
+
+func readUint64(r io.Reader) (uint64, error) {
+ buf := make([]byte, 8)
+ _, err := io.ReadFull(r, buf)
+ return binary.BigEndian.Uint64(buf), err
+}
+
+func readKV(r io.Reader, keyLen int) ([]byte, []byte, error) {
+ key := make([]byte, keyLen)
+ _, err := io.ReadFull(r, key)
+ if err != nil {
+ return nil, nil, err
+ }
+ valLen, err := readByte(r)
+ if err != nil {
+ return nil, nil, err
+ }
+ val := make([]byte, valLen)
+ _, err = io.ReadFull(r, val)
+ return key, val, err
+}
+
+func readHash(r io.Reader) (common.Hash, error) {
+ var h common.Hash
+ _, err := io.ReadFull(r, h[:])
+ return h, err
+}
+
+func (b *blockRecorder) Write(block *types.Block, atomicTxs uint16, w io.Writer) error {
+ if len(block.Transactions()) != b.txEnds {
+ panic(fmt.Sprintf("mismatch block txs: %d, ends recorded: %d", len(block.Transactions()), b.txEnds))
+ }
+ if err := writeUint64(w, block.NumberU64()); err != nil {
+ return err
+ }
+ if _, err := w.Write(block.Hash().Bytes()); err != nil {
+ return err
+ }
+ if err := writeUint16(w, uint16(len(block.Transactions()))); err != nil {
+ return err
+ }
+ if err := writeUint16(w, atomicTxs); err != nil {
+ return err
+ }
+ if err := writeUint32(w, uint32(len(b.readTape))); err != nil {
+ return err
+ }
+ if _, err := w.Write(b.readTape); err != nil {
+ return err
+ }
+ if err := writeUint16(w, uint16(len(b.accountWrites))); err != nil {
+ return err
+ }
+ if err := writeUint16(w, uint16(len(b.storageWrites))); err != nil {
+ return err
+ }
+
+ for _, kv := range b.accountWrites {
+ if _, err := w.Write(kv.Key); err != nil {
+ return err
+ }
+ if err := writeByte(w, byte(len(kv.Value))); err != nil {
+ return err
+ }
+ if _, err := w.Write(kv.Value); err != nil {
+ return err
+ }
+ }
+
+ for _, kv := range b.storageWrites {
+ if _, err := w.Write(kv.Key); err != nil {
+ return err
+ }
+ if err := writeByte(w, byte(len(kv.Value))); err != nil {
+ return err
+ }
+ if _, err := w.Write(kv.Value); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (b *blockRecorder) Reset() {
+ b.readTape = nil
+ b.accountWrites = nil
+ b.storageWrites = nil
+ b.storageReads = 0
+ b.accountReads = 0
+ b.txEnds = 0
+}
+
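+// fileManager groups block records into one file per newEach blocks, named by
+// the first block number of the group. A given instance must be used either
+// for writing or for reading, not both, as both modes share the file handle.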
+type fileManager struct {
+ dir string
+ newEach uint64
+ lastFile uint64
+ f *os.File
+ reader io.Reader
+}
+
+func (f *fileManager) GetWriterFor(blockNumber uint64) io.Writer {
+ group := blockNumber - blockNumber%f.newEach
+ if group == f.lastFile && f.f != nil {
+ return f.f
+ }
+ if f.f != nil {
+ f.f.Close()
+ }
+ file, err := os.OpenFile(fmt.Sprintf("%s/%08d", f.dir, group), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+ if err != nil {
+ panic(err)
+ }
+ f.f = file
+ f.lastFile = group
+ return f.f
+}
+
+func (f *fileManager) GetReaderFor(blockNumber uint64) io.Reader {
+ group := blockNumber - blockNumber%f.newEach
+ if group == f.lastFile && f.f != nil {
+ return f.reader
+ }
+ if f.f != nil {
+ f.f.Close()
+ }
+ file, err := os.Open(fmt.Sprintf("%s/%08d", f.dir, group))
+ if err != nil {
+ panic(err)
+ }
+ f.f = file
+ f.lastFile = group
+ f.reader = bufio.NewReaderSize(f.f, 1024*1024)
+ return f.reader
+}
+
+func (f *fileManager) Close() error {
+ if f.f != nil {
+ return f.f.Close()
+ }
+ return nil
+}
diff --git a/plugin/evm/reprocess_test.go b/plugin/evm/reprocess_test.go
new file mode 100644
index 0000000000..dcde43cfc1
--- /dev/null
+++ b/plugin/evm/reprocess_test.go
@@ -0,0 +1,668 @@
+package evm
+
+import (
+ "bytes"
+ "encoding/hex"
+ "flag"
+ "os"
+ "os/signal"
+ "sync"
+ "syscall"
+ "testing"
+ "time"
+
+ "github.com/ava-labs/avalanchego/database"
+ "github.com/ava-labs/avalanchego/database/leveldb"
+ "github.com/ava-labs/avalanchego/database/memdb"
+ "github.com/ava-labs/avalanchego/database/prefixdb"
+ "github.com/ava-labs/avalanchego/ids"
+ "github.com/ava-labs/avalanchego/utils/logging"
+ "github.com/ava-labs/coreth/core"
+ "github.com/ava-labs/coreth/core/rawdb"
+ "github.com/ava-labs/coreth/core/vm"
+ "github.com/ava-labs/coreth/plugin/evm/atomic"
+ evmdatabase "github.com/ava-labs/coreth/plugin/evm/database"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/stretchr/testify/require"
+ "github.com/valyala/histogram"
+ "golang.org/x/crypto/sha3"
+)
+
+var (
+ cChainGenesisFuji = "{\"config\":{\"chainId\":43113,\"homesteadBlock\":0,\"daoForkBlock\":0,\"daoForkSupport\":true,\"eip150Block\":0,\"eip150Hash\":\"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0\",\"eip155Block\":0,\"eip158Block\":0,\"byzantiumBlock\":0,\"constantinopleBlock\":0,\"petersburgBlock\":0,\"istanbulBlock\":0,\"muirGlacierBlock\":0},\"nonce\":\"0x0\",\"timestamp\":\"0x0\",\"extraData\":\"0x00\",\"gasLimit\":\"0x5f5e100\",\"difficulty\":\"0x0\",\"mixHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"coinbase\":\"0x0000000000000000000000000000000000000000\",\"alloc\":{\"0100000000000000000000000000000000000000\":{\"code\":\"0x7300000000000000000000000000000000000000003014608060405260043610603d5760003560e01c80631e010439146042578063b6510bb314606e575b600080fd5b605c60048036036020811015605657600080fd5b503560b1565b60408051918252519081900360200190f35b818015607957600080fd5b5060af60048036036080811015608e57600080fd5b506001600160a01b03813516906020810135906040810135906060013560b6565b005b30cd90565b836001600160a01b031681836108fc8690811502906040516000604051808303818888878c8acf9550505050505015801560f4573d6000803e3d6000fd5b505050505056fea26469706673582212201eebce970fe3f5cb96bf8ac6ba5f5c133fc2908ae3dcd51082cfee8f583429d064736f6c634300060a0033\",\"balance\":\"0x0\"}},\"number\":\"0x0\",\"gasUsed\":\"0x0\",\"parentHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\"}"
+ cChainGenesisMainnet = "{\"config\":{\"chainId\":43114,\"homesteadBlock\":0,\"daoForkBlock\":0,\"daoForkSupport\":true,\"eip150Block\":0,\"eip150Hash\":\"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0\",\"eip155Block\":0,\"eip158Block\":0,\"byzantiumBlock\":0,\"constantinopleBlock\":0,\"petersburgBlock\":0,\"istanbulBlock\":0,\"muirGlacierBlock\":0},\"nonce\":\"0x0\",\"timestamp\":\"0x0\",\"extraData\":\"0x00\",\"gasLimit\":\"0x5f5e100\",\"difficulty\":\"0x0\",\"mixHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"coinbase\":\"0x0000000000000000000000000000000000000000\",\"alloc\":{\"0100000000000000000000000000000000000000\":{\"code\":\"0x7300000000000000000000000000000000000000003014608060405260043610603d5760003560e01c80631e010439146042578063b6510bb314606e575b600080fd5b605c60048036036020811015605657600080fd5b503560b1565b60408051918252519081900360200190f35b818015607957600080fd5b5060af60048036036080811015608e57600080fd5b506001600160a01b03813516906020810135906040810135906060013560b6565b005b30cd90565b836001600160a01b031681836108fc8690811502906040516000604051808303818888878c8acf9550505050505015801560f4573d6000803e3d6000fd5b505050505056fea26469706673582212201eebce970fe3f5cb96bf8ac6ba5f5c133fc2908ae3dcd51082cfee8f583429d064736f6c634300060a0033\",\"balance\":\"0x0\"}},\"number\":\"0x0\",\"gasUsed\":\"0x0\",\"parentHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\"}"
+)
+
+var (
+ sourceDbDir = "sourceDb"
+ sourcePrefix = ""
+ dbDir = ""
+ dbPrefix = ""
+ startBlock = uint64(0)
+ endBlock = uint64(200)
+ prefetchers = 4
+ useSnapshot = true
+ writeSnapshot = false
+ pruning = true
+ skipUpgradeCheck = false
+ usePersistedStartBlock = false
+ tapeDir = ""
+ tapeVerbose = false
+ legacyScheme = rawdb.HashScheme
+ trieCleanCacheMBs = 0
+ logEach = 1
+ readCacheSize = int64(256)
+ readCacheBackend = "none"
+ writeCacheSize = uint64(1024)
+ storageBackend = "none"
+ commitEachBlocks = 1
+ commitEachTxs = 0
+ forceStartWithMismatch = false
+ trackDeletedTries = false
+
+ // merkledb options
+ merkleDBBranchFactor = 16
+ valueNodeCacheSizeMB = 1
+ intermediateNodeCacheSizeMB = 1
+ intermediateWriteBufferSizeKB = 1024
+ intermediateWriteBatchSizeKB = 256
+
+ // firewood options
+ firewoodDBFile = "firewood_db"
+
+ // ipc options
+ socketPath = "/tmp/rust_socket"
+)
+
+func TestMain(m *testing.M) {
+ flag.StringVar(&sourceDbDir, "sourceDbDir", sourceDbDir, "directory of source database")
+ flag.StringVar(&sourcePrefix, "sourcePrefix", sourcePrefix, "prefix of source database")
+ flag.StringVar(&dbDir, "dbDir", dbDir, "directory to store database (uses memory if empty)")
+ flag.StringVar(&dbPrefix, "dbPrefix", dbPrefix, "prefix of database")
+ flag.Uint64Var(&startBlock, "startBlock", startBlock, "start block number")
+ flag.Uint64Var(&endBlock, "endBlock", endBlock, "end block number")
+ flag.IntVar(&prefetchers, "prefetchers", prefetchers, "number of prefetchers")
+ flag.BoolVar(&useSnapshot, "useSnapshot", useSnapshot, "use snapshot")
+ flag.BoolVar(&writeSnapshot, "writeSnapshot", writeSnapshot, "write snapshot")
+ flag.BoolVar(&pruning, "pruning", pruning, "pruning")
+ flag.BoolVar(&skipUpgradeCheck, "skipUpgradeCheck", skipUpgradeCheck, "skip upgrade check")
+ flag.BoolVar(&usePersistedStartBlock, "usePersistedStartBlock", usePersistedStartBlock, "use persisted start block")
+ flag.StringVar(&tapeDir, "tapeDir", tapeDir, "directory to store tape")
+ flag.BoolVar(&tapeVerbose, "tapeVerbose", tapeVerbose, "verbose tape")
+ flag.StringVar(&legacyScheme, "legacyScheme", legacyScheme, "legacy scheme (hash or path)")
+ flag.IntVar(&trieCleanCacheMBs, "trieCleanCacheMBs", trieCleanCacheMBs, "clean cache size in MB")
+ flag.IntVar(&logEach, "logEach", logEach, "log one of each N blocks")
+ flag.Int64Var(&readCacheSize, "readCacheSize", readCacheSize, "read cache size in MB")
+ flag.StringVar(&readCacheBackend, "readCacheBackend", readCacheBackend, "read cache backend (theine, fastcache, otter, none)")
+ flag.Uint64Var(&writeCacheSize, "writeCacheSize", writeCacheSize, "write cache size in items")
+ flag.StringVar(&socketPath, "socketPath", socketPath, "socket path")
+ flag.StringVar(&storageBackend, "storageBackend", storageBackend, "storage backend (none, legacy, merkledb, nomt, firewood)")
+ flag.IntVar(&commitEachBlocks, "commitEachBlocks", commitEachBlocks, "commit each N blocks")
+ flag.IntVar(&commitEachTxs, "commitEachTxs", commitEachTxs, "commit each N transactions")
+ flag.BoolVar(&forceStartWithMismatch, "forceStartWithMismatch", forceStartWithMismatch, "force start with mismatch")
+ flag.BoolVar(&trackDeletedTries, "trackDeletedTries", trackDeletedTries, "track deleted tries (detect re-use of SELFDESTRUCTed accounts)")
+ flag.StringVar(&firewoodDBFile, "firewoodDBFile", firewoodDBFile, "firewood DB file")
+
+ // merkledb options
+ flag.IntVar(&merkleDBBranchFactor, "merkleDBBranchFactor", merkleDBBranchFactor, "merkleDB branch factor")
+ flag.IntVar(&valueNodeCacheSizeMB, "valueNodeCacheSizeMB", valueNodeCacheSizeMB, "value node cache size in MB")
+ flag.IntVar(&intermediateNodeCacheSizeMB, "intermediateNodeCacheSizeMB", intermediateNodeCacheSizeMB, "intermediate node cache size in MB")
+ flag.IntVar(&intermediateWriteBufferSizeKB, "intermediateWriteBufferSizeKB", intermediateWriteBufferSizeKB, "intermediate write buffer size in KB")
+ flag.IntVar(&intermediateWriteBatchSizeKB, "intermediateWriteBatchSizeKB", intermediateWriteBatchSizeKB, "intermediate write batch size in KB")
+
+ flag.Parse()
+ m.Run()
+}
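+
+// Typical invocation (illustrative; paths and block numbers are placeholders):
+//
+//	go test -run TestReprocessMainnetBlocks -timeout 0 \
+//	  -sourceDbDir=/data/source -dbDir=/data/reprocess \
+//	  -startBlock=1 -endBlock=100000 -storageBackend=legacy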
+
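+// prefixReader wraps an ethdb.Database so that every read is namespaced under
+// a fixed key prefix: Get, Has and NewIterator prepend the prefix, and
+// prefixIt strips it from iterated keys again.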
+type prefixReader struct {
+ ethdb.Database
+ prefix []byte
+}
+
+func (r *prefixReader) Get(key []byte) ([]byte, error) {
+ return r.Database.Get(append(r.prefix, key...))
+}
+
+func (r *prefixReader) Has(key []byte) (bool, error) {
+ return r.Database.Has(append(r.prefix, key...))
+}
+
+func (r *prefixReader) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
+ pfx := make([]byte, len(r.prefix)+len(prefix))
+ copy(pfx, r.prefix)
+ copy(pfx[len(r.prefix):], prefix)
+ return prefixIt{r.Database.NewIterator(pfx, start), r.prefix}
+}
+
+type prefixIt struct {
+ ethdb.Iterator
+ prefix []byte
+}
+
+func (it prefixIt) Key() []byte {
+ return it.Iterator.Key()[len(it.prefix):]
+}
+
+const (
+ cacheSize = 128
+ handles = 1024
+)
+
+func openSourceDB(t *testing.T) ethdb.Database {
+ sourceDb, err := rawdb.NewLevelDBDatabase(sourceDbDir, cacheSize, handles, "", true)
+ if err != nil {
+ t.Skipf("Failed to open source database: %s", err)
+ }
+ prefix := []byte(sourcePrefix)
+ if bytes.HasPrefix(prefix, []byte("0x")) {
+ prefix = prefix[2:]
+ var err error
+ prefix, err = hex.DecodeString(string(prefix))
+ if err != nil {
+ t.Fatalf("invalid hex prefix: %s", err)
+ }
+ }
+ return &prefixReader{Database: sourceDb, prefix: prefix}
+}
+
+func TestExportBlocks(t *testing.T) {
+ sourceDb := openSourceDB(t)
+ defer sourceDb.Close()
+
+ if startBlock == 0 {
+ startBlock = 1
+ t.Logf("Start block is 0, setting to 1")
+ }
+
+ db, err := rawdb.NewLevelDBDatabase(dbDir, cacheSize, handles, "", false)
+ require.NoError(t, err)
+ defer db.Close()
+
+ logEach := 100_000 // note: shadows the -logEach flag for this bulk export
+ for i := startBlock; i <= endBlock; i++ {
+ hash := rawdb.ReadCanonicalHash(sourceDb, i)
+ block := rawdb.ReadBlock(sourceDb, hash, i)
+ if block == nil {
+ t.Fatalf("Block %d not found", i)
+ }
+ rawdb.WriteCanonicalHash(db, hash, i)
+ rawdb.WriteBlock(db, block)
+ if i%uint64(logEach) == 0 {
+ t.Logf("Exported block %d", i)
+ }
+ }
+
+ t.Logf("Exported %d blocks", endBlock-startBlock+1)
+}
+
+func TestExportCode(t *testing.T) {
+ sourceDb := openSourceDB(t)
+ defer sourceDb.Close()
+
+ dbs := openDBs(t)
+ defer dbs.Close()
+
+ db := dbs.chain
+
+ it := sourceDb.NewIterator(rawdb.CodePrefix, nil)
+ defer it.Release()
+
+ h := sha3.NewLegacyKeccak256()
+ count, totalBytes := uint64(0), uint64(0)
+ for it.Next() {
+ if len(it.Key()) != 33 {
+ continue
+ }
+ codeHash := it.Key()[1:]
+
+ hash := common.BytesToHash(codeHash)
+ code := it.Value()
+ _, err := h.Write(code)
+ require.NoError(t, err)
+ require.Equal(t, hash, common.BytesToHash(h.Sum(nil)))
+ h.Reset()
+
+ rawdb.WriteCode(db, hash, it.Value())
+ count++
+ totalBytes += uint64(len(it.Value()))
+
+ if count%uint64(logEach) == 0 {
+ t.Logf("Exported %d code entries (%d MB)", count, totalBytes/(1024*1024))
+ }
+ }
+}
+
+func TestExportHeaders(t *testing.T) {
+ sourceDb := openSourceDB(t)
+ defer sourceDb.Close()
+
+ dbs := openDBs(t)
+ defer dbs.Close()
+
+ db := dbs.chain
+
+ for i := startBlock; i <= endBlock; i++ {
+ hash := rawdb.ReadCanonicalHash(sourceDb, i)
+ header := rawdb.ReadHeader(sourceDb, hash, i)
+ if header == nil {
+ t.Fatalf("Header %d not found", i)
+ }
+ rawdb.WriteHeader(db, header)
+
+ if i%uint64(logEach) == 0 {
+ t.Logf("Exported header %d", i)
+ }
+ }
+
+ t.Logf("Exported %d headers", endBlock-startBlock+1)
+}
+
+func TestQueryBlock(t *testing.T) {
+ sourceDb := openSourceDB(t)
+ defer sourceDb.Close()
+
+ for i := startBlock; i <= endBlock; i++ {
+ hash := rawdb.ReadCanonicalHash(sourceDb, i)
+ block := rawdb.ReadBlock(sourceDb, hash, i)
+ if block == nil {
+ t.Fatalf("Block %d not found", i)
+ }
+
+ t.Logf("Block %d: %x, %x", i, hash, block.Root())
+ }
+}
+
+var (
+ VMDBPrefix = []byte("vm")
+ fujiXChainID = ids.FromStringOrPanic("2JVSBoinj9C2J33VntvzYtVJNZdN2NKiwwKjcumHUWEb5DbBrm")
+ fujiCChainID = ids.FromStringOrPanic("yH8D7ThNJkxmtkuv2jgBa4P1Rn3Qpr4pPr7QYNfcdoS6k6HWp")
+ mainnetXChainID = ids.FromStringOrPanic("2oYMBNV4eNHyqk2fjjV5nVQLDbtmNJzq5s3qs3Lo6ftnC6FByM")
+ mainnetCChainID = ids.FromStringOrPanic("2q9e4r6Mu3U68nU1fYjgbR6JvwrRx36CohpAX5UQxse55x1Q5")
+ mainnetAvaxAssetID = ids.FromStringOrPanic("FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z")
+)
+
+type dbs struct {
+ metadata database.Database
+ chain ethdb.Database
+ merkledb database.Database
+
+ base database.Database
+}
+
+func (d *dbs) Close() { d.base.Close() }
+
+func openDBs(t *testing.T) dbs {
+ var base database.Database
+ if dbDir == "" {
+ base = memdb.New()
+ } else {
+ db, err := leveldb.New(dbDir, nil, logging.NoLog{}, prometheus.NewRegistry())
+ require.NoError(t, err)
+ base = db
+ }
+
+ prefix := []byte(dbPrefix)
+ if bytes.HasPrefix(prefix, []byte("0x")) {
+ prefix = prefix[2:]
+ var err error
+ prefix, err = hex.DecodeString(string(prefix))
+ if err != nil {
+ t.Fatalf("invalid hex prefix: %s", err)
+ }
+ }
+
+ var chaindb ethdb.Database
+ if len(prefix) > 0 {
+ chaindb = &prefixReader{
+ Database: rawdb.NewDatabase(evmdatabase.WrapDatabase(base)),
+ prefix: prefix,
+ }
+ } else {
+ chaindb = rawdb.NewDatabase(evmdatabase.WrapDatabase(prefixdb.New(ethDBPrefix, base)))
+ }
+ return dbs{
+ metadata: prefixdb.New(reprocessMetadataPrefix, base),
+ chain: chaindb,
+ merkledb: prefixdb.New(merkledbPrefix, base),
+ base: base,
+ }
+}
+
+var (
+ reprocessMetadataPrefix = []byte("metadata")
+ merkledbPrefix = []byte("merkledb")
+
+ lastAcceptedRootKey = []byte("lastAcceptedRoot")
+ lastAcceptedHashKey = []byte("lastAcceptedHash")
+ lastAcceptedHeightKey = []byte("lastAcceptedHeight")
+)
+
+func getMetadata(db database.Database) (lastHash, lastRoot common.Hash, lastHeight uint64) {
+ if bytes, err := db.Get(lastAcceptedRootKey); err == nil {
+ lastRoot = common.BytesToHash(bytes)
+ }
+ if bytes, err := db.Get(lastAcceptedHashKey); err == nil {
+ lastHash = common.BytesToHash(bytes)
+ }
+ if bytes, err := database.GetUInt64(db, lastAcceptedHeightKey); err == nil {
+ lastHeight = bytes
+ }
+
+ return lastHash, lastRoot, lastHeight
+}
+
+func TestPersistedMetadata(t *testing.T) {
+ dbs := openDBs(t)
+ defer dbs.Close()
+
+ lastHash, lastRoot, lastHeight := getMetadata(dbs.metadata)
+ t.Logf("Last hash: %x, Last root: %x, Last height: %d", lastHash, lastRoot, lastHeight)
+}
+
+func TestCalculatePrefix(t *testing.T) {
+ prefix := prefixdb.JoinPrefixes(
+ prefixdb.MakePrefix(mainnetCChainID[:]),
+ VMDBPrefix,
+ )
+
+ prefix = append(prefix, prefixdb.MakePrefix(ethDBPrefix)...)
+ t.Logf("Prefix: %x", prefix)
+
+ t.Logf("Prefix: %x", prefixdb.MakePrefix(ethDBPrefix))
+}
+
+func init() {
+ c := make(chan os.Signal, 1)
+ signal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGPIPE, syscall.SIGHUP)
+ go cleanupOnInterrupt(c)
+}
+
+var cf struct {
+ o sync.Once
+ m sync.RWMutex
+ f []func()
+}
+
+// cleanupOnInterrupt executes the registered cleanup functions in LIFO order when an interrupt signal is caught, then exits the process
+func cleanupOnInterrupt(c chan os.Signal) {
+ for range c {
+ cf.o.Do(func() {
+ cf.m.RLock()
+ defer cf.m.RUnlock()
+ for i := len(cf.f) - 1; i >= 0; i-- {
+ cf.f[i]()
+ }
+ os.Exit(1)
+ })
+ }
+}
+
+// CleanupOnInterrupt registers a cleanup function to execute if an interrupt signal is caught
+func CleanupOnInterrupt(cleanup func()) {
+ cf.m.Lock()
+ defer cf.m.Unlock()
+ cf.f = append(cf.f, cleanup)
+}
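+
+// Example (illustrative): release a resource if the run is interrupted.
+//
+//	dbs := openDBs(t)
+//	CleanupOnInterrupt(func() { dbs.Close() })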
+
+func TestReprocessGenesis(t *testing.T) {
+ // nomt is commented out because it requires a separate server process to function
+ for _, backend := range []string{"merkledb", "legacy", "firewood" /* , "nomt" */} {
+ t.Run(backend, func(t *testing.T) { testReprocessGenesis(t, backend) })
+ }
+}
+
+func TestReprocessMainnetBlocks(t *testing.T) {
+ enableLogging()
+ source := openSourceDB(t)
+ defer source.Close()
+
+ dbs := openDBs(t)
+ defer dbs.Close()
+
+ lastHash, lastRoot, lastHeight := getMetadata(dbs.metadata)
+ t.Logf("Persisted metadata: Last hash: %x, Last root: %x, Last height: %d", lastHash, lastRoot, lastHeight)
+
+ if usePersistedStartBlock {
+ startBlock = lastHeight
+ }
+ if forceStartWithMismatch {
+ // Recover the last hash and root from the chain database. This makes it
+ // possible to continue from a database originally created with the hash scheme.
+ lastHash = rawdb.ReadCanonicalHash(dbs.chain, startBlock)
+ block := rawdb.ReadBlock(dbs.chain, lastHash, startBlock)
+ lastHeight, lastRoot = startBlock, block.Root()
+ t.Logf("Forcing start with mismatch: Last hash: %x, Last root: %x", lastHash, lastRoot)
+ }
+ require.Equal(t, lastHeight, startBlock, "Last height does not match start block")
+ if lastHash != (common.Hash{}) {
+ // Genesis has already been processed, so start from the block after the last accepted one
+ startBlock++
+ }
+
+ for _, backendName := range []string{"nomt", "merkledb", "legacy", "firewood"} {
+ t.Run(backendName, func(t *testing.T) {
+ backend := getMainnetBackend(t, backendName, source, dbs)
+ lastHash, lastRoot = reprocess(t, backend, lastHash, lastRoot, startBlock, endBlock)
+ t.Logf("Last hash: %x, Last root: %x", lastHash, lastRoot)
+ })
+ }
+}
+
+func testReprocessGenesis(t *testing.T, backendName string) {
+ dbs := openDBs(t)
+ defer dbs.Close()
+
+ blockCount := endBlock // use the end block as the block count, since we start from 0
+ backend := getBackend(t, backendName, int(blockCount), dbs)
+ cacheConfig := backend.CacheConfig
+
+ var lastHash, lastRoot common.Hash
+ start, stop := uint64(0), blockCount/2
+ lastHash, lastRoot = reprocess(t, backend, lastHash, lastRoot, start, stop)
+ if cacheConfig.SnapshotLimit > 0 {
+ accounts, storages := checkSnapshot(t, backend.Disk, false)
+ t.Logf("Iterated snapshot: Accounts: %d, Storages: %d", accounts, storages)
+ }
+
+ // Need to re-open backend as the previous one is closed
+ backend = getBackend(t, backendName, int(blockCount), dbs)
+ start, stop = blockCount/2+1, blockCount
+ lastHash, lastRoot = reprocess(t, backend, lastHash, lastRoot, start, stop)
+ if cacheConfig.SnapshotLimit > 0 {
+ accounts, storages := checkSnapshot(t, backend.Disk, false)
+ t.Logf("Iterated snapshot: Accounts: %d, Storages: %d", accounts, storages)
+ }
+ t.Logf("Last block: %d, Last hash: %x, Last root: %x", stop, lastHash, lastRoot)
+}
+
+func reprocess(
+ t *testing.T,
+ backend *reprocessBackend, lastHash, lastRoot common.Hash,
+ start, stop uint64,
+) (common.Hash, common.Hash) {
+ cacheConfig := backend.CacheConfig
+ db := backend.Disk
+
+ var lastInsertedRoot common.Hash
+ checkRootFn := func(expected, got common.Hash) bool {
+ lastInsertedRoot = got
+ if backend.VerifyRoot {
+ return expected == got
+ }
+ return true
+ }
+
+ var tapeRecorder *blockRecorder
+ if tapeDir != "" {
+ tapeRecorder = &blockRecorder{
+ fileManager: &fileManager{dir: tapeDir, newEach: 10_000},
+ }
+ defer tapeRecorder.Close()
+ cacheConfig.KeyValueDB.Writer = tapeRecorder
+ }
+
+ var opts []core.Opts
+ cacheConfig.SnapshotDelayInit = true
+ if start > 0 {
+ cacheConfig.SnapshotNoBuild = true // after genesis, snapshot must already be available
+ opts = append(opts, core.Opts{LastAcceptedRoot: lastRoot}) // after genesis, we must specify the last root
+ }
+ bc, err := core.NewBlockChain(
+ db, &cacheConfig, backend.Genesis, backend.Engine, vm.Config{}, lastHash, skipUpgradeCheck,
+ opts...,
+ )
+ require.NoError(t, err)
+ defer bc.Stop()
+
+ var lock sync.Mutex
+
+ CleanupOnInterrupt(func() {
+ lock.Lock()
+ defer lock.Unlock()
+
+ bc.Stop()
+ if tapeRecorder != nil {
+ tapeRecorder.Close()
+ }
+ })
+
+ if start == 0 {
+ // Handling the genesis block
+ normalGenesis := backend.Genesis.ToBlock()
+ require.NoError(t, bc.LoadGenesisState(normalGenesis))
+
+ lastRoot = normalGenesis.Root()
+ if kv := cacheConfig.KeyValueDB.KVBackend; kv != nil {
+ lastRoot = common.BytesToHash(kv.Root())
+ }
+
+ t.Logf("Genesis performed: hash: %x, root: %x", bc.CurrentBlock().Hash(), lastRoot)
+ if tapeRecorder != nil {
+ t.Logf("Accounts: %d, Storages: %d", len(tapeRecorder.accountWrites), len(tapeRecorder.storageWrites))
+ if tapeVerbose {
+ for _, kv := range tapeRecorder.accountWrites {
+ t.Logf("Account: %x, %x", kv.Key, kv.Value)
+ }
+ for _, kv := range tapeRecorder.storageWrites {
+ t.Logf("Storage: %x, %x", kv.Key, kv.Value)
+ }
+ }
+ }
+ start = 1
+ }
+
+ bc.Validator().(*core.BlockValidator).CheckRoot = checkRootFn
+ bc.InitializeSnapshots(&core.Opts{LastAcceptedRoot: lastRoot})
+ if tapeRecorder != nil {
+ bc.SetSnapWriter(tapeRecorder)
+ }
+
+ lastLogTime := time.Now()
+ for i := start; i <= stop; i++ {
+ block := backend.GetBlock(i)
+ isApricotPhase5 := backend.Genesis.Config.IsApricotPhase5(block.Time())
+ atomicTxs, err := atomic.ExtractAtomicTxs(block.ExtData(), isApricotPhase5, atomic.Codec)
+ require.NoError(t, err)
+
+ // Override parentRoot to match last state
+ parent := bc.GetHeaderByNumber(block.NumberU64() - 1)
+ parent.Root = lastRoot
+
+ // Take lock here to prevent shutdown before block is accepted
+ lock.Lock()
+ err = bc.InsertBlockManualWithParent(block, parent, true)
+ require.NoError(t, err)
+
+ if tapeRecorder != nil {
+ if i%uint64(logEach) == 0 {
+ tapeRecorder.Summary(block, uint16(len(atomicTxs)))
+ }
+ tapeRecorder.WriteToDisk(block, uint16(len(atomicTxs)))
+ tapeRecorder.Reset()
+ } else {
+ if i%uint64(logEach) == 0 {
+ took := time.Since(lastLogTime)
+ lastLogTime = time.Now()
+ t.Logf("(%v) Block: %d, Txs: %d (+ %d atomic), Parent State: %s", took.Truncate(time.Millisecond), i, len(block.Transactions()), len(atomicTxs), lastRoot.TerminalString())
+ }
+ }
+
+ // t.Logf("Accepting block %d, was inserted with root: %x, hash: %x", i, lastInsertedRoot, block.Hash())
+ errorOnClosed := true // make sure block is accepted
+ err = bc.AcceptWithRoot(block, lastInsertedRoot, errorOnClosed)
+ require.NoError(t, err)
+
+ lastRoot = lastInsertedRoot
+ lastHash = block.Hash()
+
+ bc.DrainAcceptorQueue()
+
+ updateMetadata(t, backend.Metadata, lastHash, lastRoot, i)
+ lock.Unlock()
+ }
+
+ return lastHash, lastRoot
+}
+
+func updateMetadata(t *testing.T, db database.Database, lastHash, lastRoot common.Hash, lastHeight uint64) {
+ require.NoError(t, db.Put(lastAcceptedRootKey, lastRoot.Bytes()))
+ require.NoError(t, db.Put(lastAcceptedHashKey, lastHash.Bytes()))
+ require.NoError(t, database.PutUInt64(db, lastAcceptedHeightKey, lastHeight))
+}
+
+func TestCheckSnapshot(t *testing.T) {
+ dbs := openDBs(t)
+ defer dbs.Close()
+
+ accounts, storages := checkSnapshot(t, dbs.chain, false)
+ t.Logf("Snapshot: Accounts: %d, Storages: %d", accounts, storages)
+}
+
+func checkSnapshot(t *testing.T, db ethdb.Database, verbose bool) (int, int) {
+ t.Helper()
+
+ accountHist := histogram.NewFast()
+ storageHist := histogram.NewFast()
+
+ it := db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
+ defer it.Release()
+ accounts := 0
+ for it.Next() {
+ if len(it.Key()) != 33 {
+ continue
+ }
+ accounts++
+ accountHist.Update(float64(len(it.Value())))
+ if verbose {
+ t.Logf("Snapshot (account): %x, %x\n", it.Key(), it.Value())
+ }
+ }
+
+ it2 := db.NewIterator(rawdb.SnapshotStoragePrefix, nil)
+ defer it2.Release()
+ storages := 0
+ for it2.Next() {
+ if len(it2.Key()) != 65 {
+ continue
+ }
+ storages++
+ storageHist.Update(float64(len(it2.Value())))
+ if verbose {
+ t.Logf("Snapshot (storage): %x, %x", it2.Key(), it2.Value())
+ }
+ }
+
+ quantiles := []float64{0.5, 0.9, 0.99, 0.999}
+ for _, q := range quantiles {
+ t.Logf("Snapshot quantile %v (account): %.2f", q, accountHist.Quantile(q))
+ t.Logf("Snapshot quantile %v (storage): %.2f", q, storageHist.Quantile(q))
+ }
+ return accounts, storages
+}
+
+func enableLogging() {
+ log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelInfo, true)))
+}
diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go
index 879cb8988e..b5f5521429 100644
--- a/plugin/evm/vm.go
+++ b/plugin/evm/vm.go
@@ -23,6 +23,7 @@ import (
"github.com/ava-labs/avalanchego/network/p2p/gossip"
"github.com/ava-labs/avalanchego/upgrade"
avalanchegoConstants "github.com/ava-labs/avalanchego/utils/constants"
+ firewood "github.com/ava-labs/firewood/ffi/v2"
"github.com/prometheus/client_golang/prometheus"
"github.com/ava-labs/coreth/consensus/dummy"
@@ -43,6 +44,7 @@ import (
"github.com/ava-labs/coreth/plugin/evm/atomic"
"github.com/ava-labs/coreth/plugin/evm/config"
"github.com/ava-labs/coreth/plugin/evm/message"
+ "github.com/ava-labs/coreth/shim/fw"
"github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/coreth/triedb/hashdb"
"github.com/ava-labs/coreth/utils"
@@ -374,6 +376,7 @@ func (vm *VM) Initialize(
}
vm.logger = corethLogger
+ log.Info("AVAX assetID", "assetID", vm.ctx.AVAXAssetID)
log.Info("Initializing Coreth VM", "Version", Version, "Config", vm.config)
if deprecateMsg != "" {
@@ -455,6 +458,7 @@ func (vm *VM) Initialize(
vm.ethConfig = ethconfig.NewDefaultConfig()
vm.ethConfig.Genesis = g
+ log.Info("Genesis bytes", "bytes", genesisBytes)
vm.ethConfig.NetworkId = vm.chainID.Uint64()
vm.genesisHash = vm.ethConfig.Genesis.ToBlock().Hash() // must create genesis hash before [vm.readLastAccepted]
lastAcceptedHash, lastAcceptedHeight, err := vm.readLastAccepted()
@@ -504,6 +508,16 @@ func (vm *VM) Initialize(
vm.ethConfig.AcceptedCacheSize = vm.config.AcceptedCacheSize
vm.ethConfig.TransactionHistory = vm.config.TransactionHistory
vm.ethConfig.SkipTxIndexing = vm.config.SkipTxIndexing
+ if file := vm.config.FirewoodDBFile; file != "" {
+ var fwdb firewood.Firewood
+ if fileExists(file) {
+ fwdb = firewood.OpenDatabase(file)
+ } else {
+ fwdb = firewood.CreateDatabase(file)
+ }
+ vm.ethConfig.KVBackend = &fw.Firewood{Firewood: fwdb}
+ log.Warn("Using Firewood database (experimental)", "file", file)
+ }
// Create directory for offline pruning
if len(vm.ethConfig.OfflinePruningDataDirectory) != 0 {
@@ -576,6 +590,9 @@ func (vm *VM) Initialize(
if err != nil {
return err
}
+
+ go vm.ctx.Log.RecoverAndPanic(vm.startContinuousProfiler)
+
if err := vm.initializeChain(lastAcceptedHash); err != nil {
return err
}
@@ -605,8 +622,6 @@ func (vm *VM) Initialize(
}
vm.atomicTrie = vm.atomicBackend.AtomicTrie()
- go vm.ctx.Log.RecoverAndPanic(vm.startContinuousProfiler)
-
// so [vm.baseCodec] is a dummy codec use to fulfill the secp256k1fx VM
// interface. The fx will register all of its types, which can be safely
// ignored by the VM's codec.
@@ -1872,3 +1887,8 @@ func (vm *VM) newExportTx(
return tx, nil
}
+
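+// fileExists reports whether filename exists. Note that any Stat error other
+// than "does not exist" (e.g. a permission error) is treated as the file
+// existing, so an unreadable database file is opened rather than recreated.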
+func fileExists(filename string) bool {
+ _, err := os.Stat(filename)
+ return err == nil || !os.IsNotExist(err)
+}
diff --git a/plugin/evm/vm_warp_test.go b/plugin/evm/vm_warp_test.go
index e44f5ba606..920fc3a506 100644
--- a/plugin/evm/vm_warp_test.go
+++ b/plugin/evm/vm_warp_test.go
@@ -6,6 +6,7 @@ import (
"context"
"encoding/json"
"errors"
+ "fmt"
"math/big"
"testing"
"time"
@@ -491,6 +492,7 @@ func TestReceiveWarpMessage(t *testing.T) {
// Note each test corresponds to a block, the tests must be ordered by block
// time and cannot, eg be run in parallel or a separate golang test.
for _, test := range tests {
+ fmt.Println("Running test:", test.name)
testReceiveWarpMessage(
t, issuer, vm, test.sourceChainID, test.msgFrom, test.useSigners, test.blockTime,
)
diff --git a/shim/fw/firewood.go b/shim/fw/firewood.go
new file mode 100644
index 0000000000..56cc7cdff3
--- /dev/null
+++ b/shim/fw/firewood.go
@@ -0,0 +1,28 @@
+package fw
+
+import (
+ "github.com/ava-labs/coreth/triedb"
+ firewood "github.com/ava-labs/firewood/ffi/v2"
+)
+
+var _ triedb.KVBackend = &Firewood{}
+
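+// Firewood wraps the firewood FFI handle so it can serve as the KVBackend
+// used by the triedb shim.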
+type Firewood struct {
+ firewood.Firewood
+}
+
+// PrefixDelete is a no-op because Firewood implements account deletion as a
+// prefix delete: deleting an account removes all of its storage as well.
+func (f *Firewood) PrefixDelete(prefix []byte) (int, error) {
+ return 0, nil
+}
+
+// Update updates the trie with the provided key-value pairs. The Firewood
+// FFI does not accept empty batches, so for an empty batch the current root
+// is returned unchanged.
+func (f *Firewood) Update(ks, vs [][]byte) ([]byte, error) {
+ if len(ks) == 0 {
+ return f.Firewood.Root(), nil
+ }
+ return f.Firewood.Update(ks, vs)
+}
diff --git a/shim/geth_backend.go b/shim/geth_backend.go
new file mode 100644
index 0000000000..08e433d617
--- /dev/null
+++ b/shim/geth_backend.go
@@ -0,0 +1,72 @@
+package shim
+
+import (
+ "github.com/ava-labs/coreth/trie"
+ "github.com/ava-labs/coreth/trie/trienode"
+ "github.com/ava-labs/coreth/triedb/database"
+ "github.com/ethereum/go-ethereum/common"
+)
+
+var _ Backend = (*LegacyBackend)(nil)
+
+type LegacyBackend struct {
+ hash common.Hash
+ hashed bool
+ tr *trie.Trie
+
+ addrHash common.Hash
+ writer writer
+}
+
+type writer interface {
+ MustUpdate(key, value []byte)
+}
+
+func NewLegacyBackend(
+ stateRoot common.Hash, addrHash common.Hash, root common.Hash, db database.Database,
+ writer writer,
+) (*LegacyBackend, error) {
+ trieID := trie.StateTrieID(root)
+ if addrHash != (common.Hash{}) {
+ trieID = trie.StorageTrieID(stateRoot, addrHash, root)
+ }
+
+ tr, err := trie.New(trieID, db)
+ if err != nil {
+ return nil, err
+ }
+
+ return &LegacyBackend{tr: tr, addrHash: addrHash, writer: writer}, nil
+}
+
+func (b *LegacyBackend) Prefetch(key []byte) ([]byte, error) { return b.tr.Get(key) }
+func (b *LegacyBackend) Get(key []byte) ([]byte, error) { return b.tr.Get(key) }
+
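+// Hash applies the batched writes to the underlying trie, mirroring each
+// write to the optional writer (prefixed with the account hash for storage
+// tries), and memoizes the resulting root so Commit can reuse it.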
+func (b *LegacyBackend) Hash(batch Batch) common.Hash {
+ if b.hashed {
+ return b.hash
+ }
+ for _, kv := range batch {
+ b.tr.MustUpdate(kv.Key, kv.Value)
+
+ if b.writer == nil {
+ continue
+ }
+ if b.addrHash != (common.Hash{}) {
+ key := append(b.addrHash.Bytes(), kv.Key...)
+ b.writer.MustUpdate(key, kv.Value)
+ } else {
+ b.writer.MustUpdate(kv.Key, kv.Value)
+ }
+ }
+ b.hashed = true
+ b.hash = b.tr.Hash()
+ return b.hash
+}
+
+func (b *LegacyBackend) Commit(batch Batch, collectLeaf bool) (common.Hash, *trienode.NodeSet, error) {
+ if !b.hashed {
+ b.Hash(batch)
+ }
+ return b.tr.Commit(collectLeaf)
+}
diff --git a/shim/hasher.go b/shim/hasher.go
new file mode 100644
index 0000000000..7446d6e777
--- /dev/null
+++ b/shim/hasher.go
@@ -0,0 +1,65 @@
+// (c) 2020-2021, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package shim
+
+import (
+ "sync"
+
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/rlp"
+ "golang.org/x/crypto/sha3"
+)
+
+// hasher is a type used for the trie Hash operation. A hasher has some
+// internal preallocated temp space
+type hasher struct {
+ sha crypto.KeccakState
+ tmp []byte
+ encbuf rlp.EncoderBuffer
+ parallel bool // Whether to use parallel threads when hashing
+}
+
+// hasherPool holds pureHashers
+var hasherPool = sync.Pool{
+ New: func() interface{} {
+ return &hasher{
+ tmp: make([]byte, 0, 550), // cap is as large as a full fullNode.
+ sha: sha3.NewLegacyKeccak256().(crypto.KeccakState),
+ encbuf: rlp.NewEncoderBuffer(nil),
+ }
+ },
+}
+
+func newHasher(parallel bool) *hasher {
+ h := hasherPool.Get().(*hasher)
+ h.parallel = parallel
+ return h
+}
+
+func returnHasherToPool(h *hasher) {
+ hasherPool.Put(h)
+}
diff --git a/shim/kv_backend.go b/shim/kv_backend.go
new file mode 100644
index 0000000000..9166507fac
--- /dev/null
+++ b/shim/kv_backend.go
@@ -0,0 +1,107 @@
+package shim
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/ava-labs/coreth/core/types"
+ "github.com/ava-labs/coreth/trie/trienode"
+ "github.com/ava-labs/coreth/triedb"
+ "github.com/ava-labs/coreth/triedb/database"
+ "github.com/ethereum/go-ethereum/common"
+)
+
+var (
+ ErrRootMismatch = errors.New("root mismatch")
+ ErrStorageStateRootMismatch = errors.New("storage state root mismatch")
+)
+
+type (
+ KVBackend = triedb.KVBackend
+)
+
+type KV struct {
+ Key []byte
+ Value []byte
+}
+
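+// Batch is the ordered list of key/value writes produced for one block.
+// Keys use the hashed layout shared by all shims: a 32-byte key addresses an
+// account, a 64-byte key (account hash || slot hash) addresses a storage
+// slot, and an empty value marks a deletion.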
+type Batch []KV
+
+type KVTrieBackend struct {
+ hashed bool
+ hash common.Hash
+ backend KVBackend
+}
+
+func (k *KVTrieBackend) Get(key []byte) ([]byte, error) {
+ return k.backend.Get(key)
+}
+
+func (k *KVTrieBackend) Prefetch(key []byte) ([]byte, error) {
+ return k.backend.Prefetch(key)
+}
+
+func (k *KVTrieBackend) Hash(batch Batch) common.Hash {
+ if k.hashed {
+ return k.hash
+ }
+ ks, vs := make([][]byte, len(batch)), make([][]byte, len(batch))
+ for i, kv := range batch {
+ ks[i] = kv.Key
+ vs[i] = kv.Value
+ }
+ root, err := k.backend.Update(ks, vs)
+ if err != nil {
+ panic(fmt.Sprintf("failed to update trie: %v", err))
+ }
+ k.hashed = true
+ k.hash = common.BytesToHash(root)
+ return k.hash
+}
+
+func (k *KVTrieBackend) Commit(batch Batch, collectLeaf bool) (common.Hash, *trienode.NodeSet, error) {
+ if !k.hashed {
+ k.Hash(batch)
+ }
+ return k.hash, nil, nil
+}
+
+// NewAccountTrieKV creates a new state trie backed by a key-value store.
+// db is used for preimages
+func NewAccountTrieKV(stateRoot common.Hash, kv KVBackend, db database.Database) (*StateTrie, error) {
+ if stateRoot == types.EmptyRootHash {
+ stateRoot = common.Hash{}
+ }
+ kvRoot := common.BytesToHash(kv.Root())
+ if kvRoot != stateRoot {
+ return nil, fmt.Errorf("%w: expected %x, got %x", ErrRootMismatch, stateRoot, kvRoot)
+ }
+
+ tr := &Trie{
+ backend: &KVTrieBackend{backend: kv},
+ origin: kvRoot,
+ }
+ return &StateTrie{trie: tr, db: db}, nil
+}
+
+// NewStorageTrieKV creates a new storage trie backed by a key-value store.
+func NewStorageTrieKV(stateRoot common.Hash, account common.Hash, accountTrie *StateTrie) (*StateTrie, error) {
+ if stateRoot == types.EmptyRootHash {
+ stateRoot = common.Hash{}
+ }
+ if accountTrie.trie.origin != stateRoot {
+ return nil, fmt.Errorf("%w: expected %x, got %x", ErrStorageStateRootMismatch, stateRoot, accountTrie.trie.origin)
+ }
+ tr := &Trie{
+ parent: accountTrie.trie,
+ backend: accountTrie.trie.backend,
+ prefix: account.Bytes(),
+ }
+ return &StateTrie{trie: tr, db: accountTrie.db}, nil
+}
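+
+// memBackend is a minimal in-memory KVBackend sketch, handy as a test double.
+// Illustrative only: the method set mirrors the shims in this repo, and the
+// "root" is just a per-update counter rather than a real trie commitment, so
+// it cannot be validated against block header roots.
+type memBackend struct {
+ data map[string][]byte
+ gen uint64
+}
+
+func newMemBackend() *memBackend {
+ return &memBackend{data: make(map[string][]byte)}
+}
+
+func (m *memBackend) Root() []byte {
+ root := make([]byte, 32)
+ for i := 0; i < 8; i++ {
+ root[31-i] = byte(m.gen >> (8 * i)) // big-endian counter in the last 8 bytes
+ }
+ return root
+}
+
+func (m *memBackend) Get(key []byte) ([]byte, error) { return m.data[string(key)], nil }
+func (m *memBackend) Prefetch(key []byte) ([]byte, error) { return nil, nil }
+
+func (m *memBackend) Update(ks, vs [][]byte) ([]byte, error) {
+ for i, k := range ks {
+ if len(vs[i]) == 0 {
+ delete(m.data, string(k)) // an empty value marks a deletion
+ } else {
+ m.data[string(k)] = vs[i]
+ }
+ }
+ m.gen++
+ return m.Root(), nil
+}
+
+func (m *memBackend) Commit(root []byte) error { return nil }
+func (m *memBackend) Close() error { return nil }
+func (m *memBackend) PrefixDelete(prefix []byte) (int, error) { return 0, nil }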
diff --git a/shim/legacy/legacy.go b/shim/legacy/legacy.go
new file mode 100644
index 0000000000..5e03996c99
--- /dev/null
+++ b/shim/legacy/legacy.go
@@ -0,0 +1,341 @@
+package legacy
+
+import (
+ "encoding/binary"
+ "fmt"
+
+ "github.com/ava-labs/coreth/core/types"
+ "github.com/ava-labs/coreth/trie"
+ "github.com/ava-labs/coreth/trie/trienode"
+ "github.com/ava-labs/coreth/triedb"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+var _ triedb.KVBackend = &Legacy{}
+
+type Legacy struct {
+ triedb *triedb.Database
+ root common.Hash
+ count uint64
+ dereference bool
+ trackDeletedTries ethdb.KeyValueStore
+ accountRootCheckDisabled bool
+ needsCommit bool
+}
+
+func New(triedb *triedb.Database, root common.Hash, count uint64, dereference bool) *Legacy {
+ return &Legacy{
+ triedb: triedb,
+ root: root,
+ count: count,
+ dereference: dereference,
+ }
+}
+
+func (l *Legacy) TrackDeletedTries(db ethdb.KeyValueStore) {
+ l.trackDeletedTries = db
+}
+
+func (l *Legacy) DisableAccountRootCheck() {
+ l.accountRootCheckDisabled = true
+}
+
+func getAccountRoot(tr *trie.Trie, accHash common.Hash) (common.Hash, error) {
+ root := types.EmptyRootHash
+ accBytes, err := tr.Get(accHash[:])
+ if err != nil {
+ return common.Hash{}, err
+ }
+ if len(accBytes) > 0 {
+ var acc types.StateAccount
+ if err := rlp.DecodeBytes(accBytes, &acc); err != nil {
+ return common.Hash{}, fmt.Errorf("failed to decode account: %w", err)
+ }
+ root = acc.Root
+ }
+ return root, nil
+}
+
+func setAccountRoot(tr *trie.Trie, accHash common.Hash, root common.Hash) error {
+ accBytes, err := tr.Get(accHash[:])
+ if err != nil {
+ return err
+ }
+ var acc types.StateAccount
+ if len(accBytes) > 0 {
+ if err := rlp.DecodeBytes(accBytes, &acc); err != nil {
+ return fmt.Errorf("failed to decode account: %w", err)
+ }
+ }
+ acc.Root = root
+ accBytes, err = rlp.EncodeToBytes(&acc)
+ if err != nil {
+ return fmt.Errorf("failed to encode account: %w", err)
+ }
+ tr.MustUpdate(accHash[:], accBytes)
+ return nil
+}
+
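+// Update applies one block of hashed key/value writes: storage tries are
+// updated and committed first so their new roots can be checked against the
+// account records, then the account trie is committed and the merged node
+// set is handed to the underlying triedb.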
+func (l *Legacy) Update(ks, vs [][]byte) ([]byte, error) {
+ // Collect all nodes that are modified during the update
+ // Defined here so we can process storage deletes
+ nodes := trienode.NewMergedNodeSet()
+
+ accounts, err := trie.New(trie.StateTrieID(l.root), l.triedb)
+ if err != nil {
+ return nil, err
+ }
+ // Process the storage tries first, this means we can access the root for the
+ // storage tries before they are updated in the account trie. Necessary for
+ // the hash scheme.
+ tries := make(map[common.Hash]*trie.Trie)
+ for i, k := range ks {
+ v := vs[i]
+ accHash := common.BytesToHash(k[:32])
+ if len(k) == 32 {
+ if len(v) == 0 {
+ prevRoot, err := getAccountRoot(accounts, accHash)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get account root %x: %w", accHash, err)
+ }
+ if prevRoot != types.EmptyRootHash {
+ return nil, fmt.Errorf("account %x is deleted but has non-empty storage trie", accHash)
+ }
+ if _, ok := tries[accHash]; ok {
+ return nil, fmt.Errorf("account %x is deleted but has pending storage trie", accHash)
+ }
+ }
+
+ // otherwise, skip account updates for now
+ continue
+ }
+
+ tr, ok := tries[accHash]
+ if !ok {
+ if l.trackDeletedTries != nil {
+ found, err := l.trackDeletedTries.Has(accHash[:])
+ if err != nil {
+ return nil, fmt.Errorf("failed to check if account %x is deleted: %w", accHash, err)
+ }
+ if found {
+ got, err := l.trackDeletedTries.Get(accHash[:])
+ if err != nil {
+ return nil, fmt.Errorf("failed to get deleted trie %x: %w", accHash, err)
+ }
+ fmt.Println("::: found deleted trie", accHash, binary.BigEndian.Uint64(got), l.count, i)
+ return nil, fmt.Errorf("account %x is deleted", accHash)
+ }
+ }
+ root, err := getAccountRoot(accounts, accHash)
+ if err != nil {
+ return nil, err
+ }
+ tr, err = trie.New(trie.StorageTrieID(l.root, accHash, root), l.triedb)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create storage trie %x: %w", accHash, err)
+ }
+ tries[accHash] = tr
+ }
+
+ // Update the storage trie
+ if len(v) == 0 {
+ tr.MustDelete(k[32:])
+ } else {
+ tr.MustUpdate(k[32:], v)
+ }
+ }
+
+ // Hash the storage tries
+ for _, tr := range tries {
+ _, set, err := tr.Commit(false)
+ if err != nil {
+ return nil, err
+ }
+ if set != nil {
+ nodes.Merge(set)
+ }
+ }
+
+ // Update the account trie
+ for i, k := range ks {
+ v := vs[i]
+ if len(k) == 64 {
+ continue
+ }
+ if len(v) == 0 {
+ accounts.MustDelete(k)
+ } else {
+ var (
+ root common.Hash
+ shouldSetRoot bool
+ )
+ if l.accountRootCheckDisabled {
+ // If the trie is updated, the root will be set below
+ if _, ok := tries[common.BytesToHash(k[:32])]; !ok {
+ // Otherwise, avoid changing the root from its current value
+ r, err := getAccountRoot(accounts, common.BytesToHash(k[:32]))
+ if err != nil {
+ return nil, err
+ }
+ root = r
+ shouldSetRoot = true
+ }
+ }
+
+ accounts.MustUpdate(k, v)
+
+ if shouldSetRoot {
+ if err := setAccountRoot(accounts, common.BytesToHash(k[:32]), root); err != nil {
+ return nil, err
+ }
+ }
+ }
+ }
+
+ // Verify account trie updates match the storage trie updates
+ for accHash, tr := range tries {
+ root, err := getAccountRoot(accounts, accHash)
+ if err != nil {
+ return nil, err
+ }
+ if root != tr.Hash() {
+ if l.accountRootCheckDisabled {
+ if err := setAccountRoot(accounts, accHash, tr.Hash()); err != nil {
+ return nil, err
+ }
+ } else {
+ return nil, fmt.Errorf("account %x trie root mismatch (%x != %x)", accHash, root, tr.Hash())
+ }
+ }
+ }
+
+ next, set, err := accounts.Commit(true)
+ if err != nil {
+ return nil, err
+ }
+ if set != nil {
+ nodes.Merge(set)
+ }
+ if l.root == next {
+ fmt.Println("::: root is the same", l.root, next)
+ return next[:], nil
+ }
+ if err := l.triedb.Update(next, l.root, l.count, nodes, nil); err != nil {
+ return nil, err
+ }
+
+ // TODO: fix hashdb scheme later
+ l.root = next
+ l.count++
+ l.needsCommit = true
+ return next[:], nil
+}
+
+func (l *Legacy) Commit(rootBytes []byte) error {
+ if !l.needsCommit {
+ fmt.Println("::: no need to commit")
+ return nil
+ }
+ root := common.BytesToHash(rootBytes)
+ return l.triedb.Commit(root, false)
+}
+func (l *Legacy) Close() error { return nil }
+func (l *Legacy) Get(key []byte) ([]byte, error) { panic("implement me") }
+func (l *Legacy) Prefetch(key []byte) ([]byte, error) { panic("implement me") }
+func (l *Legacy) Root() []byte { return l.root[:] }
+
+const (
+ // storageDeleteLimit denotes the highest permissible memory allocation
+ // employed for contract storage deletion.
+ storageDeleteLimit = 512 * 1024 * 1024
+)
+
+// slowDeleteStorage serves as a less-efficient alternative to "fastDeleteStorage,"
+// employed when the associated state snapshot is not available. It iterates the
+// storage slots along with all internal trie nodes via trie directly.
+func slowDeleteStorage(
+ db *triedb.Database, originalRoot, addrHash, root common.Hash,
+) (bool, int, map[common.Hash][]byte, *trienode.NodeSet, error) {
+ tr, err := trie.New(trie.StorageTrieID(originalRoot, addrHash, root), db)
+ if err != nil {
+ return false, 0, nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err)
+ }
+ it, err := tr.NodeIterator(nil)
+ if err != nil {
+ return false, 0, nil, nil, fmt.Errorf("failed to open storage iterator, err: %w", err)
+ }
+ var (
+ size common.StorageSize
+ leafs int
+ nodes = trienode.NewNodeSet(addrHash)
+ slots = make(map[common.Hash][]byte)
+ )
+ for it.Next(true) {
+ if size > storageDeleteLimit {
+ return true, leafs, nil, nil, nil
+ }
+ if it.Leaf() {
+ slots[common.BytesToHash(it.LeafKey())] = common.CopyBytes(it.LeafBlob())
+ size += common.StorageSize(common.HashLength + len(it.LeafBlob()))
+ leafs++
+ continue
+ }
+ if it.Hash() == (common.Hash{}) {
+ continue
+ }
+ size += common.StorageSize(len(it.Path()))
+ nodes.AddNode(it.Path(), trienode.NewDeleted())
+ }
+ if err := it.Error(); err != nil {
+ return false, 0, nil, nil, err
+ }
+ return false, leafs, slots, nodes, nil
+}
+
+func (l *Legacy) PrefixDelete(prefix []byte) (int, error) {
+ if l.trackDeletedTries != nil {
+ if err := l.trackDeletedTries.Put(prefix, binary.BigEndian.AppendUint64(nil, l.count)); err != nil {
+ return 0, fmt.Errorf("failed to track deleted trie %x: %w", prefix, err)
+ }
+ }
+
+ accounts, err := trie.New(trie.StateTrieID(l.root), l.triedb)
+ if err != nil {
+ return 0, err
+ }
+ origin, err := getAccountRoot(accounts, common.BytesToHash(prefix))
+ if err != nil {
+ return 0, err
+ }
+ if origin == types.EmptyRootHash {
+ return 0, nil
+ }
+ nodes := trienode.NewMergedNodeSet()
+ _, leafs, _, set, err := slowDeleteStorage(l.triedb, l.root, common.BytesToHash(prefix), origin)
+ if err != nil {
+ return 0, err
+ }
+ if set != nil {
+ nodes.Merge(set)
+ }
+ accounts.MustDelete(prefix)
+ next, set, err := accounts.Commit(true)
+ if err != nil {
+ return 0, err
+ }
+ if set != nil {
+ nodes.Merge(set)
+ }
+ if l.root == next {
+ fmt.Println("::: root is the same", l.root, next)
+ return leafs, nil
+ }
+ if err := l.triedb.Update(next, l.root, l.count, nodes, nil); err != nil {
+ return 0, err
+ }
+ l.root = next
+ l.count++
+ l.needsCommit = true // ensure a following Commit flushes this deletion as well
+ return leafs, nil
+}
diff --git a/shim/legacy/snapshot.go b/shim/legacy/snapshot.go
new file mode 100644
index 0000000000..5ef09fd5a3
--- /dev/null
+++ b/shim/legacy/snapshot.go
@@ -0,0 +1,82 @@
+package legacy
+
+import (
+ "github.com/ava-labs/coreth/core/rawdb"
+ "github.com/ava-labs/coreth/core/types"
+ "github.com/ava-labs/coreth/triedb"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+var _ triedb.KVBackend = &Snapshot{}
+
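+// Snapshot mirrors Update batches into the flat snapshot tables (account and
+// storage snapshots) instead of a trie. Root and Commit are intentionally
+// no-ops, so this backend only serves as a write-through recorder.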
+type Snapshot struct {
+ db ethdb.Database
+}
+
+func NewSnapshot(db ethdb.Database) *Snapshot {
+ return &Snapshot{db: db}
+}
+
+func (s *Snapshot) Get(key []byte) ([]byte, error) {
+ acc := common.BytesToHash(key[:32])
+ var val []byte
+ if len(key) == 32 {
+ val = rawdb.ReadAccountSnapshot(s.db, acc)
+ } else {
+ val = rawdb.ReadStorageSnapshot(s.db, acc, common.BytesToHash(key[32:]))
+ }
+ return val, nil
+}
+
+func (s *Snapshot) Prefetch(key []byte) ([]byte, error) {
+ return nil, nil
+}
+
+func (s *Snapshot) Update(ks, vs [][]byte) ([]byte, error) {
+ for i, k := range ks {
+ acc := common.BytesToHash(k[:32])
+ if len(k) == 32 {
+ if len(vs[i]) == 0 {
+ rawdb.DeleteAccountSnapshot(s.db, acc)
+ } else {
+ var account types.StateAccount
+ if err := rlp.DecodeBytes(vs[i], &account); err != nil {
+ return nil, err
+ }
+ data := types.SlimAccountRLP(account)
+ rawdb.WriteAccountSnapshot(s.db, acc, data)
+ }
+ } else {
+ if len(vs[i]) == 0 {
+ rawdb.DeleteStorageSnapshot(s.db, acc, common.BytesToHash(k[32:]))
+ } else {
+ rawdb.WriteStorageSnapshot(s.db, acc, common.BytesToHash(k[32:]), vs[i])
+ }
+ }
+ }
+ return nil, nil
+}
+
+func (s *Snapshot) Commit(root []byte) error { return nil }
+func (s *Snapshot) Close() error { return nil }
+func (s *Snapshot) Root() []byte { return nil }
+
+func (s *Snapshot) PrefixDelete(k []byte) (int, error) {
+ rawdb.DeleteAccountSnapshot(s.db, common.BytesToHash(k))
+
+ it := s.db.NewIterator(append(rawdb.SnapshotStoragePrefix, k...), nil)
+ defer it.Release()
+
+ keysDeleted := 0
+ for it.Next() {
+ k := it.Key()[len(rawdb.SnapshotStoragePrefix):]
+ rawdb.DeleteStorageSnapshot(s.db, common.BytesToHash(k[:32]), common.BytesToHash(k[32:]))
+ keysDeleted++
+ }
+ if err := it.Error(); err != nil {
+ return 0, err
+ }
+ return keysDeleted, nil
+}
diff --git a/shim/merkledb/merkledb.go b/shim/merkledb/merkledb.go
new file mode 100644
index 0000000000..f1d8b6fe31
--- /dev/null
+++ b/shim/merkledb/merkledb.go
@@ -0,0 +1,144 @@
+package merkledb
+
+import (
+ "context"
+ "fmt"
+ "sync"
+
+ "github.com/ava-labs/avalanchego/database"
+ "github.com/ava-labs/avalanchego/x/merkledb"
+ "github.com/ava-labs/coreth/triedb"
+ "github.com/ethereum/go-ethereum/common"
+)
+
+var _ triedb.KVBackend = &MerkleDB{}
+
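+// MerkleDB adapts an avalanchego merkledb to the KVBackend interface. Each
+// Update is staged as an uncommitted view layered on top of the previous
+// one; Commit later flushes views to disk in order, which keeps several
+// block states in flight between commits.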
+type MerkleDB struct {
+ lock sync.RWMutex
+ db merkledb.MerkleDB
+ pendingViews []merkledb.View
+ pendingViewRoots []common.Hash
+}
+
+func NewMerkleDB(db merkledb.MerkleDB) *MerkleDB {
+ return &MerkleDB{db: db}
+}
+
+func (m *MerkleDB) Get(key []byte) ([]byte, error) {
+ val, err := m.latestView().GetValue(context.TODO(), key)
+ if err == database.ErrNotFound {
+ return nil, nil
+ }
+ return val, err
+}
+
+func (m *MerkleDB) Prefetch(key []byte) ([]byte, error) {
+ return nil, m.db.PrefetchPath(key)
+}
+
+func (m *MerkleDB) latestView() merkledb.Trie {
+ m.lock.RLock()
+ defer m.lock.RUnlock()
+
+ return m.latestViewLocked()
+}
+
+func (m *MerkleDB) latestViewLocked() merkledb.Trie {
+ if len(m.pendingViews) == 0 {
+ return m.db
+ }
+ return m.pendingViews[len(m.pendingViews)-1]
+}
+
+func (m *MerkleDB) Update(ks, vs [][]byte) ([]byte, error) {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+
+ ctx := context.TODO()
+ changes := make([]database.BatchOp, len(ks))
+ for i, k := range ks {
+ v := vs[i]
+ changes[i] = database.BatchOp{Key: k, Value: v, Delete: len(v) == 0}
+ }
+ view, err := m.latestViewLocked().NewView(ctx, merkledb.ViewChanges{BatchOps: changes})
+ if err != nil {
+ return nil, err
+ }
+ root, err := view.GetMerkleRoot(ctx)
+ if err != nil {
+ return nil, err
+ }
+ m.pendingViews = append(m.pendingViews, view)
+ m.pendingViewRoots = append(m.pendingViewRoots, common.Hash(root))
+ return root[:], nil
+}
+
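+// Commit flushes pending views to disk, oldest first, up to and including
+// the view whose root matches rootBytes.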
+func (m *MerkleDB) Commit(rootBytes []byte) error {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+
+ root := common.BytesToHash(rootBytes)
+
+ if len(m.pendingViews) == 0 {
+ return fmt.Errorf("no pending views")
+ }
+ pendingRootIdx := -1
+ for i, pendingRoot := range m.pendingViewRoots {
+ if pendingRoot == root {
+ pendingRootIdx = i
+ break
+ }
+ }
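+ // Flush any older pending views first; commitToDisk always pops the oldest.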
+ if pendingRootIdx > 0 {
+ for i := 0; i < pendingRootIdx; i++ {
+ if err := m.commitToDisk(m.pendingViewRoots[0]); err != nil {
+ return err
+ }
+ }
+ }
+ return m.commitToDisk(root)
+}
+
+func (m *MerkleDB) commitToDisk(root common.Hash) error {
+ if m.pendingViewRoots[0] != root {
+ return fmt.Errorf("root mismatch: expected %x, got %x", root, m.pendingViewRoots[0])
+ }
+ ctx := context.TODO()
+ if err := m.pendingViews[0].CommitToDB(ctx); err != nil {
+ return err
+ }
+ m.pendingViews = m.pendingViews[1:]
+ m.pendingViewRoots = m.pendingViewRoots[1:]
+ fmt.Printf("Commit: %x\n", root)
+ return nil
+}
+
+func (m *MerkleDB) Root() []byte {
+ ctx := context.TODO()
+ root, err := m.latestView().GetMerkleRoot(ctx)
+ if err != nil {
+ panic(fmt.Sprintf("failed to get merkle root: %v", err))
+ }
+ return root[:]
+}
+
+func (m *MerkleDB) Close() error {
+ last := common.Hash{}
+ m.lock.Lock()
+ if len(m.pendingViewRoots) > 0 {
+ last = m.pendingViewRoots[len(m.pendingViewRoots)-1]
+ }
+ m.lock.Unlock()
+
+ if last != (common.Hash{}) {
+ if err := m.Commit(last[:]); err != nil {
+ return err
+ }
+ }
+
+ return m.db.Close()
+}
+
+func (m *MerkleDB) PrefixDelete(prefix []byte) (int, error) {
+ return 0, nil
+}
diff --git a/shim/nomt/message.proto b/shim/nomt/message.proto
new file mode 100644
index 0000000000..364c6e9305
--- /dev/null
+++ b/shim/nomt/message.proto
@@ -0,0 +1,59 @@
+syntax = "proto3";
+
+package database_interface;
+
+message Request {
+ oneof request {
+ RootRequest root = 1;
+ GetRequest get = 2;
+ PrefetchRequest prefetch = 3;
+ UpdateRequest update = 4;
+ CloseRequest close = 5;
+ }
+}
+
+message Response {
+ int32 err_code = 1;
+ oneof response {
+ RootResponse root = 2;
+ GetResponse get = 3;
+ PrefetchResponse prefetch = 4;
+ UpdateResponse update = 5;
+ CloseResponse close = 6;
+ }
+}
+
+message RootRequest {}
+message RootResponse {
+ bytes root = 1;
+}
+
+message GetRequest {
+ bytes key = 1;
+}
+
+message GetResponse {
+ bytes value = 2;
+}
+
+message PrefetchRequest {
+ bytes key = 1;
+}
+
+message PrefetchResponse {}
+
+message UpdateRequestItem {
+ bytes key = 1;
+ bytes value = 2;
+}
+
+message UpdateRequest {
+ repeated UpdateRequestItem items = 1;
+}
+
+message UpdateResponse {
+ bytes root = 2;
+}
+
+message CloseRequest { }
+message CloseResponse { }
\ No newline at end of file
diff --git a/shim/nomt/nomt.go b/shim/nomt/nomt.go
new file mode 100644
index 0000000000..00bf9530e7
--- /dev/null
+++ b/shim/nomt/nomt.go
@@ -0,0 +1,155 @@
+package nomt
+
+import (
+ "encoding/binary"
+ "net"
+ "sync"
+
+ "github.com/ava-labs/coreth/shim/nomt/nomt"
+ "github.com/ava-labs/coreth/triedb"
+ "github.com/ethereum/go-ethereum/log"
+ "google.golang.org/protobuf/proto"
+)
+
+var _ triedb.KVBackend = &Nomt{}
+
+const maxResponseSize = 1024
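+
+// Wire format: each request is a 4-byte big-endian length prefix followed by
+// the protobuf-encoded Request. Responses are read with a single Read into a
+// maxResponseSize buffer, which assumes the server sends each response in one
+// write smaller than that limit (a partial read would fail to unmarshal).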
+
+func response(conn net.Conn, req *nomt.Request) (*nomt.Response, error) {
+ data, err := proto.Marshal(req)
+ if err != nil {
+ return nil, err
+ }
+ if _, err := conn.Write(binary.BigEndian.AppendUint32(nil, uint32(len(data)))); err != nil {
+ return nil, err
+ }
+ if _, err := conn.Write(data); err != nil {
+ return nil, err
+ }
+
+ respData := make([]byte, maxResponseSize)
+ n, err := conn.Read(respData)
+ if err != nil {
+ return nil, err
+ }
+ var resp nomt.Response
+ if err := proto.Unmarshal(respData[:n], &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+type Nomt struct {
+ lock sync.RWMutex
+ conn net.Conn
+}
+
+func New(conn net.Conn) *Nomt {
+ return &Nomt{
+ conn: conn,
+ }
+}
+
+func (n *Nomt) response(req *nomt.Request) (*nomt.Response, error) {
+ n.lock.Lock()
+ defer n.lock.Unlock()
+
+ return response(n.conn, req)
+}
+
+func (n *Nomt) Root() []byte {
+ req := &nomt.Request{
+ Request: &nomt.Request_Root{
+ Root: &nomt.RootRequest{},
+ },
+ }
+ resp, err := n.response(req)
+ if err != nil {
+ log.Error("Failed to get root", "err", err)
+ return nil
+ }
+ return resp.GetRoot().Root
+}
+
+func (n *Nomt) Get(key []byte) ([]byte, error) {
+ req := &nomt.Request{
+ Request: &nomt.Request_Get{
+ Get: &nomt.GetRequest{
+ Key: key,
+ },
+ },
+ }
+
+ resp, err := n.response(req)
+ if err != nil {
+ return nil, err
+ }
+ if resp.GetErrCode() == 1 { // Not found
+ return nil, nil
+ }
+ return resp.GetGet().Value, nil
+}
+
+func (n *Nomt) Prefetch(key []byte) ([]byte, error) {
+ req := &nomt.Request{
+ Request: &nomt.Request_Prefetch{
+ Prefetch: &nomt.PrefetchRequest{
+ Key: key,
+ },
+ },
+ }
+
+ _, err := n.response(req)
+ return nil, err
+}
+
+func (n *Nomt) Update(ks, vs [][]byte) ([]byte, error) {
+ req := &nomt.Request{
+ Request: &nomt.Request_Update{
+ Update: &nomt.UpdateRequest{
+ Items: make([]*nomt.UpdateRequestItem, 0, len(ks)),
+ },
+ },
+ }
+
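+ // Deduplicate keys: the first occurrence wins and later duplicates are dropped.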
+ seen := make(map[string]struct{})
+ for i, k := range ks {
+ _, found := seen[string(k)]
+ if found {
+ continue
+ }
+ req.GetUpdate().Items = append(
+ req.GetUpdate().Items,
+ &nomt.UpdateRequestItem{Key: k, Value: vs[i]},
+ )
+ seen[string(k)] = struct{}{}
+ }
+
+ resp, err := n.response(req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.GetUpdate().Root, nil
+}
+
+func (n *Nomt) Commit(root []byte) error {
+ return nil
+}
+
+func (n *Nomt) Close() error {
+ close := &nomt.Request{
+ Request: &nomt.Request_Close{
+ Close: &nomt.CloseRequest{},
+ },
+ }
+ _, err := n.response(close)
+ if err != nil {
+ return err
+ }
+
+ return n.conn.Close()
+}
+
+func (n *Nomt) PrefixDelete(prefix []byte) (int, error) {
+ return 0, nil
+}
diff --git a/shim/nomt/nomt/message.pb.go b/shim/nomt/nomt/message.pb.go
new file mode 100644
index 0000000000..078d61137d
--- /dev/null
+++ b/shim/nomt/nomt/message.pb.go
@@ -0,0 +1,1082 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.2
+// source: message.proto
+
+package nomt
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Request struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Request:
+ //
+ // *Request_Root
+ // *Request_Get
+ // *Request_Prefetch
+ // *Request_Update
+ // *Request_Close
+ Request isRequest_Request `protobuf_oneof:"request"`
+}
+
+func (x *Request) Reset() {
+ *x = Request{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_message_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Request) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Request) ProtoMessage() {}
+
+func (x *Request) ProtoReflect() protoreflect.Message {
+ mi := &file_message_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Request.ProtoReflect.Descriptor instead.
+func (*Request) Descriptor() ([]byte, []int) {
+ return file_message_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *Request) GetRequest() isRequest_Request {
+ if m != nil {
+ return m.Request
+ }
+ return nil
+}
+
+func (x *Request) GetRoot() *RootRequest {
+ if x, ok := x.GetRequest().(*Request_Root); ok {
+ return x.Root
+ }
+ return nil
+}
+
+func (x *Request) GetGet() *GetRequest {
+ if x, ok := x.GetRequest().(*Request_Get); ok {
+ return x.Get
+ }
+ return nil
+}
+
+func (x *Request) GetPrefetch() *PrefetchRequest {
+ if x, ok := x.GetRequest().(*Request_Prefetch); ok {
+ return x.Prefetch
+ }
+ return nil
+}
+
+func (x *Request) GetUpdate() *UpdateRequest {
+ if x, ok := x.GetRequest().(*Request_Update); ok {
+ return x.Update
+ }
+ return nil
+}
+
+func (x *Request) GetClose() *CloseRequest {
+ if x, ok := x.GetRequest().(*Request_Close); ok {
+ return x.Close
+ }
+ return nil
+}
+
+type isRequest_Request interface {
+ isRequest_Request()
+}
+
+type Request_Root struct {
+ Root *RootRequest `protobuf:"bytes,1,opt,name=root,proto3,oneof"`
+}
+
+type Request_Get struct {
+ Get *GetRequest `protobuf:"bytes,2,opt,name=get,proto3,oneof"`
+}
+
+type Request_Prefetch struct {
+ Prefetch *PrefetchRequest `protobuf:"bytes,3,opt,name=prefetch,proto3,oneof"`
+}
+
+type Request_Update struct {
+ Update *UpdateRequest `protobuf:"bytes,4,opt,name=update,proto3,oneof"`
+}
+
+type Request_Close struct {
+ Close *CloseRequest `protobuf:"bytes,5,opt,name=close,proto3,oneof"`
+}
+
+func (*Request_Root) isRequest_Request() {}
+
+func (*Request_Get) isRequest_Request() {}
+
+func (*Request_Prefetch) isRequest_Request() {}
+
+func (*Request_Update) isRequest_Request() {}
+
+func (*Request_Close) isRequest_Request() {}
+
+type Response struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ErrCode int32 `protobuf:"varint,1,opt,name=err_code,json=errCode,proto3" json:"err_code,omitempty"`
+ // Types that are assignable to Response:
+ //
+ // *Response_Root
+ // *Response_Get
+ // *Response_Prefetch
+ // *Response_Update
+ // *Response_Close
+ Response isResponse_Response `protobuf_oneof:"response"`
+}
+
+func (x *Response) Reset() {
+ *x = Response{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_message_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Response) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Response) ProtoMessage() {}
+
+func (x *Response) ProtoReflect() protoreflect.Message {
+ mi := &file_message_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Response.ProtoReflect.Descriptor instead.
+func (*Response) Descriptor() ([]byte, []int) {
+ return file_message_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Response) GetErrCode() int32 {
+ if x != nil {
+ return x.ErrCode
+ }
+ return 0
+}
+
+func (m *Response) GetResponse() isResponse_Response {
+ if m != nil {
+ return m.Response
+ }
+ return nil
+}
+
+func (x *Response) GetRoot() *RootResponse {
+ if x, ok := x.GetResponse().(*Response_Root); ok {
+ return x.Root
+ }
+ return nil
+}
+
+func (x *Response) GetGet() *GetResponse {
+ if x, ok := x.GetResponse().(*Response_Get); ok {
+ return x.Get
+ }
+ return nil
+}
+
+func (x *Response) GetPrefetch() *PrefetchResponse {
+ if x, ok := x.GetResponse().(*Response_Prefetch); ok {
+ return x.Prefetch
+ }
+ return nil
+}
+
+func (x *Response) GetUpdate() *UpdateResponse {
+ if x, ok := x.GetResponse().(*Response_Update); ok {
+ return x.Update
+ }
+ return nil
+}
+
+func (x *Response) GetClose() *CloseResponse {
+ if x, ok := x.GetResponse().(*Response_Close); ok {
+ return x.Close
+ }
+ return nil
+}
+
+type isResponse_Response interface {
+ isResponse_Response()
+}
+
+type Response_Root struct {
+ Root *RootResponse `protobuf:"bytes,2,opt,name=root,proto3,oneof"`
+}
+
+type Response_Get struct {
+ Get *GetResponse `protobuf:"bytes,3,opt,name=get,proto3,oneof"`
+}
+
+type Response_Prefetch struct {
+ Prefetch *PrefetchResponse `protobuf:"bytes,4,opt,name=prefetch,proto3,oneof"`
+}
+
+type Response_Update struct {
+ Update *UpdateResponse `protobuf:"bytes,5,opt,name=update,proto3,oneof"`
+}
+
+type Response_Close struct {
+ Close *CloseResponse `protobuf:"bytes,6,opt,name=close,proto3,oneof"`
+}
+
+func (*Response_Root) isResponse_Response() {}
+
+func (*Response_Get) isResponse_Response() {}
+
+func (*Response_Prefetch) isResponse_Response() {}
+
+func (*Response_Update) isResponse_Response() {}
+
+func (*Response_Close) isResponse_Response() {}
+
+type RootRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *RootRequest) Reset() {
+ *x = RootRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_message_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RootRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RootRequest) ProtoMessage() {}
+
+func (x *RootRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_message_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RootRequest.ProtoReflect.Descriptor instead.
+func (*RootRequest) Descriptor() ([]byte, []int) {
+ return file_message_proto_rawDescGZIP(), []int{2}
+}
+
+type RootResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Root []byte `protobuf:"bytes,1,opt,name=root,proto3" json:"root,omitempty"`
+}
+
+func (x *RootResponse) Reset() {
+ *x = RootResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_message_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RootResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RootResponse) ProtoMessage() {}
+
+func (x *RootResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_message_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RootResponse.ProtoReflect.Descriptor instead.
+func (*RootResponse) Descriptor() ([]byte, []int) {
+ return file_message_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *RootResponse) GetRoot() []byte {
+ if x != nil {
+ return x.Root
+ }
+ return nil
+}
+
+type GetRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+}
+
+func (x *GetRequest) Reset() {
+ *x = GetRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_message_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetRequest) ProtoMessage() {}
+
+func (x *GetRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_message_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetRequest.ProtoReflect.Descriptor instead.
+func (*GetRequest) Descriptor() ([]byte, []int) {
+ return file_message_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *GetRequest) GetKey() []byte {
+ if x != nil {
+ return x.Key
+ }
+ return nil
+}
+
+type GetResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *GetResponse) Reset() {
+ *x = GetResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_message_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetResponse) ProtoMessage() {}
+
+func (x *GetResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_message_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetResponse.ProtoReflect.Descriptor instead.
+func (*GetResponse) Descriptor() ([]byte, []int) {
+ return file_message_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *GetResponse) GetValue() []byte {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+type PrefetchRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+}
+
+func (x *PrefetchRequest) Reset() {
+ *x = PrefetchRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_message_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PrefetchRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PrefetchRequest) ProtoMessage() {}
+
+func (x *PrefetchRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_message_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PrefetchRequest.ProtoReflect.Descriptor instead.
+func (*PrefetchRequest) Descriptor() ([]byte, []int) {
+ return file_message_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *PrefetchRequest) GetKey() []byte {
+ if x != nil {
+ return x.Key
+ }
+ return nil
+}
+
+type PrefetchResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *PrefetchResponse) Reset() {
+ *x = PrefetchResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_message_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PrefetchResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PrefetchResponse) ProtoMessage() {}
+
+func (x *PrefetchResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_message_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PrefetchResponse.ProtoReflect.Descriptor instead.
+func (*PrefetchResponse) Descriptor() ([]byte, []int) {
+ return file_message_proto_rawDescGZIP(), []int{7}
+}
+
+type UpdateRequestItem struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *UpdateRequestItem) Reset() {
+ *x = UpdateRequestItem{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_message_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UpdateRequestItem) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateRequestItem) ProtoMessage() {}
+
+func (x *UpdateRequestItem) ProtoReflect() protoreflect.Message {
+ mi := &file_message_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateRequestItem.ProtoReflect.Descriptor instead.
+func (*UpdateRequestItem) Descriptor() ([]byte, []int) {
+ return file_message_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *UpdateRequestItem) GetKey() []byte {
+ if x != nil {
+ return x.Key
+ }
+ return nil
+}
+
+func (x *UpdateRequestItem) GetValue() []byte {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+type UpdateRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Items []*UpdateRequestItem `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"`
+}
+
+func (x *UpdateRequest) Reset() {
+ *x = UpdateRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_message_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UpdateRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateRequest) ProtoMessage() {}
+
+func (x *UpdateRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_message_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateRequest.ProtoReflect.Descriptor instead.
+func (*UpdateRequest) Descriptor() ([]byte, []int) {
+ return file_message_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *UpdateRequest) GetItems() []*UpdateRequestItem {
+ if x != nil {
+ return x.Items
+ }
+ return nil
+}
+
+type UpdateResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Root []byte `protobuf:"bytes,2,opt,name=root,proto3" json:"root,omitempty"`
+}
+
+func (x *UpdateResponse) Reset() {
+ *x = UpdateResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_message_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UpdateResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateResponse) ProtoMessage() {}
+
+func (x *UpdateResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_message_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateResponse.ProtoReflect.Descriptor instead.
+func (*UpdateResponse) Descriptor() ([]byte, []int) {
+ return file_message_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *UpdateResponse) GetRoot() []byte {
+ if x != nil {
+ return x.Root
+ }
+ return nil
+}
+
+type CloseRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *CloseRequest) Reset() {
+ *x = CloseRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_message_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CloseRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CloseRequest) ProtoMessage() {}
+
+func (x *CloseRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_message_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CloseRequest.ProtoReflect.Descriptor instead.
+func (*CloseRequest) Descriptor() ([]byte, []int) {
+ return file_message_proto_rawDescGZIP(), []int{11}
+}
+
+type CloseResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *CloseResponse) Reset() {
+ *x = CloseResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_message_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CloseResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CloseResponse) ProtoMessage() {}
+
+func (x *CloseResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_message_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CloseResponse.ProtoReflect.Descriptor instead.
+func (*CloseResponse) Descriptor() ([]byte, []int) {
+ return file_message_proto_rawDescGZIP(), []int{12}
+}
+
+var File_message_proto protoreflect.FileDescriptor
+
+var file_message_proto_rawDesc = []byte{
+ 0x0a, 0x0d, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
+ 0x12, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66,
+ 0x61, 0x63, 0x65, 0x22, 0xb9, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x35, 0x0a, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e,
+ 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61,
+ 0x63, 0x65, 0x2e, 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00,
+ 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x12, 0x32, 0x0a, 0x03, 0x67, 0x65, 0x74, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69,
+ 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x03, 0x67, 0x65, 0x74, 0x12, 0x41, 0x0a, 0x08, 0x70, 0x72,
+ 0x65, 0x66, 0x65, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x64,
+ 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63,
+ 0x65, 0x2e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x48, 0x00, 0x52, 0x08, 0x70, 0x72, 0x65, 0x66, 0x65, 0x74, 0x63, 0x68, 0x12, 0x3b, 0x0a,
+ 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e,
+ 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61,
+ 0x63, 0x65, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x48, 0x00, 0x52, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x38, 0x0a, 0x05, 0x63, 0x6c,
+ 0x6f, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x64, 0x61, 0x74, 0x61,
+ 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x2e, 0x43,
+ 0x6c, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x05, 0x63,
+ 0x6c, 0x6f, 0x73, 0x65, 0x42, 0x09, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22,
+ 0xdb, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x19, 0x0a, 0x08,
+ 0x65, 0x72, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07,
+ 0x65, 0x72, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x36, 0x0a, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
+ 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x2e, 0x52, 0x6f, 0x6f, 0x74, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x12,
+ 0x33, 0x0a, 0x03, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x64,
+ 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63,
+ 0x65, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52,
+ 0x03, 0x67, 0x65, 0x74, 0x12, 0x42, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x66, 0x65, 0x74, 0x63, 0x68,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73,
+ 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x2e, 0x50, 0x72, 0x65, 0x66,
+ 0x65, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x08,
+ 0x70, 0x72, 0x65, 0x66, 0x65, 0x74, 0x63, 0x68, 0x12, 0x3c, 0x0a, 0x06, 0x75, 0x70, 0x64, 0x61,
+ 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62,
+ 0x61, 0x73, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x2e, 0x55, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x06,
+ 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x39, 0x0a, 0x05, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x18,
+ 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
+ 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x73, 0x65,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x05, 0x63, 0x6c, 0x6f, 0x73,
+ 0x65, 0x42, 0x0a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x0d, 0x0a,
+ 0x0b, 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x22, 0x0a, 0x0c,
+ 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04,
+ 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74,
+ 0x22, 0x1e, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10,
+ 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79,
+ 0x22, 0x23, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
+ 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23, 0x0a, 0x0f, 0x50, 0x72, 0x65, 0x66, 0x65, 0x74, 0x63,
+ 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x12, 0x0a, 0x10, 0x50, 0x72,
+ 0x65, 0x66, 0x65, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3b,
+ 0x0a, 0x11, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49,
+ 0x74, 0x65, 0x6d, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c,
+ 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4c, 0x0a, 0x0d, 0x55,
+ 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x05,
+ 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x64, 0x61,
+ 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65,
+ 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x74,
+ 0x65, 0x6d, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x22, 0x24, 0x0a, 0x0e, 0x55, 0x70, 0x64,
+ 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x72,
+ 0x6f, 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x22,
+ 0x0e, 0x0a, 0x0c, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22,
+ 0x0f, 0x0a, 0x0d, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_message_proto_rawDescOnce sync.Once
+ file_message_proto_rawDescData = file_message_proto_rawDesc
+)
+
+func file_message_proto_rawDescGZIP() []byte {
+ file_message_proto_rawDescOnce.Do(func() {
+ file_message_proto_rawDescData = protoimpl.X.CompressGZIP(file_message_proto_rawDescData)
+ })
+ return file_message_proto_rawDescData
+}
+
+var file_message_proto_msgTypes = make([]protoimpl.MessageInfo, 13)
+var file_message_proto_goTypes = []interface{}{
+ (*Request)(nil), // 0: database_interface.Request
+ (*Response)(nil), // 1: database_interface.Response
+ (*RootRequest)(nil), // 2: database_interface.RootRequest
+ (*RootResponse)(nil), // 3: database_interface.RootResponse
+ (*GetRequest)(nil), // 4: database_interface.GetRequest
+ (*GetResponse)(nil), // 5: database_interface.GetResponse
+ (*PrefetchRequest)(nil), // 6: database_interface.PrefetchRequest
+ (*PrefetchResponse)(nil), // 7: database_interface.PrefetchResponse
+ (*UpdateRequestItem)(nil), // 8: database_interface.UpdateRequestItem
+ (*UpdateRequest)(nil), // 9: database_interface.UpdateRequest
+ (*UpdateResponse)(nil), // 10: database_interface.UpdateResponse
+ (*CloseRequest)(nil), // 11: database_interface.CloseRequest
+ (*CloseResponse)(nil), // 12: database_interface.CloseResponse
+}
+var file_message_proto_depIdxs = []int32{
+ 2, // 0: database_interface.Request.root:type_name -> database_interface.RootRequest
+ 4, // 1: database_interface.Request.get:type_name -> database_interface.GetRequest
+ 6, // 2: database_interface.Request.prefetch:type_name -> database_interface.PrefetchRequest
+ 9, // 3: database_interface.Request.update:type_name -> database_interface.UpdateRequest
+ 11, // 4: database_interface.Request.close:type_name -> database_interface.CloseRequest
+ 3, // 5: database_interface.Response.root:type_name -> database_interface.RootResponse
+ 5, // 6: database_interface.Response.get:type_name -> database_interface.GetResponse
+ 7, // 7: database_interface.Response.prefetch:type_name -> database_interface.PrefetchResponse
+ 10, // 8: database_interface.Response.update:type_name -> database_interface.UpdateResponse
+ 12, // 9: database_interface.Response.close:type_name -> database_interface.CloseResponse
+ 8, // 10: database_interface.UpdateRequest.items:type_name -> database_interface.UpdateRequestItem
+ 11, // [11:11] is the sub-list for method output_type
+ 11, // [11:11] is the sub-list for method input_type
+ 11, // [11:11] is the sub-list for extension type_name
+ 11, // [11:11] is the sub-list for extension extendee
+ 0, // [0:11] is the sub-list for field type_name
+}
+
+func init() { file_message_proto_init() }
+func file_message_proto_init() {
+ if File_message_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_message_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Request); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_message_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Response); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_message_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RootRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_message_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RootResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_message_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_message_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_message_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PrefetchRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_message_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PrefetchResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_message_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UpdateRequestItem); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_message_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UpdateRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_message_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UpdateResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_message_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CloseRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_message_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CloseResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_message_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*Request_Root)(nil),
+ (*Request_Get)(nil),
+ (*Request_Prefetch)(nil),
+ (*Request_Update)(nil),
+ (*Request_Close)(nil),
+ }
+ file_message_proto_msgTypes[1].OneofWrappers = []interface{}{
+ (*Response_Root)(nil),
+ (*Response_Get)(nil),
+ (*Response_Prefetch)(nil),
+ (*Response_Update)(nil),
+ (*Response_Close)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_message_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 13,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_message_proto_goTypes,
+ DependencyIndexes: file_message_proto_depIdxs,
+ MessageInfos: file_message_proto_msgTypes,
+ }.Build()
+ File_message_proto = out.File
+ file_message_proto_rawDesc = nil
+ file_message_proto_goTypes = nil
+ file_message_proto_depIdxs = nil
+}
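
The generated types above are pure protoc output; what matters for the shim is the envelope shape: every call crosses the socket as a Request with exactly one oneof arm set, answered by a mirrored Response whose matching arm (or err_code) is populated. A minimal sketch of building and decoding these envelopes, assuming only the standard google.golang.org/protobuf/proto package that this generated code already depends on:

package main

import (
	"fmt"
	"log"

	"github.com/ava-labs/coreth/shim/nomt/nomt"
	"google.golang.org/protobuf/proto"
)

func main() {
	// A client wraps each operation in the Request envelope; here, a Get.
	req := &nomt.Request{
		Request: &nomt.Request_Get{
			Get: &nomt.GetRequest{Key: []byte("some-key")},
		},
	}
	raw, err := proto.Marshal(req)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("encoded request: %d bytes\n", len(raw))

	// The server replies with the mirrored Response envelope. Simulate one
	// here; the oneof accessors return nil for the arms that are not set.
	wire, _ := proto.Marshal(&nomt.Response{
		Response: &nomt.Response_Get{
			Get: &nomt.GetResponse{Value: []byte("some-value")},
		},
	})
	var resp nomt.Response
	if err := proto.Unmarshal(wire, &resp); err != nil {
		log.Fatal(err)
	}
	if get := resp.GetGet(); get != nil {
		fmt.Printf("value: %s\n", get.GetValue())
	}
}
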
diff --git a/shim/nomt/nomt_test.go b/shim/nomt/nomt_test.go
new file mode 100644
index 0000000000..fb6beb9dd5
--- /dev/null
+++ b/shim/nomt/nomt_test.go
@@ -0,0 +1,66 @@
+package nomt
+
+import (
+ "fmt"
+ "net"
+ "testing"
+
+ "github.com/ava-labs/coreth/shim/nomt/nomt"
+ "github.com/stretchr/testify/require"
+)
+
+//go:generate protoc --go_out=. --go_opt=Mmessage.proto=./nomt message.proto
+
+const socketPath = "/tmp/rust_socket"
+
+func makeKey(key []byte) []byte {
+ return key
+}
+
+func BenchmarkSimple(b *testing.B) {
+ conn, err := net.Dial("unix", socketPath)
+ require.NoError(b, err)
+ defer conn.Close()
+
+ {
+ req := &nomt.Request{
+ Request: &nomt.Request_Update{
+ Update: &nomt.UpdateRequest{
+ Items: []*nomt.UpdateRequestItem{
+ {
+ Key: makeKey([]byte("key")),
+ Value: []byte("value1"),
+ },
+ },
+ },
+ },
+ }
+
+ resp, err := response(conn, req)
+ require.NoError(b, err)
+ b.Logf("Response: %x", resp.GetUpdate().Root)
+ }
+
+ batchSize := 20
+ batch := make([]*nomt.UpdateRequestItem, batchSize)
+ lastKey := 0
+ b.Logf("Starting benchmark")
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ b.StopTimer()
+ for j := 0; j < batchSize; j++ {
+ batch[j] = &nomt.UpdateRequestItem{
+ Key: makeKey([]byte(fmt.Sprintf("key%08d", lastKey))),
+ Value: []byte(fmt.Sprintf("value%d", lastKey)),
+ }
+ lastKey++
+ }
+ req := &nomt.Request{Request: &nomt.Request_Update{Update: &nomt.UpdateRequest{Items: batch}}}
+ b.StartTimer()
+
+ resp, err := response(conn, req)
+ require.NoError(b, err)
+ require.NotNil(b, resp.GetUpdate().Root)
+ }
+}
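
The response helper the benchmark calls is defined elsewhere in this package, and its wire framing is not visible in this diff. For orientation, a plausible shape for such a helper, assuming a 4-byte big-endian length prefix before each protobuf message; the framing here is an assumption, not the actual protocol:

package nomt

import (
	"encoding/binary"
	"io"
	"net"

	"github.com/ava-labs/coreth/shim/nomt/nomt"
	"google.golang.org/protobuf/proto"
)

// response sketches one request/response round trip over the unix socket:
// marshal the Request, write it length-prefixed, then read the
// length-prefixed Response back. The real helper may frame differently.
func response(conn net.Conn, req *nomt.Request) (*nomt.Response, error) {
	raw, err := proto.Marshal(req)
	if err != nil {
		return nil, err
	}
	var lenBuf [4]byte
	binary.BigEndian.PutUint32(lenBuf[:], uint32(len(raw)))
	if _, err := conn.Write(lenBuf[:]); err != nil {
		return nil, err
	}
	if _, err := conn.Write(raw); err != nil {
		return nil, err
	}
	if _, err := io.ReadFull(conn, lenBuf[:]); err != nil {
		return nil, err
	}
	buf := make([]byte, binary.BigEndian.Uint32(lenBuf[:]))
	if _, err := io.ReadFull(conn, buf); err != nil {
		return nil, err
	}
	resp := new(nomt.Response)
	return resp, proto.Unmarshal(buf, resp)
}
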
diff --git a/shim/secure_trie.go b/shim/secure_trie.go
new file mode 100644
index 0000000000..993905b3c8
--- /dev/null
+++ b/shim/secure_trie.go
@@ -0,0 +1,241 @@
+// (c) 2020-2021, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package shim
+
+import (
+ "github.com/ava-labs/coreth/core/types"
+ "github.com/ava-labs/coreth/trie"
+ "github.com/ava-labs/coreth/trie/trienode"
+ "github.com/ava-labs/coreth/triedb/database"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+// StateTrie wraps a trie with key hashing. In a secure trie, all
+// access operations hash the key using keccak256. This prevents
+// calling code from creating long chains of nodes that
+// increase the access time.
+//
+// Contrary to a regular trie, a StateTrie can only be created with
+// New and must have an attached database. The database also stores
+// the preimage of each key if preimage recording is enabled.
+//
+// StateTrie is not safe for concurrent use.
+type StateTrie struct {
+ trie *Trie
+ db database.Database
+ hashKeyBuf [common.HashLength]byte
+ secKeyCache map[string][]byte
+ secKeyCacheOwner *StateTrie // Pointer to self, replace the key cache on mismatch
+}
+
+// GetStorage attempts to retrieve a storage slot with provided account address
+// and slot key. The value bytes must not be modified by the caller.
+// If the specified storage slot is not in the trie, nil will be returned.
+// If a trie node is not found in the database, a MissingNodeError is returned.
+func (t *StateTrie) GetStorage(_ common.Address, key []byte) ([]byte, error) {
+ enc, err := t.trie.Get(t.hashKey(key))
+ if err != nil || len(enc) == 0 {
+ return nil, err
+ }
+ _, content, _, err := rlp.Split(enc)
+ return content, err
+}
+
+func (t *StateTrie) PrefetchStorage(_ common.Address, key []byte) ([]byte, error) {
+ _, err := t.trie.Prefetch(t.hashKey(key))
+ return nil, err
+}
+
+// GetAccount attempts to retrieve an account with provided account address.
+// If the specified account is not in the trie, nil will be returned.
+// If a trie node is not found in the database, a MissingNodeError is returned.
+func (t *StateTrie) GetAccount(address common.Address) (*types.StateAccount, error) {
+ res, err := t.trie.Get(t.hashKey(address.Bytes()))
+ if res == nil || err != nil {
+ return nil, err
+ }
+ ret := new(types.StateAccount)
+ err = rlp.DecodeBytes(res, ret)
+ return ret, err
+}
+
+func (t *StateTrie) PrefetchAccount(address common.Address) (*types.StateAccount, error) {
+ _, err := t.trie.Prefetch(t.hashKey(address.Bytes()))
+ return nil, err
+}
+
+// GetAccountByHash does the same thing as GetAccount, however it expects an
+// account hash that is the hash of address. This constitutes an abstraction
+// leak, since the client code needs to know the key format.
+func (t *StateTrie) GetAccountByHash(addrHash common.Hash) (*types.StateAccount, error) {
+ res, err := t.trie.Get(addrHash.Bytes())
+ if res == nil || err != nil {
+ return nil, err
+ }
+ ret := new(types.StateAccount)
+ err = rlp.DecodeBytes(res, ret)
+ return ret, err
+}
+
+// UpdateStorage associates key with value in the trie. Subsequent calls to
+// Get will return value. If value has length zero, any existing value
+// is deleted from the trie and calls to Get will return nil.
+//
+// The value bytes must not be modified by the caller while they are
+// stored in the trie.
+//
+// If a node is not found in the database, a MissingNodeError is returned.
+func (t *StateTrie) UpdateStorage(_ common.Address, key, value []byte) error {
+ hk := t.hashKey(key)
+ v, _ := rlp.EncodeToBytes(value)
+ err := t.trie.Update(hk, v)
+ if err != nil {
+ return err
+ }
+ t.getSecKeyCache()[string(hk)] = common.CopyBytes(key)
+ return nil
+}
+
+// UpdateAccount will abstract the write of an account to the secure trie.
+func (t *StateTrie) UpdateAccount(address common.Address, acc *types.StateAccount) error {
+ hk := t.hashKey(address.Bytes())
+ data, err := rlp.EncodeToBytes(acc)
+ if err != nil {
+ return err
+ }
+ if err := t.trie.Update(hk, data); err != nil {
+ return err
+ }
+ t.getSecKeyCache()[string(hk)] = address.Bytes()
+ return nil
+}
+
+func (t *StateTrie) UpdateContractCode(_ common.Address, _ common.Hash, _ []byte) error {
+ return nil // contract code lives outside the trie, so there is nothing to write here
+}
+
+// DeleteStorage removes any existing storage slot from the trie.
+// If the specified trie node is not in the trie, nothing will be changed.
+// If a node is not found in the database, a MissingNodeError is returned.
+func (t *StateTrie) DeleteStorage(_ common.Address, key []byte) error {
+ hk := t.hashKey(key)
+ delete(t.getSecKeyCache(), string(hk))
+ return t.trie.Delete(hk)
+}
+
+// DeleteAccount abstracts an account deletion from the trie.
+func (t *StateTrie) DeleteAccount(address common.Address) error {
+ hk := t.hashKey(address.Bytes())
+ delete(t.getSecKeyCache(), string(hk))
+ return t.trie.Delete(hk)
+}
+
+// GetKey returns the sha3 preimage of a hashed key that was
+// previously used to store a value.
+func (t *StateTrie) GetKey(shaKey []byte) []byte {
+ if key, ok := t.getSecKeyCache()[string(shaKey)]; ok {
+ return key
+ }
+ return t.db.Preimage(common.BytesToHash(shaKey))
+}
+
+// Commit collects all dirty nodes in the trie and replaces them with the
+// corresponding node hash. All collected nodes (including dirty leaves if
+// collectLeaf is true) will be encapsulated into a nodeset for return.
+// The returned nodeset can be nil if the trie is clean (nothing to commit).
+// All cached preimages will be also flushed if preimages recording is enabled.
+// Once the trie is committed, it's not usable anymore. A new trie must
+// be created with the new root and updated trie database for following usage.
+func (t *StateTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) {
+ // Write all the pre-images to the actual disk database
+ if len(t.getSecKeyCache()) > 0 {
+ preimages := make(map[common.Hash][]byte)
+ for hk, key := range t.secKeyCache {
+ preimages[common.BytesToHash([]byte(hk))] = key
+ }
+ t.db.InsertPreimage(preimages)
+ t.secKeyCache = make(map[string][]byte)
+ }
+ // Commit the trie and return its modified nodeset.
+ return t.trie.Commit(collectLeaf)
+}
+
+// Hash returns the root hash of StateTrie. It does not write to the
+// database and can be used even if the trie doesn't have one.
+func (t *StateTrie) Hash() common.Hash {
+ return t.trie.Hash()
+}
+
+// Copy returns a copy of StateTrie.
+func (t *StateTrie) Copy() *StateTrie {
+ return &StateTrie{
+ trie: t.trie.Copy(),
+ db: t.db,
+ secKeyCache: t.secKeyCache,
+ }
+}
+
+// NodeIterator returns an iterator that returns nodes of the underlying trie.
+// Iteration starts at the key after the given start key.
+func (t *StateTrie) NodeIterator(start []byte) (trie.NodeIterator, error) {
+ return t.trie.NodeIterator(start)
+}
+
+// MustNodeIterator is a wrapper of NodeIterator and will omit any encountered
+// error but just print out an error message.
+func (t *StateTrie) MustNodeIterator(start []byte) trie.NodeIterator {
+ return t.trie.MustNodeIterator(start)
+}
+
+// hashKey returns the hash of key as an ephemeral buffer.
+// The caller must not hold onto the return value because it will become
+// invalid on the next call to hashKey or secKey.
+func (t *StateTrie) hashKey(key []byte) []byte {
+ h := newHasher(false)
+ h.sha.Reset()
+ h.sha.Write(key)
+ h.sha.Read(t.hashKeyBuf[:])
+ returnHasherToPool(h)
+ return t.hashKeyBuf[:]
+}
+
+// getSecKeyCache returns the current secure key cache, creating a new one if
+// ownership changed (i.e. the current secure trie is a copy of another owning
+// the actual cache).
+func (t *StateTrie) getSecKeyCache() map[string][]byte {
+ if t != t.secKeyCacheOwner {
+ t.secKeyCacheOwner = t
+ t.secKeyCache = make(map[string][]byte)
+ }
+ return t.secKeyCache
+}
+
+func (t *StateTrie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error {
+ return t.trie.Prove(key, proofDb)
+}
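
Two transformations in this file are easy to miss when reading the accessors: every key is keccak256-hashed before it touches the underlying trie (hashKey), and storage values are RLP-wrapped on write and unwrapped on read. A small standalone sketch of both, using go-ethereum's crypto and rlp packages:

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// hashKey: the trie never sees the raw key, only its keccak256 hash.
	key := []byte("storage-slot-key")
	hashed := crypto.Keccak256(key) // same digest StateTrie.hashKey computes
	fmt.Printf("trie key: %x\n", hashed)

	// UpdateStorage RLP-encodes the value; GetStorage splits it back out,
	// so a write/read round trip recovers the original bytes.
	enc, _ := rlp.EncodeToBytes([]byte("storage-value"))
	_, content, _, err := rlp.Split(enc)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(content, []byte("storage-value"))) // true
}
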
diff --git a/shim/trie.go b/shim/trie.go
new file mode 100644
index 0000000000..a0e0c9d207
--- /dev/null
+++ b/shim/trie.go
@@ -0,0 +1,114 @@
+package shim
+
+import (
+ "bytes"
+
+ "github.com/ava-labs/coreth/trie"
+ "github.com/ava-labs/coreth/trie/trienode"
+ "github.com/ava-labs/coreth/triedb/database"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
+)
+
+type Trie struct {
+ changes Batch
+ backend Backend
+
+ prefix []byte
+ parent *Trie
+ origin common.Hash
+}
+
+type Backend interface {
+ Get(key []byte) ([]byte, error)
+ Hash(batch Batch) common.Hash
+ Commit(batch Batch, collectLeaf bool) (common.Hash, *trienode.NodeSet, error)
+ Prefetch(key []byte) ([]byte, error) // note: the returned []byte is ignored
+}
+
+func NewStateTrie(backend Backend, db database.Database) *StateTrie {
+ return &StateTrie{
+ trie: &Trie{backend: backend},
+ db: db,
+ }
+}
+
+func (t *Trie) getKey(key []byte) []byte {
+ return append(bytes.Clone(t.prefix), key...) // clone so the result never aliases t.prefix's backing array
+}
+
+// Update batches updates to the trie
+func (t *Trie) Update(key, value []byte) error {
+ key = t.getKey(key)
+ value = bytes.Clone(value)
+ if t.parent != nil {
+ t.parent.changes = append(t.parent.changes, KV{Key: key, Value: value})
+ } else {
+ t.changes = append(t.changes, KV{Key: key, Value: value})
+ }
+ return nil
+}
+
+func (t *Trie) Delete(key []byte) error {
+ key = t.getKey(key)
+ if t.parent != nil {
+ t.parent.changes = append(t.parent.changes, KV{Key: key})
+ } else {
+ t.changes = append(t.changes, KV{Key: key})
+ }
+ return nil
+}
+
+func (t *Trie) Hash() common.Hash {
+ if t.parent != nil {
+ return common.Hash{}
+ }
+
+ return t.backend.Hash(t.changes)
+}
+
+func (t *Trie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) {
+ if t.parent != nil {
+ return common.Hash{}, nil, nil
+ }
+
+ return t.backend.Commit(t.changes, collectLeaf)
+}
+
+func (t *Trie) Get(key []byte) ([]byte, error) {
+ key = t.getKey(key)
+ return t.backend.Get(key)
+}
+
+func (t *Trie) Prefetch(key []byte) ([]byte, error) {
+ key = t.getKey(key)
+ return t.backend.Prefetch(key)
+}
+
+func (t *Trie) Copy() *Trie {
+ if legacy, ok := t.backend.(*LegacyBackend); ok {
+ return &Trie{
+ backend: &LegacyBackend{
+ tr: legacy.tr.Copy(),
+ addrHash: legacy.addrHash,
+ writer: legacy.writer,
+ },
+ prefix: bytes.Clone(t.prefix),
+ parent: t.parent,
+ origin: t.origin,
+ changes: append(Batch(nil), t.changes...),
+ }
+ }
+ // fmt.Printf("Copy requested (%d changes): %p: %x\n", len(t.changes), t, t.prefix)
+ return t
+}
+
+func (t *Trie) NodeIterator(start []byte) (trie.NodeIterator, error) {
+ if legacy, ok := t.backend.(*LegacyBackend); ok {
+ return legacy.tr.NodeIterator(start)
+ }
+ panic("not implemented")
+}
+
+func (t *Trie) MustNodeIterator(start []byte) trie.NodeIterator { panic("not implemented") }
+func (t *Trie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error { panic("not implemented") }
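
The least obvious part of this shim is the parent pointer: a trie constructed with parent set never accumulates changes of its own, so every storage write lands, prefixed, in the account trie's batch, and a single Commit on the parent hands the backend one combined batch (which is also why Hash and Commit on a parented trie return zero values). A sketch with a stub backend that records what it receives; the "addr1:" prefix scheme and the direct struct construction are illustrative only, since the code that actually wires up parent and prefix is not part of this diff:

package shim

import (
	"fmt"

	"github.com/ava-labs/coreth/trie/trienode"
	"github.com/ethereum/go-ethereum/common"
)

// recorderBackend records the batch handed to it so we can inspect what a
// real backend would receive; it computes no real root.
type recorderBackend struct{ last Batch }

func (r *recorderBackend) Get(key []byte) ([]byte, error)      { return nil, nil }
func (r *recorderBackend) Prefetch(key []byte) ([]byte, error) { return nil, nil }

func (r *recorderBackend) Hash(batch Batch) common.Hash {
	r.last = batch
	return common.Hash{}
}

func (r *recorderBackend) Commit(batch Batch, collectLeaf bool) (common.Hash, *trienode.NodeSet, error) {
	r.last = batch
	return common.Hash{}, nil, nil
}

func demoParentFunnel() {
	be := new(recorderBackend)
	accounts := &Trie{backend: be}
	// Hypothetical storage trie for one account; writes are redirected to
	// the parent with the prefix prepended.
	storage := &Trie{backend: be, parent: accounts, prefix: []byte("addr1:")}

	_ = storage.Update([]byte("slot"), []byte("val")) // appended to accounts.changes
	_ = accounts.Update([]byte("addr1"), []byte("account-rlp"))

	accounts.Hash() // the backend sees one batch with both writes
	for _, kv := range be.last {
		fmt.Printf("%s => %s\n", kv.Key, kv.Value) // addr1:slot => val, then addr1 => account-rlp
	}
}
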
diff --git a/triedb/database.go b/triedb/database.go
index 7421b74cf0..a25c12c20d 100644
--- a/triedb/database.go
+++ b/triedb/database.go
@@ -17,6 +17,7 @@
package triedb
import (
+ "bytes"
"errors"
"github.com/ava-labs/coreth/trie"
@@ -30,12 +31,60 @@ import (
"github.com/ethereum/go-ethereum/log"
)
+type KVBackend interface {
+ // Root returns the current root hash of the trie.
+ // An empty trie must return common.HashLength (32) zero bytes.
+ // The length of the returned slice must be common.HashLength.
+ Root() []byte
+
+ // Get retrieves the value for the given key.
+ // If the key does not exist, it must return (nil, nil).
+ Get(key []byte) ([]byte, error)
+
+ // Prefetch loads the intermediate nodes on the path to the given key
+ // into memory. The first return value is ignored.
+ Prefetch(key []byte) ([]byte, error)
+
+ // Update applies the given key/value pairs to the trie and returns the
+ // resulting root hash. After this call, Root() must return the same hash
+ // as returned by this call.
+ // A zero-length value means the corresponding key must be deleted.
+ // The batch may contain duplicate keys; the last occurrence takes effect.
+ // Note that the next call to Update must build on the root returned here,
+ // regardless of whether Commit is called in between.
+ // The length of the returned root must be common.HashLength.
+ Update(keys, vals [][]byte) ([]byte, error)
+
+ // Commit persists all changes related to [root] to disk. This may be
+ // implemented as a no-op if Update already persists changes, or if
+ // commits happen on a rolling basis.
+ // The length of the root slice is guaranteed to be common.HashLength.
+ Commit(root []byte) error
+
+ // Close closes the backend and releases all held resources.
+ Close() error
+
+ // PrefixDelete should delete all keys with the given prefix, and return the
+ // number of keys deleted.
+ PrefixDelete(prefix []byte) (int, error)
+}
+
+type KVWriter interface {
+ MustUpdate(key, value []byte)
+}
+
+type KeyValueConfig struct {
+ KVBackend KVBackend
+ Writer KVWriter
+}
+
// Config defines all necessary options for database.
type Config struct {
- Preimages bool // Flag whether the preimage of node key is recorded
- IsVerkle bool // Flag whether the db is holding a verkle tree
- HashDB *hashdb.Config // Configs for hash-based scheme
- PathDB *pathdb.Config // Configs for experimental path-based scheme
+ Preimages bool // Flag whether the preimage of node key is recorded
+ IsVerkle bool // Flag whether the db is holding a verkle tree
+ HashDB *hashdb.Config // Configs for hash-based scheme
+ PathDB *pathdb.Config // Configs for experimental path-based scheme
+ KeyValueDB *KeyValueConfig // Config for the external key-value backend
}
// HashDefaults represents a config for using hash-based scheme with
@@ -88,6 +137,10 @@ type Database struct {
backend backend // The backend for managing trie nodes
}
+func (db *Database) Config() *Config {
+ return db.config
+}
+
// NewDatabase initializes the trie database with default settings, note
// the legacy hash-based scheme is used by default.
func NewDatabase(diskdb ethdb.Database, config *Config) *Database {
@@ -145,6 +198,10 @@ func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, n
if db.preimages != nil {
db.preimages.commit(false)
}
+ kvConfig := db.config.KeyValueDB
+ if kvConfig != nil && kvConfig.KVBackend != nil {
+ return nil
+ }
return db.backend.Update(root, parent, block, nodes, states)
}
@@ -155,6 +212,11 @@ func (db *Database) Commit(root common.Hash, report bool) error {
if db.preimages != nil {
db.preimages.commit(true)
}
+ if db.config.KeyValueDB != nil {
+ if backend := db.config.KeyValueDB.KVBackend; backend != nil {
+ return backend.Commit(root[:])
+ }
+ }
return db.backend.Commit(root, report)
}
@@ -176,6 +238,11 @@ func (db *Database) Size() (common.StorageSize, common.StorageSize, common.Stora
// Initialized returns an indicator if the state data is already initialized
// according to the state scheme.
func (db *Database) Initialized(genesisRoot common.Hash) bool {
+ if db.config.KeyValueDB != nil {
+ if backend := db.config.KeyValueDB.KVBackend; backend != nil {
+ return !bytes.Equal(backend.Root(), common.Hash{}.Bytes())
+ }
+ }
return db.backend.Initialized(genesisRoot)
}
@@ -189,6 +256,10 @@ func (db *Database) Scheme() string {
// resources held can be released correctly.
func (db *Database) Close() error {
db.WritePreimages()
+ kvConfig := db.config.KeyValueDB
+ if kvConfig != nil && kvConfig.KVBackend != nil {
+ return kvConfig.KVBackend.Close()
+ }
return db.backend.Close()
}
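
To make the KVBackend contract above concrete, here is a toy in-memory implementation that honors its edge cases: 32 zero bytes for the empty trie, (nil, nil) on a missing key, zero-length values as deletions, last-wins for duplicate keys, and a no-op Commit because Update already persists. The root here is a keccak over the sorted contents, a stand-in for a real trie root, so this is a sketch of the contract rather than a usable backend:

package main

import (
	"fmt"
	"sort"
	"strings"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// memKV is an illustrative KVBackend; its "root" is a hash of the sorted
// contents, not a merkle root.
type memKV struct {
	data map[string][]byte
	root []byte
}

func newMemKV() *memKV {
	return &memKV{
		data: make(map[string][]byte),
		root: make([]byte, common.HashLength), // empty trie: 32 zero bytes
	}
}

func (m *memKV) Root() []byte { return m.root }

// Get returns (nil, nil) for a missing key, as the interface requires.
func (m *memKV) Get(key []byte) ([]byte, error) { return m.data[string(key)], nil }

// Prefetch is a no-op here; a real backend would warm its node cache.
func (m *memKV) Prefetch(key []byte) ([]byte, error) { return nil, nil }

func (m *memKV) Update(keys, vals [][]byte) ([]byte, error) {
	for i := range keys {
		if len(vals[i]) == 0 {
			delete(m.data, string(keys[i])) // zero-length value: deletion
		} else {
			m.data[string(keys[i])] = vals[i] // duplicates: last one wins
		}
	}
	m.root = m.rehash()
	return m.root, nil
}

// Commit is a no-op: Update already applied everything, which the interface
// explicitly allows.
func (m *memKV) Commit(root []byte) error { return nil }

func (m *memKV) Close() error { return nil }

func (m *memKV) PrefixDelete(prefix []byte) (int, error) {
	n := 0
	for k := range m.data {
		if strings.HasPrefix(k, string(prefix)) {
			delete(m.data, k)
			n++
		}
	}
	m.root = m.rehash()
	return n, nil
}

func (m *memKV) rehash() []byte {
	if len(m.data) == 0 {
		return make([]byte, common.HashLength) // back to the all-zero root
	}
	keys := make([]string, 0, len(m.data))
	for k := range m.data {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	h := crypto.NewKeccakState()
	for _, k := range keys {
		h.Write([]byte(k))
		h.Write(m.data[k])
	}
	root := make([]byte, common.HashLength)
	h.Read(root)
	return root
}

func main() {
	kv := newMemKV()
	root, _ := kv.Update([][]byte{[]byte("k")}, [][]byte{[]byte("v")})
	fmt.Printf("non-zero root after update: %x\n", root)
}
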