From d7e7b541907b3c0a8e1898937f411d5c219759bd Mon Sep 17 00:00:00 2001
From: Nebojsa Urosevic
Date: Mon, 25 Nov 2024 23:16:00 -0800
Subject: [PATCH 1/4] core/tracing: add GetCodeHash to StateDB (#30784)

This PR extends the tracing.StateDB interface by adding a GetCodeHash function.
---
 core/tracing/hooks.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/core/tracing/hooks.go b/core/tracing/hooks.go
index a21bb1577b08..0c5cae354b5c 100644
--- a/core/tracing/hooks.go
+++ b/core/tracing/hooks.go
@@ -42,6 +42,7 @@ type StateDB interface {
 	GetBalance(common.Address) *uint256.Int
 	GetNonce(common.Address) uint64
 	GetCode(common.Address) []byte
+	GetCodeHash(common.Address) common.Hash
 	GetState(common.Address, common.Hash) common.Hash
 	GetTransientState(common.Address, common.Hash) common.Hash
 	Exist(common.Address) bool

From a11b4bebcb4aacf36ad4ce2712c6696bdff5d143 Mon Sep 17 00:00:00 2001
From: rjl493456442
Date: Tue, 26 Nov 2024 18:33:59 +0800
Subject: [PATCH 2/4] Revert "core/state/snapshot: simplify snapshot rebuild (#30772)" (#30810)

This reverts commit 23800122b37695be50565f8221858a16ce1763db.

The original pull request introduced a bug, and some flaky tests were
detected because of this flaw:

```
--- FAIL: TestRecoverSnapshotFromWipingCrash (0.27s)
    blockchain_snapshot_test.go:158: The disk layer is not integrated snapshot is not constructed
{"pc":0,"op":88,"gas":"0x7148","gasCost":"0x2","memSize":0,"stack":[],"depth":1,"refund":0,"opName":"PC"}
{"pc":1,"op":255,"gas":"0x7146","gasCost":"0x1db0","memSize":0,"stack":["0x0"],"depth":1,"refund":0,"opName":"SELFDESTRUCT"}
{"output":"","gasUsed":"0x0"}
{"output":"","gasUsed":"0x1db2"}
{"pc":0,"op":116,"gas":"0x13498","gasCost":"0x3","memSize":0,"stack":[],"depth":1,"refund":0,"opName":"PUSH21"}
```

Before the original PR, the snapshot would block the caller until the disk
layer was fully generated under the following conditions:

(a) explicitly required by users with `AsyncBuild = false`.
(b) the snapshot was being fully rebuilt or *the disk layer generation had resumed*.

Unfortunately, with the changes introduced in that PR, the snapshot no longer
waits for disk layer generation to complete if the generation is resumed. This
brings a lot of uncertainty and breaks this small debugging feature.
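To make the restored semantics concrete, here is a minimal, hypothetical sketch of a caller that relies on the blocking build (not part of this change; the helper name and the concrete `Config` values are illustrative, while the `NoBuild`/`AsyncBuild`/`CacheSize` fields and the `New` signature appear in the diff below):

```go
package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state/snapshot"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/triedb"
)

// newBlockingSnapshotTree builds a snapshot tree and, with AsyncBuild disabled,
// only returns once the disk layer has been generated (or a resumed generation
// has finished), which is the behavior this revert restores.
func newBlockingSnapshotTree(diskdb ethdb.KeyValueStore, db *triedb.Database, root common.Hash) (*snapshot.Tree, error) {
	cfg := snapshot.Config{
		CacheSize:  256,   // illustrative disk-layer cache size (MB)
		NoBuild:    false, // allow rebuilding a missing snapshot
		AsyncBuild: false, // block in New until the disk layer is ready
	}
	return snapshot.New(cfg, diskdb, db, root)
}
```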
---
 core/state/snapshot/snapshot.go | 48 +++++++++++++++++++++++----------
 1 file changed, 34 insertions(+), 14 deletions(-)

diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go
index 181dc6b746fc..7466f1235198 100644
--- a/core/state/snapshot/snapshot.go
+++ b/core/state/snapshot/snapshot.go
@@ -206,24 +206,47 @@ func New(config Config, diskdb ethdb.KeyValueStore, triedb *triedb.Database, roo
 		log.Warn("Snapshot maintenance disabled (syncing)")
 		return snap, nil
 	}
+	// Create the building waiter iff the background generation is allowed
+	if !config.NoBuild && !config.AsyncBuild {
+		defer snap.waitBuild()
+	}
 	if err != nil {
 		log.Warn("Failed to load snapshot", "err", err)
-		if config.NoBuild {
-			return nil, err
-		}
-		wait := snap.Rebuild(root)
-		if !config.AsyncBuild {
-			wait()
+		if !config.NoBuild {
+			snap.Rebuild(root)
+			return snap, nil
 		}
-		return snap, nil
+		return nil, err // Bail out the error, don't rebuild automatically.
 	}
 	// Existing snapshot loaded, seed all the layers
-	for ; head != nil; head = head.Parent() {
+	for head != nil {
 		snap.layers[head.Root()] = head
+		head = head.Parent()
 	}
 	return snap, nil
 }
 
+// waitBuild blocks until the snapshot finishes rebuilding. This method is meant
+// to be used by tests to ensure we're testing what we believe we are.
+func (t *Tree) waitBuild() {
+	// Find the rebuild termination channel
+	var done chan struct{}
+
+	t.lock.RLock()
+	for _, layer := range t.layers {
+		if layer, ok := layer.(*diskLayer); ok {
+			done = layer.genPending
+			break
+		}
+	}
+	t.lock.RUnlock()
+
+	// Wait until the snapshot is generated
+	if done != nil {
+		<-done
+	}
+}
+
 // Disable interrupts any pending snapshot generator, deletes all the snapshot
 // layers in memory and marks snapshots disabled globally. In order to resume
 // the snapshot functionality, the caller must invoke Rebuild.
@@ -665,9 +688,8 @@ func (t *Tree) Journal(root common.Hash) (common.Hash, error) {
 
 // Rebuild wipes all available snapshot data from the persistent database and
 // discard all caches and diff layers. Afterwards, it starts a new snapshot
-// generator with the given root hash. The returned function blocks until
-// regeneration is complete.
-func (t *Tree) Rebuild(root common.Hash) (wait func()) {
+// generator with the given root hash.
+func (t *Tree) Rebuild(root common.Hash) {
 	t.lock.Lock()
 	defer t.lock.Unlock()
 
@@ -699,11 +721,9 @@ func (t *Tree) Rebuild(root common.Hash) (wait func()) {
 	// Start generating a new snapshot from scratch on a background thread. The
 	// generator will run a wiper first if there's not one running right now.
 	log.Info("Rebuilding state snapshot")
-	disk := generateSnapshot(t.diskdb, t.triedb, t.config.CacheSize, root)
 	t.layers = map[common.Hash]snapshot{
-		root: disk,
+		root: generateSnapshot(t.diskdb, t.triedb, t.config.CacheSize, root),
 	}
-	return func() { <-disk.genPending }
 }
 
 // AccountIterator creates a new account iterator for the specified root hash and

From 915248cd6bf14b9d1b29db182375bc456d4ef868 Mon Sep 17 00:00:00 2001
From: jwasinger
Date: Tue, 26 Nov 2024 22:12:38 +0700
Subject: [PATCH 3/4] cmd/evm: don't reuse state between iterations, show errors (#30780)

Reusing state between benchmark iterations resulted in inconsistent results
across runs, which surfaced in
https://github.com/ethereum/go-ethereum/issues/30778. If these errors are
triggered again, they will now cause a panic.
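To illustrate the pattern being adopted, here is a stand-alone, hypothetical sketch (not the actual cmd/evm code): every benchmark iteration runs against a fresh copy of the pristine pre-state, and any error is surfaced loudly instead of being ignored. It assumes `testing.Init()` has already been called once, as the diff below does.

```go
package example

import (
	"testing"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/vm/runtime"
)

// benchCall benchmarks an EVM call without reusing mutated state between
// iterations: prestate itself is never modified, every iteration gets a copy.
func benchCall(prestate *state.StateDB, receiver common.Address, input []byte, cfg runtime.Config) testing.BenchmarkResult {
	return testing.Benchmark(func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			cfg.State = prestate.Copy() // fresh state, so iterations stay independent
			if _, _, err := runtime.Call(receiver, input, &cfg); err != nil {
				panic(err) // make failures visible instead of skewing the results
			}
		}
	})
}
```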
---------

Co-authored-by: Martin HS
---
 cmd/evm/runner.go | 23 ++++++++++++++---------
 1 file changed, 14 insertions(+), 9 deletions(-)

diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go
index 8c382e625722..2884487faace 100644
--- a/cmd/evm/runner.go
+++ b/cmd/evm/runner.go
@@ -84,19 +84,20 @@ type execStats struct {
 
 func timedExec(bench bool, execFunc func() ([]byte, uint64, error)) ([]byte, execStats, error) {
 	if bench {
+		testing.Init()
 		// Do one warm-up run
 		output, gasUsed, err := execFunc()
 		result := testing.Benchmark(func(b *testing.B) {
 			for i := 0; i < b.N; i++ {
 				haveOutput, haveGasUsed, haveErr := execFunc()
 				if !bytes.Equal(haveOutput, output) {
-					b.Fatalf("output differs, have\n%x\nwant%x\n", haveOutput, output)
+					panic(fmt.Sprintf("output differs\nhave %x\nwant %x\n", haveOutput, output))
 				}
 				if haveGasUsed != gasUsed {
-					b.Fatalf("gas differs, have %v want%v", haveGasUsed, gasUsed)
+					panic(fmt.Sprintf("gas differs, have %v want %v", haveGasUsed, gasUsed))
 				}
 				if haveErr != err {
-					b.Fatalf("err differs, have %v want%v", haveErr, err)
+					panic(fmt.Sprintf("err differs, have %v want %v", haveErr, err))
 				}
 			}
 		})
@@ -137,7 +138,7 @@ func runCmd(ctx *cli.Context) error {
 	var (
 		tracer      *tracing.Hooks
 		debugLogger *logger.StructLogger
-		statedb     *state.StateDB
+		prestate    *state.StateDB
 		chainConfig *params.ChainConfig
 		sender      = common.BytesToAddress([]byte("sender"))
 		receiver    = common.BytesToAddress([]byte("receiver"))
@@ -174,7 +175,7 @@ func runCmd(ctx *cli.Context) error {
 	defer triedb.Close()
 	genesis := genesisConfig.MustCommit(db, triedb)
 	sdb := state.NewDatabase(triedb, nil)
-	statedb, _ = state.New(genesis.Root(), sdb)
+	prestate, _ = state.New(genesis.Root(), sdb)
 	chainConfig = genesisConfig.Config
 
 	if ctx.String(SenderFlag.Name) != "" {
@@ -231,7 +232,7 @@ func runCmd(ctx *cli.Context) error {
 	}
 	runtimeConfig := runtime.Config{
 		Origin:      sender,
-		State:       statedb,
+		State:       prestate,
 		GasLimit:    initialGas,
 		GasPrice:    flags.GlobalBig(ctx, PriceFlag.Name),
 		Value:       flags.GlobalBig(ctx, ValueFlag.Name),
@@ -274,14 +275,18 @@ func runCmd(ctx *cli.Context) error {
 	if ctx.Bool(CreateFlag.Name) {
 		input = append(code, input...)
 		execFunc = func() ([]byte, uint64, error) {
+			// don't mutate the state!
+			runtimeConfig.State = prestate.Copy()
 			output, _, gasLeft, err := runtime.Create(input, &runtimeConfig)
 			return output, gasLeft, err
 		}
 	} else {
		if len(code) > 0 {
-			statedb.SetCode(receiver, code)
+			prestate.SetCode(receiver, code)
 		}
 		execFunc = func() ([]byte, uint64, error) {
+			// don't mutate the state!
+			runtimeConfig.State = prestate.Copy()
 			output, gasLeft, err := runtime.Call(receiver, input, &runtimeConfig)
 			return output, initialGas - gasLeft, err
 		}
@@ -291,7 +296,7 @@ func runCmd(ctx *cli.Context) error {
 	output, stats, err := timedExec(bench, execFunc)
 
 	if ctx.Bool(DumpFlag.Name) {
-		root, err := statedb.Commit(genesisConfig.Number, true)
+		root, err := runtimeConfig.State.Commit(genesisConfig.Number, true)
 		if err != nil {
 			fmt.Printf("Failed to commit changes %v\n", err)
 			return err
@@ -310,7 +315,7 @@ func runCmd(ctx *cli.Context) error {
 			logger.WriteTrace(os.Stderr, debugLogger.StructLogs())
 		}
 		fmt.Fprintln(os.Stderr, "#### LOGS ####")
-		logger.WriteLogs(os.Stderr, statedb.Logs())
+		logger.WriteLogs(os.Stderr, runtimeConfig.State.Logs())
 	}
 
 	if bench || ctx.Bool(StatDumpFlag.Name) {

From e0deac7f6f8dd3cb59529e25ccdd548dd003ed4d Mon Sep 17 00:00:00 2001
From: wangjingcun
Date: Wed, 27 Nov 2024 14:17:03 +0800
Subject: [PATCH 4/4] core: better document reason for dropping error on return (#30811)

Add a comment explaining why a nil error is returned.

Signed-off-by: wangjingcun
---
 core/blockchain_reader.go | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/core/blockchain_reader.go b/core/blockchain_reader.go
index 19c1b17f369c..8f1da82a2bf4 100644
--- a/core/blockchain_reader.go
+++ b/core/blockchain_reader.go
@@ -277,6 +277,13 @@ func (bc *BlockChain) GetTransactionLookup(hash common.Hash) (*rawdb.LegacyTxLoo
 	if tx == nil {
 		progress, err := bc.TxIndexProgress()
 		if err != nil {
+			// No error is returned if the transaction indexing progress is unreachable
+			// due to unexpected internal errors. In such cases, it is impossible to
+			// determine whether the transaction does not exist or has simply not been
+			// indexed yet without a progress marker.
+			//
+			// In such scenarios, the transaction is treated as unreachable, though
+			// this is clearly an unintended and unexpected situation.
 			return nil, nil, nil
 		}
 		// The transaction indexing is not finished yet, returning an