diff --git a/arbnode/message_pruner.go b/arbnode/message_pruner.go index 08f568796d..dedc579a01 100644 --- a/arbnode/message_pruner.go +++ b/arbnode/message_pruner.go @@ -15,6 +15,7 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/util/stopwaiter" @@ -23,15 +24,13 @@ import ( type MessagePruner struct { stopwaiter.StopWaiter - transactionStreamer *TransactionStreamer - inboxTracker *InboxTracker - config MessagePrunerConfigFetcher - pruningLock sync.Mutex - lastPruneDone time.Time - cachedPrunedMessages uint64 - cachedPrunedBlockHashesInputFeed uint64 - cachedPrunedMessageResult uint64 - cachedPrunedDelayedMessages uint64 + transactionStreamer *TransactionStreamer + inboxTracker *InboxTracker + config MessagePrunerConfigFetcher + pruningLock sync.Mutex + lastPruneDone time.Time + cachedPrunedMessages uint64 + cachedPrunedDelayedMessages uint64 } type MessagePrunerConfig struct { @@ -121,7 +120,13 @@ func (m *MessagePruner) prune(ctx context.Context, count arbutil.MessageIndex, g } func (m *MessagePruner) deleteOldMessagesFromDB(ctx context.Context, messageCount arbutil.MessageIndex, delayedMessageCount uint64) error { - prunedKeysRange, err := deleteFromLastPrunedUptoEndKey(ctx, m.transactionStreamer.db, messageResultPrefix, &m.cachedPrunedMessageResult, uint64(messageCount)) + if m.cachedPrunedMessages == 0 { + m.cachedPrunedMessages = fetchLastPrunedKey(m.transactionStreamer.db, lastPrunedMessageKey) + } + if m.cachedPrunedDelayedMessages == 0 { + m.cachedPrunedDelayedMessages = fetchLastPrunedKey(m.inboxTracker.db, lastPrunedDelayedMessageKey) + } + prunedKeysRange, _, err := deleteFromLastPrunedUptoEndKey(ctx, m.transactionStreamer.db, messageResultPrefix, m.cachedPrunedMessages, uint64(messageCount)) if err != nil { return fmt.Errorf("error deleting message results: %w", err) } @@ -129,7 +134,7 @@ func (m *MessagePruner) deleteOldMessagesFromDB(ctx context.Context, messageCoun log.Info("Pruned message results:", "first pruned key", prunedKeysRange[0], "last pruned key", prunedKeysRange[len(prunedKeysRange)-1]) } - prunedKeysRange, err = deleteFromLastPrunedUptoEndKey(ctx, m.transactionStreamer.db, blockHashInputFeedPrefix, &m.cachedPrunedBlockHashesInputFeed, uint64(messageCount)) + prunedKeysRange, _, err = deleteFromLastPrunedUptoEndKey(ctx, m.transactionStreamer.db, blockHashInputFeedPrefix, m.cachedPrunedMessages, uint64(messageCount)) if err != nil { return fmt.Errorf("error deleting expected block hashes: %w", err) } @@ -137,43 +142,77 @@ func (m *MessagePruner) deleteOldMessagesFromDB(ctx context.Context, messageCoun log.Info("Pruned expected block hashes:", "first pruned key", prunedKeysRange[0], "last pruned key", prunedKeysRange[len(prunedKeysRange)-1]) } - prunedKeysRange, err = deleteFromLastPrunedUptoEndKey(ctx, m.transactionStreamer.db, messagePrefix, &m.cachedPrunedMessages, uint64(messageCount)) + prunedKeysRange, lastPrunedMessage, err := deleteFromLastPrunedUptoEndKey(ctx, m.transactionStreamer.db, messagePrefix, m.cachedPrunedMessages, uint64(messageCount)) if err != nil { return fmt.Errorf("error deleting last batch messages: %w", err) } if len(prunedKeysRange) > 0 { log.Info("Pruned last batch messages:", "first pruned key", prunedKeysRange[0], "last pruned key", prunedKeysRange[len(prunedKeysRange)-1]) } + insertLastPrunedKey(m.transactionStreamer.db, lastPrunedMessageKey, lastPrunedMessage) + 
m.cachedPrunedMessages = lastPrunedMessage - prunedKeysRange, err = deleteFromLastPrunedUptoEndKey(ctx, m.inboxTracker.db, rlpDelayedMessagePrefix, &m.cachedPrunedDelayedMessages, delayedMessageCount) + prunedKeysRange, lastPrunedDelayedMessage, err := deleteFromLastPrunedUptoEndKey(ctx, m.inboxTracker.db, rlpDelayedMessagePrefix, m.cachedPrunedDelayedMessages, delayedMessageCount) if err != nil { return fmt.Errorf("error deleting last batch delayed messages: %w", err) } if len(prunedKeysRange) > 0 { log.Info("Pruned last batch delayed messages:", "first pruned key", prunedKeysRange[0], "last pruned key", prunedKeysRange[len(prunedKeysRange)-1]) } + insertLastPrunedKey(m.inboxTracker.db, lastPrunedDelayedMessageKey, lastPrunedDelayedMessage) + m.cachedPrunedDelayedMessages = lastPrunedDelayedMessage return nil } -// deleteFromLastPrunedUptoEndKey is similar to deleteFromRange but automatically populates the start key -// cachedStartMinKey must not be nil. It's set to the new start key at the end of this function if successful. -func deleteFromLastPrunedUptoEndKey(ctx context.Context, db ethdb.Database, prefix []byte, cachedStartMinKey *uint64, endMinKey uint64) ([]uint64, error) { - startMinKey := *cachedStartMinKey +// deleteFromLastPrunedUptoEndKey is similar to deleteFromRange but automatically populates the start key if it's not set. +// It returns the new start key (i.e. the last pruned key) if successful. +func deleteFromLastPrunedUptoEndKey(ctx context.Context, db ethdb.Database, prefix []byte, startMinKey uint64, endMinKey uint64) ([]uint64, uint64, error) { if startMinKey == 0 { startIter := db.NewIterator(prefix, uint64ToKey(1)) if !startIter.Next() { - return nil, nil + return nil, 0, nil } startMinKey = binary.BigEndian.Uint64(bytes.TrimPrefix(startIter.Key(), prefix)) startIter.Release() } if endMinKey <= startMinKey { - *cachedStartMinKey = startMinKey - return nil, nil + return nil, startMinKey, nil } keys, err := deleteFromRange(ctx, db, prefix, startMinKey, endMinKey-1) - if err == nil { - *cachedStartMinKey = endMinKey - 1 + return keys, endMinKey - 1, err +} + +func insertLastPrunedKey(db ethdb.Database, lastPrunedKey []byte, lastPrunedValue uint64) { + lastPrunedValueByte, err := rlp.EncodeToBytes(lastPrunedValue) + if err != nil { + log.Error("error encoding last pruned value", "err", err) + } else { + err = db.Put(lastPrunedKey, lastPrunedValueByte) + if err != nil { + log.Error("error saving last pruned value", "err", err) + } + } +} + +func fetchLastPrunedKey(db ethdb.Database, lastPrunedKey []byte) uint64 { + hasKey, err := db.Has(lastPrunedKey) + if err != nil { + log.Warn("error checking for last pruned key", "err", err) + return 0 + } + if !hasKey { + return 0 + } + lastPrunedValueByte, err := db.Get(lastPrunedKey) + if err != nil { + log.Warn("error fetching last pruned key", "err", err) + return 0 + } + var lastPrunedValue uint64 + err = rlp.DecodeBytes(lastPrunedValueByte, &lastPrunedValue) + if err != nil { + log.Warn("error decoding last pruned value", "err", err) + return 0 } - return keys, err + return lastPrunedValue } diff --git a/arbnode/node.go b/arbnode/node.go index f2e3433ecd..b9ac975176 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -212,6 +212,7 @@ func ConfigDefaultL1NonSequencerTest() *Config { config.Staker = legacystaker.TestL1ValidatorConfig config.Staker.Enable = false config.BlockValidator.ValidationServerConfigs = []rpcclient.ClientConfig{{URL: ""}} + config.Bold.MinimumGapToParentAssertion = 0 return &config } @@ -230,6
+231,7 @@ func ConfigDefaultL2Test() *Config { config.Staker.Enable = false config.BlockValidator.ValidationServerConfigs = []rpcclient.ClientConfig{{URL: ""}} config.TransactionStreamer = DefaultTransactionStreamerConfig + config.Bold.MinimumGapToParentAssertion = 0 return &config } @@ -285,19 +287,21 @@ type Node struct { } type SnapSyncConfig struct { - Enabled bool - PrevBatchMessageCount uint64 - PrevDelayedRead uint64 - BatchCount uint64 - DelayedCount uint64 + Enabled bool + PrevBatchMessageCount uint64 + PrevDelayedRead uint64 + BatchCount uint64 + DelayedCount uint64 + ParentChainAssertionBlock uint64 } var DefaultSnapSyncConfig = SnapSyncConfig{ - Enabled: false, - PrevBatchMessageCount: 0, - BatchCount: 0, - DelayedCount: 0, - PrevDelayedRead: 0, + Enabled: false, + PrevBatchMessageCount: 0, + PrevDelayedRead: 0, + BatchCount: 0, + DelayedCount: 0, + ParentChainAssertionBlock: 0, } type ConfigFetcher interface { @@ -596,7 +600,29 @@ func createNodeImpl( if err != nil { return nil, err } - inboxReader, err := NewInboxReader(inboxTracker, l1client, l1Reader, new(big.Int).SetUint64(deployInfo.DeployedAt), delayedBridge, sequencerInbox, func() *InboxReaderConfig { return &configFetcher.Get().InboxReader }) + firstMessageBlock := new(big.Int).SetUint64(deployInfo.DeployedAt) + if config.SnapSyncTest.Enabled { + batchCount := config.SnapSyncTest.BatchCount + delayedMessageNumber, err := exec.NextDelayedMessageNumber() + if err != nil { + return nil, err + } + if batchCount > delayedMessageNumber { + batchCount = delayedMessageNumber + } + // Find the first block containing the batch count. + // Subtract 1 to get the block before the needed batch count; + // this is done to fetch the previous batch metadata needed for snap sync. + if batchCount > 0 { + batchCount-- + } + block, err := FindBlockContainingBatchCount(ctx, deployInfo.Bridge, l1client, config.SnapSyncTest.ParentChainAssertionBlock, batchCount) + if err != nil { + return nil, err + } + firstMessageBlock.SetUint64(block) + } + inboxReader, err := NewInboxReader(inboxTracker, l1client, l1Reader, firstMessageBlock, delayedBridge, sequencerInbox, func() *InboxReaderConfig { return &configFetcher.Get().InboxReader }) if err != nil { return nil, err } @@ -772,6 +798,53 @@ func createNodeImpl( }, nil } +func FindBlockContainingBatchCount(ctx context.Context, bridgeAddress common.Address, l1Client *ethclient.Client, parentChainAssertionBlock uint64, batchCount uint64) (uint64, error) { + bridge, err := bridgegen.NewIBridge(bridgeAddress, l1Client) + if err != nil { + return 0, err + } + high := parentChainAssertionBlock + low := uint64(0) + reduceBy := uint64(100) + if high > reduceBy { + low = high - reduceBy + } + // Walk the low bound backwards, doubling the step each time, until the sequencer message count at block low no longer exceeds batchCount. + // This gives us a range (low to high) of blocks that contains the batch count. + for low > 0 { + lowCount, err := bridge.SequencerMessageCount(&bind.CallOpts{Context: ctx, BlockNumber: new(big.Int).SetUint64(low)}) + if err != nil { + return 0, err + } + if lowCount.Uint64() > batchCount { + high = low + reduceBy = reduceBy * 2 + if low > reduceBy { + low = low - reduceBy + } else { + low = 0 + } + } else { + break + } + } + // Then binary search between low and high to find the block containing the batch count.
+ for low < high { + mid := low + (high-low)/2 + + midCount, err := bridge.SequencerMessageCount(&bind.CallOpts{Context: ctx, BlockNumber: new(big.Int).SetUint64(mid)}) + if err != nil { + return 0, err + } + if midCount.Uint64() < batchCount { + low = mid + 1 + } else { + high = mid + } + } + return low, nil +} + func (n *Node) OnConfigReload(_ *Config, _ *Config) error { // TODO return nil diff --git a/arbnode/schema.go b/arbnode/schema.go index 1aaded2b95..88a31ce90a 100644 --- a/arbnode/schema.go +++ b/arbnode/schema.go @@ -13,10 +13,12 @@ var ( sequencerBatchMetaPrefix []byte = []byte("s") // maps a batch sequence number to BatchMetadata delayedSequencedPrefix []byte = []byte("a") // maps a delayed message count to the first sequencer batch sequence number with this delayed count - messageCountKey []byte = []byte("_messageCount") // contains the current message count - delayedMessageCountKey []byte = []byte("_delayedMessageCount") // contains the current delayed message count - sequencerBatchCountKey []byte = []byte("_sequencerBatchCount") // contains the current sequencer message count - dbSchemaVersion []byte = []byte("_schemaVersion") // contains a uint64 representing the database schema version + messageCountKey []byte = []byte("_messageCount") // contains the current message count + lastPrunedMessageKey []byte = []byte("_lastPrunedMessageKey") // contains the last pruned message key + lastPrunedDelayedMessageKey []byte = []byte("_lastPrunedDelayedMessageKey") // contains the last pruned RLP delayed message key + delayedMessageCountKey []byte = []byte("_delayedMessageCount") // contains the current delayed message count + sequencerBatchCountKey []byte = []byte("_sequencerBatchCount") // contains the current sequencer message count + dbSchemaVersion []byte = []byte("_schemaVersion") // contains a uint64 representing the database schema version ) const currentDbSchemaVersion uint64 = 1 diff --git a/arbos/arbosState/arbosstate.go b/arbos/arbosState/arbosstate.go index 5ee070f942..de1a970b87 100644 --- a/arbos/arbosState/arbosstate.go +++ b/arbos/arbosState/arbosstate.go @@ -13,7 +13,6 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/triedb" "github.com/ethereum/go-ethereum/triedb/hashdb" @@ -123,13 +122,13 @@ func NewArbosMemoryBackedArbOSState() (*ArbosState, *state.StateDB) { db := state.NewDatabaseWithConfig(raw, trieConfig) statedb, err := state.New(common.Hash{}, db, nil) if err != nil { - log.Crit("failed to init empty statedb", "error", err) + panic("failed to init empty statedb: " + err.Error()) } burner := burn.NewSystemBurner(nil, false) chainConfig := chaininfo.ArbitrumDevTestChainConfig() newState, err := InitializeArbosState(statedb, burner, chainConfig, arbostypes.TestInitMessage) if err != nil { - log.Crit("failed to open the ArbOS state", "error", err) + panic("failed to open the ArbOS state: " + err.Error()) } return newState, statedb } @@ -139,7 +138,7 @@ func ArbOSVersion(stateDB vm.StateDB) uint64 { backingStorage := storage.NewGeth(stateDB, burn.NewSystemBurner(nil, false)) arbosVersion, err := backingStorage.GetUint64ByUint64(uint64(versionOffset)) if err != nil { - log.Crit("failed to get the ArbOS version", "error", err) + panic("failed to get the ArbOS version: " + err.Error()) } return arbosVersion } diff --git a/arbos/arbosState/initialize.go 
b/arbos/arbosState/initialize.go index 8fd417c2b2..840204382c 100644 --- a/arbos/arbosState/initialize.go +++ b/arbos/arbosState/initialize.go @@ -66,7 +66,7 @@ func InitializeArbosInDatabase(db ethdb.Database, cacheConfig *core.CacheConfig, }() statedb, err := state.New(common.Hash{}, stateDatabase, nil) if err != nil { - log.Crit("failed to init empty statedb", "error", err) + panic("failed to init empty statedb :" + err.Error()) } noStateTrieChangesToCommitError := regexp.MustCompile("^triedb layer .+ is disk layer$") @@ -96,7 +96,7 @@ func InitializeArbosInDatabase(db ethdb.Database, cacheConfig *core.CacheConfig, burner := burn.NewSystemBurner(nil, false) arbosState, err := InitializeArbosState(statedb, burner, chainConfig, initMessage) if err != nil { - log.Crit("failed to open the ArbOS state", "error", err) + panic("failed to open the ArbOS state :" + err.Error()) } chainOwner, err := initData.GetChainOwner() diff --git a/arbos/programs/api.go b/arbos/programs/api.go index d8f12ffbd3..a622f55397 100644 --- a/arbos/programs/api.go +++ b/arbos/programs/api.go @@ -4,12 +4,13 @@ package programs import ( + "strconv" + "github.com/holiman/uint256" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/util" @@ -151,7 +152,7 @@ func newApiClosures( case vm.STATICCALL: ret, returnGas, err = evm.StaticCall(scope.Contract, contract, input, gas) default: - log.Crit("unsupported call type", "opcode", opcode) + panic("unsupported call type: " + opcode.String()) } interpreter.SetReturnData(ret) @@ -266,7 +267,7 @@ func newApiClosures( original := input crash := func(reason string) { - log.Crit("bad API call", "reason", reason, "request", req, "len", len(original), "remaining", len(input)) + panic("bad API call reason: " + reason + " request: " + strconv.Itoa(int(req)) + " len: " + strconv.Itoa(len(original)) + " remaining: " + strconv.Itoa(len(input))) } takeInput := func(needed int, reason string) []byte { if len(input) < needed { @@ -338,7 +339,7 @@ func newApiClosures( case StaticCall: opcode = vm.STATICCALL default: - log.Crit("unsupported call type", "opcode", opcode) + panic("unsupported call type opcode: " + opcode.String()) } contract := takeAddress() value := takeU256() @@ -414,8 +415,7 @@ func newApiClosures( captureHostio(name, args, outs, startInk, endInk) return []byte{}, nil, 0 default: - log.Crit("unsupported call type", "req", req) - return []byte{}, nil, 0 + panic("unsupported call type: " + strconv.Itoa(int(req))) } } } diff --git a/arbos/programs/native_api.go b/arbos/programs/native_api.go index ab15800ef9..ad8cc0477b 100644 --- a/arbos/programs/native_api.go +++ b/arbos/programs/native_api.go @@ -25,11 +25,11 @@ import "C" import ( "runtime" + "strconv" "sync" "sync/atomic" "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbos/util" "github.com/offchainlabs/nitro/arbutil" @@ -69,11 +69,11 @@ func newApi( func getApi(id usize) NativeApi { any, ok := apiObjects.Load(uintptr(id)) if !ok { - log.Crit("failed to load stylus Go API", "id", id) + panic("failed to load stylus Go API id: " + strconv.Itoa(int(id))) } api, ok := any.(NativeApi) if !ok { - log.Crit("wrong type for stylus Go API", "id", id) + panic("wrong type for stylus Go API id: " + strconv.Itoa(int(id))) } return api } diff --git a/arbos/programs/programs.go 
b/arbos/programs/programs.go index 06ba6ead8c..524fcc0be7 100644 --- a/arbos/programs/programs.go +++ b/arbos/programs/programs.go @@ -13,6 +13,7 @@ import ( "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" gethParams "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbcompress" @@ -163,6 +164,21 @@ func (p Programs) ActivateProgram(evm *vm.EVM, address common.Address, arbosVers return stylusVersion, codeHash, info.moduleHash, dataFee, false, p.setProgram(codeHash, programData) } +func runModeToString(runmode core.MessageRunMode) string { + switch runmode { + case core.MessageCommitMode: + return "commit_runmode" + case core.MessageGasEstimationMode: + return "gas_estimation_runmode" + case core.MessageEthcallMode: + return "eth_call_runmode" + case core.MessageReplayMode: + return "replay_runmode" + default: + return "unknown_runmode" + } +} + func (p Programs) CallProgram( scope *vm.ScopeContext, statedb vm.StateDB, @@ -219,8 +235,7 @@ func (p Programs) CallProgram( localAsm, err := getLocalAsm(statedb, moduleHash, contract.Address(), contract.Code, contract.CodeHash, params.PageLimit, evm.Context.Time, debugMode, program) if err != nil { - log.Crit("failed to get local wasm for activated program", "program", contract.Address()) - return nil, err + panic("failed to get local wasm for activated program: " + contract.Address().Hex()) } evmData := &EvmData{ @@ -250,17 +265,23 @@ func (p Programs) CallProgram( if runmode == core.MessageCommitMode { arbos_tag = statedb.Database().WasmCacheTag() } + + metrics.GetOrRegisterCounter(fmt.Sprintf("arb/arbos/stylus/program_calls/%s", runModeToString(runmode)), nil).Inc(1) ret, err := callProgram(address, moduleHash, localAsm, scope, interpreter, tracingInfo, calldata, evmData, goParams, model, arbos_tag) if len(ret) > 0 && arbosVersion >= gethParams.ArbosVersion_StylusFixes { // Ensure that return data costs as least as much as it would in the EVM. 
evmCost := evmMemoryCost(uint64(len(ret))) if startingGas < evmCost { contract.Gas = 0 + // #nosec G115 + metrics.GetOrRegisterCounter(fmt.Sprintf("arb/arbos/stylus/gas_used/%s", runModeToString(runmode)), nil).Inc(int64(startingGas)) return nil, vm.ErrOutOfGas } maxGasToReturn := startingGas - evmCost contract.Gas = am.MinInt(contract.Gas, maxGasToReturn) } + // #nosec G115 + metrics.GetOrRegisterCounter(fmt.Sprintf("arb/arbos/stylus/gas_used/%s", runModeToString(runmode)), nil).Inc(int64(startingGas - contract.Gas)) return ret, err } diff --git a/arbstate/inbox.go b/arbstate/inbox.go index b58a7420b7..5539a75ce1 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -85,11 +85,11 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash // Matches the way keyset validation was done inside DAS readers i.e logging the error // But other daproviders might just want to return the error if errors.Is(err, daprovider.ErrSeqMsgValidation) && daprovider.IsDASMessageHeaderByte(payload[0]) { - logLevel := log.Error if keysetValidationMode == daprovider.KeysetPanicIfInvalid { - logLevel = log.Crit + panic(err.Error()) + } else { + log.Error(err.Error()) } - logLevel(err.Error()) } else { return nil, err } diff --git a/bold b/bold index 290743f517..60b5e36725 160000 --- a/bold +++ b/bold @@ -1 +1 @@ -Subproject commit 290743f517f7a94d62460231399fb095cb18c3a4 +Subproject commit 60b5e36725da9551b005d6171e75eda30a63d49a diff --git a/execution/gethexec/api.go b/execution/gethexec/api.go index 713d1496f9..699aa081b5 100644 --- a/execution/gethexec/api.go +++ b/execution/gethexec/api.go @@ -17,6 +17,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" "github.com/offchainlabs/nitro/arbos/arbosState" @@ -285,14 +286,16 @@ func stateAndHeader(blockchain *core.BlockChain, block uint64) (*arbosState.Arbo type ArbTraceForwarderAPI struct { fallbackClientUrl string fallbackClientTimeout time.Duration + blockchainConfig *params.ChainConfig initialized atomic.Bool mutex sync.Mutex fallbackClient types.FallbackClient } -func NewArbTraceForwarderAPI(fallbackClientUrl string, fallbackClientTimeout time.Duration) *ArbTraceForwarderAPI { +func NewArbTraceForwarderAPI(blockchainConfig *params.ChainConfig, fallbackClientUrl string, fallbackClientTimeout time.Duration) *ArbTraceForwarderAPI { return &ArbTraceForwarderAPI{ + blockchainConfig: blockchainConfig, fallbackClientUrl: fallbackClientUrl, fallbackClientTimeout: fallbackClientTimeout, } @@ -332,16 +335,45 @@ func (api *ArbTraceForwarderAPI) forward(ctx context.Context, method string, arg return resp, nil } -func (api *ArbTraceForwarderAPI) Call(ctx context.Context, callArgs json.RawMessage, traceTypes json.RawMessage, blockNum json.RawMessage) (*json.RawMessage, error) { - return api.forward(ctx, "arbtrace_call", callArgs, traceTypes, blockNum) +func (api *ArbTraceForwarderAPI) blockSupportedByClassicNode(blockNumOrHash json.RawMessage) error { + var bnh rpc.BlockNumberOrHash + err := bnh.UnmarshalJSON(blockNumOrHash) + if err != nil { + return err + } + blockNum, isNum := bnh.Number() + if !isNum { + return nil + } + // #nosec G115 + if blockNum < 0 || blockNum > rpc.BlockNumber(api.blockchainConfig.ArbitrumChainParams.GenesisBlockNum) { + return fmt.Errorf("block number %v is not supported by classic node", blockNum) + } + return nil } -func (api *ArbTraceForwarderAPI) 
CallMany(ctx context.Context, calls json.RawMessage, blockNum json.RawMessage) (*json.RawMessage, error) { - return api.forward(ctx, "arbtrace_callMany", calls, blockNum) +func (api *ArbTraceForwarderAPI) Call(ctx context.Context, callArgs json.RawMessage, traceTypes json.RawMessage, blockNumOrHash json.RawMessage) (*json.RawMessage, error) { + err := api.blockSupportedByClassicNode(blockNumOrHash) + if err != nil { + return nil, err + } + return api.forward(ctx, "arbtrace_call", callArgs, traceTypes, blockNumOrHash) } -func (api *ArbTraceForwarderAPI) ReplayBlockTransactions(ctx context.Context, blockNum json.RawMessage, traceTypes json.RawMessage) (*json.RawMessage, error) { - return api.forward(ctx, "arbtrace_replayBlockTransactions", blockNum, traceTypes) +func (api *ArbTraceForwarderAPI) CallMany(ctx context.Context, calls json.RawMessage, blockNumOrHash json.RawMessage) (*json.RawMessage, error) { + err := api.blockSupportedByClassicNode(blockNumOrHash) + if err != nil { + return nil, err + } + return api.forward(ctx, "arbtrace_callMany", calls, blockNumOrHash) +} + +func (api *ArbTraceForwarderAPI) ReplayBlockTransactions(ctx context.Context, blockNumOrHash json.RawMessage, traceTypes json.RawMessage) (*json.RawMessage, error) { + err := api.blockSupportedByClassicNode(blockNumOrHash) + if err != nil { + return nil, err + } + return api.forward(ctx, "arbtrace_replayBlockTransactions", blockNumOrHash, traceTypes) } func (api *ArbTraceForwarderAPI) ReplayTransaction(ctx context.Context, txHash json.RawMessage, traceTypes json.RawMessage) (*json.RawMessage, error) { @@ -356,8 +388,12 @@ func (api *ArbTraceForwarderAPI) Get(ctx context.Context, txHash json.RawMessage return api.forward(ctx, "arbtrace_get", txHash, path) } -func (api *ArbTraceForwarderAPI) Block(ctx context.Context, blockNum json.RawMessage) (*json.RawMessage, error) { - return api.forward(ctx, "arbtrace_block", blockNum) +func (api *ArbTraceForwarderAPI) Block(ctx context.Context, blockNumOrHash json.RawMessage) (*json.RawMessage, error) { + err := api.blockSupportedByClassicNode(blockNumOrHash) + if err != nil { + return nil, err + } + return api.forward(ctx, "arbtrace_block", blockNumOrHash) } func (api *ArbTraceForwarderAPI) Filter(ctx context.Context, filter json.RawMessage) (*json.RawMessage, error) { diff --git a/execution/gethexec/node.go b/execution/gethexec/node.go index 11d173a21e..16e4948723 100644 --- a/execution/gethexec/node.go +++ b/execution/gethexec/node.go @@ -284,6 +284,7 @@ func CreateExecutionNode( Namespace: "arbtrace", Version: "1.0", Service: NewArbTraceForwarderAPI( + l2BlockChain.Config(), config.RPC.ClassicRedirect, config.RPC.ClassicRedirectTimeout, ), diff --git a/precompiles/context.go b/precompiles/context.go index 670ffa7443..86e56ffbff 100644 --- a/precompiles/context.go +++ b/precompiles/context.go @@ -9,7 +9,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbos" "github.com/offchainlabs/nitro/arbos/arbosState" @@ -58,7 +57,7 @@ func (c *Context) GasLeft() *uint64 { } func (c *Context) Restrict(err error) { - log.Crit("A metered burner was used for access-controlled work", "error", err) + panic("A metered burner was used for access-controlled work :" + err.Error()) } func (c *Context) HandleError(err error) error { @@ -88,13 +87,13 @@ func testContext(caller addr, evm mech) *Context { } state, err := 
arbosState.OpenArbosState(evm.StateDB, burn.NewSystemBurner(tracingInfo, false)) if err != nil { - log.Crit("unable to open arbos state", "error", err) + panic("unable to open arbos state :" + err.Error()) } ctx.State = state var ok bool ctx.txProcessor, ok = evm.ProcessingHook.(*arbos.TxProcessor) if !ok { - log.Crit("must have tx processor") + panic("must have tx processor") } return ctx } diff --git a/precompiles/precompile.go b/precompiles/precompile.go index 54d18a0cc9..7ca9d409c6 100644 --- a/precompiles/precompile.go +++ b/precompiles/precompile.go @@ -120,7 +120,7 @@ func (e *SolError) Error() string { func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Precompile) { source, err := abi.JSON(strings.NewReader(metadata.ABI)) if err != nil { - log.Crit("Bad ABI") + panic("Bad ABI") } implementerType := reflect.TypeOf(implementer) @@ -128,12 +128,12 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr _, ok := implementerType.Elem().FieldByName("Address") if !ok { - log.Crit("Implementer for precompile ", contract, " is missing an Address field") + panic("Implementer for precompile " + contract + " is missing an Address field") } address, ok := reflect.ValueOf(implementer).Elem().FieldByName("Address").Interface().(addr) if !ok { - log.Crit("Implementer for precompile ", contract, "'s Address field has the wrong type") + panic("Implementer for precompile " + contract + "'s Address field has the wrong type") } gethAbiFuncTypeEquality := func(actual, geth reflect.Type) bool { @@ -167,7 +167,7 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr name = capitalize + name[1:] if len(method.ID) != 4 { - log.Crit("Method ID isn't 4 bytes") + panic("Method ID isn't 4 bytes") } id := *(*[4]byte)(method.ID) @@ -175,7 +175,7 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr handler, ok := implementerType.MethodByName(name) if !ok { - log.Crit("Precompile " + contract + " must implement " + name) + panic("Precompile " + contract + " must implement " + name) } var needs = []reflect.Type{ @@ -199,7 +199,7 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr needs = append(needs, reflect.TypeOf(&big.Int{})) purity = payable default: - log.Crit("Unknown state mutability ", method.StateMutability) + panic("Unknown state mutability " + method.StateMutability) } for _, arg := range method.Inputs { @@ -215,10 +215,9 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr expectedHandlerType := reflect.FuncOf(needs, outputs, false) if !gethAbiFuncTypeEquality(handler.Type, expectedHandlerType) { - log.Crit( - "Precompile "+contract+"'s "+name+"'s implementer has the wrong type\n", - "\texpected:\t", expectedHandlerType, "\n\tbut have:\t", handler.Type, - ) + panic( + "Precompile " + contract + "'s " + name + "'s implementer has the wrong type\n" + + "\texpected:\t" + expectedHandlerType.String() + "\n\tbut have:\t" + handler.Type.String()) } method := PrecompileMethod{ @@ -237,7 +236,7 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr method := implementerType.Method(i) name := method.Name if method.IsExported() && methodsByName[name] == nil { - log.Crit(contract + " is missing a solidity interface for " + name) + panic(contract + " is missing a solidity interface for " + name) } } @@ -269,11 +268,10 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr if 
arg.Indexed { _, ok := supportedIndices[arg.Type.String()] if !ok { - log.Crit( - "Please change the solidity for precompile ", contract, - "'s event ", name, ":\n\tEvent indices of type ", - arg.Type.String(), " are not supported", - ) + panic( + "Please change the solidity for precompile " + contract + + "'s event " + name + ":\n\tEvent indices of type " + + arg.Type.String() + " are not supported") } } } @@ -288,23 +286,21 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr field, ok := implementerType.Elem().FieldByName(name) if !ok { - log.Crit(missing, "event ", name, " of type\n\t", expectedFieldType) + panic(missing + "event " + name + " of type\n\t" + expectedFieldType.String()) } costField, ok := implementerType.Elem().FieldByName(name + "GasCost") if !ok { - log.Crit(missing, "event ", name, "'s GasCost of type\n\t", expectedCostType) + panic(missing + "event " + name + "'s GasCost of type\n\t" + expectedCostType.String()) } if !gethAbiFuncTypeEquality(field.Type, expectedFieldType) { - log.Crit( - context, "'s field for event ", name, " has the wrong type\n", - "\texpected:\t", expectedFieldType, "\n\tbut have:\t", field.Type, - ) + panic( + context + "'s field for event " + name + " has the wrong type\n" + + "\texpected:\t" + expectedFieldType.String() + "\n\tbut have:\t" + field.Type.String()) } if !gethAbiFuncTypeEquality(costField.Type, expectedCostType) { - log.Crit( - context, "'s field for event ", name, "GasCost has the wrong type\n", - "\texpected:\t", expectedCostType, "\n\tbut have:\t", costField.Type, - ) + panic( + context + "'s field for event " + name + "GasCost has the wrong type\n" + + "\texpected:\t" + expectedCostType.String() + "\n\tbut have:\t" + costField.Type.String()) } structFields := reflect.ValueOf(implementer).Elem() @@ -464,13 +460,12 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr field, ok := implementerType.Elem().FieldByName(name + "Error") if !ok { - log.Crit(missing, "custom error ", name, "Error of type\n\t", expectedFieldType) + panic(missing + "custom error " + name + "Error of type\n\t" + expectedFieldType.String()) } if field.Type != expectedFieldType { - log.Crit( - context, "'s field for error ", name, "Error has the wrong type\n", - "\texpected:\t", expectedFieldType, "\n\tbut have:\t", field.Type, - ) + panic( + context + "'s field for error " + name + "Error has the wrong type\n" + + "\texpected:\t" + expectedFieldType.String() + "\n\tbut have:\t" + field.Type.String()) } structFields := reflect.ValueOf(implementer).Elem() @@ -756,7 +751,7 @@ func (p *Precompile) Call( reflectArgs = append(reflectArgs, reflect.ValueOf(evm)) reflectArgs = append(reflectArgs, reflect.ValueOf(value)) default: - log.Crit("Unknown state mutability ", method.purity) + panic("Unknown state mutability " + strconv.Itoa(int(method.purity))) } args, err := method.template.Inputs.Unpack(input[4:]) diff --git a/precompiles/precompile_test.go b/precompiles/precompile_test.go index 75fed711eb..183ec1f083 100644 --- a/precompiles/precompile_test.go +++ b/precompiles/precompile_test.go @@ -5,15 +5,12 @@ package precompiles import ( "fmt" - "io" "math/big" - "os" "testing" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/storage" @@ -183,12 +180,6 @@ func TestEventCosts(t *testing.T) { } func 
TestPrecompilesPerArbosVersion(t *testing.T) { - // Set up a logger in case log.Crit is called by Precompiles() - glogger := log.NewGlogHandler( - log.NewTerminalHandler(io.Writer(os.Stderr), false)) - glogger.Verbosity(log.LevelWarn) - log.SetDefault(log.NewLogger(glogger)) - expectedNewMethodsPerArbosVersion := map[uint64]int{ 0: 89, params.ArbosVersion_5: 3, diff --git a/staker/bold/bold_staker.go b/staker/bold/bold_staker.go index df7e44af0d..7a21f9e6b7 100644 --- a/staker/bold/bold_staker.go +++ b/staker/bold/bold_staker.go @@ -71,8 +71,10 @@ type BoldConfig struct { AutoIncreaseAllowance bool `koanf:"auto-increase-allowance"` DelegatedStaking DelegatedStakingConfig `koanf:"delegated-staking"` RPCBlockNumber string `koanf:"rpc-block-number"` - strategy legacystaker.StakerStrategy - blockNum rpc.BlockNumber + // How long to wait since parent assertion was created to post a new assertion + MinimumGapToParentAssertion time.Duration `koanf:"minimum-gap-to-parent-assertion"` + strategy legacystaker.StakerStrategy + blockNum rpc.BlockNumber } func (c *BoldConfig) Validate() error { @@ -126,6 +128,7 @@ var DefaultBoldConfig = BoldConfig{ AssertionPostingInterval: time.Minute * 15, AssertionScanningInterval: time.Minute, AssertionConfirmingInterval: time.Minute, + MinimumGapToParentAssertion: time.Minute, // Correct default? API: false, APIHost: "127.0.0.1", APIPort: 9393, @@ -154,6 +157,7 @@ func BoldConfigAddOptions(prefix string, f *flag.FlagSet) { f.Duration(prefix+".assertion-posting-interval", DefaultBoldConfig.AssertionPostingInterval, "assertion posting interval") f.Duration(prefix+".assertion-scanning-interval", DefaultBoldConfig.AssertionScanningInterval, "scan assertion interval") f.Duration(prefix+".assertion-confirming-interval", DefaultBoldConfig.AssertionConfirmingInterval, "confirm assertion interval") + f.Duration(prefix+".minimum-gap-to-parent-assertion", DefaultBoldConfig.MinimumGapToParentAssertion, "minimum duration to wait since the parent assertion was created to post a new assertion") f.Duration(prefix+".check-staker-switch-interval", DefaultBoldConfig.CheckStakerSwitchInterval, "how often to check if staker can switch to bold") f.Bool(prefix+".api", DefaultBoldConfig.API, "enable api") f.String(prefix+".api-host", DefaultBoldConfig.APIHost, "bold api host") @@ -514,6 +518,7 @@ func newBOLDChallengeManager( challengemanager.StackWithPollingInterval(scanningInterval), challengemanager.StackWithPostingInterval(postingInterval), challengemanager.StackWithConfirmationInterval(confirmingInterval), + challengemanager.StackWithMinimumGapToParentAssertion(config.MinimumGapToParentAssertion), challengemanager.StackWithTrackChallengeParentAssertionHashes(config.TrackChallengeParentAssertionHashes), challengemanager.StackWithHeaderProvider(l1Reader), } diff --git a/system_tests/bold_challenge_protocol_test.go b/system_tests/bold_challenge_protocol_test.go index 777817bf3e..83700fc838 100644 --- a/system_tests/bold_challenge_protocol_test.go +++ b/system_tests/bold_challenge_protocol_test.go @@ -417,6 +417,7 @@ func testChallengeProtocolBOLD(t *testing.T, spawnerOpts ...server_arb.SpawnerOp challengemanager.StackWithMode(modes.MakeMode), challengemanager.StackWithPostingInterval(time.Second * 3), challengemanager.StackWithPollingInterval(time.Second), + challengemanager.StackWithMinimumGapToParentAssertion(0), challengemanager.StackWithAverageBlockCreationTime(time.Second), } diff --git a/system_tests/bold_new_challenge_test.go b/system_tests/bold_new_challenge_test.go index 
ad6e44bc71..fae4a57deb 100644 --- a/system_tests/bold_new_challenge_test.go +++ b/system_tests/bold_new_challenge_test.go @@ -344,6 +344,7 @@ func startBoldChallengeManager(t *testing.T, ctx context.Context, builder *NodeB challengemanager.StackWithPostingInterval(time.Second * 3), challengemanager.StackWithPollingInterval(time.Second), challengemanager.StackWithAverageBlockCreationTime(time.Second), + challengemanager.StackWithMinimumGapToParentAssertion(0), } challengeManager, err := challengemanager.NewChallengeStack( diff --git a/system_tests/overflow_assertions_test.go b/system_tests/overflow_assertions_test.go index c024a43070..eb2bb01470 100644 --- a/system_tests/overflow_assertions_test.go +++ b/system_tests/overflow_assertions_test.go @@ -224,6 +224,7 @@ func TestOverflowAssertions(t *testing.T) { challengemanager.StackWithPostingInterval(time.Second), challengemanager.StackWithPollingInterval(time.Millisecond * 500), challengemanager.StackWithAverageBlockCreationTime(time.Second), + challengemanager.StackWithMinimumGapToParentAssertion(0), } manager, err := challengemanager.NewChallengeStack( diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go index 69645d8878..55c13d664d 100644 --- a/system_tests/staker_test.go +++ b/system_tests/staker_test.go @@ -504,7 +504,7 @@ func TestGetValidatorWalletContractWithDataposterOnlyUsedToCreateValidatorWallet parentChainID, ) if err != nil { - log.Crit("error creating data poster to create validator wallet contract", "err", err) + Fatal(t, "error creating data poster to create validator wallet contract", "err", err) } getExtraGas := func() uint64 { return builder.nodeConfig.Staker.ExtraGas }
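
The message_pruner.go change above makes pruning progress survive restarts: the last pruned index is RLP-encoded under a fixed schema key and read back the next time deleteOldMessagesFromDB starts with an empty cache. The following standalone Go sketch shows that round-trip against an in-memory ethdb; the key name and the value 12345 are illustrative only and are not nitro's actual state.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/rlp"
)

// demoLastPrunedKey is a hypothetical schema key for this sketch; nitro keeps
// lastPrunedMessageKey and lastPrunedDelayedMessageKey in arbnode/schema.go.
var demoLastPrunedKey = []byte("_lastPrunedMessageKey")

// saveLastPruned RLP-encodes the last pruned index and stores it under a fixed key,
// mirroring what insertLastPrunedKey does after each pruning pass.
func saveLastPruned(db ethdb.Database, key []byte, value uint64) error {
	enc, err := rlp.EncodeToBytes(value)
	if err != nil {
		return err
	}
	return db.Put(key, enc)
}

// loadLastPruned returns 0 when the key is absent, the same signal the pruner
// uses for "cache not populated yet".
func loadLastPruned(db ethdb.Database, key []byte) (uint64, error) {
	has, err := db.Has(key)
	if err != nil || !has {
		return 0, err
	}
	enc, err := db.Get(key)
	if err != nil {
		return 0, err
	}
	var value uint64
	if err := rlp.DecodeBytes(enc, &value); err != nil {
		return 0, err
	}
	return value, nil
}

func main() {
	db := rawdb.NewMemoryDatabase()
	if err := saveLastPruned(db, demoLastPrunedKey, 12345); err != nil {
		panic(err)
	}
	v, err := loadLastPruned(db, demoLastPrunedKey)
	if err != nil {
		panic(err)
	}
	fmt.Println("last pruned key:", v) // prints 12345
}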
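
FindBlockContainingBatchCount in the node.go change locates the parent-chain block at which the sequencer message count first reaches the target in two phases: widen the search window backwards with a doubling step, then binary-search inside it. The sketch below models the same search over a plain function; countAt stands in for bridge.SequencerMessageCount, and the toy data (one batch every ten blocks) is made up.

package main

import "fmt"

// findBlockContainingCount is a simplified model of FindBlockContainingBatchCount.
// countAt must be non-decreasing in the block number, as a sequencer message count is,
// and countAt(high) is assumed to be at least target. It returns the first block at
// which countAt(block) >= target.
func findBlockContainingCount(high uint64, target uint64, countAt func(uint64) uint64) uint64 {
	low := uint64(0)
	step := uint64(100)
	if high > step {
		low = high - step
	}
	// Phase 1: move low backwards, doubling the step, until countAt(low) no longer exceeds target.
	for low > 0 && countAt(low) > target {
		high = low
		step *= 2
		if low > step {
			low -= step
		} else {
			low = 0
		}
	}
	// Phase 2: binary search in [low, high] for the first block reaching the target count.
	for low < high {
		mid := low + (high-low)/2
		if countAt(mid) < target {
			low = mid + 1
		} else {
			high = mid
		}
	}
	return low
}

func main() {
	// Toy data: one new batch lands every 10 blocks.
	countAt := func(block uint64) uint64 { return block / 10 }
	fmt.Println(findBlockContainingCount(1000, 42, countAt)) // prints 420
}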
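
The gethexec/api.go change gates every block-addressed arbtrace method on blockSupportedByClassicNode, which only lets explicit pre-nitro block numbers through to the classic node. Below is a minimal sketch of that check, assuming a hypothetical genesis block number; it is not the gethexec code itself, and hashes plus negative labels such as "latest" behave as described in the diff.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"
)

// classicBlockSupported is a simplified model of blockSupportedByClassicNode:
// only explicit block numbers in [0, genesisBlockNum] may be forwarded to the
// classic node; block hashes are passed through for the classic node to resolve,
// and negative labels such as "latest" are rejected.
func classicBlockSupported(raw []byte, genesisBlockNum uint64) error {
	var bnh rpc.BlockNumberOrHash
	if err := bnh.UnmarshalJSON(raw); err != nil {
		return err
	}
	blockNum, isNum := bnh.Number()
	if !isNum {
		return nil // a hash: leave it to the classic node
	}
	// #nosec G115 -- genesisBlockNum is far below int64 overflow in practice
	if blockNum < 0 || blockNum > rpc.BlockNumber(genesisBlockNum) {
		return fmt.Errorf("block number %v is not supported by classic node", blockNum)
	}
	return nil
}

func main() {
	genesis := uint64(22207817) // hypothetical nitro genesis block, for illustration only
	fmt.Println(classicBlockSupported([]byte(`"0x1000"`), genesis)) // <nil>: classic block, forwarded
	fmt.Println(classicBlockSupported([]byte(`"latest"`), genesis)) // error: not a classic block
}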
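
The programs.go change keys the new Stylus counters by run mode, e.g. arb/arbos/stylus/program_calls/commit_runmode and arb/arbos/stylus/gas_used/commit_runmode. The dependency-free sketch below only prints the metric names that naming scheme produces; the local runMode type is a stand-in for core.MessageRunMode, whose real constants live in go-ethereum's core package.

package main

import "fmt"

// runMode mirrors the shape of core.MessageRunMode for illustration only.
type runMode int

const (
	commitMode runMode = iota
	gasEstimationMode
	ethCallMode
	replayMode
)

// metricSuffix reproduces the naming convention introduced by runModeToString.
func metricSuffix(m runMode) string {
	switch m {
	case commitMode:
		return "commit_runmode"
	case gasEstimationMode:
		return "gas_estimation_runmode"
	case ethCallMode:
		return "eth_call_runmode"
	case replayMode:
		return "replay_runmode"
	default:
		return "unknown_runmode"
	}
}

func main() {
	for _, m := range []runMode{commitMode, gasEstimationMode, ethCallMode, replayMode} {
		fmt.Println("arb/arbos/stylus/program_calls/" + metricSuffix(m))
		fmt.Println("arb/arbos/stylus/gas_used/" + metricSuffix(m))
	}
}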