From 03d71e4880aafd4acddc8e2f1e499e7e08e3c63c Mon Sep 17 00:00:00 2001
From: Ganesh Vanahalli
Date: Tue, 19 Nov 2024 17:14:30 +0530
Subject: [PATCH 01/16] Add flag to disable DAS chunked stores

---
 cmd/datool/datool.go  |  4 +++-
 das/aggregator.go     |  3 +++
 das/dasRpcClient.go   | 39 +++++++++++++++++++++++++--------------
 das/rpc_aggregator.go |  2 +-
 4 files changed, 32 insertions(+), 16 deletions(-)

diff --git a/cmd/datool/datool.go b/cmd/datool/datool.go
index 06f94dc952..cb8507593c 100644
--- a/cmd/datool/datool.go
+++ b/cmd/datool/datool.go
@@ -92,6 +92,7 @@ type ClientStoreConfig struct {
     SigningWallet         string `koanf:"signing-wallet"`
     SigningWalletPassword string `koanf:"signing-wallet-password"`
     MaxStoreChunkBodySize int    `koanf:"max-store-chunk-body-size"`
+    UseLegacyStore        bool   `koanf:"use-legacy-store"`
 }
 
 func parseClientStoreConfig(args []string) (*ClientStoreConfig, error) {
@@ -104,6 +105,7 @@ func parseClientStoreConfig(args []string) (*ClientStoreConfig, error) {
     f.String("signing-wallet-password", genericconf.PASSWORD_NOT_SET, "password to unlock the wallet, if not specified the user is prompted for the password")
     f.Duration("das-retention-period", 24*time.Hour, "The period which DASes are requested to retain the stored batches.")
     f.Int("max-store-chunk-body-size", 512*1024, "The maximum HTTP POST body size for a chunked store request")
+    f.Bool("use-legacy-store", false, "enabling this forces the das rpc clients to use das_store. Disabled by default")
 
     k, err := confighelpers.BeginCommonParse(f, args)
     if err != nil {
@@ -152,7 +154,7 @@ func startClientStore(args []string) error {
         }
     }
 
-    client, err := das.NewDASRPCClient(config.URL, signer, config.MaxStoreChunkBodySize)
+    client, err := das.NewDASRPCClient(config.URL, signer, config.MaxStoreChunkBodySize, config.UseLegacyStore)
     if err != nil {
         return err
     }
diff --git a/das/aggregator.go b/das/aggregator.go
index 372e448e76..99cc2d58b0 100644
--- a/das/aggregator.go
+++ b/das/aggregator.go
@@ -41,12 +41,14 @@ type AggregatorConfig struct {
     AssumedHonest         int               `koanf:"assumed-honest"`
     Backends              BackendConfigList `koanf:"backends"`
     MaxStoreChunkBodySize int               `koanf:"max-store-chunk-body-size"`
+    UseLegacyStore        bool              `koanf:"use-legacy-store"`
 }
 
 var DefaultAggregatorConfig = AggregatorConfig{
     AssumedHonest:         0,
     Backends:              nil,
     MaxStoreChunkBodySize: 512 * 1024,
+    UseLegacyStore:        false,
 }
 
 var parsedBackendsConf BackendConfigList
@@ -56,6 +58,7 @@ func AggregatorConfigAddOptions(prefix string, f *flag.FlagSet) {
     f.Int(prefix+".assumed-honest", DefaultAggregatorConfig.AssumedHonest, "Number of assumed honest backends (H). If there are N backends, K=N+1-H valid responses are required to consider an Store request to be successful.")
     f.Var(&parsedBackendsConf, prefix+".backends", "JSON RPC backend configuration. This can be specified on the command line as a JSON array, eg: [{\"url\": \"...\", \"pubkey\": \"...\"},...], or as a JSON array in the config file.")
     f.Int(prefix+".max-store-chunk-body-size", DefaultAggregatorConfig.MaxStoreChunkBodySize, "maximum HTTP POST body size to use for individual batch chunks, including JSON RPC overhead and an estimated overhead of 512B of headers")
+    f.Bool(prefix+".use-legacy-store", DefaultAggregatorConfig.UseLegacyStore, "enabling this forces the das rpc clients to use das_store. Disabled by default")
 }
 
 type Aggregator struct {
diff --git a/das/dasRpcClient.go b/das/dasRpcClient.go
index 3ea6c4e2c6..37c3c30220 100644
--- a/das/dasRpcClient.go
+++ b/das/dasRpcClient.go
@@ -35,10 +35,11 @@ var (
 )
 
 type DASRPCClient struct { // implements DataAvailabilityService
-    clnt      *rpc.Client
-    url       string
-    signer    signature.DataSignerFunc
-    chunkSize uint64
+    clnt           *rpc.Client
+    url            string
+    signer         signature.DataSignerFunc
+    chunkSize      uint64
+    useLegacyStore bool
 }
 
 func nilSigner(_ []byte) ([]byte, error) {
@@ -47,7 +48,7 @@ func nilSigner(_ []byte) ([]byte, error) {
 
 const sendChunkJSONBoilerplate = "{\"jsonrpc\":\"2.0\",\"id\":4294967295,\"method\":\"das_sendChunked\",\"params\":[\"\"]}"
 
-func NewDASRPCClient(target string, signer signature.DataSignerFunc, maxStoreChunkBodySize int) (*DASRPCClient, error) {
+func NewDASRPCClient(target string, signer signature.DataSignerFunc, maxStoreChunkBodySize int, useLegacyStore bool) (*DASRPCClient, error) {
     clnt, err := rpc.Dial(target)
     if err != nil {
         return nil, err
@@ -56,18 +57,23 @@ func NewDASRPCClient(target string, signer signature.DataSignerFunc, maxStoreChu
         signer = nilSigner
     }
 
+    client := &DASRPCClient{
+        clnt:           clnt,
+        url:            target,
+        signer:         signer,
+        useLegacyStore: useLegacyStore,
+    }
+
     // Byte arrays are encoded in base64
-    chunkSize := (maxStoreChunkBodySize - len(sendChunkJSONBoilerplate) - 512 /* headers */) / 2
-    if chunkSize <= 0 {
-        return nil, fmt.Errorf("max-store-chunk-body-size %d doesn't leave enough room for chunk payload", maxStoreChunkBodySize)
+    if !useLegacyStore {
+        chunkSize := (maxStoreChunkBodySize - len(sendChunkJSONBoilerplate) - 512 /* headers */) / 2
+        if chunkSize <= 0 {
+            return nil, fmt.Errorf("max-store-chunk-body-size %d doesn't leave enough room for chunk payload", maxStoreChunkBodySize)
+        }
+        client.chunkSize = uint64(chunkSize)
     }
 
-    return &DASRPCClient{
-        clnt:      clnt,
-        url:       target,
-        signer:    signer,
-        chunkSize: uint64(chunkSize),
-    }, nil
+    return client, nil
 }
 
 func (c *DASRPCClient) Store(ctx context.Context, message []byte, timeout uint64) (*daprovider.DataAvailabilityCertificate, error) {
@@ -83,6 +89,11 @@ func (c *DASRPCClient) Store(ctx context.Context, message []byte, timeout uint64
         rpcClientStoreDurationHistogram.Update(time.Since(start).Nanoseconds())
     }()
 
+    if c.useLegacyStore {
+        log.Info("Legacy store is being force-used by the DAS client", "url", c.url)
+        return c.legacyStore(ctx, message, timeout)
+    }
+
     // #nosec G115
     timestamp := uint64(start.Unix())
     nChunks := uint64(len(message)) / c.chunkSize
diff --git a/das/rpc_aggregator.go b/das/rpc_aggregator.go
index 916637aac6..6869e140f1 100644
--- a/das/rpc_aggregator.go
+++ b/das/rpc_aggregator.go
@@ -110,7 +110,7 @@ func ParseServices(config AggregatorConfig, signer signature.DataSignerFunc) ([]
     }
 
     metricName := metricsutil.CanonicalizeMetricName(url.Hostname())
-    service, err := NewDASRPCClient(b.URL, signer, config.MaxStoreChunkBodySize)
+    service, err := NewDASRPCClient(b.URL, signer, config.MaxStoreChunkBodySize, config.UseLegacyStore)
     if err != nil {
         return nil, err
     }

From a391c6352e8cb4eedef99cbb84046d048a62a85b Mon Sep 17 00:00:00 2001
From: Ganesh Vanahalli
Date: Wed, 20 Nov 2024 09:50:58 +0530
Subject: [PATCH 02/16] address PR comments

---
 cmd/datool/datool.go  |  6 +++---
 das/aggregator.go     |  6 +++---
 das/dasRpcClient.go   | 24 ++++++++++++------------
 das/rpc_aggregator.go |  2 +-
 4 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/cmd/datool/datool.go b/cmd/datool/datool.go
index cb8507593c..fc186c76c4 100644
--- a/cmd/datool/datool.go
+++ b/cmd/datool/datool.go
@@ -92,7 +92,7 @@ type ClientStoreConfig struct {
     SigningWallet         string `koanf:"signing-wallet"`
     SigningWalletPassword string `koanf:"signing-wallet-password"`
     MaxStoreChunkBodySize int    `koanf:"max-store-chunk-body-size"`
-    UseLegacyStore        bool   `koanf:"use-legacy-store"`
+    DisableChunkedStore   bool   `koanf:"disable-chunked-store"`
 }
 
 func parseClientStoreConfig(args []string) (*ClientStoreConfig, error) {
@@ -105,7 +105,7 @@ func parseClientStoreConfig(args []string) (*ClientStoreConfig, error) {
     f.String("signing-wallet-password", genericconf.PASSWORD_NOT_SET, "password to unlock the wallet, if not specified the user is prompted for the password")
     f.Duration("das-retention-period", 24*time.Hour, "The period which DASes are requested to retain the stored batches.")
     f.Int("max-store-chunk-body-size", 512*1024, "The maximum HTTP POST body size for a chunked store request")
-    f.Bool("use-legacy-store", false, "enabling this forces the das rpc clients to use das_store. Disabled by default")
+    f.Bool("disable-chunked-store", false, "force data to always be sent to DAS all at once instead of splitting into chunks. Disabled by default")
 
     k, err := confighelpers.BeginCommonParse(f, args)
     if err != nil {
@@ -154,7 +154,7 @@ func startClientStore(args []string) error {
         }
     }
 
-    client, err := das.NewDASRPCClient(config.URL, signer, config.MaxStoreChunkBodySize, config.UseLegacyStore)
+    client, err := das.NewDASRPCClient(config.URL, signer, config.MaxStoreChunkBodySize, config.DisableChunkedStore)
     if err != nil {
         return err
     }
diff --git a/das/aggregator.go b/das/aggregator.go
index 99cc2d58b0..3797922bb5 100644
--- a/das/aggregator.go
+++ b/das/aggregator.go
@@ -41,14 +41,14 @@ type AggregatorConfig struct {
     AssumedHonest         int               `koanf:"assumed-honest"`
     Backends              BackendConfigList `koanf:"backends"`
     MaxStoreChunkBodySize int               `koanf:"max-store-chunk-body-size"`
-    UseLegacyStore        bool              `koanf:"use-legacy-store"`
+    DisableChunkedStore   bool              `koanf:"disable-chunked-store"`
 }
 
 var DefaultAggregatorConfig = AggregatorConfig{
     AssumedHonest:         0,
     Backends:              nil,
     MaxStoreChunkBodySize: 512 * 1024,
-    UseLegacyStore:        false,
+    DisableChunkedStore:   false,
 }
 
 var parsedBackendsConf BackendConfigList
@@ -58,7 +58,7 @@ func AggregatorConfigAddOptions(prefix string, f *flag.FlagSet) {
     f.Int(prefix+".assumed-honest", DefaultAggregatorConfig.AssumedHonest, "Number of assumed honest backends (H). If there are N backends, K=N+1-H valid responses are required to consider an Store request to be successful.")
     f.Var(&parsedBackendsConf, prefix+".backends", "JSON RPC backend configuration. This can be specified on the command line as a JSON array, eg: [{\"url\": \"...\", \"pubkey\": \"...\"},...], or as a JSON array in the config file.")
     f.Int(prefix+".max-store-chunk-body-size", DefaultAggregatorConfig.MaxStoreChunkBodySize, "maximum HTTP POST body size to use for individual batch chunks, including JSON RPC overhead and an estimated overhead of 512B of headers")
-    f.Bool(prefix+".use-legacy-store", DefaultAggregatorConfig.UseLegacyStore, "enabling this forces the das rpc clients to use das_store. Disabled by default")
+    f.Bool(prefix+".disable-chunked-store", DefaultAggregatorConfig.DisableChunkedStore, "force data to always be sent to DAS all at once instead of splitting into chunks. Disabled by default")
 }
 
 type Aggregator struct {
diff --git a/das/dasRpcClient.go b/das/dasRpcClient.go
index 37c3c30220..cd4ed078f4 100644
--- a/das/dasRpcClient.go
+++ b/das/dasRpcClient.go
@@ -35,11 +35,11 @@ var (
 )
 
 type DASRPCClient struct { // implements DataAvailabilityService
-    clnt           *rpc.Client
-    url            string
-    signer         signature.DataSignerFunc
-    chunkSize      uint64
-    useLegacyStore bool
+    clnt                *rpc.Client
+    url                 string
+    signer              signature.DataSignerFunc
+    chunkSize           uint64
+    disableChunkedStore bool
 }
 
 func nilSigner(_ []byte) ([]byte, error) {
@@ -48,7 +48,7 @@ func nilSigner(_ []byte) ([]byte, error) {
 
 const sendChunkJSONBoilerplate = "{\"jsonrpc\":\"2.0\",\"id\":4294967295,\"method\":\"das_sendChunked\",\"params\":[\"\"]}"
 
-func NewDASRPCClient(target string, signer signature.DataSignerFunc, maxStoreChunkBodySize int, useLegacyStore bool) (*DASRPCClient, error) {
+func NewDASRPCClient(target string, signer signature.DataSignerFunc, maxStoreChunkBodySize int, disableChunkedStore bool) (*DASRPCClient, error) {
     clnt, err := rpc.Dial(target)
     if err != nil {
         return nil, err
@@ -58,14 +58,14 @@ func NewDASRPCClient(target string, signer signature.DataSignerFunc, maxStoreChu
     }
 
     client := &DASRPCClient{
-        clnt:           clnt,
-        url:            target,
-        signer:         signer,
-        useLegacyStore: useLegacyStore,
+        clnt:                clnt,
+        url:                 target,
+        signer:              signer,
+        disableChunkedStore: disableChunkedStore,
     }
 
     // Byte arrays are encoded in base64
-    if !useLegacyStore {
+    if !disableChunkedStore {
         chunkSize := (maxStoreChunkBodySize - len(sendChunkJSONBoilerplate) - 512 /* headers */) / 2
         if chunkSize <= 0 {
             return nil, fmt.Errorf("max-store-chunk-body-size %d doesn't leave enough room for chunk payload", maxStoreChunkBodySize)
@@ -89,7 +89,7 @@ func (c *DASRPCClient) Store(ctx context.Context, message []byte, timeout uint64
         rpcClientStoreDurationHistogram.Update(time.Since(start).Nanoseconds())
     }()
 
-    if c.useLegacyStore {
+    if c.disableChunkedStore {
         log.Info("Legacy store is being force-used by the DAS client", "url", c.url)
         return c.legacyStore(ctx, message, timeout)
     }
diff --git a/das/rpc_aggregator.go b/das/rpc_aggregator.go
index 6869e140f1..91fdc07b45 100644
--- a/das/rpc_aggregator.go
+++ b/das/rpc_aggregator.go
@@ -110,7 +110,7 @@ func ParseServices(config AggregatorConfig, signer signature.DataSignerFunc) ([]
     }
 
     metricName := metricsutil.CanonicalizeMetricName(url.Hostname())
-    service, err := NewDASRPCClient(b.URL, signer, config.MaxStoreChunkBodySize, config.UseLegacyStore)
+    service, err := NewDASRPCClient(b.URL, signer, config.MaxStoreChunkBodySize, config.DisableChunkedStore)
     if err != nil {
         return nil, err
     }

From 0b31ccb531b3eed3255749b3b30d911082201c4c Mon Sep 17 00:00:00 2001
From: Ganesh Vanahalli
Date: Mon, 25 Nov 2024 10:50:29 +0530
Subject: [PATCH 03/16] address PR comments

---
 cmd/datool/datool.go | 2 +-
 das/aggregator.go    | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/cmd/datool/datool.go b/cmd/datool/datool.go
index fc186c76c4..9cc2f5ebd9 100644
--- a/cmd/datool/datool.go
+++ b/cmd/datool/datool.go
@@ -105,7 +105,7 @@ func parseClientStoreConfig(args []string) (*ClientStoreConfig, error) {
     f.String("signing-wallet-password", genericconf.PASSWORD_NOT_SET, "password to unlock the wallet, if not specified the user is prompted for the password")
     f.Duration("das-retention-period", 24*time.Hour, "The period which DASes are requested to retain the stored batches.")
     f.Int("max-store-chunk-body-size", 512*1024, "The maximum HTTP POST body size for a chunked store request")
-    f.Bool("disable-chunked-store", false, "force data to always be sent to DAS all at once instead of splitting into chunks. Disabled by default")
f.Bool("disable-chunked-store", false, "force data to always be sent to DAS all at once instead of splitting into chunks. Disabled by default") + f.Bool("disable-chunked-store", false, "force data to always be sent to DAS all at once instead of splitting into chunks") k, err := confighelpers.BeginCommonParse(f, args) if err != nil { diff --git a/das/aggregator.go b/das/aggregator.go index 3797922bb5..44f1568272 100644 --- a/das/aggregator.go +++ b/das/aggregator.go @@ -58,7 +58,7 @@ func AggregatorConfigAddOptions(prefix string, f *flag.FlagSet) { f.Int(prefix+".assumed-honest", DefaultAggregatorConfig.AssumedHonest, "Number of assumed honest backends (H). If there are N backends, K=N+1-H valid responses are required to consider an Store request to be successful.") f.Var(&parsedBackendsConf, prefix+".backends", "JSON RPC backend configuration. This can be specified on the command line as a JSON array, eg: [{\"url\": \"...\", \"pubkey\": \"...\"},...], or as a JSON array in the config file.") f.Int(prefix+".max-store-chunk-body-size", DefaultAggregatorConfig.MaxStoreChunkBodySize, "maximum HTTP POST body size to use for individual batch chunks, including JSON RPC overhead and an estimated overhead of 512B of headers") - f.Bool(prefix+".disable-chunked-store", DefaultAggregatorConfig.DisableChunkedStore, "force data to always be sent to DAS all at once instead of splitting into chunks. Disabled by default") + f.Bool(prefix+".disable-chunked-store", DefaultAggregatorConfig.DisableChunkedStore, "force data to always be sent to DAS all at once instead of splitting into chunks") } type Aggregator struct { From 0eec255ec01fc4cb8c1c6bc4e5610eb9ef29c727 Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Tue, 26 Nov 2024 16:13:56 +0530 Subject: [PATCH 04/16] Filter transaction --- arbos/block_processor.go | 6 +- execution/gethexec/sequencer.go | 2 +- go-ethereum | 2 +- system_tests/seq_filter_test.go | 97 +++++++++++++++++++++++++++++++++ 4 files changed, 102 insertions(+), 5 deletions(-) create mode 100644 system_tests/seq_filter_test.go diff --git a/arbos/block_processor.go b/arbos/block_processor.go index 77475856ac..006e9f5fdd 100644 --- a/arbos/block_processor.go +++ b/arbos/block_processor.go @@ -118,7 +118,7 @@ type SequencingHooks struct { TxErrors []error DiscardInvalidTxsEarly bool PreTxFilter func(*params.ChainConfig, *types.Header, *state.StateDB, *arbosState.ArbosState, *types.Transaction, *arbitrum_types.ConditionalOptions, common.Address, *L1Info) error - PostTxFilter func(*types.Header, *arbosState.ArbosState, *types.Transaction, common.Address, uint64, *core.ExecutionResult) error + PostTxFilter func(*types.Header, *state.StateDB, *arbosState.ArbosState, *types.Transaction, common.Address, uint64, *core.ExecutionResult) error ConditionalOptionsForTx []*arbitrum_types.ConditionalOptions } @@ -129,7 +129,7 @@ func NoopSequencingHooks() *SequencingHooks { func(*params.ChainConfig, *types.Header, *state.StateDB, *arbosState.ArbosState, *types.Transaction, *arbitrum_types.ConditionalOptions, common.Address, *L1Info) error { return nil }, - func(*types.Header, *arbosState.ArbosState, *types.Transaction, common.Address, uint64, *core.ExecutionResult) error { + func(*types.Header, *state.StateDB, *arbosState.ArbosState, *types.Transaction, common.Address, uint64, *core.ExecutionResult) error { return nil }, nil, @@ -322,7 +322,7 @@ func ProduceBlockAdvanced( vm.Config{}, runMode, func(result *core.ExecutionResult) error { - return hooks.PostTxFilter(header, state, tx, sender, dataGas, 
result) + return hooks.PostTxFilter(header, statedb, state, tx, sender, dataGas, result) }, ) if err != nil { diff --git a/execution/gethexec/sequencer.go b/execution/gethexec/sequencer.go index 92d440e8cb..9db5c206fe 100644 --- a/execution/gethexec/sequencer.go +++ b/execution/gethexec/sequencer.go @@ -490,7 +490,7 @@ func (s *Sequencer) preTxFilter(_ *params.ChainConfig, header *types.Header, sta return nil } -func (s *Sequencer) postTxFilter(header *types.Header, _ *arbosState.ArbosState, tx *types.Transaction, sender common.Address, dataGas uint64, result *core.ExecutionResult) error { +func (s *Sequencer) postTxFilter(header *types.Header, _ *state.StateDB, _ *arbosState.ArbosState, tx *types.Transaction, sender common.Address, dataGas uint64, result *core.ExecutionResult) error { if result.Err != nil && result.UsedGas > dataGas && result.UsedGas-dataGas <= s.config().MaxRevertGasReject { return arbitrum.NewRevertReason(result) } diff --git a/go-ethereum b/go-ethereum index d840c42249..cf0ca286a6 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit d840c4224963814f4a9a1dfb08510ded118bf1bf +Subproject commit cf0ca286a6e6cb435fc2331078a382f5efdaadb3 diff --git a/system_tests/seq_filter_test.go b/system_tests/seq_filter_test.go new file mode 100644 index 0000000000..728e7d1fd4 --- /dev/null +++ b/system_tests/seq_filter_test.go @@ -0,0 +1,97 @@ +package arbtest + +import ( + "context" + "errors" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/arbitrum_types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" + "github.com/offchainlabs/nitro/arbos" + "github.com/offchainlabs/nitro/arbos/arbosState" + "github.com/offchainlabs/nitro/arbos/arbostypes" + "github.com/offchainlabs/nitro/arbos/l1pricing" + "github.com/offchainlabs/nitro/util/arbmath" +) + +func TestSequencerFilter(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + builder.isSequencer = true + cleanup := builder.Build(t) + defer cleanup() + + builder.L2Info.GenerateAccount("User") + var latestL2 uint64 + var err error + for i := 0; latestL2 < 3; i++ { + _, _ = builder.L2.TransferBalance(t, "Owner", "User", big.NewInt(1e18), builder.L2Info) + latestL2, err = builder.L2.Client.BlockNumber(ctx) + Require(t, err) + } + + preTxFilter := func(withBlock bool) func(_ *params.ChainConfig, _ *types.Header, statedb *state.StateDB, _ *arbosState.ArbosState, tx *types.Transaction, _ *arbitrum_types.ConditionalOptions, _ common.Address, _ *arbos.L1Info) error { + return func(_ *params.ChainConfig, _ *types.Header, statedb *state.StateDB, _ *arbosState.ArbosState, tx *types.Transaction, _ *arbitrum_types.ConditionalOptions, _ common.Address, _ *arbos.L1Info) error { + switch tx.GetInner().(type) { + case *types.DynamicFeeTx: + statedb.FilterTx(withBlock) + } + return nil + } + } + postTxFilter := func(_ *types.Header, statedb *state.StateDB, _ *arbosState.ArbosState, _ *types.Transaction, _ common.Address, _ uint64, _ *core.ExecutionResult) error { + if statedb.IsTxInvalid() { + return errors.New("internal error") + } + return nil + } + + header := &arbostypes.L1IncomingMessageHeader{ + Kind: arbostypes.L1MessageType_L2Message, + Poster: l1pricing.BatchPosterAddress, + BlockNumber: 1, + Timestamp: 
diff --git a/system_tests/seq_filter_test.go b/system_tests/seq_filter_test.go
new file mode 100644
index 0000000000..728e7d1fd4
--- /dev/null
+++ b/system_tests/seq_filter_test.go
@@ -0,0 +1,97 @@
+package arbtest
+
+import (
+    "context"
+    "errors"
+    "math/big"
+    "testing"
+    "time"
+
+    "github.com/ethereum/go-ethereum/arbitrum_types"
+    "github.com/ethereum/go-ethereum/common"
+    "github.com/ethereum/go-ethereum/core"
+    "github.com/ethereum/go-ethereum/core/state"
+    "github.com/ethereum/go-ethereum/core/types"
+    "github.com/ethereum/go-ethereum/params"
+    "github.com/offchainlabs/nitro/arbos"
+    "github.com/offchainlabs/nitro/arbos/arbosState"
+    "github.com/offchainlabs/nitro/arbos/arbostypes"
+    "github.com/offchainlabs/nitro/arbos/l1pricing"
+    "github.com/offchainlabs/nitro/util/arbmath"
+)
+
+func TestSequencerFilter(t *testing.T) {
+    t.Parallel()
+    ctx, cancel := context.WithCancel(context.Background())
+    defer cancel()
+
+    builder := NewNodeBuilder(ctx).DefaultConfig(t, false)
+    builder.isSequencer = true
+    cleanup := builder.Build(t)
+    defer cleanup()
+
+    builder.L2Info.GenerateAccount("User")
+    var latestL2 uint64
+    var err error
+    for i := 0; latestL2 < 3; i++ {
+        _, _ = builder.L2.TransferBalance(t, "Owner", "User", big.NewInt(1e18), builder.L2Info)
+        latestL2, err = builder.L2.Client.BlockNumber(ctx)
+        Require(t, err)
+    }
+
+    preTxFilter := func(withBlock bool) func(_ *params.ChainConfig, _ *types.Header, statedb *state.StateDB, _ *arbosState.ArbosState, tx *types.Transaction, _ *arbitrum_types.ConditionalOptions, _ common.Address, _ *arbos.L1Info) error {
+        return func(_ *params.ChainConfig, _ *types.Header, statedb *state.StateDB, _ *arbosState.ArbosState, tx *types.Transaction, _ *arbitrum_types.ConditionalOptions, _ common.Address, _ *arbos.L1Info) error {
+            switch tx.GetInner().(type) {
+            case *types.DynamicFeeTx:
+                statedb.FilterTx(withBlock)
+            }
+            return nil
+        }
+    }
+    postTxFilter := func(_ *types.Header, statedb *state.StateDB, _ *arbosState.ArbosState, _ *types.Transaction, _ common.Address, _ uint64, _ *core.ExecutionResult) error {
+        if statedb.IsTxInvalid() {
+            return errors.New("internal error")
+        }
+        return nil
+    }
+
+    header := &arbostypes.L1IncomingMessageHeader{
+        Kind:        arbostypes.L1MessageType_L2Message,
+        Poster:      l1pricing.BatchPosterAddress,
+        BlockNumber: 1,
+        Timestamp:   arbmath.SaturatingUCast[uint64](time.Now().Unix()),
+        RequestId:   nil,
+        L1BaseFee:   nil,
+    }
+    var txes types.Transactions
+    txes = append(txes, builder.L2Info.PrepareTx("Owner", "User", builder.L2Info.TransferGas, big.NewInt(1e12), nil))
+    txes = append(txes, builder.L2Info.PrepareTx("User", "Owner", builder.L2Info.TransferGas, big.NewInt(1e12), nil))
+
+    hooks := &arbos.SequencingHooks{TxErrors: []error{}, DiscardInvalidTxsEarly: false, PreTxFilter: preTxFilter(false), PostTxFilter: postTxFilter, ConditionalOptionsForTx: nil}
+    block, err := builder.L2.ExecNode.ExecEngine.SequenceTransactions(header, txes, hooks)
+    if block != nil {
+        t.Fatal("block shouldn't be generated when all txes have failed")
+    }
+    Require(t, err) // There shouldn't be any error in block generation
+    if len(hooks.TxErrors) != 2 {
+        t.Fatalf("expected 2 tx errors, found: %d", len(hooks.TxErrors))
+    }
+    for _, err := range hooks.TxErrors {
+        if err.Error() != state.ErrArbTxFilter.Error() {
+            t.Fatalf("expected ErrArbTxFilter, found: %s", err.Error())
+        }
+    }
+
+    hooks.TxErrors = []error{}
+    hooks.PreTxFilter = preTxFilter(true)
+    block, err = builder.L2.ExecNode.ExecEngine.SequenceTransactions(header, txes, hooks)
+    if block != nil {
+        t.Fatal("block shouldn't be generated when all txes have failed")
+    }
+    if err == nil {
+        t.Fatal("expected ErrArbTxFilter but found nil")
+    }
+    if err.Error() != state.ErrArbTxFilter.Error() {
+        t.Fatalf("expected ErrArbTxFilter, found: %s", err.Error())
+    }
+}

From 53b11fc599351cc25005b72dc46da16c59c6964d Mon Sep 17 00:00:00 2001
From: Ganesh Vanahalli
Date: Tue, 26 Nov 2024 17:29:52 +0530
Subject: [PATCH 05/16] fix lint error

---
 system_tests/seq_filter_test.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/system_tests/seq_filter_test.go b/system_tests/seq_filter_test.go
index 728e7d1fd4..e054c8318f 100644
--- a/system_tests/seq_filter_test.go
+++ b/system_tests/seq_filter_test.go
@@ -13,6 +13,7 @@ import (
     "github.com/ethereum/go-ethereum/core/state"
     "github.com/ethereum/go-ethereum/core/types"
     "github.com/ethereum/go-ethereum/params"
+
     "github.com/offchainlabs/nitro/arbos"
     "github.com/offchainlabs/nitro/arbos/arbosState"
     "github.com/offchainlabs/nitro/arbos/arbostypes"
@@ -41,8 +42,7 @@ func TestSequencerFilter(t *testing.T) {
     preTxFilter := func(withBlock bool) func(_ *params.ChainConfig, _ *types.Header, statedb *state.StateDB, _ *arbosState.ArbosState, tx *types.Transaction, _ *arbitrum_types.ConditionalOptions, _ common.Address, _ *arbos.L1Info) error {
         return func(_ *params.ChainConfig, _ *types.Header, statedb *state.StateDB, _ *arbosState.ArbosState, tx *types.Transaction, _ *arbitrum_types.ConditionalOptions, _ common.Address, _ *arbos.L1Info) error {
-            switch tx.GetInner().(type) {
-            case *types.DynamicFeeTx:
+            if _, ok := tx.GetInner().(*types.DynamicFeeTx); ok {
                 statedb.FilterTx(withBlock)
             }
             return nil

From c25a7c6bcf27eb937b8e565cb62645e157be00a5 Mon Sep 17 00:00:00 2001
From: Ganesh Vanahalli
Date: Mon, 2 Dec 2024 15:37:44 -0600
Subject: [PATCH 06/16] address PR comments

---
 go-ethereum                     |  2 +-
 system_tests/seq_filter_test.go | 97 +++++++++++++++++++--------------
 2 files changed, 57 insertions(+), 42 deletions(-)

diff --git a/go-ethereum b/go-ethereum
index cf0ca286a6..30abfa28d1 160000
--- a/go-ethereum
+++ b/go-ethereum
@@ -1 +1 @@
-Subproject commit cf0ca286a6e6cb435fc2331078a382f5efdaadb3
+Subproject commit 30abfa28d13911257bc4a94631268ae46934db43
diff --git a/system_tests/seq_filter_test.go b/system_tests/seq_filter_test.go
index e054c8318f..35e35a68a6 100644
--- a/system_tests/seq_filter_test.go
+++ b/system_tests/seq_filter_test.go
@@ -21,15 +21,51 @@ import (
     "github.com/offchainlabs/nitro/util/arbmath"
 )
 
-func TestSequencerFilter(t *testing.T) {
+func TestSequencerTxFilter(t *testing.T) {
     t.Parallel()
+
+    builder, header, txes, hooks, cleanup := setupSequencerFilterTest(t, false)
+    defer cleanup()
+
+    block, err := builder.L2.ExecNode.ExecEngine.SequenceTransactions(header, txes, hooks)
+    if block != nil {
+        t.Fatal("block shouldn't be generated when all txes have failed")
+    }
+    Require(t, err) // There shouldn't be any error in block generation
+    if len(hooks.TxErrors) != 2 {
+        t.Fatalf("expected 2 tx errors, found: %d", len(hooks.TxErrors))
+    }
+    for _, err := range hooks.TxErrors {
+        if err.Error() != state.ErrArbTxFilter.Error() {
+            t.Fatalf("expected ErrArbTxFilter, found: %s", err.Error())
+        }
+    }
+}
+
+func TestSequencerBlockFilter(t *testing.T) {
+    t.Parallel()
+
+    builder, header, txes, hooks, cleanup := setupSequencerFilterTest(t, true)
+    defer cleanup()
+
+    block, err := builder.L2.ExecNode.ExecEngine.SequenceTransactions(header, txes, hooks)
+    if block != nil {
+        t.Fatal("block shouldn't be generated when all txes have failed")
+    }
+    if err == nil {
+        t.Fatal("expected ErrArbTxFilter but found nil")
+    }
+    if err.Error() != state.ErrArbTxFilter.Error() {
+        t.Fatalf("expected ErrArbTxFilter, found: %s", err.Error())
+    }
+}
+
+func setupSequencerFilterTest(t *testing.T, withBlock bool) (*NodeBuilder, *arbostypes.L1IncomingMessageHeader, types.Transactions, *arbos.SequencingHooks, func()) {
     ctx, cancel := context.WithCancel(context.Background())
-    defer cancel()
 
     builder := NewNodeBuilder(ctx).DefaultConfig(t, false)
     builder.isSequencer = true
-    cleanup := builder.Build(t)
-    defer cleanup()
+    builderCleanup := builder.Build(t)
 
     builder.L2Info.GenerateAccount("User")
     var latestL2 uint64
@@ -40,21 +76,6 @@ func TestSequencerFilter(t *testing.T) {
         Require(t, err)
     }
 
-    preTxFilter := func(withBlock bool) func(_ *params.ChainConfig, _ *types.Header, statedb *state.StateDB, _ *arbosState.ArbosState, tx *types.Transaction, _ *arbitrum_types.ConditionalOptions, _ common.Address, _ *arbos.L1Info) error {
-        return func(_ *params.ChainConfig, _ *types.Header, statedb *state.StateDB, _ *arbosState.ArbosState, tx *types.Transaction, _ *arbitrum_types.ConditionalOptions, _ common.Address, _ *arbos.L1Info) error {
-            if _, ok := tx.GetInner().(*types.DynamicFeeTx); ok {
-                statedb.FilterTx(withBlock)
-            }
-            return nil
-        }
-    }
-    postTxFilter := func(_ *types.Header, statedb *state.StateDB, _ *arbosState.ArbosState, _ *types.Transaction, _ common.Address, _ uint64, _ *core.ExecutionResult) error {
-        if statedb.IsTxInvalid() {
-            return errors.New("internal error")
-        }
-        return nil
-    }
-
     header := &arbostypes.L1IncomingMessageHeader{
         Kind:        arbostypes.L1MessageType_L2Message,
         Poster:      l1pricing.BatchPosterAddress,
@@ -63,35 +84,29 @@ func TestSequencerFilter(t *testing.T) {
         RequestId:   nil,
         L1BaseFee:   nil,
     }
+
     var txes types.Transactions
     txes = append(txes, builder.L2Info.PrepareTx("Owner", "User", builder.L2Info.TransferGas, big.NewInt(1e12), nil))
     txes = append(txes, builder.L2Info.PrepareTx("User", "Owner", builder.L2Info.TransferGas, big.NewInt(1e12), nil))
 
-    hooks := &arbos.SequencingHooks{TxErrors: []error{}, DiscardInvalidTxsEarly: false, PreTxFilter: preTxFilter(false), PostTxFilter: postTxFilter, ConditionalOptionsForTx: nil}
-    block, err := builder.L2.ExecNode.ExecEngine.SequenceTransactions(header, txes, hooks)
-    if block != nil {
-        t.Fatal("block shouldn't be generated when all txes have failed")
-    }
-    Require(t, err) // There shouldn't be any error in block generation
-    if len(hooks.TxErrors) != 2 {
-        t.Fatalf("expected 2 tx errors, found: %d", len(hooks.TxErrors))
+    preTxFilter := func(_ *params.ChainConfig, _ *types.Header, statedb *state.StateDB, _ *arbosState.ArbosState, tx *types.Transaction, _ *arbitrum_types.ConditionalOptions, _ common.Address, _ *arbos.L1Info) error {
+        if _, ok := tx.GetInner().(*types.DynamicFeeTx); ok {
+            statedb.FilterTx(withBlock)
+        }
+        return nil
     }
-    for _, err := range hooks.TxErrors {
-        if err.Error() != state.ErrArbTxFilter.Error() {
-            t.Fatalf("expected ErrArbTxFilter, found: %s", err.Error())
+    postTxFilter := func(_ *types.Header, statedb *state.StateDB, _ *arbosState.ArbosState, _ *types.Transaction, _ common.Address, _ uint64, _ *core.ExecutionResult) error {
+        if statedb.IsTxInvalid() {
+            return errors.New("internal error")
         }
+        return nil
     }
+    hooks := &arbos.SequencingHooks{TxErrors: []error{}, DiscardInvalidTxsEarly: false, PreTxFilter: preTxFilter, PostTxFilter: postTxFilter, ConditionalOptionsForTx: nil}
 
-    hooks.TxErrors = []error{}
-    hooks.PreTxFilter = preTxFilter(true)
-    block, err = builder.L2.ExecNode.ExecEngine.SequenceTransactions(header, txes, hooks)
-    if block != nil {
-        t.Fatal("block shouldn't be generated when all txes have failed")
-    }
-    if err == nil {
-        t.Fatal("expected ErrArbTxFilter but found nil")
-    }
-    if err.Error() != state.ErrArbTxFilter.Error() {
-        t.Fatalf("expected ErrArbTxFilter, found: %s", err.Error())
+    cleanup := func() {
+        builderCleanup()
+        cancel()
     }
+
+    return builder, header, txes, hooks, cleanup
 }

From e89533b377e392ba6aa5b78e90bdf37b921bd7a4 Mon Sep 17 00:00:00 2001
From: Ganesh Vanahalli
Date: Mon, 9 Dec 2024 16:25:57 -0600
Subject: [PATCH 07/16] address PR comments

---
 cmd/datool/datool.go  |  6 +++---
 das/aggregator.go     |  6 +++---
 das/dasRpcClient.go   | 24 ++++++++++++------------
 das/rpc_aggregator.go |  2 +-
 4 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/cmd/datool/datool.go b/cmd/datool/datool.go
index 9cc2f5ebd9..7ff82be229 100644
--- a/cmd/datool/datool.go
+++ b/cmd/datool/datool.go
@@ -92,7 +92,7 @@ type ClientStoreConfig struct {
     SigningWallet         string `koanf:"signing-wallet"`
     SigningWalletPassword string `koanf:"signing-wallet-password"`
     MaxStoreChunkBodySize int    `koanf:"max-store-chunk-body-size"`
-    DisableChunkedStore   bool   `koanf:"disable-chunked-store"`
+    EnableChunkedStore    bool   `koanf:"enable-chunked-store"`
 }
 
 func parseClientStoreConfig(args []string) (*ClientStoreConfig, error) {
@@ -105,7 +105,7 @@ func parseClientStoreConfig(args []string) (*ClientStoreConfig, error) {
     f.String("signing-wallet-password", genericconf.PASSWORD_NOT_SET, "password to unlock the wallet, if not specified the user is prompted for the password")
     f.Duration("das-retention-period", 24*time.Hour, "The period which DASes are requested to retain the stored batches.")
     f.Int("max-store-chunk-body-size", 512*1024, "The maximum HTTP POST body size for a chunked store request")
-    f.Bool("disable-chunked-store", false, "force data to always be sent to DAS all at once instead of splitting into chunks")
+    f.Bool("enable-chunked-store", true, "force data to always be sent to DAS all at once instead of splitting into chunks")
 
     k, err := confighelpers.BeginCommonParse(f, args)
     if err != nil {
@@ -154,7 +154,7 @@ func startClientStore(args []string) error {
         }
     }
 
-    client, err := das.NewDASRPCClient(config.URL, signer, config.MaxStoreChunkBodySize, config.DisableChunkedStore)
+    client, err := das.NewDASRPCClient(config.URL, signer, config.MaxStoreChunkBodySize, config.EnableChunkedStore)
     if err != nil {
         return err
     }
diff --git a/das/aggregator.go b/das/aggregator.go
index 44f1568272..46ca89415d 100644
--- a/das/aggregator.go
+++ b/das/aggregator.go
@@ -41,14 +41,14 @@ type AggregatorConfig struct {
     AssumedHonest         int               `koanf:"assumed-honest"`
     Backends              BackendConfigList `koanf:"backends"`
     MaxStoreChunkBodySize int               `koanf:"max-store-chunk-body-size"`
-    DisableChunkedStore   bool              `koanf:"disable-chunked-store"`
+    EnableChunkedStore    bool              `koanf:"enable-chunked-store"`
 }
 
 var DefaultAggregatorConfig = AggregatorConfig{
     AssumedHonest:         0,
     Backends:              nil,
     MaxStoreChunkBodySize: 512 * 1024,
-    DisableChunkedStore:   false,
+    EnableChunkedStore:    true,
 }
 
 var parsedBackendsConf BackendConfigList
@@ -58,7 +58,7 @@ func AggregatorConfigAddOptions(prefix string, f *flag.FlagSet) {
     f.Int(prefix+".assumed-honest", DefaultAggregatorConfig.AssumedHonest, "Number of assumed honest backends (H). If there are N backends, K=N+1-H valid responses are required to consider an Store request to be successful.")
     f.Var(&parsedBackendsConf, prefix+".backends", "JSON RPC backend configuration. This can be specified on the command line as a JSON array, eg: [{\"url\": \"...\", \"pubkey\": \"...\"},...], or as a JSON array in the config file.")
     f.Int(prefix+".max-store-chunk-body-size", DefaultAggregatorConfig.MaxStoreChunkBodySize, "maximum HTTP POST body size to use for individual batch chunks, including JSON RPC overhead and an estimated overhead of 512B of headers")
-    f.Bool(prefix+".disable-chunked-store", DefaultAggregatorConfig.DisableChunkedStore, "force data to always be sent to DAS all at once instead of splitting into chunks")
+    f.Bool(prefix+".enable-chunked-store", DefaultAggregatorConfig.EnableChunkedStore, "force data to always be sent to DAS all at once instead of splitting into chunks")
 }
 
 type Aggregator struct {
diff --git a/das/dasRpcClient.go b/das/dasRpcClient.go
index cd4ed078f4..aaa26a3aa9 100644
--- a/das/dasRpcClient.go
+++ b/das/dasRpcClient.go
@@ -35,11 +35,11 @@ var (
 )
 
 type DASRPCClient struct { // implements DataAvailabilityService
-    clnt                *rpc.Client
-    url                 string
-    signer              signature.DataSignerFunc
-    chunkSize           uint64
-    disableChunkedStore bool
+    clnt               *rpc.Client
+    url                string
+    signer             signature.DataSignerFunc
+    chunkSize          uint64
+    enableChunkedStore bool
 }
 
 func nilSigner(_ []byte) ([]byte, error) {
@@ -48,7 +48,7 @@ func nilSigner(_ []byte) ([]byte, error) {
 
 const sendChunkJSONBoilerplate = "{\"jsonrpc\":\"2.0\",\"id\":4294967295,\"method\":\"das_sendChunked\",\"params\":[\"\"]}"
 
-func NewDASRPCClient(target string, signer signature.DataSignerFunc, maxStoreChunkBodySize int, disableChunkedStore bool) (*DASRPCClient, error) {
+func NewDASRPCClient(target string, signer signature.DataSignerFunc, maxStoreChunkBodySize int, enableChunkedStore bool) (*DASRPCClient, error) {
     clnt, err := rpc.Dial(target)
     if err != nil {
         return nil, err
@@ -58,14 +58,14 @@ func NewDASRPCClient(target string, signer signature.DataSignerFunc, maxStoreChu
     }
 
     client := &DASRPCClient{
-        clnt:                clnt,
-        url:                 target,
-        signer:              signer,
-        disableChunkedStore: disableChunkedStore,
+        clnt:               clnt,
+        url:                target,
+        signer:             signer,
+        enableChunkedStore: enableChunkedStore,
     }
 
     // Byte arrays are encoded in base64
-    if !disableChunkedStore {
+    if enableChunkedStore {
         chunkSize := (maxStoreChunkBodySize - len(sendChunkJSONBoilerplate) - 512 /* headers */) / 2
         if chunkSize <= 0 {
             return nil, fmt.Errorf("max-store-chunk-body-size %d doesn't leave enough room for chunk payload", maxStoreChunkBodySize)
@@ -89,7 +89,7 @@ func (c *DASRPCClient) Store(ctx context.Context, message []byte, timeout uint64
         rpcClientStoreDurationHistogram.Update(time.Since(start).Nanoseconds())
     }()
 
-    if c.disableChunkedStore {
+    if !c.enableChunkedStore {
         log.Info("Legacy store is being force-used by the DAS client", "url", c.url)
         return c.legacyStore(ctx, message, timeout)
     }
diff --git a/das/rpc_aggregator.go b/das/rpc_aggregator.go
index 91fdc07b45..1c9e2eecab 100644
--- a/das/rpc_aggregator.go
+++ b/das/rpc_aggregator.go
@@ -110,7 +110,7 @@ func ParseServices(config AggregatorConfig, signer signature.DataSignerFunc) ([]
     }
 
     metricName := metricsutil.CanonicalizeMetricName(url.Hostname())
-    service, err := NewDASRPCClient(b.URL, signer, config.MaxStoreChunkBodySize, config.DisableChunkedStore)
+    service, err := NewDASRPCClient(b.URL, signer, config.MaxStoreChunkBodySize, config.EnableChunkedStore)
     if err != nil {
         return nil, err
     }

From 3f7dd3e035caa9b2847f2bb79197d53df91a68d0 Mon Sep 17 00:00:00 2001
From: Ganesh Vanahalli
Date: Mon, 9 Dec 2024 17:21:46 -0600
Subject: [PATCH 08/16] fix failing tests

---
 das/aggregator_test.go   | 4 ++--
 das/rpc_test.go          | 1 +
 system_tests/das_test.go | 1 +
 3 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/das/aggregator_test.go b/das/aggregator_test.go
index 217315eef0..b14c2961ce 100644
--- a/das/aggregator_test.go
+++ b/das/aggregator_test.go
@@ -50,7 +50,7 @@ func TestDAS_BasicAggregationLocal(t *testing.T) {
         backends = append(backends, *details)
     }
 
-    aggregator, err := NewAggregator(ctx, DataAvailabilityConfig{RPCAggregator: AggregatorConfig{AssumedHonest: 1}, ParentChainNodeURL: "none"}, backends)
+    aggregator, err := NewAggregator(ctx, DataAvailabilityConfig{RPCAggregator: AggregatorConfig{AssumedHonest: 1, EnableChunkedStore: true}, ParentChainNodeURL: "none"}, backends)
     Require(t, err)
 
     rawMsg := []byte("It's time for you to see the fnords.")
@@ -207,7 +207,7 @@ func testConfigurableStorageFailures(t *testing.T, shouldFailAggregation bool) {
     aggregator, err := NewAggregator(
         ctx,
         DataAvailabilityConfig{
-            RPCAggregator:      AggregatorConfig{AssumedHonest: assumedHonest},
+            RPCAggregator:      AggregatorConfig{AssumedHonest: assumedHonest, EnableChunkedStore: true},
             ParentChainNodeURL: "none",
             RequestTimeout:     time.Millisecond * 2000,
         }, backends)
diff --git a/das/rpc_test.go b/das/rpc_test.go
index ebc4b736d5..c4ee71aa4f 100644
--- a/das/rpc_test.go
+++ b/das/rpc_test.go
@@ -84,6 +84,7 @@ func testRpcImpl(t *testing.T, size, times int, concurrent bool) {
             AssumedHonest:         1,
             Backends:              beConfigs,
             MaxStoreChunkBodySize: (chunkSize * 2) + len(sendChunkJSONBoilerplate),
+            EnableChunkedStore:    true,
         },
         RequestTimeout: time.Minute,
     }
diff --git a/system_tests/das_test.go b/system_tests/das_test.go
index 52703c879d..ba50dcfff2 100644
--- a/system_tests/das_test.go
+++ b/system_tests/das_test.go
@@ -90,6 +90,7 @@ func aggConfigForBackend(backendConfig das.BackendConfig) das.AggregatorConfig {
         AssumedHonest:         1,
         Backends:              das.BackendConfigList{backendConfig},
         MaxStoreChunkBodySize: 512 * 1024,
+        EnableChunkedStore:    true,
     }
 }

From 296314bf92d7f230ac368c2513a3aa7bdf1331ff Mon Sep 17 00:00:00 2001
From: Ganesh Vanahalli
Date: Tue, 10 Dec 2024 09:19:14 -0600
Subject: [PATCH 09/16] address PR comment

---
 cmd/datool/datool.go | 2 +-
 das/aggregator.go    | 2 +-
 das/dasRpcClient.go  | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/cmd/datool/datool.go b/cmd/datool/datool.go
index 7ff82be229..67998880e0 100644
--- a/cmd/datool/datool.go
+++ b/cmd/datool/datool.go
@@ -105,7 +105,7 @@ func parseClientStoreConfig(args []string) (*ClientStoreConfig, error) {
     f.String("signing-wallet-password", genericconf.PASSWORD_NOT_SET, "password to unlock the wallet, if not specified the user is prompted for the password")
     f.Duration("das-retention-period", 24*time.Hour, "The period which DASes are requested to retain the stored batches.")
     f.Int("max-store-chunk-body-size", 512*1024, "The maximum HTTP POST body size for a chunked store request")
-    f.Bool("enable-chunked-store", true, "force data to always be sent to DAS all at once instead of splitting into chunks")
+    f.Bool("enable-chunked-store", true, "enable data to be sent to DAS in chunks instead of all at once")
 
     k, err := confighelpers.BeginCommonParse(f, args)
     if err != nil {
diff --git a/das/aggregator.go b/das/aggregator.go
index 46ca89415d..d6922fced8 100644
--- a/das/aggregator.go
+++ b/das/aggregator.go
@@ -58,7 +58,7 @@ func AggregatorConfigAddOptions(prefix string, f *flag.FlagSet) {
     f.Int(prefix+".assumed-honest", DefaultAggregatorConfig.AssumedHonest, "Number of assumed honest backends (H). If there are N backends, K=N+1-H valid responses are required to consider an Store request to be successful.")
     f.Var(&parsedBackendsConf, prefix+".backends", "JSON RPC backend configuration. This can be specified on the command line as a JSON array, eg: [{\"url\": \"...\", \"pubkey\": \"...\"},...], or as a JSON array in the config file.")
     f.Int(prefix+".max-store-chunk-body-size", DefaultAggregatorConfig.MaxStoreChunkBodySize, "maximum HTTP POST body size to use for individual batch chunks, including JSON RPC overhead and an estimated overhead of 512B of headers")
-    f.Bool(prefix+".enable-chunked-store", DefaultAggregatorConfig.EnableChunkedStore, "force data to always be sent to DAS all at once instead of splitting into chunks")
+    f.Bool(prefix+".enable-chunked-store", DefaultAggregatorConfig.EnableChunkedStore, "enable data to be sent to DAS in chunks instead of all at once")
 }
 
 type Aggregator struct {
diff --git a/das/dasRpcClient.go b/das/dasRpcClient.go
index aaa26a3aa9..5d4ca0dc93 100644
--- a/das/dasRpcClient.go
+++ b/das/dasRpcClient.go
@@ -90,7 +90,7 @@ func (c *DASRPCClient) Store(ctx context.Context, message []byte, timeout uint64
     }()
 
     if !c.enableChunkedStore {
-        log.Info("Legacy store is being force-used by the DAS client", "url", c.url)
+        log.Debug("Legacy store is being force-used by the DAS client", "url", c.url)
         return c.legacyStore(ctx, message, timeout)
     }

From b2618c992ae8a76500c7789c00efcea106c743be Mon Sep 17 00:00:00 2001
From: Ganesh Vanahalli
Date: Tue, 10 Dec 2024 16:09:02 -0600
Subject: [PATCH 10/16] update geth pin

---
 arbos/block_processor.go        | 24 +++++++++++++++---------
 go-ethereum                     |  2 +-
 system_tests/seq_filter_test.go |  4 ++--
 3 files changed, 18 insertions(+), 12 deletions(-)

diff --git a/arbos/block_processor.go b/arbos/block_processor.go
index 006e9f5fdd..5025cd16f9 100644
--- a/arbos/block_processor.go
+++ b/arbos/block_processor.go
@@ -263,15 +263,6 @@ func ProduceBlockAdvanced(
             return nil, nil, err
         }
 
-        if err = hooks.PreTxFilter(chainConfig, header, statedb, state, tx, options, sender, l1Info); err != nil {
-            return nil, nil, err
-        }
-
-        // Additional pre-transaction validity check
-        if err = extraPreTxFilter(chainConfig, header, statedb, state, tx, options, sender, l1Info); err != nil {
-            return nil, nil, err
-        }
-
         if basefee.Sign() > 0 {
             dataGas = math.MaxUint64
             brotliCompressionLevel, err := state.BrotliCompressionLevel()
@@ -306,7 +297,22 @@ func ProduceBlockAdvanced(
             return nil, nil, core.ErrGasLimitReached
         }
 
+        if statedb.IsTxFiltered() {
+            return nil, nil, errors.New("cannot process a new transaction when the previous one was filtered and the statedb wasn't reverted to a snapshot")
+        }
         snap := statedb.Snapshot()
+
+        if err = hooks.PreTxFilter(chainConfig, header, statedb, state, tx, options, sender, l1Info); err != nil {
+            statedb.RevertToSnapshot(snap)
+            return nil, nil, err
+        }
+
+        // Additional pre-transaction validity check
+        if err = extraPreTxFilter(chainConfig, header, statedb, state, tx, options, sender, l1Info); err != nil {
+            statedb.RevertToSnapshot(snap)
+            return nil, nil, err
+        }
+
         statedb.SetTxContext(tx.Hash(), len(receipts)) // the number of successful state transitions
 
         gasPool := gethGas
diff --git a/go-ethereum b/go-ethereum
index 30abfa28d1..c7f6a03869 160000
--- a/go-ethereum
+++ b/go-ethereum
@@ -1 +1 @@
-Subproject commit 30abfa28d13911257bc4a94631268ae46934db43
+Subproject commit c7f6a0386939b3f2dc9366613d8bae7f6c9812f7
diff --git a/system_tests/seq_filter_test.go b/system_tests/seq_filter_test.go
index 35e35a68a6..d728d091c7 100644
--- a/system_tests/seq_filter_test.go
+++ b/system_tests/seq_filter_test.go
@@ -95,8 +95,8 @@ func setupSequencerFilterTest(t *testing.T, withBlock bool) (*NodeBuilder, *arbo
         }
         return nil
     }
-    postTxFilter := func(_ *types.Header, statedb *state.StateDB, _ *arbosState.ArbosState, _ *types.Transaction, _ common.Address, _ uint64, _ *core.ExecutionResult) error {
-        if statedb.IsTxInvalid() {
+    postTxFilter := func(_ *types.Header, statedb *state.StateDB, _ *arbosState.ArbosState, tx *types.Transaction, _ common.Address, _ uint64, _ *core.ExecutionResult) error {
+        if statedb.IsTxFiltered() {
             return errors.New("internal error")
         }
         return nil

From c75ad7bd1de688e00878a6d46f330a22804c31a4 Mon Sep 17 00:00:00 2001
From: Ganesh Vanahalli
Date: Wed, 11 Dec 2024 10:31:58 -0600
Subject: [PATCH 11/16] address PR comments

---
 arbos/block_processor.go        | 13 ++++--
 go-ethereum                     |  2 +-
 system_tests/seq_filter_test.go | 78 +++++++++++++++++++++++----------
 3 files changed, 67 insertions(+), 26 deletions(-)

diff --git a/arbos/block_processor.go b/arbos/block_processor.go
index 5025cd16f9..5daf163c85 100644
--- a/arbos/block_processor.go
+++ b/arbos/block_processor.go
@@ -119,6 +119,7 @@ type SequencingHooks struct {
     DiscardInvalidTxsEarly bool
     PreTxFilter            func(*params.ChainConfig, *types.Header, *state.StateDB, *arbosState.ArbosState, *types.Transaction, *arbitrum_types.ConditionalOptions, common.Address, *L1Info) error
     PostTxFilter           func(*types.Header, *state.StateDB, *arbosState.ArbosState, *types.Transaction, common.Address, uint64, *core.ExecutionResult) error
+    BlockFilter            func(*types.Header, *state.StateDB, types.Transactions, types.Receipts) error
     ConditionalOptionsForTx []*arbitrum_types.ConditionalOptions
 }
 
@@ -132,6 +133,9 @@ func NoopSequencingHooks() *SequencingHooks {
         func(*types.Header, *state.StateDB, *arbosState.ArbosState, *types.Transaction, common.Address, uint64, *core.ExecutionResult) error {
             return nil
         },
+        func(*types.Header, *state.StateDB, types.Transactions, types.Receipts) error {
+            return nil
+        },
         nil,
     }
 }
@@ -297,9 +301,6 @@ func ProduceBlockAdvanced(
             return nil, nil, core.ErrGasLimitReached
         }
 
-        if statedb.IsTxFiltered() {
-            return nil, nil, errors.New("cannot process a new transaction when the previous one was filtered and the statedb wasn't reverted to a snapshot")
-        }
         snap := statedb.Snapshot()
@@ -461,6 +462,12 @@ func ProduceBlockAdvanced(
         }
     }
 
+    if sequencingHooks.BlockFilter != nil {
+        if err = sequencingHooks.BlockFilter(header, statedb, complete, receipts); err != nil {
+            return nil, nil, err
+        }
+    }
+
     binary.BigEndian.PutUint64(header.Nonce[:], delayedMessagesRead)
 
     FinalizeBlock(header, complete, statedb, chainConfig)
diff --git a/go-ethereum b/go-ethereum
index c7f6a03869..e9a5f8af11 160000
--- a/go-ethereum
+++ b/go-ethereum
@@ -1 +1 @@
-Subproject commit c7f6a0386939b3f2dc9366613d8bae7f6c9812f7
+Subproject commit e9a5f8af11e82a85510ff1c00932dc1a13e5cc7c
diff --git a/system_tests/seq_filter_test.go b/system_tests/seq_filter_test.go
index d728d091c7..fdd0c96d13 100644
--- a/system_tests/seq_filter_test.go
+++ b/system_tests/seq_filter_test.go
@@ -2,7 +2,6 @@ package arbtest
 
 import (
     "context"
-    "errors"
     "math/big"
     "testing"
     "time"
@@ -28,21 +27,28 @@ func TestSequencerTxFilter(t *testing.T) {
     defer cleanup()
 
     block, err := builder.L2.ExecNode.ExecEngine.SequenceTransactions(header, txes, hooks)
-    if block != nil {
-        t.Fatal("block shouldn't be generated when all txes have failed")
-    }
     Require(t, err) // There shouldn't be any error in block generation
+    if block == nil {
+        t.Fatal("block should be generated as second tx should pass")
+    }
+    if len(block.Transactions()) != 2 {
+        t.Fatalf("expecting two txs found: %d", len(block.Transactions()))
+    }
+    if block.Transactions()[1].Hash() != txes[1].Hash() {
+        t.Fatal("tx hash mismatch, expecting second tx to be present in the block")
+    }
     if len(hooks.TxErrors) != 2 {
-        t.Fatalf("expected 2 tx errors, found: %d", len(hooks.TxErrors))
+        t.Fatalf("expected 2 txErrors in hooks, found: %d", len(hooks.TxErrors))
     }
-    for _, err := range hooks.TxErrors {
-        if err.Error() != state.ErrArbTxFilter.Error() {
-            t.Fatalf("expected ErrArbTxFilter, found: %s", err.Error())
-        }
+    if hooks.TxErrors[0].Error() != state.ErrArbTxFilter.Error() {
+        t.Fatalf("expected ErrArbTxFilter, found: %s", err.Error())
+    }
+    if hooks.TxErrors[1] != nil {
+        t.Fatalf("found a non-nil error for second transaction: %v", hooks.TxErrors[1])
     }
 }
 
-func TestSequencerBlockFilter(t *testing.T) {
+func TestSequencerBlockFilterReject(t *testing.T) {
     t.Parallel()
 
     builder, header, txes, hooks, cleanup := setupSequencerFilterTest(t, true)
@@ -60,7 +66,26 @@ func TestSequencerBlockFilter(t *testing.T) {
     }
 }
 
-func setupSequencerFilterTest(t *testing.T, withBlock bool) (*NodeBuilder, *arbostypes.L1IncomingMessageHeader, types.Transactions, *arbos.SequencingHooks, func()) {
+func TestSequencerBlockFilterAccept(t *testing.T) {
+    t.Parallel()
+
+    builder, header, txes, hooks, cleanup := setupSequencerFilterTest(t, true)
+    defer cleanup()
+
+    block, err := builder.L2.ExecNode.ExecEngine.SequenceTransactions(header, txes[1:], hooks)
+    Require(t, err)
+    if block == nil {
+        t.Fatal("block should be generated as the tx should pass")
+    }
+    if len(block.Transactions()) != 2 {
+        t.Fatalf("expecting two txs found: %d", len(block.Transactions()))
+    }
+    if block.Transactions()[1].Hash() != txes[1].Hash() {
+        t.Fatal("tx hash mismatch, expecting second tx to be present in the block")
+    }
+}
+
+func setupSequencerFilterTest(t *testing.T, isBlockFilter bool) (*NodeBuilder, *arbostypes.L1IncomingMessageHeader, types.Transactions, *arbos.SequencingHooks, func()) {
     ctx, cancel := context.WithCancel(context.Background())
 
     builder := NewNodeBuilder(ctx).DefaultConfig(t, false)
@@ -86,22 +111,31 @@ func setupSequencerFilterTest(t *testing.T, withBlock bool) (*NodeBuilder, *arbo
     }
 
     var txes types.Transactions
-    txes = append(txes, builder.L2Info.PrepareTx("Owner", "User", builder.L2Info.TransferGas, big.NewInt(1e12), nil))
+    txes = append(txes, builder.L2Info.PrepareTx("Owner", "User", builder.L2Info.TransferGas, big.NewInt(1e12), []byte{1, 2, 3}))
     txes = append(txes, builder.L2Info.PrepareTx("User", "Owner", builder.L2Info.TransferGas, big.NewInt(1e12), nil))
 
-    preTxFilter := func(_ *params.ChainConfig, _ *types.Header, statedb *state.StateDB, _ *arbosState.ArbosState, tx *types.Transaction, _ *arbitrum_types.ConditionalOptions, _ common.Address, _ *arbos.L1Info) error {
-        if _, ok := tx.GetInner().(*types.DynamicFeeTx); ok {
-            statedb.FilterTx(withBlock)
+    hooks := arbos.NoopSequencingHooks()
+    if isBlockFilter {
+        hooks.BlockFilter = func(_ *types.Header, _ *state.StateDB, txes types.Transactions, _ types.Receipts) error {
+            if len(txes[1].Data()) > 0 {
+                return state.ErrArbTxFilter
+            }
+            return nil
         }
-        return nil
-    }
-    postTxFilter := func(_ *types.Header, statedb *state.StateDB, _ *arbosState.ArbosState, tx *types.Transaction, _ common.Address, _ uint64, _ *core.ExecutionResult) error {
-        if statedb.IsTxFiltered() {
-            return errors.New("internal error")
+    } else {
+        hooks.PreTxFilter = func(_ *params.ChainConfig, _ *types.Header, statedb *state.StateDB, _ *arbosState.ArbosState, tx *types.Transaction, _ *arbitrum_types.ConditionalOptions, _ common.Address, _ *arbos.L1Info) error {
+            if len(tx.Data()) > 0 {
+                statedb.FilterTx()
+            }
+            return nil
+        }
+        hooks.PostTxFilter = func(_ *types.Header, statedb *state.StateDB, _ *arbosState.ArbosState, tx *types.Transaction, _ common.Address, _ uint64, _ *core.ExecutionResult) error {
+            if statedb.IsTxFiltered() {
+                return state.ErrArbTxFilter
+            }
+            return nil
         }
-        return nil
     }
-    hooks := &arbos.SequencingHooks{TxErrors: []error{}, DiscardInvalidTxsEarly: false, PreTxFilter: preTxFilter, PostTxFilter: postTxFilter, ConditionalOptionsForTx: nil}
 
     cleanup := func() {
         builderCleanup()
         cancel()

From 95cc018b528321891cc1c5907f64d471b437295c Mon Sep 17 00:00:00 2001
From: Ganesh Vanahalli
Date: Wed, 11 Dec 2024 15:41:22 -0600
Subject: [PATCH 12/16] address PR comments

---
 arbos/block_processor.go        | 44 +++++++++++++++++----------------
 execution/gethexec/sequencer.go |  5 +++-
 go-ethereum                     |  2 +-
 3 files changed, 28 insertions(+), 23 deletions(-)

diff --git a/arbos/block_processor.go b/arbos/block_processor.go
index 5daf163c85..caa8abd4a7 100644
--- a/arbos/block_processor.go
+++ b/arbos/block_processor.go
@@ -115,12 +115,12 @@ func createNewHeader(prevHeader *types.Header, l1info *L1Info, state *arbosState
 type ConditionalOptionsForTx []*arbitrum_types.ConditionalOptions
 
 type SequencingHooks struct {
-    TxErrors               []error
-    DiscardInvalidTxsEarly bool
-    PreTxFilter            func(*params.ChainConfig, *types.Header, *state.StateDB, *arbosState.ArbosState, *types.Transaction, *arbitrum_types.ConditionalOptions, common.Address, *L1Info) error
-    PostTxFilter           func(*types.Header, *state.StateDB, *arbosState.ArbosState, *types.Transaction, common.Address, uint64, *core.ExecutionResult) error
-    BlockFilter            func(*types.Header, *state.StateDB, types.Transactions, types.Receipts) error
-    ConditionalOptionsForTx []*arbitrum_types.ConditionalOptions
+    TxErrors               []error // This can be unset
+    DiscardInvalidTxsEarly bool    // This can be unset
+    PreTxFilter            func(*params.ChainConfig, *types.Header, *state.StateDB, *arbosState.ArbosState, *types.Transaction, *arbitrum_types.ConditionalOptions, common.Address, *L1Info) error // This has to be set
+    PostTxFilter           func(*types.Header, *state.StateDB, *arbosState.ArbosState, *types.Transaction, common.Address, uint64, *core.ExecutionResult) error // This has to be set
+    BlockFilter            func(*types.Header, *state.StateDB, types.Transactions, types.Receipts) error // This can be unset
+    ConditionalOptionsForTx []*arbitrum_types.ConditionalOptions // This can be unset
 }
 
 func NoopSequencingHooks() *SequencingHooks {
@@ -133,9 +133,7 @@ func NoopSequencingHooks() *SequencingHooks {
         func(*types.Header, *state.StateDB, *arbosState.ArbosState, *types.Transaction, common.Address, uint64, *core.ExecutionResult) error {
             return nil
         },
-        func(*types.Header, *state.StateDB, types.Transactions, types.Receipts) error {
-            return nil
-        },
+        nil,
         nil,
     }
 }
@@ -176,7 +174,7 @@ func ProduceBlockAdvanced(
     runMode core.MessageRunMode,
 ) (*types.Block, types.Receipts, error) {
 
-    state, err := arbosState.OpenSystemArbosState(statedb, nil, true)
+    arbState, err := arbosState.OpenSystemArbosState(statedb, nil, true)
     if err != nil {
         return nil, nil, err
     }
@@ -193,11 +191,11 @@ func ProduceBlockAdvanced(
         l1Timestamp:   l1Header.Timestamp,
     }
 
-    header := createNewHeader(lastBlockHeader, l1Info, state, chainConfig)
+    header := createNewHeader(lastBlockHeader, l1Info, arbState, chainConfig)
     signer := types.MakeSigner(chainConfig, header.Number, header.Time)
     // Note: blockGasLeft will diverge from the actual gas left during execution in the event of invalid txs,
     // but it's only used as block-local representation limiting the amount of work done in a block.
-    blockGasLeft, _ := state.L2PricingState().PerBlockGasLimit()
+    blockGasLeft, _ := arbState.L2PricingState().PerBlockGasLimit()
     l1BlockNum := l1Info.l1BlockNumber
 
     // Prepend a tx before all others to touch up the state (update the L1 block num, pricing pools, etc)
@@ -230,7 +228,7 @@ func ProduceBlockAdvanced(
             if !ok {
                 return nil, nil, errors.New("retryable tx is somehow not a retryable")
             }
-            retryable, _ := state.RetryableState().OpenRetryable(retry.TicketId, time)
+            retryable, _ := arbState.RetryableState().OpenRetryable(retry.TicketId, time)
             if retryable == nil {
                 // retryable was already deleted
                 continue
@@ -269,11 +267,11 @@ func ProduceBlockAdvanced(
         if basefee.Sign() > 0 {
             dataGas = math.MaxUint64
-            brotliCompressionLevel, err := state.BrotliCompressionLevel()
+            brotliCompressionLevel, err := arbState.BrotliCompressionLevel()
             if err != nil {
                 return nil, nil, fmt.Errorf("failed to get brotli compression level: %w", err)
             }
-            posterCost, _ := state.L1PricingState().GetPosterInfo(tx, poster, brotliCompressionLevel)
+            posterCost, _ := arbState.L1PricingState().GetPosterInfo(tx, poster, brotliCompressionLevel)
             posterCostInL2Gas := arbmath.BigDiv(posterCost, basefee)
 
             if posterCostInL2Gas.IsUint64() {
@@ -303,13 +301,13 @@ func ProduceBlockAdvanced(
         snap := statedb.Snapshot()
 
-        if err = hooks.PreTxFilter(chainConfig, header, statedb, state, tx, options, sender, l1Info); err != nil {
+        if err = hooks.PreTxFilter(chainConfig, header, statedb, arbState, tx, options, sender, l1Info); err != nil {
            statedb.RevertToSnapshot(snap)
            return nil, nil, err
        }
 
        // Additional pre-transaction validity check
-       if err = extraPreTxFilter(chainConfig, header, statedb, state, tx, options, sender, l1Info); err != nil {
+       if err = extraPreTxFilter(chainConfig, header, statedb, arbState, tx, options, sender, l1Info); err != nil {
            statedb.RevertToSnapshot(snap)
            return nil, nil, err
        }
@@ -329,7 +327,7 @@ func ProduceBlockAdvanced(
             vm.Config{},
             runMode,
             func(result *core.ExecutionResult) error {
-                return hooks.PostTxFilter(header, statedb, state, tx, sender, dataGas, result)
+                return hooks.PostTxFilter(header, statedb, arbState, tx, sender, dataGas, result)
             },
         )
         if err != nil {
@@ -339,7 +337,7 @@ func ProduceBlockAdvanced(
         }
 
         // Additional post-transaction validity check
-        if err = extraPostTxFilter(chainConfig, header, statedb, state, tx, options, sender, l1Info, result); err != nil {
+        if err = extraPostTxFilter(chainConfig, header, statedb, arbState, tx, options, sender, l1Info, result); err != nil {
             statedb.RevertToSnapshot(snap)
             return nil, nil, err
         }
@@ -370,13 +368,13 @@ func ProduceBlockAdvanced(
 
         if tx.Type() == types.ArbitrumInternalTxType {
             // ArbOS might have upgraded to a new version, so we need to refresh our state
-            state, err = arbosState.OpenSystemArbosState(statedb, nil, true)
+            arbState, err = arbosState.OpenSystemArbosState(statedb, nil, true)
             if err != nil {
                 return nil, nil, err
             }
             // Update the ArbOS version in the header (if it changed)
             extraInfo := types.DeserializeHeaderExtraInformation(header)
-            extraInfo.ArbOSFormatVersion = state.ArbOSVersion()
+            extraInfo.ArbOSFormatVersion = arbState.ArbOSVersion()
             extraInfo.UpdateHeaderWithInfo(header)
         }
 
@@ -462,6 +460,10 @@ func ProduceBlockAdvanced(
         }
     }
 
+    if statedb.IsTxFiltered() {
+        return nil, nil, state.ErrArbTxFilter
+    }
+
     if sequencingHooks.BlockFilter != nil {
         if err = sequencingHooks.BlockFilter(header, statedb, complete, receipts); err != nil {
             return nil, nil, err
diff --git a/execution/gethexec/sequencer.go b/execution/gethexec/sequencer.go
index 9db5c206fe..faded7375c 100644
--- a/execution/gethexec/sequencer.go
+++ b/execution/gethexec/sequencer.go
@@ -490,7 +490,10 @@ func (s *Sequencer) preTxFilter(_ *params.ChainConfig, header *types.Header, sta
     return nil
 }
 
-func (s *Sequencer) postTxFilter(header *types.Header, _ *state.StateDB, _ *arbosState.ArbosState, tx *types.Transaction, sender common.Address, dataGas uint64, result *core.ExecutionResult) error {
+func (s *Sequencer) postTxFilter(header *types.Header, statedb *state.StateDB, _ *arbosState.ArbosState, tx *types.Transaction, sender common.Address, dataGas uint64, result *core.ExecutionResult) error {
+    if statedb.IsTxFiltered() {
+        return state.ErrArbTxFilter
+    }
     if result.Err != nil && result.UsedGas > dataGas && result.UsedGas-dataGas <= s.config().MaxRevertGasReject {
         return arbitrum.NewRevertReason(result)
     }
diff --git a/go-ethereum b/go-ethereum
index e9a5f8af11..6205f5effb 160000
--- a/go-ethereum
+++ b/go-ethereum
@@ -1 +1 @@
-Subproject commit e9a5f8af11e82a85510ff1c00932dc1a13e5cc7c
+Subproject commit 6205f5effbcc8286f14cea045a9fcabb7c894413

From 66baceb7c497ff0edff87d7f6cba998b9922817d Mon Sep 17 00:00:00 2001
From: Ganesh Vanahalli
Date: Mon, 16 Dec 2024 18:15:00 -0600
Subject: [PATCH 13/16] update geth pin

---
 arbos/block_processor.go | 23 +++++++++++------------
 go-ethereum              |  2 +-
 2 files changed, 12 insertions(+), 13 deletions(-)

diff --git a/arbos/block_processor.go b/arbos/block_processor.go
index caa8abd4a7..a06034f905 100644
--- a/arbos/block_processor.go
+++ b/arbos/block_processor.go
@@ -265,6 +265,15 @@ func ProduceBlockAdvanced(
             return nil, nil, err
         }
 
+        if err = hooks.PreTxFilter(chainConfig, header, statedb, arbState, tx, options, sender, l1Info); err != nil {
+            return nil, nil, err
+        }
+
+        // Additional pre-transaction validity check
+        if err = extraPreTxFilter(chainConfig, header, statedb, arbState, tx, options, sender, l1Info); err != nil {
+            return nil, nil, err
+        }
+
         if basefee.Sign() > 0 {
             dataGas = math.MaxUint64
             brotliCompressionLevel, err := arbState.BrotliCompressionLevel()
@@ -300,18 +309,6 @@ func ProduceBlockAdvanced(
         }
 
         snap := statedb.Snapshot()
-
-        if err = hooks.PreTxFilter(chainConfig, header, statedb, arbState, tx, options, sender, l1Info); err != nil {
-            statedb.RevertToSnapshot(snap)
-            return nil, nil, err
-        }
-
-        // Additional pre-transaction validity check
-        if err = extraPreTxFilter(chainConfig, header, statedb, arbState, tx, options, sender, l1Info); err != nil {
-            statedb.RevertToSnapshot(snap)
-            return nil, nil, err
-        }
-
         statedb.SetTxContext(tx.Hash(), len(receipts)) // the number of successful state transitions
 
         gasPool := gethGas
@@ -330,12 +327,14 @@ func ProduceBlockAdvanced(
         if err != nil {
             // Ignore this transaction if it's invalid under the state transition function
             statedb.RevertToSnapshot(snap)
+            statedb.ClearTxFilter()
             return nil, nil, err
         }
 
         // Additional post-transaction validity check
         if err = extraPostTxFilter(chainConfig, header, statedb, arbState, tx, options, sender, l1Info, result); err != nil {
             statedb.RevertToSnapshot(snap)
+            statedb.ClearTxFilter()
             return nil, nil, err
         }
diff --git a/go-ethereum b/go-ethereum
index 6205f5effb..313432e2a4 160000
--- a/go-ethereum
+++ b/go-ethereum
@@ -1 +1 @@
-Subproject commit 6205f5effbcc8286f14cea045a9fcabb7c894413
+Subproject commit 313432e2a408f5d7d0f50c9ad4ccf515c8d21a56

From ddc65ede027f33688a8d437fa5c02672d3e2ad89 Mon Sep 17 00:00:00 2001
From: Pepper Lebeck-Jobe
Date: Tue, 24 Dec 2024 17:39:05 +1000
Subject: [PATCH 14/16] Fall back to the confirmed state if the agreed state is
 nil.

This way, the block validators will get updates even for non-staked nodes.

Fixes NIT-3009
---
 staker/bold/bold_staker.go | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/staker/bold/bold_staker.go b/staker/bold/bold_staker.go
index 1a8eed80fa..539eb80abf 100644
--- a/staker/bold/bold_staker.go
+++ b/staker/bold/bold_staker.go
@@ -260,6 +260,13 @@ func (b *BOLDStaker) Start(ctxIn context.Context) {
         }
 
         if confirmedGlobalState != nil {
+            if agreedGlobalState == nil {
+                // If we don't have a latest agreed global state, we should fall back to
+                // using the latest confirmed global state.
+ for _, notifier := range b.stakedNotifiers { + notifier.UpdateLatestStaked(confirmedMsgCount, *confirmedGlobalState) + } + } for _, notifier := range b.confirmedNotifiers { notifier.UpdateLatestConfirmed(confirmedMsgCount, *confirmedGlobalState) } From a54d33d16b20c494e59b5dec1b918fb45a306633 Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Fri, 27 Dec 2024 17:23:21 -0600 Subject: [PATCH 15/16] update geth pin --- .dockerignore | 1 + .github/workflows/arbitrator-ci.yml | 3 + .github/workflows/submodule-pin-check.yml | 6 +- arbitrator/Cargo.toml | 1 + arbitrator/arbutil/src/benchmark.rs | 14 + arbitrator/arbutil/src/lib.rs | 1 + arbitrator/jit/src/lib.rs | 51 + arbitrator/jit/src/main.rs | 51 +- arbitrator/jit/src/prepare.rs | 2 +- arbitrator/jit/src/program.rs | 63 +- arbitrator/jit/src/stylus_backend.rs | 4 + arbitrator/stylus/src/env.rs | 4 + arbitrator/stylus/src/host.rs | 15 + arbitrator/stylus/src/native.rs | 4 + arbitrator/tools/stylus_benchmark/Cargo.lock | 2670 +++++++++++++++++ arbitrator/tools/stylus_benchmark/Cargo.toml | 16 + .../tools/stylus_benchmark/src/benchmark.rs | 105 + arbitrator/tools/stylus_benchmark/src/main.rs | 44 + .../tools/stylus_benchmark/src/scenario.rs | 128 + .../wasm-libraries/user-host-trait/src/lib.rs | 37 + .../wasm-libraries/user-host/src/program.rs | 8 + .../wasm-libraries/user-test/src/program.rs | 7 + arbnode/batch_poster.go | 2 +- arbnode/delayed_sequencer.go | 56 +- arbnode/message_pruner.go | 2 +- arbos/addressSet/addressSet.go | 3 +- arbos/arbosState/arbosstate.go | 39 +- arbos/blockhash/blockhash.go | 3 +- arbos/blockhash/blockhash_test.go | 3 +- arbos/internal_tx.go | 5 +- arbos/l1pricing/l1PricingOldVersions.go | 5 +- arbos/l1pricing/l1pricing.go | 16 +- arbos/programs/native.go | 26 +- arbos/programs/testcompile.go | 26 +- arbos/tx_processor.go | 16 +- arbos/util/transfer.go | 3 +- cmd/conf/database.go | 6 +- cmd/nitro/init.go | 8 +- contracts | 2 +- execution/gethexec/executionengine.go | 3 +- go-ethereum | 2 +- precompiles/ArbGasInfo.go | 6 +- precompiles/ArbOwnerPublic.go | 3 +- precompiles/ArbOwner_test.go | 3 +- precompiles/ArbRetryableTx.go | 2 +- precompiles/ArbSys.go | 9 +- precompiles/precompile.go | 38 +- precompiles/precompile_test.go | 14 +- precompiles/wrapper.go | 3 +- staker/legacy/staker.go | 3 - system_tests/arbos_upgrade_test.go | 271 ++ system_tests/block_validator_test.go | 5 +- system_tests/debugapi_test.go | 235 ++ system_tests/estimation_test.go | 2 +- system_tests/fees_test.go | 6 +- system_tests/precompile_test.go | 54 +- system_tests/retryable_test.go | 2 +- system_tests/transfer_test.go | 5 +- 58 files changed, 3855 insertions(+), 267 deletions(-) create mode 100644 arbitrator/arbutil/src/benchmark.rs create mode 100644 arbitrator/jit/src/lib.rs create mode 100644 arbitrator/tools/stylus_benchmark/Cargo.lock create mode 100644 arbitrator/tools/stylus_benchmark/Cargo.toml create mode 100644 arbitrator/tools/stylus_benchmark/src/benchmark.rs create mode 100644 arbitrator/tools/stylus_benchmark/src/main.rs create mode 100644 arbitrator/tools/stylus_benchmark/src/scenario.rs create mode 100644 system_tests/arbos_upgrade_test.go diff --git a/.dockerignore b/.dockerignore index 51424900e8..2d5303a3be 100644 --- a/.dockerignore +++ b/.dockerignore @@ -36,6 +36,7 @@ arbitrator/tools/wasmer/target/ arbitrator/tools/wasm-tools/ arbitrator/tools/pricers/ arbitrator/tools/module_roots/ +arbitrator/tools/stylus_benchmark arbitrator/langs/rust/target/ arbitrator/langs/bf/target/ diff --git 
a/.github/workflows/arbitrator-ci.yml b/.github/workflows/arbitrator-ci.yml index 47646017ac..dd58a30571 100644 --- a/.github/workflows/arbitrator-ci.yml +++ b/.github/workflows/arbitrator-ci.yml @@ -171,6 +171,9 @@ jobs: - name: Rustfmt - langs/rust run: cargo fmt --all --manifest-path arbitrator/langs/rust/Cargo.toml -- --check + - name: Rustfmt - tools/stylus_benchmark + run: cargo fmt --all --manifest-path arbitrator/tools/stylus_benchmark/Cargo.toml -- --check + - name: Make proofs from test cases run: make -j test-gen-proofs diff --git a/.github/workflows/submodule-pin-check.yml b/.github/workflows/submodule-pin-check.yml index 60dd8ad827..94fa705655 100644 --- a/.github/workflows/submodule-pin-check.yml +++ b/.github/workflows/submodule-pin-check.yml @@ -25,9 +25,9 @@ jobs: run: | status_state="pending" declare -Ar exceptions=( - [contracts]=origin/develop + [contracts]=origin/pre-bold [nitro-testnode]=origin/master - + #TODO Rachel to check these are the intended branches. [arbitrator/langs/c]=origin/vm-storage-cache [arbitrator/tools/wasmer]=origin/adopt-v4.2.8 @@ -38,7 +38,7 @@ jobs: if [[ -v exceptions[$mod] ]]; then branch=${exceptions[$mod]} fi - + if ! git -C $mod merge-base --is-ancestor HEAD $branch; then echo $mod diverges from $branch divergent=1 diff --git a/arbitrator/Cargo.toml b/arbitrator/Cargo.toml index eaafb6e439..3c5228daf2 100644 --- a/arbitrator/Cargo.toml +++ b/arbitrator/Cargo.toml @@ -12,6 +12,7 @@ members = [ exclude = [ "stylus/tests/", "tools/wasmer/", + "tools/stylus_benchmark", ] resolver = "2" diff --git a/arbitrator/arbutil/src/benchmark.rs b/arbitrator/arbutil/src/benchmark.rs new file mode 100644 index 0000000000..580d0191a0 --- /dev/null +++ b/arbitrator/arbutil/src/benchmark.rs @@ -0,0 +1,14 @@ +// Copyright 2024, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +use crate::evm::api::Ink; +use std::time::{Duration, Instant}; + +// Benchmark is used to track the performance of blocks of code in stylus +#[derive(Clone, Copy, Debug, Default)] +pub struct Benchmark { + pub timer: Option, + pub elapsed_total: Duration, + pub ink_start: Option, + pub ink_total: Ink, +} diff --git a/arbitrator/arbutil/src/lib.rs b/arbitrator/arbutil/src/lib.rs index 9c48a9fefc..e17e8d9448 100644 --- a/arbitrator/arbutil/src/lib.rs +++ b/arbitrator/arbutil/src/lib.rs @@ -1,6 +1,7 @@ // Copyright 2022-2024, Offchain Labs, Inc. // For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE +pub mod benchmark; /// cbindgen:ignore pub mod color; pub mod crypto; diff --git a/arbitrator/jit/src/lib.rs b/arbitrator/jit/src/lib.rs new file mode 100644 index 0000000000..d0ad76bd03 --- /dev/null +++ b/arbitrator/jit/src/lib.rs @@ -0,0 +1,51 @@ +// Copyright 2021-2024, Offchain Labs, Inc. 
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE + +use std::path::PathBuf; +use structopt::StructOpt; + +mod arbcompress; +mod caller_env; +pub mod machine; +mod prepare; +pub mod program; +mod socket; +pub mod stylus_backend; +mod test; +mod wasip1_stub; +mod wavmio; + +#[derive(StructOpt)] +#[structopt(name = "jit-prover")] +pub struct Opts { + #[structopt(short, long)] + binary: PathBuf, + #[structopt(long, default_value = "0")] + inbox_position: u64, + #[structopt(long, default_value = "0")] + delayed_inbox_position: u64, + #[structopt(long, default_value = "0")] + position_within_message: u64, + #[structopt(long)] + last_block_hash: Option, + #[structopt(long)] + last_send_root: Option, + #[structopt(long)] + inbox: Vec, + #[structopt(long)] + delayed_inbox: Vec, + #[structopt(long)] + preimages: Option, + #[structopt(long)] + cranelift: bool, + #[structopt(long)] + forks: bool, + #[structopt(long)] + pub debug: bool, + #[structopt(long)] + pub require_success: bool, + // JSON inputs supercede any of the command-line inputs which could + // be specified in the JSON file. + #[structopt(long)] + json_inputs: Option, +} diff --git a/arbitrator/jit/src/main.rs b/arbitrator/jit/src/main.rs index 6e44500215..e19fabc250 100644 --- a/arbitrator/jit/src/main.rs +++ b/arbitrator/jit/src/main.rs @@ -1,58 +1,13 @@ // Copyright 2022-2024, Offchain Labs, Inc. // For license information, see https://github.com/nitro/blob/master/LICENSE -use crate::machine::{Escape, WasmEnv}; use arbutil::{color, Color}; use eyre::Result; -use std::path::PathBuf; +use jit::machine; +use jit::machine::{Escape, WasmEnv}; +use jit::Opts; use structopt::StructOpt; -mod arbcompress; -mod caller_env; -mod machine; -mod prepare; -mod program; -mod socket; -mod stylus_backend; -mod test; -mod wasip1_stub; -mod wavmio; - -#[derive(StructOpt)] -#[structopt(name = "jit-prover")] -pub struct Opts { - #[structopt(short, long)] - binary: PathBuf, - #[structopt(long, default_value = "0")] - inbox_position: u64, - #[structopt(long, default_value = "0")] - delayed_inbox_position: u64, - #[structopt(long, default_value = "0")] - position_within_message: u64, - #[structopt(long)] - last_block_hash: Option, - #[structopt(long)] - last_send_root: Option, - #[structopt(long)] - inbox: Vec, - #[structopt(long)] - delayed_inbox: Vec, - #[structopt(long)] - preimages: Option, - #[structopt(long)] - cranelift: bool, - #[structopt(long)] - forks: bool, - #[structopt(long)] - debug: bool, - #[structopt(long)] - require_success: bool, - // JSON inputs supercede any of the command-line inputs which could - // be specified in the JSON file. - #[structopt(long)] - json_inputs: Option, -} - fn main() -> Result<()> { let opts = Opts::from_args(); let env = match WasmEnv::cli(&opts) { diff --git a/arbitrator/jit/src/prepare.rs b/arbitrator/jit/src/prepare.rs index e7a7ba0f4d..62dd063b75 100644 --- a/arbitrator/jit/src/prepare.rs +++ b/arbitrator/jit/src/prepare.rs @@ -1,7 +1,7 @@ // Copyright 2022-2024, Offchain Labs, Inc. 
// For license information, see https://github.com/nitro/blob/master/LICENSE -use crate::WasmEnv; +use crate::machine::WasmEnv; use arbutil::{Bytes32, PreimageType}; use eyre::Ok; use prover::parse_input::FileData; diff --git a/arbitrator/jit/src/program.rs b/arbitrator/jit/src/program.rs index f10a059748..d80b3771c6 100644 --- a/arbitrator/jit/src/program.rs +++ b/arbitrator/jit/src/program.rs @@ -4,8 +4,8 @@ #![allow(clippy::too_many_arguments)] use crate::caller_env::JitEnv; -use crate::machine::{Escape, MaybeEscape, WasmEnvMut}; -use crate::stylus_backend::exec_wasm; +use crate::machine::{Escape, MaybeEscape, WasmEnv, WasmEnvMut}; +use crate::stylus_backend::{exec_wasm, MessageFromCothread}; use arbutil::evm::api::Gas; use arbutil::Bytes32; use arbutil::{evm::EvmData, format::DebugBytes, heapify}; @@ -16,6 +16,7 @@ use prover::{ machine::Module, programs::{config::PricingParams, prelude::*}, }; +use std::sync::Arc; const DEFAULT_STYLUS_ARBOS_VERSION: u64 = 31; @@ -130,10 +131,6 @@ pub fn new_program( let evm_data: EvmData = unsafe { *Box::from_raw(evm_data_handler as *mut EvmData) }; let config: JitConfig = unsafe { *Box::from_raw(stylus_config_handler as *mut JitConfig) }; - // buy ink - let pricing = config.stylus.pricing; - let ink = pricing.gas_to_ink(Gas(gas)); - let Some(module) = exec.module_asms.get(&compiled_hash).cloned() else { return Err(Escape::Failure(format!( "module hash {:?} not found in {:?}", @@ -142,6 +139,21 @@ pub fn new_program( ))); }; + exec_program(exec, module, calldata, config, evm_data, gas) +} + +pub fn exec_program( + exec: &mut WasmEnv, + module: Arc<[u8]>, + calldata: Vec, + config: JitConfig, + evm_data: EvmData, + gas: u64, +) -> Result { + // buy ink + let pricing = config.stylus.pricing; + let ink = pricing.gas_to_ink(Gas(gas)); + let cothread = exec_wasm( module, calldata, @@ -162,7 +174,10 @@ pub fn new_program( /// returns request_id for the first request from the program pub fn start_program(mut env: WasmEnvMut, module: u32) -> Result { let (_, exec) = env.jit_env(); + start_program_with_wasm_env(exec, module) +} +pub fn start_program_with_wasm_env(exec: &mut WasmEnv, module: u32) -> Result { if exec.threads.len() as u32 != module || module == 0 { return Escape::hostio(format!( "got request for thread {module} but len is {}", @@ -179,13 +194,18 @@ pub fn start_program(mut env: WasmEnvMut, module: u32) -> Result { /// request_id MUST be last request id returned from start_program or send_response pub fn get_request(mut env: WasmEnvMut, id: u32, len_ptr: GuestPtr) -> Result { let (mut mem, exec) = env.jit_env(); + let msg = get_last_msg(exec, id)?; + mem.write_u32(len_ptr, msg.req_data.len() as u32); + Ok(msg.req_type) +} + +pub fn get_last_msg(exec: &mut WasmEnv, id: u32) -> Result { let thread = exec.threads.last_mut().unwrap(); let msg = thread.last_message()?; if msg.1 != id { return Escape::hostio("get_request id doesn't match"); }; - mem.write_u32(len_ptr, msg.0.req_data.len() as u32); - Ok(msg.0.req_type) + Ok(msg.0) } // gets data associated with last request. 
@@ -193,12 +213,8 @@ pub fn get_request(mut env: WasmEnvMut, id: u32, len_ptr: GuestPtr) -> Result MaybeEscape { let (mut mem, exec) = env.jit_env(); - let thread = exec.threads.last_mut().unwrap(); - let msg = thread.last_message()?; - if msg.1 != id { - return Escape::hostio("get_request id doesn't match"); - }; - mem.write_slice(data_ptr, &msg.0.req_data); + let msg = get_last_msg(exec, id)?; + mem.write_slice(data_ptr, &msg.req_data); Ok(()) } @@ -217,11 +233,21 @@ pub fn set_response( let result = mem.read_slice(result_ptr, result_len as usize); let raw_data = mem.read_slice(raw_data_ptr, raw_data_len as usize); + set_response_with_wasm_env(exec, id, gas, result, raw_data) +} + +pub fn set_response_with_wasm_env( + exec: &mut WasmEnv, + id: u32, + gas: u64, + result: Vec, + raw_data: Vec, +) -> MaybeEscape { let thread = exec.threads.last_mut().unwrap(); thread.set_response(id, result, raw_data, Gas(gas)) } -/// sends previos response +/// sends previous response /// MUST be called right after set_response to the same id /// returns request_id for the next request pub fn send_response(mut env: WasmEnvMut, req_id: u32) -> Result { @@ -239,7 +265,10 @@ pub fn send_response(mut env: WasmEnvMut, req_id: u32) -> Result { /// removes the last created program pub fn pop(mut env: WasmEnvMut) -> MaybeEscape { let (_, exec) = env.jit_env(); + pop_with_wasm_env(exec) +} +pub fn pop_with_wasm_env(exec: &mut WasmEnv) -> MaybeEscape { match exec.threads.pop() { None => Err(Escape::Child(eyre!("no child"))), Some(mut thread) => thread.wait_done(), @@ -247,8 +276,8 @@ pub fn pop(mut env: WasmEnvMut) -> MaybeEscape { } pub struct JitConfig { - stylus: StylusConfig, - compile: CompileConfig, + pub stylus: StylusConfig, + pub compile: CompileConfig, } /// Creates a `StylusConfig` from its component parts. 
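
Note: since `JitConfig`'s `stylus` and `compile` fields are now public, an embedder can build the config directly instead of receiving it through `new_program`'s raw handles. A minimal sketch, assuming the crate paths used elsewhere in this patch; the version, depth, and pricing values are illustrative (they match what the stylus_benchmark tool added later in this patch picks):

```rust
use jit::program::JitConfig;
use prover::programs::{config::PricingParams, prelude::*};

// Illustrative configuration: Stylus version 2, debug-mode compilation.
fn benchmark_config() -> JitConfig {
    JitConfig {
        stylus: StylusConfig {
            version: 2,
            max_depth: 10000,
            pricing: PricingParams { ink_price: 1 },
        },
        compile: CompileConfig::version(2, true),
    }
}
```
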
diff --git a/arbitrator/jit/src/stylus_backend.rs b/arbitrator/jit/src/stylus_backend.rs index 0d8c477c6c..d250780dd9 100644 --- a/arbitrator/jit/src/stylus_backend.rs +++ b/arbitrator/jit/src/stylus_backend.rs @@ -4,6 +4,7 @@ #![allow(clippy::too_many_arguments)] use crate::machine::{Escape, MaybeEscape}; +use arbutil::benchmark::Benchmark; use arbutil::evm::api::{Gas, Ink, VecReader}; use arbutil::evm::{ api::{EvmApiMethod, EVM_API_METHOD_REQ_OFFSET}, @@ -35,6 +36,7 @@ struct MessageToCothread { pub struct MessageFromCothread { pub req_type: u32, pub req_data: Vec, + pub benchmark: Benchmark, } struct CothreadRequestor { @@ -51,6 +53,7 @@ impl RequestHandler for CothreadRequestor { let msg = MessageFromCothread { req_type: req_type as u32 + EVM_API_METHOD_REQ_OFFSET, req_data: req_data.as_ref().to_vec(), + benchmark: Benchmark::default(), }; if let Err(error) = self.tx.send(msg) { @@ -169,6 +172,7 @@ pub fn exec_wasm( let msg = MessageFromCothread { req_data: output, req_type: out_kind as u32, + benchmark: instance.env().benchmark, }; instance .env_mut() diff --git a/arbitrator/stylus/src/env.rs b/arbitrator/stylus/src/env.rs index a153fb5bf1..a2c8189029 100644 --- a/arbitrator/stylus/src/env.rs +++ b/arbitrator/stylus/src/env.rs @@ -2,6 +2,7 @@ // For license information, see https://github.com/nitro/blob/master/LICENSE use arbutil::{ + benchmark::Benchmark, evm::{ api::{DataReader, EvmApi, Ink}, EvmData, @@ -48,6 +49,8 @@ pub struct WasmEnv> { pub compile: CompileConfig, /// The runtime config pub config: Option, + // Used to benchmark execution blocks of code + pub benchmark: Benchmark, // Using the unused generic parameter D in a PhantomData field _data_reader_marker: PhantomData, } @@ -68,6 +71,7 @@ impl> WasmEnv { outs: vec![], memory: None, meter: None, + benchmark: Benchmark::default(), _data_reader_marker: PhantomData, } } diff --git a/arbitrator/stylus/src/host.rs b/arbitrator/stylus/src/host.rs index c72cafc316..67497302a1 100644 --- a/arbitrator/stylus/src/host.rs +++ b/arbitrator/stylus/src/host.rs @@ -5,6 +5,7 @@ use crate::env::{Escape, HostioInfo, MaybeEscape, WasmEnv, WasmEnvMut}; use arbutil::{ + benchmark::Benchmark, evm::{ api::{DataReader, EvmApi, Gas, Ink}, EvmData, @@ -46,6 +47,10 @@ where &self.evm_data } + fn benchmark(&mut self) -> &mut Benchmark { + &mut self.env.benchmark + } + fn evm_return_data_len(&mut self) -> &mut u32 { &mut self.evm_data.return_data_len } @@ -464,3 +469,13 @@ pub(crate) fn console_tee, T: Into + Copy>( } pub(crate) fn null_host>(_: WasmEnvMut) {} + +pub(crate) fn start_benchmark>( + mut env: WasmEnvMut, +) -> MaybeEscape { + hostio!(env, start_benchmark()) +} + +pub(crate) fn end_benchmark>(mut env: WasmEnvMut) -> MaybeEscape { + hostio!(env, end_benchmark()) +} diff --git a/arbitrator/stylus/src/native.rs b/arbitrator/stylus/src/native.rs index 0fbdb342f3..a31df1034c 100644 --- a/arbitrator/stylus/src/native.rs +++ b/arbitrator/stylus/src/native.rs @@ -212,6 +212,8 @@ impl> NativeInstance { imports.define("console", "tee_f32", func!(host::console_tee::)); imports.define("console", "tee_f64", func!(host::console_tee::)); imports.define("debug", "null_host", func!(host::null_host)); + imports.define("debug", "start_benchmark", func!(host::start_benchmark)); + imports.define("debug", "end_benchmark", func!(host::end_benchmark)); } let instance = Instance::new(&mut store, &module, &imports)?; let exports = &instance.exports; @@ -429,6 +431,8 @@ pub fn module(wasm: &[u8], compile: CompileConfig, target: Target) -> Result) { + let _ = match 
str::from_utf8(req_data) { + Ok(v) => v, + Err(e) => panic!("Invalid UTF-8 sequence: {}", e), + }; + + match req_type { + 0 => return, + 1 => panic!("ErrExecutionReverted user revert"), + 2 => panic!("ErrExecutionReverted user failure"), + 3 => panic!("ErrOutOfGas user out of ink"), + 4 => panic!("ErrDepth user out of stack"), + _ => panic!("ErrExecutionReverted user unknown"), + } +} + +fn run(compiled_module: Vec) -> (Duration, Ink) { + let calldata = Vec::from([0u8; 32]); + let evm_data = EvmData::default(); + let config = JitConfig { + stylus: StylusConfig { + version: 2, + max_depth: 10000, + pricing: PricingParams { ink_price: 1 }, + }, + compile: CompileConfig::version(2, true), + }; + + let exec = &mut WasmEnv::default(); + + let module = jit::program::exec_program( + exec, + compiled_module.into(), + calldata, + config, + evm_data, + u64::MAX, + ) + .unwrap(); + + let req_id = jit::program::start_program_with_wasm_env(exec, module).unwrap(); + let msg = jit::program::get_last_msg(exec, req_id).unwrap(); + if msg.req_type < EVM_API_METHOD_REQ_OFFSET { + let _ = jit::program::pop_with_wasm_env(exec); + + let req_data = msg.req_data[8..].to_vec(); + check_result(msg.req_type, &req_data); + } else { + panic!("unsupported request type {:?}", msg.req_type); + } + + (msg.benchmark.elapsed_total, msg.benchmark.ink_total) +} + +pub fn benchmark(wat: Vec) -> eyre::Result<()> { + let wasm = wasmer::wat2wasm(&wat)?; + + let compiled_module = native::compile(&wasm, 2, true, Target::default())?; + + let mut durations: Vec = Vec::new(); + let mut ink_spent = Ink(0); + for i in 0..NUMBER_OF_BENCHMARK_RUNS { + print!("Run {:?}, ", i); + let (duration_run, ink_spent_run) = run(compiled_module.clone()); + durations.push(duration_run); + ink_spent = ink_spent_run; + println!( + "duration: {:?}, ink_spent: {:?}", + duration_run, ink_spent_run + ); + } + + // discard top and bottom runs + durations.sort(); + let l = NUMBER_OF_TOP_AND_BOTTOM_RUNS_TO_DISCARD as usize; + let r = NUMBER_OF_BENCHMARK_RUNS as usize - NUMBER_OF_TOP_AND_BOTTOM_RUNS_TO_DISCARD as usize; + durations = durations[l..r].to_vec(); + + let avg_duration = durations.iter().sum::() / (r - l) as u32; + let avg_ink_spent_per_micro_second = ink_spent.0 / avg_duration.as_micros() as u64; + println!("After discarding top and bottom runs: "); + println!( + "avg_duration: {:?}, avg_ink_spent_per_micro_second: {:?}", + avg_duration, avg_ink_spent_per_micro_second + ); + + Ok(()) +} diff --git a/arbitrator/tools/stylus_benchmark/src/main.rs b/arbitrator/tools/stylus_benchmark/src/main.rs new file mode 100644 index 0000000000..4b8971ecab --- /dev/null +++ b/arbitrator/tools/stylus_benchmark/src/main.rs @@ -0,0 +1,44 @@ +// Copyright 2021-2024, Offchain Labs, Inc. 
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE + +mod benchmark; +mod scenario; + +use clap::Parser; +use scenario::Scenario; +use std::path::PathBuf; +use strum::IntoEnumIterator; + +#[derive(Parser, Debug)] +#[command(version, about, long_about = None)] +struct Args { + #[arg(short, long)] + output_wat_dir_path: Option, + + #[arg(short, long)] + scenario: Option, +} + +fn handle_scenario(scenario: Scenario, output_wat_dir_path: Option) -> eyre::Result<()> { + println!("Benchmarking {}", scenario); + let wat = scenario::generate_wat(scenario, output_wat_dir_path); + benchmark::benchmark(wat) +} + +fn main() -> eyre::Result<()> { + let args = Args::parse(); + + match args.scenario { + Some(scenario) => handle_scenario(scenario, args.output_wat_dir_path), + None => { + println!("No scenario specified, benchmarking all scenarios\n"); + for scenario in Scenario::iter() { + let benchmark_result = handle_scenario(scenario, args.output_wat_dir_path.clone()); + if let Err(err) = benchmark_result { + return Err(err); + } + } + Ok(()) + } + } +} diff --git a/arbitrator/tools/stylus_benchmark/src/scenario.rs b/arbitrator/tools/stylus_benchmark/src/scenario.rs new file mode 100644 index 0000000000..348678ed69 --- /dev/null +++ b/arbitrator/tools/stylus_benchmark/src/scenario.rs @@ -0,0 +1,128 @@ +// Copyright 2021-2024, Offchain Labs, Inc. +// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE + +use std::fs::File; +use std::io::Write; +use std::path::PathBuf; +use strum_macros::{Display, EnumIter, EnumString}; + +#[derive(Copy, Clone, PartialEq, Eq, Debug, EnumString, Display, EnumIter)] +pub enum Scenario { + #[strum(serialize = "add_i32")] + AddI32, + #[strum(serialize = "xor_i32")] + XorI32, +} + +// Programs to be benchmarked have a loop in which several similar operations are executed. +// The number of operations per loop is chosen to be large enough so the overhead related to the loop is negligible, +// but not too large to avoid a big program size. +// Keeping a small program size is important to better use CPU cache, trying to keep the code in the cache. 
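
For reference, the helpers below emit a module of this shape for the add_i32 scenario (the 2000-op body is abbreviated here; 200,000 iterations of 2,000 ops each gives the 400,000,000 loop bound):

```wat
(module
  (import "debug" "start_benchmark" (func $start_benchmark))
  (import "debug" "end_benchmark" (func $end_benchmark))
  (memory (export "memory") 0 0)
  (global $ops_counter (mut i32) (i32.const 0))
  (func (export "user_entrypoint") (param i32) (result i32)
    call $start_benchmark
    (loop $loop
      i32.const 0
      ;; ... "i32.const 1" / "i32.add" repeated 2000 times ...
      drop
      global.get $ops_counter
      i32.const 2000
      i32.add
      global.set $ops_counter
      global.get $ops_counter
      i32.const 400000000
      i32.lt_s
      br_if $loop)
    call $end_benchmark
    i32.const 0)
)
```
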
+ +fn write_wat_beginning(wat: &mut Vec) { + wat.write_all(b"(module\n").unwrap(); + wat.write_all(b" (import \"debug\" \"start_benchmark\" (func $start_benchmark))\n") + .unwrap(); + wat.write_all(b" (import \"debug\" \"end_benchmark\" (func $end_benchmark))\n") + .unwrap(); + wat.write_all(b" (memory (export \"memory\") 0 0)\n") + .unwrap(); + wat.write_all(b" (global $ops_counter (mut i32) (i32.const 0))\n") + .unwrap(); + wat.write_all(b" (func (export \"user_entrypoint\") (param i32) (result i32)\n") + .unwrap(); + + wat.write_all(b" call $start_benchmark\n").unwrap(); + + wat.write_all(b" (loop $loop\n").unwrap(); +} + +fn write_wat_end( + wat: &mut Vec, + number_of_loop_iterations: usize, + number_of_ops_per_loop_iteration: usize, +) { + let number_of_ops = number_of_loop_iterations * number_of_ops_per_loop_iteration; + + // update ops_counter + wat.write_all(b" global.get $ops_counter\n") + .unwrap(); + wat.write_all( + format!( + " i32.const {}\n", + number_of_ops_per_loop_iteration + ) + .as_bytes(), + ) + .unwrap(); + wat.write_all(b" i32.add\n").unwrap(); + wat.write_all(b" global.set $ops_counter\n") + .unwrap(); + + // check if we need to continue looping + wat.write_all(b" global.get $ops_counter\n") + .unwrap(); + wat.write_all(format!(" i32.const {}\n", number_of_ops).as_bytes()) + .unwrap(); + wat.write_all(b" i32.lt_s\n").unwrap(); + wat.write_all(b" br_if $loop)\n").unwrap(); + + wat.write_all(b" call $end_benchmark\n").unwrap(); + + wat.write_all(b" i32.const 0)\n").unwrap(); + wat.write_all(b")").unwrap(); +} + +fn wat(write_wat_ops: fn(&mut Vec, usize)) -> Vec { + let number_of_loop_iterations = 200_000; + let number_of_ops_per_loop_iteration = 2000; + + let mut wat = Vec::new(); + + write_wat_beginning(&mut wat); + + write_wat_ops(&mut wat, number_of_ops_per_loop_iteration); + + write_wat_end( + &mut wat, + number_of_loop_iterations, + number_of_ops_per_loop_iteration, + ); + + wat.to_vec() +} + +fn write_add_i32_wat_ops(wat: &mut Vec, number_of_ops_per_loop_iteration: usize) { + wat.write_all(b" i32.const 0\n").unwrap(); + for _ in 0..number_of_ops_per_loop_iteration { + wat.write_all(b" i32.const 1\n").unwrap(); + wat.write_all(b" i32.add\n").unwrap(); + } + wat.write_all(b" drop\n").unwrap(); +} + +fn write_xor_i32_wat_ops(wat: &mut Vec, number_of_ops_per_loop_iteration: usize) { + wat.write_all(b" i32.const 1231\n").unwrap(); + for _ in 0..number_of_ops_per_loop_iteration { + wat.write_all(b" i32.const 12312313\n").unwrap(); + wat.write_all(b" i32.xor\n").unwrap(); + } + wat.write_all(b" drop\n").unwrap(); +} + +pub fn generate_wat(scenario: Scenario, output_wat_dir_path: Option) -> Vec { + let wat = match scenario { + Scenario::AddI32 => wat(write_add_i32_wat_ops), + Scenario::XorI32 => wat(write_xor_i32_wat_ops), + }; + + // print wat to file if needed + if let Some(output_wat_dir_path) = output_wat_dir_path { + let mut output_wat_path = output_wat_dir_path; + output_wat_path.push(format!("{}.wat", scenario)); + let mut file = File::create(output_wat_path).unwrap(); + file.write_all(&wat).unwrap(); + } + + wat +} diff --git a/arbitrator/wasm-libraries/user-host-trait/src/lib.rs b/arbitrator/wasm-libraries/user-host-trait/src/lib.rs index 2f410849fc..25163e25bc 100644 --- a/arbitrator/wasm-libraries/user-host-trait/src/lib.rs +++ b/arbitrator/wasm-libraries/user-host-trait/src/lib.rs @@ -2,6 +2,7 @@ // For license information, see https://github.com/nitro/blob/master/LICENSE use arbutil::{ + benchmark::Benchmark, crypto, evm::{ self, @@ -21,6 +22,7 @@ use 
prover::{ }; use ruint2::Uint; use std::fmt::Display; +use std::time::Instant; macro_rules! be { ($int:expr) => { @@ -68,6 +70,7 @@ pub trait UserHost: GasMeteredMachine { fn evm_api(&mut self) -> &mut Self::A; fn evm_data(&self) -> &EvmData; + fn benchmark(&mut self) -> &mut Benchmark; fn evm_return_data_len(&mut self) -> &mut u32; fn read_slice(&self, ptr: GuestPtr, len: u32) -> Result, Self::MemoryErr>; @@ -962,4 +965,38 @@ pub trait UserHost: GasMeteredMachine { self.say(value.into()); Ok(value) } + + // Initializes benchmark data related to a code block. + // A code block is defined by the instructions between start_benchmark and end_benchmark calls. + // If start_benchmark is called multiple times without end_benchmark being called, + // then only the last start_benchmark before end_benchmark will be used. + // It is possible to have multiple code blocks benchmarked in the same program. + fn start_benchmark(&mut self) -> Result<(), Self::Err> { + let ink_curr = self.ink_ready()?; + + let benchmark = self.benchmark(); + benchmark.timer = Some(Instant::now()); + benchmark.ink_start = Some(ink_curr); + + Ok(()) + } + + // Updates cumulative benchmark data related to a code block. + // If end_benchmark is called without a corresponding start_benchmark nothing will happen. + fn end_benchmark(&mut self) -> Result<(), Self::Err> { + let ink_curr = self.ink_ready()?; + + let benchmark = self.benchmark(); + if let Some(timer) = benchmark.timer { + benchmark.elapsed_total = benchmark.elapsed_total.saturating_add(timer.elapsed()); + + let code_block_ink = benchmark.ink_start.unwrap().saturating_sub(ink_curr); + benchmark.ink_total = benchmark.ink_total.saturating_add(code_block_ink); + + benchmark.timer = None; + benchmark.ink_start = None; + }; + + Ok(()) + } } diff --git a/arbitrator/wasm-libraries/user-host/src/program.rs b/arbitrator/wasm-libraries/user-host/src/program.rs index 7b3782b2e5..a2973ce56f 100644 --- a/arbitrator/wasm-libraries/user-host/src/program.rs +++ b/arbitrator/wasm-libraries/user-host/src/program.rs @@ -2,6 +2,7 @@ // For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE use arbutil::{ + benchmark::Benchmark, evm::{ api::{EvmApiMethod, Gas, Ink, VecReader, EVM_API_METHOD_REQ_OFFSET}, req::{EvmApiRequestor, RequestHandler}, @@ -75,6 +76,8 @@ pub(crate) struct Program { pub evm_api: EvmApiRequestor, /// EVM Context info. pub evm_data: EvmData, + // Used to benchmark execution blocks of code + pub benchmark: Benchmark, /// WAVM module index. pub module: u32, /// Call configuration. 
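
The start_benchmark/end_benchmark hostios above implement a small accumulator: start records a timer plus the current ink, and end folds the elapsed time and consumed ink into running totals before clearing both markers, which is why an unmatched end_benchmark is a no-op and a repeated start_benchmark simply restarts the measurement. A self-contained sketch of the same accounting, with `Ink` reduced to a plain u64 for illustration:

```rust
use std::time::{Duration, Instant};

type Ink = u64; // stands in for arbutil's ink newtype

#[derive(Default)]
struct Benchmark {
    timer: Option<Instant>,  // set by start, cleared by end
    elapsed_total: Duration, // accumulated across all measured blocks
    ink_start: Option<Ink>,  // ink remaining when the current block started
    ink_total: Ink,          // accumulated ink consumed inside measured blocks
}

impl Benchmark {
    fn start(&mut self, ink_now: Ink) {
        // A second start before an end simply restarts the measurement.
        self.timer = Some(Instant::now());
        self.ink_start = Some(ink_now);
    }
    fn end(&mut self, ink_now: Ink) {
        // Without a matching start this is a no-op.
        if let Some(timer) = self.timer.take() {
            self.elapsed_total += timer.elapsed();
            self.ink_total += self.ink_start.take().unwrap().saturating_sub(ink_now);
        }
    }
}

fn main() {
    let mut b = Benchmark::default();
    b.start(1_000);
    // ...measured work burns ink...
    b.end(400);
    assert_eq!(b.ink_total, 600);
    println!("elapsed: {:?}, ink: {}", b.elapsed_total, b.ink_total);
}
```
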
@@ -167,6 +170,7 @@ impl Program { outs: vec![], evm_api: EvmApiRequestor::new(UserHostRequester::default()), evm_data, + benchmark: Benchmark::default(), module, config, early_exit: None, @@ -237,6 +241,10 @@ impl UserHost for Program { &self.evm_data } + fn benchmark(&mut self) -> &mut Benchmark { + &mut self.benchmark + } + fn evm_return_data_len(&mut self) -> &mut u32 { &mut self.evm_data.return_data_len } diff --git a/arbitrator/wasm-libraries/user-test/src/program.rs b/arbitrator/wasm-libraries/user-test/src/program.rs index 299fca08c3..99252a38f0 100644 --- a/arbitrator/wasm-libraries/user-test/src/program.rs +++ b/arbitrator/wasm-libraries/user-test/src/program.rs @@ -3,6 +3,7 @@ use crate::{ARGS, EVER_PAGES, EVM_DATA, KEYS, LOGS, OPEN_PAGES, OUTS}; use arbutil::{ + benchmark::Benchmark, evm::{ api::{EvmApi, Gas, Ink, VecReader}, user::UserOutcomeKind, @@ -28,6 +29,7 @@ impl From for eyre::ErrReport { /// Mock type representing a `user_host::Program` pub struct Program { evm_api: MockEvmApi, + benchmark: Benchmark, } #[allow(clippy::unit_arg)] @@ -52,6 +54,10 @@ impl UserHost for Program { &EVM_DATA } + fn benchmark(&mut self) -> &mut Benchmark { + &mut self.benchmark + } + fn evm_return_data_len(&mut self) -> &mut u32 { unimplemented!() } @@ -91,6 +97,7 @@ impl Program { pub fn current() -> Self { Self { evm_api: MockEvmApi, + benchmark: Benchmark::default(), } } diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 45bd70c92b..70c5952042 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -1157,7 +1157,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) if err != nil { return false, err } - if arbOSVersion >= 20 { + if arbOSVersion >= params.ArbosVersion_20 { if config.IgnoreBlobPrice { use4844 = true } else { diff --git a/arbnode/delayed_sequencer.go b/arbnode/delayed_sequencer.go index abd24dbd12..235a747446 100644 --- a/arbnode/delayed_sequencer.go +++ b/arbnode/delayed_sequencer.go @@ -9,6 +9,7 @@ import ( "fmt" "math/big" "sync" + "time" flag "github.com/spf13/pflag" @@ -30,16 +31,17 @@ type DelayedSequencer struct { reader *InboxReader exec execution.ExecutionSequencer coordinator *SeqCoordinator - waitingForFinalizedBlock uint64 + waitingForFinalizedBlock *uint64 mutex sync.Mutex config DelayedSequencerConfigFetcher } type DelayedSequencerConfig struct { - Enable bool `koanf:"enable" reload:"hot"` - FinalizeDistance int64 `koanf:"finalize-distance" reload:"hot"` - RequireFullFinality bool `koanf:"require-full-finality" reload:"hot"` - UseMergeFinality bool `koanf:"use-merge-finality" reload:"hot"` + Enable bool `koanf:"enable" reload:"hot"` + FinalizeDistance int64 `koanf:"finalize-distance" reload:"hot"` + RequireFullFinality bool `koanf:"require-full-finality" reload:"hot"` + UseMergeFinality bool `koanf:"use-merge-finality" reload:"hot"` + RescanInterval time.Duration `koanf:"rescan-interval" reload:"hot"` } type DelayedSequencerConfigFetcher func() *DelayedSequencerConfig @@ -49,6 +51,7 @@ func DelayedSequencerConfigAddOptions(prefix string, f *flag.FlagSet) { f.Int64(prefix+".finalize-distance", DefaultDelayedSequencerConfig.FinalizeDistance, "how many blocks in the past L1 block is considered final (ignored when using Merge finality)") f.Bool(prefix+".require-full-finality", DefaultDelayedSequencerConfig.RequireFullFinality, "whether to wait for full finality before sequencing delayed messages") f.Bool(prefix+".use-merge-finality", DefaultDelayedSequencerConfig.UseMergeFinality, "whether to use The 
Merge's notion of finality before sequencing delayed messages") + f.Duration(prefix+".rescan-interval", DefaultDelayedSequencerConfig.RescanInterval, "frequency to rescan for new delayed messages (the parent chain reader's poll-interval config is more important than this)") } var DefaultDelayedSequencerConfig = DelayedSequencerConfig{ @@ -56,6 +59,7 @@ var DefaultDelayedSequencerConfig = DelayedSequencerConfig{ FinalizeDistance: 20, RequireFullFinality: false, UseMergeFinality: true, + RescanInterval: time.Second, } var TestDelayedSequencerConfig = DelayedSequencerConfig{ @@ -63,6 +67,7 @@ var TestDelayedSequencerConfig = DelayedSequencerConfig{ FinalizeDistance: 20, RequireFullFinality: false, UseMergeFinality: false, + RescanInterval: time.Millisecond * 100, } func NewDelayedSequencer(l1Reader *headerreader.HeaderReader, reader *InboxReader, exec execution.ExecutionSequencer, coordinator *SeqCoordinator, config DelayedSequencerConfigFetcher) (*DelayedSequencer, error) { @@ -126,13 +131,12 @@ func (d *DelayedSequencer) sequenceWithoutLockout(ctx context.Context, lastBlock finalized = uint64(currentNum - config.FinalizeDistance) } - if d.waitingForFinalizedBlock > finalized { + if d.waitingForFinalizedBlock != nil && *d.waitingForFinalizedBlock > finalized { return nil } - // Unless we find an unfinalized message (which sets waitingForBlock), - // we won't find a new finalized message until FinalizeDistance blocks in the future. - d.waitingForFinalizedBlock = lastBlockHeader.Number.Uint64() + 1 + // Reset what block we're waiting for if we've caught up + d.waitingForFinalizedBlock = nil dbDelayedCount, err := d.inbox.GetDelayedCount() if err != nil { @@ -153,8 +157,8 @@ func (d *DelayedSequencer) sequenceWithoutLockout(ctx context.Context, lastBlock return err } if parentChainBlockNumber > finalized { - // Message isn't finalized yet; stop here - d.waitingForFinalizedBlock = parentChainBlockNumber + // Message isn't finalized yet; wait for it to be + d.waitingForFinalizedBlock = &parentChainBlockNumber break } if lastDelayedAcc != (common.Hash{}) { @@ -216,20 +220,40 @@ func (d *DelayedSequencer) run(ctx context.Context) { headerChan, cancel := d.l1Reader.Subscribe(false) defer cancel() + latestHeader, err := d.l1Reader.LastHeader(ctx) + if err != nil { + log.Warn("delayed sequencer: failed to get latest header", "err", err) + latestHeader = nil + } + rescanTimer := time.NewTimer(d.config().RescanInterval) for { + if !rescanTimer.Stop() { + select { + case <-rescanTimer.C: + default: + } + } + if latestHeader != nil { + rescanTimer.Reset(d.config().RescanInterval) + } + var ok bool select { - case nextHeader, ok := <-headerChan: + case latestHeader, ok = <-headerChan: if !ok { - log.Info("delayed sequencer: header channel close") + log.Debug("delayed sequencer: header channel close") return } - if err := d.trySequence(ctx, nextHeader); err != nil { - log.Error("Delayed sequencer error", "err", err) + case <-rescanTimer.C: + if latestHeader == nil { + continue } case <-ctx.Done(): - log.Info("delayed sequencer: context done", "err", ctx.Err()) + log.Debug("delayed sequencer: context done", "err", ctx.Err()) return } + if err := d.trySequence(ctx, latestHeader); err != nil { + log.Error("Delayed sequencer error", "err", err) + } } } diff --git a/arbnode/message_pruner.go b/arbnode/message_pruner.go index 840a15f328..08f568796d 100644 --- a/arbnode/message_pruner.go +++ b/arbnode/message_pruner.go @@ -46,7 +46,7 @@ type MessagePrunerConfigFetcher func() *MessagePrunerConfig var 
DefaultMessagePrunerConfig = MessagePrunerConfig{ Enable: true, PruneInterval: time.Minute, - MinBatchesLeft: 2, + MinBatchesLeft: 1000, } func MessagePrunerConfigAddOptions(prefix string, f *flag.FlagSet) { diff --git a/arbos/addressSet/addressSet.go b/arbos/addressSet/addressSet.go index 4bb87e614d..ccd780aa11 100644 --- a/arbos/addressSet/addressSet.go +++ b/arbos/addressSet/addressSet.go @@ -9,6 +9,7 @@ import ( "errors" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/storage" "github.com/offchainlabs/nitro/arbos/util" @@ -185,7 +186,7 @@ func (as *AddressSet) Remove(addr common.Address, arbosVersion uint64) error { if err != nil { return err } - if arbosVersion >= 11 { + if arbosVersion >= params.ArbosVersion_11 { err = as.byAddress.Set(atSize, util.UintToHash(slot)) if err != nil { return err diff --git a/arbos/arbosState/arbosstate.go b/arbos/arbosState/arbosstate.go index a3d1ae8386..5ee070f942 100644 --- a/arbos/arbosState/arbosstate.go +++ b/arbos/arbosState/arbosstate.go @@ -63,9 +63,6 @@ type ArbosState struct { Burner burn.Burner } -const MaxArbosVersionSupported uint64 = params.ArbosVersion_StylusChargingFixes -const MaxDebugArbosVersionSupported uint64 = params.ArbosVersion_StylusChargingFixes - var ErrUninitializedArbOS = errors.New("ArbOS uninitialized") var ErrAlreadyInitialized = errors.New("ArbOS is already initialized") @@ -205,7 +202,7 @@ func InitializeArbosState(stateDB vm.StateDB, burner burn.Burner, chainConfig *p _ = sto.SetUint64ByUint64(uint64(versionOffset), 1) // initialize to version 1; upgrade at end of this func if needed _ = sto.SetUint64ByUint64(uint64(upgradeVersionOffset), 0) _ = sto.SetUint64ByUint64(uint64(upgradeTimestampOffset), 0) - if desiredArbosVersion >= 2 { + if desiredArbosVersion >= params.ArbosVersion_2 { _ = sto.SetByUint64(uint64(networkFeeAccountOffset), util.AddressToHash(initialChainOwner)) } else { _ = sto.SetByUint64(uint64(networkFeeAccountOffset), common.Hash{}) // the 0 address until an owner sets it @@ -217,7 +214,7 @@ func InitializeArbosState(stateDB vm.StateDB, burner burn.Burner, chainConfig *p _ = sto.SetUint64ByUint64(uint64(brotliCompressionLevelOffset), 0) // default brotliCompressionLevel for fast compression is 0 initialRewardsRecipient := l1pricing.BatchPosterAddress - if desiredArbosVersion >= 2 { + if desiredArbosVersion >= params.ArbosVersion_2 { initialRewardsRecipient = initialChainOwner } _ = l1pricing.InitializeL1PricingState(sto.OpenCachedSubStorage(l1PricingSubspace), initialRewardsRecipient, initMessage.InitialL1BaseFee) @@ -274,29 +271,29 @@ func (state *ArbosState) UpgradeArbosVersion( nextArbosVersion := state.arbosVersion + 1 switch nextArbosVersion { - case 2: + case params.ArbosVersion_2: ensure(state.l1PricingState.SetLastSurplus(common.Big0, 1)) - case 3: + case params.ArbosVersion_3: ensure(state.l1PricingState.SetPerBatchGasCost(0)) ensure(state.l1PricingState.SetAmortizedCostCapBips(math.MaxUint64)) - case 4: + case params.ArbosVersion_4: // no state changes needed - case 5: + case params.ArbosVersion_5: // no state changes needed - case 6: + case params.ArbosVersion_6: // no state changes needed - case 7: + case params.ArbosVersion_7: // no state changes needed - case 8: + case params.ArbosVersion_8: // no state changes needed - case 9: + case params.ArbosVersion_9: // no state changes needed - case 10: + case params.ArbosVersion_10: ensure(state.l1PricingState.SetL1FeesAvailable(stateDB.GetBalance( 
l1pricing.L1PricerFundsPoolAddress, ).ToBig())) - case 11: + case params.ArbosVersion_11: // Update the PerBatchGasCost to a more accurate value compared to the old v6 default. ensure(state.l1PricingState.SetPerBatchGasCost(l1pricing.InitialPerBatchGasCostV12)) @@ -316,23 +313,23 @@ func (state *ArbosState) UpgradeArbosVersion( case 12, 13, 14, 15, 16, 17, 18, 19: // these versions are left to Orbit chains for custom upgrades. - case 20: + case params.ArbosVersion_20: // Update Brotli compression level for fast compression from 0 to 1 ensure(state.SetBrotliCompressionLevel(1)) case 21, 22, 23, 24, 25, 26, 27, 28, 29: // these versions are left to Orbit chains for custom upgrades. - case 30: + case params.ArbosVersion_30: programs.Initialize(state.backingStorage.OpenSubStorage(programsSubspace)) - case 31: + case params.ArbosVersion_31: params, err := state.Programs().Params() ensure(err) ensure(params.UpgradeToVersion(2)) ensure(params.Save()) - case 32: + case params.ArbosVersion_32: // no change state needed default: @@ -353,8 +350,8 @@ func (state *ArbosState) UpgradeArbosVersion( state.arbosVersion = nextArbosVersion } - if firstTime && upgradeTo >= 6 { - if upgradeTo < 11 { + if firstTime && upgradeTo >= params.ArbosVersion_6 { + if upgradeTo < params.ArbosVersion_11 { state.Restrict(state.l1PricingState.SetPerBatchGasCost(l1pricing.InitialPerBatchGasCostV6)) } state.Restrict(state.l1PricingState.SetEquilibrationUnits(l1pricing.InitialEquilibrationUnitsV6)) diff --git a/arbos/blockhash/blockhash.go b/arbos/blockhash/blockhash.go index ff29bbca9a..df5078fd2c 100644 --- a/arbos/blockhash/blockhash.go +++ b/arbos/blockhash/blockhash.go @@ -8,6 +8,7 @@ import ( "errors" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/storage" ) @@ -56,7 +57,7 @@ func (bh *Blockhashes) RecordNewL1Block(number uint64, blockHash common.Hash, ar // fill in hashes for any "skipped over" blocks nextNumber++ var nextNumBuf [8]byte - if arbosVersion >= 8 { + if arbosVersion >= params.ArbosVersion_8 { binary.LittleEndian.PutUint64(nextNumBuf[:], nextNumber) } diff --git a/arbos/blockhash/blockhash_test.go b/arbos/blockhash/blockhash_test.go index c7cc04d966..8dec2181a3 100644 --- a/arbos/blockhash/blockhash_test.go +++ b/arbos/blockhash/blockhash_test.go @@ -8,6 +8,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/burn" "github.com/offchainlabs/nitro/arbos/storage" @@ -15,7 +16,7 @@ import ( ) func TestBlockhash(t *testing.T) { - arbosVersion := uint64(8) + arbosVersion := params.ArbosVersion_8 sto := storage.NewMemoryBacked(burn.NewSystemBurner(nil, false)) InitializeBlockhashes(sto) diff --git a/arbos/internal_tx.go b/arbos/internal_tx.go index 64dede6290..0ecdfe74cf 100644 --- a/arbos/internal_tx.go +++ b/arbos/internal_tx.go @@ -12,6 +12,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/arbosState" "github.com/offchainlabs/nitro/arbos/util" @@ -56,11 +57,11 @@ func ApplyInternalTxUpdate(tx *types.ArbitrumInternalTx, state *arbosState.Arbos l1BlockNumber := util.SafeMapGet[uint64](inputs, "l1BlockNumber") timePassed := util.SafeMapGet[uint64](inputs, "timePassed") - if state.ArbOSVersion() < 3 { + if state.ArbOSVersion() < params.ArbosVersion_3 { // 
(incorrectly) use the L2 block number instead timePassed = util.SafeMapGet[uint64](inputs, "l2BlockNumber") } - if state.ArbOSVersion() < 8 { + if state.ArbOSVersion() < params.ArbosVersion_8 { // in old versions we incorrectly used an L1 block number one too high l1BlockNumber++ } diff --git a/arbos/l1pricing/l1PricingOldVersions.go b/arbos/l1pricing/l1PricingOldVersions.go index 1377351af3..e4cbf5e1b3 100644 --- a/arbos/l1pricing/l1PricingOldVersions.go +++ b/arbos/l1pricing/l1PricingOldVersions.go @@ -9,6 +9,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/util" am "github.com/offchainlabs/nitro/util/arbmath" @@ -24,7 +25,7 @@ func (ps *L1PricingState) _preversion10_UpdateForBatchPosterSpending( l1Basefee *big.Int, scenario util.TracingScenario, ) error { - if arbosVersion < 2 { + if arbosVersion < params.ArbosVersion_2 { return ps._preVersion2_UpdateForBatchPosterSpending(statedb, evm, updateTime, currentTime, batchPoster, weiSpent, scenario) } @@ -69,7 +70,7 @@ func (ps *L1PricingState) _preversion10_UpdateForBatchPosterSpending( } // impose cap on amortized cost, if there is one - if arbosVersion >= 3 { + if arbosVersion >= params.ArbosVersion_3 { amortizedCostCapBips, err := ps.AmortizedCostCapBips() if err != nil { return err diff --git a/arbos/l1pricing/l1pricing.go b/arbos/l1pricing/l1pricing.go index 37dae08c33..195df3708c 100644 --- a/arbos/l1pricing/l1pricing.go +++ b/arbos/l1pricing/l1pricing.go @@ -8,7 +8,6 @@ import ( "errors" "fmt" "math/big" - "sync/atomic" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" @@ -216,7 +215,7 @@ func (ps *L1PricingState) LastSurplus() (*big.Int, error) { } func (ps *L1PricingState) SetLastSurplus(val *big.Int, arbosVersion uint64) error { - if arbosVersion < 7 { + if arbosVersion < params.ArbosVersion_7 { return ps.lastSurplus.Set_preVersion7(val) } return ps.lastSurplus.SetSaturatingWithWarning(val, "L1 pricer last surplus") @@ -309,7 +308,7 @@ func (ps *L1PricingState) UpdateForBatchPosterSpending( l1Basefee *big.Int, scenario util.TracingScenario, ) error { - if arbosVersion < 10 { + if arbosVersion < params.ArbosVersion_10 { return ps._preversion10_UpdateForBatchPosterSpending(statedb, evm, arbosVersion, updateTime, currentTime, batchPoster, weiSpent, l1Basefee, scenario) } @@ -359,7 +358,7 @@ func (ps *L1PricingState) UpdateForBatchPosterSpending( } // impose cap on amortized cost, if there is one - if arbosVersion >= 3 { + if arbosVersion >= params.ArbosVersion_3 { amortizedCostCapBips, err := ps.AmortizedCostCapBips() if err != nil { return err @@ -520,10 +519,13 @@ func (ps *L1PricingState) GetPosterInfo(tx *types.Transaction, poster common.Add if poster != BatchPosterAddress { return common.Big0, 0 } - units := atomic.LoadUint64(&tx.CalldataUnits) - if units == 0 { + var units uint64 + if cachedUnits := tx.GetCachedCalldataUnits(brotliCompressionLevel); cachedUnits != nil { + units = *cachedUnits + } else { + // The cache is empty or invalid, so we need to compute the calldata units units = ps.getPosterUnitsWithoutCache(tx, poster, brotliCompressionLevel) - atomic.StoreUint64(&tx.CalldataUnits, units) + tx.SetCachedCalldataUnits(brotliCompressionLevel, units) } // Approximate the l1 fee charged for posting this tx's calldata diff --git a/arbos/programs/native.go b/arbos/programs/native.go index cfc1170c5b..a996d50d8a 100644 --- a/arbos/programs/native.go +++ 
b/arbos/programs/native.go @@ -109,7 +109,7 @@ func activateProgramInternal( (*u64)(gasLeft), )) - module, msg, err := status_mod.toResult(output.intoBytes(), debug) + module, msg, err := status_mod.toResult(rustBytesIntoBytes(output), debug) if err != nil { if debug { log.Warn("activation failed", "err", err, "msg", msg, "program", addressForLogging) @@ -119,7 +119,7 @@ func activateProgramInternal( } return nil, nil, err } - hash := moduleHash.toHash() + hash := bytes32ToHash(moduleHash) targets := db.Database().WasmTargets() type result struct { target ethdb.WasmTarget @@ -141,7 +141,7 @@ func activateProgramInternal( goSlice([]byte(target)), output, ) - asm := output.intoBytes() + asm := rustBytesIntoBytes(output) if status_asm != 0 { results <- result{target, nil, fmt.Errorf("%w: %s", ErrProgramActivation, string(asm))} return @@ -279,7 +279,7 @@ func callProgram( )) depth := interpreter.Depth() - data, msg, err := status.toResult(output.intoBytes(), debug) + data, msg, err := status.toResult(rustBytesIntoBytes(output), debug) if status == userFailure && debug { log.Warn("program failure", "err", err, "msg", msg, "program", address, "depth", depth) } @@ -292,7 +292,7 @@ func callProgram( //export handleReqImpl func handleReqImpl(apiId usize, req_type u32, data *rustSlice, costPtr *u64, out_response *C.GoSliceData, out_raw_data *C.GoSliceData) { api := getApi(apiId) - reqData := data.read() + reqData := readRustSlice(data) reqType := RequestType(req_type - EvmApiMethodReqOffset) response, raw_data, cost := api.handler(reqType, reqData) *costPtr = u64(cost) @@ -418,14 +418,14 @@ func SetTarget(name ethdb.WasmTarget, description string, native bool) error { cbool(native), )) if status != userSuccess { - msg := arbutil.ToStringOrHex(output.intoBytes()) + msg := arbutil.ToStringOrHex(rustBytesIntoBytes(output)) log.Error("failed to set stylus compilation target", "status", status, "msg", msg) return fmt.Errorf("failed to set stylus compilation target, status %v: %v", status, msg) } return nil } -func (value bytes32) toHash() common.Hash { +func bytes32ToHash(value *bytes32) common.Hash { hash := common.Hash{} for index, b := range value.bytes { hash[index] = byte(b) @@ -449,27 +449,27 @@ func addressToBytes20(addr common.Address) bytes20 { return value } -func (slice *rustSlice) read() []byte { +func readRustSlice(slice *rustSlice) []byte { if slice.len == 0 { return nil } return arbutil.PointerToSlice((*byte)(slice.ptr), int(slice.len)) } -func (vec *rustBytes) read() []byte { +func readRustBytes(vec *rustBytes) []byte { if vec.len == 0 { return nil } return arbutil.PointerToSlice((*byte)(vec.ptr), int(vec.len)) } -func (vec *rustBytes) intoBytes() []byte { - slice := vec.read() - vec.drop() +func rustBytesIntoBytes(vec *rustBytes) []byte { + slice := readRustBytes(vec) + dropRustBytes(vec) return slice } -func (vec *rustBytes) drop() { +func dropRustBytes(vec *rustBytes) { C.free_rust_bytes(*vec) } diff --git a/arbos/programs/testcompile.go b/arbos/programs/testcompile.go index 8a4e38444a..58afa228d5 100644 --- a/arbos/programs/testcompile.go +++ b/arbos/programs/testcompile.go @@ -35,10 +35,10 @@ func Wat2Wasm(wat []byte) ([]byte, error) { status := C.wat_to_wasm(goSlice(wat), output) if status != 0 { - return nil, fmt.Errorf("failed reading wat file: %v", string(output.intoBytes())) + return nil, fmt.Errorf("failed reading wat file: %v", string(rustBytesIntoBytes(output))) } - return output.intoBytes(), nil + return rustBytesIntoBytes(output), nil } func testCompileArch(store bool) 
error { @@ -66,7 +66,7 @@ func testCompileArch(store bool) error { cbool(nativeArm64)) if status != 0 { - return fmt.Errorf("failed setting compilation target arm: %v", string(output.intoBytes())) + return fmt.Errorf("failed setting compilation target arm: %v", string(rustBytesIntoBytes(output))) } status = C.stylus_target_set(goSlice(amd64CompileName), @@ -75,7 +75,7 @@ func testCompileArch(store bool) error { cbool(nativeAmd64)) if status != 0 { - return fmt.Errorf("failed setting compilation target amd: %v", string(output.intoBytes())) + return fmt.Errorf("failed setting compilation target amd: %v", string(rustBytesIntoBytes(output))) } source, err := os.ReadFile("../../arbitrator/stylus/tests/add.wat") @@ -107,7 +107,7 @@ func testCompileArch(store bool) error { output, ) if status == 0 { - return fmt.Errorf("succeeded compiling non-existent arch: %v", string(output.intoBytes())) + return fmt.Errorf("succeeded compiling non-existent arch: %v", string(rustBytesIntoBytes(output))) } status = C.stylus_compile( @@ -118,7 +118,7 @@ func testCompileArch(store bool) error { output, ) if status != 0 { - return fmt.Errorf("failed compiling native: %v", string(output.intoBytes())) + return fmt.Errorf("failed compiling native: %v", string(rustBytesIntoBytes(output))) } if store && !nativeAmd64 && !nativeArm64 { _, err := fmt.Printf("writing host file\n") @@ -126,7 +126,7 @@ func testCompileArch(store bool) error { return err } - err = os.WriteFile("../../target/testdata/host.bin", output.intoBytes(), 0644) + err = os.WriteFile("../../target/testdata/host.bin", rustBytesIntoBytes(output), 0644) if err != nil { return err } @@ -140,7 +140,7 @@ func testCompileArch(store bool) error { output, ) if status != 0 { - return fmt.Errorf("failed compiling arm: %v", string(output.intoBytes())) + return fmt.Errorf("failed compiling arm: %v", string(rustBytesIntoBytes(output))) } if store { _, err := fmt.Printf("writing arm file\n") @@ -148,7 +148,7 @@ func testCompileArch(store bool) error { return err } - err = os.WriteFile("../../target/testdata/arm64.bin", output.intoBytes(), 0644) + err = os.WriteFile("../../target/testdata/arm64.bin", rustBytesIntoBytes(output), 0644) if err != nil { return err } @@ -162,7 +162,7 @@ func testCompileArch(store bool) error { output, ) if status != 0 { - return fmt.Errorf("failed compiling amd: %v", string(output.intoBytes())) + return fmt.Errorf("failed compiling amd: %v", string(rustBytesIntoBytes(output))) } if store { _, err := fmt.Printf("writing amd64 file\n") @@ -170,7 +170,7 @@ func testCompileArch(store bool) error { return err } - err = os.WriteFile("../../target/testdata/amd64.bin", output.intoBytes(), 0644) + err = os.WriteFile("../../target/testdata/amd64.bin", rustBytesIntoBytes(output), 0644) if err != nil { return err } @@ -195,7 +195,7 @@ func resetNativeTarget() error { cbool(true)) if status != 0 { - return fmt.Errorf("failed setting compilation target arm: %v", string(output.intoBytes())) + return fmt.Errorf("failed setting compilation target arm: %v", string(rustBytesIntoBytes(output))) } return nil @@ -260,7 +260,7 @@ func testCompileLoad() error { return err } - _, msg, err := status.toResult(output.intoBytes(), true) + _, msg, err := status.toResult(rustBytesIntoBytes(output), true) if status == userFailure { err = fmt.Errorf("%w: %v", err, msg) } diff --git a/arbos/tx_processor.go b/arbos/tx_processor.go index aec08b15b5..7cebd8da37 100644 --- a/arbos/tx_processor.go +++ b/arbos/tx_processor.go @@ -307,7 +307,7 @@ func (p *TxProcessor) 
StartTxHook() (endTxNow bool, gasUsed uint64, err error, r // pay for the retryable's gas and update the pools gascost := arbmath.BigMulByUint(effectiveBaseFee, usergas) networkCost := gascost - if p.state.ArbOSVersion() >= 11 { + if p.state.ArbOSVersion() >= params.ArbosVersion_11 { infraFeeAccount, err := p.state.InfraFeeAccount() p.state.Restrict(err) if infraFeeAccount != (common.Address{}) { @@ -576,7 +576,7 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { takeFunds(maxRefund, arbmath.BigMulByUint(effectiveBaseFee, gasUsed)) // Refund any unused gas, without overdrafting the L1 deposit. networkRefund := gasRefund - if p.state.ArbOSVersion() >= 11 { + if p.state.ArbOSVersion() >= params.ArbosVersion_11 { infraFeeAccount, err := p.state.InfraFeeAccount() p.state.Restrict(err) if infraFeeAccount != (common.Address{}) { @@ -629,7 +629,7 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { } purpose := "feeCollection" - if p.state.ArbOSVersion() > 4 { + if p.state.ArbOSVersion() > params.ArbosVersion_4 { infraFeeAccount, err := p.state.InfraFeeAccount() p.state.Restrict(err) if infraFeeAccount != (common.Address{}) { @@ -646,11 +646,11 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { util.MintBalance(&networkFeeAccount, computeCost, p.evm, scenario, purpose) } posterFeeDestination := l1pricing.L1PricerFundsPoolAddress - if p.state.ArbOSVersion() < 2 { + if p.state.ArbOSVersion() < params.ArbosVersion_2 { posterFeeDestination = p.evm.Context.Coinbase } util.MintBalance(&posterFeeDestination, p.PosterFee, p.evm, scenario, purpose) - if p.state.ArbOSVersion() >= 10 { + if p.state.ArbOSVersion() >= params.ArbosVersion_10 { if _, err := p.state.L1PricingState().AddToL1FeesAvailable(p.PosterFee); err != nil { log.Error("failed to update L1FeesAvailable: ", "err", err) } @@ -748,13 +748,13 @@ func (p *TxProcessor) L1BlockHash(blockCtx vm.BlockContext, l1BlockNumber uint64 func (p *TxProcessor) DropTip() bool { version := p.state.ArbOSVersion() - return version != 9 || p.delayedInbox + return version != params.ArbosVersion_9 || p.delayedInbox } func (p *TxProcessor) GetPaidGasPrice() *big.Int { gasPrice := p.evm.GasPrice version := p.state.ArbOSVersion() - if version != 9 { + if version != params.ArbosVersion_9 { // p.evm.Context.BaseFee is already lowered to 0 when vm runs with NoBaseFee flag and 0 gas price gasPrice = p.evm.Context.BaseFee } @@ -762,7 +762,7 @@ func (p *TxProcessor) GetPaidGasPrice() *big.Int { } func (p *TxProcessor) GasPriceOp(evm *vm.EVM) *big.Int { - if p.state.ArbOSVersion() >= 3 { + if p.state.ArbOSVersion() >= params.ArbosVersion_3 { return p.GetPaidGasPrice() } return evm.GasPrice diff --git a/arbos/util/transfer.go b/arbos/util/transfer.go index c5873b7e93..0b61868abe 100644 --- a/arbos/util/transfer.go +++ b/arbos/util/transfer.go @@ -15,6 +15,7 @@ import ( "github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/util/arbmath" ) @@ -66,7 +67,7 @@ func TransferBalance( if arbmath.BigLessThan(balance.ToBig(), amount) { return fmt.Errorf("%w: addr %v have %v want %v", vm.ErrInsufficientBalance, *from, balance, amount) } - if evm.Context.ArbOSVersion < 30 && amount.Sign() == 0 { + if evm.Context.ArbOSVersion < params.ArbosVersion_30 && amount.Sign() == 0 { evm.StateDB.CreateZombieIfDeleted(*from) } evm.StateDB.SubBalance(*from, uint256.MustFromBig(amount), 
tracing.BalanceChangeTransfer)
diff --git a/cmd/conf/database.go b/cmd/conf/database.go
index 8857b615f3..8d05c44500 100644
--- a/cmd/conf/database.go
+++ b/cmd/conf/database.go
@@ -112,16 +112,19 @@ func (c *PersistentConfig) Validate() error {
 }
 
 type PebbleConfig struct {
+	SyncMode                 bool                     `koanf:"sync-mode"`
 	MaxConcurrentCompactions int                      `koanf:"max-concurrent-compactions"`
 	Experimental             PebbleExperimentalConfig `koanf:"experimental"`
 }
 
 var PebbleConfigDefault = PebbleConfig{
+	SyncMode:                 false, // use NO-SYNC mode, see: https://github.com/ethereum/go-ethereum/issues/29819
 	MaxConcurrentCompactions: runtime.NumCPU(),
 	Experimental:             PebbleExperimentalConfigDefault,
 }
 
 func PebbleConfigAddOptions(prefix string, f *flag.FlagSet, defaultConfig *PebbleConfig) {
+	f.Bool(prefix+".sync-mode", defaultConfig.SyncMode, "if true, sync mode is used (data must be written to the WAL before a write is marked as completed)")
 	f.Int(prefix+".max-concurrent-compactions", defaultConfig.MaxConcurrentCompactions, "maximum number of concurrent compactions")
 	PebbleExperimentalConfigAddOptions(prefix+".experimental", f, &defaultConfig.Experimental)
 }
@@ -180,7 +183,7 @@ var PebbleExperimentalConfigDefault = PebbleExperimentalConfig{
 	BlockSize:                 4 << 10, // 4 KB
 	IndexBlockSize:            4 << 10, // 4 KB
 	TargetFileSize:            2 << 20, // 2 MB
-	TargetFileSizeEqualLevels: true,
+	TargetFileSizeEqualLevels: false,
 
 	L0CompactionConcurrency:   10,
 	CompactionDebtConcurrency: 1 << 30, // 1GB
@@ -251,6 +254,7 @@ func (c *PebbleConfig) ExtraOptions(namespace string) *pebble.ExtraOptions {
 		walDir = path.Join(walDir, namespace)
 	}
 	return &pebble.ExtraOptions{
+		SyncMode:                   c.SyncMode,
 		BytesPerSync:               c.Experimental.BytesPerSync,
 		L0CompactionFileThreshold:  c.Experimental.L0CompactionFileThreshold,
 		L0CompactionThreshold:      c.Experimental.L0CompactionThreshold,
diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go
index acad672bb0..93c51a0040 100644
--- a/cmd/nitro/init.go
+++ b/cmd/nitro/init.go
@@ -335,12 +335,12 @@ func validateBlockChain(blockChain *core.BlockChain, chainConfig *params.ChainCo
 	}
 	// Make sure we don't allow accidentally downgrading ArbOS
 	if chainConfig.DebugMode() {
-		if currentArbosState.ArbOSVersion() > arbosState.MaxDebugArbosVersionSupported {
-			return fmt.Errorf("attempted to launch node in debug mode with ArbOS version %v on ArbOS state with version %v", arbosState.MaxDebugArbosVersionSupported, currentArbosState.ArbOSVersion())
+		if currentArbosState.ArbOSVersion() > params.MaxDebugArbosVersionSupported {
+			return fmt.Errorf("attempted to launch node in debug mode with ArbOS version %v on ArbOS state with version %v", params.MaxDebugArbosVersionSupported, currentArbosState.ArbOSVersion())
 		}
 	} else {
-		if currentArbosState.ArbOSVersion() > arbosState.MaxArbosVersionSupported {
-			return fmt.Errorf("attempted to launch node with ArbOS version %v on ArbOS state with version %v", arbosState.MaxArbosVersionSupported, currentArbosState.ArbOSVersion())
+		if currentArbosState.ArbOSVersion() > params.MaxArbosVersionSupported {
+			return fmt.Errorf("attempted to launch node with ArbOS version %v on ArbOS state with version %v", params.MaxArbosVersionSupported, currentArbosState.ArbOSVersion())
 		}
 	}
 
diff --git a/contracts b/contracts
index b140ed63ac..763bd77906 160000
--- a/contracts
+++ b/contracts
@@ -1 +1 @@
-Subproject commit b140ed63acdb53cb906ffd1fa3c36fdbd474364e
+Subproject commit 763bd77906b7677da691eaa31c6e195d455197a4
diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go
index 69535e82be..ffc6ceee9f
100644 --- a/execution/gethexec/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -789,7 +789,8 @@ func (s *ExecutionEngine) cacheL1PriceDataOfMsg(seqNum arbutil.MessageIndex, rec gasUsedForL1 += receipts[i].GasUsedForL1 } for _, tx := range block.Transactions() { - callDataUnits += tx.CalldataUnits + _, cachedUnits := tx.GetRawCachedCalldataUnits() + callDataUnits += cachedUnits } } l1GasCharged := gasUsedForL1 * block.BaseFee().Uint64() diff --git a/go-ethereum b/go-ethereum index 313432e2a4..26b4dff616 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 313432e2a408f5d7d0f50c9ad4ccf515c8d21a56 +Subproject commit 26b4dff6165650b6963fb1b6f88958c29c059214 diff --git a/precompiles/ArbGasInfo.go b/precompiles/ArbGasInfo.go index 8d916926f3..c85ed93f39 100644 --- a/precompiles/ArbGasInfo.go +++ b/precompiles/ArbGasInfo.go @@ -29,7 +29,7 @@ func (con ArbGasInfo) GetPricesInWeiWithAggregator( evm mech, aggregator addr, ) (huge, huge, huge, huge, huge, huge, error) { - if c.State.ArbOSVersion() < 4 { + if c.State.ArbOSVersion() < params.ArbosVersion_4 { return con._preVersion4_GetPricesInWeiWithAggregator(c, evm, aggregator) } @@ -105,7 +105,7 @@ func (con ArbGasInfo) GetPricesInWei(c ctx, evm mech) (huge, huge, huge, huge, h // GetPricesInArbGasWithAggregator gets prices in ArbGas when using the provided aggregator func (con ArbGasInfo) GetPricesInArbGasWithAggregator(c ctx, evm mech, aggregator addr) (huge, huge, huge, error) { - if c.State.ArbOSVersion() < 4 { + if c.State.ArbOSVersion() < params.ArbosVersion_4 { return con._preVersion4_GetPricesInArbGasWithAggregator(c, evm, aggregator) } l1GasPrice, err := c.State.L1PricingState().PricePerUnit() @@ -220,7 +220,7 @@ func (con ArbGasInfo) GetGasBacklogTolerance(c ctx, evm mech) (uint64, error) { // GetL1PricingSurplus gets the surplus of funds for L1 batch posting payments (may be negative) func (con ArbGasInfo) GetL1PricingSurplus(c ctx, evm mech) (*big.Int, error) { - if c.State.ArbOSVersion() < 10 { + if c.State.ArbOSVersion() < params.ArbosVersion_10 { return con._preversion10_GetL1PricingSurplus(c, evm) } ps := c.State.L1PricingState() diff --git a/precompiles/ArbOwnerPublic.go b/precompiles/ArbOwnerPublic.go index 451e18e1cc..792b4bb59d 100644 --- a/precompiles/ArbOwnerPublic.go +++ b/precompiles/ArbOwnerPublic.go @@ -5,6 +5,7 @@ package precompiles import ( "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/params" ) // ArbOwnerPublic precompile provides non-owners with info about the current chain owners. 
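The hunks above and below replace bare ArbOS version literals with named constants from go-ethereum's params package. A minimal sketch of the declarations this series relies on — the values are inferred from the integer literals they replace, and the authoritative definitions (along with MaxArbosVersionSupported and MaxDebugArbosVersionSupported) live in the Offchain Labs go-ethereum fork's params package:

```go
// Sketch only: named ArbOS version constants assumed throughout this series.
// Values mirror the integer literals being replaced; the real declarations
// live in the go-ethereum fork's params package.
package params

const (
	ArbosVersion_2  uint64 = 2
	ArbosVersion_3  uint64 = 3
	ArbosVersion_4  uint64 = 4
	ArbosVersion_5  uint64 = 5
	ArbosVersion_6  uint64 = 6
	ArbosVersion_9  uint64 = 9
	ArbosVersion_10 uint64 = 10
	ArbosVersion_11 uint64 = 11
	ArbosVersion_20 uint64 = 20
	ArbosVersion_30 uint64 = 30
	ArbosVersion_31 uint64 = 31
)
```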
@@ -42,7 +43,7 @@ func (con ArbOwnerPublic) GetNetworkFeeAccount(c ctx, evm mech) (addr, error) { // GetInfraFeeAccount gets the infrastructure fee collector func (con ArbOwnerPublic) GetInfraFeeAccount(c ctx, evm mech) (addr, error) { - if c.State.ArbOSVersion() < 6 { + if c.State.ArbOSVersion() < params.ArbosVersion_6 { return c.State.NetworkFeeAccount() } return c.State.InfraFeeAccount() diff --git a/precompiles/ArbOwner_test.go b/precompiles/ArbOwner_test.go index 51b2fc0cd9..74b29a79b5 100644 --- a/precompiles/ArbOwner_test.go +++ b/precompiles/ArbOwner_test.go @@ -16,6 +16,7 @@ import ( "github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/arbosState" "github.com/offchainlabs/nitro/arbos/burn" @@ -218,7 +219,7 @@ func TestArbInfraFeeAccount(t *testing.T) { err = prec.SetInfraFeeAccount(callCtx, evm, newAddr) // this should be a no-op (because ArbOS version 0) Require(t, err) - version5 := uint64(5) + version5 := params.ArbosVersion_5 evm = newMockEVMForTestingWithVersion(&version5) callCtx = testContext(caller, evm) prec = &ArbOwner{} diff --git a/precompiles/ArbRetryableTx.go b/precompiles/ArbRetryableTx.go index 8fb5aa9391..06e5ccd352 100644 --- a/precompiles/ArbRetryableTx.go +++ b/precompiles/ArbRetryableTx.go @@ -39,7 +39,7 @@ type ArbRetryableTx struct { var ErrSelfModifyingRetryable = errors.New("retryable cannot modify itself") func (con ArbRetryableTx) oldNotFoundError(c ctx) error { - if c.State.ArbOSVersion() >= 3 { + if c.State.ArbOSVersion() >= params.ArbosVersion_3 { return con.NoTicketWithIDError() } return errors.New("ticketId not found") diff --git a/precompiles/ArbSys.go b/precompiles/ArbSys.go index 04cde46ebe..9742ed51f4 100644 --- a/precompiles/ArbSys.go +++ b/precompiles/ArbSys.go @@ -9,6 +9,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/util" "github.com/offchainlabs/nitro/util/arbmath" @@ -37,7 +38,7 @@ func (con *ArbSys) ArbBlockNumber(c ctx, evm mech) (huge, error) { // ArbBlockHash gets the L2 block hash, if sufficiently recent func (con *ArbSys) ArbBlockHash(c ctx, evm mech, arbBlockNumber *big.Int) (bytes32, error) { if !arbBlockNumber.IsUint64() { - if c.State.ArbOSVersion() >= 11 { + if c.State.ArbOSVersion() >= params.ArbosVersion_11 { return bytes32{}, con.InvalidBlockNumberError(arbBlockNumber, evm.Context.BlockNumber) } return bytes32{}, errors.New("invalid block number") @@ -46,7 +47,7 @@ func (con *ArbSys) ArbBlockHash(c ctx, evm mech, arbBlockNumber *big.Int) (bytes currentNumber := evm.Context.BlockNumber.Uint64() if requestedBlockNum >= currentNumber || requestedBlockNum+256 < currentNumber { - if c.State.ArbOSVersion() >= 11 { + if c.State.ArbOSVersion() >= params.ArbosVersion_11 { return common.Hash{}, con.InvalidBlockNumberError(arbBlockNumber, evm.Context.BlockNumber) } return common.Hash{}, errors.New("invalid block number for ArbBlockHAsh") @@ -84,7 +85,7 @@ func (con *ArbSys) MapL1SenderContractAddressToL2Alias(c ctx, sender addr, dest // WasMyCallersAddressAliased checks if the caller's caller was aliased func (con *ArbSys) WasMyCallersAddressAliased(c ctx, evm mech) (bool, error) { topLevel := con.isTopLevel(c, evm) - if c.State.ArbOSVersion() < 6 { + if c.State.ArbOSVersion() < params.ArbosVersion_6 { topLevel = evm.Depth() == 2 } aliased := topLevel 
&& util.DoesTxTypeAlias(c.txProcessor.TopTxType) @@ -180,7 +181,7 @@ func (con *ArbSys) SendTxToL1(c ctx, evm mech, value huge, destination addr, cal calldataForL1, ) - if c.State.ArbOSVersion() >= 4 { + if c.State.ArbOSVersion() >= params.ArbosVersion_4 { return leafNum, nil } return sendHash.Big(), err diff --git a/precompiles/precompile.go b/precompiles/precompile.go index 5b5376a4ca..54d18a0cc9 100644 --- a/precompiles/precompile.go +++ b/precompiles/precompile.go @@ -361,7 +361,7 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr args = args[2:] version := arbosState.ArbOSVersion(state) - if callerCtx.readOnly && version >= 11 { + if callerCtx.readOnly && version >= params.ArbosVersion_11 { return []reflect.Value{reflect.ValueOf(vm.ErrWriteProtection)} } @@ -531,14 +531,14 @@ func Precompiles() map[addr]ArbosPrecompile { insert(MakePrecompile(pgen.ArbFunctionTableMetaData, &ArbFunctionTable{Address: types.ArbFunctionTableAddress})) insert(MakePrecompile(pgen.ArbosTestMetaData, &ArbosTest{Address: types.ArbosTestAddress})) ArbGasInfo := insert(MakePrecompile(pgen.ArbGasInfoMetaData, &ArbGasInfo{Address: types.ArbGasInfoAddress})) - ArbGasInfo.methodsByName["GetL1FeesAvailable"].arbosVersion = 10 - ArbGasInfo.methodsByName["GetL1RewardRate"].arbosVersion = 11 - ArbGasInfo.methodsByName["GetL1RewardRecipient"].arbosVersion = 11 - ArbGasInfo.methodsByName["GetL1PricingEquilibrationUnits"].arbosVersion = 20 - ArbGasInfo.methodsByName["GetLastL1PricingUpdateTime"].arbosVersion = 20 - ArbGasInfo.methodsByName["GetL1PricingFundsDueForRewards"].arbosVersion = 20 - ArbGasInfo.methodsByName["GetL1PricingUnitsSinceUpdate"].arbosVersion = 20 - ArbGasInfo.methodsByName["GetLastL1PricingSurplus"].arbosVersion = 20 + ArbGasInfo.methodsByName["GetL1FeesAvailable"].arbosVersion = params.ArbosVersion_10 + ArbGasInfo.methodsByName["GetL1RewardRate"].arbosVersion = params.ArbosVersion_11 + ArbGasInfo.methodsByName["GetL1RewardRecipient"].arbosVersion = params.ArbosVersion_11 + ArbGasInfo.methodsByName["GetL1PricingEquilibrationUnits"].arbosVersion = params.ArbosVersion_20 + ArbGasInfo.methodsByName["GetLastL1PricingUpdateTime"].arbosVersion = params.ArbosVersion_20 + ArbGasInfo.methodsByName["GetL1PricingFundsDueForRewards"].arbosVersion = params.ArbosVersion_20 + ArbGasInfo.methodsByName["GetL1PricingUnitsSinceUpdate"].arbosVersion = params.ArbosVersion_20 + ArbGasInfo.methodsByName["GetLastL1PricingSurplus"].arbosVersion = params.ArbosVersion_20 insert(MakePrecompile(pgen.ArbAggregatorMetaData, &ArbAggregator{Address: types.ArbAggregatorAddress})) insert(MakePrecompile(pgen.ArbStatisticsMetaData, &ArbStatistics{Address: types.ArbStatisticsAddress})) @@ -554,10 +554,10 @@ func Precompiles() map[addr]ArbosPrecompile { ArbOwnerPublicImpl := &ArbOwnerPublic{Address: types.ArbOwnerPublicAddress} ArbOwnerPublic := insert(MakePrecompile(pgen.ArbOwnerPublicMetaData, ArbOwnerPublicImpl)) - ArbOwnerPublic.methodsByName["GetInfraFeeAccount"].arbosVersion = 5 - ArbOwnerPublic.methodsByName["RectifyChainOwner"].arbosVersion = 11 - ArbOwnerPublic.methodsByName["GetBrotliCompressionLevel"].arbosVersion = 20 - ArbOwnerPublic.methodsByName["GetScheduledUpgrade"].arbosVersion = 20 + ArbOwnerPublic.methodsByName["GetInfraFeeAccount"].arbosVersion = params.ArbosVersion_5 + ArbOwnerPublic.methodsByName["RectifyChainOwner"].arbosVersion = params.ArbosVersion_11 + ArbOwnerPublic.methodsByName["GetBrotliCompressionLevel"].arbosVersion = params.ArbosVersion_20 + 
ArbOwnerPublic.methodsByName["GetScheduledUpgrade"].arbosVersion = params.ArbosVersion_20 ArbWasmImpl := &ArbWasm{Address: types.ArbWasmAddress} ArbWasm := insert(MakePrecompile(pgen.ArbWasmMetaData, ArbWasmImpl)) @@ -611,11 +611,11 @@ func Precompiles() map[addr]ArbosPrecompile { return ArbOwnerImpl.OwnerActs(context, evm, method, owner, data) } _, ArbOwner := MakePrecompile(pgen.ArbOwnerMetaData, ArbOwnerImpl) - ArbOwner.methodsByName["GetInfraFeeAccount"].arbosVersion = 5 - ArbOwner.methodsByName["SetInfraFeeAccount"].arbosVersion = 5 - ArbOwner.methodsByName["ReleaseL1PricerSurplusFunds"].arbosVersion = 10 - ArbOwner.methodsByName["SetChainConfig"].arbosVersion = 11 - ArbOwner.methodsByName["SetBrotliCompressionLevel"].arbosVersion = 20 + ArbOwner.methodsByName["GetInfraFeeAccount"].arbosVersion = params.ArbosVersion_5 + ArbOwner.methodsByName["SetInfraFeeAccount"].arbosVersion = params.ArbosVersion_5 + ArbOwner.methodsByName["ReleaseL1PricerSurplusFunds"].arbosVersion = params.ArbosVersion_10 + ArbOwner.methodsByName["SetChainConfig"].arbosVersion = params.ArbosVersion_11 + ArbOwner.methodsByName["SetBrotliCompressionLevel"].arbosVersion = params.ArbosVersion_20 stylusMethods := []string{ "SetInkPrice", "SetWasmMaxStackDepth", "SetWasmFreePages", "SetWasmPageGas", "SetWasmPageLimit", "SetWasmMinInitGas", "SetWasmInitCostScalar", @@ -798,7 +798,7 @@ func (p *Precompile) Call( ) } // nolint:errorlint - if arbosVersion >= 11 || errRet == vm.ErrExecutionReverted { + if arbosVersion >= params.ArbosVersion_11 || errRet == vm.ErrExecutionReverted { return nil, callerCtx.gasLeft, vm.ErrExecutionReverted } // Preserve behavior with old versions which would zero out gas on this type of error diff --git a/precompiles/precompile_test.go b/precompiles/precompile_test.go index c8b8a46b96..75fed711eb 100644 --- a/precompiles/precompile_test.go +++ b/precompiles/precompile_test.go @@ -190,13 +190,13 @@ func TestPrecompilesPerArbosVersion(t *testing.T) { log.SetDefault(log.NewLogger(glogger)) expectedNewMethodsPerArbosVersion := map[uint64]int{ - 0: 89, - 5: 3, - 10: 2, - 11: 4, - 20: 8, - 30: 38, - 31: 1, + 0: 89, + params.ArbosVersion_5: 3, + params.ArbosVersion_10: 2, + params.ArbosVersion_11: 4, + params.ArbosVersion_20: 8, + params.ArbosVersion_30: 38, + params.ArbosVersion_31: 1, } precompiles := Precompiles() diff --git a/precompiles/wrapper.go b/precompiles/wrapper.go index edc079fc5b..028aed755b 100644 --- a/precompiles/wrapper.go +++ b/precompiles/wrapper.go @@ -10,6 +10,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/arbosState" "github.com/offchainlabs/nitro/arbos/util" @@ -102,7 +103,7 @@ func (wrapper *OwnerPrecompile) Call( } version := arbosState.ArbOSVersion(evm.StateDB) - if !readOnly || version < 11 { + if !readOnly || version < params.ArbosVersion_11 { // log that the owner operation succeeded if err := wrapper.emitSuccess(evm, *(*[4]byte)(input[:4]), caller, input); err != nil { log.Error("failed to emit OwnerActs event", "err", err) diff --git a/staker/legacy/staker.go b/staker/legacy/staker.go index fa74be327f..504e8c8421 100644 --- a/staker/legacy/staker.go +++ b/staker/legacy/staker.go @@ -323,9 +323,6 @@ func NewStaker( return nil, err } stakerLastSuccessfulActionGauge.Update(time.Now().Unix()) - if config().StartValidationFromStaked && blockValidator != nil { - stakedNotifiers = append(stakedNotifiers, 
blockValidator)
-	}
 	inactiveValidatedNodes := btree.NewG(2, func(a, b validatedNode) bool {
 		return a.number < b.number || (a.number == b.number && a.hash.Cmp(b.hash) < 0)
 	})
diff --git a/system_tests/arbos_upgrade_test.go b/system_tests/arbos_upgrade_test.go
new file mode 100644
index 0000000000..a7103a8585
--- /dev/null
+++ b/system_tests/arbos_upgrade_test.go
@@ -0,0 +1,271 @@
+// Copyright 2021-2024, Offchain Labs, Inc.
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE
+
+package arbtest
+
+import (
+	"context"
+	"math/big"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/ethereum/go-ethereum/accounts/abi/bind"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+
+	"github.com/offchainlabs/nitro/arbnode"
+	"github.com/offchainlabs/nitro/arbos/arbosState"
+	"github.com/offchainlabs/nitro/execution/gethexec"
+	"github.com/offchainlabs/nitro/solgen/go/mocksgen"
+	"github.com/offchainlabs/nitro/solgen/go/precompilesgen"
+)
+
+func TestScheduleArbosUpgrade(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	builder := NewNodeBuilder(ctx).DefaultConfig(t, false)
+	cleanup := builder.Build(t)
+	defer cleanup()
+
+	auth := builder.L2Info.GetDefaultTransactOpts("Owner", ctx)
+
+	arbOwnerPublic, err := precompilesgen.NewArbOwnerPublic(common.HexToAddress("0x6b"), builder.L2.Client)
+	Require(t, err, "could not bind ArbOwnerPublic contract")
+
+	arbOwner, err := precompilesgen.NewArbOwner(common.HexToAddress("0x70"), builder.L2.Client)
+	Require(t, err, "could not bind ArbOwner contract")
+
+	callOpts := &bind.CallOpts{Context: ctx}
+	scheduled, err := arbOwnerPublic.GetScheduledUpgrade(callOpts)
+	Require(t, err, "failed to call GetScheduledUpgrade before scheduling upgrade")
+	if scheduled.ArbosVersion != 0 || scheduled.ScheduledForTimestamp != 0 {
+		t.Errorf("expected no upgrade to be scheduled, got version %v timestamp %v", scheduled.ArbosVersion, scheduled.ScheduledForTimestamp)
+	}
+
+	// Schedule a noop upgrade, which should test GetScheduledUpgrade in the same way an already completed upgrade would.
+ tx, err := arbOwner.ScheduleArbOSUpgrade(&auth, 1, 1) + Require(t, err) + _, err = builder.L2.EnsureTxSucceeded(tx) + Require(t, err) + + scheduled, err = arbOwnerPublic.GetScheduledUpgrade(callOpts) + Require(t, err, "failed to call GetScheduledUpgrade after scheduling noop upgrade") + if scheduled.ArbosVersion != 0 || scheduled.ScheduledForTimestamp != 0 { + t.Errorf("expected completed scheduled upgrade to be ignored, got version %v timestamp %v", scheduled.ArbosVersion, scheduled.ScheduledForTimestamp) + } + + // We can't test 11 -> 20 because 11 doesn't have the GetScheduledUpgrade method we want to test + var testVersion uint64 = 100 + var testTimestamp uint64 = 1 << 62 + tx, err = arbOwner.ScheduleArbOSUpgrade(&auth, 100, 1<<62) + Require(t, err) + _, err = builder.L2.EnsureTxSucceeded(tx) + Require(t, err) + + scheduled, err = arbOwnerPublic.GetScheduledUpgrade(callOpts) + Require(t, err, "failed to call GetScheduledUpgrade after scheduling upgrade") + if scheduled.ArbosVersion != testVersion || scheduled.ScheduledForTimestamp != testTimestamp { + t.Errorf("expected upgrade to be scheduled for version %v timestamp %v, got version %v timestamp %v", testVersion, testTimestamp, scheduled.ArbosVersion, scheduled.ScheduledForTimestamp) + } +} + +func checkArbOSVersion(t *testing.T, testClient *TestClient, expectedVersion uint64, scenario string) { + statedb, err := testClient.ExecNode.Backend.ArbInterface().BlockChain().State() + Require(t, err, "could not get statedb", scenario) + state, err := arbosState.OpenSystemArbosState(statedb, nil, true) + Require(t, err, "could not open ArbOS state", scenario) + if state.ArbOSVersion() != expectedVersion { + t.Errorf("%s: expected ArbOS version %v, got %v", scenario, expectedVersion, state.ArbOSVersion()) + } + +} + +func TestArbos11To32UpgradeWithMcopy(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + initialVersion := uint64(11) + finalVersion := uint64(32) + + builder := NewNodeBuilder(ctx). + DefaultConfig(t, true). 
+ WithArbOSVersion(initialVersion) + cleanup := builder.Build(t) + defer cleanup() + seqTestClient := builder.L2 + + auth := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) + auth.GasLimit = 32000000 + + // makes Owner a chain owner + arbDebug, err := precompilesgen.NewArbDebug(types.ArbDebugAddress, seqTestClient.Client) + Require(t, err) + tx, err := arbDebug.BecomeChainOwner(&auth) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, seqTestClient.Client, tx) + Require(t, err) + + // deploys test contract + _, tx, contract, err := mocksgen.DeployArbOS11To32UpgradeTest(&auth, seqTestClient.Client) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, seqTestClient.Client, tx) + Require(t, err) + + // build replica node + replicaConfig := arbnode.ConfigDefaultL1Test() + replicaConfig.BatchPoster.Enable = false + replicaTestClient, replicaCleanup := builder.Build2ndNode(t, &SecondNodeParams{nodeConfig: replicaConfig}) + defer replicaCleanup() + + checkArbOSVersion(t, seqTestClient, initialVersion, "initial sequencer") + checkArbOSVersion(t, replicaTestClient, initialVersion, "initial replica") + + // mcopy should fail since arbos 11 doesn't support it + tx, err = contract.Mcopy(&auth) + Require(t, err) + _, err = seqTestClient.EnsureTxSucceeded(tx) + if (err == nil) || !strings.Contains(err.Error(), "invalid opcode: MCOPY") { + t.Errorf("expected MCOPY to fail, got %v", err) + } + _, err = WaitForTx(ctx, replicaTestClient.Client, tx.Hash(), time.Second*15) + Require(t, err) + + // upgrade arbos to final version + arbOwner, err := precompilesgen.NewArbOwner(types.ArbOwnerAddress, seqTestClient.Client) + Require(t, err) + tx, err = arbOwner.ScheduleArbOSUpgrade(&auth, finalVersion, 0) + Require(t, err) + _, err = seqTestClient.EnsureTxSucceeded(tx) + Require(t, err) + _, err = WaitForTx(ctx, replicaTestClient.Client, tx.Hash(), time.Second*15) + Require(t, err) + + // checks upgrade worked + tx, err = contract.Mcopy(&auth) + Require(t, err) + _, err = seqTestClient.EnsureTxSucceeded(tx) + Require(t, err) + _, err = WaitForTx(ctx, replicaTestClient.Client, tx.Hash(), time.Second*15) + Require(t, err) + + checkArbOSVersion(t, seqTestClient, finalVersion, "final sequencer") + checkArbOSVersion(t, replicaTestClient, finalVersion, "final replica") + + // generates more blocks + builder.L2Info.GenerateAccount("User2") + for i := 0; i < 3; i++ { + tx = builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, big.NewInt(1e12), nil) + err = seqTestClient.Client.SendTransaction(ctx, tx) + Require(t, err) + _, err = seqTestClient.EnsureTxSucceeded(tx) + Require(t, err) + _, err = WaitForTx(ctx, replicaTestClient.Client, tx.Hash(), time.Second*15) + Require(t, err) + } + + blockNumberSeq, err := seqTestClient.Client.BlockNumber(ctx) + Require(t, err) + blockNumberReplica, err := replicaTestClient.Client.BlockNumber(ctx) + Require(t, err) + if blockNumberSeq != blockNumberReplica { + t.Errorf("expected sequencer and replica to have same block number, got %v and %v", blockNumberSeq, blockNumberReplica) + } + // #nosec G115 + blockNumber := big.NewInt(int64(blockNumberSeq)) + + blockSeq, err := seqTestClient.Client.BlockByNumber(ctx, blockNumber) + Require(t, err) + blockReplica, err := replicaTestClient.Client.BlockByNumber(ctx, blockNumber) + Require(t, err) + if blockSeq.Hash() != blockReplica.Hash() { + t.Errorf("expected sequencer and replica to have same block hash, got %v and %v", blockSeq.Hash(), blockReplica.Hash()) + } +} + +func TestArbos11To32UpgradeWithCalldata(t *testing.T) { + 
t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + initialVersion := uint64(11) + finalVersion := uint64(32) + + builder := NewNodeBuilder(ctx). + DefaultConfig(t, true). + WithArbOSVersion(initialVersion) + builder.execConfig.TxPreChecker.Strictness = gethexec.TxPreCheckerStrictnessLikelyCompatible + cleanup := builder.Build(t) + defer cleanup() + seqTestClient := builder.L2 + + auth := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) + auth.GasLimit = 32000000 + + // makes Owner a chain owner + arbDebug, err := precompilesgen.NewArbDebug(types.ArbDebugAddress, seqTestClient.Client) + Require(t, err) + tx, err := arbDebug.BecomeChainOwner(&auth) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, seqTestClient.Client, tx) + Require(t, err) + + // build replica node + replicaConfig := arbnode.ConfigDefaultL1Test() + replicaConfig.BatchPoster.Enable = false + replicaTestClient, replicaCleanup := builder.Build2ndNode(t, &SecondNodeParams{nodeConfig: replicaConfig}) + defer replicaCleanup() + + checkArbOSVersion(t, seqTestClient, initialVersion, "initial sequencer") + checkArbOSVersion(t, replicaTestClient, initialVersion, "initial replica") + + // upgrade arbos to final version + arbOwner, err := precompilesgen.NewArbOwner(types.ArbOwnerAddress, seqTestClient.Client) + Require(t, err) + tx, err = arbOwner.ScheduleArbOSUpgrade(&auth, finalVersion, 0) + Require(t, err) + _, err = seqTestClient.EnsureTxSucceeded(tx) + Require(t, err) + _, err = WaitForTx(ctx, replicaTestClient.Client, tx.Hash(), time.Second*15) + Require(t, err) + + // checks upgrade worked + var data []byte + for i := range 10 { + for range 100 { + data = append(data, byte(i)) + } + } + tx = builder.L2Info.PrepareTx("Owner", "Owner", builder.L2Info.TransferGas, big.NewInt(1e12), data) + err = seqTestClient.Client.SendTransaction(ctx, tx) + Require(t, err) + _, err = seqTestClient.EnsureTxSucceeded(tx) + Require(t, err) + _, err = WaitForTx(ctx, replicaTestClient.Client, tx.Hash(), time.Second*15) + Require(t, err) + + checkArbOSVersion(t, seqTestClient, finalVersion, "final sequencer") + checkArbOSVersion(t, replicaTestClient, finalVersion, "final replica") + + blockNumberSeq, err := seqTestClient.Client.BlockNumber(ctx) + Require(t, err) + blockNumberReplica, err := replicaTestClient.Client.BlockNumber(ctx) + Require(t, err) + if blockNumberSeq != blockNumberReplica { + t.Errorf("expected sequencer and replica to have same block number, got %v and %v", blockNumberSeq, blockNumberReplica) + } + // #nosec G115 + blockNumber := big.NewInt(int64(blockNumberSeq)) + + blockSeq, err := seqTestClient.Client.BlockByNumber(ctx, blockNumber) + Require(t, err) + blockReplica, err := replicaTestClient.Client.BlockByNumber(ctx, blockNumber) + Require(t, err) + if blockSeq.Hash() != blockReplica.Hash() { + t.Errorf("expected sequencer and replica to have same block hash, got %v and %v", blockSeq.Hash(), blockReplica.Hash()) + } +} diff --git a/system_tests/block_validator_test.go b/system_tests/block_validator_test.go index 9125c3921e..d6ae4973ac 100644 --- a/system_tests/block_validator_test.go +++ b/system_tests/block_validator_test.go @@ -19,6 +19,7 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbos/l2pricing" @@ -58,7 +59,7 @@ func testBlockValidatorSimple(t *testing.T, opts 
Options) { chainConfig, l1NodeConfigA, lifecycleManager, _, dasSignerKey := setupConfigWithDAS(t, ctx, opts.dasModeString) defer lifecycleManager.StopAndWaitUntil(time.Second) if opts.workload == upgradeArbOs { - chainConfig.ArbitrumChainParams.InitialArbOSVersion = 10 + chainConfig.ArbitrumChainParams.InitialArbOSVersion = params.ArbosVersion_10 } var delayEvery int @@ -202,8 +203,6 @@ func testBlockValidatorSimple(t *testing.T, opts Options) { builder.L1.SendWaitTestTransactions(t, []*types.Transaction{ WrapL2ForDelayed(t, delayedTx, builder.L1Info, "User", 100000), }) - // give the inbox reader a bit of time to pick up the delayed message - time.Sleep(time.Millisecond * 500) // sending l1 messages creates l1 blocks.. make enough to get that delayed inbox message in for i := 0; i < 30; i++ { diff --git a/system_tests/debugapi_test.go b/system_tests/debugapi_test.go index 6be79ed4c9..fd1aa746a3 100644 --- a/system_tests/debugapi_test.go +++ b/system_tests/debugapi_test.go @@ -3,6 +3,8 @@ package arbtest import ( "context" "encoding/json" + "fmt" + "math/big" "testing" "github.com/ethereum/go-ethereum/common" @@ -10,10 +12,16 @@ import ( "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/eth/gasestimator" "github.com/ethereum/go-ethereum/eth/tracers" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" + "github.com/offchainlabs/nitro/arbos/l2pricing" + "github.com/offchainlabs/nitro/arbos/retryables" + "github.com/offchainlabs/nitro/solgen/go/node_interfacegen" "github.com/offchainlabs/nitro/solgen/go/precompilesgen" + "github.com/offchainlabs/nitro/util/arbmath" ) func TestDebugAPI(t *testing.T) { @@ -57,3 +65,230 @@ func TestDebugAPI(t *testing.T) { err = l2rpc.CallContext(ctx, &result, "debug_traceTransaction", tx.Hash(), &tracers.TraceConfig{Tracer: &flatCallTracer}) Require(t, err) } + +type account struct { + Balance *hexutil.Big `json:"balance,omitempty"` + Code []byte `json:"code,omitempty"` + Nonce uint64 `json:"nonce,omitempty"` + Storage map[common.Hash]common.Hash `json:"storage,omitempty"` +} +type prestateTrace struct { + Post map[common.Address]*account `json:"post"` + Pre map[common.Address]*account `json:"pre"` +} + +func TestPrestateTracingSimple(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + cleanup := builder.Build(t) + defer cleanup() + + builder.L2Info.GenerateAccount("User2") + sender := builder.L2Info.GetAddress("Owner") + receiver := builder.L2Info.GetAddress("User2") + ownerOldBalance, err := builder.L2.Client.BalanceAt(ctx, sender, nil) + Require(t, err) + user2OldBalance, err := builder.L2.Client.BalanceAt(ctx, receiver, nil) + Require(t, err) + + value := big.NewInt(1e6) + tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, value, nil) + Require(t, builder.L2.Client.SendTransaction(ctx, tx)) + _, err = builder.L2.EnsureTxSucceeded(tx) + Require(t, err) + + l2rpc := builder.L2.Stack.Attach() + + var result prestateTrace + traceConfig := map[string]interface{}{ + "tracer": "prestateTracer", + "tracerConfig": map[string]interface{}{ + "diffMode": true, + }, + } + err = l2rpc.CallContext(ctx, &result, "debug_traceTransaction", tx.Hash(), traceConfig) + Require(t, err) + + if !arbmath.BigEquals(result.Pre[sender].Balance.ToInt(), ownerOldBalance) { + Fatal(t, "Unexpected initial balance of sender") + } 
+ if !arbmath.BigEquals(result.Pre[receiver].Balance.ToInt(), user2OldBalance) { + Fatal(t, "Unexpected initial balance of receiver") + } + if !arbmath.BigEquals(result.Post[sender].Balance.ToInt(), arbmath.BigSub(ownerOldBalance, value)) { + Fatal(t, "Unexpected final balance of sender") + } + if !arbmath.BigEquals(result.Post[receiver].Balance.ToInt(), value) { + Fatal(t, "Unexpected final balance of receiver") + } + if result.Post[sender].Nonce != result.Pre[sender].Nonce+1 { + Fatal(t, "sender nonce increment wasn't registered") + } + if result.Post[receiver].Nonce != result.Pre[receiver].Nonce { + Fatal(t, "receiver nonce shouldn't change") + } +} + +func TestPrestateTracingComplex(t *testing.T) { + builder, delayedInbox, lookupL2Tx, ctx, teardown := retryableSetup(t) + defer teardown() + + // Test prestate tracing of a ArbitrumDepositTx type tx + faucetAddr := builder.L1Info.GetAddress("Faucet") + oldBalance, err := builder.L2.Client.BalanceAt(ctx, faucetAddr, nil) + Require(t, err) + + txOpts := builder.L1Info.GetDefaultTransactOpts("Faucet", ctx) + txOpts.Value = big.NewInt(13) + + l1tx, err := delayedInbox.DepositEth439370b1(&txOpts) + Require(t, err) + + l1Receipt, err := builder.L1.EnsureTxSucceeded(l1tx) + Require(t, err) + if l1Receipt.Status != types.ReceiptStatusSuccessful { + t.Errorf("Got transaction status: %v, want: %v", l1Receipt.Status, types.ReceiptStatusSuccessful) + } + waitForL1DelayBlocks(t, builder) + + l2Tx := lookupL2Tx(l1Receipt) + l2Receipt, err := builder.L2.EnsureTxSucceeded(l2Tx) + Require(t, err) + newBalance, err := builder.L2.Client.BalanceAt(ctx, faucetAddr, l2Receipt.BlockNumber) + Require(t, err) + if got := new(big.Int); got.Sub(newBalance, oldBalance).Cmp(txOpts.Value) != 0 { + t.Errorf("Got transferred: %v, want: %v", got, txOpts.Value) + } + + l2rpc := builder.L2.Stack.Attach() + var result prestateTrace + traceConfig := map[string]interface{}{ + "tracer": "prestateTracer", + "tracerConfig": map[string]interface{}{ + "diffMode": true, + }, + } + err = l2rpc.CallContext(ctx, &result, "debug_traceTransaction", l2Tx.Hash(), traceConfig) + Require(t, err) + + if _, ok := result.Pre[faucetAddr]; !ok { + Fatal(t, "Faucet account not found in the result of prestate tracer") + } + // Nonce shouldn't exist (in this case defaults to 0) in the Post map of the trace in DiffMode + if l2Tx.SkipAccountChecks() && result.Post[faucetAddr].Nonce != 0 { + Fatal(t, "Faucet account's nonce should remain unchanged ") + } + if !arbmath.BigEquals(result.Pre[faucetAddr].Balance.ToInt(), oldBalance) { + Fatal(t, "Unexpected initial balance of Faucet") + } + if !arbmath.BigEquals(result.Post[faucetAddr].Balance.ToInt(), arbmath.BigAdd(oldBalance, txOpts.Value)) { + Fatal(t, "Unexpected final balance of Faucet") + } + + // Test prestate tracing of a ArbitrumSubmitRetryableTx type tx + user2Address := builder.L2Info.GetAddress("User2") + beneficiaryAddress := builder.L2Info.GetAddress("Beneficiary") + + deposit := arbmath.BigMul(big.NewInt(1e12), big.NewInt(1e12)) + callValue := big.NewInt(1e6) + + nodeInterface, err := node_interfacegen.NewNodeInterface(types.NodeInterfaceAddress, builder.L2.Client) + Require(t, err, "failed to deploy NodeInterface") + + // estimate the gas needed to auto redeem the retryable + usertxoptsL2 := builder.L2Info.GetDefaultTransactOpts("Faucet", ctx) + usertxoptsL2.NoSend = true + usertxoptsL2.GasMargin = 0 + tx, err := nodeInterface.EstimateRetryableTicket( + &usertxoptsL2, + usertxoptsL2.From, + deposit, + user2Address, + callValue, + 
beneficiaryAddress, + beneficiaryAddress, + []byte{0x32, 0x42, 0x32, 0x88}, // increase the cost to beyond that of params.TxGas + ) + Require(t, err, "failed to estimate retryable submission") + estimate := tx.Gas() + expectedEstimate := params.TxGas + params.TxDataNonZeroGasEIP2028*4 + if float64(estimate) > float64(expectedEstimate)*(1+gasestimator.EstimateGasErrorRatio) { + t.Errorf("estimated retryable ticket at %v gas but expected %v, with error margin of %v", + estimate, + expectedEstimate, + gasestimator.EstimateGasErrorRatio, + ) + } + + // submit & auto redeem the retryable using the gas estimate + usertxoptsL1 := builder.L1Info.GetDefaultTransactOpts("Faucet", ctx) + usertxoptsL1.Value = deposit + l1tx, err = delayedInbox.CreateRetryableTicket( + &usertxoptsL1, + user2Address, + callValue, + big.NewInt(1e16), + beneficiaryAddress, + beneficiaryAddress, + arbmath.UintToBig(estimate), + big.NewInt(l2pricing.InitialBaseFeeWei*2), + []byte{0x32, 0x42, 0x32, 0x88}, + ) + Require(t, err) + + l1Receipt, err = builder.L1.EnsureTxSucceeded(l1tx) + Require(t, err) + if l1Receipt.Status != types.ReceiptStatusSuccessful { + Fatal(t, "l1Receipt indicated failure") + } + + waitForL1DelayBlocks(t, builder) + + l2Tx = lookupL2Tx(l1Receipt) + receipt, err := builder.L2.EnsureTxSucceeded(l2Tx) + Require(t, err) + if receipt.Status != types.ReceiptStatusSuccessful { + Fatal(t) + } + + l2balance, err := builder.L2.Client.BalanceAt(ctx, builder.L2Info.GetAddress("User2"), nil) + Require(t, err) + if !arbmath.BigEquals(l2balance, callValue) { + Fatal(t, "Unexpected balance:", l2balance) + } + + ticketId := receipt.Logs[0].Topics[1] + firstRetryTxId := receipt.Logs[1].Topics[2] + fmt.Println("submitretryable txid ", ticketId) + fmt.Println("auto redeem txid ", firstRetryTxId) + + // Trace ArbitrumSubmitRetryableTx + result = prestateTrace{} + err = l2rpc.CallContext(ctx, &result, "debug_traceTransaction", l2Tx.Hash(), traceConfig) + Require(t, err) + + escrowAddr := retryables.RetryableEscrowAddress(ticketId) + if _, ok := result.Pre[escrowAddr]; !ok { + Fatal(t, "Escrow account not found in the result of prestate tracer for a ArbitrumSubmitRetryableTx transaction") + } + + if !arbmath.BigEquals(result.Pre[escrowAddr].Balance.ToInt(), common.Big0) { + Fatal(t, "Unexpected initial balance of Escrow") + } + if !arbmath.BigEquals(result.Post[escrowAddr].Balance.ToInt(), callValue) { + Fatal(t, "Unexpected final balance of Escrow") + } + + // Trace ArbitrumRetryTx + result = prestateTrace{} + err = l2rpc.CallContext(ctx, &result, "debug_traceTransaction", firstRetryTxId, traceConfig) + Require(t, err) + + if !arbmath.BigEquals(result.Pre[user2Address].Balance.ToInt(), common.Big0) { + Fatal(t, "Unexpected initial balance of User2") + } + if !arbmath.BigEquals(result.Post[user2Address].Balance.ToInt(), callValue) { + Fatal(t, "Unexpected final balance of User2") + } +} diff --git a/system_tests/estimation_test.go b/system_tests/estimation_test.go index e489b1864e..37e1efe8c5 100644 --- a/system_tests/estimation_test.go +++ b/system_tests/estimation_test.go @@ -162,7 +162,7 @@ func TestDifficultyForArbOSTen(t *testing.T) { defer cancel() builder := NewNodeBuilder(ctx).DefaultConfig(t, false) - builder.chainConfig.ArbitrumChainParams.InitialArbOSVersion = 10 + builder.chainConfig.ArbitrumChainParams.InitialArbOSVersion = params.ArbosVersion_10 cleanup := builder.Build(t) defer cleanup() diff --git a/system_tests/fees_test.go b/system_tests/fees_test.go index 76de23e2cb..5540728df8 100644 --- 
a/system_tests/fees_test.go +++ b/system_tests/fees_test.go @@ -89,10 +89,10 @@ func TestSequencerFeePaid(t *testing.T) { feePaidForL2 := arbmath.BigMulByUint(gasPrice, gasUsedForL2) tipPaidToNet := arbmath.BigMulByUint(tipCap, receipt.GasUsedForL1) gotTip := arbmath.BigEquals(networkRevenue, arbmath.BigAdd(feePaidForL2, tipPaidToNet)) - if !gotTip && version == 9 { + if !gotTip && version == params.ArbosVersion_9 { Fatal(t, "network didn't receive expected payment", networkRevenue, feePaidForL2, tipPaidToNet) } - if gotTip && version != 9 { + if gotTip && version != params.ArbosVersion_9 { Fatal(t, "tips are somehow enabled") } @@ -110,7 +110,7 @@ func TestSequencerFeePaid(t *testing.T) { return networkRevenue, tipPaidToNet } - if version != 9 { + if version != params.ArbosVersion_9 { testFees(3) return } diff --git a/system_tests/precompile_test.go b/system_tests/precompile_test.go index 78f34df6c7..5bc6315086 100644 --- a/system_tests/precompile_test.go +++ b/system_tests/precompile_test.go @@ -14,6 +14,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos" "github.com/offchainlabs/nitro/arbos/l1pricing" @@ -27,7 +28,7 @@ func TestPurePrecompileMethodCalls(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - arbosVersion := uint64(31) + arbosVersion := params.ArbosVersion_31 builder := NewNodeBuilder(ctx). DefaultConfig(t, false). WithArbOSVersion(arbosVersion) @@ -504,57 +505,6 @@ func TestGetBrotliCompressionLevel(t *testing.T) { } } -func TestScheduleArbosUpgrade(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - builder := NewNodeBuilder(ctx).DefaultConfig(t, false) - cleanup := builder.Build(t) - defer cleanup() - - auth := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) - - arbOwnerPublic, err := precompilesgen.NewArbOwnerPublic(common.HexToAddress("0x6b"), builder.L2.Client) - Require(t, err, "could not bind ArbOwner contract") - - arbOwner, err := precompilesgen.NewArbOwner(common.HexToAddress("0x70"), builder.L2.Client) - Require(t, err, "could not bind ArbOwner contract") - - callOpts := &bind.CallOpts{Context: ctx} - scheduled, err := arbOwnerPublic.GetScheduledUpgrade(callOpts) - Require(t, err, "failed to call GetScheduledUpgrade before scheduling upgrade") - if scheduled.ArbosVersion != 0 || scheduled.ScheduledForTimestamp != 0 { - t.Errorf("expected no upgrade to be scheduled, got version %v timestamp %v", scheduled.ArbosVersion, scheduled.ScheduledForTimestamp) - } - - // Schedule a noop upgrade, which should test GetScheduledUpgrade in the same way an already completed upgrade would. 
- tx, err := arbOwner.ScheduleArbOSUpgrade(&auth, 1, 1) - Require(t, err) - _, err = builder.L2.EnsureTxSucceeded(tx) - Require(t, err) - - scheduled, err = arbOwnerPublic.GetScheduledUpgrade(callOpts) - Require(t, err, "failed to call GetScheduledUpgrade after scheduling noop upgrade") - if scheduled.ArbosVersion != 0 || scheduled.ScheduledForTimestamp != 0 { - t.Errorf("expected completed scheduled upgrade to be ignored, got version %v timestamp %v", scheduled.ArbosVersion, scheduled.ScheduledForTimestamp) - } - - // TODO: Once we have an ArbOS 30, test a real upgrade with it - // We can't test 11 -> 20 because 11 doesn't have the GetScheduledUpgrade method we want to test - var testVersion uint64 = 100 - var testTimestamp uint64 = 1 << 62 - tx, err = arbOwner.ScheduleArbOSUpgrade(&auth, 100, 1<<62) - Require(t, err) - _, err = builder.L2.EnsureTxSucceeded(tx) - Require(t, err) - - scheduled, err = arbOwnerPublic.GetScheduledUpgrade(callOpts) - Require(t, err, "failed to call GetScheduledUpgrade after scheduling upgrade") - if scheduled.ArbosVersion != testVersion || scheduled.ScheduledForTimestamp != testTimestamp { - t.Errorf("expected upgrade to be scheduled for version %v timestamp %v, got version %v timestamp %v", testVersion, testTimestamp, scheduled.ArbosVersion, scheduled.ScheduledForTimestamp) - } -} - func TestArbStatistics(t *testing.T) { t.Parallel() diff --git a/system_tests/retryable_test.go b/system_tests/retryable_test.go index 55d26c8372..49bba81374 100644 --- a/system_tests/retryable_test.go +++ b/system_tests/retryable_test.go @@ -316,7 +316,7 @@ func testSubmitRetryableEmptyEscrow(t *testing.T, arbosVersion uint64) { state, err := builder.L2.ExecNode.ArbInterface.BlockChain().State() Require(t, err) escrowExists := state.Exist(escrowAccount) - if escrowExists != (arbosVersion < 30) { + if escrowExists != (arbosVersion < params.ArbosVersion_30) { Fatal(t, "Escrow account existance", escrowExists, "doesn't correspond to ArbOS version", arbosVersion) } } diff --git a/system_tests/transfer_test.go b/system_tests/transfer_test.go index a49e059351..c221ecc137 100644 --- a/system_tests/transfer_test.go +++ b/system_tests/transfer_test.go @@ -12,6 +12,7 @@ import ( "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/params" ) func TestTransfer(t *testing.T) { @@ -51,12 +52,12 @@ func TestP256Verify(t *testing.T) { }{ { desc: "p256 should not be enabled on arbOS 20", - initialVersion: 20, + initialVersion: params.ArbosVersion_20, want: nil, }, { desc: "p256 should be enabled on arbOS 20", - initialVersion: 30, + initialVersion: params.ArbosVersion_30, want: common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"), }, } { From 2fe54e77860edc5d3dc1293cbbecc04332c5e3d5 Mon Sep 17 00:00:00 2001 From: Pepper Lebeck-Jobe Date: Mon, 30 Dec 2024 10:20:46 +0900 Subject: [PATCH 16/16] Rearrange the order of agreed and confirmed This way, the behavior of falling back to the confirmed state is more readable. 
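The diff below turns the fallback into a single assignment performed before either notifier loop runs. Condensed, the resulting flow in BOLDStaker.Start looks like this (a sketch with error handling elided; names are as in the diff):

```go
// Condensed sketch of the post-patch update flow in BOLDStaker.Start
// (error handling elided; see the diff below for full context).
confirmedMsgCount, confirmedGlobalState, _ := b.getLatestState(ctx, true)
agreedMsgCount, agreedGlobalState, _ := b.getLatestState(ctx, false)
if agreedGlobalState == nil {
	// No latest agreed state yet: fall back to the latest confirmed state.
	agreedGlobalState = confirmedGlobalState
	agreedMsgCount = confirmedMsgCount
}
if agreedGlobalState != nil {
	for _, notifier := range b.stakedNotifiers {
		notifier.UpdateLatestStaked(agreedMsgCount, *agreedGlobalState)
	}
}
if confirmedGlobalState != nil {
	for _, notifier := range b.confirmedNotifiers {
		notifier.UpdateLatestConfirmed(confirmedMsgCount, *confirmedGlobalState)
	}
}
```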
--- staker/bold/bold_staker.go | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/staker/bold/bold_staker.go b/staker/bold/bold_staker.go index 539eb80abf..063f7b9719 100644 --- a/staker/bold/bold_staker.go +++ b/staker/bold/bold_staker.go @@ -244,29 +244,29 @@ func (b *BOLDStaker) Start(ctxIn context.Context) { if err != nil { log.Warn("error updating latest wasm module root", "err", err) } + confirmedMsgCount, confirmedGlobalState, err := b.getLatestState(ctx, true) + if err != nil { + log.Error("staker: error checking latest confirmed", "err", err) + } + agreedMsgCount, agreedGlobalState, err := b.getLatestState(ctx, false) if err != nil { log.Error("staker: error checking latest agreed", "err", err) } + if agreedGlobalState == nil { + // If we don't have a latest agreed global state, we should fall back to + // using the latest confirmed global state. + agreedGlobalState = confirmedGlobalState + agreedMsgCount = confirmedMsgCount + } if agreedGlobalState != nil { for _, notifier := range b.stakedNotifiers { notifier.UpdateLatestStaked(agreedMsgCount, *agreedGlobalState) } } - confirmedMsgCount, confirmedGlobalState, err := b.getLatestState(ctx, true) - if err != nil { - log.Error("staker: error checking latest confirmed", "err", err) - } if confirmedGlobalState != nil { - if agreedGlobalState == nil { - // If we don't have a latest agreed global state, we should fall back to - // using the latest confirmed global state. - for _, notifier := range b.stakedNotifiers { - notifier.UpdateLatestStaked(confirmedMsgCount, *confirmedGlobalState) - } - } for _, notifier := range b.confirmedNotifiers { notifier.UpdateLatestConfirmed(confirmedMsgCount, *confirmedGlobalState) }
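For reference, the prestate-tracer assertions added in system_tests/debugapi_test.go drive debug_traceTransaction with the prestateTracer in diff mode; the same trace can be requested from a running node with a plain RPC call. A minimal sketch — the endpoint URL and transaction hash are placeholders:

```go
// Sketch: fetch a diff-mode prestate trace from a node over RPC.
// The endpoint URL and transaction hash below are placeholders.
package main

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://localhost:8547") // placeholder endpoint
	if err != nil {
		panic(err)
	}
	defer client.Close()

	// Placeholder hash; substitute the transaction to trace.
	txHash := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000")
	var result json.RawMessage
	err = client.CallContext(context.Background(), &result, "debug_traceTransaction", txHash,
		map[string]interface{}{
			"tracer":       "prestateTracer",
			"tracerConfig": map[string]interface{}{"diffMode": true},
		})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(result))
}
```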