diff --git a/arbitrator/wasm-libraries/user-host-trait/src/lib.rs b/arbitrator/wasm-libraries/user-host-trait/src/lib.rs
index 0191718dcc..37af85c382 100644
--- a/arbitrator/wasm-libraries/user-host-trait/src/lib.rs
+++ b/arbitrator/wasm-libraries/user-host-trait/src/lib.rs
@@ -534,7 +534,7 @@ pub trait UserHost: GasMeteredMachine {
     fn return_data_size(&mut self) -> Result {
         self.buy_ink(HOSTIO_INK)?;
         let len = *self.evm_return_data_len();
-        trace!("return_data_size", self, be!(len), &[], len)
+        trace!("return_data_size", self, &[], be!(len), len)
     }
 
     /// Emits an EVM log with the given number of topics and data, the first bytes of which should
@@ -629,7 +629,8 @@ pub trait UserHost: GasMeteredMachine {
         self.buy_gas(gas_cost)?;
 
         let code = code.slice();
-        trace!("account_code_size", self, address, &[], code.len() as u32)
+        let len = code.len() as u32;
+        trace!("account_code_size", self, address, be!(len), len)
     }
 
     /// Gets the code hash of the account at the given address. The semantics are equivalent
@@ -735,7 +736,7 @@ pub trait UserHost: GasMeteredMachine {
     fn evm_gas_left(&mut self) -> Result {
         self.buy_ink(HOSTIO_INK)?;
         let gas = self.gas_left()?;
-        trace!("evm_gas_left", self, be!(gas), &[], gas)
+        trace!("evm_gas_left", self, &[], be!(gas), gas)
     }
 
     /// Gets the amount of ink remaining after paying for the cost of this hostio. The semantics
@@ -747,7 +748,7 @@ pub trait UserHost: GasMeteredMachine {
     fn evm_ink_left(&mut self) -> Result {
         self.buy_ink(HOSTIO_INK)?;
         let ink = self.ink_ready()?;
-        trace!("evm_ink_left", self, be!(ink), &[], ink)
+        trace!("evm_ink_left", self, &[], be!(ink), ink)
     }
 
     /// Computes `value ÷ exponent` using 256-bit math, writing the result to the first.
diff --git a/arbnode/seq_coordinator.go b/arbnode/seq_coordinator.go
index 98c19ce361..a582b64ffa 100644
--- a/arbnode/seq_coordinator.go
+++ b/arbnode/seq_coordinator.go
@@ -39,6 +39,7 @@ type SeqCoordinator struct {
 
 	redisutil.RedisCoordinator
 
+	sync             *SyncMonitor
 	streamer         *TransactionStreamer
 	sequencer        execution.ExecutionSequencer
 	delayedSequencer *DelayedSequencer
@@ -69,9 +70,10 @@ type SeqCoordinatorConfig struct {
 	SafeShutdownDelay time.Duration `koanf:"safe-shutdown-delay"`
 	ReleaseRetries    int           `koanf:"release-retries"`
 	// Max message per poll.
- MsgPerPoll arbutil.MessageIndex `koanf:"msg-per-poll"` - MyUrl string `koanf:"my-url"` - Signer signature.SignVerifyConfig `koanf:"signer"` + MsgPerPoll arbutil.MessageIndex `koanf:"msg-per-poll"` + MyUrl string `koanf:"my-url"` + DeleteFinalizedMsgs bool `koanf:"delete-finalized-msgs"` + Signer signature.SignVerifyConfig `koanf:"signer"` } func (c *SeqCoordinatorConfig) Url() string { @@ -95,6 +97,7 @@ func SeqCoordinatorConfigAddOptions(prefix string, f *flag.FlagSet) { f.Int(prefix+".release-retries", DefaultSeqCoordinatorConfig.ReleaseRetries, "the number of times to retry releasing the wants lockout and chosen one status on shutdown") f.Uint64(prefix+".msg-per-poll", uint64(DefaultSeqCoordinatorConfig.MsgPerPoll), "will only be marked as wanting the lockout if not too far behind") f.String(prefix+".my-url", DefaultSeqCoordinatorConfig.MyUrl, "url for this sequencer if it is the chosen") + f.Bool(prefix+".delete-finalized-msgs", DefaultSeqCoordinatorConfig.DeleteFinalizedMsgs, "enable deleting of finalized messages from redis") signature.SignVerifyConfigAddOptions(prefix+".signer", f) } @@ -104,7 +107,7 @@ var DefaultSeqCoordinatorConfig = SeqCoordinatorConfig{ RedisUrl: "", LockoutDuration: time.Minute, LockoutSpare: 30 * time.Second, - SeqNumDuration: 24 * time.Hour, + SeqNumDuration: 10 * 24 * time.Hour, UpdateInterval: 250 * time.Millisecond, HandoffTimeout: 30 * time.Second, SafeShutdownDelay: 5 * time.Second, @@ -112,23 +115,25 @@ var DefaultSeqCoordinatorConfig = SeqCoordinatorConfig{ RetryInterval: 50 * time.Millisecond, MsgPerPoll: 2000, MyUrl: redisutil.INVALID_URL, + DeleteFinalizedMsgs: true, Signer: signature.DefaultSignVerifyConfig, } var TestSeqCoordinatorConfig = SeqCoordinatorConfig{ - Enable: false, - RedisUrl: "", - LockoutDuration: time.Second * 2, - LockoutSpare: time.Millisecond * 10, - SeqNumDuration: time.Minute * 10, - UpdateInterval: time.Millisecond * 10, - HandoffTimeout: time.Millisecond * 200, - SafeShutdownDelay: time.Millisecond * 100, - ReleaseRetries: 4, - RetryInterval: time.Millisecond * 3, - MsgPerPoll: 20, - MyUrl: redisutil.INVALID_URL, - Signer: signature.DefaultSignVerifyConfig, + Enable: false, + RedisUrl: "", + LockoutDuration: time.Second * 2, + LockoutSpare: time.Millisecond * 10, + SeqNumDuration: time.Minute * 10, + UpdateInterval: time.Millisecond * 10, + HandoffTimeout: time.Millisecond * 200, + SafeShutdownDelay: time.Millisecond * 100, + ReleaseRetries: 4, + RetryInterval: time.Millisecond * 3, + MsgPerPoll: 20, + MyUrl: redisutil.INVALID_URL, + DeleteFinalizedMsgs: true, + Signer: signature.DefaultSignVerifyConfig, } func NewSeqCoordinator( @@ -149,6 +154,7 @@ func NewSeqCoordinator( } coordinator := &SeqCoordinator{ RedisCoordinator: *redisCoordinator, + sync: sync, streamer: streamer, sequencer: sequencer, config: config, @@ -338,6 +344,14 @@ func (c *SeqCoordinator) acquireLockoutAndWriteMessage(ctx context.Context, msgC return nil } +func (c *SeqCoordinator) getRemoteFinalizedMsgCount(ctx context.Context) (arbutil.MessageIndex, error) { + resStr, err := c.Client.Get(ctx, redisutil.FINALIZED_MSG_COUNT_KEY).Result() + if err != nil { + return 0, err + } + return c.signedBytesToMsgCount(ctx, []byte(resStr)) +} + func (c *SeqCoordinator) getRemoteMsgCountImpl(ctx context.Context, r redis.Cmdable) (arbutil.MessageIndex, error) { resStr, err := r.Get(ctx, redisutil.MSG_COUNT_KEY).Result() if errors.Is(err, redis.Nil) { @@ -473,6 +487,17 @@ func (c *SeqCoordinator) updateWithLockout(ctx context.Context, nextChosen strin return 
c.noRedisError()
 	}
 	// Was, and still is, the active sequencer
+	if c.config.DeleteFinalizedMsgs {
+		// Before proceeding, first try deleting finalized messages from redis and setting the finalizedMsgCount key
+		finalized, err := c.sync.GetFinalizedMsgCount(ctx)
+		if err != nil {
+			log.Warn("Error getting finalizedMessageCount from syncMonitor", "err", err)
+		} else if finalized == 0 {
+			log.Warn("SyncMonitor returned zero finalizedMessageCount")
+		} else if err := c.deleteFinalizedMsgsFromRedis(ctx, finalized); err != nil {
+			log.Warn("Coordinator failed to delete finalized messages from redis", "err", err)
+		}
+	}
 	// We leave a margin of error of either five times the update interval or a fifth of the lockout duration, whichever is greater.
 	marginOfError := arbmath.MaxInt(c.config.LockoutDuration/5, c.config.UpdateInterval*5)
 	if time.Now().Add(marginOfError).Before(atomicTimeRead(&c.lockoutUntil)) {
@@ -492,6 +517,62 @@ func (c *SeqCoordinator) updateWithLockout(ctx context.Context, nextChosen strin
 	return c.noRedisError()
 }
 
+func (c *SeqCoordinator) deleteFinalizedMsgsFromRedis(ctx context.Context, finalized arbutil.MessageIndex) error {
+	deleteMsgsAndUpdateFinalizedMsgCount := func(keys []string) error {
+		if len(keys) > 0 {
+			// To support cases during init we delete keys in reverse (i.e. lowest seq num first), so that even if deletion fails in one of the iterations,
+			// the next time deleteFinalizedMsgsFromRedis is called we don't miss undeleted messages, as exists is checked from higher seqnum to lower.
+			// In non-init cases it doesn't matter how we delete, as we always try to delete from prevFinalized to finalized
+			batchDeleteCount := 1000
+			for i := len(keys); i > 0; i -= batchDeleteCount {
+				if err := c.Client.Del(ctx, keys[max(0, i-batchDeleteCount):i]...).Err(); err != nil {
+					return fmt.Errorf("error deleting finalized messages and their signatures from redis: %w", err)
+				}
+			}
+		}
+		finalizedBytes, err := c.msgCountToSignedBytes(finalized)
+		if err != nil {
+			return err
+		}
+		if err = c.Client.Set(ctx, redisutil.FINALIZED_MSG_COUNT_KEY, finalizedBytes, c.config.SeqNumDuration).Err(); err != nil {
+			return fmt.Errorf("couldn't set %s key to current finalizedMsgCount in redis: %w", redisutil.FINALIZED_MSG_COUNT_KEY, err)
+		}
+		return nil
+	}
+	prevFinalized, err := c.getRemoteFinalizedMsgCount(ctx)
+	if errors.Is(err, redis.Nil) {
+		var keys []string
+		for msg := finalized - 1; msg > 0; msg-- {
+			exists, err := c.Client.Exists(ctx, redisutil.MessageKeyFor(msg), redisutil.MessageSigKeyFor(msg)).Result()
+			if err != nil {
+				// If there is an error deleting finalized messages during init, we retry later either from this sequencer or from another
+				return err
+			}
+			if exists == 0 {
+				break
+			}
+			keys = append(keys, redisutil.MessageKeyFor(msg), redisutil.MessageSigKeyFor(msg))
+		}
+		log.Info("Initializing finalizedMsgCount and deleting finalized messages from redis", "finalizedMsgCount", finalized)
+		return deleteMsgsAndUpdateFinalizedMsgCount(keys)
+	} else if err != nil {
+		return fmt.Errorf("error getting finalizedMsgCount value from redis: %w", err)
+	}
+	remoteMsgCount, err := c.getRemoteMsgCountImpl(ctx, c.Client)
+	if err != nil {
+		return fmt.Errorf("cannot get remote message count: %w", err)
+	}
+	msgToDelete := min(finalized, remoteMsgCount)
+	if prevFinalized < msgToDelete {
+		var keys []string
+		for msg := prevFinalized; msg < msgToDelete; msg++ {
+			keys = append(keys, redisutil.MessageKeyFor(msg), redisutil.MessageSigKeyFor(msg))
+		}
+		return
deleteMsgsAndUpdateFinalizedMsgCount(keys) + } + return nil +} + func (c *SeqCoordinator) update(ctx context.Context) time.Duration { chosenSeq, err := c.RecommendSequencerWantingLockout(ctx) if err != nil { @@ -522,19 +603,24 @@ func (c *SeqCoordinator) update(ctx context.Context) time.Duration { log.Error("cannot read message count", "err", err) return c.config.UpdateInterval } + remoteFinalizedMsgCount, err := c.getRemoteFinalizedMsgCount(ctx) + if err != nil { + loglevel := log.Error + if errors.Is(err, redis.Nil) { + loglevel = log.Debug + } + loglevel("Cannot get remote finalized message count, might encounter failed to read message warnings later", "err", err) + } remoteMsgCount, err := c.GetRemoteMsgCount() if err != nil { log.Warn("cannot get remote message count", "err", err) return c.retryAfterRedisError() } - readUntil := remoteMsgCount - if readUntil > localMsgCount+c.config.MsgPerPoll { - readUntil = localMsgCount + c.config.MsgPerPoll - } + readUntil := min(localMsgCount+c.config.MsgPerPoll, remoteMsgCount) var messages []arbostypes.MessageWithMetadata msgToRead := localMsgCount var msgReadErr error - for msgToRead < readUntil { + for msgToRead < readUntil && localMsgCount >= remoteFinalizedMsgCount { var resString string resString, msgReadErr = c.Client.Get(ctx, redisutil.MessageKeyFor(msgToRead)).Result() if msgReadErr != nil { diff --git a/arbnode/seq_coordinator_atomic_test.go b/arbnode/seq_coordinator_test.go similarity index 57% rename from arbnode/seq_coordinator_atomic_test.go rename to arbnode/seq_coordinator_test.go index 9b9d9dea81..6498543f3a 100644 --- a/arbnode/seq_coordinator_atomic_test.go +++ b/arbnode/seq_coordinator_test.go @@ -156,3 +156,94 @@ func TestRedisSeqCoordinatorAtomic(t *testing.T) { } } + +func TestSeqCoordinatorDeletesFinalizedMessages(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + coordConfig := TestSeqCoordinatorConfig + coordConfig.LockoutDuration = time.Millisecond * 100 + coordConfig.LockoutSpare = time.Millisecond * 10 + coordConfig.Signer.ECDSA.AcceptSequencer = false + coordConfig.Signer.SymmetricFallback = true + coordConfig.Signer.SymmetricSign = true + coordConfig.Signer.Symmetric.Dangerous.DisableSignatureVerification = true + coordConfig.Signer.Symmetric.SigningKey = "" + + nullSigner, err := signature.NewSignVerify(&coordConfig.Signer, nil, nil) + Require(t, err) + + redisUrl := redisutil.CreateTestRedis(ctx, t) + coordConfig.RedisUrl = redisUrl + + config := coordConfig + config.MyUrl = "test" + redisCoordinator, err := redisutil.NewRedisCoordinator(config.RedisUrl) + Require(t, err) + coordinator := &SeqCoordinator{ + RedisCoordinator: *redisCoordinator, + config: config, + signer: nullSigner, + } + + // Add messages to redis + var keys []string + msgBytes, err := coordinator.msgCountToSignedBytes(0) + Require(t, err) + for i := arbutil.MessageIndex(1); i <= 10; i++ { + err = coordinator.Client.Set(ctx, redisutil.MessageKeyFor(i), msgBytes, time.Hour).Err() + Require(t, err) + err = coordinator.Client.Set(ctx, redisutil.MessageSigKeyFor(i), msgBytes, time.Hour).Err() + Require(t, err) + keys = append(keys, redisutil.MessageKeyFor(i), redisutil.MessageSigKeyFor(i)) + } + // Set msgCount key + msgCountBytes, err := coordinator.msgCountToSignedBytes(11) + Require(t, err) + err = coordinator.Client.Set(ctx, redisutil.MSG_COUNT_KEY, msgCountBytes, time.Hour).Err() + Require(t, err) + exists, err := coordinator.Client.Exists(ctx, keys...).Result() + Require(t, err) + if exists != 20 { + 
t.Fatal("couldn't find all messages and signatures in redis") + } + + // Set finalizedMsgCount and delete finalized messages + err = coordinator.deleteFinalizedMsgsFromRedis(ctx, 5) + Require(t, err) + + // Check if messages and signatures were deleted successfully + exists, err = coordinator.Client.Exists(ctx, keys[:8]...).Result() + Require(t, err) + if exists != 0 { + t.Fatal("finalized messages and signatures in range 1 to 4 were not deleted") + } + + // Check if finalizedMsgCount was set to correct value + finalized, err := coordinator.getRemoteFinalizedMsgCount(ctx) + Require(t, err) + if finalized != 5 { + t.Fatalf("incorrect finalizedMsgCount, want: 5, have: %d", finalized) + } + + // Try deleting finalized messages when theres already a finalizedMsgCount + err = coordinator.deleteFinalizedMsgsFromRedis(ctx, 7) + Require(t, err) + exists, err = coordinator.Client.Exists(ctx, keys[8:12]...).Result() + Require(t, err) + if exists != 0 { + t.Fatal("finalized messages and signatures in range 5 to 6 were not deleted") + } + finalized, err = coordinator.getRemoteFinalizedMsgCount(ctx) + Require(t, err) + if finalized != 7 { + t.Fatalf("incorrect finalizedMsgCount, want: 7, have: %d", finalized) + } + + // Check that non-finalized messages are still available in redis + exists, err = coordinator.Client.Exists(ctx, keys[12:]...).Result() + Require(t, err) + if exists != 8 { + t.Fatal("non-finalized messages and signatures in range 7 to 10 are not fully available") + } +} diff --git a/arbnode/sync_monitor.go b/arbnode/sync_monitor.go index d3b9a7e1c6..5ab1ede2d6 100644 --- a/arbnode/sync_monitor.go +++ b/arbnode/sync_monitor.go @@ -72,6 +72,13 @@ func (s *SyncMonitor) SyncTargetMessageCount() arbutil.MessageIndex { return s.syncTarget } +func (s *SyncMonitor) GetFinalizedMsgCount(ctx context.Context) (arbutil.MessageIndex, error) { + if s.inboxReader != nil && s.inboxReader.l1Reader != nil { + return s.inboxReader.GetFinalizedMsgCount(ctx) + } + return 0, nil +} + func (s *SyncMonitor) maxMessageCount() (arbutil.MessageIndex, error) { msgCount, err := s.txStreamer.GetMessageCount() if err != nil { diff --git a/arbos/programs/api.go b/arbos/programs/api.go index a371b575b9..504289322f 100644 --- a/arbos/programs/api.go +++ b/arbos/programs/api.go @@ -63,14 +63,10 @@ func newApiClosures( actingAddress := contract.Address() // not necessarily WASM readOnly := interpreter.ReadOnly() evm := interpreter.Evm() - depth := evm.Depth() db := evm.StateDB chainConfig := evm.ChainConfig() getBytes32 := func(key common.Hash) (common.Hash, uint64) { - if tracingInfo != nil { - tracingInfo.RecordStorageGet(key) - } cost := vm.WasmStateLoadCost(db, actingAddress, key) return db.GetState(actingAddress, key), cost } @@ -80,9 +76,6 @@ func newApiClosures( value := common.BytesToHash(data[32:64]) data = data[64:] - if tracingInfo != nil { - tracingInfo.RecordStorageSet(key, value) - } if readOnly { return WriteProtection } @@ -142,22 +135,7 @@ func newApiClosures( // Tracing: emit the call (value transfer is done later in evm.Call) if tracingInfo != nil { - var args []uint256.Int - args = append(args, *uint256.NewInt(gas)) // gas - args = append(args, *uint256.NewInt(0).SetBytes(contract.Bytes())) // to address - if opcode == vm.CALL { - args = append(args, *uint256.NewInt(0).SetBytes(value.Bytes())) // call value - } - args = append(args, *uint256.NewInt(0)) // memory offset - args = append(args, *uint256.NewInt(uint64(len(input)))) // memory length - args = append(args, *uint256.NewInt(0)) // return offset - 
args = append(args, *uint256.NewInt(0)) // return size - s := &vm.ScopeContext{ - Memory: util.TracingMemoryFromBytes(input), - Stack: util.TracingStackFromArgs(args...), - Contract: scope.Contract, - } - tracingInfo.Tracer.CaptureState(0, opcode, startGas, baseCost+gas, s, []byte{}, depth, nil) + tracingInfo.CaptureStylusCall(opcode, contract, value, input, gas, startGas, baseCost) } var ret []byte @@ -215,11 +193,6 @@ func newApiClosures( one64th := gas / 64 gas -= one64th - // Tracing: emit the create - if tracingInfo != nil { - tracingInfo.Tracer.CaptureState(0, opcode, startGas, baseCost+gas, scope, []byte{}, depth, nil) - } - var res []byte var addr common.Address // zero on failure var returnGas uint64 @@ -244,9 +217,6 @@ func newApiClosures( return addr, res, cost, nil } emitLog := func(topics []common.Hash, data []byte) error { - if tracingInfo != nil { - tracingInfo.RecordEmitLog(topics, data) - } if readOnly { return vm.ErrWriteProtection } @@ -285,10 +255,7 @@ func newApiClosures( } captureHostio := func(name string, args, outs []byte, startInk, endInk uint64) { tracingInfo.Tracer.CaptureStylusHostio(name, args, outs, startInk, endInk) - if name == "evm_gas_left" || name == "evm_ink_left" { - tracingInfo.Tracer.CaptureState(0, vm.GAS, 0, 0, scope, []byte{}, depth, nil) - tracingInfo.Tracer.CaptureState(0, vm.POP, 0, 0, scope, []byte{}, depth, nil) - } + tracingInfo.CaptureEVMTraceForHostio(name, args, outs, startInk, endInk) } return func(req RequestType, input []byte) ([]byte, []byte, uint64) { diff --git a/arbos/programs/native.go b/arbos/programs/native.go index f8e2696aad..a0976afb2f 100644 --- a/arbos/programs/native.go +++ b/arbos/programs/native.go @@ -206,6 +206,9 @@ func callProgram( if status == userFailure && debug { log.Warn("program failure", "err", err, "msg", msg, "program", address, "depth", depth) } + if tracingInfo != nil { + tracingInfo.CaptureStylusExit(uint8(status), data, err, scope.Contract.Gas) + } return data, err } diff --git a/arbos/util/storage_cache.go b/arbos/util/storage_cache.go new file mode 100644 index 0000000000..bf05a5824d --- /dev/null +++ b/arbos/util/storage_cache.go @@ -0,0 +1,76 @@ +// Copyright 2024, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package util + +import ( + "github.com/ethereum/go-ethereum/common" +) + +type storageCacheEntry struct { + Value common.Hash + Known *common.Hash +} + +func (e storageCacheEntry) dirty() bool { + return e.Known == nil || e.Value != *e.Known +} + +type storageCacheStores struct { + Key common.Hash + Value common.Hash +} + +// storageCache mirrors the stylus storage cache on arbos when tracing a call. +// This is useful for correctly reporting the SLOAD and SSTORE opcodes. +type storageCache struct { + cache map[common.Hash]storageCacheEntry +} + +func newStorageCache() *storageCache { + return &storageCache{ + cache: make(map[common.Hash]storageCacheEntry), + } +} + +// Load adds a value to the cache and returns true if the logger should emit a load opcode. +func (s *storageCache) Load(key, value common.Hash) bool { + _, ok := s.cache[key] + if !ok { + // The value was not in cache, so it came from EVM + s.cache[key] = storageCacheEntry{ + Value: value, + Known: &value, + } + } + return !ok +} + +// Store updates the value on the cache. 
+func (s *storageCache) Store(key, value common.Hash) {
+	entry := s.cache[key]
+	entry.Value = value // Do not change known value
+	s.cache[key] = entry
+}
+
+// Flush returns the store operations that should be logged.
+func (s *storageCache) Flush() []storageCacheStores {
+	stores := []storageCacheStores{}
+	for key, entry := range s.cache {
+		if entry.dirty() {
+			v := entry.Value // Create new var to avoid aliasing
+			entry.Known = &v
+			s.cache[key] = entry
+			stores = append(stores, storageCacheStores{
+				Key:   key,
+				Value: entry.Value,
+			})
+		}
+	}
+	return stores
+}
+
+// Clear clears the cache.
+func (s *storageCache) Clear() {
+	s.cache = make(map[common.Hash]storageCacheEntry)
+}
diff --git a/arbos/util/storage_cache_test.go b/arbos/util/storage_cache_test.go
new file mode 100644
index 0000000000..1cc4ea14ec
--- /dev/null
+++ b/arbos/util/storage_cache_test.go
@@ -0,0 +1,110 @@
+// Copyright 2024, Offchain Labs, Inc.
+// For license information, see https://github.com/nitro/blob/master/LICENSE
+
+package util
+
+import (
+	"bytes"
+	"slices"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/google/go-cmp/cmp"
+	"github.com/offchainlabs/nitro/util/testhelpers"
+)
+
+func TestStorageCache(t *testing.T) {
+	keys := make([]common.Hash, 3)
+	values := make([]common.Hash, len(keys))
+	for i := range keys {
+		keys[i] = testhelpers.RandomHash()
+		values[i] = testhelpers.RandomHash()
+	}
+
+	cache := newStorageCache()
+
+	t.Run("load then load", func(t *testing.T) {
+		emitLog := cache.Load(keys[0], values[0])
+		if !emitLog {
+			t.Fatal("unexpected value in cache")
+		}
+		emitLog = cache.Load(keys[0], values[0])
+		if emitLog {
+			t.Fatal("expected value in cache")
+		}
+	})
+
+	t.Run("load another value", func(t *testing.T) {
+		emitLog := cache.Load(keys[1], values[1])
+		if !emitLog {
+			t.Fatal("unexpected value in cache")
+		}
+	})
+
+	t.Run("load then store", func(t *testing.T) {
+		_ = cache.Load(keys[2], values[0])
+		cache.Store(keys[2], values[2])
+		if !cache.cache[keys[2]].dirty() {
+			t.Fatal("expected value to be dirty")
+		}
+		if cache.cache[keys[2]].Value != values[2] {
+			t.Fatal("wrong value in cache")
+		}
+	})
+
+	t.Run("clear", func(t *testing.T) {
+		cache.Clear()
+		if len(cache.cache) != 0 {
+			t.Fatal("expected to be empty")
+		}
+	})
+
+	t.Run("store then load", func(t *testing.T) {
+		cache.Store(keys[0], values[0])
+		emitLog := cache.Load(keys[0], values[0])
+		if emitLog {
+			t.Fatal("expected value in cache")
+		}
+	})
+
+	t.Run("flush only stored", func(t *testing.T) {
+		_ = cache.Load(keys[1], values[1])
+		cache.Store(keys[2], values[2])
+		stores := cache.Flush()
+		expected := []storageCacheStores{
+			{Key: keys[0], Value: values[0]},
+			{Key: keys[2], Value: values[2]},
+		}
+		sortFunc := func(a, b storageCacheStores) int {
+			return bytes.Compare(a.Key.Bytes(), b.Key.Bytes())
+		}
+		slices.SortFunc(stores, sortFunc)
+		slices.SortFunc(expected, sortFunc)
+		if diff := cmp.Diff(stores, expected); diff != "" {
+			t.Fatalf("wrong flush: %s", diff)
+		}
+		// everything should still be in cache
+		for i := range keys {
+			entry, ok := cache.cache[keys[i]]
+			if !ok {
+				t.Fatal("entry missing from cache")
+			}
+			if entry.dirty() {
+				t.Fatal("dirty entry after flush")
+			}
+			if entry.Value != values[i] {
+				t.Fatal("wrong value in entry")
+			}
+		}
+	})
+
+	t.Run("do not flush known values", func(t *testing.T) {
+		cache.Clear()
+		_ = cache.Load(keys[0], values[0])
+		cache.Store(keys[0], values[0])
+		stores := cache.Flush()
+		if len(stores) != 0 {
+			t.Fatal("unexpected store")
+		}
+	})
+}
diff --git a/arbos/util/tracing.go b/arbos/util/tracing.go
index f3564143c5..c4a7168977 100644
--- a/arbos/util/tracing.go
+++ b/arbos/util/tracing.go
@@ -4,11 +4,12 @@
 package util
 
 import (
-	"fmt"
+	"encoding/binary"
 	"math/big"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/vm"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/holiman/uint256"
 )
 
@@ -21,10 +22,11 @@ const (
 )
 
 type TracingInfo struct {
-	Tracer   vm.EVMLogger
-	Scenario TracingScenario
-	Contract *vm.Contract
-	Depth    int
+	Tracer       vm.EVMLogger
+	Scenario     TracingScenario
+	Contract     *vm.Contract
+	Depth        int
+	storageCache *storageCache
 }
 
 // holds an address to satisfy core/vm's ContractRef() interface
@@ -41,30 +43,14 @@ func NewTracingInfo(evm *vm.EVM, from, to common.Address, scenario TracingScenar
 		return nil
 	}
 	return &TracingInfo{
-		Tracer:   evm.Config.Tracer,
-		Scenario: scenario,
-		Contract: vm.NewContract(addressHolder{to}, addressHolder{from}, uint256.NewInt(0), 0),
-		Depth:    evm.Depth(),
+		Tracer:       evm.Config.Tracer,
+		Scenario:     scenario,
+		Contract:     vm.NewContract(addressHolder{to}, addressHolder{from}, uint256.NewInt(0), 0),
+		Depth:        evm.Depth(),
+		storageCache: newStorageCache(),
 	}
 }
 
-func (info *TracingInfo) RecordEmitLog(topics []common.Hash, data []byte) {
-	size := uint64(len(data))
-	var args []uint256.Int
-	args = append(args, *uint256.NewInt(0))    // offset: byte offset in the memory in bytes
-	args = append(args, *uint256.NewInt(size)) // size: byte size to copy (length of data)
-	for _, topic := range topics {
-		args = append(args, HashToUint256(topic)) // topic: 32-byte value. Max topics count is 4
-	}
-	scope := &vm.ScopeContext{
-		Memory:   TracingMemoryFromBytes(data),
-		Stack:    TracingStackFromArgs(args...),
-		Contract: info.Contract,
-	}
-	logType := fmt.Sprintf("LOG%d", len(topics))
-	info.Tracer.CaptureState(0, vm.StringToOp(logType), 0, 0, scope, []byte{}, info.Depth, nil)
-}
-
 func (info *TracingInfo) RecordStorageGet(key common.Hash) {
 	tracer := info.Tracer
 	if info.Scenario == TracingDuringEVM {
@@ -136,6 +122,428 @@ func (info *TracingInfo) MockCall(input []byte, gas uint64, from, to common.Addr
 	tracer.CaptureState(0, vm.POP, 0, 0, popScope, []byte{}, depth, nil)
 }
 
+func (info *TracingInfo) CaptureEVMTraceForHostio(name string, args, outs []byte, startInk, endInk uint64) {
+	checkArgs := func(want int) bool {
+		if len(args) < want {
+			log.Warn("tracing: missing arguments bytes for hostio", "name", name, "want", want, "got", len(args))
+			return false
+		}
+		return true
+	}
+
+	checkOuts := func(want int) bool {
+		if len(outs) < want {
+			log.Warn("tracing: missing outputs bytes for hostio", "name", name, "want", want, "got", len(outs))
+			return false
+		}
+		return true
+	}
+
+	firstOpcode := true
+	capture := func(op vm.OpCode, memory []byte, stackValues ...[]byte) {
+		const inkToGas = 10000
+		gas := endInk / inkToGas
+		var cost uint64
+		if firstOpcode {
+			cost = (startInk - endInk) / inkToGas
+			firstOpcode = false
+		} else {
+			// When capturing multiple opcodes, usually the first one is the relevant
+			// action and the following ones just pop the result values from the stack.
+			cost = 0
+		}
+		info.captureState(op, gas, cost, memory, stackValues...)
+ } + + switch name { + case "read_args": + destOffset := []byte(nil) + offset := []byte(nil) + size := lenToBytes(outs) + capture(vm.CALLDATACOPY, outs, destOffset, offset, size) + + case "storage_load_bytes32": + if !checkArgs(32) || !checkOuts(32) { + return + } + key := args[:32] + value := outs[:32] + if info.storageCache.Load(common.Hash(key), common.Hash(value)) { + capture(vm.SLOAD, nil, key) + capture(vm.POP, nil, value) + } + + case "storage_cache_bytes32": + if !checkArgs(32 + 32) { + return + } + key := args[:32] + value := args[32:64] + info.storageCache.Store(common.Hash(key), common.Hash(value)) + + case "storage_flush_cache": + if !checkArgs(1) { + return + } + toClear := args[0] != 0 + for _, store := range info.storageCache.Flush() { + capture(vm.SSTORE, nil, store.Key.Bytes(), store.Value.Bytes()) + } + if toClear { + info.storageCache.Clear() + } + + case "transient_load_bytes32": + if !checkArgs(32) || !checkOuts(32) { + return + } + key := args[:32] + value := outs[:32] + capture(vm.TLOAD, nil, key) + capture(vm.POP, nil, value) + + case "transient_store_bytes32": + if !checkArgs(32 + 32) { + return + } + key := args[:32] + value := args[32:64] + capture(vm.TSTORE, nil, key, value) + + case "create1": + if !checkArgs(32) || !checkOuts(20) { + return + } + value := args[:32] + code := args[32:] + offset := []byte(nil) + size := lenToBytes(code) + address := outs[:20] + capture(vm.CREATE, code, value, offset, size) + capture(vm.POP, code, address) + + case "create2": + if !checkArgs(32+32) || !checkOuts(20) { + return + } + value := args[:32] + salt := args[32:64] + code := args[64:] + offset := []byte(nil) + size := lenToBytes(code) + address := outs[:20] + capture(vm.CREATE2, code, value, offset, size, salt) + capture(vm.POP, code, address) + + case "read_return_data": + if !checkArgs(8) { + return + } + destOffset := []byte(nil) + offset := args[:4] + size := args[4:8] + capture(vm.RETURNDATACOPY, outs, destOffset, offset, size) + + case "return_data_size": + if !checkOuts(4) { + return + } + size := outs[:4] + capture(vm.RETURNDATASIZE, nil) + capture(vm.POP, nil, size) + + case "emit_log": + if !checkArgs(4) { + return + } + numTopics := int(binary.BigEndian.Uint32(args[:4])) + dataOffset := 4 + 32*numTopics + if !checkArgs(dataOffset) { + return + } + data := args[dataOffset:] + offset := []byte(nil) + size := lenToBytes(data) + opcode := vm.LOG0 + vm.OpCode(numTopics) + stack := [][]byte{offset, size} + for i := 0; i < numTopics; i++ { + topic := args[4+32*i : 4+32*(i+1)] + stack = append(stack, topic) + } + capture(opcode, data, stack...) 
+ + case "account_balance": + if !checkArgs(20) || !checkOuts(32) { + return + } + address := args[:20] + balance := outs[:32] + capture(vm.BALANCE, nil, address) + capture(vm.POP, nil, balance) + + case "account_code": + if !checkArgs(20 + 4 + 4) { + return + } + address := args[:20] + destOffset := []byte(nil) + offset := args[20:24] + size := args[24:28] + capture(vm.EXTCODECOPY, nil, address, destOffset, offset, size) + + case "account_code_size": + if !checkArgs(20) || !checkOuts(4) { + return + } + address := args[:20] + size := outs[:4] + capture(vm.EXTCODESIZE, nil, address) + capture(vm.POP, nil, size) + + case "account_codehash": + if !checkArgs(20) || !checkOuts(32) { + return + } + address := args[:20] + hash := outs[:32] + capture(vm.EXTCODEHASH, nil, address) + capture(vm.POP, nil, hash) + + case "block_basefee": + if !checkOuts(32) { + return + } + baseFee := outs[:32] + capture(vm.BASEFEE, nil) + capture(vm.POP, nil, baseFee) + + case "block_coinbase": + if !checkOuts(20) { + return + } + address := outs[:20] + capture(vm.COINBASE, nil) + capture(vm.POP, nil, address) + + case "block_gas_limit": + if !checkOuts(8) { + return + } + gasLimit := outs[:8] + capture(vm.GASLIMIT, nil) + capture(vm.POP, nil, gasLimit) + + case "block_number": + if !checkOuts(8) { + return + } + blockNumber := outs[:8] + capture(vm.NUMBER, nil) + capture(vm.POP, nil, blockNumber) + + case "block_timestamp": + if !checkOuts(8) { + return + } + timestamp := outs[:8] + capture(vm.TIMESTAMP, nil) + capture(vm.POP, nil, timestamp) + + case "chainid": + if !checkOuts(8) { + return + } + chainId := outs[:8] + capture(vm.CHAINID, nil) + capture(vm.POP, nil, chainId) + + case "contract_address": + if !checkOuts(20) { + return + } + address := outs[:20] + capture(vm.ADDRESS, nil) + capture(vm.POP, nil, address) + + case "evm_gas_left", "evm_ink_left": + if !checkOuts(8) { + return + } + gas := outs[:8] + capture(vm.GAS, nil) + capture(vm.POP, nil, gas) + + case "math_div": + if !checkArgs(32+32) || !checkOuts(32) { + return + } + a := args[:32] + b := args[32:64] + result := outs[:32] + capture(vm.DIV, nil, a, b) + capture(vm.POP, nil, result) + + case "math_mod": + if !checkArgs(32+32) || !checkOuts(32) { + return + } + a := args[:32] + b := args[32:64] + result := outs[:32] + capture(vm.MOD, nil, a, b) + capture(vm.POP, nil, result) + + case "math_pow": + if !checkArgs(32+32) || !checkOuts(32) { + return + } + a := args[:32] + b := args[32:64] + result := outs[:32] + capture(vm.EXP, nil, a, b) + capture(vm.POP, nil, result) + + case "math_add_mod": + if !checkArgs(32+32+32) || !checkOuts(32) { + return + } + a := args[:32] + b := args[32:64] + c := args[64:96] + result := outs[:32] + capture(vm.ADDMOD, nil, a, b, c) + capture(vm.POP, nil, result) + + case "math_mul_mod": + if !checkArgs(32+32+32) || !checkOuts(32) { + return + } + a := args[:32] + b := args[32:64] + c := args[64:96] + result := outs[:32] + capture(vm.MULMOD, nil, a, b, c) + capture(vm.POP, nil, result) + + case "msg_sender": + if !checkOuts(20) { + return + } + address := outs[:20] + capture(vm.CALLER, nil) + capture(vm.POP, nil, address) + + case "msg_value": + if !checkOuts(32) { + return + } + value := outs[:32] + capture(vm.CALLVALUE, nil) + capture(vm.POP, nil, value) + + case "native_keccak256": + if !checkOuts(32) { + return + } + offset := []byte(nil) + size := lenToBytes(args) + hash := outs[:32] + capture(vm.KECCAK256, args, offset, size) + capture(vm.POP, args, hash) + + case "tx_gas_price": + if !checkOuts(32) { + return + } + 
price := outs[:32]
+		capture(vm.GASPRICE, nil)
+		capture(vm.POP, nil, price)
+
+	case "tx_ink_price":
+		if !checkOuts(4) {
+			return
+		}
+		price := outs[:4]
+		capture(vm.GASPRICE, nil)
+		capture(vm.POP, nil, price)
+
+	case "tx_origin":
+		if !checkOuts(20) {
+			return
+		}
+		address := outs[:20]
+		capture(vm.ORIGIN, nil)
+		capture(vm.POP, nil, address)
+
+	case "call_contract", "delegate_call_contract", "static_call_contract":
+		// The API receives the CaptureHostIO after the EVM call is done, but we want to
+		// capture the opcode before it. So, we capture the state in CaptureStylusCall.
+
+	case "write_result", "exit_early":
+		// These calls are handled in CaptureStylusExit to also cover the normal exit case.
+
+	case "user_entrypoint", "user_returned", "msg_reentrant", "pay_for_memory_grow", "console_log_text", "console_log":
+		// No EVM counterpart
+
+	default:
+		log.Warn("unhandled hostio trace", "name", name)
+	}
+}
+
+func (info *TracingInfo) CaptureStylusCall(opCode vm.OpCode, contract common.Address, value *uint256.Int, input []byte, gas, startGas, baseCost uint64) {
+	var stack [][]byte
+	stack = append(stack, intToBytes(gas))  // gas
+	stack = append(stack, contract.Bytes()) // address
+	if opCode == vm.CALL {
+		stack = append(stack, value.Bytes()) // call value
+	}
+	stack = append(stack, []byte(nil))       // memory offset
+	stack = append(stack, lenToBytes(input)) // memory length
+	stack = append(stack, []byte(nil))       // return offset
+	stack = append(stack, []byte(nil))       // return size
+	info.captureState(opCode, startGas, baseCost+gas, input, stack...)
+}
+
+func (info *TracingInfo) CaptureStylusExit(status uint8, data []byte, err error, gas uint64) {
+	var opCode vm.OpCode
+	if status == 0 {
+		if len(data) == 0 {
+			info.captureState(vm.STOP, gas, 0, nil)
+			return
+		}
+		opCode = vm.RETURN
+	} else {
+		opCode = vm.REVERT
+		if data == nil {
+			data = []byte(err.Error())
+		}
+	}
+	offset := []byte(nil)
+	size := lenToBytes(data)
+	info.captureState(opCode, gas, 0, data, offset, size)
+}
+
+func (info *TracingInfo) captureState(op vm.OpCode, gas uint64, cost uint64, memory []byte, stackValues ...[]byte) {
+	stack := []uint256.Int{}
+	for _, value := range stackValues {
+		stack = append(stack, *uint256.NewInt(0).SetBytes(value))
+	}
+	scope := &vm.ScopeContext{
+		Memory:   TracingMemoryFromBytes(memory),
+		Stack:    TracingStackFromArgs(stack...),
+		Contract: info.Contract,
+	}
+	info.Tracer.CaptureState(0, op, gas, cost, scope, []byte{}, info.Depth, nil)
+}
+
+func lenToBytes(data []byte) []byte {
+	return intToBytes(uint64(len(data)))
+}
+
+func intToBytes(v uint64) []byte {
+	return binary.BigEndian.AppendUint64(nil, v)
+}
+
 func HashToUint256(hash common.Hash) uint256.Int {
 	value := uint256.Int{}
 	value.SetBytes(hash.Bytes())
diff --git a/cmd/chaininfo/arbitrum_chain_info.json b/cmd/chaininfo/arbitrum_chain_info.json
index 7d47d13e84..524433a7b5 100644
--- a/cmd/chaininfo/arbitrum_chain_info.json
+++ b/cmd/chaininfo/arbitrum_chain_info.json
@@ -164,7 +164,7 @@
             "EnableArbOS": true,
             "AllowDebugPrecompiles": true,
             "DataAvailabilityCommittee": false,
-            "InitialArbOSVersion": 11,
+            "InitialArbOSVersion": 31,
             "InitialChainOwner": "0x0000000000000000000000000000000000000000",
             "GenesisBlockNum": 0
         }
@@ -196,7 +196,7 @@
             "EnableArbOS": true,
             "AllowDebugPrecompiles": true,
             "DataAvailabilityCommittee": true,
-            "InitialArbOSVersion": 11,
+            "InitialArbOSVersion": 31,
             "InitialChainOwner": "0x0000000000000000000000000000000000000000",
             "GenesisBlockNum": 0
         }
diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go
index 416ee17c86..ab6bf3181d 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -238,6 +238,10 @@ func mainImpl() int { if nodeConfig.Execution.Sequencer.Enable != nodeConfig.Node.Sequencer { log.Error("consensus and execution must agree if sequencing is enabled or not", "Execution.Sequencer.Enable", nodeConfig.Execution.Sequencer.Enable, "Node.Sequencer", nodeConfig.Node.Sequencer) } + if nodeConfig.Node.SeqCoordinator.Enable && !nodeConfig.Node.ParentChainReader.Enable { + log.Error("Sequencer coordinator must be enabled with parent chain reader, try starting node with --parent-chain.connection.url") + return 1 + } var dataSigner signature.DataSignerFunc var l1TransactionOptsValidator *bind.TransactOpts diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 3564cfe48a..e14eb45a27 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -14,6 +14,7 @@ import ( "net" "net/http" "os" + "reflect" "strconv" "strings" "testing" @@ -1121,6 +1122,13 @@ func Fatal(t *testing.T, printables ...interface{}) { testhelpers.FailImpl(t, printables...) } +func CheckEqual[T any](t *testing.T, want T, got T, printables ...interface{}) { + t.Helper() + if !reflect.DeepEqual(want, got) { + testhelpers.FailImpl(t, "wrong result, want ", want, ", got ", got, printables) + } +} + func Create2ndNodeWithConfig( t *testing.T, ctx context.Context, diff --git a/system_tests/program_test.go b/system_tests/program_test.go index 5fa5db95c2..ae34c6c5bb 100644 --- a/system_tests/program_test.go +++ b/system_tests/program_test.go @@ -1617,6 +1617,35 @@ func multicallAppend(calls []byte, opcode vm.OpCode, address common.Address, inn return calls } +func multicallEmptyArgs() []byte { + return []byte{0} // number of actions +} + +func multicallAppendStore(args []byte, key, value common.Hash, emitLog bool) []byte { + var action byte = 0x10 + if emitLog { + action |= 0x08 + } + args[0] += 1 + args = binary.BigEndian.AppendUint32(args, 1+64) // length + args = append(args, action) + args = append(args, key.Bytes()...) + args = append(args, value.Bytes()...) + return args +} + +func multicallAppendLoad(args []byte, key common.Hash, emitLog bool) []byte { + var action byte = 0x11 + if emitLog { + action |= 0x08 + } + args[0] += 1 + args = binary.BigEndian.AppendUint32(args, 1+32) // length + args = append(args, action) + args = append(args, key.Bytes()...) + return args +} + func assertStorageAt( t *testing.T, ctx context.Context, l2client *ethclient.Client, contract common.Address, key, value common.Hash, ) { diff --git a/system_tests/stylus_trace_test.go b/system_tests/stylus_trace_test.go new file mode 100644 index 0000000000..cb303874d6 --- /dev/null +++ b/system_tests/stylus_trace_test.go @@ -0,0 +1,479 @@ +// Copyright 2022-2024, Offchain Labs, Inc. 
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE + +package arbtest + +import ( + "bytes" + "encoding/binary" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/tracers/logger" + "github.com/holiman/uint256" + "github.com/offchainlabs/nitro/arbos/util" + "github.com/offchainlabs/nitro/solgen/go/mocksgen" + "github.com/offchainlabs/nitro/solgen/go/precompilesgen" + "github.com/offchainlabs/nitro/util/colors" + "github.com/offchainlabs/nitro/util/testhelpers" +) + +var skipCheck = []byte("skip") + +func checkOpcode(t *testing.T, result logger.ExecutionResult, index int, wantOp vm.OpCode, wantStack ...[]byte) { + CheckEqual(t, wantOp.String(), result.StructLogs[index].Op, "wrong opcode") + for i, wantBytes := range wantStack { + if !bytes.Equal(wantBytes, skipCheck) { + wantVal := uint256.NewInt(0).SetBytes(wantBytes).Hex() + logStack := *result.StructLogs[index].Stack + // the stack is in reverse order in log + if i > len(logStack) { + Fatal(t, "missing values in log stack") + } + CheckEqual(t, wantVal, logStack[len(logStack)-1-i], "wrong stack for opcode", wantOp) + } + } +} + +func sendAndTraceTransaction( + t *testing.T, + builder *NodeBuilder, + program common.Address, + value *big.Int, + data []byte, +) logger.ExecutionResult { + ctx := builder.ctx + l2client := builder.L2.Client + l2info := builder.L2Info + rpcClient := builder.L2.ConsensusNode.Stack.Attach() + + tx := l2info.PrepareTxTo("Owner", &program, l2info.TransferGas, value, data) + err := l2client.SendTransaction(ctx, tx) + Require(t, err) + + var result logger.ExecutionResult + err = rpcClient.CallContext(ctx, &result, "debug_traceTransaction", tx.Hash(), nil) + Require(t, err, "failed to trace call") + + colors.PrintGrey("Call trace:") + colors.PrintGrey("i\tdepth\topcode\tstack") + for i, log := range result.StructLogs { + if log.Stack == nil { + stack := []string{} + log.Stack = &stack + } + colors.PrintGrey(i, "\t", log.Depth, "\t", log.Op, "\t", *log.Stack) + if i > 100 { + break + } + } + + return result +} + +func intToBytes(v int) []byte { + return binary.BigEndian.AppendUint64(nil, uint64(v)) +} + +func TestStylusOpcodeTraceStorage(t *testing.T) { + const jit = false + builder, auth, cleanup := setupProgramTest(t, jit) + ctx := builder.ctx + l2client := builder.L2.Client + defer cleanup() + + program := deployWasm(t, ctx, auth, l2client, rustFile("storage")) + + key := testhelpers.RandomHash() + value := testhelpers.RandomHash() + + trans := func(data []byte) []byte { + data[0] += 2 + return data + } + + // storage_cache_bytes32 + result := sendAndTraceTransaction(t, builder, program, nil, argsForStorageWrite(key, value)) + checkOpcode(t, result, 3, vm.SSTORE, key[:], value[:]) + + // storage_load_bytes32 + result = sendAndTraceTransaction(t, builder, program, nil, argsForStorageRead(key)) + checkOpcode(t, result, 3, vm.SLOAD, key[:]) + checkOpcode(t, result, 4, vm.POP, value[:]) + + // transient_store_bytes32 + result = sendAndTraceTransaction(t, builder, program, nil, trans(argsForStorageWrite(key, value))) + checkOpcode(t, result, 3, vm.TSTORE, key[:], value[:]) + + // transient_load_bytes32 + result = sendAndTraceTransaction(t, builder, program, nil, trans(argsForStorageRead(key))) + checkOpcode(t, result, 3, vm.TLOAD, key[:]) + checkOpcode(t, result, 4, vm.POP, nil) +} + +func 
TestStylusOpcodeTraceNativeKeccak(t *testing.T) { + const jit = false + builder, auth, cleanup := setupProgramTest(t, jit) + ctx := builder.ctx + l2client := builder.L2.Client + defer cleanup() + + program := deployWasm(t, ctx, auth, l2client, watFile("timings/keccak")) + + args := binary.LittleEndian.AppendUint32(nil, 1) // rounds + args = append(args, testhelpers.RandomSlice(123)...) + hash := crypto.Keccak256Hash(args) // the keccak.wat program computes the hash of the whole args + + // native_keccak256 + result := sendAndTraceTransaction(t, builder, program, nil, args) + checkOpcode(t, result, 3, vm.KECCAK256, nil, intToBytes(len(args))) + checkOpcode(t, result, 4, vm.POP, hash[:]) +} + +func TestStylusOpcodeTraceMath(t *testing.T) { + const jit = false + builder, auth, cleanup := setupProgramTest(t, jit) + ctx := builder.ctx + l2client := builder.L2.Client + defer cleanup() + + program := deployWasm(t, ctx, auth, l2client, rustFile("math")) + result := sendAndTraceTransaction(t, builder, program, nil, nil) + + value := common.Hex2Bytes("eddecf107b5740cef7f5a01e3ea7e287665c4e75a8eb6afae2fda2e3d4367786") + unknown := common.Hex2Bytes("c6178c2de1078cd36c3bd302cde755340d7f17fcb3fcc0b9c333ba03b217029f") + ed25519 := common.Hex2Bytes("fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f") + results := [][]byte{ + common.Hex2Bytes("b28a98598473836430b84078e55690d279cca19b9922f248c6a6ad6588d12494"), + common.Hex2Bytes("265b7ffdc26469bd58409a734987e66a5ece71a2312970d5403f395d24a31b85"), + common.Hex2Bytes("00000000000000002947e87fd2cf7e1eacd01ef1286c0d795168d90db4fc5bb3"), + common.Hex2Bytes("c4b1cfcc1423392b29d826de0b3779a096d543ad2b71f34aa4596bd97f493fbb"), + common.Hex2Bytes("00000000000000000000000000000000000000000000000015d41b922f2eafc5"), + } + + // math_mul_mod + checkOpcode(t, result, 3, vm.MULMOD, value, unknown, ed25519) + checkOpcode(t, result, 4, vm.POP, results[0]) + + // math_add_mod + checkOpcode(t, result, 5, vm.ADDMOD, results[0], ed25519, unknown) + checkOpcode(t, result, 6, vm.POP, results[1]) + + // math_div + checkOpcode(t, result, 7, vm.DIV, results[1], value[:8]) + checkOpcode(t, result, 8, vm.POP, results[2]) + + // math_pow + checkOpcode(t, result, 9, vm.EXP, results[2], ed25519[24:32]) + checkOpcode(t, result, 10, vm.POP, results[3]) + + // math_mod + checkOpcode(t, result, 11, vm.MOD, results[3], unknown[:8]) + checkOpcode(t, result, 12, vm.POP, results[4]) +} + +func TestStylusOpcodeTraceExit(t *testing.T) { + const jit = false + builder, auth, cleanup := setupProgramTest(t, jit) + ctx := builder.ctx + l2client := builder.L2.Client + defer cleanup() + + // normal exit with return value + program := deployWasm(t, ctx, auth, l2client, rustFile("storage")) + key := testhelpers.RandomHash() + result := sendAndTraceTransaction(t, builder, program, nil, argsForStorageRead(key)) + size := intToBytes(32) + checkOpcode(t, result, 5, vm.RETURN, nil, size) + + // stop with exit early + program = deployWasm(t, ctx, auth, l2client, watFile("exit-early/exit-early")) + result = sendAndTraceTransaction(t, builder, program, nil, nil) + checkOpcode(t, result, 3, vm.STOP) + + // revert + program = deployWasm(t, ctx, auth, l2client, watFile("exit-early/panic-after-write")) + result = sendAndTraceTransaction(t, builder, program, nil, nil) + size = intToBytes(len("execution reverted")) + checkOpcode(t, result, 3, vm.REVERT, nil, size) +} + +func TestStylusOpcodeTraceEvmData(t *testing.T) { + const jit = false + builder, auth, cleanup := setupProgramTest(t, jit) + ctx := 
builder.ctx + l2info := builder.L2Info + l2client := builder.L2.Client + defer cleanup() + + program := deployWasm(t, ctx, auth, l2client, rustFile("evm-data")) + + fundedAddr := l2info.GetAddress("Faucet") + ethPrecompile := common.BigToAddress(big.NewInt(1)) + arbTestAddress := types.ArbosTestAddress + burnArbGas, _ := util.NewCallParser(precompilesgen.ArbosTestABI, "burnArbGas") + gasToBurn := uint64(1000000) + callBurnData, err := burnArbGas(new(big.Int).SetUint64(gasToBurn)) + Require(t, err) + + data := []byte{} + data = append(data, fundedAddr.Bytes()...) + data = append(data, ethPrecompile.Bytes()...) + data = append(data, arbTestAddress.Bytes()...) + data = append(data, program.Bytes()...) + data = append(data, callBurnData...) + result := sendAndTraceTransaction(t, builder, program, nil, data) + + fundedBalance, err := l2client.BalanceAt(ctx, fundedAddr, nil) + Require(t, err) + programCode, err := l2client.CodeAt(ctx, program, nil) + Require(t, err) + programCodehash := crypto.Keccak256(programCode) + owner := l2info.GetAddress("Owner") + + // read_args + checkOpcode(t, result, 2, vm.CALLDATACOPY, nil, nil, intToBytes(len(data))) + + // account_balance + checkOpcode(t, result, 3, vm.BALANCE, fundedAddr[:]) + checkOpcode(t, result, 4, vm.POP, fundedBalance.Bytes()) + + // account_codehash + checkOpcode(t, result, 9, vm.EXTCODEHASH, program[:]) + checkOpcode(t, result, 10, vm.POP, programCodehash) + + // account_code_size + checkOpcode(t, result, 11, vm.EXTCODESIZE, program[:]) + checkOpcode(t, result, 12, vm.POP, intToBytes(len(programCode))) + + // account_code + checkOpcode(t, result, 13, vm.EXTCODECOPY, program[:], nil, nil, intToBytes(len(programCode))) + + // block_basefee + checkOpcode(t, result, 26, vm.BASEFEE) + checkOpcode(t, result, 27, vm.POP, skipCheck) + + // chainid + checkOpcode(t, result, 28, vm.CHAINID) + checkOpcode(t, result, 29, vm.POP, intToBytes(412346)) + + // block_coinbase + checkOpcode(t, result, 30, vm.COINBASE) + checkOpcode(t, result, 31, vm.POP, skipCheck) + + // block_gas_limit + checkOpcode(t, result, 32, vm.GASLIMIT) + checkOpcode(t, result, 33, vm.POP, skipCheck) + + // block_timestamp + checkOpcode(t, result, 34, vm.TIMESTAMP) + checkOpcode(t, result, 35, vm.POP, skipCheck) + + // contract_address + checkOpcode(t, result, 36, vm.ADDRESS) + checkOpcode(t, result, 37, vm.POP, program[:]) + + // msg_sender + checkOpcode(t, result, 38, vm.CALLER) + checkOpcode(t, result, 39, vm.POP, owner[:]) + + // msg_value + checkOpcode(t, result, 40, vm.CALLVALUE) + checkOpcode(t, result, 41, vm.POP, nil) + + // tx_origin + checkOpcode(t, result, 42, vm.ORIGIN) + checkOpcode(t, result, 43, vm.POP, owner[:]) + + // tx_gas_price + checkOpcode(t, result, 44, vm.GASPRICE) + checkOpcode(t, result, 45, vm.POP, skipCheck) + + // tx_ink_price + checkOpcode(t, result, 46, vm.GASPRICE) + checkOpcode(t, result, 47, vm.POP, skipCheck) + + // block_number + checkOpcode(t, result, 48, vm.NUMBER) + checkOpcode(t, result, 49, vm.POP, skipCheck) + + // evm_gas_left + checkOpcode(t, result, 50, vm.GAS) + checkOpcode(t, result, 51, vm.POP, skipCheck) + + // evm_ink_left + checkOpcode(t, result, 52, vm.GAS) + checkOpcode(t, result, 53, vm.POP, skipCheck) +} + +func TestStylusOpcodeTraceLog(t *testing.T) { + const jit = false + builder, auth, cleanup := setupProgramTest(t, jit) + ctx := builder.ctx + l2client := builder.L2.Client + defer cleanup() + + program := deployWasm(t, ctx, auth, l2client, rustFile("log")) + + const numTopics = 4 + const logSize = 123 + expectedStack := 
[][]byte{nil, intToBytes(logSize)} + args := []byte{numTopics} + for i := 0; i < numTopics; i++ { + topic := testhelpers.RandomSlice(32) + expectedStack = append(expectedStack, topic) + args = append(args, topic...) // topic + } + args = append(args, testhelpers.RandomSlice(logSize)...) // log + + result := sendAndTraceTransaction(t, builder, program, nil, args) + + // emit_log + checkOpcode(t, result, 3, vm.LOG4, expectedStack...) +} + +func TestStylusOpcodeTraceReturnDataSize(t *testing.T) { + const jit = false + builder, auth, cleanup := setupProgramTest(t, jit) + ctx := builder.ctx + l2client := builder.L2.Client + defer cleanup() + + program := deployWasm(t, ctx, auth, l2client, watFile("timings/return_data_size")) + args := binary.LittleEndian.AppendUint32(nil, 1) // rounds + result := sendAndTraceTransaction(t, builder, program, nil, args) + + // return_data_size + checkOpcode(t, result, 3, vm.RETURNDATASIZE) + checkOpcode(t, result, 4, vm.POP, nil) +} + +func TestStylusOpcodeTraceCall(t *testing.T) { + const jit = false + builder, auth, cleanup := setupProgramTest(t, jit) + ctx := builder.ctx + l2client := builder.L2.Client + defer cleanup() + + storage := deployWasm(t, ctx, auth, l2client, rustFile("storage")) + multicall := deployWasm(t, ctx, auth, l2client, rustFile("multicall")) + key := testhelpers.RandomHash() + gas := skipCheck + innerArgs := argsForStorageRead(key) + argsLen := intToBytes(len(innerArgs)) + returnLen := intToBytes(32) + + args := argsForMulticall(vm.CALL, storage, nil, innerArgs) + args = multicallAppend(args, vm.DELEGATECALL, storage, innerArgs) + args = multicallAppend(args, vm.STATICCALL, storage, innerArgs) + result := sendAndTraceTransaction(t, builder, multicall, nil, args) + + // call_contract + checkOpcode(t, result, 3, vm.CALL, gas, storage[:], nil, nil, argsLen, nil, nil) + + // read_return_data + checkOpcode(t, result, 8, vm.RETURNDATACOPY, nil, nil, returnLen) + + // delegate_call_contract + checkOpcode(t, result, 9, vm.DELEGATECALL, gas, storage[:], nil, argsLen, nil, nil) + + // static_call_contract + checkOpcode(t, result, 15, vm.STATICCALL, gas, storage[:], nil, argsLen, nil, nil) +} + +func TestStylusOpcodeTraceCreate(t *testing.T) { + const jit = false + builder, auth, cleanup := setupProgramTest(t, jit) + ctx := builder.ctx + l2client := builder.L2.Client + defer cleanup() + + program := deployWasm(t, ctx, auth, l2client, rustFile("create")) + + deployWasm, _ := readWasmFile(t, rustFile("storage")) + deployCode := deployContractInitCode(deployWasm, false) + startValue := testhelpers.RandomCallValue(1e5) + salt := testhelpers.RandomHash() + create1Addr := crypto.CreateAddress(program, 1) + create2Addr := crypto.CreateAddress2(program, salt, crypto.Keccak256(deployCode)) + + // create1 + create1Args := []byte{0x01} + create1Args = append(create1Args, common.BigToHash(startValue).Bytes()...) + create1Args = append(create1Args, deployCode...) + result := sendAndTraceTransaction(t, builder, program, startValue, create1Args) + checkOpcode(t, result, 10, vm.CREATE, startValue.Bytes(), nil, intToBytes(len(deployCode))) + checkOpcode(t, result, 11, vm.POP, create1Addr[:]) + + // create2 + create2Args := []byte{0x02} + create2Args = append(create2Args, common.BigToHash(startValue).Bytes()...) + create2Args = append(create2Args, salt[:]...) + create2Args = append(create2Args, deployCode...) 
+	result = sendAndTraceTransaction(t, builder, program, startValue, create2Args)
+	checkOpcode(t, result, 10, vm.CREATE2, startValue.Bytes(), nil, intToBytes(len(deployCode)), salt[:])
+	checkOpcode(t, result, 11, vm.POP, create2Addr[:])
+}
+
+// TestStylusOpcodeTraceEquivalence compares a Stylus trace with an equivalent Solidity/EVM trace. Notice that
+// the Stylus trace does not contain all opcodes from the Solidity/EVM trace. Instead, this test
+// only checks that both traces contain the same basic opcodes.
+func TestStylusOpcodeTraceEquivalence(t *testing.T) {
+	const jit = false
+	builder, auth, cleanup := setupProgramTest(t, jit)
+	ctx := builder.ctx
+	l2client := builder.L2.Client
+	defer cleanup()
+
+	key := testhelpers.RandomHash()
+	value := testhelpers.RandomHash()
+	args := multicallEmptyArgs()
+	// We have to load first; otherwise, Stylus optimizes out the load after a store.
+	args = multicallAppendLoad(args, key, true)
+	args = multicallAppendStore(args, key, value, true)
+
+	// Trace recursive call in wasm
+	wasmMulticall := deployWasm(t, ctx, auth, l2client, rustFile("multicall"))
+	colors.PrintGrey("wasm multicall deployed at ", wasmMulticall)
+	wasmArgs := argsForMulticall(vm.CALL, wasmMulticall, nil, args)
+	wasmResult := sendAndTraceTransaction(t, builder, wasmMulticall, nil, wasmArgs)
+
+	// Trace recursive call in evm
+	evmMulticall, tx, _, err := mocksgen.DeployMultiCallTest(&auth, builder.L2.Client)
+	Require(t, err)
+	_, err = EnsureTxSucceeded(ctx, l2client, tx)
+	Require(t, err)
+	colors.PrintGrey("evm multicall deployed at ", evmMulticall)
+	evmArgs := argsForMulticall(vm.CALL, evmMulticall, nil, args)
+	evmResult := sendAndTraceTransaction(t, builder, evmMulticall, nil, evmArgs)
+
+	// For some opcodes in the wasmTrace, make sure there is an equivalent one in the evmTrace.
+	argsLen := intToBytes(len(args))
+	offset := skipCheck
+	checkOpcode(t, wasmResult, 3, vm.CALL, skipCheck, wasmMulticall[:], nil, offset, argsLen, offset, nil)
+	checkOpcode(t, evmResult, 3120, vm.CALL, skipCheck, evmMulticall[:], nil, offset, argsLen, offset, nil)
+
+	checkOpcode(t, wasmResult, 5, vm.SLOAD, key[:])
+	checkOpcode(t, evmResult, 3853, vm.SLOAD, key[:])
+
+	topic := common.Hex2Bytes("6ab08a9a891703dcd5859f8e8328215fef6d9f250e7d58267bee45aabaee2fa8")
+	logLen := intToBytes(0x60)
+	checkOpcode(t, wasmResult, 7, vm.LOG1, offset, logLen, topic)
+	checkOpcode(t, evmResult, 3970, vm.LOG1, offset, logLen, topic)
+
+	checkOpcode(t, wasmResult, 8, vm.SSTORE, key[:], value[:])
+	checkOpcode(t, evmResult, 4723, vm.SSTORE, key[:], value[:])
+
+	// inner return
+	returnLen := intToBytes(0x20)
+	checkOpcode(t, wasmResult, 10, vm.RETURN, offset, returnLen)
+	checkOpcode(t, evmResult, 4828, vm.RETURN, offset, returnLen)
+
+	// outer return
+	checkOpcode(t, wasmResult, 12, vm.RETURN, offset, returnLen)
+	checkOpcode(t, evmResult, 5078, vm.RETURN, offset, returnLen)
+}
diff --git a/util/redisutil/redis_coordinator.go b/util/redisutil/redis_coordinator.go
index 59e3b0e0f9..2c12ffec50 100644
--- a/util/redisutil/redis_coordinator.go
+++ b/util/redisutil/redis_coordinator.go
@@ -13,12 +13,13 @@ import (
 	"github.com/offchainlabs/nitro/arbutil"
 )
 
-const CHOSENSEQ_KEY string = "coordinator.chosen"                 // Never overwritten. Expires or released only
-const MSG_COUNT_KEY string = "coordinator.msgCount"               // Only written by sequencer holding CHOSEN key
-const PRIORITIES_KEY string = "coordinator.priorities"            // Read only
-const WANTS_LOCKOUT_KEY_PREFIX string = "coordinator.liveliness." // Per server.
Only written by self -const MESSAGE_KEY_PREFIX string = "coordinator.msg." // Per Message. Only written by sequencer holding CHOSEN -const SIGNATURE_KEY_PREFIX string = "coordinator.msg.sig." // Per Message. Only written by sequencer holding CHOSEN +const CHOSENSEQ_KEY string = "coordinator.chosen" // Never overwritten. Expires or released only +const MSG_COUNT_KEY string = "coordinator.msgCount" // Only written by sequencer holding CHOSEN key +const FINALIZED_MSG_COUNT_KEY string = "coordinator.finalizedMsgCount" // Only written by sequencer holding CHOSEN key +const PRIORITIES_KEY string = "coordinator.priorities" // Read only +const WANTS_LOCKOUT_KEY_PREFIX string = "coordinator.liveliness." // Per server. Only written by self +const MESSAGE_KEY_PREFIX string = "coordinator.msg." // Per Message. Only written by sequencer holding CHOSEN +const SIGNATURE_KEY_PREFIX string = "coordinator.msg.sig." // Per Message. Only written by sequencer holding CHOSEN const WANTS_LOCKOUT_VAL string = "OK" const INVALID_VAL string = "INVALID" const INVALID_URL string = ""
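
For reference, the chunked deletion added in `deleteFinalizedMsgsFromRedis` walks the collected keys tail-first in batches of up to 1000, so in the init path (where keys are collected from the highest finalized message downward) the lowest sequence numbers are removed first. A minimal standalone sketch of that iteration pattern, with a hypothetical `deleteBatch` helper standing in for the Redis `Del` call:

```go
package main

import "fmt"

// deleteBatch stands in for c.Client.Del(ctx, keys...) in the hunk above.
func deleteBatch(keys []string) error {
	fmt.Println("deleting", keys)
	return nil
}

func main() {
	// In the init path keys are appended from the highest finalized message
	// down, so walking the slice tail-first deletes the lowest sequence
	// numbers first.
	keys := []string{"msg.7", "msg.6", "msg.5", "msg.4", "msg.3", "msg.2", "msg.1"}

	const batchDeleteCount = 3 // the real code uses 1000
	for i := len(keys); i > 0; i -= batchDeleteCount {
		if err := deleteBatch(keys[max(0, i-batchDeleteCount):i]); err != nil {
			return
		}
	}
}
```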
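The cache in `arbos/util/storage_cache.go` only reports an SSTORE for entries whose value differs from the last value known to the EVM, so a load followed by a store of the same value flushes nothing. A small sketch of that behavior, written as a hypothetical extra test in package `util`; it only calls functions defined in the new file:

```go
package util

import (
	"testing"

	"github.com/ethereum/go-ethereum/common"
)

func TestStorageCacheSketch(t *testing.T) {
	key := common.HexToHash("0x01")
	valueA := common.HexToHash("0x0a")
	valueB := common.HexToHash("0x0b")

	cache := newStorageCache()

	// First load comes from the EVM, so the tracer should emit SLOAD.
	if !cache.Load(key, valueA) {
		t.Fatal("expected first load to emit SLOAD")
	}

	// Storing the same value leaves the entry clean: Flush emits no SSTORE.
	cache.Store(key, valueA)
	if stores := cache.Flush(); len(stores) != 0 {
		t.Fatal("expected no SSTORE for unchanged value")
	}

	// Storing a different value marks the entry dirty, so Flush reports it once.
	cache.Store(key, valueB)
	if stores := cache.Flush(); len(stores) != 1 || stores[0].Value != valueB {
		t.Fatal("expected a single SSTORE with the new value")
	}
}
```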
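`CaptureStylusExit` maps a Stylus exit onto the opcode an EVM tracer expects: STOP for a success with no return data, RETURN for a success with data, and REVERT for any failure. A standalone rendering of just that branch logic, using an illustrative `exitOpcode` helper that is not part of the diff:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/vm"
)

// exitOpcode mirrors the branch structure of CaptureStylusExit: status 0 with
// no data traces as STOP, status 0 with data as RETURN, and any failure
// status as REVERT.
func exitOpcode(status uint8, data []byte) vm.OpCode {
	if status == 0 {
		if len(data) == 0 {
			return vm.STOP
		}
		return vm.RETURN
	}
	return vm.REVERT
}

func main() {
	fmt.Println(exitOpcode(0, nil))            // STOP
	fmt.Println(exitOpcode(0, []byte{0x01}))   // RETURN
	fmt.Println(exitOpcode(1, []byte("oops"))) // REVERT
}
```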
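The `checkOpcode` helper in the new system test reads stack items from the struct logger's output, which records the stack bottom-to-top, so item `i` counting from the top sits at index `len(stack)-1-i`. A tiny illustration of that indexing with plain strings in place of the logger's hex values:

```go
package main

import "fmt"

// topItem returns the i-th stack item counting from the top, mirroring the
// logStack[len(logStack)-1-i] indexing used by checkOpcode.
func topItem(stack []string, i int) string {
	return stack[len(stack)-1-i]
}

func main() {
	// A struct-logger stack is recorded bottom-to-top: the last element is
	// the top of the stack.
	stack := []string{"bottom", "middle", "top"}
	fmt.Println(topItem(stack, 0)) // top
	fmt.Println(topItem(stack, 1)) // middle
	fmt.Println(topItem(stack, 2)) // bottom
}
```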