diff --git a/.dockerignore b/.dockerignore index 51424900e8..2d5303a3be 100644 --- a/.dockerignore +++ b/.dockerignore @@ -36,6 +36,7 @@ arbitrator/tools/wasmer/target/ arbitrator/tools/wasm-tools/ arbitrator/tools/pricers/ arbitrator/tools/module_roots/ +arbitrator/tools/stylus_benchmark arbitrator/langs/rust/target/ arbitrator/langs/bf/target/ diff --git a/.github/workflows/arbitrator-ci.yml b/.github/workflows/arbitrator-ci.yml index 47646017ac..dd58a30571 100644 --- a/.github/workflows/arbitrator-ci.yml +++ b/.github/workflows/arbitrator-ci.yml @@ -171,6 +171,9 @@ jobs: - name: Rustfmt - langs/rust run: cargo fmt --all --manifest-path arbitrator/langs/rust/Cargo.toml -- --check + - name: Rustfmt - tools/stylus_benchmark + run: cargo fmt --all --manifest-path arbitrator/tools/stylus_benchmark/Cargo.toml -- --check + - name: Make proofs from test cases run: make -j test-gen-proofs diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8ed49634ad..1eda1d9b7e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -20,7 +20,7 @@ jobs: redis: image: redis ports: - - 6379:6379 + - 6379:6379 strategy: fail-fast: false @@ -192,7 +192,7 @@ jobs: - name: run challenge tests if: matrix.test-mode == 'challenge' - run: ${{ github.workspace }}/.github/workflows/gotestsum.sh --tags challengetest --run TestChallenge --cover + run: ${{ github.workspace }}/.github/workflows/gotestsum.sh --tags challengetest --run TestChallenge --timeout 60m --cover - name: run stylus tests if: matrix.test-mode == 'stylus' diff --git a/.github/workflows/submodule-pin-check.yml b/.github/workflows/submodule-pin-check.yml index 60dd8ad827..94fa705655 100644 --- a/.github/workflows/submodule-pin-check.yml +++ b/.github/workflows/submodule-pin-check.yml @@ -25,9 +25,9 @@ jobs: run: | status_state="pending" declare -Ar exceptions=( - [contracts]=origin/develop + [contracts]=origin/pre-bold [nitro-testnode]=origin/master - + #TODO Rachel to check these are the intended branches. [arbitrator/langs/c]=origin/vm-storage-cache [arbitrator/tools/wasmer]=origin/adopt-v4.2.8 @@ -38,7 +38,7 @@ jobs: if [[ -v exceptions[$mod] ]]; then branch=${exceptions[$mod]} fi - + if ! git -C $mod merge-base --is-ancestor HEAD $branch; then echo $mod diverges from $branch divergent=1 diff --git a/.gitmodules b/.gitmodules index d4d26282ae..24df007a79 100644 --- a/.gitmodules +++ b/.gitmodules @@ -23,6 +23,9 @@ [submodule "nitro-testnode"] path = nitro-testnode url = https://github.com/OffchainLabs/nitro-testnode.git +[submodule "bold"] + path = bold + url = https://github.com/OffchainLabs/bold.git [submodule "arbitrator/langs/rust"] path = arbitrator/langs/rust url = https://github.com/OffchainLabs/stylus-sdk-rs.git diff --git a/Dockerfile b/Dockerfile index c64d07ad16..de15567131 100644 --- a/Dockerfile +++ b/Dockerfile @@ -234,6 +234,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ COPY go.mod go.sum ./ COPY go-ethereum/go.mod go-ethereum/go.sum go-ethereum/ COPY fastcache/go.mod fastcache/go.sum fastcache/ +COPY bold/go.mod bold/go.sum bold/ RUN go mod download COPY . 
./ COPY --from=contracts-builder workspace/contracts/build/ contracts/build/ diff --git a/LICENSE.md b/LICENSE.md index 25768b3010..13e28a591a 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -19,7 +19,7 @@ Additional Use Grant: You may use the Licensed Work in a production environment validating the correctness of the posted chain state, or to deploy and operate (x) a blockchain that settles to a Covered Arbitrum Chain or (y) a blockchain in accordance with, and subject to, the [Arbitrum - Expansion Program Term of Use](https://docs.arbitrum.foundation/assets/files/Arbitrum%20Expansion%20Program%20Jan182024-4f08b0c2cb476a55dc153380fa3e64b0.pdf). For purposes of this + Expansion Program Term of Use](https://docs.arbitrum.foundation/aep/ArbitrumExpansionProgramTerms.pdf). For purposes of this Additional Use Grant, the "Covered Arbitrum Chains" are (a) Arbitrum One (chainid:42161), Arbitrum Nova (chainid:42170), Arbitrum Rinkeby testnet/Rinkarby (chainid:421611),Arbitrum Nitro diff --git a/README.md b/README.md index 1f0e4ac81c..30904238dc 100644 --- a/README.md +++ b/README.md @@ -47,7 +47,7 @@ Nitro is currently licensed under a [Business Source License](./LICENSE.md), sim The Additional Use Grant also permits the deployment of the Nitro software, in a permissionless fashion and without cost, as a new blockchain provided that the chain settles to either Arbitrum One or Arbitrum Nova. -For those that prefer to deploy the Nitro software either directly on Ethereum (i.e. an L2) or have it settle to another Layer-2 on top of Ethereum, the [Arbitrum Expansion Program (the "AEP")](https://docs.arbitrum.foundation/assets/files/Arbitrum%20Expansion%20Program%20Jan182024-4f08b0c2cb476a55dc153380fa3e64b0.pdf) was recently established. The AEP allows for the permissionless deployment in the aforementioned fashion provided that 10% of net revenue (as more fully described in the AEP) is contributed back to the Arbitrum community in accordance with the requirements of the AEP. +For those that prefer to deploy the Nitro software either directly on Ethereum (i.e. an L2) or have it settle to another Layer-2 on top of Ethereum, the [Arbitrum Expansion Program (the "AEP")](https://docs.arbitrum.foundation/aep/ArbitrumExpansionProgramTerms.pdf) was recently established. The AEP allows for the permissionless deployment in the aforementioned fashion provided that 10% of net revenue (as more fully described in the AEP) is contributed back to the Arbitrum community in accordance with the requirements of the AEP. ## Contact diff --git a/arbitrator/Cargo.toml b/arbitrator/Cargo.toml index eaafb6e439..3c5228daf2 100644 --- a/arbitrator/Cargo.toml +++ b/arbitrator/Cargo.toml @@ -12,6 +12,7 @@ members = [ exclude = [ "stylus/tests/", "tools/wasmer/", + "tools/stylus_benchmark", ] resolver = "2" diff --git a/arbitrator/arbutil/src/benchmark.rs b/arbitrator/arbutil/src/benchmark.rs new file mode 100644 index 0000000000..580d0191a0 --- /dev/null +++ b/arbitrator/arbutil/src/benchmark.rs @@ -0,0 +1,14 @@ +// Copyright 2024, Offchain Labs, Inc. 
+// For license information, see https://github.com/nitro/blob/master/LICENSE + +use crate::evm::api::Ink; +use std::time::{Duration, Instant}; + +// Benchmark is used to track the performance of blocks of code in stylus +#[derive(Clone, Copy, Debug, Default)] +pub struct Benchmark { + pub timer: Option<Instant>, + pub elapsed_total: Duration, + pub ink_start: Option<Ink>, + pub ink_total: Ink, +} diff --git a/arbitrator/arbutil/src/lib.rs b/arbitrator/arbutil/src/lib.rs index 9c48a9fefc..e17e8d9448 100644 --- a/arbitrator/arbutil/src/lib.rs +++ b/arbitrator/arbutil/src/lib.rs @@ -1,6 +1,7 @@ // Copyright 2022-2024, Offchain Labs, Inc. // For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE +pub mod benchmark; /// cbindgen:ignore pub mod color; pub mod crypto; diff --git a/arbitrator/jit/src/lib.rs b/arbitrator/jit/src/lib.rs new file mode 100644 index 0000000000..d0ad76bd03 --- /dev/null +++ b/arbitrator/jit/src/lib.rs @@ -0,0 +1,51 @@ +// Copyright 2021-2024, Offchain Labs, Inc. +// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE + +use std::path::PathBuf; +use structopt::StructOpt; + +mod arbcompress; +mod caller_env; +pub mod machine; +mod prepare; +pub mod program; +mod socket; +pub mod stylus_backend; +mod test; +mod wasip1_stub; +mod wavmio; + +#[derive(StructOpt)] +#[structopt(name = "jit-prover")] +pub struct Opts { + #[structopt(short, long)] + binary: PathBuf, + #[structopt(long, default_value = "0")] + inbox_position: u64, + #[structopt(long, default_value = "0")] + delayed_inbox_position: u64, + #[structopt(long, default_value = "0")] + position_within_message: u64, + #[structopt(long)] + last_block_hash: Option<String>, + #[structopt(long)] + last_send_root: Option<String>, + #[structopt(long)] + inbox: Vec<PathBuf>, + #[structopt(long)] + delayed_inbox: Vec<PathBuf>, + #[structopt(long)] + preimages: Option<PathBuf>, + #[structopt(long)] + cranelift: bool, + #[structopt(long)] + forks: bool, + #[structopt(long)] + pub debug: bool, + #[structopt(long)] + pub require_success: bool, + // JSON inputs supercede any of the command-line inputs which could + // be specified in the JSON file. + #[structopt(long)] + json_inputs: Option<PathBuf>, +} diff --git a/arbitrator/jit/src/main.rs b/arbitrator/jit/src/main.rs index 6e44500215..e19fabc250 100644 --- a/arbitrator/jit/src/main.rs +++ b/arbitrator/jit/src/main.rs @@ -1,58 +1,13 @@ // Copyright 2022-2024, Offchain Labs, Inc.
// For license information, see https://github.com/nitro/blob/master/LICENSE -use crate::machine::{Escape, WasmEnv}; use arbutil::{color, Color}; use eyre::Result; -use std::path::PathBuf; +use jit::machine; +use jit::machine::{Escape, WasmEnv}; +use jit::Opts; use structopt::StructOpt; -mod arbcompress; -mod caller_env; -mod machine; -mod prepare; -mod program; -mod socket; -mod stylus_backend; -mod test; -mod wasip1_stub; -mod wavmio; - -#[derive(StructOpt)] -#[structopt(name = "jit-prover")] -pub struct Opts { - #[structopt(short, long)] - binary: PathBuf, - #[structopt(long, default_value = "0")] - inbox_position: u64, - #[structopt(long, default_value = "0")] - delayed_inbox_position: u64, - #[structopt(long, default_value = "0")] - position_within_message: u64, - #[structopt(long)] - last_block_hash: Option, - #[structopt(long)] - last_send_root: Option, - #[structopt(long)] - inbox: Vec, - #[structopt(long)] - delayed_inbox: Vec, - #[structopt(long)] - preimages: Option, - #[structopt(long)] - cranelift: bool, - #[structopt(long)] - forks: bool, - #[structopt(long)] - debug: bool, - #[structopt(long)] - require_success: bool, - // JSON inputs supercede any of the command-line inputs which could - // be specified in the JSON file. - #[structopt(long)] - json_inputs: Option, -} - fn main() -> Result<()> { let opts = Opts::from_args(); let env = match WasmEnv::cli(&opts) { diff --git a/arbitrator/jit/src/prepare.rs b/arbitrator/jit/src/prepare.rs index e7a7ba0f4d..62dd063b75 100644 --- a/arbitrator/jit/src/prepare.rs +++ b/arbitrator/jit/src/prepare.rs @@ -1,7 +1,7 @@ // Copyright 2022-2024, Offchain Labs, Inc. // For license information, see https://github.com/nitro/blob/master/LICENSE -use crate::WasmEnv; +use crate::machine::WasmEnv; use arbutil::{Bytes32, PreimageType}; use eyre::Ok; use prover::parse_input::FileData; diff --git a/arbitrator/jit/src/program.rs b/arbitrator/jit/src/program.rs index f10a059748..d80b3771c6 100644 --- a/arbitrator/jit/src/program.rs +++ b/arbitrator/jit/src/program.rs @@ -4,8 +4,8 @@ #![allow(clippy::too_many_arguments)] use crate::caller_env::JitEnv; -use crate::machine::{Escape, MaybeEscape, WasmEnvMut}; -use crate::stylus_backend::exec_wasm; +use crate::machine::{Escape, MaybeEscape, WasmEnv, WasmEnvMut}; +use crate::stylus_backend::{exec_wasm, MessageFromCothread}; use arbutil::evm::api::Gas; use arbutil::Bytes32; use arbutil::{evm::EvmData, format::DebugBytes, heapify}; @@ -16,6 +16,7 @@ use prover::{ machine::Module, programs::{config::PricingParams, prelude::*}, }; +use std::sync::Arc; const DEFAULT_STYLUS_ARBOS_VERSION: u64 = 31; @@ -130,10 +131,6 @@ pub fn new_program( let evm_data: EvmData = unsafe { *Box::from_raw(evm_data_handler as *mut EvmData) }; let config: JitConfig = unsafe { *Box::from_raw(stylus_config_handler as *mut JitConfig) }; - // buy ink - let pricing = config.stylus.pricing; - let ink = pricing.gas_to_ink(Gas(gas)); - let Some(module) = exec.module_asms.get(&compiled_hash).cloned() else { return Err(Escape::Failure(format!( "module hash {:?} not found in {:?}", @@ -142,6 +139,21 @@ pub fn new_program( ))); }; + exec_program(exec, module, calldata, config, evm_data, gas) +} + +pub fn exec_program( + exec: &mut WasmEnv, + module: Arc<[u8]>, + calldata: Vec, + config: JitConfig, + evm_data: EvmData, + gas: u64, +) -> Result { + // buy ink + let pricing = config.stylus.pricing; + let ink = pricing.gas_to_ink(Gas(gas)); + let cothread = exec_wasm( module, calldata, @@ -162,7 +174,10 @@ pub fn new_program( /// returns request_id 
for the first request from the program pub fn start_program(mut env: WasmEnvMut, module: u32) -> Result { let (_, exec) = env.jit_env(); + start_program_with_wasm_env(exec, module) +} +pub fn start_program_with_wasm_env(exec: &mut WasmEnv, module: u32) -> Result { if exec.threads.len() as u32 != module || module == 0 { return Escape::hostio(format!( "got request for thread {module} but len is {}", @@ -179,13 +194,18 @@ pub fn start_program(mut env: WasmEnvMut, module: u32) -> Result { /// request_id MUST be last request id returned from start_program or send_response pub fn get_request(mut env: WasmEnvMut, id: u32, len_ptr: GuestPtr) -> Result { let (mut mem, exec) = env.jit_env(); + let msg = get_last_msg(exec, id)?; + mem.write_u32(len_ptr, msg.req_data.len() as u32); + Ok(msg.req_type) +} + +pub fn get_last_msg(exec: &mut WasmEnv, id: u32) -> Result { let thread = exec.threads.last_mut().unwrap(); let msg = thread.last_message()?; if msg.1 != id { return Escape::hostio("get_request id doesn't match"); }; - mem.write_u32(len_ptr, msg.0.req_data.len() as u32); - Ok(msg.0.req_type) + Ok(msg.0) } // gets data associated with last request. @@ -193,12 +213,8 @@ pub fn get_request(mut env: WasmEnvMut, id: u32, len_ptr: GuestPtr) -> Result MaybeEscape { let (mut mem, exec) = env.jit_env(); - let thread = exec.threads.last_mut().unwrap(); - let msg = thread.last_message()?; - if msg.1 != id { - return Escape::hostio("get_request id doesn't match"); - }; - mem.write_slice(data_ptr, &msg.0.req_data); + let msg = get_last_msg(exec, id)?; + mem.write_slice(data_ptr, &msg.req_data); Ok(()) } @@ -217,11 +233,21 @@ pub fn set_response( let result = mem.read_slice(result_ptr, result_len as usize); let raw_data = mem.read_slice(raw_data_ptr, raw_data_len as usize); + set_response_with_wasm_env(exec, id, gas, result, raw_data) +} + +pub fn set_response_with_wasm_env( + exec: &mut WasmEnv, + id: u32, + gas: u64, + result: Vec, + raw_data: Vec, +) -> MaybeEscape { let thread = exec.threads.last_mut().unwrap(); thread.set_response(id, result, raw_data, Gas(gas)) } -/// sends previos response +/// sends previous response /// MUST be called right after set_response to the same id /// returns request_id for the next request pub fn send_response(mut env: WasmEnvMut, req_id: u32) -> Result { @@ -239,7 +265,10 @@ pub fn send_response(mut env: WasmEnvMut, req_id: u32) -> Result { /// removes the last created program pub fn pop(mut env: WasmEnvMut) -> MaybeEscape { let (_, exec) = env.jit_env(); + pop_with_wasm_env(exec) +} +pub fn pop_with_wasm_env(exec: &mut WasmEnv) -> MaybeEscape { match exec.threads.pop() { None => Err(Escape::Child(eyre!("no child"))), Some(mut thread) => thread.wait_done(), @@ -247,8 +276,8 @@ pub fn pop(mut env: WasmEnvMut) -> MaybeEscape { } pub struct JitConfig { - stylus: StylusConfig, - compile: CompileConfig, + pub stylus: StylusConfig, + pub compile: CompileConfig, } /// Creates a `StylusConfig` from its component parts. 
diff --git a/arbitrator/jit/src/stylus_backend.rs b/arbitrator/jit/src/stylus_backend.rs index 0d8c477c6c..d250780dd9 100644 --- a/arbitrator/jit/src/stylus_backend.rs +++ b/arbitrator/jit/src/stylus_backend.rs @@ -4,6 +4,7 @@ #![allow(clippy::too_many_arguments)] use crate::machine::{Escape, MaybeEscape}; +use arbutil::benchmark::Benchmark; use arbutil::evm::api::{Gas, Ink, VecReader}; use arbutil::evm::{ api::{EvmApiMethod, EVM_API_METHOD_REQ_OFFSET}, @@ -35,6 +36,7 @@ struct MessageToCothread { pub struct MessageFromCothread { pub req_type: u32, pub req_data: Vec, + pub benchmark: Benchmark, } struct CothreadRequestor { @@ -51,6 +53,7 @@ impl RequestHandler for CothreadRequestor { let msg = MessageFromCothread { req_type: req_type as u32 + EVM_API_METHOD_REQ_OFFSET, req_data: req_data.as_ref().to_vec(), + benchmark: Benchmark::default(), }; if let Err(error) = self.tx.send(msg) { @@ -169,6 +172,7 @@ pub fn exec_wasm( let msg = MessageFromCothread { req_data: output, req_type: out_kind as u32, + benchmark: instance.env().benchmark, }; instance .env_mut() diff --git a/arbitrator/prover/src/lib.rs b/arbitrator/prover/src/lib.rs index 08473c2598..a147786086 100644 --- a/arbitrator/prover/src/lib.rs +++ b/arbitrator/prover/src/lib.rs @@ -36,6 +36,7 @@ use once_cell::sync::OnceCell; use static_assertions::const_assert_eq; use std::{ ffi::CStr, + marker::PhantomData, num::NonZeroUsize, os::raw::{c_char, c_int}, path::Path, @@ -59,11 +60,67 @@ pub struct CByteArray { } #[repr(C)] -#[derive(Clone, Copy)] -pub struct RustByteArray { +pub struct RustSlice<'a> { + pub ptr: *const u8, + pub len: usize, + pub phantom: PhantomData<&'a [u8]>, +} + +impl<'a> RustSlice<'a> { + pub fn new(slice: &'a [u8]) -> Self { + if slice.is_empty() { + return Self { + ptr: ptr::null(), + len: 0, + phantom: PhantomData, + }; + } + Self { + ptr: slice.as_ptr(), + len: slice.len(), + phantom: PhantomData, + } + } +} + +#[repr(C)] +pub struct RustBytes { pub ptr: *mut u8, pub len: usize, - pub capacity: usize, + pub cap: usize, +} + +impl RustBytes { + pub unsafe fn into_vec(self) -> Vec { + Vec::from_raw_parts(self.ptr, self.len, self.cap) + } + + pub unsafe fn write(&mut self, mut vec: Vec) { + if vec.capacity() == 0 { + *self = RustBytes { + ptr: ptr::null_mut(), + len: 0, + cap: 0, + }; + return; + } + self.ptr = vec.as_mut_ptr(); + self.len = vec.len(); + self.cap = vec.capacity(); + std::mem::forget(vec); + } +} + +/// Frees the vector. Does nothing when the vector is null. +/// +/// # Safety +/// +/// Must only be called once per vec. 
+#[no_mangle] +pub unsafe extern "C" fn free_rust_bytes(vec: RustBytes) { + if !vec.ptr.is_null() { + drop(vec.into_vec()) + } } #[no_mangle] @@ -127,6 +184,12 @@ pub unsafe extern "C" fn arbitrator_load_wavm_binary(binary_path: *const c_char) } } +#[no_mangle] +#[cfg(feature = "native")] +pub unsafe extern "C" fn arbitrator_new_finished(gs: GlobalState) -> *mut Machine { + Box::into_raw(Box::new(Machine::new_finished(gs))) +} + unsafe fn cstr_to_string(c_str: *const c_char) -> String { CStr::from_ptr(c_str).to_string_lossy().into_owned() } @@ -404,18 +467,6 @@ pub unsafe extern "C" fn arbitrator_module_root(mach: *mut Machine) -> Bytes32 { #[no_mangle] #[cfg(feature = "native")] -pub unsafe extern "C" fn arbitrator_gen_proof(mach: *mut Machine) -> RustByteArray { - let mut proof = (*mach).serialize_proof(); - let ret = RustByteArray { - ptr: proof.as_mut_ptr(), - len: proof.len(), - capacity: proof.capacity(), - }; - std::mem::forget(proof); - ret -} - -#[no_mangle] -pub unsafe extern "C" fn arbitrator_free_proof(proof: RustByteArray) { - drop(Vec::from_raw_parts(proof.ptr, proof.len, proof.capacity)) +pub unsafe extern "C" fn arbitrator_gen_proof(mach: *mut Machine, out: *mut RustBytes) { + (*out).write((*mach).serialize_proof()); } diff --git a/arbitrator/prover/src/machine.rs b/arbitrator/prover/src/machine.rs index dec355ac7c..0d39d87e77 100644 --- a/arbitrator/prover/src/machine.rs +++ b/arbitrator/prover/src/machine.rs @@ -1565,6 +1565,36 @@ impl Machine { Ok(mach) } + // new_finished returns a Machine in the Finished state at step 0. + // + // This allows the Machine to be set up to model the final state of the + // machine at the end of the execution of a block. + pub fn new_finished(gs: GlobalState) -> Machine { + Machine { + steps: 0, + status: MachineStatus::Finished, + global_state: gs, + // The machine is in the Finished state, so nothing else really matters. + // value_stacks and frame_stacks cannot be empty for proof serialization, + // but everything else can just be entirely blank. + thread_state: ThreadState::Main, + value_stacks: vec![Vec::new()], + frame_stacks: vec![Vec::new()], + internal_stack: Default::default(), + modules: Default::default(), + modules_merkle: Default::default(), + pc: Default::default(), + stdio_output: Default::default(), + inbox_contents: Default::default(), + first_too_far: Default::default(), + preimage_resolver: PreimageResolverWrapper::new(Arc::new(|_, _, _| None)), + stylus_modules: Default::default(), + initial_hash: Default::default(), + context: Default::default(), + debug_info: Default::default(), + } + } + pub fn new_from_wavm(wavm_binary: &Path) -> Result<Machine> { let mut modules: Vec<Module> = { let compressed = std::fs::read(wavm_binary)?; @@ -2867,6 +2897,15 @@ impl Machine { let mod_merkle = self.get_modules_merkle(); out!(mod_merkle.root()); + if self.is_halted() { + // If the machine is halted, instead of serializing the module, + // serialize the global state and return. + // This is for the "kickstart" BoLD proof, but it's backwards compatible + // with the old OSP behavior which reads no further.
+ out!(self.global_state.serialize()); + return data; + } + // End machine serialization, serialize module let module = &self.modules[self.pc.module()]; diff --git a/arbitrator/stylus/src/env.rs b/arbitrator/stylus/src/env.rs index a153fb5bf1..a2c8189029 100644 --- a/arbitrator/stylus/src/env.rs +++ b/arbitrator/stylus/src/env.rs @@ -2,6 +2,7 @@ // For license information, see https://github.com/nitro/blob/master/LICENSE use arbutil::{ + benchmark::Benchmark, evm::{ api::{DataReader, EvmApi, Ink}, EvmData, @@ -48,6 +49,8 @@ pub struct WasmEnv> { pub compile: CompileConfig, /// The runtime config pub config: Option, + // Used to benchmark execution blocks of code + pub benchmark: Benchmark, // Using the unused generic parameter D in a PhantomData field _data_reader_marker: PhantomData, } @@ -68,6 +71,7 @@ impl> WasmEnv { outs: vec![], memory: None, meter: None, + benchmark: Benchmark::default(), _data_reader_marker: PhantomData, } } diff --git a/arbitrator/stylus/src/evm_api.rs b/arbitrator/stylus/src/evm_api.rs index 0dd27e3f8c..7aa605dfe7 100644 --- a/arbitrator/stylus/src/evm_api.rs +++ b/arbitrator/stylus/src/evm_api.rs @@ -1,11 +1,12 @@ // Copyright 2022-2024, Offchain Labs, Inc. // For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE -use crate::{GoSliceData, RustSlice}; +use crate::GoSliceData; use arbutil::evm::{ api::{EvmApiMethod, Gas, EVM_API_METHOD_REQ_OFFSET}, req::RequestHandler, }; +use prover::RustSlice; #[repr(C)] pub struct NativeRequestHandler { diff --git a/arbitrator/stylus/src/host.rs b/arbitrator/stylus/src/host.rs index c72cafc316..67497302a1 100644 --- a/arbitrator/stylus/src/host.rs +++ b/arbitrator/stylus/src/host.rs @@ -5,6 +5,7 @@ use crate::env::{Escape, HostioInfo, MaybeEscape, WasmEnv, WasmEnvMut}; use arbutil::{ + benchmark::Benchmark, evm::{ api::{DataReader, EvmApi, Gas, Ink}, EvmData, @@ -46,6 +47,10 @@ where &self.evm_data } + fn benchmark(&mut self) -> &mut Benchmark { + &mut self.env.benchmark + } + fn evm_return_data_len(&mut self) -> &mut u32 { &mut self.evm_data.return_data_len } @@ -464,3 +469,13 @@ pub(crate) fn console_tee, T: Into + Copy>( } pub(crate) fn null_host>(_: WasmEnvMut) {} + +pub(crate) fn start_benchmark>( + mut env: WasmEnvMut, +) -> MaybeEscape { + hostio!(env, start_benchmark()) +} + +pub(crate) fn end_benchmark>(mut env: WasmEnvMut) -> MaybeEscape { + hostio!(env, end_benchmark()) +} diff --git a/arbitrator/stylus/src/lib.rs b/arbitrator/stylus/src/lib.rs index e7f10c2400..c73c4b2c2e 100644 --- a/arbitrator/stylus/src/lib.rs +++ b/arbitrator/stylus/src/lib.rs @@ -15,9 +15,12 @@ use cache::{deserialize_module, CacheMetrics, InitCache}; use evm_api::NativeRequestHandler; use eyre::ErrReport; use native::NativeInstance; -use prover::programs::{prelude::*, StylusData}; +use prover::{ + programs::{prelude::*, StylusData}, + RustBytes, +}; use run::RunProgram; -use std::{marker::PhantomData, mem, ptr}; +use std::ptr; use target_cache::{target_cache_get, target_cache_set}; pub use brotli; @@ -76,52 +79,15 @@ impl DataReader for GoSliceData { } } -#[repr(C)] -pub struct RustSlice<'a> { - ptr: *const u8, - len: usize, - phantom: PhantomData<&'a [u8]>, -} - -impl<'a> RustSlice<'a> { - fn new(slice: &'a [u8]) -> Self { - Self { - ptr: slice.as_ptr(), - len: slice.len(), - phantom: PhantomData, - } - } -} - -#[repr(C)] -pub struct RustBytes { - ptr: *mut u8, - len: usize, - cap: usize, +unsafe fn write_err(output: &mut RustBytes, err: ErrReport) -> UserOutcomeKind { + output.write(err.debug_bytes()); + 
UserOutcomeKind::Failure } -impl RustBytes { - unsafe fn into_vec(self) -> Vec { - Vec::from_raw_parts(self.ptr, self.len, self.cap) - } - - unsafe fn write(&mut self, mut vec: Vec) { - self.ptr = vec.as_mut_ptr(); - self.len = vec.len(); - self.cap = vec.capacity(); - mem::forget(vec); - } - - unsafe fn write_err(&mut self, err: ErrReport) -> UserOutcomeKind { - self.write(err.debug_bytes()); - UserOutcomeKind::Failure - } - - unsafe fn write_outcome(&mut self, outcome: UserOutcome) -> UserOutcomeKind { - let (status, outs) = outcome.into_data(); - self.write(outs); - status - } +unsafe fn write_outcome(output: &mut RustBytes, outcome: UserOutcome) -> UserOutcomeKind { + let (status, outs) = outcome.into_data(); + output.write(outs); + status } /// "activates" a user wasm. @@ -164,7 +130,7 @@ pub unsafe extern "C" fn stylus_activate( gas, ) { Ok(val) => val, - Err(err) => return output.write_err(err), + Err(err) => return write_err(output, err), }; *module_hash = module.hash(); @@ -194,16 +160,16 @@ pub unsafe extern "C" fn stylus_compile( let output = &mut *output; let name = match String::from_utf8(name.slice().to_vec()) { Ok(val) => val, - Err(err) => return output.write_err(err.into()), + Err(err) => return write_err(output, err.into()), }; let target = match target_cache_get(&name) { Ok(val) => val, - Err(err) => return output.write_err(err), + Err(err) => return write_err(output, err), }; let asm = match native::compile(wasm, version, debug, target) { Ok(val) => val, - Err(err) => return output.write_err(err), + Err(err) => return write_err(output, err), }; output.write(asm); @@ -218,7 +184,7 @@ pub unsafe extern "C" fn wat_to_wasm(wat: GoSliceData, output: *mut RustBytes) - let output = &mut *output; let wasm = match wasmer::wat2wasm(wat.slice()) { Ok(val) => val, - Err(err) => return output.write_err(err.into()), + Err(err) => return write_err(output, err.into()), }; output.write(wasm.into_owned()); UserOutcomeKind::Success @@ -241,16 +207,16 @@ pub unsafe extern "C" fn stylus_target_set( let output = &mut *output; let name = match String::from_utf8(name.slice().to_vec()) { Ok(val) => val, - Err(err) => return output.write_err(err.into()), + Err(err) => return write_err(output, err.into()), }; let desc_str = match String::from_utf8(description.slice().to_vec()) { Ok(val) => val, - Err(err) => return output.write_err(err.into()), + Err(err) => return write_err(output, err.into()), }; if let Err(err) = target_cache_set(name, desc_str, native) { - return output.write_err(err); + return write_err(output, err); }; UserOutcomeKind::Success @@ -298,8 +264,8 @@ pub unsafe extern "C" fn stylus_call( }; let status = match instance.run_main(&calldata, config, ink) { - Err(e) | Ok(UserOutcome::Failure(e)) => output.write_err(e.wrap_err("call failed")), - Ok(outcome) => output.write_outcome(outcome), + Err(e) | Ok(UserOutcome::Failure(e)) => write_err(output, e.wrap_err("call failed")), + Ok(outcome) => write_outcome(output, outcome), }; let ink_left = match status { UserOutcomeKind::OutOfStack => Ink(0), // take all gas when out of stack @@ -352,18 +318,6 @@ pub extern "C" fn stylus_reorg_vm(_block: u64, arbos_tag: u32) { InitCache::clear_long_term(arbos_tag); } -/// Frees the vector. Does nothing when the vector is null. -/// -/// # Safety -/// -/// Must only be called once per vec. -#[no_mangle] -pub unsafe extern "C" fn stylus_drop_vec(vec: RustBytes) { - if !vec.ptr.is_null() { - mem::drop(vec.into_vec()) - } -} - /// Gets cache metrics. 
/// /// # Safety diff --git a/arbitrator/stylus/src/native.rs b/arbitrator/stylus/src/native.rs index 0fbdb342f3..a31df1034c 100644 --- a/arbitrator/stylus/src/native.rs +++ b/arbitrator/stylus/src/native.rs @@ -212,6 +212,8 @@ impl> NativeInstance { imports.define("console", "tee_f32", func!(host::console_tee::)); imports.define("console", "tee_f64", func!(host::console_tee::)); imports.define("debug", "null_host", func!(host::null_host)); + imports.define("debug", "start_benchmark", func!(host::start_benchmark)); + imports.define("debug", "end_benchmark", func!(host::end_benchmark)); } let instance = Instance::new(&mut store, &module, &imports)?; let exports = &instance.exports; @@ -429,6 +431,8 @@ pub fn module(wasm: &[u8], compile: CompileConfig, target: Target) -> Result) { + let _ = match str::from_utf8(req_data) { + Ok(v) => v, + Err(e) => panic!("Invalid UTF-8 sequence: {}", e), + }; + + match req_type { + 0 => return, + 1 => panic!("ErrExecutionReverted user revert"), + 2 => panic!("ErrExecutionReverted user failure"), + 3 => panic!("ErrOutOfGas user out of ink"), + 4 => panic!("ErrDepth user out of stack"), + _ => panic!("ErrExecutionReverted user unknown"), + } +} + +fn run(compiled_module: Vec) -> (Duration, Ink) { + let calldata = Vec::from([0u8; 32]); + let evm_data = EvmData::default(); + let config = JitConfig { + stylus: StylusConfig { + version: 2, + max_depth: 10000, + pricing: PricingParams { ink_price: 1 }, + }, + compile: CompileConfig::version(2, true), + }; + + let exec = &mut WasmEnv::default(); + + let module = jit::program::exec_program( + exec, + compiled_module.into(), + calldata, + config, + evm_data, + u64::MAX, + ) + .unwrap(); + + let req_id = jit::program::start_program_with_wasm_env(exec, module).unwrap(); + let msg = jit::program::get_last_msg(exec, req_id).unwrap(); + if msg.req_type < EVM_API_METHOD_REQ_OFFSET { + let _ = jit::program::pop_with_wasm_env(exec); + + let req_data = msg.req_data[8..].to_vec(); + check_result(msg.req_type, &req_data); + } else { + panic!("unsupported request type {:?}", msg.req_type); + } + + (msg.benchmark.elapsed_total, msg.benchmark.ink_total) +} + +pub fn benchmark(wat: Vec) -> eyre::Result<()> { + let wasm = wasmer::wat2wasm(&wat)?; + + let compiled_module = native::compile(&wasm, 2, true, Target::default())?; + + let mut durations: Vec = Vec::new(); + let mut ink_spent = Ink(0); + for i in 0..NUMBER_OF_BENCHMARK_RUNS { + print!("Run {:?}, ", i); + let (duration_run, ink_spent_run) = run(compiled_module.clone()); + durations.push(duration_run); + ink_spent = ink_spent_run; + println!( + "duration: {:?}, ink_spent: {:?}", + duration_run, ink_spent_run + ); + } + + // discard top and bottom runs + durations.sort(); + let l = NUMBER_OF_TOP_AND_BOTTOM_RUNS_TO_DISCARD as usize; + let r = NUMBER_OF_BENCHMARK_RUNS as usize - NUMBER_OF_TOP_AND_BOTTOM_RUNS_TO_DISCARD as usize; + durations = durations[l..r].to_vec(); + + let avg_duration = durations.iter().sum::() / (r - l) as u32; + let avg_ink_spent_per_micro_second = ink_spent.0 / avg_duration.as_micros() as u64; + println!("After discarding top and bottom runs: "); + println!( + "avg_duration: {:?}, avg_ink_spent_per_micro_second: {:?}", + avg_duration, avg_ink_spent_per_micro_second + ); + + Ok(()) +} diff --git a/arbitrator/tools/stylus_benchmark/src/main.rs b/arbitrator/tools/stylus_benchmark/src/main.rs new file mode 100644 index 0000000000..4b8971ecab --- /dev/null +++ b/arbitrator/tools/stylus_benchmark/src/main.rs @@ -0,0 +1,44 @@ +// Copyright 2021-2024, Offchain 
Labs, Inc. +// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE + +mod benchmark; +mod scenario; + +use clap::Parser; +use scenario::Scenario; +use std::path::PathBuf; +use strum::IntoEnumIterator; + +#[derive(Parser, Debug)] +#[command(version, about, long_about = None)] +struct Args { + #[arg(short, long)] + output_wat_dir_path: Option<PathBuf>, + + #[arg(short, long)] + scenario: Option<Scenario>, +} + +fn handle_scenario(scenario: Scenario, output_wat_dir_path: Option<PathBuf>) -> eyre::Result<()> { + println!("Benchmarking {}", scenario); + let wat = scenario::generate_wat(scenario, output_wat_dir_path); + benchmark::benchmark(wat) +} + +fn main() -> eyre::Result<()> { + let args = Args::parse(); + + match args.scenario { + Some(scenario) => handle_scenario(scenario, args.output_wat_dir_path), + None => { + println!("No scenario specified, benchmarking all scenarios\n"); + for scenario in Scenario::iter() { + let benchmark_result = handle_scenario(scenario, args.output_wat_dir_path.clone()); + if let Err(err) = benchmark_result { + return Err(err); + } + } + Ok(()) + } + } +} diff --git a/arbitrator/tools/stylus_benchmark/src/scenario.rs b/arbitrator/tools/stylus_benchmark/src/scenario.rs new file mode 100644 index 0000000000..348678ed69 --- /dev/null +++ b/arbitrator/tools/stylus_benchmark/src/scenario.rs @@ -0,0 +1,128 @@ +// Copyright 2021-2024, Offchain Labs, Inc. +// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE + +use std::fs::File; +use std::io::Write; +use std::path::PathBuf; +use strum_macros::{Display, EnumIter, EnumString}; + +#[derive(Copy, Clone, PartialEq, Eq, Debug, EnumString, Display, EnumIter)] +pub enum Scenario { + #[strum(serialize = "add_i32")] + AddI32, + #[strum(serialize = "xor_i32")] + XorI32, +} + +// Programs to be benchmarked have a loop in which several similar operations are executed. +// The number of operations per loop is chosen to be large enough that the overhead of the loop itself is negligible, +// but small enough to keep the overall program size down. +// Keeping a small program size is important to make better use of the CPU cache, so the code stays in the cache.
+ +fn write_wat_beginning(wat: &mut Vec) { + wat.write_all(b"(module\n").unwrap(); + wat.write_all(b" (import \"debug\" \"start_benchmark\" (func $start_benchmark))\n") + .unwrap(); + wat.write_all(b" (import \"debug\" \"end_benchmark\" (func $end_benchmark))\n") + .unwrap(); + wat.write_all(b" (memory (export \"memory\") 0 0)\n") + .unwrap(); + wat.write_all(b" (global $ops_counter (mut i32) (i32.const 0))\n") + .unwrap(); + wat.write_all(b" (func (export \"user_entrypoint\") (param i32) (result i32)\n") + .unwrap(); + + wat.write_all(b" call $start_benchmark\n").unwrap(); + + wat.write_all(b" (loop $loop\n").unwrap(); +} + +fn write_wat_end( + wat: &mut Vec, + number_of_loop_iterations: usize, + number_of_ops_per_loop_iteration: usize, +) { + let number_of_ops = number_of_loop_iterations * number_of_ops_per_loop_iteration; + + // update ops_counter + wat.write_all(b" global.get $ops_counter\n") + .unwrap(); + wat.write_all( + format!( + " i32.const {}\n", + number_of_ops_per_loop_iteration + ) + .as_bytes(), + ) + .unwrap(); + wat.write_all(b" i32.add\n").unwrap(); + wat.write_all(b" global.set $ops_counter\n") + .unwrap(); + + // check if we need to continue looping + wat.write_all(b" global.get $ops_counter\n") + .unwrap(); + wat.write_all(format!(" i32.const {}\n", number_of_ops).as_bytes()) + .unwrap(); + wat.write_all(b" i32.lt_s\n").unwrap(); + wat.write_all(b" br_if $loop)\n").unwrap(); + + wat.write_all(b" call $end_benchmark\n").unwrap(); + + wat.write_all(b" i32.const 0)\n").unwrap(); + wat.write_all(b")").unwrap(); +} + +fn wat(write_wat_ops: fn(&mut Vec, usize)) -> Vec { + let number_of_loop_iterations = 200_000; + let number_of_ops_per_loop_iteration = 2000; + + let mut wat = Vec::new(); + + write_wat_beginning(&mut wat); + + write_wat_ops(&mut wat, number_of_ops_per_loop_iteration); + + write_wat_end( + &mut wat, + number_of_loop_iterations, + number_of_ops_per_loop_iteration, + ); + + wat.to_vec() +} + +fn write_add_i32_wat_ops(wat: &mut Vec, number_of_ops_per_loop_iteration: usize) { + wat.write_all(b" i32.const 0\n").unwrap(); + for _ in 0..number_of_ops_per_loop_iteration { + wat.write_all(b" i32.const 1\n").unwrap(); + wat.write_all(b" i32.add\n").unwrap(); + } + wat.write_all(b" drop\n").unwrap(); +} + +fn write_xor_i32_wat_ops(wat: &mut Vec, number_of_ops_per_loop_iteration: usize) { + wat.write_all(b" i32.const 1231\n").unwrap(); + for _ in 0..number_of_ops_per_loop_iteration { + wat.write_all(b" i32.const 12312313\n").unwrap(); + wat.write_all(b" i32.xor\n").unwrap(); + } + wat.write_all(b" drop\n").unwrap(); +} + +pub fn generate_wat(scenario: Scenario, output_wat_dir_path: Option) -> Vec { + let wat = match scenario { + Scenario::AddI32 => wat(write_add_i32_wat_ops), + Scenario::XorI32 => wat(write_xor_i32_wat_ops), + }; + + // print wat to file if needed + if let Some(output_wat_dir_path) = output_wat_dir_path { + let mut output_wat_path = output_wat_dir_path; + output_wat_path.push(format!("{}.wat", scenario)); + let mut file = File::create(output_wat_path).unwrap(); + file.write_all(&wat).unwrap(); + } + + wat +} diff --git a/arbitrator/wasm-libraries/user-host-trait/src/lib.rs b/arbitrator/wasm-libraries/user-host-trait/src/lib.rs index 2f410849fc..25163e25bc 100644 --- a/arbitrator/wasm-libraries/user-host-trait/src/lib.rs +++ b/arbitrator/wasm-libraries/user-host-trait/src/lib.rs @@ -2,6 +2,7 @@ // For license information, see https://github.com/nitro/blob/master/LICENSE use arbutil::{ + benchmark::Benchmark, crypto, evm::{ self, @@ -21,6 +22,7 @@ use 
prover::{ }; use ruint2::Uint; use std::fmt::Display; +use std::time::Instant; macro_rules! be { ($int:expr) => { @@ -68,6 +70,7 @@ pub trait UserHost: GasMeteredMachine { fn evm_api(&mut self) -> &mut Self::A; fn evm_data(&self) -> &EvmData; + fn benchmark(&mut self) -> &mut Benchmark; fn evm_return_data_len(&mut self) -> &mut u32; fn read_slice(&self, ptr: GuestPtr, len: u32) -> Result, Self::MemoryErr>; @@ -962,4 +965,38 @@ pub trait UserHost: GasMeteredMachine { self.say(value.into()); Ok(value) } + + // Initializes benchmark data related to a code block. + // A code block is defined by the instructions between start_benchmark and end_benchmark calls. + // If start_benchmark is called multiple times without end_benchmark being called, + // then only the last start_benchmark before end_benchmark will be used. + // It is possible to have multiple code blocks benchmarked in the same program. + fn start_benchmark(&mut self) -> Result<(), Self::Err> { + let ink_curr = self.ink_ready()?; + + let benchmark = self.benchmark(); + benchmark.timer = Some(Instant::now()); + benchmark.ink_start = Some(ink_curr); + + Ok(()) + } + + // Updates cumulative benchmark data related to a code block. + // If end_benchmark is called without a corresponding start_benchmark nothing will happen. + fn end_benchmark(&mut self) -> Result<(), Self::Err> { + let ink_curr = self.ink_ready()?; + + let benchmark = self.benchmark(); + if let Some(timer) = benchmark.timer { + benchmark.elapsed_total = benchmark.elapsed_total.saturating_add(timer.elapsed()); + + let code_block_ink = benchmark.ink_start.unwrap().saturating_sub(ink_curr); + benchmark.ink_total = benchmark.ink_total.saturating_add(code_block_ink); + + benchmark.timer = None; + benchmark.ink_start = None; + }; + + Ok(()) + } } diff --git a/arbitrator/wasm-libraries/user-host/src/program.rs b/arbitrator/wasm-libraries/user-host/src/program.rs index 7b3782b2e5..a2973ce56f 100644 --- a/arbitrator/wasm-libraries/user-host/src/program.rs +++ b/arbitrator/wasm-libraries/user-host/src/program.rs @@ -2,6 +2,7 @@ // For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE use arbutil::{ + benchmark::Benchmark, evm::{ api::{EvmApiMethod, Gas, Ink, VecReader, EVM_API_METHOD_REQ_OFFSET}, req::{EvmApiRequestor, RequestHandler}, @@ -75,6 +76,8 @@ pub(crate) struct Program { pub evm_api: EvmApiRequestor, /// EVM Context info. pub evm_data: EvmData, + // Used to benchmark execution blocks of code + pub benchmark: Benchmark, /// WAVM module index. pub module: u32, /// Call configuration. 
@@ -167,6 +170,7 @@ impl Program { outs: vec![], evm_api: EvmApiRequestor::new(UserHostRequester::default()), evm_data, + benchmark: Benchmark::default(), module, config, early_exit: None, @@ -237,6 +241,10 @@ impl UserHost for Program { &self.evm_data } + fn benchmark(&mut self) -> &mut Benchmark { + &mut self.benchmark + } + fn evm_return_data_len(&mut self) -> &mut u32 { &mut self.evm_data.return_data_len } diff --git a/arbitrator/wasm-libraries/user-test/src/program.rs b/arbitrator/wasm-libraries/user-test/src/program.rs index 299fca08c3..99252a38f0 100644 --- a/arbitrator/wasm-libraries/user-test/src/program.rs +++ b/arbitrator/wasm-libraries/user-test/src/program.rs @@ -3,6 +3,7 @@ use crate::{ARGS, EVER_PAGES, EVM_DATA, KEYS, LOGS, OPEN_PAGES, OUTS}; use arbutil::{ + benchmark::Benchmark, evm::{ api::{EvmApi, Gas, Ink, VecReader}, user::UserOutcomeKind, @@ -28,6 +29,7 @@ impl From for eyre::ErrReport { /// Mock type representing a `user_host::Program` pub struct Program { evm_api: MockEvmApi, + benchmark: Benchmark, } #[allow(clippy::unit_arg)] @@ -52,6 +54,10 @@ impl UserHost for Program { &EVM_DATA } + fn benchmark(&mut self) -> &mut Benchmark { + &mut self.benchmark + } + fn evm_return_data_len(&mut self) -> &mut u32 { unimplemented!() } @@ -91,6 +97,7 @@ impl Program { pub fn current() -> Self { Self { evm_api: MockEvmApi, + benchmark: Benchmark::default(), } } diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index a3256cb78f..70c5952042 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -34,6 +34,7 @@ import ( "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" + "github.com/offchainlabs/bold/solgen/go/bridgegen" "github.com/offchainlabs/nitro/arbnode/dataposter" "github.com/offchainlabs/nitro/arbnode/dataposter/storage" "github.com/offchainlabs/nitro/arbnode/redislock" @@ -44,7 +45,6 @@ import ( "github.com/offchainlabs/nitro/cmd/chaininfo" "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/execution" - "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/util" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/blobs" @@ -80,8 +80,10 @@ var ( const ( batchPosterSimpleRedisLockKey = "node.batch-poster.redis-lock.simple-lock-key" - sequencerBatchPostMethodName = "addSequencerL2BatchFromOrigin0" - sequencerBatchPostWithBlobsMethodName = "addSequencerL2BatchFromBlobs" + sequencerBatchPostMethodName = "addSequencerL2BatchFromOrigin0" + sequencerBatchPostWithBlobsMethodName = "addSequencerL2BatchFromBlobs" + sequencerBatchPostDelayProofMethodName = "addSequencerL2BatchFromOriginDelayProof" + sequencerBatchPostWithBlobsDelayProofMethodName = "addSequencerL2BatchFromBlobsDelayProof" ) type batchPosterPosition struct { @@ -172,6 +174,7 @@ type BatchPosterConfig struct { ReorgResistanceMargin time.Duration `koanf:"reorg-resistance-margin" reload:"hot"` CheckBatchCorrectness bool `koanf:"check-batch-correctness"` MaxEmptyBatchDelay time.Duration `koanf:"max-empty-batch-delay"` + DelayBufferThresholdMargin uint64 `koanf:"delay-buffer-threshold-margin"` gasRefunder common.Address l1BlockBound l1BlockBound @@ -230,6 +233,7 @@ func BatchPosterConfigAddOptions(prefix string, f *pflag.FlagSet) { f.Duration(prefix+".reorg-resistance-margin", DefaultBatchPosterConfig.ReorgResistanceMargin, "do not post batch if its within this duration from layer 1 minimum bounds. 
Requires l1-block-bound option not be set to \"ignore\"") f.Bool(prefix+".check-batch-correctness", DefaultBatchPosterConfig.CheckBatchCorrectness, "setting this to true will run the batch against an inbox multiplexer and verifies that it produces the correct set of messages") f.Duration(prefix+".max-empty-batch-delay", DefaultBatchPosterConfig.MaxEmptyBatchDelay, "maximum empty batch posting delay, batch poster will only be able to post an empty batch if this time period building a batch has passed") + f.Uint64(prefix+".delay-buffer-threshold-margin", DefaultBatchPosterConfig.DelayBufferThresholdMargin, "the number of blocks to post the batch before reaching the delay buffer threshold") redislock.AddConfigOptions(prefix+".redis-lock", f) dataposter.DataPosterConfigAddOptions(prefix+".data-poster", f, dataposter.DefaultDataPosterConfig) genericconf.WalletConfigAddOptions(prefix+".parent-chain-wallet", f, DefaultBatchPosterConfig.ParentChainWallet.Pathname) @@ -263,6 +267,7 @@ var DefaultBatchPosterConfig = BatchPosterConfig{ ReorgResistanceMargin: 10 * time.Minute, CheckBatchCorrectness: true, MaxEmptyBatchDelay: 3 * 24 * time.Hour, + DelayBufferThresholdMargin: 25, // 5 minutes considering 12-second blocks } var DefaultBatchPosterL1WalletConfig = genericconf.WalletConfig{ @@ -294,6 +299,7 @@ var TestBatchPosterConfig = BatchPosterConfig{ UseAccessLists: true, GasEstimateBaseFeeMultipleBips: arbmath.OneInUBips * 3 / 2, CheckBatchCorrectness: true, + DelayBufferThresholdMargin: 0, } type BatchPosterOpts struct { @@ -725,6 +731,7 @@ type buildingBatch struct { haveUsefulMessage bool use4844 bool muxBackend *simulatedMuxBackend + firstDelayedMsg *arbostypes.MessageWithMetadata firstNonDelayedMsg *arbostypes.MessageWithMetadata firstUsefulMsg *arbostypes.MessageWithMetadata } @@ -963,41 +970,45 @@ func (b *BatchPoster) encodeAddBatch( l2MessageData []byte, delayedMsg uint64, use4844 bool, + delayProof *bridgegen.DelayProof, ) ([]byte, []kzg4844.Blob, error) { - methodName := sequencerBatchPostMethodName + var methodName string if use4844 { - methodName = sequencerBatchPostWithBlobsMethodName + if delayProof != nil { + methodName = sequencerBatchPostWithBlobsDelayProofMethodName + } else { + methodName = sequencerBatchPostWithBlobsMethodName + } + } else if delayProof != nil { + methodName = sequencerBatchPostDelayProofMethodName + } else { + methodName = sequencerBatchPostMethodName } method, ok := b.seqInboxABI.Methods[methodName] if !ok { return nil, nil, errors.New("failed to find add batch method") } - var calldata []byte + var args []any var kzgBlobs []kzg4844.Blob var err error + args = append(args, seqNum) if use4844 { kzgBlobs, err = blobs.EncodeBlobs(l2MessageData) if err != nil { return nil, nil, fmt.Errorf("failed to encode blobs: %w", err) } - // EIP4844 transactions to the sequencer inbox will not use transaction calldata for L2 info. - calldata, err = method.Inputs.Pack( - seqNum, - new(big.Int).SetUint64(delayedMsg), - b.config().gasRefunder, - new(big.Int).SetUint64(uint64(prevMsgNum)), - new(big.Int).SetUint64(uint64(newMsgNum)), - ) } else { - calldata, err = method.Inputs.Pack( - seqNum, - l2MessageData, - new(big.Int).SetUint64(delayedMsg), - b.config().gasRefunder, - new(big.Int).SetUint64(uint64(prevMsgNum)), - new(big.Int).SetUint64(uint64(newMsgNum)), - ) + // EIP4844 transactions to the sequencer inbox will not use transaction calldata for L2 info. 
+ args = append(args, l2MessageData) } + args = append(args, new(big.Int).SetUint64(delayedMsg)) + args = append(args, b.config().gasRefunder) + args = append(args, new(big.Int).SetUint64(uint64(prevMsgNum))) + args = append(args, new(big.Int).SetUint64(uint64(newMsgNum))) + if delayProof != nil { + args = append(args, delayProof) + } + calldata, err := method.Inputs.Pack(args...) if err != nil { return nil, nil, err } @@ -1023,7 +1034,17 @@ func estimateGas(client rpc.ClientInterface, ctx context.Context, params estimat return uint64(gas), err } -func (b *BatchPoster) estimateGas(ctx context.Context, sequencerMessage []byte, delayedMessages uint64, realData []byte, realBlobs []kzg4844.Blob, realNonce uint64, realAccessList types.AccessList) (uint64, error) { +func (b *BatchPoster) estimateGas( + ctx context.Context, + sequencerMessage []byte, + delayedMessages uint64, + realData []byte, + realBlobs []kzg4844.Blob, + realNonce uint64, + realAccessList types.AccessList, + delayProof *bridgegen.DelayProof, +) (uint64, error) { + config := b.config() rpcClient := b.l1Reader.Client() rawRpcClient := rpcClient.Client() @@ -1065,7 +1086,7 @@ func (b *BatchPoster) estimateGas(ctx context.Context, sequencerMessage []byte, // However, we set nextMsgNum to 1 because it is necessary for a correct estimation for the final to be non-zero. // Because we're likely estimating against older state, this might not be the actual next message, // but the gas used should be the same. - data, kzgBlobs, err := b.encodeAddBatch(abi.MaxUint256, 0, 1, sequencerMessage, delayedMessages, len(realBlobs) > 0) + data, kzgBlobs, err := b.encodeAddBatch(abi.MaxUint256, 0, 1, sequencerMessage, delayedMessages, len(realBlobs) > 0, delayProof) if err != nil { return 0, err } @@ -1136,7 +1157,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) if err != nil { return false, err } - if arbOSVersion >= 20 { + if arbOSVersion >= params.ArbosVersion_20 { if config.IgnoreBlobPrice { use4844 = true } else { @@ -1319,7 +1340,11 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) b.building.firstUsefulMsg = msg } } - if !isDelayed && b.building.firstNonDelayedMsg == nil { + if isDelayed { + if b.building.firstDelayedMsg == nil { + b.building.firstDelayedMsg = msg + } + } else if b.building.firstNonDelayedMsg == nil { b.building.firstNonDelayedMsg = msg } b.building.msgCount++ @@ -1334,6 +1359,27 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) } } + delayBuffer, err := GetDelayBufferConfig(ctx, b.seqInbox) + if err != nil { + return false, err + } + if delayBuffer.Enabled && b.building.firstDelayedMsg != nil { + latestHeader, err := b.l1Reader.LastHeader(ctx) + if err != nil { + return false, err + } + latestBlock := latestHeader.Number.Uint64() + firstDelayedMsgBlock := b.building.firstDelayedMsg.Message.Header.BlockNumber + threasholdLimit := firstDelayedMsgBlock + delayBuffer.Threshold - b.config().DelayBufferThresholdMargin + if latestBlock >= threasholdLimit { + log.Info("force post batch because of the delay buffer", + "firstDelayedMsgBlock", firstDelayedMsgBlock, + "threshold", delayBuffer.Threshold, + "latestBlock", latestBlock) + forcePostBatch = true + } + } + if b.building.firstNonDelayedMsg != nil && hasL1Bound && config.ReorgResistanceMargin > 0 { firstMsgBlockNumber := b.building.firstNonDelayedMsg.Message.Header.BlockNumber firstMsgTimeStamp := b.building.firstNonDelayedMsg.Message.Header.Timestamp @@ -1425,7 +1471,15 @@ 
func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) prevMessageCount = 0 } - data, kzgBlobs, err := b.encodeAddBatch(new(big.Int).SetUint64(batchPosition.NextSeqNum), prevMessageCount, b.building.msgCount, sequencerMsg, b.building.segments.delayedMsg, b.building.use4844) + var delayProof *bridgegen.DelayProof + if delayBuffer.Enabled && b.building.firstDelayedMsg != nil { + delayProof, err = GenDelayProof(ctx, b.building.firstDelayedMsg, b.inbox) + if err != nil { + return false, fmt.Errorf("failed to generate delay proof: %w", err) + } + } + + data, kzgBlobs, err := b.encodeAddBatch(new(big.Int).SetUint64(batchPosition.NextSeqNum), prevMessageCount, b.building.msgCount, sequencerMsg, b.building.segments.delayedMsg, b.building.use4844, delayProof) if err != nil { return false, err } @@ -1440,7 +1494,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) // In theory, this might reduce gas usage, but only by a factor that's already // accounted for in `config.ExtraBatchGas`, as that same factor can appear if a user // posts a new delayed message that we didn't see while gas estimating. - gasLimit, err := b.estimateGas(ctx, sequencerMsg, lastPotentialMsg.DelayedMessagesRead, data, kzgBlobs, nonce, accessList) + gasLimit, err := b.estimateGas(ctx, sequencerMsg, lastPotentialMsg.DelayedMessagesRead, data, kzgBlobs, nonce, accessList, delayProof) if err != nil { return false, err } diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 65d8f579fa..a977b9fc08 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -712,13 +712,23 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u return newBaseFeeCap, newTipCap, newBlobFeeCap, nil } -func (p *DataPoster) PostSimpleTransaction(ctx context.Context, nonce uint64, to common.Address, calldata []byte, gasLimit uint64, value *big.Int) (*types.Transaction, error) { - return p.PostTransaction(ctx, time.Now(), nonce, nil, to, calldata, gasLimit, value, nil, nil) +func (p *DataPoster) PostSimpleTransaction(ctx context.Context, to common.Address, calldata []byte, gasLimit uint64, value *big.Int) (*types.Transaction, error) { + p.mutex.Lock() + defer p.mutex.Unlock() + nonce, _, _, _, err := p.getNextNonceAndMaybeMeta(ctx, 1) + if err != nil { + return nil, err + } + return p.postTransactionWithMutex(ctx, time.Now(), nonce, nil, to, calldata, gasLimit, value, nil, nil) } func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Time, nonce uint64, meta []byte, to common.Address, calldata []byte, gasLimit uint64, value *big.Int, kzgBlobs []kzg4844.Blob, accessList types.AccessList) (*types.Transaction, error) { p.mutex.Lock() defer p.mutex.Unlock() + return p.postTransactionWithMutex(ctx, dataCreatedAt, nonce, meta, to, calldata, gasLimit, value, kzgBlobs, accessList) +} + +func (p *DataPoster) postTransactionWithMutex(ctx context.Context, dataCreatedAt time.Time, nonce uint64, meta []byte, to common.Address, calldata []byte, gasLimit uint64, value *big.Int, kzgBlobs []kzg4844.Blob, accessList types.AccessList) (*types.Transaction, error) { if p.config().DisableNewTx { return nil, fmt.Errorf("posting new transaction is disabled") diff --git a/arbnode/delay_buffer.go b/arbnode/delay_buffer.go new file mode 100644 index 0000000000..3f0514bbe2 --- /dev/null +++ b/arbnode/delay_buffer.go @@ -0,0 +1,87 @@ +// Copyright 2024, Offchain Labs, Inc. 
+// For license information, see https://github.com/nitro/blob/master/LICENSE + +// This file contains functions related to the delay buffer feature that are used mostly in the +// batch poster. + +package arbnode + +import ( + "context" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + + "github.com/offchainlabs/bold/solgen/go/bridgegen" + "github.com/offchainlabs/nitro/arbos/arbostypes" + "github.com/offchainlabs/nitro/util/headerreader" +) + +// DelayBufferConfig originates from the sequencer inbox contract. +type DelayBufferConfig struct { + Enabled bool + Threshold uint64 +} + +// GetDelayBufferConfig gets the delay buffer config from the sequencer inbox contract. +// If the contract doesn't support the delay buffer, it returns a config with Enabled set to false. +func GetDelayBufferConfig(ctx context.Context, sequencerInbox *bridgegen.SequencerInbox) ( + *DelayBufferConfig, error) { + + callOpts := bind.CallOpts{Context: ctx} + enabled, err := sequencerInbox.IsDelayBufferable(&callOpts) + if err != nil { + if headerreader.ExecutionRevertedRegexp.MatchString(err.Error()) { + return &DelayBufferConfig{Enabled: false}, nil + } + return nil, fmt.Errorf("retrieve SequencerInbox.isDelayBufferable: %w", err) + } + if !enabled { + return &DelayBufferConfig{Enabled: false}, nil + } + bufferData, err := sequencerInbox.Buffer(&callOpts) + if err != nil { + return nil, fmt.Errorf("retrieve SequencerInbox.buffer: %w", err) + } + config := &DelayBufferConfig{ + Enabled: true, + Threshold: bufferData.Threshold, + } + return config, nil +} + +// GenDelayProof generates the delay proof based on the batch's first delayed message and the delayed +// accumulator from the inbox.
+func GenDelayProof(ctx context.Context, message *arbostypes.MessageWithMetadata, inbox *InboxTracker) ( + *bridgegen.DelayProof, error) { + + if message.DelayedMessagesRead == 0 { + return nil, fmt.Errorf("BUG: trying to generate delay proof without delayed message") + } + seqNum := message.DelayedMessagesRead - 1 + var beforeDelayedAcc common.Hash + if seqNum > 0 { + var err error + beforeDelayedAcc, err = inbox.GetDelayedAcc(seqNum - 1) + if err != nil { + return nil, err + } + } + delayedMessage := bridgegen.MessagesMessage{ + Kind: message.Message.Header.Kind, + Sender: message.Message.Header.Poster, + BlockNumber: message.Message.Header.BlockNumber, + Timestamp: message.Message.Header.Timestamp, + InboxSeqNum: new(big.Int).SetUint64(seqNum), + BaseFeeL1: message.Message.Header.L1BaseFee, + MessageDataHash: crypto.Keccak256Hash(message.Message.L2msg), + } + delayProof := &bridgegen.DelayProof{ + BeforeDelayedAcc: beforeDelayedAcc, + DelayedMessage: delayedMessage, + } + return delayProof, nil +} diff --git a/arbnode/delayed_sequencer.go b/arbnode/delayed_sequencer.go index abd24dbd12..235a747446 100644 --- a/arbnode/delayed_sequencer.go +++ b/arbnode/delayed_sequencer.go @@ -9,6 +9,7 @@ import ( "fmt" "math/big" "sync" + "time" flag "github.com/spf13/pflag" @@ -30,16 +31,17 @@ type DelayedSequencer struct { reader *InboxReader exec execution.ExecutionSequencer coordinator *SeqCoordinator - waitingForFinalizedBlock uint64 + waitingForFinalizedBlock *uint64 mutex sync.Mutex config DelayedSequencerConfigFetcher } type DelayedSequencerConfig struct { - Enable bool `koanf:"enable" reload:"hot"` - FinalizeDistance int64 `koanf:"finalize-distance" reload:"hot"` - RequireFullFinality bool `koanf:"require-full-finality" reload:"hot"` - UseMergeFinality bool `koanf:"use-merge-finality" reload:"hot"` + Enable bool `koanf:"enable" reload:"hot"` + FinalizeDistance int64 `koanf:"finalize-distance" reload:"hot"` + RequireFullFinality bool `koanf:"require-full-finality" reload:"hot"` + UseMergeFinality bool `koanf:"use-merge-finality" reload:"hot"` + RescanInterval time.Duration `koanf:"rescan-interval" reload:"hot"` } type DelayedSequencerConfigFetcher func() *DelayedSequencerConfig @@ -49,6 +51,7 @@ func DelayedSequencerConfigAddOptions(prefix string, f *flag.FlagSet) { f.Int64(prefix+".finalize-distance", DefaultDelayedSequencerConfig.FinalizeDistance, "how many blocks in the past L1 block is considered final (ignored when using Merge finality)") f.Bool(prefix+".require-full-finality", DefaultDelayedSequencerConfig.RequireFullFinality, "whether to wait for full finality before sequencing delayed messages") f.Bool(prefix+".use-merge-finality", DefaultDelayedSequencerConfig.UseMergeFinality, "whether to use The Merge's notion of finality before sequencing delayed messages") + f.Duration(prefix+".rescan-interval", DefaultDelayedSequencerConfig.RescanInterval, "frequency to rescan for new delayed messages (the parent chain reader's poll-interval config is more important than this)") } var DefaultDelayedSequencerConfig = DelayedSequencerConfig{ @@ -56,6 +59,7 @@ var DefaultDelayedSequencerConfig = DelayedSequencerConfig{ FinalizeDistance: 20, RequireFullFinality: false, UseMergeFinality: true, + RescanInterval: time.Second, } var TestDelayedSequencerConfig = DelayedSequencerConfig{ @@ -63,6 +67,7 @@ var TestDelayedSequencerConfig = DelayedSequencerConfig{ FinalizeDistance: 20, RequireFullFinality: false, UseMergeFinality: false, + RescanInterval: time.Millisecond * 100, } func 
NewDelayedSequencer(l1Reader *headerreader.HeaderReader, reader *InboxReader, exec execution.ExecutionSequencer, coordinator *SeqCoordinator, config DelayedSequencerConfigFetcher) (*DelayedSequencer, error) { @@ -126,13 +131,12 @@ func (d *DelayedSequencer) sequenceWithoutLockout(ctx context.Context, lastBlock finalized = uint64(currentNum - config.FinalizeDistance) } - if d.waitingForFinalizedBlock > finalized { + if d.waitingForFinalizedBlock != nil && *d.waitingForFinalizedBlock > finalized { return nil } - // Unless we find an unfinalized message (which sets waitingForBlock), - // we won't find a new finalized message until FinalizeDistance blocks in the future. - d.waitingForFinalizedBlock = lastBlockHeader.Number.Uint64() + 1 + // Reset what block we're waiting for if we've caught up + d.waitingForFinalizedBlock = nil dbDelayedCount, err := d.inbox.GetDelayedCount() if err != nil { @@ -153,8 +157,8 @@ func (d *DelayedSequencer) sequenceWithoutLockout(ctx context.Context, lastBlock return err } if parentChainBlockNumber > finalized { - // Message isn't finalized yet; stop here - d.waitingForFinalizedBlock = parentChainBlockNumber + // Message isn't finalized yet; wait for it to be + d.waitingForFinalizedBlock = &parentChainBlockNumber break } if lastDelayedAcc != (common.Hash{}) { @@ -216,20 +220,40 @@ func (d *DelayedSequencer) run(ctx context.Context) { headerChan, cancel := d.l1Reader.Subscribe(false) defer cancel() + latestHeader, err := d.l1Reader.LastHeader(ctx) + if err != nil { + log.Warn("delayed sequencer: failed to get latest header", "err", err) + latestHeader = nil + } + rescanTimer := time.NewTimer(d.config().RescanInterval) for { + if !rescanTimer.Stop() { + select { + case <-rescanTimer.C: + default: + } + } + if latestHeader != nil { + rescanTimer.Reset(d.config().RescanInterval) + } + var ok bool select { - case nextHeader, ok := <-headerChan: + case latestHeader, ok = <-headerChan: if !ok { - log.Info("delayed sequencer: header channel close") + log.Debug("delayed sequencer: header channel close") return } - if err := d.trySequence(ctx, nextHeader); err != nil { - log.Error("Delayed sequencer error", "err", err) + case <-rescanTimer.C: + if latestHeader == nil { + continue } case <-ctx.Done(): - log.Info("delayed sequencer: context done", "err", ctx.Err()) + log.Debug("delayed sequencer: context done", "err", ctx.Err()) return } + if err := d.trySequence(ctx, latestHeader); err != nil { + log.Error("Delayed sequencer error", "err", err) + } } } diff --git a/arbnode/message_pruner.go b/arbnode/message_pruner.go index 840a15f328..08f568796d 100644 --- a/arbnode/message_pruner.go +++ b/arbnode/message_pruner.go @@ -46,7 +46,7 @@ type MessagePrunerConfigFetcher func() *MessagePrunerConfig var DefaultMessagePrunerConfig = MessagePrunerConfig{ Enable: true, PruneInterval: time.Minute, - MinBatchesLeft: 2, + MinBatchesLeft: 1000, } func MessagePrunerConfigAddOptions(prefix string, f *flag.FlagSet) { diff --git a/arbnode/node.go b/arbnode/node.go index 77562817dd..f2e3433ecd 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -42,6 +42,9 @@ import ( "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/solgen/go/rollupgen" "github.com/offchainlabs/nitro/staker" + boldstaker "github.com/offchainlabs/nitro/staker/bold" + legacystaker "github.com/offchainlabs/nitro/staker/legacy" + multiprotocolstaker "github.com/offchainlabs/nitro/staker/multi_protocol" "github.com/offchainlabs/nitro/staker/validatorwallet" 
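Note on the delayed_sequencer.go run loop above: it now reuses a single time.Timer, and the Stop, drain, Reset sequence is the standard Go idiom for doing that without a stale tick firing immediately after Reset. A minimal standalone illustration of the same idiom (not nitro code, just the pattern with a fixed interval):

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		interval := 100 * time.Millisecond
		t := time.NewTimer(interval)
		for i := 0; i < 3; i++ {
			// Stop reports false if the timer already fired; in that case drain
			// any pending tick so Reset starts a clean countdown.
			if !t.Stop() {
				select {
				case <-t.C:
				default:
				}
			}
			t.Reset(interval)
			<-t.C
			fmt.Println("rescan pass", i)
		}
	}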
"github.com/offchainlabs/nitro/util/contracts" "github.com/offchainlabs/nitro/util/headerreader" @@ -79,22 +82,23 @@ func GenerateRollupConfig(prod bool, wasmModuleRoot common.Hash, rollupOwner com } type Config struct { - Sequencer bool `koanf:"sequencer"` - ParentChainReader headerreader.Config `koanf:"parent-chain-reader" reload:"hot"` - InboxReader InboxReaderConfig `koanf:"inbox-reader" reload:"hot"` - DelayedSequencer DelayedSequencerConfig `koanf:"delayed-sequencer" reload:"hot"` - BatchPoster BatchPosterConfig `koanf:"batch-poster" reload:"hot"` - MessagePruner MessagePrunerConfig `koanf:"message-pruner" reload:"hot"` - BlockValidator staker.BlockValidatorConfig `koanf:"block-validator" reload:"hot"` - Feed broadcastclient.FeedConfig `koanf:"feed" reload:"hot"` - Staker staker.L1ValidatorConfig `koanf:"staker" reload:"hot"` - SeqCoordinator SeqCoordinatorConfig `koanf:"seq-coordinator"` - DataAvailability das.DataAvailabilityConfig `koanf:"data-availability"` - SyncMonitor SyncMonitorConfig `koanf:"sync-monitor"` - Dangerous DangerousConfig `koanf:"dangerous"` - TransactionStreamer TransactionStreamerConfig `koanf:"transaction-streamer" reload:"hot"` - Maintenance MaintenanceConfig `koanf:"maintenance" reload:"hot"` - ResourceMgmt resourcemanager.Config `koanf:"resource-mgmt" reload:"hot"` + Sequencer bool `koanf:"sequencer"` + ParentChainReader headerreader.Config `koanf:"parent-chain-reader" reload:"hot"` + InboxReader InboxReaderConfig `koanf:"inbox-reader" reload:"hot"` + DelayedSequencer DelayedSequencerConfig `koanf:"delayed-sequencer" reload:"hot"` + BatchPoster BatchPosterConfig `koanf:"batch-poster" reload:"hot"` + MessagePruner MessagePrunerConfig `koanf:"message-pruner" reload:"hot"` + BlockValidator staker.BlockValidatorConfig `koanf:"block-validator" reload:"hot"` + Feed broadcastclient.FeedConfig `koanf:"feed" reload:"hot"` + Staker legacystaker.L1ValidatorConfig `koanf:"staker" reload:"hot"` + Bold boldstaker.BoldConfig `koanf:"bold"` + SeqCoordinator SeqCoordinatorConfig `koanf:"seq-coordinator"` + DataAvailability das.DataAvailabilityConfig `koanf:"data-availability"` + SyncMonitor SyncMonitorConfig `koanf:"sync-monitor"` + Dangerous DangerousConfig `koanf:"dangerous"` + TransactionStreamer TransactionStreamerConfig `koanf:"transaction-streamer" reload:"hot"` + Maintenance MaintenanceConfig `koanf:"maintenance" reload:"hot"` + ResourceMgmt resourcemanager.Config `koanf:"resource-mgmt" reload:"hot"` // SnapSyncConfig is only used for testing purposes, these should not be configured in production. 
SnapSyncTest SnapSyncConfig } @@ -153,7 +157,8 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet, feedInputEnable bool, feed MessagePrunerConfigAddOptions(prefix+".message-pruner", f) staker.BlockValidatorConfigAddOptions(prefix+".block-validator", f) broadcastclient.FeedConfigAddOptions(prefix+".feed", f, feedInputEnable, feedOutputEnable) - staker.L1ValidatorConfigAddOptions(prefix+".staker", f) + legacystaker.L1ValidatorConfigAddOptions(prefix+".staker", f) + boldstaker.BoldConfigAddOptions(prefix+".bold", f) SeqCoordinatorConfigAddOptions(prefix+".seq-coordinator", f) das.DataAvailabilityConfigAddNodeOptions(prefix+".data-availability", f) SyncMonitorConfigAddOptions(prefix+".sync-monitor", f) @@ -171,7 +176,8 @@ var ConfigDefault = Config{ MessagePruner: DefaultMessagePrunerConfig, BlockValidator: staker.DefaultBlockValidatorConfig, Feed: broadcastclient.FeedConfigDefault, - Staker: staker.DefaultL1ValidatorConfig, + Staker: legacystaker.DefaultL1ValidatorConfig, + Bold: boldstaker.DefaultBoldConfig, SeqCoordinator: DefaultSeqCoordinatorConfig, DataAvailability: das.DefaultDataAvailabilityConfig, SyncMonitor: DefaultSyncMonitorConfig, @@ -203,7 +209,7 @@ func ConfigDefaultL1NonSequencerTest() *Config { config.SeqCoordinator.Enable = false config.BlockValidator = staker.TestBlockValidatorConfig config.SyncMonitor = TestSyncMonitorConfig - config.Staker = staker.TestL1ValidatorConfig + config.Staker = legacystaker.TestL1ValidatorConfig config.Staker.Enable = false config.BlockValidator.ValidationServerConfigs = []rpcclient.ClientConfig{{URL: ""}} @@ -219,7 +225,7 @@ func ConfigDefaultL2Test() *Config { config.Feed.Output.Signed = false config.SeqCoordinator.Signer.ECDSA.AcceptSequencer = false config.SeqCoordinator.Signer.ECDSA.Dangerous.AcceptMissing = true - config.Staker = staker.TestL1ValidatorConfig + config.Staker = legacystaker.TestL1ValidatorConfig config.SyncMonitor = TestSyncMonitorConfig config.Staker.Enable = false config.BlockValidator.ValidationServerConfigs = []rpcclient.ClientConfig{{URL: ""}} @@ -267,7 +273,7 @@ type Node struct { MessagePruner *MessagePruner BlockValidator *staker.BlockValidator StatelessBlockValidator *staker.StatelessBlockValidator - Staker *staker.Staker + Staker *multiprotocolstaker.MultiProtocolStaker BroadcastServer *broadcaster.Broadcaster BroadcastClients *broadcastclients.BroadcastClients SeqCoordinator *SeqCoordinator @@ -633,7 +639,7 @@ func createNodeImpl( } } - var stakerObj *staker.Staker + var stakerObj *multiprotocolstaker.MultiProtocolStaker var messagePruner *MessagePruner var stakerAddr common.Address @@ -653,7 +659,7 @@ func createNodeImpl( getExtraGas := func() uint64 { return configFetcher.Get().Staker.ExtraGas } // TODO: factor this out into separate helper, and split rest of node // creation into multiple helpers. 
- var wallet staker.ValidatorWalletInterface = validatorwallet.NewNoOp(l1client, deployInfo.Rollup) + var wallet legacystaker.ValidatorWalletInterface = validatorwallet.NewNoOp(l1client, deployInfo.Rollup) if !strings.EqualFold(config.Staker.Strategy, "watchtower") { if config.Staker.UseSmartContractWallet || (txOptsValidator == nil && config.Staker.DataPoster.ExternalSigner.URL == "") { var existingWalletAddress *common.Address @@ -681,13 +687,13 @@ func createNodeImpl( } } - var confirmedNotifiers []staker.LatestConfirmedNotifier + var confirmedNotifiers []legacystaker.LatestConfirmedNotifier if config.MessagePruner.Enable { messagePruner = NewMessagePruner(txStreamer, inboxTracker, func() *MessagePrunerConfig { return &configFetcher.Get().MessagePruner }) confirmedNotifiers = append(confirmedNotifiers, messagePruner) } - stakerObj, err = staker.NewStaker(l1Reader, wallet, bind.CallOpts{}, func() *staker.L1ValidatorConfig { return &configFetcher.Get().Staker }, blockValidator, statelessBlockValidator, nil, confirmedNotifiers, deployInfo.ValidatorUtils, fatalErrChan) + stakerObj, err = multiprotocolstaker.NewMultiProtocolStaker(stack, l1Reader, wallet, bind.CallOpts{}, func() *legacystaker.L1ValidatorConfig { return &configFetcher.Get().Staker }, &configFetcher.Get().Bold, blockValidator, statelessBlockValidator, nil, deployInfo.StakeToken, confirmedNotifiers, deployInfo.ValidatorUtils, deployInfo.Bridge, fatalErrChan) if err != nil { return nil, err } @@ -697,11 +703,6 @@ func createNodeImpl( if dp != nil { stakerAddr = dp.Sender() } - whitelisted, err := stakerObj.IsWhitelisted(ctx) - if err != nil { - return nil, err - } - log.Info("running as validator", "txSender", stakerAddr, "actingAsWallet", wallet.Address(), "whitelisted", whitelisted, "strategy", config.Staker.Strategy) } var batchPoster *BatchPoster diff --git a/arbnode/seq_coordinator.go b/arbnode/seq_coordinator.go index 5987801d5f..fc2f3c9cf6 100644 --- a/arbnode/seq_coordinator.go +++ b/arbnode/seq_coordinator.go @@ -662,7 +662,7 @@ func (c *SeqCoordinator) update(ctx context.Context) time.Duration { for msgToRead < readUntil && localMsgCount >= remoteFinalizedMsgCount { var resString string resString, msgReadErr = client.Get(ctx, redisutil.MessageKeyFor(msgToRead)).Result() - if msgReadErr != nil { + if msgReadErr != nil && c.sequencer.Synced() { log.Warn("coordinator failed reading message", "pos", msgToRead, "err", msgReadErr) break } diff --git a/arbos/addressSet/addressSet.go b/arbos/addressSet/addressSet.go index 4bb87e614d..ccd780aa11 100644 --- a/arbos/addressSet/addressSet.go +++ b/arbos/addressSet/addressSet.go @@ -9,6 +9,7 @@ import ( "errors" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/storage" "github.com/offchainlabs/nitro/arbos/util" @@ -185,7 +186,7 @@ func (as *AddressSet) Remove(addr common.Address, arbosVersion uint64) error { if err != nil { return err } - if arbosVersion >= 11 { + if arbosVersion >= params.ArbosVersion_11 { err = as.byAddress.Set(atSize, util.UintToHash(slot)) if err != nil { return err diff --git a/arbos/arbosState/arbosstate.go b/arbos/arbosState/arbosstate.go index a3d1ae8386..5ee070f942 100644 --- a/arbos/arbosState/arbosstate.go +++ b/arbos/arbosState/arbosstate.go @@ -63,9 +63,6 @@ type ArbosState struct { Burner burn.Burner } -const MaxArbosVersionSupported uint64 = params.ArbosVersion_StylusChargingFixes -const MaxDebugArbosVersionSupported uint64 = params.ArbosVersion_StylusChargingFixes - var 
ErrUninitializedArbOS = errors.New("ArbOS uninitialized") var ErrAlreadyInitialized = errors.New("ArbOS is already initialized") @@ -205,7 +202,7 @@ func InitializeArbosState(stateDB vm.StateDB, burner burn.Burner, chainConfig *p _ = sto.SetUint64ByUint64(uint64(versionOffset), 1) // initialize to version 1; upgrade at end of this func if needed _ = sto.SetUint64ByUint64(uint64(upgradeVersionOffset), 0) _ = sto.SetUint64ByUint64(uint64(upgradeTimestampOffset), 0) - if desiredArbosVersion >= 2 { + if desiredArbosVersion >= params.ArbosVersion_2 { _ = sto.SetByUint64(uint64(networkFeeAccountOffset), util.AddressToHash(initialChainOwner)) } else { _ = sto.SetByUint64(uint64(networkFeeAccountOffset), common.Hash{}) // the 0 address until an owner sets it @@ -217,7 +214,7 @@ func InitializeArbosState(stateDB vm.StateDB, burner burn.Burner, chainConfig *p _ = sto.SetUint64ByUint64(uint64(brotliCompressionLevelOffset), 0) // default brotliCompressionLevel for fast compression is 0 initialRewardsRecipient := l1pricing.BatchPosterAddress - if desiredArbosVersion >= 2 { + if desiredArbosVersion >= params.ArbosVersion_2 { initialRewardsRecipient = initialChainOwner } _ = l1pricing.InitializeL1PricingState(sto.OpenCachedSubStorage(l1PricingSubspace), initialRewardsRecipient, initMessage.InitialL1BaseFee) @@ -274,29 +271,29 @@ func (state *ArbosState) UpgradeArbosVersion( nextArbosVersion := state.arbosVersion + 1 switch nextArbosVersion { - case 2: + case params.ArbosVersion_2: ensure(state.l1PricingState.SetLastSurplus(common.Big0, 1)) - case 3: + case params.ArbosVersion_3: ensure(state.l1PricingState.SetPerBatchGasCost(0)) ensure(state.l1PricingState.SetAmortizedCostCapBips(math.MaxUint64)) - case 4: + case params.ArbosVersion_4: // no state changes needed - case 5: + case params.ArbosVersion_5: // no state changes needed - case 6: + case params.ArbosVersion_6: // no state changes needed - case 7: + case params.ArbosVersion_7: // no state changes needed - case 8: + case params.ArbosVersion_8: // no state changes needed - case 9: + case params.ArbosVersion_9: // no state changes needed - case 10: + case params.ArbosVersion_10: ensure(state.l1PricingState.SetL1FeesAvailable(stateDB.GetBalance( l1pricing.L1PricerFundsPoolAddress, ).ToBig())) - case 11: + case params.ArbosVersion_11: // Update the PerBatchGasCost to a more accurate value compared to the old v6 default. ensure(state.l1PricingState.SetPerBatchGasCost(l1pricing.InitialPerBatchGasCostV12)) @@ -316,23 +313,23 @@ func (state *ArbosState) UpgradeArbosVersion( case 12, 13, 14, 15, 16, 17, 18, 19: // these versions are left to Orbit chains for custom upgrades. - case 20: + case params.ArbosVersion_20: // Update Brotli compression level for fast compression from 0 to 1 ensure(state.SetBrotliCompressionLevel(1)) case 21, 22, 23, 24, 25, 26, 27, 28, 29: // these versions are left to Orbit chains for custom upgrades. 
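The numeric upgrade cases above, and the scattered version checks elsewhere in this diff, now use the named ArbosVersion_N constants from go-ethereum's params package instead of bare numbers. Gated call sites keep the same shape; as it appears in the tx_processor.go hunks further down, the pattern is simply:

	if p.state.ArbOSVersion() >= params.ArbosVersion_11 {
		// take the ArbOS 11+ code path
	}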
- case 30: + case params.ArbosVersion_30: programs.Initialize(state.backingStorage.OpenSubStorage(programsSubspace)) - case 31: + case params.ArbosVersion_31: params, err := state.Programs().Params() ensure(err) ensure(params.UpgradeToVersion(2)) ensure(params.Save()) - case 32: + case params.ArbosVersion_32: // no change state needed default: @@ -353,8 +350,8 @@ func (state *ArbosState) UpgradeArbosVersion( state.arbosVersion = nextArbosVersion } - if firstTime && upgradeTo >= 6 { - if upgradeTo < 11 { + if firstTime && upgradeTo >= params.ArbosVersion_6 { + if upgradeTo < params.ArbosVersion_11 { state.Restrict(state.l1PricingState.SetPerBatchGasCost(l1pricing.InitialPerBatchGasCostV6)) } state.Restrict(state.l1PricingState.SetEquilibrationUnits(l1pricing.InitialEquilibrationUnitsV6)) diff --git a/arbos/arbosState/initialize.go b/arbos/arbosState/initialize.go index 29cb75b758..8fd417c2b2 100644 --- a/arbos/arbosState/initialize.go +++ b/arbos/arbosState/initialize.go @@ -54,7 +54,7 @@ func MakeGenesisBlock(parentHash common.Hash, blockNumber uint64, timestamp uint } genesisHeaderInfo.UpdateHeaderWithInfo(head) - return types.NewBlock(head, nil, nil, nil, trie.NewStackTrie(nil)) + return types.NewBlock(head, nil, nil, trie.NewStackTrie(nil)) } func InitializeArbosInDatabase(db ethdb.Database, cacheConfig *core.CacheConfig, initData statetransfer.InitDataReader, chainConfig *params.ChainConfig, initMessage *arbostypes.ParsedInitMessage, timestamp uint64, accountsPerSync uint) (root common.Hash, err error) { diff --git a/arbos/block_processor.go b/arbos/block_processor.go index fe0a39d230..77475856ac 100644 --- a/arbos/block_processor.go +++ b/arbos/block_processor.go @@ -460,7 +460,7 @@ func ProduceBlockAdvanced( FinalizeBlock(header, complete, statedb, chainConfig) // Touch up the block hashes in receipts - tmpBlock := types.NewBlock(header, complete, nil, receipts, trie.NewStackTrie(nil)) + tmpBlock := types.NewBlock(header, &types.Body{Transactions: complete}, receipts, trie.NewStackTrie(nil)) blockHash := tmpBlock.Hash() for _, receipt := range receipts { @@ -470,7 +470,7 @@ func ProduceBlockAdvanced( } } - block := types.NewBlock(header, complete, nil, receipts, trie.NewStackTrie(nil)) + block := types.NewBlock(header, &types.Body{Transactions: complete}, receipts, trie.NewStackTrie(nil)) if len(block.Transactions()) != len(receipts) { return nil, nil, fmt.Errorf("block has %d txes but %d receipts", len(block.Transactions()), len(receipts)) diff --git a/arbos/blockhash/blockhash.go b/arbos/blockhash/blockhash.go index ff29bbca9a..df5078fd2c 100644 --- a/arbos/blockhash/blockhash.go +++ b/arbos/blockhash/blockhash.go @@ -8,6 +8,7 @@ import ( "errors" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/storage" ) @@ -56,7 +57,7 @@ func (bh *Blockhashes) RecordNewL1Block(number uint64, blockHash common.Hash, ar // fill in hashes for any "skipped over" blocks nextNumber++ var nextNumBuf [8]byte - if arbosVersion >= 8 { + if arbosVersion >= params.ArbosVersion_8 { binary.LittleEndian.PutUint64(nextNumBuf[:], nextNumber) } diff --git a/arbos/blockhash/blockhash_test.go b/arbos/blockhash/blockhash_test.go index c7cc04d966..8dec2181a3 100644 --- a/arbos/blockhash/blockhash_test.go +++ b/arbos/blockhash/blockhash_test.go @@ -8,6 +8,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/burn" 
"github.com/offchainlabs/nitro/arbos/storage" @@ -15,7 +16,7 @@ import ( ) func TestBlockhash(t *testing.T) { - arbosVersion := uint64(8) + arbosVersion := params.ArbosVersion_8 sto := storage.NewMemoryBacked(burn.NewSystemBurner(nil, false)) InitializeBlockhashes(sto) diff --git a/arbos/engine.go b/arbos/engine.go index a4aa9c46a9..a812e5486b 100644 --- a/arbos/engine.go +++ b/arbos/engine.go @@ -56,7 +56,7 @@ func (e Engine) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *t e.Finalize(chain, header, state, body) - block := types.NewBlock(header, body.Transactions, nil, receipts, trie.NewStackTrie(nil)) + block := types.NewBlock(header, &types.Body{Transactions: body.Transactions}, receipts, trie.NewStackTrie(nil)) return block, nil } diff --git a/arbos/internal_tx.go b/arbos/internal_tx.go index 64dede6290..0ecdfe74cf 100644 --- a/arbos/internal_tx.go +++ b/arbos/internal_tx.go @@ -12,6 +12,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/arbosState" "github.com/offchainlabs/nitro/arbos/util" @@ -56,11 +57,11 @@ func ApplyInternalTxUpdate(tx *types.ArbitrumInternalTx, state *arbosState.Arbos l1BlockNumber := util.SafeMapGet[uint64](inputs, "l1BlockNumber") timePassed := util.SafeMapGet[uint64](inputs, "timePassed") - if state.ArbOSVersion() < 3 { + if state.ArbOSVersion() < params.ArbosVersion_3 { // (incorrectly) use the L2 block number instead timePassed = util.SafeMapGet[uint64](inputs, "l2BlockNumber") } - if state.ArbOSVersion() < 8 { + if state.ArbOSVersion() < params.ArbosVersion_8 { // in old versions we incorrectly used an L1 block number one too high l1BlockNumber++ } diff --git a/arbos/l1pricing/l1PricingOldVersions.go b/arbos/l1pricing/l1PricingOldVersions.go index 1377351af3..e4cbf5e1b3 100644 --- a/arbos/l1pricing/l1PricingOldVersions.go +++ b/arbos/l1pricing/l1PricingOldVersions.go @@ -9,6 +9,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/util" am "github.com/offchainlabs/nitro/util/arbmath" @@ -24,7 +25,7 @@ func (ps *L1PricingState) _preversion10_UpdateForBatchPosterSpending( l1Basefee *big.Int, scenario util.TracingScenario, ) error { - if arbosVersion < 2 { + if arbosVersion < params.ArbosVersion_2 { return ps._preVersion2_UpdateForBatchPosterSpending(statedb, evm, updateTime, currentTime, batchPoster, weiSpent, scenario) } @@ -69,7 +70,7 @@ func (ps *L1PricingState) _preversion10_UpdateForBatchPosterSpending( } // impose cap on amortized cost, if there is one - if arbosVersion >= 3 { + if arbosVersion >= params.ArbosVersion_3 { amortizedCostCapBips, err := ps.AmortizedCostCapBips() if err != nil { return err diff --git a/arbos/l1pricing/l1pricing.go b/arbos/l1pricing/l1pricing.go index 37dae08c33..195df3708c 100644 --- a/arbos/l1pricing/l1pricing.go +++ b/arbos/l1pricing/l1pricing.go @@ -8,7 +8,6 @@ import ( "errors" "fmt" "math/big" - "sync/atomic" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" @@ -216,7 +215,7 @@ func (ps *L1PricingState) LastSurplus() (*big.Int, error) { } func (ps *L1PricingState) SetLastSurplus(val *big.Int, arbosVersion uint64) error { - if arbosVersion < 7 { + if arbosVersion < params.ArbosVersion_7 { return ps.lastSurplus.Set_preVersion7(val) } return ps.lastSurplus.SetSaturatingWithWarning(val, "L1 
pricer last surplus") @@ -309,7 +308,7 @@ func (ps *L1PricingState) UpdateForBatchPosterSpending( l1Basefee *big.Int, scenario util.TracingScenario, ) error { - if arbosVersion < 10 { + if arbosVersion < params.ArbosVersion_10 { return ps._preversion10_UpdateForBatchPosterSpending(statedb, evm, arbosVersion, updateTime, currentTime, batchPoster, weiSpent, l1Basefee, scenario) } @@ -359,7 +358,7 @@ func (ps *L1PricingState) UpdateForBatchPosterSpending( } // impose cap on amortized cost, if there is one - if arbosVersion >= 3 { + if arbosVersion >= params.ArbosVersion_3 { amortizedCostCapBips, err := ps.AmortizedCostCapBips() if err != nil { return err @@ -520,10 +519,13 @@ func (ps *L1PricingState) GetPosterInfo(tx *types.Transaction, poster common.Add if poster != BatchPosterAddress { return common.Big0, 0 } - units := atomic.LoadUint64(&tx.CalldataUnits) - if units == 0 { + var units uint64 + if cachedUnits := tx.GetCachedCalldataUnits(brotliCompressionLevel); cachedUnits != nil { + units = *cachedUnits + } else { + // The cache is empty or invalid, so we need to compute the calldata units units = ps.getPosterUnitsWithoutCache(tx, poster, brotliCompressionLevel) - atomic.StoreUint64(&tx.CalldataUnits, units) + tx.SetCachedCalldataUnits(brotliCompressionLevel, units) } // Approximate the l1 fee charged for posting this tx's calldata diff --git a/arbos/programs/native.go b/arbos/programs/native.go index f162704995..a996d50d8a 100644 --- a/arbos/programs/native.go +++ b/arbos/programs/native.go @@ -109,7 +109,7 @@ func activateProgramInternal( (*u64)(gasLeft), )) - module, msg, err := status_mod.toResult(output.intoBytes(), debug) + module, msg, err := status_mod.toResult(rustBytesIntoBytes(output), debug) if err != nil { if debug { log.Warn("activation failed", "err", err, "msg", msg, "program", addressForLogging) @@ -119,7 +119,7 @@ func activateProgramInternal( } return nil, nil, err } - hash := moduleHash.toHash() + hash := bytes32ToHash(moduleHash) targets := db.Database().WasmTargets() type result struct { target ethdb.WasmTarget @@ -141,7 +141,7 @@ func activateProgramInternal( goSlice([]byte(target)), output, ) - asm := output.intoBytes() + asm := rustBytesIntoBytes(output) if status_asm != 0 { results <- result{target, nil, fmt.Errorf("%w: %s", ErrProgramActivation, string(asm))} return @@ -279,7 +279,7 @@ func callProgram( )) depth := interpreter.Depth() - data, msg, err := status.toResult(output.intoBytes(), debug) + data, msg, err := status.toResult(rustBytesIntoBytes(output), debug) if status == userFailure && debug { log.Warn("program failure", "err", err, "msg", msg, "program", address, "depth", depth) } @@ -292,7 +292,7 @@ func callProgram( //export handleReqImpl func handleReqImpl(apiId usize, req_type u32, data *rustSlice, costPtr *u64, out_response *C.GoSliceData, out_raw_data *C.GoSliceData) { api := getApi(apiId) - reqData := data.read() + reqData := readRustSlice(data) reqType := RequestType(req_type - EvmApiMethodReqOffset) response, raw_data, cost := api.handler(reqType, reqData) *costPtr = u64(cost) @@ -418,14 +418,14 @@ func SetTarget(name ethdb.WasmTarget, description string, native bool) error { cbool(native), )) if status != userSuccess { - msg := arbutil.ToStringOrHex(output.intoBytes()) + msg := arbutil.ToStringOrHex(rustBytesIntoBytes(output)) log.Error("failed to set stylus compilation target", "status", status, "msg", msg) return fmt.Errorf("failed to set stylus compilation target, status %v: %v", status, msg) } return nil } -func (value bytes32) toHash() 
common.Hash { +func bytes32ToHash(value *bytes32) common.Hash { hash := common.Hash{} for index, b := range value.bytes { hash[index] = byte(b) @@ -449,22 +449,28 @@ func addressToBytes20(addr common.Address) bytes20 { return value } -func (slice *rustSlice) read() []byte { +func readRustSlice(slice *rustSlice) []byte { + if slice.len == 0 { + return nil + } return arbutil.PointerToSlice((*byte)(slice.ptr), int(slice.len)) } -func (vec *rustBytes) read() []byte { +func readRustBytes(vec *rustBytes) []byte { + if vec.len == 0 { + return nil + } return arbutil.PointerToSlice((*byte)(vec.ptr), int(vec.len)) } -func (vec *rustBytes) intoBytes() []byte { - slice := vec.read() - vec.drop() +func rustBytesIntoBytes(vec *rustBytes) []byte { + slice := readRustBytes(vec) + dropRustBytes(vec) return slice } -func (vec *rustBytes) drop() { - C.stylus_drop_vec(*vec) +func dropRustBytes(vec *rustBytes) { + C.free_rust_bytes(*vec) } func goSlice(slice []byte) C.GoSliceData { diff --git a/arbos/programs/testcompile.go b/arbos/programs/testcompile.go index 8a4e38444a..58afa228d5 100644 --- a/arbos/programs/testcompile.go +++ b/arbos/programs/testcompile.go @@ -35,10 +35,10 @@ func Wat2Wasm(wat []byte) ([]byte, error) { status := C.wat_to_wasm(goSlice(wat), output) if status != 0 { - return nil, fmt.Errorf("failed reading wat file: %v", string(output.intoBytes())) + return nil, fmt.Errorf("failed reading wat file: %v", string(rustBytesIntoBytes(output))) } - return output.intoBytes(), nil + return rustBytesIntoBytes(output), nil } func testCompileArch(store bool) error { @@ -66,7 +66,7 @@ func testCompileArch(store bool) error { cbool(nativeArm64)) if status != 0 { - return fmt.Errorf("failed setting compilation target arm: %v", string(output.intoBytes())) + return fmt.Errorf("failed setting compilation target arm: %v", string(rustBytesIntoBytes(output))) } status = C.stylus_target_set(goSlice(amd64CompileName), @@ -75,7 +75,7 @@ func testCompileArch(store bool) error { cbool(nativeAmd64)) if status != 0 { - return fmt.Errorf("failed setting compilation target amd: %v", string(output.intoBytes())) + return fmt.Errorf("failed setting compilation target amd: %v", string(rustBytesIntoBytes(output))) } source, err := os.ReadFile("../../arbitrator/stylus/tests/add.wat") @@ -107,7 +107,7 @@ func testCompileArch(store bool) error { output, ) if status == 0 { - return fmt.Errorf("succeeded compiling non-existent arch: %v", string(output.intoBytes())) + return fmt.Errorf("succeeded compiling non-existent arch: %v", string(rustBytesIntoBytes(output))) } status = C.stylus_compile( @@ -118,7 +118,7 @@ func testCompileArch(store bool) error { output, ) if status != 0 { - return fmt.Errorf("failed compiling native: %v", string(output.intoBytes())) + return fmt.Errorf("failed compiling native: %v", string(rustBytesIntoBytes(output))) } if store && !nativeAmd64 && !nativeArm64 { _, err := fmt.Printf("writing host file\n") @@ -126,7 +126,7 @@ func testCompileArch(store bool) error { return err } - err = os.WriteFile("../../target/testdata/host.bin", output.intoBytes(), 0644) + err = os.WriteFile("../../target/testdata/host.bin", rustBytesIntoBytes(output), 0644) if err != nil { return err } @@ -140,7 +140,7 @@ func testCompileArch(store bool) error { output, ) if status != 0 { - return fmt.Errorf("failed compiling arm: %v", string(output.intoBytes())) + return fmt.Errorf("failed compiling arm: %v", string(rustBytesIntoBytes(output))) } if store { _, err := fmt.Printf("writing arm file\n") @@ -148,7 +148,7 @@ func 
testCompileArch(store bool) error { return err } - err = os.WriteFile("../../target/testdata/arm64.bin", output.intoBytes(), 0644) + err = os.WriteFile("../../target/testdata/arm64.bin", rustBytesIntoBytes(output), 0644) if err != nil { return err } @@ -162,7 +162,7 @@ func testCompileArch(store bool) error { output, ) if status != 0 { - return fmt.Errorf("failed compiling amd: %v", string(output.intoBytes())) + return fmt.Errorf("failed compiling amd: %v", string(rustBytesIntoBytes(output))) } if store { _, err := fmt.Printf("writing amd64 file\n") @@ -170,7 +170,7 @@ func testCompileArch(store bool) error { return err } - err = os.WriteFile("../../target/testdata/amd64.bin", output.intoBytes(), 0644) + err = os.WriteFile("../../target/testdata/amd64.bin", rustBytesIntoBytes(output), 0644) if err != nil { return err } @@ -195,7 +195,7 @@ func resetNativeTarget() error { cbool(true)) if status != 0 { - return fmt.Errorf("failed setting compilation target arm: %v", string(output.intoBytes())) + return fmt.Errorf("failed setting compilation target arm: %v", string(rustBytesIntoBytes(output))) } return nil @@ -260,7 +260,7 @@ func testCompileLoad() error { return err } - _, msg, err := status.toResult(output.intoBytes(), true) + _, msg, err := status.toResult(rustBytesIntoBytes(output), true) if status == userFailure { err = fmt.Errorf("%w: %v", err, msg) } diff --git a/arbos/tx_processor.go b/arbos/tx_processor.go index aec08b15b5..7cebd8da37 100644 --- a/arbos/tx_processor.go +++ b/arbos/tx_processor.go @@ -307,7 +307,7 @@ func (p *TxProcessor) StartTxHook() (endTxNow bool, gasUsed uint64, err error, r // pay for the retryable's gas and update the pools gascost := arbmath.BigMulByUint(effectiveBaseFee, usergas) networkCost := gascost - if p.state.ArbOSVersion() >= 11 { + if p.state.ArbOSVersion() >= params.ArbosVersion_11 { infraFeeAccount, err := p.state.InfraFeeAccount() p.state.Restrict(err) if infraFeeAccount != (common.Address{}) { @@ -576,7 +576,7 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { takeFunds(maxRefund, arbmath.BigMulByUint(effectiveBaseFee, gasUsed)) // Refund any unused gas, without overdrafting the L1 deposit. 
networkRefund := gasRefund - if p.state.ArbOSVersion() >= 11 { + if p.state.ArbOSVersion() >= params.ArbosVersion_11 { infraFeeAccount, err := p.state.InfraFeeAccount() p.state.Restrict(err) if infraFeeAccount != (common.Address{}) { @@ -629,7 +629,7 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { } purpose := "feeCollection" - if p.state.ArbOSVersion() > 4 { + if p.state.ArbOSVersion() > params.ArbosVersion_4 { infraFeeAccount, err := p.state.InfraFeeAccount() p.state.Restrict(err) if infraFeeAccount != (common.Address{}) { @@ -646,11 +646,11 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { util.MintBalance(&networkFeeAccount, computeCost, p.evm, scenario, purpose) } posterFeeDestination := l1pricing.L1PricerFundsPoolAddress - if p.state.ArbOSVersion() < 2 { + if p.state.ArbOSVersion() < params.ArbosVersion_2 { posterFeeDestination = p.evm.Context.Coinbase } util.MintBalance(&posterFeeDestination, p.PosterFee, p.evm, scenario, purpose) - if p.state.ArbOSVersion() >= 10 { + if p.state.ArbOSVersion() >= params.ArbosVersion_10 { if _, err := p.state.L1PricingState().AddToL1FeesAvailable(p.PosterFee); err != nil { log.Error("failed to update L1FeesAvailable: ", "err", err) } @@ -748,13 +748,13 @@ func (p *TxProcessor) L1BlockHash(blockCtx vm.BlockContext, l1BlockNumber uint64 func (p *TxProcessor) DropTip() bool { version := p.state.ArbOSVersion() - return version != 9 || p.delayedInbox + return version != params.ArbosVersion_9 || p.delayedInbox } func (p *TxProcessor) GetPaidGasPrice() *big.Int { gasPrice := p.evm.GasPrice version := p.state.ArbOSVersion() - if version != 9 { + if version != params.ArbosVersion_9 { // p.evm.Context.BaseFee is already lowered to 0 when vm runs with NoBaseFee flag and 0 gas price gasPrice = p.evm.Context.BaseFee } @@ -762,7 +762,7 @@ func (p *TxProcessor) GetPaidGasPrice() *big.Int { } func (p *TxProcessor) GasPriceOp(evm *vm.EVM) *big.Int { - if p.state.ArbOSVersion() >= 3 { + if p.state.ArbOSVersion() >= params.ArbosVersion_3 { return p.GetPaidGasPrice() } return evm.GasPrice diff --git a/arbos/util/transfer.go b/arbos/util/transfer.go index 37437e01f6..0b61868abe 100644 --- a/arbos/util/transfer.go +++ b/arbos/util/transfer.go @@ -15,6 +15,7 @@ import ( "github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/util/arbmath" ) @@ -66,11 +67,10 @@ func TransferBalance( if arbmath.BigLessThan(balance.ToBig(), amount) { return fmt.Errorf("%w: addr %v have %v want %v", vm.ErrInsufficientBalance, *from, balance, amount) } - evm.StateDB.SubBalance(*from, uint256.MustFromBig(amount), tracing.BalanceChangeTransfer) - if evm.Context.ArbOSVersion >= 30 { - // ensure the from account is "touched" for EIP-161 - evm.StateDB.AddBalance(*from, &uint256.Int{}, tracing.BalanceChangeTransfer) + if evm.Context.ArbOSVersion < params.ArbosVersion_30 && amount.Sign() == 0 { + evm.StateDB.CreateZombieIfDeleted(*from) } + evm.StateDB.SubBalance(*from, uint256.MustFromBig(amount), tracing.BalanceChangeTransfer) } if to != nil { evm.StateDB.AddBalance(*to, uint256.MustFromBig(amount), tracing.BalanceChangeTransfer) diff --git a/bold b/bold new file mode 160000 index 0000000000..d0a87de774 --- /dev/null +++ b/bold @@ -0,0 +1 @@ +Subproject commit d0a87de774aecfa97161efd1b0a924d4d5fbcf74 diff --git a/cmd/chaininfo/arbitrum_chain_info.json b/cmd/chaininfo/arbitrum_chain_info.json index f862c6dfbf..d0da391cf8 
100644 --- a/cmd/chaininfo/arbitrum_chain_info.json +++ b/cmd/chaininfo/arbitrum_chain_info.json @@ -44,6 +44,7 @@ "sequencer-inbox": "0x1c479675ad559dc151f6ec7ed3fbf8cee79582b6", "validator-utils": "0x9e40625f52829cf04bc4839f186d621ee33b0e67", "validator-wallet-creator": "0x960953f7c69cd2bc2322db9223a815c680ccc7ea", + "stake-token": "0x82aF49447D8a07e3bd95BD0d56f35241523fBab1", "deployed-at": 15411056 } }, @@ -90,6 +91,7 @@ "sequencer-inbox": "0x211e1c4c7f1bf5351ac850ed10fd68cffcf6c21b", "validator-utils": "0x2B081fbaB646D9013f2699BebEf62B7e7d7F0976", "validator-wallet-creator": "0xe05465Aab36ba1277dAE36aa27a7B74830e74DE4", + "stake-token": "0x765277eebeca2e31912c9946eae1021199b39c61", "deployed-at": 15016829 } }, @@ -245,6 +247,7 @@ "rollup": "0xd80810638dbDF9081b72C1B33c65375e807281C8", "validator-utils": "0x1f6860C3cac255fFFa72B7410b1183c3a0D261e0", "validator-wallet-creator": "0x894fC71fA0A666352824EC954B401573C861D664", + "stake-token": "0xefb383126640fe4a760010c6e59c397d2b6c7141", "deployed-at": 4139226 } }, diff --git a/cmd/chaininfo/chain_info.go b/cmd/chaininfo/chain_info.go index aa40d6514f..35f28bebb9 100644 --- a/cmd/chaininfo/chain_info.go +++ b/cmd/chaininfo/chain_info.go @@ -120,5 +120,6 @@ type RollupAddresses struct { UpgradeExecutor common.Address `json:"upgrade-executor"` ValidatorUtils common.Address `json:"validator-utils"` ValidatorWalletCreator common.Address `json:"validator-wallet-creator"` + StakeToken common.Address `json:"stake-token"` DeployedAt uint64 `json:"deployed-at"` } diff --git a/cmd/conf/database.go b/cmd/conf/database.go index 8857b615f3..8d05c44500 100644 --- a/cmd/conf/database.go +++ b/cmd/conf/database.go @@ -112,16 +112,19 @@ func (c *PersistentConfig) Validate() error { } type PebbleConfig struct { + SyncMode bool `koanf:"sync-mode"` MaxConcurrentCompactions int `koanf:"max-concurrent-compactions"` Experimental PebbleExperimentalConfig `koanf:"experimental"` } var PebbleConfigDefault = PebbleConfig{ + SyncMode: false, // use NO-SYNC mode, see: https://github.com/ethereum/go-ethereum/issues/29819 MaxConcurrentCompactions: runtime.NumCPU(), Experimental: PebbleExperimentalConfigDefault, } func PebbleConfigAddOptions(prefix string, f *flag.FlagSet, defaultConfig *PebbleConfig) { + f.Bool(prefix+".sync-mode", defaultConfig.SyncMode, "if true sync mode is used (data needs to be written to WAL before the write is marked as completed)") f.Int(prefix+".max-concurrent-compactions", defaultConfig.MaxConcurrentCompactions, "maximum number of concurrent compactions") PebbleExperimentalConfigAddOptions(prefix+".experimental", f, &defaultConfig.Experimental) } @@ -180,7 +183,7 @@ var PebbleExperimentalConfigDefault = PebbleExperimentalConfig{ BlockSize: 4 << 10, // 4 KB IndexBlockSize: 4 << 10, // 4 KB TargetFileSize: 2 << 20, // 2 MB - TargetFileSizeEqualLevels: true, + TargetFileSizeEqualLevels: false, L0CompactionConcurrency: 10, CompactionDebtConcurrency: 1 << 30, // 1GB @@ -251,6 +254,7 @@ func (c *PebbleConfig) ExtraOptions(namespace string) *pebble.ExtraOptions { walDir = path.Join(walDir, namespace) } return &pebble.ExtraOptions{ + SyncMode: c.SyncMode, BytesPerSync: c.Experimental.BytesPerSync, L0CompactionFileThreshold: c.Experimental.L0CompactionFileThreshold, L0CompactionThreshold: c.Experimental.L0CompactionThreshold, diff --git a/cmd/conf/init.go b/cmd/conf/init.go index cd2b6c8805..74bd89fd16 100644 --- a/cmd/conf/init.go +++ b/cmd/conf/init.go @@ -27,6 +27,7 @@ type InitConfig struct { ImportWasm bool `koanf:"import-wasm"` AccountsPerSync 
uint `koanf:"accounts-per-sync"` ImportFile string `koanf:"import-file"` + GenesisJsonFile string `koanf:"genesis-json-file"` ThenQuit bool `koanf:"then-quit"` Prune string `koanf:"prune"` PruneBloomSize uint64 `koanf:"prune-bloom-size"` @@ -54,6 +55,7 @@ var InitConfigDefault = InitConfig{ Empty: false, ImportWasm: false, ImportFile: "", + GenesisJsonFile: "", AccountsPerSync: 100000, ThenQuit: false, Prune: "", @@ -83,6 +85,7 @@ func InitConfigAddOptions(prefix string, f *pflag.FlagSet) { f.Bool(prefix+".import-wasm", InitConfigDefault.ImportWasm, "if set, import the wasm directory when downloading a database (contains executable code - only use with highly trusted source)") f.Bool(prefix+".then-quit", InitConfigDefault.ThenQuit, "quit after init is done") f.String(prefix+".import-file", InitConfigDefault.ImportFile, "path for json data to import") + f.String(prefix+".genesis-json-file", InitConfigDefault.GenesisJsonFile, "path for genesis json file") f.Uint(prefix+".accounts-per-sync", InitConfigDefault.AccountsPerSync, "during init - sync database every X accounts. Lower value for low-memory systems. 0 disables.") f.String(prefix+".prune", InitConfigDefault.Prune, "pruning for a given use: \"full\" for full nodes serving RPC requests, or \"validator\" for validators") f.Uint64(prefix+".prune-bloom-size", InitConfigDefault.PruneBloomSize, "the amount of memory in megabytes to use for the pruning bloom filter (higher values prune better)") diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go index eb6d7df6fc..93c51a0040 100644 --- a/cmd/nitro/init.go +++ b/cmd/nitro/init.go @@ -335,12 +335,12 @@ func validateBlockChain(blockChain *core.BlockChain, chainConfig *params.ChainCo } // Make sure we don't allow accidentally downgrading ArbOS if chainConfig.DebugMode() { - if currentArbosState.ArbOSVersion() > arbosState.MaxDebugArbosVersionSupported { - return fmt.Errorf("attempted to launch node in debug mode with ArbOS version %v on ArbOS state with version %v", arbosState.MaxDebugArbosVersionSupported, currentArbosState.ArbOSVersion()) + if currentArbosState.ArbOSVersion() > params.MaxDebugArbosVersionSupported { + return fmt.Errorf("attempted to launch node in debug mode with ArbOS version %v on ArbOS state with version %v", params.MaxDebugArbosVersionSupported, currentArbosState.ArbOSVersion()) } } else { - if currentArbosState.ArbOSVersion() > arbosState.MaxArbosVersionSupported { - return fmt.Errorf("attempted to launch node with ArbOS version %v on ArbOS state with version %v", arbosState.MaxArbosVersionSupported, currentArbosState.ArbOSVersion()) + if currentArbosState.ArbOSVersion() > params.MaxArbosVersionSupported { + return fmt.Errorf("attempted to launch node with ArbOS version %v on ArbOS state with version %v", params.MaxArbosVersionSupported, currentArbosState.ArbOSVersion()) } } @@ -689,6 +689,36 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo var chainConfig *params.ChainConfig + if config.Init.GenesisJsonFile != "" { + if initDataReader != nil { + return chainDb, nil, errors.New("multiple init methods supplied") + } + genesisJson, err := os.ReadFile(config.Init.GenesisJsonFile) + if err != nil { + return chainDb, nil, err + } + var gen core.Genesis + if err := json.Unmarshal(genesisJson, &gen); err != nil { + return chainDb, nil, err + } + var accounts []statetransfer.AccountInitializationInfo + for address, account := range gen.Alloc { + accounts = append(accounts, statetransfer.AccountInitializationInfo{ + Addr: address, + EthBalance: 
account.Balance, + Nonce: account.Nonce, + ContractInfo: &statetransfer.AccountInitContractInfo{ + Code: account.Code, + ContractStorage: account.Storage, + }, + }) + } + initDataReader = statetransfer.NewMemoryInitDataReader(&statetransfer.ArbosInitializationInfo{ + Accounts: accounts, + }) + chainConfig = gen.Config + } + var l2BlockChain *core.BlockChain txIndexWg := sync.WaitGroup{} if initDataReader == nil { @@ -714,9 +744,11 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo if err != nil { return chainDb, nil, err } - chainConfig, err = chaininfo.GetChainConfig(new(big.Int).SetUint64(config.Chain.ID), config.Chain.Name, genesisBlockNr, config.Chain.InfoFiles, config.Chain.InfoJson) - if err != nil { - return chainDb, nil, err + if chainConfig == nil { + chainConfig, err = chaininfo.GetChainConfig(new(big.Int).SetUint64(config.Chain.ID), config.Chain.Name, genesisBlockNr, config.Chain.InfoFiles, config.Chain.InfoJson) + if err != nil { + return chainDb, nil, err + } } if config.Init.DevInit && config.Init.DevMaxCodeSize != 0 { chainConfig.ArbitrumChainParams.MaxCodeSize = config.Init.DevMaxCodeSize diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index f3cf99cc50..e7afa6389e 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -59,7 +59,7 @@ import ( "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/solgen/go/rollupgen" - "github.com/offchainlabs/nitro/staker" + legacystaker "github.com/offchainlabs/nitro/staker/legacy" "github.com/offchainlabs/nitro/staker/validatorwallet" "github.com/offchainlabs/nitro/util/colors" "github.com/offchainlabs/nitro/util/dbutil" @@ -257,7 +257,7 @@ func mainImpl() int { defaultL1WalletConfig.ResolveDirectoryNames(nodeConfig.Persistent.Chain) nodeConfig.Node.Staker.ParentChainWallet.ResolveDirectoryNames(nodeConfig.Persistent.Chain) - defaultValidatorL1WalletConfig := staker.DefaultValidatorL1WalletConfig + defaultValidatorL1WalletConfig := legacystaker.DefaultValidatorL1WalletConfig defaultValidatorL1WalletConfig.ResolveDirectoryNames(nodeConfig.Persistent.Chain) nodeConfig.Node.BatchPoster.ParentChainWallet.ResolveDirectoryNames(nodeConfig.Persistent.Chain) @@ -290,11 +290,11 @@ func mainImpl() int { flag.Usage() log.Crit("validator must have the parent chain reader enabled") } - strategy, err := nodeConfig.Node.Staker.ParseStrategy() + strategy, err := legacystaker.ParseStrategy(nodeConfig.Node.Staker.Strategy) if err != nil { log.Crit("couldn't parse staker strategy", "err", err) } - if strategy != staker.WatchtowerStrategy && !nodeConfig.Node.Staker.Dangerous.WithoutBlockValidator { + if strategy != legacystaker.WatchtowerStrategy && !nodeConfig.Node.Staker.Dangerous.WithoutBlockValidator { nodeConfig.Node.BlockValidator.Enable = true } } @@ -616,6 +616,46 @@ func mainImpl() int { } } } + + // Before starting the node, wait until the transaction that deployed rollup is finalized + if nodeConfig.EnsureRollupDeployment && + nodeConfig.Node.ParentChainReader.Enable && + rollupAddrs.DeployedAt > 0 { + currentFinalized, err := l1Reader.LatestFinalizedBlockNr(ctx) + if err != nil && errors.Is(err, headerreader.ErrBlockNumberNotSupported) { + log.Info("Finality not supported by parent chain, disabling the check to verify if rollup deployment tx was finalized", "err", err) + } else { + newHeaders, unsubscribe := l1Reader.Subscribe(false) + retriesOnError := 10 + sigint := make(chan os.Signal, 1) + signal.Notify(sigint, 
os.Interrupt, syscall.SIGTERM) + for currentFinalized < rollupAddrs.DeployedAt && retriesOnError > 0 { + select { + case <-newHeaders: + if finalized, err := l1Reader.LatestFinalizedBlockNr(ctx); err != nil { + if errors.Is(err, headerreader.ErrBlockNumberNotSupported) { + log.Error("Finality support was removed from parent chain mid way, disabling the check to verify if the rollup deployment tx was finalized", "err", err) + retriesOnError = 0 // Break out of for loop as well + break + } + log.Error("Error getting latestFinalizedBlockNr from l1Reader", "err", err) + retriesOnError-- + } else { + currentFinalized = finalized + log.Debug("Finalized block number updated", "finalized", finalized) + } + case <-ctx.Done(): + log.Error("Context done while checking if the rollup deployment tx was finalized") + return 1 + case <-sigint: + log.Info("shutting down because of sigint") + return 0 + } + } + unsubscribe() + } + } + gqlConf := nodeConfig.GraphQL if gqlConf.Enable { if err := graphql.New(stack, execNode.Backend.APIBackend(), execNode.FilterSystem, gqlConf.CORSDomain, gqlConf.VHosts); err != nil { @@ -687,53 +727,55 @@ func mainImpl() int { } type NodeConfig struct { - Conf genericconf.ConfConfig `koanf:"conf" reload:"hot"` - Node arbnode.Config `koanf:"node" reload:"hot"` - Execution gethexec.Config `koanf:"execution" reload:"hot"` - Validation valnode.Config `koanf:"validation" reload:"hot"` - ParentChain conf.ParentChainConfig `koanf:"parent-chain" reload:"hot"` - Chain conf.L2Config `koanf:"chain"` - LogLevel string `koanf:"log-level" reload:"hot"` - LogType string `koanf:"log-type" reload:"hot"` - FileLogging genericconf.FileLoggingConfig `koanf:"file-logging" reload:"hot"` - Persistent conf.PersistentConfig `koanf:"persistent"` - HTTP genericconf.HTTPConfig `koanf:"http"` - WS genericconf.WSConfig `koanf:"ws"` - IPC genericconf.IPCConfig `koanf:"ipc"` - Auth genericconf.AuthRPCConfig `koanf:"auth"` - GraphQL genericconf.GraphQLConfig `koanf:"graphql"` - Metrics bool `koanf:"metrics"` - MetricsServer genericconf.MetricsServerConfig `koanf:"metrics-server"` - PProf bool `koanf:"pprof"` - PprofCfg genericconf.PProf `koanf:"pprof-cfg"` - Init conf.InitConfig `koanf:"init"` - Rpc genericconf.RpcConfig `koanf:"rpc"` - BlocksReExecutor blocksreexecutor.Config `koanf:"blocks-reexecutor"` + Conf genericconf.ConfConfig `koanf:"conf" reload:"hot"` + Node arbnode.Config `koanf:"node" reload:"hot"` + Execution gethexec.Config `koanf:"execution" reload:"hot"` + Validation valnode.Config `koanf:"validation" reload:"hot"` + ParentChain conf.ParentChainConfig `koanf:"parent-chain" reload:"hot"` + Chain conf.L2Config `koanf:"chain"` + LogLevel string `koanf:"log-level" reload:"hot"` + LogType string `koanf:"log-type" reload:"hot"` + FileLogging genericconf.FileLoggingConfig `koanf:"file-logging" reload:"hot"` + Persistent conf.PersistentConfig `koanf:"persistent"` + HTTP genericconf.HTTPConfig `koanf:"http"` + WS genericconf.WSConfig `koanf:"ws"` + IPC genericconf.IPCConfig `koanf:"ipc"` + Auth genericconf.AuthRPCConfig `koanf:"auth"` + GraphQL genericconf.GraphQLConfig `koanf:"graphql"` + Metrics bool `koanf:"metrics"` + MetricsServer genericconf.MetricsServerConfig `koanf:"metrics-server"` + PProf bool `koanf:"pprof"` + PprofCfg genericconf.PProf `koanf:"pprof-cfg"` + Init conf.InitConfig `koanf:"init"` + Rpc genericconf.RpcConfig `koanf:"rpc"` + BlocksReExecutor blocksreexecutor.Config `koanf:"blocks-reexecutor"` + EnsureRollupDeployment bool `koanf:"ensure-rollup-deployment" reload:"hot"` } var 
NodeConfigDefault = NodeConfig{ - Conf: genericconf.ConfConfigDefault, - Node: arbnode.ConfigDefault, - Execution: gethexec.ConfigDefault, - Validation: valnode.DefaultValidationConfig, - ParentChain: conf.L1ConfigDefault, - Chain: conf.L2ConfigDefault, - LogLevel: "INFO", - LogType: "plaintext", - FileLogging: genericconf.DefaultFileLoggingConfig, - Persistent: conf.PersistentConfigDefault, - HTTP: genericconf.HTTPConfigDefault, - WS: genericconf.WSConfigDefault, - IPC: genericconf.IPCConfigDefault, - Auth: genericconf.AuthRPCConfigDefault, - GraphQL: genericconf.GraphQLConfigDefault, - Metrics: false, - MetricsServer: genericconf.MetricsServerConfigDefault, - Init: conf.InitConfigDefault, - Rpc: genericconf.DefaultRpcConfig, - PProf: false, - PprofCfg: genericconf.PProfDefault, - BlocksReExecutor: blocksreexecutor.DefaultConfig, + Conf: genericconf.ConfConfigDefault, + Node: arbnode.ConfigDefault, + Execution: gethexec.ConfigDefault, + Validation: valnode.DefaultValidationConfig, + ParentChain: conf.L1ConfigDefault, + Chain: conf.L2ConfigDefault, + LogLevel: "INFO", + LogType: "plaintext", + FileLogging: genericconf.DefaultFileLoggingConfig, + Persistent: conf.PersistentConfigDefault, + HTTP: genericconf.HTTPConfigDefault, + WS: genericconf.WSConfigDefault, + IPC: genericconf.IPCConfigDefault, + Auth: genericconf.AuthRPCConfigDefault, + GraphQL: genericconf.GraphQLConfigDefault, + Metrics: false, + MetricsServer: genericconf.MetricsServerConfigDefault, + Init: conf.InitConfigDefault, + Rpc: genericconf.DefaultRpcConfig, + PProf: false, + PprofCfg: genericconf.PProfDefault, + BlocksReExecutor: blocksreexecutor.DefaultConfig, + EnsureRollupDeployment: true, } func NodeConfigAddOptions(f *flag.FlagSet) { @@ -760,6 +802,7 @@ func NodeConfigAddOptions(f *flag.FlagSet) { conf.InitConfigAddOptions("init", f) genericconf.RpcConfigAddOptions("rpc", f) blocksreexecutor.ConfigAddOptions("blocks-reexecutor", f) + f.Bool("ensure-rollup-deployment", NodeConfigDefault.EnsureRollupDeployment, "before starting the node, wait until the transaction that deployed rollup is finalized") } func (c *NodeConfig) ResolveDirectoryNames() error { diff --git a/contracts b/contracts index bec7d629c5..763bd77906 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit bec7d629c5f4a9dc4ec786e9d6e99734a11d109b +Subproject commit 763bd77906b7677da691eaa31c6e195d455197a4 diff --git a/das/aggregator.go b/das/aggregator.go index 372e448e76..85fccb078f 100644 --- a/das/aggregator.go +++ b/das/aggregator.go @@ -254,7 +254,7 @@ func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64) var sigs []blsSignatures.Signature var aggSignersMask uint64 var successfullyStoredCount int - var returned bool + var returned int // 0-no status, 1-succeeded, 2-failed for i := 0; i < len(a.services); i++ { select { case <-ctx.Done(): @@ -276,26 +276,26 @@ func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64) // certDetailsChan, so the Store function can return, but also continue // running until all responses are received (or the context is canceled) // in order to produce accurate logs/metrics. - if !returned { + if returned == 0 { if successfullyStoredCount >= a.requiredServicesForStore { cd := certDetails{} cd.pubKeys = append(cd.pubKeys, pubKeys...) cd.sigs = append(cd.sigs, sigs...) 
cd.aggSignersMask = aggSignersMask certDetailsChan <- cd - returned = true - if a.maxAllowedServiceStoreFailures > 0 && // Ignore the case where AssumedHonest = 1, probably a testnet - int(storeFailures.Load())+1 > a.maxAllowedServiceStoreFailures { - log.Error("das.Aggregator: storing the batch data succeeded to enough DAS commitee members to generate the Data Availability Cert, but if one more had failed then the cert would not have been able to be generated. Look for preceding logs with \"Error from backend\"") - } + returned = 1 } else if int(storeFailures.Load()) > a.maxAllowedServiceStoreFailures { cd := certDetails{} cd.err = fmt.Errorf("aggregator failed to store message to at least %d out of %d DASes (assuming %d are honest). %w", a.requiredServicesForStore, len(a.services), a.config.AssumedHonest, daprovider.ErrBatchToDasFailed) certDetailsChan <- cd - returned = true + returned = 2 } } - + } + if returned == 1 && + a.maxAllowedServiceStoreFailures > 0 && // Ignore the case where AssumedHonest = 1, probably a testnet + int(storeFailures.Load())+1 > a.maxAllowedServiceStoreFailures { + log.Error("das.Aggregator: storing the batch data succeeded to enough DAS commitee members to generate the Data Availability Cert, but if one more had failed then the cert would not have been able to be generated. Look for preceding logs with \"Error from backend\"") } }() diff --git a/das/reader_aggregator_strategies.go b/das/reader_aggregator_strategies.go index 8e10d52c16..e072fdd85c 100644 --- a/das/reader_aggregator_strategies.go +++ b/das/reader_aggregator_strategies.go @@ -5,6 +5,7 @@ package das import ( "errors" + "maps" "math/rand" "sort" "sync" @@ -33,10 +34,7 @@ func (s *abstractAggregatorStrategy) update(readers []daprovider.DASReader, stat s.readers = make([]daprovider.DASReader, len(readers)) copy(s.readers, readers) - s.stats = make(map[daprovider.DASReader]readerStats) - for k, v := range stats { - s.stats[k] = v - } + s.stats = maps.Clone(stats) } // Exponentially growing Explore Exploit Strategy diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go index 69535e82be..ffc6ceee9f 100644 --- a/execution/gethexec/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -789,7 +789,8 @@ func (s *ExecutionEngine) cacheL1PriceDataOfMsg(seqNum arbutil.MessageIndex, rec gasUsedForL1 += receipts[i].GasUsedForL1 } for _, tx := range block.Transactions() { - callDataUnits += tx.CalldataUnits + _, cachedUnits := tx.GetRawCachedCalldataUnits() + callDataUnits += cachedUnits } } l1GasCharged := gasUsedForL1 * block.BaseFee().Uint64() diff --git a/execution/gethexec/express_lane_service.go b/execution/gethexec/express_lane_service.go index c981160814..6dbcc4e9af 100644 --- a/execution/gethexec/express_lane_service.go +++ b/execution/gethexec/express_lane_service.go @@ -363,8 +363,12 @@ func (es *expressLaneService) sequenceExpressLaneSubmission( nextMsg.Transaction, msg.Options, ); err != nil { + txHash := common.Hash{} + if nextMsg.Transaction != nil { + txHash = nextMsg.Transaction.Hash() + } // If the tx fails we return an error with all the necessary info for the controller to successfully try again - return fmt.Errorf("express lane transaction of sequence number: %d and transaction hash: %v, failed with an error: %w", nextMsg.SequenceNumber, nextMsg.Transaction.Hash(), err) + return fmt.Errorf("express lane transaction of sequence number: %d and transaction hash: %v, failed with an error: %w", nextMsg.SequenceNumber, txHash, err) } // Increase the 
global round sequence number. control.sequence += 1 diff --git a/execution/gethexec/express_lane_service_test.go b/execution/gethexec/express_lane_service_test.go index 736fff53eb..6ec62b937b 100644 --- a/execution/gethexec/express_lane_service_test.go +++ b/execution/gethexec/express_lane_service_test.go @@ -298,7 +298,7 @@ func makeStubPublisher(els *expressLaneService) *stubPublisher { } func (s *stubPublisher) PublishTimeboostedTransaction(parentCtx context.Context, tx *types.Transaction, options *arbitrum_types.ConditionalOptions) error { - if tx.CalldataUnits != 0 { + if tx == nil { return errors.New("oops, bad tx") } control, _ := s.els.roundControl.Get(0) @@ -425,16 +425,15 @@ func Test_expressLaneService_sequenceExpressLaneSubmission_erroredTx(t *testing. }, { SequenceNumber: 2, - Transaction: types.NewTx(&types.DynamicFeeTx{}), + Transaction: nil, }, { SequenceNumber: 2, Transaction: &types.Transaction{}, }, } - messages[2].Transaction.CalldataUnits = 1 for _, msg := range messages { - if msg.Transaction.CalldataUnits != 0 { + if msg.Transaction == nil { err := els.sequenceExpressLaneSubmission(ctx, msg) require.ErrorContains(t, err, "oops, bad tx") } else { diff --git a/go-ethereum b/go-ethereum index ed53c04acc..5cb0b54511 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit ed53c04acc1637bbe1e07725fff82066c6687512 +Subproject commit 5cb0b545113ed1dd1c56a281a7c9f9c8534dae99 diff --git a/go.mod b/go.mod index 2ef5cef441..417d2f7fa1 100644 --- a/go.mod +++ b/go.mod @@ -6,6 +6,8 @@ replace github.com/VictoriaMetrics/fastcache => ./fastcache replace github.com/ethereum/go-ethereum => ./go-ethereum +replace github.com/offchainlabs/bold => ./bold + require ( cloud.google.com/go/storage v1.43.0 github.com/DATA-DOG/go-sqlmock v1.5.2 @@ -19,11 +21,12 @@ require ( github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.27 github.com/aws/aws-sdk-go-v2/service/s3 v1.64.1 github.com/cavaliergopher/grab/v3 v3.0.1 + github.com/ccoveille/go-safecast v1.1.0 github.com/cockroachdb/pebble v1.1.0 github.com/codeclysm/extract/v3 v3.0.2 github.com/dgraph-io/badger/v4 v4.2.0 github.com/enescakir/emoji v1.0.0 - github.com/ethereum/go-ethereum v1.10.26 + github.com/ethereum/go-ethereum v1.13.15 github.com/fatih/structtag v1.2.0 github.com/gdamore/tcell/v2 v2.7.1 github.com/gobwas/httphead v0.1.0 @@ -40,6 +43,7 @@ require ( github.com/mailru/easygo v0.0.0-20190618140210-3c14a0dc985f github.com/mattn/go-sqlite3 v1.14.22 github.com/mitchellh/mapstructure v1.4.1 + github.com/offchainlabs/bold v0.0.0-00010101000000-000000000000 github.com/pkg/errors v0.9.1 github.com/r3labs/diff/v3 v3.0.1 github.com/redis/go-redis/v9 v9.6.1 @@ -49,7 +53,7 @@ require ( github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 github.com/wealdtech/go-merkletree v1.0.0 golang.org/x/crypto v0.24.0 - golang.org/x/sync v0.7.0 + golang.org/x/sync v0.8.0 golang.org/x/sys v0.21.0 golang.org/x/term v0.21.0 golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d @@ -73,8 +77,6 @@ require ( github.com/google/s2a-go v0.1.7 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect github.com/googleapis/gax-go/v2 v2.12.5 // indirect - github.com/onsi/ginkgo v1.16.5 // indirect - github.com/onsi/gomega v1.18.1 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect go.opentelemetry.io/otel v1.24.0 // indirect @@ -86,15 +88,14 @@ require ( 
google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d // indirect + google.golang.org/grpc v1.64.1 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - google.golang.org/grpc v1.64.1 // indirect ) require ( - github.com/DataDog/zstd v1.4.5 // indirect + github.com/DataDog/zstd v1.5.2 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect - github.com/StackExchange/wmi v1.2.1 // indirect github.com/VictoriaMetrics/fastcache v1.12.1 // indirect github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.5 // indirect @@ -113,7 +114,7 @@ require ( github.com/aws/smithy-go v1.22.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bits-and-blooms/bitset v1.10.0 // indirect - github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect + github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cockroachdb/errors v1.11.1 // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect @@ -148,6 +149,7 @@ require ( github.com/google/flatbuffers v1.12.1 // indirect github.com/google/go-github/v62 v62.0.0 github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b // indirect + github.com/gorilla/mux v1.8.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/graph-gophers/graphql-go v1.3.0 // indirect github.com/h2non/filetype v1.0.6 // indirect @@ -165,7 +167,7 @@ require ( github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/pointerstructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect @@ -175,14 +177,14 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_golang v1.14.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect - github.com/prometheus/common v0.37.0 // indirect - github.com/prometheus/procfs v0.8.0 // indirect + github.com/prometheus/common v0.39.0 // indirect + github.com/prometheus/procfs v0.9.0 // indirect github.com/rhnvrm/simples3 v0.6.1 // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/rogpeppe/go-internal v1.9.0 // indirect github.com/rs/cors v1.7.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect + github.com/shirou/gopsutil v3.21.11+incompatible // indirect github.com/status-im/keycard-go v0.2.0 // indirect github.com/supranational/blst v0.3.11 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect @@ -193,6 +195,7 @@ require ( github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect github.com/yuin/gopher-lua v1.1.1 // indirect + github.com/yusufpapurcu/wmi v1.2.2 // indirect go.opencensus.io v0.24.0 // indirect golang.org/x/mod v0.17.0 // indirect golang.org/x/net v0.26.0 // indirect diff --git a/go.sum b/go.sum index 3f03f6b95b..1e1dc34653 100644 --- 
a/go.sum +++ b/go.sum @@ -1,73 +1,33 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14= cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU= cloud.google.com/go/auth v0.6.1 h1:T0Zw1XM5c1GlpN2HYr2s+m3vr1p2wy+8VN+Z1FKxW38= cloud.google.com/go/auth v0.6.1/go.mod h1:eFHG7zDzbXHKmjJddFG/rBlcGp6t25SwRUiEQSlO4x4= cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/iam v1.1.8 h1:r7umDwhj+BQyz0ScZMp4QrGXjSTI3ZINnpgU2nlB/K0= cloud.google.com/go/iam v1.1.8/go.mod h1:GvE6lyMmfxXauzNq8NbgJbeVQNspG+tcdL/W8QO1+zE= cloud.google.com/go/longrunning v0.5.7 h1:WLbHekDbjK1fVFD3ibpFFVoyizlLRl73I7YKuAKilhU= cloud.google.com/go/longrunning v0.5.7/go.mod h1:8GClkudohy1Fxm3owmBGid8W0pSgodEMwEAztp38Xng= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= 
-cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs= cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= -github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= -github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= +github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible h1:1G1pk05UrOh0NlF1oeaaix1x8XzrfjIDK47TY0Zehcw= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= -github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= github.com/alicebob/miniredis/v2 v2.32.1 h1:Bz7CciDnYSaa0mX5xODh6GUITRSx+cVhjNoOR4JssBo= @@ -128,8 +88,6 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.31.4/go.mod 
h1:yMWe0F+XG0DkRZK5ODZhG github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/aws/smithy-go v1.22.0 h1:uunKnWlcoL3zO7q+gG2Pk53joueEOsnNB28QdMsmiMM= github.com/aws/smithy-go v1.22.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= @@ -139,12 +97,14 @@ github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= -github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k= -github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU= +github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= +github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/cavaliergopher/grab/v3 v3.0.1 h1:4z7TkBfmPjmLAAmkkAZNX/6QJ1nNFdv3SdIHXju0Fr4= github.com/cavaliergopher/grab/v3 v3.0.1/go.mod h1:1U/KNnD+Ft6JJiYoYBAimKH2XrYptb8Kl3DFGmsjpq4= +github.com/ccoveille/go-safecast v1.1.0 h1:iHKNWaZm+OznO7Eh6EljXPjGfGQsSfa6/sxPlIEKO+g= +github.com/ccoveille/go-safecast v1.1.0/go.mod h1:QqwNjxQ7DAqY0C721OIO9InMk9zCwcsO7tnRuHytad8= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= @@ -244,32 +204,19 @@ github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkN github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= -github.com/go-logfmt/logfmt v0.3.0/go.mod 
h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU= github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= @@ -281,7 +228,6 @@ github.com/gobwas/ws-examples v0.0.0-20190625122829-a9e8908d9484 h1:XC9N1eiAyO1z github.com/gobwas/ws-examples v0.0.0-20190625122829-a9e8908d9484/go.mod h1:5nDZF4afNA1S7ZKcBXCMvDo4nuCTp1931DND7/W4aXo= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= @@ -289,24 +235,13 @@ github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= 
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -315,16 +250,12 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/flatbuffers v1.12.1 h1:MVlul7pQNoDzWRLTw5imwYsl+usrS1TXG2H4jg6ImGw= @@ -333,13 +264,10 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -347,26 +275,13 @@ github.com/google/go-github/v62 v62.0.0 h1:/6mGCaRywZz9MuHyw9gD1CwsbmBX8GWsbFkwM github.com/google/go-github/v62 v62.0.0/go.mod h1:EMxeUqGJq2xRu9DYBMwel/mr7kZrzUOfQmmpYrZn2a4= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg= github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b h1:RMpPgZTSApbPf7xaVel+QkoGPRLFLrwFO89uDUHEGf0= github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= @@ -375,10 +290,10 @@ 
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.12.5 h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBYGmXdxA= github.com/googleapis/gax-go/v2 v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY41SORZyNJ0= @@ -420,8 +335,6 @@ github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXei github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= @@ -431,13 +344,6 @@ github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/juju/clock v0.0.0-20180524022203-d293bb356ca4/go.mod h1:nD0vlnrUjcjJhqN5WuCWZyzfd5AHZAC9/ajvbSx69xA= github.com/juju/errors v0.0.0-20150916125642-1b5e39b83d18/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5 h1:rhqTjzJlm7EbkELJDKMTU7udov+Se0xZkWmugr6zGok= @@ -450,8 +356,6 @@ github.com/juju/testing v0.0.0-20200510222523-6c8c298c77a0 
h1:+WWUkhnTjV6RNOxkcw github.com/juju/testing v0.0.0-20200510222523-6c8c298c77a0/go.mod h1:hpGvhGHPVbNBraRLZEhoQwFLMrjK8PSlO4D3nDjKYXo= github.com/juju/utils v0.0.0-20180808125547-9dfc6dbfb02b/go.mod h1:6/KLg8Wz/y2KVGWEpkK9vMNGkOnu4k/cqs8Z1fKjTOk= github.com/juju/version v0.0.0-20161031051906-1f41e27e54f2/go.mod h1:kE8gK5X0CImdr7qpSKl3xB2PmpySSmfj7zVbkZFs81U= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE= @@ -459,9 +363,6 @@ github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= @@ -493,9 +394,8 @@ github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZ github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= @@ -515,33 +415,20 @@ github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/npillmayer/nestext v0.1.3/go.mod h1:h2lrijH8jpicr25dFY+oAJLyzlya6jhnuG+zWp9L0Uk= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= -github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -551,39 +438,20 @@ github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 
h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= -github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= -github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI= +github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y= +github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= +github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= github.com/r3labs/diff/v3 v3.0.1 h1:CBKqf3XmNRHXKmdU7mZP1w7TV0pDyVCis1AUHtA4Xtg= github.com/r3labs/diff/v3 v3.0.1/go.mod h1:f1S9bourRbiM66NskseyUdo0fTmEE0qKrikYJX63dgo= github.com/redis/go-redis/v9 v9.6.1 
h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4= @@ -596,7 +464,6 @@ github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= @@ -606,23 +473,20 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible h1:Bn1aCHHRnjv4Bl16T8rcaFjYSrGrIZvpiGO6P3Q4GpU= -github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= +github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA= github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -650,18 +514,13 @@ github.com/wealdtech/go-merkletree v1.0.0 h1:DsF1xMzj5rK3pSQM6mPv8jlyJyHXhFxpnA2 github.com/wealdtech/go-merkletree v1.0.0/go.mod 
h1:cdil512d/8ZC7Kx3bfrDvGMQXB25NTKbsm0rFrmDax4= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M= github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= +github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= @@ -677,45 +536,18 @@ go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35 go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= golang.org/x/crypto v0.0.0-20180214000028-650f4a345ab4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp 
v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ= golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= @@ -726,52 +558,21 @@ golang.org/x/net v0.0.0-20180406214816-61147c48b25b/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod 
h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -780,67 +581,32 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -861,9 +627,7 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -874,53 +638,16 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= 
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools 
v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= @@ -929,61 +656,16 @@ golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxb golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/api v0.187.0 h1:Mxs7VATVC2v7CY+7Xwm4ndkX71hpElcvx0D1Ji/p1eo= google.golang.org/api v0.187.0/go.mod h1:KIHlTc4x7N7gKKuVsdmfBXN13yEEWXWFURWY6SBp2gk= 
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto 
v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d h1:PksQg4dV6Sem3/HkBX+Ltq8T0ke0PKIRBNBatoDTVls= google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:s7iA721uChleev562UJO2OYB0PPT9CMFjV+Ce7VJH5M= google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 h1:MuYw1wJzT+ZkybKfaOXKp5hJiZDn2iHaXRw0mRYdHSc= @@ -992,18 +674,10 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA= google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0= @@ -1015,18 +689,13 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod 
h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20160105164936-4f90aeace3a2/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -1040,10 +709,8 @@ gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.0.0-20170712054546-1be3d31502d6/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= @@ -1052,14 +719,6 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= diff --git a/nitro-testnode b/nitro-testnode index 72141dd495..c177f28234 160000 --- a/nitro-testnode +++ b/nitro-testnode @@ -1 +1 @@ -Subproject commit 72141dd495ad965aa2a23723ea3e755037903ad7 +Subproject commit 
c177f282340285bcdae2d6a784547e2bb8b97498 diff --git a/precompiles/ArbAggregator.go b/precompiles/ArbAggregator.go index b74e280fe8..cee395189c 100644 --- a/precompiles/ArbAggregator.go +++ b/precompiles/ArbAggregator.go @@ -36,6 +36,7 @@ func (con ArbAggregator) GetBatchPosters(c ctx, evm mech) ([]addr, error) { return c.State.L1PricingState().BatchPosterTable().AllPosters(65536) } +// Adds additional batch poster address func (con ArbAggregator) AddBatchPoster(c ctx, evm mech, newBatchPoster addr) error { isOwner, err := c.State.ChainOwners().IsMember(c.caller) if err != nil { @@ -90,12 +91,14 @@ func (con ArbAggregator) SetFeeCollector(c ctx, evm mech, batchPoster addr, newF } // GetTxBaseFee gets an aggregator's current fixed fee to submit a tx +// Deprecated: always returns zero func (con ArbAggregator) GetTxBaseFee(c ctx, evm mech, aggregator addr) (huge, error) { // This is deprecated and now always returns zero. return big.NewInt(0), nil } // SetTxBaseFee sets an aggregator's fixed fee (caller must be the aggregator, its fee collector, or an owner) +// Deprecated: no-op func (con ArbAggregator) SetTxBaseFee(c ctx, evm mech, aggregator addr, feeInL1Gas huge) error { // This is deprecated and is now a no-op. return nil diff --git a/precompiles/ArbDebug.go b/precompiles/ArbDebug.go index bf85d5e18f..60e520da3e 100644 --- a/precompiles/ArbDebug.go +++ b/precompiles/ArbDebug.go @@ -24,6 +24,7 @@ type ArbDebug struct { UnusedError func() error } +// Emits events with values based on the args provided func (con ArbDebug) Events(c ctx, evm mech, paid huge, flag bool, value bytes32) (addr, huge, error) { // Emits 2 events that cover each case // Basic tests an index'd value & a normal value @@ -42,11 +43,13 @@ func (con ArbDebug) Events(c ctx, evm mech, paid huge, flag bool, value bytes32) return c.caller, paid, nil } +// Tries (and fails) to emit logs in a view context func (con ArbDebug) EventsView(c ctx, evm mech) error { _, _, err := con.Events(c, evm, common.Big0, true, bytes32{}) return err } +// Throws a custom error func (con ArbDebug) CustomRevert(c ctx, number uint64) error { return con.CustomError(number, "This spider family wards off bugs: /\\oo/\\ //\\(oo)//\\ /\\oo/\\", true) } @@ -61,6 +64,7 @@ func (con ArbDebug) Panic(c ctx, evm mech) error { panic("called ArbDebug's debug-only Panic method") } +// Throws a hardcoded error func (con ArbDebug) LegacyError(c ctx) error { return errors.New("example legacy error") } diff --git a/precompiles/ArbGasInfo.go b/precompiles/ArbGasInfo.go index 8d916926f3..c85ed93f39 100644 --- a/precompiles/ArbGasInfo.go +++ b/precompiles/ArbGasInfo.go @@ -29,7 +29,7 @@ func (con ArbGasInfo) GetPricesInWeiWithAggregator( evm mech, aggregator addr, ) (huge, huge, huge, huge, huge, huge, error) { - if c.State.ArbOSVersion() < 4 { + if c.State.ArbOSVersion() < params.ArbosVersion_4 { return con._preVersion4_GetPricesInWeiWithAggregator(c, evm, aggregator) } @@ -105,7 +105,7 @@ func (con ArbGasInfo) GetPricesInWei(c ctx, evm mech) (huge, huge, huge, huge, h // GetPricesInArbGasWithAggregator gets prices in ArbGas when using the provided aggregator func (con ArbGasInfo) GetPricesInArbGasWithAggregator(c ctx, evm mech, aggregator addr) (huge, huge, huge, error) { - if c.State.ArbOSVersion() < 4 { + if c.State.ArbOSVersion() < params.ArbosVersion_4 { return con._preVersion4_GetPricesInArbGasWithAggregator(c, evm, aggregator) } l1GasPrice, err := c.State.L1PricingState().PricePerUnit() @@ -220,7 +220,7 @@ func (con ArbGasInfo) GetGasBacklogTolerance(c ctx, 
evm mech) (uint64, error) { // GetL1PricingSurplus gets the surplus of funds for L1 batch posting payments (may be negative) func (con ArbGasInfo) GetL1PricingSurplus(c ctx, evm mech) (*big.Int, error) { - if c.State.ArbOSVersion() < 10 { + if c.State.ArbOSVersion() < params.ArbosVersion_10 { return con._preversion10_GetL1PricingSurplus(c, evm) } ps := c.State.L1PricingState() diff --git a/precompiles/ArbOwner.go b/precompiles/ArbOwner.go index 90a7b4ccc2..a6df0bd0dc 100644 --- a/precompiles/ArbOwner.go +++ b/precompiles/ArbOwner.go @@ -120,38 +120,48 @@ func (con ArbOwner) ScheduleArbOSUpgrade(c ctx, evm mech, newVersion uint64, tim return c.State.ScheduleArbOSUpgrade(newVersion, timestamp) } +// Sets equilibration units parameter for L1 price adjustment algorithm func (con ArbOwner) SetL1PricingEquilibrationUnits(c ctx, evm mech, equilibrationUnits huge) error { return c.State.L1PricingState().SetEquilibrationUnits(equilibrationUnits) } +// Sets inertia parameter for L1 price adjustment algorithm func (con ArbOwner) SetL1PricingInertia(c ctx, evm mech, inertia uint64) error { return c.State.L1PricingState().SetInertia(inertia) } +// Sets reward recipient address for L1 price adjustment algorithm func (con ArbOwner) SetL1PricingRewardRecipient(c ctx, evm mech, recipient addr) error { return c.State.L1PricingState().SetPayRewardsTo(recipient) } +// Sets reward amount for L1 price adjustment algorithm, in wei per unit func (con ArbOwner) SetL1PricingRewardRate(c ctx, evm mech, weiPerUnit uint64) error { return c.State.L1PricingState().SetPerUnitReward(weiPerUnit) } +// Set how much ArbOS charges per L1 gas spent on transaction data. func (con ArbOwner) SetL1PricePerUnit(c ctx, evm mech, pricePerUnit *big.Int) error { return c.State.L1PricingState().SetPricePerUnit(pricePerUnit) } +// Sets the base charge (in L1 gas) attributed to each data batch in the calldata pricer func (con ArbOwner) SetPerBatchGasCharge(c ctx, evm mech, cost int64) error { return c.State.L1PricingState().SetPerBatchGasCost(cost) } +// Sets the cost amortization cap in basis points func (con ArbOwner) SetAmortizedCostCapBips(c ctx, evm mech, cap uint64) error { return c.State.L1PricingState().SetAmortizedCostCapBips(cap) } +// Sets the Brotli compression level used for fast compression +// Available in ArbOS version 12 with default level as 1 func (con ArbOwner) SetBrotliCompressionLevel(c ctx, evm mech, level uint64) error { return c.State.SetBrotliCompressionLevel(level) } +// Releases surplus funds from L1PricerFundsPoolAddress for use func (con ArbOwner) ReleaseL1PricerSurplusFunds(c ctx, evm mech, maxWeiToRelease huge) (huge, error) { balance := evm.StateDB.GetBalance(l1pricing.L1PricerFundsPoolAddress) l1p := c.State.L1PricingState() @@ -295,6 +305,7 @@ func (con ArbOwner) RemoveWasmCacheManager(c ctx, _ mech, manager addr) error { return managers.Remove(manager, c.State.ArbOSVersion()) } +// Sets serialized chain config in ArbOS state func (con ArbOwner) SetChainConfig(c ctx, evm mech, serializedChainConfig []byte) error { if c == nil { return errors.New("nil context") diff --git a/precompiles/ArbOwnerPublic.go b/precompiles/ArbOwnerPublic.go index 451e18e1cc..792b4bb59d 100644 --- a/precompiles/ArbOwnerPublic.go +++ b/precompiles/ArbOwnerPublic.go @@ -5,6 +5,7 @@ package precompiles import ( "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/params" ) // ArbOwnerPublic precompile provides non-owners with info about the current chain owners. 
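[Editor's note, not part of the patch] The precompile hunks in this part of the diff consistently swap hard-coded ArbOS version literals for named constants exported by the forked go-ethereum params package (params.ArbosVersion_4, _5, _6, _10, _11, _20, and so on). A minimal sketch of that gating pattern follows; the featureEnabled helper is hypothetical and only assumes, as the hunks themselves show, that the fork exposes these constants as integers comparable with the uint64 ArbOS version.

package example

import "github.com/ethereum/go-ethereum/params"

// featureEnabled is a hypothetical helper illustrating the style adopted in
// these hunks: gate behavior on a named ArbOS version constant instead of a
// magic number, so the threshold is self-documenting at the call site.
func featureEnabled(arbosVersion uint64) bool {
	// From ArbOS 11 onward the new behavior applies; earlier versions keep
	// the legacy path, mirroring the checks in the surrounding precompiles.
	return arbosVersion >= params.ArbosVersion_11
}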
@@ -42,7 +43,7 @@ func (con ArbOwnerPublic) GetNetworkFeeAccount(c ctx, evm mech) (addr, error) { // GetInfraFeeAccount gets the infrastructure fee collector func (con ArbOwnerPublic) GetInfraFeeAccount(c ctx, evm mech) (addr, error) { - if c.State.ArbOSVersion() < 6 { + if c.State.ArbOSVersion() < params.ArbosVersion_6 { return c.State.NetworkFeeAccount() } return c.State.InfraFeeAccount() diff --git a/precompiles/ArbOwner_test.go b/precompiles/ArbOwner_test.go index 51b2fc0cd9..74b29a79b5 100644 --- a/precompiles/ArbOwner_test.go +++ b/precompiles/ArbOwner_test.go @@ -16,6 +16,7 @@ import ( "github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/arbosState" "github.com/offchainlabs/nitro/arbos/burn" @@ -218,7 +219,7 @@ func TestArbInfraFeeAccount(t *testing.T) { err = prec.SetInfraFeeAccount(callCtx, evm, newAddr) // this should be a no-op (because ArbOS version 0) Require(t, err) - version5 := uint64(5) + version5 := params.ArbosVersion_5 evm = newMockEVMForTestingWithVersion(&version5) callCtx = testContext(caller, evm) prec = &ArbOwner{} diff --git a/precompiles/ArbRetryableTx.go b/precompiles/ArbRetryableTx.go index 49cc9a3264..06e5ccd352 100644 --- a/precompiles/ArbRetryableTx.go +++ b/precompiles/ArbRetryableTx.go @@ -39,7 +39,7 @@ type ArbRetryableTx struct { var ErrSelfModifyingRetryable = errors.New("retryable cannot modify itself") func (con ArbRetryableTx) oldNotFoundError(c ctx) error { - if c.State.ArbOSVersion() >= 3 { + if c.State.ArbOSVersion() >= params.ArbosVersion_3 { return con.NoTicketWithIDError() } return errors.New("ticketId not found") @@ -222,6 +222,9 @@ func (con ArbRetryableTx) Cancel(c ctx, evm mech, ticketId bytes32) error { return con.Canceled(c, evm, ticketId) } +// Gets the redeemer of the current retryable redeem attempt. +// Returns the zero address if the current transaction is not a retryable redeem attempt. +// If this is an auto-redeem, returns the fee refund address of the retryable. func (con ArbRetryableTx) GetCurrentRedeemer(c ctx, evm mech) (common.Address, error) { if c.txProcessor.CurrentRefundTo != nil { return *c.txProcessor.CurrentRefundTo, nil @@ -229,6 +232,7 @@ func (con ArbRetryableTx) GetCurrentRedeemer(c ctx, evm mech) (common.Address, e return common.Address{}, nil } +// Do not call. This method represents a retryable submission to aid explorers. Calling it will always revert. 
func (con ArbRetryableTx) SubmitRetryable( c ctx, evm mech, requestId bytes32, l1BaseFee, deposit, callvalue, gasFeeCap huge, gasLimit uint64, maxSubmissionFee huge, diff --git a/precompiles/ArbSys.go b/precompiles/ArbSys.go index 04cde46ebe..9742ed51f4 100644 --- a/precompiles/ArbSys.go +++ b/precompiles/ArbSys.go @@ -9,6 +9,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/util" "github.com/offchainlabs/nitro/util/arbmath" @@ -37,7 +38,7 @@ func (con *ArbSys) ArbBlockNumber(c ctx, evm mech) (huge, error) { // ArbBlockHash gets the L2 block hash, if sufficiently recent func (con *ArbSys) ArbBlockHash(c ctx, evm mech, arbBlockNumber *big.Int) (bytes32, error) { if !arbBlockNumber.IsUint64() { - if c.State.ArbOSVersion() >= 11 { + if c.State.ArbOSVersion() >= params.ArbosVersion_11 { return bytes32{}, con.InvalidBlockNumberError(arbBlockNumber, evm.Context.BlockNumber) } return bytes32{}, errors.New("invalid block number") @@ -46,7 +47,7 @@ func (con *ArbSys) ArbBlockHash(c ctx, evm mech, arbBlockNumber *big.Int) (bytes currentNumber := evm.Context.BlockNumber.Uint64() if requestedBlockNum >= currentNumber || requestedBlockNum+256 < currentNumber { - if c.State.ArbOSVersion() >= 11 { + if c.State.ArbOSVersion() >= params.ArbosVersion_11 { return common.Hash{}, con.InvalidBlockNumberError(arbBlockNumber, evm.Context.BlockNumber) } return common.Hash{}, errors.New("invalid block number for ArbBlockHAsh") @@ -84,7 +85,7 @@ func (con *ArbSys) MapL1SenderContractAddressToL2Alias(c ctx, sender addr, dest // WasMyCallersAddressAliased checks if the caller's caller was aliased func (con *ArbSys) WasMyCallersAddressAliased(c ctx, evm mech) (bool, error) { topLevel := con.isTopLevel(c, evm) - if c.State.ArbOSVersion() < 6 { + if c.State.ArbOSVersion() < params.ArbosVersion_6 { topLevel = evm.Depth() == 2 } aliased := topLevel && util.DoesTxTypeAlias(c.txProcessor.TopTxType) @@ -180,7 +181,7 @@ func (con *ArbSys) SendTxToL1(c ctx, evm mech, value huge, destination addr, cal calldataForL1, ) - if c.State.ArbOSVersion() >= 4 { + if c.State.ArbOSVersion() >= params.ArbosVersion_4 { return leafNum, nil } return sendHash.Big(), err diff --git a/precompiles/precompile.go b/precompiles/precompile.go index 5b5376a4ca..54d18a0cc9 100644 --- a/precompiles/precompile.go +++ b/precompiles/precompile.go @@ -361,7 +361,7 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr args = args[2:] version := arbosState.ArbOSVersion(state) - if callerCtx.readOnly && version >= 11 { + if callerCtx.readOnly && version >= params.ArbosVersion_11 { return []reflect.Value{reflect.ValueOf(vm.ErrWriteProtection)} } @@ -531,14 +531,14 @@ func Precompiles() map[addr]ArbosPrecompile { insert(MakePrecompile(pgen.ArbFunctionTableMetaData, &ArbFunctionTable{Address: types.ArbFunctionTableAddress})) insert(MakePrecompile(pgen.ArbosTestMetaData, &ArbosTest{Address: types.ArbosTestAddress})) ArbGasInfo := insert(MakePrecompile(pgen.ArbGasInfoMetaData, &ArbGasInfo{Address: types.ArbGasInfoAddress})) - ArbGasInfo.methodsByName["GetL1FeesAvailable"].arbosVersion = 10 - ArbGasInfo.methodsByName["GetL1RewardRate"].arbosVersion = 11 - ArbGasInfo.methodsByName["GetL1RewardRecipient"].arbosVersion = 11 - ArbGasInfo.methodsByName["GetL1PricingEquilibrationUnits"].arbosVersion = 20 - ArbGasInfo.methodsByName["GetLastL1PricingUpdateTime"].arbosVersion = 20 - 
ArbGasInfo.methodsByName["GetL1PricingFundsDueForRewards"].arbosVersion = 20 - ArbGasInfo.methodsByName["GetL1PricingUnitsSinceUpdate"].arbosVersion = 20 - ArbGasInfo.methodsByName["GetLastL1PricingSurplus"].arbosVersion = 20 + ArbGasInfo.methodsByName["GetL1FeesAvailable"].arbosVersion = params.ArbosVersion_10 + ArbGasInfo.methodsByName["GetL1RewardRate"].arbosVersion = params.ArbosVersion_11 + ArbGasInfo.methodsByName["GetL1RewardRecipient"].arbosVersion = params.ArbosVersion_11 + ArbGasInfo.methodsByName["GetL1PricingEquilibrationUnits"].arbosVersion = params.ArbosVersion_20 + ArbGasInfo.methodsByName["GetLastL1PricingUpdateTime"].arbosVersion = params.ArbosVersion_20 + ArbGasInfo.methodsByName["GetL1PricingFundsDueForRewards"].arbosVersion = params.ArbosVersion_20 + ArbGasInfo.methodsByName["GetL1PricingUnitsSinceUpdate"].arbosVersion = params.ArbosVersion_20 + ArbGasInfo.methodsByName["GetLastL1PricingSurplus"].arbosVersion = params.ArbosVersion_20 insert(MakePrecompile(pgen.ArbAggregatorMetaData, &ArbAggregator{Address: types.ArbAggregatorAddress})) insert(MakePrecompile(pgen.ArbStatisticsMetaData, &ArbStatistics{Address: types.ArbStatisticsAddress})) @@ -554,10 +554,10 @@ func Precompiles() map[addr]ArbosPrecompile { ArbOwnerPublicImpl := &ArbOwnerPublic{Address: types.ArbOwnerPublicAddress} ArbOwnerPublic := insert(MakePrecompile(pgen.ArbOwnerPublicMetaData, ArbOwnerPublicImpl)) - ArbOwnerPublic.methodsByName["GetInfraFeeAccount"].arbosVersion = 5 - ArbOwnerPublic.methodsByName["RectifyChainOwner"].arbosVersion = 11 - ArbOwnerPublic.methodsByName["GetBrotliCompressionLevel"].arbosVersion = 20 - ArbOwnerPublic.methodsByName["GetScheduledUpgrade"].arbosVersion = 20 + ArbOwnerPublic.methodsByName["GetInfraFeeAccount"].arbosVersion = params.ArbosVersion_5 + ArbOwnerPublic.methodsByName["RectifyChainOwner"].arbosVersion = params.ArbosVersion_11 + ArbOwnerPublic.methodsByName["GetBrotliCompressionLevel"].arbosVersion = params.ArbosVersion_20 + ArbOwnerPublic.methodsByName["GetScheduledUpgrade"].arbosVersion = params.ArbosVersion_20 ArbWasmImpl := &ArbWasm{Address: types.ArbWasmAddress} ArbWasm := insert(MakePrecompile(pgen.ArbWasmMetaData, ArbWasmImpl)) @@ -611,11 +611,11 @@ func Precompiles() map[addr]ArbosPrecompile { return ArbOwnerImpl.OwnerActs(context, evm, method, owner, data) } _, ArbOwner := MakePrecompile(pgen.ArbOwnerMetaData, ArbOwnerImpl) - ArbOwner.methodsByName["GetInfraFeeAccount"].arbosVersion = 5 - ArbOwner.methodsByName["SetInfraFeeAccount"].arbosVersion = 5 - ArbOwner.methodsByName["ReleaseL1PricerSurplusFunds"].arbosVersion = 10 - ArbOwner.methodsByName["SetChainConfig"].arbosVersion = 11 - ArbOwner.methodsByName["SetBrotliCompressionLevel"].arbosVersion = 20 + ArbOwner.methodsByName["GetInfraFeeAccount"].arbosVersion = params.ArbosVersion_5 + ArbOwner.methodsByName["SetInfraFeeAccount"].arbosVersion = params.ArbosVersion_5 + ArbOwner.methodsByName["ReleaseL1PricerSurplusFunds"].arbosVersion = params.ArbosVersion_10 + ArbOwner.methodsByName["SetChainConfig"].arbosVersion = params.ArbosVersion_11 + ArbOwner.methodsByName["SetBrotliCompressionLevel"].arbosVersion = params.ArbosVersion_20 stylusMethods := []string{ "SetInkPrice", "SetWasmMaxStackDepth", "SetWasmFreePages", "SetWasmPageGas", "SetWasmPageLimit", "SetWasmMinInitGas", "SetWasmInitCostScalar", @@ -798,7 +798,7 @@ func (p *Precompile) Call( ) } // nolint:errorlint - if arbosVersion >= 11 || errRet == vm.ErrExecutionReverted { + if arbosVersion >= params.ArbosVersion_11 || errRet == vm.ErrExecutionReverted { 
return nil, callerCtx.gasLeft, vm.ErrExecutionReverted } // Preserve behavior with old versions which would zero out gas on this type of error diff --git a/precompiles/precompile_test.go b/precompiles/precompile_test.go index c8b8a46b96..75fed711eb 100644 --- a/precompiles/precompile_test.go +++ b/precompiles/precompile_test.go @@ -190,13 +190,13 @@ func TestPrecompilesPerArbosVersion(t *testing.T) { log.SetDefault(log.NewLogger(glogger)) expectedNewMethodsPerArbosVersion := map[uint64]int{ - 0: 89, - 5: 3, - 10: 2, - 11: 4, - 20: 8, - 30: 38, - 31: 1, + 0: 89, + params.ArbosVersion_5: 3, + params.ArbosVersion_10: 2, + params.ArbosVersion_11: 4, + params.ArbosVersion_20: 8, + params.ArbosVersion_30: 38, + params.ArbosVersion_31: 1, } precompiles := Precompiles() diff --git a/precompiles/wrapper.go b/precompiles/wrapper.go index edc079fc5b..028aed755b 100644 --- a/precompiles/wrapper.go +++ b/precompiles/wrapper.go @@ -10,6 +10,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/arbosState" "github.com/offchainlabs/nitro/arbos/util" @@ -102,7 +103,7 @@ func (wrapper *OwnerPrecompile) Call( } version := arbosState.ArbOSVersion(evm.StateDB) - if !readOnly || version < 11 { + if !readOnly || version < params.ArbosVersion_11 { // log that the owner operation succeeded if err := wrapper.emitSuccess(evm, *(*[4]byte)(input[:4]), caller, input); err != nil { log.Error("failed to emit OwnerActs event", "err", err) diff --git a/staker/block_validator.go b/staker/block_validator.go index 0a1a38ba17..43e5c7d28f 100644 --- a/staker/block_validator.go +++ b/staker/block_validator.go @@ -1117,7 +1117,7 @@ func (v *BlockValidator) Reorg(ctx context.Context, count arbutil.MessageIndex) } v.validations.Delete(iPos) } - v.nextCreateStartGS = buildGlobalState(*res, endPosition) + v.nextCreateStartGS = BuildGlobalState(*res, endPosition) v.nextCreatePrevDelayed = msg.DelayedMessagesRead v.nextCreateBatchReread = true v.prevBatchCache = make(map[uint64][]byte) diff --git a/staker/bold/bold_staker.go b/staker/bold/bold_staker.go new file mode 100644 index 0000000000..1a8eed80fa --- /dev/null +++ b/staker/bold/bold_staker.go @@ -0,0 +1,536 @@ +// Copyright 2023-2024, Offchain Labs, Inc. 
+// For license information, see https://github.com/offchainlabs/nitro/blob/main/LICENSE +package bold + +import ( + "context" + "errors" + "fmt" + "math/big" + "time" + + flag "github.com/spf13/pflag" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/rpc" + + protocol "github.com/offchainlabs/bold/chain-abstraction" + solimpl "github.com/offchainlabs/bold/chain-abstraction/sol-implementation" + challengemanager "github.com/offchainlabs/bold/challenge-manager" + boldtypes "github.com/offchainlabs/bold/challenge-manager/types" + l2stateprovider "github.com/offchainlabs/bold/layer2-state-provider" + "github.com/offchainlabs/bold/solgen/go/challengeV2gen" + boldrollup "github.com/offchainlabs/bold/solgen/go/rollupgen" + "github.com/offchainlabs/bold/util" + "github.com/offchainlabs/nitro/arbnode/dataposter" + "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/staker" + legacystaker "github.com/offchainlabs/nitro/staker/legacy" + "github.com/offchainlabs/nitro/util/headerreader" + "github.com/offchainlabs/nitro/util/stopwaiter" + "github.com/offchainlabs/nitro/validator" +) + +var assertionCreatedId common.Hash + +func init() { + rollupAbi, err := boldrollup.RollupCoreMetaData.GetAbi() + if err != nil { + panic(err) + } + assertionCreatedEvent, ok := rollupAbi.Events["AssertionCreated"] + if !ok { + panic("RollupCore ABI missing AssertionCreated event") + } + assertionCreatedId = assertionCreatedEvent.ID +} + +type BoldConfig struct { + Enable bool `koanf:"enable"` + Strategy string `koanf:"strategy"` + // How often to post assertions onchain. + AssertionPostingInterval time.Duration `koanf:"assertion-posting-interval"` + // How often to scan for newly created assertions onchain. + AssertionScanningInterval time.Duration `koanf:"assertion-scanning-interval"` + // How often to confirm assertions onchain. + AssertionConfirmingInterval time.Duration `koanf:"assertion-confirming-interval"` + API bool `koanf:"api"` + APIHost string `koanf:"api-host"` + APIPort uint16 `koanf:"api-port"` + APIDBPath string `koanf:"api-db-path"` + TrackChallengeParentAssertionHashes []string `koanf:"track-challenge-parent-assertion-hashes"` + CheckStakerSwitchInterval time.Duration `koanf:"check-staker-switch-interval"` + StateProviderConfig StateProviderConfig `koanf:"state-provider-config"` + StartValidationFromStaked bool `koanf:"start-validation-from-staked"` + strategy legacystaker.StakerStrategy +} + +func (c *BoldConfig) Validate() error { + strategy, err := legacystaker.ParseStrategy(c.Strategy) + if err != nil { + return err + } + c.strategy = strategy + return nil +} + +type StateProviderConfig struct { + // A name identifier for the validator for cosmetic purposes. + ValidatorName string `koanf:"validator-name"` + CheckBatchFinality bool `koanf:"check-batch-finality"` + // Path to a filesystem directory that will cache machine hashes for BOLD. 
+ MachineLeavesCachePath string `koanf:"machine-leaves-cache-path"` +} + +var DefaultStateProviderConfig = StateProviderConfig{ + ValidatorName: "default-validator", + CheckBatchFinality: true, + MachineLeavesCachePath: "machine-hashes-cache", +} + +var DefaultBoldConfig = BoldConfig{ + Enable: false, + Strategy: "Watchtower", + AssertionPostingInterval: time.Minute * 15, + AssertionScanningInterval: time.Minute, + AssertionConfirmingInterval: time.Minute, + API: false, + APIHost: "127.0.0.1", + APIPort: 9393, + APIDBPath: "bold-api-db", + TrackChallengeParentAssertionHashes: []string{}, + CheckStakerSwitchInterval: time.Minute, // Every minute, check if the Nitro node staker should switch to using BOLD. + StateProviderConfig: DefaultStateProviderConfig, + StartValidationFromStaked: true, +} + +var BoldModes = map[legacystaker.StakerStrategy]boldtypes.Mode{ + legacystaker.WatchtowerStrategy: boldtypes.WatchTowerMode, + legacystaker.DefensiveStrategy: boldtypes.DefensiveMode, + legacystaker.ResolveNodesStrategy: boldtypes.ResolveMode, + legacystaker.MakeNodesStrategy: boldtypes.MakeMode, +} + +func BoldConfigAddOptions(prefix string, f *flag.FlagSet) { + f.Bool(prefix+".enable", DefaultBoldConfig.Enable, "enable bold challenge protocol") + f.String(prefix+".strategy", DefaultBoldConfig.Strategy, "define the bold validator staker strategy, either watchtower, defensive, stakeLatest, or makeNodes") + f.Duration(prefix+".assertion-posting-interval", DefaultBoldConfig.AssertionPostingInterval, "assertion posting interval") + f.Duration(prefix+".assertion-scanning-interval", DefaultBoldConfig.AssertionScanningInterval, "scan assertion interval") + f.Duration(prefix+".assertion-confirming-interval", DefaultBoldConfig.AssertionConfirmingInterval, "confirm assertion interval") + f.Duration(prefix+".check-staker-switch-interval", DefaultBoldConfig.CheckStakerSwitchInterval, "how often to check if staker can switch to bold") + f.Bool(prefix+".api", DefaultBoldConfig.API, "enable api") + f.String(prefix+".api-host", DefaultBoldConfig.APIHost, "bold api host") + f.Uint16(prefix+".api-port", DefaultBoldConfig.APIPort, "bold api port") + f.String(prefix+".api-db-path", DefaultBoldConfig.APIDBPath, "bold api db path") + f.StringSlice(prefix+".track-challenge-parent-assertion-hashes", DefaultBoldConfig.TrackChallengeParentAssertionHashes, "only track challenges/edges with these parent assertion hashes") + StateProviderConfigAddOptions(prefix+".state-provider-config", f) + f.Bool(prefix+".start-validation-from-staked", DefaultBoldConfig.StartValidationFromStaked, "assume staked nodes are valid") +} + +func StateProviderConfigAddOptions(prefix string, f *flag.FlagSet) { + f.String(prefix+".validator-name", DefaultStateProviderConfig.ValidatorName, "name identifier for cosmetic purposes") + f.Bool(prefix+".check-batch-finality", DefaultStateProviderConfig.CheckBatchFinality, "check batch finality") + f.String(prefix+".machine-leaves-cache-path", DefaultStateProviderConfig.MachineLeavesCachePath, "path to machine cache") +} + +type BOLDStaker struct { + stopwaiter.StopWaiter + config *BoldConfig + chalManager *challengemanager.Manager + blockValidator *staker.BlockValidator + statelessBlockValidator *staker.StatelessBlockValidator + rollupAddress common.Address + l1Reader *headerreader.HeaderReader + client protocol.ChainBackend + callOpts bind.CallOpts + wallet legacystaker.ValidatorWalletInterface + stakedNotifiers []legacystaker.LatestStakedNotifier + confirmedNotifiers []legacystaker.LatestConfirmedNotifier 
+} + +func NewBOLDStaker( + ctx context.Context, + stack *node.Node, + rollupAddress common.Address, + callOpts bind.CallOpts, + txOpts *bind.TransactOpts, + l1Reader *headerreader.HeaderReader, + blockValidator *staker.BlockValidator, + statelessBlockValidator *staker.StatelessBlockValidator, + config *BoldConfig, + dataPoster *dataposter.DataPoster, + wallet legacystaker.ValidatorWalletInterface, + stakedNotifiers []legacystaker.LatestStakedNotifier, + confirmedNotifiers []legacystaker.LatestConfirmedNotifier, +) (*BOLDStaker, error) { + if err := config.Validate(); err != nil { + return nil, err + } + wrappedClient := util.NewBackendWrapper(l1Reader.Client(), rpc.LatestBlockNumber) + manager, err := newBOLDChallengeManager(ctx, stack, rollupAddress, txOpts, l1Reader, wrappedClient, blockValidator, statelessBlockValidator, config, dataPoster) + if err != nil { + return nil, err + } + return &BOLDStaker{ + config: config, + chalManager: manager, + blockValidator: blockValidator, + statelessBlockValidator: statelessBlockValidator, + rollupAddress: rollupAddress, + l1Reader: l1Reader, + client: wrappedClient, + callOpts: callOpts, + wallet: wallet, + stakedNotifiers: stakedNotifiers, + confirmedNotifiers: confirmedNotifiers, + }, nil +} + +// Initialize Updates the block validator module root. +// And updates the init state of the block validator if block validator has not started yet. +func (b *BOLDStaker) Initialize(ctx context.Context) error { + err := b.updateBlockValidatorModuleRoot(ctx) + if err != nil { + log.Warn("error updating latest wasm module root", "err", err) + } + walletAddressOrZero := b.wallet.AddressOrZero() + var stakerAddr common.Address + if b.wallet.DataPoster() != nil { + stakerAddr = b.wallet.DataPoster().Sender() + } + log.Info("running as validator", "txSender", stakerAddr, "actingAsWallet", walletAddressOrZero, "strategy", b.config.Strategy) + + if b.blockValidator != nil && b.config.StartValidationFromStaked && !b.blockValidator.Started() { + rollupUserLogic, err := boldrollup.NewRollupUserLogic(b.rollupAddress, b.client) + if err != nil { + return err + } + latestStaked, err := rollupUserLogic.LatestStakedAssertion(b.getCallOpts(ctx), walletAddressOrZero) + if err != nil { + return err + } + if latestStaked == [32]byte{} { + latestConfirmed, err := rollupUserLogic.LatestConfirmed(&bind.CallOpts{Context: ctx}) + if err != nil { + return err + } + latestStaked = latestConfirmed + } + assertion, err := readBoldAssertionCreationInfo( + ctx, + rollupUserLogic, + b.client, + b.rollupAddress, + latestStaked, + ) + if err != nil { + return err + } + afterState := protocol.GoGlobalStateFromSolidity(assertion.AfterState.GlobalState) + return b.blockValidator.InitAssumeValid(validator.GoGlobalState(afterState)) + } + return nil +} + +func (b *BOLDStaker) Start(ctxIn context.Context) { + b.StopWaiter.Start(ctxIn, b) + b.chalManager.Start(ctxIn) + b.CallIteratively(func(ctx context.Context) time.Duration { + err := b.updateBlockValidatorModuleRoot(ctx) + if err != nil { + log.Warn("error updating latest wasm module root", "err", err) + } + agreedMsgCount, agreedGlobalState, err := b.getLatestState(ctx, false) + if err != nil { + log.Error("staker: error checking latest agreed", "err", err) + } + + if agreedGlobalState != nil { + for _, notifier := range b.stakedNotifiers { + notifier.UpdateLatestStaked(agreedMsgCount, *agreedGlobalState) + } + } + confirmedMsgCount, confirmedGlobalState, err := b.getLatestState(ctx, true) + if err != nil { + log.Error("staker: error 
checking latest confirmed", "err", err) + } + + if confirmedGlobalState != nil { + for _, notifier := range b.confirmedNotifiers { + notifier.UpdateLatestConfirmed(confirmedMsgCount, *confirmedGlobalState) + } + } + return b.config.AssertionPostingInterval + }) +} + +func (b *BOLDStaker) getLatestState(ctx context.Context, confirmed bool) (arbutil.MessageIndex, *validator.GoGlobalState, error) { + var globalState protocol.GoGlobalState + var err error + if confirmed { + globalState, err = b.chalManager.LatestConfirmedState(ctx) + } else { + globalState, err = b.chalManager.LatestAgreedState(ctx) + } + var assertionType string + if confirmed { + assertionType = "confirmed" + } else { + assertionType = "agreed" + } + if err != nil { + return 0, nil, fmt.Errorf("error getting latest %s: %w", assertionType, err) + } + caughtUp, count, err := staker.GlobalStateToMsgCount(b.statelessBlockValidator.InboxTracker(), b.statelessBlockValidator.InboxStreamer(), validator.GoGlobalState(globalState)) + if err != nil { + if errors.Is(err, staker.ErrGlobalStateNotInChain) { + return 0, nil, fmt.Errorf("latest %s assertion of %v not yet in our node: %w", assertionType, globalState, err) + } + return 0, nil, fmt.Errorf("error getting message count: %w", err) + } + + if !caughtUp { + log.Info(fmt.Sprintf("latest %s assertion not yet in our node", assertionType), "state", globalState) + return 0, nil, nil + } + + processedCount, err := b.statelessBlockValidator.InboxStreamer().GetProcessedMessageCount() + if err != nil { + return 0, nil, err + } + + if processedCount < count { + log.Info("execution catching up to rollup", "rollupCount", count, "processedCount", processedCount) + return 0, nil, nil + } + + return count, (*validator.GoGlobalState)(&globalState), nil +} + +func (b *BOLDStaker) StopAndWait() { + b.chalManager.StopAndWait() + b.StopWaiter.StopAndWait() +} + +func (b *BOLDStaker) updateBlockValidatorModuleRoot(ctx context.Context) error { + if b.blockValidator == nil { + return nil + } + boldRollup, err := boldrollup.NewRollupUserLogic(b.rollupAddress, b.client) + if err != nil { + return err + } + moduleRoot, err := boldRollup.WasmModuleRoot(b.getCallOpts(ctx)) + if err != nil { + return err + } + return b.blockValidator.SetCurrentWasmModuleRoot(moduleRoot) +} + +func (b *BOLDStaker) getCallOpts(ctx context.Context) *bind.CallOpts { + opts := b.callOpts + opts.Context = ctx + return &opts +} + +// Sets up a BOLD challenge manager implementation by providing it with +// its necessary dependencies and configuration. The challenge manager can then be started, as it +// implements the StopWaiter pattern as part of the Nitro validator. +func newBOLDChallengeManager( + ctx context.Context, + stack *node.Node, + rollupAddress common.Address, + txOpts *bind.TransactOpts, + l1Reader *headerreader.HeaderReader, + client protocol.ChainBackend, + blockValidator *staker.BlockValidator, + statelessBlockValidator *staker.StatelessBlockValidator, + config *BoldConfig, + dataPoster *dataposter.DataPoster, +) (*challengemanager.Manager, error) { + // Initializes the BOLD contract bindings and the assertion chain abstraction. 
+ rollupBindings, err := boldrollup.NewRollupUserLogic(rollupAddress, client) + if err != nil { + return nil, fmt.Errorf("could not create rollup bindings: %w", err) + } + chalManager, err := rollupBindings.ChallengeManager(&bind.CallOpts{}) + if err != nil { + return nil, fmt.Errorf("could not get challenge manager: %w", err) + } + chalManagerBindings, err := challengeV2gen.NewEdgeChallengeManager(chalManager, client) + if err != nil { + return nil, fmt.Errorf("could not create challenge manager bindings: %w", err) + } + assertionChain, err := solimpl.NewAssertionChain(ctx, rollupAddress, chalManager, txOpts, client, NewDataPosterTransactor(dataPoster)) + if err != nil { + return nil, fmt.Errorf("could not create assertion chain: %w", err) + } + + blockChallengeHeightBig, err := chalManagerBindings.LAYERZEROBLOCKEDGEHEIGHT(&bind.CallOpts{}) + if err != nil { + return nil, fmt.Errorf("could not get block challenge height: %w", err) + } + if !blockChallengeHeightBig.IsUint64() { + return nil, errors.New("block challenge height was not a uint64") + } + bigStepHeightBig, err := chalManagerBindings.LAYERZEROBIGSTEPEDGEHEIGHT(&bind.CallOpts{}) + if err != nil { + return nil, fmt.Errorf("could not get big step challenge height: %w", err) + } + if !bigStepHeightBig.IsUint64() { + return nil, errors.New("big step challenge height was not a uint64") + } + smallStepHeightBig, err := chalManagerBindings.LAYERZEROSMALLSTEPEDGEHEIGHT(&bind.CallOpts{}) + if err != nil { + return nil, fmt.Errorf("could not get small step challenge height: %w", err) + } + if !smallStepHeightBig.IsUint64() { + return nil, errors.New("small step challenge height was not a uint64") + } + numBigSteps, err := chalManagerBindings.NUMBIGSTEPLEVEL(&bind.CallOpts{}) + if err != nil { + return nil, fmt.Errorf("could not get number of big steps: %w", err) + } + blockChallengeLeafHeight := l2stateprovider.Height(blockChallengeHeightBig.Uint64()) + bigStepHeight := l2stateprovider.Height(bigStepHeightBig.Uint64()) + smallStepHeight := l2stateprovider.Height(smallStepHeightBig.Uint64()) + + apiDBPath := config.APIDBPath + if apiDBPath != "" { + apiDBPath = stack.ResolvePath(apiDBPath) + } + machineHashesPath := config.StateProviderConfig.MachineLeavesCachePath + if machineHashesPath != "" { + machineHashesPath = stack.ResolvePath(machineHashesPath) + } + + // Sets up the state provider interface that BOLD will use to request data such as + // execution states for assertions, history commitments for machine execution, and one step proofs. + stateProvider, err := NewBOLDStateProvider( + blockValidator, + statelessBlockValidator, + // Specify the height constants needed for the state provider. + // TODO: Fetch these from the smart contract instead. + blockChallengeLeafHeight, + &config.StateProviderConfig, + machineHashesPath, + ) + if err != nil { + return nil, fmt.Errorf("could not create state manager: %w", err) + } + providerHeights := []l2stateprovider.Height{blockChallengeLeafHeight} + for i := uint8(0); i < numBigSteps; i++ { + providerHeights = append(providerHeights, bigStepHeight) + } + providerHeights = append(providerHeights, smallStepHeight) + provider := l2stateprovider.NewHistoryCommitmentProvider( + stateProvider, + stateProvider, + stateProvider, + providerHeights, + stateProvider, + nil, // Nil API database for the history commitment provider, as it will be provided later. TODO: Improve this dependency injection. + ) + // The interval at which the challenge manager will attempt to post assertions. 
+ postingInterval := config.AssertionPostingInterval + // The interval at which the manager will scan for newly created assertions onchain. + scanningInterval := config.AssertionScanningInterval + // The interval at which the manager will attempt to confirm assertions. + confirmingInterval := config.AssertionConfirmingInterval + + stackOpts := []challengemanager.StackOpt{ + challengemanager.StackWithName(config.StateProviderConfig.ValidatorName), + challengemanager.StackWithMode(BoldModes[config.strategy]), + challengemanager.StackWithPollingInterval(scanningInterval), + challengemanager.StackWithPostingInterval(postingInterval), + challengemanager.StackWithConfirmationInterval(confirmingInterval), + challengemanager.StackWithTrackChallengeParentAssertionHashes(config.TrackChallengeParentAssertionHashes), + challengemanager.StackWithHeaderProvider(l1Reader), + } + if config.API { + apiAddr := fmt.Sprintf("%s:%d", config.APIHost, config.APIPort) + stackOpts = append(stackOpts, challengemanager.StackWithAPIEnabled(apiAddr, apiDBPath)) + } + + manager, err := challengemanager.NewChallengeStack( + assertionChain, + provider, + stackOpts..., + ) + if err != nil { + return nil, fmt.Errorf("could not create challenge manager: %w", err) + } + return manager, nil +} + +// Read the creation info for an assertion by looking up its creation +// event from the rollup contracts. +func readBoldAssertionCreationInfo( + ctx context.Context, + rollup *boldrollup.RollupUserLogic, + client bind.ContractFilterer, + rollupAddress common.Address, + assertionHash common.Hash, +) (*protocol.AssertionCreatedInfo, error) { + var creationBlock uint64 + var topics [][]common.Hash + if assertionHash == (common.Hash{}) { + rollupDeploymentBlock, err := rollup.RollupDeploymentBlock(&bind.CallOpts{Context: ctx}) + if err != nil { + return nil, err + } + if !rollupDeploymentBlock.IsUint64() { + return nil, errors.New("rollup deployment block was not a uint64") + } + creationBlock = rollupDeploymentBlock.Uint64() + } else { + var b [32]byte + copy(b[:], assertionHash[:]) + node, err := rollup.GetAssertion(&bind.CallOpts{Context: ctx}, b) + if err != nil { + return nil, err + } + creationBlock = node.CreatedAtBlock + } + topics = [][]common.Hash{{assertionCreatedId}, {assertionHash}} + var query = ethereum.FilterQuery{ + FromBlock: new(big.Int).SetUint64(creationBlock), + ToBlock: new(big.Int).SetUint64(creationBlock), + Addresses: []common.Address{rollupAddress}, + Topics: topics, + } + logs, err := client.FilterLogs(ctx, query) + if err != nil { + return nil, err + } + if len(logs) == 0 { + return nil, errors.New("no assertion creation logs found") + } + if len(logs) > 1 { + return nil, errors.New("found multiple instances of requested node") + } + ethLog := logs[0] + parsedLog, err := rollup.ParseAssertionCreated(ethLog) + if err != nil { + return nil, err + } + afterState := parsedLog.Assertion.AfterState + return &protocol.AssertionCreatedInfo{ + ConfirmPeriodBlocks: parsedLog.ConfirmPeriodBlocks, + RequiredStake: parsedLog.RequiredStake, + ParentAssertionHash: protocol.AssertionHash{Hash: parsedLog.ParentAssertionHash}, + BeforeState: parsedLog.Assertion.BeforeState, + AfterState: afterState, + InboxMaxCount: parsedLog.InboxMaxCount, + AfterInboxBatchAcc: parsedLog.AfterInboxBatchAcc, + AssertionHash: protocol.AssertionHash{Hash: parsedLog.AssertionHash}, + WasmModuleRoot: parsedLog.WasmModuleRoot, + ChallengeManager: parsedLog.ChallengeManager, + TransactionHash: ethLog.TxHash, + CreationBlock: ethLog.BlockNumber, + 
}, nil +} diff --git a/staker/bold/bold_state_provider.go b/staker/bold/bold_state_provider.go new file mode 100644 index 0000000000..48b7cbd91e --- /dev/null +++ b/staker/bold/bold_state_provider.go @@ -0,0 +1,542 @@ +// Copyright 2023, Offchain Labs, Inc. +// For license information, see +// https://github.com/offchainlabs/bold/blob/main/LICENSE +package bold + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" + + protocol "github.com/offchainlabs/bold/chain-abstraction" + "github.com/offchainlabs/bold/containers/option" + l2stateprovider "github.com/offchainlabs/bold/layer2-state-provider" + "github.com/offchainlabs/bold/state-commitments/history" + "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/staker" + challengecache "github.com/offchainlabs/nitro/staker/challenge-cache" + "github.com/offchainlabs/nitro/validator" + "github.com/offchainlabs/nitro/validator/server_arb" +) + +var ( + _ l2stateprovider.ProofCollector = (*BOLDStateProvider)(nil) + _ l2stateprovider.L2MessageStateCollector = (*BOLDStateProvider)(nil) + _ l2stateprovider.MachineHashCollector = (*BOLDStateProvider)(nil) + _ l2stateprovider.ExecutionProvider = (*BOLDStateProvider)(nil) +) + +var executionNodeOfflineGauge = metrics.NewRegisteredGauge("arb/state_provider/execution_node_offline", nil) + +var ( + ErrChainCatchingUp = errors.New("chain catching up") +) + +type BOLDStateProvider struct { + validator *staker.BlockValidator + statelessValidator *staker.StatelessBlockValidator + historyCache challengecache.HistoryCommitmentCacher + blockChallengeLeafHeight l2stateprovider.Height + stateProviderConfig *StateProviderConfig + sync.RWMutex +} + +func NewBOLDStateProvider( + blockValidator *staker.BlockValidator, + statelessValidator *staker.StatelessBlockValidator, + blockChallengeLeafHeight l2stateprovider.Height, + stateProviderConfig *StateProviderConfig, + machineHashesCachePath string, +) (*BOLDStateProvider, error) { + historyCache, err := challengecache.New(machineHashesCachePath) + if err != nil { + return nil, err + } + sp := &BOLDStateProvider{ + validator: blockValidator, + statelessValidator: statelessValidator, + historyCache: historyCache, + blockChallengeLeafHeight: blockChallengeLeafHeight, + stateProviderConfig: stateProviderConfig, + } + return sp, nil +} + +// ExecutionStateAfterPreviousState Produces the L2 execution state for the next +// assertion. Returns the state at maxSeqInboxCount or blockChallengeLeafHeight +// after the previous state, whichever is earlier. If previousGlobalState is +// nil, defaults to returning the state at maxSeqInboxCount. 
+func (s *BOLDStateProvider) ExecutionStateAfterPreviousState( + ctx context.Context, + maxSeqInboxCount uint64, + previousGlobalState protocol.GoGlobalState, +) (*protocol.ExecutionState, error) { + if maxSeqInboxCount == 0 { + return nil, errors.New("max inbox count cannot be zero") + } + batchIndex := maxSeqInboxCount + maxNumberOfBlocks := uint64(s.blockChallengeLeafHeight) + messageCount, err := s.statelessValidator.InboxTracker().GetBatchMessageCount(batchIndex - 1) + if err != nil { + if strings.Contains(err.Error(), "not found") { + return nil, fmt.Errorf("%w: batch count %d", l2stateprovider.ErrChainCatchingUp, maxSeqInboxCount) + } + return nil, err + } + var previousMessageCount arbutil.MessageIndex + if previousGlobalState.Batch > 0 { + previousMessageCount, err = s.statelessValidator.InboxTracker().GetBatchMessageCount(previousGlobalState.Batch - 1) + if err != nil { + if strings.Contains(err.Error(), "not found") { + return nil, fmt.Errorf("%w: batch count %d", l2stateprovider.ErrChainCatchingUp, maxSeqInboxCount) + } + return nil, err + } + } + previousMessageCount += arbutil.MessageIndex(previousGlobalState.PosInBatch) + messageDiffBetweenBatches := messageCount - previousMessageCount + maxMessageCount := previousMessageCount + arbutil.MessageIndex(maxNumberOfBlocks) + if messageDiffBetweenBatches > maxMessageCount { + messageCount = maxMessageCount + batchIndex, _, err = s.statelessValidator.InboxTracker().FindInboxBatchContainingMessage(messageCount) + if err != nil { + return nil, err + } + } + globalState, err := s.findGlobalStateFromMessageCountAndBatch(messageCount, l2stateprovider.Batch(batchIndex)) + if err != nil { + return nil, err + } + // If the state we are requested to produce is neither validated nor past + // threshold, we return ErrChainCatchingUp as an error. + stateValidatedAndMessageCountPastThreshold, err := s.isStateValidatedAndMessageCountPastThreshold(ctx, globalState, messageCount) + if err != nil { + return nil, err + } + if !stateValidatedAndMessageCountPastThreshold { + return nil, fmt.Errorf("%w: batch count %d", l2stateprovider.ErrChainCatchingUp, maxSeqInboxCount) + } + + executionState := &protocol.ExecutionState{ + GlobalState: protocol.GoGlobalState(globalState), + MachineStatus: protocol.MachineStatusFinished, + } + toBatch := executionState.GlobalState.Batch + historyCommitStates, _, err := s.StatesInBatchRange( + ctx, + previousGlobalState, + toBatch, + l2stateprovider.Height(maxNumberOfBlocks), + ) + if err != nil { + return nil, err + } + historyCommit, err := history.NewCommitment(historyCommitStates, maxNumberOfBlocks+1) + if err != nil { + return nil, err + } + executionState.EndHistoryRoot = historyCommit.Merkle + return executionState, nil +} + +func (s *BOLDStateProvider) isStateValidatedAndMessageCountPastThreshold( + ctx context.Context, gs validator.GoGlobalState, messageCount arbutil.MessageIndex, +) (bool, error) { + if s.stateProviderConfig.CheckBatchFinality { + finalizedMessageCount, err := s.statelessValidator.InboxReader().GetFinalizedMsgCount(ctx) + if err != nil { + return false, err + } + if messageCount > finalizedMessageCount { + return false, nil + } + } + if s.validator == nil { + // If we do not have a validator, we cannot check if the state is validated. + // So we assume it is validated and return true. 
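+ // In that case the optional CheckBatchFinality check above is the only safeguard.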
+ return true, nil + } + lastValidatedGs, err := s.validator.ReadLastValidatedInfo() + if err != nil { + return false, err + } + if lastValidatedGs == nil { + return false, ErrChainCatchingUp + } + stateValidated := gs.Batch < lastValidatedGs.GlobalState.Batch || (gs.Batch == lastValidatedGs.GlobalState.Batch && gs.PosInBatch <= lastValidatedGs.GlobalState.PosInBatch) + return stateValidated, nil +} + +func (s *BOLDStateProvider) StatesInBatchRange( + ctx context.Context, + fromState protocol.GoGlobalState, + batchLimit uint64, + toHeight l2stateprovider.Height, +) ([]common.Hash, []validator.GoGlobalState, error) { + // Check the integrity of the arguments. + if batchLimit < fromState.Batch || (batchLimit == fromState.Batch && fromState.PosInBatch > 0) { + return nil, nil, fmt.Errorf("batch limit %v cannot be less than from batch %v", batchLimit, fromState.Batch) + } + // Compute the total desired hashes from this request. + totalDesiredHashes := uint64(toHeight + 1) + machineHashes := make([]common.Hash, 0) + states := make([]validator.GoGlobalState, 0) + + var prevBatchMsgCount arbutil.MessageIndex + var err error + if fromState.Batch > 0 { + prevBatchMsgCount, err = s.statelessValidator.InboxTracker().GetBatchMessageCount(uint64(fromState.Batch) - 1) + if err != nil { + return nil, nil, err + } + } + + batchNum := fromState.Batch + currBatchMsgCount, err := s.statelessValidator.InboxTracker().GetBatchMessageCount(batchNum) + if err != nil { + return nil, nil, err + } + posInBatch := fromState.PosInBatch + initialPos := prevBatchMsgCount + arbutil.MessageIndex(posInBatch) + if initialPos >= currBatchMsgCount { + return nil, nil, fmt.Errorf("initial position %v is past end of from batch %v message count %v", initialPos, batchNum, currBatchMsgCount) + } + for pos := initialPos; uint64(len(states)) < totalDesiredHashes; pos++ { + if ctx.Err() != nil { + return nil, nil, ctx.Err() + } + executionResult, err := s.statelessValidator.InboxStreamer().ResultAtCount(arbutil.MessageIndex(pos)) + if err != nil { + return nil, nil, err + } + state := validator.GoGlobalState{ + BlockHash: executionResult.BlockHash, + SendRoot: executionResult.SendRoot, + Batch: batchNum, + PosInBatch: posInBatch, + } + states = append(states, state) + machineHashes = append(machineHashes, machineHash(state)) + if batchNum >= batchLimit { + break + } + // Check if the next message is in the next batch. + if uint64(pos+1) == uint64(currBatchMsgCount) { + posInBatch = 0 + batchNum++ + // Only get the next batch metadata if it'll be needed. + // Otherwise, we might try to read too many batches, and hit an error that + // the next batch isn't found. 
+ if uint64(len(states)) < totalDesiredHashes && batchNum < batchLimit { + currBatchMsgCount, err = s.statelessValidator.InboxTracker().GetBatchMessageCount(batchNum) + if err != nil { + return nil, nil, err + } + } + } else { + posInBatch++ + } + } + return machineHashes, states, nil +} + +func machineHash(gs validator.GoGlobalState) common.Hash { + return crypto.Keccak256Hash([]byte("Machine finished:"), gs.Hash().Bytes()) +} + +func (s *BOLDStateProvider) findGlobalStateFromMessageCountAndBatch(count arbutil.MessageIndex, batchIndex l2stateprovider.Batch) (validator.GoGlobalState, error) { + var prevBatchMsgCount arbutil.MessageIndex + var err error + if batchIndex > 0 { + prevBatchMsgCount, err = s.statelessValidator.InboxTracker().GetBatchMessageCount(uint64(batchIndex) - 1) + if err != nil { + return validator.GoGlobalState{}, err + } + if prevBatchMsgCount > count { + return validator.GoGlobalState{}, fmt.Errorf("bad batch %v provided for message count %v as previous batch ended at message count %v", batchIndex, count, prevBatchMsgCount) + } + } + if count != prevBatchMsgCount { + batchMsgCount, err := s.statelessValidator.InboxTracker().GetBatchMessageCount(uint64(batchIndex)) + if err != nil { + return validator.GoGlobalState{}, err + } + if count > batchMsgCount { + return validator.GoGlobalState{}, fmt.Errorf("message count %v is past end of batch %v message count %v", count, batchIndex, batchMsgCount) + } + } + res, err := s.statelessValidator.InboxStreamer().ResultAtCount(count) + if err != nil { + return validator.GoGlobalState{}, fmt.Errorf("%s: could not check if we have result at count %d: %w", s.stateProviderConfig.ValidatorName, count, err) + } + return validator.GoGlobalState{ + BlockHash: res.BlockHash, + SendRoot: res.SendRoot, + Batch: uint64(batchIndex), + PosInBatch: uint64(count - prevBatchMsgCount), + }, nil +} + +// L2MessageStatesUpTo Computes a block history commitment from a start L2 +// message to an end L2 message index and up to a required batch index. The +// hashes used for this commitment are the machine hashes at each message +// number. +func (s *BOLDStateProvider) L2MessageStatesUpTo( + ctx context.Context, + fromState protocol.GoGlobalState, + batchLimit l2stateprovider.Batch, + toHeight option.Option[l2stateprovider.Height], +) ([]common.Hash, error) { + var to l2stateprovider.Height + if !toHeight.IsNone() { + to = toHeight.Unwrap() + } else { + to = s.blockChallengeLeafHeight + } + items, _, err := s.StatesInBatchRange(ctx, fromState, uint64(batchLimit), to) + if err != nil { + return nil, err + } + return items, nil +} + +// CollectMachineHashes Collects a list of machine hashes at a message number +// based on some configuration parameters. +func (s *BOLDStateProvider) CollectMachineHashes( + ctx context.Context, cfg *l2stateprovider.HashCollectorConfig, +) ([]common.Hash, error) { + s.Lock() + defer s.Unlock() + batchLimit := cfg.AssertionMetadata.BatchLimit + messageNum, err := s.messageNum(cfg.AssertionMetadata, cfg.BlockChallengeHeight) + if err != nil { + return nil, err + } + // Check if we have a virtual global state. 
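+ // If messageNum is at or beyond the last message covered by the batch limit, a single FINISHED machine hash is returned below instead of replaying execution.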
+ vs, err := s.virtualState(messageNum, batchLimit) + if err != nil { + return nil, err + } + if vs.IsSome() { + m := server_arb.NewFinishedMachine(vs.Unwrap()) + defer m.Destroy() + return []common.Hash{m.Hash()}, nil + } + stepHeights := make([]uint64, len(cfg.StepHeights)) + for i, h := range cfg.StepHeights { + stepHeights[i] = uint64(h) + } + messageResult, err := s.statelessValidator.InboxStreamer().ResultAtCount(arbutil.MessageIndex(messageNum + 1)) + if err != nil { + return nil, err + } + cacheKey := &challengecache.Key{ + RollupBlockHash: messageResult.BlockHash, + WavmModuleRoot: cfg.AssertionMetadata.WasmModuleRoot, + MessageHeight: uint64(messageNum), + StepHeights: stepHeights, + } + if s.historyCache != nil { + cachedRoots, err := s.historyCache.Get(cacheKey, cfg.NumDesiredHashes) + switch { + case err == nil: + log.Info( + "In collect machine hashes", + "cfg", fmt.Sprintf("%+v", cfg), + "firstHash", fmt.Sprintf("%#x", cachedRoots[0]), + "lastHash", fmt.Sprintf("%#x", cachedRoots[len(cachedRoots)-1]), + ) + return cachedRoots, nil + case !errors.Is(err, challengecache.ErrNotFoundInCache): + return nil, err + } + } + entry, err := s.statelessValidator.CreateReadyValidationEntry(ctx, messageNum) + if err != nil { + return nil, err + } + input, err := entry.ToInput([]ethdb.WasmTarget{rawdb.TargetWavm}) + if err != nil { + return nil, err + } + // TODO: Enable Redis streams. + wasmModRoot := cfg.AssertionMetadata.WasmModuleRoot + execRun, err := s.statelessValidator.ExecutionSpawners()[0].CreateExecutionRun(wasmModRoot, input, true).Await(ctx) + if err != nil { + return nil, err + } + defer execRun.Close() + ctxCheckAlive, cancelCheckAlive := ctxWithCheckAlive(ctx, execRun) + defer cancelCheckAlive() + stepLeaves := execRun.GetMachineHashesWithStepSize(uint64(cfg.MachineStartIndex), uint64(cfg.StepSize), cfg.NumDesiredHashes) + result, err := stepLeaves.Await(ctxCheckAlive) + if err != nil { + return nil, err + } + log.Info(fmt.Sprintf("Finished gathering machine hashes for request %+v", cfg)) + // Do not save a history commitment of length 1 to the cache. + if len(result) > 1 && s.historyCache != nil { + if err := s.historyCache.Put(cacheKey, result); err != nil { + if !errors.Is(err, challengecache.ErrFileAlreadyExists) { + return nil, err + } + } + } + return result, nil +} + +// messageNum returns the message number at which the BoLD protocol should +// process machine hashes based on the AssociatedAssertionMetadata and +// chalHeight. +func (s *BOLDStateProvider) messageNum(md *l2stateprovider.AssociatedAssertionMetadata, chalHeight l2stateprovider.Height) (arbutil.MessageIndex, error) { + var prevBatchMsgCount arbutil.MessageIndex + bNum := md.FromState.Batch + posInBatch := md.FromState.PosInBatch + if bNum > 0 { + var err error + prevBatchMsgCount, err = s.statelessValidator.InboxTracker().GetBatchMessageCount(uint64(bNum - 1)) + if err != nil { + return 0, fmt.Errorf("could not get prevBatchMsgCount at %d: %w", bNum-1, err) + } + } + return prevBatchMsgCount + arbutil.MessageIndex(posInBatch) + arbutil.MessageIndex(chalHeight), nil +} + +// virtualState returns an optional global state. +// +// If messageNum is a virtual block or the last real block to which this +// validator's assertion committed, then this function returns a global state +// representing that virtual block's finished machine. Otherwise, it returns +// an Option.None.
+// +// This can happen in the BoLD protocol when the rival block-level challenge +// edge has committed to more blocks than this validator expected for the +// current batch. In that case, the chalHeight will be a block in the virtual +// padding of the history commitment of this validator. +// +// If there is an Option.Some() return value, it means that callers don't need +// to actually step through a machine to produce a series of hashes, because all +// of the hashes can just be "virtual" copies of the hash of a single machine in +// the FINISHED state. +func (s *BOLDStateProvider) virtualState(msgNum arbutil.MessageIndex, limit l2stateprovider.Batch) (option.Option[validator.GoGlobalState], error) { + gs := option.None[validator.GoGlobalState]() + limitMsgCount, err := s.statelessValidator.InboxTracker().GetBatchMessageCount(uint64(limit) - 1) + if err != nil { + return gs, fmt.Errorf("could not get limitMsgCount at %d: %w", limit, err) + } + if msgNum >= limitMsgCount { + result, err := s.statelessValidator.InboxStreamer().ResultAtCount(arbutil.MessageIndex(limitMsgCount)) + if err != nil { + return gs, fmt.Errorf("could not get global state at limitMsgCount %d: %w", limitMsgCount, err) + } + gs = option.Some(validator.GoGlobalState{ + BlockHash: result.BlockHash, + SendRoot: result.SendRoot, + Batch: uint64(limit), + PosInBatch: 0, + }) + } + return gs, nil +} + +// ctxWithCheckAlive creates a context with a check alive routine that will +// cancel the context if the check alive routine fails. +func ctxWithCheckAlive(ctxIn context.Context, execRun validator.ExecutionRun) (context.Context, context.CancelFunc) { + // Create a context that will cancel if the check alive routine fails. + // This is to ensure that the validator does not freeze indefinitely if + // the execution run is no longer alive. + ctx, cancel := context.WithCancel(ctxIn) + go func() { + // Call cancel so that the calling function is canceled if the check alive + // routine fails/returns. + defer cancel() + ticker := time.NewTicker(10 * time.Second) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + // Create a context with a timeout, so that the check alive routine does + // not run indefinitely. + ctxCheckAliveWithTimeout, cancelCheckAliveWithTimeout := context.WithTimeout(ctx, 5*time.Second) + err := execRun.CheckAlive(ctxCheckAliveWithTimeout) + if err != nil { + executionNodeOfflineGauge.Inc(1) + cancelCheckAliveWithTimeout() + return + } + cancelCheckAliveWithTimeout() + } + } + }() + return ctx, cancel +} + +// CollectProof collects a one-step proof at a message number and OpcodeIndex. +func (s *BOLDStateProvider) CollectProof( + ctx context.Context, + assertionMetadata *l2stateprovider.AssociatedAssertionMetadata, + blockChallengeHeight l2stateprovider.Height, + machineIndex l2stateprovider.OpcodeIndex, +) ([]byte, error) { + messageNum, err := s.messageNum(assertionMetadata, blockChallengeHeight) + if err != nil { + return nil, err + } + // Check if we have a virtual global state.
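+ // As in CollectMachineHashes, a virtual state lets the one-step proof be produced from a finished machine without replaying execution.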
+ vs, err := s.virtualState(messageNum, assertionMetadata.BatchLimit) + if err != nil { + return nil, err + } + if vs.IsSome() { + m := server_arb.NewFinishedMachine(vs.Unwrap()) + defer m.Destroy() + log.Info( + "Getting machine OSP from virtual state", + "fromBatch", assertionMetadata.FromState.Batch, + "fromPosInBatch", assertionMetadata.FromState.PosInBatch, + "blockChallengeHeight", blockChallengeHeight, + "messageNum", messageNum, + "machineIndex", machineIndex, + ) + return m.ProveNextStep(), nil + } + entry, err := s.statelessValidator.CreateReadyValidationEntry(ctx, messageNum) + if err != nil { + return nil, err + } + input, err := entry.ToInput([]ethdb.WasmTarget{rawdb.TargetWavm}) + if err != nil { + return nil, err + } + log.Info( + "Getting machine OSP", + "fromBatch", assertionMetadata.FromState.Batch, + "fromPosInBatch", assertionMetadata.FromState.PosInBatch, + "blockChallengeHeight", blockChallengeHeight, + "messageNum", messageNum, + "machineIndex", machineIndex, + "startState", fmt.Sprintf("%+v", input.StartState), + ) + wasmModRoot := assertionMetadata.WasmModuleRoot + execRun, err := s.statelessValidator.ExecutionSpawners()[0].CreateExecutionRun(wasmModRoot, input, true).Await(ctx) + if err != nil { + return nil, err + } + defer execRun.Close() + ctxCheckAlive, cancelCheckAlive := ctxWithCheckAlive(ctx, execRun) + defer cancelCheckAlive() + oneStepProofPromise := execRun.GetProofAt(uint64(machineIndex)) + return oneStepProofPromise.Await(ctxCheckAlive) +} diff --git a/staker/bold/data_poster_transactor.go b/staker/bold/data_poster_transactor.go new file mode 100644 index 0000000000..aa5f8d9768 --- /dev/null +++ b/staker/bold/data_poster_transactor.go @@ -0,0 +1,44 @@ +// Copyright 2023-2024, Offchain Labs, Inc. +// For license information, see https://github.com/offchainlabs/nitro/blob/main/LICENSE +package bold + +import ( + "context" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/core/types" + + solimpl "github.com/offchainlabs/bold/chain-abstraction/sol-implementation" + "github.com/offchainlabs/nitro/arbnode/dataposter" +) + +// DataPosterTransactor is a wrapper around a DataPoster that implements the Transactor interface. +type DataPosterTransactor struct { + fifo *solimpl.FIFO + *dataposter.DataPoster +} + +func NewDataPosterTransactor(dataPoster *dataposter.DataPoster) *DataPosterTransactor { + return &DataPosterTransactor{ + fifo: solimpl.NewFIFO(1000), + DataPoster: dataPoster, + } +} + +func (d *DataPosterTransactor) SendTransaction(ctx context.Context, fn func(opts *bind.TransactOpts) (*types.Transaction, error), opts *bind.TransactOpts, gas uint64) (*types.Transaction, error) { + // Try to acquire lock and if it fails, wait for a bit and try again. + for !d.fifo.Lock() { + select { + case <-time.After(100 * time.Millisecond): + case <-ctx.Done(): + return nil, ctx.Err() + } + } + defer d.fifo.Unlock() + tx, err := fn(opts) + if err != nil { + return nil, err + } + return d.PostSimpleTransaction(ctx, *tx.To(), tx.Data(), gas, tx.Value()) +} diff --git a/staker/challenge-cache/cache.go b/staker/challenge-cache/cache.go index 5dca2764e8..98310c742a 100644 --- a/staker/challenge-cache/cache.go +++ b/staker/challenge-cache/cache.go @@ -79,29 +79,21 @@ type Cache struct { // New cache from a base directory path. func New(baseDir string) (*Cache, error) { - return &Cache{ - baseDir: baseDir, - tempWritesDir: "", - }, nil -} - -// Init a cache by verifying its base directory exists. 
-func (c *Cache) Init(_ context.Context) error { - if _, err := os.Stat(c.baseDir); err != nil { - if err := os.MkdirAll(c.baseDir, os.ModePerm); err != nil { - return fmt.Errorf("could not make initialize challenge cache directory %s: %w", c.baseDir, err) - } + if err := os.MkdirAll(baseDir, os.ModePerm); err != nil { + return nil, err } // We create a temp directory to write our hashes to first when putting to the cache. // Once writing succeeds, we rename in an atomic operation to the correct file name // in the cache directory hierarchy in the `Put` function. All of these temporary writes // will occur in a subdir of the base directory called temp. - tempWritesDir, err := os.MkdirTemp(c.baseDir, "temp") + tempWritesDir, err := os.MkdirTemp(baseDir, "temp") if err != nil { - return err + return nil, err } - c.tempWritesDir = tempWritesDir - return nil + return &Cache{ + baseDir: baseDir, + tempWritesDir: tempWritesDir, + }, nil } // Get a list of hashes from the cache from index 0 up to a certain index. Hashes are saved as files in the directory @@ -217,11 +209,11 @@ func (c *Cache) Prune(ctx context.Context, messageNumber uint64) error { } // Reads 32 bytes at a time from a reader up to a specified height. If none, then read all. -func readHashes(r io.Reader, numToRead uint64) ([]common.Hash, error) { +func readHashes(r io.Reader, toReadLimit uint64) ([]common.Hash, error) { br := bufio.NewReader(r) hashes := make([]common.Hash, 0) buf := make([]byte, 0, common.HashLength) - for totalRead := uint64(0); totalRead < numToRead; totalRead++ { + for totalRead := uint64(0); totalRead < toReadLimit; totalRead++ { n, err := br.Read(buf[:cap(buf)]) if err != nil { // If we try to read but reach EOF, we break out of the loop. @@ -236,13 +228,6 @@ func readHashes(r io.Reader, numToRead uint64) ([]common.Hash, error) { } hashes = append(hashes, common.BytesToHash(buf)) } - if numToRead > uint64(len(hashes)) { - return nil, fmt.Errorf( - "wanted to read %d hashes, but only read %d hashes", - numToRead, - len(hashes), - ) - } return hashes, nil } diff --git a/staker/challenge-cache/cache_test.go b/staker/challenge-cache/cache_test.go index 40be627b7a..4328ceee12 100644 --- a/staker/challenge-cache/cache_test.go +++ b/staker/challenge-cache/cache_test.go @@ -18,8 +18,6 @@ import ( var _ HistoryCommitmentCacher = (*Cache)(nil) func TestCache(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() basePath := t.TempDir() if err := os.MkdirAll(basePath, os.ModePerm); err != nil { t.Fatal(err) @@ -28,9 +26,6 @@ func TestCache(t *testing.T) { if err != nil { t.Fatal(err) } - if err = cache.Init(ctx); err != nil { - t.Fatal(err) - } key := &Key{ WavmModuleRoot: common.BytesToHash([]byte("foo")), MessageHeight: 0, @@ -81,9 +76,6 @@ func TestPrune(t *testing.T) { if err != nil { t.Fatal(err) } - if err = cache.Init(ctx); err != nil { - t.Fatal(err) - } key := &Key{ WavmModuleRoot: common.BytesToHash([]byte("foo")), MessageHeight: 20, @@ -212,16 +204,6 @@ func TestPrune(t *testing.T) { } func TestReadWriteStatehashes(t *testing.T) { - t.Run("read up to, but had empty reader", func(t *testing.T) { - b := bytes.NewBuffer([]byte{}) - _, err := readHashes(b, 100) - if err == nil { - t.Fatal("Wanted error") - } - if !strings.Contains(err.Error(), "only read 0 hashes") { - t.Fatal("Unexpected error") - } - }) t.Run("read single root", func(t *testing.T) { b := bytes.NewBuffer([]byte{}) want := common.BytesToHash([]byte("foo")) @@ -324,20 +306,20 @@ func Test_readHashes(t 
*testing.T) { t.Fatalf("Unexpected error: %v", err) } }) - t.Run("EOF, but did not read as much as was expected", func(t *testing.T) { + t.Run("EOF, but did not read as much as was possible", func(t *testing.T) { want := []common.Hash{ common.BytesToHash([]byte("foo")), common.BytesToHash([]byte("bar")), common.BytesToHash([]byte("baz")), } - m := &mockReader{wantErr: true, hashes: want, err: io.EOF} - _, err := readHashes(m, 100) - if err == nil { - t.Fatal(err) - } - if !strings.Contains(err.Error(), "wanted to read 100") { + m := &mockReader{wantErr: false, hashes: want, bytesRead: 32} + hashes, err := readHashes(m, 100) + if err != nil { t.Fatalf("Unexpected error: %v", err) } + if len(hashes) != len(want) { + t.Fatalf("Wrong number of hashes. Expected %d, got %d", len(want), len(hashes)) + } }) t.Run("Reads wrong number of bytes", func(t *testing.T) { want := []common.Hash{ @@ -424,8 +406,6 @@ func Test_determineFilePath(t *testing.T) { } func BenchmarkCache_Read_32Mb(b *testing.B) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() b.StopTimer() basePath := os.TempDir() if err := os.MkdirAll(basePath, os.ModePerm); err != nil { @@ -435,9 +415,6 @@ func BenchmarkCache_Read_32Mb(b *testing.B) { if err != nil { b.Fatal(err) } - if err = cache.Init(ctx); err != nil { - b.Fatal(err) - } key := &Key{ WavmModuleRoot: common.BytesToHash([]byte("foo")), MessageHeight: 0, diff --git a/staker/block_challenge_backend.go b/staker/legacy/block_challenge_backend.go similarity index 96% rename from staker/block_challenge_backend.go rename to staker/legacy/block_challenge_backend.go index a8a6e917a2..969c482586 100644 --- a/staker/block_challenge_backend.go +++ b/staker/legacy/block_challenge_backend.go @@ -1,7 +1,7 @@ // Copyright 2021-2022, Offchain Labs, Inc. // For license information, see https://github.com/nitro/blob/master/LICENSE -package staker +package legacystaker import ( "context" @@ -16,17 +16,18 @@ import ( "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/solgen/go/challengegen" + "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/validator" ) type BlockChallengeBackend struct { - streamer TransactionStreamerInterface + streamer staker.TransactionStreamerInterface startMsgCount arbutil.MessageIndex startPosition uint64 endPosition uint64 startGs validator.GoGlobalState endGs validator.GoGlobalState - inboxTracker InboxTrackerInterface + inboxTracker staker.InboxTrackerInterface tooFarStartsAtPosition uint64 } @@ -36,8 +37,8 @@ var _ ChallengeBackend = (*BlockChallengeBackend)(nil) func NewBlockChallengeBackend( initialState *challengegen.ChallengeManagerInitiatedChallenge, maxBatchesRead uint64, - streamer TransactionStreamerInterface, - inboxTracker InboxTrackerInterface, + streamer staker.TransactionStreamerInterface, + inboxTracker staker.InboxTrackerInterface, ) (*BlockChallengeBackend, error) { startGs := validator.GoGlobalStateFromSolidity(initialState.StartState) diff --git a/staker/challenge_manager.go b/staker/legacy/challenge_manager.go similarity index 97% rename from staker/challenge_manager.go rename to staker/legacy/challenge_manager.go index 96e496acf8..1aa13a9e05 100644 --- a/staker/challenge_manager.go +++ b/staker/legacy/challenge_manager.go @@ -1,7 +1,7 @@ // Copyright 2021-2022, Offchain Labs, Inc. 
// For license information, see https://github.com/nitro/blob/master/LICENSE -package staker +package legacystaker import ( "context" @@ -10,7 +10,7 @@ import ( "fmt" "math/big" - "github.com/ethereum/go-ethereum" + ethereum "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/common" @@ -22,6 +22,7 @@ import ( "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/solgen/go/challengegen" + "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/validator" ) @@ -49,7 +50,7 @@ type ChallengeBackend interface { } // Assert that ExecutionChallengeBackend implements ChallengeBackend -var _ ChallengeBackend = (*ExecutionChallengeBackend)(nil) +var _ ChallengeBackend = (*staker.ExecutionChallengeBackend)(nil) type challengeCore struct { con *challengegen.ChallengeManager @@ -70,13 +71,13 @@ type ChallengeManager struct { blockChallengeBackend *BlockChallengeBackend // fields below are only used to create execution challenge from block challenge - validator *StatelessBlockValidator + validator *staker.StatelessBlockValidator maxBatchesRead uint64 wasmModuleRoot common.Hash // these fields are empty until working on execution challenge initialMachineMessageCount arbutil.MessageIndex - executionChallengeBackend *ExecutionChallengeBackend + executionChallengeBackend *staker.ExecutionChallengeBackend machineFinalStepCount uint64 } @@ -89,7 +90,7 @@ func NewChallengeManager( fromAddr common.Address, challengeManagerAddr common.Address, challengeIndex uint64, - val *StatelessBlockValidator, + val *staker.StatelessBlockValidator, startL1Block uint64, confirmationBlocks int64, ) (*ChallengeManager, error) { @@ -129,8 +130,8 @@ func NewChallengeManager( backend, err := NewBlockChallengeBackend( parsedLog, challengeInfo.MaxInboxMessages, - val.streamer, - val.inboxTracker, + val.InboxStreamer(), + val.InboxTracker(), ) if err != nil { return nil, fmt.Errorf("error creating block challenge backend for challenge %v: %w", challengeIndex, err) @@ -167,7 +168,7 @@ func NewExecutionChallengeManager( if err != nil { return nil, err } - backend, err := NewExecutionChallengeBackend(exec) + backend, err := staker.NewExecutionChallengeBackend(exec) if err != nil { return nil, err } @@ -482,9 +483,9 @@ func (m *ChallengeManager) createExecutionBackend(ctx context.Context, step uint } input.BatchInfo = prunedBatches var execRun validator.ExecutionRun - for _, spawner := range m.validator.execSpawners { + for _, spawner := range m.validator.ExecutionSpawners() { if validator.SpawnerSupportsModule(spawner, m.wasmModuleRoot) { - execRun, err = spawner.CreateExecutionRun(m.wasmModuleRoot, input).Await(ctx) + execRun, err = spawner.CreateExecutionRun(m.wasmModuleRoot, input, false).Await(ctx) if err != nil { return fmt.Errorf("error creating execution backend for msg %v: %w", initialCount, err) } @@ -494,7 +495,7 @@ func (m *ChallengeManager) createExecutionBackend(ctx context.Context, step uint if execRun == nil { return fmt.Errorf("did not find valid execution backend") } - backend, err := NewExecutionChallengeBackend(execRun) + backend, err := staker.NewExecutionChallengeBackend(execRun) if err != nil { return err } diff --git a/staker/challenge_test.go b/staker/legacy/challenge_test.go similarity index 93% rename from staker/challenge_test.go rename to staker/legacy/challenge_test.go index ede1295a13..a34e4e885d 100644 --- a/staker/challenge_test.go +++ 
b/staker/legacy/challenge_test.go @@ -1,7 +1,7 @@ // Copyright 2021-2022, Offchain Labs, Inc. // For license information, see https://github.com/nitro/blob/master/LICENSE -package staker +package legacystaker import ( "context" @@ -17,7 +17,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" @@ -98,12 +98,12 @@ func createTransactOpts(t *testing.T) *bind.TransactOpts { return opts } -func createGenesisAlloc(accts ...*bind.TransactOpts) core.GenesisAlloc { - alloc := make(core.GenesisAlloc) +func createGenesisAlloc(accts ...*bind.TransactOpts) types.GenesisAlloc { + alloc := make(types.GenesisAlloc) amount := big.NewInt(10) amount.Exp(amount, big.NewInt(20), nil) for _, opts := range accts { - alloc[opts.From] = core.GenesisAccount{ + alloc[opts.From] = types.Account{ Balance: new(big.Int).Set(amount), } } @@ -242,7 +242,7 @@ func runChallengeTest( func createBaseMachine(t *testing.T, wasmname string, wasmModules []string) *server_arb.ArbitratorMachine { _, filename, _, _ := runtime.Caller(0) - wasmDir := path.Join(path.Dir(filename), "../arbitrator/prover/test-cases/") + wasmDir := path.Join(path.Dir(filename), "../../arbitrator/prover/test-cases/") wasmPath := path.Join(wasmDir, wasmname) @@ -259,31 +259,31 @@ func createBaseMachine(t *testing.T, wasmname string, wasmModules []string) *ser func TestChallengeToOSP(t *testing.T) { machine := createBaseMachine(t, "global-state.wasm", []string{"global-state-wrapper.wasm"}) - IncorrectMachine := server_arb.NewIncorrectMachine(machine, 200) + IncorrectMachine := NewIncorrectMachine(machine, 200) runChallengeTest(t, machine, IncorrectMachine, false, false, 0) } func TestChallengeToFailedOSP(t *testing.T) { machine := createBaseMachine(t, "global-state.wasm", []string{"global-state-wrapper.wasm"}) - IncorrectMachine := server_arb.NewIncorrectMachine(machine, 200) + IncorrectMachine := NewIncorrectMachine(machine, 200) runChallengeTest(t, machine, IncorrectMachine, true, false, 0) } func TestChallengeToErroredOSP(t *testing.T) { machine := createBaseMachine(t, "const.wasm", nil) - IncorrectMachine := server_arb.NewIncorrectMachine(machine, 10) + IncorrectMachine := NewIncorrectMachine(machine, 10) runChallengeTest(t, machine, IncorrectMachine, false, false, 0) } func TestChallengeToFailedErroredOSP(t *testing.T) { machine := createBaseMachine(t, "const.wasm", nil) - IncorrectMachine := server_arb.NewIncorrectMachine(machine, 10) + IncorrectMachine := NewIncorrectMachine(machine, 10) runChallengeTest(t, machine, IncorrectMachine, true, false, 0) } func TestChallengeToTimeout(t *testing.T) { machine := createBaseMachine(t, "global-state.wasm", []string{"global-state-wrapper.wasm"}) - IncorrectMachine := server_arb.NewIncorrectMachine(machine, 200) + IncorrectMachine := NewIncorrectMachine(machine, 200) runChallengeTest(t, machine, IncorrectMachine, false, true, 0) } diff --git a/staker/common_test.go b/staker/legacy/common_test.go similarity index 95% rename from staker/common_test.go rename to staker/legacy/common_test.go index eec6882fde..06ebeeffa3 100644 --- a/staker/common_test.go +++ b/staker/legacy/common_test.go @@ -1,7 +1,7 @@ // Copyright 2021-2022, Offchain Labs, Inc. 
// For license information, see https://github.com/nitro/blob/master/LICENSE -package staker +package legacystaker import ( "testing" diff --git a/staker/fast_confirm.go b/staker/legacy/fast_confirm.go similarity index 94% rename from staker/fast_confirm.go rename to staker/legacy/fast_confirm.go index 5dc7f01205..13ce32b849 100644 --- a/staker/fast_confirm.go +++ b/staker/legacy/fast_confirm.go @@ -1,7 +1,7 @@ // Copyright 2023-2024, Offchain Labs, Inc. // For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE -package staker +package legacystaker import ( "context" @@ -46,7 +46,7 @@ func NewFastConfirmSafe( gasRefunder: gasRefunder, l1Reader: l1Reader, } - safe, err := contractsgen.NewSafe(fastConfirmSafeAddress, builder) + safe, err := contractsgen.NewSafe(fastConfirmSafeAddress, wallet.L1Client()) if err != nil { return nil, err } @@ -127,11 +127,7 @@ func (f *FastConfirmSafe) tryFastConfirmation(ctx context.Context, blockHash com } log.Info("Approving Safe tx hash to fast confirm", "safeHash", safeTxHash, "nodeHash", nodeHash) - auth, err := f.builder.Auth(ctx) - if err != nil { - return err - } - _, err = f.safe.ApproveHash(auth, safeTxHash) + _, err = f.safe.ApproveHash(f.builder.Auth(ctx), safeTxHash) if err != nil { return err } @@ -160,7 +156,7 @@ func (f *FastConfirmSafe) tryFastConfirmation(ctx context.Context, blockHash com } func (f *FastConfirmSafe) flushTransactions(ctx context.Context) error { - arbTx, err := f.wallet.ExecuteTransactions(ctx, f.builder, f.gasRefunder) + arbTx, err := f.builder.ExecuteTransactions(ctx) if err != nil { return err } @@ -172,7 +168,6 @@ func (f *FastConfirmSafe) flushTransactions(ctx context.Context) error { return fmt.Errorf("error waiting for tx receipt: %w", err) } } - f.builder.ClearTransactions() return nil } @@ -229,13 +224,9 @@ func (f *FastConfirmSafe) checkApprovedHashAndExecTransaction(ctx context.Contex } } if approvedHashCount >= f.threshold { - auth, err := f.builder.Auth(ctx) - if err != nil { - return false, err - } log.Info("Executing Safe tx to fast confirm", "safeHash", safeTxHash) - _, err = f.safe.ExecTransaction( - auth, + _, err := f.safe.ExecTransaction( + f.builder.Auth(ctx), f.wallet.RollupAddress(), big.NewInt(0), fastConfirmCallData, diff --git a/staker/l1_validator.go b/staker/legacy/l1_validator.go similarity index 90% rename from staker/l1_validator.go rename to staker/legacy/l1_validator.go index 8ee05dda22..f88ab93d0e 100644 --- a/staker/l1_validator.go +++ b/staker/legacy/l1_validator.go @@ -1,7 +1,7 @@ // Copyright 2021-2022, Offchain Labs, Inc. 
// For license information, see https://github.com/nitro/blob/master/LICENSE -package staker +package legacystaker import ( "context" @@ -19,6 +19,7 @@ import ( "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/solgen/go/rollupgen" + "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/staker/txbuilder" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/headerreader" @@ -43,7 +44,7 @@ const ( ) type L1Validator struct { - rollup *RollupWatcher + rollup *staker.RollupWatcher rollupAddress common.Address validatorUtils *rollupgen.ValidatorUtils client *ethclient.Client @@ -51,9 +52,9 @@ type L1Validator struct { wallet ValidatorWalletInterface callOpts bind.CallOpts - inboxTracker InboxTrackerInterface - txStreamer TransactionStreamerInterface - blockValidator *BlockValidator + inboxTracker staker.InboxTrackerInterface + txStreamer staker.TransactionStreamerInterface + blockValidator *staker.BlockValidator lastWasmModuleRoot common.Hash } @@ -61,16 +62,17 @@ func NewL1Validator( client *ethclient.Client, wallet ValidatorWalletInterface, validatorUtilsAddress common.Address, + gasRefunder common.Address, callOpts bind.CallOpts, - inboxTracker InboxTrackerInterface, - txStreamer TransactionStreamerInterface, - blockValidator *BlockValidator, + inboxTracker staker.InboxTrackerInterface, + txStreamer staker.TransactionStreamerInterface, + blockValidator *staker.BlockValidator, ) (*L1Validator, error) { - builder, err := txbuilder.NewBuilder(wallet) + builder, err := txbuilder.NewBuilder(wallet, gasRefunder) if err != nil { return nil, err } - rollup, err := NewRollupWatcher(wallet.RollupAddress(), builder, callOpts) + rollup, err := staker.NewRollupWatcher(wallet.RollupAddress(), wallet.L1Client(), callOpts) if err != nil { return nil, err } @@ -102,8 +104,7 @@ func (v *L1Validator) getCallOpts(ctx context.Context) *bind.CallOpts { } func (v *L1Validator) Initialize(ctx context.Context) error { - err := v.rollup.Initialize(ctx) - if err != nil { + if err := v.rollup.Initialize(ctx); err != nil { return err } return v.updateBlockValidatorModuleRoot(ctx) @@ -141,7 +142,7 @@ func (v *L1Validator) resolveTimedOutChallenges(ctx context.Context) (*types.Tra return v.wallet.TimeoutChallenges(ctx, challengesToEliminate) } -func (v *L1Validator) resolveNextNode(ctx context.Context, info *StakerInfo, latestConfirmedNode *uint64) (bool, error) { +func (v *L1Validator) resolveNextNode(ctx context.Context, info *staker.StakerInfo, latestConfirmedNode *uint64) (bool, error) { callOpts := v.getCallOpts(ctx) confirmType, err := v.validatorUtils.CheckDecidableNextNode(callOpts, v.rollupAddress) if err != nil { @@ -159,11 +160,7 @@ func (v *L1Validator) resolveNextNode(ctx context.Context, info *StakerInfo, lat return false, nil } log.Warn("rejecting node", "node", unresolvedNodeIndex) - auth, err := v.builder.Auth(ctx) - if err != nil { - return false, err - } - _, err = v.rollup.RejectNextNode(auth, *addr) + _, err = v.rollup.RejectNextNode(v.builder.Auth(ctx), *addr) return true, err case CONFIRM_TYPE_VALID: nodeInfo, err := v.rollup.LookupNode(ctx, unresolvedNodeIndex) @@ -172,11 +169,7 @@ func (v *L1Validator) resolveNextNode(ctx context.Context, info *StakerInfo, lat } afterGs := nodeInfo.AfterState().GlobalState log.Info("confirming node", "node", unresolvedNodeIndex) - auth, err := v.builder.Auth(ctx) - if err != nil { - return false, err - } - _, err = v.rollup.ConfirmNextNode(auth, afterGs.BlockHash, afterGs.SendRoot) + _, err = 
v.rollup.ConfirmNextNode(v.builder.Auth(ctx), afterGs.BlockHash, afterGs.SendRoot) if err != nil { return false, err } @@ -205,7 +198,7 @@ func (v *L1Validator) isRequiredStakeElevated(ctx context.Context) (bool, error) } type createNodeAction struct { - assertion *Assertion + assertion *staker.Assertion prevInboxMaxCount *big.Int hash common.Hash } @@ -222,7 +215,7 @@ type OurStakerInfo struct { LatestStakedNodeHash common.Hash CanProgress bool StakeExists bool - *StakerInfo + *staker.StakerInfo } func (v *L1Validator) generateNodeAction( @@ -266,16 +259,16 @@ func (v *L1Validator) generateNodeAction( return nil, false, nil } - caughtUp, startCount, err := GlobalStateToMsgCount(v.inboxTracker, v.txStreamer, startState.GlobalState) + caughtUp, startCount, err := staker.GlobalStateToMsgCount(v.inboxTracker, v.txStreamer, startState.GlobalState) if err != nil { return nil, false, fmt.Errorf("start state not in chain: %w", err) } if !caughtUp { - target := GlobalStatePosition{ + target := staker.GlobalStatePosition{ BatchNumber: startState.GlobalState.Batch, PosInBatch: startState.GlobalState.PosInBatch, } - var current GlobalStatePosition + var current staker.GlobalStatePosition head, err := v.txStreamer.GetProcessedMessageCount() if err != nil { _, current, err = v.blockValidator.GlobalStatePositionsAtCount(head) @@ -296,7 +289,7 @@ func (v *L1Validator) generateNodeAction( return nil, false, err } validatedGlobalState = valInfo.GlobalState - caughtUp, validatedCount, err = GlobalStateToMsgCount( + caughtUp, validatedCount, err = staker.GlobalStateToMsgCount( v.inboxTracker, v.txStreamer, valInfo.GlobalState, ) if err != nil { @@ -355,11 +348,11 @@ func (v *L1Validator) generateNodeAction( if err != nil { return nil, false, err } - _, gsPos, err := GlobalStatePositionsAtCount(v.inboxTracker, validatedCount, batchNum) + _, gsPos, err := staker.GlobalStatePositionsAtCount(v.inboxTracker, validatedCount, batchNum) if err != nil { return nil, false, fmt.Errorf("%w: failed calculating GSposition for count %d", err, validatedCount) } - validatedGlobalState = buildGlobalState(*execResult, gsPos) + validatedGlobalState = staker.BuildGlobalState(*execResult, gsPos) } currentL1BlockNum, err := v.client.BlockNumber(ctx) @@ -426,8 +419,8 @@ func (v *L1Validator) generateNodeAction( log.Error("Found incorrect assertion: Machine status not finished", "node", nd.NodeNum, "machineStatus", nd.Assertion.AfterState.MachineStatus) continue } - caughtUp, nodeMsgCount, err := GlobalStateToMsgCount(v.inboxTracker, v.txStreamer, afterGS) - if errors.Is(err, ErrGlobalStateNotInChain) { + caughtUp, nodeMsgCount, err := staker.GlobalStateToMsgCount(v.inboxTracker, v.txStreamer, afterGS) + if errors.Is(err, staker.ErrGlobalStateNotInChain) { wrongNodesExist = true log.Error("Found incorrect assertion", "node", nd.NodeNum, "afterGS", afterGS, "err", err) continue @@ -510,7 +503,7 @@ func (v *L1Validator) createNewNodeAction( hasSiblingByte[0] = 1 } assertionNumBlocks := uint64(validatedCount - startCount) - assertion := &Assertion{ + assertion := &staker.Assertion{ BeforeState: startState, AfterState: &validator.ExecutionState{ GlobalState: validatedGS, @@ -540,13 +533,13 @@ func (v *L1Validator) createNewNodeAction( } // Returns (execution state, inbox max count, L1 block proposed, parent chain block proposed, error) -func lookupNodeStartState(ctx context.Context, rollup *RollupWatcher, nodeNum uint64, nodeHash common.Hash) (*validator.ExecutionState, *big.Int, uint64, uint64, error) { +func lookupNodeStartState(ctx 
context.Context, rollup *staker.RollupWatcher, nodeNum uint64, nodeHash common.Hash) (*validator.ExecutionState, *big.Int, uint64, uint64, error) { if nodeNum == 0 { creationEvent, err := rollup.LookupCreation(ctx) if err != nil { return nil, nil, 0, 0, fmt.Errorf("error looking up rollup creation event: %w", err) } - l1BlockNumber, err := arbutil.CorrespondingL1BlockNumber(ctx, rollup.client, creationEvent.Raw.BlockNumber) + l1BlockNumber, err := arbutil.CorrespondingL1BlockNumber(ctx, rollup.Client(), creationEvent.Raw.BlockNumber) if err != nil { return nil, nil, 0, 0, err } diff --git a/validator/server_arb/mock_machine.go b/staker/legacy/mock_machine_test.go similarity index 78% rename from validator/server_arb/mock_machine.go rename to staker/legacy/mock_machine_test.go index 00512d1d77..a5dc5400a7 100644 --- a/validator/server_arb/mock_machine.go +++ b/staker/legacy/mock_machine_test.go @@ -1,7 +1,7 @@ // Copyright 2021-2022, Offchain Labs, Inc. // For license information, see https://github.com/nitro/blob/master/LICENSE -package server_arb +package legacystaker import ( "context" @@ -9,26 +9,29 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/offchainlabs/nitro/validator" + "github.com/offchainlabs/nitro/validator/server_arb" ) +// IncorrectMachine will report a bad global state after the incorrectStep onwards. +// It'll also extend the step count to incorrectStep if necessary. type IncorrectMachine struct { - inner *ArbitratorMachine + inner *server_arb.ArbitratorMachine incorrectStep uint64 stepCount uint64 } var badGlobalState = validator.GoGlobalState{Batch: 0xbadbadbadbad, PosInBatch: 0xbadbadbadbad} -var _ MachineInterface = (*IncorrectMachine)(nil) +var _ server_arb.MachineInterface = (*IncorrectMachine)(nil) -func NewIncorrectMachine(inner *ArbitratorMachine, incorrectStep uint64) *IncorrectMachine { +func NewIncorrectMachine(inner *server_arb.ArbitratorMachine, incorrectStep uint64) *IncorrectMachine { return &IncorrectMachine{ inner: inner.Clone(), incorrectStep: incorrectStep, } } -func (m *IncorrectMachine) CloneMachineInterface() MachineInterface { +func (m *IncorrectMachine) CloneMachineInterface() server_arb.MachineInterface { return &IncorrectMachine{ inner: m.inner.Clone(), incorrectStep: m.incorrectStep, @@ -58,6 +61,10 @@ func (m *IncorrectMachine) IsRunning() bool { return m.inner.IsRunning() || m.stepCount < m.incorrectStep } +func (m *IncorrectMachine) IsErrored() bool { + return !m.IsRunning() && m.inner.IsErrored() +} + func (m *IncorrectMachine) ValidForStep(step uint64) bool { return m.inner.ValidForStep(step) } diff --git a/staker/staker.go b/staker/legacy/staker.go similarity index 92% rename from staker/staker.go rename to staker/legacy/staker.go index c5f9c1cd65..504e8c8421 100644 --- a/staker/staker.go +++ b/staker/legacy/staker.go @@ -1,7 +1,7 @@ // Copyright 2021-2022, Offchain Labs, Inc. 
// For license information, see https://github.com/nitro/blob/master/LICENSE -package staker +package legacystaker import ( "context" @@ -27,7 +27,7 @@ import ( "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/solgen/go/rollupgen" - "github.com/offchainlabs/nitro/staker/txbuilder" + "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/util" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/headerreader" @@ -101,8 +101,8 @@ type L1ValidatorConfig struct { gasRefunder common.Address } -func (c *L1ValidatorConfig) ParseStrategy() (StakerStrategy, error) { - switch strings.ToLower(c.Strategy) { +func ParseStrategy(strategy string) (StakerStrategy, error) { + switch strings.ToLower(strategy) { case "watchtower": return WatchtowerStrategy, nil case "defensive": @@ -114,7 +114,7 @@ func (c *L1ValidatorConfig) ParseStrategy() (StakerStrategy, error) { case "makenodes": return MakeNodesStrategy, nil default: - return WatchtowerStrategy, fmt.Errorf("unknown staker strategy \"%v\"", c.Strategy) + return WatchtowerStrategy, fmt.Errorf("unknown staker strategy \"%v\"", strategy) } } @@ -132,7 +132,7 @@ func (c *L1ValidatorConfig) ValidatorRequired() bool { } func (c *L1ValidatorConfig) Validate() error { - strategy, err := c.ParseStrategy() + strategy, err := ParseStrategy(c.Strategy) if err != nil { return err } @@ -144,7 +144,12 @@ func (c *L1ValidatorConfig) Validate() error { return nil } -type L1ValidatorConfigFetcher func() *L1ValidatorConfig +func (c *L1ValidatorConfig) GasRefunder() common.Address { + return c.gasRefunder +} +func (c *L1ValidatorConfig) StrategyType() StakerStrategy { + return c.strategy +} var DefaultL1ValidatorConfig = L1ValidatorConfig{ Enable: true, @@ -253,6 +258,8 @@ type validatedNode struct { hash common.Hash } +type L1ValidatorConfigFetcher func() *L1ValidatorConfig + type Staker struct { *L1Validator stopwaiter.StopWaiter @@ -267,8 +274,8 @@ type Staker struct { inactiveLastCheckedNode *nodeAndHash inactiveValidatedNodes *btree.BTreeG[validatedNode] bringActiveUntilNode uint64 - inboxReader InboxReaderInterface - statelessBlockValidator *StatelessBlockValidator + inboxReader staker.InboxReaderInterface + statelessBlockValidator *staker.StatelessBlockValidator fatalErr chan<- error fastConfirmSafe *FastConfirmSafe } @@ -284,7 +291,7 @@ type ValidatorWalletInterface interface { ChallengeManagerAddress() common.Address L1Client() *ethclient.Client TestTransactions(context.Context, []*types.Transaction) error - ExecuteTransactions(context.Context, *txbuilder.Builder, common.Address) (*types.Transaction, error) + ExecuteTransactions(context.Context, []*types.Transaction, common.Address) (*types.Transaction, error) TimeoutChallenges(context.Context, []uint64) (*types.Transaction, error) CanBatchTxs() bool AuthIfEoa() *bind.TransactOpts @@ -299,8 +306,8 @@ func NewStaker( wallet ValidatorWalletInterface, callOpts bind.CallOpts, config L1ValidatorConfigFetcher, - blockValidator *BlockValidator, - statelessBlockValidator *StatelessBlockValidator, + blockValidator *staker.BlockValidator, + statelessBlockValidator *staker.StatelessBlockValidator, stakedNotifiers []LatestStakedNotifier, confirmedNotifiers []LatestConfirmedNotifier, validatorUtilsAddress common.Address, @@ -310,15 +317,12 @@ func NewStaker( return nil, err } client := l1Reader.Client() - val, err := NewL1Validator(client, wallet, validatorUtilsAddress, callOpts, - 
statelessBlockValidator.inboxTracker, statelessBlockValidator.streamer, blockValidator) + val, err := NewL1Validator(client, wallet, validatorUtilsAddress, config().GasRefunder(), callOpts, + statelessBlockValidator.InboxTracker(), statelessBlockValidator.InboxStreamer(), blockValidator) if err != nil { return nil, err } stakerLastSuccessfulActionGauge.Update(time.Now().Unix()) - if config().StartValidationFromStaked && blockValidator != nil { - stakedNotifiers = append(stakedNotifiers, blockValidator) - } inactiveValidatedNodes := btree.NewG(2, func(a, b validatedNode) bool { return a.number < b.number || (a.number == b.number && a.hash.Cmp(b.hash) < 0) }) @@ -331,7 +335,7 @@ func NewStaker( config: config, highGasBlocksBuffer: big.NewInt(config().PostingStrategy.HighGasDelayBlocks), lastActCalledBlock: nil, - inboxReader: statelessBlockValidator.inboxReader, + inboxReader: statelessBlockValidator.InboxReader(), statelessBlockValidator: statelessBlockValidator, fatalErr: fatalErr, inactiveValidatedNodes: inactiveValidatedNodes, @@ -347,6 +351,21 @@ func (s *Staker) Initialize(ctx context.Context) error { if walletAddressOrZero != (common.Address{}) { s.updateStakerBalanceMetric(ctx) } + var stakerAddr common.Address + if s.L1Validator.wallet.DataPoster() != nil { + stakerAddr = s.L1Validator.wallet.DataPoster().Sender() + } + whiteListed, err := s.isWhitelisted(ctx) + if err != nil { + return fmt.Errorf("error checking if whitelisted: %w", err) + } + log.Info( + "running as validator", + "txSender", stakerAddr, + "actingAsWallet", walletAddressOrZero, + "whitelisted", whiteListed, + "strategy", s.Strategy(), + ) if s.blockValidator != nil && s.config().StartValidationFromStaked { latestStaked, _, err := s.validatorUtils.LatestStaked(&s.baseCallOpts, s.rollupAddress, walletAddressOrZero) if err != nil { @@ -407,7 +426,7 @@ func (s *Staker) setupFastConfirmation(ctx context.Context) error { fastConfirmer, s.builder, s.wallet, - cfg.gasRefunder, + cfg.GasRefunder(), s.l1Reader, ) if err != nil { @@ -444,23 +463,20 @@ func (s *Staker) tryFastConfirmation(ctx context.Context, blockHash common.Hash, if s.fastConfirmSafe != nil { return s.fastConfirmSafe.tryFastConfirmation(ctx, blockHash, sendRoot, nodeHash) } - auth, err := s.builder.Auth(ctx) - if err != nil { - return err - } + auth := s.builder.Auth(ctx) log.Info("Fast confirming node with wallet", "wallet", auth.From, "nodeHash", nodeHash) - _, err = s.rollup.FastConfirmNextNode(auth, blockHash, sendRoot, nodeHash) + _, err := s.rollup.FastConfirmNextNode(auth, blockHash, sendRoot, nodeHash) return err } -func (s *Staker) getLatestStakedState(ctx context.Context, staker common.Address) (uint64, arbutil.MessageIndex, *validator.GoGlobalState, error) { +func (s *Staker) getLatestStakedState(ctx context.Context, stakerAddress common.Address) (uint64, arbutil.MessageIndex, *validator.GoGlobalState, error) { callOpts := s.getCallOpts(ctx) if s.l1Reader.UseFinalityData() { callOpts.BlockNumber = big.NewInt(int64(rpc.FinalizedBlockNumber)) } - latestStaked, _, err := s.validatorUtils.LatestStaked(s.getCallOpts(ctx), s.rollupAddress, staker) + latestStaked, _, err := s.validatorUtils.LatestStaked(s.getCallOpts(ctx), s.rollupAddress, stakerAddress) if err != nil { - return 0, 0, nil, fmt.Errorf("couldn't get LatestStaked(%v): %w", staker, err) + return 0, 0, nil, fmt.Errorf("couldn't get LatestStaked(%v): %w", stakerAddress, err) } if latestStaked == 0 { return latestStaked, 0, nil, nil @@ -468,21 +484,21 @@ func (s *Staker) 
getLatestStakedState(ctx context.Context, staker common.Address stakedInfo, err := s.rollup.LookupNode(ctx, latestStaked) if err != nil { - return 0, 0, nil, fmt.Errorf("couldn't look up latest assertion of %v (%v): %w", staker, latestStaked, err) + return 0, 0, nil, fmt.Errorf("couldn't look up latest assertion of %v (%v): %w", stakerAddress, latestStaked, err) } globalState := stakedInfo.AfterState().GlobalState - caughtUp, count, err := GlobalStateToMsgCount(s.inboxTracker, s.txStreamer, globalState) + caughtUp, count, err := staker.GlobalStateToMsgCount(s.inboxTracker, s.txStreamer, globalState) if err != nil { - if errors.Is(err, ErrGlobalStateNotInChain) && s.fatalErr != nil { - fatal := fmt.Errorf("latest assertion of %v (%v) not in chain: %w", staker, latestStaked, err) + if errors.Is(err, staker.ErrGlobalStateNotInChain) && s.fatalErr != nil { + fatal := fmt.Errorf("latest assertion of %v (%v) not in chain: %w", stakerAddress, latestStaked, err) s.fatalErr <- fatal } - return 0, 0, nil, fmt.Errorf("latest assertion of %v (%v): %w", staker, latestStaked, err) + return 0, 0, nil, fmt.Errorf("latest assertion of %v (%v): %w", stakerAddress, latestStaked, err) } if !caughtUp { - log.Info("latest assertion not yet in our node", "staker", staker, "assertion", latestStaked, "state", globalState) + log.Info("latest assertion not yet in our node", "stakerAddress", stakerAddress, "assertion", latestStaked, "state", globalState) return latestStaked, 0, nil, nil } @@ -492,7 +508,7 @@ func (s *Staker) getLatestStakedState(ctx context.Context, staker common.Address } if processedCount < count { - log.Info("execution catching up to rollup", "staker", staker, "rollupCount", count, "processedCount", processedCount) + log.Info("execution catching up to rollup", "stakerAddress", stakerAddress, "rollupCount", count, "processedCount", processedCount) return latestStaked, 0, nil, nil } @@ -507,9 +523,6 @@ func (s *Staker) StopAndWait() { } func (s *Staker) Start(ctxIn context.Context) { - if s.Strategy() != WatchtowerStrategy { - s.wallet.Start(ctxIn) - } s.StopWaiter.Start(ctxIn, s) backoff := time.Second isAheadOfOnChainNonceEphemeralErrorHandler := util.NewEphemeralErrorHandler(10*time.Minute, "is ahead of on-chain nonce", 0) @@ -607,7 +620,7 @@ func (s *Staker) Start(ctxIn context.Context) { }) } -func (s *Staker) IsWhitelisted(ctx context.Context) (bool, error) { +func (s *Staker) isWhitelisted(ctx context.Context) (bool, error) { callOpts := s.getCallOpts(ctx) whitelistDisabled, err := s.rollup.ValidatorWhitelistDisabled(callOpts) if err != nil { @@ -696,12 +709,12 @@ func (s *Staker) confirmDataPosterIsReady(ctx context.Context) error { func (s *Staker) Act(ctx context.Context) (*types.Transaction, error) { cfg := s.config() - if cfg.strategy != WatchtowerStrategy { + if cfg.StrategyType() != WatchtowerStrategy { err := s.confirmDataPosterIsReady(ctx) if err != nil { return nil, err } - whitelisted, err := s.IsWhitelisted(ctx) + whitelisted, err := s.isWhitelisted(ctx) if err != nil { return nil, fmt.Errorf("error checking if whitelisted: %w", err) } @@ -715,7 +728,7 @@ func (s *Staker) Act(ctx context.Context) (*types.Transaction, error) { } callOpts := s.getCallOpts(ctx) s.builder.ClearTransactions() - var rawInfo *StakerInfo + var rawInfo *staker.StakerInfo walletAddressOrZero := s.wallet.AddressOrZero() if walletAddressOrZero != (common.Address{}) { var err error @@ -751,7 +764,7 @@ func (s *Staker) Act(ctx context.Context) (*types.Transaction, error) { StakeExists: rawInfo != nil, } - 
effectiveStrategy := cfg.strategy + effectiveStrategy := cfg.StrategyType() nodesLinear, err := s.validatorUtils.AreUnresolvedNodesLinear(callOpts, s.rollupAddress) if err != nil { return nil, fmt.Errorf("error checking for rollup assertion fork: %w", err) @@ -818,7 +831,7 @@ func (s *Staker) Act(ctx context.Context) (*types.Transaction, error) { } if s.builder.BuildingTransactionCount() > 0 { // Try to fast confirm previous nodes before working on new ones - return s.wallet.ExecuteTransactions(ctx, s.builder, cfg.gasRefunder) + return s.builder.ExecuteTransactions(ctx) } } } @@ -888,24 +901,17 @@ func (s *Staker) Act(ctx context.Context) (*types.Transaction, error) { stakeIsUnwanted := effectiveStrategy < StakeLatestStrategy if stakeIsTooOutdated || stakeIsUnwanted { // Note: we must have an address if rawInfo != nil - auth, err := s.builder.Auth(ctx) - if err != nil { - return nil, err - } + auth := s.builder.Auth(ctx) _, err = s.rollup.ReturnOldDeposit(auth, walletAddressOrZero) if err != nil { return nil, fmt.Errorf("error returning old deposit (from our staker %v): %w", walletAddressOrZero, err) } - auth, err = s.builder.Auth(ctx) - if err != nil { - return nil, err - } _, err = s.rollup.WithdrawStakerFunds(auth) if err != nil { return nil, fmt.Errorf("error withdrawing staker funds from our staker %v: %w", walletAddressOrZero, err) } log.Info("removing old stake and withdrawing funds") - return s.wallet.ExecuteTransactions(ctx, s.builder, cfg.gasRefunder) + return s.builder.ExecuteTransactions(ctx) } } @@ -915,11 +921,7 @@ func (s *Staker) Act(ctx context.Context) (*types.Transaction, error) { return nil, fmt.Errorf("error checking withdrawable funds of our staker %v: %w", walletAddressOrZero, err) } if withdrawable.Sign() > 0 { - auth, err := s.builder.Auth(ctx) - if err != nil { - return nil, err - } - _, err = s.rollup.WithdrawStakerFunds(auth) + _, err = s.rollup.WithdrawStakerFunds(s.builder.Auth(ctx)) if err != nil { return nil, fmt.Errorf("error withdrawing our staker %v funds: %w", walletAddressOrZero, err) } @@ -959,10 +961,10 @@ func (s *Staker) Act(ctx context.Context) (*types.Transaction, error) { if info.StakerInfo == nil && info.StakeExists { log.Info("staking to execute transactions") } - return s.wallet.ExecuteTransactions(ctx, s.builder, cfg.gasRefunder) + return s.builder.ExecuteTransactions(ctx) } -func (s *Staker) handleConflict(ctx context.Context, info *StakerInfo) error { +func (s *Staker) handleConflict(ctx context.Context, info *staker.StakerInfo) error { if info.CurrentChallenge == nil { s.activeChallenge = nil return nil @@ -978,8 +980,8 @@ func (s *Staker) handleConflict(ctx context.Context, info *StakerInfo) error { newChallengeManager, err := NewChallengeManager( ctx, - s.builder, - s.builder.BuilderAuth(), + s.client, + s.builder.Auth(context.TODO()), *s.builder.WalletAddress(), s.wallet.ChallengeManagerAddress(), *info.CurrentChallenge, @@ -1037,11 +1039,7 @@ func (s *Staker) advanceStake(ctx context.Context, info *OurStakerInfo, effectiv // We'll return early if we already have a stake if info.StakeExists { - auth, err := s.builder.Auth(ctx) - if err != nil { - return err - } - _, err = s.rollup.StakeOnNewNode(auth, action.assertion.AsSolidityStruct(), action.hash, action.prevInboxMaxCount) + _, err = s.rollup.StakeOnNewNode(s.builder.Auth(ctx), action.assertion.AsSolidityStruct(), action.hash, action.prevInboxMaxCount) if err != nil { return fmt.Errorf("error staking on new node: %w", err) } @@ -1053,12 +1051,8 @@ func (s *Staker) advanceStake(ctx 
context.Context, info *OurStakerInfo, effectiv if err != nil { return fmt.Errorf("error getting current required stake: %w", err) } - auth, err := s.builder.AuthWithAmount(ctx, stakeAmount) - if err != nil { - return err - } _, err = s.rollup.NewStakeOnNewNode( - auth, + s.builder.AuthWithAmount(ctx, stakeAmount), action.assertion.AsSolidityStruct(), action.hash, action.prevInboxMaxCount, @@ -1091,11 +1085,7 @@ func (s *Staker) advanceStake(ctx context.Context, info *OurStakerInfo, effectiv log.Info("staking on existing node", "node", action.number) // We'll return early if we already havea stake if info.StakeExists { - auth, err := s.builder.Auth(ctx) - if err != nil { - return err - } - _, err = s.rollup.StakeOnExistingNode(auth, action.number, action.hash) + _, err = s.rollup.StakeOnExistingNode(s.builder.Auth(ctx), action.number, action.hash) if err != nil { return fmt.Errorf("error staking on existing node: %w", err) } @@ -1107,12 +1097,8 @@ func (s *Staker) advanceStake(ctx context.Context, info *OurStakerInfo, effectiv if err != nil { return fmt.Errorf("error getting current required stake: %w", err) } - auth, err := s.builder.AuthWithAmount(ctx, stakeAmount) - if err != nil { - return err - } _, err = s.rollup.NewStakeOnExistingNode( - auth, + s.builder.AuthWithAmount(ctx, stakeAmount), action.number, action.hash, ) @@ -1126,7 +1112,7 @@ func (s *Staker) advanceStake(ctx context.Context, info *OurStakerInfo, effectiv } } -func (s *Staker) createConflict(ctx context.Context, info *StakerInfo) error { +func (s *Staker) createConflict(ctx context.Context, info *staker.StakerInfo) error { if info.CurrentChallenge != nil { return nil } @@ -1188,12 +1174,8 @@ func (s *Staker) createConflict(ctx context.Context, info *StakerInfo) error { return fmt.Errorf("error looking up node %v: %w", conflictInfo.Node2, err) } log.Warn("creating challenge", "node1", conflictInfo.Node1, "node2", conflictInfo.Node2, "otherStaker", staker) - auth, err := s.builder.Auth(ctx) - if err != nil { - return err - } _, err = s.rollup.CreateChallenge( - auth, + s.builder.Auth(ctx), [2]common.Address{staker1, staker2}, [2]uint64{conflictInfo.Node1, conflictInfo.Node2}, node1Info.MachineStatuses(), @@ -1212,10 +1194,10 @@ func (s *Staker) createConflict(ctx context.Context, info *StakerInfo) error { } func (s *Staker) Strategy() StakerStrategy { - return s.config().strategy + return s.config().StrategyType() } -func (s *Staker) Rollup() *RollupWatcher { +func (s *Staker) Rollup() *staker.RollupWatcher { return s.rollup } diff --git a/staker/multi_protocol/multi_protocol_staker.go b/staker/multi_protocol/multi_protocol_staker.go new file mode 100644 index 0000000000..0c104094ed --- /dev/null +++ b/staker/multi_protocol/multi_protocol_staker.go @@ -0,0 +1,249 @@ +package multiprotocolstaker + +import ( + "context" + "fmt" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/node" + + "github.com/offchainlabs/bold/solgen/go/bridgegen" + boldrollup "github.com/offchainlabs/bold/solgen/go/rollupgen" + "github.com/offchainlabs/nitro/staker" + boldstaker "github.com/offchainlabs/nitro/staker/bold" + legacystaker "github.com/offchainlabs/nitro/staker/legacy" + "github.com/offchainlabs/nitro/staker/txbuilder" + "github.com/offchainlabs/nitro/util/headerreader" + "github.com/offchainlabs/nitro/util/stopwaiter" +) + +const boldArt = ` + _______ __ _______ +/ \ / | / \ +$$$$$$$ | ______ $$ | $$$$$$$ 
| +$$ |__$$ | / \ $$ | $$ | $$ | +$$ $$< /$$$$$$ |$$ | $$ | $$ | +$$$$$$$ |$$ | $$ |$$ | $$ | $$ | +$$ |__$$ |$$ \__$$ |$$ |_____ $$ |__$$ | +$$ $$/ $$ $$/ $$ |$$ $$/ +$$$$$$$/ $$$$$$/ $$$$$$$$/ $$$$$$$/ +` + +type MultiProtocolStaker struct { + stopwaiter.StopWaiter + bridge *bridgegen.IBridge + oldStaker *legacystaker.Staker + boldStaker *boldstaker.BOLDStaker + legacyConfig legacystaker.L1ValidatorConfigFetcher + stakedNotifiers []legacystaker.LatestStakedNotifier + confirmedNotifiers []legacystaker.LatestConfirmedNotifier + statelessBlockValidator *staker.StatelessBlockValidator + wallet legacystaker.ValidatorWalletInterface + l1Reader *headerreader.HeaderReader + blockValidator *staker.BlockValidator + callOpts bind.CallOpts + boldConfig *boldstaker.BoldConfig + stakeTokenAddress common.Address + stack *node.Node +} + +func NewMultiProtocolStaker( + stack *node.Node, + l1Reader *headerreader.HeaderReader, + wallet legacystaker.ValidatorWalletInterface, + callOpts bind.CallOpts, + legacyConfig legacystaker.L1ValidatorConfigFetcher, + boldConfig *boldstaker.BoldConfig, + blockValidator *staker.BlockValidator, + statelessBlockValidator *staker.StatelessBlockValidator, + stakedNotifiers []legacystaker.LatestStakedNotifier, + stakeTokenAddress common.Address, + confirmedNotifiers []legacystaker.LatestConfirmedNotifier, + validatorUtilsAddress common.Address, + bridgeAddress common.Address, + fatalErr chan<- error, +) (*MultiProtocolStaker, error) { + if err := legacyConfig().Validate(); err != nil { + return nil, err + } + if legacyConfig().StartValidationFromStaked && blockValidator != nil { + stakedNotifiers = append(stakedNotifiers, blockValidator) + } + oldStaker, err := legacystaker.NewStaker( + l1Reader, + wallet, + callOpts, + legacyConfig, + blockValidator, + statelessBlockValidator, + stakedNotifiers, + confirmedNotifiers, + validatorUtilsAddress, + fatalErr, + ) + if err != nil { + return nil, err + } + bridge, err := bridgegen.NewIBridge(bridgeAddress, l1Reader.Client()) + if err != nil { + return nil, err + } + return &MultiProtocolStaker{ + oldStaker: oldStaker, + boldStaker: nil, + bridge: bridge, + legacyConfig: legacyConfig, + stakedNotifiers: stakedNotifiers, + confirmedNotifiers: confirmedNotifiers, + statelessBlockValidator: statelessBlockValidator, + wallet: wallet, + l1Reader: l1Reader, + blockValidator: blockValidator, + callOpts: callOpts, + boldConfig: boldConfig, + stakeTokenAddress: stakeTokenAddress, + stack: stack, + }, nil +} + +func (m *MultiProtocolStaker) Initialize(ctx context.Context) error { + boldActive, rollupAddress, err := m.isBoldActive(ctx) + if err != nil { + return err + } + if boldActive { + log.Info("BoLD protocol is active, initializing BoLD staker") + log.Info(boldArt) + if err := m.setupBoldStaker(ctx, rollupAddress); err != nil { + return err + } + m.oldStaker = nil + return m.boldStaker.Initialize(ctx) + } + log.Info("BoLD protocol not detected on startup, using old staker until upgrade") + return m.oldStaker.Initialize(ctx) +} + +func (m *MultiProtocolStaker) Start(ctxIn context.Context) { + m.StopWaiter.Start(ctxIn, m) + m.wallet.Start(ctxIn) + if m.boldStaker != nil { + log.Info("Starting BOLD staker") + m.boldStaker.Start(ctxIn) + } else { + log.Info("Starting pre-BOLD staker") + m.oldStaker.Start(ctxIn) + stakerSwitchInterval := m.boldConfig.CheckStakerSwitchInterval + m.CallIteratively(func(ctx context.Context) time.Duration { + switchedToBoldProtocol, err := m.checkAndSwitchToBoldStaker(ctxIn) + if err != nil { + log.Warn("staker: 
error in checking switch to bold staker", "err", err) + return stakerSwitchInterval + } + if switchedToBoldProtocol { + log.Info("Detected BOLD protocol upgrade, stopping old staker and starting BOLD staker") + // Ready to stop the old staker. + m.oldStaker.StopOnly() + m.StopOnly() + } + return stakerSwitchInterval + }) + } +} + +func (m *MultiProtocolStaker) StopAndWait() { + if m.boldStaker != nil { + m.boldStaker.StopAndWait() + } + if m.oldStaker != nil { + m.oldStaker.StopAndWait() + } + m.StopWaiter.StopAndWait() +} + +func (m *MultiProtocolStaker) isBoldActive(ctx context.Context) (bool, common.Address, error) { + var addr common.Address + if !m.boldConfig.Enable { + return false, addr, nil + } + callOpts := m.getCallOpts(ctx) + rollupAddress, err := m.bridge.Rollup(callOpts) + if err != nil { + return false, addr, err + } + userLogic, err := boldrollup.NewRollupUserLogic(rollupAddress, m.l1Reader.Client()) + if err != nil { + return false, addr, err + } + _, err = userLogic.ChallengeGracePeriodBlocks(callOpts) + if err != nil && !headerreader.ExecutionRevertedRegexp.MatchString(err.Error()) { + // Unexpected error, perhaps an L1 issue? + return false, addr, err + } + // ChallengeGracePeriodBlocks only exists in the BOLD rollup contracts. + return err == nil, rollupAddress, nil +} + +func (m *MultiProtocolStaker) checkAndSwitchToBoldStaker(ctx context.Context) (bool, error) { + shouldSwitch, rollupAddress, err := m.isBoldActive(ctx) + if err != nil { + return false, err + } + if !shouldSwitch { + return false, nil + } + if err := m.setupBoldStaker(ctx, rollupAddress); err != nil { + return false, err + } + if err = m.boldStaker.Initialize(ctx); err != nil { + return false, err + } + m.boldStaker.Start(ctx) + return true, nil +} + +func (m *MultiProtocolStaker) getCallOpts(ctx context.Context) *bind.CallOpts { + opts := m.callOpts + opts.Context = ctx + return &opts +} + +func (m *MultiProtocolStaker) setupBoldStaker( + ctx context.Context, + rollupAddress common.Address, +) error { + stakeTokenContract, err := m.l1Reader.Client().CodeAt(ctx, m.stakeTokenAddress, nil) + if err != nil { + return err + } + if len(stakeTokenContract) == 0 { + return fmt.Errorf("stake token address for BoLD %v does not point to a contract", m.stakeTokenAddress) + } + txBuilder, err := txbuilder.NewBuilder(m.wallet, m.legacyConfig().GasRefunder()) + if err != nil { + return err + } + boldStaker, err := boldstaker.NewBOLDStaker( + ctx, + m.stack, + rollupAddress, + m.callOpts, + txBuilder.SingleTxAuth(), + m.l1Reader, + m.blockValidator, + m.statelessBlockValidator, + m.boldConfig, + m.wallet.DataPoster(), + m.wallet, + m.stakedNotifiers, + m.confirmedNotifiers, + ) + if err != nil { + return err + } + m.boldStaker = boldStaker + return nil +} diff --git a/staker/rollup_watcher.go b/staker/rollup_watcher.go index 8b27e544b1..b117b30c2b 100644 --- a/staker/rollup_watcher.go +++ b/staker/rollup_watcher.go @@ -137,6 +137,10 @@ func (r *RollupWatcher) Initialize(ctx context.Context) error { return err } +func (r *RollupWatcher) Client() RollupWatcherL1Interface { + return r.client +} + func (r *RollupWatcher) LookupCreation(ctx context.Context) (*rollupgen.RollupUserLogicRollupInitialized, error) { var query = ethereum.FilterQuery{ FromBlock: r.fromBlock, diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go index bb25a38f5d..62e772d5f8 100644 --- a/staker/stateless_block_validator.go +++ b/staker/stateless_block_validator.go @@ -68,6 +68,7 @@ type TransactionStreamerInterface 
interface { type InboxReaderInterface interface { GetSequencerMessageBytes(ctx context.Context, seqNum uint64) ([]byte, common.Hash, error) + GetFinalizedMsgCount(ctx context.Context) (arbutil.MessageIndex, error) } type GlobalStatePosition struct { @@ -281,6 +282,22 @@ func (v *StatelessBlockValidator) readPostedBatch(ctx context.Context, batchNum return postedData, err } +func (v *StatelessBlockValidator) InboxTracker() InboxTrackerInterface { + return v.inboxTracker +} + +func (v *StatelessBlockValidator) InboxReader() InboxReaderInterface { + return v.inboxReader +} + +func (v *StatelessBlockValidator) InboxStreamer() TransactionStreamerInterface { + return v.streamer +} + +func (v *StatelessBlockValidator) ExecutionSpawners() []validator.ExecutionSpawner { + return v.execSpawners +} + func (v *StatelessBlockValidator) readFullBatch(ctx context.Context, batchNum uint64) (bool, *FullBatchInfo, error) { batchCount, err := v.inboxTracker.GetBatchCount() if err != nil { @@ -379,7 +396,7 @@ func (v *StatelessBlockValidator) ValidationEntryRecord(ctx context.Context, e * return nil } -func buildGlobalState(res execution.MessageResult, pos GlobalStatePosition) validator.GoGlobalState { +func BuildGlobalState(res execution.MessageResult, pos GlobalStatePosition) validator.GoGlobalState { return validator.GoGlobalState{ BlockHash: res.BlockHash, SendRoot: res.SendRoot, @@ -431,8 +448,8 @@ func (v *StatelessBlockValidator) CreateReadyValidationEntry(ctx context.Context if err != nil { return nil, fmt.Errorf("failed calculating position for validation: %w", err) } - start := buildGlobalState(*prevResult, startPos) - end := buildGlobalState(*result, endPos) + start := BuildGlobalState(*prevResult, startPos) + end := BuildGlobalState(*result, endPos) found, fullBatchInfo, err := v.readFullBatch(ctx, start.Batch) if err != nil { return nil, err diff --git a/staker/txbuilder/builder.go b/staker/txbuilder/builder.go index f52b03a781..b352036c7a 100644 --- a/staker/txbuilder/builder.go +++ b/staker/txbuilder/builder.go @@ -5,59 +5,97 @@ package txbuilder import ( "context" + "fmt" "math/big" + "sync" - "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethclient" ) type ValidatorWalletInterface interface { // Address must be able to be called concurrently with other functions Address() *common.Address - L1Client() *ethclient.Client - TestTransactions(context.Context, []*types.Transaction) error - ExecuteTransactions(context.Context, *Builder, common.Address) (*types.Transaction, error) + TestTransactions(ctx context.Context, txs []*types.Transaction) error + ExecuteTransactions(ctx context.Context, txs []*types.Transaction, gasRefunder common.Address) (*types.Transaction, error) AuthIfEoa() *bind.TransactOpts } -// Builder combines any transactions sent to it via SendTransaction into one batch, +// Builder combines any transactions signed via it into one batch, // which is then sent to the validator wallet. // This lets the validator make multiple atomic transactions. -// This inherits from an ethclient.Client so it can be used to transparently -// intercept calls to SendTransaction and queue them for the next batch. 
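
[Editor's note] The rewritten txbuilder below drops the embedded ethclient.Client: instead of intercepting SendTransaction, the Builder now hands out bind.TransactOpts whose Signer callback queues each signed transaction (with NoSend set so bindings never broadcast it), and callers later flush the queue through Builder.ExecuteTransactions. The following is a minimal, self-contained sketch of that capture pattern only; capturingOpts and the package name are illustrative and are not part of this patch.

// txcapture_sketch.go — illustrative sketch, not part of the diff.
package txcapture

import (
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// capturingOpts copies an auth and wraps its Signer so every transaction
// signed through the returned opts is appended to sink instead of being
// broadcast; NoSend stops abigen bindings from sending it themselves.
func capturingOpts(base *bind.TransactOpts, sink *[]*types.Transaction) *bind.TransactOpts {
	opts := *base
	inner := opts.Signer // assumed non-nil for this sketch
	opts.NoSend = true
	opts.Signer = func(addr common.Address, tx *types.Transaction) (*types.Transaction, error) {
		signed, err := inner(addr, tx)
		if err != nil {
			return nil, err
		}
		*sink = append(*sink, signed) // queued for a later batched execution
		return signed, nil
	}
	return &opts
}

A caller takes an auth from the builder, passes it to generated contract bindings as usual, and later flushes the queued batch; this mirrors how staker.go now pairs s.builder.Auth(ctx) with s.builder.ExecuteTransactions(ctx) in the hunks above.
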
type Builder struct { - *ethclient.Client transactions []*types.Transaction - builderAuth *bind.TransactOpts + singleTxAuth bind.TransactOpts + multiTxAuth bind.TransactOpts isAuthFake bool + authMutex sync.Mutex wallet ValidatorWalletInterface + gasRefunder common.Address } -func NewBuilder(wallet ValidatorWalletInterface) (*Builder, error) { - randKey, err := crypto.GenerateKey() - if err != nil { - return nil, err - } - builderAuth := wallet.AuthIfEoa() +func NewBuilder(wallet ValidatorWalletInterface, gasRefunder common.Address) (*Builder, error) { + var builderAuth bind.TransactOpts var isAuthFake bool - if builderAuth == nil { - // Make a fake auth so we have txs to give to the smart contract wallet - builderAuth, err = bind.NewKeyedTransactorWithChainID(randKey, big.NewInt(9999999)) + if authIfEoa := wallet.AuthIfEoa(); authIfEoa != nil { + builderAuth = *authIfEoa + } else { + isAuthFake = true + var addressOrZero common.Address + if addr := wallet.Address(); addr != nil { + addressOrZero = *addr + } + builderAuth = bind.TransactOpts{ + From: addressOrZero, + GasLimit: 123, // don't gas estimate, that's done when the real tx is created + Signer: func(_ common.Address, tx *types.Transaction) (*types.Transaction, error) { + return tx, nil + }, + } + } + builderAuth.NoSend = true + builder := &Builder{ + singleTxAuth: builderAuth, + multiTxAuth: builderAuth, + wallet: wallet, + isAuthFake: isAuthFake, + gasRefunder: gasRefunder, + } + originalSigner := builderAuth.Signer + builder.multiTxAuth.Signer = func(addr common.Address, tx *types.Transaction) (*types.Transaction, error) { + tx, err := originalSigner(addr, tx) if err != nil { return nil, err } - isAuthFake = true + // Append the transaction to the builder's queue of transactions + builder.transactions = append(builder.transactions, tx) + err = builder.wallet.TestTransactions(context.TODO(), builder.transactions) + if err != nil { + // Remove the bad tx + builder.transactions = builder.transactions[:len(builder.transactions)-1] + return nil, err + } + return tx, nil + } + builder.singleTxAuth.Signer = func(addr common.Address, tx *types.Transaction) (*types.Transaction, error) { + if !isAuthFake { + return originalSigner(addr, tx) + } + // Try to process the transaction on its own + ctx := context.TODO() + txs := []*types.Transaction{tx} + err := builder.wallet.TestTransactions(ctx, txs) + if err != nil { + return nil, fmt.Errorf("failed to test builder transaction: %w", err) + } + signedTx, err := builder.wallet.ExecuteTransactions(ctx, txs, gasRefunder) + if err != nil { + return nil, fmt.Errorf("failed to execute builder transaction: %w", err) + } + return signedTx, nil } - return &Builder{ - builderAuth: builderAuth, - wallet: wallet, - Client: wallet.L1Client(), - isAuthFake: isAuthFake, - }, nil + return builder, nil } func (b *Builder) BuildingTransactionCount() int { @@ -68,59 +106,45 @@ func (b *Builder) ClearTransactions() { b.transactions = nil } -func (b *Builder) EstimateGas(ctx context.Context, call ethereum.CallMsg) (gas uint64, err error) { - if len(b.transactions) == 0 && !b.isAuthFake { - return b.Client.EstimateGas(ctx, call) +func (b *Builder) tryToFillAuthAddress() { + if b.multiTxAuth.From == (common.Address{}) { + if addr := b.wallet.Address(); addr != nil { + b.multiTxAuth.From = *addr + b.singleTxAuth.From = *addr + } } - return 0, nil } -func (b *Builder) SendTransaction(ctx context.Context, tx *types.Transaction) error { - b.transactions = append(b.transactions, tx) - err := 
b.wallet.TestTransactions(ctx, b.transactions) - if err != nil { - // Remove the bad tx - b.transactions = b.transactions[:len(b.transactions)-1] - return err - } - return nil -} - -// While this is not currently required, it's recommended not to reuse the returned auth for multiple transactions, -// as for an EOA this has the nonce in it. However, the EOA wwallet currently will only publish the first created tx, -// which is why that doesn't really matter. -func (b *Builder) AuthWithAmount(ctx context.Context, amount *big.Int) (*bind.TransactOpts, error) { - nonce, err := b.NonceAt(ctx, b.builderAuth.From, nil) - if err != nil { - return nil, err - } - return &bind.TransactOpts{ - From: b.builderAuth.From, - Nonce: new(big.Int).SetUint64(nonce), - Signer: b.builderAuth.Signer, - Value: amount, - GasPrice: b.builderAuth.GasPrice, - GasLimit: b.builderAuth.GasLimit, - Context: ctx, - }, nil +func (b *Builder) AuthWithAmount(ctx context.Context, amount *big.Int) *bind.TransactOpts { + b.authMutex.Lock() + defer b.authMutex.Unlock() + b.tryToFillAuthAddress() + auth := b.multiTxAuth + auth.Context = ctx + auth.Value = amount + return &auth } // Auth is the same as AuthWithAmount with a 0 amount specified. -// See AuthWithAmount docs for important details. -func (b *Builder) Auth(ctx context.Context) (*bind.TransactOpts, error) { +func (b *Builder) Auth(ctx context.Context) *bind.TransactOpts { return b.AuthWithAmount(ctx, common.Big0) } -func (b *Builder) Transactions() []*types.Transaction { - return b.transactions -} - -// Auth is the same as AuthWithAmount with a 0 amount specified. -// See AuthWithAmount docs for important details. -func (b *Builder) BuilderAuth() *bind.TransactOpts { - return b.builderAuth +// SingleTxAuth should be used if you need an auth without the transaction batching of the builder. 
+func (b *Builder) SingleTxAuth() *bind.TransactOpts { + b.authMutex.Lock() + defer b.authMutex.Unlock() + b.tryToFillAuthAddress() + auth := b.singleTxAuth + return &auth } func (b *Builder) WalletAddress() *common.Address { return b.wallet.Address() } + +func (b *Builder) ExecuteTransactions(ctx context.Context) (*types.Transaction, error) { + tx, err := b.wallet.ExecuteTransactions(ctx, b.transactions, b.gasRefunder) + b.ClearTransactions() + return tx, err +} diff --git a/staker/validatorwallet/contract.go b/staker/validatorwallet/contract.go index 4d4f8288ef..22f579b82d 100644 --- a/staker/validatorwallet/contract.go +++ b/staker/validatorwallet/contract.go @@ -9,6 +9,7 @@ import ( "fmt" "math/big" "strings" + "sync" "sync/atomic" "github.com/ethereum/go-ethereum" @@ -22,7 +23,6 @@ import ( "github.com/offchainlabs/nitro/arbnode/dataposter" "github.com/offchainlabs/nitro/solgen/go/rollupgen" - "github.com/offchainlabs/nitro/staker/txbuilder" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/headerreader" ) @@ -61,6 +61,7 @@ type Contract struct { challengeManagerAddress common.Address dataPoster *dataposter.DataPoster getExtraGas func() uint64 + populateWalletMutex sync.Mutex } func NewContract(dp *dataposter.DataPoster, address *common.Address, walletFactoryAddr, rollupAddress common.Address, l1Reader *headerreader.HeaderReader, auth *bind.TransactOpts, rollupFromBlock int64, onWalletCreated func(common.Address), @@ -155,42 +156,22 @@ func (v *Contract) From() common.Address { return v.auth.From } -// nil value == 0 value -func getAuthWithUpdatedNonceFromL1(ctx context.Context, l1Reader *headerreader.HeaderReader, auth bind.TransactOpts, value *big.Int) (*bind.TransactOpts, error) { - auth.Context = ctx - auth.Value = value - nonce, err := l1Reader.Client().NonceAt(ctx, auth.From, nil) - if err != nil { - return nil, err - } - auth.Nonce = new(big.Int).SetUint64(nonce) - return &auth, nil -} - -func (v *Contract) getAuth(ctx context.Context, value *big.Int) (*bind.TransactOpts, error) { - return getAuthWithUpdatedNonceFromL1(ctx, v.l1Reader, *v.auth, value) -} - func (v *Contract) executeTransaction(ctx context.Context, tx *types.Transaction, gasRefunder common.Address) (*types.Transaction, error) { - auth, err := v.getAuth(ctx, tx.Value()) - if err != nil { - return nil, err - } data, err := validatorABI.Pack("executeTransactionWithGasRefunder", gasRefunder, tx.Data(), *tx.To(), tx.Value()) if err != nil { return nil, fmt.Errorf("packing arguments for executeTransactionWithGasRefunder: %w", err) } - gas, err := v.gasForTxData(ctx, auth, data) + gas, err := v.gasForTxData(ctx, data, tx.Value()) if err != nil { return nil, fmt.Errorf("getting gas for tx data: %w", err) } - return v.dataPoster.PostSimpleTransaction(ctx, auth.Nonce.Uint64(), *v.Address(), data, gas, auth.Value) + return v.dataPoster.PostSimpleTransaction(ctx, *v.Address(), data, gas, tx.Value()) } func createWalletContract( ctx context.Context, l1Reader *headerreader.HeaderReader, - auth *bind.TransactOpts, + from common.Address, dataPoster *dataposter.DataPoster, getExtraGas func() uint64, validatorWalletFactoryAddr common.Address, @@ -204,19 +185,22 @@ func createWalletContract( gas, err := gasForTxData( ctx, l1Reader, - auth, + from, &validatorWalletFactoryAddr, txData, + common.Big0, getExtraGas, ) if err != nil { return nil, fmt.Errorf("getting gas for tx data when creating validator wallet, validatorWalletFactory=%v: %w", validatorWalletFactoryAddr, err) } - return 
dataPoster.PostSimpleTransaction(ctx, auth.Nonce.Uint64(), validatorWalletFactoryAddr, txData, gas, common.Big0) + return dataPoster.PostSimpleTransaction(ctx, validatorWalletFactoryAddr, txData, gas, common.Big0) } func (v *Contract) populateWallet(ctx context.Context, createIfMissing bool) error { + v.populateWalletMutex.Lock() + defer v.populateWalletMutex.Unlock() if v.con != nil { return nil } @@ -269,9 +253,7 @@ func combineTxes(txes []*types.Transaction) ([][]byte, []common.Address, []*big. return data, dest, amount, totalAmount } -// Not thread safe! Don't call this from multiple threads at the same time. -func (v *Contract) ExecuteTransactions(ctx context.Context, builder *txbuilder.Builder, gasRefunder common.Address) (*types.Transaction, error) { - txes := builder.Transactions() +func (v *Contract) ExecuteTransactions(ctx context.Context, txes []*types.Transaction, gasRefunder common.Address) (*types.Transaction, error) { if len(txes) == 0 { return nil, nil } @@ -286,7 +268,6 @@ func (v *Contract) ExecuteTransactions(ctx context.Context, builder *txbuilder.B if err != nil { return nil, err } - builder.ClearTransactions() return arbTx, nil } @@ -311,31 +292,22 @@ func (v *Contract) ExecuteTransactions(ctx context.Context, builder *txbuilder.B if callValue.Sign() < 0 { callValue.SetInt64(0) } - auth, err := v.getAuth(ctx, callValue) - if err != nil { - return nil, err - } txData, err := validatorABI.Pack("executeTransactionsWithGasRefunder", gasRefunder, data, dest, amount) if err != nil { return nil, fmt.Errorf("packing arguments for executeTransactionWithGasRefunder: %w", err) } - gas, err := v.gasForTxData(ctx, auth, txData) + gas, err := v.gasForTxData(ctx, txData, callValue) if err != nil { return nil, fmt.Errorf("getting gas for tx data: %w", err) } - arbTx, err := v.dataPoster.PostSimpleTransaction(ctx, auth.Nonce.Uint64(), *v.Address(), txData, gas, auth.Value) + arbTx, err := v.dataPoster.PostSimpleTransaction(ctx, *v.Address(), txData, gas, callValue) if err != nil { return nil, err } - builder.ClearTransactions() return arbTx, nil } -func gasForTxData(ctx context.Context, l1Reader *headerreader.HeaderReader, auth *bind.TransactOpts, to *common.Address, data []byte, getExtraGas func() uint64) (uint64, error) { - if auth.GasLimit != 0 { - return auth.GasLimit, nil - } - +func gasForTxData(ctx context.Context, l1Reader *headerreader.HeaderReader, from common.Address, to *common.Address, data []byte, value *big.Int, getExtraGas func() uint64) (uint64, error) { h, err := l1Reader.LastHeader(ctx) if err != nil { return 0, fmt.Errorf("getting the last header: %w", err) @@ -351,9 +323,9 @@ func gasForTxData(ctx context.Context, l1Reader *headerreader.HeaderReader, auth g, err := l1Reader.Client().EstimateGas( ctx, ethereum.CallMsg{ - From: auth.From, + From: from, To: to, - Value: auth.Value, + Value: value, Data: data, GasFeeCap: gasFeeCap, GasTipCap: gasTipCap, @@ -365,24 +337,20 @@ func gasForTxData(ctx context.Context, l1Reader *headerreader.HeaderReader, auth return g + getExtraGas(), nil } -func (v *Contract) gasForTxData(ctx context.Context, auth *bind.TransactOpts, data []byte) (uint64, error) { - return gasForTxData(ctx, v.l1Reader, auth, v.Address(), data, v.getExtraGas) +func (v *Contract) gasForTxData(ctx context.Context, data []byte, value *big.Int) (uint64, error) { + return gasForTxData(ctx, v.l1Reader, v.From(), v.Address(), data, value, v.getExtraGas) } func (v *Contract) TimeoutChallenges(ctx context.Context, challenges []uint64) (*types.Transaction, error) { 
- auth, err := v.getAuth(ctx, nil) - if err != nil { - return nil, err - } data, err := validatorABI.Pack("timeoutChallenges", v.challengeManagerAddress, challenges) if err != nil { return nil, fmt.Errorf("packing arguments for timeoutChallenges: %w", err) } - gas, err := v.gasForTxData(ctx, auth, data) + gas, err := v.gasForTxData(ctx, data, common.Big0) if err != nil { return nil, fmt.Errorf("getting gas for tx data: %w", err) } - return v.dataPoster.PostSimpleTransaction(ctx, auth.Nonce.Uint64(), *v.Address(), data, gas, auth.Value) + return v.dataPoster.PostSimpleTransaction(ctx, *v.Address(), data, gas, common.Big0) } func (v *Contract) L1Client() *ethclient.Client { @@ -486,12 +454,7 @@ func GetValidatorWalletContract( return nil, nil } - transactAuth, err = getAuthWithUpdatedNonceFromL1(ctx, l1Reader, *transactAuth, nil) - if err != nil { - return nil, err - } - - tx, err := createWalletContract(ctx, l1Reader, transactAuth, dataPoster, getExtraGas, validatorWalletFactoryAddr) + tx, err := createWalletContract(ctx, l1Reader, transactAuth.From, dataPoster, getExtraGas, validatorWalletFactoryAddr) if err != nil { return nil, err } diff --git a/staker/validatorwallet/eoa.go b/staker/validatorwallet/eoa.go index 870a959152..80b805b396 100644 --- a/staker/validatorwallet/eoa.go +++ b/staker/validatorwallet/eoa.go @@ -15,9 +15,11 @@ import ( "github.com/offchainlabs/nitro/arbnode/dataposter" "github.com/offchainlabs/nitro/solgen/go/challengegen" "github.com/offchainlabs/nitro/solgen/go/rollupgen" - "github.com/offchainlabs/nitro/staker/txbuilder" ) +// EOA is a ValidatorWallet that uses an Externally Owned Account to sign transactions. +// An Ethereum Externally Owned Account is directly represented by a private key, +// as opposed to a smart contract wallet where the smart contract authorizes transactions. type EOA struct { auth *bind.TransactOpts client *ethclient.Client @@ -81,21 +83,17 @@ func (w *EOA) TestTransactions(context.Context, []*types.Transaction) error { return nil } -func (w *EOA) ExecuteTransactions(ctx context.Context, builder *txbuilder.Builder, _ common.Address) (*types.Transaction, error) { - if len(builder.Transactions()) == 0 { +func (w *EOA) ExecuteTransactions(ctx context.Context, txes []*types.Transaction, _ common.Address) (*types.Transaction, error) { + if len(txes) == 0 { return nil, nil } - tx := builder.Transactions()[0] // we ignore future txs and only execute the first + tx := txes[0] // we ignore future txs and only execute the first return w.postTransaction(ctx, tx) } func (w *EOA) postTransaction(ctx context.Context, baseTx *types.Transaction) (*types.Transaction, error) { - nonce, err := w.L1Client().NonceAt(ctx, w.auth.From, nil) - if err != nil { - return nil, err - } gas := baseTx.Gas() + w.getExtraGas() - newTx, err := w.dataPoster.PostSimpleTransaction(ctx, nonce, *baseTx.To(), baseTx.Data(), gas, baseTx.Value()) + newTx, err := w.dataPoster.PostSimpleTransaction(ctx, *baseTx.To(), baseTx.Data(), gas, baseTx.Value()) if err != nil { return nil, fmt.Errorf("post transaction: %w", err) } diff --git a/staker/validatorwallet/noop.go b/staker/validatorwallet/noop.go index 24c7280811..b483927753 100644 --- a/staker/validatorwallet/noop.go +++ b/staker/validatorwallet/noop.go @@ -13,7 +13,6 @@ import ( "github.com/ethereum/go-ethereum/ethclient" "github.com/offchainlabs/nitro/arbnode/dataposter" - "github.com/offchainlabs/nitro/staker/txbuilder" ) // NoOp validator wallet is used for watchtower mode. 
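
[Editor's note] Across contract.go, eoa.go, and noop.go in this patch, ExecuteTransactions now receives a plain []*types.Transaction plus the gas refunder rather than a *txbuilder.Builder, and nonce handling moves into the data poster (PostSimpleTransaction no longer takes a nonce argument). A minimal sketch of that shared wallet surface, with a watchtower-style no-op implementation, is shown below; the Wallet and watchtowerWallet names are illustrative and not from the patch.

// walletsketch.go — illustrative sketch, not part of the diff.
package walletsketch

import (
	"context"
	"errors"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// Wallet mirrors the reduced validator-wallet surface the txbuilder depends on:
// callers hand over fully built transactions instead of a builder object.
type Wallet interface {
	Address() *common.Address
	AuthIfEoa() *bind.TransactOpts
	TestTransactions(ctx context.Context, txs []*types.Transaction) error
	ExecuteTransactions(ctx context.Context, txs []*types.Transaction, gasRefunder common.Address) (*types.Transaction, error)
}

// watchtowerWallet only observes: it accepts any test batch but refuses to
// execute, matching the intent of the NoOp wallet used in watchtower mode.
type watchtowerWallet struct{}

func (watchtowerWallet) Address() *common.Address      { return nil }
func (watchtowerWallet) AuthIfEoa() *bind.TransactOpts { return nil }

func (watchtowerWallet) TestTransactions(context.Context, []*types.Transaction) error {
	return nil
}

func (watchtowerWallet) ExecuteTransactions(context.Context, []*types.Transaction, common.Address) (*types.Transaction, error) {
	return nil, errors.New("watchtower wallet cannot execute transactions")
}

// Compile-time check that the no-op flavor satisfies the shared surface.
var _ Wallet = watchtowerWallet{}
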
@@ -39,7 +38,7 @@ func (*NoOp) TxSenderAddress() *common.Address { return nil } func (*NoOp) From() common.Address { return common.Address{} } -func (*NoOp) ExecuteTransactions(context.Context, *txbuilder.Builder, common.Address) (*types.Transaction, error) { +func (*NoOp) ExecuteTransactions(context.Context, []*types.Transaction, common.Address) (*types.Transaction, error) { return nil, errors.New("no op validator wallet cannot execute transactions") } diff --git a/system_tests/arbos_upgrade_test.go b/system_tests/arbos_upgrade_test.go new file mode 100644 index 0000000000..a7103a8585 --- /dev/null +++ b/system_tests/arbos_upgrade_test.go @@ -0,0 +1,271 @@ +// Copyright 2021-2024, Offchain Labs, Inc. +// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE + +package arbtest + +import ( + "context" + "math/big" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + + "github.com/offchainlabs/nitro/arbnode" + "github.com/offchainlabs/nitro/arbos/arbosState" + "github.com/offchainlabs/nitro/execution/gethexec" + "github.com/offchainlabs/nitro/solgen/go/mocksgen" + "github.com/offchainlabs/nitro/solgen/go/precompilesgen" +) + +func TestScheduleArbosUpgrade(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() + + auth := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) + + arbOwnerPublic, err := precompilesgen.NewArbOwnerPublic(common.HexToAddress("0x6b"), builder.L2.Client) + Require(t, err, "could not bind ArbOwner contract") + + arbOwner, err := precompilesgen.NewArbOwner(common.HexToAddress("0x70"), builder.L2.Client) + Require(t, err, "could not bind ArbOwner contract") + + callOpts := &bind.CallOpts{Context: ctx} + scheduled, err := arbOwnerPublic.GetScheduledUpgrade(callOpts) + Require(t, err, "failed to call GetScheduledUpgrade before scheduling upgrade") + if scheduled.ArbosVersion != 0 || scheduled.ScheduledForTimestamp != 0 { + t.Errorf("expected no upgrade to be scheduled, got version %v timestamp %v", scheduled.ArbosVersion, scheduled.ScheduledForTimestamp) + } + + // Schedule a noop upgrade, which should test GetScheduledUpgrade in the same way an already completed upgrade would. 
+ tx, err := arbOwner.ScheduleArbOSUpgrade(&auth, 1, 1) + Require(t, err) + _, err = builder.L2.EnsureTxSucceeded(tx) + Require(t, err) + + scheduled, err = arbOwnerPublic.GetScheduledUpgrade(callOpts) + Require(t, err, "failed to call GetScheduledUpgrade after scheduling noop upgrade") + if scheduled.ArbosVersion != 0 || scheduled.ScheduledForTimestamp != 0 { + t.Errorf("expected completed scheduled upgrade to be ignored, got version %v timestamp %v", scheduled.ArbosVersion, scheduled.ScheduledForTimestamp) + } + + // We can't test 11 -> 20 because 11 doesn't have the GetScheduledUpgrade method we want to test + var testVersion uint64 = 100 + var testTimestamp uint64 = 1 << 62 + tx, err = arbOwner.ScheduleArbOSUpgrade(&auth, 100, 1<<62) + Require(t, err) + _, err = builder.L2.EnsureTxSucceeded(tx) + Require(t, err) + + scheduled, err = arbOwnerPublic.GetScheduledUpgrade(callOpts) + Require(t, err, "failed to call GetScheduledUpgrade after scheduling upgrade") + if scheduled.ArbosVersion != testVersion || scheduled.ScheduledForTimestamp != testTimestamp { + t.Errorf("expected upgrade to be scheduled for version %v timestamp %v, got version %v timestamp %v", testVersion, testTimestamp, scheduled.ArbosVersion, scheduled.ScheduledForTimestamp) + } +} + +func checkArbOSVersion(t *testing.T, testClient *TestClient, expectedVersion uint64, scenario string) { + statedb, err := testClient.ExecNode.Backend.ArbInterface().BlockChain().State() + Require(t, err, "could not get statedb", scenario) + state, err := arbosState.OpenSystemArbosState(statedb, nil, true) + Require(t, err, "could not open ArbOS state", scenario) + if state.ArbOSVersion() != expectedVersion { + t.Errorf("%s: expected ArbOS version %v, got %v", scenario, expectedVersion, state.ArbOSVersion()) + } + +} + +func TestArbos11To32UpgradeWithMcopy(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + initialVersion := uint64(11) + finalVersion := uint64(32) + + builder := NewNodeBuilder(ctx). + DefaultConfig(t, true). 
+ WithArbOSVersion(initialVersion) + cleanup := builder.Build(t) + defer cleanup() + seqTestClient := builder.L2 + + auth := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) + auth.GasLimit = 32000000 + + // makes Owner a chain owner + arbDebug, err := precompilesgen.NewArbDebug(types.ArbDebugAddress, seqTestClient.Client) + Require(t, err) + tx, err := arbDebug.BecomeChainOwner(&auth) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, seqTestClient.Client, tx) + Require(t, err) + + // deploys test contract + _, tx, contract, err := mocksgen.DeployArbOS11To32UpgradeTest(&auth, seqTestClient.Client) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, seqTestClient.Client, tx) + Require(t, err) + + // build replica node + replicaConfig := arbnode.ConfigDefaultL1Test() + replicaConfig.BatchPoster.Enable = false + replicaTestClient, replicaCleanup := builder.Build2ndNode(t, &SecondNodeParams{nodeConfig: replicaConfig}) + defer replicaCleanup() + + checkArbOSVersion(t, seqTestClient, initialVersion, "initial sequencer") + checkArbOSVersion(t, replicaTestClient, initialVersion, "initial replica") + + // mcopy should fail since arbos 11 doesn't support it + tx, err = contract.Mcopy(&auth) + Require(t, err) + _, err = seqTestClient.EnsureTxSucceeded(tx) + if (err == nil) || !strings.Contains(err.Error(), "invalid opcode: MCOPY") { + t.Errorf("expected MCOPY to fail, got %v", err) + } + _, err = WaitForTx(ctx, replicaTestClient.Client, tx.Hash(), time.Second*15) + Require(t, err) + + // upgrade arbos to final version + arbOwner, err := precompilesgen.NewArbOwner(types.ArbOwnerAddress, seqTestClient.Client) + Require(t, err) + tx, err = arbOwner.ScheduleArbOSUpgrade(&auth, finalVersion, 0) + Require(t, err) + _, err = seqTestClient.EnsureTxSucceeded(tx) + Require(t, err) + _, err = WaitForTx(ctx, replicaTestClient.Client, tx.Hash(), time.Second*15) + Require(t, err) + + // checks upgrade worked + tx, err = contract.Mcopy(&auth) + Require(t, err) + _, err = seqTestClient.EnsureTxSucceeded(tx) + Require(t, err) + _, err = WaitForTx(ctx, replicaTestClient.Client, tx.Hash(), time.Second*15) + Require(t, err) + + checkArbOSVersion(t, seqTestClient, finalVersion, "final sequencer") + checkArbOSVersion(t, replicaTestClient, finalVersion, "final replica") + + // generates more blocks + builder.L2Info.GenerateAccount("User2") + for i := 0; i < 3; i++ { + tx = builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, big.NewInt(1e12), nil) + err = seqTestClient.Client.SendTransaction(ctx, tx) + Require(t, err) + _, err = seqTestClient.EnsureTxSucceeded(tx) + Require(t, err) + _, err = WaitForTx(ctx, replicaTestClient.Client, tx.Hash(), time.Second*15) + Require(t, err) + } + + blockNumberSeq, err := seqTestClient.Client.BlockNumber(ctx) + Require(t, err) + blockNumberReplica, err := replicaTestClient.Client.BlockNumber(ctx) + Require(t, err) + if blockNumberSeq != blockNumberReplica { + t.Errorf("expected sequencer and replica to have same block number, got %v and %v", blockNumberSeq, blockNumberReplica) + } + // #nosec G115 + blockNumber := big.NewInt(int64(blockNumberSeq)) + + blockSeq, err := seqTestClient.Client.BlockByNumber(ctx, blockNumber) + Require(t, err) + blockReplica, err := replicaTestClient.Client.BlockByNumber(ctx, blockNumber) + Require(t, err) + if blockSeq.Hash() != blockReplica.Hash() { + t.Errorf("expected sequencer and replica to have same block hash, got %v and %v", blockSeq.Hash(), blockReplica.Hash()) + } +} + +func TestArbos11To32UpgradeWithCalldata(t *testing.T) { + 
t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + initialVersion := uint64(11) + finalVersion := uint64(32) + + builder := NewNodeBuilder(ctx). + DefaultConfig(t, true). + WithArbOSVersion(initialVersion) + builder.execConfig.TxPreChecker.Strictness = gethexec.TxPreCheckerStrictnessLikelyCompatible + cleanup := builder.Build(t) + defer cleanup() + seqTestClient := builder.L2 + + auth := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) + auth.GasLimit = 32000000 + + // makes Owner a chain owner + arbDebug, err := precompilesgen.NewArbDebug(types.ArbDebugAddress, seqTestClient.Client) + Require(t, err) + tx, err := arbDebug.BecomeChainOwner(&auth) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, seqTestClient.Client, tx) + Require(t, err) + + // build replica node + replicaConfig := arbnode.ConfigDefaultL1Test() + replicaConfig.BatchPoster.Enable = false + replicaTestClient, replicaCleanup := builder.Build2ndNode(t, &SecondNodeParams{nodeConfig: replicaConfig}) + defer replicaCleanup() + + checkArbOSVersion(t, seqTestClient, initialVersion, "initial sequencer") + checkArbOSVersion(t, replicaTestClient, initialVersion, "initial replica") + + // upgrade arbos to final version + arbOwner, err := precompilesgen.NewArbOwner(types.ArbOwnerAddress, seqTestClient.Client) + Require(t, err) + tx, err = arbOwner.ScheduleArbOSUpgrade(&auth, finalVersion, 0) + Require(t, err) + _, err = seqTestClient.EnsureTxSucceeded(tx) + Require(t, err) + _, err = WaitForTx(ctx, replicaTestClient.Client, tx.Hash(), time.Second*15) + Require(t, err) + + // checks upgrade worked + var data []byte + for i := range 10 { + for range 100 { + data = append(data, byte(i)) + } + } + tx = builder.L2Info.PrepareTx("Owner", "Owner", builder.L2Info.TransferGas, big.NewInt(1e12), data) + err = seqTestClient.Client.SendTransaction(ctx, tx) + Require(t, err) + _, err = seqTestClient.EnsureTxSucceeded(tx) + Require(t, err) + _, err = WaitForTx(ctx, replicaTestClient.Client, tx.Hash(), time.Second*15) + Require(t, err) + + checkArbOSVersion(t, seqTestClient, finalVersion, "final sequencer") + checkArbOSVersion(t, replicaTestClient, finalVersion, "final replica") + + blockNumberSeq, err := seqTestClient.Client.BlockNumber(ctx) + Require(t, err) + blockNumberReplica, err := replicaTestClient.Client.BlockNumber(ctx) + Require(t, err) + if blockNumberSeq != blockNumberReplica { + t.Errorf("expected sequencer and replica to have same block number, got %v and %v", blockNumberSeq, blockNumberReplica) + } + // #nosec G115 + blockNumber := big.NewInt(int64(blockNumberSeq)) + + blockSeq, err := seqTestClient.Client.BlockByNumber(ctx, blockNumber) + Require(t, err) + blockReplica, err := replicaTestClient.Client.BlockByNumber(ctx, blockNumber) + Require(t, err) + if blockSeq.Hash() != blockReplica.Hash() { + t.Errorf("expected sequencer and replica to have same block hash, got %v and %v", blockSeq.Hash(), blockReplica.Hash()) + } +} diff --git a/system_tests/batch_poster_test.go b/system_tests/batch_poster_test.go index 39d7fa576c..ee0c3b4a3a 100644 --- a/system_tests/batch_poster_test.go +++ b/system_tests/batch_poster_test.go @@ -6,6 +6,7 @@ package arbtest import ( "context" "crypto/rand" + "errors" "fmt" "math/big" "strings" @@ -15,6 +16,7 @@ import ( "github.com/andybalholm/brotli" "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" 
"github.com/ethereum/go-ethereum/log" @@ -364,3 +366,119 @@ func TestAllowPostingFirstBatchWhenSequencerMessageCountMismatchEnabled(t *testi func TestAllowPostingFirstBatchWhenSequencerMessageCountMismatchDisabled(t *testing.T) { testAllowPostingFirstBatchWhenSequencerMessageCountMismatch(t, false) } + +func GetBatchCount(t *testing.T, builder *NodeBuilder) uint64 { + t.Helper() + sequenceInbox, err := bridgegen.NewSequencerInbox(builder.L1Info.GetAddress("SequencerInbox"), builder.L1.Client) + Require(t, err) + batchCount, err := sequenceInbox.BatchCount(&bind.CallOpts{Context: builder.ctx}) + Require(t, err) + return batchCount.Uint64() +} + +func CheckBatchCount(t *testing.T, builder *NodeBuilder, want uint64) { + if got := GetBatchCount(t, builder); got != want { + t.Fatalf("invalid batch count, want %v, got %v", want, got) + } +} + +func testBatchPosterDelayBuffer(t *testing.T, delayBufferEnabled bool) { + const messagesPerBatch = 3 + const numBatches = 3 + var threshold uint64 + if delayBufferEnabled { + threshold = 100 + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + builder := NewNodeBuilder(ctx). + DefaultConfig(t, true). + WithBoldDeployment(). + WithDelayBuffer(threshold) + builder.L2Info.GenerateAccount("User2") + builder.nodeConfig.BatchPoster.MaxDelay = time.Hour // set high max-delay so we can test the delay buffer + cleanup := builder.Build(t) + defer cleanup() + testClientB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{}) + defer cleanupB() + + initialBatchCount := GetBatchCount(t, builder) + for batch := uint64(0); batch < numBatches; batch++ { + txs := make(types.Transactions, messagesPerBatch) + for i := range txs { + txs[i] = builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, common.Big1, nil) + } + SendSignedTxesInBatchViaL1(t, ctx, builder.L1Info, builder.L1.Client, builder.L2.Client, txs) + + // Check batch wasn't sent + _, err := WaitForTx(ctx, testClientB.Client, txs[0].Hash(), 100*time.Millisecond) + if err == nil || !errors.Is(err, context.DeadlineExceeded) { + Fatal(t, "expected context-deadline exceeded error, but got:", err) + } + CheckBatchCount(t, builder, initialBatchCount+batch) + + // Advance L1 to force a batch given the delay buffer threshold + AdvanceL1(t, ctx, builder.L1.Client, builder.L1Info, int(threshold)) // #nosec G115 + if !delayBufferEnabled { + // If the delay buffer is disabled, set max delay to zero to force it + CheckBatchCount(t, builder, initialBatchCount+batch) + builder.nodeConfig.BatchPoster.MaxDelay = 0 + } + for _, tx := range txs { + _, err := testClientB.EnsureTxSucceeded(tx) + Require(t, err, "tx not found on second node") + } + CheckBatchCount(t, builder, initialBatchCount+batch+1) + if !delayBufferEnabled { + builder.nodeConfig.BatchPoster.MaxDelay = time.Hour + } + } +} + +func TestBatchPosterDelayBufferEnabled(t *testing.T) { + testBatchPosterDelayBuffer(t, true) +} + +func TestBatchPosterDelayBufferDisabled(t *testing.T) { + testBatchPosterDelayBuffer(t, false) +} + +func TestBatchPosterDelayBufferDontForceNonDelayedMessages(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + const threshold = 100 + builder := NewNodeBuilder(ctx). + DefaultConfig(t, true). + WithBoldDeployment(). 
+ WithDelayBuffer(threshold) + builder.L2Info.GenerateAccount("User2") + builder.nodeConfig.BatchPoster.MaxDelay = time.Hour // set high max-delay so we can test the delay buffer + cleanup := builder.Build(t) + defer cleanup() + testClientB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{}) + defer cleanupB() + + // Send non-delayed message and advance L1 + initialBatchCount := GetBatchCount(t, builder) + const numTxs = 3 + txs := make(types.Transactions, numTxs) + for i := range txs { + txs[i] = builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, common.Big1, nil) + } + builder.L2.SendWaitTestTransactions(t, txs) + AdvanceL1(t, ctx, builder.L1.Client, builder.L1Info, threshold) + + // Even advancing the L1, the batch won't be posted because it doesn't contain a delayed message + CheckBatchCount(t, builder, initialBatchCount) + + // Set delay to zero to force non-delayed messages + builder.nodeConfig.BatchPoster.MaxDelay = 0 + for _, tx := range txs { + _, err := testClientB.EnsureTxSucceeded(tx) + Require(t, err, "tx not found on second node") + } + CheckBatchCount(t, builder, initialBatchCount+1) +} diff --git a/system_tests/block_validator_test.go b/system_tests/block_validator_test.go index 9125c3921e..d6ae4973ac 100644 --- a/system_tests/block_validator_test.go +++ b/system_tests/block_validator_test.go @@ -19,6 +19,7 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbos/l2pricing" @@ -58,7 +59,7 @@ func testBlockValidatorSimple(t *testing.T, opts Options) { chainConfig, l1NodeConfigA, lifecycleManager, _, dasSignerKey := setupConfigWithDAS(t, ctx, opts.dasModeString) defer lifecycleManager.StopAndWaitUntil(time.Second) if opts.workload == upgradeArbOs { - chainConfig.ArbitrumChainParams.InitialArbOSVersion = 10 + chainConfig.ArbitrumChainParams.InitialArbOSVersion = params.ArbosVersion_10 } var delayEvery int @@ -202,8 +203,6 @@ func testBlockValidatorSimple(t *testing.T, opts Options) { builder.L1.SendWaitTestTransactions(t, []*types.Transaction{ WrapL2ForDelayed(t, delayedTx, builder.L1Info, "User", 100000), }) - // give the inbox reader a bit of time to pick up the delayed message - time.Sleep(time.Millisecond * 500) // sending l1 messages creates l1 blocks.. make enough to get that delayed inbox message in for i := 0; i < 30; i++ { diff --git a/system_tests/bold_challenge_protocol_test.go b/system_tests/bold_challenge_protocol_test.go new file mode 100644 index 0000000000..777817bf3e --- /dev/null +++ b/system_tests/bold_challenge_protocol_test.go @@ -0,0 +1,922 @@ +// Copyright 2023, Offchain Labs, Inc. 
+// For license information, see https://github.com/nitro/blob/master/LICENSE + +//go:build challengetest && !race + +package arbtest + +import ( + "bytes" + "context" + "encoding/json" + "io" + "math/big" + "os" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/rpc" + protocol "github.com/offchainlabs/bold/chain-abstraction" + solimpl "github.com/offchainlabs/bold/chain-abstraction/sol-implementation" + challengemanager "github.com/offchainlabs/bold/challenge-manager" + modes "github.com/offchainlabs/bold/challenge-manager/types" + l2stateprovider "github.com/offchainlabs/bold/layer2-state-provider" + "github.com/offchainlabs/bold/solgen/go/bridgegen" + "github.com/offchainlabs/bold/solgen/go/challengeV2gen" + "github.com/offchainlabs/bold/solgen/go/mocksgen" + "github.com/offchainlabs/bold/solgen/go/rollupgen" + challengetesting "github.com/offchainlabs/bold/testing" + "github.com/offchainlabs/bold/testing/setup" + butil "github.com/offchainlabs/bold/util" + "github.com/offchainlabs/nitro/arbcompress" + "github.com/offchainlabs/nitro/arbnode" + "github.com/offchainlabs/nitro/arbnode/dataposter/storage" + "github.com/offchainlabs/nitro/arbos" + "github.com/offchainlabs/nitro/arbos/l2pricing" + "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/cmd/chaininfo" + "github.com/offchainlabs/nitro/execution/gethexec" + "github.com/offchainlabs/nitro/staker" + "github.com/offchainlabs/nitro/staker/bold" + "github.com/offchainlabs/nitro/statetransfer" + "github.com/offchainlabs/nitro/util" + "github.com/offchainlabs/nitro/util/signature" + "github.com/offchainlabs/nitro/util/testhelpers" + "github.com/offchainlabs/nitro/validator/server_arb" + "github.com/offchainlabs/nitro/validator/server_common" + "github.com/offchainlabs/nitro/validator/valnode" +) + +func TestChallengeProtocolBOLDReadInboxChallenge(t *testing.T) { + testChallengeProtocolBOLD(t) +} + +func TestChallengeProtocolBOLDStartStepChallenge(t *testing.T) { + opts := []server_arb.SpawnerOption{ + server_arb.WithWrapper(func(inner server_arb.MachineInterface) server_arb.MachineInterface { + // This wrapper is applied after the BOLD wrapper, so step 0 is the finished machine. + // Modifying its hash results in invalid inclusion proofs for the evil validator, + // so we start modifying hashes at step 1 (the first machine step in the running state). + return NewIncorrectIntermediateMachine(inner, 1) + }), + } + testChallengeProtocolBOLD(t, opts...) 
+} + +func testChallengeProtocolBOLD(t *testing.T, spawnerOpts ...server_arb.SpawnerOption) { + goodDir, err := os.MkdirTemp("", "good_*") + Require(t, err) + evilDir, err := os.MkdirTemp("", "evil_*") + Require(t, err) + t.Cleanup(func() { + Require(t, os.RemoveAll(goodDir)) + Require(t, os.RemoveAll(evilDir)) + }) + ctx, cancelCtx := context.WithCancel(context.Background()) + defer cancelCtx() + var transferGas = util.NormalizeL2GasForL1GasInitial(800_000, params.GWei) // include room for aggregator L1 costs + l2chainConfig := chaininfo.ArbitrumDevTestChainConfig() + l2info := NewBlockChainTestInfo( + t, + types.NewArbitrumSigner(types.NewLondonSigner(l2chainConfig.ChainID)), big.NewInt(l2pricing.InitialBaseFeeWei*2), + transferGas, + ) + ownerBal := big.NewInt(params.Ether) + ownerBal.Mul(ownerBal, big.NewInt(1_000_000)) + l2info.GenerateGenesisAccount("Owner", ownerBal) + sconf := setup.RollupStackConfig{ + UseMockBridge: false, + UseMockOneStepProver: false, + MinimumAssertionPeriod: 0, + } + + _, l2nodeA, _, _, l1info, _, l1client, l1stack, assertionChain, stakeTokenAddr := createTestNodeOnL1ForBoldProtocol( + t, + ctx, + true, + nil, + l2chainConfig, + nil, + sconf, + l2info, + ) + defer requireClose(t, l1stack) + defer l2nodeA.StopAndWait() + + // Make sure we shut down test functionality before the rest of the node + ctx, cancelCtx = context.WithCancel(ctx) + defer cancelCtx() + + go keepChainMoving(t, ctx, l1info, l1client) + + l2nodeConfig := arbnode.ConfigDefaultL1Test() + _, l2nodeB, _ := create2ndNodeWithConfigForBoldProtocol( + t, + ctx, + l2nodeA, + l1stack, + l1info, + &l2info.ArbInitData, + l2nodeConfig, + nil, + sconf, + stakeTokenAddr, + ) + defer l2nodeB.StopAndWait() + + genesisA, err := l2nodeA.Execution.ResultAtPos(0) + Require(t, err) + genesisB, err := l2nodeB.Execution.ResultAtPos(0) + Require(t, err) + if genesisA.BlockHash != genesisB.BlockHash { + Fatal(t, "genesis blocks mismatch between nodes") + } + + balance := big.NewInt(params.Ether) + balance.Mul(balance, big.NewInt(100)) + TransferBalance(t, "Faucet", "Asserter", balance, l1info, l1client, ctx) + TransferBalance(t, "Faucet", "EvilAsserter", balance, l1info, l1client, ctx) + + valCfg := valnode.TestValidationConfig + valCfg.UseJit = false + _, valStack := createTestValidationNode(t, ctx, &valCfg) + blockValidatorConfig := staker.TestBlockValidatorConfig + + statelessA, err := staker.NewStatelessBlockValidator( + l2nodeA.InboxReader, + l2nodeA.InboxTracker, + l2nodeA.TxStreamer, + l2nodeA.Execution, + l2nodeA.ArbDB, + nil, + StaticFetcherFrom(t, &blockValidatorConfig), + valStack, + ) + Require(t, err) + err = statelessA.Start(ctx) + Require(t, err) + _, valStackB := createTestValidationNode(t, ctx, &valCfg, spawnerOpts...) 
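+	// valStackB is created with the spawner options so the evil validator's arbitrator machines
+	// can be wrapped (e.g. to report incorrect intermediate hashes in the step-challenge variant).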
+ + statelessB, err := staker.NewStatelessBlockValidator( + l2nodeB.InboxReader, + l2nodeB.InboxTracker, + l2nodeB.TxStreamer, + l2nodeB.Execution, + l2nodeB.ArbDB, + nil, + StaticFetcherFrom(t, &blockValidatorConfig), + valStackB, + ) + Require(t, err) + err = statelessB.Start(ctx) + Require(t, err) + + blockValidatorA, err := staker.NewBlockValidator( + statelessA, + l2nodeA.InboxTracker, + l2nodeA.TxStreamer, + StaticFetcherFrom(t, &blockValidatorConfig), + nil, + ) + Require(t, err) + Require(t, blockValidatorA.Initialize(ctx)) + Require(t, blockValidatorA.Start(ctx)) + + blockValidatorB, err := staker.NewBlockValidator( + statelessB, + l2nodeB.InboxTracker, + l2nodeB.TxStreamer, + StaticFetcherFrom(t, &blockValidatorConfig), + nil, + ) + Require(t, err) + Require(t, blockValidatorB.Initialize(ctx)) + Require(t, blockValidatorB.Start(ctx)) + + stateManager, err := bold.NewBOLDStateProvider( + blockValidatorA, + statelessA, + l2stateprovider.Height(blockChallengeLeafHeight), + &bold.StateProviderConfig{ + ValidatorName: "good", + MachineLeavesCachePath: goodDir, + CheckBatchFinality: false, + }, + goodDir, + ) + Require(t, err) + + stateManagerB, err := bold.NewBOLDStateProvider( + blockValidatorB, + statelessB, + l2stateprovider.Height(blockChallengeLeafHeight), + &bold.StateProviderConfig{ + ValidatorName: "evil", + MachineLeavesCachePath: evilDir, + CheckBatchFinality: false, + }, + evilDir, + ) + Require(t, err) + + Require(t, l2nodeA.Start(ctx)) + Require(t, l2nodeB.Start(ctx)) + + chalManagerAddr := assertionChain.SpecChallengeManager() + evilOpts := l1info.GetDefaultTransactOpts("EvilAsserter", ctx) + l1ChainId, err := l1client.ChainID(ctx) + Require(t, err) + dp, err := arbnode.StakerDataposter( + ctx, + rawdb.NewTable(l2nodeB.ArbDB, storage.StakerPrefix), + l2nodeB.L1Reader, + &evilOpts, + NewFetcherFromConfig(l2nodeConfig), + l2nodeB.SyncMonitor, + l1ChainId, + ) + Require(t, err) + chainB, err := solimpl.NewAssertionChain( + ctx, + assertionChain.RollupAddress(), + chalManagerAddr.Address(), + &evilOpts, + butil.NewBackendWrapper(l1client, rpc.LatestBlockNumber), + bold.NewDataPosterTransactor(dp), + solimpl.WithRpcHeadBlockNumber(rpc.LatestBlockNumber), + ) + Require(t, err) + + l2info.GenerateAccount("Destination") + sequencerTxOpts := l1info.GetDefaultTransactOpts("Sequencer", ctx) + + honestSeqInbox := l1info.GetAddress("SequencerInbox") + evilSeqInbox := l1info.GetAddress("EvilSequencerInbox") + honestSeqInboxBinding, err := bridgegen.NewSequencerInbox(honestSeqInbox, l1client) + Require(t, err) + evilSeqInboxBinding, err := bridgegen.NewSequencerInbox(evilSeqInbox, l1client) + Require(t, err) + + // Post batches to the honest and evil sequencer inbox that are internally equal. + // This means the honest and evil sequencer inboxes will agree with all messages in the batch. 
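+	// A later batch posted below diverges at message index 5 in the evil node's inbox,
+	// creating the disagreement that the challenge protocol must resolve.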
+ seqInboxABI, err := abi.JSON(strings.NewReader(bridgegen.SequencerInboxABI)) + Require(t, err) + + honestUpgradeExec, err := mocksgen.NewUpgradeExecutorMock(l1info.GetAddress("UpgradeExecutor"), l1client) + Require(t, err) + data, err := seqInboxABI.Pack( + "setIsBatchPoster", + sequencerTxOpts.From, + true, + ) + Require(t, err) + honestRollupOwnerOpts := l1info.GetDefaultTransactOpts("RollupOwner", ctx) + _, err = honestUpgradeExec.ExecuteCall(&honestRollupOwnerOpts, honestSeqInbox, data) + Require(t, err) + + evilUpgradeExec, err := mocksgen.NewUpgradeExecutorMock(l1info.GetAddress("EvilUpgradeExecutor"), l1client) + Require(t, err) + data, err = seqInboxABI.Pack( + "setIsBatchPoster", + sequencerTxOpts.From, + true, + ) + Require(t, err) + evilRollupOwnerOpts := l1info.GetDefaultTransactOpts("RollupOwner", ctx) + _, err = evilUpgradeExec.ExecuteCall(&evilRollupOwnerOpts, evilSeqInbox, data) + Require(t, err) + + totalMessagesPosted := int64(0) + numMessagesPerBatch := int64(5) + divergeAt := int64(-1) + makeBoldBatch(t, l2nodeA, l2info, l1client, &sequencerTxOpts, honestSeqInboxBinding, honestSeqInbox, numMessagesPerBatch, divergeAt) + l2info.Accounts["Owner"].Nonce.Store(0) + makeBoldBatch(t, l2nodeB, l2info, l1client, &sequencerTxOpts, evilSeqInboxBinding, evilSeqInbox, numMessagesPerBatch, divergeAt) + totalMessagesPosted += numMessagesPerBatch + + // Next, we post another batch, this time containing more messages. + // We diverge at message index 5 within the evil node's batch. + l2info.Accounts["Owner"].Nonce.Store(5) + numMessagesPerBatch = int64(10) + makeBoldBatch(t, l2nodeA, l2info, l1client, &sequencerTxOpts, honestSeqInboxBinding, honestSeqInbox, numMessagesPerBatch, divergeAt) + l2info.Accounts["Owner"].Nonce.Store(5) + divergeAt = int64(5) + makeBoldBatch(t, l2nodeB, l2info, l1client, &sequencerTxOpts, evilSeqInboxBinding, evilSeqInbox, numMessagesPerBatch, divergeAt) + totalMessagesPosted += numMessagesPerBatch + + bcA, err := l2nodeA.InboxTracker.GetBatchCount() + Require(t, err) + bcB, err := l2nodeB.InboxTracker.GetBatchCount() + Require(t, err) + msgA, err := l2nodeA.InboxTracker.GetBatchMessageCount(bcA - 1) + Require(t, err) + msgB, err := l2nodeB.InboxTracker.GetBatchMessageCount(bcB - 1) + Require(t, err) + + t.Logf("Node A batch count %d, msgs %d", bcA, msgA) + t.Logf("Node B batch count %d, msgs %d", bcB, msgB) + + // Wait for both nodes' chains to catch up. + nodeAExec, ok := l2nodeA.Execution.(*gethexec.ExecutionNode) + if !ok { + Fatal(t, "not geth execution node") + } + nodeBExec, ok := l2nodeB.Execution.(*gethexec.ExecutionNode) + if !ok { + Fatal(t, "not geth execution node") + } + for { + nodeALatest := nodeAExec.Backend.APIBackend().CurrentHeader() + nodeBLatest := nodeBExec.Backend.APIBackend().CurrentHeader() + isCaughtUp := nodeALatest.Number.Uint64() == uint64(totalMessagesPosted) + areEqual := nodeALatest.Number.Uint64() == nodeBLatest.Number.Uint64() + if isCaughtUp && areEqual { + if nodeALatest.Hash() == nodeBLatest.Hash() { + Fatal(t, "node A L2 hash", nodeALatest, "matches node B L2 hash", nodeBLatest) + } + break + } + } + + bridgeBinding, err := bridgegen.NewBridge(l1info.GetAddress("Bridge"), l1client) + Require(t, err) + totalBatchesBig, err := bridgeBinding.SequencerMessageCount(&bind.CallOpts{Context: ctx}) + Require(t, err) + totalBatches := totalBatchesBig.Uint64() + + // Wait until the validators have validated the batches. 
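+	// Both validators must report a last-validated batch covering everything posted above
+	// before the challenge managers are started.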
+ for { + lastInfo, err := blockValidatorA.ReadLastValidatedInfo() + if lastInfo == nil || err != nil { + continue + } + t.Log(lastInfo.GlobalState.Batch, totalBatches-1) + if lastInfo.GlobalState.Batch >= totalBatches-1 { + break + } + time.Sleep(time.Millisecond * 200) + } + for { + lastInfo, err := blockValidatorB.ReadLastValidatedInfo() + if lastInfo == nil || err != nil { + continue + } + t.Log(lastInfo.GlobalState.Batch, totalBatches-1) + if lastInfo.GlobalState.Batch >= totalBatches-1 { + break + } + time.Sleep(time.Millisecond * 200) + } + + provider := l2stateprovider.NewHistoryCommitmentProvider( + stateManager, + stateManager, + stateManager, + []l2stateprovider.Height{ + l2stateprovider.Height(blockChallengeLeafHeight), + l2stateprovider.Height(bigStepChallengeLeafHeight), + l2stateprovider.Height(bigStepChallengeLeafHeight), + l2stateprovider.Height(bigStepChallengeLeafHeight), + l2stateprovider.Height(smallStepChallengeLeafHeight), + }, + stateManager, + nil, // Api db + ) + + evilProvider := l2stateprovider.NewHistoryCommitmentProvider( + stateManagerB, + stateManagerB, + stateManagerB, + []l2stateprovider.Height{ + l2stateprovider.Height(blockChallengeLeafHeight), + l2stateprovider.Height(bigStepChallengeLeafHeight), + l2stateprovider.Height(bigStepChallengeLeafHeight), + l2stateprovider.Height(bigStepChallengeLeafHeight), + l2stateprovider.Height(smallStepChallengeLeafHeight), + }, + stateManagerB, + nil, // Api db + ) + + stackOpts := []challengemanager.StackOpt{ + challengemanager.StackWithName("honest"), + challengemanager.StackWithMode(modes.MakeMode), + challengemanager.StackWithPostingInterval(time.Second * 3), + challengemanager.StackWithPollingInterval(time.Second), + challengemanager.StackWithAverageBlockCreationTime(time.Second), + } + + manager, err := challengemanager.NewChallengeStack( + assertionChain, + provider, + stackOpts..., + ) + Require(t, err) + + evilStackOpts := append(stackOpts, challengemanager.StackWithName("evil")) + + managerB, err := challengemanager.NewChallengeStack( + chainB, + evilProvider, + evilStackOpts..., + ) + Require(t, err) + + manager.Start(ctx) + managerB.Start(ctx) + + chalManager := assertionChain.SpecChallengeManager() + filterer, err := challengeV2gen.NewEdgeChallengeManagerFilterer(chalManager.Address(), l1client) + Require(t, err) + + fromBlock := uint64(0) + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + for { + select { + case <-ticker.C: + latestBlock, err := l1client.HeaderByNumber(ctx, nil) + Require(t, err) + toBlock := latestBlock.Number.Uint64() + if fromBlock == toBlock { + continue + } + filterOpts := &bind.FilterOpts{ + Start: fromBlock, + End: &toBlock, + Context: ctx, + } + it, err := filterer.FilterEdgeConfirmedByOneStepProof(filterOpts, nil, nil) + Require(t, err) + for it.Next() { + if it.Error() != nil { + t.Fatalf("Error in filter iterator: %v", it.Error()) + } + t.Log("Received event of OSP confirmation!") + tx, _, err := l1client.TransactionByHash(ctx, it.Event.Raw.TxHash) + Require(t, err) + signer := types.NewCancunSigner(tx.ChainId()) + address, err := signer.Sender(tx) + Require(t, err) + if address == l1info.GetDefaultTransactOpts("Asserter", ctx).From { + t.Log("Honest party won OSP, impossible for evil party to win if honest party continues") + Require(t, it.Close()) + return + } + } + fromBlock = toBlock + case <-ctx.Done(): + return + } + } +} + +// Every 3 seconds, send an L1 transaction to keep the chain moving. 
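+// Once the L1 chain is past block 150, the delay drops to one second to speed the rest of the test along.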
+func keepChainMoving(t *testing.T, ctx context.Context, l1Info *BlockchainTestInfo, l1Client *ethclient.Client) { + delay := time.Second * 3 + for { + select { + case <-ctx.Done(): + return + default: + time.Sleep(delay) + if ctx.Err() != nil { + break + } + TransferBalance(t, "Faucet", "Faucet", common.Big0, l1Info, l1Client, ctx) + latestBlock, err := l1Client.BlockNumber(ctx) + if ctx.Err() != nil { + break + } + Require(t, err) + if latestBlock > 150 { + delay = time.Second + } + } + } +} + +func createTestNodeOnL1ForBoldProtocol( + t *testing.T, + ctx context.Context, + isSequencer bool, + nodeConfig *arbnode.Config, + chainConfig *params.ChainConfig, + _ *node.Config, + rollupStackConf setup.RollupStackConfig, + l2infoIn info, +) ( + l2info info, currentNode *arbnode.Node, l2client *ethclient.Client, l2stack *node.Node, + l1info info, l1backend *eth.Ethereum, l1client *ethclient.Client, l1stack *node.Node, + assertionChain *solimpl.AssertionChain, stakeTokenAddr common.Address, +) { + if nodeConfig == nil { + nodeConfig = arbnode.ConfigDefaultL1Test() + } + nodeConfig.ParentChainReader.OldHeaderTimeout = time.Minute * 10 + if chainConfig == nil { + chainConfig = chaininfo.ArbitrumDevTestChainConfig() + } + nodeConfig.BatchPoster.DataPoster.MaxMempoolTransactions = 18 + fatalErrChan := make(chan error, 10) + l1info, l1client, l1backend, l1stack = createTestL1BlockChain(t, nil) + var l2chainDb ethdb.Database + var l2arbDb ethdb.Database + var l2blockchain *core.BlockChain + l2info = l2infoIn + if l2info == nil { + l2info = NewArbTestInfo(t, chainConfig.ChainID) + } + + l1info.GenerateAccount("RollupOwner") + l1info.GenerateAccount("Sequencer") + l1info.GenerateAccount("User") + l1info.GenerateAccount("Asserter") + l1info.GenerateAccount("EvilAsserter") + + SendWaitTestTransactions(t, ctx, l1client, []*types.Transaction{ + l1info.PrepareTx("Faucet", "RollupOwner", 30000, big.NewInt(9223372036854775807), nil), + l1info.PrepareTx("Faucet", "Sequencer", 30000, big.NewInt(9223372036854775807), nil), + l1info.PrepareTx("Faucet", "User", 30000, big.NewInt(9223372036854775807), nil), + l1info.PrepareTx("Faucet", "Asserter", 30000, big.NewInt(9223372036854775807), nil), + l1info.PrepareTx("Faucet", "EvilAsserter", 30000, big.NewInt(9223372036854775807), nil), + }) + + l1TransactionOpts := l1info.GetDefaultTransactOpts("RollupOwner", ctx) + stakeToken, tx, tokenBindings, err := mocksgen.DeployTestWETH9( + &l1TransactionOpts, + l1client, + "Weth", + "WETH", + ) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, l1client, tx) + Require(t, err) + stakeTokenAddr = stakeToken + value, ok := new(big.Int).SetString("10000", 10) + if !ok { + t.Fatal(t, "could not set value") + } + l1TransactionOpts.Value = value + tx, err = tokenBindings.Deposit(&l1TransactionOpts) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, l1client, tx) + Require(t, err) + l1TransactionOpts.Value = nil + + addresses := deployContractsOnly(t, ctx, l1info, l1client, chainConfig.ChainID, rollupStackConf, stakeToken) + rollupUser, err := rollupgen.NewRollupUserLogic(addresses.Rollup, l1client) + Require(t, err) + chalManagerAddr, err := rollupUser.ChallengeManager(&bind.CallOpts{}) + Require(t, err) + l1info.SetContract("Bridge", addresses.Bridge) + l1info.SetContract("SequencerInbox", addresses.SequencerInbox) + l1info.SetContract("Inbox", addresses.Inbox) + l1info.SetContract("Rollup", addresses.Rollup) + l1info.SetContract("UpgradeExecutor", addresses.UpgradeExecutor) + + execConfig := ExecConfigDefaultNonSequencerTest(t) + 
Require(t, execConfig.Validate()) + execConfig.Caching.StateScheme = rawdb.HashScheme + useWasmCache := uint32(1) + initMessage := getInitMessage(ctx, t, l1client, addresses) + _, l2stack, l2chainDb, l2arbDb, l2blockchain = createNonL1BlockChainWithStackConfig(t, l2info, "", chainConfig, initMessage, nil, execConfig, useWasmCache) + var sequencerTxOptsPtr *bind.TransactOpts + var dataSigner signature.DataSignerFunc + if isSequencer { + sequencerTxOpts := l1info.GetDefaultTransactOpts("Sequencer", ctx) + sequencerTxOptsPtr = &sequencerTxOpts + dataSigner = signature.DataSignerFromPrivateKey(l1info.GetInfoWithPrivKey("Sequencer").PrivateKey) + } + + if !isSequencer { + nodeConfig.BatchPoster.Enable = false + nodeConfig.DelayedSequencer.Enable = false + } + + AddValNodeIfNeeded(t, ctx, nodeConfig, true, "", "") + + execConfigFetcher := func() *gethexec.Config { return execConfig } + execNode, err := gethexec.CreateExecutionNode(ctx, l2stack, l2chainDb, l2blockchain, l1client, execConfigFetcher) + Require(t, err) + + parentChainId, err := l1client.ChainID(ctx) + Require(t, err) + currentNode, err = arbnode.CreateNode( + ctx, l2stack, execNode, l2arbDb, NewFetcherFromConfig(nodeConfig), l2blockchain.Config(), l1client, + addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, dataSigner, fatalErrChan, parentChainId, + nil, // Blob reader. + ) + Require(t, err) + + l2client = ClientForStack(t, l2stack) + + StartWatchChanErr(t, ctx, fatalErrChan, currentNode) + + opts := l1info.GetDefaultTransactOpts("Asserter", ctx) + dp, err := arbnode.StakerDataposter( + ctx, + rawdb.NewTable(l2arbDb, storage.StakerPrefix), + currentNode.L1Reader, + &opts, + NewFetcherFromConfig(nodeConfig), + currentNode.SyncMonitor, + parentChainId, + ) + Require(t, err) + assertionChainBindings, err := solimpl.NewAssertionChain( + ctx, + addresses.Rollup, + chalManagerAddr, + &opts, + butil.NewBackendWrapper(l1client, rpc.LatestBlockNumber), + bold.NewDataPosterTransactor(dp), + solimpl.WithRpcHeadBlockNumber(rpc.LatestBlockNumber), + ) + Require(t, err) + assertionChain = assertionChainBindings + + return +} + +func deployContractsOnly( + t *testing.T, + ctx context.Context, + l1info info, + backend *ethclient.Client, + chainId *big.Int, + rollupStackConf setup.RollupStackConfig, + stakeToken common.Address, +) *chaininfo.RollupAddresses { + l1TransactionOpts := l1info.GetDefaultTransactOpts("RollupOwner", ctx) + locator, err := server_common.NewMachineLocator("") + Require(t, err) + wasmModuleRoot := locator.LatestWasmModuleRoot() + + loserStakeEscrow := l1TransactionOpts.From + genesisExecutionState := rollupgen.AssertionState{ + GlobalState: rollupgen.GlobalState{}, + MachineStatus: 1, + EndHistoryRoot: [32]byte{}, + } + genesisInboxCount := big.NewInt(0) + anyTrustFastConfirmer := common.Address{} + miniStakeValues := []*big.Int{big.NewInt(5), big.NewInt(4), big.NewInt(3), big.NewInt(2), big.NewInt(1)} + cfg := challengetesting.GenerateRollupConfig( + false, + wasmModuleRoot, + l1TransactionOpts.From, + chainId, + loserStakeEscrow, + miniStakeValues, + stakeToken, + genesisExecutionState, + genesisInboxCount, + anyTrustFastConfirmer, + challengetesting.WithLayerZeroHeights(&protocol.LayerZeroHeights{ + BlockChallengeHeight: protocol.Height(blockChallengeLeafHeight), + BigStepChallengeHeight: protocol.Height(bigStepChallengeLeafHeight), + SmallStepChallengeHeight: protocol.Height(smallStepChallengeLeafHeight), + }), + challengetesting.WithNumBigStepLevels(uint8(3)), // TODO: Hardcoded. 
+ challengetesting.WithConfirmPeriodBlocks(uint64(120)), // TODO: Hardcoded. + ) + config, err := json.Marshal(chaininfo.ArbitrumDevTestChainConfig()) + Require(t, err) + cfg.ChainConfig = string(config) + addresses, err := setup.DeployFullRollupStack( + ctx, + butil.NewBackendWrapper(backend, rpc.LatestBlockNumber), + &l1TransactionOpts, + l1info.GetAddress("Sequencer"), + cfg, + rollupStackConf, + ) + Require(t, err) + + asserter := l1info.GetDefaultTransactOpts("Asserter", ctx) + evilAsserter := l1info.GetDefaultTransactOpts("EvilAsserter", ctx) + userLogic, err := rollupgen.NewRollupUserLogic(addresses.Rollup, backend) + Require(t, err) + chalManagerAddr, err := userLogic.ChallengeManager(&bind.CallOpts{}) + Require(t, err) + seed, ok := new(big.Int).SetString("1000", 10) + if !ok { + t.Fatal("not ok") + } + value, ok := new(big.Int).SetString("10000", 10) + if !ok { + t.Fatal(t, "could not set value") + } + tokenBindings, err := mocksgen.NewTestWETH9(stakeToken, backend) + Require(t, err) + tx, err := tokenBindings.TestWETH9Transactor.Transfer(&l1TransactionOpts, asserter.From, seed) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, backend, tx) + Require(t, err) + tx, err = tokenBindings.TestWETH9Transactor.Approve(&asserter, addresses.Rollup, value) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, backend, tx) + Require(t, err) + tx, err = tokenBindings.TestWETH9Transactor.Approve(&asserter, chalManagerAddr, value) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, backend, tx) + Require(t, err) + + tx, err = tokenBindings.TestWETH9Transactor.Transfer(&l1TransactionOpts, evilAsserter.From, seed) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, backend, tx) + Require(t, err) + tx, err = tokenBindings.TestWETH9Transactor.Approve(&evilAsserter, addresses.Rollup, value) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, backend, tx) + Require(t, err) + tx, err = tokenBindings.TestWETH9Transactor.Approve(&evilAsserter, chalManagerAddr, value) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, backend, tx) + Require(t, err) + + return &chaininfo.RollupAddresses{ + Bridge: addresses.Bridge, + Inbox: addresses.Inbox, + SequencerInbox: addresses.SequencerInbox, + Rollup: addresses.Rollup, + ValidatorUtils: addresses.ValidatorUtils, + ValidatorWalletCreator: addresses.ValidatorWalletCreator, + DeployedAt: addresses.DeployedAt, + UpgradeExecutor: addresses.UpgradeExecutor, + } +} + +func create2ndNodeWithConfigForBoldProtocol( + t *testing.T, + ctx context.Context, + first *arbnode.Node, + l1stack *node.Node, + l1info *BlockchainTestInfo, + l2InitData *statetransfer.ArbosInitializationInfo, + nodeConfig *arbnode.Config, + stackConfig *node.Config, + rollupStackConf setup.RollupStackConfig, + stakeTokenAddr common.Address, +) (*ethclient.Client, *arbnode.Node, *solimpl.AssertionChain) { + fatalErrChan := make(chan error, 10) + l1rpcClient := l1stack.Attach() + l1client := ethclient.NewClient(l1rpcClient) + firstExec, ok := first.Execution.(*gethexec.ExecutionNode) + if !ok { + Fatal(t, "not geth execution node") + } + chainConfig := firstExec.ArbInterface.BlockChain().Config() + addresses := deployContractsOnly(t, ctx, l1info, l1client, chainConfig.ChainID, rollupStackConf, stakeTokenAddr) + + l1info.SetContract("EvilBridge", addresses.Bridge) + l1info.SetContract("EvilSequencerInbox", addresses.SequencerInbox) + l1info.SetContract("EvilInbox", addresses.Inbox) + l1info.SetContract("EvilRollup", addresses.Rollup) + l1info.SetContract("EvilUpgradeExecutor", addresses.UpgradeExecutor) 
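+	// The second node is configured against its own "evil" rollup deployment,
+	// registered above under the Evil* contract names.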
+ + if nodeConfig == nil { + nodeConfig = arbnode.ConfigDefaultL1NonSequencerTest() + } + nodeConfig.ParentChainReader.OldHeaderTimeout = 10 * time.Minute + nodeConfig.BatchPoster.DataPoster.MaxMempoolTransactions = 18 + if stackConfig == nil { + stackConfig = testhelpers.CreateStackConfigForTest(t.TempDir()) + } + l2stack, err := node.New(stackConfig) + Require(t, err) + + l2chainDb, err := l2stack.OpenDatabase("chaindb", 0, 0, "", false) + Require(t, err) + l2arbDb, err := l2stack.OpenDatabase("arbdb", 0, 0, "", false) + Require(t, err) + + AddValNodeIfNeeded(t, ctx, nodeConfig, true, "", "") + + dataSigner := signature.DataSignerFromPrivateKey(l1info.GetInfoWithPrivKey("Sequencer").PrivateKey) + txOpts := l1info.GetDefaultTransactOpts("Sequencer", ctx) + + initReader := statetransfer.NewMemoryInitDataReader(l2InitData) + initMessage := getInitMessage(ctx, t, l1client, first.DeployInfo) + + execConfig := ExecConfigDefaultNonSequencerTest(t) + Require(t, execConfig.Validate()) + execConfig.Caching.StateScheme = rawdb.HashScheme + coreCacheConfig := gethexec.DefaultCacheConfigFor(l2stack, &execConfig.Caching) + l2blockchain, err := gethexec.WriteOrTestBlockChain(l2chainDb, coreCacheConfig, initReader, chainConfig, initMessage, execConfig.TxLookupLimit, 0) + Require(t, err) + + execConfigFetcher := func() *gethexec.Config { return execConfig } + execNode, err := gethexec.CreateExecutionNode(ctx, l2stack, l2chainDb, l2blockchain, l1client, execConfigFetcher) + Require(t, err) + l1ChainId, err := l1client.ChainID(ctx) + Require(t, err) + l2node, err := arbnode.CreateNode(ctx, l2stack, execNode, l2arbDb, NewFetcherFromConfig(nodeConfig), l2blockchain.Config(), l1client, addresses, &txOpts, &txOpts, dataSigner, fatalErrChan, l1ChainId, nil /* blob reader */) + Require(t, err) + + l2client := ClientForStack(t, l2stack) + + StartWatchChanErr(t, ctx, fatalErrChan, l2node) + + rollupUserLogic, err := rollupgen.NewRollupUserLogic(addresses.Rollup, l1client) + Require(t, err) + chalManagerAddr, err := rollupUserLogic.ChallengeManager(&bind.CallOpts{}) + Require(t, err) + evilOpts := l1info.GetDefaultTransactOpts("EvilAsserter", ctx) + dp, err := arbnode.StakerDataposter( + ctx, + rawdb.NewTable(l2arbDb, storage.StakerPrefix), + l2node.L1Reader, + &evilOpts, + NewFetcherFromConfig(nodeConfig), + l2node.SyncMonitor, + l1ChainId, + ) + Require(t, err) + assertionChain, err := solimpl.NewAssertionChain( + ctx, + addresses.Rollup, + chalManagerAddr, + &evilOpts, + butil.NewBackendWrapper(l1client, rpc.LatestBlockNumber), + bold.NewDataPosterTransactor(dp), + ) + Require(t, err) + + return l2client, l2node, assertionChain +} + +func makeBoldBatch( + t *testing.T, + l2Node *arbnode.Node, + l2Info *BlockchainTestInfo, + backend *ethclient.Client, + sequencer *bind.TransactOpts, + seqInbox *bridgegen.SequencerInbox, + seqInboxAddr common.Address, + numMessages, + divergeAtIndex int64, +) { + ctx := context.Background() + + batchBuffer := bytes.NewBuffer([]byte{}) + for i := int64(0); i < numMessages; i++ { + value := i + if i == divergeAtIndex { + value++ + } + err := writeTxToBatchBold(batchBuffer, l2Info.PrepareTx("Owner", "Destination", 1000000, big.NewInt(value), []byte{})) + Require(t, err) + } + compressed, err := arbcompress.CompressWell(batchBuffer.Bytes()) + Require(t, err) + message := append([]byte{0}, compressed...) 
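+	// The leading zero byte is the brotli header byte, marking the payload as a compressed batch.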
+ + seqNum := new(big.Int).Lsh(common.Big1, 256) + seqNum.Sub(seqNum, common.Big1) + tx, err := seqInbox.AddSequencerL2BatchFromOrigin8f111f3c(sequencer, seqNum, message, big.NewInt(1), common.Address{}, big.NewInt(0), big.NewInt(0)) + Require(t, err) + receipt, err := EnsureTxSucceeded(ctx, backend, tx) + Require(t, err) + + nodeSeqInbox, err := arbnode.NewSequencerInbox(backend, seqInboxAddr, 0) + Require(t, err) + batches, err := nodeSeqInbox.LookupBatchesInRange(ctx, receipt.BlockNumber, receipt.BlockNumber) + Require(t, err) + if len(batches) == 0 { + Fatal(t, "batch not found after AddSequencerL2BatchFromOrigin") + } + err = l2Node.InboxTracker.AddSequencerBatches(ctx, backend, batches) + Require(t, err) + batchMetaData, err := l2Node.InboxTracker.GetBatchMetadata(batches[0].SequenceNumber) + log.Info("Batch metadata", "md", batchMetaData) + Require(t, err, "failed to get batch metadata after adding batch:") +} + +func writeTxToBatchBold(writer io.Writer, tx *types.Transaction) error { + txData, err := tx.MarshalBinary() + if err != nil { + return err + } + var segment []byte + segment = append(segment, arbstate.BatchSegmentKindL2Message) + segment = append(segment, arbos.L2MessageKind_SignedTx) + segment = append(segment, txData...) + err = rlp.Encode(writer, segment) + return err +} diff --git a/system_tests/bold_new_challenge_test.go b/system_tests/bold_new_challenge_test.go new file mode 100644 index 0000000000..ad6e44bc71 --- /dev/null +++ b/system_tests/bold_new_challenge_test.go @@ -0,0 +1,358 @@ +// Copyright 2024, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +//go:build challengetest && !race + +package arbtest + +import ( + "context" + "fmt" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rpc" + protocol "github.com/offchainlabs/bold/chain-abstraction" + solimpl "github.com/offchainlabs/bold/chain-abstraction/sol-implementation" + challengemanager "github.com/offchainlabs/bold/challenge-manager" + modes "github.com/offchainlabs/bold/challenge-manager/types" + "github.com/offchainlabs/bold/containers/option" + l2stateprovider "github.com/offchainlabs/bold/layer2-state-provider" + "github.com/offchainlabs/bold/solgen/go/challengeV2gen" + "github.com/offchainlabs/bold/solgen/go/mocksgen" + "github.com/offchainlabs/bold/solgen/go/rollupgen" + "github.com/offchainlabs/bold/state-commitments/history" + butil "github.com/offchainlabs/bold/util" + "github.com/offchainlabs/nitro/arbnode" + "github.com/offchainlabs/nitro/arbnode/dataposter/storage" + "github.com/offchainlabs/nitro/staker/bold" +) + +type incorrectBlockStateProvider struct { + honest BoldStateProviderInterface + chain protocol.AssertionChain + wrongAtFirstVirtual bool + wrongAtBlockHeight uint64 + honestMachineHash common.Hash + evilMachineHash common.Hash +} + +func (s *incorrectBlockStateProvider) ExecutionStateAfterPreviousState( + ctx context.Context, + maxInboxCount uint64, + previousGlobalState protocol.GoGlobalState, +) (*protocol.ExecutionState, error) { + maxNumberOfBlocks := s.chain.SpecChallengeManager().LayerZeroHeights().BlockChallengeHeight.Uint64() + executionState, err := s.honest.ExecutionStateAfterPreviousState(ctx, maxInboxCount, previousGlobalState) + if err != nil { + return nil, err + } + 
evilStates, err := s.L2MessageStatesUpTo(ctx, previousGlobalState, l2stateprovider.Batch(maxInboxCount), option.Some(l2stateprovider.Height(maxNumberOfBlocks)))
+	if err != nil {
+		return nil, err
+	}
+	historyCommit, err := history.NewCommitment(evilStates, maxNumberOfBlocks+1)
+	if err != nil {
+		return nil, err
+	}
+	executionState.EndHistoryRoot = historyCommit.Merkle
+	return executionState, nil
+}
+
+func (s *incorrectBlockStateProvider) L2MessageStatesUpTo(
+	ctx context.Context,
+	fromState protocol.GoGlobalState,
+	batchLimit l2stateprovider.Batch,
+	toHeight option.Option[l2stateprovider.Height],
+) ([]common.Hash, error) {
+	states, err := s.honest.L2MessageStatesUpTo(ctx, fromState, batchLimit, toHeight)
+	if err != nil {
+		return nil, err
+	}
+	// Double check that virtual blocks aren't being enumerated by the honest impl
+	for i := len(states) - 1; i >= 1; i-- {
+		if states[i] == states[i-1] {
+			panic("Virtual block found repeated in honest impl (test case currently doesn't accommodate this)")
+		} else {
+			break
+		}
+	}
+	if s.wrongAtFirstVirtual && (toHeight.IsNone() || uint64(len(states)) < uint64(toHeight.Unwrap())) {
+		// We've found the first virtual block, now let's make it wrong
+		s.wrongAtFirstVirtual = false
+		s.wrongAtBlockHeight = uint64(len(states))
+	}
+	if toHeight.IsNone() || uint64(toHeight.Unwrap()) >= s.wrongAtBlockHeight {
+		for uint64(len(states)) <= s.wrongAtBlockHeight {
+			states = append(states, states[len(states)-1])
+		}
+		s.honestMachineHash = states[s.wrongAtBlockHeight]
+		states[s.wrongAtBlockHeight][0] ^= 0xFF
+		s.evilMachineHash = states[s.wrongAtBlockHeight]
+		if uint64(len(states)) == s.wrongAtBlockHeight+1 && (toHeight.IsNone() || uint64(len(states)) < uint64(toHeight.Unwrap())) {
+			// don't break the end inclusion proof
+			states = append(states, s.honestMachineHash)
+		}
+	}
+	return states, nil
+}
+
+func (s *incorrectBlockStateProvider) CollectMachineHashes(
+	ctx context.Context, cfg *l2stateprovider.HashCollectorConfig,
+) ([]common.Hash, error) {
+	honestHashes, err := s.honest.CollectMachineHashes(ctx, cfg)
+	if err != nil {
+		return nil, err
+	}
+	if uint64(cfg.BlockChallengeHeight)+1 == s.wrongAtBlockHeight {
+		if uint64(len(honestHashes)) < cfg.NumDesiredHashes && honestHashes[len(honestHashes)-1] == s.honestMachineHash {
+			honestHashes = append(honestHashes, s.evilMachineHash)
+		}
+	} else if uint64(cfg.BlockChallengeHeight) >= s.wrongAtBlockHeight {
+		panic(fmt.Sprintf("challenge occurred at block height %v at or after wrongAtBlockHeight %v", cfg.BlockChallengeHeight, s.wrongAtBlockHeight))
+	}
+	return honestHashes, nil
+}
+
+func (s *incorrectBlockStateProvider) CollectProof(
+	ctx context.Context,
+	assertionMetadata *l2stateprovider.AssociatedAssertionMetadata,
+	blockChallengeHeight l2stateprovider.Height,
+	machineIndex l2stateprovider.OpcodeIndex,
+) ([]byte, error) {
+	return s.honest.CollectProof(ctx, assertionMetadata, blockChallengeHeight, machineIndex)
+}
+
+func testChallengeProtocolBOLDVirtualBlocks(t *testing.T, wrongAtFirstVirtual bool) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	builder := NewNodeBuilder(ctx).DefaultConfig(t, true).WithBoldDeployment()
+
+	// Block validation requires db hash scheme
+	builder.execConfig.Caching.StateScheme = rawdb.HashScheme
+	builder.nodeConfig.BlockValidator.Enable = true
+	builder.valnodeConfig.UseJit = false
+
+	cleanup := builder.Build(t)
+	defer cleanup()
+
+	evilNodeConfig := arbnode.ConfigDefaultL1NonSequencerTest()
+	
evilNodeConfig.BlockValidator.Enable = true + evilNode, cleanupEvilNode := builder.Build2ndNode(t, &SecondNodeParams{ + nodeConfig: evilNodeConfig, + }) + defer cleanupEvilNode() + + go keepChainMoving(t, ctx, builder.L1Info, builder.L1.Client) + + builder.L1Info.GenerateAccount("HonestAsserter") + fundBoldStaker(t, ctx, builder, "HonestAsserter") + builder.L1Info.GenerateAccount("EvilAsserter") + fundBoldStaker(t, ctx, builder, "EvilAsserter") + + assertionChain, cleanupHonestChallengeManager := startBoldChallengeManager(t, ctx, builder, builder.L2, "HonestAsserter", nil) + defer cleanupHonestChallengeManager() + + _, cleanupEvilChallengeManager := startBoldChallengeManager(t, ctx, builder, evilNode, "EvilAsserter", func(stateManager BoldStateProviderInterface) BoldStateProviderInterface { + p := &incorrectBlockStateProvider{ + honest: stateManager, + chain: assertionChain, + wrongAtFirstVirtual: wrongAtFirstVirtual, + } + if !wrongAtFirstVirtual { + p.wrongAtBlockHeight = blockChallengeLeafHeight - 2 + } + return p + }) + defer cleanupEvilChallengeManager() + + TransferBalance(t, "Faucet", "Faucet", common.Big0, builder.L2Info, builder.L2.Client, ctx) + + // Everything's setup, now just wait for the challenge to complete and ensure the honest party won + + chalManager := assertionChain.SpecChallengeManager() + filterer, err := challengeV2gen.NewEdgeChallengeManagerFilterer(chalManager.Address(), builder.L1.Client) + Require(t, err) + + fromBlock := uint64(0) + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + for { + select { + case <-ticker.C: + latestBlock, err := builder.L1.Client.HeaderByNumber(ctx, nil) + Require(t, err) + toBlock := latestBlock.Number.Uint64() + if fromBlock == toBlock { + continue + } + filterOpts := &bind.FilterOpts{ + Start: fromBlock, + End: &toBlock, + Context: ctx, + } + it, err := filterer.FilterEdgeConfirmedByOneStepProof(filterOpts, nil, nil) + Require(t, err) + for it.Next() { + if it.Error() != nil { + t.Fatalf("Error in filter iterator: %v", it.Error()) + } + t.Log("Received event of OSP confirmation!") + tx, _, err := builder.L1.Client.TransactionByHash(ctx, it.Event.Raw.TxHash) + Require(t, err) + signer := types.NewCancunSigner(tx.ChainId()) + address, err := signer.Sender(tx) + Require(t, err) + if address == builder.L1Info.GetAddress("HonestAsserter") { + t.Log("Honest party won OSP, impossible for evil party to win if honest party continues") + Require(t, it.Close()) + return + } + } + fromBlock = toBlock + case <-ctx.Done(): + return + } + } +} + +func fundBoldStaker(t *testing.T, ctx context.Context, builder *NodeBuilder, name string) { + balance := big.NewInt(params.Ether) + balance.Mul(balance, big.NewInt(100)) + TransferBalance(t, "Faucet", name, balance, builder.L1Info, builder.L1.Client, ctx) + + rollupUserLogic, err := rollupgen.NewRollupUserLogic(builder.addresses.Rollup, builder.L1.Client) + Require(t, err) + stakeToken, err := rollupUserLogic.StakeToken(&bind.CallOpts{Context: ctx}) + Require(t, err) + stakeTokenWeth, err := mocksgen.NewTestWETH9(stakeToken, builder.L1.Client) + Require(t, err) + + txOpts := builder.L1Info.GetDefaultTransactOpts(name, ctx) + + txOpts.Value = big.NewInt(params.Ether) + tx, err := stakeTokenWeth.Deposit(&txOpts) + Require(t, err) + _, err = builder.L1.EnsureTxSucceeded(tx) + Require(t, err) + txOpts.Value = nil + + tx, err = stakeTokenWeth.Approve(&txOpts, builder.addresses.Rollup, balance) + _, err = builder.L1.EnsureTxSucceeded(tx) + Require(t, err) + + challengeManager, err := 
rollupUserLogic.ChallengeManager(&bind.CallOpts{Context: ctx}) + Require(t, err) + tx, err = stakeTokenWeth.Approve(&txOpts, challengeManager, balance) + _, err = builder.L1.EnsureTxSucceeded(tx) + Require(t, err) +} + +func TestChallengeProtocolBOLDNearLastVirtualBlock(t *testing.T) { + testChallengeProtocolBOLDVirtualBlocks(t, false) +} + +func TestChallengeProtocolBOLDFirstVirtualBlock(t *testing.T) { + testChallengeProtocolBOLDVirtualBlocks(t, true) +} + +type BoldStateProviderInterface interface { + l2stateprovider.L2MessageStateCollector + l2stateprovider.MachineHashCollector + l2stateprovider.ProofCollector + l2stateprovider.ExecutionProvider +} + +func startBoldChallengeManager(t *testing.T, ctx context.Context, builder *NodeBuilder, node *TestClient, addressName string, mockStateProvider func(BoldStateProviderInterface) BoldStateProviderInterface) (*solimpl.AssertionChain, func()) { + if !builder.deployBold { + t.Fatal("bold deployment not enabled") + } + + var stateManager BoldStateProviderInterface + var err error + cacheDir := t.TempDir() + stateManager, err = bold.NewBOLDStateProvider( + node.ConsensusNode.BlockValidator, + node.ConsensusNode.StatelessBlockValidator, + l2stateprovider.Height(blockChallengeLeafHeight), + &bold.StateProviderConfig{ + ValidatorName: addressName, + MachineLeavesCachePath: cacheDir, + CheckBatchFinality: false, + }, + cacheDir, + ) + Require(t, err) + + if mockStateProvider != nil { + stateManager = mockStateProvider(stateManager) + } + + provider := l2stateprovider.NewHistoryCommitmentProvider( + stateManager, + stateManager, + stateManager, + []l2stateprovider.Height{ + l2stateprovider.Height(blockChallengeLeafHeight), + l2stateprovider.Height(bigStepChallengeLeafHeight), + l2stateprovider.Height(bigStepChallengeLeafHeight), + l2stateprovider.Height(bigStepChallengeLeafHeight), + l2stateprovider.Height(smallStepChallengeLeafHeight), + }, + stateManager, + nil, // Api db + ) + + rollupUserLogic, err := rollupgen.NewRollupUserLogic(builder.addresses.Rollup, builder.L1.Client) + Require(t, err) + chalManagerAddr, err := rollupUserLogic.ChallengeManager(&bind.CallOpts{}) + Require(t, err) + + txOpts := builder.L1Info.GetDefaultTransactOpts(addressName, ctx) + + dp, err := arbnode.StakerDataposter( + ctx, + rawdb.NewTable(node.ConsensusNode.ArbDB, storage.StakerPrefix), + node.ConsensusNode.L1Reader, + &txOpts, + NewFetcherFromConfig(builder.nodeConfig), + node.ConsensusNode.SyncMonitor, + builder.L1Info.Signer.ChainID(), + ) + Require(t, err) + + assertionChain, err := solimpl.NewAssertionChain( + ctx, + builder.addresses.Rollup, + chalManagerAddr, + &txOpts, + butil.NewBackendWrapper(builder.L1.Client, rpc.LatestBlockNumber), + bold.NewDataPosterTransactor(dp), + ) + Require(t, err) + + stackOpts := []challengemanager.StackOpt{ + challengemanager.StackWithName(addressName), + challengemanager.StackWithMode(modes.MakeMode), + challengemanager.StackWithPostingInterval(time.Second * 3), + challengemanager.StackWithPollingInterval(time.Second), + challengemanager.StackWithAverageBlockCreationTime(time.Second), + } + + challengeManager, err := challengemanager.NewChallengeStack( + assertionChain, + provider, + stackOpts..., + ) + Require(t, err) + + challengeManager.Start(ctx) + return assertionChain, challengeManager.StopAndWait +} diff --git a/system_tests/bold_state_provider_test.go b/system_tests/bold_state_provider_test.go new file mode 100644 index 0000000000..0ecce5ba64 --- /dev/null +++ b/system_tests/bold_state_provider_test.go @@ -0,0 +1,419 @@ 
+// Copyright 2023, Offchain Labs, Inc.
+// For license information, see https://github.com/offchainlabs/bold/blob/main/LICENSE
+
+//go:build challengetest && !race
+
+package arbtest
+
+import (
+	"context"
+	"errors"
+	"math/big"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/ethereum/go-ethereum/accounts/abi"
+	"github.com/ethereum/go-ethereum/accounts/abi/bind"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/ethclient"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/node"
+	"github.com/ethereum/go-ethereum/params"
+
+	"github.com/offchainlabs/nitro/arbnode"
+	"github.com/offchainlabs/nitro/arbos/l2pricing"
+	"github.com/offchainlabs/nitro/cmd/chaininfo"
+	"github.com/offchainlabs/nitro/staker"
+	"github.com/offchainlabs/nitro/staker/bold"
+	"github.com/offchainlabs/nitro/util"
+	"github.com/offchainlabs/nitro/validator/valnode"
+
+	protocol "github.com/offchainlabs/bold/chain-abstraction"
+	"github.com/offchainlabs/bold/containers/option"
+	l2stateprovider "github.com/offchainlabs/bold/layer2-state-provider"
+	"github.com/offchainlabs/bold/solgen/go/bridgegen"
+	"github.com/offchainlabs/bold/solgen/go/mocksgen"
+	prefixproofs "github.com/offchainlabs/bold/state-commitments/prefix-proofs"
+	mockmanager "github.com/offchainlabs/bold/testing/mocks/state-provider"
+	"github.com/offchainlabs/bold/testing/setup"
+)
+
+func TestChallengeProtocolBOLD_Bisections(t *testing.T) {
+	t.Parallel()
+	ctx, cancelCtx := context.WithCancel(context.Background())
+	defer cancelCtx()
+	l2node, l1info, l2info, l1stack, l1client, stateManager, blockValidator := setupBoldStateProvider(t, ctx, 1<<5)
+	defer requireClose(t, l1stack)
+	defer l2node.StopAndWait()
+	l2info.GenerateAccount("Destination")
+	sequencerTxOpts := l1info.GetDefaultTransactOpts("Sequencer", ctx)
+
+	seqInbox := l1info.GetAddress("SequencerInbox")
+	seqInboxBinding, err := bridgegen.NewSequencerInbox(seqInbox, l1client)
+	Require(t, err)
+
+	seqInboxABI, err := abi.JSON(strings.NewReader(bridgegen.SequencerInboxABI))
+	Require(t, err)
+
+	honestUpgradeExec, err := mocksgen.NewUpgradeExecutorMock(l1info.GetAddress("UpgradeExecutor"), l1client)
+	Require(t, err)
+	data, err := seqInboxABI.Pack(
+		"setIsBatchPoster",
+		sequencerTxOpts.From,
+		true,
+	)
+	Require(t, err)
+	honestRollupOwnerOpts := l1info.GetDefaultTransactOpts("RollupOwner", ctx)
+	_, err = honestUpgradeExec.ExecuteCall(&honestRollupOwnerOpts, seqInbox, data)
+	Require(t, err)
+
+	// Make two batches. One with 5 messages, and one with 10 messages.
+	numMessagesPerBatch := int64(5)
+	divergeAt := int64(-1) // No divergence.
+ makeBoldBatch(t, l2node, l2info, l1client, &sequencerTxOpts, seqInboxBinding, seqInbox, numMessagesPerBatch, divergeAt) + numMessagesPerBatch = int64(10) + makeBoldBatch(t, l2node, l2info, l1client, &sequencerTxOpts, seqInboxBinding, seqInbox, numMessagesPerBatch, divergeAt) + + bridgeBinding, err := bridgegen.NewBridge(l1info.GetAddress("Bridge"), l1client) + Require(t, err) + totalBatchesBig, err := bridgeBinding.SequencerMessageCount(&bind.CallOpts{Context: ctx}) + Require(t, err) + totalBatches := totalBatchesBig.Uint64() + totalMessageCount, err := l2node.InboxTracker.GetBatchMessageCount(totalBatches - 1) + Require(t, err) + log.Info("Status", "totalBatches", totalBatches, "totalMessageCount", totalMessageCount) + t.Logf("totalBatches: %v, totalMessageCount: %v\n", totalBatches, totalMessageCount) + + // Wait until the validator has validated the batches. + for { + time.Sleep(time.Millisecond * 100) + lastInfo, err := blockValidator.ReadLastValidatedInfo() + if lastInfo == nil || err != nil { + continue + } + if lastInfo.GlobalState.Batch >= totalBatches { + break + } + batchMsgCount, err := l2node.InboxTracker.GetBatchMessageCount(lastInfo.GlobalState.Batch) + if err != nil { + continue + } + if batchMsgCount >= totalMessageCount { + break + } + } + + historyCommitter := l2stateprovider.NewHistoryCommitmentProvider( + stateManager, + stateManager, + stateManager, []l2stateprovider.Height{ + 1 << 5, + 1 << 5, + 1 << 5, + }, + stateManager, + nil, // api db + ) + bisectionHeight := l2stateprovider.Height(16) + request := &l2stateprovider.HistoryCommitmentRequest{ + AssertionMetadata: &l2stateprovider.AssociatedAssertionMetadata{ + FromState: protocol.GoGlobalState{ + Batch: 1, + }, + BatchLimit: 3, + WasmModuleRoot: common.Hash{}, + }, + UpperChallengeOriginHeights: []l2stateprovider.Height{}, + UpToHeight: option.Some(bisectionHeight), + } + bisectionCommitment, err := historyCommitter.HistoryCommitment(ctx, request) + Require(t, err) + + request.UpToHeight = option.None[l2stateprovider.Height]() + packedProof, err := historyCommitter.PrefixProof(ctx, request, bisectionHeight) + Require(t, err) + + dataItem, err := mockmanager.ProofArgs.Unpack(packedProof) + Require(t, err) + preExpansion, ok := dataItem[0].([][32]byte) + if !ok { + Fatal(t, "wrong type") + } + + hashes := make([]common.Hash, len(preExpansion)) + for i, h := range preExpansion { + hash := h + hashes[i] = hash + } + + computed, err := prefixproofs.Root(hashes) + Require(t, err) + if computed != bisectionCommitment.Merkle { + Fatal(t, "wrong commitment") + } +} + +func TestChallengeProtocolBOLD_StateProvider(t *testing.T) { + // t.Parallel() + ctx, cancelCtx := context.WithCancel(context.Background()) + defer cancelCtx() + maxNumBlocks := uint64(1 << 14) + l2node, l1info, l2info, l1stack, l1client, stateManager, blockValidator := setupBoldStateProvider(t, ctx, maxNumBlocks) + defer requireClose(t, l1stack) + defer l2node.StopAndWait() + l2info.GenerateAccount("Destination") + sequencerTxOpts := l1info.GetDefaultTransactOpts("Sequencer", ctx) + + seqInbox := l1info.GetAddress("SequencerInbox") + seqInboxBinding, err := bridgegen.NewSequencerInbox(seqInbox, l1client) + Require(t, err) + + seqInboxABI, err := abi.JSON(strings.NewReader(bridgegen.SequencerInboxABI)) + Require(t, err) + + honestUpgradeExec, err := mocksgen.NewUpgradeExecutorMock(l1info.GetAddress("UpgradeExecutor"), l1client) + Require(t, err) + data, err := seqInboxABI.Pack( + "setIsBatchPoster", + sequencerTxOpts.From, + true, + ) + Require(t, err) + 
honestRollupOwnerOpts := l1info.GetDefaultTransactOpts("RollupOwner", ctx) + _, err = honestUpgradeExec.ExecuteCall(&honestRollupOwnerOpts, seqInbox, data) + Require(t, err) + + // We will make two batches, with 5 messages in each batch. + numMessagesPerBatch := int64(5) + divergeAt := int64(-1) // No divergence. + makeBoldBatch(t, l2node, l2info, l1client, &sequencerTxOpts, seqInboxBinding, seqInbox, numMessagesPerBatch, divergeAt) + makeBoldBatch(t, l2node, l2info, l1client, &sequencerTxOpts, seqInboxBinding, seqInbox, numMessagesPerBatch, divergeAt) + + bridgeBinding, err := bridgegen.NewBridge(l1info.GetAddress("Bridge"), l1client) + Require(t, err) + totalBatchesBig, err := bridgeBinding.SequencerMessageCount(&bind.CallOpts{Context: ctx}) + Require(t, err) + totalBatches := totalBatchesBig.Uint64() + totalMessageCount, err := l2node.InboxTracker.GetBatchMessageCount(totalBatches - 1) + Require(t, err) + + // Wait until the validator has validated the batches. + for { + time.Sleep(time.Millisecond * 100) + lastInfo, err := blockValidator.ReadLastValidatedInfo() + if lastInfo == nil || err != nil { + continue + } + if lastInfo.GlobalState.Batch >= totalBatches { + break + } + } + + t.Run("StatesInBatchRange", func(t *testing.T) { + toBatch := uint64(3) + toHeight := l2stateprovider.Height(10) + fromState := protocol.GoGlobalState{ + Batch: 1, + } + stateRoots, states, err := stateManager.StatesInBatchRange(ctx, fromState, toBatch, toHeight) + Require(t, err) + want := 11 + got := len(stateRoots) + + if got != want { + t.Errorf("len(stateRoots): got %v, want %v", got, want) + } + firstState := states[0] + if firstState.Batch != 1 && firstState.PosInBatch != 0 { + Fatal(t, "wrong first state") + } + lastState := states[len(states)-1] + if lastState.Batch != 3 && lastState.PosInBatch != 0 { + Fatal(t, "wrong last state") + } + }) + t.Run("AgreesWithExecutionState", func(t *testing.T) { + // Non-zero position in batch should fail. + _, err = stateManager.ExecutionStateAfterPreviousState( + ctx, + 0, + protocol.GoGlobalState{ + Batch: 0, + PosInBatch: 1, + }, + ) + if err == nil { + Fatal(t, "should not agree with execution state") + } + if !strings.Contains(err.Error(), "max inbox count cannot be zero") { + Fatal(t, "wrong error message") + } + + // Always agrees with genesis. + genesis, err := stateManager.ExecutionStateAfterPreviousState( + ctx, + 1, + protocol.GoGlobalState{ + Batch: 0, + PosInBatch: 0, + }, + ) + Require(t, err) + if genesis == nil { + Fatal(t, "genesis should not be nil") + } + + // Always agrees with the init message. + first, err := stateManager.ExecutionStateAfterPreviousState( + ctx, + 2, + genesis.GlobalState, + ) + Require(t, err) + if first == nil { + Fatal(t, "genesis should not be nil") + } + + // Chain catching up if it has not seen batch 10. + _, err = stateManager.ExecutionStateAfterPreviousState( + ctx, + 10, + first.GlobalState, + ) + if err == nil { + Fatal(t, "should not agree with execution state") + } + if !errors.Is(err, l2stateprovider.ErrChainCatchingUp) { + Fatal(t, "wrong error") + } + + // Check if we agree with the last posted batch to the inbox. 
+ result, err := l2node.TxStreamer.ResultAtCount(totalMessageCount) + Require(t, err) + _ = result + + state := protocol.GoGlobalState{ + BlockHash: result.BlockHash, + SendRoot: result.SendRoot, + Batch: 3, + } + got, err := stateManager.ExecutionStateAfterPreviousState(ctx, 3, first.GlobalState) + Require(t, err) + if state.Batch != got.GlobalState.Batch { + Fatal(t, "wrong batch") + } + if state.SendRoot != got.GlobalState.SendRoot { + Fatal(t, "wrong send root") + } + if state.BlockHash != got.GlobalState.BlockHash { + Fatal(t, "wrong batch") + } + + // See if we agree with one batch immediately after that and see that we fail with + // "ErrChainCatchingUp". + _, err = stateManager.ExecutionStateAfterPreviousState( + ctx, + state.Batch+1, + got.GlobalState, + ) + if err == nil { + Fatal(t, "should not agree with execution state") + } + if !errors.Is(err, l2stateprovider.ErrChainCatchingUp) { + Fatal(t, "wrong error") + } + }) + t.Run("ExecutionStateAfterBatchCount", func(t *testing.T) { + _, err = stateManager.ExecutionStateAfterPreviousState(ctx, 0, protocol.GoGlobalState{}) + if err == nil { + Fatal(t, "should have failed") + } + if !strings.Contains(err.Error(), "max inbox count cannot be zero") { + Fatal(t, "wrong error message", err) + } + + genesis, err := stateManager.ExecutionStateAfterPreviousState(ctx, 1, protocol.GoGlobalState{}) + Require(t, err) + execState, err := stateManager.ExecutionStateAfterPreviousState(ctx, totalBatches, genesis.GlobalState) + Require(t, err) + if execState == nil { + Fatal(t, "should not be nil") + } + }) +} + +func setupBoldStateProvider(t *testing.T, ctx context.Context, blockChallengeHeight uint64) (*arbnode.Node, *BlockchainTestInfo, *BlockchainTestInfo, *node.Node, *ethclient.Client, *bold.BOLDStateProvider, *staker.BlockValidator) { + var transferGas = util.NormalizeL2GasForL1GasInitial(800_000, params.GWei) // include room for aggregator L1 costs + l2chainConfig := chaininfo.ArbitrumDevTestChainConfig() + l2info := NewBlockChainTestInfo( + t, + types.NewArbitrumSigner(types.NewLondonSigner(l2chainConfig.ChainID)), big.NewInt(l2pricing.InitialBaseFeeWei*2), + transferGas, + ) + ownerBal := big.NewInt(params.Ether) + ownerBal.Mul(ownerBal, big.NewInt(1_000_000)) + l2info.GenerateGenesisAccount("Owner", ownerBal) + sconf := setup.RollupStackConfig{ + UseMockBridge: false, + UseMockOneStepProver: false, + MinimumAssertionPeriod: 0, + } + + _, l2node, _, _, l1info, _, l1client, l1stack, _, _ := createTestNodeOnL1ForBoldProtocol( + t, + ctx, + false, + nil, + l2chainConfig, + nil, + sconf, + l2info, + ) + + valnode.TestValidationConfig.UseJit = false + _, valStack := createTestValidationNode(t, ctx, &valnode.TestValidationConfig) + blockValidatorConfig := staker.TestBlockValidatorConfig + + stateless, err := staker.NewStatelessBlockValidator( + l2node.InboxReader, + l2node.InboxTracker, + l2node.TxStreamer, + l2node.Execution, + l2node.ArbDB, + nil, + StaticFetcherFrom(t, &blockValidatorConfig), + valStack, + ) + Require(t, err) + Require(t, stateless.Start(ctx)) + + blockValidator, err := staker.NewBlockValidator( + stateless, + l2node.InboxTracker, + l2node.TxStreamer, + StaticFetcherFrom(t, &blockValidatorConfig), + nil, + ) + Require(t, err) + Require(t, blockValidator.Initialize(ctx)) + Require(t, blockValidator.Start(ctx)) + + dir := t.TempDir() + stateManager, err := bold.NewBOLDStateProvider( + blockValidator, + stateless, + l2stateprovider.Height(blockChallengeHeight), + &bold.StateProviderConfig{ + ValidatorName: "", + 
MachineLeavesCachePath: dir,
+			CheckBatchFinality:     false,
+		},
+		dir,
+	)
+	Require(t, err)
+
+	Require(t, l2node.Start(ctx))
+	return l2node, l1info, l2info, l1stack, l1client, stateManager, blockValidator
+}
diff --git a/system_tests/common_test.go b/system_tests/common_test.go
index 277c97858a..346a5feec4 100644
--- a/system_tests/common_test.go
+++ b/system_tests/common_test.go
@@ -51,6 +51,10 @@ import (
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/rpc"
 
+	boldMocksgen "github.com/offchainlabs/bold/solgen/go/mocksgen"
+	"github.com/offchainlabs/bold/solgen/go/rollupgen"
+	"github.com/offchainlabs/bold/testing/setup"
+	butil "github.com/offchainlabs/bold/util"
 	"github.com/offchainlabs/nitro/arbnode"
 	"github.com/offchainlabs/nitro/arbos"
 	"github.com/offchainlabs/nitro/arbos/arbostypes"
@@ -79,6 +83,7 @@ import (
 	"github.com/offchainlabs/nitro/util/testhelpers/github"
 	"github.com/offchainlabs/nitro/validator/inputs"
 	"github.com/offchainlabs/nitro/validator/server_api"
+	"github.com/offchainlabs/nitro/validator/server_arb"
 	"github.com/offchainlabs/nitro/validator/server_common"
 	"github.com/offchainlabs/nitro/validator/valnode"
 	rediscons "github.com/offchainlabs/nitro/validator/valnode/redis"
@@ -237,6 +242,7 @@ type NodeBuilder struct {
 	l2StackConfig *node.Config
 	valnodeConfig *valnode.Config
 	l3Config      *NitroConfig
+	deployBold    bool
 	L1Info        info
 	L2Info        info
 	L3Info        info
@@ -252,6 +258,7 @@ type NodeBuilder struct {
 	l3InitMessage           *arbostypes.ParsedInitMessage
 	withProdConfirmPeriodBlocks bool
 	wasmCacheTag            uint32
+	delayBufferThreshold uint64
 
 	// Created nodes
 	L1 *TestClient
@@ -345,6 +352,11 @@ func (b *NodeBuilder) WithProdConfirmPeriodBlocks() *NodeBuilder {
 	return b
 }
 
+func (b *NodeBuilder) WithBoldDeployment() *NodeBuilder {
+	b.deployBold = true
+	return b
+}
+
 func (b *NodeBuilder) WithWasmRootDir(wasmRootDir string) *NodeBuilder {
 	b.valnodeConfig.Wasm.RootPath = wasmRootDir
 	return b
@@ -364,6 +376,14 @@ func (b *NodeBuilder) WithStylusLongTermCache(enabled bool) *NodeBuilder {
 	return b
 }
 
+// WithDelayBuffer sets the delay-buffer threshold, which is the number of blocks the batch-poster
+// is allowed to delay a batch with a delayed message.
+// Setting the threshold to zero disables the delay buffer (default behaviour).
+func (b *NodeBuilder) WithDelayBuffer(threshold uint64) *NodeBuilder {
+	b.delayBufferThreshold = threshold
+	return b
+}
+
 func (b *NodeBuilder) Build(t *testing.T) func() {
 	b.CheckConfig(t)
 	if b.withL1 {
@@ -413,6 +433,8 @@ func (b *NodeBuilder) BuildL1(t *testing.T) {
 		locator.LatestWasmModuleRoot(),
 		b.withProdConfirmPeriodBlocks,
 		true,
+		b.deployBold,
+		b.delayBufferThreshold,
 	)
 	b.L1.cleanup = func() { requireClose(t, b.L1.Stack) }
 }
@@ -516,6 +538,8 @@ func (b *NodeBuilder) BuildL3OnL2(t *testing.T) func() {
 		locator.LatestWasmModuleRoot(),
 		b.l3Config.withProdConfirmPeriodBlocks,
 		false,
+		b.deployBold,
+		0,
 	)
 
 	b.L3 = buildOnParentChain(
@@ -873,6 +897,21 @@ func BridgeBalance(
 	return tx, res
 }
 
+// AdvanceL1 sends dummy transactions to L1 to create blocks.
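+// Each transaction mines a new L1 block, letting tests advance past block-count thresholds such as the delay buffer.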
+func AdvanceL1( + t *testing.T, + ctx context.Context, + l1client *ethclient.Client, + l1info *BlockchainTestInfo, + numBlocks int, +) { + for i := 0; i < numBlocks; i++ { + SendWaitTestTransactions(t, ctx, l1client, []*types.Transaction{ + l1info.PrepareTx("Faucet", "Faucet", 30000, big.NewInt(1e12), nil), + }) + } +} + func SendSignedTxesInBatchViaL1( t *testing.T, ctx context.Context, @@ -892,12 +931,7 @@ func SendSignedTxesInBatchViaL1( _, err = EnsureTxSucceeded(ctx, l1client, l1tx) Require(t, err) - // sending l1 messages creates l1 blocks.. make enough to get that delayed inbox message in - for i := 0; i < 30; i++ { - SendWaitTestTransactions(t, ctx, l1client, []*types.Transaction{ - l1info.PrepareTx("Faucet", "Faucet", 30000, big.NewInt(1e12), nil), - }) - } + AdvanceL1(t, ctx, l1client, l1info, 30) var receipts types.Receipts for _, tx := range delayedTxes { receipt, err := EnsureTxSucceeded(ctx, l2client, tx) @@ -944,12 +978,7 @@ func SendSignedTxViaL1( _, err = EnsureTxSucceeded(ctx, l1client, l1tx) Require(t, err) - // sending l1 messages creates l1 blocks.. make enough to get that delayed inbox message in - for i := 0; i < 30; i++ { - SendWaitTestTransactions(t, ctx, l1client, []*types.Transaction{ - l1info.PrepareTx("Faucet", "Faucet", 30000, big.NewInt(1e12), nil), - }) - } + AdvanceL1(t, ctx, l1client, l1info, 30) receipt, err := EnsureTxSucceeded(ctx, l2client, delayedTx) Require(t, err) return receipt @@ -995,12 +1024,7 @@ func SendUnsignedTxViaL1( _, err = EnsureTxSucceeded(ctx, l1client, l1tx) Require(t, err) - // sending l1 messages creates l1 blocks.. make enough to get that delayed inbox message in - for i := 0; i < 30; i++ { - SendWaitTestTransactions(t, ctx, l1client, []*types.Transaction{ - l1info.PrepareTx("Faucet", "Faucet", 30000, big.NewInt(1e12), nil), - }) - } + AdvanceL1(t, ctx, l1client, l1info, 30) receipt, err := EnsureTxSucceeded(ctx, l2client, unsignedTx) Require(t, err) return receipt @@ -1079,7 +1103,7 @@ func destroyRedisGroup(ctx context.Context, t *testing.T, streamName string, cli } } -func createTestValidationNode(t *testing.T, ctx context.Context, config *valnode.Config) (*valnode.ValidationNode, *node.Node) { +func createTestValidationNode(t *testing.T, ctx context.Context, config *valnode.Config, spawnerOpts ...server_arb.SpawnerOption) (*valnode.ValidationNode, *node.Node) { stackConf := node.DefaultConfig stackConf.HTTPPort = 0 stackConf.DataDir = "" @@ -1096,7 +1120,7 @@ func createTestValidationNode(t *testing.T, ctx context.Context, config *valnode Require(t, err) configFetcher := func() *valnode.Config { return config } - valnode, err := valnode.CreateValidationNode(configFetcher, stack, nil) + valnode, err := valnode.CreateValidationNode(configFetcher, stack, nil, spawnerOpts...) 
Require(t, err) err = stack.Start() @@ -1247,6 +1271,12 @@ func getInitMessage(ctx context.Context, t *testing.T, parentChainClient *ethcli return initMessage } +var ( + blockChallengeLeafHeight = uint64(1 << 5) // 32 + bigStepChallengeLeafHeight = uint64(1 << 10) + smallStepChallengeLeafHeight = uint64(1 << 10) +) + func deployOnParentChain( t *testing.T, ctx context.Context, @@ -1257,6 +1287,8 @@ func deployOnParentChain( wasmModuleRoot common.Hash, prodConfirmPeriodBlocks bool, chainSupportsBlobs bool, + deployBold bool, + delayBufferThreshold uint64, ) (*chaininfo.RollupAddresses, *arbostypes.ParsedInitMessage) { parentChainInfo.GenerateAccount("RollupOwner") parentChainInfo.GenerateAccount("Sequencer") @@ -1281,18 +1313,94 @@ func deployOnParentChain( nativeToken := common.Address{} maxDataSize := big.NewInt(117964) - addresses, err := deploy.DeployOnParentChain( - ctx, - parentChainReader, - &parentChainTransactionOpts, - []common.Address{parentChainInfo.GetAddress("Sequencer")}, - parentChainInfo.GetAddress("RollupOwner"), - 0, - arbnode.GenerateRollupConfig(prodConfirmPeriodBlocks, wasmModuleRoot, parentChainInfo.GetAddress("RollupOwner"), chainConfig, serializedChainConfig, common.Address{}), - nativeToken, - maxDataSize, - chainSupportsBlobs, - ) + var addresses *chaininfo.RollupAddresses + if deployBold { + stakeToken, tx, _, err := boldMocksgen.DeployTestWETH9( + &parentChainTransactionOpts, + parentChainReader.Client(), + "Weth", + "WETH", + ) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, parentChainReader.Client(), tx) + Require(t, err) + miniStakeValues := []*big.Int{big.NewInt(5), big.NewInt(4), big.NewInt(3), big.NewInt(2), big.NewInt(1)} + genesisExecutionState := rollupgen.AssertionState{ + GlobalState: rollupgen.GlobalState{}, + MachineStatus: 1, // Finished + EndHistoryRoot: [32]byte{}, + } + bufferConfig := rollupgen.BufferConfig{ + Threshold: delayBufferThreshold, // number of blocks + Max: 14400, // 2 days of blocks + ReplenishRateInBasis: 500, // 5% + } + cfg := rollupgen.Config{ + MiniStakeValues: miniStakeValues, + ConfirmPeriodBlocks: 120, + StakeToken: stakeToken, + BaseStake: big.NewInt(1), + WasmModuleRoot: wasmModuleRoot, + Owner: parentChainTransactionOpts.From, + LoserStakeEscrow: parentChainTransactionOpts.From, + ChainId: chainConfig.ChainID, + ChainConfig: string(serializedChainConfig), + SequencerInboxMaxTimeVariation: rollupgen.ISequencerInboxMaxTimeVariation{ + DelayBlocks: big.NewInt(60 * 60 * 24 / 15), + FutureBlocks: big.NewInt(12), + DelaySeconds: big.NewInt(60 * 60 * 24), + FutureSeconds: big.NewInt(60 * 60), + }, + LayerZeroBlockEdgeHeight: new(big.Int).SetUint64(blockChallengeLeafHeight), + LayerZeroBigStepEdgeHeight: new(big.Int).SetUint64(bigStepChallengeLeafHeight), + LayerZeroSmallStepEdgeHeight: new(big.Int).SetUint64(smallStepChallengeLeafHeight), + GenesisAssertionState: genesisExecutionState, + GenesisInboxCount: common.Big0, + AnyTrustFastConfirmer: common.Address{}, + NumBigStepLevel: 3, + ChallengeGracePeriodBlocks: 3, + BufferConfig: bufferConfig, + } + wrappedClient := butil.NewBackendWrapper(parentChainReader.Client(), rpc.LatestBlockNumber) + boldAddresses, err := setup.DeployFullRollupStack( + ctx, + wrappedClient, + &parentChainTransactionOpts, + parentChainInfo.GetAddress("Sequencer"), + cfg, + setup.RollupStackConfig{ + UseMockBridge: false, + UseMockOneStepProver: false, + MinimumAssertionPeriod: 0, + }, + ) + Require(t, err) + addresses = &chaininfo.RollupAddresses{ + Bridge: boldAddresses.Bridge, + Inbox: 
boldAddresses.Inbox, + SequencerInbox: boldAddresses.SequencerInbox, + Rollup: boldAddresses.Rollup, + NativeToken: nativeToken, + UpgradeExecutor: boldAddresses.UpgradeExecutor, + ValidatorUtils: boldAddresses.ValidatorUtils, + ValidatorWalletCreator: boldAddresses.ValidatorWalletCreator, + StakeToken: stakeToken, + DeployedAt: boldAddresses.DeployedAt, + } + } else { + addresses, err = deploy.DeployOnParentChain( + ctx, + parentChainReader, + &parentChainTransactionOpts, + []common.Address{parentChainInfo.GetAddress("Sequencer")}, + parentChainInfo.GetAddress("RollupOwner"), + 0, + arbnode.GenerateRollupConfig(prodConfirmPeriodBlocks, wasmModuleRoot, parentChainInfo.GetAddress("RollupOwner"), chainConfig, serializedChainConfig, common.Address{}), + nativeToken, + maxDataSize, + chainSupportsBlobs, + ) + } Require(t, err) parentChainInfo.SetContract("Bridge", addresses.Bridge) parentChainInfo.SetContract("SequencerInbox", addresses.SequencerInbox) diff --git a/system_tests/debugapi_test.go b/system_tests/debugapi_test.go index 6be79ed4c9..fd1aa746a3 100644 --- a/system_tests/debugapi_test.go +++ b/system_tests/debugapi_test.go @@ -3,6 +3,8 @@ package arbtest import ( "context" "encoding/json" + "fmt" + "math/big" "testing" "github.com/ethereum/go-ethereum/common" @@ -10,10 +12,16 @@ import ( "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/eth/gasestimator" "github.com/ethereum/go-ethereum/eth/tracers" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" + "github.com/offchainlabs/nitro/arbos/l2pricing" + "github.com/offchainlabs/nitro/arbos/retryables" + "github.com/offchainlabs/nitro/solgen/go/node_interfacegen" "github.com/offchainlabs/nitro/solgen/go/precompilesgen" + "github.com/offchainlabs/nitro/util/arbmath" ) func TestDebugAPI(t *testing.T) { @@ -57,3 +65,230 @@ func TestDebugAPI(t *testing.T) { err = l2rpc.CallContext(ctx, &result, "debug_traceTransaction", tx.Hash(), &tracers.TraceConfig{Tracer: &flatCallTracer}) Require(t, err) } + +type account struct { + Balance *hexutil.Big `json:"balance,omitempty"` + Code []byte `json:"code,omitempty"` + Nonce uint64 `json:"nonce,omitempty"` + Storage map[common.Hash]common.Hash `json:"storage,omitempty"` +} +type prestateTrace struct { + Post map[common.Address]*account `json:"post"` + Pre map[common.Address]*account `json:"pre"` +} + +func TestPrestateTracingSimple(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + cleanup := builder.Build(t) + defer cleanup() + + builder.L2Info.GenerateAccount("User2") + sender := builder.L2Info.GetAddress("Owner") + receiver := builder.L2Info.GetAddress("User2") + ownerOldBalance, err := builder.L2.Client.BalanceAt(ctx, sender, nil) + Require(t, err) + user2OldBalance, err := builder.L2.Client.BalanceAt(ctx, receiver, nil) + Require(t, err) + + value := big.NewInt(1e6) + tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, value, nil) + Require(t, builder.L2.Client.SendTransaction(ctx, tx)) + _, err = builder.L2.EnsureTxSucceeded(tx) + Require(t, err) + + l2rpc := builder.L2.Stack.Attach() + + var result prestateTrace + traceConfig := map[string]interface{}{ + "tracer": "prestateTracer", + "tracerConfig": map[string]interface{}{ + "diffMode": true, + }, + } + err = l2rpc.CallContext(ctx, &result, "debug_traceTransaction", tx.Hash(), 
traceConfig) + Require(t, err) + + if !arbmath.BigEquals(result.Pre[sender].Balance.ToInt(), ownerOldBalance) { + Fatal(t, "Unexpected initial balance of sender") + } + if !arbmath.BigEquals(result.Pre[receiver].Balance.ToInt(), user2OldBalance) { + Fatal(t, "Unexpected initial balance of receiver") + } + if !arbmath.BigEquals(result.Post[sender].Balance.ToInt(), arbmath.BigSub(ownerOldBalance, value)) { + Fatal(t, "Unexpected final balance of sender") + } + if !arbmath.BigEquals(result.Post[receiver].Balance.ToInt(), value) { + Fatal(t, "Unexpected final balance of receiver") + } + if result.Post[sender].Nonce != result.Pre[sender].Nonce+1 { + Fatal(t, "sender nonce increment wasn't registered") + } + if result.Post[receiver].Nonce != result.Pre[receiver].Nonce { + Fatal(t, "receiver nonce shouldn't change") + } +} + +func TestPrestateTracingComplex(t *testing.T) { + builder, delayedInbox, lookupL2Tx, ctx, teardown := retryableSetup(t) + defer teardown() + + // Test prestate tracing of a ArbitrumDepositTx type tx + faucetAddr := builder.L1Info.GetAddress("Faucet") + oldBalance, err := builder.L2.Client.BalanceAt(ctx, faucetAddr, nil) + Require(t, err) + + txOpts := builder.L1Info.GetDefaultTransactOpts("Faucet", ctx) + txOpts.Value = big.NewInt(13) + + l1tx, err := delayedInbox.DepositEth439370b1(&txOpts) + Require(t, err) + + l1Receipt, err := builder.L1.EnsureTxSucceeded(l1tx) + Require(t, err) + if l1Receipt.Status != types.ReceiptStatusSuccessful { + t.Errorf("Got transaction status: %v, want: %v", l1Receipt.Status, types.ReceiptStatusSuccessful) + } + waitForL1DelayBlocks(t, builder) + + l2Tx := lookupL2Tx(l1Receipt) + l2Receipt, err := builder.L2.EnsureTxSucceeded(l2Tx) + Require(t, err) + newBalance, err := builder.L2.Client.BalanceAt(ctx, faucetAddr, l2Receipt.BlockNumber) + Require(t, err) + if got := new(big.Int); got.Sub(newBalance, oldBalance).Cmp(txOpts.Value) != 0 { + t.Errorf("Got transferred: %v, want: %v", got, txOpts.Value) + } + + l2rpc := builder.L2.Stack.Attach() + var result prestateTrace + traceConfig := map[string]interface{}{ + "tracer": "prestateTracer", + "tracerConfig": map[string]interface{}{ + "diffMode": true, + }, + } + err = l2rpc.CallContext(ctx, &result, "debug_traceTransaction", l2Tx.Hash(), traceConfig) + Require(t, err) + + if _, ok := result.Pre[faucetAddr]; !ok { + Fatal(t, "Faucet account not found in the result of prestate tracer") + } + // Nonce shouldn't exist (in this case defaults to 0) in the Post map of the trace in DiffMode + if l2Tx.SkipAccountChecks() && result.Post[faucetAddr].Nonce != 0 { + Fatal(t, "Faucet account's nonce should remain unchanged ") + } + if !arbmath.BigEquals(result.Pre[faucetAddr].Balance.ToInt(), oldBalance) { + Fatal(t, "Unexpected initial balance of Faucet") + } + if !arbmath.BigEquals(result.Post[faucetAddr].Balance.ToInt(), arbmath.BigAdd(oldBalance, txOpts.Value)) { + Fatal(t, "Unexpected final balance of Faucet") + } + + // Test prestate tracing of a ArbitrumSubmitRetryableTx type tx + user2Address := builder.L2Info.GetAddress("User2") + beneficiaryAddress := builder.L2Info.GetAddress("Beneficiary") + + deposit := arbmath.BigMul(big.NewInt(1e12), big.NewInt(1e12)) + callValue := big.NewInt(1e6) + + nodeInterface, err := node_interfacegen.NewNodeInterface(types.NodeInterfaceAddress, builder.L2.Client) + Require(t, err, "failed to deploy NodeInterface") + + // estimate the gas needed to auto redeem the retryable + usertxoptsL2 := builder.L2Info.GetDefaultTransactOpts("Faucet", ctx) + usertxoptsL2.NoSend = true + 
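The prestate-tracer calls in the two tests above share one RPC pattern; distilled into a helper (a sketch only, assuming the arbtest package where prestateTrace is declared and an *rpc.Client obtained from builder.L2.Stack.Attach()):

```go
func tracePrestateDiff(ctx context.Context, l2rpc *rpc.Client, txHash common.Hash) (*prestateTrace, error) {
	traceConfig := map[string]interface{}{
		"tracer": "prestateTracer",
		"tracerConfig": map[string]interface{}{
			"diffMode": true, // only report accounts the transaction changed
		},
	}
	var result prestateTrace
	if err := l2rpc.CallContext(ctx, &result, "debug_traceTransaction", txHash, traceConfig); err != nil {
		return nil, err
	}
	// result.Pre and result.Post hold the touched accounts' balances, nonces,
	// code and storage before and after the transaction.
	return &result, nil
}
```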
usertxoptsL2.GasMargin = 0 + tx, err := nodeInterface.EstimateRetryableTicket( + &usertxoptsL2, + usertxoptsL2.From, + deposit, + user2Address, + callValue, + beneficiaryAddress, + beneficiaryAddress, + []byte{0x32, 0x42, 0x32, 0x88}, // increase the cost to beyond that of params.TxGas + ) + Require(t, err, "failed to estimate retryable submission") + estimate := tx.Gas() + expectedEstimate := params.TxGas + params.TxDataNonZeroGasEIP2028*4 + if float64(estimate) > float64(expectedEstimate)*(1+gasestimator.EstimateGasErrorRatio) { + t.Errorf("estimated retryable ticket at %v gas but expected %v, with error margin of %v", + estimate, + expectedEstimate, + gasestimator.EstimateGasErrorRatio, + ) + } + + // submit & auto redeem the retryable using the gas estimate + usertxoptsL1 := builder.L1Info.GetDefaultTransactOpts("Faucet", ctx) + usertxoptsL1.Value = deposit + l1tx, err = delayedInbox.CreateRetryableTicket( + &usertxoptsL1, + user2Address, + callValue, + big.NewInt(1e16), + beneficiaryAddress, + beneficiaryAddress, + arbmath.UintToBig(estimate), + big.NewInt(l2pricing.InitialBaseFeeWei*2), + []byte{0x32, 0x42, 0x32, 0x88}, + ) + Require(t, err) + + l1Receipt, err = builder.L1.EnsureTxSucceeded(l1tx) + Require(t, err) + if l1Receipt.Status != types.ReceiptStatusSuccessful { + Fatal(t, "l1Receipt indicated failure") + } + + waitForL1DelayBlocks(t, builder) + + l2Tx = lookupL2Tx(l1Receipt) + receipt, err := builder.L2.EnsureTxSucceeded(l2Tx) + Require(t, err) + if receipt.Status != types.ReceiptStatusSuccessful { + Fatal(t) + } + + l2balance, err := builder.L2.Client.BalanceAt(ctx, builder.L2Info.GetAddress("User2"), nil) + Require(t, err) + if !arbmath.BigEquals(l2balance, callValue) { + Fatal(t, "Unexpected balance:", l2balance) + } + + ticketId := receipt.Logs[0].Topics[1] + firstRetryTxId := receipt.Logs[1].Topics[2] + fmt.Println("submitretryable txid ", ticketId) + fmt.Println("auto redeem txid ", firstRetryTxId) + + // Trace ArbitrumSubmitRetryableTx + result = prestateTrace{} + err = l2rpc.CallContext(ctx, &result, "debug_traceTransaction", l2Tx.Hash(), traceConfig) + Require(t, err) + + escrowAddr := retryables.RetryableEscrowAddress(ticketId) + if _, ok := result.Pre[escrowAddr]; !ok { + Fatal(t, "Escrow account not found in the result of prestate tracer for a ArbitrumSubmitRetryableTx transaction") + } + + if !arbmath.BigEquals(result.Pre[escrowAddr].Balance.ToInt(), common.Big0) { + Fatal(t, "Unexpected initial balance of Escrow") + } + if !arbmath.BigEquals(result.Post[escrowAddr].Balance.ToInt(), callValue) { + Fatal(t, "Unexpected final balance of Escrow") + } + + // Trace ArbitrumRetryTx + result = prestateTrace{} + err = l2rpc.CallContext(ctx, &result, "debug_traceTransaction", firstRetryTxId, traceConfig) + Require(t, err) + + if !arbmath.BigEquals(result.Pre[user2Address].Balance.ToInt(), common.Big0) { + Fatal(t, "Unexpected initial balance of User2") + } + if !arbmath.BigEquals(result.Post[user2Address].Balance.ToInt(), callValue) { + Fatal(t, "Unexpected final balance of User2") + } +} diff --git a/system_tests/estimation_test.go b/system_tests/estimation_test.go index e489b1864e..37e1efe8c5 100644 --- a/system_tests/estimation_test.go +++ b/system_tests/estimation_test.go @@ -162,7 +162,7 @@ func TestDifficultyForArbOSTen(t *testing.T) { defer cancel() builder := NewNodeBuilder(ctx).DefaultConfig(t, false) - builder.chainConfig.ArbitrumChainParams.InitialArbOSVersion = 10 + builder.chainConfig.ArbitrumChainParams.InitialArbOSVersion = params.ArbosVersion_10 cleanup 
:= builder.Build(t) defer cleanup() diff --git a/system_tests/fast_confirm_test.go b/system_tests/fast_confirm_test.go index dae2699b9f..8eb71bffd4 100644 --- a/system_tests/fast_confirm_test.go +++ b/system_tests/fast_confirm_test.go @@ -10,6 +10,7 @@ package arbtest import ( "context" "errors" + "fmt" "math/big" "strings" "testing" @@ -32,6 +33,7 @@ import ( "github.com/offchainlabs/nitro/solgen/go/rollupgen" "github.com/offchainlabs/nitro/solgen/go/upgrade_executorgen" "github.com/offchainlabs/nitro/staker" + legacystaker "github.com/offchainlabs/nitro/staker/legacy" "github.com/offchainlabs/nitro/staker/validatorwallet" "github.com/offchainlabs/nitro/util" "github.com/offchainlabs/nitro/validator/valnode" @@ -94,7 +96,7 @@ func TestFastConfirmation(t *testing.T) { _, err = builder.L1.EnsureTxSucceeded(tx) Require(t, err) - valConfig := staker.TestL1ValidatorConfig + valConfig := legacystaker.TestL1ValidatorConfig valConfig.EnableFastConfirmation = true parentChainID, err := builder.L1.Client.ChainID(ctx) if err != nil { @@ -156,11 +158,11 @@ func TestFastConfirmation(t *testing.T) { Require(t, err) err = valWallet.Initialize(ctx) Require(t, err) - stakerA, err := staker.NewStaker( + stakerA, err := legacystaker.NewStaker( l2node.L1Reader, valWallet, bind.CallOpts{}, - func() *staker.L1ValidatorConfig { return &valConfig }, + func() *legacystaker.L1ValidatorConfig { return &valConfig }, nil, stateless, nil, @@ -211,7 +213,7 @@ func TestFastConfirmation(t *testing.T) { latestConfirmAfterAct, err := rollup.LatestConfirmed(&bind.CallOpts{}) Require(t, err) if latestConfirmAfterAct <= latestConfirmBeforeAct { - Fatal(t, "staker A didn't advance the latest confirmed node") + Fatal(t, fmt.Sprintf("staker A didn't advance the latest confirmed node: want > %d, got: %d", latestConfirmBeforeAct, latestConfirmAfterAct)) } } @@ -293,7 +295,7 @@ func TestFastConfirmationWithSafe(t *testing.T) { _, err = builder.L1.EnsureTxSucceeded(tx) Require(t, err) - valConfigA := staker.TestL1ValidatorConfig + valConfigA := legacystaker.TestL1ValidatorConfig valConfigA.EnableFastConfirmation = true parentChainID, err := builder.L1.Client.ChainID(ctx) @@ -357,11 +359,11 @@ func TestFastConfirmationWithSafe(t *testing.T) { Require(t, err) err = valWalletA.Initialize(ctx) Require(t, err) - stakerA, err := staker.NewStaker( + stakerA, err := legacystaker.NewStaker( l2nodeA.L1Reader, valWalletA, bind.CallOpts{}, - func() *staker.L1ValidatorConfig { return &valConfigA }, + func() *legacystaker.L1ValidatorConfig { return &valConfigA }, nil, statelessA, nil, @@ -391,7 +393,7 @@ func TestFastConfirmationWithSafe(t *testing.T) { } valWalletB, err := validatorwallet.NewEOA(dpB, l2nodeB.DeployInfo.Rollup, l2nodeB.L1Reader.Client(), func() uint64 { return 0 }) Require(t, err) - valConfigB := staker.TestL1ValidatorConfig + valConfigB := legacystaker.TestL1ValidatorConfig valConfigB.EnableFastConfirmation = true valConfigB.Strategy = "watchtower" statelessB, err := staker.NewStatelessBlockValidator( @@ -409,11 +411,11 @@ func TestFastConfirmationWithSafe(t *testing.T) { Require(t, err) err = valWalletB.Initialize(ctx) Require(t, err) - stakerB, err := staker.NewStaker( + stakerB, err := legacystaker.NewStaker( l2nodeB.L1Reader, valWalletB, bind.CallOpts{}, - func() *staker.L1ValidatorConfig { return &valConfigB }, + func() *legacystaker.L1ValidatorConfig { return &valConfigB }, nil, statelessB, nil, diff --git a/system_tests/fees_test.go b/system_tests/fees_test.go index 76de23e2cb..5540728df8 100644 --- 
a/system_tests/fees_test.go +++ b/system_tests/fees_test.go @@ -89,10 +89,10 @@ func TestSequencerFeePaid(t *testing.T) { feePaidForL2 := arbmath.BigMulByUint(gasPrice, gasUsedForL2) tipPaidToNet := arbmath.BigMulByUint(tipCap, receipt.GasUsedForL1) gotTip := arbmath.BigEquals(networkRevenue, arbmath.BigAdd(feePaidForL2, tipPaidToNet)) - if !gotTip && version == 9 { + if !gotTip && version == params.ArbosVersion_9 { Fatal(t, "network didn't receive expected payment", networkRevenue, feePaidForL2, tipPaidToNet) } - if gotTip && version != 9 { + if gotTip && version != params.ArbosVersion_9 { Fatal(t, "tips are somehow enabled") } @@ -110,7 +110,7 @@ func TestSequencerFeePaid(t *testing.T) { return networkRevenue, tipPaidToNet } - if version != 9 { + if version != params.ArbosVersion_9 { testFees(3) return } diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go index bf30c928d8..4d902f87ba 100644 --- a/system_tests/full_challenge_impl_test.go +++ b/system_tests/full_challenge_impl_test.go @@ -32,6 +32,7 @@ import ( "github.com/offchainlabs/nitro/solgen/go/ospgen" "github.com/offchainlabs/nitro/solgen/go/yulgen" "github.com/offchainlabs/nitro/staker" + legacystaker "github.com/offchainlabs/nitro/staker/legacy" "github.com/offchainlabs/nitro/validator" "github.com/offchainlabs/nitro/validator/server_common" ) @@ -101,8 +102,8 @@ func CreateChallenge( auth, wasmModuleRoot, [2]uint8{ - staker.StatusFinished, - staker.StatusFinished, + legacystaker.StatusFinished, + legacystaker.StatusFinished, }, [2]mocksgen.GlobalState{ { @@ -397,7 +398,7 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall Fatal(t, err) } defer asserterValidator.Stop() - asserterManager, err := staker.NewChallengeManager(ctx, l1Backend, &asserterTxOpts, asserterTxOpts.From, challengeManagerAddr, 1, asserterValidator, 0, 0) + asserterManager, err := legacystaker.NewChallengeManager(ctx, l1Backend, &asserterTxOpts, asserterTxOpts.From, challengeManagerAddr, 1, asserterValidator, 0, 0) if err != nil { Fatal(t, err) } @@ -414,7 +415,7 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall Fatal(t, err) } defer challengerValidator.Stop() - challengerManager, err := staker.NewChallengeManager(ctx, l1Backend, &challengerTxOpts, challengerTxOpts.From, challengeManagerAddr, 1, challengerValidator, 0, 0) + challengerManager, err := legacystaker.NewChallengeManager(ctx, l1Backend, &challengerTxOpts, challengerTxOpts.From, challengeManagerAddr, 1, challengerValidator, 0, 0) if err != nil { Fatal(t, err) } diff --git a/system_tests/mock_machine_test.go b/system_tests/mock_machine_test.go new file mode 100644 index 0000000000..ea7fcbaef1 --- /dev/null +++ b/system_tests/mock_machine_test.go @@ -0,0 +1,41 @@ +// Copyright 2021-2022, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package arbtest + +import ( + "github.com/ethereum/go-ethereum/common" + + "github.com/offchainlabs/nitro/validator/server_arb" +) + +// IncorrectIntermediateMachine will report an incorrect hash while running from incorrectStep onwards. +// However, it'll reach the correct final hash and global state once finished. 
+type IncorrectIntermediateMachine struct { + server_arb.MachineInterface + incorrectStep uint64 +} + +var _ server_arb.MachineInterface = (*IncorrectIntermediateMachine)(nil) + +func NewIncorrectIntermediateMachine(inner server_arb.MachineInterface, incorrectStep uint64) *IncorrectIntermediateMachine { + return &IncorrectIntermediateMachine{ + MachineInterface: inner, + incorrectStep: incorrectStep, + } +} + +func (m *IncorrectIntermediateMachine) CloneMachineInterface() server_arb.MachineInterface { + return &IncorrectIntermediateMachine{ + MachineInterface: m.MachineInterface.CloneMachineInterface(), + incorrectStep: m.incorrectStep, + } +} + +func (m *IncorrectIntermediateMachine) Hash() common.Hash { + h := m.MachineInterface.Hash() + if m.GetStepCount() >= m.incorrectStep && m.IsRunning() { + h[0] ^= 0xFF + } + return h +} diff --git a/system_tests/outbox_test.go b/system_tests/outbox_test.go index ea6dc2be8b..10d1ebec42 100644 --- a/system_tests/outbox_test.go +++ b/system_tests/outbox_test.go @@ -54,7 +54,6 @@ func TestP256VerifyEnabled(t *testing.T) { func TestOutboxProofs(t *testing.T) { t.Parallel() gethhook.RequireHookedGeth() - rand.Seed(time.Now().UTC().UnixNano()) ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/system_tests/overflow_assertions_test.go b/system_tests/overflow_assertions_test.go new file mode 100644 index 0000000000..c024a43070 --- /dev/null +++ b/system_tests/overflow_assertions_test.go @@ -0,0 +1,316 @@ +// Copyright 2024, Offchain Labs, Inc. +// For license information, see: +// https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md + +//go:build challengetest && !race + +package arbtest + +import ( + "context" + "math/big" + "os" + "strings" + "testing" + "time" + + "github.com/ccoveille/go-safecast" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" + + protocol "github.com/offchainlabs/bold/chain-abstraction" + challengemanager "github.com/offchainlabs/bold/challenge-manager" + modes "github.com/offchainlabs/bold/challenge-manager/types" + l2stateprovider "github.com/offchainlabs/bold/layer2-state-provider" + "github.com/offchainlabs/bold/solgen/go/bridgegen" + "github.com/offchainlabs/bold/solgen/go/mocksgen" + "github.com/offchainlabs/bold/solgen/go/rollupgen" + "github.com/offchainlabs/bold/testing/setup" + "github.com/offchainlabs/nitro/arbos/l2pricing" + "github.com/offchainlabs/nitro/cmd/chaininfo" + "github.com/offchainlabs/nitro/execution/gethexec" + "github.com/offchainlabs/nitro/staker" + "github.com/offchainlabs/nitro/staker/bold" + "github.com/offchainlabs/nitro/util" + "github.com/offchainlabs/nitro/validator/valnode" +) + +func TestOverflowAssertions(t *testing.T) { + // Get a simulated geth backend running. + // + // Create enough messages in batches to overflow the block level challenge + // height. (height == 32, messages = 45) + // + // Start the challenge manager with a minimumAssertionPeriod of 7 and make + // sure that it posts overflow-assertions right away instead of waiting for + // the 7 blocks to pass. 
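A sketch of how the wrapper above might be exercised, assuming `honest` is any server_arb.MachineInterface obtained elsewhere in a test and that both machines are still running after 150 steps (the step numbers are illustrative):

```go
func checkIntermediateDivergence(ctx context.Context, honest server_arb.MachineInterface) error {
	// Corrupt every intermediate hash from step 100 onward; the final hash is untouched.
	dishonest := NewIncorrectIntermediateMachine(honest.CloneMachineInterface(), 100)
	if err := honest.Step(ctx, 150); err != nil {
		return err
	}
	if err := dishonest.Step(ctx, 150); err != nil {
		return err
	}
	// Past the divergence point the hashes differ only in their first byte.
	if honest.IsRunning() && honest.Hash() == dishonest.Hash() {
		return errors.New("expected intermediate hashes to diverge at step 150")
	}
	return nil
}
```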
+ goodDir, err := os.MkdirTemp("", "good_*") + Require(t, err) + t.Cleanup(func() { + Require(t, os.RemoveAll(goodDir)) + }) + ctx, cancelCtx := context.WithCancel(context.Background()) + defer cancelCtx() + var transferGas = util.NormalizeL2GasForL1GasInitial(800_000, params.GWei) // include room for aggregator L1 costs + l2chainConfig := chaininfo.ArbitrumDevTestChainConfig() + l2info := NewBlockChainTestInfo( + t, + types.NewArbitrumSigner(types.NewLondonSigner(l2chainConfig.ChainID)), big.NewInt(l2pricing.InitialBaseFeeWei*2), + transferGas, + ) + // This is important to show that overflow assertions don't wait. + minAssertionBlocks := int64(7) + ownerBal := big.NewInt(params.Ether) + ownerBal.Mul(ownerBal, big.NewInt(1_000_000)) + l2info.GenerateGenesisAccount("Owner", ownerBal) + sconf := setup.RollupStackConfig{ + UseMockBridge: false, + UseMockOneStepProver: false, + MinimumAssertionPeriod: minAssertionBlocks, + } + + _, l2node, _, _, l1info, _, l1client, l1stack, assertionChain, _ := createTestNodeOnL1ForBoldProtocol(t, ctx, true, nil, l2chainConfig, nil, sconf, l2info) + defer requireClose(t, l1stack) + defer l2node.StopAndWait() + + // Make sure we shut down test functionality before the rest of the node + ctx, cancelCtx = context.WithCancel(ctx) + defer cancelCtx() + + go keepChainMoving(t, ctx, l1info, l1client) + + balance := big.NewInt(params.Ether) + balance.Mul(balance, big.NewInt(100)) + TransferBalance(t, "Faucet", "Asserter", balance, l1info, l1client, ctx) + + valCfg := valnode.TestValidationConfig + valCfg.UseJit = false + _, valStack := createTestValidationNode(t, ctx, &valCfg) + blockValidatorConfig := staker.TestBlockValidatorConfig + + stateless, err := staker.NewStatelessBlockValidator( + l2node.InboxReader, + l2node.InboxTracker, + l2node.TxStreamer, + l2node.Execution, + l2node.ArbDB, + nil, + StaticFetcherFrom(t, &blockValidatorConfig), + valStack, + ) + Require(t, err) + err = stateless.Start(ctx) + Require(t, err) + + blockValidator, err := staker.NewBlockValidator( + stateless, + l2node.InboxTracker, + l2node.TxStreamer, + StaticFetcherFrom(t, &blockValidatorConfig), + nil, + ) + Require(t, err) + Require(t, blockValidator.Initialize(ctx)) + Require(t, blockValidator.Start(ctx)) + + stateManager, err := bold.NewBOLDStateProvider( + blockValidator, + stateless, + l2stateprovider.Height(blockChallengeLeafHeight), + &bold.StateProviderConfig{ + ValidatorName: "good", + MachineLeavesCachePath: goodDir, + CheckBatchFinality: false, + }, + goodDir, + ) + Require(t, err) + + Require(t, l2node.Start(ctx)) + + l2info.GenerateAccount("Destination") + sequencerTxOpts := l1info.GetDefaultTransactOpts("Sequencer", ctx) + + honestSeqInbox := l1info.GetAddress("SequencerInbox") + honestSeqInboxBinding, err := bridgegen.NewSequencerInbox(honestSeqInbox, l1client) + Require(t, err) + + // Post batches to the honest and inbox. + seqInboxABI, err := abi.JSON(strings.NewReader(bridgegen.SequencerInboxABI)) + Require(t, err) + + honestUpgradeExec, err := mocksgen.NewUpgradeExecutorMock(l1info.GetAddress("UpgradeExecutor"), l1client) + Require(t, err) + data, err := seqInboxABI.Pack( + "setIsBatchPoster", + sequencerTxOpts.From, + true, + ) + Require(t, err) + honestRollupOwnerOpts := l1info.GetDefaultTransactOpts("RollupOwner", ctx) + _, err = honestUpgradeExec.ExecuteCall(&honestRollupOwnerOpts, honestSeqInbox, data) + Require(t, err) + + // Post enough messages (45 across 2 batches) to overflow the block level + // challenge height (32). 
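Spelled out, the arithmetic this setup relies on (values repeated from the constants and comments above; a sketch, not part of the test):

```go
const (
	leafHeight      = 1 << 5                           // blockChallengeLeafHeight: 32 blocks per block-level assertion
	firstBatchMsgs  = 32                               // first makeBoldBatch call
	secondBatchMsgs = 13                               // second makeBoldBatch call
	totalMsgs       = firstBatchMsgs + secondBatchMsgs // 45 > 32, so one assertion cannot cover everything
)
// The assertion covering the first 32 blocks fills the leaf height; the
// remaining 13 blocks force an overflow assertion, which the challenge manager
// is expected to post right away rather than waiting out the 7-block
// minimumAssertionPeriod.
```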
+ totalMessagesPosted := int64(0) + numMessagesPerBatch := int64(32) + divergeAt := int64(-1) + makeBoldBatch(t, l2node, l2info, l1client, &sequencerTxOpts, honestSeqInboxBinding, honestSeqInbox, numMessagesPerBatch, divergeAt) + totalMessagesPosted += numMessagesPerBatch + + numMessagesPerBatch = int64(13) + makeBoldBatch(t, l2node, l2info, l1client, &sequencerTxOpts, honestSeqInboxBinding, honestSeqInbox, numMessagesPerBatch, divergeAt) + totalMessagesPosted += numMessagesPerBatch + + bc, err := l2node.InboxTracker.GetBatchCount() + Require(t, err) + msgs, err := l2node.InboxTracker.GetBatchMessageCount(bc - 1) + Require(t, err) + + t.Logf("Node batch count %d, msgs %d", bc, msgs) + + // Wait for the node to catch up. + nodeExec, ok := l2node.Execution.(*gethexec.ExecutionNode) + if !ok { + Fatal(t, "not geth execution node") + } + for { + latest := nodeExec.Backend.APIBackend().CurrentHeader() + isCaughtUp := latest.Number.Uint64() == uint64(totalMessagesPosted) + if isCaughtUp { + break + } + time.Sleep(time.Millisecond * 200) + } + + bridgeBinding, err := bridgegen.NewBridge(l1info.GetAddress("Bridge"), l1client) + Require(t, err) + totalBatchesBig, err := bridgeBinding.SequencerMessageCount(&bind.CallOpts{Context: ctx}) + Require(t, err) + totalBatches := totalBatchesBig.Uint64() + + // Wait until the validator has validated the batches. + for { + lastInfo, err := blockValidator.ReadLastValidatedInfo() + if lastInfo == nil || err != nil { + continue + } + t.Log("Batch", lastInfo.GlobalState.Batch, "Total", totalBatches-1) + if lastInfo.GlobalState.Batch >= totalBatches-1 { + break + } + time.Sleep(time.Millisecond * 200) + } + + provider := l2stateprovider.NewHistoryCommitmentProvider( + stateManager, + stateManager, + stateManager, + []l2stateprovider.Height{ + l2stateprovider.Height(blockChallengeLeafHeight), + l2stateprovider.Height(bigStepChallengeLeafHeight), + l2stateprovider.Height(smallStepChallengeLeafHeight), + }, + stateManager, + nil, // Api db + ) + + stackOpts := []challengemanager.StackOpt{ + challengemanager.StackWithName("default"), + challengemanager.StackWithMode(modes.MakeMode), + challengemanager.StackWithPostingInterval(time.Second), + challengemanager.StackWithPollingInterval(time.Millisecond * 500), + challengemanager.StackWithAverageBlockCreationTime(time.Second), + } + + manager, err := challengemanager.NewChallengeStack( + assertionChain, + provider, + stackOpts..., + ) + Require(t, err) + manager.Start(ctx) + + filterer, err := rollupgen.NewRollupUserLogicFilterer(assertionChain.RollupAddress(), assertionChain.Backend()) + Require(t, err) + + // The goal of this test is to observe: + // + // 1. The genisis assertion (non-overflow) + // 2. The assertion of the first 32 blocks of the two batches manually set up + // above (non-overflow) + // 3. The overflow assertion that should be posted in fewer than + // minAssertionBlocks. (overflow) + // 4. One more normal assertion in >= minAssertionBlocks. 
(non-overflow) + + overflow := true + nonOverflow := false + expectedAssertions := []bool{nonOverflow, nonOverflow, overflow, nonOverflow} + mab64, err := safecast.ToUint64(minAssertionBlocks) + Require(t, err) + + lastInboxMax := uint64(0) + lastAssertionBlock := uint64(0) + fromBlock := uint64(0) + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + for len(expectedAssertions) > 0 { + select { + case <-ticker.C: + latestBlock, err := l1client.HeaderByNumber(ctx, nil) + Require(t, err) + toBlock := latestBlock.Number.Uint64() + if fromBlock >= toBlock { + continue + } + filterOpts := &bind.FilterOpts{ + Start: fromBlock, + End: &toBlock, + Context: ctx, + } + it, err := filterer.FilterAssertionCreated(filterOpts, nil, nil) + Require(t, err) + for it.Next() { + if it.Error() != nil { + t.Fatalf("Error in filter iterator: %v", it.Error()) + } + t.Log("Received event of assertion created!") + assertionHash := protocol.AssertionHash{Hash: it.Event.AssertionHash} + creationInfo, err := assertionChain.ReadAssertionCreationInfo(ctx, assertionHash) + Require(t, err) + t.Logf("Created assertion in block: %d", creationInfo.CreationBlock) + newState := protocol.GoGlobalStateFromSolidity(creationInfo.AfterState.GlobalState) + t.Logf("NewState PosInBatch: %d", newState.PosInBatch) + inboxMax := creationInfo.InboxMaxCount.Uint64() + t.Logf("InboxMax: %d", inboxMax) + blocks := creationInfo.CreationBlock - lastAssertionBlock + // PosInBatch == 0 && inboxMax > lastInboxMax means it is NOT an overflow assertion. + if newState.PosInBatch == 0 && inboxMax > lastInboxMax { + if expectedAssertions[0] == overflow { + t.Errorf("Expected overflow assertion, got non-overflow assertion") + } + if blocks < mab64 { + t.Errorf("non-overflow assertions should have >= =%d blocks between them. Got: %d", mab64, blocks) + } + } else { + if expectedAssertions[0] == nonOverflow { + t.Errorf("Expected non-overflow assertion, got overflow assertion") + } + if blocks >= mab64 { + t.Errorf("overflow assertions should not have %d blocks between them. Got: %d", mab64, blocks) + } + } + lastAssertionBlock = creationInfo.CreationBlock + lastInboxMax = inboxMax + expectedAssertions = expectedAssertions[1:] + } + fromBlock = toBlock + 1 + case <-ctx.Done(): + return + } + } + // PASS: All expected assertions were seen. +} diff --git a/system_tests/precompile_test.go b/system_tests/precompile_test.go index 78f34df6c7..5bc6315086 100644 --- a/system_tests/precompile_test.go +++ b/system_tests/precompile_test.go @@ -14,6 +14,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos" "github.com/offchainlabs/nitro/arbos/l1pricing" @@ -27,7 +28,7 @@ func TestPurePrecompileMethodCalls(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - arbosVersion := uint64(31) + arbosVersion := params.ArbosVersion_31 builder := NewNodeBuilder(ctx). DefaultConfig(t, false). 
WithArbOSVersion(arbosVersion) @@ -504,57 +505,6 @@ func TestGetBrotliCompressionLevel(t *testing.T) { } } -func TestScheduleArbosUpgrade(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - builder := NewNodeBuilder(ctx).DefaultConfig(t, false) - cleanup := builder.Build(t) - defer cleanup() - - auth := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) - - arbOwnerPublic, err := precompilesgen.NewArbOwnerPublic(common.HexToAddress("0x6b"), builder.L2.Client) - Require(t, err, "could not bind ArbOwner contract") - - arbOwner, err := precompilesgen.NewArbOwner(common.HexToAddress("0x70"), builder.L2.Client) - Require(t, err, "could not bind ArbOwner contract") - - callOpts := &bind.CallOpts{Context: ctx} - scheduled, err := arbOwnerPublic.GetScheduledUpgrade(callOpts) - Require(t, err, "failed to call GetScheduledUpgrade before scheduling upgrade") - if scheduled.ArbosVersion != 0 || scheduled.ScheduledForTimestamp != 0 { - t.Errorf("expected no upgrade to be scheduled, got version %v timestamp %v", scheduled.ArbosVersion, scheduled.ScheduledForTimestamp) - } - - // Schedule a noop upgrade, which should test GetScheduledUpgrade in the same way an already completed upgrade would. - tx, err := arbOwner.ScheduleArbOSUpgrade(&auth, 1, 1) - Require(t, err) - _, err = builder.L2.EnsureTxSucceeded(tx) - Require(t, err) - - scheduled, err = arbOwnerPublic.GetScheduledUpgrade(callOpts) - Require(t, err, "failed to call GetScheduledUpgrade after scheduling noop upgrade") - if scheduled.ArbosVersion != 0 || scheduled.ScheduledForTimestamp != 0 { - t.Errorf("expected completed scheduled upgrade to be ignored, got version %v timestamp %v", scheduled.ArbosVersion, scheduled.ScheduledForTimestamp) - } - - // TODO: Once we have an ArbOS 30, test a real upgrade with it - // We can't test 11 -> 20 because 11 doesn't have the GetScheduledUpgrade method we want to test - var testVersion uint64 = 100 - var testTimestamp uint64 = 1 << 62 - tx, err = arbOwner.ScheduleArbOSUpgrade(&auth, 100, 1<<62) - Require(t, err) - _, err = builder.L2.EnsureTxSucceeded(tx) - Require(t, err) - - scheduled, err = arbOwnerPublic.GetScheduledUpgrade(callOpts) - Require(t, err, "failed to call GetScheduledUpgrade after scheduling upgrade") - if scheduled.ArbosVersion != testVersion || scheduled.ScheduledForTimestamp != testTimestamp { - t.Errorf("expected upgrade to be scheduled for version %v timestamp %v, got version %v timestamp %v", testVersion, testTimestamp, scheduled.ArbosVersion, scheduled.ScheduledForTimestamp) - } -} - func TestArbStatistics(t *testing.T) { t.Parallel() diff --git a/system_tests/retryable_test.go b/system_tests/retryable_test.go index 55d26c8372..49bba81374 100644 --- a/system_tests/retryable_test.go +++ b/system_tests/retryable_test.go @@ -316,7 +316,7 @@ func testSubmitRetryableEmptyEscrow(t *testing.T, arbosVersion uint64) { state, err := builder.L2.ExecNode.ArbInterface.BlockChain().State() Require(t, err) escrowExists := state.Exist(escrowAccount) - if escrowExists != (arbosVersion < 30) { + if escrowExists != (arbosVersion < params.ArbosVersion_30) { Fatal(t, "Escrow account existance", escrowExists, "doesn't correspond to ArbOS version", arbosVersion) } } diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go index 67ce260529..69645d8878 100644 --- a/system_tests/staker_test.go +++ b/system_tests/staker_test.go @@ -33,6 +33,7 @@ import ( "github.com/offchainlabs/nitro/solgen/go/rollupgen" 
"github.com/offchainlabs/nitro/solgen/go/upgrade_executorgen" "github.com/offchainlabs/nitro/staker" + legacystaker "github.com/offchainlabs/nitro/staker/legacy" "github.com/offchainlabs/nitro/staker/validatorwallet" "github.com/offchainlabs/nitro/util" "github.com/offchainlabs/nitro/util/arbmath" @@ -152,7 +153,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) validatorUtils, err := rollupgen.NewValidatorUtils(l2nodeA.DeployInfo.ValidatorUtils, builder.L1.Client) Require(t, err) - valConfigA := staker.TestL1ValidatorConfig + valConfigA := legacystaker.TestL1ValidatorConfig parentChainID, err := builder.L1.Client.ChainID(ctx) if err != nil { t.Fatalf("Failed to get parent chain id: %v", err) @@ -208,11 +209,11 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) Require(t, err) err = statelessA.Start(ctx) Require(t, err) - stakerA, err := staker.NewStaker( + stakerA, err := legacystaker.NewStaker( l2nodeA.L1Reader, valWalletA, bind.CallOpts{}, - func() *staker.L1ValidatorConfig { return &valConfigA }, + func() *legacystaker.L1ValidatorConfig { return &valConfigA }, nil, statelessA, nil, @@ -222,7 +223,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) ) Require(t, err) err = stakerA.Initialize(ctx) - if stakerA.Strategy() != staker.WatchtowerStrategy { + if stakerA.Strategy() != legacystaker.WatchtowerStrategy { err = valWalletA.Initialize(ctx) Require(t, err) } @@ -246,7 +247,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) } valWalletB, err := validatorwallet.NewEOA(dpB, l2nodeB.DeployInfo.Rollup, l2nodeB.L1Reader.Client(), func() uint64 { return 0 }) Require(t, err) - valConfigB := staker.TestL1ValidatorConfig + valConfigB := legacystaker.TestL1ValidatorConfig valConfigB.Strategy = "MakeNodes" statelessB, err := staker.NewStatelessBlockValidator( l2nodeB.InboxReader, @@ -261,11 +262,11 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) Require(t, err) err = statelessB.Start(ctx) Require(t, err) - stakerB, err := staker.NewStaker( + stakerB, err := legacystaker.NewStaker( l2nodeB.L1Reader, valWalletB, bind.CallOpts{}, - func() *staker.L1ValidatorConfig { return &valConfigB }, + func() *legacystaker.L1ValidatorConfig { return &valConfigB }, nil, statelessB, nil, @@ -276,18 +277,18 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) Require(t, err) err = stakerB.Initialize(ctx) Require(t, err) - if stakerB.Strategy() != staker.WatchtowerStrategy { + if stakerB.Strategy() != legacystaker.WatchtowerStrategy { err = valWalletB.Initialize(ctx) Require(t, err) } valWalletC := validatorwallet.NewNoOp(builder.L1.Client, l2nodeA.DeployInfo.Rollup) - valConfigC := staker.TestL1ValidatorConfig + valConfigC := legacystaker.TestL1ValidatorConfig valConfigC.Strategy = "Watchtower" - stakerC, err := staker.NewStaker( + stakerC, err := legacystaker.NewStaker( l2nodeA.L1Reader, valWalletC, bind.CallOpts{}, - func() *staker.L1ValidatorConfig { return &valConfigC }, + func() *legacystaker.L1ValidatorConfig { return &valConfigC }, nil, statelessA, nil, @@ -296,7 +297,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) nil, ) Require(t, err) - if stakerC.Strategy() != staker.WatchtowerStrategy { + if stakerC.Strategy() != legacystaker.WatchtowerStrategy { err = valWalletC.Initialize(ctx) Require(t, err) } @@ -409,7 +410,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, 
honestStakerInactive bool) if faultyStaker { conflictInfo, err := validatorUtils.FindStakerConflict(&bind.CallOpts{}, l2nodeA.DeployInfo.Rollup, l1authA.From, srv.Address, big.NewInt(1024)) Require(t, err) - if staker.ConflictType(conflictInfo.Ty) == staker.CONFLICT_TYPE_FOUND { + if legacystaker.ConflictType(conflictInfo.Ty) == legacystaker.CONFLICT_TYPE_FOUND { cancelBackgroundTxs() } } diff --git a/system_tests/transfer_test.go b/system_tests/transfer_test.go index a49e059351..c221ecc137 100644 --- a/system_tests/transfer_test.go +++ b/system_tests/transfer_test.go @@ -12,6 +12,7 @@ import ( "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/params" ) func TestTransfer(t *testing.T) { @@ -51,12 +52,12 @@ func TestP256Verify(t *testing.T) { }{ { desc: "p256 should not be enabled on arbOS 20", - initialVersion: 20, + initialVersion: params.ArbosVersion_20, want: nil, }, { desc: "p256 should be enabled on arbOS 20", - initialVersion: 30, + initialVersion: params.ArbosVersion_30, want: common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"), }, } { diff --git a/system_tests/validation_mock_test.go b/system_tests/validation_mock_test.go index ad19203093..98dab7ad39 100644 --- a/system_tests/validation_mock_test.go +++ b/system_tests/validation_mock_test.go @@ -84,7 +84,7 @@ func (s *mockSpawner) Stop() {} func (s *mockSpawner) Name() string { return "mock" } func (s *mockSpawner) Room() int { return 4 } -func (s *mockSpawner) CreateExecutionRun(wasmModuleRoot common.Hash, input *validator.ValidationInput) containers.PromiseInterface[validator.ExecutionRun] { +func (s *mockSpawner) CreateExecutionRun(wasmModuleRoot common.Hash, input *validator.ValidationInput, _ bool) containers.PromiseInterface[validator.ExecutionRun] { s.ExecSpawned = append(s.ExecSpawned, input.Id) return containers.NewReadyPromise[validator.ExecutionRun](&mockExecRun{ startState: input.StartState, @@ -155,6 +155,10 @@ func (r *mockExecRun) PrepareRange(uint64, uint64) containers.PromiseInterface[s return containers.NewReadyPromise[struct{}](struct{}{}, nil) } +func (r *mockExecRun) CheckAlive(ctx context.Context) error { + return nil +} + func (r *mockExecRun) Close() {} func createMockValidationNode(t *testing.T, ctx context.Context, config *server_arb.ArbitratorSpawnerConfig) (*mockSpawner, *node.Node) { @@ -256,7 +260,7 @@ func TestValidationServerAPI(t *testing.T) { if res != endState { t.Error("unexpected mock validation run") } - execRun, err := client.CreateExecutionRun(wasmRoot, &valInput).Await(ctx) + execRun, err := client.CreateExecutionRun(wasmRoot, &valInput, false).Await(ctx) Require(t, err) step0 := execRun.GetStepAt(0) step0Res, err := step0.Await(ctx) @@ -381,9 +385,9 @@ func TestExecutionKeepAlive(t *testing.T) { Require(t, err) valInput := validator.ValidationInput{} - runDefault, err := clientDefault.CreateExecutionRun(wasmRoot, &valInput).Await(ctx) + runDefault, err := clientDefault.CreateExecutionRun(wasmRoot, &valInput, false).Await(ctx) Require(t, err) - runShortTO, err := clientShortTO.CreateExecutionRun(wasmRoot, &valInput).Await(ctx) + runShortTO, err := clientShortTO.CreateExecutionRun(wasmRoot, &valInput, false).Await(ctx) Require(t, err) <-time.After(time.Second * 10) stepDefault := runDefault.GetStepAt(0) diff --git a/util/redisutil/redisutil.go b/util/redisutil/redisutil.go index 01ba836d5b..fafb816b8a 100644 --- a/util/redisutil/redisutil.go +++ b/util/redisutil/redisutil.go @@ -1,14 +1,231 @@ package 
redisutil -import "github.com/redis/go-redis/v9" +import ( + "fmt" + "net" + "net/url" + "sort" + "strconv" + "strings" + "time" -func RedisClientFromURL(url string) (redis.UniversalClient, error) { - if url == "" { + "github.com/redis/go-redis/v9" +) + +// RedisClientFromURL creates a new Redis client based on the provided URL. +// The URL scheme can be either `redis` or `redis+sentinel`. +func RedisClientFromURL(redisUrl string) (redis.UniversalClient, error) { + if redisUrl == "" { return nil, nil } - redisOptions, err := redis.ParseURL(url) + u, err := url.Parse(redisUrl) + if err != nil { + return nil, err + } + if u.Scheme == "redis+sentinel" { + redisOptions, err := parseFailoverRedisUrl(redisUrl) + if err != nil { + return nil, err + } + return redis.NewFailoverClient(redisOptions), nil + } + redisOptions, err := redis.ParseURL(redisUrl) if err != nil { return nil, err } return redis.NewClient(redisOptions), nil } + +// Designed using https://github.com/redis/go-redis/blob/a8590e987945b7ba050569cc3b94b8ece49e99e3/options.go#L283 as reference +// Example Usage : +// +// redis+sentinel://:@:,:,:/?dial_timeout=3&db=1&read_timeout=6s&max_retries=2 +func parseFailoverRedisUrl(redisUrl string) (*redis.FailoverOptions, error) { + u, err := url.Parse(redisUrl) + if err != nil { + return nil, err + } + o := &redis.FailoverOptions{} + o.SentinelUsername, o.SentinelPassword = getUserPassword(u) + o.SentinelAddrs = getAddressesWithDefaults(u) + f := strings.FieldsFunc(u.Path, func(r rune) bool { + return r == '/' + }) + switch len(f) { + case 0: + return nil, fmt.Errorf("redis: master name is required") + case 1: + o.DB = 0 + o.MasterName = f[0] + case 2: + o.MasterName = f[0] + var err error + if o.DB, err = strconv.Atoi(f[1]); err != nil { + return nil, fmt.Errorf("redis: invalid database number: %q", f[0]) + } + default: + return nil, fmt.Errorf("redis: invalid URL path: %s", u.Path) + } + + return setupConnParams(u, o) +} + +func getUserPassword(u *url.URL) (string, string) { + var user, password string + if u.User != nil { + user = u.User.Username() + if p, ok := u.User.Password(); ok { + password = p + } + } + return user, password +} + +func getAddressesWithDefaults(u *url.URL) []string { + urlHosts := strings.Split(u.Host, ",") + var addresses []string + for _, urlHost := range urlHosts { + host, port, err := net.SplitHostPort(urlHost) + if err != nil { + host = u.Host + } + if host == "" { + host = "localhost" + } + if port == "" { + port = "6379" + } + addresses = append(addresses, net.JoinHostPort(host, port)) + } + return addresses +} + +type queryOptions struct { + q url.Values + err error +} + +func (o *queryOptions) has(name string) bool { + return len(o.q[name]) > 0 +} + +func (o *queryOptions) string(name string) string { + vs := o.q[name] + if len(vs) == 0 { + return "" + } + delete(o.q, name) // enable detection of unknown parameters + return vs[len(vs)-1] +} + +func (o *queryOptions) int(name string) int { + s := o.string(name) + if s == "" { + return 0 + } + i, err := strconv.Atoi(s) + if err == nil { + return i + } + if o.err == nil { + o.err = fmt.Errorf("redis: invalid %s number: %w", name, err) + } + return 0 +} + +func (o *queryOptions) duration(name string) time.Duration { + s := o.string(name) + if s == "" { + return 0 + } + // try plain number first + if i, err := strconv.Atoi(s); err == nil { + if i <= 0 { + // disable timeouts + return -1 + } + return time.Duration(i) * time.Second + } + dur, err := time.ParseDuration(s) + if err == nil { + return dur + } + if 
o.err == nil { + o.err = fmt.Errorf("redis: invalid %s duration: %w", name, err) + } + return 0 +} + +func (o *queryOptions) bool(name string) bool { + switch s := o.string(name); s { + case "true", "1": + return true + case "false", "0", "": + return false + default: + if o.err == nil { + o.err = fmt.Errorf("redis: invalid %s boolean: expected true/false/1/0 or an empty string, got %q", name, s) + } + return false + } +} + +func (o *queryOptions) remaining() []string { + if len(o.q) == 0 { + return nil + } + keys := make([]string, 0, len(o.q)) + for k := range o.q { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +func setupConnParams(u *url.URL, o *redis.FailoverOptions) (*redis.FailoverOptions, error) { + q := queryOptions{q: u.Query()} + + // compat: a future major release may use q.int("db") + if tmp := q.string("db"); tmp != "" { + db, err := strconv.Atoi(tmp) + if err != nil { + return nil, fmt.Errorf("redis: invalid database number: %w", err) + } + o.DB = db + } + + o.Protocol = q.int("protocol") + o.ClientName = q.string("client_name") + o.MaxRetries = q.int("max_retries") + o.MinRetryBackoff = q.duration("min_retry_backoff") + o.MaxRetryBackoff = q.duration("max_retry_backoff") + o.DialTimeout = q.duration("dial_timeout") + o.ReadTimeout = q.duration("read_timeout") + o.WriteTimeout = q.duration("write_timeout") + o.PoolFIFO = q.bool("pool_fifo") + o.PoolSize = q.int("pool_size") + o.PoolTimeout = q.duration("pool_timeout") + o.MinIdleConns = q.int("min_idle_conns") + o.MaxIdleConns = q.int("max_idle_conns") + o.MaxActiveConns = q.int("max_active_conns") + if q.has("conn_max_idle_time") { + o.ConnMaxIdleTime = q.duration("conn_max_idle_time") + } else { + o.ConnMaxIdleTime = q.duration("idle_timeout") + } + if q.has("conn_max_lifetime") { + o.ConnMaxLifetime = q.duration("conn_max_lifetime") + } else { + o.ConnMaxLifetime = q.duration("max_conn_age") + } + if q.err != nil { + return nil, q.err + } + + // any parameters left? + if r := q.remaining(); len(r) > 0 { + return nil, fmt.Errorf("redis: unexpected option: %s", strings.Join(r, ", ")) + } + + return o, nil +} diff --git a/util/stopwaiter/stopwaiter.go b/util/stopwaiter/stopwaiter.go index 993768dd85..c242ac26ab 100644 --- a/util/stopwaiter/stopwaiter.go +++ b/util/stopwaiter/stopwaiter.go @@ -96,20 +96,12 @@ func (s *StopWaiterSafe) Start(ctx context.Context, parent any) error { } func (s *StopWaiterSafe) StopOnly() { - _ = s.stopOnly() -} - -// returns true if stop function was called -func (s *StopWaiterSafe) stopOnly() bool { - stopWasCalled := false s.mutex.Lock() defer s.mutex.Unlock() if s.started && !s.stopped { s.stopFunc() - stopWasCalled = true } s.stopped = true - return stopWasCalled } // StopAndWait may be called multiple times, even before start. @@ -126,9 +118,15 @@ func getAllStackTraces() string { } func (s *StopWaiterSafe) stopAndWaitImpl(warningTimeout time.Duration) error { - if !s.stopOnly() { + s.StopOnly() + if !s.Started() { + // No need to wait, because nothing can be started if it's already stopped. return nil } + // Even if StopOnly has been previously called, make sure we wait for everything to shut down. + // Otherwise, a StopOnly call followed by StopAndWait might return early without waiting. + // At this point started must be true (because it was true above and cannot go back to false), + // so GetWaitChannel won't return an error. 
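For the redis+sentinel scheme added to redisutil above, the URL shape implied by the parser is `redis+sentinel://<user>:<password>@<host1>:<port1>,<host2>:<port2>/<master-name>/<db>?<options>`; a minimal, self-contained sketch (host names, credentials and the master name are placeholders):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/offchainlabs/nitro/util/redisutil"
)

func main() {
	// Three sentinels watching the master named "mymaster", database 0.
	// Plain-number timeouts are interpreted as seconds by the parser above.
	url := "redis+sentinel://user:secret@sentinel-1:26379,sentinel-2:26379,sentinel-3:26379/mymaster/0" +
		"?dial_timeout=3&read_timeout=6s&max_retries=2"
	client, err := redisutil.RedisClientFromURL(url)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("ping:", client.Ping(context.Background()).Err())
}
```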
waitChan, err := s.GetWaitChannel() if err != nil { return err diff --git a/util/stopwaiter/stopwaiter_test.go b/util/stopwaiter/stopwaiter_test.go index c561e1f43b..68e49ac2be 100644 --- a/util/stopwaiter/stopwaiter_test.go +++ b/util/stopwaiter/stopwaiter_test.go @@ -5,6 +5,7 @@ package stopwaiter import ( "context" + "sync/atomic" "testing" "time" @@ -73,3 +74,19 @@ func TestStopWaiterStopAndWaitMultipleTimes(t *testing.T) { sw.StopAndWait() sw.StopAndWait() } + +func TestStopWaiterStopOnlyThenStopAndWait(t *testing.T) { + t.Parallel() + sw := StopWaiter{} + sw.Start(context.Background(), &TestStruct{}) + var threadStopping atomic.Bool + sw.LaunchThread(func(context.Context) { + time.Sleep(time.Second) + threadStopping.Store(true) + }) + sw.StopOnly() + sw.StopAndWait() + if !threadStopping.Load() { + t.Error("StopAndWait returned before background thread stopped") + } +} diff --git a/validator/client/validation_client.go b/validator/client/validation_client.go index 0a6555121e..c04817d654 100644 --- a/validator/client/validation_client.go +++ b/validator/client/validation_client.go @@ -153,10 +153,14 @@ func NewExecutionClient(config rpcclient.ClientConfigFetcher, stack *node.Node) } } -func (c *ExecutionClient) CreateExecutionRun(wasmModuleRoot common.Hash, input *validator.ValidationInput) containers.PromiseInterface[validator.ExecutionRun] { - return stopwaiter.LaunchPromiseThread[validator.ExecutionRun](c, func(ctx context.Context) (validator.ExecutionRun, error) { +func (c *ExecutionClient) CreateExecutionRun( + wasmModuleRoot common.Hash, + input *validator.ValidationInput, + useBoldMachine bool, +) containers.PromiseInterface[validator.ExecutionRun] { + return stopwaiter.LaunchPromiseThread(c, func(ctx context.Context) (validator.ExecutionRun, error) { var res uint64 - err := c.client.CallContext(ctx, &res, server_api.Namespace+"_createExecutionRun", wasmModuleRoot, server_api.ValidationInputToJson(input)) + err := c.client.CallContext(ctx, &res, server_api.Namespace+"_createExecutionRun", wasmModuleRoot, server_api.ValidationInputToJson(input), useBoldMachine) if err != nil { return nil, err } @@ -194,6 +198,10 @@ func (r *ExecutionClientRun) SendKeepAlive(ctx context.Context) time.Duration { return time.Minute // TODO: configurable } +func (r *ExecutionClientRun) CheckAlive(ctx context.Context) error { + return r.client.client.CallContext(ctx, nil, server_api.Namespace+"_checkAlive", r.id) +} + func (r *ExecutionClientRun) Start(ctx_in context.Context) { r.StopWaiter.Start(ctx_in, r) r.CallIteratively(r.SendKeepAlive) diff --git a/validator/execution_state.go b/validator/execution_state.go index b9cea8ec3b..81e32a6992 100644 --- a/validator/execution_state.go +++ b/validator/execution_state.go @@ -19,6 +19,13 @@ type GoGlobalState struct { PosInBatch uint64 } +func (g GoGlobalState) String() string { + return fmt.Sprintf( + "BlockHash: %s, SendRoot: %s, Batch: %d, PosInBatch: %d", + g.BlockHash.Hex(), g.SendRoot.Hex(), g.Batch, g.PosInBatch, + ) +} + type MachineStatus uint8 const ( diff --git a/validator/interface.go b/validator/interface.go index bfccaefcfa..249cf1b1c3 100644 --- a/validator/interface.go +++ b/validator/interface.go @@ -26,7 +26,7 @@ type ValidationRun interface { type ExecutionSpawner interface { ValidationSpawner - CreateExecutionRun(wasmModuleRoot common.Hash, input *ValidationInput) containers.PromiseInterface[ExecutionRun] + CreateExecutionRun(wasmModuleRoot common.Hash, input *ValidationInput, useBoldMachine bool) 
containers.PromiseInterface[ExecutionRun] LatestWasmModuleRoot() containers.PromiseInterface[common.Hash] } @@ -37,4 +37,5 @@ type ExecutionRun interface { GetProofAt(uint64) containers.PromiseInterface[[]byte] PrepareRange(uint64, uint64) containers.PromiseInterface[struct{}] Close() + CheckAlive(ctx context.Context) error } diff --git a/validator/server_arb/bold_machine.go b/validator/server_arb/bold_machine.go new file mode 100644 index 0000000000..6ca48ba228 --- /dev/null +++ b/validator/server_arb/bold_machine.go @@ -0,0 +1,145 @@ +package server_arb + +import ( + "context" + + "github.com/ethereum/go-ethereum/common" + + "github.com/offchainlabs/nitro/validator" +) + +// boldMachine wraps a server_arb.MachineInterface. +type BoldMachine struct { + inner MachineInterface + zeroMachine *ArbitratorMachine + hasStepped bool +} + +// Ensure boldMachine implements server_arb.MachineInterface. +var _ MachineInterface = (*BoldMachine)(nil) + +func newBoldMachine(inner MachineInterface) *BoldMachine { + z := NewFinishedMachine(inner.GetGlobalState()) + return &BoldMachine{ + inner: inner, + zeroMachine: z, + hasStepped: false, + } +} + +// Wraps a server_arb.MachineInterface and adds one step to the +// front of the machine's execution. +// +// This zeroth step should be at the same global state as the inner arbitrator +// machine has at step 0, but the machine is in the Finished state rather than +// the Running state. +func BoldMachineWrapper(inner MachineInterface) MachineInterface { + return newBoldMachine(inner) +} + +// CloneMachineInterface returns a new boldMachine with the same inner machine. +func (m *BoldMachine) CloneMachineInterface() MachineInterface { + bMach := newBoldMachine(m.inner.CloneMachineInterface()) + bMach.hasStepped = m.hasStepped + return bMach +} + +// GetStepCount returns zero if the machine has not stepped, otherwise it +// returns the inner machine's step count plus one. +func (m *BoldMachine) GetStepCount() uint64 { + if !m.hasStepped { + return 0 + } + return m.inner.GetStepCount() + 1 +} + +// Hash returns the hash of the inner machine if the machine has not stepped, +// otherwise it returns the hash of the zeroth step machine. +func (m *BoldMachine) Hash() common.Hash { + if !m.hasStepped { + return m.zeroMachine.Hash() + } + return m.inner.Hash() +} + +// Destroy destroys the inner machine and the zeroth step machine. +func (m *BoldMachine) Destroy() { + m.inner.Destroy() + m.zeroMachine.Destroy() +} + +// Freeze freezes the inner machine and the zeroth step machine. +func (m *BoldMachine) Freeze() { + m.inner.Freeze() + m.zeroMachine.Freeze() +} + +// Status returns the status of the inner machine if the machine has not +// stepped, otherwise it returns the status of the zeroth step machine. +func (m *BoldMachine) Status() uint8 { + if !m.hasStepped { + return m.zeroMachine.Status() + } + return m.inner.Status() +} + +// IsRunning returns true if the machine has not stepped, otherwise it +// returns the running state of the inner machine. +func (m *BoldMachine) IsRunning() bool { + if !m.hasStepped { + return true + } + return m.inner.IsRunning() +} + +// IsErrored returns the errored state of the inner machine, or false if the +// machine has not stepped. +func (m *BoldMachine) IsErrored() bool { + if !m.hasStepped { + return false + } + return m.inner.IsErrored() +} + +// Step steps the inner machine if the machine has not stepped, otherwise it +// steps the zeroth step machine. 
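The widened CreateExecutionRun signature and the new CheckAlive method shown above combine as follows; a sketch assuming `spawner` is any validator.ExecutionSpawner (for example an *ExecutionClient) and `input` a prepared *validator.ValidationInput:

```go
func spawnBoldExecutionRun(
	ctx context.Context,
	spawner validator.ExecutionSpawner,
	wasmModuleRoot common.Hash,
	input *validator.ValidationInput,
) (validator.ExecutionRun, error) {
	// Pass true so the server wraps the machine with the BoLD step-0 semantics.
	run, err := spawner.CreateExecutionRun(wasmModuleRoot, input, true).Await(ctx)
	if err != nil {
		return nil, err
	}
	// Verify the server-side run still exists before relying on it.
	if err := run.CheckAlive(ctx); err != nil {
		run.Close()
		return nil, err
	}
	return run, nil
}
```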
diff --git a/validator/server_arb/bold_machine.go b/validator/server_arb/bold_machine.go
new file mode 100644
index 0000000000..6ca48ba228
--- /dev/null
+++ b/validator/server_arb/bold_machine.go
@@ -0,0 +1,145 @@
+package server_arb
+
+import (
+	"context"
+
+	"github.com/ethereum/go-ethereum/common"
+
+	"github.com/offchainlabs/nitro/validator"
+)
+
+// BoldMachine wraps a server_arb.MachineInterface.
+type BoldMachine struct {
+	inner       MachineInterface
+	zeroMachine *ArbitratorMachine
+	hasStepped  bool
+}
+
+// Ensure BoldMachine implements server_arb.MachineInterface.
+var _ MachineInterface = (*BoldMachine)(nil)
+
+func newBoldMachine(inner MachineInterface) *BoldMachine {
+	z := NewFinishedMachine(inner.GetGlobalState())
+	return &BoldMachine{
+		inner:       inner,
+		zeroMachine: z,
+		hasStepped:  false,
+	}
+}
+
+// BoldMachineWrapper wraps a server_arb.MachineInterface and adds one step to
+// the front of the machine's execution.
+//
+// This zeroth step should be at the same global state as the inner arbitrator
+// machine has at step 0, but the machine is in the Finished state rather than
+// the Running state.
+func BoldMachineWrapper(inner MachineInterface) MachineInterface {
+	return newBoldMachine(inner)
+}
+
+// CloneMachineInterface returns a new BoldMachine with the same inner machine.
+func (m *BoldMachine) CloneMachineInterface() MachineInterface {
+	bMach := newBoldMachine(m.inner.CloneMachineInterface())
+	bMach.hasStepped = m.hasStepped
+	return bMach
+}
+
+// GetStepCount returns zero if the machine has not stepped, otherwise it
+// returns the inner machine's step count plus one.
+func (m *BoldMachine) GetStepCount() uint64 {
+	if !m.hasStepped {
+		return 0
+	}
+	return m.inner.GetStepCount() + 1
+}
+
+// Hash returns the hash of the zeroth step machine if the machine has not
+// stepped, otherwise it returns the hash of the inner machine.
+func (m *BoldMachine) Hash() common.Hash {
+	if !m.hasStepped {
+		return m.zeroMachine.Hash()
+	}
+	return m.inner.Hash()
+}
+
+// Destroy destroys the inner machine and the zeroth step machine.
+func (m *BoldMachine) Destroy() {
+	m.inner.Destroy()
+	m.zeroMachine.Destroy()
+}
+
+// Freeze freezes the inner machine and the zeroth step machine.
+func (m *BoldMachine) Freeze() {
+	m.inner.Freeze()
+	m.zeroMachine.Freeze()
+}
+
+// Status returns the status of the zeroth step machine if the machine has not
+// stepped, otherwise it returns the status of the inner machine.
+func (m *BoldMachine) Status() uint8 {
+	if !m.hasStepped {
+		return m.zeroMachine.Status()
+	}
+	return m.inner.Status()
+}
+
+// IsRunning returns true if the machine has not stepped, otherwise it
+// returns the running state of the inner machine.
+func (m *BoldMachine) IsRunning() bool {
+	if !m.hasStepped {
+		return true
+	}
+	return m.inner.IsRunning()
+}
+
+// IsErrored returns the errored state of the inner machine, or false if the
+// machine has not stepped.
+func (m *BoldMachine) IsErrored() bool {
+	if !m.hasStepped {
+		return false
+	}
+	return m.inner.IsErrored()
+}
+
+// Step advances the machine. The first call consumes the implicit zeroth step,
+// so the inner machine is advanced by one fewer step on that call; later calls
+// step the inner machine directly.
+func (m *BoldMachine) Step(ctx context.Context, steps uint64) error {
+	if !m.hasStepped {
+		if steps == 0 {
+			// Zero is okay, but doesn't advance the machine.
+			return nil
+		}
+		m.hasStepped = true
+		// Only the first step or set of steps needs to be adjusted.
+		steps = steps - 1
+	}
+	return m.inner.Step(ctx, steps)
+}
+
+// ValidForStep returns true for step 0 if and only if the machine has not
+// stepped yet, and the inner machine's ValidForStep for the step minus one
+// otherwise.
+func (m *BoldMachine) ValidForStep(step uint64) bool {
+	if step == 0 {
+		return !m.hasStepped
+	}
+	return m.inner.ValidForStep(step - 1)
+}
+
+// GetGlobalState returns the global state of the inner machine if the machine
+// has stepped, otherwise it returns the global state of the zeroth step.
+func (m *BoldMachine) GetGlobalState() validator.GoGlobalState {
+	if !m.hasStepped {
+		return m.zeroMachine.GetGlobalState()
+	}
+	return m.inner.GetGlobalState()
+}
+
+// ProveNextStep returns the proof of the next step of the inner machine if the
+// machine has stepped, otherwise it returns the proof that the zeroth step
+// results in the inner machine's initial global state.
+func (m *BoldMachine) ProveNextStep() []byte {
+	if !m.hasStepped {
+		return m.zeroMachine.ProveNextStep()
+	}
+	return m.inner.ProveNextStep()
+}
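A rough illustration of the one-step offset the wrapper introduces; the package and function names are hypothetical, and `inner` is assumed to be a MachineInterface positioned at its start state:

package example // hypothetical, for illustration only

import (
	"context"

	"github.com/offchainlabs/nitro/validator/server_arb"
)

// illustrateBoldOffset assumes `inner` is at its start state (for example, a
// freshly cloned zero-step ArbitratorMachine).
func illustrateBoldOffset(ctx context.Context, inner server_arb.MachineInterface) error {
	m := server_arb.BoldMachineWrapper(inner)

	// Before the first Step, the wrapper exposes the implicit zeroth step:
	// step count 0 and the hash of a Finished machine at the start state.
	_ = m.GetStepCount() // 0
	_ = m.Hash()         // hash of NewFinishedMachine(inner.GetGlobalState())

	// The first Step consumes the zeroth step, so stepping by n advances the
	// inner machine by n-1 on that call only.
	if err := m.Step(ctx, 1); err != nil {
		return err
	}
	_ = m.GetStepCount() // inner.GetStepCount() + 1
	_ = m.Hash()         // now the inner machine's hash
	return nil
}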
diff --git a/validator/server_arb/execution_run.go b/validator/server_arb/execution_run.go
index 270ace3180..66d8e158d0 100644
--- a/validator/server_arb/execution_run.go
+++ b/validator/server_arb/execution_run.go
@@ -10,7 +10,6 @@ import (
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/log"
 
 	"github.com/offchainlabs/nitro/util/containers"
@@ -25,7 +24,8 @@ type executionRun struct {
 }
 
 // NewExecutionRun creates a backend with the given arguments.
-// Note: machineCache may be nil, but if present, it must not have a restricted range.
+// Note: machineCache may be nil, but if present, it must not have a restricted
+// range.
 func NewExecutionRun(
 	ctxIn context.Context,
 	initialMachineGetter func(context.Context) (MachineInterface, error),
@@ -105,21 +105,9 @@ func (e *executionRun) machineHashesWithStepSize(
 	if err != nil {
 		return nil, err
 	}
-	log.Debug(fmt.Sprintf("Advanced machine to index %d, beginning hash computation", machineStartIndex))
-
-	// In BOLD, the hash of a machine at index 0 is a special hash that is computed as the
-	// `machineFinishedHash(gs)` where `gs` is the global state of the machine at index 0.
-	// This is so that the hash aligns with the start state of the claimed challenge edge
-	// at the level above, as required by the BOLD protocol.
-	var machineHashes []common.Hash
-	if machineStartIndex == 0 {
-		gs := machine.GetGlobalState()
-		log.Debug(fmt.Sprintf("Start global state for machine index 0: %+v", gs))
-		machineHashes = append(machineHashes, machineFinishedHash(gs))
-	} else {
-		// Otherwise, we simply append the machine hash at the specified start index.
-		machineHashes = append(machineHashes, machine.Hash())
-	}
+	log.Info("Advanced WASM machine index, beginning challenge hash computation", "machineStartIndex", machineStartIndex)
+
+	machineHashes := []common.Hash{machine.Hash()}
 	startHash := machineHashes[0]
 
 	// If we only want 1 hash, we can return early.
@@ -195,6 +183,6 @@ func (e *executionRun) GetLastStep() containers.PromiseInterface[*validator.Mach
 	return e.GetStepAt(^uint64(0))
 }
 
-func machineFinishedHash(gs validator.GoGlobalState) common.Hash {
-	return crypto.Keccak256Hash([]byte("Machine finished:"), gs.Hash().Bytes())
+func (e *executionRun) CheckAlive(ctx context.Context) error {
+	return nil
 }
diff --git a/validator/server_arb/execution_run_test.go b/validator/server_arb/execution_run_test.go
index 1f8e9625c1..381cfa63a8 100644
--- a/validator/server_arb/execution_run_test.go
+++ b/validator/server_arb/execution_run_test.go
@@ -16,9 +16,6 @@ type mockMachine struct {
 }
 
 func (m *mockMachine) Hash() common.Hash {
-	if m.gs.PosInBatch == m.totalSteps-1 {
-		return machineFinishedHash(m.gs)
-	}
 	return m.gs.Hash()
 }
 
@@ -48,6 +45,9 @@ func (m *mockMachine) GetStepCount() uint64 {
 func (m *mockMachine) IsRunning() bool {
 	return m.gs.PosInBatch < m.totalSteps-1
 }
+func (m *mockMachine) IsErrored() bool {
+	return false
+}
 func (m *mockMachine) ValidForStep(uint64) bool {
 	return true
 }
@@ -103,7 +103,7 @@ func Test_machineHashesWithStep(t *testing.T) {
 		if err != nil {
 			t.Fatal(err)
 		}
-		expected := machineFinishedHash(mm.gs)
+		expected := mm.gs.Hash()
 		if len(hashes) != 1 {
 			t.Error("Wanted one hash")
 		}
@@ -137,7 +137,7 @@ func Test_machineHashesWithStep(t *testing.T) {
 		expectedHashes := make([]common.Hash, 0)
 		for i := uint64(0); i < 4; i++ {
 			if i == 0 {
-				expectedHashes = append(expectedHashes, machineFinishedHash(initialGs))
+				expectedHashes = append(expectedHashes, initialGs.Hash())
 				continue
 			}
 			gs := validator.GoGlobalState{
@@ -182,7 +182,7 @@ func Test_machineHashesWithStep(t *testing.T) {
 		expectedHashes := make([]common.Hash, 0)
 		for i := uint64(0); i < 4; i++ {
 			if i == 0 {
-				expectedHashes = append(expectedHashes, machineFinishedHash(initialGs))
+				expectedHashes = append(expectedHashes, initialGs.Hash())
 				continue
 			}
 			gs := validator.GoGlobalState{
@@ -191,10 +191,10 @@ func Test_machineHashesWithStep(t *testing.T) {
 			}
 			expectedHashes = append(expectedHashes, gs.Hash())
 		}
-		expectedHashes = append(expectedHashes, machineFinishedHash(validator.GoGlobalState{
+		expectedHashes = append(expectedHashes, validator.GoGlobalState{
 			Batch:      1,
 			PosInBatch: mm.totalSteps - 1,
-		}))
+		}.Hash())
 		if uint64(len(hashes)) >= maxIterations {
 			t.Fatal("Wanted fewer hashes than the max iterations")
 		}
diff --git a/validator/server_arb/machine.go b/validator/server_arb/machine.go
index c429fa6101..e4e07d3c2d 100644
--- a/validator/server_arb/machine.go
+++ b/validator/server_arb/machine.go
@@ -39,6 +39,7 @@ type MachineInterface interface {
 	CloneMachineInterface() MachineInterface
 	GetStepCount() uint64
 	IsRunning() bool
+	IsErrored() bool
 	ValidForStep(uint64) bool
 	Status() uint8
 	Step(context.Context, uint64) error
@@ -117,6 +118,14 @@ func LoadSimpleMachine(wasm string, libraries []string, debugChain bool) (*Arbit
 	return machineFromPointer(mach), nil
 }
 
+func NewFinishedMachine(gs validator.GoGlobalState) *ArbitratorMachine {
+	mach := C.arbitrator_new_finished(GlobalStateToC(gs))
+	if mach == nil {
+		return nil
+	}
+	return machineFromPointer(mach)
+}
+
 func (m *ArbitratorMachine) Freeze() {
 	m.frozen = true
 }
@@ -295,9 +304,13 @@ func (m *ArbitratorMachine) ProveNextStep() []byte {
 	m.mutex.Lock()
 	defer m.mutex.Unlock()
 
-	rustProof := C.arbitrator_gen_proof(m.ptr)
-	proofBytes := C.GoBytes(unsafe.Pointer(rustProof.ptr), C.int(rustProof.len))
-	C.arbitrator_free_proof(rustProof)
+	output := &C.RustBytes{}
+	C.arbitrator_gen_proof(m.ptr, output)
+	defer C.free_rust_bytes(*output)
+	if output.len == 0 {
+		return nil
+	}
+	proofBytes := C.GoBytes(unsafe.Pointer(output.ptr), C.int(output.len))
 
 	return proofBytes
 }
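A minimal sketch of the new NewFinishedMachine constructor on its own, mirroring what newBoldMachine does for the wrapper's zeroth step; the package and helper names are hypothetical:

package example // hypothetical, for illustration only

import (
	"github.com/ethereum/go-ethereum/common"

	"github.com/offchainlabs/nitro/validator"
	"github.com/offchainlabs/nitro/validator/server_arb"
)

// finishedMachineHash builds a machine that is already in the Finished state
// at the given global state and returns its hash. NewFinishedMachine can
// return nil, so callers should check for it and Destroy the machine when done.
func finishedMachineHash(gs validator.GoGlobalState) (common.Hash, bool) {
	m := server_arb.NewFinishedMachine(gs)
	if m == nil {
		return common.Hash{}, false
	}
	defer m.Destroy()
	return m.Hash(), true
}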
diff --git a/validator/server_arb/validator_spawner.go b/validator/server_arb/validator_spawner.go
index bb7fbcf97d..4c74bca695 100644
--- a/validator/server_arb/validator_spawner.go
+++ b/validator/server_arb/validator_spawner.go
@@ -56,20 +56,42 @@ func DefaultArbitratorSpawnerConfigFetcher() *ArbitratorSpawnerConfig {
 	return &DefaultArbitratorSpawnerConfig
 }
 
+// MachineWrapper is a function that wraps a MachineInterface.
+//
+// This is a mechanism to allow clients of the ArbitratorSpawner to inject
+// functionality around the arbitrator machine. Possible use cases include
+// mocking out the machine for testing purposes, or having the machine behave
+// differently when certain features (like BoLD) are enabled.
+type MachineWrapper func(MachineInterface) MachineInterface
+
+type SpawnerOption func(*ArbitratorSpawner)
+
 type ArbitratorSpawner struct {
 	stopwaiter.StopWaiter
 	count         atomic.Int32
 	locator       *server_common.MachineLocator
 	machineLoader *ArbMachineLoader
-	config        ArbitratorSpawnerConfigFecher
+	// Order of wrappers is important. The first wrapper is the innermost.
+	machineWrappers []MachineWrapper
+	config          ArbitratorSpawnerConfigFecher
+}
+
+func WithWrapper(wrapper MachineWrapper) SpawnerOption {
+	return func(s *ArbitratorSpawner) {
+		s.machineWrappers = append(s.machineWrappers, wrapper)
+	}
 }
 
-func NewArbitratorSpawner(locator *server_common.MachineLocator, config ArbitratorSpawnerConfigFecher) (*ArbitratorSpawner, error) {
+func NewArbitratorSpawner(locator *server_common.MachineLocator, config ArbitratorSpawnerConfigFecher, opts ...SpawnerOption) (*ArbitratorSpawner, error) {
 	// TODO: preload machines
 	spawner := &ArbitratorSpawner{
-		locator:       locator,
-		machineLoader: NewArbMachineLoader(&DefaultArbitratorMachineConfig, locator),
-		config:        config,
+		locator:         locator,
+		machineLoader:   NewArbMachineLoader(&DefaultArbitratorMachineConfig, locator),
+		machineWrappers: make([]MachineWrapper, 0),
+		config:          config,
+	}
+	for _, opt := range opts {
+		opt(spawner)
 	}
 	return spawner, nil
 }
@@ -159,12 +181,16 @@ func (v *ArbitratorSpawner) execute(
 		return validator.GoGlobalState{}, fmt.Errorf("unabled to get WASM machine: %w", err)
 	}
 
-	mach := basemachine.Clone()
-	defer mach.Destroy()
-	err = v.loadEntryToMachine(ctx, entry, mach)
+	arbMach := basemachine.Clone()
+	defer arbMach.Destroy()
+	err = v.loadEntryToMachine(ctx, entry, arbMach)
 	if err != nil {
 		return validator.GoGlobalState{}, err
 	}
+	var mach MachineInterface = arbMach
+	for _, wrapper := range v.machineWrappers {
+		mach = wrapper(mach)
+	}
 	var steps uint64
 	for mach.IsRunning() {
 		var count uint64 = 500000000
@@ -189,9 +215,8 @@ func (v *ArbitratorSpawner) execute(
 }
 
 func (v *ArbitratorSpawner) Launch(entry *validator.ValidationInput, moduleRoot common.Hash) validator.ValidationRun {
-	println("LAUCHING ARBITRATOR VALIDATION")
 	v.count.Add(1)
-	promise := stopwaiter.LaunchPromiseThread[validator.GoGlobalState](v, func(ctx context.Context) (validator.GoGlobalState, error) {
+	promise := stopwaiter.LaunchPromiseThread(v, func(ctx context.Context) (validator.GoGlobalState, error) {
 		defer v.count.Add(-1)
 		return v.execute(ctx, entry, moduleRoot)
 	})
@@ -206,7 +231,7 @@ func (v *ArbitratorSpawner) Room() int {
 	return avail
 }
 
-func (v *ArbitratorSpawner) CreateExecutionRun(wasmModuleRoot common.Hash, input *validator.ValidationInput) containers.PromiseInterface[validator.ExecutionRun] {
+func (v *ArbitratorSpawner) CreateExecutionRun(wasmModuleRoot common.Hash, input *validator.ValidationInput, useBoldMachine bool) containers.PromiseInterface[validator.ExecutionRun] {
 	getMachine := func(ctx context.Context) (MachineInterface, error) {
 		initialFrozenMachine, err := v.machineLoader.GetZeroStepMachine(ctx, wasmModuleRoot)
 		if err != nil {
@@ -218,7 +243,16 @@ func (v *ArbitratorSpawner) CreateExecutionRun(wasmModuleRoot common.Hash, input
 			machine.Destroy()
 			return nil, err
 		}
-		return machine, nil
+		var wrapped MachineInterface
+		if useBoldMachine {
+			wrapped = BoldMachineWrapper(machine)
+		} else {
+			wrapped = MachineInterface(machine)
+		}
+		for _, wrapper := range v.machineWrappers {
+			wrapped = wrapper(wrapped)
+		}
+		return wrapped, nil
 	}
 	currentExecConfig := v.config().Execution
 	return stopwaiter.LaunchPromiseThread[validator.ExecutionRun](v, func(ctx context.Context) (validator.ExecutionRun, error) {
diff --git a/validator/valnode/validation_api.go b/validator/valnode/validation_api.go
index ef3e1b2c49..dab74f6e29 100644
--- a/validator/valnode/validation_api.go
+++ b/validator/valnode/validation_api.go
@@ -80,12 +80,16 @@ func NewExecutionServerAPI(valSpawner validator.ValidationSpawner, execution val
 	}
 }
 
-func (a *ExecServerAPI) CreateExecutionRun(ctx context.Context, wasmModuleRoot common.Hash, jsonInput *server_api.InputJSON) (uint64, error) {
+func (a *ExecServerAPI) CreateExecutionRun(ctx context.Context, wasmModuleRoot common.Hash, jsonInput *server_api.InputJSON, useBoldMachineOptional *bool) (uint64, error) {
 	input, err := server_api.ValidationInputFromJson(jsonInput)
 	if err != nil {
 		return 0, err
 	}
-	execRun, err := a.execSpawner.CreateExecutionRun(wasmModuleRoot, input).Await(ctx)
+	useBoldMachine := false
+	if useBoldMachineOptional != nil {
+		useBoldMachine = *useBoldMachineOptional
+	}
+	execRun, err := a.execSpawner.CreateExecutionRun(wasmModuleRoot, input, useBoldMachine).Await(ctx)
 	if err != nil {
 		return 0, err
 	}
@@ -187,6 +191,14 @@ func (a *ExecServerAPI) ExecKeepAlive(ctx context.Context, execid uint64) error
 	return nil
 }
 
+func (a *ExecServerAPI) CheckAlive(ctx context.Context, execid uint64) error {
+	run, err := a.getRun(execid)
+	if err != nil {
+		return err
+	}
+	return run.CheckAlive(ctx)
+}
+
 func (a *ExecServerAPI) CloseExec(execid uint64) {
 	a.runIdLock.Lock()
 	defer a.runIdLock.Unlock()
diff --git a/validator/valnode/valnode.go b/validator/valnode/valnode.go
index e2f4f79bef..e3bf662aaa 100644
--- a/validator/valnode/valnode.go
+++ b/validator/valnode/valnode.go
@@ -94,7 +94,7 @@ func EnsureValidationExposedViaAuthRPC(stackConf *node.Config) {
 	}
 }
 
-func CreateValidationNode(configFetcher ValidationConfigFetcher, stack *node.Node, fatalErrChan chan error) (*ValidationNode, error) {
+func CreateValidationNode(configFetcher ValidationConfigFetcher, stack *node.Node, fatalErrChan chan error, spawnerOpts ...server_arb.SpawnerOption) (*ValidationNode, error) {
 	config := configFetcher()
 	locator, err := server_common.NewMachineLocator(config.Wasm.RootPath)
 	if err != nil {
@@ -103,7 +103,7 @@ func CreateValidationNode(configFetcher ValidationConfigFetcher, stack *node.Nod
 	arbConfigFetcher := func() *server_arb.ArbitratorSpawnerConfig {
 		return &configFetcher().Arbitrator
 	}
-	arbSpawner, err := server_arb.NewArbitratorSpawner(locator, arbConfigFetcher)
+	arbSpawner, err := server_arb.NewArbitratorSpawner(locator, arbConfigFetcher, spawnerOpts...)
 	if err != nil {
 		return nil, err
 	}
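Putting the spawner options together, a caller could inject a custom MachineWrapper when building a validation node; the logging wrapper below is hypothetical and simplified (for example, it does not re-wrap the result of CloneMachineInterface):

package example // hypothetical, for illustration only

import (
	"context"

	"github.com/ethereum/go-ethereum/log"

	"github.com/offchainlabs/nitro/validator/server_arb"
)

// loggingMachine logs every Step call and delegates everything else to the
// embedded machine.
type loggingMachine struct {
	server_arb.MachineInterface
}

func (m *loggingMachine) Step(ctx context.Context, steps uint64) error {
	log.Info("stepping machine", "steps", steps, "stepCount", m.GetStepCount())
	return m.MachineInterface.Step(ctx, steps)
}

// withLogging can be passed to server_arb.NewArbitratorSpawner directly, or
// threaded through valnode.CreateValidationNode as a trailing SpawnerOption:
//
//	valNode, err := valnode.CreateValidationNode(configFetcher, stack, fatalErrChan, withLogging)
var withLogging = server_arb.WithWrapper(func(inner server_arb.MachineInterface) server_arb.MachineInterface {
	return &loggingMachine{MachineInterface: inner}
})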