From e7fa979c15e260d52e8bc3afa596677f59042817 Mon Sep 17 00:00:00 2001
From: Alexander Kolotov
Date: Thu, 4 Jul 2024 05:54:00 -0600
Subject: [PATCH 01/32] fix: indexer first block usage to halt Arbitrum missed
 messages discovery (#10280)

---
 .../arbitrum/rollup_messages_catchup.ex       | 19 +++---
 .../workers/historical_messages_on_l2.ex      | 65 +++++++++++--------
 2 files changed, 48 insertions(+), 36 deletions(-)

diff --git a/apps/indexer/lib/indexer/fetcher/arbitrum/rollup_messages_catchup.ex b/apps/indexer/lib/indexer/fetcher/arbitrum/rollup_messages_catchup.ex
index 0f6f8b09cdeb..cbd7cf085353 100644
--- a/apps/indexer/lib/indexer/fetcher/arbitrum/rollup_messages_catchup.ex
+++ b/apps/indexer/lib/indexer/fetcher/arbitrum/rollup_messages_catchup.ex
@@ -82,6 +82,8 @@ defmodule Indexer.Fetcher.Arbitrum.RollupMessagesCatchup do
   def init(args) do
     Logger.metadata(fetcher: :arbitrum_bridge_l2_catchup)
 
+    indexer_first_block = Application.get_all_env(:indexer)[:first_block]
+
     config_common = Application.get_all_env(:indexer)[Indexer.Fetcher.Arbitrum]
     rollup_chunk_size = config_common[:rollup_chunk_size]
 
@@ -97,7 +99,8 @@ defmodule Indexer.Fetcher.Arbitrum.RollupMessagesCatchup do
       config: %{
         rollup_rpc: %{
           json_rpc_named_arguments: args[:json_rpc_named_arguments],
-          chunk_size: rollup_chunk_size
+          chunk_size: rollup_chunk_size,
+          first_block: indexer_first_block
         },
         json_l2_rpc_named_arguments: args[:json_rpc_named_arguments],
         recheck_interval: recheck_interval,
@@ -272,9 +275,7 @@ defmodule Indexer.Fetcher.Arbitrum.RollupMessagesCatchup do
   @impl GenServer
   def handle_info(
         :historical_msg_to_l2,
-        %{
-          data: %{duration: _, historical_msg_to_l2_end_block: _, progressed: _}
-        } = state
+        %{data: %{duration: _, historical_msg_to_l2_end_block: _, progressed: _}} = state
       ) do
     end_block = state.data.historical_msg_to_l2_end_block
 
@@ -297,9 +298,9 @@ defmodule Indexer.Fetcher.Arbitrum.RollupMessagesCatchup do
   # Decides whether to stop or continue the fetcher based on the current state of message discovery.
   #
-  # If both `historical_msg_from_l2_end_block` and `historical_msg_to_l2_end_block` are 0 or less,
-  # indicating that there are no more historical messages to fetch, the task is stopped with a normal
-  # termination.
+  # If both `historical_msg_from_l2_end_block` and `historical_msg_to_l2_end_block` are less than
+  # the indexer first block, indicating that there are no more historical messages to fetch, the
+  # task is stopped with a normal termination.
   #
   # ## Parameters
   # - `:plan_next_iteration`: The message that triggers this function.
@@ -311,13 +312,15 @@ defmodule Indexer.Fetcher.Arbitrum.RollupMessagesCatchup do
   def handle_info(
         :plan_next_iteration,
         %{
+          config: %{rollup_rpc: %{first_block: rollup_first_block}},
           data: %{
             historical_msg_from_l2_end_block: from_l2_end_block,
             historical_msg_to_l2_end_block: to_l2_end_block
           }
         } = state
       )
-      when from_l2_end_block <= 0 and to_l2_end_block <= 0 do
+      when from_l2_end_block <= rollup_first_block and
+             to_l2_end_block <= rollup_first_block do
     {:stop, :normal, state}
   end
 
diff --git a/apps/indexer/lib/indexer/fetcher/arbitrum/workers/historical_messages_on_l2.ex b/apps/indexer/lib/indexer/fetcher/arbitrum/workers/historical_messages_on_l2.ex
index f34f037b4346..449c2fa7bb6c 100644
--- a/apps/indexer/lib/indexer/fetcher/arbitrum/workers/historical_messages_on_l2.ex
+++ b/apps/indexer/lib/indexer/fetcher/arbitrum/workers/historical_messages_on_l2.ex
@@ -34,20 +34,21 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.HistoricalMessagesOnL2 do
 
   ## Parameters
   - `end_block`: The ending block number up to which the discovery should occur.
-                 If `nil` or negative, the function returns with no action taken.
+                 If `nil` or less than the indexer first block, the function
+                 returns with no action taken.
   - `state`: Contains the operational configuration, including the depth of blocks
     to consider for the starting point of message discovery.
 
   ## Returns
   - `{:ok, nil}`: If `end_block` is `nil`, indicating no discovery action was required.
-  - `{:ok, 0}`: If `end_block` is negative, indicating that the genesis of the block
-                chain was reached.
+  - `{:ok, rollup_first_block}`: If `end_block` is less than the indexer first block,
+    indicating that the "genesis" of the blockchain was reached.
   - `{:ok, start_block}`: Upon successful discovery of historical messages, where
-                          `start_block` indicates the necessity to consider another
-                          block range in the next iteration of message discovery.
+    `start_block` indicates the necessity to consider another block range in the next
+    iteration of message discovery.
   - `{:ok, end_block + 1}`: If the required block range is not fully indexed,
-                            indicating that the next iteration of message discovery
-                            should start with the same block range.
+    indicating that the next iteration of message discovery should start with the same
+    block range.
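+
+  A sketch of how the catchup fetcher consumes these return values (variable
+  names here are illustrative, not part of the actual caller):
+
+      {:ok, lowest_handled_block} = discover_historical_messages_from_l2(end_block, state)
+      # the next iteration is scheduled with `lowest_handled_block - 1` as its
+      # `end_block`, and the fetcher stops once both historical end blocks fall
+      # below the configured indexer first block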
""" @spec discover_historical_messages_from_l2(nil | integer(), %{ :config => %{ @@ -62,18 +63,22 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.HistoricalMessagesOnL2 do {:ok, nil} end - def discover_historical_messages_from_l2(end_block, _) - when is_integer(end_block) and end_block < 0 do - {:ok, 0} + def discover_historical_messages_from_l2(end_block, %{config: %{rollup_rpc: %{first_block: rollup_first_block}}}) + when is_integer(end_block) and end_block < rollup_first_block do + {:ok, rollup_first_block} end def discover_historical_messages_from_l2( end_block, - %{config: %{messages_from_l2_blocks_depth: messages_from_l2_blocks_depth}} = _state + %{ + config: %{ + messages_from_l2_blocks_depth: messages_from_l2_blocks_depth, + rollup_rpc: %{first_block: rollup_first_block} + } + } = _state ) - when is_integer(end_block) and is_integer(messages_from_l2_blocks_depth) and - messages_from_l2_blocks_depth > 0 do - start_block = max(0, end_block - messages_from_l2_blocks_depth + 1) + when is_integer(end_block) do + start_block = max(rollup_first_block, end_block - messages_from_l2_blocks_depth + 1) if Db.indexed_blocks?(start_block, end_block) do do_discover_historical_messages_from_l2(start_block, end_block) @@ -130,21 +135,22 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.HistoricalMessagesOnL2 do `:relayed`, as they represent completed actions from L1 to L2. ## Parameters - - `end_block`: The ending block number for the discovery operation. If `nil` or negative, - the function returns immediately with no action. + - `end_block`: The ending block number for the discovery operation. + If `nil` or lesser than the indexer first block, the function + returns with no action taken. - `state`: The current state of the operation, containing configuration parameters - including `messages_to_l2_blocks_depth`, `chunk_size`, and JSON RPC connection settings. + including `messages_to_l2_blocks_depth`, `chunk_size`, and JSON RPC connection + settings. ## Returns - `{:ok, nil}`: If `end_block` is `nil`, indicating no action was necessary. - - `{:ok, 0}`: If `end_block` is negative, indicating that the genesis of the block chain - was reached. + - `{:ok, rollup_first_block}`: If `end_block` is lesser than the indexer first block, + indicating that the "genesis" of the block chain was reached. - `{:ok, start_block}`: On successful completion of historical message discovery, where - `start_block` indicates the necessity to consider another block - range in the next iteration of message discovery. + `start_block` indicates the necessity to consider another block range in the next + iteration of message discovery. - `{:ok, end_block + 1}`: If the required block range is not fully indexed, indicating - that the next iteration of message discovery should start with - the same block range. + that the next iteration of message discovery should start with the same block range. 
""" @spec discover_historical_messages_to_l2(nil | integer(), %{ :config => %{ @@ -164,14 +170,17 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.HistoricalMessagesOnL2 do {:ok, nil} end - def discover_historical_messages_to_l2(end_block, _) - when is_integer(end_block) and end_block < 0 do - {:ok, 0} + def discover_historical_messages_to_l2(end_block, %{config: %{rollup_rpc: %{first_block: rollup_first_block}}}) + when is_integer(end_block) and end_block < rollup_first_block do + {:ok, rollup_first_block} end - def discover_historical_messages_to_l2(end_block, %{config: %{messages_to_l2_blocks_depth: _} = config} = _state) + def discover_historical_messages_to_l2( + end_block, + %{config: %{messages_to_l2_blocks_depth: _, rollup_rpc: %{first_block: _}} = config} = _state + ) when is_integer(end_block) do - start_block = max(0, end_block - config.messages_to_l2_blocks_depth + 1) + start_block = max(config.rollup_rpc.first_block, end_block - config.messages_to_l2_blocks_depth + 1) # Although indexing blocks is not necessary to determine the completion of L1-to-L2 messages, # for database consistency, it is preferable to delay marking these messages as completed. From bd09249c175419ab49608f65252f55a79649df69 Mon Sep 17 00:00:00 2001 From: Alexander Kolotov Date: Thu, 4 Jul 2024 13:18:54 -0600 Subject: [PATCH 02/32] fix: proper lookup of confirmed Arbitrum cross-chain messages (#10322) * extended types documentation * proper query for confirmed messages --- .../explorer/chain/arbitrum/batch_block.ex | 29 +++++++++--- .../chain/arbitrum/batch_transaction.ex | 21 ++++++--- .../lib/explorer/chain/arbitrum/l1_batch.ex | 40 +++++++++++----- .../explorer/chain/arbitrum/l1_execution.ex | 24 ++++++++-- .../chain/arbitrum/lifecycle_transaction.ex | 40 +++++++++++----- .../lib/explorer/chain/arbitrum/message.ex | 47 +++++++++++++++---- .../lib/explorer/chain/arbitrum/reader.ex | 30 +++++++++--- .../lib/indexer/fetcher/arbitrum/utils/db.ex | 19 ++------ .../fetcher/arbitrum/workers/new_batches.ex | 25 ++++++++++ .../arbitrum/workers/new_confirmations.ex | 25 ++++++++-- .../arbitrum/workers/new_l1_executions.ex | 32 ++++++++----- .../arbitrum/workers/new_messages_to_l2.ex | 10 ++++ 12 files changed, 255 insertions(+), 87 deletions(-) diff --git a/apps/explorer/lib/explorer/chain/arbitrum/batch_block.ex b/apps/explorer/lib/explorer/chain/arbitrum/batch_block.ex index 9ba9a0e806dd..51b8f541a196 100644 --- a/apps/explorer/lib/explorer/chain/arbitrum/batch_block.ex +++ b/apps/explorer/lib/explorer/chain/arbitrum/batch_block.ex @@ -17,16 +17,31 @@ defmodule Explorer.Chain.Arbitrum.BatchBlock do @required_attrs ~w(batch_number block_number)a - @type t :: %__MODULE__{ - batch_number: non_neg_integer(), - batch: %Ecto.Association.NotLoaded{} | L1Batch.t() | nil, - block_number: non_neg_integer(), - confirmation_id: non_neg_integer() | nil, - confirmation_transaction: %Ecto.Association.NotLoaded{} | LifecycleTransaction.t() | nil + @typedoc """ + Descriptor of the a rollup block included in an Arbitrum batch: + * `batch_number` - The number of the Arbitrum batch. + * `block_number` - The number of the rollup block. + * `confirmation_id` - The ID of the confirmation L1 transaction from + `Explorer.Chain.LifecycleTransaction`, or `nil` if the + block is not confirmed yet. + """ + @type to_import :: %{ + :batch_number => non_neg_integer(), + :block_number => non_neg_integer(), + :confirmation_id => non_neg_integer() | nil } + @typedoc """ + * `batch_number` - The number of the Arbitrum batch. 
+    * `block_number` - The number of the rollup block.
+    * `confirmation_id` - The ID of the confirmation L1 transaction from
+                          `Explorer.Chain.Arbitrum.LifecycleTransaction`, or `nil`
+                          if the block is not confirmed yet.
+    * `confirmation_transaction` - An instance of `Explorer.Chain.Arbitrum.LifecycleTransaction`
+                                   referenced by `confirmation_id`.
+  """
   @primary_key {:block_number, :integer, autogenerate: false}
-  schema "arbitrum_batch_l2_blocks" do
+  typed_schema "arbitrum_batch_l2_blocks" do
     belongs_to(:batch, L1Batch, foreign_key: :batch_number, references: :number, type: :integer)
 
     belongs_to(:confirmation_transaction, LifecycleTransaction,
diff --git a/apps/explorer/lib/explorer/chain/arbitrum/batch_transaction.ex b/apps/explorer/lib/explorer/chain/arbitrum/batch_transaction.ex
index c4ac8c6213c8..c163a8942295 100644
--- a/apps/explorer/lib/explorer/chain/arbitrum/batch_transaction.ex
+++ b/apps/explorer/lib/explorer/chain/arbitrum/batch_transaction.ex
@@ -16,15 +16,24 @@ defmodule Explorer.Chain.Arbitrum.BatchTransaction do
 
   @required_attrs ~w(batch_number tx_hash)a
 
-  @type t :: %__MODULE__{
-          batch_number: non_neg_integer(),
-          batch: %Ecto.Association.NotLoaded{} | L1Batch.t() | nil,
-          tx_hash: Hash.t(),
-          l2_transaction: %Ecto.Association.NotLoaded{} | Transaction.t() | nil
+  @typedoc """
+    Descriptor of a rollup transaction included in an Arbitrum batch:
+    * `batch_number` - The number of the Arbitrum batch.
+    * `tx_hash` - The hash of the rollup transaction.
+  """
+  @type to_import :: %{
+          :batch_number => non_neg_integer(),
+          :tx_hash => binary()
         }
 
+  @typedoc """
+    * `tx_hash` - The hash of the rollup transaction.
+    * `l2_transaction` - An instance of `Explorer.Chain.Transaction` referenced by `tx_hash`.
+    * `batch_number` - The number of the Arbitrum batch.
+    * `batch` - An instance of `Explorer.Chain.Arbitrum.L1Batch` referenced by `batch_number`.
+  """
   @primary_key false
-  schema "arbitrum_batch_l2_transactions" do
+  typed_schema "arbitrum_batch_l2_transactions" do
     belongs_to(:batch, L1Batch, foreign_key: :batch_number, references: :number, type: :integer)
 
     belongs_to(:l2_transaction, Transaction,
diff --git a/apps/explorer/lib/explorer/chain/arbitrum/l1_batch.ex b/apps/explorer/lib/explorer/chain/arbitrum/l1_batch.ex
index 8ec71726ea61..b41402acb980 100644
--- a/apps/explorer/lib/explorer/chain/arbitrum/l1_batch.ex
+++ b/apps/explorer/lib/explorer/chain/arbitrum/l1_batch.ex
@@ -11,28 +11,44 @@ defmodule Explorer.Chain.Arbitrum.L1Batch do
 
   use Explorer.Schema
 
-  alias Explorer.Chain.{
-    Block,
-    Hash
-  }
+  alias Explorer.Chain.Hash
 
   alias Explorer.Chain.Arbitrum.LifecycleTransaction
 
   @required_attrs ~w(number transactions_count start_block end_block before_acc after_acc commitment_id)a
 
-  @type t :: %__MODULE__{
+  @typedoc """
+    Descriptor of an L1 batch for Arbitrum rollups:
+    * `number` - The number of the Arbitrum batch.
+    * `transactions_count` - The number of transactions in the batch.
+    * `start_block` - The number of the first block in the batch.
+    * `end_block` - The number of the last block in the batch.
+    * `before_acc` - The hash of the state before the batch.
+    * `after_acc` - The hash of the state after the batch.
+    * `commitment_id` - The ID of the commitment L1 transaction from `Explorer.Chain.Arbitrum.LifecycleTransaction`.
+  """
+  @type to_import :: %{
           number: non_neg_integer(),
           transactions_count: non_neg_integer(),
-          start_block: Block.block_number(),
-          end_block: Block.block_number(),
-          before_acc: Hash.t(),
-          after_acc: Hash.t(),
-          commitment_id: non_neg_integer(),
-          commitment_transaction: %Ecto.Association.NotLoaded{} | LifecycleTransaction.t() | nil
+          start_block: non_neg_integer(),
+          end_block: non_neg_integer(),
+          before_acc: binary(),
+          after_acc: binary(),
+          commitment_id: non_neg_integer()
         }
 
+  @typedoc """
+    * `number` - The number of the Arbitrum batch.
+    * `transactions_count` - The number of transactions in the batch.
+    * `start_block` - The number of the first block in the batch.
+    * `end_block` - The number of the last block in the batch.
+    * `before_acc` - The hash of the state before the batch.
+    * `after_acc` - The hash of the state after the batch.
+    * `commitment_id` - The ID of the commitment L1 transaction from `Explorer.Chain.Arbitrum.LifecycleTransaction`.
+    * `commitment_transaction` - An instance of `Explorer.Chain.Arbitrum.LifecycleTransaction` referenced by `commitment_id`.
+  """
   @primary_key {:number, :integer, autogenerate: false}
-  schema "arbitrum_l1_batches" do
+  typed_schema "arbitrum_l1_batches" do
     field(:transactions_count, :integer)
     field(:start_block, :integer)
     field(:end_block, :integer)
diff --git a/apps/explorer/lib/explorer/chain/arbitrum/l1_execution.ex b/apps/explorer/lib/explorer/chain/arbitrum/l1_execution.ex
index 32ae344d2d68..8537d26f15b9 100644
--- a/apps/explorer/lib/explorer/chain/arbitrum/l1_execution.ex
+++ b/apps/explorer/lib/explorer/chain/arbitrum/l1_execution.ex
@@ -15,14 +15,28 @@ defmodule Explorer.Chain.Arbitrum.L1Execution do
 
   @required_attrs ~w(message_id execution_id)a
 
-  @type t :: %__MODULE__{
-          message_id: non_neg_integer(),
-          execution_id: non_neg_integer(),
-          execution_transaction: %Ecto.Association.NotLoaded{} | LifecycleTransaction.t() | nil
+  @typedoc """
+    Descriptor of an L1 execution transaction related to an L2-to-L1 message on Arbitrum rollups:
+    * `message_id` - The ID of the message from `Explorer.Chain.Arbitrum.Message`.
+                     There could be situations when an execution of a message is
+                     discovered, but the message itself is not indexed yet.
+    * `execution_id` - The ID of the execution transaction from `Explorer.Chain.Arbitrum.LifecycleTransaction`.
+  """
+  @type to_import :: %{
+          :message_id => non_neg_integer(),
+          :execution_id => non_neg_integer()
         }
 
+  @typedoc """
+    * `message_id` - The ID of the message from `Explorer.Chain.Arbitrum.Message`.
+                     There could be situations when an execution of a message is
+                     discovered, but the message itself is not indexed yet.
+    * `execution_id` - The ID of the execution transaction from `Explorer.Chain.Arbitrum.LifecycleTransaction`.
+    * `execution_transaction` - An instance of `Explorer.Chain.Arbitrum.LifecycleTransaction`
+                                referenced by `execution_id`.
+  """
   @primary_key {:message_id, :integer, autogenerate: false}
-  schema "arbitrum_l1_executions" do
+  typed_schema "arbitrum_l1_executions" do
     belongs_to(:execution_transaction, LifecycleTransaction,
       foreign_key: :execution_id,
       references: :id,
diff --git a/apps/explorer/lib/explorer/chain/arbitrum/lifecycle_transaction.ex b/apps/explorer/lib/explorer/chain/arbitrum/lifecycle_transaction.ex
index 5cd8dc05462a..91b30610acb2 100644
--- a/apps/explorer/lib/explorer/chain/arbitrum/lifecycle_transaction.ex
+++ b/apps/explorer/lib/explorer/chain/arbitrum/lifecycle_transaction.ex
@@ -1,6 +1,6 @@
 defmodule Explorer.Chain.Arbitrum.LifecycleTransaction do
   @moduledoc """
-    Models an L1 lifecycle transaction for Arbitrum.
+    Models an L1 lifecycle transaction for Arbitrum. Lifecycle transactions are transactions that change the state of transactions and blocks on Arbitrum rollups.
 
     Changes in the schema should be reflected in the bulk import module:
     - Explorer.Chain.Import.Runner.Arbitrum.LifecycleTransactions
@@ -11,25 +11,41 @@ defmodule Explorer.Chain.Arbitrum.LifecycleTransaction do
 
   use Explorer.Schema
 
-  alias Explorer.Chain.{
-    Block,
-    Hash
-  }
+  alias Explorer.Chain.Hash
 
   alias Explorer.Chain.Arbitrum.{BatchBlock, L1Batch}
 
   @required_attrs ~w(id hash block_number timestamp status)a
 
-  @type t :: %__MODULE__{
-          id: non_neg_integer(),
-          hash: Hash.t(),
-          block_number: Block.block_number(),
-          timestamp: DateTime.t(),
-          status: String.t()
+  @typedoc """
+    Descriptor of an L1 transaction changing the state of transactions and blocks of Arbitrum rollups:
+    * `id` - The ID of the transaction used for referencing.
+    * `hash` - The hash of the L1 transaction.
+    * `block_number` - The number of the L1 block where the transaction is included.
+    * `timestamp` - The timestamp of the block in which the transaction is included.
+    * `status` - The status of the transaction: `:unfinalized` or `:finalized`.
+  """
+  @type to_import :: %{
+          :id => non_neg_integer(),
+          :hash => binary(),
+          :block_number => non_neg_integer(),
+          :timestamp => DateTime.t(),
+          :status => :unfinalized | :finalized
         }
 
+  @typedoc """
+    * `id` - The ID of the transaction used for referencing.
+    * `hash` - The hash of the L1 transaction.
+    * `block_number` - The number of the L1 block where the transaction is included.
+    * `timestamp` - The timestamp of the block in which the transaction is included.
+    * `status` - The status of the transaction: `:unfinalized` or `:finalized`.
+    * `committed_batches` - A list of `Explorer.Chain.Arbitrum.L1Batch` instances
+                            that are committed by the transaction.
+    * `confirmed_blocks` - A list of `Explorer.Chain.Arbitrum.BatchBlock` instances
+                           that are confirmed by the transaction.
+  """
   @primary_key {:id, :integer, autogenerate: false}
-  schema "arbitrum_lifecycle_l1_transactions" do
+  typed_schema "arbitrum_lifecycle_l1_transactions" do
     field(:hash, Hash.Full)
     field(:block_number, :integer)
     field(:timestamp, :utc_datetime_usec)
diff --git a/apps/explorer/lib/explorer/chain/arbitrum/message.ex b/apps/explorer/lib/explorer/chain/arbitrum/message.ex
index e3899078a61a..051219ab982d 100644
--- a/apps/explorer/lib/explorer/chain/arbitrum/message.ex
+++ b/apps/explorer/lib/explorer/chain/arbitrum/message.ex
@@ -11,7 +11,7 @@ defmodule Explorer.Chain.Arbitrum.Message do
 
   use Explorer.Schema
 
-  alias Explorer.Chain.{Block, Hash}
+  alias Explorer.Chain.Hash
 
   @optional_attrs ~w(originator_address originating_transaction_hash origination_timestamp originating_transaction_block_number completion_transaction_hash)a
 
@@ -19,19 +19,48 @@ defmodule Explorer.Chain.Arbitrum.Message do
 
   @allowed_attrs @optional_attrs ++ @required_attrs
 
-  @type t :: %__MODULE__{
-          direction: String.t(),
+  @typedoc """
+    Descriptor of an L1<->L2 message on Arbitrum rollups:
+    * `direction` - The direction of the message: `:to_l2` or `:from_l2`.
+    * `message_id` - The ID of the message used for referencing.
+    * `originator_address` - The address of the message originator. The fields
+                             related to the origination can be `nil` if a completion
+                             transaction is discovered when the originating
+                             transaction is not indexed yet.
+    * `originating_transaction_hash` - The hash of the originating transaction.
+    * `origination_timestamp` - The timestamp of the origination.
+    * `originating_transaction_block_number` - The number of the block where the
+                                               originating transaction is included.
+    * `completion_transaction_hash` - The hash of the completion transaction.
+    * `status` - The status of the message: `:initiated`, `:sent`, `:confirmed`, `:relayed`.
+  """
+  @type to_import :: %{
+          direction: :to_l2 | :from_l2,
           message_id: non_neg_integer(),
-          originator_address: Hash.Address.t() | nil,
-          originating_transaction_hash: Hash.t() | nil,
+          originator_address: binary() | nil,
+          originating_transaction_hash: binary() | nil,
           origination_timestamp: DateTime.t() | nil,
-          originating_transaction_block_number: Block.block_number() | nil,
-          completion_transaction_hash: Hash.t() | nil,
-          status: String.t()
+          originating_transaction_block_number: non_neg_integer() | nil,
+          completion_transaction_hash: binary() | nil,
+          status: :initiated | :sent | :confirmed | :relayed
        }
 
+  @typedoc """
+    * `direction` - The direction of the message: `:to_l2` or `:from_l2`.
+    * `message_id` - The ID of the message used for referencing.
+    * `originator_address` - The address of the message originator. The fields
+                             related to the origination can be `nil` if a completion
+                             transaction is discovered when the originating
+                             transaction is not indexed yet.
+    * `originating_transaction_hash` - The hash of the originating transaction.
+    * `origination_timestamp` - The timestamp of the origination.
+    * `originating_transaction_block_number` - The number of the block where the
+                                               originating transaction is included.
+    * `completion_transaction_hash` - The hash of the completion transaction.
+    * `status` - The status of the message: `:initiated`, `:sent`, `:confirmed`, `:relayed`.
+ """ @primary_key false - schema "arbitrum_crosslevel_messages" do + typed_schema "arbitrum_crosslevel_messages" do field(:direction, Ecto.Enum, values: [:to_l2, :from_l2], primary_key: true) field(:message_id, :integer, primary_key: true) field(:originator_address, Hash.Address) diff --git a/apps/explorer/lib/explorer/chain/arbitrum/reader.ex b/apps/explorer/lib/explorer/chain/arbitrum/reader.ex index 0ea40f28e44a..7b8dc92a45ef 100644 --- a/apps/explorer/lib/explorer/chain/arbitrum/reader.ex +++ b/apps/explorer/lib/explorer/chain/arbitrum/reader.ex @@ -504,18 +504,24 @@ defmodule Explorer.Chain.Arbitrum.Reader do end @doc """ - Retrieves all L2-to-L1 messages with the specified status that originated in rollup blocks with numbers not higher than `block_number`. + Retrieves all L2-to-L1 messages with the specified status. + + If `block_number` is not `nil`, only messages originating in rollup blocks with + numbers not higher than the specified block are considered. Otherwise, all + messages are considered. ## Parameters - - `status`: The status of the messages to retrieve, such as `:initiated`, `:sent`, `:confirmed`, or `:relayed`. - - `block_number`: The number of a rollup block that limits the messages lookup. + - `status`: The status of the messages to retrieve, such as `:initiated`, + `:sent`, `:confirmed`, or `:relayed`. + - `block_number`: The number of a rollup block that limits the messages lookup, + or `nil`. ## Returns - - Instances of `Explorer.Chain.Arbitrum.Message` corresponding to the criteria, or `[]` if no messages - with the given status are found in the rollup blocks up to the specified number. + - Instances of `Explorer.Chain.Arbitrum.Message` corresponding to the criteria, + or `[]` if no messages with the given status are found. """ - @spec l2_to_l1_messages(:confirmed | :initiated | :relayed | :sent, FullBlock.block_number()) :: [ - Message + @spec l2_to_l1_messages(:confirmed | :initiated | :relayed | :sent, FullBlock.block_number() | nil) :: [ + Message.t() ] def l2_to_l1_messages(status, block_number) when status in [:initiated, :sent, :confirmed, :relayed] and @@ -532,6 +538,16 @@ defmodule Explorer.Chain.Arbitrum.Reader do Repo.all(query, timeout: :infinity) end + def l2_to_l1_messages(status, nil) when status in [:initiated, :sent, :confirmed, :relayed] do + query = + from(msg in Message, + where: msg.direction == :from_l2 and msg.status == ^status, + order_by: [desc: msg.message_id] + ) + + Repo.all(query, timeout: :infinity) + end + @doc """ Retrieves the numbers of the L1 blocks containing the confirmation transactions bounding the first interval where missed confirmation transactions could be found. diff --git a/apps/indexer/lib/indexer/fetcher/arbitrum/utils/db.ex b/apps/indexer/lib/indexer/fetcher/arbitrum/utils/db.ex index 5c56001464c4..a76361a5cd52 100644 --- a/apps/indexer/lib/indexer/fetcher/arbitrum/utils/db.ex +++ b/apps/indexer/lib/indexer/fetcher/arbitrum/utils/db.ex @@ -8,6 +8,7 @@ defmodule Indexer.Fetcher.Arbitrum.Utils.Db do import Indexer.Fetcher.Arbitrum.Utils.Logging, only: [log_warning: 1] alias Explorer.{Chain, Repo} + alias Explorer.Chain.Arbitrum alias Explorer.Chain.Arbitrum.Reader alias Explorer.Chain.Block, as: FullBlock alias Explorer.Chain.{Data, Hash, Log} @@ -558,21 +559,10 @@ defmodule Indexer.Fetcher.Arbitrum.Utils.Db do database import operation. If no messages with the 'confirmed' status are found by the specified block number, an empty list is returned. 
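+
+  For illustration, both lookup modes under the new contract (the block number
+  is arbitrary):
+
+      l2_to_l1_messages(:confirmed, 123_456)
+      l2_to_l1_messages(:confirmed, nil)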
""" - @spec confirmed_l2_to_l1_messages(FullBlock.block_number()) :: [ - %{ - direction: :from_l2, - message_id: non_neg_integer(), - originator_address: binary(), - originating_transaction_hash: binary(), - originating_transaction_block_number: FullBlock.block_number(), - completion_transaction_hash: nil, - status: :confirmed - } - ] - def confirmed_l2_to_l1_messages(block_number) - when is_integer(block_number) and block_number >= 0 do + @spec confirmed_l2_to_l1_messages() :: [Arbitrum.Message.to_import()] + def confirmed_l2_to_l1_messages do # credo:disable-for-lines:2 Credo.Check.Refactor.PipeChainStart - Reader.l2_to_l1_messages(:confirmed, block_number) + Reader.l2_to_l1_messages(:confirmed, nil) |> Enum.map(&message_to_map/1) end @@ -739,6 +729,7 @@ defmodule Indexer.Fetcher.Arbitrum.Utils.Db do |> db_record_to_map(block) end + @spec message_to_map(Arbitrum.Message.t()) :: Arbitrum.Message.to_import() defp message_to_map(message) do [ :direction, diff --git a/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_batches.ex b/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_batches.ex index 649cede3c15b..8e3cdb017f6d 100644 --- a/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_batches.ex +++ b/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_batches.ex @@ -33,6 +33,7 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do alias Indexer.Fetcher.Arbitrum.Utils.{Db, Logging, Rpc} alias Explorer.Chain + alias Explorer.Chain.Arbitrum require Logger @@ -409,6 +410,30 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do # - A tuple containing lists of batches, lifecycle transactions, rollup blocks, # rollup transactions, and committed messages (with the status `:sent`), all # ready for database import. + @spec handle_batches_from_logs( + [%{String.t() => any()}], + non_neg_integer(), + %{ + :json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(), + :chunk_size => non_neg_integer(), + optional(any()) => any() + }, + %{ + :json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(), + :chunk_size => non_neg_integer(), + optional(any()) => any() + } + ) :: { + [Arbitrum.L1Batch.to_import()], + [Arbitrum.LifecycleTransaction.to_import()], + [Arbitrum.BatchBlock.to_import()], + [Arbitrum.BatchTransaction.to_import()], + [Arbitrum.Message.to_import()] + } + defp handle_batches_from_logs(logs, msg_to_block_shift, l1_rpc_config, rollup_rpc_config) + + defp handle_batches_from_logs([], _, _, _), do: {[], [], [], [], []} + defp handle_batches_from_logs( logs, msg_to_block_shift, diff --git a/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_confirmations.ex b/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_confirmations.ex index ea8816ba0ccb..ceb0d1695df7 100644 --- a/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_confirmations.ex +++ b/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_confirmations.ex @@ -41,6 +41,7 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewConfirmations do alias Indexer.Fetcher.Arbitrum.Utils.Helper, as: ArbitrumHelper alias Explorer.Chain + alias Explorer.Chain.Arbitrum require Logger @@ -313,8 +314,24 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewConfirmations do # rollup # - `rollup_blocks` is a list of rollup blocks associated with the corresponding # lifecycle transactions - # - `confirmed_txs` is a list of L2-to-L1 messages identified up to the highest - # confirmed block number, to be imported with the new status `:confirmed` + # - `confirmed_messages` is a list of L2-to-L1 messages 
identified up to the + # highest confirmed block number, to be imported with the new status + # `:confirmed` + @spec handle_confirmations_from_logs( + [%{String.t() => any()}], + %{ + :json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(), + :logs_block_range => non_neg_integer(), + :chunk_size => non_neg_integer(), + :finalized_confirmations => boolean() + }, + binary() + ) :: + {:ok | :confirmation_missed, + {[Arbitrum.LifecycleTransaction.to_import()], [Arbitrum.BatchBlock.to_import()], + [Arbitrum.Message.to_import()]}} + defp handle_confirmations_from_logs(logs, l1_rpc_config, outbox_address) + defp handle_confirmations_from_logs([], _, _) do {:ok, {[], [], []}} end @@ -359,9 +376,9 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewConfirmations do # Drawback of marking messages as confirmed during a new confirmation handling # is that the status change could become stuck if confirmations are not handled. # For example, due to DB inconsistency: some blocks/batches are missed. - confirmed_txs = get_confirmed_l2_to_l1_messages(highest_confirmed_block_number) + confirmed_messages = get_confirmed_l2_to_l1_messages(highest_confirmed_block_number) - {retcode, {lifecycle_txs, rollup_blocks, confirmed_txs}} + {retcode, {lifecycle_txs, rollup_blocks, confirmed_messages}} end end diff --git a/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_l1_executions.ex b/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_l1_executions.ex index d74f0edab229..7aedf324a87e 100644 --- a/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_l1_executions.ex +++ b/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_l1_executions.ex @@ -29,6 +29,7 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewL1Executions do alias Indexer.Helper, as: IndexerHelper alias Explorer.Chain + alias Explorer.Chain.Arbitrum require Logger @@ -216,7 +217,7 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewL1Executions do # that have not yet been indexed. This ensures that as soon as a new unexecuted # message is added to the database, it can be marked as relayed, considering # the execution transactions that have already been indexed. - messages = get_relayed_messages(end_block) + messages = get_relayed_messages() unless messages == [] do log_info("Marking #{length(messages)} l2-to-l1 messages as completed") @@ -269,6 +270,19 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewL1Executions do # statuses, and unique identifiers. # - A list of detailed execution information for L2-to-L1 messages. # Both lists are prepared for database importation. + @spec get_executions_from_logs( + [%{String.t() => any()}], + %{ + :json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(), + :chunk_size => non_neg_integer(), + :track_finalization => boolean(), + optional(any()) => any() + } + ) :: {[Arbitrum.LifecycleTransaction.to_import()], [Arbitrum.L1Execution.to_import()]} + defp get_executions_from_logs(logs, l1_rpc_config) + + defp get_executions_from_logs([], _), do: {[], []} + defp get_executions_from_logs( logs, %{ @@ -370,23 +384,19 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewL1Executions do # Retrieves unexecuted messages from L2 to L1, marking them as completed if their # corresponding execution transactions are identified. # - # This function fetches messages confirmed on L1 up to the specified rollup block - # number and matches these messages with their corresponding execution transactions. 
- # For matched pairs, it updates the message status to `:relayed` and links them with - # the execution transactions. - # - # ## Parameters - # - `block_number`: The block number up to which messages are considered for - # completion. + # This function fetches messages confirmed on L1 and matches these messages with + # their corresponding execution transactions. For matched pairs, it updates the + # message status to `:relayed` and links them with the execution transactions. # # ## Returns # - A list of messages marked as completed, ready for database import. - defp get_relayed_messages(block_number) do + @spec get_relayed_messages() :: [Arbitrum.Message.to_import()] + defp get_relayed_messages do # Assuming that both catchup block fetcher and historical messages catchup fetcher # will check all discovered historical messages to be marked as executed it is not # needed to handle :initiated and :sent of historical messages here, only for # new messages discovered and changed their status from `:sent` to `:confirmed` - confirmed_messages = Db.confirmed_l2_to_l1_messages(block_number) + confirmed_messages = Db.confirmed_l2_to_l1_messages() if Enum.empty?(confirmed_messages) do [] diff --git a/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_messages_to_l2.ex b/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_messages_to_l2.ex index b5ee6bfd9db4..d0f155679880 100644 --- a/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_messages_to_l2.ex +++ b/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_messages_to_l2.ex @@ -24,6 +24,7 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewMessagesToL2 do alias Indexer.Helper, as: IndexerHelper alias Explorer.Chain + alias Explorer.Chain.Arbitrum require Logger @@ -260,6 +261,15 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewMessagesToL2 do # ## Returns # - A list of maps describing discovered messages compatible with the database # import operation. + @spec get_messages_from_logs( + [%{String.t() => any()}], + EthereumJSONRPC.json_rpc_named_arguments(), + non_neg_integer() + ) :: [Arbitrum.Message.to_import()] + defp get_messages_from_logs(logs, json_rpc_named_arguments, chunk_size) + + defp get_messages_from_logs([], _, _), do: [] + defp get_messages_from_logs(logs, json_rpc_named_arguments, chunk_size) do {messages, txs_requests} = parse_logs_for_l1_to_l2_messages(logs) From 98c56687654bddc9701c4de12433ab64da344ca0 Mon Sep 17 00:00:00 2001 From: alik-agaev Date: Fri, 5 Jul 2024 12:26:25 +0400 Subject: [PATCH 03/32] chore: Update buildkit builders (#10377) * test new armbuilder * test new armbuilder * test new armbuilder * Revert "test new armbuilder" This reverts commit 9410698faaa83be9d1c570f13735c930bc3bb6c1. * Revert "test new armbuilder" This reverts commit 5460783c51d8c1f73cb666904a55cfb8fac78572. 
* Change buildkit connection * Change buildkit connection --- .github/actions/setup-repo/action.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/actions/setup-repo/action.yml b/.github/actions/setup-repo/action.yml index 0465ac9ce2c2..5b2149aff20e 100644 --- a/.github/actions/setup-repo/action.yml +++ b/.github/actions/setup-repo/action.yml @@ -46,7 +46,7 @@ runs: - name: Find builder if: ${{ inputs.docker-remote-multi-platform }} shell: bash - run: echo "BUILDER_IP=$(./.github/scripts/select-builder.sh ${{ inputs.docker-arm-host }} ubuntu ~/.ssh/id_rsa)" >> $GITHUB_ENV + run: echo "BUILDER_IP=$(./.github/scripts/select-builder.sh ${{ inputs.docker-arm-host }} root ~/.ssh/id_rsa)" >> $GITHUB_ENV - name: Set up SSH if: ${{ inputs.docker-remote-multi-platform }} uses: MrSquaare/ssh-setup-action@523473d91581ccbf89565e12b40faba93f2708bd # v1.1.0 @@ -68,7 +68,7 @@ runs: with: platforms: linux/amd64 append: | - - endpoint: ssh://ubuntu@${{ env.BUILDER_IP }} + - endpoint: ssh://root@${{ env.BUILDER_IP }} platforms: linux/arm64/v8 - name: Log in to Docker Hub From d6080e04a1b49db1d2b9cee42b077f32bd1329ce Mon Sep 17 00:00:00 2001 From: Alexander Kolotov Date: Fri, 5 Jul 2024 02:27:38 -0600 Subject: [PATCH 04/32] fix: proper handling for re-discovered Arbitrum batches (#10326) * extended types documentation * proper handling for re-discovered batches * specification added * Code review comment addressed --- .../explorer/chain/arbitrum/batch_block.ex | 2 +- .../lib/explorer/chain/arbitrum/reader.ex | 31 ++- .../lib/indexer/fetcher/arbitrum/utils/db.ex | 33 ++- .../fetcher/arbitrum/workers/new_batches.ex | 231 +++++++++++++----- 4 files changed, 222 insertions(+), 75 deletions(-) diff --git a/apps/explorer/lib/explorer/chain/arbitrum/batch_block.ex b/apps/explorer/lib/explorer/chain/arbitrum/batch_block.ex index 51b8f541a196..ab1ed9db994c 100644 --- a/apps/explorer/lib/explorer/chain/arbitrum/batch_block.ex +++ b/apps/explorer/lib/explorer/chain/arbitrum/batch_block.ex @@ -22,7 +22,7 @@ defmodule Explorer.Chain.Arbitrum.BatchBlock do * `batch_number` - The number of the Arbitrum batch. * `block_number` - The number of the rollup block. * `confirmation_id` - The ID of the confirmation L1 transaction from - `Explorer.Chain.LifecycleTransaction`, or `nil` if the + `Explorer.Chain.Arbitrum.LifecycleTransaction`, or `nil` if the block is not confirmed yet. """ @type to_import :: %{ diff --git a/apps/explorer/lib/explorer/chain/arbitrum/reader.ex b/apps/explorer/lib/explorer/chain/arbitrum/reader.ex index 7b8dc92a45ef..822072b544e3 100644 --- a/apps/explorer/lib/explorer/chain/arbitrum/reader.ex +++ b/apps/explorer/lib/explorer/chain/arbitrum/reader.ex @@ -176,6 +176,29 @@ defmodule Explorer.Chain.Arbitrum.Reader do |> Repo.one() end + @doc """ + Reads a list of L1 transactions by their hashes from the `arbitrum_lifecycle_l1_transactions` table and returns their IDs. + + ## Parameters + - `l1_tx_hashes`: A list of hashes to retrieve L1 transactions for. + + ## Returns + - A list of tuples containing transaction hashes and IDs for the transaction + hashes from the input list. The output list may be smaller than the input + list. 
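+
+  For illustration, the result pairs each found hash with its ID (`hash_a`,
+  `hash_b` and the IDs are made up):
+
+      lifecycle_transaction_ids([hash_a, hash_b])
+      #=> [{hash_a, 1}, {hash_b, 2}]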
+ """ + @spec lifecycle_transaction_ids([binary()]) :: [{Hash.t(), non_neg_integer}] + def lifecycle_transaction_ids(l1_tx_hashes) when is_list(l1_tx_hashes) do + query = + from( + lt in LifecycleTransaction, + select: {lt.hash, lt.id}, + where: lt.hash in ^l1_tx_hashes + ) + + Repo.all(query, timeout: :infinity) + end + @doc """ Reads a list of L1 transactions by their hashes from the `arbitrum_lifecycle_l1_transactions` table. @@ -183,15 +206,15 @@ defmodule Explorer.Chain.Arbitrum.Reader do - `l1_tx_hashes`: A list of hashes to retrieve L1 transactions for. ## Returns - - A list of `Explorer.Chain.Arbitrum.LifecycleTransaction` corresponding to the hashes from - the input list. The output list may be smaller than the input list. + - A list of `Explorer.Chain.Arbitrum.LifecycleTransaction` corresponding to the + hashes from the input list. The output list may be smaller than the input + list. """ - @spec lifecycle_transactions(maybe_improper_list(Hash.t(), [])) :: [LifecycleTransaction] + @spec lifecycle_transactions([binary()]) :: [LifecycleTransaction.t()] def lifecycle_transactions(l1_tx_hashes) when is_list(l1_tx_hashes) do query = from( lt in LifecycleTransaction, - select: {lt.hash, lt.id}, where: lt.hash in ^l1_tx_hashes ) diff --git a/apps/indexer/lib/indexer/fetcher/arbitrum/utils/db.ex b/apps/indexer/lib/indexer/fetcher/arbitrum/utils/db.ex index a76361a5cd52..5ca90219df43 100644 --- a/apps/indexer/lib/indexer/fetcher/arbitrum/utils/db.ex +++ b/apps/indexer/lib/indexer/fetcher/arbitrum/utils/db.ex @@ -34,16 +34,23 @@ defmodule Indexer.Fetcher.Arbitrum.Utils.Db do the key `:id`, representing the index of the L1 transaction in the `arbitrum_lifecycle_l1_transactions` table. """ - @spec get_indices_for_l1_transactions(map()) :: map() + @spec get_indices_for_l1_transactions(%{ + binary() => %{ + :hash => binary(), + :block_number => FullBlock.block_number(), + :timestamp => DateTime.t(), + :status => :unfinalized | :finalized, + optional(:id) => non_neg_integer() + } + }) :: %{binary() => Arbitrum.LifecycleTransaction.to_import()} # TODO: consider a way to remove duplicate with ZkSync.Utils.Db - # credo:disable-for-next-line Credo.Check.Design.DuplicatedCode def get_indices_for_l1_transactions(new_l1_txs) when is_map(new_l1_txs) do # Get indices for l1 transactions previously handled l1_txs = new_l1_txs |> Map.keys() - |> Reader.lifecycle_transactions() + |> Reader.lifecycle_transaction_ids() |> Enum.reduce(new_l1_txs, fn {hash, id}, txs -> {_, txs} = Map.get_and_update!(txs, hash.bytes, fn l1_tx -> @@ -79,6 +86,25 @@ defmodule Indexer.Fetcher.Arbitrum.Utils.Db do updated_l1_txs end + @doc """ + Reads a list of L1 transactions by their hashes from the + `arbitrum_lifecycle_l1_transactions` table and converts them to maps. + + ## Parameters + - `l1_tx_hashes`: A list of hashes to retrieve L1 transactions for. + + ## Returns + - A list of maps representing the `Explorer.Chain.Arbitrum.LifecycleTransaction` + corresponding to the hashes from the input list. The output list is + compatible with the database import operation. + """ + @spec lifecycle_transactions([binary()]) :: [Arbitrum.LifecycleTransaction.to_import()] + def lifecycle_transactions(l1_tx_hashes) do + l1_tx_hashes + |> Reader.lifecycle_transactions() + |> Enum.map(&lifecycle_transaction_to_map/1) + end + @doc """ Calculates the next L1 block number to search for the latest committed batch. 
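+
+  Each returned map mirrors `Explorer.Chain.Arbitrum.LifecycleTransaction.to_import()`;
+  a sketch of the shape with illustrative values:
+
+      %{id: 1, hash: tx_hash, block_number: 12_345, timestamp: timestamp, status: :finalized}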
@@ -719,6 +745,7 @@ defmodule Indexer.Fetcher.Arbitrum.Utils.Db do Chain.timestamp_to_block_number(timestamp, :after, false) end + @spec lifecycle_transaction_to_map(Arbitrum.LifecycleTransaction.t()) :: Arbitrum.LifecycleTransaction.to_import() defp lifecycle_transaction_to_map(tx) do [:id, :hash, :block_number, :timestamp, :status] |> db_record_to_map(tx) diff --git a/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_batches.ex b/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_batches.ex index 8e3cdb017f6d..c0672139ecf1 100644 --- a/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_batches.ex +++ b/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_batches.ex @@ -448,13 +448,20 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do |> parse_logs_to_get_batch_numbers() |> Db.batches_exist() - {batches, txs_requests, blocks_requests} = parse_logs_for_new_batches(logs, existing_batches) + {batches, txs_requests, blocks_requests, existing_commitment_txs} = + parse_logs_for_new_batches(logs, existing_batches) blocks_to_ts = Rpc.execute_blocks_requests_and_get_ts(blocks_requests, json_rpc_named_arguments, chunk_size) - {lifecycle_txs_wo_indices, batches_to_import} = + {initial_lifecycle_txs, batches_to_import} = execute_tx_requests_parse_txs_calldata(txs_requests, msg_to_block_shift, blocks_to_ts, batches, l1_rpc_config) + # Check if the commitment transactions for the batches which are already in the database + # needs to be updated in case of reorgs + lifecycle_txs_wo_indices = + initial_lifecycle_txs + |> Map.merge(update_lifecycle_txs_for_new_blocks(existing_commitment_txs, blocks_to_ts)) + {blocks_to_import, rollup_txs_to_import} = get_rollup_blocks_and_transactions(batches_to_import, rollup_rpc_config) lifecycle_txs = @@ -482,14 +489,20 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do ] end) - committed_txs = - blocks_to_import - |> Map.keys() - |> Enum.max() - |> get_committed_l2_to_l1_messages() + # It is safe to not re-mark messages as committed for the batches that are already in the database + committed_messages = + if Enum.empty?(blocks_to_import) do + [] + else + # Without check on the empty list of keys `Enum.max()` will raise an error + blocks_to_import + |> Map.keys() + |> Enum.max() + |> get_committed_l2_to_l1_messages() + end {batches_list_to_import, Map.values(lifecycle_txs), Map.values(blocks_to_import), rollup_txs_to_import, - committed_txs} + committed_messages} end # Extracts batch numbers from logs of SequencerBatchDelivered events. @@ -506,8 +519,10 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do # This function sifts through logs of SequencerBatchDelivered events, extracts the # necessary data, and assembles a map of new batch descriptions. Additionally, it # prepares RPC `eth_getTransactionByHash` and `eth_getBlockByNumber` requests to - # fetch details not present in the logs. To minimize subsequent RPC calls, only - # batches not previously known (i.e., absent in `existing_batches`) are processed. + # fetch details not present in the logs. To minimize subsequent RPC calls, requests to + # get the transactions details are only made for batches not previously known. + # For the existing batches, the function prepares a map of commitment transactions + # assuming that they must be updated if reorgs occur. # # ## Parameters # - `logs`: A list of event logs to be processed. @@ -520,50 +535,66 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do # the L1 transactions associated with these batches. 
   #   - A list of RPC requests to fetch details of the L1 blocks where these batches
   #     were included.
+  #   - A map of commitment transactions for the existing batches, where each key is
+  #     a transaction hash and each value is the block number of the transaction.
+  @spec parse_logs_for_new_batches(
+          [%{String.t() => any()}],
+          [non_neg_integer()]
+        ) :: {
+          %{:number => non_neg_integer(), :before_acc => binary(), :after_acc => binary(), :tx_hash => binary()},
+          [EthereumJSONRPC.Transport.request()],
+          [EthereumJSONRPC.Transport.request()],
+          %{binary() => non_neg_integer()}
+        }
   defp parse_logs_for_new_batches(logs, existing_batches) do
-    {batches, txs_requests, blocks_requests} =
+    {batches, txs_requests, blocks_requests, existing_commitment_txs} =
       logs
-      |> Enum.reduce({%{}, [], %{}}, fn event, {batches, txs_requests, blocks_requests} ->
+      |> Enum.reduce({%{}, [], %{}, %{}}, fn event, {batches, txs_requests, blocks_requests, existing_commitment_txs} ->
         {batch_num, before_acc, after_acc} = sequencer_batch_delivered_event_parse(event)
 
         tx_hash_raw = event["transactionHash"]
         tx_hash = Rpc.string_hash_to_bytes_hash(tx_hash_raw)
         blk_num = quantity_to_integer(event["blockNumber"])
 
-        if batch_num in existing_batches do
-          {batches, txs_requests, blocks_requests}
-        else
-          updated_batches =
-            Map.put(
-              batches,
-              batch_num,
-              %{
-                number: batch_num,
-                before_acc: before_acc,
-                after_acc: after_acc,
-                tx_hash: tx_hash
-              }
-            )
-
-          updated_txs_requests = [
-            Rpc.transaction_by_hash_request(%{id: 0, hash: tx_hash_raw})
-            | txs_requests
-          ]
-
-          updated_blocks_requests =
-            Map.put(
-              blocks_requests,
-              blk_num,
-              BlockByNumber.request(%{id: 0, number: blk_num}, false, true)
-            )
-
-          log_info("New batch #{batch_num} found in #{tx_hash_raw}")
+        {updated_batches, updated_txs_requests, updated_existing_commitment_txs} =
+          if batch_num in existing_batches do
+            {batches, txs_requests, Map.put(existing_commitment_txs, tx_hash, blk_num)}
+          else
+            log_info("New batch #{batch_num} found in #{tx_hash_raw}")
+
+            updated_batches =
+              Map.put(
+                batches,
+                batch_num,
+                %{
+                  number: batch_num,
+                  before_acc: before_acc,
+                  after_acc: after_acc,
+                  tx_hash: tx_hash
+                }
+              )
+
+            updated_txs_requests = [
+              Rpc.transaction_by_hash_request(%{id: 0, hash: tx_hash_raw})
+              | txs_requests
+            ]
+
+            {updated_batches, updated_txs_requests, existing_commitment_txs}
+          end
+
+        # In order to be able to update the commitment transactions for the existing batches
+        # in case of reorgs, we need to re-execute the block requests
+        updated_blocks_requests =
+          Map.put(
+            blocks_requests,
+            blk_num,
+            BlockByNumber.request(%{id: 0, number: blk_num}, false, true)
+          )
 
-          {updated_batches, updated_txs_requests, updated_blocks_requests}
-        end
+        {updated_batches, updated_txs_requests, updated_blocks_requests, updated_existing_commitment_txs}
       end)
 
-    {batches, txs_requests, Map.values(blocks_requests)}
+    {batches, txs_requests, Map.values(blocks_requests), existing_commitment_txs}
   end
 
   # Parses SequencerBatchDelivered event to get batch sequence number and associated accumulators
@@ -693,6 +724,51 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do
     end
   end
 
+  # Updates lifecycle transactions for new blocks by setting the block number and
+  # timestamp for each transaction.
+  #
+  # The function checks if a transaction's block number and timestamp match the
+  # new values. If they do not, the transaction is updated with the new block
+  # number and timestamp.
+  #
+  # Parameters:
+  # - `existing_commitment_txs`: A map where keys are transaction hashes and
+  #                              values are block numbers.
+ # - `block_to_ts`: A map where keys are block numbers and values are timestamps. + # + # Returns: + # - A map where keys are transaction hashes and values are updated lifecycle + # transactions with the block number and timestamp set, compatible with the + # database import operation. + @spec update_lifecycle_txs_for_new_blocks(%{binary() => non_neg_integer()}, %{non_neg_integer() => non_neg_integer()}) :: + %{binary() => Arbitrum.LifecycleTransaction.to_import()} + defp update_lifecycle_txs_for_new_blocks(existing_commitment_txs, block_to_ts) do + existing_commitment_txs + |> Map.keys() + |> Db.lifecycle_transactions() + |> Enum.reduce(%{}, fn tx, txs -> + block_num = existing_commitment_txs[tx.hash] + ts = block_to_ts[block_num] + + if tx.block_number == block_num and DateTime.compare(tx.timestamp, ts) == :eq do + txs + else + log_info( + "The commitment transaction 0x#{tx.hash |> Base.encode16(case: :lower)} will be updated with the new block number and timestamp" + ) + + Map.put( + txs, + tx.hash, + Map.merge(tx, %{ + block_number: block_num, + timestamp: ts + }) + ) + end + end) + end + # Retrieves rollup blocks and transactions for a list of batches. # # This function extracts rollup block ranges from each batch's data to determine @@ -706,8 +782,24 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do # # ## Returns # - A tuple containing: - # - A map of rollup blocks, ready for database import. + # - A map of rollup blocks, where each block is ready for database import. # - A list of rollup transactions, ready for database import. + @spec get_rollup_blocks_and_transactions( + %{ + non_neg_integer() => %{ + :number => non_neg_integer(), + :start_block => non_neg_integer(), + :end_block => non_neg_integer(), + optional(any()) => any() + } + }, + %{ + :json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(), + :chunk_size => non_neg_integer(), + optional(any()) => any() + } + ) :: + {%{non_neg_integer() => Arbitrum.BatchBlock.to_import()}, [Arbitrum.BatchTransaction.to_import()]} defp get_rollup_blocks_and_transactions( batches, rollup_rpc_config @@ -715,31 +807,36 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do blocks_to_batches = unwrap_rollup_block_ranges(batches) required_blocks_numbers = Map.keys(blocks_to_batches) - log_debug("Identified #{length(required_blocks_numbers)} rollup blocks") - - {blocks_to_import_map, txs_to_import_list} = - get_rollup_blocks_and_txs_from_db(required_blocks_numbers, blocks_to_batches) - - # While it's not entirely aligned with data integrity principles to recover - # rollup blocks and transactions from RPC that are not yet indexed, it's - # a practical compromise to facilitate the progress of batch discovery. Given - # the potential high frequency of new batch appearances and the substantial - # volume of blocks and transactions, prioritizing discovery process advancement - # is deemed reasonable. 
-    {blocks_to_import, txs_to_import} =
-      recover_data_if_necessary(
-        blocks_to_import_map,
-        txs_to_import_list,
-        required_blocks_numbers,
-        blocks_to_batches,
-        rollup_rpc_config
-      )
+    if required_blocks_numbers == [] do
+      {%{}, []}
+    else
+      log_debug("Identified #{length(required_blocks_numbers)} rollup blocks")
+
+      {blocks_to_import_map, txs_to_import_list} =
+        get_rollup_blocks_and_txs_from_db(required_blocks_numbers, blocks_to_batches)
+
+      # While it's not entirely aligned with data integrity principles to recover
+      # rollup blocks and transactions from RPC that are not yet indexed, it's
+      # a practical compromise to facilitate the progress of batch discovery. Given
+      # the potential high frequency of new batch appearances and the substantial
+      # volume of blocks and transactions, prioritizing discovery process advancement
+      # is deemed reasonable.
+      {blocks_to_import, txs_to_import} =
+        recover_data_if_necessary(
+          blocks_to_import_map,
+          txs_to_import_list,
+          required_blocks_numbers,
+          blocks_to_batches,
+          rollup_rpc_config
+        )
 
-    log_info(
-      "Found #{length(Map.keys(blocks_to_import))} rollup blocks and #{length(txs_to_import)} rollup transactions in DB"
-    )
+      log_info(
+        "Found #{length(Map.keys(blocks_to_import))} rollup blocks and #{length(txs_to_import)} rollup transactions in DB"
+      )
 
-    {blocks_to_import, txs_to_import}
+      {blocks_to_import, txs_to_import}
+    end
   end
 
   # Unwraps rollup block ranges from batch data to create a block-to-batch number map.

From b2345b159fa49428c9ecb61d6f599e56adb2c78a Mon Sep 17 00:00:00 2001
From: Alexander Kolotov
Date: Fri, 5 Jul 2024 02:29:19 -0600
Subject: [PATCH 05/32] feat: broadcast updates about new Arbitrum batches and
 L1-L2 messages through WebSocket (#10272)

* publish new batches through websockets

* publish new L1-L2 messages amount through websockets

* credo issues

* Consistent handling of transaction hash

* new types re-used
---
 .../channels/arbitrum_channel.ex              | 14 +++
 .../channels/user_socket_v2.ex                |  5 +
 .../lib/block_scout_web/notifier.ex           | 18 ++++
 .../lib/block_scout_web/notifiers/arbitrum.ex | 31 +++++++
 .../block_scout_web/realtime_event_handler.ex | 16 ++++
 .../views/api/v2/arbitrum_view.ex             | 92 ++++++++++++++++---
 .../lib/explorer/chain/events/publisher.ex    | 17 +++-
 .../lib/explorer/chain/events/subscriber.ex   | 17 +++-
 .../fetcher/arbitrum/workers/new_batches.ex   | 35 ++++++-
 .../arbitrum/workers/new_messages_to_l2.ex    | 28 ++++--
 10 files changed, 248 insertions(+), 25 deletions(-)
 create mode 100644 apps/block_scout_web/lib/block_scout_web/channels/arbitrum_channel.ex
 create mode 100644 apps/block_scout_web/lib/block_scout_web/notifiers/arbitrum.ex

diff --git a/apps/block_scout_web/lib/block_scout_web/channels/arbitrum_channel.ex b/apps/block_scout_web/lib/block_scout_web/channels/arbitrum_channel.ex
new file mode 100644
index 000000000000..27bc00e8b2ea
--- /dev/null
+++ b/apps/block_scout_web/lib/block_scout_web/channels/arbitrum_channel.ex
@@ -0,0 +1,14 @@
+defmodule BlockScoutWeb.ArbitrumChannel do
+  @moduledoc """
+  Establishes pub/sub channel for live updates of Arbitrum related events.
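+
+  A hypothetical test-style join, for illustration only (helpers come from
+  `Phoenix.ChannelTest`; the topic matches the `join/3` clauses below):
+
+      {:ok, _reply, _socket} =
+        socket(BlockScoutWeb.UserSocketV2, "no_id", %{})
+        |> subscribe_and_join(BlockScoutWeb.ArbitrumChannel, "arbitrum:new_batch")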
+ """ + use BlockScoutWeb, :channel + + def join("arbitrum:new_batch", _params, socket) do + {:ok, %{}, socket} + end + + def join("arbitrum:new_messages_to_rollup_amount", _params, socket) do + {:ok, %{}, socket} + end +end diff --git a/apps/block_scout_web/lib/block_scout_web/channels/user_socket_v2.ex b/apps/block_scout_web/lib/block_scout_web/channels/user_socket_v2.ex index 8ac5295d60af..57cdf442c95a 100644 --- a/apps/block_scout_web/lib/block_scout_web/channels/user_socket_v2.ex +++ b/apps/block_scout_web/lib/block_scout_web/channels/user_socket_v2.ex @@ -14,6 +14,11 @@ defmodule BlockScoutWeb.UserSocketV2 do channel("token_instances:*", BlockScoutWeb.TokenInstanceChannel) channel("zkevm_batches:*", BlockScoutWeb.PolygonZkevmConfirmedBatchChannel) + case Application.compile_env(:explorer, :chain_type) do + :arbitrum -> channel("arbitrum:*", BlockScoutWeb.ArbitrumChannel) + _ -> nil + end + def connect(_params, socket) do {:ok, socket} end diff --git a/apps/block_scout_web/lib/block_scout_web/notifier.ex b/apps/block_scout_web/lib/block_scout_web/notifier.ex index 951579c042c9..f364dfa64e00 100644 --- a/apps/block_scout_web/lib/block_scout_web/notifier.ex +++ b/apps/block_scout_web/lib/block_scout_web/notifier.ex @@ -29,6 +29,14 @@ defmodule BlockScoutWeb.Notifier do @check_broadcast_sequence_period 500 + case Application.compile_env(:explorer, :chain_type) do + :arbitrum -> + @chain_type_specific_events ~w(new_arbitrum_batches new_messages_to_arbitrum_amount)a + + _ -> + nil + end + def handle_event({:chain_event, :addresses, type, addresses}) when type in [:realtime, :on_demand] do Endpoint.broadcast("addresses:new_address", "count", %{count: Counters.address_estimated_count()}) @@ -280,6 +288,16 @@ defmodule BlockScoutWeb.Notifier do }) end + case Application.compile_env(:explorer, :chain_type) do + :arbitrum -> + def handle_event({:chain_event, topic, _, _} = event) when topic in @chain_type_specific_events, + # credo:disable-for-next-line Credo.Check.Design.AliasUsage + do: BlockScoutWeb.Notifiers.Arbitrum.handle_event(event) + + _ -> + nil + end + def handle_event(event) do Logger.warning("Unknown broadcasted event #{inspect(event)}.") nil diff --git a/apps/block_scout_web/lib/block_scout_web/notifiers/arbitrum.ex b/apps/block_scout_web/lib/block_scout_web/notifiers/arbitrum.ex new file mode 100644 index 000000000000..2b7589dc0f3b --- /dev/null +++ b/apps/block_scout_web/lib/block_scout_web/notifiers/arbitrum.ex @@ -0,0 +1,31 @@ +defmodule BlockScoutWeb.Notifiers.Arbitrum do + @moduledoc """ + Module to handle and broadcast Arbitrum related events. 
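+
+  Events are expected to arrive in the broadcasted form handled below
+  (payload values are illustrative):
+
+      {:chain_event, :new_arbitrum_batches, :realtime, batches}
+      {:chain_event, :new_messages_to_arbitrum_amount, :realtime, 5}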
+ """ + + alias BlockScoutWeb.API.V2.ArbitrumView + alias BlockScoutWeb.Endpoint + + require Logger + + def handle_event({:chain_event, :new_arbitrum_batches, :realtime, batches}) do + batches + |> Enum.sort_by(& &1.number, :asc) + |> Enum.each(fn batch -> + Endpoint.broadcast("arbitrum:new_batch", "new_arbitrum_batch", %{ + batch: ArbitrumView.render_base_info_for_batch(batch) + }) + end) + end + + def handle_event({:chain_event, :new_messages_to_arbitrum_amount, :realtime, new_messages_amount}) do + Endpoint.broadcast("arbitrum:new_messages_to_rollup_amount", "new_messages_to_rollup_amount", %{ + new_messages_to_rollup_amount: new_messages_amount + }) + end + + def handle_event(event) do + Logger.warning("Unknown broadcasted event #{inspect(event)}.") + nil + end +end diff --git a/apps/block_scout_web/lib/block_scout_web/realtime_event_handler.ex b/apps/block_scout_web/lib/block_scout_web/realtime_event_handler.ex index b19ead1cc046..c2aa239fb049 100644 --- a/apps/block_scout_web/lib/block_scout_web/realtime_event_handler.ex +++ b/apps/block_scout_web/lib/block_scout_web/realtime_event_handler.ex @@ -12,6 +12,19 @@ defmodule BlockScoutWeb.RealtimeEventHandler do GenServer.start_link(__MODULE__, [], name: __MODULE__) end + case Application.compile_env(:explorer, :chain_type) do + :arbitrum -> + def chain_type_specific_subscriptions do + Subscriber.to(:new_arbitrum_batches, :realtime) + Subscriber.to(:new_messages_to_arbitrum_amount, :realtime) + end + + _ -> + def chain_type_specific_subscriptions do + nil + end + end + @impl true def init([]) do Subscriber.to(:address_coin_balances, :realtime) @@ -34,6 +47,9 @@ defmodule BlockScoutWeb.RealtimeEventHandler do # Does not come from the indexer Subscriber.to(:exchange_rate) Subscriber.to(:transaction_stats) + + chain_type_specific_subscriptions() + {:ok, []} end diff --git a/apps/block_scout_web/lib/block_scout_web/views/api/v2/arbitrum_view.ex b/apps/block_scout_web/lib/block_scout_web/views/api/v2/arbitrum_view.ex index 3fa32628848b..185a00da1a69 100644 --- a/apps/block_scout_web/lib/block_scout_web/views/api/v2/arbitrum_view.ex +++ b/apps/block_scout_web/lib/block_scout_web/views/api/v2/arbitrum_view.ex @@ -114,21 +114,71 @@ defmodule BlockScoutWeb.API.V2.ArbitrumView do # transaction that committed the batch to L1. # # ## Parameters - # - `batches`: A list of `Explorer.Chain.Arbitrum.L1Batch` entries. + # - `batches`: A list of `Explorer.Chain.Arbitrum.L1Batch` entries or a list of maps + # with the corresponding fields. # # ## Returns # - A list of maps with detailed information about each batch, formatted for use # in JSON HTTP responses. - @spec render_arbitrum_batches([L1Batch]) :: [map()] + @spec render_arbitrum_batches( + [L1Batch.t()] + | [ + %{ + :number => non_neg_integer(), + :transactions_count => non_neg_integer(), + :start_block => non_neg_integer(), + :end_block => non_neg_integer(), + :commitment_transaction => %{ + :hash => binary(), + :block_number => non_neg_integer(), + :timestamp => DateTime.t(), + :status => :finalized | :unfinalized, + optional(any()) => any() + }, + optional(any()) => any() + } + ] + ) :: [map()] defp render_arbitrum_batches(batches) do - Enum.map(batches, fn batch -> - %{ - "number" => batch.number, - "transactions_count" => batch.transactions_count, - "blocks_count" => batch.end_block - batch.start_block + 1 - } - |> add_l1_tx_info(batch) - end) + Enum.map(batches, &render_base_info_for_batch/1) + end + + # Transforms a L1 batch into a map format for HTTP response. 
+ # + # This function processes an Arbitrum L1 batch and converts it into a map that + # includes basic batch information and details of the associated transaction + # that committed the batch to L1. + # + # ## Parameters + # - `batch`: Either an `Explorer.Chain.Arbitrum.L1Batch` entry or a map with + # the corresponding fields. + # + # ## Returns + # - A map with detailed information about the batch, formatted for use in JSON HTTP responses. + @spec render_base_info_for_batch( + L1Batch.t() + | %{ + :number => non_neg_integer(), + :transactions_count => non_neg_integer(), + :start_block => non_neg_integer(), + :end_block => non_neg_integer(), + :commitment_transaction => %{ + :hash => binary(), + :block_number => non_neg_integer(), + :timestamp => DateTime.t(), + :status => :finalized | :unfinalized, + optional(any()) => any() + }, + optional(any()) => any() + } + ) :: map() + def render_base_info_for_batch(batch) do + %{ + "number" => batch.number, + "transactions_count" => batch.transactions_count, + "blocks_count" => batch.end_block - batch.start_block + 1 + } + |> add_l1_tx_info(batch) end @doc """ @@ -228,8 +278,7 @@ defmodule BlockScoutWeb.API.V2.ArbitrumView do # Augments an output JSON with commit transaction details and its status. @spec add_l1_tx_info(map(), %{ - :__struct__ => L1Batch, - :commitment_transaction => any(), + :commitment_transaction => LifecycleTransaction.t() | LifecycleTransaction.to_import(), optional(any()) => any() }) :: map() defp add_l1_tx_info(out_json, %L1Batch{} = batch) do @@ -246,6 +295,25 @@ defmodule BlockScoutWeb.API.V2.ArbitrumView do }) end + defp add_l1_tx_info(out_json, %{ + commitment_transaction: %{ + hash: hash, + block_number: block_number, + timestamp: ts, + status: status + } + }) do + out_json + |> Map.merge(%{ + "commitment_transaction" => %{ + "hash" => %Hash{byte_count: 32, bytes: hash}, + "block_number" => block_number, + "timestamp" => ts, + "status" => status + } + }) + end + # Augments an output JSON with commit and confirm transaction details and their statuses. @spec add_l1_txs_info_and_status(map(), %{ :commitment_transaction => any(), diff --git a/apps/explorer/lib/explorer/chain/events/publisher.ex b/apps/explorer/lib/explorer/chain/events/publisher.ex index 55bdd4a217ce..adea72f104f0 100644 --- a/apps/explorer/lib/explorer/chain/events/publisher.ex +++ b/apps/explorer/lib/explorer/chain/events/publisher.ex @@ -3,7 +3,22 @@ defmodule Explorer.Chain.Events.Publisher do Publishes events related to the Chain context. 
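 
   A minimal usage sketch of the pub/sub pair, as wired up by this patch (the
   event name must be whitelisted in `@allowed_events` below):
 
       # consumer side, e.g. in BlockScoutWeb.RealtimeEventHandler
       Subscriber.to(:new_arbitrum_batches, :realtime)
 
       # producer side, e.g. in Indexer.Fetcher.Arbitrum.Workers.NewBatches
       Publisher.broadcast([{:new_arbitrum_batches, batches}], :realtime)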
""" - @allowed_events ~w(addresses address_coin_balances address_token_balances address_current_token_balances blocks block_rewards internal_transactions last_block_number optimism_deposits token_transfers transactions contract_verification_result token_total_supply changed_bytecode fetched_bytecode fetched_token_instance_metadata smart_contract_was_verified zkevm_confirmed_batches eth_bytecode_db_lookup_started smart_contract_was_not_verified)a + @common_allowed_events ~w(addresses address_coin_balances address_token_balances + address_current_token_balances blocks block_rewards internal_transactions + last_block_number optimism_deposits token_transfers transactions contract_verification_result + token_total_supply changed_bytecode fetched_bytecode fetched_token_instance_metadata + smart_contract_was_verified zkevm_confirmed_batches eth_bytecode_db_lookup_started + smart_contract_was_not_verified)a + + case Application.compile_env(:explorer, :chain_type) do + :arbitrum -> + @chain_type_specific_allowed_events ~w(new_arbitrum_batches new_messages_to_arbitrum_amount)a + + _ -> + @chain_type_specific_allowed_events ~w()a + end + + @allowed_events @common_allowed_events ++ @chain_type_specific_allowed_events def broadcast(_data, false), do: :ok diff --git a/apps/explorer/lib/explorer/chain/events/subscriber.ex b/apps/explorer/lib/explorer/chain/events/subscriber.ex index f5998223417b..3e76f6579615 100644 --- a/apps/explorer/lib/explorer/chain/events/subscriber.ex +++ b/apps/explorer/lib/explorer/chain/events/subscriber.ex @@ -3,7 +3,22 @@ defmodule Explorer.Chain.Events.Subscriber do Subscribes to events related to the Chain context. """ - @allowed_broadcast_events ~w(addresses address_coin_balances address_token_balances address_current_token_balances blocks block_rewards internal_transactions last_block_number optimism_deposits token_transfers transactions contract_verification_result token_total_supply changed_bytecode fetched_bytecode fetched_token_instance_metadata smart_contract_was_verified zkevm_confirmed_batches eth_bytecode_db_lookup_started smart_contract_was_not_verified)a + @common_allowed_broadcast_events ~w(addresses address_coin_balances address_token_balances + address_current_token_balances blocks block_rewards internal_transactions + last_block_number optimism_deposits token_transfers transactions contract_verification_result + token_total_supply changed_bytecode fetched_bytecode fetched_token_instance_metadata + smart_contract_was_verified zkevm_confirmed_batches eth_bytecode_db_lookup_started + smart_contract_was_not_verified)a + + case Application.compile_env(:explorer, :chain_type) do + :arbitrum -> + @chain_type_specific_allowed_broadcast_events ~w(new_arbitrum_batches new_messages_to_arbitrum_amount)a + + _ -> + @chain_type_specific_allowed_broadcast_events ~w()a + end + + @allowed_broadcast_events @common_allowed_broadcast_events ++ @chain_type_specific_allowed_broadcast_events @allowed_broadcast_types ~w(catchup realtime on_demand contract_verification_result)a diff --git a/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_batches.ex b/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_batches.ex index c0672139ecf1..cd44cb1d637b 100644 --- a/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_batches.ex +++ b/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_batches.ex @@ -34,6 +34,7 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do alias Explorer.Chain alias Explorer.Chain.Arbitrum + alias Explorer.Chain.Events.Publisher require Logger @@ 
-296,7 +297,8 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do # constructs comprehensive data structures for batches, lifecycle transactions, rollup # blocks, and rollup transactions. Additionally, it identifies any L2-to-L1 messages that # have been committed within these batches and updates their status. All discovered and - # processed data are then imported into the database. + # processed data are then imported into the database. If new batches were found, they are + # announced to be broadcasted through a websocket. # # ## Parameters # - `sequencer_inbox_address`: The SequencerInbox contract address used to filter logs. @@ -326,10 +328,14 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do l1_rpc_config.json_rpc_named_arguments ) + new_batches_discovery? = end_block >= start_block + logs = - if end_block >= start_block do + if new_batches_discovery? do + # called by `discover` raw_logs else + # called by `discover_historical` Enum.reverse(raw_logs) end @@ -355,6 +361,13 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do arbitrum_messages: %{params: committed_txs}, timeout: :infinity }) + + if not Enum.empty?(batches) and new_batches_discovery? do + Publisher.broadcast( + [{:new_arbitrum_batches, extend_batches_with_commitment_transactions(batches, lifecycle_txs)}], + :realtime + ) + end end) end @@ -1094,4 +1107,22 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do Map.put(tx, :status, :sent) end) end + + # Extends the provided list of batches with their corresponding commitment transactions. + @spec extend_batches_with_commitment_transactions( + [%{:commitment_id => non_neg_integer(), optional(any()) => any()}], + [%{:id => non_neg_integer(), optional(any()) => any()}] + ) :: [ + %{ + :commitment_id => non_neg_integer(), + :commitment_transaction => %{:id => non_neg_integer(), optional(any()) => any()}, + optional(any()) => any() + } + ] + defp extend_batches_with_commitment_transactions(batches, lifecycle_txs) do + Enum.map(batches, fn batch -> + lifecycle_tx = Enum.find(lifecycle_txs, fn tx -> tx.id == batch.commitment_id end) + Map.put(batch, :commitment_transaction, lifecycle_tx) + end) + end end diff --git a/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_messages_to_l2.ex b/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_messages_to_l2.ex index d0f155679880..ab030735bebd 100644 --- a/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_messages_to_l2.ex +++ b/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_messages_to_l2.ex @@ -25,6 +25,7 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewMessagesToL2 do alias Explorer.Chain alias Explorer.Chain.Arbitrum + alias Explorer.Chain.Events.Publisher require Logger @@ -47,7 +48,9 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewMessagesToL2 do This function calculates the block range for discovering new messages from L1 to L2 based on the latest block number available on the network. It then fetches logs related to L1-to-L2 events within this range, extracts message details from both - the log and the corresponding L1 transaction, and imports them into the database. + the log and the corresponding L1 transaction, and imports them into the database. If + new messages were discovered, their amount is announced to be broadcasted through + a websocket. 
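+
+    A sketch of the announcement call made further down in this module (the
+    event name is whitelisted in `Explorer.Chain.Events.Publisher`):
+
+        Publisher.broadcast(%{new_messages_to_arbitrum_amount: new_messages_amount}, :realtime)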
## Parameters - A map containing: @@ -101,13 +104,18 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewMessagesToL2 do if start_block <= end_block do log_info("Block range for discovery new messages from L1: #{start_block}..#{end_block}") - discover( - bridge_address, - start_block, - end_block, - json_rpc_named_arguments, - chunk_size - ) + new_messages_amount = + discover( + bridge_address, + start_block, + end_block, + json_rpc_named_arguments, + chunk_size + ) + + if new_messages_amount > 0 do + Publisher.broadcast(%{new_messages_to_arbitrum_amount: new_messages_amount}, :realtime) + end {:ok, end_block} else @@ -201,7 +209,7 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewMessagesToL2 do # - `chunk_size`: The size of chunks for processing RPC calls in batches. # # ## Returns - # - N/A + # - amount of discovered messages defp discover(bridge_address, start_block, end_block, json_rpc_named_argument, chunk_size) do logs = get_logs_for_l1_to_l2_messages( @@ -222,6 +230,8 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewMessagesToL2 do arbitrum_messages: %{params: messages}, timeout: :infinity }) + + length(messages) end # Retrieves logs representing the `MessageDelivered` events. From c89696b41226e211222b1a2d4bc3fc2fc28055d2 Mon Sep 17 00:00:00 2001 From: Kirill Fedoseev Date: Fri, 5 Jul 2024 15:19:52 +0400 Subject: [PATCH 06/32] fix: empty revert reasons in geth variant (#10243) --- .../lib/ethereum_jsonrpc/geth.ex | 42 ++--- .../lib/ethereum_jsonrpc/geth/call.ex | 164 +++++------------- 2 files changed, 56 insertions(+), 150 deletions(-) diff --git a/apps/ethereum_jsonrpc/lib/ethereum_jsonrpc/geth.ex b/apps/ethereum_jsonrpc/lib/ethereum_jsonrpc/geth.ex index 01c7ca4871e0..2033f5f95f18 100644 --- a/apps/ethereum_jsonrpc/lib/ethereum_jsonrpc/geth.ex +++ b/apps/ethereum_jsonrpc/lib/ethereum_jsonrpc/geth.ex @@ -400,32 +400,22 @@ defmodule EthereumJSONRPC.Geth do type when type in ~w(call callcode delegatecall staticcall create create2 selfdestruct revert stop invalid) -> new_trace_address = [index | trace_address] - formatted_call = - %{ - "type" => if(type in ~w(call callcode delegatecall staticcall), do: "call", else: type), - "callType" => type, - "from" => from, - "to" => Map.get(call, "to", "0x"), - "createdContractAddressHash" => Map.get(call, "to", "0x"), - "value" => Map.get(call, "value", "0x0"), - "gas" => Map.get(call, "gas", "0x0"), - "gasUsed" => Map.get(call, "gasUsed", "0x0"), - "input" => Map.get(call, "input", "0x"), - "init" => Map.get(call, "input", "0x"), - "createdContractCode" => Map.get(call, "output", "0x"), - "traceAddress" => if(inner?, do: Enum.reverse(new_trace_address), else: []), - "error" => call["error"] - } - |> case do - %{"error" => nil} = ok_call -> - ok_call - |> Map.delete("error") - # to handle staticcall, all other cases handled by EthereumJSONRPC.Geth.Call.elixir_to_internal_transaction_params/1 - |> Map.put("output", Map.get(call, "output", "0x")) - - error_call -> - error_call - end + formatted_call = %{ + "type" => if(type in ~w(call callcode delegatecall staticcall), do: "call", else: type), + "callType" => type, + "from" => from, + "to" => Map.get(call, "to", "0x"), + "createdContractAddressHash" => Map.get(call, "to", "0x"), + "value" => Map.get(call, "value", "0x0"), + "gas" => Map.get(call, "gas", "0x0"), + "gasUsed" => Map.get(call, "gasUsed", "0x0"), + "input" => Map.get(call, "input", "0x"), + "output" => Map.get(call, "output", "0x"), + "init" => Map.get(call, "input", "0x"), + "createdContractCode" => Map.get(call, "output", "0x"), + 
"traceAddress" => if(inner?, do: Enum.reverse(new_trace_address), else: []), + "error" => call["error"] + } parse_call_tracer_calls( Map.get(call, "calls", []), diff --git a/apps/ethereum_jsonrpc/lib/ethereum_jsonrpc/geth/call.ex b/apps/ethereum_jsonrpc/lib/ethereum_jsonrpc/geth/call.ex index a76a0dd955ac..d019e0210ea6 100644 --- a/apps/ethereum_jsonrpc/lib/ethereum_jsonrpc/geth/call.ex +++ b/apps/ethereum_jsonrpc/lib/ethereum_jsonrpc/geth/call.ex @@ -4,6 +4,7 @@ defmodule EthereumJSONRPC.Geth.Call do using a custom tracer (`priv/js/ethereum_jsonrpc/geth/debug_traceTransaction/tracer.js`). """ import EthereumJSONRPC, only: [quantity_to_integer: 1] + import EthereumJSONRPC.Transaction, only: [put_if_present: 3] @doc """ A call can call another another contract: @@ -75,7 +76,9 @@ defmodule EthereumJSONRPC.Geth.Call do from_address_hash: "0x8ec75ef3adf6c953775d0738e0e7bd60e647e5ef", to_address_hash: "0xaae465ad04b12e90c32291e59b65ca781c57e361", gas: 1225, + gas_used: 1225, input: "0xa83627de", + output: nil, value: 0 } @@ -110,9 +113,11 @@ defmodule EthereumJSONRPC.Geth.Call do from_address_hash: "0xaf7cf620c3df1b9ccbc640be903d5ea6cea7bc96", to_address_hash: "0x80629758f88b3f30b7f1244e4588444d6276eef0", input: "0x49b46d5d", + output: nil, error: "stack limit reached 1024 (1024)", gas: 1445580, - value: 0 + gas_used: 1445580, + value: 0, } A contract creation: @@ -179,6 +184,7 @@ defmodule EthereumJSONRPC.Geth.Call do init: "0xf49e4745", error: "stack underflow (0 <=> 6)", gas: 540776, + gas_used: 540776, value: 5287885714285715 } @@ -254,8 +260,7 @@ defmodule EthereumJSONRPC.Geth.Call do gas_used: 1040, input: "0x0f370699", output: "0x", - value: 0, - error: nil + value: 0 } A selfdestruct destroys the calling contract and sends any left over balance to the to address. 
@@ -300,6 +305,8 @@ defmodule EthereumJSONRPC.Geth.Call do Enum.into(call, %{}, &entry_to_elixir/1) end + defp entry_to_elixir({"error", nil} = entry), do: entry + defp entry_to_elixir({key, value} = entry) when key in ~w(callType createdContractAddressHash createdContractCode error from init input output to transactionHash type) and is_binary(value), @@ -318,74 +325,6 @@ defmodule EthereumJSONRPC.Geth.Call do entry end - defp elixir_to_internal_transaction_params(%{ - "blockNumber" => block_number, - "transactionIndex" => transaction_index, - "transactionHash" => transaction_hash, - "index" => index, - "traceAddress" => trace_address, - "type" => type, - "callType" => call_type, - "from" => from_address_hash, - "to" => to_address_hash, - "gas" => gas, - "input" => input, - "error" => error, - "value" => value - }) - when type in ~w(call invalid) and call_type in ~w(call callcode delegatecall invalid) do - %{ - block_number: block_number, - transaction_index: transaction_index, - transaction_hash: transaction_hash, - index: index, - trace_address: trace_address, - type: "call", - call_type: call_type, - from_address_hash: from_address_hash, - to_address_hash: to_address_hash, - gas: gas, - input: input, - error: error, - value: value - } - end - - defp elixir_to_internal_transaction_params(%{ - "blockNumber" => block_number, - "transactionIndex" => transaction_index, - "transactionHash" => transaction_hash, - "index" => index, - "traceAddress" => trace_address, - "type" => type, - "callType" => call_type, - "from" => from_address_hash, - "to" => to_address_hash, - "gas" => gas, - "gasUsed" => gas_used, - "input" => input, - "output" => output, - "value" => value - }) - when type in ~w(call invalid) and call_type in ~w(call callcode delegatecall invalid) do - %{ - block_number: block_number, - transaction_index: transaction_index, - transaction_hash: transaction_hash, - index: index, - trace_address: trace_address, - type: "call", - call_type: call_type, - from_address_hash: from_address_hash, - to_address_hash: to_address_hash, - gas: gas, - gas_used: gas_used, - input: input, - output: output, - value: value - } - end - defp elixir_to_internal_transaction_params( %{ "blockNumber" => block_number, @@ -393,23 +332,24 @@ defmodule EthereumJSONRPC.Geth.Call do "transactionHash" => transaction_hash, "index" => index, "traceAddress" => trace_address, - "type" => "call" = type, - "callType" => "staticcall" = call_type, + "type" => type, + "callType" => call_type, "from" => from_address_hash, "to" => to_address_hash, - "input" => input, "gas" => gas, "gasUsed" => gas_used, - "value" => 0 = value + "input" => input, + "value" => value } = params - ) do + ) + when type in ~w(call invalid) and call_type in ~w(call callcode delegatecall staticcall invalid) do %{ block_number: block_number, transaction_index: transaction_index, transaction_hash: transaction_hash, index: index, trace_address: trace_address, - type: type, + type: "call", call_type: call_type, from_address_hash: from_address_hash, to_address_hash: to_address_hash, @@ -417,55 +357,28 @@ defmodule EthereumJSONRPC.Geth.Call do gas_used: gas_used, input: input, output: params["output"], - value: value, - error: params["error"] - } - end - - defp elixir_to_internal_transaction_params(%{ - "blockNumber" => block_number, - "transactionIndex" => transaction_index, - "transactionHash" => transaction_hash, - "index" => index, - "traceAddress" => trace_address, - "type" => type, - "from" => from_address_hash, - "error" => error, - "gas" => gas, - 
"init" => init, - "value" => value - }) - when type in ~w(create create2) do - %{ - block_number: block_number, - transaction_index: transaction_index, - transaction_hash: transaction_hash, - index: index, - trace_address: trace_address, - type: type, - from_address_hash: from_address_hash, - gas: gas, - error: error, - init: init, value: value } + |> put_if_present(params, [ + {"error", :error} + ]) end - defp elixir_to_internal_transaction_params(%{ - "blockNumber" => block_number, - "transactionIndex" => transaction_index, - "transactionHash" => transaction_hash, - "index" => index, - "traceAddress" => trace_address, - "type" => type, - "from" => from_address_hash, - "createdContractAddressHash" => created_contract_address_hash, - "gas" => gas, - "gasUsed" => gas_used, - "init" => init, - "createdContractCode" => created_contract_code, - "value" => value - }) + defp elixir_to_internal_transaction_params( + %{ + "blockNumber" => block_number, + "transactionIndex" => transaction_index, + "transactionHash" => transaction_hash, + "index" => index, + "traceAddress" => trace_address, + "type" => type, + "from" => from_address_hash, + "gas" => gas, + "gasUsed" => gas_used, + "init" => init, + "value" => value + } = params + ) when type in ~w(create create2) do %{ block_number: block_number, @@ -477,11 +390,14 @@ defmodule EthereumJSONRPC.Geth.Call do from_address_hash: from_address_hash, gas: gas, gas_used: gas_used, - created_contract_address_hash: created_contract_address_hash, init: init, - created_contract_code: created_contract_code, value: value } + |> put_if_present(params, [ + {"error", :error}, + {"createdContractAddressHash", :created_contract_address_hash}, + {"createdContractCode", :created_contract_code} + ]) end defp elixir_to_internal_transaction_params(%{ From 2a00b0cd1b51b2d2c60b5e6c51bac6e6c9e8989a Mon Sep 17 00:00:00 2001 From: Qwerty5Uiop <105209995+Qwerty5Uiop@users.noreply.github.com> Date: Fri, 5 Jul 2024 16:19:15 +0400 Subject: [PATCH 07/32] chore: Refactor init functions to use continue if needed (#10300) --- .../explorer/counters/average_block_time.ex | 7 +- .../explorer/migrator/filling_migration.ex | 9 +- .../sanitize_incorrect_nft_token_transfers.ex | 9 +- ...sanitize_incorrect_weth_token_transfers.ex | 9 +- .../migrator/sanitize_missing_block_ranges.ex | 14 +-- .../indexer/fetcher/polygon_edge/deposit.ex | 26 ++++-- .../fetcher/polygon_edge/deposit_execute.ex | 28 +++--- .../fetcher/polygon_edge/withdrawal.ex | 28 +++--- .../fetcher/polygon_edge/withdrawal_exit.ex | 26 ++++-- .../fetcher/token_instance/sanitize_erc721.ex | 7 +- .../lib/indexer/fetcher/transaction_action.ex | 90 ++++++++++--------- .../indexer/lib/indexer/fetcher/withdrawal.ex | 8 +- 12 files changed, 162 insertions(+), 99 deletions(-) diff --git a/apps/explorer/lib/explorer/counters/average_block_time.ex b/apps/explorer/lib/explorer/counters/average_block_time.ex index 02e8e3464705..44d459cd384d 100644 --- a/apps/explorer/lib/explorer/counters/average_block_time.ex +++ b/apps/explorer/lib/explorer/counters/average_block_time.ex @@ -44,7 +44,12 @@ defmodule Explorer.Counters.AverageBlockTime do refresh_period = Application.get_env(:explorer, __MODULE__)[:cache_period] Process.send_after(self(), :refresh_timestamps, refresh_period) - {:ok, refresh_timestamps()} + {:ok, %{}, {:continue, :ok}} + end + + @impl true + def handle_continue(:ok, _state) do + {:noreply, refresh_timestamps()} end @impl true diff --git a/apps/explorer/lib/explorer/migrator/filling_migration.ex 
b/apps/explorer/lib/explorer/migrator/filling_migration.ex index 507dfcb6e5f7..37d1264810a0 100644 --- a/apps/explorer/lib/explorer/migrator/filling_migration.ex +++ b/apps/explorer/lib/explorer/migrator/filling_migration.ex @@ -32,15 +32,20 @@ defmodule Explorer.Migrator.FillingMigration do @impl true def init(_) do + {:ok, %{}, {:continue, :ok}} + end + + @impl true + def handle_continue(:ok, state) do case MigrationStatus.get_status(migration_name()) do "completed" -> update_cache() - :ignore + {:stop, :normal, state} _ -> MigrationStatus.set_status(migration_name(), "started") schedule_batch_migration() - {:ok, %{}} + {:noreply, %{}} end end diff --git a/apps/explorer/lib/explorer/migrator/sanitize_incorrect_nft_token_transfers.ex b/apps/explorer/lib/explorer/migrator/sanitize_incorrect_nft_token_transfers.ex index 4933abbf5bb6..81eaa1ac5bba 100644 --- a/apps/explorer/lib/explorer/migrator/sanitize_incorrect_nft_token_transfers.ex +++ b/apps/explorer/lib/explorer/migrator/sanitize_incorrect_nft_token_transfers.ex @@ -24,14 +24,19 @@ defmodule Explorer.Migrator.SanitizeIncorrectNFTTokenTransfers do @impl true def init(_) do + {:ok, %{}, {:continue, :ok}} + end + + @impl true + def handle_continue(:ok, state) do case MigrationStatus.get_status(@migration_name) do "completed" -> - :ignore + {:stop, :normal, state} _ -> MigrationStatus.set_status(@migration_name, "started") schedule_batch_migration() - {:ok, %{step: :delete}} + {:noreply, %{step: :delete}} end end diff --git a/apps/explorer/lib/explorer/migrator/sanitize_incorrect_weth_token_transfers.ex b/apps/explorer/lib/explorer/migrator/sanitize_incorrect_weth_token_transfers.ex index 038ce5fd6d7b..9bd33e3444a2 100644 --- a/apps/explorer/lib/explorer/migrator/sanitize_incorrect_weth_token_transfers.ex +++ b/apps/explorer/lib/explorer/migrator/sanitize_incorrect_weth_token_transfers.ex @@ -24,14 +24,19 @@ defmodule Explorer.Migrator.SanitizeIncorrectWETHTokenTransfers do @impl true def init(_) do + {:ok, %{}, {:continue, :ok}} + end + + @impl true + def handle_continue(:ok, state) do case MigrationStatus.get_status(@migration_name) do "completed" -> - :ignore + {:stop, :normal, state} _ -> MigrationStatus.set_status(@migration_name, "started") schedule_batch_migration() - {:ok, %{step: :delete_duplicates}} + {:noreply, %{step: :delete_duplicates}} end end diff --git a/apps/explorer/lib/explorer/migrator/sanitize_missing_block_ranges.ex b/apps/explorer/lib/explorer/migrator/sanitize_missing_block_ranges.ex index 29408229c021..03166816f981 100644 --- a/apps/explorer/lib/explorer/migrator/sanitize_missing_block_ranges.ex +++ b/apps/explorer/lib/explorer/migrator/sanitize_missing_block_ranges.ex @@ -15,19 +15,19 @@ defmodule Explorer.Migrator.SanitizeMissingBlockRanges do end def init(_) do + {:ok, %{}, {:continue, :ok}} + end + + def handle_continue(:ok, state) do case MigrationStatus.get_status(@migration_name) do "completed" -> - :ignore + :ok _ -> MigrationStatus.set_status(@migration_name, "started") - {:ok, %{}, {:continue, :ok}} + MissingBlockRange.sanitize_missing_block_ranges() + MigrationStatus.set_status(@migration_name, "completed") end - end - - def handle_continue(:ok, state) do - MissingBlockRange.sanitize_missing_block_ranges() - MigrationStatus.set_status(@migration_name, "completed") {:stop, :normal, state} end diff --git a/apps/indexer/lib/indexer/fetcher/polygon_edge/deposit.ex b/apps/indexer/lib/indexer/fetcher/polygon_edge/deposit.ex index 556acfd892a6..6864d662733b 100644 --- 
a/apps/indexer/lib/indexer/fetcher/polygon_edge/deposit.ex +++ b/apps/indexer/lib/indexer/fetcher/polygon_edge/deposit.ex @@ -45,19 +45,27 @@ defmodule Indexer.Fetcher.PolygonEdge.Deposit do @impl GenServer def init(_args) do + {:ok, %{}, {:continue, :ok}} + end + + @impl GenServer + def handle_continue(:ok, state) do Logger.metadata(fetcher: @fetcher_name) env = Application.get_all_env(:indexer)[__MODULE__] - PolygonEdge.init_l1( - Deposit, - env, - self(), - env[:state_sender], - "State Sender", - "polygon_edge_deposits", - "Deposits" - ) + case PolygonEdge.init_l1( + Deposit, + env, + self(), + env[:state_sender], + "State Sender", + "polygon_edge_deposits", + "Deposits" + ) do + :ignore -> {:stop, :normal, state} + {:ok, new_state} -> {:noreply, new_state} + end end @impl GenServer diff --git a/apps/indexer/lib/indexer/fetcher/polygon_edge/deposit_execute.ex b/apps/indexer/lib/indexer/fetcher/polygon_edge/deposit_execute.ex index c7167748369f..90159b16ccd2 100644 --- a/apps/indexer/lib/indexer/fetcher/polygon_edge/deposit_execute.ex +++ b/apps/indexer/lib/indexer/fetcher/polygon_edge/deposit_execute.ex @@ -44,21 +44,29 @@ defmodule Indexer.Fetcher.PolygonEdge.DepositExecute do @impl GenServer def init(args) do + {:ok, %{}, {:continue, args}} + end + + @impl GenServer + def handle_continue(args, state) do Logger.metadata(fetcher: @fetcher_name) json_rpc_named_arguments = args[:json_rpc_named_arguments] env = Application.get_all_env(:indexer)[__MODULE__] - PolygonEdge.init_l2( - DepositExecute, - env, - self(), - env[:state_receiver], - "StateReceiver", - "polygon_edge_deposit_executes", - "Deposit Executes", - json_rpc_named_arguments - ) + case PolygonEdge.init_l2( + DepositExecute, + env, + self(), + env[:state_receiver], + "StateReceiver", + "polygon_edge_deposit_executes", + "Deposit Executes", + json_rpc_named_arguments + ) do + :ignore -> {:stop, :normal, state} + {:ok, new_state} -> {:noreply, new_state} + end end @impl GenServer diff --git a/apps/indexer/lib/indexer/fetcher/polygon_edge/withdrawal.ex b/apps/indexer/lib/indexer/fetcher/polygon_edge/withdrawal.ex index c629b1df3343..47f6661830e5 100644 --- a/apps/indexer/lib/indexer/fetcher/polygon_edge/withdrawal.ex +++ b/apps/indexer/lib/indexer/fetcher/polygon_edge/withdrawal.ex @@ -49,21 +49,29 @@ defmodule Indexer.Fetcher.PolygonEdge.Withdrawal do @impl GenServer def init(args) do + {:ok, %{}, {:continue, args}} + end + + @impl GenServer + def handle_continue(args, state) do Logger.metadata(fetcher: @fetcher_name) json_rpc_named_arguments = args[:json_rpc_named_arguments] env = Application.get_all_env(:indexer)[__MODULE__] - PolygonEdge.init_l2( - Withdrawal, - env, - self(), - env[:state_sender], - "L2StateSender", - "polygon_edge_withdrawals", - "Withdrawals", - json_rpc_named_arguments - ) + case PolygonEdge.init_l2( + Withdrawal, + env, + self(), + env[:state_sender], + "L2StateSender", + "polygon_edge_withdrawals", + "Withdrawals", + json_rpc_named_arguments + ) do + :ignore -> {:stop, :normal, state} + {:ok, new_state} -> {:noreply, new_state} + end end @impl GenServer diff --git a/apps/indexer/lib/indexer/fetcher/polygon_edge/withdrawal_exit.ex b/apps/indexer/lib/indexer/fetcher/polygon_edge/withdrawal_exit.ex index e19ea6517cf1..f949dbee7eb6 100644 --- a/apps/indexer/lib/indexer/fetcher/polygon_edge/withdrawal_exit.ex +++ b/apps/indexer/lib/indexer/fetcher/polygon_edge/withdrawal_exit.ex @@ -37,19 +37,27 @@ defmodule Indexer.Fetcher.PolygonEdge.WithdrawalExit do @impl GenServer def init(_args) do + {:ok, %{}, 
{:continue, :ok}} + end + + @impl GenServer + def handle_continue(:ok, state) do Logger.metadata(fetcher: @fetcher_name) env = Application.get_all_env(:indexer)[__MODULE__] - PolygonEdge.init_l1( - WithdrawalExit, - env, - self(), - env[:exit_helper], - "Exit Helper", - "polygon_edge_withdrawal_exits", - "Withdrawals" - ) + case PolygonEdge.init_l1( + WithdrawalExit, + env, + self(), + env[:exit_helper], + "Exit Helper", + "polygon_edge_withdrawal_exits", + "Withdrawals" + ) do + :ignore -> {:stop, :normal, state} + {:ok, new_state} -> {:noreply, new_state} + end end @impl GenServer diff --git a/apps/indexer/lib/indexer/fetcher/token_instance/sanitize_erc721.ex b/apps/indexer/lib/indexer/fetcher/token_instance/sanitize_erc721.ex index bbe8bf7540b1..7d8938057c24 100644 --- a/apps/indexer/lib/indexer/fetcher/token_instance/sanitize_erc721.ex +++ b/apps/indexer/lib/indexer/fetcher/token_instance/sanitize_erc721.ex @@ -28,10 +28,15 @@ defmodule Indexer.Fetcher.TokenInstance.SanitizeERC721 do @impl true def init(opts) do + {:ok, %{}, {:continue, opts}} + end + + @impl true + def handle_continue(opts, _state) do last_token_address_hash = Constants.get_last_processed_token_address_hash() GenServer.cast(__MODULE__, :fetch_tokens_queue) - {:ok, Map.put(opts, :last_token_address_hash, last_token_address_hash)} + {:noreply, Map.put(opts, :last_token_address_hash, last_token_address_hash)} end @impl true diff --git a/apps/indexer/lib/indexer/fetcher/transaction_action.ex b/apps/indexer/lib/indexer/fetcher/transaction_action.ex index 46dda6639e18..0e1a8a1756ca 100644 --- a/apps/indexer/lib/indexer/fetcher/transaction_action.ex +++ b/apps/indexer/lib/indexer/fetcher/transaction_action.ex @@ -66,6 +66,47 @@ defmodule Indexer.Fetcher.TransactionAction do end end + @impl true + def handle_continue({opts, first_block, last_block}, _state) do + logger_metadata = Logger.metadata() + Logger.metadata(fetcher: :transaction_action) + + max_block_number = Chain.fetch_max_block_number() + + if last_block > max_block_number do + Logger.warning( + "Note, that the last block number (#{last_block}) provided to #{__MODULE__} exceeds max block number available in DB (#{max_block_number})." 
+ ) + end + + supported_protocols = + TransactionAction.supported_protocols() + |> Enum.map(&Atom.to_string(&1)) + + protocols = + opts + |> Keyword.get(:reindex_protocols, "") + |> String.trim() + |> String.split(",") + |> Enum.map(&String.trim(&1)) + |> Enum.filter(&Enum.member?(supported_protocols, &1)) + + next_block = get_next_block(first_block, last_block, protocols) + + state = + %__MODULE__{ + first_block: first_block, + next_block: next_block, + last_block: last_block, + protocols: protocols + } + |> run_fetch() + + Logger.reset_metadata(logger_metadata) + + {:noreply, state} + end + @impl GenServer def handle_info(:fetch, %__MODULE__{} = state) do task = Task.Supervisor.async_nolink(Indexer.Fetcher.TransactionAction.TaskSupervisor, fn -> task(state) end) @@ -195,53 +236,14 @@ defmodule Indexer.Fetcher.TransactionAction do end defp init_fetching(opts, first_block, last_block) do - logger_metadata = Logger.metadata() - Logger.metadata(fetcher: :transaction_action) - first_block = parse_integer(first_block) last_block = parse_integer(last_block) - return = - if is_nil(first_block) or is_nil(last_block) or first_block <= 0 or last_block <= 0 or first_block > last_block do - {:stop, "Correct block range must be provided to #{__MODULE__}."} - else - max_block_number = Chain.fetch_max_block_number() - - if last_block > max_block_number do - Logger.warning( - "Note, that the last block number (#{last_block}) provided to #{__MODULE__} exceeds max block number available in DB (#{max_block_number})." - ) - end - - supported_protocols = - TransactionAction.supported_protocols() - |> Enum.map(&Atom.to_string(&1)) - - protocols = - opts - |> Keyword.get(:reindex_protocols, "") - |> String.trim() - |> String.split(",") - |> Enum.map(&String.trim(&1)) - |> Enum.filter(&Enum.member?(supported_protocols, &1)) - - next_block = get_next_block(first_block, last_block, protocols) - - state = - %__MODULE__{ - first_block: first_block, - next_block: next_block, - last_block: last_block, - protocols: protocols - } - |> run_fetch() - - {:ok, state} - end - - Logger.reset_metadata(logger_metadata) - - return + if is_nil(first_block) or is_nil(last_block) or first_block <= 0 or last_block <= 0 or first_block > last_block do + {:stop, "Correct block range must be provided to #{__MODULE__}."} + else + {:ok, %{}, {:continue, {opts, first_block, last_block}}} + end end defp get_next_block(first_block, last_block, protocols) do diff --git a/apps/indexer/lib/indexer/fetcher/withdrawal.ex b/apps/indexer/lib/indexer/fetcher/withdrawal.ex index fc4924aaf7f8..b8a7707bd93e 100644 --- a/apps/indexer/lib/indexer/fetcher/withdrawal.ex +++ b/apps/indexer/lib/indexer/fetcher/withdrawal.ex @@ -56,7 +56,6 @@ defmodule Indexer.Fetcher.Withdrawal do end state = %__MODULE__{ - blocks_to_fetch: first_block |> Helper.parse_integer() |> missing_block_numbers(), interval: opts[:interval] || @interval, json_rpc_named_arguments: json_rpc_named_arguments, max_batch_size: opts[:max_batch_size] || @batch_size, @@ -65,13 +64,18 @@ defmodule Indexer.Fetcher.Withdrawal do Process.send_after(self(), :fetch_withdrawals, state.interval) - {:ok, state} + {:ok, state, {:continue, first_block}} else Logger.warning("Please, specify the first block of the block range for #{__MODULE__}.") :ignore end end + @impl GenServer + def handle_continue(first_block, state) do + {:noreply, %{state | blocks_to_fetch: first_block |> Helper.parse_integer() |> missing_block_numbers()}} + end + @impl GenServer def handle_info( :fetch_withdrawals, From 
28a7d59c42d867df19f40dad3bcd6b1a16afdefd Mon Sep 17 00:00:00 2001 From: Victor Baranov Date: Mon, 8 Jul 2024 11:31:00 +0300 Subject: [PATCH 08/32] fix: Fix address controller flickering test (#10382) --- .../api/v2/address_controller_test.exs | 32 +++++++++++++++++-- 1 file changed, 29 insertions(+), 3 deletions(-) diff --git a/apps/block_scout_web/test/block_scout_web/controllers/api/v2/address_controller_test.exs b/apps/block_scout_web/test/block_scout_web/controllers/api/v2/address_controller_test.exs index 6e07605fdf58..951095d6eea8 100644 --- a/apps/block_scout_web/test/block_scout_web/controllers/api/v2/address_controller_test.exs +++ b/apps/block_scout_web/test/block_scout_web/controllers/api/v2/address_controller_test.exs @@ -71,7 +71,6 @@ defmodule BlockScoutWeb.API.V2.AddressControllerTest do "creation_tx_hash" => nil, "token" => nil, "coin_balance" => nil, - "exchange_rate" => nil, # todo: added for backward compatibility, remove when frontend unbound from these props "implementation_address" => nil, "implementation_name" => nil, @@ -89,10 +88,37 @@ defmodule BlockScoutWeb.API.V2.AddressControllerTest do } request = get(conn, "/api/v2/addresses/#{Address.checksum(address.hash)}") - assert ^correct_response = json_response(request, 200) + check_response(correct_response, json_response(request, 200)) request = get(conn, "/api/v2/addresses/#{String.downcase(to_string(address.hash))}") - assert ^correct_response = json_response(request, 200) + check_response(correct_response, json_response(request, 200)) + end + + defp check_response(pattern_response, response) do + assert pattern_response["hash"] == response["hash"] + assert pattern_response["is_contract"] == response["is_contract"] + assert pattern_response["is_verified"] == response["is_verified"] + assert pattern_response["name"] == response["name"] + assert pattern_response["private_tags"] == response["private_tags"] + assert pattern_response["public_tags"] == response["public_tags"] + assert pattern_response["watchlist_names"] == response["watchlist_names"] + assert pattern_response["creator_address_hash"] == response["creator_address_hash"] + assert pattern_response["creation_tx_hash"] == response["creation_tx_hash"] + assert pattern_response["token"] == response["token"] + assert pattern_response["coin_balance"] == response["coin_balance"] + assert pattern_response["implementation_address"] == response["implementation_address"] + assert pattern_response["implementation_name"] == response["implementation_name"] + assert pattern_response["implementations"] == response["implementations"] + assert pattern_response["block_number_balance_updated_at"] == response["block_number_balance_updated_at"] + assert pattern_response["has_decompiled_code"] == response["has_decompiled_code"] + assert pattern_response["has_validated_blocks"] == response["has_validated_blocks"] + assert pattern_response["has_logs"] == response["has_logs"] + assert pattern_response["has_tokens"] == response["has_tokens"] + assert pattern_response["has_token_transfers"] == response["has_token_transfers"] + assert pattern_response["watchlist_address_id"] == response["watchlist_address_id"] + assert pattern_response["has_beacon_chain_withdrawals"] == response["has_beacon_chain_withdrawals"] + assert pattern_response["ens_domain_name"] == response["ens_domain_name"] + assert pattern_response["metadata"] == response["metadata"] end test "get EIP-1167 proxy contract info", %{conn: conn} do From b5cd86277a710272021b8de2b52e36c82076b41f Mon Sep 17 00:00:00 2001 From: 
Victor Baranov Date: Mon, 8 Jul 2024 11:47:00 +0300 Subject: [PATCH 09/32] Fix gas price oracle flickering test (#10381) --- apps/explorer/lib/explorer/chain/cache/gas_price_oracle.ex | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apps/explorer/lib/explorer/chain/cache/gas_price_oracle.ex b/apps/explorer/lib/explorer/chain/cache/gas_price_oracle.ex index 756f5502fa15..5716fa113bfc 100644 --- a/apps/explorer/lib/explorer/chain/cache/gas_price_oracle.ex +++ b/apps/explorer/lib/explorer/chain/cache/gas_price_oracle.ex @@ -269,7 +269,7 @@ defmodule Explorer.Chain.Cache.GasPriceOracle do defp fiat_fee(fee, exchange_rate) do fee && - exchange_rate.usd_value && + exchange_rate.usd_value && simple_transaction_gas() && fee |> Wei.to(:ether) |> Decimal.mult(exchange_rate.usd_value) From a5a7ebbba20bdaf8bdb1de6f58c617ebb92bb076 Mon Sep 17 00:00:00 2001 From: Victor Baranov Date: Mon, 8 Jul 2024 12:07:37 +0300 Subject: [PATCH 10/32] fix exchange rate flickering test (#10383) --- .../controllers/api/v2/stats_controller_test.exs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/apps/block_scout_web/test/block_scout_web/controllers/api/v2/stats_controller_test.exs b/apps/block_scout_web/test/block_scout_web/controllers/api/v2/stats_controller_test.exs index 85ac41e1d14d..907569ccb33a 100644 --- a/apps/block_scout_web/test/block_scout_web/controllers/api/v2/stats_controller_test.exs +++ b/apps/block_scout_web/test/block_scout_web/controllers/api/v2/stats_controller_test.exs @@ -37,6 +37,17 @@ defmodule BlockScoutWeb.API.V2.StatsControllerTest do end describe "/stats/charts/market" do + setup do + configuration = Application.get_env(:explorer, Explorer.ExchangeRates) + Application.put_env(:explorer, Explorer.ExchangeRates, enabled: false) + + :ok + + on_exit(fn -> + Application.put_env(:explorer, Explorer.ExchangeRates, configuration) + end) + end + test "get empty data", %{conn: conn} do request = get(conn, "/api/v2/stats/charts/market") assert response = json_response(request, 200) From 3c268d21962883a09d7410b1867c2c4c2bdc548e Mon Sep 17 00:00:00 2001 From: Alexander Kolotov Date: Mon, 8 Jul 2024 05:46:50 -0600 Subject: [PATCH 11/32] feat: AnyTrust and Celestia support as DA for Arbitrum batches (#10144) * Initial version of x-level messages indexer * fixes for cspell and credo * new state of x-level messages * Monitoring of new L1-to-L2 messages on L1 * new batches discovery * fetcher workers in separate modules * proper name * Fix for responses without "id", e.g. 
"Too Many Requests" * update DB with new batches and corresponding data * update DB with confirmed blocks * fixes for cspell and credo * tracking commitments confirmations for L1 to L2 messages * Proper usign of max function * tracking completion of L2 to L1 messages * catchup historical messages to L2 * incorrect version of committed file * catchup historical messages from L2 and completion of L1-to-L2 messages * historical batches catchup * status for historical l2-to-l1 messages * address matching issue * catchup historical executions of L2-to-L1 messages * db query to find unconfirmed blocks gaps * first changes to catchup historical confirmations * finalized catchup of historical confirmations * 4844 blobs support * fix for the issue with multiple confirmations * limit amount of batches to handle at once * Use latest L1 block by fetchers if start block is not configured * merge issue fix * missed file * historical messages discovery * reduce logs severity * first iteration to improve documentation for new functionality * second iteration to improve documentation for new functionality * third iteration to improve documentation for new functionality * fourth iteration to improve documentation for new functionality * fifth iteration to improve documentation for new functionality * final iteration to improve documentation for new functionality * Arbitrum related info in Transaction and Block views * Views to get info about batches and messages * usage of committed for batches instead of confirmed * merge issues addressed * merge issues addressed * code review issues addressed * code review issues addressed * fix merge issue * raising exception in the case of DB inconsistency * fix formatting issue * termination case for RollupMessagesCatchup * code review comments addressed * code review comments addressed * consistency in primary keys * dialyzer fix * code review comments addressed * missed doc comment * code review comments addressed * changes after merge * formatting issue fix * block and transaction views extended * updated indices creation as per code review comments * code review comment addressed * fix merge issue * configuration of intervals as time variables * TODO added to reflect improvement ability * database fields refactoring * association renaming * associations and fields in api response renamed * format issue addressed * feat: APIv2 endpoints for Arbitrum messages and batches (#9963) * Arbitrum related info in Transaction and Block views * Views to get info about batches and messages * usage of committed for batches instead of confirmed * merge issues addressed * changes after merge * formatting issue fix * code review comment addressed * associations and fields in api response renamed * format issue addressed * feat: Arbitrum-specific fields in the block and transaction API endpoints (#10067) * Arbitrum related info in Transaction and Block views * Views to get info about batches and messages * usage of committed for batches instead of confirmed * merge issues addressed * changes after merge * formatting issue fix * block and transaction views extended * code review comment addressed * associations and fields in api response renamed * format issue addressed * fix credo issue * fix tests issues * ethereumjsonrpc test fail investigation * test issues fixes * initial version to get DA infromation from batch transactions * merge issues fix * keep discovered da information in db * show the batch data source in API response * formatting, spelling and credo issues * 
Documentation and specs improved * covered a case with empty extra data * API endpoints updated * changed order of params for celestia * more robust string hash identification * duplcitated alias removed * missed field in the type documentation * mapset used instead of map * comments for unfolding results of getKeysetCreationBlock call * common function to get data key for Celestia blobs --- .../controllers/api/v2/arbitrum_controller.ex | 40 +- .../lib/block_scout_web/routers/api_router.ex | 2 + .../views/api/v2/arbitrum_view.ex | 141 +++++- .../explorer/chain/arbitrum/batch_block.ex | 3 +- .../chain/arbitrum/da_multi_purpose_record.ex | 105 +++++ .../lib/explorer/chain/arbitrum/l1_batch.ex | 16 +- .../lib/explorer/chain/arbitrum/reader.ex | 117 ++++- .../arbitrum/da_multi_purpose_records.ex | 106 +++++ .../import/runner/arbitrum/l1_batches.ex | 6 +- .../chain/import/stage/block_referencing.ex | 3 +- .../migrations/20240527212653_add_da_info.exs | 25 ++ .../indexer/fetcher/arbitrum/da/anytrust.ex | 414 ++++++++++++++++++ .../indexer/fetcher/arbitrum/da/celestia.ex | 113 +++++ .../lib/indexer/fetcher/arbitrum/da/common.ex | 143 ++++++ .../lib/indexer/fetcher/arbitrum/utils/db.ex | 67 ++- .../indexer/fetcher/arbitrum/utils/helper.ex | 14 + .../lib/indexer/fetcher/arbitrum/utils/rpc.ex | 55 +++ .../fetcher/arbitrum/workers/new_batches.ex | 228 +++++++--- .../lib/indexer/fetcher/zksync/utils/rpc.ex | 24 +- apps/indexer/lib/indexer/helper.ex | 32 ++ cspell.json | 3 + 21 files changed, 1510 insertions(+), 147 deletions(-) create mode 100644 apps/explorer/lib/explorer/chain/arbitrum/da_multi_purpose_record.ex create mode 100644 apps/explorer/lib/explorer/chain/import/runner/arbitrum/da_multi_purpose_records.ex create mode 100644 apps/explorer/priv/arbitrum/migrations/20240527212653_add_da_info.exs create mode 100644 apps/indexer/lib/indexer/fetcher/arbitrum/da/anytrust.ex create mode 100644 apps/indexer/lib/indexer/fetcher/arbitrum/da/celestia.ex create mode 100644 apps/indexer/lib/indexer/fetcher/arbitrum/da/common.ex diff --git a/apps/block_scout_web/lib/block_scout_web/controllers/api/v2/arbitrum_controller.ex b/apps/block_scout_web/lib/block_scout_web/controllers/api/v2/arbitrum_controller.ex index 3230371b2da8..e6faabdc8f1c 100644 --- a/apps/block_scout_web/lib/block_scout_web/controllers/api/v2/arbitrum_controller.ex +++ b/apps/block_scout_web/lib/block_scout_web/controllers/api/v2/arbitrum_controller.ex @@ -5,15 +5,18 @@ defmodule BlockScoutWeb.API.V2.ArbitrumController do only: [ next_page_params: 4, paging_options: 1, - split_list_by_page: 1 + split_list_by_page: 1, + parse_block_hash_or_number_param: 1 ] + import Explorer.Chain.Arbitrum.DaMultiPurposeRecord.Helper, only: [calculate_celestia_data_key: 2] + alias Explorer.PagingOptions alias Explorer.Chain.Arbitrum.{L1Batch, Message, Reader} action_fallback(BlockScoutWeb.API.V2.FallbackController) - @batch_necessity_by_association %{:commitment_transaction => :optional} + @batch_necessity_by_association %{:commitment_transaction => :required} @doc """ Function to handle GET requests to `/api/v2/arbitrum/messages/:direction` endpoint. @@ -76,6 +79,39 @@ defmodule BlockScoutWeb.API.V2.ArbitrumController do end end + @doc """ + Function to handle GET requests to `/api/v2/arbitrum/batches/da/:data_hash` or + `/api/v2/arbitrum/batches/da/:tx_commitment/:height` endpoints. 
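+
+    Hypothetical example requests, following the routes registered in
+    `BlockScoutWeb.Routers.ApiRouter` (the path parameters are placeholders):
+
+        GET /api/v2/arbitrum/batches/da/anytrust/<data_hash>
+        GET /api/v2/arbitrum/batches/da/celestia/<height>/<tx_commitment>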
+ """ + @spec batch_by_data_availability_info(Plug.Conn.t(), map()) :: Plug.Conn.t() + def batch_by_data_availability_info(conn, %{"data_hash" => data_hash} = _params) do + # In case of AnyTrust, `data_key` is the hash of the data itself + case Reader.get_da_record_by_data_key(data_hash, api?: true) do + {:ok, {batch_number, _}} -> + batch(conn, %{"batch_number" => batch_number}) + + {:error, :not_found} = res -> + res + end + end + + def batch_by_data_availability_info(conn, %{"tx_commitment" => tx_commitment, "height" => height} = _params) do + # In case of Celestia, `data_key` is the hash of the height and the commitment hash + with {:ok, :hash, tx_commitment_hash} <- parse_block_hash_or_number_param(tx_commitment), + key <- calculate_celestia_data_key(height, tx_commitment_hash) do + case Reader.get_da_record_by_data_key(key, api?: true) do + {:ok, {batch_number, _}} -> + batch(conn, %{"batch_number" => batch_number}) + + {:error, :not_found} = res -> + res + end + else + res -> + res + end + end + @doc """ Function to handle GET requests to `/api/v2/arbitrum/batches/count` endpoint. """ diff --git a/apps/block_scout_web/lib/block_scout_web/routers/api_router.ex b/apps/block_scout_web/lib/block_scout_web/routers/api_router.ex index a2f6bd9d4b83..a3ae9fda52fe 100644 --- a/apps/block_scout_web/lib/block_scout_web/routers/api_router.ex +++ b/apps/block_scout_web/lib/block_scout_web/routers/api_router.ex @@ -332,6 +332,8 @@ defmodule BlockScoutWeb.Routers.ApiRouter do get("/batches", V2.ArbitrumController, :batches) get("/batches/count", V2.ArbitrumController, :batches_count) get("/batches/:batch_number", V2.ArbitrumController, :batch) + get("/batches/da/anytrust/:data_hash", V2.ArbitrumController, :batch_by_data_availability_info) + get("/batches/da/celestia/:height/:tx_commitment", V2.ArbitrumController, :batch_by_data_availability_info) end end diff --git a/apps/block_scout_web/lib/block_scout_web/views/api/v2/arbitrum_view.ex b/apps/block_scout_web/lib/block_scout_web/views/api/v2/arbitrum_view.ex index 185a00da1a69..b9414e6fdecf 100644 --- a/apps/block_scout_web/lib/block_scout_web/views/api/v2/arbitrum_view.ex +++ b/apps/block_scout_web/lib/block_scout_web/views/api/v2/arbitrum_view.ex @@ -3,7 +3,7 @@ defmodule BlockScoutWeb.API.V2.ArbitrumView do alias BlockScoutWeb.API.V2.Helper, as: APIV2Helper alias Explorer.Chain.{Block, Hash, Transaction, Wei} - alias Explorer.Chain.Arbitrum.{L1Batch, LifecycleTransaction} + alias Explorer.Chain.Arbitrum.{L1Batch, LifecycleTransaction, Reader} @doc """ Function to render GET requests to `/api/v2/arbitrum/messages/:direction` endpoint. 
@@ -71,6 +71,7 @@ defmodule BlockScoutWeb.API.V2.ArbitrumView do "after_acc" => batch.after_acc } |> add_l1_tx_info(batch) + |> add_da_info(batch) end @doc """ @@ -128,13 +129,8 @@ defmodule BlockScoutWeb.API.V2.ArbitrumView do :transactions_count => non_neg_integer(), :start_block => non_neg_integer(), :end_block => non_neg_integer(), - :commitment_transaction => %{ - :hash => binary(), - :block_number => non_neg_integer(), - :timestamp => DateTime.t(), - :status => :finalized | :unfinalized, - optional(any()) => any() - }, + :batch_container => atom() | nil, + :commitment_transaction => LifecycleTransaction.to_import(), optional(any()) => any() } ] @@ -162,13 +158,8 @@ defmodule BlockScoutWeb.API.V2.ArbitrumView do :transactions_count => non_neg_integer(), :start_block => non_neg_integer(), :end_block => non_neg_integer(), - :commitment_transaction => %{ - :hash => binary(), - :block_number => non_neg_integer(), - :timestamp => DateTime.t(), - :status => :finalized | :unfinalized, - optional(any()) => any() - }, + :batch_container => atom() | nil, + :commitment_transaction => LifecycleTransaction.to_import(), optional(any()) => any() } ) :: map() @@ -176,7 +167,8 @@ defmodule BlockScoutWeb.API.V2.ArbitrumView do %{ "number" => batch.number, "transactions_count" => batch.transactions_count, - "blocks_count" => batch.end_block - batch.start_block + 1 + "blocks_count" => batch.end_block - batch.start_block + 1, + "batch_data_container" => batch.batch_container } |> add_l1_tx_info(batch) end @@ -258,6 +250,7 @@ defmodule BlockScoutWeb.API.V2.ArbitrumView do commitment_transaction: arbitrum_entity.arbitrum_commitment_transaction, confirmation_transaction: arbitrum_entity.arbitrum_confirmation_transaction }) + |> Map.put("batch_data_container", get_batch_data_container(arbitrum_entity)) |> Map.put("batch_number", get_batch_number(arbitrum_entity)) end @@ -276,6 +269,21 @@ defmodule BlockScoutWeb.API.V2.ArbitrumView do end end + # Retrieves the batch data container label from an Arbitrum block or transaction + # if the batch data is loaded. + @spec get_batch_data_container(%{ + :__struct__ => Block | Transaction, + :arbitrum_batch => any(), + optional(any()) => any() + }) :: nil | String.t() + defp get_batch_data_container(arbitrum_entity) do + case Map.get(arbitrum_entity, :arbitrum_batch) do + nil -> nil + %Ecto.Association.NotLoaded{} -> nil + value -> to_string(value.batch_container) + end + end + # Augments an output JSON with commit transaction details and its status. @spec add_l1_tx_info(map(), %{ :commitment_transaction => LifecycleTransaction.t() | LifecycleTransaction.to_import(), @@ -314,6 +322,107 @@ defmodule BlockScoutWeb.API.V2.ArbitrumView do }) end + # Adds data availability (DA) information to the given output JSON based on the batch container type. + # + # This function enriches the output JSON with data availability information based on + # the type of batch container. It handles different DA types, including AnyTrust and + # Celestia, and generates the appropriate DA data for inclusion in the output. + # + # ## Parameters + # - `out_json`: The initial JSON map to be enriched with DA information. + # - `batch`: The batch struct containing information about the rollup batch. + # + # ## Returns + # - An updated JSON map containing the data availability information. 
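+  #
+  # A sketch of the resulting entry for a Celestia batch (the values are
+  # hypothetical; the keys mirror `generate_celestia_da_info/1` below):
+  #
+  #     %{"data_availability" => %{"batch_data_container" => "in_celestia",
+  #       "height" => 1_234_567, "tx_commitment" => "0xabc"}}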
+ @spec add_da_info(map(), %{
+ :__struct__ => L1Batch,
+ :batch_container => :in_anytrust | :in_celestia | atom() | nil,
+ :number => non_neg_integer(),
+ optional(any()) => any()
+ }) :: map()
+ defp add_da_info(out_json, %L1Batch{} = batch) do
+ da_info =
+ case batch.batch_container do
+ nil -> %{"batch_data_container" => nil}
+ :in_anytrust -> generate_anytrust_certificate(batch.number)
+ :in_celestia -> generate_celestia_da_info(batch.number)
+ value -> %{"batch_data_container" => to_string(value)}
+ end
+
+ out_json
+ |> Map.put("data_availability", da_info)
+ end
+
+ # Generates an AnyTrust certificate for the specified batch number.
+ @spec generate_anytrust_certificate(non_neg_integer()) :: map()
+ defp generate_anytrust_certificate(batch_number) do
+ out = %{"batch_data_container" => "in_anytrust"}
+
+ da_info =
+ with raw_info <- Reader.get_da_info_by_batch_number(batch_number),
+ false <- Enum.empty?(raw_info) do
+ prepare_anytrust_certificate(raw_info)
+ else
+ _ -> %{"data_hash" => nil, "timeout" => nil, "bls_signature" => nil, "signers" => []}
+ end
+
+ out
+ |> Map.merge(da_info)
+ end
+
+ # Prepares an AnyTrust certificate from the given DA information.
+ #
+ # This function retrieves the corresponding AnyTrust keyset based on the provided
+ # DA information, uses the signers' mask to build the list of signers, and
+ # assembles the certificate data.
+ #
+ # ## Parameters
+ # - `da_info`: A map containing the DA information, including the keyset hash, data
+ # hash, timeout, aggregated BLS signature, and signers' mask.
+ #
+ # ## Returns
+ # - A map representing the AnyTrust certificate, containing the data hash, data
+ # availability timeout, aggregated BLS signature, and the list of committee
+ # members who guaranteed availability of data for the specified timeout.
+ @spec prepare_anytrust_certificate(map()) :: map()
+ defp prepare_anytrust_certificate(da_info) do
+ keyset = Reader.get_anytrust_keyset(da_info["keyset_hash"])
+
+ signers =
+ if Enum.empty?(keyset) do
+ []
+ else
+ signers_mask = da_info["signers_mask"]
+
+ # Matches the signers' mask with the keyset to extract the list of signers.
+ keyset["pubkeys"]
+ |> Enum.with_index()
+ |> Enum.filter(fn {_, index} -> Bitwise.band(signers_mask, Bitwise.bsl(1, index)) != 0 end)
+ |> Enum.map(fn {pubkey, _} -> pubkey end)
+ end
+
+ %{
+ "data_hash" => da_info["data_hash"],
+ "timeout" => da_info["timeout"],
+ "bls_signature" => da_info["bls_signature"],
+ "signers" => signers
+ }
+ end
+
+ # Generates Celestia DA information for the given batch number.
+ @spec generate_celestia_da_info(non_neg_integer()) :: map()
+ defp generate_celestia_da_info(batch_number) do
+ out = %{"batch_data_container" => "in_celestia"}
+
+ da_info = Reader.get_da_info_by_batch_number(batch_number)
+
+ out
+ |> Map.merge(%{
+ "height" => Map.get(da_info, "height"),
+ "tx_commitment" => Map.get(da_info, "tx_commitment")
+ })
+ end
+
 # Augments an output JSON with commit and confirm transaction details and their statuses.
 @spec add_l1_txs_info_and_status(map(), %{
 :commitment_transaction => any(),
diff --git a/apps/explorer/lib/explorer/chain/arbitrum/batch_block.ex b/apps/explorer/lib/explorer/chain/arbitrum/batch_block.ex
index ab1ed9db994c..cd5151de68a0 100644
--- a/apps/explorer/lib/explorer/chain/arbitrum/batch_block.ex
+++ b/apps/explorer/lib/explorer/chain/arbitrum/batch_block.ex
@@ -32,8 +32,9 @@ defmodule Explorer.Chain.Arbitrum.BatchBlock do
 }
 
 @typedoc """
- * `batch_number` - The number of the Arbitrum batch.
* `block_number` - The number of the rollup block.
+ * `batch_number` - The number of the Arbitrum batch.
+ * `batch` - An instance of `Explorer.Chain.Arbitrum.L1Batch` referenced by `batch_number`.
 * `confirmation_id` - The ID of the confirmation L1 transaction from
 `Explorer.Chain.Arbitrum.LifecycleTransaction`, or `nil` if the block is not
 confirmed yet.
diff --git a/apps/explorer/lib/explorer/chain/arbitrum/da_multi_purpose_record.ex b/apps/explorer/lib/explorer/chain/arbitrum/da_multi_purpose_record.ex
new file mode 100644
index 000000000000..5cbc89afdcf9
--- /dev/null
+++ b/apps/explorer/lib/explorer/chain/arbitrum/da_multi_purpose_record.ex
@@ -0,0 +1,105 @@
+defmodule Explorer.Chain.Arbitrum.DaMultiPurposeRecord do
+ @moduledoc """
+ Models a multi-purpose record related to Data Availability for Arbitrum.
+
+ Changes in the schema should be reflected in the bulk import module:
+ - Explorer.Chain.Import.Runner.Arbitrum.DAMultiPurposeRecords
+
+ Migrations:
+ - Explorer.Repo.Arbitrum.Migrations.AddDaInfo
+ """
+
+ use Explorer.Schema
+
+ alias Explorer.Chain.Hash
+
+ alias Explorer.Chain.Arbitrum.L1Batch
+
+ @optional_attrs ~w(batch_number)a
+
+ @required_attrs ~w(data_key data_type data)a
+
+ @allowed_attrs @optional_attrs ++ @required_attrs
+
+ @typedoc """
+ Descriptor of a multi-purpose record related to Data Availability for Arbitrum rollups:
+ * `data_key` - The hash of the data key.
+ * `data_type` - The type of the data.
+ * `data` - The data of the record.
+ * `batch_number` - The number of the Arbitrum batch associated with the data for the
+ records where applicable.
+ """
+ @type to_import :: %{
+ data_key: binary(),
+ data_type: non_neg_integer(),
+ data: map(),
+ batch_number: non_neg_integer() | nil
+ }
+
+ @typedoc """
+ * `data_key` - The hash of the data key.
+ * `data_type` - The type of the data.
+ * `data` - The data to be stored as JSON in the database.
+ * `batch_number` - The number of the Arbitrum batch associated with the data for the
+ records where applicable.
+ * `batch` - An instance of `Explorer.Chain.Arbitrum.L1Batch` referenced by `batch_number`.
+ """
+ @primary_key false
+ typed_schema "arbitrum_da_multi_purpose" do
+ field(:data_key, Hash.Full)
+ field(:data_type, :integer)
+ field(:data, :map)
+
+ belongs_to(:batch, L1Batch,
+ foreign_key: :batch_number,
+ references: :number,
+ type: :integer
+ )
+
+ timestamps()
+ end
+
+ @doc """
+ Validates that the `attrs` are valid.
+ """
+ @spec changeset(Ecto.Schema.t(), map()) :: Ecto.Schema.t()
+ def changeset(%__MODULE__{} = da_records, attrs \\ %{}) do
+ da_records
+ |> cast(attrs, @allowed_attrs)
+ |> validate_required(@required_attrs)
+ |> foreign_key_constraint(:batch_number)
+ |> unique_constraint(:data_key)
+ end
+end
+
+defmodule Explorer.Chain.Arbitrum.DaMultiPurposeRecord.Helper do
+ @moduledoc """
+ Helper functions to work with `Explorer.Chain.Arbitrum.DaMultiPurposeRecord` data
+ """
+
+ alias Explorer.Chain.Hash
+
+ @doc """
+ Calculates the data key for `Explorer.Chain.Arbitrum.DaMultiPurposeRecord` that contains Celestia blob data.
+
+ ## Parameters
+ - `height`: The height of the block in the Celestia network.
+ - `tx_commitment`: The transaction commitment.
+
+ ## Returns
+ - A binary representing the calculated data key for the record containing
+ Celestia blob data.
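+
+ ## Example
+
+ A minimal sketch with illustrative values; the key is the SHA256 digest of
+ the big-endian byte representation of the height concatenated with the
+ 32-byte commitment:
+
+     height = 100_500
+     tx_commitment = <<1::256>>
+     data_key = calculate_celestia_data_key(height, tx_commitment)
+     # data_key is a 32-byte binary suitable for the `data_key` field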
+ """ + @spec calculate_celestia_data_key(binary() | non_neg_integer(), binary() | Explorer.Chain.Hash.t()) :: binary() + def calculate_celestia_data_key(height, tx_commitment) when is_binary(height) do + calculate_celestia_data_key(String.to_integer(height), tx_commitment) + end + + def calculate_celestia_data_key(height, %Hash{} = tx_commitment) when is_integer(height) do + calculate_celestia_data_key(height, tx_commitment.bytes) + end + + def calculate_celestia_data_key(height, tx_commitment) when is_integer(height) and is_binary(tx_commitment) do + :crypto.hash(:sha256, :binary.encode_unsigned(height) <> tx_commitment) + end +end diff --git a/apps/explorer/lib/explorer/chain/arbitrum/l1_batch.ex b/apps/explorer/lib/explorer/chain/arbitrum/l1_batch.ex index b41402acb980..f99ce884143f 100644 --- a/apps/explorer/lib/explorer/chain/arbitrum/l1_batch.ex +++ b/apps/explorer/lib/explorer/chain/arbitrum/l1_batch.ex @@ -7,6 +7,7 @@ defmodule Explorer.Chain.Arbitrum.L1Batch do Migrations: - Explorer.Repo.Arbitrum.Migrations.CreateArbitrumTables + - Explorer.Repo.Arbitrum.Migrations.AddDaInfo """ use Explorer.Schema @@ -15,8 +16,12 @@ defmodule Explorer.Chain.Arbitrum.L1Batch do alias Explorer.Chain.Arbitrum.LifecycleTransaction + @optional_attrs ~w(batch_container)a + @required_attrs ~w(number transactions_count start_block end_block before_acc after_acc commitment_id)a + @allowed_attrs @optional_attrs ++ @required_attrs + @typedoc """ Descriptor of the a L1 batch for Arbitrum rollups: * `number` - The number of the Arbitrum batch. @@ -25,7 +30,8 @@ defmodule Explorer.Chain.Arbitrum.L1Batch do * `end_block` - The number of the last block in the batch. * `before_acc` - The hash of the state before the batch. * `after_acc` - The hash of the state after the batch. - * `commitment_id` - The ID of the commitment L1 transaction from Explorer.Chain.LifecycleTransaction. + * `commitment_id` - The ID of the commitment L1 transaction from Explorer.Chain.Arbitrum.LifecycleTransaction. + * `batch_container` - The tag meaning the container of the batch data: `:in_blob4844`, `:in_calldata`, `:in_celestia`, `:in_anytrust` """ @type to_import :: %{ number: non_neg_integer(), @@ -34,7 +40,8 @@ defmodule Explorer.Chain.Arbitrum.L1Batch do end_block: non_neg_integer(), before_acc: binary(), after_acc: binary(), - commitment_id: non_neg_integer() + commitment_id: non_neg_integer(), + batch_container: :in_blob4844 | :in_calldata | :in_celestia | :in_anytrust } @typedoc """ @@ -46,6 +53,7 @@ defmodule Explorer.Chain.Arbitrum.L1Batch do * `after_acc` - The hash of the state after the batch. * `commitment_id` - The ID of the commitment L1 transaction from `Explorer.Chain.Arbitrum.LifecycleTransaction`. * `commitment_transaction` - An instance of `Explorer.Chain.Arbitrum.LifecycleTransaction` referenced by `commitment_id`. 
+ * `batch_container` - The tag indicating where the batch data is stored: `:in_blob4844`, `:in_calldata`, `:in_celestia`, `:in_anytrust`
 """
 @primary_key {:number, :integer, autogenerate: false}
 typed_schema "arbitrum_l1_batches" do
@@ -61,6 +69,8 @@ defmodule Explorer.Chain.Arbitrum.L1Batch do
 type: :integer
 )
 
+ field(:batch_container, Ecto.Enum, values: [:in_blob4844, :in_calldata, :in_celestia, :in_anytrust])
+
 timestamps()
 end
 
@@ -70,7 +80,7 @@ defmodule Explorer.Chain.Arbitrum.L1Batch do
 @spec changeset(Ecto.Schema.t(), map()) :: Ecto.Schema.t()
 def changeset(%__MODULE__{} = batches, attrs \\ %{}) do
 batches
- |> cast(attrs, @required_attrs)
+ |> cast(attrs, @allowed_attrs)
 |> validate_required(@required_attrs)
 |> foreign_key_constraint(:commitment_id)
 |> unique_constraint(:number)
diff --git a/apps/explorer/lib/explorer/chain/arbitrum/reader.ex b/apps/explorer/lib/explorer/chain/arbitrum/reader.ex
index 822072b544e3..da2fe3a18cff 100644
--- a/apps/explorer/lib/explorer/chain/arbitrum/reader.ex
+++ b/apps/explorer/lib/explorer/chain/arbitrum/reader.ex
@@ -6,7 +6,15 @@ defmodule Explorer.Chain.Arbitrum.Reader do
 import Ecto.Query, only: [from: 2, limit: 2, order_by: 2, subquery: 1, where: 2, where: 3]
 import Explorer.Chain, only: [select_repo: 1]
 
- alias Explorer.Chain.Arbitrum.{BatchBlock, BatchTransaction, L1Batch, L1Execution, LifecycleTransaction, Message}
+ alias Explorer.Chain.Arbitrum.{
+ BatchBlock,
+ BatchTransaction,
+ DaMultiPurposeRecord,
+ L1Batch,
+ L1Execution,
+ LifecycleTransaction,
+ Message
+ }
 
 alias Explorer.{Chain, PagingOptions, Repo}
 
@@ -232,7 +240,7 @@ defmodule Explorer.Chain.Arbitrum.Reader do
 the input list. The output list may be smaller than the input list if some IDs do not
 correspond to any existing transactions.
 """
- @spec l1_executions(maybe_improper_list(non_neg_integer(), [])) :: [L1Execution]
+ @spec l1_executions(maybe_improper_list(non_neg_integer(), [])) :: [L1Execution.t()]
 def l1_executions(message_ids) when is_list(message_ids) do
 query =
 from(
@@ -287,7 +295,7 @@ defmodule Explorer.Chain.Arbitrum.Reader do
 - A list of `Explorer.Chain.Arbitrum.LifecycleTransaction` representing unfinalized
 transactions, or `[]` if no unfinalized transactions are found.
 """
- @spec lifecycle_unfinalized_transactions(FullBlock.block_number()) :: [LifecycleTransaction]
+ @spec lifecycle_unfinalized_transactions(FullBlock.block_number()) :: [LifecycleTransaction.t()]
 def lifecycle_unfinalized_transactions(finalized_block)
 when is_integer(finalized_block) and finalized_block >= 0 do
 query =
@@ -361,7 +369,7 @@ defmodule Explorer.Chain.Arbitrum.Reader do
 - An instance of `Explorer.Chain.Arbitrum.L1Batch` representing the batch containing
 the specified rollup block number, or `nil` if no corresponding batch is found.
 """
- @spec get_batch_by_rollup_block_number(FullBlock.block_number()) :: L1Batch | nil
+ @spec get_batch_by_rollup_block_number(FullBlock.block_number()) :: L1Batch.t() | nil
 def get_batch_by_rollup_block_number(number)
 when is_integer(number) and number >= 0 do
 query =
@@ -491,7 +499,7 @@ defmodule Explorer.Chain.Arbitrum.Reader do
 unconfirmed block within the range. Returns `[]` if no unconfirmed blocks are found
 within the range, or if the block fetcher has not indexed them.
""" - @spec unconfirmed_rollup_blocks(FullBlock.block_number(), FullBlock.block_number()) :: [BatchBlock] + @spec unconfirmed_rollup_blocks(FullBlock.block_number(), FullBlock.block_number()) :: [BatchBlock.t()] def unconfirmed_rollup_blocks(first_block, last_block) when is_integer(first_block) and first_block >= 0 and is_integer(last_block) and first_block <= last_block do @@ -695,7 +703,7 @@ defmodule Explorer.Chain.Arbitrum.Reader do @spec messages(binary(), paging_options: PagingOptions.t(), api?: boolean() - ) :: [Message] + ) :: [Message.t()] def messages(direction, options) when direction == "from-rollup" do do_messages(:from_l2, options) end @@ -720,7 +728,7 @@ defmodule Explorer.Chain.Arbitrum.Reader do @spec do_messages(:from_l2 | :to_l2, paging_options: PagingOptions.t(), api?: boolean() - ) :: [Message] + ) :: [Message.t()] defp do_messages(direction, options) do base_query = from(msg in Message, @@ -756,7 +764,7 @@ defmodule Explorer.Chain.Arbitrum.Reader do @spec relayed_l1_to_l2_messages( paging_options: PagingOptions.t(), api?: boolean() - ) :: [Message] + ) :: [Message.t()] def relayed_l1_to_l2_messages(options) do paging_options = Keyword.get(options, :paging_options, Chain.default_paging_options()) @@ -802,7 +810,7 @@ defmodule Explorer.Chain.Arbitrum.Reader do """ def batch(number, options) - @spec batch(:latest, api?: boolean()) :: {:error, :not_found} | {:ok, L1Batch} + @spec batch(:latest, api?: boolean()) :: {:error, :not_found} | {:ok, L1Batch.t()} def batch(:latest, options) do L1Batch |> order_by(desc: :number) @@ -817,7 +825,7 @@ defmodule Explorer.Chain.Arbitrum.Reader do @spec batch(binary() | non_neg_integer(), necessity_by_association: %{atom() => :optional | :required}, api?: boolean() - ) :: {:error, :not_found} | {:ok, L1Batch} + ) :: {:error, :not_found} | {:ok, L1Batch.t()} def batch(number, options) do necessity_by_association = Keyword.get(options, :necessity_by_association, %{}) @@ -852,7 +860,7 @@ defmodule Explorer.Chain.Arbitrum.Reader do committed?: boolean(), paging_options: PagingOptions.t(), api?: boolean() - ) :: [L1Batch] + ) :: [L1Batch.t()] def batches(options) do necessity_by_association = Keyword.get(options, :necessity_by_association, %{}) @@ -895,7 +903,7 @@ defmodule Explorer.Chain.Arbitrum.Reader do ## Returns - A list of `Explorer.Chain.Arbitrum.BatchTransaction` entries belonging to the specified batch. """ - @spec batch_transactions(non_neg_integer() | binary(), api?: boolean()) :: [BatchTransaction] + @spec batch_transactions(non_neg_integer() | binary(), api?: boolean()) :: [BatchTransaction.t()] def batch_transactions(batch_number, options) do query = from(tx in BatchTransaction, where: tx.batch_number == ^batch_number) @@ -921,7 +929,7 @@ defmodule Explorer.Chain.Arbitrum.Reader do necessity_by_association: %{atom() => :optional | :required}, api?: boolean(), paging_options: PagingOptions.t() - ) :: [FullBlock] + ) :: [FullBlock.t()] def batch_blocks(batch_number, options) do necessity_by_association = Keyword.get(options, :necessity_by_association, %{}) paging_options = Keyword.get(options, :paging_options, Chain.default_paging_options()) @@ -949,4 +957,87 @@ defmodule Explorer.Chain.Arbitrum.Reader do defp page_blocks(query, %PagingOptions{key: {block_number}}) do where(query, [block], block.number < ^block_number) end + + @doc """ + Retrieves an AnyTrust keyset from the database using the provided keyset hash. + + ## Parameters + - `keyset_hash`: A binary representing the hash of the keyset to be retrieved. 
+
+ ## Returns
+ - A map containing information about the AnyTrust keyset, otherwise an empty map.
+ """
+ @spec get_anytrust_keyset(binary()) :: map()
+ def get_anytrust_keyset("0x" <> <<_::binary-size(64)>> = keyset_hash) do
+ get_anytrust_keyset(keyset_hash |> Chain.string_to_block_hash() |> Kernel.elem(1) |> Map.get(:bytes))
+ end
+
+ def get_anytrust_keyset(keyset_hash) do
+ query =
+ from(
+ da_records in DaMultiPurposeRecord,
+ where: da_records.data_key == ^keyset_hash and da_records.data_type == 1
+ )
+
+ case Repo.one(query) do
+ nil -> %{}
+ keyset -> keyset.data
+ end
+ end
+
+ @doc """
+ Retrieves Data Availability (DA) information from the database using the provided
+ batch number.
+
+ ## Parameters
+ - `batch_number`: The batch number to be used for retrieval.
+
+ ## Returns
+ - A map containing the DA information if found, otherwise an empty map.
+ """
+ @spec get_da_info_by_batch_number(non_neg_integer()) :: map()
+ def get_da_info_by_batch_number(batch_number) do
+ query =
+ from(
+ da_records in DaMultiPurposeRecord,
+ where: da_records.batch_number == ^batch_number and da_records.data_type == 0
+ )
+
+ case Repo.one(query) do
+ nil -> %{}
+ record -> record.data
+ end
+ end
+
+ @doc """
+ Retrieves a Data Availability (DA) record from the database using the provided
+ data key.
+
+ ## Parameters
+ - `data_key`: The key of the data to be retrieved.
+
+ ## Returns
+ - `{:ok, {batch_number, da_info}}`, where
+ - `batch_number` is the number of the batch associated with the DA record
+ - `da_info` is a map containing the DA record.
+ - `{:error, :not_found}` if no record with the specified `data_key` exists.
+ """
+ @spec get_da_record_by_data_key(binary(), api?: boolean()) :: {:ok, {non_neg_integer(), map()}} | {:error, :not_found}
+ def get_da_record_by_data_key("0x" <> _ = data_key, options) do
+ data_key_bytes = data_key |> Chain.string_to_block_hash() |> Kernel.elem(1) |> Map.get(:bytes)
+ get_da_record_by_data_key(data_key_bytes, options)
+ end
+
+ def get_da_record_by_data_key(data_key, options) do
+ query =
+ from(
+ da_records in DaMultiPurposeRecord,
+ where: da_records.data_key == ^data_key and da_records.data_type == 0
+ )
+
+ case select_repo(options).one(query) do
+ nil -> {:error, :not_found}
+ record -> {:ok, {record.batch_number, record.data}}
+ end
+ end
 end
diff --git a/apps/explorer/lib/explorer/chain/import/runner/arbitrum/da_multi_purpose_records.ex b/apps/explorer/lib/explorer/chain/import/runner/arbitrum/da_multi_purpose_records.ex
new file mode 100644
index 000000000000..ca8ef10c95ee
--- /dev/null
+++ b/apps/explorer/lib/explorer/chain/import/runner/arbitrum/da_multi_purpose_records.ex
@@ -0,0 +1,106 @@
+defmodule Explorer.Chain.Import.Runner.Arbitrum.DaMultiPurposeRecords do
+ @moduledoc """
+ Bulk imports of Explorer.Chain.Arbitrum.DaMultiPurposeRecord.
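+
+ A sketch of how the runner is reached through the generic import pipeline
+ (the record contents below are hypothetical):
+
+     Explorer.Chain.Import.all(%{
+       arbitrum_da_multi_purpose_records: %{
+         params: [
+           %{data_key: <<0::256>>, data_type: 0, data: %{}, batch_number: 1}
+         ]
+       }
+     })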
+ """ + + require Ecto.Query + + alias Ecto.{Changeset, Multi, Repo} + alias Explorer.Chain.Arbitrum.DaMultiPurposeRecord + alias Explorer.Chain.Import + alias Explorer.Prometheus.Instrumenter + + import Ecto.Query, only: [from: 2] + + @behaviour Import.Runner + + # milliseconds + @timeout 60_000 + + @type imported :: [DaMultiPurposeRecord.t()] + + @impl Import.Runner + def ecto_schema_module, do: DaMultiPurposeRecord + + @impl Import.Runner + def option_key, do: :arbitrum_da_multi_purpose_records + + @impl Import.Runner + @spec imported_table_row() :: %{:value_description => binary(), :value_type => binary()} + def imported_table_row do + %{ + value_type: "[#{ecto_schema_module()}.t()]", + value_description: "List of `t:#{ecto_schema_module()}.t/0`s" + } + end + + @impl Import.Runner + @spec run(Multi.t(), list(), map()) :: Multi.t() + def run(multi, changes_list, %{timestamps: timestamps} = options) do + insert_options = + options + |> Map.get(option_key(), %{}) + |> Map.take(~w(on_conflict timeout)a) + |> Map.put_new(:timeout, @timeout) + |> Map.put(:timestamps, timestamps) + + Multi.run(multi, :insert_da_multi_purpose_records, fn repo, _ -> + Instrumenter.block_import_stage_runner( + fn -> insert(repo, changes_list, insert_options) end, + :block_referencing, + :arbitrum_da_multi_purpose_records, + :arbitrum_da_multi_purpose_records + ) + end) + end + + @impl Import.Runner + def timeout, do: @timeout + + @spec insert(Repo.t(), [map()], %{required(:timeout) => timeout(), required(:timestamps) => Import.timestamps()}) :: + {:ok, [DaMultiPurposeRecord.t()]} + | {:error, [Changeset.t()]} + def insert(repo, changes_list, %{timeout: timeout, timestamps: timestamps} = options) when is_list(changes_list) do + on_conflict = Map.get_lazy(options, :on_conflict, &default_on_conflict/0) + + # Enforce Arbitrum.DaMultiPurposeRecord ShareLocks order (see docs: sharelock.md) + ordered_changes_list = Enum.sort_by(changes_list, & &1.data_key) + + {:ok, inserted} = + Import.insert_changes_list( + repo, + ordered_changes_list, + for: DaMultiPurposeRecord, + returning: true, + timeout: timeout, + timestamps: timestamps, + conflict_target: :data_key, + on_conflict: on_conflict + ) + + {:ok, inserted} + end + + defp default_on_conflict do + from( + rec in DaMultiPurposeRecord, + update: [ + set: [ + # don't update `data_key` as it is a primary key and used for the conflict target + data_type: fragment("EXCLUDED.data_type"), + data: fragment("EXCLUDED.data"), + batch_number: fragment("EXCLUDED.batch_number"), + inserted_at: fragment("LEAST(?, EXCLUDED.inserted_at)", rec.inserted_at), + updated_at: fragment("GREATEST(?, EXCLUDED.updated_at)", rec.updated_at) + ] + ], + where: + fragment( + "(EXCLUDED.data_type, EXCLUDED.data, EXCLUDED.batch_number) IS DISTINCT FROM (?, ?, ?)", + rec.data_type, + rec.data, + rec.batch_number + ) + ) + end +end diff --git a/apps/explorer/lib/explorer/chain/import/runner/arbitrum/l1_batches.ex b/apps/explorer/lib/explorer/chain/import/runner/arbitrum/l1_batches.ex index 8003f94522f1..ddca21b5de95 100644 --- a/apps/explorer/lib/explorer/chain/import/runner/arbitrum/l1_batches.ex +++ b/apps/explorer/lib/explorer/chain/import/runner/arbitrum/l1_batches.ex @@ -93,19 +93,21 @@ defmodule Explorer.Chain.Import.Runner.Arbitrum.L1Batches do before_acc: fragment("EXCLUDED.before_acc"), after_acc: fragment("EXCLUDED.after_acc"), commitment_id: fragment("EXCLUDED.commitment_id"), + batch_container: fragment("EXCLUDED.batch_container"), inserted_at: fragment("LEAST(?, EXCLUDED.inserted_at)", 
tb.inserted_at), updated_at: fragment("GREATEST(?, EXCLUDED.updated_at)", tb.updated_at) ] ], where: fragment( - "(EXCLUDED.transactions_count, EXCLUDED.start_block, EXCLUDED.end_block, EXCLUDED.before_acc, EXCLUDED.after_acc, EXCLUDED.commitment_id) IS DISTINCT FROM (?, ?, ?, ?, ?, ?)", + "(EXCLUDED.transactions_count, EXCLUDED.start_block, EXCLUDED.end_block, EXCLUDED.before_acc, EXCLUDED.after_acc, EXCLUDED.commitment_id, EXCLUDED.batch_container) IS DISTINCT FROM (?, ?, ?, ?, ?, ?, ?)", tb.transactions_count, tb.start_block, tb.end_block, tb.before_acc, tb.after_acc, - tb.commitment_id + tb.commitment_id, + tb.batch_container ) ) end diff --git a/apps/explorer/lib/explorer/chain/import/stage/block_referencing.ex b/apps/explorer/lib/explorer/chain/import/stage/block_referencing.ex index 7ee67fe8565f..049a540d7082 100644 --- a/apps/explorer/lib/explorer/chain/import/stage/block_referencing.ex +++ b/apps/explorer/lib/explorer/chain/import/stage/block_referencing.ex @@ -65,7 +65,8 @@ defmodule Explorer.Chain.Import.Stage.BlockReferencing do Runner.Arbitrum.L1Executions, Runner.Arbitrum.L1Batches, Runner.Arbitrum.BatchBlocks, - Runner.Arbitrum.BatchTransactions + Runner.Arbitrum.BatchTransactions, + Runner.Arbitrum.DaMultiPurposeRecords ] @impl Stage diff --git a/apps/explorer/priv/arbitrum/migrations/20240527212653_add_da_info.exs b/apps/explorer/priv/arbitrum/migrations/20240527212653_add_da_info.exs new file mode 100644 index 000000000000..ee81ae9f74e8 --- /dev/null +++ b/apps/explorer/priv/arbitrum/migrations/20240527212653_add_da_info.exs @@ -0,0 +1,25 @@ +defmodule Explorer.Repo.Arbitrum.Migrations.AddDaInfo do + use Ecto.Migration + + def change do + execute( + "CREATE TYPE arbitrum_da_containers_types AS ENUM ('in_blob4844', 'in_calldata', 'in_celestia', 'in_anytrust')", + "DROP TYPE arbitrum_da_containers_types" + ) + + alter table(:arbitrum_l1_batches) do + add(:batch_container, :arbitrum_da_containers_types) + end + + create table(:arbitrum_da_multi_purpose, primary_key: false) do + add(:data_key, :bytea, null: false, primary_key: true) + add(:data_type, :integer, null: false) + add(:data, :map, null: false) + add(:batch_number, :integer) + timestamps(null: false, type: :utc_datetime_usec) + end + + create(index(:arbitrum_da_multi_purpose, [:data_type, :data_key])) + create(index(:arbitrum_da_multi_purpose, [:data_type, :batch_number])) + end +end diff --git a/apps/indexer/lib/indexer/fetcher/arbitrum/da/anytrust.ex b/apps/indexer/lib/indexer/fetcher/arbitrum/da/anytrust.ex new file mode 100644 index 000000000000..59c401c54ed4 --- /dev/null +++ b/apps/indexer/lib/indexer/fetcher/arbitrum/da/anytrust.ex @@ -0,0 +1,414 @@ +defmodule Indexer.Fetcher.Arbitrum.DA.Anytrust do + @moduledoc """ + Provides functionality for handling AnyTrust data availability information + within the Arbitrum rollup context. + """ + + import Indexer.Fetcher.Arbitrum.Utils.Logging, only: [log_error: 1, log_info: 1, log_debug: 1] + + import Explorer.Helper, only: [decode_data: 2] + + alias Indexer.Fetcher.Arbitrum.Utils.{Db, Rpc} + alias Indexer.Fetcher.Arbitrum.Utils.Helper, as: ArbitrumHelper + alias Indexer.Helper, as: IndexerHelper + + alias Explorer.Chain.Arbitrum + + @enforce_keys [ + :batch_number, + :keyset_hash, + :data_hash, + :timeout, + :signers_mask, + :bls_signature + ] + defstruct @enforce_keys + + @typedoc """ + AnyTrust DA info struct: + * `batch_number` - The batch number in the Arbitrum rollup associated with the + AnyTrust data blob. 
+ * `keyset_hash` - The hash identifying a keyset that defines the rules (threshold + and committee members) to issue the DA certificate. + * `data_hash` - The hash of the data blob stored by the AnyTrust committee. + * `timeout` - Expiration timeout for the data blob. + * `signers_mask` - Mask identifying committee members who guaranteed data availability. + * `bls_signature` - Aggregated BLS signature of the committee members. + """ + @type t :: %__MODULE__{ + batch_number: non_neg_integer(), + keyset_hash: binary(), + data_hash: binary(), + timeout: DateTime.t(), + signers_mask: non_neg_integer(), + bls_signature: binary() + } + + @typedoc """ + AnyTrust DA certificate struct: + * `keyset_hash` - The hash identifying a keyset that defines the rules (threshold + and committee members) to issue the DA certificate. + * `data_hash` - The hash of the data blob stored by the AnyTrust committee. + * `timeout` - Expiration timeout for the data blob. + * `signers_mask` - Mask identifying committee members who guaranteed data availability. + * `bls_signature` - Aggregated BLS signature of the committee members. + """ + @type certificate :: %{ + :keyset_hash => String.t(), + :data_hash => String.t(), + :timeout => DateTime.t(), + :signers_mask => non_neg_integer(), + :bls_signature => String.t() + } + + @typedoc """ + AnyTrust committee member public key struct: + * `trusted` - A boolean indicating whether the member is trusted. + * `key` - The public key of the member. + * `proof` - The proof of the member's public key. + """ + @type signer :: %{ + :trusted => boolean(), + :key => String.t(), + optional(:proof) => String.t() + } + + @typedoc """ + AnyTrust committee struct: + * `threshold` - The threshold of honest members for the keyset. + * `pubkeys` - A list of public keys of the committee members. + """ + @type keyset :: %{ + :threshold => non_neg_integer(), + :pubkeys => [signer()] + } + + # keccak256("SetValidKeyset(bytes32,bytes)") + @set_valid_keyset_event "0xabca9b7986bc22ad0160eb0cb88ae75411eacfba4052af0b457a9335ef655722" + @set_valid_keyset_event_unindexed_params [:bytes] + + @doc """ + Parses batch accompanying data to extract AnyTrust data availability information. + + This function decodes the provided binary data to extract information related to + AnyTrust data availability. + + ## Parameters + - `batch_number`: The batch number associated with the AnyTrust data. + - `binary_data`: The binary data to be parsed, containing AnyTrust data fields. + + ## Returns + - `{:ok, :in_anytrust, da_info}` if the parsing is successful, where `da_info` is + the AnyTrust data availability information struct. + - `{:error, nil, nil}` if the parsing fails. 
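+
+ ## Example
+
+ A minimal sketch with zeroed illustrative fields; a real certificate is the
+ byte sequence emitted by the AnyTrust DAS writer. A `signers_mask` of
+ `0b101` marks committee members 0 and 2 as signers.
+
+     certificate =
+       <<0::256>> <>                # keyset hash, 32 bytes
+         <<0::256>> <>              # data hash, 32 bytes
+         <<1_720_000_000::64>> <>   # timeout
+         <<0>> <>                   # version byte
+         <<0b101::64>> <>           # signers' mask
+         <<0::768>>                 # aggregated BLS signature, 96 bytes
+
+     {:ok, :in_anytrust, da_info} = parse_batch_accompanying_data(123, certificate)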
+ """ + @spec parse_batch_accompanying_data(non_neg_integer(), binary()) :: + {:ok, :in_anytrust, __MODULE__.t()} | {:error, nil, nil} + def parse_batch_accompanying_data(batch_number, << + keyset_hash::binary-size(32), + data_hash::binary-size(32), + timeout::big-unsigned-integer-size(64), + _version::size(8), + signers_mask::big-unsigned-integer-size(64), + bls_signature::binary-size(96) + >>) do + # https://github.com/OffchainLabs/nitro/blob/ad9ab00723e13cf98307b9b65774ad455594ef7b/arbstate/das_reader.go#L95-L151 + {:ok, :in_anytrust, + %__MODULE__{ + batch_number: batch_number, + keyset_hash: keyset_hash, + data_hash: data_hash, + timeout: IndexerHelper.timestamp_to_datetime(timeout), + signers_mask: signers_mask, + bls_signature: bls_signature + }} + end + + def parse_batch_accompanying_data(_, _) do + log_error("Can not parse Anytrust DA message.") + {:error, nil, nil} + end + + @doc """ + Prepares AnyTrust data availability information for import. + + This function prepares a list of data structures for import into the database, + ensuring that AnyTrust DA information and related keysets are included. It + verifies if the keyset associated with the AnyTrust DA certificate is already + known or needs to be fetched from L1. + + To avoid fetching the same keyset multiple times, the function uses a cache. + + ## Parameters + - `source`: The initial list of data to be imported. + - `da_info`: The AnyTrust DA info struct containing details about the data blob. + - `l1_connection_config`: A map containing the address of the Sequencer Inbox contract + and configuration parameters for the JSON RPC connection. + - `cache`: A set of unique elements used to cache the checked keysets. + + ## Returns + - A tuple containing: + - An updated list of data structures ready for import, including the DA + certificate (`data_type` is `0`) and potentially a new keyset (`data_type` + is `1`) if required. + - The updated cache with the checked keysets. + """ + @spec prepare_for_import( + list(), + __MODULE__.t(), + %{ + :sequencer_inbox_address => String.t(), + :json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments() + }, + MapSet.t() + ) :: + {[Arbitrum.DaMultiPurposeRecord.to_import()], MapSet.t()} + def prepare_for_import(source, %__MODULE__{} = da_info, l1_connection_config, cache) do + data = %{ + keyset_hash: ArbitrumHelper.bytes_to_hex_str(da_info.keyset_hash), + data_hash: ArbitrumHelper.bytes_to_hex_str(da_info.data_hash), + timeout: da_info.timeout, + signers_mask: da_info.signers_mask, + bls_signature: ArbitrumHelper.bytes_to_hex_str(da_info.bls_signature) + } + + res = [ + %{ + data_type: 0, + data_key: da_info.data_hash, + data: data, + batch_number: da_info.batch_number + } + ] + + {check_result, keyset_map, updated_cache} = check_if_new_keyset(da_info.keyset_hash, l1_connection_config, cache) + + updated_res = + case check_result do + :new_keyset -> + [ + %{ + data_type: 1, + data_key: da_info.keyset_hash, + data: keyset_map, + batch_number: nil + } + | res + ] + + _ -> + res + end + + {updated_res ++ source, updated_cache} + end + + # Verifies the existence of an AnyTrust committee keyset in the database and fetches it from L1 if not found. + # + # To avoid fetching the same keyset multiple times, the function uses a cache. + # + # ## Parameters + # - `keyset_hash`: A binary representing the hash of the keyset. + # - `l1_connection_config`: A map containing the address of the Sequencer Inbox + # contract and configuration parameters for the JSON RPC + # connection. 
+ # - `cache`: A set of unique elements used to cache the checked keysets. + # + # ## Returns + # - `{:new_keyset, keyset_info, updated_cache}` if the keyset is not found and fetched from L1. + # - `{:existing_keyset, nil, cache}` if the keyset is found in the cache or database. + @spec check_if_new_keyset( + binary(), + %{ + :sequencer_inbox_address => binary(), + :json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments() + }, + MapSet.t() + ) :: + {:new_keyset, __MODULE__.keyset(), MapSet.t()} + | {:existing_keyset, nil, MapSet.t()} + defp check_if_new_keyset(keyset_hash, l1_connection_config, cache) do + if MapSet.member?(cache, keyset_hash) do + {:existing_keyset, nil, cache} + else + updated_cache = MapSet.put(cache, keyset_hash) + + case Db.anytrust_keyset_exists?(keyset_hash) do + true -> + {:existing_keyset, nil, updated_cache} + + false -> + {:new_keyset, get_keyset_info_from_l1(keyset_hash, l1_connection_config), updated_cache} + end + end + end + + # Retrieves and decodes AnyTrust committee keyset information from L1 using the provided keyset hash. + # + # This function fetches the block number when the keyset was applied, retrieves + # the raw keyset data from L1, and decodes it to extract the threshold and public + # keys information. + # + # ## Parameters + # - `keyset_hash`: The hash of the keyset to be retrieved. + # - A map containing: + # - `:sequencer_inbox_address`: The address of the Sequencer Inbox contract. + # - `:json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection. + # + # ## Returns + # - A map describing an AnyTrust committee. + @spec get_keyset_info_from_l1( + binary(), + %{ + :sequencer_inbox_address => binary(), + :json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments() + } + ) :: __MODULE__.keyset() + defp get_keyset_info_from_l1(keyset_hash, %{ + sequencer_inbox_address: sequencer_inbox_address, + json_rpc_named_arguments: json_rpc_named_arguments + }) do + keyset_applied_block_number = + Rpc.get_block_number_for_keyset(sequencer_inbox_address, keyset_hash, json_rpc_named_arguments) + + log_debug("Keyset applied block number: #{keyset_applied_block_number}") + + raw_keyset_data = + get_keyset_raw_data(keyset_hash, keyset_applied_block_number, sequencer_inbox_address, json_rpc_named_arguments) + + decode_keyset(raw_keyset_data) + end + + # Retrieves the raw data of a keyset by querying logs for the `SetValidKeyset` event. + # + # This function fetches logs for the `SetValidKeyset` event within a specific block + # emitted by the Sequencer Inbox contract and extracts the keyset data if available. + # + # ## Parameters + # - `keyset_hash`: The hash of the keyset to retrieve. + # - `block_number`: The block number to search for the logs. + # - `sequencer_inbox_address`: The address of the Sequencer Inbox contract. + # - `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection. + # + # ## Returns + # - The raw data of the keyset if found, otherwise `nil`. 
+ @spec get_keyset_raw_data(
+ binary(),
+ non_neg_integer(),
+ binary(),
+ EthereumJSONRPC.json_rpc_named_arguments()
+ ) :: binary() | nil
+ defp get_keyset_raw_data(keyset_hash, block_number, sequencer_inbox_address, json_rpc_named_arguments) do
+ {:ok, logs} =
+ IndexerHelper.get_logs(
+ block_number,
+ block_number,
+ sequencer_inbox_address,
+ [@set_valid_keyset_event, ArbitrumHelper.bytes_to_hex_str(keyset_hash)],
+ json_rpc_named_arguments
+ )
+
+ if length(logs) > 0 do
+ log_info("Found #{length(logs)} SetValidKeyset logs")
+
+ set_valid_keyset_event_parse(List.first(logs))
+ else
+ log_error("No SetValidKeyset logs found in the block #{block_number}")
+ nil
+ end
+ end
+
+ defp set_valid_keyset_event_parse(event) do
+ [keyset_data] = decode_data(event["data"], @set_valid_keyset_event_unindexed_params)
+
+ keyset_data
+ end
+
+ # Decodes an AnyTrust committee keyset from a binary input.
+ #
+ # This function extracts the threshold of committee members configured for the
+ # keyset and the number of member public keys from the binary input, then decodes
+ # the specified number of public keys.
+ #
+ # Implemented as per: https://github.com/OffchainLabs/nitro/blob/ad9ab00723e13cf98307b9b65774ad455594ef7b/arbstate/das_reader.go#L217-L248
+ #
+ # ## Parameters
+ # - A binary input containing the threshold value, the number of public keys,
+ # and the public keys themselves.
+ #
+ # ## Returns
+ # - A map describing an AnyTrust committee.
+ @spec decode_keyset(binary()) :: __MODULE__.keyset()
+ defp decode_keyset(<<
+ threshold::big-unsigned-integer-size(64),
+ num_keys::big-unsigned-integer-size(64),
+ rest::binary
+ >>)
+ when num_keys <= 64 do
+ {pubkeys, _} = decode_pubkeys(rest, num_keys, [])
+
+ %{
+ threshold: threshold,
+ pubkeys: pubkeys
+ }
+ end
+
+ # Decodes a list of AnyTrust committee member public keys from a binary input.
+ #
+ # This function recursively processes a binary input to extract a specified number
+ # of public keys. Each key is prefixed with its two-byte big-endian length.
+ #
+ # ## Parameters
+ # - `data`: The binary input containing the public keys.
+ # - `num_keys`: The number of public keys to decode.
+ # - `acc`: An accumulator list to collect the decoded public keys.
+ #
+ # ## Returns
+ # - A tuple containing:
+ # - `{:error, "Insufficient data to decode public keys"}` if the input is insufficient
+ # to decode the specified number of keys.
+ # - A list of decoded AnyTrust committee member public keys and an empty binary,
+ # if successful.
+ @spec decode_pubkeys(binary(), non_neg_integer(), [
+ signer()
+ ]) :: {:error, String.t()} | {[signer()], binary()}
+ defp decode_pubkeys(<<>>, 0, acc), do: {Enum.reverse(acc), <<>>}
+ defp decode_pubkeys(<<>>, _num_keys, _acc), do: {:error, "Insufficient data to decode public keys"}
+
+ defp decode_pubkeys(data, num_keys, acc) when num_keys > 0 do
+ <<high_byte::size(8), low_byte::size(8), rest::binary>> = data
+ pubkey_len = high_byte * 256 + low_byte
+
+ <<pubkey_data::binary-size(pubkey_len), remaining::binary>> = rest
+ pubkey = parse_pubkey(pubkey_data)
+ decode_pubkeys(remaining, num_keys - 1, [pubkey | acc])
+ end
+
+ # Parses a public key of an AnyTrust committee member from a binary input.
+ #
+ # This function extracts either the public key (for trusted sources) or the proof
+ # bytes and key bytes (for untrusted sources).
+ #
+ # Implemented as per: https://github.com/OffchainLabs/nitro/blob/35bd2aa59611702e6403051af581fddda7c17f74/blsSignatures/blsSignatures.go#L206C6-L242
+ #
+ # ## Parameters
+ # - A binary input containing the proof length and the rest of the data.
+ #
+ # ## Returns
+ # - A map describing an AnyTrust committee member public key.
+ @spec parse_pubkey(binary()) :: signer()
+ defp parse_pubkey(<<proof_len::size(8), rest::binary>>) do
+ if proof_len == 0 do
+ # Trusted source, no proof bytes, the rest is the key
+ %{trusted: true, key: ArbitrumHelper.bytes_to_hex_str(rest)}
+ else
+ <<proof_bytes::binary-size(proof_len), key_bytes::binary>> = rest
+
+ %{
+ trusted: false,
+ proof: ArbitrumHelper.bytes_to_hex_str(proof_bytes),
+ key: ArbitrumHelper.bytes_to_hex_str(key_bytes)
+ }
+ end
+ end
+end
diff --git a/apps/indexer/lib/indexer/fetcher/arbitrum/da/celestia.ex b/apps/indexer/lib/indexer/fetcher/arbitrum/da/celestia.ex
new file mode 100644
index 000000000000..57c6c523ff92
--- /dev/null
+++ b/apps/indexer/lib/indexer/fetcher/arbitrum/da/celestia.ex
@@ -0,0 +1,113 @@
+defmodule Indexer.Fetcher.Arbitrum.DA.Celestia do
+ @moduledoc """
+ Provides functionality for parsing and preparing Celestia data availability
+ information associated with Arbitrum rollup batches.
+ """
+
+ import Indexer.Fetcher.Arbitrum.Utils.Logging, only: [log_error: 1]
+ import Explorer.Chain.Arbitrum.DaMultiPurposeRecord.Helper, only: [calculate_celestia_data_key: 2]
+
+ alias Indexer.Fetcher.Arbitrum.Utils.Helper, as: ArbitrumHelper
+
+ alias Explorer.Chain.Arbitrum
+
+ @enforce_keys [:batch_number, :height, :tx_commitment, :raw]
+ defstruct @enforce_keys
+
+ @typedoc """
+ Celestia Blob Pointer struct:
+ * `batch_number` - The batch number in the Arbitrum rollup associated with the
+ Celestia data.
+ * `height` - The height of the block in Celestia.
+ * `tx_commitment` - Data commitment in Celestia.
+ * `raw` - Unparsed blob pointer data containing data root, proof, etc.
+ """
+ @type t :: %__MODULE__{
+ batch_number: non_neg_integer(),
+ height: non_neg_integer(),
+ tx_commitment: binary(),
+ raw: binary()
+ }
+
+ @typedoc """
+ Celestia Blob Descriptor struct:
+ * `height` - The height of the block in Celestia.
+ * `tx_commitment` - Data commitment in Celestia.
+ * `raw` - Unparsed blob pointer data containing data root, proof, etc.
+ """
+ @type blob_descriptor :: %{
+ :height => non_neg_integer(),
+ :tx_commitment => String.t(),
+ :raw => String.t()
+ }
+
+ @doc """
+ Parses the batch accompanying data for Celestia.
+
+ This function extracts Celestia blob descriptor information, representing the
+ information required to address a data blob and prove data availability,
+ from a binary input associated with a given batch number.
+
+ ## Parameters
+ - `batch_number`: The batch number in the Arbitrum rollup associated with the Celestia data.
+ - `binary`: A binary input containing the Celestia blob descriptor data.
+
+ ## Returns
+ - `{:ok, :in_celestia, da_info}` if the data is successfully parsed.
+ - `{:error, nil, nil}` if the data cannot be parsed.
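+
+ ## Example
+
+ A minimal sketch of a blob pointer with zeroed proof fields (illustrative
+ values only; a real pointer is produced by the Celestia batch poster):
+
+     blob_pointer =
+       <<42::64>> <>       # height
+         <<0::64>> <>      # start index
+         <<0::64>> <>      # shares length
+         <<0::64>> <>      # key
+         <<0::64>> <>      # number of leaves
+         <<0::64>> <>      # tuple root nonce
+         <<1::256>> <>     # tx commitment, 32 bytes
+         <<0::256>> <>     # data root, 32 bytes
+         <<0::64>>         # side nodes length
+
+     {:ok, :in_celestia, da_info} = parse_batch_accompanying_data(123, blob_pointer)
+     # da_info.height == 42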
+ """ + @spec parse_batch_accompanying_data(non_neg_integer(), binary()) :: + {:ok, :in_celestia, __MODULE__.t()} | {:error, nil, nil} + def parse_batch_accompanying_data( + batch_number, + << + height::big-unsigned-integer-size(64), + _start_index::binary-size(8), + _shares_length::binary-size(8), + _key::big-unsigned-integer-size(64), + _num_leaves::big-unsigned-integer-size(64), + _tuple_root_nonce::big-unsigned-integer-size(64), + tx_commitment::binary-size(32), + _data_root::binary-size(32), + _side_nodes_length::big-unsigned-integer-size(64), + _rest::binary + >> = raw + ) do + # https://github.com/celestiaorg/nitro-contracts/blob/celestia/blobstream/src/bridge/SequencerInbox.sol#L334-L360 + {:ok, :in_celestia, %__MODULE__{batch_number: batch_number, height: height, tx_commitment: tx_commitment, raw: raw}} + end + + def parse_batch_accompanying_data(_, _) do + log_error("Can not parse Celestia DA message.") + {:error, nil, nil} + end + + @doc """ + Prepares Celestia Blob data for import. + + ## Parameters + - `source`: The initial list of data to be imported. + - `da_info`: The Celestia blob descriptor struct containing details about the data blob. + + ## Returns + - An updated list of data structures ready for import, including the Celestia blob descriptor. + """ + @spec prepare_for_import(list(), __MODULE__.t()) :: [Arbitrum.DaMultiPurposeRecord.to_import()] + def prepare_for_import(source, %__MODULE__{} = da_info) do + data = %{ + height: da_info.height, + tx_commitment: ArbitrumHelper.bytes_to_hex_str(da_info.tx_commitment), + raw: ArbitrumHelper.bytes_to_hex_str(da_info.raw) + } + + [ + %{ + data_type: 0, + data_key: calculate_celestia_data_key(da_info.height, da_info.tx_commitment), + data: data, + batch_number: da_info.batch_number + } + | source + ] + end +end diff --git a/apps/indexer/lib/indexer/fetcher/arbitrum/da/common.ex b/apps/indexer/lib/indexer/fetcher/arbitrum/da/common.ex new file mode 100644 index 000000000000..493ea49c0900 --- /dev/null +++ b/apps/indexer/lib/indexer/fetcher/arbitrum/da/common.ex @@ -0,0 +1,143 @@ +defmodule Indexer.Fetcher.Arbitrum.DA.Common do + @moduledoc """ + This module provides common functionalities for handling data availability (DA) + information in the Arbitrum rollup. + """ + + import Indexer.Fetcher.Arbitrum.Utils.Logging, only: [log_error: 1] + + alias Indexer.Fetcher.Arbitrum.DA.{Anytrust, Celestia} + + alias Explorer.Chain.Arbitrum + + @doc """ + Examines the batch accompanying data to determine its type and parse it accordingly. + + This function examines the batch accompanying data to identify its type and then + parses it based on the identified type if necessary. + + ## Parameters + - `batch_number`: The batch number in the Arbitrum rollup. + - `batch_accompanying_data`: The binary data accompanying the batch. + + ## Returns + - `{status, da_type, da_info}` where `da_type` is one of `:in_blob4844`, + `:in_calldata`, `:in_celestia`, `:in_anytrust`, or `nil` if the accompanying + data cannot be parsed or is of an unsupported type. `da_info` contains the DA + info descriptor for Celestia or Anytrust. 
+ """ + @spec examine_batch_accompanying_data(non_neg_integer(), binary()) :: + {:ok, :in_blob4844, nil} + | {:ok, :in_calldata, nil} + | {:ok, :in_celestia, Celestia.t()} + | {:ok, :in_anytrust, Anytrust.t()} + | {:error, nil, nil} + def examine_batch_accompanying_data(batch_number, batch_accompanying_data) do + case batch_accompanying_data do + nil -> {:ok, :in_blob4844, nil} + _ -> parse_data_availability_info(batch_number, batch_accompanying_data) + end + end + + @doc """ + Prepares data availability (DA) information for import. + + This function processes a list of DA information, either from Celestia or Anytrust, + preparing it for database import. + + ## Parameters + - `da_info`: A list of DA information structs. + - `l1_connection_config`: A map containing the address of the Sequencer Inbox contract + and configuration parameters for the JSON RPC connection. + + ## Returns + - A list of data structures ready for import, each containing: + - `:data_key`: A binary key identifying the data. + - `:data_type`: An integer indicating the type of data, which can be `0` + for data blob descriptors and `1` for Anytrust keyset descriptors. + - `:data`: A map containing the DA information. + - `:batch_number`: The batch number associated with the data, or `nil`. + """ + @spec prepare_for_import([Celestia.t() | Anytrust.t() | map()], %{ + :sequencer_inbox_address => String.t(), + :json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments() + }) :: [Arbitrum.DaMultiPurposeRecord.to_import()] + def prepare_for_import([], _), do: [] + + def prepare_for_import(da_info, l1_connection_config) do + da_info + |> Enum.reduce({[], MapSet.new()}, fn info, {acc, cache} -> + case info do + %Celestia{} -> + {Celestia.prepare_for_import(acc, info), cache} + + %Anytrust{} -> + Anytrust.prepare_for_import(acc, info, l1_connection_config, cache) + + _ -> + {acc, cache} + end + end) + |> Kernel.elem(0) + end + + @doc """ + Determines if data availability information requires import. + + This function checks the type of data availability (DA) and returns whether + the data should be imported based on its type. + + ## Parameters + - `da_type`: The type of data availability, which can be `:in_blob4844`, `:in_calldata`, + `:in_celestia`, `:in_anytrust`, or `nil`. + + ## Returns + - `true` if the DA type is `:in_celestia` or `:in_anytrust`, indicating that the data + requires import. + - `false` for all other DA types, indicating that the data does not require import. + """ + @spec required_import?(:in_blob4844 | :in_calldata | :in_celestia | :in_anytrust | nil) :: boolean() + def required_import?(da_type) do + da_type in [:in_celestia, :in_anytrust] + end + + # Parses data availability information based on the header flag. 
+ @spec parse_data_availability_info(non_neg_integer(), binary()) :: + {:ok, :in_calldata, nil} + | {:ok, :in_celestia, Celestia.t()} + | {:ok, :in_anytrust, Anytrust.t()} + | {:error, nil, nil} + defp parse_data_availability_info(batch_number, << + header_flag::size(8), + rest::binary + >>) do + # https://github.com/OffchainLabs/nitro-contracts/blob/90037b996509312ef1addb3f9352457b8a99d6a6/src/bridge/SequencerInbox.sol#L69-L81 + case header_flag do + 0 -> + {:ok, :in_calldata, nil} + + 12 -> + Celestia.parse_batch_accompanying_data(batch_number, rest) + + 32 -> + log_error("ZERO HEAVY messages are not supported.") + {:error, nil, nil} + + 128 -> + log_error("DAS messages are not supported.") + {:error, nil, nil} + + 136 -> + Anytrust.parse_batch_accompanying_data(batch_number, rest) + + _ -> + log_error("Unknown header flag found during an attempt to parse DA data: #{header_flag}") + {:error, nil, nil} + end + end + + defp parse_data_availability_info(_, _) do + log_error("Failed to parse data availability information.") + {:error, nil, nil} + end +end diff --git a/apps/indexer/lib/indexer/fetcher/arbitrum/utils/db.ex b/apps/indexer/lib/indexer/fetcher/arbitrum/utils/db.ex index 5ca90219df43..1dc5ce20884a 100644 --- a/apps/indexer/lib/indexer/fetcher/arbitrum/utils/db.ex +++ b/apps/indexer/lib/indexer/fetcher/arbitrum/utils/db.ex @@ -371,7 +371,7 @@ defmodule Indexer.Fetcher.Arbitrum.Utils.Db do - A list of `Explorer.Chain.Block` instances containing detailed information for each block number in the input list. Returns an empty list if no blocks are found for the given numbers. """ - @spec rollup_blocks(maybe_improper_list(FullBlock.block_number(), [])) :: [FullBlock] + @spec rollup_blocks(maybe_improper_list(FullBlock.block_number(), [])) :: [FullBlock.t()] def rollup_blocks(list_of_block_numbers) when is_list(list_of_block_numbers) do query = @@ -402,15 +402,7 @@ defmodule Indexer.Fetcher.Arbitrum.Utils.Db do - A list of maps representing unfinalized L1 transactions and compatible with the database import operation. """ - @spec lifecycle_unfinalized_transactions(FullBlock.block_number()) :: [ - %{ - id: non_neg_integer(), - hash: Hash, - block_number: FullBlock.block_number(), - timestamp: DateTime, - status: :unfinalized - } - ] + @spec lifecycle_unfinalized_transactions(FullBlock.block_number()) :: [Arbitrum.LifecycleTransaction.to_import()] def lifecycle_unfinalized_transactions(finalized_block) when is_integer(finalized_block) and finalized_block >= 0 do finalized_block @@ -443,7 +435,7 @@ defmodule Indexer.Fetcher.Arbitrum.Utils.Db do - The `Explorer.Chain.Arbitrum.L1Batch` associated with the given rollup block number if it exists and its commit transaction is loaded. """ - @spec get_batch_by_rollup_block_number(FullBlock.block_number()) :: Explorer.Chain.Arbitrum.L1Batch | nil + @spec get_batch_by_rollup_block_number(FullBlock.block_number()) :: Arbitrum.L1Batch.t() | nil def get_batch_by_rollup_block_number(num) when is_integer(num) and num >= 0 do case Reader.get_batch_by_rollup_block_number(num) do @@ -476,11 +468,7 @@ defmodule Indexer.Fetcher.Arbitrum.Utils.Db do If no unconfirmed blocks are found within the range, an empty list is returned. 
""" @spec unconfirmed_rollup_blocks(FullBlock.block_number(), FullBlock.block_number()) :: [ - %{ - batch_number: non_neg_integer(), - block_number: FullBlock.block_number(), - confirmation_id: non_neg_integer() | nil - } + Arbitrum.BatchBlock.to_import() ] def unconfirmed_rollup_blocks(first_block, last_block) when is_integer(first_block) and first_block >= 0 and @@ -519,17 +507,7 @@ defmodule Indexer.Fetcher.Arbitrum.Utils.Db do database import operation. If no initiated messages are found up to the specified block number, an empty list is returned. """ - @spec initiated_l2_to_l1_messages(FullBlock.block_number()) :: [ - %{ - direction: :from_l2, - message_id: non_neg_integer(), - originator_address: binary(), - originating_transaction_hash: binary(), - originating_transaction_block_number: FullBlock.block_number(), - completion_transaction_hash: nil, - status: :initiated - } - ] + @spec initiated_l2_to_l1_messages(FullBlock.block_number()) :: [Arbitrum.Message.to_import()] def initiated_l2_to_l1_messages(block_number) when is_integer(block_number) and block_number >= 0 do # credo:disable-for-lines:2 Credo.Check.Refactor.PipeChainStart @@ -552,17 +530,7 @@ defmodule Indexer.Fetcher.Arbitrum.Utils.Db do database import operation. If no messages with the 'sent' status are found by the specified block number, an empty list is returned. """ - @spec sent_l2_to_l1_messages(FullBlock.block_number()) :: [ - %{ - direction: :from_l2, - message_id: non_neg_integer(), - originator_address: binary(), - originating_transaction_hash: binary(), - originating_transaction_block_number: FullBlock.block_number(), - completion_transaction_hash: nil, - status: :sent - } - ] + @spec sent_l2_to_l1_messages(FullBlock.block_number()) :: [Arbitrum.Message.to_import()] def sent_l2_to_l1_messages(block_number) when is_integer(block_number) and block_number >= 0 do # credo:disable-for-lines:2 Credo.Check.Refactor.PipeChainStart @@ -619,7 +587,7 @@ defmodule Indexer.Fetcher.Arbitrum.Utils.Db do the input list. The output list may be smaller than the input list if some IDs do not correspond to any existing transactions. """ - @spec l1_executions([non_neg_integer()]) :: [Explorer.Chain.Arbitrum.L1Execution] + @spec l1_executions([non_neg_integer()]) :: [Arbitrum.L1Execution.t()] def l1_executions(message_ids) when is_list(message_ids) do Reader.l1_executions(message_ids) end @@ -745,12 +713,32 @@ defmodule Indexer.Fetcher.Arbitrum.Utils.Db do Chain.timestamp_to_block_number(timestamp, :after, false) end + @doc """ + Checks if an AnyTrust keyset exists in the database using the provided keyset hash. + + ## Parameters + - `keyset_hash`: The hash of the keyset to be checked. + + ## Returns + - `true` if the keyset exists, `false` otherwise. 
+ """ + @spec anytrust_keyset_exists?(binary()) :: boolean() + def anytrust_keyset_exists?(keyset_hash) do + not Enum.empty?(Reader.get_anytrust_keyset(keyset_hash)) + end + + @spec get_da_info_by_batch_number(non_neg_integer()) :: map() | nil + def get_da_info_by_batch_number(batch_number) do + Reader.get_da_info_by_batch_number(batch_number) + end + @spec lifecycle_transaction_to_map(Arbitrum.LifecycleTransaction.t()) :: Arbitrum.LifecycleTransaction.to_import() defp lifecycle_transaction_to_map(tx) do [:id, :hash, :block_number, :timestamp, :status] |> db_record_to_map(tx) end + @spec rollup_block_to_map(Arbitrum.BatchBlock.t()) :: Arbitrum.BatchBlock.to_import() defp rollup_block_to_map(block) do [:batch_number, :block_number, :confirmation_id] |> db_record_to_map(block) @@ -763,6 +751,7 @@ defmodule Indexer.Fetcher.Arbitrum.Utils.Db do :message_id, :originator_address, :originating_transaction_hash, + :origination_timestamp, :originating_transaction_block_number, :completion_transaction_hash, :status diff --git a/apps/indexer/lib/indexer/fetcher/arbitrum/utils/helper.ex b/apps/indexer/lib/indexer/fetcher/arbitrum/utils/helper.ex index 1579b89009d4..cd114749175c 100644 --- a/apps/indexer/lib/indexer/fetcher/arbitrum/utils/helper.ex +++ b/apps/indexer/lib/indexer/fetcher/arbitrum/utils/helper.ex @@ -83,4 +83,18 @@ defmodule Indexer.Fetcher.Arbitrum.Utils.Helper do ) end) end + + @doc """ + Converts a binary data to a hexadecimal string. + + ## Parameters + - `data`: The binary data to convert to a hexadecimal string. + + ## Returns + - A hexadecimal string representation of the input data. + """ + @spec bytes_to_hex_str(binary()) :: String.t() + def bytes_to_hex_str(data) do + "0x" <> Base.encode16(data, case: :lower) + end end diff --git a/apps/indexer/lib/indexer/fetcher/arbitrum/utils/rpc.ex b/apps/indexer/lib/indexer/fetcher/arbitrum/utils/rpc.ex index 2600fbc62222..a5875924d363 100644 --- a/apps/indexer/lib/indexer/fetcher/arbitrum/utils/rpc.ex +++ b/apps/indexer/lib/indexer/fetcher/arbitrum/utils/rpc.ex @@ -57,6 +57,18 @@ defmodule Indexer.Fetcher.Arbitrum.Utils.Rpc do } ] + # getKeysetCreationBlock(bytes32 ksHash) + @selector_get_keyset_creation_block "258f0495" + @selector_sequencer_inbox_contract_abi [ + %{ + "inputs" => [%{"internalType" => "bytes32", "name" => "ksHash", "type" => "bytes32"}], + "name" => "getKeysetCreationBlock", + "outputs" => [%{"internalType" => "uint256", "name" => "", "type" => "uint256"}], + "stateMutability" => "view", + "type" => "function" + } + ] + @doc """ Constructs a JSON RPC request to retrieve a transaction by its hash. @@ -114,6 +126,49 @@ defmodule Indexer.Fetcher.Arbitrum.Utils.Rpc do ) end + @doc """ + Retrieves the block number associated with a specific keyset from the Sequencer Inbox contract. + + This function performs an `eth_call` to the Sequencer Inbox contract to get the block number + when a keyset was created. + + ## Parameters + - `sequencer_inbox_address`: The address of the Sequencer Inbox contract. + - `keyset_hash`: The hash of the keyset for which the block number is to be retrieved. + - `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection. + + ## Returns + - The block number. 
+  """
+  @spec get_block_number_for_keyset(
+          EthereumJSONRPC.address(),
+          EthereumJSONRPC.hash(),
+          EthereumJSONRPC.json_rpc_named_arguments()
+        ) :: non_neg_integer()
+  def get_block_number_for_keyset(sequencer_inbox_address, keyset_hash, json_rpc_named_arguments) do
+    [
+      %{
+        contract_address: sequencer_inbox_address,
+        method_id: @selector_get_keyset_creation_block,
+        args: [keyset_hash]
+      }
+    ]
+    |> IndexerHelper.read_contracts_with_retries(
+      @selector_sequencer_inbox_contract_abi,
+      json_rpc_named_arguments,
+      @rpc_resend_attempts
+    )
+    # Extracts the list of responses from the tuple returned by read_contracts_with_retries.
+    |> Kernel.elem(0)
+    # Retrieves the first response from the list of responses. The responses are in a list
+    # because read_contracts_with_retries accepts a list of method calls.
+    |> List.first()
+    # Extracts the result from the {status, result} tuple which is composed in EthereumJSONRPC.Encoder.decode_result.
+    |> Kernel.elem(1)
+    # Extracts the first decoded value from the result, which is a list, even if it contains only one value.
+    |> List.first()
+  end
+
   # Calls getter functions on a rollup contract and collects their return values.
   #
   # This function is designed to interact with a rollup contract and invoke specified getter methods.
diff --git a/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_batches.ex b/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_batches.ex
index cd44cb1d637b..40368c85b7ef 100644
--- a/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_batches.ex
+++ b/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_batches.ex
@@ -7,12 +7,13 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do
   being created and historical batches processed in the past but not yet
   imported into the database.
 
-  The process involves fetching logs for the `SequencerBatchDelivered` event
-  emitted by the Arbitrum `SequencerInbox` contract, processing these logs to
-  extract batch details, and then building the link between batches and the
-  corresponding rollup blocks and transactions. It also discovers those
-  cross-chain messages initiated in rollup blocks linked with the new batches
-  and updates the status of messages to consider them as committed (`:sent`).
+  Fetch logs for the `SequencerBatchDelivered` event emitted by the Arbitrum
+  `SequencerInbox` contract. Process the logs to extract batch details. Build the
+  link between batches and the corresponding rollup blocks and transactions. If
+  the batch data resides in a Data Availability solution such as AnyTrust or
+  Celestia, fetch the DA information needed to locate the batch data. Discover
+  cross-chain messages initiated in rollup blocks linked with the new batches and
+  update their status to committed (`:sent`).
 
   For any blocks or transactions missing in the database, data is requested in
   chunks from the rollup RPC endpoint by `eth_getBlockByNumber`. Additionally,
@@ -29,8 +30,10 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do
 
   alias EthereumJSONRPC.Block.ByNumber, as: BlockByNumber
 
-  alias Indexer.Helper, as: IndexerHelper
+  alias Indexer.Fetcher.Arbitrum.DA.Common, as: DataAvailabilityInfo
+  alias Indexer.Fetcher.Arbitrum.DA.{Anytrust, Celestia}
   alias Indexer.Fetcher.Arbitrum.Utils.{Db, Logging, Rpc}
+  alias Indexer.Helper, as: IndexerHelper
 
   alias Explorer.Chain
   alias Explorer.Chain.Arbitrum
@@ -292,25 +295,44 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do
   # Performs the discovery of new or historical batches within a specified block range,
   # processing and importing the relevant data into the database.
   #
-  # This function retrieves SequencerBatchDelivered event logs from the specified block range
-  # and processes these logs to identify new batches and their corresponding details. It then
-  # constructs comprehensive data structures for batches, lifecycle transactions, rollup
-  # blocks, and rollup transactions. Additionally, it identifies any L2-to-L1 messages that
-  # have been committed within these batches and updates their status. All discovered and
-  # processed data are then imported into the database. If new batches were found, they are
-  # announced to be broadcasted through a websocket.
+  # This function retrieves SequencerBatchDelivered event logs from the specified block
+  # range and processes these logs to identify new batches and their corresponding details.
+  # It then constructs comprehensive data structures for batches, lifecycle transactions,
+  # rollup blocks, rollup transactions, and Data Availability-related records. Additionally,
+  # it identifies any L2-to-L1 messages that have been committed within these batches and
+  # updates their status. All discovered and processed data are then imported into the
+  # database. If new batches were found, they are announced to be broadcast through a
+  # websocket.
   #
   # ## Parameters
   # - `sequencer_inbox_address`: The SequencerInbox contract address used to filter logs.
   # - `start_block`: The starting block number for the discovery range.
   # - `end_block`: The ending block number for the discovery range.
   # - `new_batches_limit`: The maximum number of new batches to process in one iteration.
-  # - `messages_to_blocks_shift`: The value used to align message counts with rollup block numbers.
+  # - `messages_to_blocks_shift`: The value used to align message counts with rollup block
+  #   numbers.
   # - `l1_rpc_config`: RPC configuration parameters for L1.
   # - `rollup_rpc_config`: RPC configuration parameters for rollup data.
   #
   # ## Returns
   # - N/A
+  @spec do_discover(
+          binary(),
+          non_neg_integer(),
+          non_neg_integer(),
+          non_neg_integer(),
+          non_neg_integer(),
+          %{
+            :json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(),
+            :chunk_size => non_neg_integer(),
+            optional(any()) => any()
+          },
+          %{
+            :json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(),
+            :chunk_size => non_neg_integer(),
+            optional(any()) => any()
+          }
+        ) :: :ok
   defp do_discover(
          sequencer_inbox_address,
          start_block,
@@ -344,11 +366,12 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do
     logs
     |> Enum.chunk_every(new_batches_limit)
     |> Enum.each(fn chunked_logs ->
-      {batches, lifecycle_txs, rollup_blocks, rollup_txs, committed_txs} =
+      {batches, lifecycle_txs, rollup_blocks, rollup_txs, committed_txs, da_records} =
         handle_batches_from_logs(
           chunked_logs,
           messages_to_blocks_shift,
           l1_rpc_config,
+          sequencer_inbox_address,
           rollup_rpc_config
         )
 
@@ -359,6 +382,7 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do
         arbitrum_batch_blocks: %{params: rollup_blocks},
         arbitrum_batch_transactions: %{params: rollup_txs},
         arbitrum_messages: %{params: committed_txs},
+        arbitrum_da_multi_purpose_records: %{params: da_records},
         timeout: :infinity
       })
 
@@ -384,6 +408,8 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do
   #
   # ## Returns
   # - A list of logs for SequencerBatchDelivered events within the specified block range.
+  @spec get_logs_new_batches(non_neg_integer(), non_neg_integer(), binary(), EthereumJSONRPC.json_rpc_named_arguments()) ::
+          [%{String.t() => any()}]
   defp get_logs_new_batches(start_block, end_block, sequencer_inbox_address, json_rpc_named_arguments)
        when start_block <= end_block do
     {:ok, logs} =
@@ -408,21 +434,23 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do
   # and retrieves their details, avoiding the reprocessing of batches already known
   # in the database. It enriches the details of new batches with data from corresponding
   # L1 transactions and blocks, including timestamps and block ranges. The function
-  # then prepares batches, associated rollup blocks and transactions, and lifecycle
-  # transactions for database import. Additionally, L2-to-L1 messages initiated in the
-  # rollup blocks associated with the discovered batches are retrieved from the database,
-  # marked as `:sent`, and prepared for database import.
+  # then prepares batches, associated rollup blocks and transactions, lifecycle
+  # transactions, and Data Availability-related records for database import.
+  # Additionally, L2-to-L1 messages initiated in the rollup blocks associated with the
+  # discovered batches are retrieved from the database, marked as `:sent`, and prepared
+  # for database import.
   #
   # ## Parameters
   # - `logs`: The list of SequencerBatchDelivered event logs.
   # - `msg_to_block_shift`: The shift value for mapping batch messages to block numbers.
   # - `l1_rpc_config`: The RPC configuration for L1 requests.
+  # - `sequencer_inbox_address`: The address of the SequencerInbox contract.
   # - `rollup_rpc_config`: The RPC configuration for rollup data requests.
   #
   # ## Returns
   # - A tuple containing lists of batches, lifecycle transactions, rollup blocks,
-  #   rollup transactions, and committed messages (with the status `:sent`), all
-  #   ready for database import.
+  #   rollup transactions, committed messages (with the status `:sent`), and records
+  #   with DA-related information if applicable, all ready for database import. 
@spec handle_batches_from_logs( [%{String.t() => any()}], non_neg_integer(), @@ -431,21 +459,19 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do :chunk_size => non_neg_integer(), optional(any()) => any() }, + binary(), %{ :json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(), :chunk_size => non_neg_integer(), optional(any()) => any() } - ) :: { - [Arbitrum.L1Batch.to_import()], - [Arbitrum.LifecycleTransaction.to_import()], - [Arbitrum.BatchBlock.to_import()], - [Arbitrum.BatchTransaction.to_import()], - [Arbitrum.Message.to_import()] - } - defp handle_batches_from_logs(logs, msg_to_block_shift, l1_rpc_config, rollup_rpc_config) + ) :: + {[Arbitrum.L1Batch.to_import()], [Arbitrum.LifecycleTransaction.to_import()], + [Arbitrum.BatchBlock.to_import()], [Arbitrum.BatchTransaction.to_import()], [Arbitrum.Message.to_import()], + [Arbitrum.DaMultiPurposeRecord.to_import()]} + defp handle_batches_from_logs(logs, msg_to_block_shift, l1_rpc_config, sequencer_inbox_address, rollup_rpc_config) - defp handle_batches_from_logs([], _, _, _), do: {[], [], [], [], []} + defp handle_batches_from_logs([], _, _, _, _), do: {[], [], [], [], [], []} defp handle_batches_from_logs( logs, @@ -454,6 +480,7 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do json_rpc_named_arguments: json_rpc_named_arguments, chunk_size: chunk_size } = l1_rpc_config, + sequencer_inbox_address, rollup_rpc_config ) do existing_batches = @@ -466,7 +493,7 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do blocks_to_ts = Rpc.execute_blocks_requests_and_get_ts(blocks_requests, json_rpc_named_arguments, chunk_size) - {initial_lifecycle_txs, batches_to_import} = + {initial_lifecycle_txs, batches_to_import, da_info} = execute_tx_requests_parse_txs_calldata(txs_requests, msg_to_block_shift, blocks_to_ts, batches, l1_rpc_config) # Check if the commitment transactions for the batches which are already in the database @@ -502,6 +529,12 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do ] end) + da_records = + DataAvailabilityInfo.prepare_for_import(da_info, %{ + sequencer_inbox_address: sequencer_inbox_address, + json_rpc_named_arguments: l1_rpc_config.json_rpc_named_arguments + }) + # It is safe to not re-mark messages as committed for the batches that are already in the database committed_messages = if Enum.empty?(blocks_to_import) do @@ -515,10 +548,11 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do end {batches_list_to_import, Map.values(lifecycle_txs), Map.values(blocks_to_import), rollup_txs_to_import, - committed_messages} + committed_messages, da_records} end # Extracts batch numbers from logs of SequencerBatchDelivered events. 
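+  # (Illustrative, made-up value: an event whose second topic is
+  # "0x000000000000000000000000000000000000000000000000000000000000002a"
+  # yields the batch number 42.)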
+ @spec parse_logs_to_get_batch_numbers([%{String.t() => any()}]) :: [non_neg_integer()] defp parse_logs_to_get_batch_numbers(logs) do logs |> Enum.map(fn event -> @@ -554,7 +588,14 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do [%{String.t() => any()}], [non_neg_integer()] ) :: { - %{:number => non_neg_integer(), :before_acc => binary(), :after_acc => binary(), :tx_hash => binary()}, + %{ + non_neg_integer() => %{ + :number => non_neg_integer(), + :before_acc => binary(), + :after_acc => binary(), + :tx_hash => binary() + } + }, [EthereumJSONRPC.Transport.request()], [EthereumJSONRPC.Transport.request()], %{binary() => non_neg_integer()} @@ -611,6 +652,7 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do end # Parses SequencerBatchDelivered event to get batch sequence number and associated accumulators + @spec sequencer_batch_delivered_event_parse(%{String.t() => any()}) :: {non_neg_integer(), binary(), binary()} defp sequencer_batch_delivered_event_parse(event) do [_, batch_sequence_number, before_acc, after_acc] = event["topics"] @@ -622,7 +664,9 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do # This function processes a list of RPC `eth_getTransactionByHash` requests, extracts # and decodes the calldata from the transactions to obtain batch details. It updates # the provided batch map with block ranges for new batches and constructs a map of - # lifecycle transactions with their timestamps and finalization status. + # lifecycle transactions with their timestamps and finalization status. Additionally, + # it examines the data availability (DA) information for Anytrust or Celestia and + # constructs a list of DA info structs. # # ## Parameters # - `txs_requests`: The list of RPC requests to fetch transaction data. @@ -631,15 +675,46 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do # - `blocks_to_ts`: A map of block numbers to their timestamps, required to complete # data for corresponding lifecycle transactions. # - `batches`: The current batch data to be updated. - # - A configuration map containing JSON RPC arguments, a track finalization flag, - # and a chunk size for batch processing. + # - A configuration map containing: + # - `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection. + # - `track_finalization`: A boolean flag indicating if finalization tracking is needed. + # - `chunk_size`: The size of chunks for batch processing. # # ## Returns # - A tuple containing: - # - A map of lifecycle (L1) transactions, which are not yet compatible with - # database import and require further processing. - # - An updated map of batch descriptions, also requiring further processing - # before database import. + # - A map of lifecycle (L1) transactions, including their hashes, block numbers, + # timestamps, and statuses (finalized or unfinalized). + # - An updated map of batch descriptions with block ranges and data availability + # information. + # - A list of data availability information structs for Anytrust or Celestia. 
+  @spec execute_tx_requests_parse_txs_calldata(
+          [EthereumJSONRPC.Transport.request()],
+          non_neg_integer(),
+          %{EthereumJSONRPC.block_number() => DateTime.t()},
+          %{non_neg_integer() => map()},
+          %{
+            :chunk_size => non_neg_integer(),
+            :json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(),
+            :track_finalization => boolean(),
+            optional(any()) => any()
+          }
+        ) ::
+          {%{
+             binary() => %{
+               :hash => binary(),
+               :block_number => non_neg_integer(),
+               :timestamp => DateTime.t(),
+               :status => :unfinalized | :finalized
+             }
+           },
+           %{
+             non_neg_integer() => %{
+               :start_block => non_neg_integer(),
+               :end_block => non_neg_integer(),
+               :data_available => atom() | nil,
+               optional(any()) => any()
+             }
+           }, [Anytrust.t() | Celestia.t()]}
   defp execute_tx_requests_parse_txs_calldata(txs_requests, msg_to_block_shift, blocks_to_ts, batches, %{
          json_rpc_named_arguments: json_rpc_named_arguments,
          track_finalization: track_finalization?,
@@ -647,20 +722,26 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do
        }) do
     txs_requests
     |> Enum.chunk_every(chunk_size)
-    |> Enum.reduce({%{}, batches}, fn chunk, {l1_txs, updated_batches} ->
+    |> Enum.reduce({%{}, batches, []}, fn chunk, {l1_txs, updated_batches, da_info} ->
       chunk
      # each eth_getTransactionByHash call will take time since it returns the entire
      # batch in `input`, which is heavy because it contains dozens of rollup blocks
      |> Rpc.make_chunked_request(json_rpc_named_arguments, "eth_getTransactionByHash")
-      |> Enum.reduce({l1_txs, updated_batches}, fn resp, {txs_map, batches_map} ->
+      |> Enum.reduce({l1_txs, updated_batches, da_info}, fn resp, {txs_map, batches_map, da_info_list} ->
        block_num = quantity_to_integer(resp["blockNumber"])
 
        tx_hash = Rpc.string_hash_to_bytes_hash(resp["hash"])
 
        # Although they are called messages in the functions' ABI, in fact they are
        # rollup blocks
-        {batch_num, prev_message_count, new_message_count} =
+        {batch_num, prev_message_count, new_message_count, extra_data} =
          add_sequencer_l2_batch_from_origin_calldata_parse(resp["input"])
 
+        {da_type, da_data} =
+          case DataAvailabilityInfo.examine_batch_accompanying_data(batch_num, extra_data) do
+            {:ok, t, d} -> {t, d}
+            {:error, _, _} -> {nil, nil}
+          end
+
        # In some cases the numbers extracted for messages are not linked directly
        # to rollup blocks; to account for this, the numbers are shifted by a value
        # specific to the particular rollup
        updated_batches_map =
          Map.put(
            batches_map,
            batch_num,
            Map.merge(batches_map[batch_num], %{
-            start_block: prev_message_count + msg_to_block_shift,
-            end_block: new_message_count + msg_to_block_shift - 1
+            start_block: prev_message_count + msg_to_block_shift,
+            end_block: new_message_count + msg_to_block_shift - 1,
+            batch_container: da_type
          })
        )
 
@@ -687,18 +769,28 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do
            end
          })
 
-      {updated_txs_map, updated_batches_map}
+      # credo:disable-for-lines:6 Credo.Check.Refactor.Nesting
+      updated_da_info_list =
+        if DataAvailabilityInfo.required_import?(da_type) do
+          [da_data | da_info_list]
+        else
+          da_info_list
+        end
+
+      {updated_txs_map, updated_batches_map, updated_da_info_list}
      end)
    end)
  end
 
  # Parses calldata of `addSequencerL2BatchFromOrigin` or `addSequencerL2BatchFromBlobs`
  # functions to extract batch information.
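+  # Dispatch is driven by the 4-byte selector at the start of the calldata:
+  # "0x8f111f3c" (addSequencerL2BatchFromOrigin) carries the batch payload in its
+  # `data` argument, while "0x3e5aa082" (addSequencerL2BatchFromBlobs) carries no
+  # payload, so `nil` is returned in its place.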
+ @spec add_sequencer_l2_batch_from_origin_calldata_parse(binary()) :: + {non_neg_integer(), non_neg_integer(), non_neg_integer(), binary() | nil} defp add_sequencer_l2_batch_from_origin_calldata_parse(calldata) do case calldata do "0x8f111f3c" <> encoded_params -> # addSequencerL2BatchFromOrigin(uint256 sequenceNumber, bytes calldata data, uint256 afterDelayedMessagesRead, address gasRefunder, uint256 prevMessageCount, uint256 newMessageCount) - [sequence_number, _data, _after_delayed_messages_read, _gas_refunder, prev_message_count, new_message_count] = + [sequence_number, data, _after_delayed_messages_read, _gas_refunder, prev_message_count, new_message_count] = TypeDecoder.decode( Base.decode16!(encoded_params, case: :lower), %FunctionSelector{ @@ -714,7 +806,7 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do } ) - {sequence_number, prev_message_count, new_message_count} + {sequence_number, prev_message_count, new_message_count, data} "0x3e5aa082" <> encoded_params -> # addSequencerL2BatchFromBlobs(uint256 sequenceNumber, uint256 afterDelayedMessagesRead, address gasRefunder, uint256 prevMessageCount, uint256 newMessageCount) @@ -733,7 +825,7 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do } ) - {sequence_number, prev_message_count, new_message_count} + {sequence_number, prev_message_count, new_message_count, nil} end end @@ -861,6 +953,14 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do # ## Returns # - A map where each key is a rollup block number and its value is the # corresponding batch number. + @spec unwrap_rollup_block_ranges(%{ + non_neg_integer() => %{ + :start_block => non_neg_integer(), + :end_block => non_neg_integer(), + :number => non_neg_integer(), + optional(any()) => any() + } + }) :: %{non_neg_integer() => non_neg_integer()} defp unwrap_rollup_block_ranges(batches) do batches |> Map.values() @@ -933,6 +1033,18 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do # ## Returns # - A tuple containing the updated map of rollup blocks and the updated list of # transactions, both are ready for database import. + @spec recover_data_if_necessary( + %{non_neg_integer() => Arbitrum.BatchBlock.to_import()}, + [Arbitrum.BatchTransaction.to_import()], + [non_neg_integer()], + %{non_neg_integer() => non_neg_integer()}, + %{ + :json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(), + :chunk_size => non_neg_integer(), + optional(any()) => any() + } + ) :: + {%{non_neg_integer() => Arbitrum.BatchBlock.to_import()}, [Arbitrum.BatchTransaction.to_import()]} defp recover_data_if_necessary( current_rollup_blocks, current_rollup_txs, @@ -988,6 +1100,18 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do # - A list of transactions, each associated with its respective rollup block # and batch number, ready for database import. # - The updated counter of processed chunks (usually ignored). + @spec recover_rollup_blocks_and_txs_from_rpc( + [non_neg_integer()], + [non_neg_integer()], + %{non_neg_integer() => non_neg_integer()}, + %{ + :json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(), + :chunk_size => non_neg_integer(), + optional(any()) => any() + } + ) :: + {%{non_neg_integer() => Arbitrum.BatchBlock.to_import()}, [Arbitrum.BatchTransaction.to_import()], + non_neg_integer()} defp recover_rollup_blocks_and_txs_from_rpc( required_blocks_numbers, found_blocks_numbers, @@ -1054,6 +1178,11 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do # for database import. 
# - An updated list of transactions, each associated with its respective rollup # block and batch number, ready for database import. + @spec prepare_rollup_block_map_and_transactions_list( + [%{id: non_neg_integer(), result: %{String.t() => any()}}], + %{non_neg_integer() => Arbitrum.BatchBlock.to_import()}, + [Arbitrum.BatchTransaction.to_import()] + ) :: {%{non_neg_integer() => Arbitrum.BatchBlock.to_import()}, [Arbitrum.BatchTransaction.to_import()]} defp prepare_rollup_block_map_and_transactions_list(json_responses, rollup_blocks, rollup_txs) do json_responses |> Enum.reduce({rollup_blocks, rollup_txs}, fn resp, {blocks_map, txs_list} -> @@ -1100,6 +1229,7 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do end # Retrieves initiated L2-to-L1 messages up to specified block number and marks them as 'sent'. + @spec get_committed_l2_to_l1_messages(non_neg_integer()) :: [Arbitrum.Message.to_import()] defp get_committed_l2_to_l1_messages(block_number) do block_number |> Db.initiated_l2_to_l1_messages() diff --git a/apps/indexer/lib/indexer/fetcher/zksync/utils/rpc.ex b/apps/indexer/lib/indexer/fetcher/zksync/utils/rpc.ex index b354bc3baefa..71e594d51358 100644 --- a/apps/indexer/lib/indexer/fetcher/zksync/utils/rpc.ex +++ b/apps/indexer/lib/indexer/fetcher/zksync/utils/rpc.ex @@ -51,28 +51,10 @@ defmodule Indexer.Fetcher.ZkSync.Utils.Rpc do end) end - defp from_ts_to_datetime(time_ts) do - {_, unix_epoch_starts} = DateTime.from_unix(0) - - case is_nil(time_ts) or time_ts == 0 do - true -> - unix_epoch_starts - - false -> - case DateTime.from_unix(time_ts) do - {:ok, datetime} -> - datetime - - {:error, _} -> - unix_epoch_starts - end - end - end - defp from_iso8601_to_datetime(time_string) do case is_nil(time_string) do true -> - from_ts_to_datetime(0) + IndexerHelper.timestamp_to_datetime(0) false -> case DateTime.from_iso8601(time_string) do @@ -80,7 +62,7 @@ defmodule Indexer.Fetcher.ZkSync.Utils.Rpc do datetime {:error, _} -> - from_ts_to_datetime(0) + IndexerHelper.timestamp_to_datetime(0) end end end @@ -139,7 +121,7 @@ defmodule Indexer.Fetcher.ZkSync.Utils.Rpc do key_atom, case transform_type do :iso8601_to_datetime -> from_iso8601_to_datetime(value_in_json_response) - :ts_to_datetime -> from_ts_to_datetime(value_in_json_response) + :ts_to_datetime -> IndexerHelper.timestamp_to_datetime(value_in_json_response) :str_to_txhash -> json_tx_id_to_hash(value_in_json_response) :str_to_byteshash -> string_hash_to_bytes_hash(value_in_json_response) _ -> value_in_json_response diff --git a/apps/indexer/lib/indexer/helper.ex b/apps/indexer/lib/indexer/helper.ex index d79501c2bb79..23043f7802c0 100644 --- a/apps/indexer/lib/indexer/helper.ex +++ b/apps/indexer/lib/indexer/helper.ex @@ -62,6 +62,38 @@ defmodule Indexer.Helper do end end + @doc """ + Converts a Unix timestamp to a `DateTime`. + + If the given timestamp is `nil` or `0`, it returns the Unix epoch start. + If the conversion fails, it also returns the Unix epoch start. + + ## Parameters + - `time_ts`: A non-negative integer representing the Unix timestamp or `nil`. + + ## Returns + - A `DateTime` corresponding to the given Unix timestamp, or the Unix epoch start if + the timestamp is `nil`, `0`, or if the conversion fails. 
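+
+    ## Examples
+
+    Illustrative values (the non-zero timestamp below is an arbitrary example):
+
+        iex> Indexer.Helper.timestamp_to_datetime(nil)
+        ~U[1970-01-01 00:00:00Z]
+
+        iex> Indexer.Helper.timestamp_to_datetime(1_700_000_000)
+        ~U[2023-11-14 22:13:20Z]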
+ """ + @spec timestamp_to_datetime(non_neg_integer() | nil) :: DateTime.t() + def timestamp_to_datetime(time_ts) do + {_, unix_epoch_starts} = DateTime.from_unix(0) + + case is_nil(time_ts) or time_ts == 0 do + true -> + unix_epoch_starts + + false -> + case DateTime.from_unix(time_ts) do + {:ok, datetime} -> + datetime + + {:error, _} -> + unix_epoch_starts + end + end + end + @doc """ Calculates average block time in milliseconds (based on the latest 100 blocks) divided by 2. Sends corresponding requests to the RPC node. diff --git a/cspell.json b/cspell.json index c19fa922c259..de3b000448eb 100644 --- a/cspell.json +++ b/cspell.json @@ -20,6 +20,7 @@ "Aiubo", "alloc", "amzootyukbugmx", + "anytrust", "apikey", "APIV", "Arbitrum", @@ -399,6 +400,8 @@ "progressbar", "proxiable", "psql", + "pubkey", + "pubkeys", "purrstige", "qdai", "Qebz", From fd22965fca66a013cf0a1125c71c639ca97b8dcf Mon Sep 17 00:00:00 2001 From: Victor Baranov Date: Mon, 8 Jul 2024 17:18:35 +0300 Subject: [PATCH 12/32] chore: remove single implementation name, address from API v2 response (#10390) --- .../views/api/v2/address_view.ex | 17 ----------------- .../lib/block_scout_web/views/api/v2/helper.ex | 17 ++++------------- .../account/api/v2/user_controller_test.exs | 6 ------ .../api/v2/address_controller_test.exs | 5 ----- 4 files changed, 4 insertions(+), 41 deletions(-) diff --git a/apps/block_scout_web/lib/block_scout_web/views/api/v2/address_view.ex b/apps/block_scout_web/lib/block_scout_web/views/api/v2/address_view.ex index 5d131ce7fb27..22841d305916 100644 --- a/apps/block_scout_web/lib/block_scout_web/views/api/v2/address_view.ex +++ b/apps/block_scout_web/lib/block_scout_web/views/api/v2/address_view.ex @@ -113,9 +113,6 @@ defmodule BlockScoutWeb.API.V2.AddressView do creation_tx = creator_hash && AddressView.transaction_hash(address) token = address.token && TokenView.render("token.json", %{token: address.token}) - # todo: added for backward compatibility, remove when frontend unbound from these props - {implementation_address, implementation_name} = single_implementation(implementations) - extended_info = Map.merge(base_info, %{ "creator_address_hash" => creator_hash && Address.checksum(creator_hash), @@ -137,25 +134,11 @@ defmodule BlockScoutWeb.API.V2.AddressView do extended_info else Map.merge(extended_info, %{ - # todo: added for backward compatibility, remove when frontend unbound from these props - "implementation_address" => implementation_address, - "implementation_name" => implementation_name, "implementations" => implementations }) end end - defp single_implementation(implementations) do - %{"address" => implementation_address, "name" => implementation_name} = - if Enum.empty?(implementations) do - %{"address" => nil, "name" => nil} - else - implementations |> Enum.at(0) - end - - {implementation_address, implementation_name} - end - @spec prepare_token_balance(Chain.Address.TokenBalance.t(), boolean()) :: map() defp prepare_token_balance(token_balance, fetch_token_instance? \\ false) do %{ diff --git a/apps/block_scout_web/lib/block_scout_web/views/api/v2/helper.ex b/apps/block_scout_web/lib/block_scout_web/views/api/v2/helper.ex index 2ad947c0412a..e9d1f963dbad 100644 --- a/apps/block_scout_web/lib/block_scout_web/views/api/v2/helper.ex +++ b/apps/block_scout_web/lib/block_scout_web/views/api/v2/helper.ex @@ -64,31 +64,25 @@ defmodule BlockScoutWeb.API.V2.Helper do def address_with_info(%Address{} = address, _address_hash) do smart_contract? 
= Address.smart_contract?(address) - {proxy_implementations, implementation_address_hashes, implementation_names, implementation_address, - implementation_name} = + {proxy_implementations, implementation_address_hashes, implementation_names} = case address.proxy_implementations do %NotLoaded{} -> - {nil, [], [], nil, nil} + {nil, [], []} nil -> - {nil, [], [], nil, nil} + {nil, [], []} proxy_implementations -> address_hashes = proxy_implementations.address_hashes names = proxy_implementations.names - address_hash = Enum.at(address_hashes, 0) && address_hashes |> Enum.at(0) |> Address.checksum() - - {proxy_implementations, address_hashes, names, address_hash, Enum.at(names, 0)} + {proxy_implementations, address_hashes, names} end %{ "hash" => Address.checksum(address), "is_contract" => smart_contract?, "name" => address_name(address), - # todo: added for backward compatibility, remove when frontend unbound from these props - "implementation_address" => implementation_address, - "implementation_name" => implementation_name, "implementations" => proxy_object_info(implementation_address_hashes, implementation_names), "is_verified" => verified?(address) || verified_minimal_proxy?(proxy_implementations), "ens_domain_name" => address.ens_domain_name, @@ -116,9 +110,6 @@ defmodule BlockScoutWeb.API.V2.Helper do "hash" => Address.checksum(address_hash), "is_contract" => false, "name" => nil, - # todo: added for backward compatibility, remove when frontend unbound from these props - "implementation_address" => nil, - "implementation_name" => nil, "implementations" => [], "is_verified" => nil, "ens_domain_name" => nil, diff --git a/apps/block_scout_web/test/block_scout_web/controllers/account/api/v2/user_controller_test.exs b/apps/block_scout_web/test/block_scout_web/controllers/account/api/v2/user_controller_test.exs index a5efaeb98df8..0bdb1d1ef89f 100644 --- a/apps/block_scout_web/test/block_scout_web/controllers/account/api/v2/user_controller_test.exs +++ b/apps/block_scout_web/test/block_scout_web/controllers/account/api/v2/user_controller_test.exs @@ -151,9 +151,6 @@ defmodule BlockScoutWeb.Account.Api.V2.UserControllerTest do "name" => name, "address" => %{ "hash" => Address.checksum(addr), - # todo: added for backward compatibility, remove when frontend unbound from these props - "implementation_address" => nil, - "implementation_name" => nil, "implementations" => [], "is_contract" => false, "is_verified" => false, @@ -210,9 +207,6 @@ defmodule BlockScoutWeb.Account.Api.V2.UserControllerTest do "name" => name, "address" => %{ "hash" => Address.checksum(addr), - # todo: added for backward compatibility, remove when frontend unbound from these props - "implementation_address" => nil, - "implementation_name" => nil, "implementations" => [], "is_contract" => false, "is_verified" => false, diff --git a/apps/block_scout_web/test/block_scout_web/controllers/api/v2/address_controller_test.exs b/apps/block_scout_web/test/block_scout_web/controllers/api/v2/address_controller_test.exs index 951095d6eea8..cf894b61f02b 100644 --- a/apps/block_scout_web/test/block_scout_web/controllers/api/v2/address_controller_test.exs +++ b/apps/block_scout_web/test/block_scout_web/controllers/api/v2/address_controller_test.exs @@ -71,9 +71,6 @@ defmodule BlockScoutWeb.API.V2.AddressControllerTest do "creation_tx_hash" => nil, "token" => nil, "coin_balance" => nil, - # todo: added for backward compatibility, remove when frontend unbound from these props - "implementation_address" => nil, - "implementation_name" => nil, 
"implementations" => [], "block_number_balance_updated_at" => nil, "has_decompiled_code" => false, @@ -209,7 +206,6 @@ defmodule BlockScoutWeb.API.V2.AddressControllerTest do "watchlist_names" => [], "creator_address_hash" => ^from, "creation_tx_hash" => ^tx_hash, - "implementation_address" => ^checksummed_implementation_contract_address_hash, "implementations" => [ %{"address" => ^checksummed_implementation_contract_address_hash, "name" => ^name} ] @@ -255,7 +251,6 @@ defmodule BlockScoutWeb.API.V2.AddressControllerTest do "watchlist_names" => [], "creator_address_hash" => ^from, "creation_tx_hash" => ^tx_hash, - "implementation_address" => ^implementation_address_hash_string, "implementations" => [%{"address" => ^implementation_address_hash_string, "name" => nil}] } = json_response(request, 200) end From e40f8be3a58df72736ad587ce6ededb14103d93e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Jul 2024 21:44:47 +0300 Subject: [PATCH 13/32] chore(deps-dev): bump ex_doc from 0.34.1 to 0.34.2 (#10394) Bumps [ex_doc](https://github.com/elixir-lang/ex_doc) from 0.34.1 to 0.34.2. - [Release notes](https://github.com/elixir-lang/ex_doc/releases) - [Changelog](https://github.com/elixir-lang/ex_doc/blob/main/CHANGELOG.md) - [Commits](https://github.com/elixir-lang/ex_doc/compare/v0.34.1...v0.34.2) --- updated-dependencies: - dependency-name: ex_doc dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- mix.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mix.lock b/mix.lock index 233a7bb987e6..8a32e41228c0 100644 --- a/mix.lock +++ b/mix.lock @@ -36,7 +36,7 @@ "deep_merge": {:hex, :deep_merge, "1.0.0", "b4aa1a0d1acac393bdf38b2291af38cb1d4a52806cf7a4906f718e1feb5ee961", [:mix], [], "hexpm", "ce708e5f094b9cd4e8f2be4f00d2f4250c4095be93f8cd6d018c753894885430"}, "dialyxir": {:hex, :dialyxir, "1.4.3", "edd0124f358f0b9e95bfe53a9fcf806d615d8f838e2202a9f430d59566b6b53b", [:mix], [{:erlex, ">= 0.2.6", [hex: :erlex, repo: "hexpm", optional: false]}], "hexpm", "bf2cfb75cd5c5006bec30141b131663299c661a864ec7fbbc72dfa557487a986"}, "digital_token": {:hex, :digital_token, "0.6.0", "13e6de581f0b1f6c686f7c7d12ab11a84a7b22fa79adeb4b50eec1a2d278d258", [:mix], [{:cldr_utils, "~> 2.17", [hex: :cldr_utils, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}], "hexpm", "2455d626e7c61a128b02a4a8caddb092548c3eb613ac6f6a85e4cbb6caddc4d1"}, - "earmark_parser": {:hex, :earmark_parser, "1.4.39", "424642f8335b05bb9eb611aa1564c148a8ee35c9c8a8bba6e129d51a3e3c6769", [:mix], [], "hexpm", "06553a88d1f1846da9ef066b87b57c6f605552cfbe40d20bd8d59cc6bde41944"}, + "earmark_parser": {:hex, :earmark_parser, "1.4.40", "f3534689f6b58f48aa3a9ac850d4f05832654fe257bf0549c08cc290035f70d5", [:mix], [], "hexpm", "cdb34f35892a45325bad21735fadb88033bcb7c4c296a999bde769783f53e46a"}, "ecto": {:hex, :ecto, "3.11.2", "e1d26be989db350a633667c5cda9c3d115ae779b66da567c68c80cfb26a8c9ee", [:mix], [{:decimal, "~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "3c38bca2c6f8d8023f2145326cc8a80100c3ffe4dcbd9842ff867f7fc6156c65"}, "ecto_sql": {:hex, :ecto_sql, "3.11.3", 
"4eb7348ff8101fbc4e6bbc5a4404a24fecbe73a3372d16569526b0cf34ebc195", [:mix], [{:db_connection, "~> 2.4.1 or ~> 2.5", [hex: :db_connection, repo: "hexpm", optional: false]}, {:ecto, "~> 3.11.0", [hex: :ecto, repo: "hexpm", optional: false]}, {:myxql, "~> 0.6", [hex: :myxql, repo: "hexpm", optional: true]}, {:postgrex, "~> 0.16 or ~> 1.0", [hex: :postgrex, repo: "hexpm", optional: true]}, {:tds, "~> 2.1.1 or ~> 2.2", [hex: :tds, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.0 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "e5f36e3d736b99c7fee3e631333b8394ade4bafe9d96d35669fca2d81c2be928"}, "elixir_make": {:hex, :elixir_make, "0.7.7", "7128c60c2476019ed978210c245badf08b03dbec4f24d05790ef791da11aa17c", [:mix], [{:castore, "~> 0.1 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}], "hexpm", "5bc19fff950fad52bbe5f211b12db9ec82c6b34a9647da0c2224b8b8464c7e6c"}, @@ -47,7 +47,7 @@ "ex_cldr_lists": {:hex, :ex_cldr_lists, "2.11.0", "1d39e75f0e493ccc95adfc85c55b4ca34f0771626350ce326d9ab8813d91444e", [:mix], [{:ex_cldr_numbers, "~> 2.25", [hex: :ex_cldr_numbers, repo: "hexpm", optional: false]}, {:ex_doc, "~> 0.18", [hex: :ex_doc, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}], "hexpm", "8132b30a5506ae8a09e5c9a21c23fd60c8837ce6c3a1de9966d813eb78951695"}, "ex_cldr_numbers": {:hex, :ex_cldr_numbers, "2.33.1", "49dc6e77e6d9ad22660aaa2480a7408ad3aedfbe517e4e83e5fe3a7bf5345770", [:mix], [{:decimal, "~> 1.6 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:digital_token, "~> 0.3 or ~> 1.0", [hex: :digital_token, repo: "hexpm", optional: false]}, {:ex_cldr, "~> 2.38", [hex: :ex_cldr, repo: "hexpm", optional: false]}, {:ex_cldr_currencies, "~> 2.16", [hex: :ex_cldr_currencies, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}], "hexpm", "c003bfaa3fdee6bab5195f128b94038c2ce1cf4879a759eef431dd075d9a5dac"}, "ex_cldr_units": {:hex, :ex_cldr_units, "3.17.0", "f26dcde31a8fbb7808afa106ce2c7cbf38e0e0e0678ac523e795cdfdc67ab502", [:mix], [{:cldr_utils, "~> 2.25", [hex: :cldr_utils, repo: "hexpm", optional: false]}, {:decimal, "~> 1.6 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:ex_cldr_lists, "~> 2.10", [hex: :ex_cldr_lists, repo: "hexpm", optional: false]}, {:ex_cldr_numbers, "~> 2.33.0", [hex: :ex_cldr_numbers, repo: "hexpm", optional: false]}, {:ex_doc, "~> 0.18", [hex: :ex_doc, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}], "hexpm", "b9f09c420f5e3b86ed41f135751086bc59bf2bb8e633516e8d3e9f24d6d9e777"}, - "ex_doc": {:hex, :ex_doc, "0.34.1", "9751a0419bc15bc7580c73fde506b17b07f6402a1e5243be9e0f05a68c723368", [:mix], [{:earmark_parser, "~> 1.4.39", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_c, ">= 0.1.0", [hex: :makeup_c, repo: "hexpm", optional: true]}, {:makeup_elixir, "~> 0.14 or ~> 1.0", [hex: :makeup_elixir, repo: "hexpm", optional: false]}, {:makeup_erlang, "~> 0.1 or ~> 1.0", [hex: :makeup_erlang, repo: "hexpm", optional: false]}, {:makeup_html, ">= 0.1.0", [hex: :makeup_html, repo: "hexpm", optional: true]}], "hexpm", "d441f1a86a235f59088978eff870de2e815e290e44a8bd976fe5d64470a4c9d2"}, + "ex_doc": {:hex, :ex_doc, "0.34.2", "13eedf3844ccdce25cfd837b99bea9ad92c4e511233199440488d217c92571e8", [:mix], [{:earmark_parser, "~> 1.4.39", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_c, ">= 0.1.0", [hex: :makeup_c, repo: "hexpm", optional: true]}, 
{:makeup_elixir, "~> 0.14 or ~> 1.0", [hex: :makeup_elixir, repo: "hexpm", optional: false]}, {:makeup_erlang, "~> 0.1 or ~> 1.0", [hex: :makeup_erlang, repo: "hexpm", optional: false]}, {:makeup_html, ">= 0.1.0", [hex: :makeup_html, repo: "hexpm", optional: true]}], "hexpm", "5ce5f16b41208a50106afed3de6a2ed34f4acfd65715b82a0b84b49d995f95c1"}, "ex_json_schema": {:hex, :ex_json_schema, "0.10.2", "7c4b8c1481fdeb1741e2ce66223976edfb9bccebc8014f6aec35d4efe964fb71", [:mix], [{:decimal, "~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}], "hexpm", "37f43be60f8407659d4d0155a7e45e7f406dab1f827051d3d35858a709baf6a6"}, "ex_keccak": {:hex, :ex_keccak, "0.7.5", "f3b733173510d48ae9a1ea1de415e694b2651f35c787e63f33b5ed0013fbfd35", [:mix], [{:rustler, ">= 0.0.0", [hex: :rustler, repo: "hexpm", optional: true]}, {:rustler_precompiled, "~> 0.7", [hex: :rustler_precompiled, repo: "hexpm", optional: false]}], "hexpm", "8a5e1cb7f96fff5e480ff6a121477b90c4fd8c150984086dffd98819f5d83763"}, "ex_machina": {:hex, :ex_machina, "2.8.0", "a0e847b5712065055ec3255840e2c78ef9366634d62390839d4880483be38abe", [:mix], [{:ecto, "~> 2.2 or ~> 3.0", [hex: :ecto, repo: "hexpm", optional: true]}, {:ecto_sql, "~> 3.0", [hex: :ecto_sql, repo: "hexpm", optional: true]}], "hexpm", "79fe1a9c64c0c1c1fab6c4fa5d871682cb90de5885320c187d117004627a7729"}, From a8e483053e518084ca4572fcf99bab2cd352e651 Mon Sep 17 00:00:00 2001 From: Alexander Kolotov Date: Mon, 8 Jul 2024 12:47:27 -0600 Subject: [PATCH 14/32] fix: alternative way to detect blocks range for ArbitrumOne batches (#10295) * alternative way to detect rollup blocks range * code review comments addressed --- .../lib/explorer/chain/arbitrum/reader.ex | 21 ++ .../arbitrum/tracking_batches_statuses.ex | 4 +- .../lib/indexer/fetcher/arbitrum/utils/db.ex | 15 ++ .../lib/indexer/fetcher/arbitrum/utils/rpc.ex | 204 ++++++++++++++++ .../fetcher/arbitrum/workers/new_batches.ex | 227 +++++++++++++++--- config/runtime.exs | 4 +- docker-compose/envs/common-blockscout.env | 20 ++ 7 files changed, 465 insertions(+), 30 deletions(-) diff --git a/apps/explorer/lib/explorer/chain/arbitrum/reader.ex b/apps/explorer/lib/explorer/chain/arbitrum/reader.ex index da2fe3a18cff..1fd6623f26ee 100644 --- a/apps/explorer/lib/explorer/chain/arbitrum/reader.ex +++ b/apps/explorer/lib/explorer/chain/arbitrum/reader.ex @@ -384,6 +384,27 @@ defmodule Explorer.Chain.Arbitrum.Reader do |> Repo.one() end + @doc """ + Retrieves the batch by its number. + + ## Parameters + - `number`: The number of a rollup batch. + + ## Returns + - An instance of `Explorer.Chain.Arbitrum.L1Batch`, or `nil` if no batch with + such a number is found. + """ + @spec get_batch_by_number(non_neg_integer()) :: L1Batch.t() | nil + def get_batch_by_number(number) do + query = + from(batch in L1Batch, + where: batch.number == ^number + ) + + query + |> Repo.one() + end + @doc """ Retrieves the L1 block number where the confirmation transaction of the highest confirmed rollup block was included. 
diff --git a/apps/indexer/lib/indexer/fetcher/arbitrum/tracking_batches_statuses.ex b/apps/indexer/lib/indexer/fetcher/arbitrum/tracking_batches_statuses.ex index 1974ed14dcae..05e101696e53 100644 --- a/apps/indexer/lib/indexer/fetcher/arbitrum/tracking_batches_statuses.ex +++ b/apps/indexer/lib/indexer/fetcher/arbitrum/tracking_batches_statuses.ex @@ -90,6 +90,7 @@ defmodule Indexer.Fetcher.Arbitrum.TrackingBatchesStatuses do finalized_confirmations = config_tracker[:finalized_confirmations] confirmation_batches_depth = config_tracker[:confirmation_batches_depth] new_batches_limit = config_tracker[:new_batches_limit] + node_interface_address = config_tracker[:node_interface_contract] Process.send(self(), :init_worker, []) @@ -113,7 +114,8 @@ defmodule Indexer.Fetcher.Arbitrum.TrackingBatchesStatuses do l1_rollup_init_block: l1_rollup_init_block, new_batches_limit: new_batches_limit, messages_to_blocks_shift: messages_to_blocks_shift, - confirmation_batches_depth: confirmation_batches_depth + confirmation_batches_depth: confirmation_batches_depth, + node_interface_address: node_interface_address }, data: %{} }} diff --git a/apps/indexer/lib/indexer/fetcher/arbitrum/utils/db.ex b/apps/indexer/lib/indexer/fetcher/arbitrum/utils/db.ex index 1dc5ce20884a..703b0693fe43 100644 --- a/apps/indexer/lib/indexer/fetcher/arbitrum/utils/db.ex +++ b/apps/indexer/lib/indexer/fetcher/arbitrum/utils/db.ex @@ -456,6 +456,21 @@ defmodule Indexer.Fetcher.Arbitrum.Utils.Db do end end + @doc """ + Retrieves a batch by its number. + + ## Parameters + - `number`: The number of a rollup batch. + + ## Returns + - An instance of `Explorer.Chain.Arbitrum.L1Batch`, or `nil` if no batch with + such a number is found. + """ + @spec get_batch_by_number(non_neg_integer()) :: Arbitrum.L1Batch.t() | nil + def get_batch_by_number(number) do + Reader.get_batch_by_number(number) + end + @doc """ Retrieves rollup blocks within a specified block range that have not yet been confirmed. diff --git a/apps/indexer/lib/indexer/fetcher/arbitrum/utils/rpc.ex b/apps/indexer/lib/indexer/fetcher/arbitrum/utils/rpc.ex index a5875924d363..e099815a173d 100644 --- a/apps/indexer/lib/indexer/fetcher/arbitrum/utils/rpc.ex +++ b/apps/indexer/lib/indexer/fetcher/arbitrum/utils/rpc.ex @@ -12,8 +12,11 @@ defmodule Indexer.Fetcher.Arbitrum.Utils.Rpc do @zero_hash "0000000000000000000000000000000000000000000000000000000000000000" @rpc_resend_attempts 20 + # outbox() @selector_outbox "ce11e6ab" + # sequencerInbox() @selector_sequencer_inbox "ee35f327" + # bridge() @selector_bridge "e78cea92" @rollup_contract_abi [ %{ @@ -69,6 +72,30 @@ defmodule Indexer.Fetcher.Arbitrum.Utils.Rpc do } ] + # findBatchContainingBlock(uint64 blockNum) + @selector_find_batch_containing_block "81f1adaf" + @node_interface_contract_abi [ + %{ + "inputs" => [ + %{ + "internalType" => "uint64", + "name" => "blockNum", + "type" => "uint64" + } + ], + "name" => "findBatchContainingBlock", + "outputs" => [ + %{ + "internalType" => "uint64", + "name" => "batch", + "type" => "uint64" + } + ], + "stateMutability" => "view", + "type" => "function" + } + ] + @doc """ Constructs a JSON RPC request to retrieve a transaction by its hash. @@ -406,6 +433,183 @@ defmodule Indexer.Fetcher.Arbitrum.Utils.Rpc do end end + @doc """ + Identifies the block range for a batch by using the block number located on one end of the range. + + The function verifies suspicious block numbers by using the + `findBatchContainingBlock` method of the Node Interface contract in a binary + search. 
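+
+    As an illustration (made-up numbers): if a batch is known to end at rollup
+    block 7120 and the search starts there with a positive step, the function
+    walks down until the reported batch number changes and returns
+    `{7000, 7120}` for a batch that starts at block 7000.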
+ + The sign of the step determines the direction of the search: + - A positive step indicates the search is for the lowest block in the range. + - A negative step indicates the search is for the highest block in the range. + + ## Parameters + - `initial_block`: The starting block number for the search. + - `initial_step`: The initial step size for the binary search. + - `required_batch_number`: The target batch for which the blocks range is + discovered. + - `rollup_config`: A map containing the `NodeInterface` contract address and + configuration parameters for the JSON RPC connection. + + ## Returns + - A tuple `{start_block, end_block}` representing the range of blocks included + in the specified batch. + """ + @spec get_block_range_for_batch( + EthereumJSONRPC.block_number(), + integer(), + non_neg_integer(), + %{ + node_interface_address: EthereumJSONRPC.address(), + json_rpc_named_arguments: EthereumJSONRPC.json_rpc_named_arguments() + } + ) :: {non_neg_integer(), non_neg_integer()} + def get_block_range_for_batch( + initial_block, + initial_step, + required_batch_number, + rollup_config + ) do + opposite_block = + do_binary_search_of_opposite_block( + initial_block - initial_step, + initial_step, + required_batch_number, + rollup_config, + required_batch_number, + initial_block + ) + + # the default direction for the block range exploration is chosen to be from the highest to lowest + # and the initial step is positive in this case + if initial_step > 0 do + {opposite_block, initial_block} + else + {initial_block, opposite_block} + end + end + + # Performs a binary search to find the opposite block for a rollup blocks + # range included in a batch with the specified number. The function calls + # `findBatchContainingBlock` of the Node Interface contract to determine the + # batch number of the inspected block and, based on the call result and the + # previously inspected block, decides whether the opposite block is found or + # another iteration is required. + # + # Assumptions: + # - The initial step is low enough to not jump more than one batch in a single + # iteration. + # - The function can discover the opposite block in any direction depending on + # the sign of the step. If the step is positive, the lookup happens for the + # lowest block in the range. If the step is negative, the lookup is for the + # highest block in the range. + # + # Parameters: + # - `inspected_block`: The block number currently being inspected. + # - `step`: The step size used for the binary search. + # - `required_batch_number`: The target batch for which blocks range is + # discovered. + # - `rollup_config`: A map containing the `NodeInterface` contract address and + # configuration parameters for the JSON RPC connection. + # - `prev_batch_number`: The number of the batch where the block was inspected + # on the previous iteration. + # - `prev_inspected_block`: The block number that was previously inspected. + # + # Returns: + # - The block number of the opposite block in the rollup. 
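+  # (Illustrative walk-through with made-up numbers: searching down from block
+  # 7120 of batch 500, the step keeps its sign while the reported batch number
+  # stays 500; once an inspected block lands in batch 499, the step is halved
+  # and reversed, and the search returns block 7000 once two neighboring blocks
+  # fall into different batches.)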
+ @spec do_binary_search_of_opposite_block( + non_neg_integer(), + integer(), + non_neg_integer(), + %{ + node_interface_address: EthereumJSONRPC.address(), + json_rpc_named_arguments: EthereumJSONRPC.json_rpc_named_arguments() + }, + non_neg_integer(), + non_neg_integer() + ) :: non_neg_integer() + defp do_binary_search_of_opposite_block( + inspected_block, + step, + required_batch_number, + %{node_interface_address: _, json_rpc_named_arguments: _} = rollup_config, + prev_batch_number, + prev_inspected_block + ) do + new_batch_number = + get_batch_number_for_rollup_block( + rollup_config.node_interface_address, + inspected_block, + rollup_config.json_rpc_named_arguments + ) + + next_block_to_inspect = max(1, inspected_block - step) + + if new_batch_number == prev_batch_number do + do_binary_search_of_opposite_block( + next_block_to_inspect, + step, + required_batch_number, + rollup_config, + new_batch_number, + inspected_block + ) + else + if abs(prev_inspected_block - inspected_block) == 1 and new_batch_number == required_batch_number do + inspected_block + else + # credo:disable-for-next-line Credo.Check.Refactor.Nesting + new_step = if(abs(step) == 1, do: -step, else: -div(step, 2)) + + do_binary_search_of_opposite_block( + next_block_to_inspect, + new_step, + required_batch_number, + rollup_config, + new_batch_number, + inspected_block + ) + end + end + end + + # Retrieves the batch number for a given rollup block by interacting with the + # node interface contract. It calls the `findBatchContainingBlock` method of + # the contract to find the batch containing the specified block number. + # + # Parameters: + # - `node_interface_address`: The address of the node interface contract. + # - `block_number`: The rollup block number. + # - `json_rpc_named_arguments`: Configuration parameters for the JSON RPC + # connection. + # + # Returns: + # - The number of a batch containing the specified rollup block. + @spec get_batch_number_for_rollup_block( + EthereumJSONRPC.address(), + EthereumJSONRPC.block_number(), + EthereumJSONRPC.json_rpc_named_arguments() + ) :: non_neg_integer() + defp get_batch_number_for_rollup_block(node_interface_address, block_number, json_rpc_named_arguments) do + [ + %{ + contract_address: node_interface_address, + method_id: @selector_find_batch_containing_block, + args: [block_number] + } + ] + |> IndexerHelper.read_contracts_with_retries( + @node_interface_contract_abi, + json_rpc_named_arguments, + @rpc_resend_attempts + ) + |> Kernel.elem(0) + |> List.first() + |> Kernel.elem(1) + |> List.first() + end + @doc """ Converts a transaction hash from its hexadecimal string representation to a binary format. diff --git a/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_batches.ex b/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_batches.ex index 40368c85b7ef..8a222a8d6a99 100644 --- a/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_batches.ex +++ b/apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_batches.ex @@ -42,7 +42,7 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do require Logger # keccak256("SequencerBatchDelivered(uint256,bytes32,bytes32,bytes32,uint256,(uint64,uint64,uint64,uint64),uint8)") - @message_sequencer_batch_delivered "0x7394f4a19a13c7b92b5bb71033245305946ef78452f7b4986ac1390b5df4ebd7" + @event_sequencer_batch_delivered "0x7394f4a19a13c7b92b5bb71033245305946ef78452f7b4986ac1390b5df4ebd7" @doc """ Discovers and imports new batches of rollup transactions within the current L1 block range. 
@@ -88,6 +88,7 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do :chunk_size => non_neg_integer(), optional(any()) => any() }, + :node_interface_address => binary(), optional(any()) => any() }, :data => %{:new_batches_start_block => non_neg_integer(), optional(any()) => any()}, @@ -100,7 +101,8 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do rollup_rpc: rollup_rpc_config, l1_sequencer_inbox_address: sequencer_inbox_address, messages_to_blocks_shift: messages_to_blocks_shift, - new_batches_limit: new_batches_limit + new_batches_limit: new_batches_limit, + node_interface_address: node_interface_address }, data: %{new_batches_start_block: start_block} } = _state @@ -126,6 +128,7 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do new_batches_limit, messages_to_blocks_shift, l1_rpc_config, + node_interface_address, rollup_rpc_config ) @@ -181,6 +184,7 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do :chunk_size => non_neg_integer(), optional(any()) => any() }, + :node_interface_address => binary(), optional(any()) => any() }, :data => %{:historical_batches_end_block => any(), optional(any()) => any()}, @@ -194,7 +198,8 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do l1_sequencer_inbox_address: sequencer_inbox_address, messages_to_blocks_shift: messages_to_blocks_shift, l1_rollup_init_block: l1_rollup_init_block, - new_batches_limit: new_batches_limit + new_batches_limit: new_batches_limit, + node_interface_address: node_interface_address }, data: %{historical_batches_end_block: end_block} } = _state @@ -211,6 +216,7 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do new_batches_limit, messages_to_blocks_shift, l1_rpc_config, + node_interface_address, rollup_rpc_config ) @@ -232,6 +238,7 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do # - `new_batches_limit`: Limit of new batches to process in one iteration. # - `messages_to_blocks_shift`: Shift value for message to block number mapping. # - `l1_rpc_config`: Configuration for L1 RPC calls. + # - `node_interface_address`: The address of the NodeInterface contract on the rollup. # - `rollup_rpc_config`: Configuration for rollup RPC calls. # # ## Returns @@ -243,6 +250,7 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do new_batches_limit, messages_to_blocks_shift, l1_rpc_config, + node_interface_address, rollup_rpc_config ) do do_discover( @@ -252,6 +260,7 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do new_batches_limit, messages_to_blocks_shift, l1_rpc_config, + node_interface_address, rollup_rpc_config ) end @@ -268,6 +277,7 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do # - `new_batches_limit`: Limit of new batches to process in one iteration. # - `messages_to_blocks_shift`: Shift value for message to block number mapping. # - `l1_rpc_config`: Configuration for L1 RPC calls. + # - `node_interface_address`: The address of the NodeInterface contract on the rollup. # - `rollup_rpc_config`: Configuration for rollup RPC calls. 
# # ## Returns @@ -279,6 +289,7 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do new_batches_limit, messages_to_blocks_shift, l1_rpc_config, + node_interface_address, rollup_rpc_config ) do do_discover( @@ -288,6 +299,7 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do new_batches_limit, messages_to_blocks_shift, l1_rpc_config, + node_interface_address, rollup_rpc_config ) end @@ -312,6 +324,7 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do # - `messages_to_blocks_shift`: The value used to align message counts with rollup block # numbers. # - `l1_rpc_config`: RPC configuration parameters for L1. + # - `node_interface_address`: The address of the NodeInterface contract on the rollup. # - `rollup_rpc_config`: RPC configuration parameters for rollup data. # # ## Returns @@ -327,6 +340,7 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do :chunk_size => non_neg_integer(), optional(any()) => any() }, + binary(), %{ :json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(), :chunk_size => non_neg_integer(), @@ -340,6 +354,7 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do new_batches_limit, messages_to_blocks_shift, l1_rpc_config, + node_interface_address, rollup_rpc_config ) do raw_logs = @@ -372,6 +387,7 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do messages_to_blocks_shift, l1_rpc_config, sequencer_inbox_address, + node_interface_address, rollup_rpc_config ) @@ -417,7 +433,7 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do start_block, end_block, sequencer_inbox_address, - [@message_sequencer_batch_delivered], + [@event_sequencer_batch_delivered], json_rpc_named_arguments ) @@ -445,6 +461,7 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do # - `msg_to_block_shift`: The shift value for mapping batch messages to block numbers. # - `l1_rpc_config`: The RPC configuration for L1 requests. # - `sequencer_inbox_address`: The address of the SequencerInbox contract. + # - `node_interface_address`: The address of the NodeInterface contract on the rollup. # - `rollup_rpc_config`: The RPC configuration for rollup data requests. 
# # ## Returns @@ -460,18 +477,30 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do optional(any()) => any() }, binary(), + binary(), %{ :json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(), :chunk_size => non_neg_integer(), optional(any()) => any() } - ) :: - {[Arbitrum.L1Batch.to_import()], [Arbitrum.LifecycleTransaction.to_import()], - [Arbitrum.BatchBlock.to_import()], [Arbitrum.BatchTransaction.to_import()], [Arbitrum.Message.to_import()], - [Arbitrum.DaMultiPurposeRecord.to_import()]} - defp handle_batches_from_logs(logs, msg_to_block_shift, l1_rpc_config, sequencer_inbox_address, rollup_rpc_config) + ) :: { + [Arbitrum.L1Batch.to_import()], + [Arbitrum.LifecycleTransaction.to_import()], + [Arbitrum.BatchBlock.to_import()], + [Arbitrum.BatchTransaction.to_import()], + [Arbitrum.Message.to_import()], + [Arbitrum.DaMultiPurposeRecord.to_import()] + } + defp handle_batches_from_logs( + logs, + msg_to_block_shift, + l1_rpc_config, + sequencer_inbox_address, + node_interface_address, + rollup_rpc_config + ) - defp handle_batches_from_logs([], _, _, _, _), do: {[], [], [], [], [], []} + defp handle_batches_from_logs([], _, _, _, _, _), do: {[], [], [], [], [], []} defp handle_batches_from_logs( logs, @@ -481,6 +510,7 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do chunk_size: chunk_size } = l1_rpc_config, sequencer_inbox_address, + node_interface_address, rollup_rpc_config ) do existing_batches = @@ -494,7 +524,17 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do blocks_to_ts = Rpc.execute_blocks_requests_and_get_ts(blocks_requests, json_rpc_named_arguments, chunk_size) {initial_lifecycle_txs, batches_to_import, da_info} = - execute_tx_requests_parse_txs_calldata(txs_requests, msg_to_block_shift, blocks_to_ts, batches, l1_rpc_config) + execute_tx_requests_parse_txs_calldata( + txs_requests, + msg_to_block_shift, + blocks_to_ts, + batches, + l1_rpc_config, + %{ + node_interface_address: node_interface_address, + json_rpc_named_arguments: rollup_rpc_config.json_rpc_named_arguments + } + ) # Check if the commitment transactions for the batches which are already in the database # needs to be updated in case of reorgs @@ -675,15 +715,15 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do # - `blocks_to_ts`: A map of block numbers to their timestamps, required to complete # data for corresponding lifecycle transactions. # - `batches`: The current batch data to be updated. - # - A configuration map containing: - # - `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection. - # - `track_finalization`: A boolean flag indicating if finalization tracking is needed. - # - `chunk_size`: The size of chunks for batch processing. + # - A configuration map containing L1 JSON RPC arguments, a track finalization flag, + # and a chunk size for batch processing. + # - A configuration map containing the rollup RPC arguments and the address of the + # NodeInterface contract. # # ## Returns # - A tuple containing: - # - A map of lifecycle (L1) transactions, including their hashes, block numbers, - # timestamps, and statuses (finalized or unfinalized). + # - A map of lifecycle (L1) transactions, which are not yet compatible with + # database import and require further processing. # - An updated map of batch descriptions with block ranges and data availability # information. # - A list of data availability information structs for Anytrust or Celestia. 
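For orientation, a minimal sketch (not part of the patch) of the two configuration maps taken by `execute_tx_requests_parse_txs_calldata/6`, derived only from the typespecs above; the transport options, chunk size, and address below are assumed placeholders, not values from a real deployment:

# Hedged sketch, not part of the patch: plausible shapes of the two config
# maps, based only on the typespecs above. All concrete values are placeholders.
l1_rpc_config = %{
  # JSON RPC connection settings for L1; transport options are placeholders
  json_rpc_named_arguments: [transport: EthereumJSONRPC.HTTP, transport_options: []],
  track_finalization: true,
  chunk_size: 20
}

rollup_config = %{
  # assumed default: the NodeInterface precompile address used as the
  # INDEXER_ARBITRUM_NODE_INTERFACE_CONTRACT default elsewhere in this patch
  node_interface_address: "0x00000000000000000000000000000000000000C8",
  json_rpc_named_arguments: [transport: EthereumJSONRPC.HTTP, transport_options: []]
}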
@@ -697,6 +737,11 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do
           :json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(),
           :track_finalization => boolean(),
           optional(any()) => any()
+         },
+         %{
+           :node_interface_address => binary(),
+           :json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(),
+           optional(any()) => any()
          }
        ) :: {%{
@@ -715,11 +760,18 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do
             optional(any()) => any()
           }
         }, [Anytrust.t() | Celestia.t()]}
-  defp execute_tx_requests_parse_txs_calldata(txs_requests, msg_to_block_shift, blocks_to_ts, batches, %{
-         json_rpc_named_arguments: json_rpc_named_arguments,
-         track_finalization: track_finalization?,
-         chunk_size: chunk_size
-       }) do
+  defp execute_tx_requests_parse_txs_calldata(
+         txs_requests,
+         msg_to_block_shift,
+         blocks_to_ts,
+         batches,
+         %{
+           json_rpc_named_arguments: json_rpc_named_arguments,
+           track_finalization: track_finalization?,
+           chunk_size: chunk_size
+         },
+         rollup_config
+       ) do
     txs_requests
     |> Enum.chunk_every(chunk_size)
     |> Enum.reduce({%{}, batches, []}, fn chunk, {l1_txs, updated_batches, da_info} ->
@@ -736,22 +788,32 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do
           {batch_num, prev_message_count, new_message_count, extra_data} =
             add_sequencer_l2_batch_from_origin_calldata_parse(resp["input"])

+          # When the rollup blocks range was not discovered in the previous step
+          # (which happens when legacy events are handled), a more sophisticated
+          # lookup is required, based on the previously discovered batches and on
+          # requests to the NodeInterface contract on the rollup.
+          {batch_start_block, batch_end_block} =
+            determine_batch_block_range(
+              batch_num,
+              prev_message_count,
+              new_message_count,
+              msg_to_block_shift,
+              rollup_config
+            )
+
           {da_type, da_data} =
             case DataAvailabilityInfo.examine_batch_accompanying_data(batch_num, extra_data) do
               {:ok, t, d} -> {t, d}
               {:error, _, _} -> {nil, nil}
             end

-          # In some cases extracted numbers for messages does not linked directly
-          # with rollup blocks, for this, the numbers are shifted by a value specific
-          # for particular rollup
           updated_batches_map =
             Map.put(
               batches_map,
               batch_num,
               Map.merge(batches_map[batch_num], %{
-                start_block: prev_message_count + msg_to_block_shift,
-                end_block: new_message_count + msg_to_block_shift - 1,
+                start_block: batch_start_block,
+                end_block: batch_end_block,
                 batch_container: da_type
               })
             )
@@ -785,7 +847,7 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do

   # Parses calldata of `addSequencerL2BatchFromOrigin` or `addSequencerL2BatchFromBlobs`
   # functions to extract batch information.
@spec add_sequencer_l2_batch_from_origin_calldata_parse(binary()) :: - {non_neg_integer(), non_neg_integer(), non_neg_integer(), binary() | nil} + {non_neg_integer(), non_neg_integer() | nil, non_neg_integer() | nil, binary() | nil} defp add_sequencer_l2_batch_from_origin_calldata_parse(calldata) do case calldata do "0x8f111f3c" <> encoded_params -> @@ -826,6 +888,115 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do ) {sequence_number, prev_message_count, new_message_count, nil} + + "0x6f12b0c9" <> encoded_params -> + # addSequencerL2BatchFromOrigin(uint256 sequenceNumber, bytes calldata data, uint256 afterDelayedMessagesRead, address gasRefunder) + [sequence_number, data, _after_delayed_messages_read, _gas_refunder] = + TypeDecoder.decode( + Base.decode16!(encoded_params, case: :lower), + %FunctionSelector{ + function: "addSequencerL2BatchFromOrigin", + types: [ + {:uint, 256}, + :bytes, + {:uint, 256}, + :address + ] + } + ) + + {sequence_number, nil, nil, data} + end + end + + # Determines the block range for a batch based on provided message counts and + # previously discovered batches. If the message counts are nil, it attempts to + # find the block range by inspecting neighboring batches. + # + # Parameters: + # - `batch_number`: The batch number for which the block range is determined. + # - `prev_message_count`: The message count of the previous batch, or nil if not + # available. + # - `new_message_count`: The message count of the current batch, or nil if not + # available. + # - `msg_to_block_shift`: A shift value used to adjust the block numbers based + # on message counts. + # - `rollup_config`: A map containing the `NodeInterface` contract address and + # configuration parameters for the JSON RPC connection. + # + # Returns: + # - A tuple `{start_block, end_block}` representing the range of blocks included + # in the specified batch. + # + # If both `prev_message_count` and `new_message_count` are nil, the function logs + # an attempt to determine the block range based on already discovered batches. + # It calculates the highest and lowest blocks for the neighboring batches and + # uses them to infer the block range for the current batch. If only one neighbor + # provides a block, it performs a binary search to find the opposite block. + # + # If the message counts are provided, it adjusts them by the specific shift value + # `msg_to_block_shift` and returns the adjusted block range. + @spec determine_batch_block_range( + non_neg_integer(), + non_neg_integer() | nil, + non_neg_integer() | nil, + non_neg_integer(), + %{ + node_interface_address: EthereumJSONRPC.address(), + json_rpc_named_arguments: EthereumJSONRPC.json_rpc_named_arguments() + } + ) :: {non_neg_integer(), non_neg_integer()} + defp determine_batch_block_range(batch_number, prev_message_count, new_message_count, _, rollup_config) + when is_nil(prev_message_count) and is_nil(new_message_count) do + log_info("No blocks range for batch ##{batch_number}. 
Trying to find it based on already discovered batches.")
+
+    {highest_block, step_highest_to_lowest} = get_expected_highest_block_and_step(batch_number + 1)
+    {lowest_block, step_lowest_to_highest} = get_expected_lowest_block_and_step(batch_number - 1)
+
+    {start_block, end_block} =
+      case {lowest_block, highest_block} do
+        {nil, nil} -> raise "Impossible to determine the block range for batch #{batch_number}"
+        {lowest, nil} -> Rpc.get_block_range_for_batch(lowest, step_lowest_to_highest, batch_number, rollup_config)
+        {nil, highest} -> Rpc.get_block_range_for_batch(highest, step_highest_to_lowest, batch_number, rollup_config)
+        {lowest, highest} -> {lowest, highest}
+      end
+
+    log_info("Blocks range for batch ##{batch_number} is determined as #{start_block}..#{end_block}")
+    {start_block, end_block}
+  end
+
+  defp determine_batch_block_range(_, prev_message_count, new_message_count, msg_to_block_shift, _) do
+    # In some cases the numbers extracted for messages are not linked directly
+    # to rollup blocks; to handle this, the numbers are shifted by a value
+    # specific to the particular rollup
+    {prev_message_count + msg_to_block_shift, new_message_count + msg_to_block_shift - 1}
+  end
+
+  # Calculates the expected highest block and step required for the lowest block lookup for a given batch number.
+  @spec get_expected_highest_block_and_step(non_neg_integer()) :: {non_neg_integer(), non_neg_integer()} | {nil, nil}
+  defp get_expected_highest_block_and_step(batch_number) do
+    # since the default direction for the block range exploration is chosen to be from the highest to lowest
+    # the step is calculated to be positive
+    case Db.get_batch_by_number(batch_number) do
+      nil ->
+        {nil, nil}
+
+      %Arbitrum.L1Batch{start_block: start_block, end_block: end_block} ->
+        {start_block - 1, div(end_block - start_block, 2)}
+    end
+  end
+
+  # Calculates the expected lowest block and step required for the highest block lookup for a given batch number.
+ @spec get_expected_lowest_block_and_step(non_neg_integer()) :: {non_neg_integer(), integer()} | {nil, nil} + defp get_expected_lowest_block_and_step(batch_number) do + # since the default direction for the block range exploration is chosen to be from the highest to lowest + # the step is calculated to be negative + case Db.get_batch_by_number(batch_number) do + nil -> + {nil, nil} + + %Arbitrum.L1Batch{start_block: start_block, end_block: end_block} -> + {end_block + 1, div(start_block - end_block, 2)} end end diff --git a/config/runtime.exs b/config/runtime.exs index dfd8531baf29..49bd6a6f7115 100644 --- a/config/runtime.exs +++ b/config/runtime.exs @@ -902,7 +902,9 @@ config :indexer, Indexer.Fetcher.Arbitrum.TrackingBatchesStatuses, messages_to_blocks_shift: ConfigHelper.parse_integer_env_var("INDEXER_ARBITRUM_BATCHES_TRACKING_MESSAGES_TO_BLOCKS_SHIFT", 0), finalized_confirmations: ConfigHelper.parse_bool_env_var("INDEXER_ARBITRUM_CONFIRMATIONS_TRACKING_FINALIZED", "true"), - new_batches_limit: ConfigHelper.parse_integer_env_var("INDEXER_ARBITRUM_NEW_BATCHES_LIMIT", 10) + new_batches_limit: ConfigHelper.parse_integer_env_var("INDEXER_ARBITRUM_NEW_BATCHES_LIMIT", 10), + node_interface_contract: + ConfigHelper.safe_get_env("INDEXER_ARBITRUM_NODE_INTERFACE_CONTRACT", "0x00000000000000000000000000000000000000C8") config :indexer, Indexer.Fetcher.Arbitrum.TrackingBatchesStatuses.Supervisor, enabled: ConfigHelper.parse_bool_env_var("INDEXER_ARBITRUM_BATCHES_TRACKING_ENABLED") diff --git a/docker-compose/envs/common-blockscout.env b/docker-compose/envs/common-blockscout.env index 23611894a5d9..1f6138874d82 100644 --- a/docker-compose/envs/common-blockscout.env +++ b/docker-compose/envs/common-blockscout.env @@ -227,6 +227,26 @@ INDEXER_DISABLE_INTERNAL_TRANSACTIONS_FETCHER=false # INDEXER_ZKSYNC_NEW_BATCHES_RECHECK_INTERVAL= # INDEXER_ZKSYNC_L1_RPC= # INDEXER_ZKSYNC_BATCHES_STATUS_RECHECK_INTERVAL= +# INDEXER_ARBITRUM_ARBSYS_CONTRACT= +# INDEXER_ARBITRUM_NODE_INTERFACE_CONTRACT= +# INDEXER_ARBITRUM_L1_RPC= +# INDEXER_ARBITRUM_L1_RPC_CHUNK_SIZE= +# INDEXER_ARBITRUM_L1_RPC_HISTORICAL_BLOCKS_RANGE= +# INDEXER_ARBITRUM_L1_ROLLUP_CONTRACT= +# INDEXER_ARBITRUM_L1_ROLLUP_INIT_BLOCK= +# INDEXER_ARBITRUM_L1_COMMON_START_BLOCK= +# INDEXER_ARBITRUM_ROLLUP_CHUNK_SIZE= +# INDEXER_ARBITRUM_BATCHES_TRACKING_ENABLED= +# INDEXER_ARBITRUM_BATCHES_TRACKING_RECHECK_INTERVAL= +# INDEXER_ARBITRUM_NEW_BATCHES_LIMIT= +# INDEXER_ARBITRUM_BATCHES_TRACKING_MESSAGES_TO_BLOCKS_SHIFT= +# INDEXER_ARBITRUM_CONFIRMATIONS_TRACKING_FINALIZED= +# INDEXER_ARBITRUM_BATCHES_TRACKING_L1_FINALIZATION_CHECK_ENABLED= +# INDEXER_ARBITRUM_BRIDGE_MESSAGES_TRACKING_ENABLED= +# INDEXER_ARBITRUM_TRACKING_MESSAGES_ON_L1_RECHECK_INTERVAL= +# INDEXER_ARBITRUM_MISSED_MESSAGES_RECHECK_INTERVAL= +# INDEXER_ARBITRUM_MISSED_MESSAGES_TO_L2_BLOCK_DEPTH= +# INDEXER_ARBITRUM_MISSED_MESSAGES_TO_L1_BLOCK_DEPTH= # INDEXER_REALTIME_FETCHER_MAX_GAP= # INDEXER_FETCHER_INIT_QUERY_LIMIT= # INDEXER_TOKEN_BALANCES_FETCHER_INIT_QUERY_LIMIT= From 9a98839aca82a80386b54b6251c2e2210f3bf000 Mon Sep 17 00:00:00 2001 From: Victor Baranov Date: Mon, 8 Jul 2024 21:54:53 +0300 Subject: [PATCH 15/32] Fix /stats/charts/market test (#10392) --- .../controllers/api/v2/stats_controller_test.exs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/apps/block_scout_web/test/block_scout_web/controllers/api/v2/stats_controller_test.exs b/apps/block_scout_web/test/block_scout_web/controllers/api/v2/stats_controller_test.exs index 907569ccb33a..bed518776149 100644 
--- a/apps/block_scout_web/test/block_scout_web/controllers/api/v2/stats_controller_test.exs +++ b/apps/block_scout_web/test/block_scout_web/controllers/api/v2/stats_controller_test.exs @@ -38,13 +38,13 @@ defmodule BlockScoutWeb.API.V2.StatsControllerTest do describe "/stats/charts/market" do setup do - configuration = Application.get_env(:explorer, Explorer.ExchangeRates) - Application.put_env(:explorer, Explorer.ExchangeRates, enabled: false) + configuration = Application.get_env(:explorer, Explorer.Market.MarketHistoryCache) + Application.put_env(:explorer, Explorer.Market.MarketHistoryCache, cache_period: 0) :ok on_exit(fn -> - Application.put_env(:explorer, Explorer.ExchangeRates, configuration) + Application.put_env(:explorer, Explorer.Market.MarketHistoryCache, configuration) end) end From a1817f4148ad163ceabfde00fff854f0a4bc1a8e Mon Sep 17 00:00:00 2001 From: Victor Baranov Date: Tue, 9 Jul 2024 14:26:13 +0300 Subject: [PATCH 16/32] Fix missing expectation in mock_beacon_storage_pointer_request (#10399) * Fix missing expectation in mock_beacon_storage_pointer_request * Update apps/ethereum_jsonrpc/lib/ethereum_jsonrpc/utility/endpoint_availability_checker.ex Co-authored-by: Qwerty5Uiop <105209995+Qwerty5Uiop@users.noreply.github.com> --------- Co-authored-by: Qwerty5Uiop <105209995+Qwerty5Uiop@users.noreply.github.com> --- apps/block_scout_web/config/runtime/test.exs | 1 + .../utility/endpoint_availability_checker.ex | 8 ++++++-- config/runtime.exs | 2 ++ 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/apps/block_scout_web/config/runtime/test.exs b/apps/block_scout_web/config/runtime/test.exs index a83f9dd0eb1f..6653c17ab66d 100644 --- a/apps/block_scout_web/config/runtime/test.exs +++ b/apps/block_scout_web/config/runtime/test.exs @@ -2,6 +2,7 @@ import Config alias EthereumJSONRPC.Variant +config :ethereum_jsonrpc, EthereumJSONRPC.Utility.EndpointAvailabilityChecker, enabled: false config :explorer, Explorer.ExchangeRates, enabled: false, store: :none config :ueberauth, Ueberauth.Strategy.Auth0.OAuth, diff --git a/apps/ethereum_jsonrpc/lib/ethereum_jsonrpc/utility/endpoint_availability_checker.ex b/apps/ethereum_jsonrpc/lib/ethereum_jsonrpc/utility/endpoint_availability_checker.ex index 45f2f687d867..2f1c1fdb3f77 100644 --- a/apps/ethereum_jsonrpc/lib/ethereum_jsonrpc/utility/endpoint_availability_checker.ex +++ b/apps/ethereum_jsonrpc/lib/ethereum_jsonrpc/utility/endpoint_availability_checker.ex @@ -16,9 +16,13 @@ defmodule EthereumJSONRPC.Utility.EndpointAvailabilityChecker do end def init(_) do - schedule_next_check() + if Application.get_env(:ethereum_jsonrpc, __MODULE__)[:enabled] do + schedule_next_check() - {:ok, %{unavailable_endpoints_arguments: []}} + {:ok, %{unavailable_endpoints_arguments: []}} + else + :ignore + end end def add_endpoint(json_rpc_named_arguments, url_type) do diff --git a/config/runtime.exs b/config/runtime.exs index 49bd6a6f7115..4ba4f097b3c4 100644 --- a/config/runtime.exs +++ b/config/runtime.exs @@ -200,6 +200,8 @@ config :ethereum_jsonrpc, EthereumJSONRPC.PendingTransaction, config :ethereum_jsonrpc, EthereumJSONRPC.RequestCoordinator, wait_per_timeout: ConfigHelper.parse_time_env_var("ETHEREUM_JSONRPC_WAIT_PER_TIMEOUT", "20s") +config :ethereum_jsonrpc, EthereumJSONRPC.Utility.EndpointAvailabilityChecker, enabled: true + ################ ### Explorer ### ################ From 82f053e392e3be732bc3fcc43d09e4f6721d8b3d Mon Sep 17 00:00:00 2001 From: Victor Baranov Date: Tue, 9 Jul 2024 15:55:28 +0300 Subject: [PATCH 17/32] fix: 
/addresses empty list flickering test fix (#10400) --- .../controllers/api/v2/address_controller_test.exs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/apps/block_scout_web/test/block_scout_web/controllers/api/v2/address_controller_test.exs b/apps/block_scout_web/test/block_scout_web/controllers/api/v2/address_controller_test.exs index cf894b61f02b..fd127eab9215 100644 --- a/apps/block_scout_web/test/block_scout_web/controllers/api/v2/address_controller_test.exs +++ b/apps/block_scout_web/test/block_scout_web/controllers/api/v2/address_controller_test.exs @@ -2423,8 +2423,12 @@ defmodule BlockScoutWeb.API.V2.AddressControllerTest do total_supply = to_string(Chain.total_supply()) - assert %{"items" => [], "next_page_params" => nil, "exchange_rate" => nil, "total_supply" => ^total_supply} = - json_response(request, 200) + pattern_response = %{"items" => [], "next_page_params" => nil, "total_supply" => total_supply} + response = json_response(request, 200) + + assert pattern_response["items"] == response["items"] + assert pattern_response["next_page_params"] == response["next_page_params"] + assert pattern_response["total_supply"] == response["total_supply"] end test "check pagination", %{conn: conn} do From 772fae1750686873bfba7d3eee30066510d686bb Mon Sep 17 00:00:00 2001 From: Victor Baranov Date: Tue, 9 Jul 2024 17:32:44 +0300 Subject: [PATCH 18/32] Fix flickering "updates cache if initial value is zero" tests (#10402) --- apps/explorer/test/explorer/chain/cache/block_test.exs | 2 +- apps/explorer/test/explorer/chain/cache/transaction_test.exs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/apps/explorer/test/explorer/chain/cache/block_test.exs b/apps/explorer/test/explorer/chain/cache/block_test.exs index 39a5da93ee59..70b258f3d048 100644 --- a/apps/explorer/test/explorer/chain/cache/block_test.exs +++ b/apps/explorer/test/explorer/chain/cache/block_test.exs @@ -22,7 +22,7 @@ defmodule Explorer.Chain.Cache.BlockTest do _result = Block.get_count() - Process.sleep(1000) + Process.sleep(2000) updated_value = Block.get_count() diff --git a/apps/explorer/test/explorer/chain/cache/transaction_test.exs b/apps/explorer/test/explorer/chain/cache/transaction_test.exs index 0be603a33b05..29f484a41a46 100644 --- a/apps/explorer/test/explorer/chain/cache/transaction_test.exs +++ b/apps/explorer/test/explorer/chain/cache/transaction_test.exs @@ -22,7 +22,7 @@ defmodule Explorer.Chain.Cache.TransactionTest do _result = Transaction.get_count() - Process.sleep(1000) + Process.sleep(2000) updated_value = Transaction.get_count() From 6488951e89438e64c9b258b5287724d41070525d Mon Sep 17 00:00:00 2001 From: Victor Baranov Date: Tue, 9 Jul 2024 18:21:16 +0300 Subject: [PATCH 19/32] Fix flickering transaction_estimated_count/1 test (#10403) --- apps/explorer/test/explorer/chain_test.exs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/apps/explorer/test/explorer/chain_test.exs b/apps/explorer/test/explorer/chain_test.exs index 78dd9085d63f..482d520c6373 100644 --- a/apps/explorer/test/explorer/chain_test.exs +++ b/apps/explorer/test/explorer/chain_test.exs @@ -2221,6 +2221,12 @@ defmodule Explorer.ChainTest do end describe "transaction_estimated_count/1" do + setup do + Supervisor.terminate_child(Explorer.Supervisor, Explorer.Chain.Cache.Transaction.child_id()) + Supervisor.restart_child(Explorer.Supervisor, Explorer.Chain.Cache.Transaction.child_id()) + :ok + end + test "returns integer" do assert is_integer(TransactionCache.estimated_count()) end From 
a22ee8bfb863420f4a57f837d253bb0efbb91fe2 Mon Sep 17 00:00:00 2001 From: nikitosing <32202610+nikitosing@users.noreply.github.com> Date: Wed, 10 Jul 2024 19:10:41 +0300 Subject: [PATCH 20/32] fix: Fix logs sorting in API v1 (#10405) --- apps/explorer/lib/explorer/etherscan/logs.ex | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/apps/explorer/lib/explorer/etherscan/logs.ex b/apps/explorer/lib/explorer/etherscan/logs.ex index 3eecb33b5e4b..d0eca16ef60f 100644 --- a/apps/explorer/lib/explorer/etherscan/logs.ex +++ b/apps/explorer/lib/explorer/etherscan/logs.ex @@ -83,7 +83,6 @@ defmodule Explorer.Etherscan.Logs do |> where([log], log.block_number >= ^prepared_filter.from_block) |> where([log], log.block_number <= ^prepared_filter.to_block) |> limit(1000) - |> order_by([log], asc: log.block_number, asc: log.index) |> page_logs(paging_options) all_transaction_logs_query = @@ -105,7 +104,6 @@ defmodule Explorer.Etherscan.Logs do query_with_blocks = from(log_transaction_data in subquery(all_transaction_logs_query), where: log_transaction_data.address_hash == ^address_hash, - order_by: log_transaction_data.block_number, select_merge: %{ block_consensus: log_transaction_data.block_consensus } @@ -121,6 +119,7 @@ defmodule Explorer.Etherscan.Logs do end query_with_consensus + |> order_by([log], asc: log.block_number, asc: log.index) |> Repo.replica().all() else logs_query = where_topic_match(Log, prepared_filter) From afa053a2f8b81ed5025c27017b95a78d9b5b624d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 11 Jul 2024 12:06:14 +0300 Subject: [PATCH 21/32] chore(deps): bump mime from 2.0.5 to 2.0.6 (#10395) Bumps [mime](https://github.com/elixir-plug/mime) from 2.0.5 to 2.0.6. - [Changelog](https://github.com/elixir-plug/mime/blob/master/CHANGELOG.md) - [Commits](https://github.com/elixir-plug/mime/compare/v2.0.5...v2.0.6) --- updated-dependencies: - dependency-name: mime dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- mix.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mix.lock b/mix.lock index 8a32e41228c0..dc9247dd8812 100644 --- a/mix.lock +++ b/mix.lock @@ -83,7 +83,7 @@ "meck": {:hex, :meck, "0.9.2", "85ccbab053f1db86c7ca240e9fc718170ee5bda03810a6292b5306bf31bae5f5", [:rebar3], [], "hexpm", "81344f561357dc40a8344afa53767c32669153355b626ea9fcbc8da6b3045826"}, "memento": {:hex, :memento, "0.3.2", "38cfc8ff9bcb1adff7cbd0f3b78a762636b86dff764729d1c82d0464c539bdd0", [:mix], [], "hexpm", "25cf691a98a0cb70262f4a7543c04bab24648cb2041d937eb64154a8d6f8012b"}, "metrics": {:hex, :metrics, "1.0.1", "25f094dea2cda98213cecc3aeff09e940299d950904393b2a29d191c346a8486", [:rebar3], [], "hexpm", "69b09adddc4f74a40716ae54d140f93beb0fb8978d8636eaded0c31b6f099f16"}, - "mime": {:hex, :mime, "2.0.5", "dc34c8efd439abe6ae0343edbb8556f4d63f178594894720607772a041b04b02", [:mix], [], "hexpm", "da0d64a365c45bc9935cc5c8a7fc5e49a0e0f9932a761c55d6c52b142780a05c"}, + "mime": {:hex, :mime, "2.0.6", "8f18486773d9b15f95f4f4f1e39b710045fa1de891fada4516559967276e4dc2", [:mix], [], "hexpm", "c9945363a6b26d747389aac3643f8e0e09d30499a138ad64fe8fd1d13d9b153e"}, "mimerl": {:hex, :mimerl, "1.3.0", "d0cd9fc04b9061f82490f6581e0128379830e78535e017f7780f37fea7545726", [:rebar3], [], "hexpm", "a1e15a50d1887217de95f0b9b0793e32853f7c258a5cd227650889b38839fe9d"}, "mimetype_parser": {:hex, :mimetype_parser, "0.1.3", "628ac9fe56aa7edcedb534d68397dd66674ab82493c8ebe39acb9a19b666099d", [:mix], [], "hexpm", "7d8f80c567807ce78cd93c938e7f4b0a20b1aaaaab914bf286f68457d9f7a852"}, "mix_erlang_tasks": {:hex, :mix_erlang_tasks, "0.1.0", "36819fec60b80689eb1380938675af215565a89320a9e29c72c70d97512e4649", [:mix], [], "hexpm", "95d2839c422c482a70c08a8702da8242f86b773f8ab6e8602a4eb72da8da04ed"}, From 73ac62677ac58b28e1e43cd1b8317603a2fad57c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 11 Jul 2024 12:06:38 +0300 Subject: [PATCH 22/32] chore(deps): bump phoenix_ecto from 4.6.1 to 4.6.2 (#10396) Bumps [phoenix_ecto](https://github.com/phoenixframework/phoenix_ecto) from 4.6.1 to 4.6.2. - [Changelog](https://github.com/phoenixframework/phoenix_ecto/blob/main/CHANGELOG.md) - [Commits](https://github.com/phoenixframework/phoenix_ecto/compare/v4.6.1...v4.6.2) --- updated-dependencies: - dependency-name: phoenix_ecto dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- mix.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mix.lock b/mix.lock index dc9247dd8812..6502c9d0f0e1 100644 --- a/mix.lock +++ b/mix.lock @@ -30,7 +30,7 @@ "credo": {:hex, :credo, "1.7.7", "771445037228f763f9b2afd612b6aa2fd8e28432a95dbbc60d8e03ce71ba4446", [:mix], [{:bunt, "~> 0.2.1 or ~> 1.0", [hex: :bunt, repo: "hexpm", optional: false]}, {:file_system, "~> 0.2 or ~> 1.0", [hex: :file_system, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "8bc87496c9aaacdc3f90f01b7b0582467b69b4bd2441fe8aae3109d843cc2f2e"}, "csv": {:hex, :csv, "2.5.0", "c47b5a5221bf2e56d6e8eb79e77884046d7fd516280dc7d9b674251e0ae46246", [:mix], [{:parallel_stream, "~> 1.0.4 or ~> 1.1.0", [hex: :parallel_stream, repo: "hexpm", optional: false]}], "hexpm", "e821f541487045c7591a1963eeb42afff0dfa99bdcdbeb3410795a2f59c77d34"}, "dataloader": {:hex, :dataloader, "2.0.0", "49b42d60b9bb06d761a71d7b034c4b34787957e713d4fae15387a25fcd639112", [:mix], [{:ecto, ">= 3.4.3 and < 4.0.0", [hex: :ecto, repo: "hexpm", optional: true]}, {:opentelemetry_process_propagator, "~> 0.2.1", [hex: :opentelemetry_process_propagator, repo: "hexpm", optional: true]}, {:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "09d61781b76ce216e395cdbc883ff00d00f46a503e215c22722dba82507dfef0"}, - "db_connection": {:hex, :db_connection, "2.6.0", "77d835c472b5b67fc4f29556dee74bf511bbafecdcaf98c27d27fa5918152086", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "c2f992d15725e721ec7fbc1189d4ecdb8afef76648c746a8e1cad35e3b8a35f3"}, + "db_connection": {:hex, :db_connection, "2.7.0", "b99faa9291bb09892c7da373bb82cba59aefa9b36300f6145c5f201c7adf48ec", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "dcf08f31b2701f857dfc787fbad78223d61a32204f217f15e881dd93e4bdd3ff"}, "decimal": {:hex, :decimal, "2.1.1", "5611dca5d4b2c3dd497dec8f68751f1f1a54755e8ed2a966c2633cf885973ad6", [:mix], [], "hexpm", "53cfe5f497ed0e7771ae1a475575603d77425099ba5faef9394932b35020ffcc"}, "decorator": {:hex, :decorator, "1.4.0", "a57ac32c823ea7e4e67f5af56412d12b33274661bb7640ec7fc882f8d23ac419", [:mix], [], "hexpm", "0a07cedd9083da875c7418dea95b78361197cf2bf3211d743f6f7ce39656597f"}, "deep_merge": {:hex, :deep_merge, "1.0.0", "b4aa1a0d1acac393bdf38b2291af38cb1d4a52806cf7a4906f718e1feb5ee961", [:mix], [], "hexpm", "ce708e5f094b9cd4e8f2be4f00d2f4250c4095be93f8cd6d018c753894885430"}, @@ -100,7 +100,7 @@ "parallel_stream": {:hex, :parallel_stream, "1.1.0", "f52f73eb344bc22de335992377413138405796e0d0ad99d995d9977ac29f1ca9", [:mix], [], "hexpm", "684fd19191aedfaf387bbabbeb8ff3c752f0220c8112eb907d797f4592d6e871"}, "parse_trans": {:hex, :parse_trans, "3.4.1", "6e6aa8167cb44cc8f39441d05193be6e6f4e7c2946cb2759f015f8c56b76e5ff", [:rebar3], [], "hexpm", "620a406ce75dada827b82e453c19cf06776be266f5a67cff34e1ef2cbb60e49a"}, "phoenix": {:hex, :phoenix, "1.5.14", "2d5db884be496eefa5157505ec0134e66187cb416c072272420c5509d67bf808", [:mix], [{:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:phoenix_html, "~> 2.13 or ~> 3.0", [hex: :phoenix_html, repo: "hexpm", optional: true]}, {:phoenix_pubsub, "~> 2.0", [hex: :phoenix_pubsub, repo: "hexpm", optional: false]}, {:plug, "~> 1.10", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 1.0 or 
~> 2.2", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:plug_crypto, "~> 1.1.2 or ~> 1.2", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "207f1aa5520320cbb7940d7ff2dde2342162cf513875848f88249ea0ba02fef7"}, - "phoenix_ecto": {:hex, :phoenix_ecto, "4.6.1", "96798325fab2fed5a824ca204e877b81f9afd2e480f581e81f7b4b64a5a477f2", [:mix], [{:ecto, "~> 3.5", [hex: :ecto, repo: "hexpm", optional: false]}, {:phoenix_html, "~> 2.14.2 or ~> 3.0 or ~> 4.1", [hex: :phoenix_html, repo: "hexpm", optional: true]}, {:plug, "~> 1.9", [hex: :plug, repo: "hexpm", optional: false]}, {:postgrex, "~> 0.17", [hex: :postgrex, repo: "hexpm", optional: true]}], "hexpm", "0ae544ff99f3c482b0807c5cec2c8289e810ecacabc04959d82c3337f4703391"}, + "phoenix_ecto": {:hex, :phoenix_ecto, "4.6.2", "3b83b24ab5a2eb071a20372f740d7118767c272db386831b2e77638c4dcc606d", [:mix], [{:ecto, "~> 3.5", [hex: :ecto, repo: "hexpm", optional: false]}, {:phoenix_html, "~> 2.14.2 or ~> 3.0 or ~> 4.1", [hex: :phoenix_html, repo: "hexpm", optional: true]}, {:plug, "~> 1.9", [hex: :plug, repo: "hexpm", optional: false]}, {:postgrex, "~> 0.16 or ~> 1.0", [hex: :postgrex, repo: "hexpm", optional: true]}], "hexpm", "3f94d025f59de86be00f5f8c5dd7b5965a3298458d21ab1c328488be3b5fcd59"}, "phoenix_html": {:hex, :phoenix_html, "3.0.4", "232d41884fe6a9c42d09f48397c175cd6f0d443aaa34c7424da47604201df2e1", [:mix], [{:plug, "~> 1.5", [hex: :plug, repo: "hexpm", optional: true]}], "hexpm", "ce17fd3cf815b2ed874114073e743507704b1f5288bb03c304a77458485efc8b"}, "phoenix_live_reload": {:hex, :phoenix_live_reload, "1.3.3", "3a53772a6118d5679bf50fc1670505a290e32a1d195df9e069d8c53ab040c054", [:mix], [{:file_system, "~> 0.2.1 or ~> 0.3", [hex: :file_system, repo: "hexpm", optional: false]}, {:phoenix, "~> 1.4", [hex: :phoenix, repo: "hexpm", optional: false]}], "hexpm", "766796676e5f558dbae5d1bdb066849673e956005e3730dfd5affd7a6da4abac"}, "phoenix_pubsub": {:hex, :phoenix_pubsub, "2.1.3", "3168d78ba41835aecad272d5e8cd51aa87a7ac9eb836eabc42f6e57538e3731d", [:mix], [], "hexpm", "bba06bc1dcfd8cb086759f0edc94a8ba2bc8896d5331a1e2c2902bf8e36ee502"}, From c6ee4719ca908baf976e48a74e0e491179a75852 Mon Sep 17 00:00:00 2001 From: varasev <33550681+varasev@users.noreply.github.com> Date: Thu, 11 Jul 2024 17:58:28 +0300 Subject: [PATCH 23/32] feat: Add Celestia blobs support to Optimism batches fetcher (#10199) * Initial Celestia blob metadata indexing * Add Celestia blob metadata to API v2 and indexing EIP-4844 blob hash * Add API endpoint returning L1 batch data by Celestia blob commitment and height * intermediate changes * Intermediate changes * Add Explorer.Chain.Optimism.FrameSequenceBlob entity * Add l1_batch field to API v2 output for a block * Add /txn-batch-by-celestia-blob/ to API v2 * Fix Indexer.Fetcher.Optimism.TxnBatch * Partially add specs and docs for public functions * Add some docs * Update spelling * Change routes for Optimism * Change response format * Change response format * Intermediate changes * Add /api/v2/optimism/batches endpoint * Clear code * Output exact number for /api/v2/optimism/games/count * Add /api/v2/optimism/batches/:internal_id endpoint * Add /api/v2/transactions/optimism-batch/:batch_number endpoint * Remove redundant OP env variables, use SystemConfig instead * Small fixes for dialyzer * Review improvements * Fix spelling * Minor fixes * mix format --------- Co-authored-by: POA <33550681+poa@users.noreply.github.com> --- 
.../lib/block_scout_web/chain.ex | 7 + .../controllers/api/v2/block_controller.ex | 6 + .../controllers/api/v2/optimism_controller.ex | 111 ++- .../api/v2/transaction_controller.ex | 48 +- .../lib/block_scout_web/paging_helper.ex | 16 +- .../lib/block_scout_web/routers/api_router.ex | 9 + .../views/api/v2/block_view.ex | 10 + .../views/api/v2/optimism_view.ex | 99 ++- .../withdrawal_controller_test.exs | 3 +- apps/explorer/lib/explorer/chain/block.ex | 14 + .../runner/optimism/frame_sequence_blobs.ex | 109 +++ .../import/runner/optimism/txn_batches.ex | 1 + .../chain/import/stage/block_referencing.ex | 1 + .../lib/explorer/chain/optimism/deposit.ex | 25 +- .../explorer/chain/optimism/dispute_game.ex | 30 +- .../explorer/chain/optimism/frame_sequence.ex | 162 ++++- .../chain/optimism/frame_sequence_blob.ex | 116 +++ .../explorer/chain/optimism/output_root.ex | 25 +- .../lib/explorer/chain/optimism/txn_batch.ex | 110 ++- .../lib/explorer/chain/optimism/withdrawal.ex | 21 +- .../chain/optimism/withdrawal_event.ex | 20 +- .../lib/explorer/chain/transaction.ex | 18 + ...40503113124_add_celestia_blob_metadata.exs | 31 + .../20240612120541_add_view_ready_field.exs | 11 + ...65020_add_frame_sequence_id_prev_field.exs | 11 + apps/indexer/lib/indexer/fetcher/optimism.ex | 97 ++- .../lib/indexer/fetcher/optimism/deposit.ex | 24 +- .../indexer/fetcher/optimism/dispute_game.ex | 18 +- .../indexer/fetcher/optimism/output_root.ex | 2 +- .../lib/indexer/fetcher/optimism/txn_batch.ex | 662 +++++++++++++----- .../fetcher/optimism/withdrawal_event.ex | 5 +- .../fetcher/rollup_l1_reorg_monitor.ex | 32 +- config/runtime.exs | 21 +- cspell.json | 1 + docker-compose/envs/common-blockscout.env | 8 +- 35 files changed, 1550 insertions(+), 334 deletions(-) create mode 100644 apps/explorer/lib/explorer/chain/import/runner/optimism/frame_sequence_blobs.ex create mode 100644 apps/explorer/lib/explorer/chain/optimism/frame_sequence_blob.ex create mode 100644 apps/explorer/priv/optimism/migrations/20240503113124_add_celestia_blob_metadata.exs create mode 100644 apps/explorer/priv/optimism/migrations/20240612120541_add_view_ready_field.exs create mode 100644 apps/explorer/priv/optimism/migrations/20240613065020_add_frame_sequence_id_prev_field.exs diff --git a/apps/block_scout_web/lib/block_scout_web/chain.ex b/apps/block_scout_web/lib/block_scout_web/chain.ex index f17fc6b5d357..5e652ce4a5a0 100644 --- a/apps/block_scout_web/lib/block_scout_web/chain.ex +++ b/apps/block_scout_web/lib/block_scout_web/chain.ex @@ -44,6 +44,7 @@ defmodule BlockScoutWeb.Chain do } alias Explorer.Chain.Optimism.Deposit, as: OptimismDeposit + alias Explorer.Chain.Optimism.FrameSequence, as: OptimismFrameSequence alias Explorer.Chain.Optimism.OutputRoot, as: OptimismOutputRoot alias Explorer.Chain.PolygonZkevm.TransactionBatch @@ -435,6 +436,7 @@ defmodule BlockScoutWeb.Chain do # clause for pagination of entities: # - Account's entities + # - Optimism frame sequences # - Polygon Edge Deposits # - Polygon Edge Withdrawals # - Arbitrum cross chain messages @@ -450,6 +452,7 @@ defmodule BlockScoutWeb.Chain do # clause for pagination of entities: # - Account's entities + # - Optimism frame sequences # - Polygon Edge Deposits # - Polygon Edge Withdrawals # - Arbitrum cross chain messages @@ -621,6 +624,10 @@ defmodule BlockScoutWeb.Chain do paging_params(token) end + defp paging_params(%OptimismFrameSequence{id: id}) do + %{"id" => id} + end + defp paging_params(%TagAddress{id: id}) do %{"id" => id} end diff --git 
a/apps/block_scout_web/lib/block_scout_web/controllers/api/v2/block_controller.ex b/apps/block_scout_web/lib/block_scout_web/controllers/api/v2/block_controller.ex index 0395d6080344..e4495740cdc7 100644 --- a/apps/block_scout_web/lib/block_scout_web/controllers/api/v2/block_controller.ex +++ b/apps/block_scout_web/lib/block_scout_web/controllers/api/v2/block_controller.ex @@ -31,6 +31,12 @@ defmodule BlockScoutWeb.API.V2.BlockController do [transactions: :beacon_blob_transaction] => :optional } + :optimism -> + @chain_type_transaction_necessity_by_association %{} + @chain_type_block_necessity_by_association %{ + :op_frame_sequence => :optional + } + :zksync -> @chain_type_transaction_necessity_by_association %{} @chain_type_block_necessity_by_association %{ diff --git a/apps/block_scout_web/lib/block_scout_web/controllers/api/v2/optimism_controller.ex b/apps/block_scout_web/lib/block_scout_web/controllers/api/v2/optimism_controller.ex index 928180129775..d4f9b960ec16 100644 --- a/apps/block_scout_web/lib/block_scout_web/controllers/api/v2/optimism_controller.ex +++ b/apps/block_scout_web/lib/block_scout_web/controllers/api/v2/optimism_controller.ex @@ -8,13 +8,20 @@ defmodule BlockScoutWeb.API.V2.OptimismController do split_list_by_page: 1 ] + import BlockScoutWeb.PagingHelper, + only: [ + delete_parameters_from_next_page_params: 1 + ] + alias Explorer.Chain - alias Explorer.Chain.Optimism.{Deposit, DisputeGame, OutputRoot, TxnBatch, Withdrawal} + alias Explorer.Chain.Transaction + alias Explorer.Chain.Optimism.{Deposit, DisputeGame, FrameSequence, OutputRoot, TxnBatch, Withdrawal} action_fallback(BlockScoutWeb.API.V2.FallbackController) @doc """ - Function to handle GET requests to `/api/v2/optimism/txn-batches` endpoint. + Function to handle GET requests to `/api/v2/optimism/txn-batches` and + `/api/v2/optimism/txn-batches/:l2_block_range_start/:l2_block_range_end` endpoints. """ @spec txn_batches(Plug.Conn.t(), map()) :: Plug.Conn.t() def txn_batches(conn, params) do @@ -22,10 +29,12 @@ defmodule BlockScoutWeb.API.V2.OptimismController do params |> paging_options() |> Keyword.put(:api?, true) + |> Keyword.put(:l2_block_range_start, Map.get(params, "l2_block_range_start")) + |> Keyword.put(:l2_block_range_end, Map.get(params, "l2_block_range_end")) |> TxnBatch.list() |> split_list_by_page() - next_page_params = next_page_params(next_page, batches, params) + next_page_params = next_page_params(next_page, batches, delete_parameters_from_next_page_params(params)) conn |> put_status(200) @@ -43,6 +52,96 @@ defmodule BlockScoutWeb.API.V2.OptimismController do items_count(conn, TxnBatch) end + @doc """ + Function to handle GET requests to `/api/v2/optimism/batches` endpoint. 
+ """ + @spec batches(Plug.Conn.t(), map()) :: Plug.Conn.t() + def batches(conn, params) do + {batches, next_page} = + params + |> paging_options() + |> Keyword.put(:api?, true) + |> Keyword.put(:only_view_ready?, true) + |> FrameSequence.list() + |> split_list_by_page() + + next_page_params = next_page_params(next_page, batches, params) + + items = + batches + |> Enum.map(fn fs -> + Task.async(fn -> + l2_block_number_from = TxnBatch.edge_l2_block_number(fs.id, :min) + l2_block_number_to = TxnBatch.edge_l2_block_number(fs.id, :max) + tx_count = Transaction.tx_count_for_block_range(l2_block_number_from..l2_block_number_to) + + fs + |> Map.put(:l2_block_range, l2_block_number_from..l2_block_number_to) + |> Map.put(:tx_count, tx_count) + end) + end) + |> Task.yield_many(:infinity) + |> Enum.map(fn {_task, {:ok, item}} -> item end) + + conn + |> put_status(200) + |> render(:optimism_batches, %{ + batches: items, + next_page_params: next_page_params + }) + end + + @doc """ + Function to handle GET requests to `/api/v2/optimism/batches/count` endpoint. + """ + @spec batches_count(Plug.Conn.t(), map()) :: Plug.Conn.t() + def batches_count(conn, _params) do + items_count(conn, FrameSequence) + end + + @doc """ + Function to handle GET requests to `/api/v2/optimism/batches/da/celestia/:height/:commitment` endpoint. + """ + @spec batch_by_celestia_blob(Plug.Conn.t(), map()) :: Plug.Conn.t() + def batch_by_celestia_blob(conn, %{"height" => height, "commitment" => commitment}) do + {height, ""} = Integer.parse(height) + + commitment = + if String.starts_with?(String.downcase(commitment), "0x") do + commitment + else + "0x" <> commitment + end + + batch = FrameSequence.batch_by_celestia_blob(commitment, height, api?: true) + + if is_nil(batch) do + {:error, :not_found} + else + conn + |> put_status(200) + |> render(:optimism_batch, %{batch: batch}) + end + end + + @doc """ + Function to handle GET requests to `/api/v2/optimism/batches/:internal_id` endpoint. + """ + @spec batch_by_internal_id(Plug.Conn.t(), map()) :: Plug.Conn.t() + def batch_by_internal_id(conn, %{"internal_id" => internal_id}) do + {internal_id, ""} = Integer.parse(internal_id) + + batch = FrameSequence.batch_by_internal_id(internal_id, api?: true) + + if is_nil(batch) do + {:error, :not_found} + else + conn + |> put_status(200) + |> render(:optimism_batch, %{batch: batch}) + end + end + @doc """ Function to handle GET requests to `/api/v2/optimism/output-roots` endpoint. 
""" @@ -100,7 +199,11 @@ defmodule BlockScoutWeb.API.V2.OptimismController do """ @spec games_count(Plug.Conn.t(), map()) :: Plug.Conn.t() def games_count(conn, _params) do - items_count(conn, DisputeGame) + count = DisputeGame.get_last_known_index() + 1 + + conn + |> put_status(200) + |> render(:optimism_items_count, %{count: count}) end @doc """ diff --git a/apps/block_scout_web/lib/block_scout_web/controllers/api/v2/transaction_controller.ex b/apps/block_scout_web/lib/block_scout_web/controllers/api/v2/transaction_controller.ex index 3312fc6e2c2f..21bc5e7cb149 100644 --- a/apps/block_scout_web/lib/block_scout_web/controllers/api/v2/transaction_controller.ex +++ b/apps/block_scout_web/lib/block_scout_web/controllers/api/v2/transaction_controller.ex @@ -28,13 +28,19 @@ defmodule BlockScoutWeb.API.V2.TransactionController do import Explorer.MicroserviceInterfaces.Metadata, only: [maybe_preload_metadata: 1, maybe_preload_metadata_to_transaction: 1] + import Ecto.Query, + only: [ + preload: 2 + ] + alias BlockScoutWeb.AccessHelper alias BlockScoutWeb.MicroserviceInterfaces.TransactionInterpretation, as: TransactionInterpretationService alias BlockScoutWeb.Models.TransactionStateHelper - alias Explorer.Chain + alias Explorer.{Chain, PagingOptions, Repo} alias Explorer.Chain.Arbitrum.Reader, as: ArbitrumReader alias Explorer.Chain.Beacon.Reader, as: BeaconReader alias Explorer.Chain.{Hash, InternalTransaction, Transaction} + alias Explorer.Chain.Optimism.TxnBatch, as: OptimismTxnBatch alias Explorer.Chain.PolygonZkevm.Reader, as: PolygonZkevmReader alias Explorer.Chain.ZkSync.Reader, as: ZkSyncReader alias Explorer.Counters.{FreshPendingTransactionsCounter, Transactions24hStats} @@ -214,6 +220,46 @@ defmodule BlockScoutWeb.API.V2.TransactionController do handle_batch_transactions(conn, params, &ArbitrumReader.batch_transactions/2) end + @doc """ + Function to handle GET requests to `/api/v2/transactions/optimism-batch/:batch_number` endpoint. + It renders the list of L2 transactions bound to the specified batch. + """ + @spec optimism_batch(Plug.Conn.t(), map()) :: Plug.Conn.t() + def optimism_batch(conn, %{"batch_number" => batch_number_string} = params) do + {batch_number, ""} = Integer.parse(batch_number_string) + + l2_block_number_from = OptimismTxnBatch.edge_l2_block_number(batch_number, :min) + l2_block_number_to = OptimismTxnBatch.edge_l2_block_number(batch_number, :max) + + transactions_plus_one = + if is_nil(l2_block_number_from) or is_nil(l2_block_number_to) do + [] + else + paging_options = paging_options(params)[:paging_options] + + query = + case paging_options do + %PagingOptions{key: {0, 0}, is_index_in_asc_order: false} -> [] + _ -> Transaction.fetch_transactions(paging_options, l2_block_number_from - 1, l2_block_number_to) + end + + query + |> Chain.join_associations(@transaction_necessity_by_association) + |> preload([{:token_transfers, [:token, :from_address, :to_address]}]) + |> Repo.replica().all() + end + + {transactions, next_page} = split_list_by_page(transactions_plus_one) + next_page_params = next_page |> next_page_params(transactions, delete_parameters_from_next_page_params(params)) + + conn + |> put_status(200) + |> render(:transactions, %{ + transactions: transactions |> maybe_preload_ens() |> maybe_preload_metadata(), + next_page_params: next_page_params + }) + end + # Processes and renders transactions for a specified batch into an HTTP response. 
  #
  # This function retrieves a list of transactions for a given batch using a specified function,
diff --git a/apps/block_scout_web/lib/block_scout_web/paging_helper.ex b/apps/block_scout_web/lib/block_scout_web/paging_helper.ex
index bcf159686983..25dec818db16 100644
--- a/apps/block_scout_web/lib/block_scout_web/paging_helper.ex
+++ b/apps/block_scout_web/lib/block_scout_web/paging_helper.ex
@@ -190,6 +190,17 @@ defmodule BlockScoutWeb.PagingHelper do
      block_type: "Block"
    ]

+  @doc """
+  Removes redundant parameters from the parameter map used when calling
+  the `next_page_params` function.
+
+  ## Parameters
+  - `params`: A map of parameter entries.
+
+  ## Returns
+  - A modified map stripped of parameters that are redundant for the `next_page_params` function.
+  """
+  @spec delete_parameters_from_next_page_params(map()) :: map() | nil
   def delete_parameters_from_next_page_params(params) when is_map(params) do
     params
     |> Map.drop([
@@ -202,7 +213,10 @@ defmodule BlockScoutWeb.PagingHelper do
       "q",
       "sort",
       "order",
-      "state_filter"
+      "state_filter",
+      "l2_block_range_start",
+      "l2_block_range_end",
+      "batch_number"
     ])
   end

diff --git a/apps/block_scout_web/lib/block_scout_web/routers/api_router.ex b/apps/block_scout_web/lib/block_scout_web/routers/api_router.ex
index a3ae9fda52fe..ff21af9fa6ed 100644
--- a/apps/block_scout_web/lib/block_scout_web/routers/api_router.ex
+++ b/apps/block_scout_web/lib/block_scout_web/routers/api_router.ex
@@ -126,6 +126,10 @@ defmodule BlockScoutWeb.Routers.ApiRouter do
       get("/arbitrum-batch/:batch_number", V2.TransactionController, :arbitrum_batch)
     end

+    if Application.compile_env(:explorer, :chain_type) == :optimism do
+      get("/optimism-batch/:batch_number", V2.TransactionController, :optimism_batch)
+    end
+
     if Application.compile_env(:explorer, :chain_type) == :suave do
       get("/execution-node/:execution_node_hash_param", V2.TransactionController, :execution_node)
     end
@@ -215,6 +219,11 @@ defmodule BlockScoutWeb.Routers.ApiRouter do
     if Application.compile_env(:explorer, :chain_type) == :optimism do
       get("/txn-batches", V2.OptimismController, :txn_batches)
       get("/txn-batches/count", V2.OptimismController, :txn_batches_count)
+      get("/txn-batches/:l2_block_range_start/:l2_block_range_end", V2.OptimismController, :txn_batches)
+      get("/batches", V2.OptimismController, :batches)
+      get("/batches/count", V2.OptimismController, :batches_count)
+      get("/batches/da/celestia/:height/:commitment", V2.OptimismController, :batch_by_celestia_blob)
+      get("/batches/:internal_id", V2.OptimismController, :batch_by_internal_id)
       get("/output-roots", V2.OptimismController, :output_roots)
       get("/output-roots/count", V2.OptimismController, :output_roots_count)
       get("/deposits", V2.OptimismController, :deposits)
diff --git a/apps/block_scout_web/lib/block_scout_web/views/api/v2/block_view.ex b/apps/block_scout_web/lib/block_scout_web/views/api/v2/block_view.ex
index cf86d59ce54d..0df80c938421 100644
--- a/apps/block_scout_web/lib/block_scout_web/views/api/v2/block_view.ex
+++ b/apps/block_scout_web/lib/block_scout_web/views/api/v2/block_view.ex
@@ -110,6 +110,16 @@ defmodule BlockScoutWeb.API.V2.BlockView do
         end
       end

+    :optimism ->
+      defp chain_type_fields(result, block, single_block?) do
+        if single_block? do
+          # credo:disable-for-next-line Credo.Check.Design.AliasUsage
+          BlockScoutWeb.API.V2.OptimismView.extend_block_json_response(result, block)
+        else
+          result
+        end
+      end
+
     :zksync ->
       defp chain_type_fields(result, block, single_block?) do
         if single_block?
do
diff --git a/apps/block_scout_web/lib/block_scout_web/views/api/v2/optimism_view.ex b/apps/block_scout_web/lib/block_scout_web/views/api/v2/optimism_view.ex
index e1737b7f104e..994050687c5c 100644
--- a/apps/block_scout_web/lib/block_scout_web/views/api/v2/optimism_view.ex
+++ b/apps/block_scout_web/lib/block_scout_web/views/api/v2/optimism_view.ex
@@ -7,7 +7,7 @@ defmodule BlockScoutWeb.API.V2.OptimismView do
   alias Explorer.{Chain, Repo}
   alias Explorer.Helper, as: ExplorerHelper
   alias Explorer.Chain.{Block, Transaction}
-  alias Explorer.Chain.Optimism.Withdrawal
+  alias Explorer.Chain.Optimism.{FrameSequenceBlob, Withdrawal}

   @doc """
   Function to render GET requests to `/api/v2/optimism/txn-batches` endpoint.
@@ -50,6 +50,42 @@ defmodule BlockScoutWeb.API.V2.OptimismView do
     }
   end

+  @doc """
+  Function to render GET requests to `/api/v2/optimism/batches` endpoint.
+  """
+  def render("optimism_batches.json", %{
+        batches: batches,
+        next_page_params: next_page_params
+      }) do
+    items =
+      batches
+      |> Enum.map(fn batch ->
+        from..to = batch.l2_block_range
+
+        %{
+          "internal_id" => batch.id,
+          "l1_timestamp" => batch.l1_timestamp,
+          "l2_block_start" => from,
+          "l2_block_end" => to,
+          "tx_count" => batch.tx_count,
+          "l1_tx_hashes" => batch.l1_transaction_hashes
+        }
+      end)
+
+    %{
+      items: items,
+      next_page_params: next_page_params
+    }
+  end
+
+  @doc """
+  Function to render GET requests to `/api/v2/optimism/batches/da/celestia/:height/:commitment`
+  and `/api/v2/optimism/batches/:internal_id` endpoints.
+  """
+  def render("optimism_batch.json", %{batch: batch}) do
+    batch
+  end
+
   @doc """
   Function to render GET requests to `/api/v2/optimism/output-roots` endpoint.
   """
@@ -210,10 +246,65 @@ defmodule BlockScoutWeb.API.V2.OptimismView do
   end

   @doc """
-  Extends the json output with a sub-map containing information related
-  zksync: batch number and associated L1 transactions and their timestmaps.
+  Extends the json output for a block with information about the related L1
+  batch, using the Optimism frame sequence bound to the provided L2 block.
+
+  ## Parameters
+  - `out_json`: A map defining output json which will be extended.
+  - `block`: block structure containing frame sequence info related to the block.
+
+  ## Returns
+  An extended map containing an `optimism` item with the Optimism batch info
+  (L1 transaction hashes, timestamp, related blobs).
+  """
+  @spec extend_block_json_response(map(), %{
+          :__struct__ => Explorer.Chain.Block,
+          :op_frame_sequence => any(),
+          optional(any()) => any()
+        }) :: map()
+  def extend_block_json_response(out_json, %Block{} = block) do
+    frame_sequence = Map.get(block, :op_frame_sequence)
+
+    if is_nil(frame_sequence) do
+      out_json
+    else
+      {batch_data_container, blobs} = FrameSequenceBlob.list(frame_sequence.id, api?: true)
+
+      batch_info =
+        %{
+          "internal_id" => frame_sequence.id,
+          "l1_timestamp" => frame_sequence.l1_timestamp,
+          "l1_tx_hashes" => frame_sequence.l1_transaction_hashes,
+          "batch_data_container" => batch_data_container
+        }
+        |> extend_batch_info_by_blobs(blobs, "blobs")
+
+      Map.put(out_json, "optimism", batch_info)
+    end
+  end
+
+  defp extend_batch_info_by_blobs(batch_info, blobs, field_name) do
+    if Enum.empty?(blobs) do
+      batch_info
+    else
+      Map.put(batch_info, field_name, blobs)
+    end
+  end
+
+  @doc """
+  Extends the json output for a transaction, adding Optimism-related info to the output.
+
+  ## Parameters
+  - `out_json`: A map defining output json which will be extended.
+ - `transaction`: transaction structure containing extra Optimism-related info. + + ## Returns + An extended map containing `l1_*` and `op_withdrawals` items related to Optimism. """ - @spec extend_transaction_json_response(map(), map()) :: map() + @spec extend_transaction_json_response(map(), %{ + :__struct__ => Explorer.Chain.Transaction, + optional(any()) => any() + }) :: map() def extend_transaction_json_response(out_json, %Transaction{} = transaction) do out_json |> add_optional_transaction_field(transaction, :l1_fee) diff --git a/apps/block_scout_web/test/block_scout_web/controllers/withdrawal_controller_test.exs b/apps/block_scout_web/test/block_scout_web/controllers/withdrawal_controller_test.exs index f649e5787a59..c152318a1cc5 100644 --- a/apps/block_scout_web/test/block_scout_web/controllers/withdrawal_controller_test.exs +++ b/apps/block_scout_web/test/block_scout_web/controllers/withdrawal_controller_test.exs @@ -40,8 +40,7 @@ defmodule BlockScoutWeb.WithdrawalControllerTest do conn = get(conn, withdrawal_path(conn, :index), %{"type" => "JSON"}) - expected_path = - withdrawal_path(conn, :index, index: index, items_count: "50") + expected_path = withdrawal_path(conn, :index, index: index, items_count: "50") assert Map.get(json_response(conn, 200), "next_page_path") == expected_path end diff --git a/apps/explorer/lib/explorer/chain/block.ex b/apps/explorer/lib/explorer/chain/block.ex index a8f982d1b342..785b5634daa2 100644 --- a/apps/explorer/lib/explorer/chain/block.ex +++ b/apps/explorer/lib/explorer/chain/block.ex @@ -9,6 +9,7 @@ defmodule Explorer.Chain.Block.Schema do alias Explorer.Chain.{Address, Block, Hash, PendingBlockOperation, Transaction, Wei, Withdrawal} alias Explorer.Chain.Arbitrum.BatchBlock, as: ArbitrumBatchBlock alias Explorer.Chain.Block.{Reward, SecondDegreeRelation} + alias Explorer.Chain.Optimism.TxnBatch, as: OptimismTxnBatch alias Explorer.Chain.ZkSync.BatchBlock, as: ZkSyncBatchBlock @chain_type_fields (case Application.compile_env(:explorer, :chain_type) do @@ -21,6 +22,19 @@ defmodule Explorer.Chain.Block.Schema do 2 ) + :optimism -> + elem( + quote do + has_one(:op_transaction_batch, OptimismTxnBatch, + foreign_key: :l2_block_number, + references: :number + ) + + has_one(:op_frame_sequence, through: [:op_transaction_batch, :frame_sequence]) + end, + 2 + ) + :rsk -> elem( quote do diff --git a/apps/explorer/lib/explorer/chain/import/runner/optimism/frame_sequence_blobs.ex b/apps/explorer/lib/explorer/chain/import/runner/optimism/frame_sequence_blobs.ex new file mode 100644 index 000000000000..56a2d6dcb13d --- /dev/null +++ b/apps/explorer/lib/explorer/chain/import/runner/optimism/frame_sequence_blobs.ex @@ -0,0 +1,109 @@ +defmodule Explorer.Chain.Import.Runner.Optimism.FrameSequenceBlobs do + @moduledoc """ + Bulk imports `t:Explorer.Chain.Optimism.FrameSequenceBlob.t/0`. 
+ """ + + require Ecto.Query + + alias Ecto.{Changeset, Multi, Repo} + alias Explorer.Chain.Import + alias Explorer.Chain.Optimism.FrameSequenceBlob + alias Explorer.Prometheus.Instrumenter + + import Ecto.Query, only: [from: 2] + + @behaviour Import.Runner + + # milliseconds + @timeout 60_000 + + @type imported :: [FrameSequenceBlob.t()] + + @impl Import.Runner + def ecto_schema_module, do: FrameSequenceBlob + + @impl Import.Runner + def option_key, do: :optimism_frame_sequence_blobs + + @impl Import.Runner + def imported_table_row do + %{ + value_type: "[#{ecto_schema_module()}.t()]", + value_description: "List of `t:#{ecto_schema_module()}.t/0`s" + } + end + + @impl Import.Runner + def run(multi, changes_list, %{timestamps: timestamps} = options) do + insert_options = + options + |> Map.get(option_key(), %{}) + |> Map.take(~w(on_conflict timeout)a) + |> Map.put_new(:timeout, @timeout) + |> Map.put(:timestamps, timestamps) + + Multi.run(multi, :insert_frame_sequence_blobs, fn repo, _ -> + Instrumenter.block_import_stage_runner( + fn -> insert(repo, changes_list, insert_options) end, + :block_referencing, + :optimism_frame_sequence_blobs, + :optimism_frame_sequence_blobs + ) + end) + end + + @impl Import.Runner + def timeout, do: @timeout + + @spec insert(Repo.t(), [map()], %{required(:timeout) => timeout(), required(:timestamps) => Import.timestamps()}) :: + {:ok, [FrameSequenceBlob.t()]} + | {:error, [Changeset.t()]} + def insert(repo, changes_list, %{timeout: timeout, timestamps: timestamps} = options) when is_list(changes_list) do + on_conflict = Map.get_lazy(options, :on_conflict, &default_on_conflict/0) + + # Enforce FrameSequenceBlob ShareLocks order (see docs: sharelock.md) + ordered_changes_list = Enum.sort_by(changes_list, & &1.id) + + {:ok, inserted} = + Import.insert_changes_list( + repo, + ordered_changes_list, + for: FrameSequenceBlob, + returning: true, + timeout: timeout, + timestamps: timestamps, + conflict_target: [:key, :type], + on_conflict: on_conflict + ) + + {:ok, inserted} + end + + defp default_on_conflict do + from( + fsb in FrameSequenceBlob, + update: [ + set: [ + # don't update `key` as it is a part of the composite primary key and used for the conflict target + # don't update `type` as it is a part of the composite primary key and used for the conflict target + id: fragment("EXCLUDED.id"), + metadata: fragment("EXCLUDED.metadata"), + l1_transaction_hash: fragment("EXCLUDED.l1_transaction_hash"), + l1_timestamp: fragment("EXCLUDED.l1_timestamp"), + frame_sequence_id: fragment("EXCLUDED.frame_sequence_id"), + inserted_at: fragment("LEAST(?, EXCLUDED.inserted_at)", fsb.inserted_at), + updated_at: fragment("GREATEST(?, EXCLUDED.updated_at)", fsb.updated_at) + ] + ], + where: + fragment( + "(EXCLUDED.id, EXCLUDED.metadata, EXCLUDED.l1_transaction_hash, EXCLUDED.l1_timestamp, EXCLUDED.frame_sequence_id) IS DISTINCT FROM (?, ?, ?, ?, ?)", + fsb.id, + fsb.metadata, + fsb.l1_transaction_hash, + fsb.l1_timestamp, + fsb.frame_sequence_id + ) + ) + end +end diff --git a/apps/explorer/lib/explorer/chain/import/runner/optimism/txn_batches.ex b/apps/explorer/lib/explorer/chain/import/runner/optimism/txn_batches.ex index 5b84ef3755e3..0814b3439dd9 100644 --- a/apps/explorer/lib/explorer/chain/import/runner/optimism/txn_batches.ex +++ b/apps/explorer/lib/explorer/chain/import/runner/optimism/txn_batches.ex @@ -86,6 +86,7 @@ defmodule Explorer.Chain.Import.Runner.Optimism.TxnBatches do set: [ # don't update `l2_block_number` as it is a primary key and used for the conflict 
target frame_sequence_id: fragment("EXCLUDED.frame_sequence_id"), + frame_sequence_id_prev: tb.frame_sequence_id, inserted_at: fragment("LEAST(?, EXCLUDED.inserted_at)", tb.inserted_at), updated_at: fragment("GREATEST(?, EXCLUDED.updated_at)", tb.updated_at) ] diff --git a/apps/explorer/lib/explorer/chain/import/stage/block_referencing.ex b/apps/explorer/lib/explorer/chain/import/stage/block_referencing.ex index 049a540d7082..66e7d6837603 100644 --- a/apps/explorer/lib/explorer/chain/import/stage/block_referencing.ex +++ b/apps/explorer/lib/explorer/chain/import/stage/block_referencing.ex @@ -21,6 +21,7 @@ defmodule Explorer.Chain.Import.Stage.BlockReferencing do @optimism_runners [ Runner.Optimism.FrameSequences, + Runner.Optimism.FrameSequenceBlobs, Runner.Optimism.TxnBatches, Runner.Optimism.OutputRoots, Runner.Optimism.DisputeGames, diff --git a/apps/explorer/lib/explorer/chain/optimism/deposit.ex b/apps/explorer/lib/explorer/chain/optimism/deposit.ex index 30a58e7f79ae..48c6dc32237c 100644 --- a/apps/explorer/lib/explorer/chain/optimism/deposit.ex +++ b/apps/explorer/lib/explorer/chain/optimism/deposit.ex @@ -3,28 +3,25 @@ defmodule Explorer.Chain.Optimism.Deposit do use Explorer.Schema - import Explorer.Chain, only: [join_association: 3, select_repo: 1] + import Explorer.Chain, only: [default_paging_options: 0, join_association: 3, select_repo: 1] alias Explorer.Chain.{Hash, Transaction} alias Explorer.PagingOptions - @default_paging_options %PagingOptions{page_size: 50} - @required_attrs ~w(l1_block_number l1_transaction_hash l1_transaction_origin l2_transaction_hash)a @optional_attrs ~w(l1_block_timestamp)a @allowed_attrs @required_attrs ++ @optional_attrs - @type t :: %__MODULE__{ - l1_block_number: non_neg_integer(), - l1_block_timestamp: DateTime.t(), - l1_transaction_hash: Hash.t(), - l1_transaction_origin: Hash.t(), - l2_transaction_hash: Hash.t(), - l2_transaction: %Ecto.Association.NotLoaded{} | Transaction.t() - } - + @typedoc """ + * `l1_block_number` - The block number on L1 when the L1 transaction occurred. + * `l1_block_timestamp` - Timestamp of the L1 block. + * `l1_transaction_hash` - The deposit transaction hash on L1. + * `l1_transaction_origin` - Origin address of the deposit. + * `l2_transaction_hash` - The corresponding L2 transaction hash of the deposit. + * `l2_transaction` - An instance of `Explorer.Chain.Transaction` referenced by `l2_transaction_hash`. 
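The `typed_schema` blocks introduced below presumably come from the `typed_ecto_schema` package pulled in through `use Explorer.Schema`; the macro derives the struct's `t/0` type from the field list, which is why the handwritten `@type t` blocks are deleted throughout this patch. A standalone sketch of the idea, with a hypothetical module name:

defmodule MyApp.SketchWithdrawal do
  # illustration only, assuming the typed_ecto_schema library
  use TypedEctoSchema

  # `typed_schema` emits both the Ecto schema and a derived `t/0` type,
  # so no handwritten `@type t :: %__MODULE__{...}` is needed
  @primary_key false
  typed_schema "op_withdrawals" do
    field(:msg_nonce, :decimal, primary_key: true)
    field(:l2_block_number, :integer)
  end
end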
+ """ @primary_key false - schema "op_deposits" do + typed_schema "op_deposits" do field(:l1_block_number, :integer) field(:l1_block_timestamp, :utc_datetime_usec) field(:l1_transaction_hash, Hash.Full) @@ -61,7 +58,7 @@ defmodule Explorer.Chain.Optimism.Deposit do """ @spec list :: [__MODULE__.t()] def list(options \\ []) do - paging_options = Keyword.get(options, :paging_options, @default_paging_options) + paging_options = Keyword.get(options, :paging_options, default_paging_options()) case paging_options do %PagingOptions{key: {0, _l2_tx_hash}} -> diff --git a/apps/explorer/lib/explorer/chain/optimism/dispute_game.ex b/apps/explorer/lib/explorer/chain/optimism/dispute_game.ex index f8dff4476f67..1ef62c4cde84 100644 --- a/apps/explorer/lib/explorer/chain/optimism/dispute_game.ex +++ b/apps/explorer/lib/explorer/chain/optimism/dispute_game.ex @@ -4,7 +4,7 @@ defmodule Explorer.Chain.Optimism.DisputeGame do use Explorer.Schema import Ecto.Query - import Explorer.Chain, only: [select_repo: 1] + import Explorer.Chain, only: [default_paging_options: 0, select_repo: 1] alias Explorer.Chain.{Data, Hash} alias Explorer.{PagingOptions, Repo} @@ -12,20 +12,20 @@ defmodule Explorer.Chain.Optimism.DisputeGame do @required_attrs ~w(index game_type address created_at)a @optional_attrs ~w(extra_data resolved_at status)a - @default_paging_options %PagingOptions{page_size: 50} - - @type t :: %__MODULE__{ - index: non_neg_integer(), - game_type: non_neg_integer(), - address: Hash.t(), - extra_data: Data.t() | nil, - created_at: DateTime.t(), - resolved_at: DateTime.t() | nil, - status: non_neg_integer() | nil - } - + @typedoc """ + * `index` - A unique index of the dispute game. + * `game_type` - A number encoding a type of the dispute game. + * `address` - The dispute game contract address. + * `extra_data` - An extra data of the dispute game (contains L2 block number). + Equals to `nil` when the game is written to database but the rest data is not known yet. + * `created_at` - UTC timestamp of when the dispute game was created. + * `resolved_at` - UTC timestamp of when the dispute game was resolved. + Equals to `nil` if the game is not resolved yet. + * `status` - 0 means the game is in progress (not resolved yet), 1 means a challenger wins, 2 means a defender wins. + Equals to `nil` when the game is written to database but the rest data is not known yet. + """ @primary_key false - schema "op_dispute_games" do + typed_schema "op_dispute_games" do field(:index, :integer, primary_key: true) field(:game_type, :integer) field(:address, Hash.Address) @@ -67,7 +67,7 @@ defmodule Explorer.Chain.Optimism.DisputeGame do """ @spec list :: [__MODULE__.t()] def list(options \\ []) do - paging_options = Keyword.get(options, :paging_options, @default_paging_options) + paging_options = Keyword.get(options, :paging_options, default_paging_options()) base_query = from(g in __MODULE__, diff --git a/apps/explorer/lib/explorer/chain/optimism/frame_sequence.ex b/apps/explorer/lib/explorer/chain/optimism/frame_sequence.ex index 49aceda7a4f1..8a5788e40da4 100644 --- a/apps/explorer/lib/explorer/chain/optimism/frame_sequence.ex +++ b/apps/explorer/lib/explorer/chain/optimism/frame_sequence.ex @@ -1,33 +1,175 @@ defmodule Explorer.Chain.Optimism.FrameSequence do - @moduledoc "Models a frame sequence for Optimism." + @moduledoc """ + Models a frame sequence for Optimism. 
+
+    Changes in the schema should be reflected in the bulk import module:
+    - Explorer.Chain.Import.Runner.Optimism.FrameSequences
+
+    Migrations:
+    - Explorer.Repo.Migrations.AddOpFrameSequencesTable
+    - Explorer.Repo.Optimism.Migrations.AddViewReadyField
+    - Explorer.Repo.Optimism.Migrations.AddFrameSequenceIdPrevField
+  """

   use Explorer.Schema

-  alias Explorer.Chain.Hash
-  alias Explorer.Chain.Optimism.TxnBatch
+  import Explorer.Chain, only: [default_paging_options: 0, select_repo: 1]

-  @required_attrs ~w(id l1_transaction_hashes l1_timestamp)a
+  alias Explorer.Chain.{Hash, Transaction}
+  alias Explorer.Chain.Optimism.{FrameSequenceBlob, TxnBatch}
+  alias Explorer.PagingOptions

-  @type t :: %__MODULE__{
-          l1_transaction_hashes: [Hash.t()],
-          l1_timestamp: DateTime.t(),
-          transaction_batches: %Ecto.Association.NotLoaded{} | [TxnBatch.t()]
-        }
+  @required_attrs ~w(id l1_transaction_hashes l1_timestamp)a

+  @typedoc """
+    * `l1_transaction_hashes` - The list of L1 transaction hashes where the frame sequence is stored.
+    * `l1_timestamp` - UTC timestamp of the last L1 transaction of the `l1_transaction_hashes` list.
+    * `view_ready` - Boolean flag indicating if the frame sequence is ready to be displayed in the UI.
+    * `transaction_batches` - Instances of `Explorer.Chain.Optimism.TxnBatch` bound to this frame sequence.
+    * `blobs` - Instances of `Explorer.Chain.Optimism.FrameSequenceBlob` bound to this frame sequence.
+  """
   @primary_key {:id, :integer, autogenerate: false}
-  schema "op_frame_sequences" do
+  typed_schema "op_frame_sequences" do
     field(:l1_transaction_hashes, {:array, Hash.Full})
     field(:l1_timestamp, :utc_datetime_usec)
+    field(:view_ready, :boolean)

     has_many(:transaction_batches, TxnBatch, foreign_key: :frame_sequence_id)
+    has_many(:blobs, FrameSequenceBlob, foreign_key: :frame_sequence_id)

     timestamps()
   end

+  @doc """
+    Validates that the attributes are valid.
+  """
   def changeset(%__MODULE__{} = sequences, attrs \\ %{}) do
     sequences
     |> cast(attrs, @required_attrs)
     |> validate_required(@required_attrs)
     |> unique_constraint(:id)
   end
+
+  @doc """
+    Finds and returns L1 batch data from the op_frame_sequences and
+    op_frame_sequence_blobs DB tables by the Celestia blob's commitment and height.
+
+    ## Parameters
+    - `commitment`: The blob's commitment in the form of a hex string with the 0x prefix.
+    - `height`: The blob's height.
+    - `options`: A keyword list of options that may include whether to use a replica database.
+
+    ## Returns
+    - A map with info about the L1 batch bound to the specified Celestia blob.
+    - nil if the blob is not found.
+  """
+  @spec batch_by_celestia_blob(binary(), non_neg_integer(), list()) :: map() | nil
+  def batch_by_celestia_blob(commitment, height, options \\ []) do
+    commitment = Base.decode16!(String.trim_leading(commitment, "0x"), case: :mixed)
+    height = :binary.encode_unsigned(height)
+    key = :crypto.hash(:sha256, height <> commitment)
+
+    query =
+      from(fsb in FrameSequenceBlob,
+        select: fsb.frame_sequence_id,
+        where: fsb.key == ^key and fsb.type == :celestia
+      )
+
+    frame_sequence_id = select_repo(options).one(query)
+
+    if not is_nil(frame_sequence_id) do
+      batch_by_internal_id(frame_sequence_id, options)
+    end
+  end
+
+  @doc """
+    Finds and returns L1 batch data from the op_frame_sequences and
+    op_frame_sequence_blobs DB tables by the internal id of the batch.
+
+    ## Parameters
+    - `internal_id`: The internal id of the batch.
+    - `options`: A keyword list of options that may include whether to use a replica database.
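A hedged usage sketch for `batch_by_celestia_blob/3` above (the commitment and height are made-up values, not from this patch):

alias Explorer.Chain.Optimism.FrameSequence

# hypothetical Celestia blob coordinates
commitment = "0x" <> String.duplicate("ab", 32)
height = 1_500_000

case FrameSequence.batch_by_celestia_blob(commitment, height) do
  nil -> :batch_not_found
  batch -> Map.fetch!(batch, "internal_id")
end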
+
+    ## Returns
+    - A map with info about the L1 batch having the specified id.
+    - nil if the batch is not found.
+  """
+  @spec batch_by_internal_id(non_neg_integer(), list()) :: map() | nil
+  def batch_by_internal_id(internal_id, options \\ []) do
+    query =
+      from(fs in __MODULE__,
+        where: fs.id == ^internal_id and fs.view_ready == true
+      )
+
+    batch = select_repo(options).one(query)
+
+    if not is_nil(batch) do
+      l2_block_number_from = TxnBatch.edge_l2_block_number(internal_id, :min)
+      l2_block_number_to = TxnBatch.edge_l2_block_number(internal_id, :max)
+      tx_count = Transaction.tx_count_for_block_range(l2_block_number_from..l2_block_number_to)
+
+      {batch_data_container, blobs} = FrameSequenceBlob.list(internal_id, options)
+
+      result = %{
+        "internal_id" => internal_id,
+        "l1_timestamp" => batch.l1_timestamp,
+        "l2_block_start" => l2_block_number_from,
+        "l2_block_end" => l2_block_number_to,
+        "tx_count" => tx_count,
+        "l1_tx_hashes" => batch.l1_transaction_hashes,
+        "batch_data_container" => batch_data_container
+      }
+
+      if Enum.empty?(blobs) do
+        result
+      else
+        Map.put(result, "blobs", blobs)
+      end
+    end
+  end
+
+  @doc """
+    Lists `t:Explorer.Chain.Optimism.FrameSequence.t/0`'s in descending order based on id.
+
+    ## Parameters
+    - `options`: A keyword list of options that may include whether to use a replica database,
+      paging options, and the `only_view_ready?` option.
+
+    ## Returns
+    - A list of found entities sorted by `id` in descending order.
+  """
+  @spec list(list()) :: [__MODULE__.t()]
+  def list(options \\ []) do
+    paging_options = Keyword.get(options, :paging_options, default_paging_options())
+    only_view_ready = Keyword.get(options, :only_view_ready?, false)
+
+    case paging_options do
+      %PagingOptions{key: {0}} ->
+        []
+
+      _ ->
+        base_query =
+          if only_view_ready do
+            from(fs in __MODULE__,
+              where: fs.view_ready == true,
+              order_by: [desc: fs.id]
+            )
+          else
+            from(fs in __MODULE__,
+              order_by: [desc: fs.id]
+            )
+          end
+
+        base_query
+        |> page_frame_sequences(paging_options)
+        |> limit(^paging_options.page_size)
+        |> select_repo(options).all()
+    end
+  end
+
+  defp page_frame_sequences(query, %PagingOptions{key: nil}), do: query
+
+  defp page_frame_sequences(query, %PagingOptions{key: {id}}) do
+    from(fs in query, where: fs.id < ^id)
+  end
 end
diff --git a/apps/explorer/lib/explorer/chain/optimism/frame_sequence_blob.ex b/apps/explorer/lib/explorer/chain/optimism/frame_sequence_blob.ex
new file mode 100644
index 000000000000..66d65984fa76
--- /dev/null
+++ b/apps/explorer/lib/explorer/chain/optimism/frame_sequence_blob.ex
@@ -0,0 +1,116 @@
+defmodule Explorer.Chain.Optimism.FrameSequenceBlob do
+  @moduledoc """
+    Models a blob related to an Optimism frame sequence.
+
+    Changes in the schema should be reflected in the bulk import module:
+    - Explorer.Chain.Import.Runner.Optimism.FrameSequenceBlobs
+
+    Migrations:
+    - Explorer.Repo.Optimism.Migrations.AddCelestiaBlobMetadata
+  """
+
+  use Explorer.Schema
+
+  import Explorer.Chain, only: [select_repo: 1]
+
+  alias Explorer.Chain.Hash
+  alias Explorer.Chain.Optimism.FrameSequence
+
+  @required_attrs ~w(id key type metadata l1_transaction_hash l1_timestamp frame_sequence_id)a
+
+  @typedoc """
+    * `key` - A unique id (key) of the blob.
+    * `type` - The type of the blob (`celestia` or `eip4844`).
+    * `metadata` - A map containing metadata of the blob.
+    * `l1_transaction_hash` - The corresponding L1 transaction hash which points to the blob.
+    * `l1_timestamp` - The timestamp of the L1 transaction.
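Paging through frame sequences follows the same keyset pattern as the other Optimism listings; a sketch assuming a non-empty first page (not part of this patch):

alias Explorer.Chain.Optimism.FrameSequence
alias Explorer.PagingOptions

page = FrameSequence.list(only_view_ready?: true)

# keyset pagination: continue below the smallest id seen so far
unless page == [] do
  %{id: last_id} = List.last(page)
  _next_page = FrameSequence.list(paging_options: %PagingOptions{key: {last_id}, page_size: 50})
end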
+    * `frame_sequence_id` - The ID of the frame sequence bound to this blob.
+    * `frame_sequence` - An instance of `Explorer.Chain.Optimism.FrameSequence` referenced by `frame_sequence_id`.
+  """
+  @primary_key {:id, :integer, autogenerate: false}
+  typed_schema "op_frame_sequence_blobs" do
+    field(:key, :binary)
+    field(:type, Ecto.Enum, values: [:celestia, :eip4844])
+    field(:metadata, :map)
+    field(:l1_transaction_hash, Hash.Full)
+    field(:l1_timestamp, :utc_datetime_usec)
+    belongs_to(:frame_sequence, FrameSequence, foreign_key: :frame_sequence_id, references: :id, type: :integer)
+    timestamps()
+  end
+
+  @doc """
+    Validates that the attributes are valid.
+  """
+  def changeset(%__MODULE__{} = blobs, attrs \\ %{}) do
+    blobs
+    |> cast(attrs, @required_attrs)
+    |> validate_required(@required_attrs)
+    |> unique_constraint(:id)
+    |> unique_constraint([:key, :type])
+    |> foreign_key_constraint(:frame_sequence_id)
+  end
+
+  @doc """
+    Lists `t:Explorer.Chain.Optimism.FrameSequenceBlob.t/0`'s related to the
+    specified frame sequence in ascending order based on an entity id.
+
+    ## Parameters
+    - `frame_sequence_id`: A frame sequence identifier.
+    - `options`: A keyword list of options that may include whether to use a replica database.
+
+    ## Returns
+    - A tuple {type, blobs} where `type` can be one of: `in_blob4844`, `in_celestia`, `in_calldata`.
+      The `blobs` element is the list of blobs related to the specified frame sequence id, sorted by entity id.
+  """
+  @spec list(non_neg_integer(), list()) :: {:in_blob4844 | :in_celestia | :in_calldata, [map()]}
+  def list(frame_sequence_id, options \\ []) do
+    repo = select_repo(options)
+
+    query =
+      from(fsb in __MODULE__,
+        where: fsb.frame_sequence_id == ^frame_sequence_id,
+        order_by: [asc: fsb.id]
+      )
+
+    query
+    |> repo.all()
+    |> filter_blobs_by_type()
+  end
+
+  defp filter_blobs_by_type(blobs) do
+    eip4844_blobs =
+      blobs
+      |> Enum.filter(fn b -> b.type == :eip4844 end)
+      |> Enum.map(fn b ->
+        %{
+          "hash" => b.metadata["hash"],
+          "l1_transaction_hash" => b.l1_transaction_hash,
+          "l1_timestamp" => b.l1_timestamp
+        }
+      end)
+
+    celestia_blobs =
+      blobs
+      |> Enum.filter(fn b -> b.type == :celestia end)
+      |> Enum.map(fn b ->
+        %{
+          "height" => b.metadata["height"],
+          "namespace" => b.metadata["namespace"],
+          "commitment" => b.metadata["commitment"],
+          "l1_transaction_hash" => b.l1_transaction_hash,
+          "l1_timestamp" => b.l1_timestamp
+        }
+      end)
+
+    cond do
+      not Enum.empty?(eip4844_blobs) ->
+        {:in_blob4844, eip4844_blobs}
+
+      not Enum.empty?(celestia_blobs) ->
+        {:in_celestia, celestia_blobs}
+
+      true ->
+        {:in_calldata, []}
+    end
+  end
+end
diff --git a/apps/explorer/lib/explorer/chain/optimism/output_root.ex b/apps/explorer/lib/explorer/chain/optimism/output_root.ex
index 04611a3faaf5..e32b4a7f3508 100644
--- a/apps/explorer/lib/explorer/chain/optimism/output_root.ex
+++ b/apps/explorer/lib/explorer/chain/optimism/output_root.ex
@@ -3,26 +3,23 @@ defmodule Explorer.Chain.Optimism.OutputRoot do
   use Explorer.Schema

-  import Explorer.Chain, only: [select_repo: 1]
+  import Explorer.Chain, only: [default_paging_options: 0, select_repo: 1]

   alias Explorer.Chain.Hash
   alias Explorer.PagingOptions

-  @default_paging_options %PagingOptions{page_size: 50}
-
   @required_attrs ~w(l2_output_index l2_block_number l1_transaction_hash l1_timestamp l1_block_number output_root)a

-  @type t :: %__MODULE__{
-          l2_output_index: non_neg_integer(),
-          l2_block_number: non_neg_integer(),
-          l1_transaction_hash: Hash.t(),
-          l1_timestamp: DateTime.t(),
-          l1_block_number:
non_neg_integer(), - output_root: Hash.t() - } - + @typedoc """ + * `l2_output_index` - A unique index of the output root. + * `l2_block_number` - An L2 block number of the output root. + * `l1_transaction_hash` - An L1 transaction hash where an event with the output root appeared. + * `l1_timestamp` - A timestamp of the L1 transaction block. + * `l1_block_number` - A block number of the L1 transaction. + * `output_root` - The output root. + """ @primary_key false - schema "op_output_roots" do + typed_schema "op_output_roots" do field(:l2_output_index, :integer, primary_key: true) field(:l2_block_number, :integer) field(:l1_transaction_hash, Hash.Full) @@ -45,7 +42,7 @@ defmodule Explorer.Chain.Optimism.OutputRoot do """ @spec list :: [__MODULE__.t()] def list(options \\ []) do - paging_options = Keyword.get(options, :paging_options, @default_paging_options) + paging_options = Keyword.get(options, :paging_options, default_paging_options()) case paging_options do %PagingOptions{key: {0}} -> diff --git a/apps/explorer/lib/explorer/chain/optimism/txn_batch.ex b/apps/explorer/lib/explorer/chain/optimism/txn_batch.ex index bbdf058d9009..eb6a159fa81b 100644 --- a/apps/explorer/lib/explorer/chain/optimism/txn_batch.ex +++ b/apps/explorer/lib/explorer/chain/optimism/txn_batch.ex @@ -1,14 +1,25 @@ defmodule Explorer.Chain.Optimism.TxnBatch do - @moduledoc "Models a batch of transactions for Optimism." + @moduledoc """ + Models a batch of transactions for Optimism. + + Changes in the schema should be reflected in the bulk import module: + - Explorer.Chain.Import.Runner.Optimism.TxnBatches + + Migrations: + - Explorer.Repo.Migrations.AddOpTransactionBatchesTable + - Explorer.Repo.Migrations.RenameFields + - Explorer.Repo.Migrations.AddOpFrameSequencesTable + - Explorer.Repo.Migrations.RemoveOpEpochNumberField + - Explorer.Repo.Optimism.Migrations.AddCelestiaBlobMetadata + - Explorer.Repo.Optimism.Migrations.AddFrameSequenceIdPrevField + """ use Explorer.Schema - import Explorer.Chain, only: [join_association: 3, select_repo: 1] + import Explorer.Chain, only: [default_paging_options: 0, join_association: 3, select_repo: 1] alias Explorer.Chain.Optimism.FrameSequence - alias Explorer.PagingOptions - - @default_paging_options %PagingOptions{page_size: 50} + alias Explorer.{PagingOptions, Repo} @required_attrs ~w(l2_block_number frame_sequence_id)a @@ -17,20 +28,24 @@ defmodule Explorer.Chain.Optimism.TxnBatch do @max_blob_data_size (4 * 31 + 3) * 1024 - 4 @rounds 1024 - @type t :: %__MODULE__{ - l2_block_number: non_neg_integer(), - frame_sequence_id: non_neg_integer(), - frame_sequence: %Ecto.Association.NotLoaded{} | FrameSequence.t() - } - + @typedoc """ + * `l2_block_number` - An L2 block number related to the specified frame sequence. + * `frame_sequence_id` - ID of the frame sequence the L2 block relates to. + * `frame_sequence_id_prev` - Previous ID of the frame sequence (should be 0 until the table row is updated). + * `frame_sequence` - An instance of `Explorer.Chain.Optimism.FrameSequence` referenced by `frame_sequence_id`. + """ @primary_key false - schema "op_transaction_batches" do + typed_schema "op_transaction_batches" do field(:l2_block_number, :integer, primary_key: true) belongs_to(:frame_sequence, FrameSequence, foreign_key: :frame_sequence_id, references: :id, type: :integer) + field(:frame_sequence_id_prev, :integer) timestamps() end + @doc """ + Validates that the attributes are valid. 
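The `changeset/2` functions in these schema modules all share the same cast/validate/constraint shape; an illustrative run with made-up attributes (not part of this patch):

alias Explorer.Chain.Optimism.TxnBatch

# all required attrs present -> valid changeset
valid = TxnBatch.changeset(%TxnBatch{}, %{l2_block_number: 100, frame_sequence_id: 1})
true = valid.valid?

# missing required attrs -> errors on :l2_block_number and :frame_sequence_id
invalid = TxnBatch.changeset(%TxnBatch{}, %{})
false = invalid.valid?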
+ """ def changeset(%__MODULE__{} = batches, attrs \\ %{}) do batches |> cast(attrs, @required_attrs) @@ -39,22 +54,79 @@ defmodule Explorer.Chain.Optimism.TxnBatch do end @doc """ - Lists `t:Explorer.Chain.Optimism.TxnBatch.t/0`'s' in descending order based on l2_block_number. + Returns an edge L2 block number (min or max) of an L2 block range + for the specified frame sequence. + + ## Parameters + - `id`: The ID of the frame sequence for which the edge block number must be returned. + - `type`: Can be :min or :max depending on which block number needs to be returned. + + ## Returns + - The min/max block number or `nil` if the block range is not found. + """ + @spec edge_l2_block_number(non_neg_integer(), :min | :max) :: non_neg_integer() | nil + def edge_l2_block_number(id, type) when type == :min and is_integer(id) and id >= 0 do + query = + id + |> edge_l2_block_number_query() + |> order_by([tb], asc: tb.l2_block_number) + + Repo.replica().one(query) + end + + def edge_l2_block_number(id, type) when type == :max and is_integer(id) and id >= 0 do + query = + id + |> edge_l2_block_number_query() + |> order_by([tb], desc: tb.l2_block_number) + + Repo.replica().one(query) + end + + def edge_l2_block_number(_id, _type), do: nil + + defp edge_l2_block_number_query(id) do + from( + tb in __MODULE__, + select: tb.l2_block_number, + where: tb.frame_sequence_id == ^id, + limit: 1 + ) + end + + @doc """ + Lists `t:Explorer.Chain.Optimism.TxnBatch.t/0`'s' in descending order based on l2_block_number. + ## Parameters + - `options`: A keyword list of options that may include whether to use a replica database, + paging options, and optional L2 block range for which to make the list of items. + + ## Returns + - A list of found entities sorted by `l2_block_number` in descending order. """ - @spec list :: [__MODULE__.t()] + @spec list(list()) :: [__MODULE__.t()] def list(options \\ []) do - paging_options = Keyword.get(options, :paging_options, @default_paging_options) + paging_options = Keyword.get(options, :paging_options, default_paging_options()) case paging_options do %PagingOptions{key: {0}} -> [] _ -> + l2_block_range_start = Keyword.get(options, :l2_block_range_start) + l2_block_range_end = Keyword.get(options, :l2_block_range_end) + base_query = - from(tb in __MODULE__, - order_by: [desc: tb.l2_block_number] - ) + if is_nil(l2_block_range_start) or is_nil(l2_block_range_end) do + from(tb in __MODULE__, + order_by: [desc: tb.l2_block_number] + ) + else + from(tb in __MODULE__, + order_by: [desc: tb.l2_block_number], + where: tb.l2_block_number >= ^l2_block_range_start and tb.l2_block_number <= ^l2_block_range_end + ) + end base_query |> join_association(:frame_sequence, :required) @@ -65,7 +137,7 @@ defmodule Explorer.Chain.Optimism.TxnBatch do end @doc """ - Decodes EIP-4844 blob to the raw data. Returns `nil` if the blob is invalid. + Decodes EIP-4844 blob to the raw data. Returns `nil` if the blob is invalid. 
""" @spec decode_eip4844_blob(binary()) :: binary() | nil def decode_eip4844_blob(b) do diff --git a/apps/explorer/lib/explorer/chain/optimism/withdrawal.ex b/apps/explorer/lib/explorer/chain/optimism/withdrawal.ex index dad0dac1168c..d397329c5844 100644 --- a/apps/explorer/lib/explorer/chain/optimism/withdrawal.ex +++ b/apps/explorer/lib/explorer/chain/optimism/withdrawal.ex @@ -3,7 +3,7 @@ defmodule Explorer.Chain.Optimism.Withdrawal do use Explorer.Schema - import Explorer.Chain, only: [select_repo: 1] + import Explorer.Chain, only: [default_paging_options: 0, select_repo: 1] alias Explorer.Application.Constants alias Explorer.Chain.{Block, Hash, Transaction} @@ -11,8 +11,6 @@ defmodule Explorer.Chain.Optimism.Withdrawal do alias Explorer.Chain.Optimism.{DisputeGame, OutputRoot, WithdrawalEvent} alias Explorer.{Helper, PagingOptions, Repo} - @default_paging_options %PagingOptions{page_size: 50} - @game_status_defender_wins 2 @withdrawal_status_waiting_for_state_root "Waiting for state root" @@ -28,15 +26,14 @@ defmodule Explorer.Chain.Optimism.Withdrawal do @required_attrs ~w(msg_nonce hash l2_transaction_hash l2_block_number)a - @type t :: %__MODULE__{ - msg_nonce: Decimal.t(), - hash: Hash.t(), - l2_transaction_hash: Hash.t(), - l2_block_number: non_neg_integer() - } - + @typedoc """ + * `msg_nonce` - A nonce of the withdrawal message. + * `hash` - A withdrawal hash. + * `l2_transaction_hash` - An L2 transaction hash which initiated the withdrawal. + * `l2_block_number` - A block number of the L2 transaction. + """ @primary_key false - schema "op_withdrawals" do + typed_schema "op_withdrawals" do field(:msg_nonce, :decimal, primary_key: true) field(:hash, Hash.Full) field(:l2_transaction_hash, Hash.Full) @@ -57,7 +54,7 @@ defmodule Explorer.Chain.Optimism.Withdrawal do """ @spec list :: [__MODULE__.t()] def list(options \\ []) do - paging_options = Keyword.get(options, :paging_options, @default_paging_options) + paging_options = Keyword.get(options, :paging_options, default_paging_options()) case paging_options do %PagingOptions{key: {0}} -> diff --git a/apps/explorer/lib/explorer/chain/optimism/withdrawal_event.ex b/apps/explorer/lib/explorer/chain/optimism/withdrawal_event.ex index 3210916fcfe2..bac79ac951c2 100644 --- a/apps/explorer/lib/explorer/chain/optimism/withdrawal_event.ex +++ b/apps/explorer/lib/explorer/chain/optimism/withdrawal_event.ex @@ -8,17 +8,17 @@ defmodule Explorer.Chain.Optimism.WithdrawalEvent do @required_attrs ~w(withdrawal_hash l1_event_type l1_timestamp l1_transaction_hash l1_block_number)a @optional_attrs ~w(game_index)a - @type t :: %__MODULE__{ - withdrawal_hash: Hash.t(), - l1_event_type: String.t(), - l1_timestamp: DateTime.t(), - l1_transaction_hash: Hash.t(), - l1_block_number: non_neg_integer(), - game_index: non_neg_integer() | nil - } - + @typedoc """ + * `withdrawal_hash` - A withdrawal hash. + * `l1_event_type` - A type of withdrawal event: `WithdrawalProven` or `WithdrawalFinalized`. + * `l1_timestamp` - A timestamp of when the withdrawal event appeared. + * `l1_transaction_hash` - An hash of L1 transaction that contains the event. + * `l1_block_number` - An L1 block number of the L1 transaction. + * `game_index` - An index of a dispute game (if available in L1 transaction input) when + the withdrawal is proven. Equals to `nil` if not available. 
+ """ @primary_key false - schema "op_withdrawal_events" do + typed_schema "op_withdrawal_events" do field(:withdrawal_hash, Hash.Full, primary_key: true) field(:l1_event_type, Ecto.Enum, values: [:WithdrawalProven, :WithdrawalFinalized], primary_key: true) field(:l1_timestamp, :utc_datetime_usec) diff --git a/apps/explorer/lib/explorer/chain/transaction.ex b/apps/explorer/lib/explorer/chain/transaction.ex index c1c0918e8125..3b24bc235424 100644 --- a/apps/explorer/lib/explorer/chain/transaction.ex +++ b/apps/explorer/lib/explorer/chain/transaction.ex @@ -1857,4 +1857,22 @@ defmodule Explorer.Chain.Transaction do ) end end + + @doc """ + Returns the number of transactions included into the blocks of the specified block range. + Only consensus blocks are taken into account. + """ + @spec tx_count_for_block_range(Range.t()) :: non_neg_integer() + def tx_count_for_block_range(from..to) do + Repo.replica().aggregate( + from( + t in Transaction, + inner_join: b in Block, + on: b.number == t.block_number and b.consensus == true, + where: t.block_number >= ^from and t.block_number <= ^to + ), + :count, + timeout: :infinity + ) + end end diff --git a/apps/explorer/priv/optimism/migrations/20240503113124_add_celestia_blob_metadata.exs b/apps/explorer/priv/optimism/migrations/20240503113124_add_celestia_blob_metadata.exs new file mode 100644 index 000000000000..3878db97f015 --- /dev/null +++ b/apps/explorer/priv/optimism/migrations/20240503113124_add_celestia_blob_metadata.exs @@ -0,0 +1,31 @@ +defmodule Explorer.Repo.Optimism.Migrations.AddCelestiaBlobMetadata do + use Ecto.Migration + + def change do + execute( + "CREATE TYPE op_frame_sequence_blob_type AS ENUM ('celestia', 'eip4844')", + "DROP TYPE op_frame_sequence_blob_type" + ) + + create table(:op_frame_sequence_blobs, primary_key: false) do + add(:id, :bigint, null: false) + add(:key, :bytea, null: false, primary_key: true) + add(:type, :op_frame_sequence_blob_type, null: false, primary_key: true) + add(:metadata, :map, default: %{}, null: false) + add(:l1_transaction_hash, :bytea, null: false) + add(:l1_timestamp, :"timestamp without time zone", null: false) + + add( + :frame_sequence_id, + references(:op_frame_sequences, on_delete: :delete_all, on_update: :update_all, type: :bigint), + null: false + ) + + timestamps(null: false, type: :utc_datetime_usec) + end + + create(unique_index(:op_frame_sequence_blobs, :id)) + create(index(:op_frame_sequence_blobs, :frame_sequence_id)) + create(index(:op_transaction_batches, :frame_sequence_id)) + end +end diff --git a/apps/explorer/priv/optimism/migrations/20240612120541_add_view_ready_field.exs b/apps/explorer/priv/optimism/migrations/20240612120541_add_view_ready_field.exs new file mode 100644 index 000000000000..9b3e26b59cee --- /dev/null +++ b/apps/explorer/priv/optimism/migrations/20240612120541_add_view_ready_field.exs @@ -0,0 +1,11 @@ +defmodule Explorer.Repo.Optimism.Migrations.AddViewReadyField do + use Ecto.Migration + + def change do + alter table(:op_frame_sequences) do + add(:view_ready, :boolean, default: false, null: false) + end + + execute("UPDATE op_frame_sequences SET view_ready = TRUE") + end +end diff --git a/apps/explorer/priv/optimism/migrations/20240613065020_add_frame_sequence_id_prev_field.exs b/apps/explorer/priv/optimism/migrations/20240613065020_add_frame_sequence_id_prev_field.exs new file mode 100644 index 000000000000..35416d64b060 --- /dev/null +++ b/apps/explorer/priv/optimism/migrations/20240613065020_add_frame_sequence_id_prev_field.exs @@ -0,0 +1,11 @@ 
+defmodule Explorer.Repo.Optimism.Migrations.AddFrameSequenceIdPrevField do
+  use Ecto.Migration
+
+  def change do
+    alter table(:op_transaction_batches) do
+      add(:frame_sequence_id_prev, :bigint, default: 0, null: false)
+    end
+
+    create(index(:op_frame_sequences, :view_ready))
+  end
+end
diff --git a/apps/indexer/lib/indexer/fetcher/optimism.ex b/apps/indexer/lib/indexer/fetcher/optimism.ex
index 10cbbee29d41..d4605d9f08b1 100644
--- a/apps/indexer/lib/indexer/fetcher/optimism.ex
+++ b/apps/indexer/lib/indexer/fetcher/optimism.ex
@@ -17,9 +17,8 @@ defmodule Indexer.Fetcher.Optimism do
       request: 1
     ]

-  import Explorer.Helper, only: [parse_integer: 1]
-
   alias EthereumJSONRPC.Block.ByNumber
+  alias EthereumJSONRPC.Contract
   alias Indexer.Helper

   @fetcher_name :optimism
@@ -227,7 +226,19 @@ defmodule Indexer.Fetcher.Optimism do
     ]
   end

-  def init_continue(env, contract_address, caller)
+  @doc """
+    Performs initialization for the `Indexer.Fetcher.Optimism.WithdrawalEvent` or `Indexer.Fetcher.Optimism.OutputRoot` module.
+    Contains common code used by both modules.
+
+    ## Parameters
+    - `output_oracle`: The address of the L2OutputOracle contract on L1. Must be `nil` if the `caller` is not the `OutputRoot` module.
+    - `caller`: The module that called this function.
+
+    ## Returns
+    - A map for the `handle_continue` handler of the calling module.
+  """
+  @spec init_continue(binary() | nil, module()) :: {:noreply, map()} | {:stop, :normal, %{}}
+  def init_continue(output_oracle, caller)
       when caller in [Indexer.Fetcher.Optimism.WithdrawalEvent, Indexer.Fetcher.Optimism.OutputRoot] do
     {contract_name, table_name, start_block_note} =
       if caller == Indexer.Fetcher.Optimism.WithdrawalEvent do
@@ -236,22 +247,33 @@ defmodule Indexer.Fetcher.Optimism do
         {"Output Oracle", "op_output_roots", "Output Roots"}
       end

-    with {:start_block_l1_undefined, false} <- {:start_block_l1_undefined, is_nil(env[:start_block_l1])},
+    optimism_env = Application.get_all_env(:indexer)[__MODULE__]
+    system_config = optimism_env[:optimism_l1_system_config]
+    optimism_l1_rpc = optimism_env[:optimism_l1_rpc]
+
+    with {:system_config_valid, true} <- {:system_config_valid, Helper.address_correct?(system_config)},
          {:reorg_monitor_started, true} <-
            {:reorg_monitor_started, !is_nil(Process.whereis(Indexer.Fetcher.RollupL1ReorgMonitor))},
-         optimism_l1_rpc = Application.get_all_env(:indexer)[Indexer.Fetcher.Optimism][:optimism_l1_rpc],
         {:rpc_l1_undefined, false} <- {:rpc_l1_undefined, is_nil(optimism_l1_rpc)},
-         {:contract_is_valid, true} <- {:contract_is_valid, Helper.address_correct?(contract_address)},
-         start_block_l1 = parse_integer(env[:start_block_l1]),
-         false <- is_nil(start_block_l1),
+         json_rpc_named_arguments = json_rpc_named_arguments(optimism_l1_rpc),
+         {optimism_portal, start_block_l1} <- read_system_config(system_config, json_rpc_named_arguments),
+         {:contract_is_valid, true} <-
+           {:contract_is_valid,
+            caller == Indexer.Fetcher.Optimism.WithdrawalEvent or Helper.address_correct?(output_oracle)},
         true <- start_block_l1 > 0,
         {last_l1_block_number, last_l1_transaction_hash} <- caller.get_last_l1_item(),
         {:start_block_l1_valid, true} <-
           {:start_block_l1_valid, start_block_l1 <= last_l1_block_number || last_l1_block_number == 0},
-         json_rpc_named_arguments = json_rpc_named_arguments(optimism_l1_rpc),
         {:ok, last_l1_tx} <- get_transaction_by_hash(last_l1_transaction_hash, json_rpc_named_arguments),
         {:l1_tx_not_found, false} <- {:l1_tx_not_found, !is_nil(last_l1_transaction_hash) && is_nil(last_l1_tx)},
         {:ok, block_check_interval, last_safe_block} <-
get_block_check_interval(json_rpc_named_arguments) do
+      contract_address =
+        if caller == Indexer.Fetcher.Optimism.WithdrawalEvent do
+          optimism_portal
+        else
+          output_oracle
+        end
+
       start_block = max(start_block_l1, last_l1_block_number)

       Process.send(self(), :continue, [])
@@ -266,18 +288,21 @@ defmodule Indexer.Fetcher.Optimism do
           stop: false
        }}
     else
-      {:start_block_l1_undefined, true} ->
-        # the process shouldn't start if the start block is not defined
-        {:stop, :normal, %{}}
-
       {:reorg_monitor_started, false} ->
-        Logger.error("Cannot start this process as reorg monitor in Indexer.Fetcher.Optimism is not started.")
+        Logger.error(
+          "Cannot start this process as reorg monitor in Indexer.Fetcher.RollupL1ReorgMonitor is not started."
+        )
+
         {:stop, :normal, %{}}

       {:rpc_l1_undefined, true} ->
         Logger.error("L1 RPC URL is not defined.")
         {:stop, :normal, %{}}

+      {:system_config_valid, false} ->
+        Logger.error("SystemConfig contract address is invalid or undefined.")
+        {:stop, :normal, %{}}
+
       {:contract_is_valid, false} ->
         Logger.error("#{contract_name} contract address is invalid or not defined.")
         {:stop, :normal, %{}}
@@ -300,6 +325,10 @@ defmodule Indexer.Fetcher.Optimism do

         {:stop, :normal, %{}}

+      nil ->
+        Logger.error("Cannot read SystemConfig contract.")
+        {:stop, :normal, %{}}
+
       _ ->
         Logger.error("#{start_block_note} Start Block is invalid or zero.")
         {:stop, :normal, %{}}
@@ -309,4 +338,44 @@ defmodule Indexer.Fetcher.Optimism do
   def repeated_request(req, error_message, json_rpc_named_arguments, retries) do
     Helper.repeated_call(&json_rpc/2, [req, json_rpc_named_arguments], error_message, retries)
   end
+
+  @doc """
+    Reads the public getters of the SystemConfig contract and returns the retrieved values:
+    the `OptimismPortal` contract address and the start block number (from which all
+    Optimism fetchers should start).
+
+    ## Parameters
+    - `contract_address`: The address of the SystemConfig contract.
+    - `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection.
+
+    ## Returns
+    - A tuple {optimism_portal, start_block} with the OptimismPortal contract address and the start block.
+    - `nil` in case of error.
+  """
+  @spec read_system_config(binary(), list()) :: {binary(), non_neg_integer()} | nil
+  def read_system_config(contract_address, json_rpc_named_arguments) do
+    requests = [
+      # optimismPortal() public getter
+      Contract.eth_call_request("0x0a49cb03", contract_address, 0, nil, nil),
+      # startBlock() public getter
+      Contract.eth_call_request("0x48cd4cb1", contract_address, 1, nil, nil)
+    ]
+
+    error_message = &"Cannot call public getters of SystemConfig.
Error: #{inspect(&1)}" + + case Helper.repeated_call( + &json_rpc/2, + [requests, json_rpc_named_arguments], + error_message, + Helper.infinite_retries_number() + ) do + {:ok, responses} -> + "0x000000000000000000000000" <> optimism_portal = Enum.at(responses, 0).result + start_block = quantity_to_integer(Enum.at(responses, 1).result) + {"0x" <> optimism_portal, start_block} + + _ -> + nil + end + end end diff --git a/apps/indexer/lib/indexer/fetcher/optimism/deposit.ex b/apps/indexer/lib/indexer/fetcher/optimism/deposit.ex index c3a436358757..cd1116b87b9a 100644 --- a/apps/indexer/lib/indexer/fetcher/optimism/deposit.ex +++ b/apps/indexer/lib/indexer/fetcher/optimism/deposit.ex @@ -66,18 +66,16 @@ defmodule Indexer.Fetcher.Optimism.Deposit do Logger.metadata(fetcher: @fetcher_name) env = Application.get_all_env(:indexer)[__MODULE__] - optimism_env = Application.get_all_env(:indexer)[Indexer.Fetcher.Optimism] - optimism_portal = optimism_env[:optimism_l1_portal] + optimism_env = Application.get_all_env(:indexer)[Optimism] + system_config = optimism_env[:optimism_l1_system_config] optimism_l1_rpc = optimism_env[:optimism_l1_rpc] - with {:start_block_l1_undefined, false} <- {:start_block_l1_undefined, is_nil(env[:start_block_l1])}, - {:optimism_portal_valid, true} <- {:optimism_portal_valid, Helper.address_correct?(optimism_portal)}, + with {:system_config_valid, true} <- {:system_config_valid, Helper.address_correct?(system_config)}, {:rpc_l1_undefined, false} <- {:rpc_l1_undefined, is_nil(optimism_l1_rpc)}, - start_block_l1 <- parse_integer(env[:start_block_l1]), - false <- is_nil(start_block_l1), + json_rpc_named_arguments = Optimism.json_rpc_named_arguments(optimism_l1_rpc), + {optimism_portal, start_block_l1} <- Optimism.read_system_config(system_config, json_rpc_named_arguments), true <- start_block_l1 > 0, {last_l1_block_number, last_l1_tx_hash} <- get_last_l1_item(), - json_rpc_named_arguments = Optimism.json_rpc_named_arguments(optimism_l1_rpc), {:ok, last_l1_tx} <- Optimism.get_transaction_by_hash(last_l1_tx_hash, json_rpc_named_arguments), {:l1_tx_not_found, false} <- {:l1_tx_not_found, !is_nil(last_l1_tx_hash) && is_nil(last_l1_tx)}, {safe_block, _} = Optimism.get_safe_block(json_rpc_named_arguments), @@ -102,10 +100,6 @@ defmodule Indexer.Fetcher.Optimism.Deposit do batch_size: parse_integer(env[:batch_size]) || @batch_size }} else - {:start_block_l1_undefined, true} -> - # the process shouldn't start if the start block is not defined - {:stop, :normal, state} - {:start_block_l1_valid, false} -> Logger.error("Invalid L1 Start Block value. 
Please, check the value and op_deposits table.") {:stop, :normal, state} @@ -114,8 +108,8 @@ defmodule Indexer.Fetcher.Optimism.Deposit do Logger.error("L1 RPC URL is not defined.") {:stop, :normal, state} - {:optimism_portal_valid, false} -> - Logger.error("OptimismPortal contract address is invalid or undefined.") + {:system_config_valid, false} -> + Logger.error("SystemConfig contract address is invalid or undefined.") {:stop, :normal, state} {:error, error_data} -> @@ -130,6 +124,10 @@ defmodule Indexer.Fetcher.Optimism.Deposit do {:stop, :normal, state} + nil -> + Logger.error("Cannot read SystemConfig contract.") + {:stop, :normal, state} + _ -> Logger.error("Optimism deposits L1 Start Block is invalid or zero.") {:stop, :normal, state} diff --git a/apps/indexer/lib/indexer/fetcher/optimism/dispute_game.ex b/apps/indexer/lib/indexer/fetcher/optimism/dispute_game.ex index 7590a816aacf..1f6488859821 100644 --- a/apps/indexer/lib/indexer/fetcher/optimism/dispute_game.ex +++ b/apps/indexer/lib/indexer/fetcher/optimism/dispute_game.ex @@ -17,6 +17,7 @@ defmodule Indexer.Fetcher.Optimism.DisputeGame do alias Explorer.Application.Constants alias Explorer.{Chain, Helper, Repo} alias Explorer.Chain.Optimism.{DisputeGame, Withdrawal} + alias Indexer.Fetcher.Optimism alias Indexer.Helper, as: IndexerHelper @fetcher_name :optimism_dispute_games @@ -51,13 +52,14 @@ defmodule Indexer.Fetcher.Optimism.DisputeGame do def handle_continue(:ok, _state) do Logger.metadata(fetcher: @fetcher_name) - env = Application.get_all_env(:indexer)[Indexer.Fetcher.Optimism] + env = Application.get_all_env(:indexer)[Optimism] + system_config = env[:optimism_l1_system_config] rpc = env[:optimism_l1_rpc] - optimism_portal = env[:optimism_l1_portal] - with {:rpc_l1_undefined, false} <- {:rpc_l1_undefined, is_nil(rpc)}, - {:optimism_portal_valid, true} <- {:optimism_portal_valid, IndexerHelper.address_correct?(optimism_portal)}, + with {:system_config_valid, true} <- {:system_config_valid, IndexerHelper.address_correct?(system_config)}, + {:rpc_l1_undefined, false} <- {:rpc_l1_undefined, is_nil(rpc)}, json_rpc_named_arguments = IndexerHelper.json_rpc_named_arguments(rpc), + {optimism_portal, _} <- Optimism.read_system_config(system_config, json_rpc_named_arguments), dispute_game_factory = get_dispute_game_factory_address(optimism_portal, json_rpc_named_arguments), {:dispute_game_factory_available, true} <- {:dispute_game_factory_available, !is_nil(dispute_game_factory)}, game_count = get_game_count(dispute_game_factory, json_rpc_named_arguments), @@ -83,8 +85,8 @@ defmodule Indexer.Fetcher.Optimism.DisputeGame do Logger.error("L1 RPC URL is not defined.") {:stop, :normal, %{}} - {:optimism_portal_valid, false} -> - Logger.error("OptimismPortal contract address is invalid or undefined.") + {:system_config_valid, false} -> + Logger.error("SystemConfig contract address is invalid or undefined.") {:stop, :normal, %{}} {:dispute_game_factory_available, false} -> @@ -97,6 +99,10 @@ defmodule Indexer.Fetcher.Optimism.DisputeGame do {:game_count_available, false} -> Logger.error("Cannot read gameCount() public getter from the DisputeGameFactory contract.") {:stop, :normal, %{}} + + nil -> + Logger.error("Cannot read SystemConfig contract.") + {:stop, :normal, %{}} end end diff --git a/apps/indexer/lib/indexer/fetcher/optimism/output_root.ex b/apps/indexer/lib/indexer/fetcher/optimism/output_root.ex index 90fc83d0e937..cab47ce3423f 100644 --- a/apps/indexer/lib/indexer/fetcher/optimism/output_root.ex +++ 
b/apps/indexer/lib/indexer/fetcher/optimism/output_root.ex
@@ -53,7 +53,7 @@ defmodule Indexer.Fetcher.Optimism.OutputRoot do
       {:stop, :normal, %{}}
     else
       env = Application.get_all_env(:indexer)[__MODULE__]
-      Optimism.init_continue(env, env[:output_oracle], __MODULE__)
+      Optimism.init_continue(env[:output_oracle], __MODULE__)
     end
   end
diff --git a/apps/indexer/lib/indexer/fetcher/optimism/txn_batch.ex b/apps/indexer/lib/indexer/fetcher/optimism/txn_batch.ex
index d92bb30d6b3c..090424157766 100644
--- a/apps/indexer/lib/indexer/fetcher/optimism/txn_batch.ex
+++ b/apps/indexer/lib/indexer/fetcher/optimism/txn_batch.ex
@@ -1,6 +1,20 @@
 defmodule Indexer.Fetcher.Optimism.TxnBatch do
   @moduledoc """
-    Fills op_transaction_batches DB table.
+    Fills the op_transaction_batches, op_frame_sequences, and op_frame_sequence_blobs DB tables.
+
+    This module parses L1 batch transactions to retrieve info about batches of L2 blocks
+    and saves it to the database.
+
+    If an L1 transaction is a blob transaction (EIP-4844, Celestia, etc.), the blob
+    is read from the corresponding server by its API URL and parsed to get the raw data. Blob
+    metadata is saved to the op_frame_sequence_blobs DB table.
+
+    According to EIP-4844, an L1 transaction can have more than one blob. Each EIP-4844 blob
+    contains a separate frame (part of a frame sequence that encodes a batch), so an L1
+    EIP-4844 transaction usually represents a frame sequence.
+
+    A Celestia L1 transaction can have only one blob. A batch can be split into several
+    Celestia blobs related to different L1 transactions (usually following each other).
   """

   use GenServer
@@ -15,11 +29,11 @@ defmodule Indexer.Fetcher.Optimism.TxnBatch do
   import Explorer.Helper, only: [parse_integer: 1]

   alias EthereumJSONRPC.Block.ByHash
-  alias EthereumJSONRPC.Blocks
+  alias EthereumJSONRPC.{Blocks, Contract}
   alias Explorer.{Chain, Repo}
   alias Explorer.Chain.Beacon.Blob, as: BeaconBlob
   alias Explorer.Chain.{Block, Hash}
-  alias Explorer.Chain.Optimism.FrameSequence
+  alias Explorer.Chain.Optimism.{FrameSequence, FrameSequenceBlob}
   alias Explorer.Chain.Optimism.TxnBatch, as: OptimismTxnBatch
   alias HTTPoison.Response
   alias Indexer.Fetcher.Beacon.Blob
@@ -62,38 +76,49 @@ defmodule Indexer.Fetcher.Optimism.TxnBatch do
   end

   @impl GenServer
-  def handle_info(:init_with_delay, %{json_rpc_named_arguments_l2: json_rpc_named_arguments_l2} = state) do
+  def handle_info(
+        :init_with_delay,
+        %{json_rpc_named_arguments_l2: json_rpc_named_arguments_l2} = state
+      ) do
     env = Application.get_all_env(:indexer)[__MODULE__]

-    with {:start_block_l1_undefined, false} <- {:start_block_l1_undefined, is_nil(env[:start_block_l1])},
+    optimism_env = Application.get_all_env(:indexer)[Indexer.Fetcher.Optimism]
+    system_config = optimism_env[:optimism_l1_system_config]
+    optimism_l1_rpc = optimism_env[:optimism_l1_rpc]
+
+    with {:system_config_valid, true} <-
+           {:system_config_valid, Helper.address_correct?(system_config)},
          {:genesis_block_l2_invalid, false} <-
            {:genesis_block_l2_invalid, is_nil(env[:genesis_block_l2]) or env[:genesis_block_l2] < 0},
-         {:reorg_monitor_started, true} <- {:reorg_monitor_started, !is_nil(Process.whereis(RollupL1ReorgMonitor))},
-         optimism_l1_rpc = Application.get_all_env(:indexer)[Indexer.Fetcher.Optimism][:optimism_l1_rpc],
+         {:reorg_monitor_started, true} <-
+           {:reorg_monitor_started, !is_nil(Process.whereis(RollupL1ReorgMonitor))},
         {:rpc_l1_undefined, false} <- {:rpc_l1_undefined, is_nil(optimism_l1_rpc)},
-         {:blobs_api_url_undefined, false} <- {:blobs_api_url_undefined,
is_nil(env[:blobs_api_url])}, - {:batch_inbox_valid, true} <- {:batch_inbox_valid, Helper.address_correct?(env[:batch_inbox])}, - {:batch_submitter_valid, true} <- {:batch_submitter_valid, Helper.address_correct?(env[:batch_submitter])}, - start_block_l1 = parse_integer(env[:start_block_l1]), + json_rpc_named_arguments = Optimism.json_rpc_named_arguments(optimism_l1_rpc), + {start_block_l1, batch_inbox, batch_submitter} = read_system_config(system_config, json_rpc_named_arguments), + {:batch_inbox_valid, true} <- {:batch_inbox_valid, Helper.address_correct?(batch_inbox)}, + {:batch_submitter_valid, true} <- + {:batch_submitter_valid, Helper.address_correct?(batch_submitter)}, false <- is_nil(start_block_l1), true <- start_block_l1 > 0, chunk_size = parse_integer(env[:blocks_chunk_size]), {:chunk_size_valid, true} <- {:chunk_size_valid, !is_nil(chunk_size) && chunk_size > 0}, - json_rpc_named_arguments = Optimism.json_rpc_named_arguments(optimism_l1_rpc), {last_l1_block_number, last_l1_transaction_hash, last_l1_tx} = get_last_l1_item(json_rpc_named_arguments), {:start_block_l1_valid, true} <- {:start_block_l1_valid, start_block_l1 <= last_l1_block_number || last_l1_block_number == 0}, - {:l1_tx_not_found, false} <- {:l1_tx_not_found, !is_nil(last_l1_transaction_hash) && is_nil(last_l1_tx)}, - {:ok, block_check_interval, last_safe_block} <- Optimism.get_block_check_interval(json_rpc_named_arguments) do + {:l1_tx_not_found, false} <- + {:l1_tx_not_found, !is_nil(last_l1_transaction_hash) && is_nil(last_l1_tx)}, + {:ok, block_check_interval, last_safe_block} <- + Optimism.get_block_check_interval(json_rpc_named_arguments) do start_block = max(start_block_l1, last_l1_block_number) Process.send(self(), :continue, []) {:noreply, %{ - batch_inbox: String.downcase(env[:batch_inbox]), - batch_submitter: String.downcase(env[:batch_submitter]), - blobs_api_url: String.trim_trailing(env[:blobs_api_url], "/"), + batch_inbox: batch_inbox, + batch_submitter: batch_submitter, + eip4844_blobs_api_url: trim_url(env[:eip4844_blobs_api_url]), + celestia_blobs_api_url: trim_url(env[:celestia_blobs_api_url]), block_check_interval: block_check_interval, start_block: start_block, end_block: last_safe_block, @@ -104,8 +129,8 @@ defmodule Indexer.Fetcher.Optimism.TxnBatch do json_rpc_named_arguments_l2: json_rpc_named_arguments_l2 }} else - {:start_block_l1_undefined, true} -> - # the process shouldn't start if the start block is not defined + {:system_config_valid, false} -> + Logger.error("SystemConfig contract address is invalid or undefined.") {:stop, :normal, state} {:genesis_block_l2_invalid, true} -> @@ -123,10 +148,6 @@ defmodule Indexer.Fetcher.Optimism.TxnBatch do Logger.error("L1 RPC URL is not defined.") {:stop, :normal, state} - {:blobs_api_url_undefined, true} -> - Logger.error("L1 Blockscout Blobs API URL is not defined.") - {:stop, :normal, state} - {:batch_inbox_valid, false} -> Logger.error("Batch Inbox address is invalid or not defined.") {:stop, :normal, state} @@ -137,6 +158,7 @@ defmodule Indexer.Fetcher.Optimism.TxnBatch do {:start_block_l1_valid, false} -> Logger.error("Invalid L1 Start Block value. 
Please, check the value and op_transaction_batches table.") + {:stop, :normal, state} {:chunk_size_valid, false} -> @@ -155,19 +177,48 @@ defmodule Indexer.Fetcher.Optimism.TxnBatch do {:stop, :normal, state} + nil -> + Logger.error("Cannot read SystemConfig contract.") + {:stop, :normal, state} + _ -> Logger.error("Batch Start Block is invalid or zero.") {:stop, :normal, state} end end + # The main handler filtering L1 transactions and parsing them to retrieve + # batches of L2 blocks. + # + # The work is split into chunks (L1 block subranges) of the specified block range. + # The chunk size is configurable through INDEXER_OPTIMISM_L1_BATCH_BLOCKS_CHUNK_SIZE + # env variable. + # + # When the last block of the range is reached, the handler switches to `(last+1)..latest` + # block range and then handles the new blocks in realtime. The latest block number is checked + # every `block_check_interval` milliseconds which is calculated in the init function. + # + # ## Parameters (in the `state` param) + # - `batch_inbox`: L1 address which accepts L1 batch transactions + # - `batch_submitter`: L1 address which sends L1 batch transactions to the `batch_inbox` + # - `eip4844_blobs_api_url`: URL of Blockscout Blobs API to get EIP-4844 blobs + # - `celestia_blobs_api_url`: URL of the server where Celestia blobs can be read from + # - `block_check_interval`: time interval for checking latest block number + # - `start_block`: start block number of the block range + # - `end_block`: end block number of the block range + # - `chunk_size`: max number of L1 blocks in one chunk + # - `incomplete_channels`: intermediate map of channels (incomplete frame sequences) in memory + # - `genesis_block_l2`: Optimism BedRock upgrade L2 block number (used when parsing span batches) + # - `json_rpc_named_arguments`: data to connect to L1 RPC server + # - `json_rpc_named_arguments_l2`: data to connect to L2 RPC server @impl GenServer def handle_info( :continue, %{ batch_inbox: batch_inbox, batch_submitter: batch_submitter, - blobs_api_url: blobs_api_url, + eip4844_blobs_api_url: eip4844_blobs_api_url, + celestia_blobs_api_url: celestia_blobs_api_url, block_check_interval: block_check_interval, start_block: start_block, end_block: end_block, @@ -191,9 +242,16 @@ defmodule Indexer.Fetcher.Optimism.TxnBatch do new_incomplete_channels = if chunk_end >= chunk_start do - Helper.log_blocks_chunk_handling(chunk_start, chunk_end, start_block, end_block, nil, :L1) + Helper.log_blocks_chunk_handling( + chunk_start, + chunk_end, + start_block, + end_block, + nil, + :L1 + ) - {:ok, new_incomplete_channels, batches, sequences} = + {:ok, new_incomplete_channels, batches, sequences, blobs} = get_txn_batches( Range.new(chunk_start, chunk_end), batch_inbox, @@ -201,19 +259,28 @@ defmodule Indexer.Fetcher.Optimism.TxnBatch do genesis_block_l2, incomplete_channels_acc, {json_rpc_named_arguments, json_rpc_named_arguments_l2}, - blobs_api_url, + {eip4844_blobs_api_url, celestia_blobs_api_url}, Helper.infinite_retries_number() ) - {batches, sequences} = remove_duplicates(batches, sequences) + {batches, sequences, blobs} = remove_duplicates(batches, sequences, blobs) {:ok, _} = Chain.import(%{ optimism_frame_sequences: %{params: sequences}, + timeout: :infinity + }) + + {:ok, inserted} = + Chain.import(%{ + optimism_frame_sequence_blobs: %{params: blobs}, optimism_txn_batches: %{params: batches}, timeout: :infinity }) + remove_prev_frame_sequences(inserted) + set_frame_sequences_view_ready(sequences) + Helper.log_blocks_chunk_handling( 
chunk_start, chunk_end, @@ -232,6 +299,7 @@ defmodule Indexer.Fetcher.Optimism.TxnBatch do if !is_nil(reorg_block) && reorg_block > 0 do new_incomplete_channels = handle_l1_reorg(reorg_block, new_incomplete_channels) + {:halt, {if(reorg_block <= chunk_end, do: reorg_block - 1, else: chunk_end), new_incomplete_channels}} else {:cont, {chunk_end, new_incomplete_channels}} @@ -241,7 +309,11 @@ defmodule Indexer.Fetcher.Optimism.TxnBatch do new_start_block = last_written_block + 1 {:ok, new_end_block} = - Optimism.get_block_number_by_tag("latest", json_rpc_named_arguments, Helper.infinite_retries_number()) + Optimism.get_block_number_by_tag( + "latest", + json_rpc_named_arguments, + Helper.infinite_retries_number() + ) delay = if new_end_block == last_written_block do @@ -339,17 +411,13 @@ defmodule Indexer.Fetcher.Optimism.TxnBatch do ) ) - last_l1_transaction_hash = - if is_nil(l1_transaction_hashes) do - nil - else - List.last(l1_transaction_hashes) - end - - if is_nil(last_l1_transaction_hash) do + if is_nil(l1_transaction_hashes) do {0, nil, nil} else + last_l1_transaction_hash = List.last(l1_transaction_hashes) + {:ok, last_l1_tx} = Optimism.get_transaction_by_hash(last_l1_transaction_hash, json_rpc_named_arguments) + last_l1_block_number = quantity_to_integer(Map.get(last_l1_tx || %{}, "blockNumber", 0)) {last_l1_block_number, last_l1_transaction_hash, last_l1_tx} end @@ -409,9 +477,22 @@ defmodule Indexer.Fetcher.Optimism.TxnBatch do end end - defp blobs_to_inputs(transaction_hash, blob_versioned_hashes, block_timestamp, blobs_api_url) do + defp eip4844_blobs_to_inputs(_transaction_hash, _blob_versioned_hashes, _block_timestamp, "") do + Logger.error( + "Cannot read EIP-4844 blobs from the Blockscout Blobs API as the API URL is not defined. Please, check INDEXER_OPTIMISM_L1_BATCH_BLOCKSCOUT_BLOBS_API_URL env variable." + ) + + [] + end + + defp eip4844_blobs_to_inputs( + transaction_hash, + blob_versioned_hashes, + block_timestamp, + blobs_api_url + ) do blob_versioned_hashes - |> Enum.reduce([], fn blob_hash, acc -> + |> Enum.reduce([], fn blob_hash, inputs_acc -> with {:ok, response} <- http_get_request(blobs_api_url <> "/" <> blob_hash), blob_data = Map.get(response, "blob_data"), false <- is_nil(blob_data) do @@ -424,73 +505,157 @@ defmodule Indexer.Fetcher.Optimism.TxnBatch do if is_nil(decoded) do Logger.warning("Cannot decode the blob #{blob_hash} taken from the Blockscout Blobs API.") - acc + + inputs_acc else Logger.info( "The input for transaction #{transaction_hash} is taken from the Blockscout Blobs API. 
Blob hash: #{blob_hash}" ) - [decoded | acc] + input = %{ + bytes: decoded, + eip4844_blob_hash: blob_hash + } + + [input | inputs_acc] end else _ -> # read the data from the fallback source (beacon node) + eip4844_blobs_to_inputs_from_fallback( + transaction_hash, + blob_hash, + block_timestamp, + inputs_acc + ) + end + end) + |> Enum.reverse() + end - beacon_config = - :indexer - |> Application.get_env(Blob) - |> Keyword.take([:reference_slot, :reference_timestamp, :slot_duration]) - |> Enum.into(%{}) + defp eip4844_blobs_to_inputs_from_fallback( + transaction_hash, + blob_hash, + block_timestamp, + inputs_acc + ) do + beacon_config = + :indexer + |> Application.get_env(Blob) + |> Keyword.take([:reference_slot, :reference_timestamp, :slot_duration]) + |> Enum.into(%{}) + + {:ok, fetched_blobs} = + block_timestamp + |> DateTime.to_unix() + |> Blob.timestamp_to_slot(beacon_config) + |> BeaconClient.get_blob_sidecars() + + blobs = Map.get(fetched_blobs, "data", []) + + if Enum.empty?(blobs) do + raise "Empty data" + end - try do - {:ok, fetched_blobs} = - block_timestamp - |> DateTime.to_unix() - |> Blob.timestamp_to_slot(beacon_config) - |> BeaconClient.get_blob_sidecars() + decoded_blob_data = + blobs + |> Enum.find(fn b -> + b + |> Map.get("kzg_commitment", "0x") + |> String.trim_leading("0x") + |> Base.decode16!(case: :lower) + |> BeaconBlob.hash() + |> Hash.to_string() + |> Kernel.==(blob_hash) + end) + |> Map.get("blob") + |> String.trim_leading("0x") + |> Base.decode16!(case: :lower) + |> OptimismTxnBatch.decode_eip4844_blob() - blobs = Map.get(fetched_blobs, "data", []) + if is_nil(decoded_blob_data) do + raise "Invalid blob" + else + Logger.info( + "The input for transaction #{transaction_hash} is taken from the Beacon Node. Blob hash: #{blob_hash}" + ) - if Enum.empty?(blobs) do - raise "Empty data" - end + input = %{ + bytes: decoded_blob_data, + eip4844_blob_hash: blob_hash + } - decoded_blob_data = - blobs - |> Enum.find(fn b -> - b - |> Map.get("kzg_commitment", "0x") - |> String.trim_leading("0x") - |> Base.decode16!(case: :lower) - |> BeaconBlob.hash() - |> Hash.to_string() - |> Kernel.==(blob_hash) - end) - |> Map.get("blob") - |> String.trim_leading("0x") - |> Base.decode16!(case: :lower) - |> OptimismTxnBatch.decode_eip4844_blob() - - if is_nil(decoded_blob_data) do - raise "Invalid blob" - else - Logger.info( - "The input for transaction #{transaction_hash} is taken from the Beacon Node. Blob hash: #{blob_hash}" - ) + [input | inputs_acc] + end + rescue + reason -> + Logger.warning("Cannot decode the blob #{blob_hash} taken from the Beacon Node. Reason: #{inspect(reason)}") - [decoded_blob_data | acc] - end - rescue - reason -> - Logger.warning( - "Cannot decode the blob #{blob_hash} taken from the Beacon Node. 
Reason: #{inspect(reason)}" - ) + inputs_acc + end - acc - end - end - end) - |> Enum.reverse() + defp celestia_blob_to_input("0x" <> tx_input, tx_hash, blobs_api_url) do + tx_input + |> Base.decode16!(case: :mixed) + |> celestia_blob_to_input(tx_hash, blobs_api_url) + end + + defp celestia_blob_to_input(tx_input, _tx_hash, blobs_api_url) + when byte_size(tx_input) == 1 + 8 + 32 and blobs_api_url != "" do + # the first byte encodes Celestia sign 0xCE + + # the next 8 bytes encode little-endian Celestia blob height + height = + tx_input + |> binary_part(1, 8) + |> :binary.decode_unsigned(:little) + + # the next 32 bytes contain the commitment + commitment = binary_part(tx_input, 1 + 8, 32) + commitment_string = Base.encode16(commitment, case: :lower) + + url = blobs_api_url <> "?height=#{height}&commitment=" <> commitment_string + + with {:ok, response} <- http_get_request(url), + namespace = Map.get(response, "namespace"), + data = Map.get(response, "data"), + true <- !is_nil(namespace) and !is_nil(data) do + data_decoded = Base.decode64!(data) + + [ + %{ + bytes: data_decoded, + celestia_blob_metadata: %{ + height: height, + namespace: "0x" <> namespace, + commitment: "0x" <> commitment_string + } + } + ] + else + false -> + Logger.error("Cannot read namespace or data from Celestia Blobs API response for the request #{url}") + + [] + + _ -> + Logger.error("Cannot read a response from Celestia Blobs API for the request #{url}") + [] + end + end + + defp celestia_blob_to_input(_tx_input, tx_hash, blobs_api_url) when blobs_api_url != "" do + Logger.error("L1 transaction with Celestia commitment has incorrect input length. Tx hash: #{tx_hash}") + + [] + end + + defp celestia_blob_to_input(_tx_input, _tx_hash, "") do + Logger.error( + "Cannot read Celestia blobs from the server as the API URL is not defined. Please, check INDEXER_OPTIMISM_L1_BATCH_CELESTIA_BLOBS_API_URL env variable." 
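+      # Note: INDEXER_OPTIMISM_L1_BATCH_CELESTIA_BLOBS_API_URL defaults to ""
+      # in config/runtime.exs (see below), so leaving the variable unset always
+      # routes Celestia-flagged calldata (first byte 0xCE) into this clause.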
+ ) + + [] end defp get_txn_batches_inner( @@ -499,36 +664,49 @@ defmodule Indexer.Fetcher.Optimism.TxnBatch do genesis_block_l2, incomplete_channels, json_rpc_named_arguments_l2, - blobs_api_url + {eip4844_blobs_api_url, celestia_blobs_api_url} ) do transactions_filtered - |> Enum.reduce({:ok, incomplete_channels, [], []}, fn tx, - {_, incomplete_channels_acc, batches_acc, sequences_acc} -> + |> Enum.reduce({:ok, incomplete_channels, [], [], []}, fn tx, + {_, incomplete_channels_acc, batches_acc, sequences_acc, + blobs_acc} -> inputs = - if tx.type == 3 do - # this is EIP-4844 transaction, so we get the inputs from the blobs - block_timestamp = get_block_timestamp_by_number(tx.block_number, blocks_params) - blobs_to_inputs(tx.hash, tx.blob_versioned_hashes, block_timestamp, blobs_api_url) - else - [tx.input] + cond do + tx.type == 3 -> + # this is EIP-4844 transaction, so we get the inputs from the blobs + block_timestamp = get_block_timestamp_by_number(tx.block_number, blocks_params) + + eip4844_blobs_to_inputs( + tx.hash, + tx.blob_versioned_hashes, + block_timestamp, + eip4844_blobs_api_url + ) + + first_byte(tx.input) == 0xCE -> + # this is Celestia DA transaction, so we get the data from Celestia blob + celestia_blob_to_input(tx.input, tx.hash, celestia_blobs_api_url) + + true -> + # this is calldata transaction, so the data is in the transaction input + [%{bytes: tx.input}] end - Enum.reduce(inputs, {:ok, incomplete_channels_acc, batches_acc, sequences_acc}, fn input, - {_, - new_incomplete_channels_acc, - new_batches_acc, - new_sequences_acc} -> - handle_input( - input, - tx, - blocks_params, - new_incomplete_channels_acc, - new_batches_acc, - new_sequences_acc, - genesis_block_l2, - json_rpc_named_arguments_l2 - ) - end) + Enum.reduce( + inputs, + {:ok, incomplete_channels_acc, batches_acc, sequences_acc, blobs_acc}, + fn input, {_, new_incomplete_channels_acc, new_batches_acc, new_sequences_acc, new_blobs_acc} -> + handle_input( + input, + tx, + blocks_params, + new_incomplete_channels_acc, + {new_batches_acc, new_sequences_acc, new_blobs_acc}, + genesis_block_l2, + json_rpc_named_arguments_l2 + ) + end + ) end) end @@ -537,18 +715,19 @@ defmodule Indexer.Fetcher.Optimism.TxnBatch do tx, blocks_params, incomplete_channels_acc, - batches_acc, - sequences_acc, + {batches_acc, sequences_acc, blobs_acc}, genesis_block_l2, json_rpc_named_arguments_l2 ) do - frame = input_to_frame(input) + frame = input_to_frame(input.bytes) if frame == :invalid_frame do Logger.warning("The frame in transaction #{tx.hash} is invalid.") raise "Invalid frame" end + block_timestamp = get_block_timestamp_by_number(tx.block_number, blocks_params) + channel = Map.get(incomplete_channels_acc, frame.channel_id, %{frames: %{}}) channel_frames = @@ -556,12 +735,15 @@ defmodule Indexer.Fetcher.Optimism.TxnBatch do data: frame.data, is_last: frame.is_last, block_number: tx.block_number, - tx_hash: tx.hash + block_timestamp: block_timestamp, + tx_hash: tx.hash, + eip4844_blob_hash: Map.get(input, :eip4844_blob_hash), + celestia_blob_metadata: Map.get(input, :celestia_blob_metadata) }) l1_timestamp = if frame.is_last do - get_block_timestamp_by_number(tx.block_number, blocks_params) + block_timestamp else Map.get(channel, :l1_timestamp) end @@ -579,14 +761,15 @@ defmodule Indexer.Fetcher.Optimism.TxnBatch do incomplete_channels_acc, batches_acc, sequences_acc, + blobs_acc, genesis_block_l2, json_rpc_named_arguments_l2 ) else - {:ok, Map.put(incomplete_channels_acc, frame.channel_id, channel_updated), batches_acc, 
sequences_acc} + {:ok, Map.put(incomplete_channels_acc, frame.channel_id, channel_updated), batches_acc, sequences_acc, blobs_acc} end rescue - _ -> {:ok, incomplete_channels_acc, batches_acc, sequences_acc} + _ -> {:ok, incomplete_channels_acc, batches_acc, sequences_acc, blobs_acc} end defp handle_channel( @@ -594,17 +777,67 @@ defmodule Indexer.Fetcher.Optimism.TxnBatch do incomplete_channels_acc, batches_acc, sequences_acc, + blobs_acc, genesis_block_l2, json_rpc_named_arguments_l2 ) do frame_sequence_last = List.first(sequences_acc) frame_sequence_id = next_frame_sequence_id(frame_sequence_last) - {bytes, l1_transaction_hashes} = + {bytes, l1_transaction_hashes, new_blobs_acc} = 0..(Enum.count(channel.frames) - 1) - |> Enum.reduce({<<>>, []}, fn frame_number, {bytes_acc, tx_hashes_acc} -> + |> Enum.reduce({<<>>, [], blobs_acc}, fn frame_number, {bytes_acc, tx_hashes_acc, new_blobs_acc} -> frame = Map.get(channel.frames, frame_number) - {bytes_acc <> frame.data, [frame.tx_hash | tx_hashes_acc]} + + next_blob_id = next_blob_id(List.last(new_blobs_acc)) + + new_blobs_acc = + cond do + !is_nil(Map.get(frame, :eip4844_blob_hash)) -> + # credo:disable-for-next-line /Credo.Check.Refactor.AppendSingleItem/ + new_blobs_acc ++ + [ + %{ + id: next_blob_id, + key: + Base.decode16!(String.trim_leading(frame.eip4844_blob_hash, "0x"), + case: :mixed + ), + type: :eip4844, + metadata: %{hash: frame.eip4844_blob_hash}, + l1_transaction_hash: frame.tx_hash, + l1_timestamp: frame.block_timestamp, + frame_sequence_id: frame_sequence_id + } + ] + + !is_nil(Map.get(frame, :celestia_blob_metadata)) -> + height = :binary.encode_unsigned(frame.celestia_blob_metadata.height) + + commitment = + Base.decode16!(String.trim_leading(frame.celestia_blob_metadata.commitment, "0x"), + case: :mixed + ) + + # credo:disable-for-next-line /Credo.Check.Refactor.AppendSingleItem/ + new_blobs_acc ++ + [ + %{ + id: next_blob_id, + key: :crypto.hash(:sha256, height <> commitment), + type: :celestia, + metadata: frame.celestia_blob_metadata, + l1_transaction_hash: frame.tx_hash, + l1_timestamp: frame.block_timestamp, + frame_sequence_id: frame_sequence_id + } + ] + + true -> + new_blobs_acc + end + + {bytes_acc <> frame.data, [frame.tx_hash | tx_hashes_acc], new_blobs_acc} end) batches_parsed = @@ -620,7 +853,7 @@ defmodule Indexer.Fetcher.Optimism.TxnBatch do Logger.error("Cannot parse frame sequence from these L1 transaction(s): #{inspect(l1_transaction_hashes)}") end - seq = %{ + sequence = %{ id: frame_sequence_id, l1_transaction_hashes: Enum.uniq(Enum.reverse(l1_transaction_hashes)), l1_timestamp: channel.l1_timestamp @@ -632,9 +865,9 @@ defmodule Indexer.Fetcher.Optimism.TxnBatch do |> remove_expired_channels() if batches_parsed == :error or Enum.empty?(batches_parsed) do - {:ok, new_incomplete_channels_acc, batches_acc, sequences_acc} + {:ok, new_incomplete_channels_acc, batches_acc, sequences_acc, blobs_acc} else - {:ok, new_incomplete_channels_acc, batches_acc ++ batches_parsed, [seq | sequences_acc]} + {:ok, new_incomplete_channels_acc, batches_acc ++ batches_parsed, [sequence | sequences_acc], new_blobs_acc} end end @@ -682,7 +915,7 @@ defmodule Indexer.Fetcher.Optimism.TxnBatch do end end - defp http_get_request(url) do + defp http_get_request(url, attempts_done \\ 0) do case Application.get_env(:explorer, :http_adapter).get(url) do {:ok, %Response{body: body, status_code: 200}} -> Jason.decode(body) @@ -696,13 +929,24 @@ defmodule Indexer.Fetcher.Optimism.TxnBatch do Logger.error(fn -> [ - "Error while sending 
request to Blockscout Blobs API: #{url}: ", + "Error while sending request to Blobs API: #{url}: ", inspect(error, limit: :infinity, printable_limit: :infinity) ] end) Logger.configure(truncate: old_truncate) - {:error, "Error while sending request to Blockscout Blobs API"} + + # retry to send the request + attempts_done = attempts_done + 1 + + if attempts_done < 3 do + # wait up to 20 minutes and then retry + :timer.sleep(min(3000 * Integer.pow(2, attempts_done - 1), 1_200_000)) + Logger.info("Retry to send the request to #{url} ...") + http_get_request(url, attempts_done) + else + {:error, "Error while sending request to Blobs API"} + end end end @@ -712,7 +956,8 @@ defmodule Indexer.Fetcher.Optimism.TxnBatch do |> Map.keys() |> Enum.max() - Map.get(channel.frames, last_frame_number).is_last and last_frame_number == Enum.count(channel.frames) - 1 + Map.get(channel.frames, last_frame_number).is_last and + last_frame_number == Enum.count(channel.frames) - 1 end defp remove_expired_channels(channels_map) do @@ -752,19 +997,12 @@ defmodule Indexer.Fetcher.Optimism.TxnBatch do is_last_size = 1 # the first byte must be zero - [derivation_version] = :binary.bin_to_list(binary_part(input_binary, 0, derivation_version_length)) - - cond do - derivation_version == 0x00 -> - nil + derivation_version = first_byte(input_binary) - derivation_version == 0xCE -> - Logger.warning("The module does not support Celestia DA yet. The frame will be ignored.") - raise "Unsupported derivation version" + if derivation_version != 0x00 do + Logger.warning("Derivation version #{derivation_version} is not supported. The frame will be ignored.") - true -> - Logger.warning("Derivation version #{derivation_version} is not supported. The frame will be ignored.") - raise "Unsupported derivation version" + raise "Unsupported derivation version" end # channel id has 16 bytes @@ -772,6 +1010,7 @@ defmodule Indexer.Fetcher.Optimism.TxnBatch do # frame number consists of 2 bytes frame_number_offset = derivation_version_length + channel_id_length + frame_number = :binary.decode_unsigned(binary_part(input_binary, frame_number_offset, frame_number_size)) # frame data length consists of 4 bytes @@ -781,7 +1020,8 @@ defmodule Indexer.Fetcher.Optimism.TxnBatch do :binary.decode_unsigned(binary_part(input_binary, frame_data_length_offset, frame_data_length_size)) input_length_must_be = - derivation_version_length + channel_id_length + frame_number_size + frame_data_length_size + frame_data_length + + derivation_version_length + channel_id_length + frame_number_size + frame_data_length_size + + frame_data_length + is_last_size input_length_current = byte_size(input_binary) @@ -793,6 +1033,7 @@ defmodule Indexer.Fetcher.Optimism.TxnBatch do # is_last is 1-byte item is_last_offset = frame_data_offset + frame_data_length + is_last = :binary.decode_unsigned(binary_part(input_binary, is_last_offset, is_last_size)) > 0 %{number: frame_number, data: frame_data, is_last: is_last, channel_id: channel_id} @@ -805,6 +1046,28 @@ defmodule Indexer.Fetcher.Optimism.TxnBatch do _ -> :invalid_frame end + defp next_blob_id(last_known_blob) when is_nil(last_known_blob) do + last_known_id = + Repo.one( + from( + fsb in FrameSequenceBlob, + select: fsb.id, + order_by: [desc: fsb.id], + limit: 1 + ) + ) + + if is_nil(last_known_id) do + 1 + else + last_known_id + 1 + end + end + + defp next_blob_id(last_known_blob) do + last_known_blob.id + 1 + end + defp next_frame_sequence_id(last_known_sequence) when is_nil(last_known_sequence) do last_known_id = 
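      # Seed the sequence counter from the last id persisted in the database;
      # next_blob_id/1 above uses the same fallback for FrameSequenceBlob.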
Repo.one( @@ -915,6 +1178,7 @@ defmodule Indexer.Fetcher.Optimism.TxnBatch do cond do rem(rel_timestamp, @op_chain_block_time) != 0 -> Logger.error("rel_timestamp is not divisible by #{@op_chain_block_time}. We ignore the span batch.") + batch_acc block_count <= 0 -> @@ -948,7 +1212,7 @@ defmodule Indexer.Fetcher.Optimism.TxnBatch do end end - defp remove_duplicates(batches, sequences) do + defp remove_duplicates(batches, sequences, blobs) do unique_batches = batches |> Enum.sort(fn b1, b2 -> @@ -961,17 +1225,49 @@ defmodule Indexer.Fetcher.Optimism.TxnBatch do |> Map.values() unique_sequences = - if Enum.empty?(sequences) do - [] - else - sequences - |> Enum.reverse() - |> Enum.filter(fn seq -> - Enum.any?(unique_batches, fn batch -> batch.frame_sequence_id == seq.id end) - end) - end + sequences + |> Enum.reverse() + |> Enum.filter(fn seq -> + Enum.any?(unique_batches, fn batch -> batch.frame_sequence_id == seq.id end) + end) - {unique_batches, unique_sequences} + unique_blobs = + blobs + |> Enum.reduce(%{}, fn b, acc -> + prev_id = Map.get(acc, b.key, %{id: 0}).id + + if prev_id < b.id do + Map.put(acc, b.key, b) + else + acc + end + end) + |> Map.values() + |> Enum.filter(fn b -> + Enum.any?(unique_sequences, fn sec -> sec.id == b.frame_sequence_id end) + end) + + {unique_batches, unique_sequences, unique_blobs} + end + + defp remove_prev_frame_sequences(inserted) do + ids = + inserted + |> Map.get(:insert_txn_batches, []) + |> Enum.map(fn tb -> tb.frame_sequence_id_prev end) + |> Enum.uniq() + |> Enum.filter(fn id -> id > 0 end) + + Repo.delete_all(from(fs in FrameSequence, where: fs.id in ^ids)) + end + + defp set_frame_sequences_view_ready(sequences) do + sequence_ids = Enum.map(sequences, fn s -> s.id end) + + Repo.update_all( + from(fs in FrameSequence, where: fs.id in ^sequence_ids), + set: [view_ready: true] + ) end defp txs_filter(transactions_params, batch_submitter, batch_inbox) do @@ -983,9 +1279,63 @@ defmodule Indexer.Fetcher.Optimism.TxnBatch do if is_nil(from_address_hash) or is_nil(to_address_hash) do false else - String.downcase(from_address_hash) == batch_submitter and String.downcase(to_address_hash) == batch_inbox + String.downcase(from_address_hash) == batch_submitter and + String.downcase(to_address_hash) == batch_inbox end end) + |> Enum.sort(fn t1, t2 -> + {t1.block_number, t1.index} < {t2.block_number, t2.index} + end) + end + + defp read_system_config(contract_address, json_rpc_named_arguments) do + requests = [ + # startBlock() public getter + Contract.eth_call_request("0x48cd4cb1", contract_address, 0, nil, nil), + # batchInbox() public getter + Contract.eth_call_request("0xdac6e63a", contract_address, 1, nil, nil), + # batcherHash() public getter + Contract.eth_call_request("0xe81b2c6d", contract_address, 2, nil, nil) + ] + + error_message = &"Cannot call public getters of SystemConfig. 
Error: #{inspect(&1)}" + + case Helper.repeated_call( + &json_rpc/2, + [requests, json_rpc_named_arguments], + error_message, + Helper.infinite_retries_number() + ) do + {:ok, responses} -> + start_block = quantity_to_integer(Enum.at(responses, 0).result) + "0x000000000000000000000000" <> batch_inbox = Enum.at(responses, 1).result + "0x000000000000000000000000" <> batch_submitter = Enum.at(responses, 2).result + + {start_block, String.downcase("0x" <> batch_inbox), String.downcase("0x" <> batch_submitter)} + + _ -> + nil + end + end + + defp first_byte("0x" <> tx_input) do + tx_input + |> Base.decode16!(case: :mixed) + |> first_byte() + end + + defp first_byte(<>) do + version_byte + end + + defp first_byte(_tx_input) do + nil + end + + defp trim_url(url) do + url + |> String.trim() + |> String.trim_trailing("/") end defp zlib_decompress(bytes) do diff --git a/apps/indexer/lib/indexer/fetcher/optimism/withdrawal_event.ex b/apps/indexer/lib/indexer/fetcher/optimism/withdrawal_event.ex index 85020312d712..f2472a5c2716 100644 --- a/apps/indexer/lib/indexer/fetcher/optimism/withdrawal_event.ex +++ b/apps/indexer/lib/indexer/fetcher/optimism/withdrawal_event.ex @@ -57,10 +57,7 @@ defmodule Indexer.Fetcher.Optimism.WithdrawalEvent do def handle_continue(:ok, _state) do Logger.metadata(fetcher: @fetcher_name) - env = Application.get_all_env(:indexer)[__MODULE__] - optimism_l1_portal = Application.get_all_env(:indexer)[Indexer.Fetcher.Optimism][:optimism_l1_portal] - - Optimism.init_continue(env, optimism_l1_portal, __MODULE__) + Optimism.init_continue(nil, __MODULE__) end @impl GenServer diff --git a/apps/indexer/lib/indexer/fetcher/rollup_l1_reorg_monitor.ex b/apps/indexer/lib/indexer/fetcher/rollup_l1_reorg_monitor.ex index 5da08bf1d717..20b5c278609e 100644 --- a/apps/indexer/lib/indexer/fetcher/rollup_l1_reorg_monitor.ex +++ b/apps/indexer/lib/indexer/fetcher/rollup_l1_reorg_monitor.ex @@ -31,21 +31,31 @@ defmodule Indexer.Fetcher.RollupL1ReorgMonitor do def init(_args) do Logger.metadata(fetcher: @fetcher_name) - modules_can_use_reorg_monitor = [ + optimism_modules = [ Indexer.Fetcher.Optimism.OutputRoot, Indexer.Fetcher.Optimism.TxnBatch, - Indexer.Fetcher.Optimism.WithdrawalEvent, - Indexer.Fetcher.PolygonEdge.Deposit, - Indexer.Fetcher.PolygonEdge.WithdrawalExit, - Indexer.Fetcher.PolygonZkevm.BridgeL1, - Indexer.Fetcher.Shibarium.L1 + Indexer.Fetcher.Optimism.WithdrawalEvent ] + modules_can_use_reorg_monitor = + optimism_modules ++ + [ + Indexer.Fetcher.PolygonEdge.Deposit, + Indexer.Fetcher.PolygonEdge.WithdrawalExit, + Indexer.Fetcher.PolygonZkevm.BridgeL1, + Indexer.Fetcher.Shibarium.L1 + ] + modules_using_reorg_monitor = modules_can_use_reorg_monitor |> Enum.reject(fn module -> - module_config = Application.get_all_env(:indexer)[module] - is_nil(module_config[:start_block]) and is_nil(module_config[:start_block_l1]) + if module in optimism_modules do + optimism_config = Application.get_all_env(:indexer)[Indexer.Fetcher.Optimism] + is_nil(optimism_config[:optimism_l1_system_config]) + else + module_config = Application.get_all_env(:indexer)[module] + is_nil(module_config[:start_block]) and is_nil(module_config[:start_block_l1]) + end end) if Enum.empty?(modules_using_reorg_monitor) do @@ -68,11 +78,7 @@ defmodule Indexer.Fetcher.RollupL1ReorgMonitor do Application.get_all_env(:indexer)[Indexer.Fetcher.PolygonEdge][:polygon_edge_l1_rpc] Enum.member?( - [ - Indexer.Fetcher.Optimism.OutputRoot, - Indexer.Fetcher.Optimism.TxnBatch, - Indexer.Fetcher.Optimism.WithdrawalEvent - ], + 
optimism_modules, module_using_reorg_monitor ) -> # there can be more than one Optimism.* modules, so we get the common L1 RPC URL for them from Indexer.Fetcher.Optimism diff --git a/config/runtime.exs b/config/runtime.exs index 4ba4f097b3c4..3d972e2009a9 100644 --- a/config/runtime.exs +++ b/config/runtime.exs @@ -802,29 +802,22 @@ config :indexer, Indexer.Fetcher.Optimism.WithdrawalEvent.Supervisor, enabled: C config :indexer, Indexer.Fetcher.Optimism, optimism_l1_rpc: System.get_env("INDEXER_OPTIMISM_L1_RPC"), - optimism_l1_portal: System.get_env("INDEXER_OPTIMISM_L1_PORTAL_CONTRACT") + optimism_l1_system_config: System.get_env("INDEXER_OPTIMISM_L1_SYSTEM_CONFIG_CONTRACT") -config :indexer, Indexer.Fetcher.Optimism.Deposit, - start_block_l1: System.get_env("INDEXER_OPTIMISM_L1_DEPOSITS_START_BLOCK"), - batch_size: System.get_env("INDEXER_OPTIMISM_L1_DEPOSITS_BATCH_SIZE") +config :indexer, Indexer.Fetcher.Optimism.Deposit, batch_size: System.get_env("INDEXER_OPTIMISM_L1_DEPOSITS_BATCH_SIZE") config :indexer, Indexer.Fetcher.Optimism.OutputRoot, - start_block_l1: System.get_env("INDEXER_OPTIMISM_L1_OUTPUT_ROOTS_START_BLOCK"), output_oracle: System.get_env("INDEXER_OPTIMISM_L1_OUTPUT_ORACLE_CONTRACT") config :indexer, Indexer.Fetcher.Optimism.Withdrawal, - start_block_l2: System.get_env("INDEXER_OPTIMISM_L2_WITHDRAWALS_START_BLOCK"), - message_passer: System.get_env("INDEXER_OPTIMISM_L2_MESSAGE_PASSER_CONTRACT") - -config :indexer, Indexer.Fetcher.Optimism.WithdrawalEvent, - start_block_l1: System.get_env("INDEXER_OPTIMISM_L1_WITHDRAWALS_START_BLOCK") + start_block_l2: System.get_env("INDEXER_OPTIMISM_L2_WITHDRAWALS_START_BLOCK", "1"), + message_passer: + System.get_env("INDEXER_OPTIMISM_L2_MESSAGE_PASSER_CONTRACT", "0x4200000000000000000000000000000000000016") config :indexer, Indexer.Fetcher.Optimism.TxnBatch, - start_block_l1: System.get_env("INDEXER_OPTIMISM_L1_BATCH_START_BLOCK"), - batch_inbox: System.get_env("INDEXER_OPTIMISM_L1_BATCH_INBOX"), - batch_submitter: System.get_env("INDEXER_OPTIMISM_L1_BATCH_SUBMITTER"), blocks_chunk_size: System.get_env("INDEXER_OPTIMISM_L1_BATCH_BLOCKS_CHUNK_SIZE", "4"), - blobs_api_url: System.get_env("INDEXER_OPTIMISM_L1_BATCH_BLOCKSCOUT_BLOBS_API_URL"), + eip4844_blobs_api_url: System.get_env("INDEXER_OPTIMISM_L1_BATCH_BLOCKSCOUT_BLOBS_API_URL", ""), + celestia_blobs_api_url: System.get_env("INDEXER_OPTIMISM_L1_BATCH_CELESTIA_BLOBS_API_URL", ""), genesis_block_l2: ConfigHelper.parse_integer_or_nil_env_var("INDEXER_OPTIMISM_L2_BATCH_GENESIS_BLOCK_NUMBER") config :indexer, Indexer.Fetcher.Withdrawal.Supervisor, diff --git a/cspell.json b/cspell.json index de3b000448eb..579cb8d3cdd7 100644 --- a/cspell.json +++ b/cspell.json @@ -490,6 +490,7 @@ "subnetwork", "subqueries", "subquery", + "subranges", "subsubcalls", "subtrace", "subtraces", diff --git a/docker-compose/envs/common-blockscout.env b/docker-compose/envs/common-blockscout.env index 1f6138874d82..e37f225202be 100644 --- a/docker-compose/envs/common-blockscout.env +++ b/docker-compose/envs/common-blockscout.env @@ -254,18 +254,12 @@ INDEXER_DISABLE_INTERNAL_TRANSACTIONS_FETCHER=false # INDEXER_GRACEFUL_SHUTDOWN_PERIOD= # WITHDRAWALS_FIRST_BLOCK= # INDEXER_OPTIMISM_L1_RPC= -# INDEXER_OPTIMISM_L1_BATCH_START_BLOCK= -# INDEXER_OPTIMISM_L1_BATCH_INBOX= -# INDEXER_OPTIMISM_L1_BATCH_SUBMITTER= +# INDEXER_OPTIMISM_L1_SYSTEM_CONFIG_CONTRACT= # INDEXER_OPTIMISM_L1_BATCH_BLOCKS_CHUNK_SIZE= # INDEXER_OPTIMISM_L2_BATCH_GENESIS_BLOCK_NUMBER= -# INDEXER_OPTIMISM_L1_PORTAL_CONTRACT= -# 
INDEXER_OPTIMISM_L1_OUTPUT_ROOTS_START_BLOCK= # INDEXER_OPTIMISM_L1_OUTPUT_ORACLE_CONTRACT= -# INDEXER_OPTIMISM_L1_WITHDRAWALS_START_BLOCK= # INDEXER_OPTIMISM_L2_WITHDRAWALS_START_BLOCK= # INDEXER_OPTIMISM_L2_MESSAGE_PASSER_CONTRACT= -# INDEXER_OPTIMISM_L1_DEPOSITS_START_BLOCK= # INDEXER_OPTIMISM_L1_DEPOSITS_BATCH_SIZE= # ROOTSTOCK_REMASC_ADDRESS= # ROOTSTOCK_BRIDGE_ADDRESS= From ad78a4d57aaa0540ad776dc20df229c629cbd797 Mon Sep 17 00:00:00 2001 From: varasev <33550681+varasev@users.noreply.github.com> Date: Mon, 15 Jul 2024 11:15:11 +0300 Subject: [PATCH 24/32] Add Brotli decompression to Indexer.Fetcher.Optimism.TxnBatch (#10410) * Add Brotli decompression to Indexer.Fetcher.Optimism.TxnBatch * Update spelling --------- Co-authored-by: POA <33550681+poa@users.noreply.github.com> --- .../lib/indexer/fetcher/optimism/txn_batch.ex | 14 ++++++++++---- apps/indexer/mix.exs | 2 ++ cspell.json | 1 + mix.lock | 1 + 4 files changed, 14 insertions(+), 4 deletions(-) diff --git a/apps/indexer/lib/indexer/fetcher/optimism/txn_batch.ex b/apps/indexer/lib/indexer/fetcher/optimism/txn_batch.ex index 090424157766..c122d1713c6b 100644 --- a/apps/indexer/lib/indexer/fetcher/optimism/txn_batch.ex +++ b/apps/indexer/lib/indexer/fetcher/optimism/txn_batch.ex @@ -47,6 +47,8 @@ defmodule Indexer.Fetcher.Optimism.TxnBatch do # Optimism chain block time is a constant (2 seconds) @op_chain_block_time 2 + @compressor_brotli 1 + def child_spec(start_link_arguments) do spec = %{ id: __MODULE__, @@ -1097,15 +1099,20 @@ defmodule Indexer.Fetcher.Optimism.TxnBatch do genesis_block_l2, json_rpc_named_arguments_l2 ) do - uncompressed_bytes = zlib_decompress(bytes) + uncompressed_bytes = + if first_byte(bytes) == @compressor_brotli do + {:ok, uncompressed} = :brotli.decode(binary_part(bytes, 1, byte_size(bytes) - 1)) + uncompressed + else + zlib_decompress(bytes) + end batches = Enum.reduce_while(Stream.iterate(0, &(&1 + 1)), {uncompressed_bytes, []}, fn _i, {remainder, batch_acc} -> try do {decoded, new_remainder} = ExRLP.decode(remainder, stream: true) - <> = binary_part(decoded, 0, 1) - content = binary_part(decoded, 1, byte_size(decoded) - 1) + <> = decoded new_batch_acc = cond do @@ -1310,7 +1317,6 @@ defmodule Indexer.Fetcher.Optimism.TxnBatch do start_block = quantity_to_integer(Enum.at(responses, 0).result) "0x000000000000000000000000" <> batch_inbox = Enum.at(responses, 1).result "0x000000000000000000000000" <> batch_submitter = Enum.at(responses, 2).result - {start_block, String.downcase("0x" <> batch_inbox), String.downcase("0x" <> batch_submitter)} _ -> diff --git a/apps/indexer/mix.exs b/apps/indexer/mix.exs index 7bccbcfcabfd..74226a9065e0 100644 --- a/apps/indexer/mix.exs +++ b/apps/indexer/mix.exs @@ -51,6 +51,8 @@ defmodule Indexer.MixProject do {:decorator, "~> 1.4"}, # JSONRPC access to Nethermind for `Explorer.Indexer` {:ethereum_jsonrpc, in_umbrella: true}, + # Brotli compression/decompression + {:brotli, "~> 0.3.2"}, # RLP encoding {:ex_rlp, "~> 0.6.0"}, # Importing to database diff --git a/cspell.json b/cspell.json index 579cb8d3cdd7..2f572a7d60e4 100644 --- a/cspell.json +++ b/cspell.json @@ -61,6 +61,7 @@ "blockscoutuser", "bools", "bridgedtokenlist", + "brotli", "browserconfig", "bsdr", "Btvk", diff --git a/mix.lock b/mix.lock index 6502c9d0f0e1..bbb6d6ace09b 100644 --- a/mix.lock +++ b/mix.lock @@ -9,6 +9,7 @@ "benchee": {:hex, :benchee, "1.3.1", "c786e6a76321121a44229dde3988fc772bca73ea75170a73fd5f4ddf1af95ccf", [:mix], [{:deep_merge, "~> 1.0", [hex: :deep_merge, repo: "hexpm", optional: 
false]}, {:statistex, "~> 1.0", [hex: :statistex, repo: "hexpm", optional: false]}, {:table, "~> 0.1.0", [hex: :table, repo: "hexpm", optional: true]}], "hexpm", "76224c58ea1d0391c8309a8ecbfe27d71062878f59bd41a390266bf4ac1cc56d"}, "benchee_csv": {:hex, :benchee_csv, "1.0.0", "0b3b9223290bfcb8003552705bec9bcf1a89b4a83b70bd686e45295c264f3d16", [:mix], [{:benchee, ">= 0.99.0 and < 2.0.0", [hex: :benchee, repo: "hexpm", optional: false]}, {:csv, "~> 2.0", [hex: :csv, repo: "hexpm", optional: false]}], "hexpm", "cdefb804c021dcf7a99199492026584be9b5a21d6644ac0d01c81c5d97c520d5"}, "briefly": {:git, "https://github.com/CargoSense/briefly.git", "4836ba322ffb504a102a15cc6e35d928ef97120e", []}, + "brotli": {:hex, :brotli, "0.3.2", "59cf45a399098516f1d34f70d8e010e5c9bf326659d3ef34c7cc56793339002b", [:rebar3], [], "hexpm", "9ec3ef9c753f80d0c657b4905193c55e5198f169fa1d1c044d8601d4d931a2ad"}, "bunt": {:hex, :bunt, "1.0.0", "081c2c665f086849e6d57900292b3a161727ab40431219529f13c4ddcf3e7a44", [:mix], [], "hexpm", "dc5f86aa08a5f6fa6b8096f0735c4e76d54ae5c9fa2c143e5a1fc7c1cd9bb6b5"}, "bureaucrat": {:hex, :bureaucrat, "0.2.10", "b0de157dad540e40007b663b683f716ced21f85ff0591093aadb209ad0d967e1", [:mix], [{:inflex, ">= 1.10.0", [hex: :inflex, repo: "hexpm", optional: false]}, {:phoenix, ">= 1.2.0", [hex: :phoenix, repo: "hexpm", optional: true]}, {:plug, ">= 1.0.0", [hex: :plug, repo: "hexpm", optional: false]}, {:poison, "~> 1.5 or ~> 2.0 or ~> 3.0 or ~> 4.0 or ~> 5.0", [hex: :poison, repo: "hexpm", optional: true]}], "hexpm", "bc7e5162b911c29c8ebefee87a2c16fbf13821a58f448a8fd024eb6c17fae15c"}, "bypass": {:hex, :bypass, "2.1.0", "909782781bf8e20ee86a9cabde36b259d44af8b9f38756173e8f5e2e1fabb9b1", [:mix], [{:plug, "~> 1.7", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.0", [hex: :plug_cowboy, repo: "hexpm", optional: false]}, {:ranch, "~> 1.3", [hex: :ranch, repo: "hexpm", optional: false]}], "hexpm", "d9b5df8fa5b7a6efa08384e9bbecfe4ce61c77d28a4282f79e02f1ef78d96b80"}, From bc7f6a796720a81b4c050235fe8bacf824b9c383 Mon Sep 17 00:00:00 2001 From: Viktor Baranov Date: Mon, 15 Jul 2024 12:28:19 +0300 Subject: [PATCH 25/32] Indexer/API docker images from production-optimism branch --- .../publish-docker-image-for-eth-sepolia.yml | 8 ++-- .../publish-docker-image-for-eth.yml | 4 +- .../publish-docker-image-for-optimism.yml | 46 ++++++++++++++++++- 3 files changed, 51 insertions(+), 7 deletions(-) diff --git a/.github/workflows/publish-docker-image-for-eth-sepolia.yml b/.github/workflows/publish-docker-image-for-eth-sepolia.yml index b389c94ada74..1c137b4d86d9 100644 --- a/.github/workflows/publish-docker-image-for-eth-sepolia.yml +++ b/.github/workflows/publish-docker-image-for-eth-sepolia.yml @@ -36,7 +36,6 @@ jobs: linux/amd64 linux/arm64/v8 build-args: | - CHAIN_TYPE=ethereum CACHE_EXCHANGE_RATES_PERIOD= API_V1_READ_METHODS_DISABLED=false DISABLE_WEBAPP=false @@ -46,6 +45,7 @@ jobs: CACHE_ADDRESS_WITH_BALANCES_UPDATE_INTERVAL= BLOCKSCOUT_VERSION=v${{ env.RELEASE_VERSION }}-beta.+commit.${{ env.SHORT_SHA }} RELEASE_VERSION=${{ env.RELEASE_VERSION }} + CHAIN_TYPE=ethereum - name: Build and push Docker image (indexer) uses: docker/build-push-action@v5 @@ -59,7 +59,6 @@ jobs: linux/amd64 linux/arm64/v8 build-args: | - CHAIN_TYPE=ethereum CACHE_EXCHANGE_RATES_PERIOD= DISABLE_WEBAPP=true DISABLE_API=true @@ -68,6 +67,7 @@ jobs: CACHE_ADDRESS_WITH_BALANCES_UPDATE_INTERVAL= BLOCKSCOUT_VERSION=v${{ env.RELEASE_VERSION }}-beta.+commit.${{ env.SHORT_SHA }} RELEASE_VERSION=${{ env.RELEASE_VERSION }} + 
CHAIN_TYPE=ethereum - name: Build and push Docker image (API) uses: docker/build-push-action@v5 @@ -81,7 +81,6 @@ jobs: linux/amd64 linux/arm64/v8 build-args: | - CHAIN_TYPE=ethereum CACHE_EXCHANGE_RATES_PERIOD= DISABLE_WEBAPP=true DISABLE_INDEXER=true @@ -89,4 +88,5 @@ jobs: ADMIN_PANEL_ENABLED=false CACHE_ADDRESS_WITH_BALANCES_UPDATE_INTERVAL= BLOCKSCOUT_VERSION=v${{ env.RELEASE_VERSION }}-beta.+commit.${{ env.SHORT_SHA }} - RELEASE_VERSION=${{ env.RELEASE_VERSION }} \ No newline at end of file + RELEASE_VERSION=${{ env.RELEASE_VERSION }} + CHAIN_TYPE=ethereum \ No newline at end of file diff --git a/.github/workflows/publish-docker-image-for-eth.yml b/.github/workflows/publish-docker-image-for-eth.yml index b6819c515b5f..8b9989e4fb6a 100644 --- a/.github/workflows/publish-docker-image-for-eth.yml +++ b/.github/workflows/publish-docker-image-for-eth.yml @@ -36,7 +36,6 @@ jobs: linux/amd64 linux/arm64/v8 build-args: | - CHAIN_TYPE=ethereum CACHE_EXCHANGE_RATES_PERIOD= API_V1_READ_METHODS_DISABLED=false DISABLE_WEBAPP=false @@ -45,4 +44,5 @@ jobs: ADMIN_PANEL_ENABLED=false CACHE_ADDRESS_WITH_BALANCES_UPDATE_INTERVAL= BLOCKSCOUT_VERSION=v${{ env.RELEASE_VERSION }}-beta.+commit.${{ env.SHORT_SHA }} - RELEASE_VERSION=${{ env.RELEASE_VERSION }} \ No newline at end of file + RELEASE_VERSION=${{ env.RELEASE_VERSION }} + CHAIN_TYPE=ethereum \ No newline at end of file diff --git a/.github/workflows/publish-docker-image-for-optimism.yml b/.github/workflows/publish-docker-image-for-optimism.yml index 1f3e81452252..d10cf7fe79d6 100644 --- a/.github/workflows/publish-docker-image-for-optimism.yml +++ b/.github/workflows/publish-docker-image-for-optimism.yml @@ -24,7 +24,7 @@ jobs: docker-arm-host: ${{ secrets.ARM_RUNNER_HOSTNAME }} docker-arm-host-key: ${{ secrets.ARM_RUNNER_KEY }} - - name: Build and push Docker image + - name: Build and push Docker image (indexer + API) uses: docker/build-push-action@v5 with: context: . @@ -45,4 +45,48 @@ jobs: CACHE_ADDRESS_WITH_BALANCES_UPDATE_INTERVAL= BLOCKSCOUT_VERSION=v${{ env.RELEASE_VERSION }}-beta.+commit.${{ env.SHORT_SHA }} RELEASE_VERSION=${{ env.RELEASE_VERSION }} + CHAIN_TYPE=optimism + + - name: Build and push Docker image (indexer) + uses: docker/build-push-action@v5 + with: + context: . + file: ./docker/Dockerfile + push: true + tags: blockscout/blockscout-${{ env.DOCKER_CHAIN_NAME }}:${{ env.RELEASE_VERSION }}-postrelease-${{ env.SHORT_SHA }}-indexer + labels: ${{ steps.setup.outputs.docker-labels }} + platforms: | + linux/amd64 + linux/arm64/v8 + build-args: | + CACHE_EXCHANGE_RATES_PERIOD= + DISABLE_WEBAPP=true + DISABLE_API=true + CACHE_TOTAL_GAS_USAGE_COUNTER_ENABLED= + ADMIN_PANEL_ENABLED=false + CACHE_ADDRESS_WITH_BALANCES_UPDATE_INTERVAL= + BLOCKSCOUT_VERSION=v${{ env.RELEASE_VERSION }}-beta.+commit.${{ env.SHORT_SHA }} + RELEASE_VERSION=${{ env.RELEASE_VERSION }} + CHAIN_TYPE=optimism + + - name: Build and push Docker image (API) + uses: docker/build-push-action@v5 + with: + context: . 
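+          # Built from the same Dockerfile as the combined and indexer images
+          # above; essentially only the image tag suffix and the DISABLE_*
+          # build-args differ between the three steps.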
+ file: ./docker/Dockerfile + push: true + tags: blockscout/blockscout-${{ env.DOCKER_CHAIN_NAME }}:${{ env.RELEASE_VERSION }}-postrelease-${{ env.SHORT_SHA }}-api + labels: ${{ steps.setup.outputs.docker-labels }} + platforms: | + linux/amd64 + linux/arm64/v8 + build-args: | + CACHE_EXCHANGE_RATES_PERIOD= + DISABLE_WEBAPP=true + DISABLE_INDEXER=true + CACHE_TOTAL_GAS_USAGE_COUNTER_ENABLED= + ADMIN_PANEL_ENABLED=false + CACHE_ADDRESS_WITH_BALANCES_UPDATE_INTERVAL= + BLOCKSCOUT_VERSION=v${{ env.RELEASE_VERSION }}-beta.+commit.${{ env.SHORT_SHA }} + RELEASE_VERSION=${{ env.RELEASE_VERSION }} CHAIN_TYPE=optimism \ No newline at end of file From 314d2e7a5fbc1517dcff5afa7c9e280ce102ea8c Mon Sep 17 00:00:00 2001 From: Qwerty5Uiop <105209995+Qwerty5Uiop@users.noreply.github.com> Date: Mon, 15 Jul 2024 18:50:39 +0400 Subject: [PATCH 26/32] chore: Remove catchup sequence logic (#10415) --- .../lib/indexer/block/catchup/sequence.ex | 362 ------------------ .../indexer/block/catchup/sequence_test.exs | 248 ------------ .../indexer/block/realtime/fetcher_test.exs | 10 - .../uncataloged_token_transfers_test.exs | 2 - 4 files changed, 622 deletions(-) delete mode 100644 apps/indexer/lib/indexer/block/catchup/sequence.ex delete mode 100644 apps/indexer/test/indexer/block/catchup/sequence_test.exs diff --git a/apps/indexer/lib/indexer/block/catchup/sequence.ex b/apps/indexer/lib/indexer/block/catchup/sequence.ex deleted file mode 100644 index a5690efa48d7..000000000000 --- a/apps/indexer/lib/indexer/block/catchup/sequence.ex +++ /dev/null @@ -1,362 +0,0 @@ -defmodule Indexer.Block.Catchup.Sequence do - @moduledoc false - - use GenServer - - alias Indexer.{BoundQueue, Memory} - - @enforce_keys ~w(current bound_queue step)a - defstruct current: nil, - bound_queue: %BoundQueue{}, - step: nil - - @typedoc """ - The ranges to stream from the `t:Stream.t/` returned from `build_stream/1` - """ - @type ranges :: [Range.t()] - - @typep ranges_option :: {:ranges, ranges} - - @typedoc """ - The first number in the sequence to start for infinite sequences. - """ - @type first :: integer() - - @typep first_option :: {:first, first} - - @typedoc """ - * `:finite` - only popping ranges from `queue`. - * `:infinite` - generating new ranges from `current` and `step` when `queue` is empty. - """ - @type mode :: :finite | :infinite - - @typedoc """ - The size of `t:Range.t/0` to construct based on the `t:first_named_argument/0` or its current value when all - `t:prefix/0` ranges and any `t:Range.t/0`s injected with `inject_range/2` are consumed. - """ - @type step :: neg_integer() | pos_integer() - - @typep step_named_argument :: {:step, step} - - @typep memory_monitor_option :: {:memory_monitor, GenServer.server()} - - @type options :: [ranges_option | first_option | memory_monitor_option | step_named_argument] - - @typep edge :: :front | :back - - @typep range_tuple :: {first :: non_neg_integer(), last :: non_neg_integer()} - - @typep t :: %__MODULE__{ - bound_queue: BoundQueue.t(range_tuple()), - current: nil | integer(), - step: step() - } - - def child_spec([init_arguments]) do - child_spec([init_arguments, []]) - end - - def child_spec([_init_arguments, _gen_server_options] = start_link_arguments) do - spec = %{ - id: __MODULE__, - start: {__MODULE__, :start_link, start_link_arguments}, - type: :worker - } - - Supervisor.child_spec(spec, []) - end - - @doc """ - Starts a process for managing a block sequence. 
- - Infinite sequence - - Indexer.Block.Catchup.Sequence.start_link(first: 100, step: 10) - - Finite sequence - - Indexer.Block.Catchup.Sequence.start_link(ranges: [100..0]) - - """ - @spec start_link(options(), Keyword.t()) :: GenServer.on_start() - def start_link(init_options, gen_server_options \\ []) when is_list(init_options) and is_list(gen_server_options) do - GenServer.start_link(__MODULE__, init_options, gen_server_options) - end - - @doc """ - Builds an enumerable stream using a sequencer agent. - """ - @spec build_stream(GenServer.server()) :: Enumerable.t() - def build_stream(sequencer) do - Stream.resource( - fn -> sequencer end, - fn seq -> - case pop_front(seq) do - :halt -> {:halt, seq} - range -> {[range], seq} - end - end, - fn seq -> seq end - ) - end - - @doc """ - Changes the mode for the sequence to finite. - """ - @spec cap(GenServer.server()) :: mode - def cap(sequence) do - GenServer.call(sequence, :cap) - end - - @doc """ - Adds a range of block numbers to the end of the sequence. - """ - @spec push_back(GenServer.server(), Range.t()) :: :ok | {:error, String.t()} - def push_back(sequence, _first.._last = range) do - GenServer.call(sequence, {:push_back, range}) - end - - @doc """ - Adds a range of block numbers to the front of the sequence. - """ - @spec push_front(GenServer.server(), Range.t()) :: :ok | {:error, String.t()} - def push_front(sequence, _first.._last = range) do - GenServer.call(sequence, {:push_front, range}) - end - - @doc """ - Pops the next block range from the sequence. - """ - @spec pop_front(GenServer.server()) :: Range.t() | :halt - def pop_front(sequence) do - GenServer.call(sequence, :pop_front) - end - - @impl GenServer - @spec init(options) :: {:ok, t} - def init(options) when is_list(options) do - Process.flag(:trap_exit, true) - - shrinkable(options) - - with {:ok, %{ranges: ranges, first: first, step: step}} <- validate_options(options), - {:ok, bound_queue} <- push_chunked_ranges(%BoundQueue{}, step, ranges) do - {:ok, %__MODULE__{bound_queue: bound_queue, current: first, step: step}} - else - {:error, reason} -> - {:stop, reason} - end - end - - @impl GenServer - - @spec handle_call(:cap, GenServer.from(), %__MODULE__{current: nil}) :: {:reply, :finite, %__MODULE__{current: nil}} - @spec handle_call(:cap, GenServer.from(), %__MODULE__{current: integer()}) :: - {:reply, :infinite, %__MODULE__{current: nil}} - def handle_call(:cap, _from, %__MODULE__{current: current} = state) do - mode = - case current do - nil -> :finite - _ -> :infinite - end - - {:reply, mode, %__MODULE__{state | current: nil}} - end - - @spec handle_call({:push_back, Range.t()}, GenServer.from(), t()) :: {:reply, :ok | {:error, String.t()}, t()} - def handle_call({:push_back, _first.._last = range}, _from, %__MODULE__{bound_queue: bound_queue, step: step} = state) do - case push_chunked_range(bound_queue, step, range) do - {:ok, updated_bound_queue} -> - {:reply, :ok, %__MODULE__{state | bound_queue: updated_bound_queue}} - - {:error, _} = error -> - {:reply, error, state} - end - end - - @spec handle_call({:push_front, Range.t()}, GenServer.from(), t()) :: {:reply, :ok | {:error, String.t()}, t()} - def handle_call( - {:push_front, _first.._last = range}, - _from, - %__MODULE__{bound_queue: bound_queue, step: step} = state - ) do - case push_chunked_range(bound_queue, step, range, :front) do - {:ok, updated_bound_queue} -> - {:reply, :ok, %__MODULE__{state | bound_queue: updated_bound_queue}} - - {:error, _} = error -> - {:reply, error, state} - end - end - - 
@spec handle_call(:pop_front, GenServer.from(), t()) :: {:reply, Range.t() | :halt, t()} - def handle_call(:pop_front, _from, %__MODULE__{bound_queue: bound_queue, current: current, step: step} = state) do - {reply, new_state} = - case {current, BoundQueue.pop_front(bound_queue)} do - {_, {:ok, {{first, last}, new_bound_queue}}} -> - {first..last, %__MODULE__{state | bound_queue: new_bound_queue}} - - {nil, {:error, :empty}} -> - {:halt, %__MODULE__{state | bound_queue: bound_queue}} - - {_, {:error, :empty}} -> - case current + step do - new_current -> - last = new_current - 1 - {current..last, %__MODULE__{state | current: new_current, bound_queue: bound_queue}} - end - end - - {:reply, reply, new_state} - end - - @spec handle_call(:shrink, GenServer.from(), t()) :: {:reply, :ok, t()} - def handle_call(:shrink, _from, %__MODULE__{bound_queue: bound_queue} = state) do - {reply, shrunk_state} = - case BoundQueue.shrink(bound_queue) do - {:error, :minimum_size} = error -> - {error, state} - - {:ok, shrunk_bound_queue} -> - {:ok, %__MODULE__{state | bound_queue: shrunk_bound_queue}} - end - - {:reply, reply, shrunk_state, :hibernate} - end - - @spec handle_call(:shrunk?, GenServer.from(), t()) :: {:reply, boolean(), t()} - def handle_call(:shrunk?, _from, %__MODULE__{bound_queue: bound_queue} = state) do - {:reply, BoundQueue.shrunk?(bound_queue), state} - end - - def handle_call(:expand, _from, %__MODULE__{bound_queue: bound_queue} = state) do - {:reply, :ok, %{state | bound_queue: BoundQueue.expand(bound_queue)}} - end - - @spec push_chunked_range(BoundQueue.t(Range.t()), step, Range.t(), edge()) :: - {:ok, BoundQueue.t(Range.t())} | {:error, reason :: String.t()} - defp push_chunked_range(bound_queue, step, _.._ = range, edge \\ :back) - when is_integer(step) and edge in [:back, :front] do - with {:error, [reason]} <- push_chunked_ranges(bound_queue, step, [range], edge) do - {:error, reason} - end - end - - @spec push_chunked_range(BoundQueue.t(Range.t()), step, [Range.t()], edge()) :: - {:ok, BoundQueue.t(Range.t())} | {:error, reasons :: [String.t()]} - defp push_chunked_ranges(bound_queue, step, ranges, edge \\ :back) - when is_integer(step) and is_list(ranges) and edge in [:back, :front] do - reducer = - case edge do - :back -> &BoundQueue.push_back(&2, &1) - :front -> &BoundQueue.push_front(&2, &1) - end - - reduce_chunked_ranges(ranges, step, bound_queue, reducer) - end - - defp reduce_chunked_ranges(ranges, step, initial, reducer) - when is_list(ranges) and is_integer(step) and step != 0 and is_function(reducer, 2) do - Enum.reduce(ranges, {:ok, initial}, fn - range, {:ok, acc} -> - case reduce_chunked_range(range, step, acc, reducer) do - {:ok, _} = ok -> - ok - - {:error, reason} -> - {:error, [reason]} - end - - range, {:error, acc_reasons} = acc -> - case reduce_chunked_range(range, step, initial, reducer) do - {:ok, _} -> acc - {:error, reason} -> {:error, [reason | acc_reasons]} - end - end) - end - - defp reduce_chunked_range(_.._ = range, step, initial, reducer) do - count = Enum.count(range) - reduce_chunked_range(range, count, step, initial, reducer) - end - - defp reduce_chunked_range(first..last = range, _count, step, _initial, _reducer) - when (step < 0 and first < last) or (0 < step and last < first) do - {:error, "Range (#{inspect(range)}) direction is opposite step (#{step}) direction"} - end - - defp reduce_chunked_range(first..last, count, step, initial, reducer) when count <= abs(step) do - reducer.({first, last}, initial) - end - - defp 
reduce_chunked_range(first..last, _, step, initial, reducer) do - {sign, comparator} = - if step > 0 do - {1, &Kernel.>=/2} - else - {-1, &Kernel.<=/2} - end - - first - |> Stream.iterate(&(&1 + step)) - |> Enum.reduce_while( - initial, - &reduce_whiler(&1, &2, %{step: step, sign: sign, comparator: comparator, last: last, reducer: reducer}) - ) - end - - defp reduce_whiler(chunk_first, acc, %{step: step, sign: sign, comparator: comparator, last: last, reducer: reducer}) do - next_chunk_first = chunk_first + step - full_chunk_last = next_chunk_first - sign - - {action, chunk_last} = - if comparator.(full_chunk_last, last) do - {:halt, last} - else - {:cont, full_chunk_last} - end - - case reducer.({chunk_first, chunk_last}, acc) do - {:ok, reduced} -> - case action do - :halt -> {:halt, {:ok, reduced}} - :cont -> {:cont, reduced} - end - - {:error, _} = error -> - {:halt, error} - end - end - - defp shrinkable(options) do - case Keyword.get(options, :memory_monitor) do - nil -> :ok - memory_monitor -> Memory.Monitor.shrinkable(memory_monitor) - end - end - - defp validate_options(options) do - step = Keyword.fetch!(options, :step) - - case {Keyword.fetch(options, :ranges), Keyword.fetch(options, :first)} do - {:error, {:ok, first}} -> - case step do - pos_integer when is_integer(pos_integer) and pos_integer > 0 -> - {:ok, %{ranges: [], first: first, step: step}} - - _ -> - {:error, ":step must be a positive integer for infinite sequences"} - end - - {{:ok, ranges}, :error} -> - {:ok, %{ranges: ranges, first: nil, step: step}} - - {{:ok, _}, {:ok, _}} -> - {:error, - ":ranges and :first cannot be set at the same time as :ranges is for :finite mode while :first is for :infinite mode"} - - {:error, :error} -> - {:error, "either :ranges or :first must be set"} - end - end -end diff --git a/apps/indexer/test/indexer/block/catchup/sequence_test.exs b/apps/indexer/test/indexer/block/catchup/sequence_test.exs deleted file mode 100644 index eebc33d1ae62..000000000000 --- a/apps/indexer/test/indexer/block/catchup/sequence_test.exs +++ /dev/null @@ -1,248 +0,0 @@ -defmodule Indexer.Block.Catchup.SequenceTest do - use ExUnit.Case - - alias Indexer.Block.Catchup.Sequence - alias Indexer.Memory.Shrinkable - - describe "start_link/1" do - test "without :ranges with :first with positive step pops infinitely" do - {:ok, ascending} = Sequence.start_link(first: 5, step: 1) - - assert Sequence.pop_front(ascending) == 5..5 - assert Sequence.pop_front(ascending) == 6..6 - end - - test "without :ranges with :first with negative :step is error" do - {child_pid, child_ref} = - spawn_monitor(fn -> - Sequence.start_link(first: 1, step: -1) - Process.sleep(:timer.seconds(5)) - end) - - assert_receive {:DOWN, ^child_ref, :process, ^child_pid, - ":step must be a positive integer for infinite sequences"} - end - - test "without :ranges without :first returns error" do - {child_pid, child_ref} = - spawn_monitor(fn -> - Sequence.start_link(step: -1) - Process.sleep(:timer.seconds(5)) - end) - - assert_receive {:DOWN, ^child_ref, :process, ^child_pid, "either :ranges or :first must be set"} - end - - test "with ranges without :first" do - {:ok, pid} = Sequence.start_link(ranges: [1..4], step: 1) - - assert Sequence.pop_front(pid) == 1..1 - assert Sequence.pop_front(pid) == 2..2 - assert Sequence.pop_front(pid) == 3..3 - assert Sequence.pop_front(pid) == 4..4 - assert Sequence.pop_front(pid) == :halt - end - - test "with :ranges with :first returns error" do - {child_pid, child_ref} = - spawn_monitor(fn -> - 
Sequence.start_link(ranges: [1..0], first: 1, step: -1) - Process.sleep(:timer.seconds(5)) - end) - - assert_receive {:DOWN, ^child_ref, :process, ^child_pid, - ":ranges and :first cannot be set at the same time" <> - " as :ranges is for :finite mode while :first is for :infinite mode"} - end - - test "with 0 first with negative step does not return 0 twice" do - {:ok, pid} = Sequence.start_link(ranges: [1..0], step: -1) - - assert Sequence.pop_front(pid) == 1..1 - assert Sequence.pop_front(pid) == 0..0 - assert Sequence.pop_front(pid) == :halt - end - - # Regression test for https://github.com/poanetwork/blockscout/issues/387 - test "ensures Sequence shuts down when parent process dies" do - parent = self() - - {child_pid, child_ref} = spawn_monitor(fn -> send(parent, Sequence.start_link(first: 1, step: 1)) end) - - assert_receive {:DOWN, ^child_ref, :process, ^child_pid, :normal} - assert_receive {:ok, sequence_pid} when is_pid(sequence_pid) - - sequence_ref = Process.monitor(sequence_pid) - - # noproc when the sequence has already died by the time monitor is called - assert_receive {:DOWN, ^sequence_ref, :process, ^sequence_pid, status} when status in [:normal, :noproc] - end - end - - describe "push_back/2" do - test "with finite mode range is chunked" do - {:ok, pid} = Sequence.start_link(ranges: [1..0], step: -1) - - assert Sequence.pop_front(pid) == 1..1 - assert Sequence.pop_front(pid) == 0..0 - - assert Sequence.push_back(pid, 1..0) == :ok - - assert Sequence.pop_front(pid) == 1..1 - assert Sequence.pop_front(pid) == 0..0 - assert Sequence.pop_front(pid) == :halt - assert Sequence.pop_front(pid) == :halt - end - - test "with finite mode with range in wrong direction returns error" do - {:ok, ascending} = Sequence.start_link(first: 0, step: 1) - - assert Sequence.push_back(ascending, 1..0) == - {:error, "Range (1..0//-1) direction is opposite step (1) direction"} - - {:ok, descending} = Sequence.start_link(ranges: [1..0], step: -1) - - assert Sequence.push_back(descending, 0..1) == {:error, "Range (0..1) direction is opposite step (-1) direction"} - end - - test "with infinite mode range is chunked and is returned prior to calculated ranges" do - {:ok, pid} = Sequence.start_link(first: 5, step: 1) - - assert :ok = Sequence.push_back(pid, 3..4) - - assert Sequence.pop_front(pid) == 3..3 - assert Sequence.pop_front(pid) == 4..4 - # infinite sequence takes over - assert Sequence.pop_front(pid) == 5..5 - assert Sequence.pop_front(pid) == 6..6 - end - - test "with size == maximum_size, returns error" do - {:ok, pid} = Sequence.start_link(ranges: [1..0], step: -1) - - :ok = Shrinkable.shrink(pid) - - # error if currently size == maximum_size - assert {:error, :maximum_size} = Sequence.push_back(pid, 2..2) - - assert Sequence.pop_front(pid) == 1..1 - - # error if range would make sequence exceed maximum size - assert {:error, :maximum_size} = Sequence.push_back(pid, 3..2) - - # no error if range makes it under maximum size - assert :ok = Sequence.push_back(pid, 2..2) - - assert Sequence.pop_front(pid) == 2..2 - assert Sequence.pop_front(pid) == :halt - end - end - - describe "push_front/2" do - test "with finite mode range is chunked" do - {:ok, pid} = Sequence.start_link(ranges: [1..0], step: -1) - - assert Sequence.pop_front(pid) == 1..1 - assert Sequence.pop_front(pid) == 0..0 - - assert Sequence.push_front(pid, 1..0) == :ok - - assert Sequence.pop_front(pid) == 0..0 - assert Sequence.pop_front(pid) == 1..1 - assert Sequence.pop_front(pid) == :halt - assert Sequence.pop_front(pid) == 
:halt - end - - test "with finite mode with range in wrong direction returns error" do - {:ok, ascending} = Sequence.start_link(first: 0, step: 1) - - assert Sequence.push_front(ascending, 1..0) == - {:error, "Range (1..0//-1) direction is opposite step (1) direction"} - - {:ok, descending} = Sequence.start_link(ranges: [1..0], step: -1) - - assert Sequence.push_front(descending, 0..1) == {:error, "Range (0..1) direction is opposite step (-1) direction"} - end - - test "with infinite mode range is chunked and is returned prior to calculated ranges" do - {:ok, pid} = Sequence.start_link(first: 5, step: 1) - - assert :ok = Sequence.push_front(pid, 3..4) - - assert Sequence.pop_front(pid) == 4..4 - assert Sequence.pop_front(pid) == 3..3 - # infinite sequence takes over - assert Sequence.pop_front(pid) == 5..5 - assert Sequence.pop_front(pid) == 6..6 - end - - test "with size == maximum_size, returns error" do - {:ok, pid} = Sequence.start_link(ranges: [1..0], step: -1) - - :ok = Shrinkable.shrink(pid) - - # error if currently size == maximum_size - assert {:error, :maximum_size} = Sequence.push_front(pid, 2..2) - - assert Sequence.pop_front(pid) == 1..1 - - # error if range would make sequence exceed maximum size - assert {:error, :maximum_size} = Sequence.push_front(pid, 3..2) - - # no error if range makes it under maximum size - assert :ok = Sequence.push_front(pid, 2..2) - - assert Sequence.pop_front(pid) == 2..2 - assert Sequence.pop_front(pid) == :halt - end - end - - describe "cap/1" do - test "returns previous mode" do - {:ok, pid} = Sequence.start_link(first: 5, step: 1) - - assert Sequence.cap(pid) == :infinite - assert Sequence.cap(pid) == :finite - end - - test "disables infinite mode that uses first and step" do - {:ok, late_capped} = Sequence.start_link(first: 5, step: 1) - - assert Sequence.pop_front(late_capped) == 5..5 - assert Sequence.pop_front(late_capped) == 6..6 - assert Sequence.push_back(late_capped, 5..5) == :ok - assert Sequence.cap(late_capped) == :infinite - assert Sequence.pop_front(late_capped) == 5..5 - assert Sequence.pop_front(late_capped) == :halt - - {:ok, immediately_capped} = Sequence.start_link(first: 5, step: 1) - - assert Sequence.cap(immediately_capped) == :infinite - assert Sequence.pop_front(immediately_capped) == :halt - end - end - - describe "pop" do - test "with a non-empty queue in finite mode" do - {:ok, pid} = Sequence.start_link(ranges: [1..4, 6..9], step: 5) - - assert Sequence.pop_front(pid) == 1..4 - assert Sequence.pop_front(pid) == 6..9 - assert Sequence.pop_front(pid) == :halt - assert Sequence.pop_front(pid) == :halt - end - - test "with an empty queue in infinite mode returns range from next step from current" do - {:ok, pid} = Sequence.start_link(first: 5, step: 5) - - assert 5..9 == Sequence.pop_front(pid) - end - - test "with an empty queue in finite mode halts immediately" do - {:ok, pid} = Sequence.start_link(first: 5, step: 5) - :infinite = Sequence.cap(pid) - - assert Sequence.pop_front(pid) == :halt - end - end -end diff --git a/apps/indexer/test/indexer/block/realtime/fetcher_test.exs b/apps/indexer/test/indexer/block/realtime/fetcher_test.exs index d7e4f2354409..71b671ee27df 100644 --- a/apps/indexer/test/indexer/block/realtime/fetcher_test.exs +++ b/apps/indexer/test/indexer/block/realtime/fetcher_test.exs @@ -6,7 +6,6 @@ defmodule Indexer.Block.Realtime.FetcherTest do alias Explorer.Chain alias Explorer.Chain.{Address, Transaction, Wei} - alias Indexer.Block.Catchup.Sequence alias Indexer.Block.Realtime alias 
Indexer.Fetcher.CoinBalance.Realtime, as: CoinBalanceRealtime alias Indexer.Fetcher.{ContractCode, InternalTransaction, ReplacedTransaction, Token, TokenBalance, UncleBlock} @@ -48,9 +47,6 @@ defmodule Indexer.Block.Realtime.FetcherTest do block_fetcher: %Indexer.Block.Fetcher{} = block_fetcher, json_rpc_named_arguments: json_rpc_named_arguments } do - {:ok, sequence} = Sequence.start_link(ranges: [], step: 2) - Sequence.cap(sequence) - Token.Supervisor.Case.start_supervised!(json_rpc_named_arguments: json_rpc_named_arguments) ContractCode.Supervisor.Case.start_supervised!(json_rpc_named_arguments: json_rpc_named_arguments) @@ -516,9 +512,6 @@ defmodule Indexer.Block.Realtime.FetcherTest do } do Application.put_env(:indexer, :fetch_rewards_way, "manual") - {:ok, sequence} = Sequence.start_link(ranges: [], step: 2) - Sequence.cap(sequence) - Token.Supervisor.Case.start_supervised!(json_rpc_named_arguments: json_rpc_named_arguments) ContractCode.Supervisor.Case.start_supervised!(json_rpc_named_arguments: json_rpc_named_arguments) @@ -746,9 +739,6 @@ defmodule Indexer.Block.Realtime.FetcherTest do Application.put_env(:indexer, InternalTransaction.Supervisor, disabled?: true) Application.put_env(:indexer, UncleBlock.Supervisor, disabled?: true) - {:ok, sequence} = Sequence.start_link(ranges: [], step: 2) - Sequence.cap(sequence) - start_supervised!({Task.Supervisor, name: Realtime.TaskSupervisor}) Token.Supervisor.Case.start_supervised!(json_rpc_named_arguments: json_rpc_named_arguments) diff --git a/apps/indexer/test/indexer/temporary/uncataloged_token_transfers_test.exs b/apps/indexer/test/indexer/temporary/uncataloged_token_transfers_test.exs index 79fc9914f926..9e1468916ecf 100644 --- a/apps/indexer/test/indexer/temporary/uncataloged_token_transfers_test.exs +++ b/apps/indexer/test/indexer/temporary/uncataloged_token_transfers_test.exs @@ -1,7 +1,6 @@ defmodule Indexer.Temporary.UncatalogedTokenTransfersTest do use Explorer.DataCase - alias Indexer.Block.Catchup.Sequence alias Indexer.Temporary.UncatalogedTokenTransfers @moduletag :capture_log @@ -63,7 +62,6 @@ defmodule Indexer.Temporary.UncatalogedTokenTransfersTest do describe "handle_info with :push_front_blocks" do test "starts a task" do task_sup_pid = start_supervised!({Task.Supervisor, name: UncatalogedTokenTransfers.TaskSupervisor}) - start_supervised!({Sequence, [[ranges: [], step: -1], [name: :block_catchup_sequencer]]}) state = %{task_ref: nil, block_numbers: [1]} assert {:noreply, %{task_ref: task_ref}} = UncatalogedTokenTransfers.handle_info(:push_front_blocks, state) From 07d3c0c56606e7de0f60d028ea2ae399e6e95fa1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 16 Jul 2024 12:27:43 +0300 Subject: [PATCH 27/32] chore(deps): bump cldr_utils from 2.27.0 to 2.28.0 (#10421) Bumps [cldr_utils](https://github.com/elixir-cldr/cldr_utils) from 2.27.0 to 2.28.0. - [Release notes](https://github.com/elixir-cldr/cldr_utils/releases) - [Changelog](https://github.com/elixir-cldr/cldr_utils/blob/main/CHANGELOG.md) - [Commits](https://github.com/elixir-cldr/cldr_utils/compare/v2.27.0...v2.28.0) --- updated-dependencies: - dependency-name: cldr_utils dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- mix.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mix.lock b/mix.lock index bbb6d6ace09b..9faea2cab9fa 100644 --- a/mix.lock +++ b/mix.lock @@ -13,10 +13,10 @@ "bunt": {:hex, :bunt, "1.0.0", "081c2c665f086849e6d57900292b3a161727ab40431219529f13c4ddcf3e7a44", [:mix], [], "hexpm", "dc5f86aa08a5f6fa6b8096f0735c4e76d54ae5c9fa2c143e5a1fc7c1cd9bb6b5"}, "bureaucrat": {:hex, :bureaucrat, "0.2.10", "b0de157dad540e40007b663b683f716ced21f85ff0591093aadb209ad0d967e1", [:mix], [{:inflex, ">= 1.10.0", [hex: :inflex, repo: "hexpm", optional: false]}, {:phoenix, ">= 1.2.0", [hex: :phoenix, repo: "hexpm", optional: true]}, {:plug, ">= 1.0.0", [hex: :plug, repo: "hexpm", optional: false]}, {:poison, "~> 1.5 or ~> 2.0 or ~> 3.0 or ~> 4.0 or ~> 5.0", [hex: :poison, repo: "hexpm", optional: true]}], "hexpm", "bc7e5162b911c29c8ebefee87a2c16fbf13821a58f448a8fd024eb6c17fae15c"}, "bypass": {:hex, :bypass, "2.1.0", "909782781bf8e20ee86a9cabde36b259d44af8b9f38756173e8f5e2e1fabb9b1", [:mix], [{:plug, "~> 1.7", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.0", [hex: :plug_cowboy, repo: "hexpm", optional: false]}, {:ranch, "~> 1.3", [hex: :ranch, repo: "hexpm", optional: false]}], "hexpm", "d9b5df8fa5b7a6efa08384e9bbecfe4ce61c77d28a4282f79e02f1ef78d96b80"}, - "castore": {:hex, :castore, "1.0.7", "b651241514e5f6956028147fe6637f7ac13802537e895a724f90bf3e36ddd1dd", [:mix], [], "hexpm", "da7785a4b0d2a021cd1292a60875a784b6caef71e76bf4917bdee1f390455cf5"}, + "castore": {:hex, :castore, "1.0.8", "dedcf20ea746694647f883590b82d9e96014057aff1d44d03ec90f36a5c0dc6e", [:mix], [], "hexpm", "0b2b66d2ee742cb1d9cb8c8be3b43c3a70ee8651f37b75a8b982e036752983f1"}, "cbor": {:hex, :cbor, "1.0.1", "39511158e8ea5a57c1fcb9639aaa7efde67129678fee49ebbda780f6f24959b0", [:mix], [], "hexpm", "5431acbe7a7908f17f6a9cd43311002836a34a8ab01876918d8cfb709cd8b6a2"}, "certifi": {:hex, :certifi, "2.12.0", "2d1cca2ec95f59643862af91f001478c9863c2ac9cb6e2f89780bfd8de987329", [:rebar3], [], "hexpm", "ee68d85df22e554040cdb4be100f33873ac6051387baf6a8f6ce82272340ff1c"}, - "cldr_utils": {:hex, :cldr_utils, "2.27.0", "a75d5cdaaf6b7432eb10f547e6abe635c94746985c5b78e35bbbd08b16473b6c", [:mix], [{:castore, "~> 0.1 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:certifi, "~> 2.5", [hex: :certifi, repo: "hexpm", optional: true]}, {:decimal, "~> 1.9 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}], "hexpm", "516f601e28da10b8f1f3af565321c4e3da3b898a0b50a5e5be425eff76d587e1"}, + "cldr_utils": {:hex, :cldr_utils, "2.28.0", "ce309d11b79fc13e1f22f808b5e3c1647102b01b11734ca8cb0296ca6d406fe4", [:mix], [{:castore, "~> 0.1 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:certifi, "~> 2.5", [hex: :certifi, repo: "hexpm", optional: true]}, {:decimal, "~> 1.9 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}], "hexpm", "e7ac4bcea0fdbc11b5295ef30dd7b18d0922512399361af06a97198e57d23742"}, "cloak": {:hex, :cloak, "1.1.4", "aba387b22ea4d80d92d38ab1890cc528b06e0e7ef2a4581d71c3fdad59e997e7", [:mix], [{:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}], "hexpm", "92b20527b9aba3d939fab0dd32ce592ff86361547cfdc87d74edce6f980eb3d7"}, "cloak_ecto": {:hex, :cloak_ecto, "1.3.0", "0de127c857d7452ba3c3367f53fb814b0410ff9c680a8d20fbe8b9a3c57a1118", [:mix], [{:cloak, "~> 1.1.1", [hex: :cloak, repo: "hexpm", optional: false]}, {:ecto, "~> 3.0", [hex: :ecto, repo: 
"hexpm", optional: false]}], "hexpm", "314beb0c123b8a800418ca1d51065b27ba3b15f085977e65c0f7b2adab2de1cc"}, "coerce": {:hex, :coerce, "1.0.1", "211c27386315dc2894ac11bc1f413a0e38505d808153367bd5c6e75a4003d096", [:mix], [], "hexpm", "b44a691700f7a1a15b4b7e2ff1fa30bebd669929ac8aa43cffe9e2f8bf051cf1"}, From 6223cef36f455f3c360c1015fefefaa9b73f95d6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 16 Jul 2024 12:28:02 +0300 Subject: [PATCH 28/32] chore(deps): bump tesla from 1.11.1 to 1.11.2 (#10419) Bumps [tesla](https://github.com/teamon/tesla) from 1.11.1 to 1.11.2. - [Release notes](https://github.com/teamon/tesla/releases) - [Commits](https://github.com/teamon/tesla/compare/v1.11.1...v1.11.2) --- updated-dependencies: - dependency-name: tesla dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- mix.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mix.lock b/mix.lock index 9faea2cab9fa..93f838e64d7e 100644 --- a/mix.lock +++ b/mix.lock @@ -133,7 +133,7 @@ "ssl_verify_fun": {:hex, :ssl_verify_fun, "1.1.7", "354c321cf377240c7b8716899e182ce4890c5938111a1296add3ec74cf1715df", [:make, :mix, :rebar3], [], "hexpm", "fe4c190e8f37401d30167c8c405eda19469f34577987c76dde613e838bbc67f8"}, "statistex": {:hex, :statistex, "1.0.0", "f3dc93f3c0c6c92e5f291704cf62b99b553253d7969e9a5fa713e5481cd858a5", [:mix], [], "hexpm", "ff9d8bee7035028ab4742ff52fc80a2aa35cece833cf5319009b52f1b5a86c27"}, "telemetry": {:hex, :telemetry, "1.2.1", "68fdfe8d8f05a8428483a97d7aab2f268aaff24b49e0f599faa091f1d4e7f61c", [:rebar3], [], "hexpm", "dad9ce9d8effc621708f99eac538ef1cbe05d6a874dd741de2e689c47feafed5"}, - "tesla": {:hex, :tesla, "1.11.1", "902ec0cd9fb06ba534be765f0eb78acd9d0ef70118230dc3a73fdc9afc91d036", [:mix], [{:castore, "~> 0.1 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:exjsx, ">= 3.0.0", [hex: :exjsx, repo: "hexpm", optional: true]}, {:finch, "~> 0.13", [hex: :finch, repo: "hexpm", optional: true]}, {:fuse, "~> 2.4", [hex: :fuse, repo: "hexpm", optional: true]}, {:gun, ">= 1.0.0", [hex: :gun, repo: "hexpm", optional: true]}, {:hackney, "~> 1.6", [hex: :hackney, repo: "hexpm", optional: true]}, {:ibrowse, "4.4.2", [hex: :ibrowse, repo: "hexpm", optional: true]}, {:jason, ">= 1.0.0", [hex: :jason, repo: "hexpm", optional: true]}, {:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:mint, "~> 1.0", [hex: :mint, repo: "hexpm", optional: true]}, {:msgpax, "~> 2.3", [hex: :msgpax, repo: "hexpm", optional: true]}, {:poison, ">= 1.0.0", [hex: :poison, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: true]}], "hexpm", "c02d7dd149633c55c40adfaad6c3ce2615cfc89258b67a7f428c14bb835c398c"}, + "tesla": {:hex, :tesla, "1.11.2", "24707ac48b52f72f88fc05d242b1c59a85d1ee6f16f19c312d7d3419665c9cd5", [:mix], [{:castore, "~> 0.1 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:exjsx, ">= 3.0.0", [hex: :exjsx, repo: "hexpm", optional: true]}, {:finch, "~> 0.13", [hex: :finch, repo: "hexpm", optional: true]}, {:fuse, "~> 2.4", [hex: :fuse, repo: "hexpm", optional: true]}, {:gun, ">= 1.0.0", [hex: :gun, repo: "hexpm", optional: true]}, {:hackney, "~> 1.6", [hex: :hackney, repo: "hexpm", optional: true]}, {:ibrowse, "4.4.2", [hex: :ibrowse, repo: "hexpm", optional: true]}, {:jason, ">= 1.0.0", [hex: 
:jason, repo: "hexpm", optional: true]}, {:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:mint, "~> 1.0", [hex: :mint, repo: "hexpm", optional: true]}, {:msgpax, "~> 2.3", [hex: :msgpax, repo: "hexpm", optional: true]}, {:poison, ">= 1.0.0", [hex: :poison, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: true]}], "hexpm", "c549cd03aec6a7196a641689dd378b799e635eb393f689b4bd756f750c7a4014"}, "timex": {:hex, :timex, "3.7.11", "bb95cb4eb1d06e27346325de506bcc6c30f9c6dea40d1ebe390b262fad1862d1", [:mix], [{:combine, "~> 0.10", [hex: :combine, repo: "hexpm", optional: false]}, {:gettext, "~> 0.20", [hex: :gettext, repo: "hexpm", optional: false]}, {:tzdata, "~> 1.1", [hex: :tzdata, repo: "hexpm", optional: false]}], "hexpm", "8b9024f7efbabaf9bd7aa04f65cf8dcd7c9818ca5737677c7b76acbc6a94d1aa"}, "toml": {:hex, :toml, "0.6.2", "38f445df384a17e5d382befe30e3489112a48d3ba4c459e543f748c2f25dd4d1", [:mix], [], "hexpm", "d013e45126d74c0c26a38d31f5e8e9b83ea19fc752470feb9a86071ca5a672fa"}, "typed_ecto_schema": {:hex, :typed_ecto_schema, "0.4.1", "a373ca6f693f4de84cde474a67467a9cb9051a8a7f3f615f1e23dc74b75237fa", [:mix], [{:ecto, "~> 3.5", [hex: :ecto, repo: "hexpm", optional: false]}], "hexpm", "85c6962f79d35bf543dd5659c6adc340fd2480cacc6f25d2cc2933ea6e8fcb3b"}, From 277f49365b7f4cbc9478f1ac0e2c31b4573cd541 Mon Sep 17 00:00:00 2001 From: Qwerty5Uiop <105209995+Qwerty5Uiop@users.noreply.github.com> Date: Tue, 16 Jul 2024 13:30:15 +0400 Subject: [PATCH 29/32] fix: Filter out internal transactions belonging to reorg (#10330) --- apps/explorer/lib/explorer/chain/internal_transaction.ex | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/apps/explorer/lib/explorer/chain/internal_transaction.ex b/apps/explorer/lib/explorer/chain/internal_transaction.ex index 793a7fc65eac..2661c63e4ca4 100644 --- a/apps/explorer/lib/explorer/chain/internal_transaction.ex +++ b/apps/explorer/lib/explorer/chain/internal_transaction.ex @@ -780,7 +780,8 @@ defmodule Explorer.Chain.InternalTransaction do from( child in query, inner_join: transaction in assoc(child, :transaction), - where: transaction.hash == ^hash + where: transaction.hash == ^hash, + where: child.block_hash == transaction.block_hash ) end From b8d77f856aa672d3dce861d87516dee8bad0e214 Mon Sep 17 00:00:00 2001 From: Victor Baranov Date: Tue, 16 Jul 2024 12:31:25 +0300 Subject: [PATCH 30/32] fix: allow fetching image from properties -> image prop in token instance metadata (#10380) --- apps/block_scout_web/lib/block_scout_web/views/nft_helper.ex | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/apps/block_scout_web/lib/block_scout_web/views/nft_helper.ex b/apps/block_scout_web/lib/block_scout_web/views/nft_helper.ex index b7dbc3e158fc..1ccdf145f9ff 100644 --- a/apps/block_scout_web/lib/block_scout_web/views/nft_helper.ex +++ b/apps/block_scout_web/lib/block_scout_web/views/nft_helper.ex @@ -6,6 +6,7 @@ defmodule BlockScoutWeb.NFTHelper do def get_media_src(nil, _), do: nil + # credo:disable-for-next-line /Complexity/ def get_media_src(metadata, high_quality_media?) 
do result = cond do @@ -18,8 +19,8 @@ defmodule BlockScoutWeb.NFTHelper do metadata["image"] -> retrieve_image(metadata["image"]) - metadata["properties"]["image"]["description"] -> - metadata["properties"]["image"]["description"] + image = metadata["properties"]["image"] -> + if is_map(image), do: image["description"], else: image true -> nil From 86b10d9e62f109be4ca552294bcde1012a8577c6 Mon Sep 17 00:00:00 2001 From: Qwerty5Uiop <105209995+Qwerty5Uiop@users.noreply.github.com> Date: Tue, 16 Jul 2024 13:34:04 +0400 Subject: [PATCH 31/32] fix: Transactions and token transfers block_consensus (#10285) --- apps/explorer/config/config.exs | 2 + apps/explorer/config/runtime/test.exs | 2 + apps/explorer/lib/explorer/application.ex | 2 + apps/explorer/lib/explorer/chain/import.ex | 2 +- .../chain/import/stage/block_following.ex | 2 +- .../chain/import/stage/block_pending.ex | 2 +- .../chain/import/stage/block_referencing.ex | 4 +- ...ocks_coin_balances.ex => block_related.ex} | 9 +-- .../token_transfer_block_consensus.ex | 57 +++++++++++++++++++ .../migrator/transaction_block_consensus.ex | 52 +++++++++++++++++ .../test/explorer/chain/import_test.exs | 4 +- 11 files changed, 126 insertions(+), 12 deletions(-) rename apps/explorer/lib/explorer/chain/import/stage/{addresses_blocks_coin_balances.ex => block_related.ex} (76%) create mode 100644 apps/explorer/lib/explorer/migrator/token_transfer_block_consensus.ex create mode 100644 apps/explorer/lib/explorer/migrator/transaction_block_consensus.ex diff --git a/apps/explorer/config/config.exs b/apps/explorer/config/config.exs index 88e78975d152..03977df83bae 100644 --- a/apps/explorer/config/config.exs +++ b/apps/explorer/config/config.exs @@ -123,6 +123,8 @@ config :explorer, Explorer.Migrator.SanitizeMissingBlockRanges, enabled: true config :explorer, Explorer.Migrator.SanitizeIncorrectNFTTokenTransfers, enabled: true config :explorer, Explorer.Migrator.TokenTransferTokenType, enabled: true config :explorer, Explorer.Migrator.SanitizeIncorrectWETHTokenTransfers, enabled: true +config :explorer, Explorer.Migrator.TransactionBlockConsensus, enabled: true +config :explorer, Explorer.Migrator.TokenTransferBlockConsensus, enabled: true config :explorer, Explorer.Chain.Fetcher.CheckBytecodeMatchingOnDemand, enabled: true diff --git a/apps/explorer/config/runtime/test.exs b/apps/explorer/config/runtime/test.exs index 368874b21903..56c02a846467 100644 --- a/apps/explorer/config/runtime/test.exs +++ b/apps/explorer/config/runtime/test.exs @@ -45,6 +45,8 @@ config :explorer, Explorer.Migrator.SanitizeMissingBlockRanges, enabled: false config :explorer, Explorer.Migrator.SanitizeIncorrectNFTTokenTransfers, enabled: false config :explorer, Explorer.Migrator.TokenTransferTokenType, enabled: false config :explorer, Explorer.Migrator.SanitizeIncorrectWETHTokenTransfers, enabled: false +config :explorer, Explorer.Migrator.TransactionBlockConsensus, enabled: false +config :explorer, Explorer.Migrator.TokenTransferBlockConsensus, enabled: false config :explorer, realtime_events_sender: Explorer.Chain.Events.SimpleSender diff --git a/apps/explorer/lib/explorer/application.ex b/apps/explorer/lib/explorer/application.ex index e2ffa55fedff..c82ebcb201bb 100644 --- a/apps/explorer/lib/explorer/application.ex +++ b/apps/explorer/lib/explorer/application.ex @@ -138,6 +138,8 @@ defmodule Explorer.Application do configure(Explorer.Migrator.SanitizeIncorrectNFTTokenTransfers), configure(Explorer.Migrator.TokenTransferTokenType), 
      configure(Explorer.Migrator.SanitizeIncorrectWETHTokenTransfers),
+      configure(Explorer.Migrator.TransactionBlockConsensus),
+      configure(Explorer.Migrator.TokenTransferBlockConsensus),
       configure_chain_type_dependent_process(Explorer.Chain.Cache.StabilityValidatorsCounters, :stability)
     ]
     |> List.flatten()
diff --git a/apps/explorer/lib/explorer/chain/import.ex b/apps/explorer/lib/explorer/chain/import.ex
index ea8867131b40..d0101ed133f7 100644
--- a/apps/explorer/lib/explorer/chain/import.ex
+++ b/apps/explorer/lib/explorer/chain/import.ex
@@ -12,7 +12,7 @@ defmodule Explorer.Chain.Import do
   require Logger
 
   @stages [
-    Import.Stage.AddressesBlocksCoinBalances,
+    Import.Stage.BlockRelated,
     Import.Stage.BlockReferencing,
     Import.Stage.BlockFollowing,
     Import.Stage.BlockPending
diff --git a/apps/explorer/lib/explorer/chain/import/stage/block_following.ex b/apps/explorer/lib/explorer/chain/import/stage/block_following.ex
index 193de566e68e..b59dc20dbd69 100644
--- a/apps/explorer/lib/explorer/chain/import/stage/block_following.ex
+++ b/apps/explorer/lib/explorer/chain/import/stage/block_following.ex
@@ -1,7 +1,7 @@
 defmodule Explorer.Chain.Import.Stage.BlockFollowing do
   @moduledoc """
   Imports any tables that follow and cannot be imported at the same time as
-  those imported by `Explorer.Chain.Import.Stage.AddressesBlocksCoinBalances` and `Explorer.Chain.Import.Stage.BlockReferencing`
+  those imported by `Explorer.Chain.Import.Stage.BlockRelated` and `Explorer.Chain.Import.Stage.BlockReferencing`
   """
 
   alias Explorer.Chain.Import.{Runner, Stage}
diff --git a/apps/explorer/lib/explorer/chain/import/stage/block_pending.ex b/apps/explorer/lib/explorer/chain/import/stage/block_pending.ex
index 6dccdfdf5d10..abcd95e141cd 100644
--- a/apps/explorer/lib/explorer/chain/import/stage/block_pending.ex
+++ b/apps/explorer/lib/explorer/chain/import/stage/block_pending.ex
@@ -2,7 +2,7 @@ defmodule Explorer.Chain.Import.Stage.BlockPending do
   @moduledoc """
   Imports any tables that use `Explorer.Chain.PendingBlockOperation` to
   track progress and cannot be imported at the same time as those imported by
-  `Explorer.Chain.Import.Stage.AddressesBlocksCoinBalances` and `Explorer.Chain.Import.Stage.BlockReferencing`
+  `Explorer.Chain.Import.Stage.BlockRelated` and `Explorer.Chain.Import.Stage.BlockReferencing`
   """
 
   alias Explorer.Chain.Import.{Runner, Stage}
diff --git a/apps/explorer/lib/explorer/chain/import/stage/block_referencing.ex b/apps/explorer/lib/explorer/chain/import/stage/block_referencing.ex
index 66e7d6837603..0d830810edf8 100644
--- a/apps/explorer/lib/explorer/chain/import/stage/block_referencing.ex
+++ b/apps/explorer/lib/explorer/chain/import/stage/block_referencing.ex
@@ -1,18 +1,16 @@
 defmodule Explorer.Chain.Import.Stage.BlockReferencing do
   @moduledoc """
   Imports any tables that reference `t:Explorer.Chain.Block.t/0` and that were
-  imported by `Explorer.Chain.Import.Stage.AddressesBlocksCoinBalances`.
+  imported by `Explorer.Chain.Import.Stage.BlockRelated`.
   """
 
   alias Explorer.Chain.Import.{Runner, Stage}
 
   @behaviour Stage
 
   @default_runners [
-    Runner.Transactions,
     Runner.Transaction.Forks,
     Runner.Logs,
     Runner.Tokens,
-    Runner.TokenTransfers,
     Runner.TokenInstances,
     Runner.Address.TokenBalances,
     Runner.TransactionActions,
diff --git a/apps/explorer/lib/explorer/chain/import/stage/addresses_blocks_coin_balances.ex b/apps/explorer/lib/explorer/chain/import/stage/block_related.ex
similarity index 76%
rename from apps/explorer/lib/explorer/chain/import/stage/addresses_blocks_coin_balances.ex
rename to apps/explorer/lib/explorer/chain/import/stage/block_related.ex
index cfa42beaf5f6..a9c25cf03b27 100644
--- a/apps/explorer/lib/explorer/chain/import/stage/addresses_blocks_coin_balances.ex
+++ b/apps/explorer/lib/explorer/chain/import/stage/block_related.ex
@@ -1,7 +1,6 @@
-defmodule Explorer.Chain.Import.Stage.AddressesBlocksCoinBalances do
+defmodule Explorer.Chain.Import.Stage.BlockRelated do
   @moduledoc """
-  Import addresses, blocks and balances.
-  No tables have foreign key to addresses anymore, so it's possible to import addresses along with them.
+  Import blocks along with block-related entities.
   """
 
   alias Explorer.Chain.Import.{Runner, Stage}
@@ -13,7 +12,9 @@ defmodule Explorer.Chain.Import.Stage.AddressesBlocksCoinBalances do
   @rest_runners [
     Runner.Address.CoinBalances,
     Runner.Blocks,
-    Runner.Address.CoinBalancesDaily
+    Runner.Address.CoinBalancesDaily,
+    Runner.Transactions,
+    Runner.TokenTransfers
   ]
 
   @impl Stage
diff --git a/apps/explorer/lib/explorer/migrator/token_transfer_block_consensus.ex b/apps/explorer/lib/explorer/migrator/token_transfer_block_consensus.ex
new file mode 100644
index 000000000000..f2660501fb98
--- /dev/null
+++ b/apps/explorer/lib/explorer/migrator/token_transfer_block_consensus.ex
@@ -0,0 +1,57 @@
+defmodule Explorer.Migrator.TokenTransferBlockConsensus do
+  @moduledoc """
+  Fixes the `block_consensus` field for token transfers
+  """
+
+  use Explorer.Migrator.FillingMigration
+
+  import Ecto.Query
+
+  alias Explorer.Chain.TokenTransfer
+  alias Explorer.Migrator.FillingMigration
+  alias Explorer.Repo
+
+  @migration_name "token_transfers_block_consensus"
+
+  @impl FillingMigration
+  def migration_name, do: @migration_name
+
+  @impl FillingMigration
+  def last_unprocessed_identifiers do
+    limit = batch_size() * concurrency()
+
+    unprocessed_data_query()
+    |> select([tt], {tt.transaction_hash, tt.block_hash, tt.log_index})
+    |> limit(^limit)
+    |> Repo.all(timeout: :infinity)
+  end
+
+  @impl FillingMigration
+  def unprocessed_data_query do
+    from(
+      tt in TokenTransfer,
+      join: block in assoc(tt, :block),
+      where: tt.block_consensus != block.consensus
+    )
+  end
+
+  @impl FillingMigration
+  def update_batch(token_transfer_ids) do
+    token_transfer_ids
+    |> build_update_query()
+    |> Repo.query!([], timeout: :infinity)
+  end
+
+  @impl FillingMigration
+  def update_cache, do: :ok
+
+  defp build_update_query(token_transfer_ids) do
+    """
+    UPDATE token_transfers tt
+    SET block_consensus = b.consensus
+    FROM blocks b
+    WHERE tt.block_hash = b.hash
+      AND (tt.transaction_hash, tt.block_hash, tt.log_index) IN #{TokenTransfer.encode_token_transfer_ids(token_transfer_ids)};
+    """
+  end
+end
diff --git a/apps/explorer/lib/explorer/migrator/transaction_block_consensus.ex b/apps/explorer/lib/explorer/migrator/transaction_block_consensus.ex
new file mode 100644
index 000000000000..11c55111d3e4
--- /dev/null
+++ b/apps/explorer/lib/explorer/migrator/transaction_block_consensus.ex
@@ -0,0 +1,52 @@
+defmodule Explorer.Migrator.TransactionBlockConsensus do
+  @moduledoc """
+  Fixes the `block_consensus` field for transactions
+  """
+
+  use Explorer.Migrator.FillingMigration
+
+  import Ecto.Query
+
+  alias Explorer.Chain.Transaction
+  alias Explorer.Migrator.FillingMigration
+  alias Explorer.Repo
+
+  @migration_name "transactions_block_consensus"
+
+  @impl FillingMigration
+  def migration_name, do: @migration_name
+
+  @impl FillingMigration
+  def last_unprocessed_identifiers do
+    limit = batch_size() * concurrency()
+
+    unprocessed_data_query()
+    |> select([t], t.hash)
+    |> limit(^limit)
+    |> Repo.all(timeout: :infinity)
+  end
+
+  @impl FillingMigration
+  def unprocessed_data_query do
+    from(
+      transaction in Transaction,
+      join: block in assoc(transaction, :block),
+      where: transaction.block_consensus != block.consensus
+    )
+  end
+
+  @impl FillingMigration
+  def update_batch(transaction_hashes) do
+    query =
+      from(transaction in Transaction,
+        join: block in assoc(transaction, :block),
+        where: transaction.hash in ^transaction_hashes,
+        update: [set: [block_consensus: block.consensus]]
+      )
+
+    Repo.update_all(query, [], timeout: :infinity)
+  end
+
+  @impl FillingMigration
+  def update_cache, do: :ok
+end
diff --git a/apps/explorer/test/explorer/chain/import_test.exs b/apps/explorer/test/explorer/chain/import_test.exs
index b0dfad37d4e6..d08b7595237e 100644
--- a/apps/explorer/test/explorer/chain/import_test.exs
+++ b/apps/explorer/test/explorer/chain/import_test.exs
@@ -375,12 +375,12 @@ defmodule Explorer.Chain.ImportTest do
       not_existing_block_hash = "0xf6b4b8c88df3ebd252ec476328334dc026cf66606a84fb769b3d3cbccc8471db"
 
       incorrect_data =
-        update_in(@import_data, [:transactions, :params], fn params ->
+        update_in(@import_data, [:logs, :params], fn params ->
           [params |> Enum.at(0) |> Map.put(:block_hash, not_existing_block_hash)]
         end)
 
       assert_raise(Postgrex.Error, fn -> Import.all(incorrect_data) end)
-      assert [] = Repo.all(Transaction)
+      assert [] = Repo.all(Log)
 
       assert %{consensus: true, refetch_needed: true} = Repo.one(Block)
     end

From 81256f3981f70ac205f31620a4a8ece4f8c312c7 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 16 Jul 2024 13:41:10 +0300
Subject: [PATCH 32/32] chore(deps): bump absinthe from 1.7.6 to 1.7.7 (#10420)

Bumps [absinthe](https://github.com/absinthe-graphql/absinthe) from 1.7.6 to 1.7.7.
- [Release notes](https://github.com/absinthe-graphql/absinthe/releases)
- [Changelog](https://github.com/absinthe-graphql/absinthe/blob/main/CHANGELOG.md)
- [Commits](https://github.com/absinthe-graphql/absinthe/compare/v1.7.6...v1.7.7)

---
updated-dependencies:
- dependency-name: absinthe
  dependency-type: direct:production
  update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- mix.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mix.lock b/mix.lock index 93f838e64d7e..4fd99527594f 100644 --- a/mix.lock +++ b/mix.lock @@ -1,5 +1,5 @@ %{ - "absinthe": {:hex, :absinthe, "1.7.6", "0b897365f98d068cfcb4533c0200a8e58825a4aeeae6ec33633ebed6de11773b", [:mix], [{:dataloader, "~> 1.0.0 or ~> 2.0", [hex: :dataloader, repo: "hexpm", optional: true]}, {:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}, {:nimble_parsec, "~> 1.2.2 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}, {:opentelemetry_process_propagator, "~> 0.2.1", [hex: :opentelemetry_process_propagator, repo: "hexpm", optional: true]}, {:telemetry, "~> 1.0 or ~> 0.4", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "e7626951ca5eec627da960615b51009f3a774765406ff02722b1d818f17e5778"}, + "absinthe": {:hex, :absinthe, "1.7.7", "ecbf4e9b21372dda271c79bb43dded3583b4f080348c5e68d9b5445e790ff17e", [:mix], [{:dataloader, "~> 1.0.0 or ~> 2.0", [hex: :dataloader, repo: "hexpm", optional: true]}, {:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}, {:nimble_parsec, "~> 1.2.2 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}, {:opentelemetry_process_propagator, "~> 0.2.1 or ~> 0.3", [hex: :opentelemetry_process_propagator, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "2145519828bcb7c8621b72d7af2bcff88b01cba2774583c40ebd867e1d336ff6"}, "absinthe_phoenix": {:hex, :absinthe_phoenix, "2.0.3", "74e0862f280424b7bc290f6f69e133268bce0b4e7db0218c7e129c5c2b1d3fd4", [:mix], [{:absinthe, "~> 1.5", [hex: :absinthe, repo: "hexpm", optional: false]}, {:absinthe_plug, "~> 1.5", [hex: :absinthe_plug, repo: "hexpm", optional: false]}, {:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:phoenix, "~> 1.5", [hex: :phoenix, repo: "hexpm", optional: false]}, {:phoenix_html, "~> 2.13 or ~> 3.0 or ~> 4.0", [hex: :phoenix_html, repo: "hexpm", optional: true]}, {:phoenix_pubsub, "~> 2.0", [hex: :phoenix_pubsub, repo: "hexpm", optional: false]}], "hexpm", "caffaea03c17ea7419fe07e4bc04c2399c47f0d8736900623dbf4749a826fd2c"}, "absinthe_plug": {:git, "https://github.com/blockscout/absinthe_plug.git", "90a8188e94e2650f13259fb16462075a87f98e18", [tag: "1.5.8"]}, "absinthe_relay": {:hex, :absinthe_relay, "1.5.2", "cfb8aed70f4e4c7718d3f1c212332d2ea728f17c7fc0f68f1e461f0f5f0c4b9a", [:mix], [{:absinthe, "~> 1.5.0 or ~> 1.6.0 or ~> 1.7.0", [hex: :absinthe, repo: "hexpm", optional: false]}, {:ecto, "~> 2.0 or ~> 3.0", [hex: :ecto, repo: "hexpm", optional: true]}], "hexpm", "0587ee913afa31512e1457a5064ee88427f8fe7bcfbeeecd41c71d9cff0b62b6"},
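
A note on PATCH 29 above: the added `where:` clause makes reads reorg-safe by requiring an internal transaction and its parent transaction to agree on `block_hash`. The same condition can be flipped around to look for stale rows; the snippet below is a sketch for an IEx session, not code from the patch, and assumes only the `InternalTransaction` schema and the `:transaction` association used in the diff:

    import Ecto.Query

    alias Explorer.Chain.InternalTransaction
    alias Explorer.Repo

    # Internal transactions stranded by a reorg: their block_hash no longer
    # matches the block their parent transaction was finally included in.
    stale_count =
      from(child in InternalTransaction,
        inner_join: transaction in assoc(child, :transaction),
        where: child.block_hash != transaction.block_hash
      )
      |> Repo.aggregate(:count)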
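
On PATCH 30: the rewritten `cond` clause in `NFTHelper.get_media_src/2` covers both shapes of `properties.image` that occur in token-instance metadata, where the earlier code assumed only the map shape. Two hypothetical inputs the new clause handles (the URLs are invented for illustration):

    # Map shape: the URL sits under the "description" key
    %{"properties" => %{"image" => %{"description" => "https://example.com/nft.png"}}}

    # String shape: the value is the URL itself
    %{"properties" => %{"image" => "https://example.com/nft.png"}}

In the first case `is_map(image)` is true and `image["description"]` is returned; in the second, the string is returned unchanged.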
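
On PATCH 31: both new migrators implement the `Explorer.Migrator.FillingMigration` behaviour, which drives a backfill by repeatedly fetching a batch of unprocessed identifiers and updating them until `unprocessed_data_query/0` matches nothing. A minimal sketch of that callback wiring follows; the module name, migration name, and the `is_nil/1` predicate are hypothetical, while the callback set and the `batch_size/0` and `concurrency/0` helpers mirror the modules in the diff:

    defmodule Explorer.Migrator.ExampleBackfill do
      # Hypothetical backfill following the FillingMigration callback shape.
      use Explorer.Migrator.FillingMigration

      import Ecto.Query

      alias Explorer.Chain.Transaction
      alias Explorer.Migrator.FillingMigration
      alias Explorer.Repo

      @migration_name "example_backfill"

      @impl FillingMigration
      def migration_name, do: @migration_name

      # One pass pulls at most batch_size() * concurrency() identifiers.
      @impl FillingMigration
      def last_unprocessed_identifiers do
        limit = batch_size() * concurrency()

        unprocessed_data_query()
        |> select([t], t.hash)
        |> limit(^limit)
        |> Repo.all(timeout: :infinity)
      end

      # Rows still needing the fix; the migration finishes once this is empty.
      @impl FillingMigration
      def unprocessed_data_query do
        from(t in Transaction, where: is_nil(t.block_consensus))
      end

      @impl FillingMigration
      def update_batch(hashes) do
        query =
          from(t in Transaction,
            where: t.hash in ^hashes,
            update: [set: [block_consensus: true]]
          )

        Repo.update_all(query, [], timeout: :infinity)
      end

      # No derived caches need refreshing in this sketch.
      @impl FillingMigration
      def update_cache, do: :ok
    end

As the config and application diffs in PATCH 31 show, such a module also needs a `config :explorer, <Module>, enabled: true` entry (with `enabled: false` in the test runtime config) plus a `configure/1` line in `Explorer.Application` before it is started.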