feat: improve decompressor
davidtaikocha committed Jan 20, 2025
1 parent 6ca43fe commit df623af
Showing 7 changed files with 90 additions and 109 deletions.
@@ -31,29 +31,11 @@ func createPayloadAndSetHead(
"l1Origin", meta.L1Origin,
)

// Insert a TaikoL2.anchor / TaikoL2.anchorV2 transaction at transactions list head
var (
txList []*types.Transaction
err error
)
if len(meta.TxListBytes) != 0 {
if err := rlp.DecodeBytes(meta.TxListBytes, &txList); err != nil {
log.Error("Invalid txList bytes", "blockID", meta.createExecutionPayloadsMetaData.BlockID)
return nil, err
}
}

// Insert the anchor transaction at the head of the transactions list
txList = append([]*types.Transaction{anchorTx}, txList...)
if meta.createExecutionPayloadsMetaData.TxListBytes, err = rlp.EncodeToBytes(txList); err != nil {
log.Error("Encode txList error", "blockID", meta.BlockID, "error", err)
return nil, err
}

payload, err := createExecutionPayloads(
ctx,
rpc,
meta.createExecutionPayloadsMetaData,
anchorTx,
)
if err != nil {
return nil, fmt.Errorf("failed to create execution payloads: %w", err)
@@ -103,7 +85,15 @@ func createExecutionPayloads(
ctx context.Context,
rpc *rpc.Client,
meta *createExecutionPayloadsMetaData,
anchorTx *types.Transaction,
) (payloadData *engine.ExecutableData, err error) {
// Insert a TaikoL2.anchor / TaikoL2.anchorV2 transaction at transactions list head
txListBytes, err := rlp.EncodeToBytes(append([]*types.Transaction{anchorTx}, meta.Txs...))
if err != nil {
log.Error("Encode txList error", "blockID", meta.BlockID, "error", err)
return nil, err
}

fc := &engine.ForkchoiceStateV1{HeadBlockHash: meta.ParentHash}
attributes := &engine.PayloadAttributes{
Timestamp: meta.Timestamp,
@@ -114,7 +104,7 @@
Beneficiary: meta.SuggestedFeeRecipient,
GasLimit: uint64(meta.GasLimit) + consensus.AnchorGasLimit,
Timestamp: meta.Timestamp,
TxList: meta.TxListBytes,
TxList: txListBytes,
MixHash: meta.Difficulty,
ExtraData: meta.ExtraData,
},
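For reference, a minimal standalone sketch of the step this commit relocates: the anchor transaction is prepended to the already-decoded transaction list, and the combined list is RLP-encoded once, inside createExecutionPayloads. The helper name and the dummy legacy transactions below are illustrative, not taken from the repository.

package main

import (
    "fmt"
    "math/big"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/rlp"
)

// prependAnchorAndEncode puts the anchor transaction at the head of the
// decoded transaction list and RLP-encodes the result, mirroring the
// encoding step that now lives inside createExecutionPayloads.
func prependAnchorAndEncode(anchorTx *types.Transaction, txs types.Transactions) ([]byte, error) {
    return rlp.EncodeToBytes(append(types.Transactions{anchorTx}, txs...))
}

func main() {
    to := common.HexToAddress("0x0000000000000000000000000000000000000001")
    anchorTx := types.NewTx(&types.LegacyTx{Nonce: 0, To: &to, Gas: 21000, GasPrice: big.NewInt(1), Value: big.NewInt(0)})
    userTx := types.NewTx(&types.LegacyTx{Nonce: 1, To: &to, Gas: 21000, GasPrice: big.NewInt(1), Value: big.NewInt(0)})

    encoded, err := prependAnchorAndEncode(anchorTx, types.Transactions{userTx})
    if err != nil {
        panic(err)
    }
    fmt.Printf("encoded %d bytes with the anchor transaction first\n", len(encoded))
}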
@@ -33,7 +33,7 @@ type createExecutionPayloadsMetaData struct {
Timestamp uint64
ParentHash common.Hash
L1Origin *rawdb.L1Origin
TxListBytes []byte
Txs types.Transactions
BaseFee *big.Int
Withdrawals []*types.Withdrawal
}
@@ -15,6 +15,7 @@ import (
pacayaBindings "github.com/taikoxyz/taiko-mono/packages/taiko-client/bindings/pacaya"
anchorTxConstructor "github.com/taikoxyz/taiko-mono/packages/taiko-client/driver/anchor_tx_constructor"
"github.com/taikoxyz/taiko-mono/packages/taiko-client/driver/chain_syncer/beaconsync"
txListDecompressor "github.com/taikoxyz/taiko-mono/packages/taiko-client/driver/txlist_decompressor"
txlistFetcher "github.com/taikoxyz/taiko-mono/packages/taiko-client/driver/txlist_fetcher"
eventIterator "github.com/taikoxyz/taiko-mono/packages/taiko-client/pkg/chain_iterator/event_iterator"
"github.com/taikoxyz/taiko-mono/packages/taiko-client/pkg/rpc"
@@ -23,24 +24,27 @@ import (

// BlocksInserterOntake is responsible for inserting Ontake blocks to the L2 execution engine.
type BlocksInserterOntake struct {
rpc *rpc.Client
progressTracker *beaconsync.SyncProgressTracker
blobDatasource *rpc.BlobDataSource
anchorConstructor *anchorTxConstructor.AnchorTxConstructor // TaikoL2.anchor transactions constructor
rpc *rpc.Client
progressTracker *beaconsync.SyncProgressTracker
blobDatasource *rpc.BlobDataSource
txListDecompressor *txListDecompressor.TxListDecompressor // Transactions list decompressor
anchorConstructor *anchorTxConstructor.AnchorTxConstructor // TaikoL2.anchor transactions constructor
}

// NewBlocksInserterOntake creates a new BlocksInserterOntake instance.
func NewBlocksInserterOntake(
rpc *rpc.Client,
progressTracker *beaconsync.SyncProgressTracker,
blobDatasource *rpc.BlobDataSource,
txListDecompressor *txListDecompressor.TxListDecompressor,
anchorConstructor *anchorTxConstructor.AnchorTxConstructor,
) *BlocksInserterOntake {
return &BlocksInserterOntake{
rpc: rpc,
progressTracker: progressTracker,
blobDatasource: blobDatasource,
anchorConstructor: anchorConstructor,
rpc: rpc,
progressTracker: progressTracker,
blobDatasource: blobDatasource,
txListDecompressor: txListDecompressor,
anchorConstructor: anchorConstructor,
}
}

@@ -152,7 +156,12 @@ func (i *BlocksInserterOntake) InsertBlocks(
L1BlockHeight: meta.GetRawBlockHeight(),
L1BlockHash: meta.GetRawBlockHash(),
},
TxListBytes: txListBytes,
Txs: i.txListDecompressor.TryDecompress(
i.rpc.L2.ChainID,
txListBytes,
meta.GetBlobUsed(),
false,
),
Withdrawals: make([]*types.Withdrawal, 0),
},
AnchorBlockID: new(big.Int).SetUint64(meta.GetAnchorBlockID()),
@@ -10,7 +10,6 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"

"github.com/taikoxyz/taiko-mono/packages/taiko-client/bindings/encoding"
"github.com/taikoxyz/taiko-mono/packages/taiko-client/bindings/metadata"
@@ -79,23 +78,17 @@ func (i *BlocksInserterPacaya) InsertBlocks(
return fmt.Errorf("failed to fetch tx list: %w", err)
}

txsInBatchBytes := i.txListDecompressor.TryDecompress(
i.rpc.L2.ChainID,
meta.GetBatchID(),
txListBytes,
meta.GetNumBlobs() != 0,
true,
)

var (
allTxs = i.txListDecompressor.TryDecompress(
i.rpc.L2.ChainID,
txListBytes,
meta.GetNumBlobs() != 0,
true,
)
parent *types.Header
lastPayloadData *engine.ExecutableData
allTxs types.Transactions
txListCursor = 0
)
if err = rlp.DecodeBytes(txsInBatchBytes, &allTxs); err != nil {
return fmt.Errorf("failed to decode tx list: %w", err)
}

for j, blockInfo := range meta.GetBlocks() {
// Fetch the L2 parent block; if the node has just finished a P2P sync, we simply use the tracker's
@@ -136,10 +129,6 @@
"beaconSyncTriggered", i.progressTracker.Triggered(),
)

txListBytes, err := rlp.EncodeToBytes(allTxs[txListCursor:blockInfo.NumTransactions])
if err != nil {
return fmt.Errorf("failed to encode tx list: %w", err)
}
blockID := new(big.Int).SetUint64(parent.Number.Uint64() + 1)
difficulty, err := encoding.CalculatePacayaDifficulty(blockID)
if err != nil {
@@ -210,7 +199,7 @@
L1BlockHeight: meta.GetRawBlockHeight(),
L1BlockHash: meta.GetRawBlockHash(),
},
TxListBytes: txListBytes,
Txs: allTxs[txListCursor:blockInfo.NumTransactions],
Withdrawals: make([]*types.Withdrawal, 0),
BaseFee: baseFee,
},
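A side effect of carrying decoded transactions (Txs types.Transactions) instead of raw RLP bytes is that the Pacaya inserter can hand each block its sub-slice of the batch without an encode/decode round-trip. Below is a small sketch of that idea; the splitBatch helper and its cursor arithmetic are illustrative assumptions, not the repository's code (the diff itself slices with allTxs[txListCursor:blockInfo.NumTransactions]).

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/core/types"
)

// splitBatch splits a decoded batch-wide transaction list into per-block
// slices using each block's declared transaction count. The clamping and
// cursor handling are assumptions for this sketch only.
func splitBatch(allTxs types.Transactions, txCounts []int) []types.Transactions {
    var (
        out    []types.Transactions
        cursor int
    )
    for _, n := range txCounts {
        end := cursor + n
        if end > len(allTxs) {
            end = len(allTxs) // defensive clamp, not taken from the diff
        }
        out = append(out, allTxs[cursor:end])
        cursor = end
    }
    return out
}

func main() {
    batch := make(types.Transactions, 5) // five placeholder transactions
    for i, blockTxs := range splitBatch(batch, []int{2, 3}) {
        fmt.Printf("block %d gets %d transactions\n", i, len(blockTxs))
    }
}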
41 changes: 20 additions & 21 deletions packages/taiko-client/driver/chain_syncer/blob/syncer.go
@@ -82,6 +82,7 @@ func NewSyncer(
client,
progressTracker,
blobDataSource,
txListDecompressor,
constructor,
),
blocksInserterPacaya: blocksInserter.NewBlocksInserterPacaya(
@@ -230,24 +231,6 @@ func (s *Syncer) onBlockProposed(
return nil
}

if meta.IsPacaya() {
log.Info(
"New BatchProposed event",
"l1Height", meta.GetRawBlockHeight(),
"l1Hash", meta.GetRawBlockHash(),
"batchID", meta.TaikoBatchMetaDataPacaya().GetBatchID(),
"lastBlockID", lastBlockID,
"blocks", len(meta.TaikoBatchMetaDataPacaya().GetBlocks()),
)
} else {
log.Info(
"New BlockProposedV2 event",
"l1Height", meta.GetRawBlockHeight(),
"l1Hash", meta.GetRawBlockHash(),
"blockID", meta.TaikoBlockMetaDataOntake().GetBlockID(),
)
}

// If the event's timestamp is in the future, we wait until the timestamp is reached, should
// only happen when testing.
if timestamp > uint64(time.Now().Unix()) {
@@ -270,12 +253,28 @@
}

// Insert new blocks to L2 EE's chain.
if !meta.IsPacaya() {
if err := s.blocksInserterOntake.InsertBlocks(ctx, meta, tx, endIter); err != nil {
if meta.IsPacaya() {
log.Info(
"New BatchProposed event",
"l1Height", meta.GetRawBlockHeight(),
"l1Hash", meta.GetRawBlockHash(),
"batchID", meta.TaikoBatchMetaDataPacaya().GetBatchID(),
"lastBlockID", lastBlockID,
"blocks", len(meta.TaikoBatchMetaDataPacaya().GetBlocks()),
)
if err := s.blocksInserterPacaya.InsertBlocks(ctx, meta, tx, endIter); err != nil {
return err
}
} else {
if err := s.blocksInserterPacaya.InsertBlocks(ctx, meta, tx, endIter); err != nil {
log.Info(
"New BlockProposedV2 event",
"l1Height", meta.GetRawBlockHeight(),
"l1Hash", meta.GetRawBlockHash(),
"blockID", meta.TaikoBlockMetaDataOntake().GetBlockID(),
)
if err := s.blocksInserterOntake.InsertBlocks(ctx, meta, tx, endIter); err != nil {
return err
}
}
@@ -41,27 +41,25 @@ func NewTxListDecompressor(
// 2. The transaction list bytes must be able to be RLP decoded into a list of transactions.
func (v *TxListDecompressor) TryDecompress(
chainID *big.Int,
blockOrBatchID *big.Int,
txListBytes []byte,
blobUsed bool,
postPacaya bool,
) []byte {
) types.Transactions {
if chainID.Cmp(params.HeklaNetworkID) == 0 && !postPacaya {
return v.tryDecompressHekla(blockOrBatchID, txListBytes, blobUsed)
return v.tryDecompressHekla(txListBytes, blobUsed)
}

return v.tryDecompress(blockOrBatchID, txListBytes, blobUsed)
return v.tryDecompress(txListBytes, blobUsed)
}

// tryDecompress is the inner implementation of TryDecompress.
func (v *TxListDecompressor) tryDecompress(
blockOrBatchID *big.Int,
txListBytes []byte,
blobUsed bool,
) []byte {
) types.Transactions {
// If the transaction list is empty, it's valid.
if len(txListBytes) == 0 {
return []byte{}
return types.Transactions{}
}

// If calldata is used, the compressed bytes of the transaction list must be
@@ -70,9 +68,8 @@ func (v *TxListDecompressor) tryDecompress(
log.Info(
"Compressed transactions list binary too large",
"length", len(txListBytes),
"blockOrBatchID", blockOrBatchID,
)
return []byte{}
return types.Transactions{}
}

var (
@@ -82,31 +79,29 @@

// Decompress the transaction list bytes.
if txListBytes, err = utils.Decompress(txListBytes); err != nil {
log.Info("Failed to decompress tx list bytes", "blockOrBatchID", blockOrBatchID, "error", err)
return []byte{}
log.Info("Failed to decompress tx list bytes", "error", err)
return types.Transactions{}
}

// Try to RLP decode the transaction list bytes.
if err = rlp.DecodeBytes(txListBytes, &txs); err != nil {
log.Info("Failed to decode transactions list bytes", "blockOrBatchID", blockOrBatchID, "error", err)
return []byte{}
log.Info("Failed to decode transactions list bytes", "error", err)
return types.Transactions{}
}

log.Info("Transaction list is valid", "blockOrBatchID", blockOrBatchID)
return txListBytes
return txs
}

// tryDecompressHekla is the same as tryDecompress, but it's used for the Hekla network with
// an incorrect legacy bytes size check.
// ref: https://github.com/taikoxyz/taiko-client/pull/783
func (v *TxListDecompressor) tryDecompressHekla(
blockID *big.Int,
txListBytes []byte,
blobUsed bool,
) []byte {
) types.Transactions {
// If the transaction list is empty, it's valid.
if len(txListBytes) == 0 {
return []byte{}
return types.Transactions{}
}

var (
@@ -116,23 +111,23 @@ func (v *TxListDecompressor) tryDecompressHekla(

// Decompress the transaction list bytes.
if txListBytes, err = utils.Decompress(txListBytes); err != nil {
log.Info("Failed to decompress tx list bytes", "blockID", blockID, "error", err)
return []byte{}
log.Info("Failed to decompress tx list bytes", "error", err)
return types.Transactions{}
}

// If calldata is used, the compressed bytes of the transaction list must be
// less than or equal to maxBytesPerTxList.
if !blobUsed && (len(txListBytes) > int(v.maxBytesPerTxList)) {
log.Info("Compressed transactions list binary too large", "length", len(txListBytes), "blockID", blockID)
return []byte{}
log.Info("Compressed transactions list binary too large", "length", len(txListBytes))
return types.Transactions{}
}

// Try to RLP decode the transaction list bytes.
if err = rlp.DecodeBytes(txListBytes, &txs); err != nil {
log.Info("Failed to decode transactions list bytes", "blockID", blockID, "error", err)
return []byte{}
log.Info("Failed to decode transactions list bytes", "error", err)
return types.Transactions{}
}

log.Info("Transaction list is valid", "blockID", blockID)
return txListBytes
log.Info("Transaction list is valid")
return txs
}
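To make the new decompressor interface concrete, here is a minimal sketch of the decode path it now implements: decompress, RLP-decode straight into types.Transactions, and fall back to an empty list on any failure. Callers now receive decoded transactions directly, e.g. txs := d.TryDecompress(chainID, txListBytes, blobUsed, postPacaya), instead of raw bytes plus a block/batch ID. zlib is used below as a stand-in for the project's utils.Decompress helper, and the standalone function name is illustrative.

package main

import (
    "bytes"
    "compress/zlib"
    "fmt"
    "io"

    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/rlp"
)

// decodeTxList mirrors the new flow of tryDecompress: decompress the payload,
// RLP-decode it into types.Transactions, and return an empty list if any step
// fails. zlib stands in here for the project's utils.Decompress helper.
func decodeTxList(compressed []byte) types.Transactions {
    if len(compressed) == 0 {
        return types.Transactions{} // an empty list is considered valid
    }
    r, err := zlib.NewReader(bytes.NewReader(compressed))
    if err != nil {
        return types.Transactions{}
    }
    defer r.Close()

    raw, err := io.ReadAll(r)
    if err != nil {
        return types.Transactions{}
    }

    var txs types.Transactions
    if err := rlp.DecodeBytes(raw, &txs); err != nil {
        return types.Transactions{}
    }
    return txs
}

func main() {
    fmt.Println(len(decodeTxList(nil))) // 0: empty input yields an empty list
}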