diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml index 034d4dc2..7c9d5c2e 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -123,6 +123,7 @@ jobs: yq -Y --in-place '.deploy_cdk_bridge_infra = false' params.yml yq -Y --in-place '.deploy_zkevm_permissionless_node = false' params.yml yq -Y --in-place '.deploy_observability = false' params.yml + yq -Y --in-place '.deploy_blutgang = false' params.yml - name: Deploy L1 run: | @@ -166,6 +167,12 @@ jobs: kurtosis run --enclave cdk-v1 --args-file params.yml . yq -Y --in-place '.deploy_observability = false' params.yml # reset + - name: Deploy Loadbalancer (blutgang) + run: | + yq -Y --in-place '.deploy_blutgang = true' params.yml + kurtosis run --enclave cdk-v1 --args-file params.yml . + yq -Y --in-place '.deploy_blutgang = false' params.yml # reset + - name: Check that batches are being verified run: | timeout_minutes="${CHECK_VERIFIED_BATCHES_TIMEOUT_MINUTES}" diff --git a/cdk_blutgang.star b/cdk_blutgang.star new file mode 100644 index 00000000..1fcbc272 --- /dev/null +++ b/cdk_blutgang.star @@ -0,0 +1,51 @@ +def run(plan, args): + blutgang_name = "blutgang" + args["deployment_suffix"] + blutgang_config_template = read_file( + src="./templates/blutgang/blutgang-config.toml" + ) + + zkevm_sequencer_service = plan.get_service(name="zkevm-node-sequencer" + args["deployment_suffix"]) + zkevm_sequencer_http_url = "http://{}:{}".format(zkevm_sequencer_service.ip_address, zkevm_sequencer_service.ports["rpc"].number) + + zkevm_rpc_service = plan.get_service(name="zkevm-node-rpc" + args["deployment_suffix"]) + zkevm_rpc_http_url = "http://{}:{}".format(zkevm_rpc_service.ip_address, zkevm_rpc_service.ports["http-rpc"].number) + zkevm_rpc_ws_url = "ws://{}:{}".format(zkevm_rpc_service.ip_address, zkevm_rpc_service.ports["ws-rpc"].number) + + zkevm_rpc_pless_service = plan.get_service(name="zkevm-node-rpc-pless" + args["deployment_suffix"]) + zkevm_rpc_pless_http_url = 
"http://{}:{}".format(zkevm_rpc_pless_service.ip_address, zkevm_rpc_pless_service.ports["http-rpc"].number) + zkevm_rpc_pless_ws_url = "ws://{}:{}".format(zkevm_rpc_pless_service.ip_address, zkevm_rpc_pless_service.ports["ws-rpc"].number) + + blutgang_config_artifact = plan.render_templates( + name="blutgang-config-artifact", + config={ + "blutgang-config.toml": struct(template=blutgang_config_template, data={ + "l2_sequencer_url": zkevm_sequencer_http_url, + "l2_rpc_url": zkevm_rpc_http_url, + "l2_ws_url": zkevm_rpc_ws_url, + "l2_rpc_pless_url": zkevm_rpc_pless_http_url, + "l2_ws_pless_url": zkevm_rpc_pless_ws_url, + } | args) + }, + ) + + blutgang_service_config = ServiceConfig( + image=args["blutgang_image"], + ports={ + "http": PortSpec(args["blutgang_rpc_port"], application_protocol="http"), + "admin": PortSpec(args["blutgang_admin_port"], application_protocol="http"), + }, + files={ + "/etc/blutgang": Directory( + artifact_names=[ + blutgang_config_artifact, + ] + ), + }, + cmd=["/app/blutgang", "-c", "/etc/blutgang/blutgang-config.toml"] + ) + + plan.add_service( + name=blutgang_name, + config=blutgang_service_config, + description="Starting blutgang service", + ) \ No newline at end of file diff --git a/doc-drafts/fork-id-migration.org b/doc-drafts/fork-id-migration.org deleted file mode 100644 index f8e40481..00000000 --- a/doc-drafts/fork-id-migration.org +++ /dev/null @@ -1,307 +0,0 @@ -#+TITLE: Fork ID 7 to 9 Migration Process -#+DATE: -#+AUTHOR: John Hilliard -#+EMAIL: jhilliard@polygon.technology -#+CREATOR: John Hilliard -#+DESCRIPTION: - - -#+OPTIONS: toc:nil -#+LATEX_HEADER: \usepackage{geometry} -#+LATEX_HEADER: \usepackage{lmodern} -#+LATEX_HEADER: \geometry{left=1in,right=1in,top=1in,bottom=1in} -#+LaTeX_CLASS_OPTIONS: [letterpaper] - -Let's document the process of upgrading from fork 7 to fork 9 using -our Kurtosis package. 
These steps would more or less be the same in -production except we would be using a [[https://github.com/0xPolygonHermez/zkevm-contracts/blob/v5.0.1-rc.2-fork.8/contracts/PolygonZkEVMTimelock.sol][timelock]] contract to make the -calls. - -Just to make sure I don't have any lingering state, I'm going to run a -clean: - -#+begin_src bash -kurtosis clean -a -#+end_src - -Now, we need to downgrade all of the necessary params in order to -switch back to fork 7: - -#+begin_src diff -diff --git a/params.yml b/params.yml -index c2dd446..4caf2d0 100644 ---- a/params.yml -+++ b/params.yml -@@ -11,14 +11,14 @@ deployment_suffix: "-001" - stages: [1, 2, 3, 4, 5] - - # Docker images and repositories used to spin up services. --zkevm_prover_image: hermeznetwork/zkevm-prover:v6.0.0 --zkevm_node_image: 0xpolygon/cdk-validium-node:0.6.4-cdk --zkevm_da_image: 0xpolygon/cdk-data-availability:0.0.7 -+zkevm_prover_image: hermeznetwork/zkevm-prover:v4.0.19 -+zkevm_node_image: 0xpolygon/cdk-validium-node:0.5.13-cdk.3 -+zkevm_da_image: 0xpolygon/cdk-data-availability:0.0.6 - zkevm_agglayer_image: nulyjkdhthz/agglayer:v0.1.0 - # a38e68b5466d1997cea8466dbd4fc8dacd4e11d8 --zkevm_contracts_branch: develop # v5.0.1-rc.2-fork.8 -+zkevm_contracts_branch: v4.0.0-fork.7 # v5.0.1-rc.2-fork.8 --zkevm_rollup_fork_id: 9 -+zkevm_rollup_fork_id: 7 - zkevm_bridge_service_image: hermeznetwork/zkevm-bridge-service:v0.4.2 - zkevm_bridge_ui_image: hermeznetwork/zkevm-bridge-ui:latest # TODO: better tags for the bridge ui -#+end_src - -After making those changes we should be able to kick off a full redeployment: - -#+begin_src bash -kurtosis run --enclave cdk-v1 --args-file params.yml --image-download always . -#+end_src - -After running this command, let's confirm onchain that this is running -fork 7. 
-#+begin_src bash -kurtosis files download cdk-v1 genesis /tmp/fork-7-test -jq -r '.L1Config.polygonRollupManagerAddress' /tmp/fork-7-test/genesis.json -cast call --rpc-url "$(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc)" \ - "$(jq -r '.L1Config.polygonRollupManagerAddress' /tmp/fork-7-test/genesis.json)" \ - "rollupIDToRollupData(uint32)(address,uint64,address,uint64,bytes32,uint64,uint64,uint64,uint64,uint64,uint64,uint8)" 1 -#+end_src - -In my case, this is showing a ~7~ as the 4th parameter so I think -we're in good shape. We should also perform some test transactions and -ensure batches are being verified as expected. - -#+begin_src bash -export ETH_RPC_URL="$(kurtosis port print cdk-v1 zkevm-node-rpc-001 http-rpc)" -cast send --legacy --private-key 0x12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625 --value 0.01ether 0x0000000000000000000000000000000000000000 -cast rpc zkevm_batchNumber -cast rpc zkevm_virtualBatchNumber -cast rpc zkevm_verifiedBatchNumber -#+end_src - -* Clean stop of the sequencer - -Before attempting the upgrade, we need to ensure there is a clean stop -of the sequencer. In order to do this, we'll pick a halting batch -number like this: - -#+begin_src diff -diff --git a/templates/trusted-node/node-config.toml b/templates/trusted-node/node-config.toml -index 6c9b9fa..372d904 100644 ---- a/templates/trusted-node/node-config.toml -+++ b/templates/trusted-node/node-config.toml -@@ -117,7 +117,7 @@ StateConsistencyCheckInterval = "5s" - BatchMaxDeltaTimestamp = "20s" - L2BlockMaxDeltaTimestamp = "4s" - ResourceExhaustedMarginPct = 10 -- HaltOnBatchNumber = 0 -+ HaltOnBatchNumber = 64 - SequentialBatchSanityCheck = false - SequentialProcessL2Block = true - [Sequencer.StreamServer] -#+end_src - -After making that change and re-running ~kurtosis run~, we'll need to -wait for the sequencer to halt and for the verified batch to equal the -latest batch. 
After making that change, there should be some error logs that look like this: - -#+begin_example -{"level":"error","ts":1711481674.517157,"caller":"sequencer/finalizer.go:806","msg":"halting finalizer, error: finalizer reached stop sequencer on batch number: 64%!(EXTRA string=\n/home/runner/work/cdk-validium-node/cdk-validium-node/log/log.go:142 github.com/0xPolygonHermez/zkevm-node/log.appendStackTraceMaybeArgs()\n/home/runner/work/cdk-validium-node/cdk-validium-node/log/log.go:251 github.com/0xPolygonHermez/zkevm-node/log.Errorf()\n/home/runner/work/cdk-validium-node/cdk-validium-node/sequencer/finalizer.go:806 github.com/0xPolygonHermez/zkevm-node/sequencer.(*finalizer).Halt()\n/home/runner/work/cdk-validium-node/cdk-validium-node/sequencer/batch.go:221 github.com/0xPolygonHermez/zkevm-node/sequencer.(*finalizer).closeAndOpenNewWIPBatch()\n/home/runner/work/cdk-validium-node/cdk-validium-node/sequencer/batch.go:163 github.com/0xPolygonHermez/zkevm-node/sequencer.(*finalizer).finalizeWIPBatch()\n/home/runner/work/cdk-validium-node/cdk-validium-node/sequencer/finalizer.go:330 github.com/0xPolygonHermez/zkevm-node/sequencer.(*finalizer).finalizeBatches()\n/home/runner/work/cdk-validium-node/cdk-validium-node/sequencer/finalizer.go:166 
github.com/0xPolygonHermez/zkevm-node/sequencer.(*finalizer).Start()\n)","pid":7,"version":"v0.1.0","stacktrace":"github.com/0xPolygonHermez/zkevm-node/sequencer.(*finalizer).Halt\n\t/home/runner/work/cdk-validium-node/cdk-validium-node/sequencer/finalizer.go:806\ngithub.com/0xPolygonHermez/zkevm-node/sequencer.(*finalizer).closeAndOpenNewWIPBatch\n\t/home/runner/work/cdk-validium-node/cdk-validium-node/sequencer/batch.go:221\ngithub.com/0xPolygonHermez/zkevm-node/sequencer.(*finalizer).finalizeWIPBatch\n\t/home/runner/work/cdk-validium-node/cdk-validium-node/sequencer/batch.go:163\ngithub.com/0xPolygonHermez/zkevm-node/sequencer.(*finalizer).finalizeBatches\n\t/home/runner/work/cdk-validium-node/cdk-validium-node/sequencer/finalizer.go:330\ngithub.com/0xPolygonHermez/zkevm-node/sequencer.(*finalizer).Start\n\t/home/runner/work/cdk-validium-node/cdk-validium-node/sequencer/finalizer.go:166"} -#+end_example - -Now we need to wait for the verified batch number to catch up to the -trusted batch number: -#+begin_src bash -export ETH_RPC_URL="$(kurtosis port print cdk-v1 zkevm-node-rpc-001 http-rpc)" -cast rpc zkevm_batchNumber -cast rpc zkevm_verifiedBatchNumber -#+end_src - -Once those two numbers are the same, we should be in a good position -to stop the services that are going to be upgraded - -#+begin_src bash -kurtosis service stop cdk-v1 zkevm-executor-pless-001 -kurtosis service stop cdk-v1 zkevm-node-aggregator-001 -kurtosis service stop cdk-v1 zkevm-node-eth-tx-manager-001 -kurtosis service stop cdk-v1 zkevm-node-l2-gas-pricer-001 -kurtosis service stop cdk-v1 zkevm-node-rpc-001 -kurtosis service stop cdk-v1 zkevm-node-rpc-pless-001 -kurtosis service stop cdk-v1 zkevm-node-sequence-sender-001 -kurtosis service stop cdk-v1 zkevm-node-sequencer-001 -kurtosis service stop cdk-v1 zkevm-node-synchronizer-001 -kurtosis service stop cdk-v1 zkevm-node-synchronizer-pless-001 -kurtosis service stop cdk-v1 zkevm-prover-001 -#+end_src - -* Smart Contract Calls - -In order 
to upgrade, we're going to need to make a few smart contract -calls. - -#+begin_src bash -git clone git@github.com:0xPolygonHermez/zkevm-contracts.git -pushd zkevm-contracts/ -git reset --hard a38e68b5466d1997cea8466dbd4fc8dacd4e11d8 -npm i -printf "[profile.default]\nsrc = 'contracts'\nout = 'out'\nlibs = ['node_modules']\n" > foundry.toml -forge build -#+end_src - -Okay so now we have the contracts from a (hopefully) working version -of the repo. We can deploy a new verifier. This isn't strictly -necessary but good to do because in some cases you would need a new -verifier contract. - -#+begin_src bash -forge create --json \ - --rpc-url "http://$(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc)" \ - --private-key 0x12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625 \ - contracts/mocks/VerifierRollupHelperMock.sol:VerifierRollupHelperMock > verifier-out.json -#+end_src - -Okay so we'll first try to create a new rollup type for our upgraded -network. In order to configure this file, we'll need a bunch of values -from the l1 setup. 
- -#+begin_src bash -kurtosis service exec cdk-v1 contracts-001 "cat /opt/zkevm/combined.json" -#+end_src - -Let's try forge to create the contracts: - -#+begin_src bash -ger="0x1f7ad7caA53e35b4f0D138dC5CBF91aC108a2674" -pol="0xEdE9cf798E0fE25D35469493f43E88FeA4a5da0E" -bridge="0xD71f8F956AD979Cc2988381B8A743a2fE280537D" -mngr="0x2F50ef6b8e8Ee4E579B17619A92dE3E2ffbD8AD2" -forge create --json \ - --rpc-url "http://$(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc)" \ - --private-key 0x12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625 \ - contracts/v2/consensus/validium/migration/PolygonValidiumStorageMigration.sol:PolygonValidiumStorageMigration \ - --constructor-args $ger $pol $bridge $mngr > new-consensus-out.json - -genesis="0xd619a27d32e3050f2265a3f58dd74c8998572812da4874aa052f0886d0dfaf47" -cast send -j --rpc-url "http://$(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc)" \ - --private-key 0x12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625 \ - $mngr \ - 'addNewRollupType(address,address,uint64,uint8,bytes32,string)' \ - "$(jq -r '.deployedTo' new-consensus-out.json)" \ - "$(jq -r '.deployedTo' verifier-out.json)" \ - 9 0 "$genesis" "test!!!" > add-rollup-type-out.json -#+end_src - - -Assuming that all worked somehow, you should be able to get your new -rollup type id: - -#+begin_src bash -cat add-rollup-type-out.json | jq -r '.logs[0].topics[1]' -#+end_src - -Taking that id, we should be able to update our rollup: - -#+begin_src bash -rollup="0x1Fe038B54aeBf558638CA51C91bC8cCa06609e91" -cast send -j --rpc-url "http://$(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc)" \ - --private-key 0x12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625 \ - $mngr \ - 'updateRollup(address,uint32,bytes)' \ - "$rollup" 2 0x > update-rollup-type-out.json -#+end_src - -Now we should also be able to verify that our rollupid has been -updated. Previously the 4th value was a ~7~ and now it should be a -~9~. 
-#+begin_src bash -cast call --rpc-url "$(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc)" \ - "$(jq -r '.L1Config.polygonRollupManagerAddress' /tmp/fork-7-test/genesis.json)" \ - "rollupIDToRollupData(uint32)(address,uint64,address,uint64,bytes32,uint64,uint64,uint64,uint64,uint64,uint64,uint8)" 1 -#+end_src - -After updating the rollup it seems like the DA Protocol needs to be -set up again: - -#+begin_src bash -rollup="0x1Fe038B54aeBf558638CA51C91bC8cCa06609e91" -dac="0x5A6896A98c4B7C7E8f16d177C719a1d856b9154c" -cast send -j \ - --private-key "0x12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625" \ - --rpc-url "$(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc)" \ - "$rollup" 'setDataAvailabilityProtocol(address)' $dac > set-dac-out.json -#+end_src - - -* Node Upgrade - -In terms of the smart contracts, the upgrade should more or less be -done, but we need to start the nodes back up. This procedure is very -sensitive and we must ensure that the synchronizer starts first. The -main thing we'll do is revert the parameters back to the versions of -the node that worked with fork 9 and specify that ONLY stage 3 should run. - -#+begin_src diff -diff --git a/params.yml b/params.yml -index c2dd446..cdb8338 100644 ---- a/params.yml -+++ b/params.yml -@@ -8,7 +8,7 @@ deployment_suffix: "-001" - # The deployment process is divided into various stages. - # The `stages` parameter indicates the specific stages you wish the deployment to proceed through. - # By default, it will execute all the stages. --stages: [1, 2, 3, 4, 5] -+stages: [3] - - # Docker images and repositories used to spin up services. - zkevm_prover_image: hermeznetwork/zkevm-prover:v6.0.0 -#+end_src - -At this point, we should be able to run Kurtosis and ideally bring -back up the main node components.
Before starting the nodes back up, be -sure to remove the ~HaltOnBatchNumber~ setting that we added earlier -in the process. - -#+begin_src bash -kurtosis run --enclave cdk-v1 --args-file params.yml --image-download always . -#+end_src - -At this point, the core services are running and if everything went -well, we should be able to send a transaction and see that the batch -numbers are moving through their normal progression. - -#+begin_src bash -export ETH_RPC_URL="$(kurtosis port print cdk-v1 zkevm-node-rpc-001 http-rpc)" -cast send --legacy --private-key 0x12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625 --value 0.01ether 0x0000000000000000000000000000000000000000 -cast rpc zkevm_batchNumber -cast rpc zkevm_virtualBatchNumber -cast rpc zkevm_verifiedBatchNumber -#+end_src - -* Random Notes - -After starting the nodes back up I'm seeing a decent amount of errors -in the synchronizer like this, it doesn't seem like it actually causes -an issue, but it's a little odd. - -#+begin_example -{"level":"warn","ts":1711502381.03938,"caller":"etherman/etherman.go:661","msg":"Event not registered: {Address:0x1Fe038B54aeBf558638CA51C91bC8cCa06609e91 Topics:[0xd331bd4c4cd1afecb94a225184bded161ff3213624ba4fb58c4f30c5a861144a] Data:[0 0 0 0 0 0 0 0 0 0 0 0 90 104 150 169 140 75 124 126 143 22 209 119 199 25 161 216 86 185 21 76] BlockNumber:108 TxHash:0x1bb5e714dd96434ded2d818458cc517cf7b30f5787dbb3aedb667e5e3e96808e TxIndex:0 BlockHash:0xdf5850cd5a8975859595649a05ce245f02953e84af627e9b22a1f8381077f057 Index:0 Removed:false}","pid":7,"version":"0.6.4+cdk"} -#+end_example - -We can check this event directly from the rpc as well: - -#+begin_src bash -cast logs --rpc-url "http://$(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc)" --address 0x1Fe038B54aeBf558638CA51C91bC8cCa06609e91 --from-block 108 --to-block 108 -#+end_src - -We can reverse which event this is with the following script: - -#+begin_src bash -cat compiled-contracts/*.json | jq '.abi[] | 
select(.type == "event") | .type = "function"' | jq -s | polycli abi decode | grep d33 -cast sig-event 'SetDataAvailabilityProtocol(address)' -#+end_src - -It looks like the unregistered event is a call to -~SetDataAvailabilityProtocol(address)~, but it is unclear why that -particular event is not recognized. diff --git a/doc-drafts/ready-for-publication/README b/doc-drafts/ready-for-publication/README new file mode 100644 index 00000000..9a51804e --- /dev/null +++ b/doc-drafts/ready-for-publication/README @@ -0,0 +1,2 @@ +Docs in this folder are ready to be edited and meant for public +guides. \ No newline at end of file diff --git a/doc-drafts/working-with-the-timelock/working-with-the-timelock.org b/doc-drafts/working-with-the-timelock/working-with-the-timelock.org new file mode 100644 index 00000000..1d3527c7 --- /dev/null +++ b/doc-drafts/working-with-the-timelock/working-with-the-timelock.org @@ -0,0 +1,357 @@ +By default the Kurtosis CDK package will deploy a timelock contract +that can be used as an admin. For the sake of simplified testing, we +don't configure the timelock as the primary admin of the rollup +manager. We want to show how you could use the timelock in order to +get a better understanding. + +Assuming that you've already fully spun up your network, let's get the +details of our deployment. + +#+begin_src bash :exports both verbatim :results output code +kurtosis service exec cdk-v1 contracts-001 'cat /opt/zkevm/combined.json' +#+end_src + +#+RESULTS: +#+begin_src bash +The command was successfully executed and returned '0'. 
Output was: +{ + "polygonRollupManagerAddress": "0x2F50ef6b8e8Ee4E579B17619A92dE3E2ffbD8AD2", + "polygonZkEVMBridgeAddress": "0xD71f8F956AD979Cc2988381B8A743a2fE280537D", + "polygonZkEVMGlobalExitRootAddress": "0x1f7ad7caA53e35b4f0D138dC5CBF91aC108a2674", + "polTokenAddress": "0xEdE9cf798E0fE25D35469493f43E88FeA4a5da0E", + "zkEVMDeployerContract": "0xe5CF69183CFCF0571E733D59a1a53d4E6ceD6E85", + "deployerAddress": "0xE34aaF64b29273B7D567FCFc40544c014EEe9970", + "timelockContractAddress": "0x07783C37CAAFe0f05C4105250C032062A83F7AC2", + "deploymentRollupManagerBlockNumber": 19, + "upgradeToULxLyBlockNumber": 19, + "admin": "0xE34aaF64b29273B7D567FCFc40544c014EEe9970", + "trustedAggregator": "0xCae5b68Ff783594bDe1b93cdE627c741722c4D4d", + "proxyAdminAddress": "0xB93b2fD69CE28f0DB91842aBFa40720d7e2B8fd7", + "salt": "0x0000000000000000000000000000000000000000000000000000000000000001", + "polygonDataCommitteeAddress": "0x5A6896A98c4B7C7E8f16d177C719a1d856b9154c", + "firstBatchData": { + "transactions": "0xf9010380808401c9c38094d71f8f956ad979cc2988381b8a743a2fe280537d80b8e4f811bff7000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a40d5f56745a118d0906a34e69aec8c0db1cb8fa000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005ca1ab1e0000000000000000000000000000000000000000000000000000000005ca1ab1e1bff", + "globalExitRoot": "0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5", + "timestamp": 1713829086, + "sequencer": "0x5b06837A43bdC3dD9F114558DAf4B26ed49842Ed" + }, + "genesis": "0xd619a27d32e3050f2265a3f58dd74c8998572812da4874aa052f0886d0dfaf47", + "createRollupBlockNumber": 23, + "rollupAddress": 
"0x1Fe038B54aeBf558638CA51C91bC8cCa06609e91", + "verifierAddress": "0xf22E2B040B639180557745F47aB97dFA95B1e22a", + "consensusContract": "PolygonValidiumEtrog", + "polygonZkEVMGlobalExitRootL2Address": "0xa40d5f56745a118d0906a34e69aec8c0db1cb8fa" +} + +#+end_src + +In this case, it looks like my timelock is deployed at +~0x07783C37CAAFe0f05C4105250C032062A83F7AC2~. + +Let's confirm that the admin +~0xE34aaF64b29273B7D567FCFc40544c014EEe9970~ is actually a default +admin for the rollup manager. First we need to check what the admin +role is: + + +#+begin_src bash :exports both verbatim :results output code +cast call --rpc-url $(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc) 0x2F50ef6b8e8Ee4E579B17619A92dE3E2ffbD8AD2 'DEFAULT_ADMIN_ROLE()(bytes32)' +#+end_src + +#+RESULTS: +#+begin_src bash +0x0000000000000000000000000000000000000000000000000000000000000000 +#+end_src + + +Now let's see if the admin account has this role: + +#+begin_src bash :exports both verbatim :results output code +cast call --rpc-url $(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc) 0x2F50ef6b8e8Ee4E579B17619A92dE3E2ffbD8AD2 'hasRole(bytes32,address)(bool)' 0x0000000000000000000000000000000000000000000000000000000000000000 0xE34aaF64b29273B7D567FCFc40544c014EEe9970 +#+end_src + +#+RESULTS: +#+begin_src bash +true +#+end_src + + +Let's also confirm that the time lock does not have the default admin +role: + +#+begin_src bash :exports both verbatim :results output code +cast call --rpc-url $(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc) 0x2F50ef6b8e8Ee4E579B17619A92dE3E2ffbD8AD2 'hasRole(bytes32,address)(bool)' 0x0000000000000000000000000000000000000000000000000000000000000000 0x07783C37CAAFe0f05C4105250C032062A83F7AC2 +#+end_src + +#+RESULTS: +#+begin_src bash +false +#+end_src + + +Okay this looks good. 
Let's first use the current admin account to +grant admin access to the time lock: + +#+begin_src bash :exports both verbatim :results output code +cast send --rpc-url $(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc) 0x2F50ef6b8e8Ee4E579B17619A92dE3E2ffbD8AD2 \ + --private-key 0x12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625 \ + 'grantRole(bytes32,address)' 0x0000000000000000000000000000000000000000000000000000000000000000 0x07783C37CAAFe0f05C4105250C032062A83F7AC2 +#+end_src + +#+RESULTS: +#+begin_src bash + +blockHash 0x2a6ab08a2e87865a177bd24d16c96513c54cd08814f57aa73199712f5c71d0c0 +blockNumber 12342 +contractAddress +cumulativeGasUsed 58147 +effectiveGasPrice 3000000007 +from 0xE34aaF64b29273B7D567FCFc40544c014EEe9970 +gasUsed 58147 +logs [{"address":"0x2f50ef6b8e8ee4e579b17619a92de3e2ffbd8ad2","topics":["0x2f8788117e7eff1d82e926ec794901d17c78024a50270940304540a733656f0d","0x0000000000000000000000000000000000000000000000000000000000000000","0x00000000000000000000000007783c37caafe0f05c4105250c032062a83f7ac2","0x000000000000000000000000e34aaf64b29273b7d567fcfc40544c014eee9970"],"data":"0x","blockHash":"0x2a6ab08a2e87865a177bd24d16c96513c54cd08814f57aa73199712f5c71d0c0","blockNumber":"0x3036","transactionHash":"0xef9151dadc11aed67dd567426a610b9ff04c88beaad1a0a540fb02fb74a1be5d","transactionIndex":"0x0","logIndex":"0x0","removed":false}] +logsBloom 0x00000004000000000000000000000000000000400000000000008000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000020000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000010000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000100000000000020000000400000000000000000000000000000000000000000001000000200000000 +root +status 1 +transactionHash 
0xef9151dadc11aed67dd567426a610b9ff04c88beaad1a0a540fb02fb74a1be5d +transactionIndex 0 +type 2 +to 0x2F50ef6b8e8Ee4E579B17619A92dE3E2ffbD8AD2 +#+end_src + + +Okay, it looks like that transaction worked, let's confirm that the +timelock address is actually an admin now. This call previously +returned ~false~. Hopefully it returns ~true~ now. + +#+begin_src bash :exports both verbatim :results output code +cast call --rpc-url $(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc) 0x2F50ef6b8e8Ee4E579B17619A92dE3E2ffbD8AD2 'hasRole(bytes32,address)(bool)' 0x0000000000000000000000000000000000000000000000000000000000000000 0x07783C37CAAFe0f05C4105250C032062A83F7AC2 +#+end_src + +#+RESULTS: +#+begin_src bash +true +#+end_src + +Great, it looks like we're headed in the right direction. Now let's +confirm the setup of our timelock. In particular, we should make sure +that our timelock admin is setup correctly. + + +#+begin_src bash :exports both verbatim :results output code +cast call --rpc-url $(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc) 0x07783C37CAAFe0f05C4105250C032062A83F7AC2 'DEFAULT_ADMIN_ROLE()(bytes32)' +cast call --rpc-url $(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc) 0x07783C37CAAFe0f05C4105250C032062A83F7AC2 'EXECUTOR_ROLE()(bytes32)' +#+end_src + +#+RESULTS: +#+begin_src bash +0x0000000000000000000000000000000000000000000000000000000000000000 +0xd8aa0f3194971a2a116679f7c2090f6939c8d4e01a2a8d7e41d55e5351469e63 +#+end_src + +Now that we have the roles, let's check if the admin account has them: + +#+begin_src bash :exports both verbatim :results output code +cast call --rpc-url $(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc) 0x07783C37CAAFe0f05C4105250C032062A83F7AC2 'hasRole(bytes32,address)(bool)' 0x0000000000000000000000000000000000000000000000000000000000000000 0xE34aaF64b29273B7D567FCFc40544c014EEe9970 +cast call --rpc-url $(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc) 0x07783C37CAAFe0f05C4105250C032062A83F7AC2 
'hasRole(bytes32,address)(bool)' 0xd8aa0f3194971a2a116679f7c2090f6939c8d4e01a2a8d7e41d55e5351469e63 0xE34aaF64b29273B7D567FCFc40544c014EEe9970 +#+end_src + +#+RESULTS: +#+begin_src bash +false +true +#+end_src + +It looks like our typical admin account is configured as an executor +for the timelock, but not actually the admin. This means we should be +able to execute transactions, but it doesn't look like we would be +able to change the delay. From the [[https://github.com/OpenZeppelin/openzeppelin-contracts/blob/4032b42694ff6599b17ffde65b2b64d7fc8a38f8/contracts/governance/TimelockController.sol#L439-L456][code]] it looks like the timelock is +the only thing that can update its delay. + +At this point, the question is can we use the timelock to make an +admin call. Ideally we would revoke the admin's account as the default +admin role in the rollup manager. Let's see if it would even work. + +#+begin_src bash :exports both verbatim :results output code +cast call --from 0x07783C37CAAFe0f05C4105250C032062A83F7AC2 --rpc-url $(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc) 0x2F50ef6b8e8Ee4E579B17619A92dE3E2ffbD8AD2 'revokeRole(bytes32,address)' 0x0000000000000000000000000000000000000000000000000000000000000000 0xE34aaF64b29273B7D567FCFc40544c014EEe9970 +#+end_src + +#+RESULTS: +#+begin_src bash +0x +#+end_src + +This is a good sign, it means the timelock has the ability to revoke +admin access from the admin account in the rollup manager. Now we just +need to schedule the call. First let's get the call data. 
+ +#+begin_src bash :exports both verbatim :results output code +cast calldata 'revokeRole(bytes32,address)' 0x0000000000000000000000000000000000000000000000000000000000000000 0xE34aaF64b29273B7D567FCFc40544c014EEe9970 +#+end_src + +#+RESULTS: +#+begin_src bash +0xd547741f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e34aaf64b29273b7d567fcfc40544c014eee9970 +#+end_src + +This looks good, now we need to schedule the call + +#+begin_src bash :exports both verbatim :results output code +target="0x2F50ef6b8e8Ee4E579B17619A92dE3E2ffbD8AD2" +value="0" +calldata="0xd547741f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e34aaf64b29273b7d567fcfc40544c014eee9970" +predecessor="0x0000000000000000000000000000000000000000000000000000000000000000" +salt="0x0000000000000000000000000000000000000000000000000000000000000000" +delay="3601" + +cast send \ + --private-key 0x12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625 \ + --rpc-url $(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc) 0x07783C37CAAFe0f05C4105250C032062A83F7AC2 \ + 'schedule(address,uint256,bytes,bytes32,bytes32,uint256)' "$target" "$value" "$calldata" "$predecessor" "$salt" "$delay" +#+end_src + +#+RESULTS: +#+begin_src bash + +blockHash 0xacd613e435f89d04c07fdca6e58b2abef1d5d0f2473774950c6ff5335d24c055 +blockNumber 12761 +contractAddress +cumulativeGasUsed 67681 +effectiveGasPrice 3000000007 +from 0xE34aaF64b29273B7D567FCFc40544c014EEe9970 +gasUsed 67681 +logs 
[{"address":"0x07783c37caafe0f05c4105250c032062a83f7ac2","topics":["0x4cf4410cc57040e44862ef0f45f3dd5a5e02db8eb8add648d4b0e236f1d07dca","0x2834e50d0fbd2359263689c685f4afd0311de4b150625c349a40a7b2b7e7f34e","0x0000000000000000000000000000000000000000000000000000000000000000"],"data":"0x0000000000000000000000002f50ef6b8e8ee4e579b17619a92de3e2ffbd8ad2000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e110000000000000000000000000000000000000000000000000000000000000044d547741f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e34aaf64b29273b7d567fcfc40544c014eee997000000000000000000000000000000000000000000000000000000000","blockHash":"0xacd613e435f89d04c07fdca6e58b2abef1d5d0f2473774950c6ff5335d24c055","blockNumber":"0x31d9","transactionHash":"0x4fd7c8cd80b05a4472748aa6cdf8e82a5a857fc02f90c231c01816bcb0fd2b73","transactionIndex":"0x0","logIndex":"0x0","removed":false}] +logsBloom 0x00000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000004000200000000000000000000000000000000000000020000000000000000000800000000000000000000000000400000000000000000000000000000000000000000000040000000000000000000000000000000080000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000400000000000000020000000000000000000000000000000000000000000000000000000000000000000 +root +status 1 +transactionHash 0x4fd7c8cd80b05a4472748aa6cdf8e82a5a857fc02f90c231c01816bcb0fd2b73 +transactionIndex 0 +type 2 +to 0x07783C37CAAFe0f05C4105250C032062A83F7AC2 +#+end_src + +Okay this looks successful, that should mean our call is +scheduled. 
Let's take a look at the logs: + +| Event | CallScheduled(bytes32,uint256,address,uint256,bytes,bytes32,uint256) | 0x4cf4410cc57040e44862ef0f45f3dd5a5e02db8eb8add648d4b0e236f1d07dca | + +That means our id is ~0x2834e50d0fbd2359263689c685f4afd0311de4b150625c349a40a7b2b7e7f34e~ + +With that, we should be able to check on the status: + +#+begin_src bash :exports both verbatim :results output code +id="0x2834e50d0fbd2359263689c685f4afd0311de4b150625c349a40a7b2b7e7f34e" + +cast call --rpc-url $(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc) 0x07783C37CAAFe0f05C4105250C032062A83F7AC2 \ + 'isOperation(bytes32)(bool)' "$id" +cast call --rpc-url $(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc) 0x07783C37CAAFe0f05C4105250C032062A83F7AC2 \ + 'isOperationPending(bytes32)(bool)' "$id" +cast call --rpc-url $(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc) 0x07783C37CAAFe0f05C4105250C032062A83F7AC2 \ + 'isOperationReady(bytes32)(bool)' "$id" +cast call --rpc-url $(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc) 0x07783C37CAAFe0f05C4105250C032062A83F7AC2 \ + 'isOperationDone(bytes32)(bool)' "$id" +cast call --rpc-url $(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc) 0x07783C37CAAFe0f05C4105250C032062A83F7AC2 \ + 'getTimestamp(bytes32)(uint256)' "$id" + +#+end_src + +#+RESULTS: +#+begin_src bash +true +true +false +false +1713985543 +#+end_src + +This looks good, it looks like our operation is scheduled. We now just +need to wait until ~1713985543~. + +#+begin_src bash :exports both verbatim :results output code +printf "%d\n" $(cast block -j --rpc-url $(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc) | jq -r '.timestamp') +#+end_src + +#+RESULTS: +#+begin_src bash +1713983298 +#+end_src + +It looks like we still have to wait 37 minutes. + +Once the time is elapsed, I should be able to repeat the same exact +call from ~schedule~ and use ~execute~ instead. First let's make sure +that it reports that it's ready. 
+ +#+begin_src bash :exports both verbatim :results output code +id="0x2834e50d0fbd2359263689c685f4afd0311de4b150625c349a40a7b2b7e7f34e" +cast call --rpc-url $(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc) 0x07783C37CAAFe0f05C4105250C032062A83F7AC2 \ + 'isOperationReady(bytes32)(bool)' "$id" +#+end_src + +#+RESULTS: +#+begin_src bash +true +#+end_src + +Nice, that looks good. Let's execute it: + +#+begin_src bash :exports both verbatim :results output code +target="0x2F50ef6b8e8Ee4E579B17619A92dE3E2ffbD8AD2" +value="0" +calldata="0xd547741f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e34aaf64b29273b7d567fcfc40544c014eee9970" +predecessor="0x0000000000000000000000000000000000000000000000000000000000000000" +salt="0x0000000000000000000000000000000000000000000000000000000000000000" + +cast send \ + --private-key 0x12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625 \ + --rpc-url $(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc) 0x07783C37CAAFe0f05C4105250C032062A83F7AC2 \ + 'execute(address,uint256,bytes,bytes32,bytes32)' "$target" "$value" "$calldata" "$predecessor" "$salt" +#+end_src + +#+RESULTS: +#+begin_src bash + +blockHash 0x32fe6a5224885c605eeca7052cb89925f018965d4ac176b1e99ceff3ad4e9b1c +blockNumber 14076 +contractAddress +cumulativeGasUsed 55161 +effectiveGasPrice 3000000007 +from 0xE34aaF64b29273B7D567FCFc40544c014EEe9970 +gasUsed 55161 +logs 
[{"address":"0x2f50ef6b8e8ee4e579b17619a92de3e2ffbd8ad2","topics":["0xf6391f5c32d9c69d2a47ea670b442974b53935d1edc7fd64eb21e047a839171b","0x0000000000000000000000000000000000000000000000000000000000000000","0x000000000000000000000000e34aaf64b29273b7d567fcfc40544c014eee9970","0x00000000000000000000000007783c37caafe0f05c4105250c032062a83f7ac2"],"data":"0x","blockHash":"0x32fe6a5224885c605eeca7052cb89925f018965d4ac176b1e99ceff3ad4e9b1c","blockNumber":"0x36fc","transactionHash":"0x7f0de30fe4db193ed75aba6f2c3862da8a1f8cef63e4408b8bbd802cc892559e","transactionIndex":"0x0","logIndex":"0x0","removed":false},{"address":"0x07783c37caafe0f05c4105250c032062a83f7ac2","topics":["0xc2617efa69bab66782fa219543714338489c4e9e178271560a91b82c3f612b58","0x2834e50d0fbd2359263689c685f4afd0311de4b150625c349a40a7b2b7e7f34e","0x0000000000000000000000000000000000000000000000000000000000000000"],"data":"0x0000000000000000000000002f50ef6b8e8ee4e579b17619a92de3e2ffbd8ad2000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000044d547741f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e34aaf64b29273b7d567fcfc40544c014eee997000000000000000000000000000000000000000000000000000000000","blockHash":"0x32fe6a5224885c605eeca7052cb89925f018965d4ac176b1e99ceff3ad4e9b1c","blockNumber":"0x36fc","transactionHash":"0x7f0de30fe4db193ed75aba6f2c3862da8a1f8cef63e4408b8bbd802cc892559e","transactionIndex":"0x0","logIndex":"0x1","removed":false}] +logsBloom 
0x00000000000000000000000000000000000000400000000000008000000000000000000000000000000008000000000000200000000800000000000000000000000000000000000000000000000004000200000002000000000000000000002040000000020000000000000000000800000000000000000000000000400000000000000040000000000000000000000000000040000000000000010000000000000000090000000000000000000000000000000000000004000000000000000000000000000000080000000000000000000000000000000000000000000020000000400000000000000000000000000000000000000000001000000200000000 +root +status 1 +transactionHash 0x7f0de30fe4db193ed75aba6f2c3862da8a1f8cef63e4408b8bbd802cc892559e +transactionIndex 0 +type 2 +to 0x07783C37CAAFe0f05C4105250C032062A83F7AC2 +#+end_src + +Okay, I think that looks good. Let's make sure that the role has +actually been revoked. This previously returned ~true~ and we hope +that it's ~false~ now: + + +#+begin_src bash :exports both verbatim :results output code +cast call --rpc-url $(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc) 0x2F50ef6b8e8Ee4E579B17619A92dE3E2ffbD8AD2 'hasRole(bytes32,address)(bool)' 0x0000000000000000000000000000000000000000000000000000000000000000 0xE34aaF64b29273B7D567FCFc40544c014EEe9970 +#+end_src + +#+RESULTS: +#+begin_src bash +false +#+end_src + +Okay this looks good. Just to recap that has happened: + +- We deployed the rollup manager using ~test~ mode which assigns the + admin as the admin account rather than the timelock +- We granted the ~DEFAULT_ADMIN_ROLE~ to the timelock addres +- We scheduled a time locked transaction to revoke the admin account's + ~DEFAULT_ADMIN_ROLE~ +- We executed the transaction after the elapsed amount of time. 
+ +* Ref + +- https://github.com/0xPolygonHermez/zkevm-contracts/blob/v6.0.0-rc.1-fork.9/contracts/v2/PolygonRollupManager.sol +- https://github.com/0xPolygonHermez/zkevm-contracts/blob/v6.0.0-rc.1-fork.9/contracts/PolygonZkEVMTimelock.sol +- https://github.com/OpenZeppelin/openzeppelin-contracts/blob/master/contracts/governance/TimelockController.sol diff --git a/docs/how-to/integrate-da.md b/docs/how-to/integrate-da.md new file mode 100644 index 00000000..9c2a6423 --- /dev/null +++ b/docs/how-to/integrate-da.md @@ -0,0 +1,198 @@ +--- +comments: true +--- + +This document shows you how to integrate a third-party data availability (DAC) solution into your CDK stack. + +## Prerequisites + +!!! tip + Make sure you have upgraded your CDK stack if necessary. + +## Set up contracts + +This section shows you how to create a custom CDK validium DAC contract. + +1. Clone [zkevm-contracts](https://github.com/0xPolygonHermez/zkevm-contracts). + +2. `cd` into `zkevm-contracts` and checkout tag `v6.0.0-rc.1-fork.9`. + +3. Run `npm install` from the root. + +4. `cd` to the `contracts/v2/consensus/validium` directory. + + !!! tip + - Until further notice, these contracts run on the [etrog release](https://polygon.technology/blog/polygon-zkevm-the-etrog-upgrade-is-live-on-mainnet). + +5. Create your custom contract in the same directory, and make sure it implements the [IDataAvailabilityProtocol](https://github.com/0xPolygonHermez/zkevm-contracts/blob/v6.0.0-rc.1-fork.9/contracts/v2/interfaces/IDataAvailabilityProtocol.sol) interface. + + !!! tip + - Use the Polygon DAC implementation contract: [PolygonDataCommittee.sol](https://github.com/0xPolygonHermez/zkevm-contracts/blob/v6.0.0-rc.1-fork.9/contracts/v2/consensus/validium/PolygonDataCommittee.sol) as a guide. + - The contract supports custom smart contract implementation and, through this, DACs can add their custom on-chain verification logic. + +6. 
You can leave the `verifyMessage` function empty but make sure the `getProcotolName` function returns a unique name (such as Avail, Celestia, etc). The following example code comes from the [PolygonDataCommitee.sol](https://github.com/0xPolygonHermez/zkevm-contracts/blob/v6.0.0-rc.1-fork.9/contracts/v2/consensus/validium/PolygonDataCommittee.sol) implementation. + + ```solidity + // Name of the data availability protocol + string internal constant _PROTOCOL_NAME = ""; + + ... + + /** + * @notice Return the protocol name + */ + function getProcotolName() external pure override returns (string memory) { + return _PROTOCOL_NAME; + } + ``` + +7. Update the [/deployment/v2/4_createRollup.ts](https://github.com/0xPolygonHermez/zkevm-contracts/blob/54f58c8b64806429bc4d5c52248f29cf80ba401c/deployment/v2/4_createRollup.ts#L77) script to add your contract name. + + ```ts + const supporteDataAvailabilityProtocols = [""]; + ``` + +8. Make your contract deployable by copying, editing for your custom implementation, and pasting back in, the `if` statement from the [/deployment/v2/4_createRollup.ts#L251](https://github.com/0xPolygonHermez/zkevm-contracts/blob/54f58c8b64806429bc4d5c52248f29cf80ba401c/deployment/v2/4_createRollup.ts#L260) node creation script. + +!!! info "`PolygonValidiumEtrog.sol` solution" + + The [Etrog DAC integration contract](https://github.com/0xPolygonHermez/zkevm-contracts/blob/v6.0.0-rc.1-fork.9/contracts/v2/consensus/validium/PolygonValidiumEtrog.sol) is still work-in-progress at the time of writing but there are some interesting things to note. + + 1. 
It implements the function [`verifyMessage` function](https://github.com/0xPolygonHermez/zkevm-contracts/blob/54f58c8b64806429bc4d5c52248f29cf80ba401c/contracts/v2/consensus/validium/PolygonValidiumEtrog.sol#L231): + + ```solidity + // Validate that the data availability protocol accepts the dataAvailabilityMessage + // note This is a view function, so there's not much risk even if this contract was vulnerable to reentrant attacks + dataAvailabilityProtocol.verifyMessage( + accumulatedNonForcedTransactionsHash, + dataAvailabilityMessage + ); + ``` + + where `accumulatedNonForcedTransactionsHash` is used for verification against the protocol and `dataAvailabilityMessage` is a byte array containing the signature and addresses of the committee in ascending order. + + 2. It also implements a function to set the data availability protocol at [line 287](https://github.com/0xPolygonHermez/zkevm-contracts/blob/54f58c8b64806429bc4d5c52248f29cf80ba401c/contracts/v2/consensus/validium/PolygonValidiumEtrog.sol#L287) to see how they do this. + + ```solidity + /** + * @notice Allow the admin to set a new data availability protocol + * @param newDataAvailabilityProtocol Address of the new data availability protocol + */ + function setDataAvailabilityProtocol( + IDataAvailabilityProtocol newDataAvailabilityProtocol + ) external onlyAdmin { + dataAvailabilityProtocol = newDataAvailabilityProtocol; + + emit SetDataAvailabilityProtocol(address(newDataAvailabilityProtocol)); + } + ``` + +## Deploy Docker image + +This section shows you how to deploy the Docker image containing your custom DAC contract. + +1. Edit the following parameters in the [`docker/scripts/v2/deploy_parameters_docker.json`](https://github.com/0xPolygonHermez/zkevm-contracts/blob/v6.0.0-rc.1-fork.9/docker/scripts/v2/deploy_parameters_docker.json) file: + + ```json + "minDelayTimelock": 3600, // BECOMES "minDelayTimelock": 1, + ``` + +2. 
Edit the following parameters in the [`/docker/scripts/v2/create_rollup_parameters_docker.json`](https://github.com/0xPolygonHermez/zkevm-contracts/blob/v6.0.0-rc.1-fork.9/docker/scripts/v2/create_rollup_parameters_docker.json) file: + + ```json + "consensusContract": "PolygonValidiumEtrog", // CHANGE THIS TO YOUR CONTRACT NAME + "dataAvailabilityProtocol": "PolygonDataCommittee", // ADD THIS PARAMETER + ``` + +3. Run the following command: + + ```sh + cp docker/scripts/v2/hardhat.example.paris hardhat.config.ts + ``` + +4. Edit [docker/scripts/v2/deploy-docker.sh](https://github.com/0xPolygonHermez/zkevm-contracts/blob/v6.0.0-rc.1-fork.9/docker/scripts/v2/deploy-docker.sh) to add the following line: + + ```sh + sudo chmod -R go+rxw docker/gethData before docker build -t hermeznetwork/geth-zkevm-contracts -f docker/Dockerfile . + ``` + +5. In the [deployment/v2/4_createRollup.ts](https://github.com/0xPolygonHermez/zkevm-contracts/blob/54f58c8b64806429bc4d5c52248f29cf80ba401c/deployment/v2/4_createRollup.ts#L290) file, uncomment the 290-291, and add a `console.log` output that grabs the address of the DAC: + + ```ts + // Setup data committee to 0 + await (await polygonDataCommittee?.setupCommittee(0, [], "0x")).wait(); + console.log(dataAvailabilityProtocol, "deployed to:", polygonDataCommittee.target); + ``` + +6. Build the image with the following commands: + + ```sh + sudo npx hardhat compile + sudo npm run docker:contracts + ``` + +7. Tag the image with the following command, where `XXXX` is custom: + + ```sh + docker image tag hermeznetwork/geth-zkevm-contracts hermeznetwork/geth-cdk-validium-contracts:XXXX + ``` + +## Set up the node + +This section shows you how to set up your CDK node that sends and receives data from the DAC. + +1. 
Create a package that implements the [`DABackender`](https://github.com/0xPolygon/cdk-validium-node/blob/b6ee6cb087099c2e97f3e596f84672fc021b517a/dataavailability/interfaces.go#L14) interface and place it under the [`cdk-validium-node/tree/develop/dataavailability`](https://github.com/0xPolygon/cdk-validium-node/tree/develop/dataavailability) directory. + +2. Add a new constant to the [/dataavailability/config.go](https://github.com/0xPolygon/cdk-validium-node/blob/b6ee6cb087099c2e97f3e596f84672fc021b517a/dataavailability/config.go) file that represents the DAC. + + ```go + const ( + // DataAvailabilityCommittee is the DAC protocol backend + DataAvailabilityCommittee DABackendType = "DataAvailabilityCommittee" + ) + ``` + + where `DataAvailabilityCommittee` matches the `_PROTOCOL_NAME` see in the [Set up contracts](#set-up-contracts) section. + +3. _OPTIONAL_: Add a config struct to the new package inside the main config.go file so that your package can receive custom configurations using the node’s main config file. + +4. Instantiate your package and use it to create the main data availability instance, as done in the Polygon implementation. + +## Test the integration + +!!! tip + - By default, all E2E tests run using the DAC. + - It is possible to run the E2E tests using other DAC backends by amending the `test.node.config.toml` file. + +To test your DAC integration, follow the steps below. + +1. Create an E2E test that uses your protocol by following the [test/e2e/datacommittee_test.go](https://github.com/0xPolygon/cdk-validium-node/blob/develop/test/e2e/datacommittee_test.go) example. + +2. Generate a docker image containing the changes to the node: + + ```sh + make build-docker + ``` + +3. Build the genesis file for the node: + + - First, clone the [cdk-validium-node](https://github.com/0xPolygon/cdk-validium-node) repo and checkout v0.6.4-cdk.5. 
+ - Edit the `test/config/test.genesis.config.json` file taking values in the generated output files created previously in the contract repo's `docker/deploymentOutputs` folder: + + !!! info "Parameters to change" + `l1Config.polygonZkEVMAddres`s ==> `rollupAddress` @ `create_rollup_output.json` + `l1Config.polygonRollupManagerAddress` ==> `polygonRollupManager` @ `deploy_output.json` + `l1Config.polTokenAddress` ==> `polTokenAddress` @ `deploy_output.json` + `l1Config.polygonZkEVMGlobalExitRootAddress` ==> `polygonZkEVMGlobalExitRootAddress` @ `deploy_output.json` + `rollupCreationBlockNumber` ==> `createRollupBlock` @ `create_rollup_output.json` + `rollupManagerCreationBlockNumber` ==> `deploymentBlockNumber` @ `deploy_output.json` + `root` ==> `root` @ `genesis.json` + `genesis` ==> `genesis` @ `genesis.json` + + !!! important + - You should follow this step every time you build a new Docker image. + +4. Update the contracts Docker image tag with the custom tag you created at the [deploy Docker image](#deploy-docker-image) section, step 7, by amending the node's [Docker compose file](https://github.com/0xPolygon/cdk-validium-node/blob/develop/test/docker-compose.yml). + +5. Modify the Makefile so it can run your test. Use the [Polygon DAC Makefile](https://github.com/0xPolygon/cdk-validium-node/blob/develop/test/Makefile) as an example. + diff --git a/docs/how-to/manage-policies.md b/docs/how-to/manage-policies.md new file mode 100644 index 00000000..c2305fb6 --- /dev/null +++ b/docs/how-to/manage-policies.md @@ -0,0 +1,94 @@ +--- +comments: true +--- + +# Manage allowlists, and more, with policies + +!!! important + Policies are currently only available in validium mode. + +Managing allowlists, denylists, and ACLs is done with policies. + +## Policy overview + +A **policy** is a set of rules that govern what actions are allowed or denied in the transaction pool. 
+ +- **Fine-grained control**: Developers can specify policies at a granular level, allowing or denying specific actions for specific addresses. +- **Dynamic updates**: Policies and ACLs can be updated on-the-fly without requiring a node restart. +- **Database-backed**: All policy data is stored in a PostgreSQL database. +- **Extensible**: New policies can be easily added to the system. + +## Validium node + +### Policies + +Currently, there are two defined policies: + +- **SendTx**: governs whether an address may send transactions to the pool. +- **Deploy**: governs whether an address may deploy a contract. + +The CDK validium node offers policy management features that include allowlisting[^1], denylisting[^2], and access control lists (ACLs)[^3]. These features are beneficial for validium-based app-chains that require fine-grained control over transaction pools. + +### Code definitions + +- **Policy management**: [`cmd/policy.go`](https://github.com/0xPolygon/cdk-validium-node/blob/5399f8859af9ffb0eb693bf395e1f09b53b154de/cmd/policy.go) contains the core logic of policy management. +- **Policy definitions**: [`pool/policy.go`](https://github.com/0xPolygon/cdk-validium-node/blob/5399f8859af9ffb0eb693bf395e1f09b53b154de/pool/policy.go) contains structs and utility functions for policies and ACLs. +- **Data**: [`pgpoolstorage/policy.go`](https://github.com/0xPolygon/cdk-validium-node/blob/5399f8859af9ffb0eb693bf395e1f09b53b154de/pool/policy.go) interacts with the data layer (PostgreSQL database) to store and retrieve policy and ACL data. +- **Policy interface**: [`pool/interfaces.go`](https://github.com/0xPolygon/cdk-validium-node/blob/5399f8859af9ffb0eb693bf395e1f09b53b154de/pool/interfaces.go#L42) contains a `policy` interface which defines the methods that policies must implement. 
+ +### How to use a policy + +| Command name | Description | Flags & parameters | +|--------------|-------------------------------------------------------|--------------------------------------------------------------------------------------------------------| +| `policy add` | Add address(es) to a policy exclusion list | `--policy` (or `-p`): Policy name
`--csv`: CSV file with addresses | +| `policy remove` | Remove address(es) from a policy exclusion list | `--policy` (or `-p`): Policy name
`--csv`: CSV file with addresses to remove | +| `policy clear` | Clear all addresses from a policy's exclusion list | `--policy` (or `-p`): Policy name | +| `policy describe` | Describe the default actions for the policies or a specific policy | `--policy` (or `-p`): Policy name (optional)
`--no-header`: Omit header in output (optional) | +| `policy update` | Update the default action for a policy | `--policy` (or `-p`): Policy name
`--allow`: Set policy to 'allow'
`--deny`: Set policy to 'deny' | + +!!! note + The examples demonstrate a `deploy` policy. + +#### Add addresses + +To add one or more addresses to a specific policy, you can use the `policy add` command. If you have a CSV file containing the addresses, you can use the --csv` flag. + +```bash +docker exec -it cdk-validium-aggregator /app/cdk-validium-node policy add --policy deploy 0xAddress1 +``` + +#### Remove addresses + +To remove addresses from a policy, you can use the `policy remove` command. + +```bash +# Remove a single address from the 'deploy' policy +docker exec -it cdk-validium-aggregator /app/cdk-validium-node policy remove --policy deploy 0xAddress1 + +# Remove multiple addresses from the 'deploy' policy using a CSV file +docker exec -it cdk-validium-aggregator /app/cdk-validium-node policy remove --policy deploy --csv addresses.csv +``` + +#### Clear all addresses + +To remove all addresses from a policy's ACL, you can use the `policy clear` command. + +```bash +docker exec -it cdk-validium-aggregator /app/cdk-validium-node policy clear --policy deploy +``` + +#### Get information about a policy + +To get information about a specific policy or all policies, you can use the `policy describe` command. + +```bash +# Describe a specific policy +docker exec -it cdk-validium-aggregator /app/cdk-validium-node policy describe --policy deploy + +# Describe all policies +docker exec -it cdk-validium-aggregator /app/cdk-validium-node policy describe +``` + +[^1]: **Allowlisting**: The process of explicitly allowing addresses to perform certain actions. +[^2]: **Denylisting**: The process of explicitly denying addresses from performing certain actions. +[^3]: **ACL (access control list)**: A list of addresses that are exceptions to a given policy. 
diff --git a/docs/how-to/migrate/forkid-7-to-9 b/docs/how-to/migrate/forkid-7-to-9 deleted file mode 100644 index e69de29b..00000000 diff --git a/docs/how-to/migrate/forkid-7-to-9.md b/docs/how-to/migrate/forkid-7-to-9.md new file mode 100644 index 00000000..a7e0544e --- /dev/null +++ b/docs/how-to/migrate/forkid-7-to-9.md @@ -0,0 +1,313 @@ +--- +comments: true +--- + +This document shows you how to migrate from fork 7 to fork 9 using the Kurtosis package. + +!!! tip + These steps are similar to a production build, except you have to use a [timelock](https://github.com/0xPolygonHermez/zkevm-contracts/blob/v5.0.1-rc.2-fork.8/contracts/PolygonZkEVMTimelock.sol) contract to make the calls. + +## Prerequisite steps and set up + +1. Run a clean command to remove any lingering state: + + ```sh + kurtosis clean --all + ``` + +2. Downgrade all the necessary parameters to switch back to fork 7. Open the `params.yml` file and make the following changes: + + ```diff + diff --git a/params.yml b/params.yml + index 175619f..a72d452 100644 + --- a/params.yml + +++ b/params.yml + @@ -29,13 +29,13 @@ args: + deployment_suffix: "-001" + + # Docker images and repositories used to spin up services. + - zkevm_prover_image: hermeznetwork/zkevm-prover:v6.0.0 + + zkevm_prover_image: hermeznetwork/zkevm-prover:v4.0.19 + + - zkevm_node_image: 0xpolygon/cdk-validium-node:0.6.4-cdk.2 + + zkevm_node_image: 0xpolygon/cdk-validium-node:0.5.13-cdk.3 + + - zkevm_da_image: 0xpolygon/cdk-data-availability:0.0.7 + + zkevm_da_image: 0xpolygon/cdk-data-availability:0.0.6 + + zkevm_contracts_image: leovct/zkevm-contracts # the tag is automatically replaced by the value of /zkevm_rollup_fork_id/ + @@ -160,7 +160,7 @@ args: + zkevm_rollup_chain_id: 10101 + + # The fork id of the new rollup. It indicates the prover (zkROM/executor) version. + - zkevm_rollup_fork_id: 9 + + zkevm_rollup_fork_id: 7 + + # The consensus contract name of the new rollup. + zkevm_rollup_consensus: PolygonValidiumEtrog + ```
Now kick-off a full redeploy: + + ```sh + kurtosis run --enclave cdk-v1 --args-file params.yml --image-download always . + ``` + +4. Confirm onchain that fork 7 is running: + + ```sh + kurtosis files download cdk-v1 genesis /tmp/fork-7-test + cast call \ + --rpc-url "$(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc)" \ + "$(jq -r '.L1Config.polygonRollupManagerAddress' /tmp/fork-7-test/genesis.json)" \ + "rollupIDToRollupData(uint32)(address,uint64,address,uint64,bytes32,uint64,uint64,uint64,uint64,uint64,uint64,uint8)" 1 + ``` + + You should see `7` showing as the 4th parameter. + +5. Send some test transactions to ensure batches are verified as expected. + + ```sh + export ETH_RPC_URL="$(kurtosis port print cdk-v1 zkevm-node-rpc-001 http-rpc)" + cast send --legacy --private-key "$(yq -r .args.zkevm_l2_admin_private_key params.yml)" --value 0.01ether 0x0000000000000000000000000000000000000000 + cast rpc zkevm_batchNumber + cast rpc zkevm_virtualBatchNumber + cast rpc zkevm_verifiedBatchNumber + ``` + +After a few minutes, the number of verified batches should increase (the first batch checked does not count). + +## Make a clean stop of the sequencer + +1. Before attempting the upgrade, we need to make a clean stop of the sequencer. To do this, pick a halting batch number by updating the `node-config.toml` file like this. Make sure to pick a batch number higher than the current batch number!
+ + ```sh + cast to-dec $(cast rpc zkevm_batchNumber | sed 's/"//g') + ``` + + ```diff + diff --git a/templates/trusted-node/node-config.toml b/templates/trusted-node/node-config.toml + index 6c9b9fa..372d904 100644 + --- a/templates/trusted-node/node-config.toml + +++ b/templates/trusted-node/node-config.toml + @@ -117,7 +117,7 @@ StateConsistencyCheckInterval = "5s" + BatchMaxDeltaTimestamp = "20s" + L2BlockMaxDeltaTimestamp = "4s" + ResourceExhaustedMarginPct = 10 + - HaltOnBatchNumber = 0 + + HaltOnBatchNumber = 64 + SequentialBatchSanityCheck = false + SequentialProcessL2Block = true + [Sequencer.StreamServer] + ``` + +2. Re-run Kurtosis: + + ```sh + kurtosis run --enclave cdk-v1 --args-file params.yml --image-download always . + ``` + +3. Wait for the sequencer to halt and the verified batch to equal the latest batch and check the logs. + + ```sh + kurtosis service logs cdk-v1 zkevm-node-sequencer-001 --follow + ``` + + You should see error logs that look like this: + + ```json + {"level":"error","ts":1711481674.517157,"caller":"sequencer/finalizer.go:806","msg":"halting finalizer, error: finalizer reached stop sequencer on batch number: 64%!(EXTRA string=\n/home/runner/work/cdk-validium-node/cdk-validium-node/log/log.go:142 github.com/0xPolygonHermez/zkevm-node/log.appendStackTraceMaybeArgs()\n/home/runner/work/cdk-validium-node/cdk-validium-node/log/log.go:251 github.com/0xPolygonHermez/zkevm-node/log.Errorf()\n/home/runner/work/cdk-validium-node/cdk-validium-node/sequencer/finalizer.go:806 github.com/0xPolygonHermez/zkevm-node/sequencer.(*finalizer).Halt()\n/home/runner/work/cdk-validium-node/cdk-validium-node/sequencer/batch.go:221 github.com/0xPolygonHermez/zkevm-node/sequencer.(*finalizer).closeAndOpenNewWIPBatch()\n/home/runner/work/cdk-validium-node/cdk-validium-node/sequencer/batch.go:163 github.com/0xPolygonHermez/zkevm-node/sequencer.(*finalizer).finalizeWIPBatch()\n/home/runner/work/cdk-validium-node/cdk-validium-node/sequencer/finalizer.go:330 
github.com/0xPolygonHermez/zkevm-node/sequencer.(*finalizer).finalizeBatches()\n/home/runner/work/cdk-validium-node/cdk-validium-node/sequencer/finalizer.go:166 github.com/0xPolygonHermez/zkevm-node/sequencer.(*finalizer).Start()\n)","pid":7,"version":"v0.1.0","stacktrace":"github.com/0xPolygonHermez/zkevm-node/sequencer.(*finalizer).Halt\n\t/home/runner/work/cdk-validium-node/cdk-validium-node/sequencer/finalizer.go:806\ngithub.com/0xPolygonHermez/zkevm-node/sequencer.(*finalizer).closeAndOpenNewWIPBatch\n\t/home/runner/work/cdk-validium-node/cdk-validium-node/sequencer/batch.go:221\ngithub.com/0xPolygonHermez/zkevm-node/sequencer.(*finalizer).finalizeWIPBatch\n\t/home/runner/work/cdk-validium-node/cdk-validium-node/sequencer/batch.go:163\ngithub.com/0xPolygonHermez/zkevm-node/sequencer.(*finalizer).finalizeBatches\n\t/home/runner/work/cdk-validium-node/cdk-validium-node/sequencer/finalizer.go:330\ngithub.com/0xPolygonHermez/zkevm-node/sequencer.(*finalizer).Start\n\t/home/runner/work/cdk-validium-node/cdk-validium-node/sequencer/finalizer.go:166"} + ``` + +4. Wait for the verified batch number to catch up to the trusted batch number: + + ```sh + export ETH_RPC_URL="$(kurtosis port print cdk-v1 zkevm-node-rpc-001 http-rpc)" + cast rpc zkevm_batchNumber + cast rpc zkevm_verifiedBatchNumber + ``` + +5. 
When those two numbers are the same, stop the services that are going to be upgraded: + + ```sh + kurtosis service stop cdk-v1 zkevm-executor-pless-001 + kurtosis service stop cdk-v1 zkevm-node-aggregator-001 + kurtosis service stop cdk-v1 zkevm-node-eth-tx-manager-001 + kurtosis service stop cdk-v1 zkevm-node-l2-gas-pricer-001 + kurtosis service stop cdk-v1 zkevm-node-rpc-001 + kurtosis service stop cdk-v1 zkevm-node-rpc-pless-001 + kurtosis service stop cdk-v1 zkevm-node-sequence-sender-001 + kurtosis service stop cdk-v1 zkevm-node-sequencer-001 + kurtosis service stop cdk-v1 zkevm-node-synchronizer-001 + kurtosis service stop cdk-v1 zkevm-node-synchronizer-pless-001 + kurtosis service stop cdk-v1 zkevm-prover-001 + ``` + +## Smart contract calls + +1. From another directory, make the required smart contract calls (this should not be done from the `kurtosis-cdk` directory): + + ```sh + git clone git@github.com:0xPolygonHermez/zkevm-contracts.git + pushd zkevm-contracts/ + git reset --hard a38e68b5466d1997cea8466dbd4fc8dacd4e11d8 + npm install + printf "[profile.default]\nsrc = 'contracts'\nout = 'out'\nlibs = ['node_modules']\n" > foundry.toml + forge build + ``` + +2. Deploy a new verifier. + + !!! tip + This step isn't strictly necessary but good to do because in some cases you need a new verifier contract. + + ```sh + forge create \ + --json \ + --rpc-url "$(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc)" \ + --private-key 0x12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625 \ + contracts/mocks/VerifierRollupHelperMock.sol:VerifierRollupHelperMock > verifier-out.json + ``` + +3. 
Create the `PolygonValidiumStorageMigration` contract: + + ```sh + export ETH_RPC_URL="$(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc)" + ger="$(kurtosis service exec cdk-v1 contracts-001 "jq -r .polygonZkEVMGlobalExitRootAddress /opt/zkevm/combined.json" | tail -n +2)" + pol="$(kurtosis service exec cdk-v1 contracts-001 "jq -r .polTokenAddress /opt/zkevm/combined.json" | tail -n +2)" + bridge="$(kurtosis service exec cdk-v1 contracts-001 "jq -r .polygonZkEVMBridgeAddress /opt/zkevm/combined.json" | tail -n +2)" + mngr="$(kurtosis service exec cdk-v1 contracts-001 "jq -r .polygonRollupManager /opt/zkevm/combined.json" | tail -n +2)" + forge create \ + --json \ + --private-key 0x12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625 \ + contracts/v2/consensus/validium/migration/PolygonValidiumStorageMigration.sol:PolygonValidiumStorageMigration \ + --constructor-args $ger $pol $bridge $mngr > new-consensus-out.json + ``` + +4. Add a new rollup type to the rollup manager: + + ```sh + genesis="$(kurtosis service exec cdk-v1 contracts-001 "jq -r .genesis /opt/zkevm/combined.json" | tail -n +2)" + cast send \ + --json \ + --private-key 0x12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625 \ + $mngr \ + 'addNewRollupType(address,address,uint64,uint8,bytes32,string)' \ + "$(jq -r '.deployedTo' new-consensus-out.json)" \ + "$(jq -r '.deployedTo' verifier-out.json)" \ + 9 0 "$genesis" 'test!!!' > add-rollup-type-out.json + ``` + +5. Get your new rollup type id: + + ```sh + jq -r '.logs[0].topics[1]' add-rollup-type-out.json + ``` + +6. Update the rollup with the id: + + ```sh + rollup="$(kurtosis service exec cdk-v1 contracts-001 "jq -r .rollupAddress /opt/zkevm/combined.json" | tail -n +2)" + cast send \ + --json \ + --private-key 0x12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625 \ + $mngr \ + 'updateRollup(address,uint32,bytes)' \ + "$rollup" 2 0x > update-rollup-type-out.json + ``` + +7. Verify the updated rollupid. 
Previously the 4th value was a `7` and now it should be a `9`. + + ```sh + cast call \ + "$(jq -r '.L1Config.polygonRollupManagerAddress' /tmp/fork-7-test/genesis.json)" \ + "rollupIDToRollupData(uint32)(address,uint64,address,uint64,bytes32,uint64,uint64,uint64,uint64,uint64,uint64,uint8)" 1 + ``` + +8. Set up the data availability protocol again: + + ```sh + dac="$(kurtosis service exec cdk-v1 contracts-001 "jq -r .polygonDataCommittee /opt/zkevm/combined.json" | tail -n +2)" + cast send \ + --json \ + --private-key "0x12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625" \ + "$rollup" 'setDataAvailabilityProtocol(address)' $dac > set-dac-out.json + ``` + +## Node upgrade + +At this stage, the smart contracts are upgraded. However, we still need to start the nodes again. + +!!! warning + - This procedure is very sensitive. + - Ensure the synchronizer starts first. + +We're going to revert the parameters back to the versions of the node that worked with fork 9, and specify that _ONLY_ stage 3 should run. + +1. Update the `params.yml` file as follows: + + ```sh + yq -Y --in-place '.deploy_l1 = false' params.yml + yq -Y --in-place '.deploy_zkevm_contracts_on_l1 = false' params.yml + yq -Y --in-place '.deploy_databases = false' params.yml + yq -Y --in-place '.deploy_cdk_bridge_infra = false' params.yml + yq -Y --in-place '.deploy_zkevm_permissionless_node = false' params.yml + yq -Y --in-place '.deploy_observability = false' params.yml + ``` + +2. Remove the `HaltOnBatchNumber` setting that we added earlier. + +3. Run Kurtosis to bring up the main node components. + + ```sh + kurtosis run --enclave cdk-v1 --args-file params.yml --image-download always . + ``` + +4. The core services are now running and we should be able to send a transaction and see the batch numbers moving through their normal progression.
+ + ```sh + export ETH_RPC_URL="$(kurtosis port print cdk-v1 zkevm-node-rpc-001 http-rpc)" + cast send --legacy --private-key "$(yq -r .args.zkevm_l2_admin_private_key params.yml)" --value 0.01ether 0x0000000000000000000000000000000000000000 + cast rpc zkevm_batchNumber + cast rpc zkevm_virtualBatchNumber + cast rpc zkevm_verifiedBatchNumber + ``` + +## Troubleshooting + +1. If you clone the `zkevm-contracts` repo in the same folder as `kurtosis-cdk`, you may see this error when you try to deploy the stack: + + ```sh + Error: An error occurred running command 'run' + Caused by: An error occurred calling the run function for command 'run' + Caused by: An error starting the Kurtosis code execution '.' + Caused by: Error uploading package '.' prior to executing it + Caused by: There was an error compressing module '.' before upload + Caused by: An error occurred creating the archive from the files at '.' + Caused by: The files you are trying to upload, which are now compressed, exceed or reach 100mb. Manipulation (i.e. uploads or downloads) of files larger than 100mb is currently disallowed in Kurtosis. + ``` + +2. 
You may also see errors like these: + + ```json + {"level":"warn","ts":1711502381.03938,"caller":"etherman/etherman.go:661","msg":"Event not registered: {Address:0x1Fe038B54aeBf558638CA51C91bC8cCa06609e91 Topics:[0xd331bd4c4cd1afecb94a225184bded161ff3213624ba4fb58c4f30c5a861144a] Data:[0 0 0 0 0 0 0 0 0 0 0 0 90 104 150 169 140 75 124 126 143 22 209 119 199 25 161 216 86 185 21 76] BlockNumber:108 TxHash:0x1bb5e714dd96434ded2d818458cc517cf7b30f5787dbb3aedb667e5e3e96808e TxIndex:0 BlockHash:0xdf5850cd5a8975859595649a05ce245f02953e84af627e9b22a1f8381077f057 Index:0 Removed:false}","pid":7,"version":"0.6.4+cdk"} + ``` + + You can check them directly from the rpc: + + ```sh + cast logs \ + --rpc-url "$(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc)" \ + --address 0x1Fe038B54aeBf558638CA51C91bC8cCa06609e91 \ + --from-block 108 \ + --to-block 108 + ``` + + You can reverse an event with the following script: + + ```sh + cat compiled-contracts/*.json | jq '.abi[] | select(.type == "event") | .type = "function"' | jq -s | polycli abi decode | grep d33 + cast sig-event 'SetDataAvailabilityProtocol(address)' + ``` + +3. In the above example, it looks like the unregistered event is a call to `SetDataAvailabilityProtocol(address)`. diff --git a/docs/how-to/quick-test-stack.md b/docs/how-to/quick-test-stack.md new file mode 100644 index 00000000..b8d63d61 --- /dev/null +++ b/docs/how-to/quick-test-stack.md @@ -0,0 +1,61 @@ +--- +comments: true +--- + +A quick and easy method for testing a running CDK stack, whether in validium or rollup mode, is by sending a zero-value transaction and examining the result. + +## Get the URL + +First inspect the logs to find the `zkevm-node-rpc` details. 
For example, in the Kurtosis stack you should see something like: + +```sh +zkevm-node-rpc-001 http-rpc: 8123/tcp -> http://127.0.0.1:32803 +``` + +## Send transaction with cast + +In a terminal window, run the following command where the mnemonic is used just for testing and the address can be any valid account address. + +```sh +cast send --legacy --mnemonic 'code code code code code code code code code code code quality' --value 0 --gas-price 0 --rpc-url http://127.0.0.1:8123 0x0bb7AA0b4FdC2D2862c088424260e99ed6299148 +``` + +You should see something like this as output: + +```sh +blockHash 0x5d6d45f46e54c5d0890dd8a4ede989dc8042d7d3aeada375ea11d2e77c91a298 +blockNumber 1 +contractAddress +cumulativeGasUsed 21000 +effectiveGasPrice 0 +from 0x85dA99c8a7C2C95964c8EfD687E95E632Fc533D6 +gasUsed 21000 +logs [] +logsBloom 0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 +root 0x97b15537641583db08f1e3db15cb1e89212ed8d147670a11f93f368d5960e72f +status 1 +transactionHash 0xd5443cff8dcc1147ead09d978d3abe9179615aa3eecbe4819c6768390bc467a3 +transactionIndex 0 +type 0 +to 0x66ec…89fd +``` + +Status `1` signifies a successful transaction. + +## Send transaction with MetaMask + +1. Add the network details manually to MetaMask and accept the defaults and auto-populating fields. + + ![MetaMask manual network entry](../img/how-to/manual-network-entry.png) + +2. In local/test setup you will find a pre-funded account with private key: `0x12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625`. 
Import the account into MetaMask. + +
+ ![Import pre-funded account](../img/how-to/account.png){ width=45% } +
+ +3. Send a transaction to another MetaMask account. + + ![Transaction sending success](../img/how-to/tx-success.png) + +
\ No newline at end of file diff --git a/docs/how-to/use-native-token.md b/docs/how-to/use-native-token.md index a80f7c12..58dc49f5 100644 --- a/docs/how-to/use-native-token.md +++ b/docs/how-to/use-native-token.md @@ -181,9 +181,11 @@ The first derived private key from the `code...quality` mnemonic is ![Balance of receiving account](../img/how-to/gas-token-img/12_bridge.png) -## Using cast to do a bridge claim +## Using cast to withdraw assets from the bridge -The following work-in-progress cast script processes a bridge claim. Feel free to go through line-by-line and tweak where necessary. +The following work-in-progress cast script processes a bridge claim. + +Feel free to go through line-by-line and tweak where necessary. ```sh #!/bin/bash diff --git a/docs/img/how-to/account.png b/docs/img/how-to/account.png new file mode 100644 index 00000000..2af19c34 Binary files /dev/null and b/docs/img/how-to/account.png differ diff --git a/docs/img/how-to/manual-network-entry.png b/docs/img/how-to/manual-network-entry.png new file mode 100644 index 00000000..713fc239 Binary files /dev/null and b/docs/img/how-to/manual-network-entry.png differ diff --git a/docs/img/how-to/tx-success.png b/docs/img/how-to/tx-success.png new file mode 100644 index 00000000..470b97a0 Binary files /dev/null and b/docs/img/how-to/tx-success.png differ diff --git a/docs/index.md b/docs/index.md index 2a7554b2..18fb7cc3 100644 --- a/docs/index.md +++ b/docs/index.md @@ -6,9 +6,11 @@ hide: # Overview -Kurtosis is a developer platform that allows you to package and launch a backend stack without becoming overburdened with configuration details. +[Kurtosis](https://docs.kurtosis.com/) is a developer platform that allows you to package and launch a backend stack without becoming overburdened with configuration details. -The Polygon team has implemented a Kurtosis build for the CDK stack which includes all zkEVM node components, services, and a validium option. 
+The Polygon team has implemented a stable CDK build with Kurtosis which includes all node components and services, as well as a validium option. + +The documentation in this getting started section relates to the CDK Kurtosis implementation. ## CDK architecture @@ -24,7 +26,5 @@ The diagram below shows how CDK stack components may interact with each other an The [quick start documents](quickstart/deploy-stack.md) show you how to deploy a private, portable, and modular Polygon CDK developer stack on [Kurtosis](https://github.com/kurtosis-tech/kurtosis). -The deployment document is aimed at users and developers who do not necessarily want to run the latest CDK versions, or follow the full tutorials, and are happy to play around with an experimental set up. -
diff --git a/main.star b/main.star index 1cc71710..b516fd6a 100644 --- a/main.star +++ b/main.star @@ -5,7 +5,7 @@ cdk_central_environment_package = import_module("./cdk_central_environment.star" cdk_bridge_infra_package = import_module("./cdk_bridge_infra.star") zkevm_permissionless_node_package = import_module("./zkevm_permissionless_node.star") observability_package = import_module("./observability.star") - +blutgang_package = import_module("./cdk_blutgang.star") def run( plan, @@ -16,6 +16,7 @@ def run( deploy_cdk_central_environment=True, deploy_zkevm_permissionless_node=True, deploy_observability=True, + deploy_blutgang=True, args={ "deployment_suffix": "-001", "zkevm_prover_image": "hermeznetwork/zkevm-prover:v6.0.0", @@ -102,6 +103,9 @@ def run( "zkevm_aggregator_host": "zkevm-node-aggregator-001", "genesis_file": "templates/permissionless-node/genesis.json", "polycli_version": "v0.1.42", + "blutgang_image": "makemake1337/blutgang:latest", + "blutgang_rpc_port": "55555", + "blutgang_admin_port": "55556", }, ): """Deploy a Polygon CDK Devnet with various configurable options. 
@@ -187,3 +191,11 @@ def run( observability_package.run(plan, observability_args) else: plan.print("Skipping the deployment of the observability stack") + + # Deploy blutgang for caching + if deploy_blutgang: + plan.print("Deploying blutgang") + blutgang_args = dict(args) + blutgang_package.run(plan, blutgang_args) + else: + plan.print("Skipping the deployment of blutgang") \ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml index 801696a3..7763a2c3 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -24,19 +24,20 @@ theme: nav: - Overview: index.md - - Getting started: - - For technical users: - - Deploy the CDK stack: quickstart/deploy-stack.md - - Set up a permissionless node: quickstart/set-up-node.md - - Observability: quickstart/observability.md - - For developers: - - Breakdown the deployment: quickstart/breakdown-deployment.md - - zkEVM contracts caching: quickstart/zkevm-cache-contracts.md - - Connect to CDK testnets: quickstart/connect-testnet.md + - Deploy and manage a CDK stack: + - Deploy the CDK stack: quickstart/deploy-stack.md + - Set up a permissionless node: quickstart/set-up-node.md + - Observability: quickstart/observability.md + - Breakdown the deployment: quickstart/breakdown-deployment.md + - zkEVM contracts caching: quickstart/zkevm-cache-contracts.md - How to: - #- Migrate: - # - Fork id 7 to 9: how-to/migrate/forkid-7-to-9.md - - Use a native token: how-to/use-native-token.md + - Migrate: + - Fork id 7 to 9: how-to/migrate/forkid-7-to-9.md + - Mint, bridge, and use tokens: how-to/use-native-token.md + - Integrate a DAC: how-to/integrate-da.md + - Manage allowlists with policies: how-to/manage-policies.md + - Quickly test a running stack: how-to/quick-test-stack.md + - Connect to CDK testnets: quickstart/connect-testnet.md diff --git a/params.yml b/params.yml index f7fd5646..92e82a38 100644 --- a/params.yml +++ b/params.yml @@ -23,6 +23,9 @@ deploy_zkevm_permissionless_node: true # Deploy observability stack. 
deploy_observability: true +# Deploy eth loadbalancer (blutgang). +deploy_blutgang: true + args: # Suffix appended to service names. # Note: It should be a string. @@ -145,6 +148,7 @@ args: ## L1 configuration. l1_chain_id: 271828 l1_preallocated_mnemonic: code code code code code code code code code code code quality + l1_funding_amount: 100ether l1_rpc_url: http://el-1-geth-lighthouse:8545 l1_ws_url: ws://el-1-geth-lighthouse:8546 # https://github.com/kurtosis-tech/ethereum-package/tree/main?tab=readme-ov-file#configuration @@ -179,3 +183,9 @@ args: ## Tools versions polycli_version: v0.1.42 + + ## blutgang configuration. + blutgang_image: makemake1337/blutgang:0.3.5 + + blutgang_rpc_port: 55555 + blutgang_admin_port: 55556 \ No newline at end of file diff --git a/templates/blutgang/blutgang-config.toml b/templates/blutgang/blutgang-config.toml new file mode 100644 index 00000000..9e870e87 --- /dev/null +++ b/templates/blutgang/blutgang-config.toml @@ -0,0 +1,86 @@ +# Config for blutgang goes here +[blutgang] +# Clear the cache DB on startup +do_clear = true +# Where to bind blutgang to +address = "0.0.0.0:{{.blutgang_rpc_port}}" +# Moving average length for the latency +ma_length = 100 +# Sort RPCs by latency on startup. Recommended to leave on. +sort_on_startup = true +# Enable health checking +health_check = true +# Enable content type header checking. Set this to `true` if you want +# Blutgang to be JSON-RPC compliant. +header_check = true +# Acceptable time to wait for a response in ms +ttl = 30 +# How many times to retry a request before giving up +max_retries = 32 +# Block time in ms, used as a sanity check when not receiving subscriptions +expected_block_time = 20000 +# Time between health checks in ms +health_check_ttl = 400 +# Supress the health check running info messages +supress_rpc_check = false + +# Note: the admin namespace contains volatile functions and +# should not be exposed publicly. 
+[admin] +# Enable the admin namespace +enabled = true +# Address for the admin RPC +address = "0.0.0.0:{{.blutgang_admin_port}}" +# Only allow read-only methods +# Recommended `true` unless you 100% need write methods +readonly = true +# Enable the use of JWT for auth +# Should be on if exposing to the internet +jwt = false +# jwt token +key = "" + +# Sled config +# Sled is the database we use for our cache, for more info check their docs +[sled] +# Path to db +db_path = "./blutgang-cache" +# sled mode. Can be HighThroughput/LowSpace +mode = "HighThroughput" +# Cache size in bytes. +cache_capacity = 1000000000 +# Use zstd compression. Reduces size 60-70%, +# and increases CPU and latency by around 10% for db writes and 2% for reads. +# If storage constrained, it's fine to have it be on. +compression = false +# Print DB profile when dropped. Doesn't do anything for now. +print_profile = false +# Frequency of flushes in ms +flush_every_ms = 240 + +# Add separate RPCs as TOML tables +# DO NOT name an rpc `blutgang`, `admin`, or `sled` + +[sequencer] +url = "{{.l2_sequencer_url}}" +# ws_url = "{{.l2_sequencer_ws_url}}" +# The maximum amount of time we can use this rpc in a row. +max_consecutive = 150 +# Max amount of queries per second. +max_per_second = 200 + +[rpc] +url = "{{.l2_rpc_url}}" +ws_url = "{{.l2_ws_url}}" +# The maximum amount of time we can use this rpc in a row. +max_consecutive = 150 +# Max amount of queries per second. +max_per_second = 200 + +[rpcpless] +url = "{{.l2_rpc_pless_url}}" +ws_url = "{{.l2_ws_pless_url}}" +# The maximum amount of time we can use this rpc in a row. +max_consecutive = 150 +# Max amount of queries per second. 
+max_per_second = 200 diff --git a/templates/contract-deploy/run-contract-setup.sh b/templates/contract-deploy/run-contract-setup.sh index 7480c89d..3538939a 100755 --- a/templates/contract-deploy/run-contract-setup.sh +++ b/templates/contract-deploy/run-contract-setup.sh @@ -28,7 +28,7 @@ fund_account_on_l1() { cast send \ --rpc-url "{{.l1_rpc_url}}" \ --mnemonic "{{.l1_preallocated_mnemonic}}" \ - --value "100ether" \ + --value "{{.l1_funding_amount}}" \ "$address" }