diff --git a/Cargo.lock b/Cargo.lock index 9afe610a..43f4883e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -468,7 +468,7 @@ dependencies = [ [[package]] name = "casper-binary-port" version = "1.0.0" -source = "git+https://github.com/casper-network/casper-node.git?branch=feat-2.0#f803ee53db31edd5f7f3c1fa1e0ec0ea59550158" +source = "git+https://github.com/casper-network/casper-node.git?branch=feat-2.0#93974105e0ee2ce152891465e0f7661c701c0396" dependencies = [ "bincode", "bytes", @@ -670,7 +670,7 @@ dependencies = [ [[package]] name = "casper-types" version = "5.0.0" -source = "git+https://github.com/casper-network/casper-node.git?branch=feat-2.0#f803ee53db31edd5f7f3c1fa1e0ec0ea59550158" +source = "git+https://github.com/casper-network/casper-node.git?branch=feat-2.0#93974105e0ee2ce152891465e0f7661c701c0396" dependencies = [ "base16", "base64 0.13.1", diff --git a/Cargo.toml b/Cargo.toml index 4a8f6c46..0c64330a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,7 +15,7 @@ anyhow = "1" async-stream = "0.3.4" async-trait = "0.1.77" casper-types = { git = "https://github.com/casper-network/casper-node.git", branch = "feat-2.0" } -casper-binary-port = { git = "https://github.com/casper-network/casper-node.git", branch = "feat-2.0" } +casper-binary-port = { git = "https://github.com/casper-network/casper-node.git", branch = "feat-2.0" } casper-event-sidecar = { path = "./event_sidecar", version = "1.0.0" } casper-event-types = { path = "./types", version = "1.0.0" } casper-rpc-sidecar = { path = "./rpc_sidecar", version = "1.0.0" } diff --git a/LEGACY_SSE_EMULATION.md b/LEGACY_SSE_EMULATION.md index bddbb113..9cd94933 100644 --- a/LEGACY_SSE_EMULATION.md +++ b/LEGACY_SSE_EMULATION.md @@ -1,111 +1,69 @@ -# Rationale +# The Legacy SSE Emulation -The casper node 2.x produces a different set of SSE events than the 1.x ones. 
Also, 1.x nodes used 3 sse endpoints (`/events/sigs`, `/events/deploys`, `/events/main`), while 2.x node exposes all SSE events under one firehose endpoint (`/events`). +Casper node versions 2.0 or greater (2.x) produce different SSE events than 1.x versions. Also, 1.x Casper nodes used 3 SSE endpoints (`/events/sigs`, `/events/deploys`, `/events/main`), while 2.x nodes expose all the SSE events on one endpoint (`/events`). -Generally the changes in 2.x regarding SSE are backwards incompatible to some extent. To harness all the details and collect all the data clients should adapt the new SSE API. However if some clients are not ready or have no need to adapt to the new SSE API, they can use the legacy SSE emulation. +Generally, the changes in 2.x regarding SSE are somewhat backward-incompatible. To collect all the data, clients should adopt the new SSE API. However, if some clients are not ready or do not need to adopt the new SSE API, they can use the legacy SSE emulation. -SSE emulation is by default turned off, the instruction on how to enable it is in the [main README.md](./README.md) file. +SSE emulation is off by default. To enable it, follow the steps below and read the main [README.md](./README.md#sse-server-configuration) file describing how to configure the SSE server. -**BEFORE YOU ENABLE LEGACY SSE EMULATION** please consider the following: +**LIMITATIONS:** -- The legacy SSE emulation is a temporary solution and can be removed in a future major release. Consider it being _deprecated_. -- The legacy SSE emulation is not a 1:1 mapping of the 2.x events to 1.x events. Some events will be omitted, some will be transformed, some will be passed as is. More details on the limitations of the emulation are explained below. -- The legacy SSE emulation is an additional drain on resources. It will consume more resources than the "native" 2.x SSE API. 
+
+Before enabling the legacy SSE emulation, consider its limitations:
+
-# Premises of legacy SSE emulation
-
+- The legacy SSE emulation is a temporary solution and may be removed in a future major release of the node software.
+- The legacy SSE emulation does not map 2.x events to 1.x events in a 1-to-1 fashion. Some events are omitted, some are transformed, and some are passed through. Below are more details on the emulation's limitations.
+- The legacy SSE emulation places an extra burden on resources. It will consume more resources than the native 2.x SSE API.
 
-Currently the only possible emulation is the V1 SSE API. Enabling V1 SSE api emulation requires setting `emulate_legacy_sse_apis` to `["V1"]`, like:
+> **Note**: 2.x node versions label new block events with `Version2`. In the rare case that a 2.x node sees a legacy block, it will label events coming from this block with `Version1`. The notion of Version1 and Version2 is new to 2.x, and wasn't present in 1.x node versions. So, for the legacy SSE emulation, both Version1 and Version2 BlockAdded events will be transformed to the old BlockAdded event format from 1.x.
 
-```
+## Configuration
+
+To enable the legacy SSE emulation, set the `emulate_legacy_sse_apis` setting to `["V1"]`. Currently, this is the only possible value:
+
+```toml
 [sse_server]
 (...)
 emulate_legacy_sse_apis = ["V1"]
 (...)
 ```
 
-This will expose three additional sse endpoints:
-
-- `/events/sigs` -> publishes `ApiVersion`, `BlockAdded`, `DeployProcessed`, `DeployExpired`, `Fault` and `Shutdown`
-- `/events/deploys`-> publishes `ApiVersion`, `TransactionAccepted` and `Shutdown`
-- `/events/main` -> publishes `ApiVersion`, `FinalitySignature` and `Shutdown` events
-
-Those endpoints will emit events in the same format as the V1 SSE API of the casper node. 
There are limitations to what Casper Sidecar can and will do, here is a list of mapping assumptions: - -## Translating `ApiVersion` event - -Legacy SSE event will be the same - -## Translating `BlockAdded` event - -- When the 2.x event emits a V1 block it will be unwrapped and passed as a legacy BlockAdded, for instance a 2.x event like this: - - ```json - { - "BlockAdded": { - "block_hash": "d59359690ca5a251b513185da0767f744e77645adec82bb6ff785a89edc7591c", - "block": { - "Version1": { - "hash": "d59359690ca5a251b513185da0767f744e77645adec82bb6ff785a89edc7591c", - "header": { - "parent_hash": "90ca56a697f8b1b19cba08c642fd7f04669b8cd49bb9d652fca989f8a9f8bcea", - "state_root_hash": "9cce223fdbeab41dbbcf0b62f3fd857373131378d51776de26bb9f4fefe1e849", - "body_hash": "5f37be399c15b2394af48243ce10a62a7d12769dc5f7740b18ad3bf55bde5271", - "random_bit": true, - "accumulated_seed": "b3e1930565a80a874a443eaadefa1a340927fb8b347729bbd93e93935a47a9e4", - "era_end": { - "era_report": { - "equivocators": [ - "0203c9da857cfeccf001ce00720ae2e0d083629858b60ac05dd285ce0edae55f0c8e", - "02026fb7b629a2ec0132505cdf036f6ffb946d03a1c9b5da57245af522b842f145be" - ], - "rewards": [ - { - "validator": "01235b932586ae5cc3135f7a0dc723185b87e5bd3ae0ac126a92c14468e976ff25", - "amount": 129457537 - } - ], - "inactive_validators": [] - }, - "next_era_validator_weights": [ - { - "validator": "0198957673ad060503e2ec7d98dc71af6f90ad1f854fe18025e3e7d0d1bbe5e32b", - "weight": "1" - }, - { - "validator": "02022d6bc4e3012cc4ae467b5525111cf7ed65883b05a1d924f1e654c64fad3a027c", - "weight": "2" - } - ] - }, - "timestamp": "2024-04-25T20:00:35.640Z", - "era_id": 601701, - "height": 6017012, - "protocol_version": "1.0.0" - }, - "body": { - "proposer": "0203426736da2554ebf1f8ee1d2ce4ab11b1e33419d7dfc1ce2fe1945faf00bacc9e", - "deploy_hashes": [ - "06950e4374dc88685634ec30bcddd68e6b46c109ccf6d29e2dfcf5367df75571", - "27a89dd58e6297a5244342b68b117afe2555131b896ad6ed4321edcd4130ae7b" - ], - "transfer_hashes": [ - 
"3e30b6c1c5dbca9277425846b42dc832cd3d8ce889c38d6bfc8bd95b3e1c403e", - "c990ba47146270655eaacc53d4115cbd980697f3d4e9c76bccfdfce82af6ce08" - ] - } - } - } - } - } - ``` +This setting will expose three legacy SSE endpoints with the following events streamed on each endpoint: + +- `/events/main` - `ApiVersion`, `BlockAdded`, `DeployProcessed`, `DeployExpired`, `Fault` and `Shutdown` +- `/events/deploys`- `ApiVersion`, `DeployAccepted` and `Shutdown` +- `/events/sigs` - `ApiVersion`, `FinalitySignature` and `Shutdown` + +Those endpoints will emit events in the same format as the legacy SSE API of the Casper node. + +## Event Mapping + +There are limitations to what the Casper Sidecar can and will do. Below, you will find a list of mapping assumptions between 2.x events and 1.x events. + +- [`ApiVersion` events](#the-apiversion-event) +- [`BlockAdded` events](#the-blockadded-event) +- [`TransactionAccepted` events](#the-transactionaccepted-event) +- [`TransactionExpired` events](#the-transactionexpired-event) +- [`TransactionProcessed` events](#the-transactionprocessed-event) - will be translated to 1.x emulated event: +### `ApiVersion` events - ```json - { - "BlockAdded": { - "block_hash": "d59359690ca5a251b513185da0767f744e77645adec82bb6ff785a89edc7591c", - "block": { +The legacy SSE ApiVersion event is the same as the current version. + +### `BlockAdded` events + +The Sidecar can emit a legacy `BlockAdded` event by unwrapping the 2.x event structure and creating a 1.x emulated event structure. + +A Version1 `BlockAdded` event will be unwrapped and passed as a legacy `BlockAdded` event as shown below. 
+ +**Version1 BlockAdded in 2.x:** + +```json +{ + "BlockAdded": { + "block_hash": "d59359690ca5a251b513185da0767f744e77645adec82bb6ff785a89edc7591c", + "block": { + "Version1": { "hash": "d59359690ca5a251b513185da0767f744e77645adec82bb6ff785a89edc7591c", "header": { "parent_hash": "90ca56a697f8b1b19cba08c642fd7f04669b8cd49bb9d652fca989f8a9f8bcea", @@ -157,264 +115,271 @@ Legacy SSE event will be the same } } } - ``` - -- When the 2.x event emits a V2 block the following rules apply: - - - `block_hash` will be copied from V2 to V1 - - `block.block_hash` will be copied from V2 to V1 - - `block.header.era_end`: - - if the era_end is a V1 variety - it will be copied - - if the era_end is a V2 variety: - - V2 `next_era_validator_weights` will be copied from V2 `next_era_validator_weights` - - V1 `era_report` will be assembled from V2 `era_end.equivocators`, `era_end.rewards` and `era_end.inactive_validators` fields - - IF one of the `rewards` contains a reward that doesn't fit in a u64 (because V2 has U512 type in rewards values) - the whole `era_end` **WILL BE OMITTED** from the legacy V1 block (value None) - - V2 field `next_era_gas_price` has no equivalent in V1 and will be omitted - - `block.header.current_gas_price` this field only exists in V2 and will be omitted from the V1 block header - - `block.header.proposer` will be copied from V2 to V1 `block.body.proposer` - - other `block.header.*` fields will be copied from V2 to V1 - - `block.body.deploy_hashes` will be based on V2 `block.body.standard` transactions. Bear in mind, that only values of transactions of type `Deploy` will be copied to V1 `block.body.deploy_hashes` array - - `block.body.transfer_hashes` will be based on V2 `block.body.mint` transactions. Bear in mind, that only values of transactions of type `Deploy` will be copied to V1 `block.body.transfer_hashes` array. - - An example of the above rules. 
- Input V2 BlockAdded: - - ```json - { - "BlockAdded": { - "block_hash": "2df9fb8909443fba928ed0536a79780cdb4557d0c05fdf762a1fd61141121422", - "block": { - "Version2": { - "hash": "2df9fb8909443fba928ed0536a79780cdb4557d0c05fdf762a1fd61141121422", - "header": { - "proposer": "01d3eec0445635f136ae560b43e9d8f656a6ba925f01293eaf2610b39ebe0fc28d", - "parent_hash": "b8f5e9afd2e54856aa1656f962d07158f0fdf9cfac0f9992875f31f6bf2623a2", - "state_root_hash": "cbf02d08bb263aa8915507c172b5f590bbddcd68693fb1c71758b5684b011730", - "body_hash": "6041ab862a1e14a43a8e8a9a42dad27091915a337d18060c22bd3fe7b4f39607", - "random_bit": false, - "accumulated_seed": "a0e424710f4fba036ba450b40f2bd7a842b176cf136f3af1952a2a13eb02616c", - "era_end": { - "equivocators": [ - "01cc718e9dea652577bffad3471d0db7d03ba30923780a2a8fd1e3dd9b4e72dc54", - "0203e4532e401326892aa8ebc16b6986bd35a6c96a1f16c28db67fd7e87cb6913817", - "020318a52d5b2d545def8bf0ee5ea7ddea52f1fbf106c8b69848e40c5460e20c9f62" - ], - "inactive_validators": [ - "01cc718e9dea652577bffad3471d0db7d03ba30923780a2a8fd1e3dd9b4e72dc55", - "01cc718e9dea652577bffad3471d0db7d03ba30923780a2a8fd1e3dd9b4e72dc56" - ], - "next_era_validator_weights": [ - { - "validator": "02038b238d774c3c4228a0430e3a078e1a2533f9c87cccbcf695637502d8d6057a63", - "weight": "1" - }, - { - "validator": "0102ffd4d2812d68c928712edd012fbcad54367bc6c5c254db22cf696772856566", - "weight": "2" - } - ], - "rewards": { - "02028b18c949d849b377988ea5191b39340975db25f8b80f37cc829c9f79dbfb19fc": "749546792", - "02028002c063228ff4e9d22d69154c499b86a4f7fdbf1d1e20f168b62da537af64c2": "788342677", - "02038efa405f648c72f36b0e5f37db69ab213d44404591b24de21383d8cc161101ec": "86241635", - "01f6bbd4a6fd10534290c58edb6090723d481cea444a8e8f70458e5136ea8c733c": "941794198" - }, - "next_era_gas_price": 1 - }, - "timestamp": "2024-04-25T20:31:39.895Z", - "era_id": 419571, - "height": 4195710, - "protocol_version": "1.0.0", - "current_gas_price": 1 - }, - "body": { - "transactions": { - "0": [{ - 
"Deploy": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e80" - }, - { - "Deploy": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e81" - }, - { - "Version1": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e82" - }], - "1": [{ - "Deploy": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e83" - }, - { - "Deploy": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e84" - }, - { - "Version1": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e85" - }], - "2": [{ - "Deploy": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e86" - }, - { - "Deploy": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e87" - }, - { - "Version1": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e88" - }], - "3": [{ - "Deploy": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e89" - }, - { - "Deploy": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e90" - }, +} +``` + +**Emulated 1.x BlockAdded (from Version1):** + +```json +{ + "BlockAdded": { + "block_hash": "d59359690ca5a251b513185da0767f744e77645adec82bb6ff785a89edc7591c", + "block": { + "hash": "d59359690ca5a251b513185da0767f744e77645adec82bb6ff785a89edc7591c", + "header": { + "parent_hash": "90ca56a697f8b1b19cba08c642fd7f04669b8cd49bb9d652fca989f8a9f8bcea", + "state_root_hash": "9cce223fdbeab41dbbcf0b62f3fd857373131378d51776de26bb9f4fefe1e849", + "body_hash": "5f37be399c15b2394af48243ce10a62a7d12769dc5f7740b18ad3bf55bde5271", + "random_bit": true, + "accumulated_seed": "b3e1930565a80a874a443eaadefa1a340927fb8b347729bbd93e93935a47a9e4", + "era_end": { + "era_report": { + "equivocators": [ + "0203c9da857cfeccf001ce00720ae2e0d083629858b60ac05dd285ce0edae55f0c8e", + "02026fb7b629a2ec0132505cdf036f6ffb946d03a1c9b5da57245af522b842f145be" + ], + "rewards": [ { - "Version1": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e91" - }] + "validator": 
"01235b932586ae5cc3135f7a0dc723185b87e5bd3ae0ac126a92c14468e976ff25", + "amount": 129457537 + } + ], + "inactive_validators": [] + }, + "next_era_validator_weights": [ + { + "validator": "0198957673ad060503e2ec7d98dc71af6f90ad1f854fe18025e3e7d0d1bbe5e32b", + "weight": "1" + }, + { + "validator": "02022d6bc4e3012cc4ae467b5525111cf7ed65883b05a1d924f1e654c64fad3a027c", + "weight": "2" } - "rewarded_signatures": [[240], [0], [0]] - } - } + ] + }, + "timestamp": "2024-04-25T20:00:35.640Z", + "era_id": 601701, + "height": 6017012, + "protocol_version": "1.0.0" + }, + "body": { + "proposer": "0203426736da2554ebf1f8ee1d2ce4ab11b1e33419d7dfc1ce2fe1945faf00bacc9e", + "deploy_hashes": [ + "06950e4374dc88685634ec30bcddd68e6b46c109ccf6d29e2dfcf5367df75571", + "27a89dd58e6297a5244342b68b117afe2555131b896ad6ed4321edcd4130ae7b" + ], + "transfer_hashes": [ + "3e30b6c1c5dbca9277425846b42dc832cd3d8ce889c38d6bfc8bd95b3e1c403e", + "c990ba47146270655eaacc53d4115cbd980697f3d4e9c76bccfdfce82af6ce08" + ] } } } - ``` - - Output legacy BlockAdded: +} +``` - ```json - { - "BlockAdded": { - "block_hash": "2df9fb8909443fba928ed0536a79780cdb4557d0c05fdf762a1fd61141121422", - "block": { +When the 2.x event stream emits a legacy `BlockAdded` event, the following mapping rules apply: + +- `block_hash` will be copied from Version2 to Version1. +- `block.block_hash` will be copied from Version2 to Version1. +- `block.header.era_end`: + - If the `era_end` is a Version1 variety - it will be copied. + - If the `era_end` is a Version2 variety: + - Version2 `next_era_validator_weights` will be copied from Version2 `next_era_validator_weights`. + - Version1 `era_report` will be assembled from the Version2 `era_end.equivocators`, `era_end.rewards` and `era_end.inactive_validators` fields. + - If one of the `rewards` contains a reward that doesn't fit in a u64 (because Version2 has U512 type in rewards values) - the whole `era_end` **WILL BE OMITTED** from the legacy Version1 block (value None). 
+ - Version2 field `next_era_gas_price` has no equivalent in Version1 and will be omitted. +- `block.header.current_gas_price` this field only exists in Version2 and will be omitted from the Version1 block header. +- `block.header.proposer` will be copied from Version2 to Version1 `block.body.proposer`. +- other `block.header.*` fields will be copied from Version2 to Version1. +- `block.body.deploy_hashes` will be based on Version2 `block.body.standard` transactions. Bear in mind, that only values of transactions of type `Deploy` will be copied to Version1 `block.body.deploy_hashes` array. +- `block.body.transfer_hashes` will be based on Version2 `block.body.mint` transactions. Bear in mind, that only values of transactions of type `Deploy` will be copied to Version1 `block.body.transfer_hashes` array. + +Here is an example mapping demonstrating the rules above: + +**Version2 BlockAdded in 2.x:** + +```json +{ + "BlockAdded": { + "block_hash": "2df9fb8909443fba928ed0536a79780cdb4557d0c05fdf762a1fd61141121422", + "block": { + "Version2": { "hash": "2df9fb8909443fba928ed0536a79780cdb4557d0c05fdf762a1fd61141121422", "header": { + "proposer": "01d3eec0445635f136ae560b43e9d8f656a6ba925f01293eaf2610b39ebe0fc28d", "parent_hash": "b8f5e9afd2e54856aa1656f962d07158f0fdf9cfac0f9992875f31f6bf2623a2", "state_root_hash": "cbf02d08bb263aa8915507c172b5f590bbddcd68693fb1c71758b5684b011730", "body_hash": "6041ab862a1e14a43a8e8a9a42dad27091915a337d18060c22bd3fe7b4f39607", "random_bit": false, "accumulated_seed": "a0e424710f4fba036ba450b40f2bd7a842b176cf136f3af1952a2a13eb02616c", "era_end": { - "era_report": { - "equivocators": [ - "01cc718e9dea652577bffad3471d0db7d03ba30923780a2a8fd1e3dd9b4e72dc54", - "0203e4532e401326892aa8ebc16b6986bd35a6c96a1f16c28db67fd7e87cb6913817", - "020318a52d5b2d545def8bf0ee5ea7ddea52f1fbf106c8b69848e40c5460e20c9f62" - ], - "rewards": [ - { - "validator": "01f6bbd4a6fd10534290c58edb6090723d481cea444a8e8f70458e5136ea8c733c", - "amount": 941794198 - }, - { - 
"validator": "02028002c063228ff4e9d22d69154c499b86a4f7fdbf1d1e20f168b62da537af64c2", - "amount": 788342677 - }, - { - "validator": "02028b18c949d849b377988ea5191b39340975db25f8b80f37cc829c9f79dbfb19fc", - "amount": 749546792 - }, - { - "validator": "02038efa405f648c72f36b0e5f37db69ab213d44404591b24de21383d8cc161101ec", - "amount": 86241635 - } - ], - "inactive_validators": [ - "01cc718e9dea652577bffad3471d0db7d03ba30923780a2a8fd1e3dd9b4e72dc55", - "01cc718e9dea652577bffad3471d0db7d03ba30923780a2a8fd1e3dd9b4e72dc56" - ] - }, + "equivocators": [ + "01cc718e9dea652577bffad3471d0db7d03ba30923780a2a8fd1e3dd9b4e72dc54", + "0203e4532e401326892aa8ebc16b6986bd35a6c96a1f16c28db67fd7e87cb6913817", + "020318a52d5b2d545def8bf0ee5ea7ddea52f1fbf106c8b69848e40c5460e20c9f62" + ], + "inactive_validators": [ + "01cc718e9dea652577bffad3471d0db7d03ba30923780a2a8fd1e3dd9b4e72dc55", + "01cc718e9dea652577bffad3471d0db7d03ba30923780a2a8fd1e3dd9b4e72dc56" + ], "next_era_validator_weights": [ - { - "validator": "0102ffd4d2812d68c928712edd012fbcad54367bc6c5c254db22cf696772856566", - "weight": "2" - }, { "validator": "02038b238d774c3c4228a0430e3a078e1a2533f9c87cccbcf695637502d8d6057a63", "weight": "1" + }, + { + "validator": "0102ffd4d2812d68c928712edd012fbcad54367bc6c5c254db22cf696772856566", + "weight": "2" } - ] + ], + "rewards": { + "02028b18c949d849b377988ea5191b39340975db25f8b80f37cc829c9f79dbfb19fc": "749546792", + "02028002c063228ff4e9d22d69154c499b86a4f7fdbf1d1e20f168b62da537af64c2": "788342677", + "02038efa405f648c72f36b0e5f37db69ab213d44404591b24de21383d8cc161101ec": "86241635", + "01f6bbd4a6fd10534290c58edb6090723d481cea444a8e8f70458e5136ea8c733c": "941794198" + }, + "next_era_gas_price": 1 }, "timestamp": "2024-04-25T20:31:39.895Z", "era_id": 419571, "height": 4195710, - "protocol_version": "1.0.0" + "protocol_version": "2.0.0", + "current_gas_price": 1 }, "body": { - "proposer": "01d3eec0445635f136ae560b43e9d8f656a6ba925f01293eaf2610b39ebe0fc28d", - "deploy_hashes": [ - 
"58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e89", - "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e90" - ], - "transfer_hashes": [ - "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e80", - "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e81" - ] + "transactions": { + "0": [{ + "Deploy": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e80" + }, + { + "Deploy": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e81" + }, + { + "Version1": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e82" + }], + "1": [{ + "Deploy": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e83" + }, + { + "Deploy": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e84" + }, + { + "Version1": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e85" + }], + "2": [{ + "Deploy": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e86" + }, + { + "Deploy": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e87" + }, + { + "Version1": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e88" + }], + "3": [{ + "Deploy": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e89" + }, + { + "Deploy": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e90" + }, + { + "Version1": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e91" + }] + } + "rewarded_signatures": [[240], [0], [0]] } } } } - ``` +} +``` -## Translating `TransactionAccepted` event +**Emulated 1.x BlockAdded (from Version2 BlockAdded):** -- If the event is a V1 variant - it will be unwrapped and passed, so a 2.x event: - ```json - { - "TransactionAccepted": { - "Deploy": { - "hash": "5a7709969c210db93d3c21bf49f8bf705d7c75a01609f606d04b0211af171d43", - "header": { - "account": "02022c07e061d6e0b43bbaa25717b021c2ddc0f701a223946a0883b57ae842917438", - "timestamp": "2020-08-07T01:28:27.360Z", - "ttl": "4m 22s", - "gas_price": 72, - "body_hash": 
"aa2a111c086628a161001160756c5884e32fde0356bb85f484a3e55682ad089f", - "dependencies": [], - "chain_name": "casper-example" - }, - "payment": { - "StoredContractByName": { - "name": "casper-example", - "entry_point": "example-entry-point", - "args": [ - [ - "amount", - { - "cl_type": "U512", - "bytes": "0400f90295", - "parsed": "2500000000" - } - ] - ] - } - }, - "session": { - "StoredContractByHash": { - "hash": "dfb621e7012df48fe1d40fd8015b5e2396c477c9587e996678551148a06d3a89", - "entry_point": "8sY9fUUCwoiFZmxKo8kj", - "args": [ - [ - "YbZWtEuL4D6oMTJmUWvj", - { - "cl_type": { - "List": "U8" - }, - "bytes": "5a000000909ffe7807b03a5db0c3c183648710db16d408d8425a4e373fc0422a4efed1ab0040bc08786553fcac4521528c9fafca0b0fb86f4c6e9fb9db7a1454dda8ed612c4ea4c9a6378b230ae1e3c236e37d6ebee94339a56cb4be582a", - "parsed": [144, 159, 254, 120, 7] - } - ] +```json +{ + "BlockAdded": { + "block_hash": "2df9fb8909443fba928ed0536a79780cdb4557d0c05fdf762a1fd61141121422", + "block": { + "hash": "2df9fb8909443fba928ed0536a79780cdb4557d0c05fdf762a1fd61141121422", + "header": { + "parent_hash": "b8f5e9afd2e54856aa1656f962d07158f0fdf9cfac0f9992875f31f6bf2623a2", + "state_root_hash": "cbf02d08bb263aa8915507c172b5f590bbddcd68693fb1c71758b5684b011730", + "body_hash": "6041ab862a1e14a43a8e8a9a42dad27091915a337d18060c22bd3fe7b4f39607", + "random_bit": false, + "accumulated_seed": "a0e424710f4fba036ba450b40f2bd7a842b176cf136f3af1952a2a13eb02616c", + "era_end": { + "era_report": { + "equivocators": [ + "01cc718e9dea652577bffad3471d0db7d03ba30923780a2a8fd1e3dd9b4e72dc54", + "0203e4532e401326892aa8ebc16b6986bd35a6c96a1f16c28db67fd7e87cb6913817", + "020318a52d5b2d545def8bf0ee5ea7ddea52f1fbf106c8b69848e40c5460e20c9f62" + ], + "rewards": [ + { + "validator": "01f6bbd4a6fd10534290c58edb6090723d481cea444a8e8f70458e5136ea8c733c", + "amount": 941794198 + }, + { + "validator": "02028002c063228ff4e9d22d69154c499b86a4f7fdbf1d1e20f168b62da537af64c2", + "amount": 788342677 + }, + { + "validator": 
"02028b18c949d849b377988ea5191b39340975db25f8b80f37cc829c9f79dbfb19fc", + "amount": 749546792 + }, + { + "validator": "02038efa405f648c72f36b0e5f37db69ab213d44404591b24de21383d8cc161101ec", + "amount": 86241635 + } + ], + "inactive_validators": [ + "01cc718e9dea652577bffad3471d0db7d03ba30923780a2a8fd1e3dd9b4e72dc55", + "01cc718e9dea652577bffad3471d0db7d03ba30923780a2a8fd1e3dd9b4e72dc56" ] - } + }, + "next_era_validator_weights": [ + { + "validator": "0102ffd4d2812d68c928712edd012fbcad54367bc6c5c254db22cf696772856566", + "weight": "2" + }, + { + "validator": "02038b238d774c3c4228a0430e3a078e1a2533f9c87cccbcf695637502d8d6057a63", + "weight": "1" + } + ] }, - "approvals": [ - { - "signer": "02022c07e061d6e0b43bbaa25717b021c2ddc0f701a223946a0883b57ae842917438", - "signature": "025d0a7ba37bebe6774681ca5adecb70fa4eef56821eb344bf0f6867e171a899a87edb2b8bf70f2cb47a1670a6baf2cded1fad535ee53a2f65da91c82ebf30945b" - } + "timestamp": "2024-04-25T20:31:39.895Z", + "era_id": 419571, + "height": 4195710, + "protocol_version": "1.0.0" + }, + "body": { + "proposer": "01d3eec0445635f136ae560b43e9d8f656a6ba925f01293eaf2610b39ebe0fc28d", + "deploy_hashes": [ + "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e89", + "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e90" + ], + "transfer_hashes": [ + "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e80", + "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e81" ] } } } - ``` - will be translated to legacy `DeployAccepted`: - ```json - { - "DeployAccepted": { +} +``` + +### `TransactionAccepted` events + +Version1 `TransactionAccepted` events will be unwrapped and translated to legacy `DeployAccepted` events on the legacy SSE stream. 
+ +**Version1 TransactionAccepted in 2.x:** + +```json +{ + "TransactionAccepted": { + "Deploy": { "hash": "5a7709969c210db93d3c21bf49f8bf705d7c75a01609f606d04b0211af171d43", "header": { "account": "02022c07e061d6e0b43bbaa25717b021c2ddc0f701a223946a0883b57ae842917438", @@ -467,100 +432,162 @@ Legacy SSE event will be the same ] } } - ``` - -* If the event is a V2 variant - it will be omitted so a 2.x event like: - ``` - { - "TransactionAccepted": { - "Version1": { - ... - } +} +``` + +**Emulated 1.x DeployAccepted (from Version1):** + +```json +{ + "DeployAccepted": { + "hash": "5a7709969c210db93d3c21bf49f8bf705d7c75a01609f606d04b0211af171d43", + "header": { + "account": "02022c07e061d6e0b43bbaa25717b021c2ddc0f701a223946a0883b57ae842917438", + "timestamp": "2020-08-07T01:28:27.360Z", + "ttl": "4m 22s", + "gas_price": 72, + "body_hash": "aa2a111c086628a161001160756c5884e32fde0356bb85f484a3e55682ad089f", + "dependencies": [], + "chain_name": "casper-example" + }, + "payment": { + "StoredContractByName": { + "name": "casper-example", + "entry_point": "example-entry-point", + "args": [ + [ + "amount", + { + "cl_type": "U512", + "bytes": "0400f90295", + "parsed": "2500000000" + } + ] + ] } + }, + "session": { + "StoredContractByHash": { + "hash": "dfb621e7012df48fe1d40fd8015b5e2396c477c9587e996678551148a06d3a89", + "entry_point": "8sY9fUUCwoiFZmxKo8kj", + "args": [ + [ + "YbZWtEuL4D6oMTJmUWvj", + { + "cl_type": { + "List": "U8" + }, + "bytes": "5a000000909ffe7807b03a5db0c3c183648710db16d408d8425a4e373fc0422a4efed1ab0040bc08786553fcac4521528c9fafca0b0fb86f4c6e9fb9db7a1454dda8ed612c4ea4c9a6378b230ae1e3c236e37d6ebee94339a56cb4be582a", + "parsed": [144, 159, 254, 120, 7] + } + ] + ] + } + }, + "approvals": [ + { + "signer": "02022c07e061d6e0b43bbaa25717b021c2ddc0f701a223946a0883b57ae842917438", + "signature": "025d0a7ba37bebe6774681ca5adecb70fa4eef56821eb344bf0f6867e171a899a87edb2b8bf70f2cb47a1670a6baf2cded1fad535ee53a2f65da91c82ebf30945b" + } + ] } - ``` - will be omitted 
from the legacy SSE streams +} +``` -## Translating `TransactionExpired` event +All Version1 variants will be omitted from legacy SSE streams. For example, the following Version1 `TransactionAccepted` event will not be streamed: -- If it's a Deploy variety it will be unpacked and sent. So a 2.x `TransactionExpired` event: +```json +"TransactionAccepted": { + "Version1": { + ... +``` - ```json - { - "TransactionExpired": { - "transaction_hash": { - "Deploy": "565d7147e28be402c34208a133fd59fde7ac785ae5f0298cb5fb7adfb1b054a8" - } - } - } - ``` +### `TransactionExpired` events - will be sent as a legacy `DeployExpired` event: +Other transaction types will be unwrapped and sent as legacy deploy types. - ```json - { - "DeployExpired": { - "deploy_hash": "565d7147e28be402c34208a133fd59fde7ac785ae5f0298cb5fb7adfb1b054a8" +A 2.x `TransactionExpired` event will be mapped to a `DeployExpired` event. + +**TransactionExpired mapped to DeployExpired:** + +```json +{ + "TransactionExpired": { + "transaction_hash": { + "Deploy": "565d7147e28be402c34208a133fd59fde7ac785ae5f0298cb5fb7adfb1b054a8" } } - ``` +} +``` + +```json +{ + "DeployExpired": { + "deploy_hash": "565d7147e28be402c34208a133fd59fde7ac785ae5f0298cb5fb7adfb1b054a8" + } +} +``` -* If it's a Version1 variant it will be omitted from legacy SSE streams. So a 2.x `TransactionExpired` event: +All Version1 variants will be omitted from legacy SSE streams. For example, the following Version1 `TransactionExpired` event will not be streamed: - ```json - { - "TransactionExpired": { - "Version1": { - "hash": "565d7147e28be402c34208a133fd59fde7ac785ae5f0298cb5fb7adfb1b054a8" - } +```json +{ + "TransactionExpired": { + "Version1": { + "hash": "565d7147e28be402c34208a133fd59fde7ac785ae5f0298cb5fb7adfb1b054a8" } } - ``` - - will be omitted - -## Translating `TransactionProcessed` event. - -- If `transaction_hash` field is a `Version1`, the event will be ignored. 
-- If `transaction_hash` field is a `Deploy`, it's value will be used as `DeployProcessed.deploy_hash` - - If `initiator_addr` field is not a `PublicKey` type, the event will be omitted. - - If `initiator_addr` field is a `PublicKey` type, it's value will be used as `DeployProcessed.account` - - `timestamp`, `ttl`, `block_hash` will be filled from analogous fields in the `TransactionProcessed` event - - If `execution_result` is a `Version1` type, it's value will be copied as-is do the `DeployProcessed.execution_result` field. - - If `execution_result` is a `Version2` type please see [this paragraph](#translating-executionresultv2) - -### Translating `ExecutionResultV2`. - -- When translating `ExecutionResultV2` (later in this paragraph called `ex_v2`) to legacy `ExecutionResult` (later in this paragraph called `ex_v1`) the following rules apply: - - if `ex_v2.error_message` is not empty, the `ExecutionResult` will be of type `Failure` and `ex_v1.error_message` will be set to that value. Otherwise `ex_v1` will be of type `Success` - - `ex_v1.cost` will be set to `ex_v2.cost` - - `ex_v1.transfers` will always be an empty list since 2.x node doesn't use a notion of `TransferAddr` anymore - - `ex_v1.effect` will be populated based on `ex_v2.effects` field applying rules from paragraph [Translating Effects from V2](#translating-effects-from-v2) - -### Translating `Effects` from V2 - -- Output `operations` field will always be an empty list, since 2.x node no longer uses this concept for execution results -- For `transforms` the objects will be constructed based on `ex_v2.effects` with the following exceptions: - - V2 `AddKeys` transform will be translated to V1 `NamedKeys` transform. - - V2 `Write` transform will be translated applying rules from paragraph [Translating Write transform from V2](#translating-write-transform-from-v2). 
If translating at least one `Write` transform is not translatable (In the paragraph it will be denoted that it yields a `None` value) - the whole transform will be an empty array. - -### Translating `Write` transform from V2 - -- When translating `Write` transforms from V2 to V1 the following rules apply: - - For `CLValue`, it will be copied to output as `WriteCLValue` transform - - For `Account` it will be copied to output as `WriteAccount` transform, taking the v2 `account_hash` as value for `WriteAccount`. - - For `ContractWasm` a `WriteContractWasm` transform will be created. Please note that `WriteContractWasm` has no data, so details from V2 will be omitted. - - For `Contract` a `WriteContract` transform will be created. Please note that `WriteContract` has no data, so details from V2 will be omitted. - - For `Contract` a `WriteContractPackage` transform will be created. Please note that `WriteContractPackage` has no data, so details from V2 will be omitted. - - For `LegacyTransfer` a `WriteTransfer` transform will be created. Data will be copied. - - For `DeployInfo` a `WriteDeployInfo` transform will be created. Data will be copied. - - For `EraInfo` a `ErInfo` transform will be created. Data will be copied. - - For `Bid` a `WriteBid` transform will be created. Data will be copied. - - For `Withdraw` a `WriteWithdraw` transform will be created. Data will be copied. - - For `NamedKey` will be translated into a `AddKeys` transform. Data will be copied. - - For `AddressableEntity` no value will be produced (a `None` value will be yielded). - - For `BidKind` no value will be produced (a `None` value will be yielded). - - For `Package` no value will be produced (a `None` value will be yielded). - - For `ByteCode` no value will be produced (a `None` value will be yielded). - - For `MessageTopic` no value will be produced (a `None` value will be yielded). - - For `Message` no value will be produced (a `None` value will be yielded). 
+} +``` + +### `TransactionProcessed` events + +When translating a `TransactionProcessed` event to a legacy `DeployProcessed` event, the following rules apply: + +- If the `transaction_hash` field contains `Version1`, the event will be ignored. +- If the `transaction_hash` field is a `Deploy`, its value will be used as `DeployProcessed.deploy_hash`. +  - If the `initiator_addr` field is not a `PublicKey` type, the event will be omitted. +  - If the `initiator_addr` field is a `PublicKey` type, its value will be used as `DeployProcessed.account`. +  - `timestamp`, `ttl`, `block_hash` will be filled from analogous fields in the `TransactionProcessed` event. +  - If the `execution_result` contains `Version1`, its value will be copied as-is to the `DeployProcessed.execution_result` field. +  - If the `execution_result` contains `Version2`, see [this paragraph](#translating-executionresultv2). + +#### Translating `ExecutionResultV2` + +When translating the `ExecutionResultV2` (`ex_v2`) to a legacy `ExecutionResult` (`ex_v1`), the following rules apply: + +- If the `ex_v2.error_message` is not empty, the `ExecutionResult` will be of type `Failure`, and the `ex_v1.error_message` will be set to that value. Otherwise, `ex_v1` will be of type `Success`. +- The `ex_v1.cost` will be set to the `ex_v2.cost`. +- The `ex_v1.transfers` list will always be empty since the 2.x node no longer uses a `TransferAddr` notion. +- The `ex_v1.effect` will be populated based on the `ex_v2.effects` field, applying the rules from [Translating Effects from Version2](#translating-effects-from-version2). + +#### Translating `Effects` from Version2 + +When translating the `Effects` from Version2 to Version1, the following rules apply: + +- The output `operations` field will always be an empty list since the 2.x node no longer uses this concept for execution results. 
+- For `transforms`, the objects will be constructed based on the `ex_v2.effects` with the following exceptions: +  - The Version2 `AddKeys` transform will be translated to the Version1 `NamedKeys` transform. +  - The Version2 `Write` transform will be translated by applying the rules from paragraph [Translating Write transforms from Version2](#translating-write-transforms-from-version2). If at least one `Write` transform is not translatable (yielding a `None` value), the transform will be an empty array. + +#### Translating `Write` transforms from Version2 + +When translating `Write` transforms from Version2 to Version1, the following rules apply: + +- `CLValue`: will be copied to the `WriteCLValue` transform. +- `Account`: will be copied to the `WriteAccount` transform, assigning the Version2 `account_hash` as the value for `WriteAccount`. +- `ContractWasm`: a `WriteContractWasm` transform will be created. Please note that the `WriteContractWasm` will not contain data, so the Version2 details will be omitted. +- `Contract`: a `WriteContract` transform will be created. Please note that the `WriteContract` will not contain data, so the Version2 details will be omitted. +- `ContractPackage`: a `WriteContractPackage` transform will be created. Please note that the `WriteContractPackage` will not contain data, so the Version2 details will be omitted. +- `LegacyTransfer`: a `WriteTransfer` transform will be created. Data will be copied. +- `DeployInfo`: a `WriteDeployInfo` transform will be created. Data will be copied. +- `EraInfo`: a `WriteEraInfo` transform will be created. Data will be copied. +- `Bid`: a `WriteBid` transform will be created. Data will be copied. +- `Withdraw`: a `WriteWithdraw` transform will be created. Data will be copied. +- `NamedKey`: will be translated into an `AddKeys` transform. Data will be copied. +- `AddressableEntity`: the mapping will yield value `None`, meaning no value will be created. 
+- `BidKind`: the mapping will yield value `None`, meaning no value will be created. +- `Package`: the mapping will yield value `None`, meaning no value will be created. +- `ByteCode`: the mapping will yield value `None`, meaning no value will be created. +- `MessageTopic`: the mapping will yield value `None`, meaning no value will be created. +- `Message`: the mapping will yield value `None`, meaning no value will be created. diff --git a/README.md b/README.md index 7e34e0e2..aef4af3f 100644 --- a/README.md +++ b/README.md @@ -1,115 +1,166 @@ -# Casper Event Sidecar README +[![LOGO](https://raw.githubusercontent.com/casper-network/casper-node/master/images/casper-association-logo-primary.svg)](https://casper.network/) + +[![License](https://img.shields.io/badge/license-Apache-blue)](https://github.com/CasperLabs/casper-node/blob/master/LICENSE) + +# The Casper Sidecar + +- [Summary of Purpose](#summary-of-purpose) +- [System Components and Architecture](#system-components-and-architecture) +  - [The SSE server](#the-sse-server) +  - [The REST API server](#the-rest-api-server) +  - [The Admin API server](#the-admin-api-server) +  - [The RPC API server](#the-rpc-api-server) +- [Configuring the Sidecar](#configuring-the-sidecar) +  - [RPC server setup](#rpc-server-setup) +  - [SSE server setup](#sse-server-setup) +    - [Configuring SSE node connections](#configuring-sse-node-connections) +    - [Configuring SSE legacy emulations](#configuring-sse-legacy-emulations) +    - [Configuring the event stream](#configuring-the-event-stream) +  - [REST server setup](#rest-server-setup) +  - [Storage setup](#storage-setup) +    - [Database connectivity setup](#database-connectivity-setup) +      - [SQLite database](#sqlite-database) +      - [PostgreSQL database](#postgresql-database) +  - [Admin server setup](#admin-server-setup) +- [Running and Testing the Sidecar](#running-and-testing-the-sidecar) +  - [Prerequisites](#prerequisites) +  - [Running the Sidecar](#running-the-sidecar) +  - [Testing the 
Sidecar](#testing-the-sidecar) +- [Swagger Documentation](#swagger-documentation) +- [OpenAPI Specification](#openapi-specification) +- [Troubleshooting Tips](#troubleshooting-tips) + - [Checking liveness](#checking-liveness) + - [Checking the node connection](#checking-the-node-connection) + - [Diagnosing errors](#diagnosing-errors) + - [Monitoring memory consumption](#monitoring-memory-consumption) + - [Ensuring sufficient storage](#ensuring-sufficient-storage) + - [Inspecting the REST API](#inspecting-the-rest-api) + - [Limiting concurrent requests](#limiting-concurrent-requests) ## Summary of Purpose -The Casper Event Sidecar is an application that runs in tandem with the node process. It's main purpose is to: -* offload the node from broadcasting SSE events to multiple clients -* provide client features that aren't part of the nodes' functionality, nor should they be +The Casper Sidecar is an application running in tandem with the node process. It allows subscribers to monitor a node's event stream, query stored events, and query the node's JSON RPC API, thus receiving faster responses and reducing the load placed on the node. Its primary purpose is to: + +* Offload the node from broadcasting SSE events to multiple clients. +* Provide client features that aren't part of the nodes' functionality, nor should they be. While the primary use case for the Sidecar application is running alongside the node on the same machine, it can be run remotely if necessary. -### System Components & Architecture +## System Components and Architecture + +The Casper Sidecar provides the following functionalities: +* A server-sent events (SSE) server with an `/events` endpoint that streams all the events received from all connected nodes. The Sidecar also stores these events. +* A REST API server that allows clients to query stored events. +* A JSON RPC bridge between end users and a Casper node's binary port. +* Legacy emulation for clients using older versions of the SSE API. 
-Casper Sidecar has three main functionalities: -* Providing a SSE server with a firehose `/events` endpoint that streams all events from the connected nodes. Sidecar also stores observed events in storage. -* Providing a REST API server that allows clients to query events in storage. -* Be a JSON RPC bridge between end users and a Casper node's binary RPC port. +The Sidecar has the following components and external dependencies: -The system has the following components and external dependencies: ```mermaid +--- +title: The Casper Sidecar Components +--- graph LR; - subgraph CASPER-SIDECAR + subgraph CASPER_SIDECAR SSE_SERVER["SSE server"] - RPC_API_SERVER["RPC API server (json)"] + RPC_API_SERVER["RPC API server (JSON)"] REST_API["Rest API server"] ADMIN_API["Admin API server"] end - CONFIG{{"Config file (toml)"}} - CONFIG --> CASPER-SIDECAR + CONFIG{{"Config file (TOML)"}} + CONFIG --> CASPER_SIDECAR STORAGE[(Storage)] - NODE_SSE(("Casper Node SSE port")) - NODE_BINARY(("Casper Node binary port")) + NODE_SSE(("Casper node SSE port")) + NODE_BINARY(("Casper node binary port")) RPC_API_SERVER --> NODE_BINARY SSE_SERVER --> NODE_SSE SSE_SERVER --> STORAGE STORAGE --> REST_API ``` -#### SSE Server +### The SSE server + +The SSE Server has these components: -Diving into the SSE Server, we see the following components: ```mermaid graph TD; CLIENT{Client} CLIENT --> SSE_SERVER_API STORAGE[("Storage")] - CONFIG{{"Config file (toml)"}} + CONFIG{{"Config file (TOML)"}} MAIN --1.reads--> CONFIG NODE_SSE{Node SSE port} SSE_LISTENER --2--> STORAGE NODE_SSE --1--> SSE_LISTENER - subgraph "Casper sidecar" + subgraph CASPER_SIDECAR MAIN[main.rs] - MAIN --2.spawns---> SSE-SERVER - subgraph SSE-SERVER + MAIN --2.spawns---> SSE_SERVER + subgraph SSE_SERVER SSE_SERVER_API["SSE API"] RING_BUFFER["Events buffer"] SSE_SERVER_API --> RING_BUFFER SSE_LISTENER --3--> RING_BUFFER - subgraph "For connection in connections" - SSE_LISTENER["SSE Listener"] + subgraph "connection" + 
SSE_LISTENER["SSE listener"] end end end ``` -Given the flow above, the SSE Listener processes events in this order: -1. Fetch an event from the node's SSE port -2. Store the event -3. Publish the event to the SSE API +The SSE Listener processes events in this order: +1. Fetch an event from the node's SSE port. +2. Store the event. +3. Publish the event to the SSE API. + +Casper nodes offer an event stream API that returns server-sent events (SSEs) with JSON-encoded data. The Sidecar reads the event stream of all connected nodes, acting as a passthrough and replicating the SSE interface of the connected nodes. + +The Sidecar can: +* Republish the current events from the node to clients listening to Sidecar's SSE API. +* Publish a configurable number of previous events to clients connecting to the Sidecar's SSE API with `?start_from=` query (similar to the node's SSE API). +* Store the events in external storage for clients to query them via the Sidecar's REST API. +Enabling and configuring the SSE Server of the Sidecar is optional. -Casper nodes offer an event stream API that returns Server-Sent Events (SSEs) with JSON-encoded data. The Sidecar reads the event stream of all connected nodes, acting as a passthrough and replicating the SSE interface of the connected nodes. The Sidecar can: -* republish the current events from the node to clients listening to Sidecar's SSE API -* publish a configurable number of previous events to clients connecting to the Sidecar's SSE API with `?start_from=` query (similar to the node's SSE API) -* store the events in external storage for clients to query them via the Sidecar's REST API -Enabling and configuring the SSE Server of the Sidecar is optional. +### The REST API server + +The Sidecar offers an optional REST API that allows clients to query the events stored in external storage. You can discover the specific endpoints of the REST API using [OpenAPI](#openapi-specification) and [Swagger](#swagger-documentation). 
The [usage instructions](USAGE.md) provide more details. -#### REST API Server ```mermaid graph LR; CLIENT{Client} CLIENT --> REST_API STORAGE[("Storage")] REST_API --> STORAGE - CONFIG{{"Config file (toml)"}} + CONFIG{{"Config file (TOML)"}} MAIN --1.reads--> CONFIG - subgraph "Casper sidecar" + subgraph CASPER_SIDECAR MAIN[main.rs] MAIN --2.spawns--> REST_API REST_API["REST API"] end ``` -The Sidecar offers an optional REST API that allows clients to query the events stored in external storage. Node operators can discover the specific endpoints of the REST API using [OpenAPI] (#openapi-specification) and [Swagger] (#swagger-documentation). Also, the [usage instructions](USAGE.md) provide more details. +### The Admin API server + +The Sidecar offers an administrative API to allow an operator to check its current status. The Sidecar operator has the option to enable and configure this API. Please see the [admin server configuration](#admin-server-setup) for details. -#### ADMIN API Server ```mermaid graph LR; CLIENT{Client} CLIENT --> ADMIN_API CONFIG{{Config file}} MAIN --1.reads--> CONFIG - subgraph "Casper sidecar" + subgraph CASPER_SIDECAR MAIN[main.rs] MAIN --2.spawns--> ADMIN_API ADMIN_API["ADMIN API"] end ``` -The Sidecar offers an administrative API to allow an operator to check its current status. The Sidecar operator has the option to enable and configure this API. Please see the [admin server configuration](#admin-server) for details. +### The RPC API server + +The Sidecar also offers an RPC JSON API server that can be enabled and configured so that clients can interact with a Casper network. It is a JSON bridge between end users and a Casper node's binary port. The RPC API server forwards requests to the Casper node's binary port. For more details on how the RPC JSON API works, see the [RPC Sidecar README](rpc_sidecar/README.md). 
-#### RPC API Server ```mermaid graph LR; CLIENT{Client} @@ -118,17 +169,30 @@ The Sidecar offers an administrative API to allow an operator to check its curre MAIN --1.reads--> CONFIG CASPER_NODE(("Casper Node binary port")) RPC_API --forwards request--> CASPER_NODE - subgraph "Casper sidecar" + subgraph "Casper Sidecar" MAIN[main.rs] MAIN --2.spawns--> RPC_API RPC_API["RPC JSON API"] end ``` -The Sidecar offers an optional RPC JSON API module that can be enabled and configured. It is a JSON bridge between end users and a Casper node's binary port. The RPC API server forwards requests to the Casper node's binary port. For more details on how the RPC JSON API works, see the [RPC Sidecar README](rpc_sidecar/README.md). -Here is an example configuration of the RPC API server: +## Configuring the Sidecar -``` +The Sidecar service must be configured using a `.toml` file specified at runtime. + +This repository contains several sample configuration files that can be used as examples and adjusted according to your scenario: + +- [EXAMPLE_NCTL_CONFIG.toml](./resources/example_configs/EXAMPLE_NCTL_CONFIG.toml) - Configuration for connecting to nodes on a local NCTL network. This configuration is used in the unit and integration tests found in this repository. +- [EXAMPLE_NCTL_POSTGRES_CONFIG.toml](./resources/example_configs/EXAMPLE_NCTL_POSTGRES_CONFIG.toml) - Configuration for using the PostgreSQL database and nodes on a local NCTL network. +- [EXAMPLE_NODE_CONFIG.toml](./resources/example_configs/EXAMPLE_NODE_CONFIG.toml) - Configuration for connecting to live nodes on a Casper network. + +Once you create the configuration file and are ready to run the Sidecar service, you must provide the configuration as an argument using the `-- --path-to-config` option as described [here](#running-the-sidecar). 
+ +### RPC server setup + +Here is an example configuration for the RPC API server: + +```toml [rpc_server.main_server] enable_server = true address = '0.0.0.0:7777' @@ -137,10 +201,12 @@ max_body_bytes = 2_621_440 cors_origin = '' [rpc_server.node_client] -address = '127.0.0.1:28101' +address = '0.0.0.0:28101' max_message_size_bytes = 4_194_304 request_limit = 3 request_buffer_size = 16 +message_timeout_secs = 30 +client_access_timeout_secs = 2 [rpc_server.speculative_exec_server] enable_server = true @@ -169,43 +235,27 @@ max_attempts = 30 * `speculative_exec_server.max_body_bytes` - Maximum body size of request to API in bytes. * `speculative_exec_server.cors_origin` - Configures the CORS origin. -* `node_client.address` - Address of the Casper Node binary port +* `node_client.address` - Address of the Casper Node binary port. * `node_client.max_message_size_bytes` - Maximum binary port message size in bytes. * `node_client.request_limit` - Maximum number of in-flight requests. * `node_client.request_buffer_size` - Number of node requests that can be buffered. +* `node_client.message_timeout_secs` - Timeout for the message. +* `node_client.client_access_timeout_secs` - Timeout for the client connection. * `node_client.exponential_backoff.initial_delay_ms` - Timeout after the first broken connection (backoff) in milliseconds. * `node_client.exponential_backoff.max_delay_ms` - Maximum timeout after a broken connection in milliseconds. * `node_client.exponential_backoff.coefficient` - Coefficient for the exponential backoff. The next timeout is calculated as min(`current_timeout * coefficient`, `max_delay_ms`). * `node_client.exponential_backoff.max_attempts` - Maximum number of times to try to reconnect to the binary port of the node. 
-## Prerequisites +### SSE server setup -* CMake 3.1.4 or greater -* [Rust](https://www.rust-lang.org/tools/install) -* pkg-config -* gcc -* g++ - -## Configuration +The Sidecar SSE server is used to connect to Casper nodes, listen to events from them, store them locally and re-broadcast them to clients. Here is a sample configuration for the SSE server: -The SSE Sidecar service must be configured using a `.toml` file specified at runtime. - -This repository contains several sample configuration files that can be used as examples and adjusted according to your scenario: - -- [EXAMPLE_NCTL_CONFIG.toml](./resources/example_configs/EXAMPLE_NCTL_CONFIG.toml) - Configuration for connecting to nodes on a local NCTL network. This configuration is used in the unit and integration tests found in this repository -- [EXAMPLE_NCTL_POSTGRES_CONFIG.toml](./resources/example_configs/EXAMPLE_NCTL_POSTGRES_CONFIG.toml) - Configuration for using the PostgreSQL database and nodes on a local NCTL network -- [EXAMPLE_NODE_CONFIG.toml](./resources/example_configs/EXAMPLE_NODE_CONFIG.toml) - Configuration for connecting to live nodes on a Casper network and setting up an admin server - -Once you create the configuration file and are ready to run the Sidecar service, you must provide the configuration as an argument using the `-- --path-to-config` option as described [here](#running-the-sidecar). - -### SSE server configuration -The Casper sidecar SSE server is used to connect to casper nodes, listen to events from them, store them locally and re-broadcast them to clients. The configuration for the SSE server itself is as follows: - -``` +```toml [sse_server] enable_server = true emulate_legacy_sse_apis = ["V1"] + [[sse_server.connections]] @@ -214,15 +264,18 @@ emulate_legacy_sse_apis = ["V1"] ``` * `sse_server.enable_server` - If set to true, the SSE server will be enabled. -* `sse_server.emulate_legacy_sse_apis` - A list of legacy casper node SSE APIs to emulate. 
The Sidecar will expose sse endpoints that are compatible with specified versions. Please bear in mind that this feature is an emulation and should be used only for transition periods. In most case scenarios having a 1 to 1 mapping of new messages into old formats is impossible, so this can be a process that looses some data and/or doesn't emit all messages that come out of the casper node. The details of the emulation are described in section [Event Stream Server SSE legacy emulations](#event-stream-server-sse-legacy-emulations) module. +* `sse_server.emulate_legacy_sse_apis` - A list of legacy Casper node SSE APIs to emulate. The Sidecar will expose SSE endpoints that are compatible with specified versions. Please bear in mind that this feature is an emulation and should be used only for transition periods. In most scenarios, having a 1-to-1 mapping of new messages into old formats is impossible, so this can be a process that loses some data and/or doesn't emit all messages that come from the Casper node. See the [Legacy SSE Emulation](./LEGACY_SSE_EMULATION.md) page for more details. -#### SSE Node Connections +#### Configuring SSE node connections -The Casper Sidecar's SSE component can connect to Casper nodes' SSE endpoints with versions greater or equal to `2.0.0`. +The Sidecar's SSE component can connect to Casper nodes' SSE endpoints with versions greater or equal to `2.0.0`. The `node_connections` option configures the node (or multiple nodes) to which the Sidecar will connect and the parameters under which it will operate with that node. Connecting to multiple nodes requires multiple `[[sse_server.connections]]` sections. -``` +```toml +[sse_server] +enable_server = true + [[sse_server.connections]] ip_address = "127.0.0.1" sse_port = 18101 @@ -267,37 +320,82 @@ sleep_between_keep_alive_checks_in_seconds = 30 * `delay_between_retries_in_seconds` - The delay between attempts to connect to the node. 
* `allow_partial_connection` - Determining whether the Sidecar will allow a partial connection to this node. * `enable_logging` - This enables the logging of events from the node in question. -* `connection_timeout_in_seconds` - Number of seconds before the connection request times out. Parameter is optional, defaults to 5 -* `no_message_timeout_in_seconds` - Number of seconds after which the connection will be restarted if no bytes were received. Parameter is optional, defaults to 120 -* `sleep_between_keep_alive_checks_in_seconds` - Optional parameter specifying the time intervals (in seconds) for checking if the connection is still alive. Defaults to 60 +* `connection_timeout_in_seconds` - Number of seconds before the connection request times out. This parameter is optional, and defaults to 5. +* `no_message_timeout_in_seconds` - Number of seconds after which the connection will be restarted if no bytes were received. This parameter is optional, and defaults to 120. +* `sleep_between_keep_alive_checks_in_seconds` - Optional parameter specifying the time intervals (in seconds) for checking if the connection is still alive. Defaults to 60. + +#### Configuring SSE legacy emulations -#### Event Stream Server SSE legacy emulations +Applications using version 1 of a Casper node's event stream server can still function using an emulated V1 SSE API for a limited time. 
Enabling the V1 SSE API emulation requires the `emulate_legacy_sse_apis` setting to be `["V1"]`: -Please see [Legacy sse emulation file](./LEGACY_SSE_EMULATION.md) +```toml +[sse_server] +enable_server = true +emulate_legacy_sse_apis = ["V1"] +``` + +This setting will expose three legacy SSE endpoints with the following events streamed on each endpoint: +* `/events/sigs` - Finality Signature events +* `/events/deploys` - DeployAccepted events +* `/events/main` - All other legacy events, including BlockAdded, DeployProcessed, DeployExpired, Fault, Step, and Shutdown events +See the [Legacy SSE Emulation](./LEGACY_SSE_EMULATION.md) page for more details. -### Storage +#### Configuring the event stream -This directory stores the SSE cache and an SQLite database if the Sidecar is configured to use SQLite. +To configure the Sidecar's event stream server, specify the following settings: +```toml +[sse_server.event_stream_server] +port = 19999 +max_concurrent_subscribers = 100 +event_stream_buffer_length = 5000 ``` + +* `event_stream_server.port` - The port under which the Sidecar's SSE server publishes events. +* `event_stream_server.max_concurrent_subscribers` - The maximum number of subscribers that can monitor the Sidecar's event stream. +* `event_stream_server.event_stream_buffer_length` - The number of events that the stream will hold in its buffer for reference when a subscriber reconnects. + +### REST server setup + +The following section determines outbound connection criteria for the Sidecar's REST server. + +```toml +[rest_api_server] +enable_server = true +port = 18888 +max_concurrent_requests = 50 +max_requests_per_second = 50 +request_timeout_in_seconds = 10 +``` + +* `enable_server` - If set to true, the REST API server will be enabled. +* `port` - The port for accessing the Sidecar's REST server. `18888` is the default, but operators are free to choose their own port as needed. 
+* `max_concurrent_requests` - The maximum total number of simultaneous requests that can be made to the REST server. +* `max_requests_per_second` - The maximum total number of requests that can be made per second. +* `request_timeout_in_seconds` - The total time before a request times out. + +### Storage setup + +This directory stores the SSE cache and an SQLite database if the Sidecar was configured to use SQLite. + +```toml [storage] storage_path = "./target/storage" ``` -### Database Connectivity +### Database connectivity setup -The Sidecar can connect to different types of databases. The current options are `SQLite` or `PostgreSQL`. The following sections show how to configure the database connection for one of these DBs. Note that the Sidecar can only connect to one DB at a time. +The Sidecar can connect to different types of databases. The current options are `SQLite` or `PostgreSQL`. The following sections show how to configure the database connection. Note that the Sidecar can only connect to one database at a time. -#### SQLite Database +#### SQLite database This section includes configurations for the SQLite database. -``` +```toml [storage.sqlite_config] file_name = "sqlite_database.db3" max_connections_in_pool = 100 -# https://www.sqlite.org/compile.html#default_wal_autocheckpoint wal_autocheckpointing_interval = 1000 ``` @@ -305,7 +403,7 @@ wal_autocheckpointing_interval = 1000 * `storage.sqlite_config.max_connections_in_pool` - The maximum number of connections to the database (should generally be left as is). * `storage.sqlite_config.wal_autocheckpointing_interval` - This controls how often the system commits pages to the database. The value determines the maximum number of pages before forcing a commit. More information can be found [here](https://www.sqlite.org/compile.html#default_wal_autocheckpoint). 
-#### PostgreSQL Database +#### PostgreSQL database The properties listed below are elements of the PostgreSQL database connection that can be configured for the Sidecar. @@ -343,13 +441,11 @@ SIDECAR_POSTGRES_MAX_CONNECTIONS="max connections" SIDECAR_POSTGRES_PORT="port" ``` -However, DB connectivity can also be configured using the Sidecar configuration file. - -If the DB environment variables and the Sidecar's configuration file have the same variable set, the DB environment variables will take precedence. +However, DB connectivity can also be configured using the Sidecar configuration file. If the DB environment variables and the Sidecar's configuration file have the same variable set, the DB environment variables will take precedence. It is possible to completely omit the PostgreSQL configuration from the Sidecar's configuration file. In this case, the Sidecar will attempt to connect to the PostgreSQL using the database environment variables or use some default values for non-critical variables. -``` +```toml [storage.postgresql_config] database_name = "event_sidecar" host = "localhost" @@ -358,44 +454,11 @@ database_username = "postgres" max_connections_in_pool = 30 ``` -#### Rest & Event Stream Criteria - -This information determines outbound connection criteria for the Sidecar's `rest_server`. - -``` -[rest_api_server] -enable_server = true -port = 18888 -max_concurrent_requests = 50 -max_requests_per_second = 50 -request_timeout_in_seconds = 10 -``` -* `enable_server` - If set to true, the RPC API server will be enabled. -* `port` - The port for accessing the sidecar's `rest_server`. `18888` is the default, but operators are free to choose their own port as needed. -* `max_concurrent_requests` - The maximum total number of simultaneous requests that can be made to the REST server. -* `max_requests_per_second` - The maximum total number of requests that can be made per second. -* `request_timeout_in_seconds` - The total time before a request times out. 
- -``` -[sse_server.event_stream_server] -port = 19999 -max_concurrent_subscribers = 100 -event_stream_buffer_length = 5000 -``` - -The `sse_server.event_stream_server` section specifies a port for the Sidecar's event stream. - -Additionally, there are the following two options: - -* `event_stream_server.port` - Port under which the SSE server is published. -* `event_stream_server.max_concurrent_subscribers` - The maximum number of subscribers that can monitor the Sidecar's event stream. -* `event_stream_server.event_stream_buffer_length` - The number of events that the stream will hold in its buffer for reference when a subscriber reconnects. - -### Admin Server +### Admin server setup This optional section configures the Sidecar's administrative server. If this section is not specified, the Sidecar will not start an admin server. -``` +```toml [admin_api_server] enable_server = true port = 18887 @@ -410,43 +473,31 @@ max_requests_per_second = 1 Access the admin server at `http://localhost:18887/metrics/`. -## Swagger Documentation - -Once the Sidecar is running, access the Swagger documentation at `http://localhost:18888/swagger-ui/`. You need to replace `localhost` with the IP address of the machine running the Sidecar application if you are running the Sidecar remotely. The Swagger documentation will allow you to test the REST API. - -## OpenAPI Specification +## Running and Testing the Sidecar -An OpenAPI schema is available at `http://localhost:18888/api-doc.json/`. You need to replace `localhost` with the IP address of the machine running the Sidecar application if you are running the Sidecar remotely. 
+### Prerequisites -## Unit Testing the Sidecar - -You can run the unit and integration tests included in this repository with the following command: - -``` -cargo test -``` - -You can also run the performance tests using the following command: - -``` -cargo test -- --include-ignored -``` +To compile, test, and run the Sidecar, install the following software first: -The [EXAMPLE_NCTL_CONFIG.toml](./resources/example_configs/EXAMPLE_NCTL_CONFIG.toml) file contains the configurations used for these tests. +* CMake 3.1.4 or greater +* [Rust](https://www.rust-lang.org/tools/install) +* pkg-config +* gcc +* g++ -## Running the Sidecar +### Running the Sidecar -After creating the configuration file, run the Sidecar using Cargo and point to the configuration file using the `--path-to-config` option, as shown below. The command needs to run with `root` privileges. +After creating the configuration file, run the Sidecar using `cargo` and point to the configuration file using the `--path-to-config` option, as shown below. The command needs to run with `root` privileges. -```shell +```sh sudo cargo run -- --path-to-config ./resources/example_configs/EXAMPLE_NODE_CONFIG.toml ``` The Sidecar application leverages tracing, which can be controlled by setting the `RUST_LOG` environment variable. -The following command will run the sidecar application with the `INFO` log level. +The following command will run the Sidecar application with the `INFO` log level. -``` +```sh RUST_LOG=info cargo run -p casper-sidecar -- --path-to-config ./resources/example_configs/EXAMPLE_NCTL_CONFIG.toml ``` @@ -460,11 +511,35 @@ The log levels, listed in order of increasing verbosity, are: Further details about log levels can be found [here](https://docs.rs/env_logger/0.9.1/env_logger/#enabling-logging). 
-## Testing the Sidecar using NCTL +### Testing the Sidecar + +You can run the unit and integration tests included in this repository with the following command: + +```sh +cargo test +``` + +You can also run the performance tests using this command: + +```sh +cargo test -- --include-ignored +``` + +The [EXAMPLE_NCTL_CONFIG.toml](./resources/example_configs/EXAMPLE_NCTL_CONFIG.toml) file contains the configurations used for these tests. + +#### Testing the Sidecar using NCTL The Sidecar application can be tested against live Casper nodes or a local [NCTL network](https://docs.casperlabs.io/dapp-dev-guide/building-dapps/setup-nctl/). -The configuration shown within this README will direct the Sidecar application to a locally hosted NCTL network if one is running. The Sidecar should function the same way it would with a live node, displaying events as they occur in the local NCTL network. +The configuration shown [here](./resources/example_configs/EXAMPLE_NCTL_CONFIG.toml) will direct the Sidecar application to a locally hosted NCTL network if one is running. The Sidecar should function the same way it would while connected to a live node, displaying events as they occur in the local NCTL network. + +## Swagger Documentation + +Once the Sidecar is running, access the Swagger documentation at `http://localhost:18888/swagger-ui/`. You need to replace `localhost` with the IP address of the machine running the Sidecar application if you are running the Sidecar remotely. The Swagger documentation will allow you to test the REST API. + +## OpenAPI Specification + +An OpenAPI schema is available at `http://localhost:18888/api-doc.json/`. You need to replace `localhost` with the IP address of the machine running the Sidecar application if you are running the Sidecar remotely. ## Troubleshooting Tips @@ -494,8 +569,8 @@ curl http://SIDECAR_URL:SIDECAR_ADMIN_PORT/metrics **Sample output**: -``` -# HELP node_statuses Current status of node to which sidecar is connected. 
Numbers mean: 0 - preparing; 1 - connecting; 2 - connected; 3 - reconnecting; -1 - connections_exhausted -> used up all connection attempts ; -2 - incompatible -> node is in an incompatible version +```sh +# HELP node_statuses Current status of node to which the Sidecar is connected. Numbers mean: 0 - preparing; 1 - connecting; 2 - connected; 3 - reconnecting; -1 - connections_exhausted -> used up all connection attempts ; -2 - incompatible -> node is in an incompatible version # TYPE node_statuses gauge node_statuses{node="35.180.42.211:9999"} 2 node_statuses{node="69.197.42.27:9999"} 2 @@ -516,7 +591,7 @@ In the above `node_statuses`, you can see which nodes are connecting, which are To diagnose errors, look for `error` logs and check the `error_counts` on the metrics page, `http://SIDECAR_URL:SIDECAR_ADMIN_PORT/metrics`, where most of the errors related to data flow will be stored: -``` +```sh # HELP error_counts Error counts # TYPE error_counts counter error_counts{category="connection_manager",description="fetching_from_stream_failed"} 6 @@ -526,7 +601,7 @@ error_counts{category="connection_manager",description="fetching_from_stream_fai To monitor the Sidecar's memory consumption, observe the metrics page, `http://SIDECAR_URL:SIDECAR_ADMIN_PORT/metrics`. Search for `process_resident_memory_bytes`: -``` +```sh # HELP process_resident_memory_bytes Resident memory size in bytes. # TYPE process_resident_memory_bytes gauge process_resident_memory_bytes 292110336 @@ -549,3 +624,7 @@ The easiest way to inspect the Sidecar’s REST API is with [Swagger](#swagger-d The Sidecar can be configured to limit concurrent requests (`max_concurrent_requests`) and requests per second (`max_requests_per_second`) for the REST and admin servers. However, remember that those are application-level guards, meaning that the operating system already accepted the connection, which used up the operating system's resources. 
Limiting potential DDoS attacks requires consideration before the requests are directed to the Sidecar application. + +## License + +Licensed under the [Apache License Version 2.0](https://github.com/casper-network/casper-node/blob/master/LICENSE). diff --git a/USAGE.md b/USAGE.md index 38030f8a..66cb5dd0 100644 --- a/USAGE.md +++ b/USAGE.md @@ -1,4 +1,4 @@ -# Casper Event Sidecar USAGE +# Casper Sidecar USAGE This document describes how to consume events and perform queries using the Sidecar, covering the following topics: @@ -20,11 +20,13 @@ Events are emitted on two endpoints: For more information on various event types emitted by the node, visit the [Monitoring and Consuming Events](https://docs.casperlabs.io/developers/dapps/monitor-and-consume-events/#event-types) documentation. -### Monitoring the Sidecar Event Stream +### Monitoring the Sidecar event stream It is possible to monitor the Sidecar event stream using *cURL*, depending on how the HOST and PORT are configured. -```json +The Sidecar can connect to Casper nodes with versions greater or equal to `2.0.0`. + +```sh curl -s http:///events ``` @@ -33,27 +35,25 @@ curl -s http:///events Given this [example configuration](./resources/example_configs/EXAMPLE_NODE_CONFIG.toml), here are the commands for each endpoint: - ```json - curl -sN http://127.0.0.1:19999/events - ``` +```sh +curl -sN http://127.0.0.1:19999/events +``` Also, the Sidecar exposes an endpoint for Sidecar-generated events: - ```json - curl -sN http://127.0.0.1:19999/events/sidecar - ``` - -### The API Version of Node Events +```sh +curl -sN http://127.0.0.1:19999/events/sidecar +``` -An `ApiVersion` event is always emitted when a new client connects to a node's SSE server, informing the client of the node's software version. +### Node events versioning -When a client connects to the Sidecar, the Sidecar displays the node’s API version, `ApiVersion`, which it receives from the node. 
Then, it starts streaming the events coming from the node. The `ApiVersion` may differ from the node’s build version. +An `ApiVersion` event is always emitted when the Sidecar connects to a node's SSE server, broadcasting the node's software version. Then, the Sidecar starts streaming the events coming from the node. Note that the `ApiVersion` may differ from the node’s build version. If the node goes offline, the `ApiVersion` may differ when it restarts (i.e., in the case of an upgrade). In this case, the Sidecar will report the new `ApiVersion` to its client. If the node’s `ApiVersion` has not changed, the Sidecar will not report the version again and will continue to stream messages that use the previous version. -Here is an example of what the API version would look like while listening on the Sidecar’s `TransactionAccepted` event stream: +Here is an example of what the API version would look like while listening on the Sidecar’s event stream. The colons represent "keep-alive" messages. -``` +```sh curl -sN http://127.0.0.1:19999/events data:{"ApiVersion":"2.0.0"} @@ -68,17 +68,13 @@ id:21821471 : ``` -#### Middleware Mode - -The Sidecar can connect simultaneously to nodes with different build versions, which send messages with different API versions. There is also the rare possibility of nodes changing API versions and not being in sync with other connected nodes. Although this situation would be rare, clients should be able to parse messages with different API versions. - ->**Note**: The Sidecar can connect to Casper nodes with versions greater or equal to `2.0.0`. +>**Note**: The Sidecar can connect simultaneously to nodes with different build versions, which send messages with different API versions. There is also the rare possibility of nodes changing API versions and not being in sync with other connected nodes. Although this situation would be rare, clients should be able to parse messages with different API versions. 
-### The Version of Sidecar Events +### Sidecar events versioning When a client connects to the `events/sidecar` endpoint, it will receive a message containing the version of the Sidecar software. Release version `1.1.0` would look like this: -``` +```sh curl -sN http://127.0.0.1:19999/events/sidecar data:{"SidecarVersion":"1.1.0"} @@ -86,20 +82,19 @@ data:{"SidecarVersion":"1.1.0"} : : - ``` Note that the SidecarVersion differs from the APIVersion emitted by the node event streams. You will also see the keep-alive messages as colons, ensuring the connection is active. -### The Node Shutdown Event +### The node's Shutdown event -When the node sends a Shutdown event and disconnects from the Sidecar, the Sidecar will report it as part of the event stream and on the `/events` endpoint. The Sidecar will continue to operate and attempt to reconnect to the node according to the `max_attempts` and `delay_between_retries_in_seconds` settings specified in its configuration. +When the node sends a Shutdown event and disconnects from the Sidecar, the Sidecar will report it as part of the event stream on the `/events` endpoint. The Sidecar will continue to operate and attempt to reconnect to the node according to the `max_attempts` and `delay_between_retries_in_seconds` settings specified in its configuration. The Sidecar does not expose Shutdown events via its REST API. Here is an example of how the stream might look like if the node went offline for an upgrade and came back online after a Shutdown event with a new `ApiVersion`: -``` +```sh curl -sN http://127.0.0.1:19999/events data:{"ApiVersion":"2.0.0"} @@ -126,18 +121,17 @@ id:3 : : - ``` Note that the Sidecar can emit another type of shutdown event on the `events/sidecar` endpoint, as described below. 
-### The Sidecar Shutdown Event +### The Sidecar Shutdown event If the Sidecar attempts to connect to a node that does not come back online within the maximum number of reconnection attempts, the Sidecar will start a controlled shutdown process. It will emit a Sidecar-specific Shutdown event on the [events/sidecar](#the-sidecar-shutdown-event) endpoint, designated for events originating solely from the Sidecar service. The other event streams do not get this message because they only emit messages from the node. -The message structure of the Sidecar shutdown event is the same as the [node shutdown event](#the-node-shutdown-event). The sidecar event stream would look like this: +The message structure of the Sidecar shutdown event is the same as the [node shutdown event](#the-node-shutdown-event). The Sidecar event stream would look like this: -``` +```sh curl -sN http://127.0.0.1:19999/events/sidecar data:{"SidecarVersion":"1.1.0"} @@ -152,11 +146,29 @@ data:"Shutdown" id:8 ``` +## Replaying the Event Stream + +This command will replay the event stream from an old event onward. The server will replay all the cached events if the ID is 0 or if you specify an event ID already purged from the node's cache. + +Replace the `HOST`, `PORT`, and `ID` fields with the values needed. + +```sh +curl -sN http://HOST:PORT/events?start_from=ID +``` + +**Example:** + +```sh +curl -sN http://65.21.235.219:9999/events?start_from=29267508 +``` + +Note that certain shells like `zsh` may require an escape character before the question mark. + ## The REST Server The Sidecar provides a RESTful endpoint for useful queries about the state of the network. -### Latest Block +### Latest block Retrieve information about the last block added to the linear chain. @@ -164,7 +176,7 @@ The path URL is `/block`. Example: -```json +```sh curl -s http://127.0.0.1:18888/block ``` @@ -178,7 +190,7 @@ curl -s http://127.0.0.1:18888/block

-### Block by Hash +### Block by hash Retrieve information about a block given its block hash. @@ -186,7 +198,7 @@ The path URL is `/block/`. Enter a valid block hash. Example: -```json +```sh curl -s http://127.0.0.1:18888/block/bd2e0c36150a74f50d9884e38a0955f8b1cba94821b9828c5f54d8929d6151bc ``` @@ -199,7 +211,7 @@ curl -s http://127.0.0.1:18888/block/bd2e0c36150a74f50d9884e38a0955f8b1cba94821b

-### Block by Height +### Block by chain height Retrieve information about a block, given a specific block height. @@ -207,7 +219,7 @@ The path URL is `/block/`. Enter a valid number represe Example: -```json +```sh curl -s http://127.0.0.1:18888/block/336460 ``` @@ -220,7 +232,7 @@ curl -s http://127.0.0.1:18888/block/336460

-### Transaction by Hash +### Transaction by hash Retrieve an aggregate of the various states a transaction goes through, given its transaction hash. The endpoint also needs the transaction type as an input (`deploy` or `version1`) The node does not emit this event, but the Sidecar computes it and returns it for the given transaction. This endpoint behaves differently than other endpoints, which return the raw event received from the node. @@ -230,7 +242,7 @@ The output differs depending on the transaction's status, which changes over tim Example: -```json +```sh curl -s http://127.0.0.1:18888//transaction/version1/3141e85f8075c3a75c2a1abcc79810c07d103ff97c03200ab0d0baf91995fe4a ``` @@ -240,7 +252,8 @@ The sample output below is for a transaction that was accepted but has yet to be Transaction accepted but not processed yet ```json -{"transaction_hash": "3141e85f8075c3a75c2a1abcc79810c07d103ff97c03200ab0d0baf91995fe4a","transaction_accepted": {"header": {"api_version": "2.0.0","network_name": "casper-net-1"},"payload": {"transaction": {"Version1": {"hash": "3141e85f8075c3a75c2a1abcc79810c07d103ff97c03200ab0d0baf91995fe4a","header": {"chain_name": "casper-net-1","timestamp": "2024-03-20T13:31:59.772Z","ttl": "30m","body_hash": "40c7476a175fb97656ec6da1ace2f1900a9d353f1637943a30edd5385494b345","pricing_mode": {"Fixed": {"gas_price_tolerance": 1000}},"initiator_addr": {"PublicKey": "01d848e225db95e34328ca1c64d73ecda50f5070fd6b21037453e532d085a81973"}},"body": {"args": [],"target": {"Session": {"kind": "Standard","module_bytes":"","runtime": "VmCasperV1"}},"entry_point": {"Custom": "test"},"scheduling": "Standard"},"approvals": [{"signer": "01d848e225db95e34328ca1c64d73ecda50f5070fd6b21037453e532d085a81973","signature": "0154fd295f5d4d62544f63d70470de28b2bf2cddecac2a237b6a2a78d25ee14b21ea2861d711a51f57b3f9f74e247a8d26861eceead6569f233949864a9d5fa100"}]}}}},"transaction_processed": ,"transaction_expired": false}``` +{"transaction_hash": 
"3141e85f8075c3a75c2a1abcc79810c07d103ff97c03200ab0d0baf91995fe4a","transaction_accepted": {"header": {"api_version": "2.0.0","network_name": "casper-net-1"},"payload": {"transaction": {"Version1": {"hash": "3141e85f8075c3a75c2a1abcc79810c07d103ff97c03200ab0d0baf91995fe4a","header": {"chain_name": "casper-net-1","timestamp": "2024-03-20T13:31:59.772Z","ttl": "30m","body_hash": "40c7476a175fb97656ec6da1ace2f1900a9d353f1637943a30edd5385494b345","pricing_mode": {"Fixed": {"gas_price_tolerance": 1000}},"initiator_addr": {"PublicKey": "01d848e225db95e34328ca1c64d73ecda50f5070fd6b21037453e532d085a81973"}},"body": {"args": [],"target": {"Session": {"kind": "Standard","module_bytes":"","runtime": "VmCasperV1"}},"entry_point": {"Custom": "test"},"scheduling": "Standard"},"approvals": [{"signer": "01d848e225db95e34328ca1c64d73ecda50f5070fd6b21037453e532d085a81973","signature": "0154fd295f5d4d62544f63d70470de28b2bf2cddecac2a237b6a2a78d25ee14b21ea2861d711a51f57b3f9f74e247a8d26861eceead6569f233949864a9d5fa100"}]}}}},"transaction_processed": ,"transaction_expired": false} +```

@@ -250,11 +263,13 @@ The next sample output is for a transaction that was accepted and processed. Transaction accepted and processed successfully ```json -{"transaction_hash": "3141e85f8075c3a75c2a1abcc79810c07d103ff97c03200ab0d0baf91995fe4a","transaction_accepted": {"header": {"api_version": "2.0.0","network_name": "casper-net-1"},"payload": {"transaction": {"Version1": {"hash": "3141e85f8075c3a75c2a1abcc79810c07d103ff97c03200ab0d0baf91995fe4a","header": {"chain_name": "casper-net-1","timestamp": "2024-03-20T13:31:59.772Z","ttl": "30m","body_hash": "40c7476a175fb97656ec6da1ace2f1900a9d353f1637943a30edd5385494b345","pricing_mode": {"Fixed": {"gas_price_tolerance": 1000}},"initiator_addr": {"PublicKey": "01d848e225db95e34328ca1c64d73ecda50f5070fd6b21037453e532d085a81973"}},"body": {"args": [],"target": {"Session": {"kind": "Standard","module_bytes":"","runtime": "VmCasperV1"}},"entry_point": {"Custom": "test"},"scheduling": "Standard"},"approvals": [{"signer": "01d848e225db95e34328ca1c64d73ecda50f5070fd6b21037453e532d085a81973","signature": "0154fd295f5d4d62544f63d70470de28b2bf2cddecac2a237b6a2a78d25ee14b21ea2861d711a51f57b3f9f74e247a8d26861eceead6569f233949864a9d5fa100"}]}}}},"transaction_processed": {"transaction_hash":{"Deploy":"c6907d46a5cc61ef30c66dbb6599208a57d3d62812c5f061169cdd7ad4e52597"},"initiator_addr":{"PublicKey":"0202dec9e70126ddd13af6e2e14771339c22f73626202a28ef1ed41594a3b2a79156"},"timestamp":"2024-03-20T13:58:57.301Z","ttl":"2m 
53s","block_hash":"6c6a1fb17147fe467a52f8078e4c6d1143e8f61e2ec0c57938a0ac5f49e3f960","execution_result":{"Version1":{"Success":{"effect":{"operations":[{"key":"9192013132486795888","kind":"NoOp"}],"transforms":[{"key":"9278390014984155010","transform":{"AddUInt64":17967007786823421753}},{"key":"8284631679508534160","transform":{"AddUInt512":"13486131286369918968"}},{"key":"11406903664472624400","transform":{"AddKeys":[{"name":"5532223989822042950","key":"6376159234520705888"},{"name":"9797089120764120320","key":"3973583116099652644"},{"name":"17360643427404656075","key":"3412027808185329863"},{"name":"9849256366384177518","key":"1556404389498537987"},{"name":"14237913702817074429","key":"16416969798013966173"}]}},{"key":"11567235260771335457","transform":"Identity"},{"key":"13285707355579107355","transform":"Identity"}]},"transfers":[],"cost":"14667737366273622842"}}},"messages":[{"entity_addr":{"SmartContract":[193,43,184,185,6,88,15,83,243,107,130,63,136,174,24,148,79,214,87,238,171,138,195,141,119,235,134,196,253,221,36,0]},"message":{"String":"wLNta4zbpJiW5ScjagPXm5LoGViYApCfIbEXJycPUuLQP4fA7REhV4LdBRbZ7bQb"},"topic_name":"FdRRgbXEGS1xKEXCJKvaq7hVyZ2ZUlSb","topic_name_hash":"473f644238bbb334843df5bd06a85e8bc34d692cce804de5f97e7f344595c769","topic_index":4225483688,"block_index":16248749308130060594},{"entity_addr":{"Account":[109,75,111,241,219,141,104,160,197,208,7,245,112,199,31,150,68,65,166,247,43,111,0,56,32,124,7,36,107,230,100,132]},"message":{"String":"U5qR82wJoPDGJWhwJ4qkblsu6Q5DDqDt0Q2pAjhVOUjn520PdvYOC27oo4aDEosw"},"topic_name":"zMEkHxGgUUSMmb7eWJhFs5e6DH9vXvCg","topic_name_hash":"d911ebafb53ccfeaf5c970e462a864622ec4e3a1030a17a8cfaf4d7a4cd74d48","topic_index":560585407,"block_index":15889379229443860143}]},"transaction_expired": false}``` +{"transaction_hash": "3141e85f8075c3a75c2a1abcc79810c07d103ff97c03200ab0d0baf91995fe4a","transaction_accepted": {"header": {"api_version": "2.0.0","network_name": "casper-net-1"},"payload": {"transaction": 
{"Version1": {"hash": "3141e85f8075c3a75c2a1abcc79810c07d103ff97c03200ab0d0baf91995fe4a","header": {"chain_name": "casper-net-1","timestamp": "2024-03-20T13:31:59.772Z","ttl": "30m","body_hash": "40c7476a175fb97656ec6da1ace2f1900a9d353f1637943a30edd5385494b345","pricing_mode": {"Fixed": {"gas_price_tolerance": 1000}},"initiator_addr": {"PublicKey": "01d848e225db95e34328ca1c64d73ecda50f5070fd6b21037453e532d085a81973"}},"body": {"args": [],"target": {"Session": {"kind": "Standard","module_bytes":"","runtime": "VmCasperV1"}},"entry_point": {"Custom": "test"},"scheduling": "Standard"},"approvals": [{"signer": "01d848e225db95e34328ca1c64d73ecda50f5070fd6b21037453e532d085a81973","signature": "0154fd295f5d4d62544f63d70470de28b2bf2cddecac2a237b6a2a78d25ee14b21ea2861d711a51f57b3f9f74e247a8d26861eceead6569f233949864a9d5fa100"}]}}}},"transaction_processed": {"transaction_hash":{"Deploy":"c6907d46a5cc61ef30c66dbb6599208a57d3d62812c5f061169cdd7ad4e52597"},"initiator_addr":{"PublicKey":"0202dec9e70126ddd13af6e2e14771339c22f73626202a28ef1ed41594a3b2a79156"},"timestamp":"2024-03-20T13:58:57.301Z","ttl":"2m 
53s","block_hash":"6c6a1fb17147fe467a52f8078e4c6d1143e8f61e2ec0c57938a0ac5f49e3f960","execution_result":{"Version1":{"Success":{"effect":{"operations":[{"key":"9192013132486795888","kind":"NoOp"}],"transforms":[{"key":"9278390014984155010","transform":{"AddUInt64":17967007786823421753}},{"key":"8284631679508534160","transform":{"AddUInt512":"13486131286369918968"}},{"key":"11406903664472624400","transform":{"AddKeys":[{"name":"5532223989822042950","key":"6376159234520705888"},{"name":"9797089120764120320","key":"3973583116099652644"},{"name":"17360643427404656075","key":"3412027808185329863"},{"name":"9849256366384177518","key":"1556404389498537987"},{"name":"14237913702817074429","key":"16416969798013966173"}]}},{"key":"11567235260771335457","transform":"Identity"},{"key":"13285707355579107355","transform":"Identity"}]},"transfers":[],"cost":"14667737366273622842"}}},"messages":[{"entity_addr":{"SmartContract":[193,43,184,185,6,88,15,83,243,107,130,63,136,174,24,148,79,214,87,238,171,138,195,141,119,235,134,196,253,221,36,0]},"message":{"String":"wLNta4zbpJiW5ScjagPXm5LoGViYApCfIbEXJycPUuLQP4fA7REhV4LdBRbZ7bQb"},"topic_name":"FdRRgbXEGS1xKEXCJKvaq7hVyZ2ZUlSb","topic_name_hash":"473f644238bbb334843df5bd06a85e8bc34d692cce804de5f97e7f344595c769","topic_index":4225483688,"block_index":16248749308130060594},{"entity_addr":{"Account":[109,75,111,241,219,141,104,160,197,208,7,245,112,199,31,150,68,65,166,247,43,111,0,56,32,124,7,36,107,230,100,132]},"message":{"String":"U5qR82wJoPDGJWhwJ4qkblsu6Q5DDqDt0Q2pAjhVOUjn520PdvYOC27oo4aDEosw"},"topic_name":"zMEkHxGgUUSMmb7eWJhFs5e6DH9vXvCg","topic_name_hash":"d911ebafb53ccfeaf5c970e462a864622ec4e3a1030a17a8cfaf4d7a4cd74d48","topic_index":560585407,"block_index":15889379229443860143}]},"transaction_expired": false} +``` +

-### Accepted Transaction by Hash +### Accepted transaction by hash Retrieve information about an accepted transaction, given its transaction hash. @@ -262,7 +277,7 @@ The path URL is `/transaction/accepted//
-### Expired Transaction by Hash +### Expired transaction by hash Retrieve information about a transaction that expired, given its trnasaction type and transaction hash. @@ -284,7 +299,7 @@ The path URL is `/transaction/expired// -### Processed Transaction by Hash +### Processed transaction by hash Retrieve information about a transaction that was processed, given its transaction hash. The path URL is `/transaction/expired/version1/`. Enter a valid transaction hash. Example: -```json +```sh curl -s http://127.0.0.1:18888/transaction/processed/version1/8204af872d7d19ef8da947bce67c7a55449bc4e2aa12d2756e9ec7472b4854f7 ``` @@ -317,29 +332,29 @@ curl -s http://127.0.0.1:18888/transaction/processed/version1/8204af872d7d19ef8d

-### Faults by Public Key +### Faults by public key Retrieve the faults associated with a validator's public key. The path URL is `/faults/`. Enter a valid hexadecimal representation of a validator's public key. Example: -```json +```sh curl -s http://127.0.0.1:18888/faults/01a601840126a0363a6048bfcbb0492ab5a313a1a19dc4c695650d8f3b51302703 ``` -### Faults by Era +### Faults by era Return the faults associated with an era, given a valid era identifier. The path URL is: `/faults/`. Enter an era identifier. Example: -```json +```sh curl -s http://127.0.0.1:18888/faults/2304 ``` -### Finality Signatures by Block +### Finality signatures by block Retrieve the finality signatures in a block, given its block hash. @@ -347,11 +362,11 @@ The path URL is: `/signatures/`. Enter a valid block hash Example: -```json +```sh curl -s http://127.0.0.1:18888/signatures/85aa2a939bc3a4afc6d953c965bab333bb5e53185b96bb07b52c295164046da2 ``` -### Step by Era +### Step by era Retrieve the step event emitted at the end of an era, given a valid era identifier. @@ -359,28 +374,28 @@ The path URL is: `/step/`. Enter a valid era identifier. Example: -```json +```sh curl -s http://127.0.0.1:18888/step/7268 ``` -### Missing Filter +### Missing filter If no filter URL was specified after the root address (HOST:PORT), an error message will be returned. Example: -```json +```sh curl http://127.0.0.1:18888 {"code":400,"message":"Invalid request path provided"} ``` -### Invalid Filter +### Invalid filter If an invalid filter was specified, an error message will be returned. 
Example: -```json +```sh curl http://127.0.0.1:18888/other {"code":400,"message":"Invalid request path provided"} ``` diff --git a/event_sidecar/src/database/writer_generator.rs b/event_sidecar/src/database/writer_generator.rs index d5f90973..bcdccb45 100644 --- a/event_sidecar/src/database/writer_generator.rs +++ b/event_sidecar/src/database/writer_generator.rs @@ -312,8 +312,7 @@ network_name: String, event_id: u32, event_source_address: String, api_version: String, -network_name: String, - + network_name: String, ) -> Result { #[cfg(feature = "additional-metrics")] let start = Instant::now(); diff --git a/event_sidecar/src/utils.rs b/event_sidecar/src/utils.rs index 9b4d3034..92866650 100644 --- a/event_sidecar/src/utils.rs +++ b/event_sidecar/src/utils.rs @@ -263,14 +263,14 @@ pub mod tests { config: TestingConfig, ) -> tokio::task::JoinHandle> { tokio::spawn(async move { unpack_test_config_and_run(config, true).await }) - // starting event sidecar + // starting the sidecar } pub async fn start_sidecar( config: TestingConfig, ) -> tokio::task::JoinHandle> { tokio::spawn(async move { unpack_test_config_and_run(config, false).await }) - // starting event sidecar + // starting the sidecar } pub fn build_test_config() -> (TestingConfig, TempDir, u16, u16, u16) { diff --git a/json_rpc/README.md b/json_rpc/README.md index 9b16ca2d..b0c5cc91 100644 --- a/json_rpc/README.md +++ b/json_rpc/README.md @@ -1,4 +1,4 @@ -# `casper-json-rpc` +# The `casper-json-rpc` Library [![LOGO](https://raw.githubusercontent.com/casper-network/casper-node/master/images/casper-association-logo-primary.svg)](https://casper.network/) @@ -7,16 +7,15 @@ [![Documentation](https://docs.rs/casper-node/badge.svg)](https://docs.rs/casper-json-rpc) [![License](https://img.shields.io/badge/license-Apache-blue)](https://github.com/casper-network/casper-node/blob/master/LICENSE) -A library suitable for use as the framework for a JSON-RPC server. 
+The `casper-json-rpc` library described here can be used as the framework for a JSON-RPC server. # Usage -Normally usage will involve two steps: - * construct a set of request handlers using a - [`RequestHandlersBuilder`](https://docs.rs/casper-json-rpc/latest/casper_json_rpc/struct.RequestHandlersBuilder.html) - * call [`casper_json_rpc::route`](https://docs.rs/casper-json-rpc/latest/casper_json_rpc/fn.route.html) to construct a - boxed warp filter ready to be passed to [`warp::service`](https://docs.rs/warp/latest/warp/fn.service.html) for - example +Typical usage of this library involves two steps: + +* Construct a set of request handlers using a +[`RequestHandlersBuilder`](https://docs.rs/casper-json-rpc/latest/casper_json_rpc/struct.RequestHandlersBuilder.html). +* Call [`casper_json_rpc::route`](https://docs.rs/casper-json-rpc/latest/casper_json_rpc/fn.route.html) to construct a boxed warp filter ready to be passed to [`warp::service`](https://docs.rs/warp/latest/warp/fn.service.html). # Example @@ -61,15 +60,15 @@ async fn main() { } ``` -If this receives a request such as +The following is a sample request: -``` +```sh curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":"id","method":"get"}' http://127.0.0.1:3030/rpc ``` -then the server will respond with +Here is a sample response: -```json +```sh {"jsonrpc":"2.0","id":"id","result":"got it"} ``` @@ -77,13 +76,12 @@ then the server will respond with To return a JSON-RPC response indicating an error, use [`Error::new`](https://docs.rs/casper-json-rpc/latest/casper_json_rpc/struct.Error.html#method.new). Most error -conditions which require returning a reserved error are already handled in the provided warp filters. The only +conditions that require returning a reserved error are already handled in the provided warp filters. 
The only exception is -[`ReservedErrorCode::InvalidParams`](https://docs.rs/casper-json-rpc/latest/casper_json_rpc/enum.ReservedErrorCode.html#variant.InvalidParams) -which should be returned by any RPC handler which deems the provided `params: Option` to be invalid for any +[`ReservedErrorCode::InvalidParams`](https://docs.rs/casper-json-rpc/latest/casper_json_rpc/enum.ReservedErrorCode.html#variant.InvalidParams), which should be returned by any RPC handler that deems the provided `params: Option` to be invalid for any reason. -Generally a set of custom error codes should be provided. These should all implement +Generally, a set of custom error codes should be provided. These should all implement [`ErrorCodeT`](https://docs.rs/casper-json-rpc/latest/casper_json_rpc/trait.ErrorCodeT.html). ## Example custom error code diff --git a/resources/ETC_README.md b/resources/ETC_README.md index 216a53f0..9ac25ceb 100644 --- a/resources/ETC_README.md +++ b/resources/ETC_README.md @@ -1,237 +1,74 @@ -# Casper Event Sidecar README for Node Operators +# Casper Sidecar README for Node Operators -## Summary of Purpose +This page contains specific instructions for node operators. Before proceeding, familiarize yourself with the main [README](../README.md) file, which covers the following: + - [Summary of purpose](../README.md#summary-of-purpose) + - [System components and architecture](../README.md#system-components-and-architecture) + - [Configuration options](../README.md#configuring-the-sidecar) + - [Running and testing the Sidecar](../README.md#running-and-testing-the-sidecar) + - [Troubleshooting tips](../README.md#troubleshooting-tips) -The Casper Event Sidecar is an application that runs in tandem with the node process. This reduces the load on the node process by allowing subscribers to monitor the event stream through the Sidecar, while the node focuses entirely on the blockchain. Users needing access to the JSON-RPC will still need to query the node directly. 
-While the primary use case for the Sidecar application is running alongside the node on the same machine, it can be run remotely if necessary. - -### System Components & Architecture - -Casper Nodes offer a Node Event Stream API returning Server-Sent Events (SSEs) that hold JSON-encoded data. The SSE Sidecar uses this API to achieve the following goals: - -* Build a sidecar middleware service that reads the Event Stream of all connected nodes, acting as a passthrough and replicating the SSE interface of the connected nodes and their filters (i.e., `/main`, `/deploys`, and `/sigs` with support for the use of the `?start_from=` query to allow clients to get previously sent events from the Sidecar's buffer). - -* Provide a new RESTful endpoint that is discoverable to node operators. - -The SSE Sidecar uses one ring buffer for outbound events, providing some robustness against unintended subscriber disconnects. If a disconnected subscriber re-subscribes before the buffer moves past their last received event, there will be no gap in the event history if they use the `start_from` URL query. - - -## Configuration +## Configuring the Sidecar The file `/etc/casper-sidecar/config.toml` holds a default configuration. This should work if installed on a Casper node. If you install the Sidecar on an external server, you must update the `ip-address` values under `node_connections` appropriately. -### Node Connections - -The Sidecar can connect to Casper nodes with versions greater or equal to `2.0.0`. - -The `node_connections` option configures the node (or multiple nodes) to which the Sidecar will connect and the parameters under which it will operate with that node. 
- -``` -[[sse_server.connections]] -ip_address = "127.0.0.1" -sse_port = 9999 -rest_port = 8888 -max_attempts = 10 -delay_between_retries_in_seconds = 5 -allow_partial_connection = false -enable_logging = true -connection_timeout_in_seconds = 3 -no_message_timeout_in_seconds = 60 -sleep_between_keep_alive_checks_in_seconds = 30 -``` - -* `ip_address` - The IP address of the node to monitor. -* `sse_port` - The node's event stream (SSE) port. This [example configuration](../resources/example_configs/EXAMPLE_NODE_CONFIG.toml) uses port `9999`. -* `rest_port` - The node's REST endpoint for status and metrics. This [example configuration](../resources/example_configs/EXAMPLE_NODE_CONFIG.toml) uses port `8888`. -* `max_attempts` - The maximum number of attempts the Sidecar will make to connect to the node. If set to `0`, the Sidecar will not attempt to connect. -* `delay_between_retries_in_seconds` - The delay between attempts to connect to the node. -* `allow_partial_connection` - Determining whether the sidecar will allow a partial connection to this node. -* `enable_logging` - This enables logging of events from the node in question. -* `connection_timeout_in_seconds` - Number of seconds before the connection request times out. Parameter is optional, defaults to 5 -* `no_message_timeout_in_seconds` - Number of seconds after which the connection will be restarted if no bytes were received. Parameter is optional, defaults to 120 -* `sleep_between_keep_alive_checks_in_seconds` - Optional parameter specifying the time intervals (in seconds) for checking if the connection is still alive. 
Defaults to 60 - -Connecting to multiple nodes requires multiple `[[sse_server.connections]]` sections: - -``` -[[sse_server.connections]] -ip_address = "127.0.0.1" -sse_port = 9999 -rest_port = 8888 -max_attempts = 10 -delay_between_retries_in_seconds = 5 -allow_partial_connection = false -enable_logging = true - -[[sse_server.connections]] -ip_address = "18.154.79.193" -sse_port = 1234 -rest_port = 3456 -max_attempts = 10 -delay_between_retries_in_seconds = 5 -allow_partial_connection = false -enable_logging = true -``` - -### Storage +For more information, including how to setup the SSE, RPC, REST, and Admin servers, read the [configuration options](../README.md#configuring-the-sidecar) in the main README. -This directory stores the SSE cache and an SQLite database if the Sidecar is configured to use SQLite. -``` -[storage] -storage_path = "/var/lib/casper-sidecar" -``` - -### Database Connectivity - - - -The Sidecar can connect to different types of databases. The current options are `SQLite` or `PostgreSQL`. The following sections show how to configure the database connection for one of these DBs. Note that the Sidecar can only connect to one DB at a time. - -#### SQLite Database - -This section includes configurations for the SQLite database. - -``` -[storage.sqlite_config] -file_name = "sqlite_database.db3" -max_connections_in_pool = 100 -# https://www.sqlite.org/compile.html#default_wal_autocheckpoint -wal_autocheckpointing_interval = 1000 -``` - -* `file_name` - The database file path. -* `max_connections_in_pool` - The maximum number of connections to the database. (Should generally be left as is.) -* `wal_autocheckpointing_interval` - This controls how often the system commits pages to the database. The value determines the maximum number of pages before forcing a commit. More information can be found [here](https://www.sqlite.org/compile.html#default_wal_autocheckpoint). 
- -#### PostgreSQL Database - -The properties listed below are elements of the PostgreSQL database connection that can be configured for the Sidecar. - -* `database_name` - Name of the database. -* `host` - URL to PostgreSQL instance. -* `database_username` - Username. -* `database_password` - Database password. -* `max_connections_in_pool` - The maximum number of connections to the database. -* `port` - The port for the database connection. - - -To run the Sidecar with PostgreSQL, you can set the following database environment variables to control how the Sidecar connects to the database. This is the suggested method to set the connection information for the PostgreSQL database. - -``` -SIDECAR_POSTGRES_USERNAME="your username" -``` +## Installing the Sidecar on a Node -``` -SIDECAR_POSTGRES_PASSWORD="your password" -``` +The following command will install the Debian package for the Casper Sidecar service on various flavors of Linux. -``` -SIDECAR_POSTGRES_DATABASE_NAME="your database name" -``` + -``` -SIDECAR_POSTGRES_HOST="your host" +```bash +sudo apt install ./casper-sidecar_0.1.0-0_amd64.deb ``` -``` -SIDECAR_POSTGRES_MAX_CONNECTIONS="max connections" -``` +Check the service status: +```bash +systemctl status casper-sidecar ``` -SIDECAR_POSTGRES_PORT="port" -``` - -However, DB connectivity can also be configured using the Sidecar configuration file. - -If the DB environment variables and the Sidecar's configuration file have the same variable set, the DB environment variables will take precedence. -It is possible to completely omit the PostgreSQL configuration from the Sidecar's configuration file. In this case, the Sidecar will attempt to connect to the PostgreSQL using the database environment variables or use some default values for non-critical variables. +Check the logs and make sure the service is running as expected. 
+```bash +journalctl --no-pager -u casper-sidecar ``` -[storage.postgresql_config] -database_name = "event_sidecar" -host = "localhost" -database_password = "p@$$w0rd" -database_username = "postgres" -max_connections_in_pool = 30 -``` - -### REST & Event Stream Criteria - -This information determines outbound connection criteria for the Sidecar's `rest_server`. - +If you see any errors, you may need to [update the configuration](#configuring-the-service) and restart the service with the commands below. -``` -[rest_api_server] -port = 18888 -max_concurrent_requests = 50 -max_requests_per_second = 50 -request_timeout_in_seconds = 10 -``` +## Running the Sidecar on a Node -* `port` - The port for accessing the Sidecar's `rest_server`. `18888` is the default, but operators are free to choose their own port as needed. -* `max_concurrent_requests` - The maximum total number of simultaneous requests that can be made to the REST server. -* `max_requests_per_second` - The maximum total number of requests that can be made per second. -* `request_timeout_in_seconds` - The total time before a request times out. +The `casper-sidecar` service starts after installation, using the systemd service file. -``` -[event_stream_server] -port = 19999 -max_concurrent_subscribers = 100 -event_stream_buffer_length = 5000 -``` +### Stop -The `event_stream_server` section specifies a port for the Sidecar's event stream. +`sudo systemctl stop casper-sidecar.service` -Additionally, there are the following two options: +### Start -* `max_concurrent_subscribers` - The maximum number of subscribers that can monitor the Sidecar's event stream. -* `event_stream_buffer_length` - The number of events that the stream will hold in its buffer for reference when a subscriber reconnects. +`sudo systemctl start casper-sidecar.service` -### Admin Server - +## Sidecar Storage -This optional section configures the Sidecar's administrative REST server. 
If this section is not specified, the Sidecar will not start an admin server. +This directory stores the SSE cache and a database if the Sidecar was configured to use one. -``` -[admin_api_server] -port = 18887 -max_concurrent_requests = 1 -max_requests_per_second = 1 +```toml +[storage] +storage_path = "/var/lib/casper-sidecar" ``` -* `port` - The port for accessing the Sidecar's admin REST server. -* `max_concurrent_requests` - The maximum total number of simultaneous requests that can be sent to the admin server. -* `max_requests_per_second` - The maximum total number of requests that can be sent per second to the admin server. - -Access the admin server at `http://localhost:18887/metrics/`. +The DB setup is described [here](../README#database-connectivity-setup). ## Swagger Documentation -Once the Sidecar is running, access the Swagger documentation at `http://localhost:18888/swagger-ui/`. +If the Sidecar is running locally, access the Swagger documentation at `http://localhost:18888/swagger-ui/`. ## OpenAPI Specification -An OpenAPI schema is available at `http://localhost:18888/api-doc.json/`. - -## Running the Event Sidecar - -The `casper-sidecar` service starts after installation, using the systemd service file. - -### Stop - -`sudo systemctl stop casper-sidecar.service` - -### Start - -`sudo systemctl start casper-sidecar.service` - -### Logs - -`journalctl --no-pager -u casper-sidecar` \ No newline at end of file +An OpenAPI schema is available at `http://localhost:18888/api-doc.json/`. 
\ No newline at end of file diff --git a/resources/example_configs/EXAMPLE_NCTL_CONFIG.toml b/resources/example_configs/EXAMPLE_NCTL_CONFIG.toml index 78f31211..e2d2bc9c 100644 --- a/resources/example_configs/EXAMPLE_NCTL_CONFIG.toml +++ b/resources/example_configs/EXAMPLE_NCTL_CONFIG.toml @@ -1,3 +1,34 @@ +[rpc_server.main_server] +enable_server = true +address = "0.0.0.0:11102" +qps_limit = 100 +max_body_bytes = 2621440 +cors_origin = "" + +[rpc_server.speculative_exec_server] +enable_server = true +address = "0.0.0.0:25102" +qps_limit = 1 +max_body_bytes = 2621440 +cors_origin = "" + +[rpc_server.node_client] +address = "0.0.0.0:28102" +max_message_size_bytes = 4194304 +request_limit = 3 +request_buffer_size = 16 +message_timeout_secs = 30 +client_access_timeout_secs = 2 + +[rpc_server.node_client.exponential_backoff] +initial_delay_ms = 1000 +max_delay_ms = 32000 +coefficient = 2 +max_attempts = 30 + +[sse_server] +enable_server = true + [[sse_server.connections]] ip_address = "127.0.0.1" sse_port = 18101 @@ -6,6 +37,8 @@ max_attempts = 10 delay_between_retries_in_seconds = 5 allow_partial_connection = false enable_logging = true +no_message_timeout_in_seconds = 10 +sleep_between_keep_alive_checks_in_seconds = 5 [[sse_server.connections]] ip_address = "127.0.0.1" @@ -15,6 +48,8 @@ max_attempts = 10 delay_between_retries_in_seconds = 5 allow_partial_connection = false enable_logging = false +no_message_timeout_in_seconds = 10 +sleep_between_keep_alive_checks_in_seconds = 5 [[sse_server.connections]] ip_address = "127.0.0.1" @@ -25,6 +60,8 @@ delay_between_retries_in_seconds = 5 allow_partial_connection = false enable_logging = false connection_timeout_in_seconds = 3 +no_message_timeout_in_seconds = 10 +sleep_between_keep_alive_checks_in_seconds = 5 [sse_server.event_stream_server] port = 19999 diff --git a/resources/example_configs/EXAMPLE_NCTL_POSTGRES_CONFIG.toml b/resources/example_configs/EXAMPLE_NCTL_POSTGRES_CONFIG.toml index 43a30918..57ff8908 100644 --- 
a/resources/example_configs/EXAMPLE_NCTL_POSTGRES_CONFIG.toml +++ b/resources/example_configs/EXAMPLE_NCTL_POSTGRES_CONFIG.toml @@ -1,3 +1,34 @@ +[rpc_server.main_server] +enable_server = true +address = "0.0.0.0:11102" +qps_limit = 100 +max_body_bytes = 2621440 +cors_origin = "" + +[rpc_server.speculative_exec_server] +enable_server = true +address = "0.0.0.0:25102" +qps_limit = 1 +max_body_bytes = 2621440 +cors_origin = "" + +[rpc_server.node_client] +address = "0.0.0.0:28102" +max_message_size_bytes = 4194304 +request_limit = 3 +request_buffer_size = 16 +message_timeout_secs = 30 +client_access_timeout_secs = 2 + +[rpc_server.node_client.exponential_backoff] +initial_delay_ms = 1000 +max_delay_ms = 32000 +coefficient = 2 +max_attempts = 30 + +[sse_server] +enable_server = true + [[sse_server.connections]] ip_address = "127.0.0.1" sse_port = 18101 @@ -6,6 +37,8 @@ max_attempts = 10 delay_between_retries_in_seconds = 5 allow_partial_connection = false enable_logging = true +no_message_timeout_in_seconds = 10 +sleep_between_keep_alive_checks_in_seconds = 5 [[sse_server.connections]] ip_address = "127.0.0.1" @@ -15,6 +48,8 @@ max_attempts = 10 delay_between_retries_in_seconds = 5 allow_partial_connection = false enable_logging = false +no_message_timeout_in_seconds = 10 +sleep_between_keep_alive_checks_in_seconds = 5 [[sse_server.connections]] ip_address = "127.0.0.1" @@ -25,6 +60,8 @@ delay_between_retries_in_seconds = 5 allow_partial_connection = false enable_logging = false connection_timeout_in_seconds = 3 +no_message_timeout_in_seconds = 10 +sleep_between_keep_alive_checks_in_seconds = 5 [sse_server.event_stream_server] port = 19999 @@ -45,3 +82,8 @@ max_connections_in_pool = 30 port = 18888 max_concurrent_requests = 50 max_requests_per_second = 50 + +[admin_api_server] +port = 18887 +max_concurrent_requests = 1 +max_requests_per_second = 1 \ No newline at end of file diff --git a/resources/example_configs/EXAMPLE_NODE_CONFIG.toml 
b/resources/example_configs/EXAMPLE_NODE_CONFIG.toml index f34bc350..e8a14648 100644 --- a/resources/example_configs/EXAMPLE_NODE_CONFIG.toml +++ b/resources/example_configs/EXAMPLE_NODE_CONFIG.toml @@ -1,29 +1,66 @@ +[rpc_server.main_server] +enable_server = true +address = "0.0.0.0:7777" +qps_limit = 100 +max_body_bytes = 2621440 +cors_origin = "" + +[rpc_server.speculative_exec_server] +enable_server = true +address = "0.0.0.0:7778" +qps_limit = 1 +max_body_bytes = 2621440 +cors_origin = "" + +[rpc_server.node_client] +address = "3.20.57.210:7777" +max_message_size_bytes = 4194304 +request_limit = 10 +request_buffer_size = 50 +message_timeout_secs = 60 +client_access_timeout_secs = 60 + +[rpc_server.node_client.exponential_backoff] +initial_delay_ms = 1000 +max_delay_ms = 32000 +coefficient = 2 +max_attempts = 30 + +[sse_server] +enable_server = true + [[sse_server.connections]] -ip_address = "127.0.0.1" +ip_address = "168.254.51.1" sse_port = 9999 rest_port = 8888 -max_attempts = 10 -delay_between_retries_in_seconds = 5 -allow_partial_connection = false -enable_logging = true +max_attempts = 100 +delay_between_retries_in_seconds = 10 +allow_partial_connection = true +enable_logging = false +no_message_timeout_in_seconds = 20 +sleep_between_keep_alive_checks_in_seconds = 10 [[sse_server.connections]] ip_address = "168.254.51.2" sse_port = 9999 rest_port = 8888 -max_attempts = 10 -delay_between_retries_in_seconds = 5 +max_attempts = 100 +delay_between_retries_in_seconds = 10 allow_partial_connection = false -enable_logging = true +enable_logging = false +no_message_timeout_in_seconds = 20 +sleep_between_keep_alive_checks_in_seconds = 10 [[sse_server.connections]] ip_address = "168.254.51.3" sse_port = 9999 rest_port = 8888 -max_attempts = 10 -delay_between_retries_in_seconds = 5 +max_attempts = 100 +delay_between_retries_in_seconds = 10 allow_partial_connection = false -enable_logging = true +enable_logging = false +no_message_timeout_in_seconds = 20 
+sleep_between_keep_alive_checks_in_seconds = 10 [sse_server.event_stream_server] port = 19999 diff --git a/resources/test/rpc_schema.json b/resources/test/rpc_schema.json index a8d13a23..0e384132 100644 --- a/resources/test/rpc_schema.json +++ b/resources/test/rpc_schema.json @@ -222,7 +222,7 @@ ], "target": "Native", "entry_point": "Transfer", - "transaction_kind": 0, + "transaction_category": 0, "scheduling": "Standard" }, "approvals": [ @@ -557,7 +557,7 @@ ], "target": "Native", "entry_point": "Transfer", - "transaction_kind": 0, + "transaction_category": 0, "scheduling": "Standard" }, "approvals": [ @@ -1513,6 +1513,106 @@ } ] }, + { + "name": "info_get_reward", + "summary": "returns the reward for a given era and a validator or a delegator", + "params": [ + { + "name": "validator", + "schema": { + "description": "The public key of the validator.", + "$ref": "#/components/schemas/PublicKey" + }, + "required": true + }, + { + "name": "era_identifier", + "schema": { + "description": "The era identifier. If `None`, the last finalized era is used.", + "anyOf": [ + { + "$ref": "#/components/schemas/EraIdentifier" + }, + { + "type": "null" + } + ] + }, + "required": false + }, + { + "name": "delegator", + "schema": { + "description": "The public key of the delegator. If `Some`, the rewards for the delegator are returned. 
If `None`, the rewards for the validator are returned.", + "anyOf": [ + { + "$ref": "#/components/schemas/PublicKey" + }, + { + "type": "null" + } + ] + }, + "required": false + } + ], + "result": { + "name": "info_get_reward_result", + "schema": { + "description": "Result for \"info_get_reward\" RPC response.", + "type": "object", + "required": [ + "api_version", + "era_id", + "reward_amount" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "reward_amount": { + "description": "The total reward amount in the requested era.", + "$ref": "#/components/schemas/U512" + }, + "era_id": { + "description": "The era for which the reward was calculated.", + "$ref": "#/components/schemas/EraId" + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "info_get_reward_example", + "params": [ + { + "name": "era_identifier", + "value": { + "Era": 1 + } + }, + { + "name": "validator", + "value": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c" + }, + { + "name": "delegator", + "value": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c" + } + ], + "result": { + "name": "info_get_reward_example_result", + "value": { + "api_version": "2.0.0", + "reward_amount": "42", + "era_id": 1 + } + } + } + ] + }, { "name": "info_get_validator_changes", "summary": "returns status changes of active validators", @@ -3159,7 +3259,7 @@ "additionalProperties": false }, { - "description": "The cost of the transaction is determined by the cost table, per the transaction kind.", + "description": "The cost of the transaction is determined by the cost table, per the transaction category.", "type": "object", "required": [ "Fixed" @@ -3255,7 +3355,7 @@ "entry_point", "scheduling", "target", - "transaction_kind" + "transaction_category" ], "properties": { "args": { @@ -3267,7 +3367,7 @@ "entry_point": { "$ref": "#/components/schemas/TransactionEntryPoint" }, - "transaction_kind": { + 
"transaction_category": { "type": "integer", "format": "uint8", "minimum": 0.0 @@ -3334,19 +3434,10 @@ "Session": { "type": "object", "required": [ - "kind", "module_bytes", "runtime" ], "properties": { - "kind": { - "description": "The kind of session.", - "allOf": [ - { - "$ref": "#/components/schemas/TransactionSessionKind" - } - ] - }, "module_bytes": { "description": "The compiled Wasm.", "allOf": [ @@ -3485,39 +3576,6 @@ } ] }, - "TransactionSessionKind": { - "description": "Session kind of a Transaction.", - "oneOf": [ - { - "description": "A standard (non-special-case) session.\n\nThis kind of session is not allowed to install or upgrade a stored contract, but can call stored contracts.", - "type": "string", - "enum": [ - "Standard" - ] - }, - { - "description": "A session which installs a stored contract.", - "type": "string", - "enum": [ - "Installer" - ] - }, - { - "description": "A session which upgrades a previously-installed stored contract. Such a session must have \"package_id: PackageIdentifier\" runtime arg present.", - "type": "string", - "enum": [ - "Upgrader" - ] - }, - { - "description": "A session which doesn't call any stored contracts.\n\nThis kind of session is not allowed to install or upgrade a stored contract.", - "type": "string", - "enum": [ - "Isolated" - ] - } - ] - }, "TransactionEntryPoint": { "description": "Entry point of a Transaction.", "oneOf": [ @@ -4872,6 +4930,8 @@ "bonding_purse", "delegation_rate", "inactive", + "maximum_delegation_amount", + "minimum_delegation_amount", "staked_amount", "validator_public_key" ], @@ -4920,6 +4980,18 @@ "inactive": { "description": "`true` if validator has been \"evicted\"", "type": "boolean" + }, + "minimum_delegation_amount": { + "description": "Minimum allowed delegation amount in motes", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "maximum_delegation_amount": { + "description": "Maximum allowed delegation amount in motes", + "type": "integer", + "format": 
"uint64", + "minimum": 0.0 } }, "additionalProperties": false @@ -7784,6 +7856,35 @@ }, "additionalProperties": false }, + "EraIdentifier": { + "description": "Identifier for an era.", + "oneOf": [ + { + "type": "object", + "required": [ + "Era" + ], + "properties": { + "Era": { + "$ref": "#/components/schemas/EraId" + } + }, + "additionalProperties": false + }, + { + "type": "object", + "required": [ + "Block" + ], + "properties": { + "Block": { + "$ref": "#/components/schemas/BlockIdentifier" + } + }, + "additionalProperties": false + } + ] + }, "JsonValidatorChanges": { "description": "The changes in a validator's status.", "type": "object", diff --git a/resources/test/speculative_rpc_schema.json b/resources/test/speculative_rpc_schema.json index a35dbdb0..391d0eac 100644 --- a/resources/test/speculative_rpc_schema.json +++ b/resources/test/speculative_rpc_schema.json @@ -230,7 +230,7 @@ ], "target": "Native", "entry_point": "Transfer", - "transaction_kind": 0, + "transaction_category": 0, "scheduling": "Standard" }, "approvals": [ @@ -2947,6 +2947,8 @@ "bonding_purse", "delegation_rate", "inactive", + "maximum_delegation_amount", + "minimum_delegation_amount", "staked_amount", "validator_public_key" ], @@ -2995,6 +2997,18 @@ "inactive": { "description": "`true` if validator has been \"evicted\"", "type": "boolean" + }, + "minimum_delegation_amount": { + "description": "Minimum allowed delegation amount in motes", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "maximum_delegation_amount": { + "description": "Maximum allowed delegation amount in motes", + "type": "integer", + "format": "uint64", + "minimum": 0.0 } }, "additionalProperties": false @@ -3763,7 +3777,7 @@ "additionalProperties": false }, { - "description": "The cost of the transaction is determined by the cost table, per the transaction kind.", + "description": "The cost of the transaction is determined by the cost table, per the transaction category.", "type": "object", 
"required": [ "Fixed" @@ -3824,7 +3838,7 @@ "entry_point", "scheduling", "target", - "transaction_kind" + "transaction_category" ], "properties": { "args": { @@ -3836,7 +3850,7 @@ "entry_point": { "$ref": "#/components/schemas/TransactionEntryPoint" }, - "transaction_kind": { + "transaction_category": { "type": "integer", "format": "uint8", "minimum": 0.0 @@ -3903,19 +3917,10 @@ "Session": { "type": "object", "required": [ - "kind", "module_bytes", "runtime" ], "properties": { - "kind": { - "description": "The kind of session.", - "allOf": [ - { - "$ref": "#/components/schemas/TransactionSessionKind" - } - ] - }, "module_bytes": { "description": "The compiled Wasm.", "allOf": [ @@ -4035,39 +4040,6 @@ } ] }, - "TransactionSessionKind": { - "description": "Session kind of a Transaction.", - "oneOf": [ - { - "description": "A standard (non-special-case) session.\n\nThis kind of session is not allowed to install or upgrade a stored contract, but can call stored contracts.", - "type": "string", - "enum": [ - "Standard" - ] - }, - { - "description": "A session which installs a stored contract.", - "type": "string", - "enum": [ - "Installer" - ] - }, - { - "description": "A session which upgrades a previously-installed stored contract. 
Such a session must have \"package_id: PackageIdentifier\" runtime arg present.", - "type": "string", - "enum": [ - "Upgrader" - ] - }, - { - "description": "A session which doesn't call any stored contracts.\n\nThis kind of session is not allowed to install or upgrade a stored contract.", - "type": "string", - "enum": [ - "Isolated" - ] - } - ] - }, "TransactionEntryPoint": { "description": "Entry point of a Transaction.", "oneOf": [ diff --git a/rpc_sidecar/README.md b/rpc_sidecar/README.md index bc7ffcdd..423d4c93 100644 --- a/rpc_sidecar/README.md +++ b/rpc_sidecar/README.md @@ -8,24 +8,23 @@ ## Synopsis -The Casper Event Sidecar is a process that connects to the RPC port of a Casper node and exposes a JSON-RPC interface for interacting with that node. The RPC protocol allows for basic operations like querying global state, sending transactions and deploys, etc. All of the RPC methods are documented [here](https://docs.casper.network/developers/json-rpc/). +The Casper Sidecar provides connectivity to the binary port of a Casper node (among [other capabilities](../README.md#system-components-and-architecture)), exposing a JSON-RPC interface for interacting with that node. The RPC protocol allows for basic operations like querying global state, sending transactions and deploys, etc. All of the available RPC methods are documented [here](https://docs.casper.network/developers/json-rpc/). ## Protocol -The sidecar maintains a TCP connection with the node and communicates using a custom binary protocol built on top of [Juliet](https://github.com/casper-network/juliet). The protocol uses a request-response model where the sidecar sends simple self-contained requests and the node responds to them. The requests can be split into these main categories: -- read requests - - queries for transient in-memory information like the - current block height, peer list, component status etc. 
- - queries for database items, with both the database and the key - always being explicitly specified by the sidecar -- execute transaction requests - - request to submit a transaction for execution - - request to speculatively execute a transaction + +The Sidecar maintains a TCP connection with the node and communicates using a custom binary protocol, which uses a request-response model. The Sidecar sends simple self-contained requests and the node responds to them. The requests can be split into these main categories: +- Read requests + - Queries for transient in-memory information like the current block height, peer list, component status etc. + - Queries for database items, with both the database and the key always being explicitly specified by the sidecar +- Transaction requests + - Requests to submit transactions for execution + - Requests to speculatively execute a transactions ## Discovering the JSON RPC API -Once running, the Sidecar can be queried for its JSON RPC API using the `rpc.discover` method, as shown below. The result will be a list of RPC methods and their parameters. +Once setup and running as described [here](../README.md), the Sidecar can be queried for its JSON-RPC API using the `rpc.discover` method, as shown below. The result will be a list of RPC methods and their parameters. 
-```bash +```sh curl -X POST http://localhost:/rpc -H 'Content-Type: application/json' -d '{"jsonrpc": "2.0", "method": "rpc.discover", "id": 1}' ``` diff --git a/rpc_sidecar/src/http_server.rs b/rpc_sidecar/src/http_server.rs index 4ceb9ed2..43f93bcf 100644 --- a/rpc_sidecar/src/http_server.rs +++ b/rpc_sidecar/src/http_server.rs @@ -6,7 +6,7 @@ use casper_json_rpc::{CorsOrigin, RequestHandlersBuilder}; use crate::{ rpcs::{ - info::{GetPeers, GetStatus, GetTransaction}, + info::{GetPeers, GetReward, GetStatus, GetTransaction}, state::{GetAddressableEntity, QueryBalanceDetails}, }, NodeClient, @@ -54,6 +54,7 @@ pub async fn run( GetTransaction::register_as_handler(node.clone(), &mut handlers); GetPeers::register_as_handler(node.clone(), &mut handlers); GetStatus::register_as_handler(node.clone(), &mut handlers); + GetReward::register_as_handler(node.clone(), &mut handlers); GetEraInfoBySwitchBlock::register_as_handler(node.clone(), &mut handlers); GetEraSummary::register_as_handler(node.clone(), &mut handlers); GetAuctionInfo::register_as_handler(node.clone(), &mut handlers); diff --git a/rpc_sidecar/src/lib.rs b/rpc_sidecar/src/lib.rs index ed81d0d4..870d1625 100644 --- a/rpc_sidecar/src/lib.rs +++ b/rpc_sidecar/src/lib.rs @@ -8,7 +8,9 @@ mod speculative_exec_server; pub mod testing; use anyhow::Error; -use casper_types::ProtocolVersion; +use casper_binary_port::{BinaryRequest, BinaryRequestHeader}; +use casper_types::bytesrepr::ToBytes; +use casper_types::{bytesrepr, ProtocolVersion}; pub use config::{FieldParseError, RpcServerConfig, RpcServerConfigTarget}; pub use config::{NodeClientConfig, RpcConfig}; use futures::future::BoxFuture; @@ -116,6 +118,14 @@ fn resolve_address(address: &str) -> anyhow::Result { .ok_or_else(|| anyhow::anyhow!("failed to resolve address")) } +fn encode_request(req: &BinaryRequest, id: u16) -> Result, bytesrepr::Error> { + let header = BinaryRequestHeader::new(SUPPORTED_PROTOCOL_VERSION, req.tag(), id); + let mut bytes = 
Vec::with_capacity(header.serialized_length() + req.serialized_length()); + header.write_bytes(&mut bytes)?; + req.write_bytes(&mut bytes)?; + Ok(bytes) +} + #[cfg(test)] mod tests { use std::fs; diff --git a/rpc_sidecar/src/node_client.rs b/rpc_sidecar/src/node_client.rs index 27503408..6bdfc581 100644 --- a/rpc_sidecar/src/node_client.rs +++ b/rpc_sidecar/src/node_client.rs @@ -1,4 +1,4 @@ -use crate::{NodeClientConfig, SUPPORTED_PROTOCOL_VERSION}; +use crate::{encode_request, NodeClientConfig, SUPPORTED_PROTOCOL_VERSION}; use anyhow::Error as AnyhowError; use async_trait::async_trait; use futures::{Future, SinkExt, StreamExt}; @@ -6,23 +6,27 @@ use metrics::rpc::{inc_disconnect, observe_reconnect_time}; use serde::de::DeserializeOwned; use std::{ convert::{TryFrom, TryInto}, - sync::Arc, + sync::{ + atomic::{AtomicU16, Ordering}, + Arc, + }, time::Duration, }; use tokio_util::codec::Framed; use casper_binary_port::{ - BalanceResponse, BinaryMessage, BinaryMessageCodec, BinaryRequest, BinaryRequestHeader, - BinaryResponse, BinaryResponseAndRequest, ConsensusValidatorChanges, DictionaryItemIdentifier, - DictionaryQueryResult, ErrorCode, GetRequest, GetTrieFullResult, GlobalStateQueryResult, - GlobalStateRequest, InformationRequest, KeyPrefix, NodeStatus, PayloadEntity, PurseIdentifier, - RecordId, SpeculativeExecutionResult, TransactionWithExecutionInfo, + BalanceResponse, BinaryMessage, BinaryMessageCodec, BinaryRequest, BinaryResponse, + BinaryResponseAndRequest, ConsensusValidatorChanges, DictionaryItemIdentifier, + DictionaryQueryResult, EraIdentifier, ErrorCode, GetRequest, GetTrieFullResult, + GlobalStateQueryResult, GlobalStateRequest, InformationRequest, KeyPrefix, NodeStatus, + PayloadEntity, PurseIdentifier, RecordId, RewardResponse, SpeculativeExecutionResult, + TransactionWithExecutionInfo, }; use casper_types::{ bytesrepr::{self, FromBytes, ToBytes}, AvailableBlockRange, BlockHash, BlockHeader, BlockIdentifier, ChainspecRawBytes, Digest, - 
GlobalStateIdentifier, Key, KeyTag, Peers, ProtocolVersion, SignedBlock, StoredValue, - Transaction, TransactionHash, Transfer, + GlobalStateIdentifier, Key, KeyTag, Peers, ProtocolVersion, PublicKey, SignedBlock, + StoredValue, Transaction, TransactionHash, Transfer, }; use std::{ fmt::{self, Display, Formatter}, @@ -34,6 +38,12 @@ use tokio::{ }; use tracing::{error, field, info, warn}; +const MAX_MISMATCHED_ID_RETRIES: u8 = 100; +#[cfg(not(test))] +const INITIAL_REQUEST_ID: u16 = 0; +#[cfg(test)] +const INITIAL_REQUEST_ID: u16 = 1; + #[async_trait] pub trait NodeClient: Send + Sync { async fn send_request(&self, req: BinaryRequest) -> Result; @@ -238,6 +248,24 @@ pub trait NodeClient: Send + Sync { let resp = self.read_info(InformationRequest::NodeStatus).await?; parse_response::(&resp.into())?.ok_or(Error::EmptyEnvelope) } + + async fn read_reward( + &self, + era_identifier: Option, + validator: PublicKey, + delegator: Option, + ) -> Result, Error> { + let validator = validator.into(); + let delegator = delegator.map(Into::into); + let resp = self + .read_info(InformationRequest::Reward { + era_identifier, + validator, + delegator, + }) + .await?; + parse_response::(&resp.into()) + } } #[derive(Debug, thiserror::Error, PartialEq, Eq)] @@ -475,6 +503,12 @@ impl From for InvalidTransactionOrDeploy { pub enum Error { #[error("request error: {0}")] RequestFailed(String), + #[error("request id mismatch: expected {expected}, got {got}")] + RequestResponseIdMismatch { expected: u16, got: u16 }, + #[error("failed to get a response with correct id {max} times, giving up")] + TooManyMismatchedResponses { max: u8 }, + #[error("failed to deserialize the original request provided with the response: {0}")] + OriginalRequestDeserialization(String), #[error("failed to deserialize the envelope of a response: {0}")] EnvelopeDeserialization(String), #[error("failed to deserialize a response: {0}")] @@ -497,18 +531,27 @@ pub enum Error { 
InvalidTransaction(InvalidTransactionOrDeploy), #[error("speculative execution has failed: {0}")] SpecExecutionFailed(String), + #[error("the switch block for the requested era was not found")] + SwitchBlockNotFound, + #[error("the parent of the switch block for the requested era was not found")] + SwitchBlockParentNotFound, + #[error("cannot serve rewards stored in V1 format")] + UnsupportedRewardsV1Request, #[error("received a response with an unsupported protocol version: {0}")] UnsupportedProtocolVersion(ProtocolVersion), #[error("received an unexpected node error: {message} ({code})")] - UnexpectedNodeError { message: String, code: u8 }, + UnexpectedNodeError { message: String, code: u16 }, } impl Error { - fn from_error_code(code: u8) -> Self { + fn from_error_code(code: u16) -> Self { match ErrorCode::try_from(code) { Ok(ErrorCode::FunctionDisabled) => Self::FunctionIsDisabled, Ok(ErrorCode::RootNotFound) => Self::UnknownStateRootHash, Ok(ErrorCode::FailedQuery) => Self::QueryFailedToExecute, + Ok(ErrorCode::SwitchBlockNotFound) => Self::SwitchBlockNotFound, + Ok(ErrorCode::SwitchBlockParentNotFound) => Self::SwitchBlockParentNotFound, + Ok(ErrorCode::UnsupportedRewardsV1Request) => Self::UnsupportedRewardsV1Request, Ok( err @ (ErrorCode::InvalidDeployChainName | ErrorCode::InvalidDeployDependenciesNoLongerSupported @@ -602,6 +645,7 @@ pub struct FramedNodeClient { shutdown: Arc>, config: NodeClientConfig, request_limit: Semaphore, + current_request_id: AtomicU16, } impl FramedNodeClient { @@ -626,11 +670,16 @@ impl FramedNodeClient { reconnect, shutdown, config, + current_request_id: AtomicU16::new(INITIAL_REQUEST_ID), }, reconnect_loop, )) } + fn next_id(&self) -> u16 { + self.current_request_id.fetch_add(1, Ordering::Relaxed) + } + async fn reconnect_loop( config: NodeClientConfig, client: Arc>>, @@ -657,8 +706,7 @@ impl FramedNodeClient { req: BinaryRequest, client: &mut RwLockWriteGuard<'_, Framed>, ) -> Result { - let payload = - 
BinaryMessage::new(encode_request(&req).expect("should always serialize a request")); + let (request_id, payload) = self.generate_payload(req); if let Err(err) = tokio::time::timeout( Duration::from_secs(self.config.message_timeout_secs), @@ -670,26 +718,52 @@ impl FramedNodeClient { return Err(Error::RequestFailed(err.to_string())); }; - let Ok(maybe_response) = tokio::time::timeout( - Duration::from_secs(self.config.message_timeout_secs), - client.next(), - ) - .await - else { - return Err(Error::RequestFailed("timeout".to_owned())); - }; - - if let Some(response) = maybe_response { - let resp = bytesrepr::deserialize_from_slice( - response - .map_err(|err| Error::RequestFailed(err.to_string()))? - .payload(), + for _ in 0..MAX_MISMATCHED_ID_RETRIES { + let Ok(maybe_response) = tokio::time::timeout( + Duration::from_secs(self.config.message_timeout_secs), + client.next(), ) - .map_err(|err| Error::EnvelopeDeserialization(err.to_string()))?; - handle_response(resp, &self.shutdown) - } else { - Err(Error::RequestFailed("disconnected".to_owned())) + .await + else { + return Err(Error::RequestFailed("timeout".to_owned())); + }; + + if let Some(response) = maybe_response { + let resp = bytesrepr::deserialize_from_slice( + response + .map_err(|err| Error::RequestFailed(err.to_string()))? + .payload(), + ) + .map_err(|err| Error::EnvelopeDeserialization(err.to_string()))?; + match validate_response(resp, request_id, &self.shutdown) { + Ok(response) => return Ok(response), + Err(err) if matches!(err, Error::RequestResponseIdMismatch { expected, got } if expected > got) => + { + // If our expected ID is greater than the one we received, it means we can + // try to recover from the situation by reading more responses from the stream. 
+ warn!(%err, "received a response with an outdated id, trying another response"); + continue; + } + Err(err) => return Err(err), + } + } else { + return Err(Error::RequestFailed("disconnected".to_owned())); + } } + + Err(Error::TooManyMismatchedResponses { + max: MAX_MISMATCHED_ID_RETRIES, + }) + } + + fn generate_payload(&self, req: BinaryRequest) -> (u16, BinaryMessage) { + let next_id = self.next_id(); + ( + next_id, + BinaryMessage::new( + encode_request(&req, next_id).expect("should always serialize a request"), + ), + ) } async fn connect_with_retries( @@ -775,12 +849,20 @@ impl NodeClient for FramedNodeClient { } } -fn handle_response( +fn validate_response( resp: BinaryResponseAndRequest, + expected_id: u16, shutdown: &Notify, ) -> Result { - let version = resp.response().protocol_version(); + let original_id = resp.original_request_id(); + if original_id != expected_id { + return Err(Error::RequestResponseIdMismatch { + expected: expected_id, + got: original_id, + }); + } + let version = resp.response().protocol_version(); if version.is_compatible_with(&SUPPORTED_PROTOCOL_VERSION) { Ok(resp) } else { @@ -790,14 +872,6 @@ fn handle_response( } } -fn encode_request(req: &BinaryRequest) -> Result, bytesrepr::Error> { - let header = BinaryRequestHeader::new(SUPPORTED_PROTOCOL_VERSION, req.tag()); - let mut bytes = Vec::with_capacity(header.serialized_length() + req.serialized_length()); - header.write_bytes(&mut bytes)?; - req.write_bytes(&mut bytes)?; - Ok(bytes) -} - fn parse_response(resp: &BinaryResponse) -> Result, Error> where A: FromBytes + PayloadEntity, @@ -872,9 +946,13 @@ where #[cfg(test)] mod tests { - use crate::testing::{get_port, start_mock_binary_port_responding_with_stored_value}; + use crate::testing::{ + get_dummy_request, get_dummy_request_payload, get_port, start_mock_binary_port, + start_mock_binary_port_responding_with_stored_value, + }; use super::*; + use casper_binary_port::BinaryRequestHeader; use casper_types::testing::TestRng; 
use casper_types::{CLValue, SemVer}; use futures::FutureExt; @@ -885,11 +963,15 @@ mod tests { let notify = Notify::::new(); let bad_version = ProtocolVersion::from_parts(10, 0, 0); - let result = handle_response( + let request = get_dummy_request_payload(None); + + let result = validate_response( BinaryResponseAndRequest::new( BinaryResponse::from_value(AvailableBlockRange::RANGE_0_0, bad_version), - &[], + &request, + 0, ), + 0, ¬ify, ); @@ -905,11 +987,15 @@ mod tests { ..SUPPORTED_PROTOCOL_VERSION.value() }); - let result = handle_response( + let request = get_dummy_request_payload(None); + + let result = validate_response( BinaryResponseAndRequest::new( BinaryResponse::from_value(AvailableBlockRange::RANGE_0_0, version), - &[], + &request, + 0, ), + 0, ¬ify, ); @@ -917,7 +1003,8 @@ mod tests { result, Ok(BinaryResponseAndRequest::new( BinaryResponse::from_value(AvailableBlockRange::RANGE_0_0, version), - &[], + &request, + 0 )) ); assert_eq!(notify.notified().now_or_never(), None) @@ -931,11 +1018,15 @@ mod tests { ..SUPPORTED_PROTOCOL_VERSION.value() }); - let result = handle_response( + let request = get_dummy_request_payload(None); + + let result = validate_response( BinaryResponseAndRequest::new( BinaryResponse::from_value(AvailableBlockRange::RANGE_0_0, version), - &[], + &request, + 0, ), + 0, ¬ify, ); @@ -943,7 +1034,8 @@ mod tests { result, Ok(BinaryResponseAndRequest::new( BinaryResponse::from_value(AvailableBlockRange::RANGE_0_0, version), - &[], + &request, + 0 )) ); assert_eq!(notify.notified().now_or_never(), None) @@ -966,8 +1058,13 @@ mod tests { let port = get_port(); let mut rng = TestRng::new(); let shutdown = Arc::new(tokio::sync::Notify::new()); - let _mock_server_handle = - start_mock_binary_port_responding_with_stored_value(port, Arc::clone(&shutdown)).await; + let _mock_server_handle = start_mock_binary_port_responding_with_stored_value( + port, + Some(INITIAL_REQUEST_ID), + None, + Arc::clone(&shutdown), + ) + .await; let config = 
NodeClientConfig::new_with_port_and_retries(port, 2); let (c, _) = FramedNodeClient::new(config).await.unwrap(); @@ -985,9 +1082,13 @@ mod tests { let shutdown = Arc::new(tokio::sync::Notify::new()); tokio::spawn(async move { sleep(Duration::from_secs(5)).await; - let _mock_server_handle = - start_mock_binary_port_responding_with_stored_value(port, Arc::clone(&shutdown)) - .await; + let _mock_server_handle = start_mock_binary_port_responding_with_stored_value( + port, + Some(INITIAL_REQUEST_ID), + None, + Arc::clone(&shutdown), + ) + .await; }); let config = NodeClientConfig::new_with_port_and_retries(port, 5); let (client, _) = FramedNodeClient::new(config).await.unwrap(); @@ -1021,12 +1122,18 @@ mod tests { let port = get_port(); let mut rng = TestRng::new(); let shutdown = Arc::new(tokio::sync::Notify::new()); - let mock_server_handle = - start_mock_binary_port_responding_with_stored_value(port, Arc::clone(&shutdown)).await; + let mock_server_handle = start_mock_binary_port_responding_with_stored_value( + port, + Some(INITIAL_REQUEST_ID), + None, + Arc::clone(&shutdown), + ) + .await; let config = NodeClientConfig::new_with_port(port); let (c, reconnect_loop) = FramedNodeClient::new(config).await.unwrap(); let scenario = async { + // Request id = 0 assert!(query_global_state_for_string_value(&mut rng, &c) .await .is_ok()); @@ -1034,6 +1141,7 @@ mod tests { shutdown.notify_one(); let _ = mock_server_handle.await; + // Request id = 1 let err = query_global_state_for_string_value(&mut rng, &c) .await .unwrap_err(); @@ -1042,12 +1150,17 @@ mod tests { Error::RequestFailed(e) if e == "disconnected" )); - let _mock_server_handle = - start_mock_binary_port_responding_with_stored_value(port, Arc::clone(&shutdown)) - .await; + let _mock_server_handle = start_mock_binary_port_responding_with_stored_value( + port, + Some(INITIAL_REQUEST_ID + 2), + None, + Arc::clone(&shutdown), + ) + .await; tokio::time::sleep(Duration::from_secs(2)).await; + // Request id = 2 
assert!(query_global_state_for_string_value(&mut rng, &c) .await .is_ok()); @@ -1058,4 +1171,124 @@ mod tests { _ = reconnect_loop => panic!("reconnect loop should not exit"), } } + + #[tokio::test] + async fn should_generate_payload_with_incrementing_id() { + let port = get_port(); + let config = NodeClientConfig::new_with_port(port); + let shutdown = Arc::new(tokio::sync::Notify::new()); + let _mock_server_handle = + start_mock_binary_port(port, vec![], 1, Arc::clone(&shutdown)).await; + let (c, _) = FramedNodeClient::new(config).await.unwrap(); + + let generated_ids: Vec<_> = (INITIAL_REQUEST_ID..INITIAL_REQUEST_ID + 10) + .map(|_| { + let (_, binary_message) = c.generate_payload(get_dummy_request()); + let header = BinaryRequestHeader::from_bytes(binary_message.payload()) + .unwrap() + .0; + header.id() + }) + .collect(); + + assert_eq!( + generated_ids, + (INITIAL_REQUEST_ID..INITIAL_REQUEST_ID + 10).collect::>() + ); + } + + #[test] + fn should_reject_mismatched_request_id() { + let notify = Notify::::new(); + + let expected_id = 1; + let actual_id = 2; + + let req = get_dummy_request_payload(Some(actual_id)); + let resp = BinaryResponse::new_empty(ProtocolVersion::V2_0_0); + let resp_and_req = BinaryResponseAndRequest::new(resp, &req, actual_id); + + let result = validate_response(resp_and_req, expected_id, ¬ify); + assert!(matches!( + result, + Err(Error::RequestResponseIdMismatch { expected, got }) if expected == 1 && got == 2 + )); + + let expected_id = 2; + let actual_id = 1; + + let req = get_dummy_request_payload(Some(actual_id)); + let resp = BinaryResponse::new_empty(ProtocolVersion::V2_0_0); + let resp_and_req = BinaryResponseAndRequest::new(resp, &req, actual_id); + + let result = validate_response(resp_and_req, expected_id, ¬ify); + assert!(matches!( + result, + Err(Error::RequestResponseIdMismatch { expected, got }) if expected == 2 && got == 1 + )); + } + + #[test] + fn should_accept_matching_request_id() { + let notify = Notify::::new(); + + 
let expected_id = 1; + let actual_id = 1; + + let req = get_dummy_request_payload(Some(actual_id)); + let resp = BinaryResponse::new_empty(ProtocolVersion::V2_0_0); + let resp_and_req = BinaryResponseAndRequest::new(resp, &req, actual_id); + + let result = validate_response(resp_and_req, expected_id, ¬ify); + dbg!(&result); + assert!(result.is_ok()) + } + + #[tokio::test] + async fn should_keep_retrying_to_get_response_up_to_the_limit() { + const LIMIT: u8 = MAX_MISMATCHED_ID_RETRIES - 1; + + let port = get_port(); + let mut rng = TestRng::new(); + let shutdown = Arc::new(tokio::sync::Notify::new()); + let _mock_server_handle = start_mock_binary_port_responding_with_stored_value( + port, + Some(0), + Some(LIMIT), + Arc::clone(&shutdown), + ) + .await; + let config = NodeClientConfig::new_with_port_and_retries(port, 2); + let (c, _) = FramedNodeClient::new(config).await.unwrap(); + + let res = query_global_state_for_string_value(&mut rng, &c) + .await + .unwrap_err(); + // Expect error different than 'TooManyMismatchResponses' + assert!(!matches!(res, Error::TooManyMismatchedResponses { .. 
})); + } + + #[tokio::test] + async fn should_quit_retrying_to_get_response_over_the_retry_limit() { + const LIMIT: u8 = MAX_MISMATCHED_ID_RETRIES; + + let port = get_port(); + let mut rng = TestRng::new(); + let shutdown = Arc::new(tokio::sync::Notify::new()); + let _mock_server_handle = start_mock_binary_port_responding_with_stored_value( + port, + Some(0), + Some(LIMIT), + Arc::clone(&shutdown), + ) + .await; + let config = NodeClientConfig::new_with_port_and_retries(port, 2); + let (c, _) = FramedNodeClient::new(config).await.unwrap(); + + let res = query_global_state_for_string_value(&mut rng, &c) + .await + .unwrap_err(); + // Expect 'TooManyMismatchResponses' error + assert!(matches!(res, Error::TooManyMismatchedResponses { max } if max == LIMIT)); + } } diff --git a/rpc_sidecar/src/rpcs/account.rs b/rpc_sidecar/src/rpcs/account.rs index 26db1720..bc175457 100644 --- a/rpc_sidecar/src/rpcs/account.rs +++ b/rpc_sidecar/src/rpcs/account.rs @@ -172,6 +172,7 @@ mod tests { Ok(BinaryResponseAndRequest::new( BinaryResponse::new_empty(SUPPORTED_PROTOCOL_VERSION), &[], + 0, )) } _ => unimplemented!(), @@ -213,6 +214,7 @@ mod tests { Ok(BinaryResponseAndRequest::new( BinaryResponse::new_empty(SUPPORTED_PROTOCOL_VERSION), &[], + 0, )) } _ => unimplemented!(), @@ -257,6 +259,7 @@ mod tests { SUPPORTED_PROTOCOL_VERSION, ), &[], + 0, )) } _ => unimplemented!(), diff --git a/rpc_sidecar/src/rpcs/chain.rs b/rpc_sidecar/src/rpcs/chain.rs index 38290a26..0a7ee4bc 100644 --- a/rpc_sidecar/src/rpcs/chain.rs +++ b/rpc_sidecar/src/rpcs/chain.rs @@ -704,6 +704,7 @@ mod tests { Ok(BinaryResponseAndRequest::new( BinaryResponse::from_value(self.block.clone(), SUPPORTED_PROTOCOL_VERSION), &[], + 0, )) } BinaryRequest::Get(GetRequest::Information { info_type_tag, .. 
}) @@ -716,6 +717,7 @@ mod tests { SUPPORTED_PROTOCOL_VERSION, ), &[], + 0, )) } BinaryRequest::Get(GetRequest::Record { @@ -758,6 +760,7 @@ mod tests { SUPPORTED_PROTOCOL_VERSION, ), &[], + 0, )) } BinaryRequest::Get(GetRequest::State(req)) @@ -778,6 +781,7 @@ mod tests { SUPPORTED_PROTOCOL_VERSION, ), &[], + 0, )) } req => unimplemented!("unexpected request: {:?}", req), diff --git a/rpc_sidecar/src/rpcs/docs.rs b/rpc_sidecar/src/rpcs/docs.rs index cb6bbb84..772f892e 100644 --- a/rpc_sidecar/src/rpcs/docs.rs +++ b/rpc_sidecar/src/rpcs/docs.rs @@ -18,7 +18,10 @@ use super::{ chain::{ GetBlock, GetBlockTransfers, GetEraInfoBySwitchBlock, GetEraSummary, GetStateRootHash, }, - info::{GetChainspec, GetDeploy, GetPeers, GetStatus, GetTransaction, GetValidatorChanges}, + info::{ + GetChainspec, GetDeploy, GetPeers, GetReward, GetStatus, GetTransaction, + GetValidatorChanges, + }, state::{ GetAccountInfo, GetAddressableEntity, GetAuctionInfo, GetBalance, GetDictionaryItem, GetItem, QueryBalance, QueryBalanceDetails, QueryGlobalState, @@ -86,6 +89,9 @@ pub(crate) static OPEN_RPC_SCHEMA: Lazy = Lazy::new(|| { ); schema.push_without_params::("returns a list of peers connected to the node"); schema.push_without_params::("returns the current status of the node"); + schema.push_with_params::( + "returns the reward for a given era and a validator or a delegator", + ); schema .push_without_params::("returns status changes of active validators"); schema.push_without_params::( diff --git a/rpc_sidecar/src/rpcs/error.rs b/rpc_sidecar/src/rpcs/error.rs index fa6853c0..9444bf57 100644 --- a/rpc_sidecar/src/rpcs/error.rs +++ b/rpc_sidecar/src/rpcs/error.rs @@ -37,6 +37,8 @@ pub enum Error { AccountNotFound, #[error("the requested addressable entity was not found")] AddressableEntityNotFound, + #[error("the requested reward was not found")] + RewardNotFound, #[error("the requested account has been migrated to an addressable entity")] AccountMigratedToEntity, #[error("the provided 
dictionary value is {0} instead of a URef")] @@ -82,11 +84,21 @@ impl Error { Error::NodeRequest(_, NodeClientError::FunctionIsDisabled) => { Some(ErrorCode::FunctionIsDisabled) } + Error::NodeRequest(_, NodeClientError::SwitchBlockNotFound) => { + Some(ErrorCode::SwitchBlockNotFound) + } + Error::NodeRequest(_, NodeClientError::SwitchBlockParentNotFound) => { + Some(ErrorCode::SwitchBlockParentNotFound) + } + Error::NodeRequest(_, NodeClientError::UnsupportedRewardsV1Request) => { + Some(ErrorCode::UnsupportedRewardsV1Request) + } Error::InvalidPurseURef(_) => Some(ErrorCode::FailedToParseGetBalanceURef), Error::InvalidDictionaryKey(_) => Some(ErrorCode::FailedToParseQueryKey), Error::MainPurseNotFound => Some(ErrorCode::NoSuchMainPurse), Error::AccountNotFound => Some(ErrorCode::NoSuchAccount), Error::AddressableEntityNotFound => Some(ErrorCode::NoSuchAddressableEntity), + Error::RewardNotFound => Some(ErrorCode::NoRewardsFound), Error::AccountMigratedToEntity => Some(ErrorCode::AccountMigratedToEntity), Error::InvalidTypeUnderDictionaryKey(_) | Error::DictionaryKeyNotFound diff --git a/rpc_sidecar/src/rpcs/error_code.rs b/rpc_sidecar/src/rpcs/error_code.rs index 9e222bdb..085c08d5 100644 --- a/rpc_sidecar/src/rpcs/error_code.rs +++ b/rpc_sidecar/src/rpcs/error_code.rs @@ -53,6 +53,14 @@ pub enum ErrorCode { NoSuchAddressableEntity = -32020, /// The requested account has been migrated to an addressable entity. AccountMigratedToEntity = -32021, + /// The requested reward was not found. + NoRewardsFound = -32022, + /// The switch block for the requested era was not found. + SwitchBlockNotFound = -32023, + /// The parent of the switch block for the requested era was not found. 
+ SwitchBlockParentNotFound = -32024, + /// Cannot serve rewards stored in V1 format + UnsupportedRewardsV1Request = -32025, } impl From for (i64, &'static str) { @@ -92,6 +100,15 @@ impl From for (i64, &'static str) { error_code as i64, "Account migrated to an addressable entity", ), + ErrorCode::NoRewardsFound => (error_code as i64, "No rewards found"), + ErrorCode::SwitchBlockNotFound => (error_code as i64, "Switch block not found"), + ErrorCode::SwitchBlockParentNotFound => { + (error_code as i64, "Switch block parent not found") + } + ErrorCode::UnsupportedRewardsV1Request => ( + error_code as i64, + "Cannot serve rewards stored in V1 format", + ), } } } diff --git a/rpc_sidecar/src/rpcs/info.rs b/rpc_sidecar/src/rpcs/info.rs index 72973459..d2c8e3ca 100644 --- a/rpc_sidecar/src/rpcs/info.rs +++ b/rpc_sidecar/src/rpcs/info.rs @@ -3,16 +3,17 @@ use std::{collections::BTreeMap, str, sync::Arc}; use async_trait::async_trait; -use casper_binary_port::MinimalBlockInfo; +use casper_binary_port::{EraIdentifier as PortEraIdentifier, MinimalBlockInfo}; use once_cell::sync::Lazy; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use casper_types::{ execution::{ExecutionResult, ExecutionResultV2}, - ActivationPoint, AvailableBlockRange, Block, BlockHash, BlockSynchronizerStatus, - ChainspecRawBytes, Deploy, DeployHash, Digest, EraId, ExecutionInfo, NextUpgrade, Peers, - ProtocolVersion, PublicKey, TimeDiff, Timestamp, Transaction, TransactionHash, ValidatorChange, + ActivationPoint, AvailableBlockRange, Block, BlockHash, BlockIdentifier, + BlockSynchronizerStatus, ChainspecRawBytes, Deploy, DeployHash, Digest, EraId, ExecutionInfo, + NextUpgrade, Peers, ProtocolVersion, PublicKey, TimeDiff, Timestamp, Transaction, + TransactionHash, ValidatorChange, U512, }; use super::{ @@ -92,6 +93,16 @@ static GET_STATUS_RESULT: Lazy = Lazy::new(|| GetStatusResult { #[cfg(test)] build_version: String::from("1.0.0-xxxxxxxxx@DEBUG"), }); +static GET_REWARD_PARAMS: Lazy = 
Lazy::new(|| GetRewardParams { + era_identifier: Some(EraIdentifier::Era(EraId::new(1))), + validator: PublicKey::example().clone(), + delegator: Some(PublicKey::example().clone()), +}); +static GET_REWARD_RESULT: Lazy = Lazy::new(|| GetRewardResult { + api_version: DOCS_EXAMPLE_API_VERSION, + reward_amount: U512::from(42), + era_id: EraId::new(1), +}); /// Params for "info_get_deploy" RPC request. #[derive(Serialize, Deserialize, Debug, JsonSchema)] @@ -495,6 +506,84 @@ impl RpcWithoutParams for GetStatus { } } +/// Params for "info_get_reward" RPC request. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetRewardParams { + /// The era identifier. If `None`, the last finalized era is used. + pub era_identifier: Option, + /// The public key of the validator. + pub validator: PublicKey, + /// The public key of the delegator. If `Some`, the rewards for the delegator are returned. + /// If `None`, the rewards for the validator are returned. + pub delegator: Option, +} + +impl DocExample for GetRewardParams { + fn doc_example() -> &'static Self { + &GET_REWARD_PARAMS + } +} + +/// Identifier for an era. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +pub enum EraIdentifier { + Era(EraId), + Block(BlockIdentifier), +} + +/// Result for "info_get_reward" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetRewardResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The total reward amount in the requested era. + pub reward_amount: U512, + /// The era for which the reward was calculated. + pub era_id: EraId, +} + +impl DocExample for GetRewardResult { + fn doc_example() -> &'static Self { + &GET_REWARD_RESULT + } +} + +/// "info_get_reward" RPC. 
+pub struct GetReward {} + +#[async_trait] +impl RpcWithParams for GetReward { + const METHOD: &'static str = "info_get_reward"; + type RequestParams = GetRewardParams; + type ResponseResult = GetRewardResult; + + async fn do_handle_request( + node_client: Arc, + params: Self::RequestParams, + ) -> Result { + let identifier = match params.era_identifier { + Some(EraIdentifier::Era(era_id)) => Some(PortEraIdentifier::Era(era_id)), + Some(EraIdentifier::Block(block_id)) => Some(PortEraIdentifier::Block(block_id)), + None => None, + }; + + let result = node_client + .read_reward(identifier, params.validator, params.delegator) + .await + .map_err(|err| Error::NodeRequest("rewards", err))? + .ok_or(Error::RewardNotFound)?; + + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + reward_amount: result.amount(), + era_id: result.era_id(), + }) + } +} + #[cfg(not(test))] fn version_string() -> String { use std::env; @@ -526,7 +615,7 @@ mod tests { use crate::{rpcs::ErrorCode, ClientError, SUPPORTED_PROTOCOL_VERSION}; use casper_binary_port::{ BinaryRequest, BinaryResponse, BinaryResponseAndRequest, GetRequest, InformationRequest, - InformationRequestTag, TransactionWithExecutionInfo, + InformationRequestTag, RewardResponse, TransactionWithExecutionInfo, }; use casper_types::{ bytesrepr::{FromBytes, ToBytes}, @@ -715,6 +804,38 @@ mod tests { assert_eq!(err.code(), ErrorCode::VariantMismatch as i64); } + #[tokio::test] + async fn should_return_rewards() { + let rng = &mut TestRng::new(); + let reward_amount = U512::from(rng.gen_range(0..1000)); + let era_id = EraId::new(rng.gen_range(0..1000)); + let validator = PublicKey::random(rng); + let delegator = rng.gen::().then(|| PublicKey::random(rng)); + + let resp = GetReward::do_handle_request( + Arc::new(RewardMock { + reward_amount, + era_id, + }), + GetRewardParams { + era_identifier: Some(EraIdentifier::Era(era_id)), + validator: validator.clone(), + delegator, + }, + ) + .await + .expect("should handle 
request"); + + assert_eq!( + resp, + GetRewardResult { + api_version: CURRENT_API_VERSION, + reward_amount, + era_id, + } + ); + } + struct ValidTransactionMock { transaction_bytes: Vec, should_request_approvals: bool, @@ -757,6 +878,35 @@ mod tests { Ok(BinaryResponseAndRequest::new( BinaryResponse::from_value(transaction, SUPPORTED_PROTOCOL_VERSION), &[], + 0, + )) + } + req => unimplemented!("unexpected request: {:?}", req), + } + } + } + + struct RewardMock { + reward_amount: U512, + era_id: EraId, + } + + #[async_trait] + impl NodeClient for RewardMock { + async fn send_request( + &self, + req: BinaryRequest, + ) -> Result { + match req { + BinaryRequest::Get(GetRequest::Information { info_type_tag, .. }) + if InformationRequestTag::try_from(info_type_tag) + == Ok(InformationRequestTag::Reward) => + { + let resp = RewardResponse::new(self.reward_amount, self.era_id); + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(resp, SUPPORTED_PROTOCOL_VERSION), + &[], + 0, )) } req => unimplemented!("unexpected request: {:?}", req), diff --git a/rpc_sidecar/src/rpcs/speculative_exec.rs b/rpc_sidecar/src/rpcs/speculative_exec.rs index f2ecddda..83884a1b 100644 --- a/rpc_sidecar/src/rpcs/speculative_exec.rs +++ b/rpc_sidecar/src/rpcs/speculative_exec.rs @@ -246,6 +246,7 @@ mod tests { SUPPORTED_PROTOCOL_VERSION, ), &[], + 0, )) } BinaryRequest::TrySpeculativeExec { .. 
} => Ok(BinaryResponseAndRequest::new( @@ -254,6 +255,7 @@ mod tests { SUPPORTED_PROTOCOL_VERSION, ), &[], + 0, )), req => unimplemented!("unexpected request: {:?}", req), } diff --git a/rpc_sidecar/src/rpcs/state.rs b/rpc_sidecar/src/rpcs/state.rs index 0fa0f506..94f7ee23 100644 --- a/rpc_sidecar/src/rpcs/state.rs +++ b/rpc_sidecar/src/rpcs/state.rs @@ -1260,6 +1260,7 @@ mod tests { SUPPORTED_PROTOCOL_VERSION, ), &[], + 0, )) } BinaryRequest::Get(GetRequest::State(req)) @@ -1280,6 +1281,7 @@ mod tests { Ok(BinaryResponseAndRequest::new( BinaryResponse::from_value(bids, SUPPORTED_PROTOCOL_VERSION), &[], + 0, )) } BinaryRequest::Get(GetRequest::State(req)) @@ -1300,6 +1302,7 @@ mod tests { Ok(BinaryResponseAndRequest::new( BinaryResponse::from_value(bids, SUPPORTED_PROTOCOL_VERSION), &[], + 0, )) } BinaryRequest::Get(GetRequest::State(req)) @@ -1321,6 +1324,7 @@ mod tests { Ok(BinaryResponseAndRequest::new( BinaryResponse::from_value(result, SUPPORTED_PROTOCOL_VERSION), &[], + 0, )) } BinaryRequest::Get(GetRequest::State(req)) @@ -1339,6 +1343,7 @@ mod tests { Ok(BinaryResponseAndRequest::new( BinaryResponse::from_value(result, SUPPORTED_PROTOCOL_VERSION), &[], + 0, )) } req => unimplemented!("unexpected request: {:?}", req), @@ -1405,6 +1410,7 @@ mod tests { SUPPORTED_PROTOCOL_VERSION, ), &[], + 0, )) } BinaryRequest::Get(GetRequest::State(req)) @@ -1425,6 +1431,7 @@ mod tests { Ok(BinaryResponseAndRequest::new( BinaryResponse::from_value(bids, SUPPORTED_PROTOCOL_VERSION), &[], + 0, )) } BinaryRequest::Get(GetRequest::State(req)) @@ -1445,6 +1452,7 @@ mod tests { Ok(BinaryResponseAndRequest::new( BinaryResponse::from_value(bids, SUPPORTED_PROTOCOL_VERSION), &[], + 0, )) } BinaryRequest::Get(GetRequest::State(req)) @@ -1468,6 +1476,7 @@ mod tests { Ok(BinaryResponseAndRequest::new( BinaryResponse::from_value(result, SUPPORTED_PROTOCOL_VERSION), &[], + 0, )) } BinaryRequest::Get(GetRequest::State(req)) @@ -1483,6 +1492,7 @@ mod tests { 
Ok(BinaryResponseAndRequest::new( BinaryResponse::new_empty(SUPPORTED_PROTOCOL_VERSION), &[], + 0, )) } BinaryRequest::Get(GetRequest::State(req)) @@ -1502,6 +1512,7 @@ mod tests { Ok(BinaryResponseAndRequest::new( BinaryResponse::from_value(result, SUPPORTED_PROTOCOL_VERSION), &[], + 0, )) } req => unimplemented!("unexpected request: {:?}", req), @@ -1559,6 +1570,7 @@ mod tests { Ok(BinaryResponseAndRequest::new( BinaryResponse::new_empty(SUPPORTED_PROTOCOL_VERSION), &[], + 0, )) } BinaryRequest::Get(GetRequest::Information { info_type_tag, .. }) @@ -1571,6 +1583,7 @@ mod tests { SUPPORTED_PROTOCOL_VERSION, ), &[], + 0, )) } req => unimplemented!("unexpected request: {:?}", req), @@ -1624,6 +1637,7 @@ mod tests { SUPPORTED_PROTOCOL_VERSION, ), &[], + 0, )) } BinaryRequest::Get(GetRequest::State(req)) @@ -1644,6 +1658,7 @@ mod tests { SUPPORTED_PROTOCOL_VERSION, ), &[], + 0, )) } BinaryRequest::Get(GetRequest::State(req)) @@ -1669,6 +1684,7 @@ mod tests { SUPPORTED_PROTOCOL_VERSION, ), &[], + 0, )) } BinaryRequest::Get(GetRequest::State(req)) @@ -1690,6 +1706,7 @@ mod tests { SUPPORTED_PROTOCOL_VERSION, ), &[], + 0, )) } BinaryRequest::Get(GetRequest::State(req)) @@ -1707,6 +1724,7 @@ mod tests { SUPPORTED_PROTOCOL_VERSION, ), &[], + 0, )) } req => unimplemented!("unexpected request: {:?}", req), @@ -1834,6 +1852,7 @@ mod tests { SUPPORTED_PROTOCOL_VERSION, ), &[], + 0, )) } BinaryRequest::Get(GetRequest::State(req)) @@ -1848,6 +1867,7 @@ mod tests { Ok(BinaryResponseAndRequest::new( BinaryResponse::new_empty(SUPPORTED_PROTOCOL_VERSION), &[], + 0, )) } req => unimplemented!("unexpected request: {:?}", req), @@ -1936,6 +1956,7 @@ mod tests { SUPPORTED_PROTOCOL_VERSION, ), &[], + 0, )) } BinaryRequest::Get(GetRequest::State(req)) @@ -1959,6 +1980,7 @@ mod tests { SUPPORTED_PROTOCOL_VERSION, ), &[], + 0, )) } req => unimplemented!("unexpected request: {:?}", req), @@ -2010,6 +2032,7 @@ mod tests { SUPPORTED_PROTOCOL_VERSION, ), &[], + 0, )) } 
BinaryRequest::Get(GetRequest::State(req)) @@ -2024,6 +2047,7 @@ mod tests { Ok(BinaryResponseAndRequest::new( BinaryResponse::new_empty(SUPPORTED_PROTOCOL_VERSION), &[], + 0, )) } req => unimplemented!("unexpected request: {:?}", req), @@ -2220,6 +2244,7 @@ mod tests { SUPPORTED_PROTOCOL_VERSION, ), &[], + 0, )) } req => unimplemented!("unexpected request: {:?}", req), @@ -2242,6 +2267,7 @@ mod tests { Ok(BinaryResponseAndRequest::new( BinaryResponse::from_value(self.0.clone(), SUPPORTED_PROTOCOL_VERSION), &[], + 0, )) } req => unimplemented!("unexpected request: {:?}", req), @@ -2271,6 +2297,7 @@ mod tests { SUPPORTED_PROTOCOL_VERSION, ), &[], + 0, )) } BinaryRequest::Get(GetRequest::State(req)) @@ -2279,6 +2306,7 @@ mod tests { Ok(BinaryResponseAndRequest::new( BinaryResponse::from_value(self.result.clone(), SUPPORTED_PROTOCOL_VERSION), &[], + 0, )) } req => unimplemented!("unexpected request: {:?}", req), @@ -2308,6 +2336,7 @@ mod tests { SUPPORTED_PROTOCOL_VERSION, ), &[], + 0, )) } BinaryRequest::Get(GetRequest::State(req)) @@ -2328,6 +2357,7 @@ mod tests { SUPPORTED_PROTOCOL_VERSION, ), &[], + 0, )) } req => unimplemented!("unexpected request: {:?}", req), @@ -2350,6 +2380,7 @@ mod tests { Ok(BinaryResponseAndRequest::new( BinaryResponse::from_value(self.0.clone(), SUPPORTED_PROTOCOL_VERSION), &[], + 0, )) } req => unimplemented!("unexpected request: {:?}", req), diff --git a/rpc_sidecar/src/testing/mod.rs b/rpc_sidecar/src/testing/mod.rs index f8d9ce60..119a4a2c 100644 --- a/rpc_sidecar/src/testing/mod.rs +++ b/rpc_sidecar/src/testing/mod.rs @@ -2,9 +2,10 @@ use std::sync::Arc; use std::time::Duration; use casper_binary_port::{ - BinaryMessage, BinaryMessageCodec, BinaryResponse, BinaryResponseAndRequest, - GlobalStateQueryResult, + BinaryMessage, BinaryMessageCodec, BinaryRequest, BinaryResponse, BinaryResponseAndRequest, + GetRequest, GlobalStateQueryResult, }; +use casper_types::bytesrepr; use casper_types::{bytesrepr::ToBytes, CLValue, ProtocolVersion, 
StoredValue}; use futures::{SinkExt, StreamExt}; use tokio::sync::Notify; @@ -15,17 +16,24 @@ use tokio::{ }; use tokio_util::codec::Framed; +use crate::encode_request; + const LOCALHOST: &str = "127.0.0.1"; const MESSAGE_SIZE: u32 = 1024 * 1024 * 10; pub struct BinaryPortMock { port: u16, response: Vec, + number_of_responses: u8, } impl BinaryPortMock { - pub fn new(port: u16, response: Vec) -> Self { - Self { port, response } + pub fn new(port: u16, response: Vec, number_of_responses: u8) -> Self { + Self { + port, + response, + number_of_responses, + } } pub async fn start(&self, shutdown: Arc) { @@ -43,7 +51,7 @@ impl BinaryPortMock { match val { Ok((stream, _addr)) => { let response_payload = self.response.clone(); - tokio::spawn(handle_client(stream, response_payload)); + tokio::spawn(handle_client(stream, response_payload, self.number_of_responses)); } Err(io_err) => { println!("acceptance failure: {:?}", io_err); @@ -55,14 +63,16 @@ impl BinaryPortMock { } } -async fn handle_client(stream: TcpStream, response: Vec) { +async fn handle_client(stream: TcpStream, response: Vec, number_of_responses: u8) { let mut client = Framed::new(stream, BinaryMessageCodec::new(MESSAGE_SIZE)); let next_message = client.next().await; if next_message.is_some() { tokio::spawn({ async move { - let _ = client.send(BinaryMessage::new(response)).await; + for _ in 0..number_of_responses { + let _ = client.send(BinaryMessage::new(response.clone())).await; + } } }); } @@ -74,22 +84,49 @@ pub fn get_port() -> u16 { pub async fn start_mock_binary_port_responding_with_stored_value( port: u16, + request_id: Option, + number_of_responses: Option, shutdown: Arc, ) -> JoinHandle<()> { let value = StoredValue::CLValue(CLValue::from_t("Foo").unwrap()); let data = GlobalStateQueryResult::new(value, vec![]); let protocol_version = ProtocolVersion::from_parts(2, 0, 0); let val = BinaryResponse::from_value(data, protocol_version); - let request = []; - let response = 
BinaryResponseAndRequest::new(val, &request); - start_mock_binary_port(port, response.to_bytes().unwrap(), shutdown).await + let request = get_dummy_request_payload(request_id); + let response = BinaryResponseAndRequest::new(val, &request, request_id.unwrap_or_default()); + start_mock_binary_port( + port, + response.to_bytes().unwrap(), + number_of_responses.unwrap_or(1), // Single response by default + shutdown, + ) + .await } -async fn start_mock_binary_port(port: u16, data: Vec, shutdown: Arc) -> JoinHandle<()> { +pub async fn start_mock_binary_port( + port: u16, + data: Vec, + number_of_responses: u8, + shutdown: Arc, +) -> JoinHandle<()> { let handler = tokio::spawn(async move { - let binary_port = BinaryPortMock::new(port, data); + let binary_port = BinaryPortMock::new(port, data, number_of_responses); binary_port.start(shutdown).await; }); sleep(Duration::from_secs(3)).await; // This should be handled differently, preferably the mock binary port should inform that it already bound to the port handler } + +pub(crate) fn get_dummy_request() -> BinaryRequest { + BinaryRequest::Get(GetRequest::Information { + info_type_tag: 0, + key: vec![], + }) +} + +pub(crate) fn get_dummy_request_payload(request_id: Option) -> bytesrepr::Bytes { + let dummy_request = get_dummy_request(); + encode_request(&dummy_request, request_id.unwrap_or_default()) + .unwrap() + .into() +} diff --git a/sidecar/Cargo.toml b/sidecar/Cargo.toml index d847e8eb..1718efc1 100644 --- a/sidecar/Cargo.toml +++ b/sidecar/Cargo.toml @@ -45,7 +45,7 @@ assets = [ ] maintainer-scripts = "../resources/maintainer_scripts/debian" extended-description = """ -Package for Casper Sidecar RPC and SSE +Package for Casper Sidecar """ [package.metadata.deb.systemd-units] diff --git a/sidecar/src/component.rs b/sidecar/src/component.rs index aea1a451..ef277643 100644 --- a/sidecar/src/component.rs +++ b/sidecar/src/component.rs @@ -358,8 +358,13 @@ mod tests { async fn 
given_rpc_api_server_component_when_config_should_return_some() { let port = get_port(); let shutdown = Arc::new(tokio::sync::Notify::new()); - let _mock_server_handle = - start_mock_binary_port_responding_with_stored_value(port, Arc::clone(&shutdown)).await; + let _mock_server_handle = start_mock_binary_port_responding_with_stored_value( + port, + None, + None, + Arc::clone(&shutdown), + ) + .await; let component = RpcApiComponent::new(); let mut config = all_components_all_enabled(); config.rpc_server.as_mut().unwrap().node_client = diff --git a/sidecar/src/run.rs b/sidecar/src/run.rs index 63f509d1..d6aea8d1 100644 --- a/sidecar/src/run.rs +++ b/sidecar/src/run.rs @@ -29,10 +29,6 @@ async fn do_run( config: SidecarConfig, components: Vec>, ) -> Result { - if components.is_empty() { - info!("No sidecar components are defined/enabled. Exiting"); - return Ok(ExitCode::SUCCESS); - } let mut component_futures = Vec::new(); for component in components.iter() { let maybe_future = component.prepare_component_task(&config).await?; @@ -40,5 +36,9 @@ async fn do_run( component_futures.push(future); } } + if component_futures.is_empty() { + info!("No runnable sidecar components are defined/enabled. Exiting"); + return Ok(ExitCode::SUCCESS); + } futures::future::select_all(component_futures).await.0 } diff --git a/types/src/legacy_sse_data/fixtures.rs b/types/src/legacy_sse_data/fixtures.rs index ba0d1bd5..b51c4d8f 100644 --- a/types/src/legacy_sse_data/fixtures.rs +++ b/types/src/legacy_sse_data/fixtures.rs @@ -425,7 +425,7 @@ const RAW_TRANSACTION_ACCEPTED: &str = r#" "scheduling": { "FutureTimestamp": "2020-08-07T01:32:59.428Z" }, - "transaction_kind": 0 + "transaction_category": 0 }, "approvals": [ {