From 751132411bb6fd9961a16017cce29c754df2b703 Mon Sep 17 00:00:00 2001 From: jacek-casper <145967538+jacek-casper@users.noreply.github.com> Date: Fri, 23 Feb 2024 14:36:21 +0000 Subject: [PATCH] RPC sidecar changes (#231) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Moving rpc sidecar to event sidecar workspace. Both of those servers will work on one binary * Updating schemars version because the old one is having bugs (if there is a name collision for components schema only one will be saved) * Copying casper_types to this project. The reason is that casper_types in release <= 4.0.1 depends on old schemars * Copying casper_types for 2.0 release to this project. The reason is that rpc sidecar has to use the new types definitions, but for now they are not released (and it's not clear if they will be released prior to node 2.0 release). * Changing RpcError implementation to fix tests. Some alignments of codestyle to make clippy happy. * Moving casper-types dependencies to workspace level * Sync changes from node branch * Update the schema file * Delete protocol.md * Move a DbId fix * Change error message * Changes to versioning * Sync changes to types * Switch to having a single binary * Moving config files, fixing compilation issues * bump 'juliet' to '0.2.1' * Sync casper-types changes * Changing RPC sidecar config so that the rpc_server.node_client.exponential_backoff will take a new parameter called max_attempts. It can be either "infinite" or a positive, non-zero number. * Storing ApiVersion in event_log table. Removing is_big_integer_id config from DDLConfiguration because it's no longer needed (new version of sea_query handles the situation of defining big_integer and autoincrement) * Revert "Storing ApiVersion in event_log table. 
Removing is_big_integer_id con…" * Update for node review changes (#15) * Update for node changes * Fix lints * Cleanup * Cover all values in tag roundtrip tests * Moving admin server out from the sse sidecar. They are spun up separately from sse events server. Also the database initialization happens separately. If sse events server is defined a storage definition is required. If rest api server is defined a storage definition is required. * Fix GlobalStateRequest::random * Changes explicit BoxFuture casting to calling 'boxed()' method --------- Co-authored-by: Jakub Zajkowski Co-authored-by: Rafał Chabowski Co-authored-by: Rafał Chabowski <88321181+rafal-ch@users.noreply.github.com> Co-authored-by: zajko --- Cargo.lock | 2204 ++++- Cargo.toml | 24 +- README.md | 30 +- USAGE.md | 2 +- casper_types/CHANGELOG.md | 200 + casper_types/Cargo.toml | 77 + casper_types/README.md | 22 + casper_types/benches/bytesrepr_bench.rs | 894 ++ casper_types/src/access_rights.rs | 422 + casper_types/src/account.rs | 1013 +++ casper_types/src/account/account_hash.rs | 218 + casper_types/src/account/action_thresholds.rs | 170 + casper_types/src/account/action_type.rs | 32 + casper_types/src/account/associated_keys.rs | 360 + casper_types/src/account/error.rs | 110 + casper_types/src/account/weight.rs | 62 + casper_types/src/api_error.rs | 874 ++ casper_types/src/block_time.rs | 47 + casper_types/src/bytesrepr.rs | 1594 ++++ casper_types/src/bytesrepr/bytes.rs | 389 + casper_types/src/checksummed_hex.rs | 241 + casper_types/src/cl_type.rs | 779 ++ casper_types/src/cl_value.rs | 1197 +++ casper_types/src/cl_value/jsonrepr.rs | 272 + casper_types/src/contract_wasm.rs | 372 + casper_types/src/contracts.rs | 2106 +++++ casper_types/src/crypto.rs | 35 + casper_types/src/crypto/asymmetric_key.rs | 1274 +++ .../src/crypto/asymmetric_key/gens.rs | 44 + .../src/crypto/asymmetric_key/tests.rs | 862 ++ casper_types/src/crypto/error.rs | 111 + casper_types/src/deploy_info.rs | 172 + 
casper_types/src/era_id.rs | 241 + casper_types/src/execution_result.rs | 814 ++ casper_types/src/file_utils.rs | 77 + casper_types/src/gas.rs | 232 + casper_types/src/gens.rs | 531 ++ casper_types/src/json_pretty_printer.rs | 291 + casper_types/src/key.rs | 1458 ++++ casper_types/src/lib.rs | 113 + casper_types/src/motes.rs | 248 + casper_types/src/named_key.rs | 46 + casper_types/src/phase.rs | 56 + casper_types/src/protocol_version.rs | 550 ++ casper_types/src/runtime_args.rs | 368 + casper_types/src/semver.rs | 152 + casper_types/src/stored_value.rs | 464 ++ .../src/stored_value/type_mismatch.rs | 30 + casper_types/src/system.rs | 14 + casper_types/src/system/auction.rs | 53 + casper_types/src/system/auction/bid.rs | 554 ++ .../src/system/auction/bid/vesting.rs | 523 ++ casper_types/src/system/auction/constants.rs | 98 + casper_types/src/system/auction/delegator.rs | 242 + .../src/system/auction/entry_points.rs | 146 + casper_types/src/system/auction/era_info.rs | 314 + casper_types/src/system/auction/error.rs | 543 ++ .../system/auction/seigniorage_recipient.rs | 196 + .../src/system/auction/unbonding_purse.rs | 236 + .../src/system/auction/withdraw_purse.rs | 195 + casper_types/src/system/call_stack_element.rs | 194 + casper_types/src/system/error.rs | 43 + casper_types/src/system/handle_payment.rs | 8 + .../src/system/handle_payment/constants.rs | 37 + .../src/system/handle_payment/entry_points.rs | 66 + .../src/system/handle_payment/error.rs | 424 + casper_types/src/system/mint.rs | 8 + casper_types/src/system/mint/constants.rs | 40 + casper_types/src/system/mint/entry_points.rs | 102 + casper_types/src/system/mint/error.rs | 298 + casper_types/src/system/standard_payment.rs | 6 + .../src/system/standard_payment/constants.rs | 10 + .../system/standard_payment/entry_points.rs | 25 + .../src/system/system_contract_type.rs | 171 + casper_types/src/tagged.rs | 5 + casper_types/src/testing.rs | 174 + casper_types/src/timestamp.rs | 472 ++ 
casper_types/src/transfer.rs | 506 ++ casper_types/src/transfer_result.rs | 39 + casper_types/src/uint.rs | 1001 +++ casper_types/src/uref.rs | 427 + casper_types/tests/version_numbers.rs | 5 + casper_types_ver_2_0/CHANGELOG.md | 204 + casper_types_ver_2_0/Cargo.toml | 89 + casper_types_ver_2_0/README.md | 22 + .../benches/bytesrepr_bench.rs | 872 ++ casper_types_ver_2_0/src/access_rights.rs | 421 + casper_types_ver_2_0/src/account.rs | 857 ++ .../src/account/account_hash.rs | 212 + .../src/account/action_thresholds.rs | 175 + .../src/account/action_type.rs | 32 + .../src/account/associated_keys.rs | 381 + casper_types_ver_2_0/src/account/error.rs | 43 + casper_types_ver_2_0/src/account/weight.rs | 69 + .../src/addressable_entity.rs | 1714 ++++ .../addressable_entity/action_thresholds.rs | 212 + .../src/addressable_entity/action_type.rs | 38 + .../src/addressable_entity/associated_keys.rs | 386 + .../src/addressable_entity/error.rs | 112 + .../src/addressable_entity/named_keys.rs | 166 + .../src/addressable_entity/weight.rs | 66 + casper_types_ver_2_0/src/api_error.rs | 949 +++ casper_types_ver_2_0/src/auction_state.rs | 203 + casper_types_ver_2_0/src/binary_port.rs | 66 + .../src/binary_port/binary_request.rs | 297 + .../src/binary_port/binary_response.rs | 177 + .../binary_response_and_request.rs | 155 + .../src/binary_port/binary_response_header.rs | 134 + .../src/binary_port/error_code.rs | 79 + .../src/binary_port/get_all_values_result.rs | 15 + .../src/binary_port/get_request.rs | 146 + .../binary_port/global_state_query_result.rs | 99 + .../src/binary_port/information_request.rs | 370 + .../src/binary_port/minimal_block_info.rs | 123 + .../src/binary_port/node_status.rs | 173 + .../src/binary_port/payload_type.rs | 510 ++ .../src/binary_port/record_id.rs | 105 + .../src/binary_port/state_request.rs | 186 + .../src/binary_port/type_wrappers.rs | 349 + casper_types_ver_2_0/src/block.rs | 494 ++ .../src/block/available_block_range.rs | 110 + 
casper_types_ver_2_0/src/block/block_body.rs | 115 + .../src/block/block_body/block_body_v1.rs | 160 + .../src/block/block_body/block_body_v2.rs | 214 + casper_types_ver_2_0/src/block/block_hash.rs | 131 + .../src/block/block_hash_and_height.rs | 114 + .../src/block/block_header.rs | 287 + .../src/block/block_header/block_header_v1.rs | 372 + .../src/block/block_header/block_header_v2.rs | 371 + .../src/block/block_identifier.rs | 138 + .../src/block/block_signatures.rs | 248 + .../src/block/block_sync_status.rs | 212 + casper_types_ver_2_0/src/block/block_v1.rs | 367 + casper_types_ver_2_0/src/block/block_v2.rs | 411 + casper_types_ver_2_0/src/block/era_end.rs | 133 + .../src/block/era_end/era_end_v1.rs | 163 + .../block/era_end/era_end_v1/era_report.rs | 252 + .../src/block/era_end/era_end_v2.rs | 249 + .../src/block/finality_signature.rs | 266 + .../src/block/finality_signature_id.rs | 55 + .../src/block/json_compatibility.rs | 8 + .../json_block_with_signatures.rs | 95 + .../src/block/rewarded_signatures.rs | 474 ++ casper_types_ver_2_0/src/block/rewards.rs | 11 + .../src/block/signed_block.rs | 80 + .../src/block/signed_block_header.rs | 143 + .../test_block_v1_builder.rs | 183 + .../test_block_v2_builder.rs | 275 + casper_types_ver_2_0/src/block_time.rs | 55 + casper_types_ver_2_0/src/byte_code.rs | 467 ++ casper_types_ver_2_0/src/bytesrepr.rs | 1646 ++++ casper_types_ver_2_0/src/bytesrepr/bytes.rs | 405 + casper_types_ver_2_0/src/chainspec.rs | 260 + .../src/chainspec/accounts_config.rs | 192 + .../accounts_config/account_config.rs | 138 + .../accounts_config/delegator_config.rs | 133 + .../src/chainspec/accounts_config/genesis.rs | 497 ++ .../accounts_config/validator_config.rs | 102 + .../src/chainspec/activation_point.rs | 121 + .../src/chainspec/chainspec_raw_bytes.rs | 196 + .../src/chainspec/core_config.rs | 538 ++ .../src/chainspec/fee_handling.rs | 76 + .../src/chainspec/global_state_update.rs | 181 + .../src/chainspec/highway_config.rs | 111 + 
.../src/chainspec/network_config.rs | 86 + .../src/chainspec/next_upgrade.rs | 115 + .../src/chainspec/protocol_config.rs | 125 + .../src/chainspec/refund_handling.rs | 97 + .../src/chainspec/transaction_config.rs | 211 + .../transaction_config/deploy_config.rs | 112 + .../transaction_v1_config.rs | 74 + .../src/chainspec/vm_config.rs | 42 + .../src/chainspec/vm_config/auction_costs.rs | 269 + .../chainspec/vm_config/chainspec_registry.rs | 157 + .../vm_config/handle_payment_costs.rs | 116 + .../vm_config/host_function_costs.rs | 1080 +++ .../src/chainspec/vm_config/message_limits.rs | 131 + .../src/chainspec/vm_config/mint_costs.rs | 172 + .../src/chainspec/vm_config/opcode_costs.rs | 773 ++ .../vm_config/standard_payment_costs.rs | 70 + .../src/chainspec/vm_config/storage_costs.rs | 138 + .../src/chainspec/vm_config/system_config.rs | 179 + .../src/chainspec/vm_config/upgrade_config.rs | 112 + .../src/chainspec/vm_config/wasm_config.rs | 186 + casper_types_ver_2_0/src/checksummed_hex.rs | 241 + casper_types_ver_2_0/src/cl_type.rs | 817 ++ casper_types_ver_2_0/src/cl_value.rs | 1208 +++ casper_types_ver_2_0/src/cl_value/jsonrepr.rs | 272 + casper_types_ver_2_0/src/contract_messages.rs | 228 + .../src/contract_messages/error.rs | 74 + .../src/contract_messages/messages.rs | 323 + .../src/contract_messages/topics.rs | 254 + casper_types_ver_2_0/src/contract_wasm.rs | 373 + casper_types_ver_2_0/src/contracts.rs | 1308 +++ casper_types_ver_2_0/src/crypto.rs | 35 + .../src/crypto/asymmetric_key.rs | 1304 +++ .../src/crypto/asymmetric_key/gens.rs | 44 + .../src/crypto/asymmetric_key/tests.rs | 861 ++ casper_types_ver_2_0/src/crypto/error.rs | 155 + casper_types_ver_2_0/src/deploy_info.rs | 174 + casper_types_ver_2_0/src/digest.rs | 730 ++ .../src/digest/chunk_with_proof.rs | 335 + casper_types_ver_2_0/src/digest/error.rs | 233 + .../src/digest/indexed_merkle_proof.rs | 514 ++ casper_types_ver_2_0/src/display_iter.rs | 40 + casper_types_ver_2_0/src/era_id.rs | 254 + 
casper_types_ver_2_0/src/execution.rs | 17 + casper_types_ver_2_0/src/execution/effects.rs | 105 + .../src/execution/execution_result.rs | 148 + .../src/execution/execution_result_v1.rs | 794 ++ .../src/execution/execution_result_v2.rs | 259 + .../src/execution/transform.rs | 75 + .../src/execution/transform_error.rs | 136 + .../src/execution/transform_kind.rs | 847 ++ casper_types_ver_2_0/src/file_utils.rs | 77 + casper_types_ver_2_0/src/gas.rs | 240 + casper_types_ver_2_0/src/gens.rs | 738 ++ .../src/json_pretty_printer.rs | 291 + casper_types_ver_2_0/src/key.rs | 2172 +++++ casper_types_ver_2_0/src/lib.rs | 215 + casper_types_ver_2_0/src/motes.rs | 248 + casper_types_ver_2_0/src/package.rs | 1567 ++++ casper_types_ver_2_0/src/peers_map.rs | 138 + casper_types_ver_2_0/src/phase.rs | 56 + casper_types_ver_2_0/src/protocol_version.rs | 550 ++ casper_types_ver_2_0/src/reactor_state.rs | 109 + casper_types_ver_2_0/src/semver.rs | 152 + casper_types_ver_2_0/src/serde_helpers.rs | 109 + casper_types_ver_2_0/src/stored_value.rs | 899 ++ .../stored_value/global_state_identifier.rs | 127 + .../src/stored_value/type_mismatch.rs | 68 + casper_types_ver_2_0/src/system.rs | 12 + casper_types_ver_2_0/src/system/auction.rs | 279 + .../src/system/auction/bid.rs | 609 ++ .../src/system/auction/bid/vesting.rs | 520 ++ .../src/system/auction/bid_addr.rs | 335 + .../src/system/auction/bid_kind.rs | 323 + .../src/system/auction/constants.rs | 98 + .../src/system/auction/delegator.rs | 309 + .../src/system/auction/entry_points.rs | 142 + .../src/system/auction/era_info.rs | 311 + .../src/system/auction/error.rs | 545 ++ .../system/auction/seigniorage_recipient.rs | 196 + .../src/system/auction/unbonding_purse.rs | 238 + .../src/system/auction/validator_bid.rs | 380 + .../src/system/auction/withdraw_purse.rs | 192 + .../src/system/call_stack_element.rs | 164 + casper_types_ver_2_0/src/system/error.rs | 43 + .../src/system/handle_payment.rs | 8 + 
.../src/system/handle_payment/constants.rs | 37 + .../src/system/handle_payment/entry_points.rs | 66 + .../src/system/handle_payment/error.rs | 424 + casper_types_ver_2_0/src/system/mint.rs | 8 + .../src/system/mint/constants.rs | 40 + .../src/system/mint/entry_points.rs | 102 + casper_types_ver_2_0/src/system/mint/error.rs | 300 + .../src/system/standard_payment.rs | 6 + .../src/system/standard_payment/constants.rs | 10 + .../system/standard_payment/entry_points.rs | 25 + .../src/system/system_contract_type.rs | 249 + casper_types_ver_2_0/src/tagged.rs | 5 + casper_types_ver_2_0/src/testing.rs | 195 + casper_types_ver_2_0/src/timestamp.rs | 470 ++ casper_types_ver_2_0/src/transaction.rs | 340 + .../addressable_entity_identifier.rs | 122 + .../src/transaction/deploy.rs | 2007 +++++ .../src/transaction/deploy/deploy_approval.rs | 103 + .../deploy/deploy_approvals_hash.rs | 111 + .../src/transaction/deploy/deploy_builder.rs | 155 + .../deploy/deploy_builder/error.rs | 44 + .../transaction/deploy/deploy_footprint.rs | 28 + .../src/transaction/deploy/deploy_hash.rs | 116 + .../src/transaction/deploy/deploy_header.rs | 230 + .../src/transaction/deploy/deploy_id.rs | 116 + .../src/transaction/deploy/error.rs | 400 + .../deploy/executable_deploy_item.rs | 827 ++ .../deploy/finalized_deploy_approvals.rs | 76 + .../src/transaction/execution_info.rs | 62 + .../src/transaction/finalized_approvals.rs | 128 + .../src/transaction/initiator_addr.rs | 165 + .../initiator_addr_and_secret_key.rs | 40 + .../src/transaction/package_identifier.rs | 191 + .../src/transaction/pricing_mode.rs | 121 + .../src/transaction/runtime_args.rs | 388 + .../transaction/transaction_approvals_hash.rs | 110 + .../transaction/transaction_entry_point.rs | 232 + .../src/transaction/transaction_hash.rs | 143 + .../src/transaction/transaction_header.rs | 116 + .../src/transaction/transaction_id.rs | 197 + .../transaction_invocation_target.rs | 303 + .../src/transaction/transaction_runtime.rs | 73 + 
.../src/transaction/transaction_scheduling.rs | 133 + .../transaction/transaction_session_kind.rs | 118 + .../src/transaction/transaction_target.rs | 236 + .../src/transaction/transaction_v1.rs | 809 ++ .../transaction/transaction_v1/errors_v1.rs | 386 + .../finalized_transaction_v1_approvals.rs | 78 + .../transaction_v1/transaction_v1_approval.rs | 102 + .../transaction_v1_approvals_hash.rs | 114 + .../transaction_v1/transaction_v1_body.rs | 426 + .../transaction_v1_body/arg_handling.rs | 783 ++ .../transaction_v1/transaction_v1_builder.rs | 490 ++ .../transaction_v1_builder/error.rs | 44 + .../transaction_v1/transaction_v1_hash.rs | 117 + .../transaction_v1/transaction_v1_header.rs | 244 + casper_types_ver_2_0/src/transfer.rs | 414 + casper_types_ver_2_0/src/transfer_result.rs | 39 + casper_types_ver_2_0/src/uint.rs | 1001 +++ casper_types_ver_2_0/src/uref.rs | 424 + casper_types_ver_2_0/src/validator_change.rs | 101 + casper_types_ver_2_0/tests/version_numbers.rs | 5 + event_sidecar/Cargo.toml | 90 + .../src/admin_server.rs | 10 +- .../src/api_version_manager.rs | 0 .../src/database/database_errors.rs | 0 .../src/database/env_vars.rs | 0 .../src/database/errors.rs | 0 .../src/database/migration_manager.rs | 0 .../src/database/migration_manager/tests.rs | 0 .../src/database/mod.rs | 2 + .../src/database/postgresql_database.rs | 0 .../database/postgresql_database/reader.rs | 0 .../src/database/postgresql_database/tests.rs | 0 .../database/postgresql_database/writer.rs | 0 .../src/database/reader_generator.rs | 0 .../src/database/sqlite_database.rs | 0 .../src/database/sqlite_database/reader.rs | 0 .../src/database/sqlite_database/tests.rs | 0 .../src/database/sqlite_database/writer.rs | 0 .../src/database/tests.rs | 0 .../src/database/types.rs | 0 .../src/database/writer_generator.rs | 0 .../src/event_stream_server.rs | 0 .../src/event_stream_server/config.rs | 0 .../src/event_stream_server/endpoint.rs | 0 .../src/event_stream_server/event_indexer.rs | 0 
.../src/event_stream_server/http_server.rs | 0 .../src/event_stream_server/sse_server.rs | 0 .../src/event_stream_server/tests.rs | 0 event_sidecar/src/lib.rs | 790 ++ {sidecar => event_sidecar}/src/rest_server.rs | 4 +- .../src/rest_server/errors.rs | 0 .../src/rest_server/filters.rs | 0 .../src/rest_server/handlers.rs | 0 .../src/rest_server/openapi.rs | 0 .../openapi/schema_transformation_visitor.rs | 0 .../src/rest_server/tests.rs | 0 {sidecar => event_sidecar}/src/sql.rs | 0 {sidecar => event_sidecar}/src/sql/tables.rs | 0 .../src/sql/tables/block_added.rs | 0 .../src/sql/tables/deploy_accepted.rs | 0 .../src/sql/tables/deploy_event.rs | 0 .../src/sql/tables/deploy_expired.rs | 0 .../src/sql/tables/deploy_processed.rs | 0 .../src/sql/tables/event_log.rs | 0 .../src/sql/tables/event_type.rs | 1 + .../src/sql/tables/fault.rs | 0 .../src/sql/tables/finality_signature.rs | 0 .../src/sql/tables/migration.rs | 0 .../src/sql/tables/shutdown.rs | 1 + .../src/sql/tables/step.rs | 0 {sidecar => event_sidecar}/src/testing.rs | 0 .../src/testing/fake_database.rs | 0 .../src/testing/fake_event_stream.rs | 0 .../src/testing/mock_node.rs | 0 .../src/testing/raw_sse_events_utils.rs | 0 .../src/testing/shared.rs | 0 .../src/testing/simple_sse_server.rs | 0 .../src/testing/test_clock.rs | 0 .../src/testing/testing_config.rs | 43 +- {sidecar => event_sidecar}/src/tests.rs | 0 .../src/tests/integration_tests.rs | 54 +- .../tests/integration_tests_version_switch.rs | 2 +- .../src/tests/performance_tests.rs | 23 +- {sidecar => event_sidecar}/src/types.rs | 0 .../src/types/config.rs | 139 +- .../src/types/database.rs | 31 +- .../src/types/sse_events.rs | 0 {sidecar => event_sidecar}/src/utils.rs | 75 +- json_rpc/CHANGELOG.md | 28 + json_rpc/Cargo.toml | 26 + json_rpc/README.md | 118 + json_rpc/src/error.rs | 282 + json_rpc/src/filters.rs | 205 + json_rpc/src/filters/tests.rs | 18 + .../tests/base_filter_with_recovery_tests.rs | 220 + .../tests/main_filter_with_recovery_tests.rs | 
320 + json_rpc/src/lib.rs | 177 + json_rpc/src/rejections.rs | 72 + json_rpc/src/request.rs | 461 ++ json_rpc/src/request/params.rs | 202 + json_rpc/src/request_handlers.rs | 115 + json_rpc/src/response.rs | 108 + listener/Cargo.toml | 22 +- resources/ETC_README.md | 16 +- .../example_configs/EXAMPLE_NCTL_CONFIG.toml | 21 +- .../EXAMPLE_NCTL_POSTGRES_CONFIG.toml | 18 +- .../example_configs/EXAMPLE_NODE_CONFIG.toml | 22 +- .../default_rpc_only_config.toml | 86 + .../default_sse_only_config.toml} | 14 +- resources/test/rpc_schema.json | 7364 +++++++++++++++++ resources/test/schema_chainspec_bytes.json | 69 + resources/test/schema_rpc_schema.json | 642 ++ resources/test/schema_status.json | 415 + resources/test/schema_validator_changes.json | 146 + rpc_sidecar/Cargo.toml | 74 + rpc_sidecar/README.md | 28 + rpc_sidecar/build.rs | 16 + rpc_sidecar/src/config.rs | 363 + rpc_sidecar/src/http_server.rs | 101 + rpc_sidecar/src/lib.rs | 243 + rpc_sidecar/src/node_client.rs | 612 ++ rpc_sidecar/src/rpcs.rs | 618 ++ rpc_sidecar/src/rpcs/account.rs | 286 + rpc_sidecar/src/rpcs/chain.rs | 702 ++ rpc_sidecar/src/rpcs/chain/era_summary.rs | 57 + rpc_sidecar/src/rpcs/common.rs | 161 + rpc_sidecar/src/rpcs/docs.rs | 600 ++ rpc_sidecar/src/rpcs/error.rs | 110 + rpc_sidecar/src/rpcs/error_code.rs | 93 + rpc_sidecar/src/rpcs/info.rs | 695 ++ rpc_sidecar/src/rpcs/speculative_exec.rs | 272 + rpc_sidecar/src/rpcs/state.rs | 1385 ++++ rpc_sidecar/src/speculative_exec_config.rs | 49 + rpc_sidecar/src/speculative_exec_server.rs | 70 + rpc_sidecar/src/testing/mod.rs | 72 + rust-toolchain.toml | 2 +- sidecar/Cargo.toml | 109 +- sidecar/src/config.rs | 146 + sidecar/src/config/speculative_exec_config.rs | 49 + sidecar/src/main.rs | 929 +-- types/Cargo.toml | 12 +- types/src/block.rs | 6 +- types/src/deploy.rs | 6 +- types/src/digest.rs | 8 +- types/src/executable_deploy_item.rs | 9 +- 436 files changed, 113025 insertions(+), 1629 deletions(-) create mode 100644 casper_types/CHANGELOG.md create 
mode 100644 casper_types/Cargo.toml create mode 100644 casper_types/README.md create mode 100644 casper_types/benches/bytesrepr_bench.rs create mode 100644 casper_types/src/access_rights.rs create mode 100644 casper_types/src/account.rs create mode 100644 casper_types/src/account/account_hash.rs create mode 100644 casper_types/src/account/action_thresholds.rs create mode 100644 casper_types/src/account/action_type.rs create mode 100644 casper_types/src/account/associated_keys.rs create mode 100644 casper_types/src/account/error.rs create mode 100644 casper_types/src/account/weight.rs create mode 100644 casper_types/src/api_error.rs create mode 100644 casper_types/src/block_time.rs create mode 100644 casper_types/src/bytesrepr.rs create mode 100644 casper_types/src/bytesrepr/bytes.rs create mode 100644 casper_types/src/checksummed_hex.rs create mode 100644 casper_types/src/cl_type.rs create mode 100644 casper_types/src/cl_value.rs create mode 100644 casper_types/src/cl_value/jsonrepr.rs create mode 100644 casper_types/src/contract_wasm.rs create mode 100644 casper_types/src/contracts.rs create mode 100644 casper_types/src/crypto.rs create mode 100644 casper_types/src/crypto/asymmetric_key.rs create mode 100644 casper_types/src/crypto/asymmetric_key/gens.rs create mode 100644 casper_types/src/crypto/asymmetric_key/tests.rs create mode 100644 casper_types/src/crypto/error.rs create mode 100644 casper_types/src/deploy_info.rs create mode 100644 casper_types/src/era_id.rs create mode 100644 casper_types/src/execution_result.rs create mode 100644 casper_types/src/file_utils.rs create mode 100644 casper_types/src/gas.rs create mode 100644 casper_types/src/gens.rs create mode 100644 casper_types/src/json_pretty_printer.rs create mode 100644 casper_types/src/key.rs create mode 100644 casper_types/src/lib.rs create mode 100644 casper_types/src/motes.rs create mode 100644 casper_types/src/named_key.rs create mode 100644 casper_types/src/phase.rs create mode 100644 
casper_types/src/protocol_version.rs create mode 100644 casper_types/src/runtime_args.rs create mode 100644 casper_types/src/semver.rs create mode 100644 casper_types/src/stored_value.rs create mode 100644 casper_types/src/stored_value/type_mismatch.rs create mode 100644 casper_types/src/system.rs create mode 100644 casper_types/src/system/auction.rs create mode 100644 casper_types/src/system/auction/bid.rs create mode 100644 casper_types/src/system/auction/bid/vesting.rs create mode 100644 casper_types/src/system/auction/constants.rs create mode 100644 casper_types/src/system/auction/delegator.rs create mode 100644 casper_types/src/system/auction/entry_points.rs create mode 100644 casper_types/src/system/auction/era_info.rs create mode 100644 casper_types/src/system/auction/error.rs create mode 100644 casper_types/src/system/auction/seigniorage_recipient.rs create mode 100644 casper_types/src/system/auction/unbonding_purse.rs create mode 100644 casper_types/src/system/auction/withdraw_purse.rs create mode 100644 casper_types/src/system/call_stack_element.rs create mode 100644 casper_types/src/system/error.rs create mode 100644 casper_types/src/system/handle_payment.rs create mode 100644 casper_types/src/system/handle_payment/constants.rs create mode 100644 casper_types/src/system/handle_payment/entry_points.rs create mode 100644 casper_types/src/system/handle_payment/error.rs create mode 100644 casper_types/src/system/mint.rs create mode 100644 casper_types/src/system/mint/constants.rs create mode 100644 casper_types/src/system/mint/entry_points.rs create mode 100644 casper_types/src/system/mint/error.rs create mode 100644 casper_types/src/system/standard_payment.rs create mode 100644 casper_types/src/system/standard_payment/constants.rs create mode 100644 casper_types/src/system/standard_payment/entry_points.rs create mode 100644 casper_types/src/system/system_contract_type.rs create mode 100644 casper_types/src/tagged.rs create mode 100644 
casper_types/src/testing.rs create mode 100644 casper_types/src/timestamp.rs create mode 100644 casper_types/src/transfer.rs create mode 100644 casper_types/src/transfer_result.rs create mode 100644 casper_types/src/uint.rs create mode 100644 casper_types/src/uref.rs create mode 100644 casper_types/tests/version_numbers.rs create mode 100644 casper_types_ver_2_0/CHANGELOG.md create mode 100644 casper_types_ver_2_0/Cargo.toml create mode 100644 casper_types_ver_2_0/README.md create mode 100644 casper_types_ver_2_0/benches/bytesrepr_bench.rs create mode 100644 casper_types_ver_2_0/src/access_rights.rs create mode 100644 casper_types_ver_2_0/src/account.rs create mode 100644 casper_types_ver_2_0/src/account/account_hash.rs create mode 100644 casper_types_ver_2_0/src/account/action_thresholds.rs create mode 100644 casper_types_ver_2_0/src/account/action_type.rs create mode 100644 casper_types_ver_2_0/src/account/associated_keys.rs create mode 100644 casper_types_ver_2_0/src/account/error.rs create mode 100644 casper_types_ver_2_0/src/account/weight.rs create mode 100644 casper_types_ver_2_0/src/addressable_entity.rs create mode 100644 casper_types_ver_2_0/src/addressable_entity/action_thresholds.rs create mode 100644 casper_types_ver_2_0/src/addressable_entity/action_type.rs create mode 100644 casper_types_ver_2_0/src/addressable_entity/associated_keys.rs create mode 100644 casper_types_ver_2_0/src/addressable_entity/error.rs create mode 100644 casper_types_ver_2_0/src/addressable_entity/named_keys.rs create mode 100644 casper_types_ver_2_0/src/addressable_entity/weight.rs create mode 100644 casper_types_ver_2_0/src/api_error.rs create mode 100644 casper_types_ver_2_0/src/auction_state.rs create mode 100644 casper_types_ver_2_0/src/binary_port.rs create mode 100644 casper_types_ver_2_0/src/binary_port/binary_request.rs create mode 100644 casper_types_ver_2_0/src/binary_port/binary_response.rs create mode 100644 
casper_types_ver_2_0/src/binary_port/binary_response_and_request.rs create mode 100644 casper_types_ver_2_0/src/binary_port/binary_response_header.rs create mode 100644 casper_types_ver_2_0/src/binary_port/error_code.rs create mode 100644 casper_types_ver_2_0/src/binary_port/get_all_values_result.rs create mode 100644 casper_types_ver_2_0/src/binary_port/get_request.rs create mode 100644 casper_types_ver_2_0/src/binary_port/global_state_query_result.rs create mode 100644 casper_types_ver_2_0/src/binary_port/information_request.rs create mode 100644 casper_types_ver_2_0/src/binary_port/minimal_block_info.rs create mode 100644 casper_types_ver_2_0/src/binary_port/node_status.rs create mode 100644 casper_types_ver_2_0/src/binary_port/payload_type.rs create mode 100644 casper_types_ver_2_0/src/binary_port/record_id.rs create mode 100644 casper_types_ver_2_0/src/binary_port/state_request.rs create mode 100644 casper_types_ver_2_0/src/binary_port/type_wrappers.rs create mode 100644 casper_types_ver_2_0/src/block.rs create mode 100644 casper_types_ver_2_0/src/block/available_block_range.rs create mode 100644 casper_types_ver_2_0/src/block/block_body.rs create mode 100644 casper_types_ver_2_0/src/block/block_body/block_body_v1.rs create mode 100644 casper_types_ver_2_0/src/block/block_body/block_body_v2.rs create mode 100644 casper_types_ver_2_0/src/block/block_hash.rs create mode 100644 casper_types_ver_2_0/src/block/block_hash_and_height.rs create mode 100644 casper_types_ver_2_0/src/block/block_header.rs create mode 100644 casper_types_ver_2_0/src/block/block_header/block_header_v1.rs create mode 100644 casper_types_ver_2_0/src/block/block_header/block_header_v2.rs create mode 100644 casper_types_ver_2_0/src/block/block_identifier.rs create mode 100644 casper_types_ver_2_0/src/block/block_signatures.rs create mode 100644 casper_types_ver_2_0/src/block/block_sync_status.rs create mode 100644 casper_types_ver_2_0/src/block/block_v1.rs create mode 100644 
casper_types_ver_2_0/src/block/block_v2.rs create mode 100644 casper_types_ver_2_0/src/block/era_end.rs create mode 100644 casper_types_ver_2_0/src/block/era_end/era_end_v1.rs create mode 100644 casper_types_ver_2_0/src/block/era_end/era_end_v1/era_report.rs create mode 100644 casper_types_ver_2_0/src/block/era_end/era_end_v2.rs create mode 100644 casper_types_ver_2_0/src/block/finality_signature.rs create mode 100644 casper_types_ver_2_0/src/block/finality_signature_id.rs create mode 100644 casper_types_ver_2_0/src/block/json_compatibility.rs create mode 100644 casper_types_ver_2_0/src/block/json_compatibility/json_block_with_signatures.rs create mode 100644 casper_types_ver_2_0/src/block/rewarded_signatures.rs create mode 100644 casper_types_ver_2_0/src/block/rewards.rs create mode 100644 casper_types_ver_2_0/src/block/signed_block.rs create mode 100644 casper_types_ver_2_0/src/block/signed_block_header.rs create mode 100644 casper_types_ver_2_0/src/block/test_block_builder/test_block_v1_builder.rs create mode 100644 casper_types_ver_2_0/src/block/test_block_builder/test_block_v2_builder.rs create mode 100644 casper_types_ver_2_0/src/block_time.rs create mode 100644 casper_types_ver_2_0/src/byte_code.rs create mode 100644 casper_types_ver_2_0/src/bytesrepr.rs create mode 100644 casper_types_ver_2_0/src/bytesrepr/bytes.rs create mode 100644 casper_types_ver_2_0/src/chainspec.rs create mode 100644 casper_types_ver_2_0/src/chainspec/accounts_config.rs create mode 100644 casper_types_ver_2_0/src/chainspec/accounts_config/account_config.rs create mode 100644 casper_types_ver_2_0/src/chainspec/accounts_config/delegator_config.rs create mode 100644 casper_types_ver_2_0/src/chainspec/accounts_config/genesis.rs create mode 100644 casper_types_ver_2_0/src/chainspec/accounts_config/validator_config.rs create mode 100644 casper_types_ver_2_0/src/chainspec/activation_point.rs create mode 100644 casper_types_ver_2_0/src/chainspec/chainspec_raw_bytes.rs create mode 100644 
casper_types_ver_2_0/src/chainspec/core_config.rs create mode 100644 casper_types_ver_2_0/src/chainspec/fee_handling.rs create mode 100644 casper_types_ver_2_0/src/chainspec/global_state_update.rs create mode 100644 casper_types_ver_2_0/src/chainspec/highway_config.rs create mode 100644 casper_types_ver_2_0/src/chainspec/network_config.rs create mode 100644 casper_types_ver_2_0/src/chainspec/next_upgrade.rs create mode 100644 casper_types_ver_2_0/src/chainspec/protocol_config.rs create mode 100644 casper_types_ver_2_0/src/chainspec/refund_handling.rs create mode 100644 casper_types_ver_2_0/src/chainspec/transaction_config.rs create mode 100644 casper_types_ver_2_0/src/chainspec/transaction_config/deploy_config.rs create mode 100644 casper_types_ver_2_0/src/chainspec/transaction_config/transaction_v1_config.rs create mode 100644 casper_types_ver_2_0/src/chainspec/vm_config.rs create mode 100644 casper_types_ver_2_0/src/chainspec/vm_config/auction_costs.rs create mode 100644 casper_types_ver_2_0/src/chainspec/vm_config/chainspec_registry.rs create mode 100644 casper_types_ver_2_0/src/chainspec/vm_config/handle_payment_costs.rs create mode 100644 casper_types_ver_2_0/src/chainspec/vm_config/host_function_costs.rs create mode 100644 casper_types_ver_2_0/src/chainspec/vm_config/message_limits.rs create mode 100644 casper_types_ver_2_0/src/chainspec/vm_config/mint_costs.rs create mode 100644 casper_types_ver_2_0/src/chainspec/vm_config/opcode_costs.rs create mode 100644 casper_types_ver_2_0/src/chainspec/vm_config/standard_payment_costs.rs create mode 100644 casper_types_ver_2_0/src/chainspec/vm_config/storage_costs.rs create mode 100644 casper_types_ver_2_0/src/chainspec/vm_config/system_config.rs create mode 100644 casper_types_ver_2_0/src/chainspec/vm_config/upgrade_config.rs create mode 100644 casper_types_ver_2_0/src/chainspec/vm_config/wasm_config.rs create mode 100644 casper_types_ver_2_0/src/checksummed_hex.rs create mode 100644 
casper_types_ver_2_0/src/cl_type.rs create mode 100644 casper_types_ver_2_0/src/cl_value.rs create mode 100644 casper_types_ver_2_0/src/cl_value/jsonrepr.rs create mode 100644 casper_types_ver_2_0/src/contract_messages.rs create mode 100644 casper_types_ver_2_0/src/contract_messages/error.rs create mode 100644 casper_types_ver_2_0/src/contract_messages/messages.rs create mode 100644 casper_types_ver_2_0/src/contract_messages/topics.rs create mode 100644 casper_types_ver_2_0/src/contract_wasm.rs create mode 100644 casper_types_ver_2_0/src/contracts.rs create mode 100644 casper_types_ver_2_0/src/crypto.rs create mode 100644 casper_types_ver_2_0/src/crypto/asymmetric_key.rs create mode 100644 casper_types_ver_2_0/src/crypto/asymmetric_key/gens.rs create mode 100644 casper_types_ver_2_0/src/crypto/asymmetric_key/tests.rs create mode 100644 casper_types_ver_2_0/src/crypto/error.rs create mode 100644 casper_types_ver_2_0/src/deploy_info.rs create mode 100644 casper_types_ver_2_0/src/digest.rs create mode 100644 casper_types_ver_2_0/src/digest/chunk_with_proof.rs create mode 100644 casper_types_ver_2_0/src/digest/error.rs create mode 100644 casper_types_ver_2_0/src/digest/indexed_merkle_proof.rs create mode 100644 casper_types_ver_2_0/src/display_iter.rs create mode 100644 casper_types_ver_2_0/src/era_id.rs create mode 100644 casper_types_ver_2_0/src/execution.rs create mode 100644 casper_types_ver_2_0/src/execution/effects.rs create mode 100644 casper_types_ver_2_0/src/execution/execution_result.rs create mode 100644 casper_types_ver_2_0/src/execution/execution_result_v1.rs create mode 100644 casper_types_ver_2_0/src/execution/execution_result_v2.rs create mode 100644 casper_types_ver_2_0/src/execution/transform.rs create mode 100644 casper_types_ver_2_0/src/execution/transform_error.rs create mode 100644 casper_types_ver_2_0/src/execution/transform_kind.rs create mode 100644 casper_types_ver_2_0/src/file_utils.rs create mode 100644 casper_types_ver_2_0/src/gas.rs create 
mode 100644 casper_types_ver_2_0/src/gens.rs create mode 100644 casper_types_ver_2_0/src/json_pretty_printer.rs create mode 100644 casper_types_ver_2_0/src/key.rs create mode 100644 casper_types_ver_2_0/src/lib.rs create mode 100644 casper_types_ver_2_0/src/motes.rs create mode 100644 casper_types_ver_2_0/src/package.rs create mode 100644 casper_types_ver_2_0/src/peers_map.rs create mode 100644 casper_types_ver_2_0/src/phase.rs create mode 100644 casper_types_ver_2_0/src/protocol_version.rs create mode 100644 casper_types_ver_2_0/src/reactor_state.rs create mode 100644 casper_types_ver_2_0/src/semver.rs create mode 100644 casper_types_ver_2_0/src/serde_helpers.rs create mode 100644 casper_types_ver_2_0/src/stored_value.rs create mode 100644 casper_types_ver_2_0/src/stored_value/global_state_identifier.rs create mode 100644 casper_types_ver_2_0/src/stored_value/type_mismatch.rs create mode 100644 casper_types_ver_2_0/src/system.rs create mode 100644 casper_types_ver_2_0/src/system/auction.rs create mode 100644 casper_types_ver_2_0/src/system/auction/bid.rs create mode 100644 casper_types_ver_2_0/src/system/auction/bid/vesting.rs create mode 100644 casper_types_ver_2_0/src/system/auction/bid_addr.rs create mode 100644 casper_types_ver_2_0/src/system/auction/bid_kind.rs create mode 100644 casper_types_ver_2_0/src/system/auction/constants.rs create mode 100644 casper_types_ver_2_0/src/system/auction/delegator.rs create mode 100644 casper_types_ver_2_0/src/system/auction/entry_points.rs create mode 100644 casper_types_ver_2_0/src/system/auction/era_info.rs create mode 100644 casper_types_ver_2_0/src/system/auction/error.rs create mode 100644 casper_types_ver_2_0/src/system/auction/seigniorage_recipient.rs create mode 100644 casper_types_ver_2_0/src/system/auction/unbonding_purse.rs create mode 100644 casper_types_ver_2_0/src/system/auction/validator_bid.rs create mode 100644 casper_types_ver_2_0/src/system/auction/withdraw_purse.rs create mode 100644 
casper_types_ver_2_0/src/system/call_stack_element.rs create mode 100644 casper_types_ver_2_0/src/system/error.rs create mode 100644 casper_types_ver_2_0/src/system/handle_payment.rs create mode 100644 casper_types_ver_2_0/src/system/handle_payment/constants.rs create mode 100644 casper_types_ver_2_0/src/system/handle_payment/entry_points.rs create mode 100644 casper_types_ver_2_0/src/system/handle_payment/error.rs create mode 100644 casper_types_ver_2_0/src/system/mint.rs create mode 100644 casper_types_ver_2_0/src/system/mint/constants.rs create mode 100644 casper_types_ver_2_0/src/system/mint/entry_points.rs create mode 100644 casper_types_ver_2_0/src/system/mint/error.rs create mode 100644 casper_types_ver_2_0/src/system/standard_payment.rs create mode 100644 casper_types_ver_2_0/src/system/standard_payment/constants.rs create mode 100644 casper_types_ver_2_0/src/system/standard_payment/entry_points.rs create mode 100644 casper_types_ver_2_0/src/system/system_contract_type.rs create mode 100644 casper_types_ver_2_0/src/tagged.rs create mode 100644 casper_types_ver_2_0/src/testing.rs create mode 100644 casper_types_ver_2_0/src/timestamp.rs create mode 100644 casper_types_ver_2_0/src/transaction.rs create mode 100644 casper_types_ver_2_0/src/transaction/addressable_entity_identifier.rs create mode 100644 casper_types_ver_2_0/src/transaction/deploy.rs create mode 100644 casper_types_ver_2_0/src/transaction/deploy/deploy_approval.rs create mode 100644 casper_types_ver_2_0/src/transaction/deploy/deploy_approvals_hash.rs create mode 100644 casper_types_ver_2_0/src/transaction/deploy/deploy_builder.rs create mode 100644 casper_types_ver_2_0/src/transaction/deploy/deploy_builder/error.rs create mode 100644 casper_types_ver_2_0/src/transaction/deploy/deploy_footprint.rs create mode 100644 casper_types_ver_2_0/src/transaction/deploy/deploy_hash.rs create mode 100644 casper_types_ver_2_0/src/transaction/deploy/deploy_header.rs create mode 100644 
casper_types_ver_2_0/src/transaction/deploy/deploy_id.rs create mode 100644 casper_types_ver_2_0/src/transaction/deploy/error.rs create mode 100644 casper_types_ver_2_0/src/transaction/deploy/executable_deploy_item.rs create mode 100644 casper_types_ver_2_0/src/transaction/deploy/finalized_deploy_approvals.rs create mode 100644 casper_types_ver_2_0/src/transaction/execution_info.rs create mode 100644 casper_types_ver_2_0/src/transaction/finalized_approvals.rs create mode 100644 casper_types_ver_2_0/src/transaction/initiator_addr.rs create mode 100644 casper_types_ver_2_0/src/transaction/initiator_addr_and_secret_key.rs create mode 100644 casper_types_ver_2_0/src/transaction/package_identifier.rs create mode 100644 casper_types_ver_2_0/src/transaction/pricing_mode.rs create mode 100644 casper_types_ver_2_0/src/transaction/runtime_args.rs create mode 100644 casper_types_ver_2_0/src/transaction/transaction_approvals_hash.rs create mode 100644 casper_types_ver_2_0/src/transaction/transaction_entry_point.rs create mode 100644 casper_types_ver_2_0/src/transaction/transaction_hash.rs create mode 100644 casper_types_ver_2_0/src/transaction/transaction_header.rs create mode 100644 casper_types_ver_2_0/src/transaction/transaction_id.rs create mode 100644 casper_types_ver_2_0/src/transaction/transaction_invocation_target.rs create mode 100644 casper_types_ver_2_0/src/transaction/transaction_runtime.rs create mode 100644 casper_types_ver_2_0/src/transaction/transaction_scheduling.rs create mode 100644 casper_types_ver_2_0/src/transaction/transaction_session_kind.rs create mode 100644 casper_types_ver_2_0/src/transaction/transaction_target.rs create mode 100644 casper_types_ver_2_0/src/transaction/transaction_v1.rs create mode 100644 casper_types_ver_2_0/src/transaction/transaction_v1/errors_v1.rs create mode 100644 casper_types_ver_2_0/src/transaction/transaction_v1/finalized_transaction_v1_approvals.rs create mode 100644 
casper_types_ver_2_0/src/transaction/transaction_v1/transaction_v1_approval.rs create mode 100644 casper_types_ver_2_0/src/transaction/transaction_v1/transaction_v1_approvals_hash.rs create mode 100644 casper_types_ver_2_0/src/transaction/transaction_v1/transaction_v1_body.rs create mode 100644 casper_types_ver_2_0/src/transaction/transaction_v1/transaction_v1_body/arg_handling.rs create mode 100644 casper_types_ver_2_0/src/transaction/transaction_v1/transaction_v1_builder.rs create mode 100644 casper_types_ver_2_0/src/transaction/transaction_v1/transaction_v1_builder/error.rs create mode 100644 casper_types_ver_2_0/src/transaction/transaction_v1/transaction_v1_hash.rs create mode 100644 casper_types_ver_2_0/src/transaction/transaction_v1/transaction_v1_header.rs create mode 100644 casper_types_ver_2_0/src/transfer.rs create mode 100644 casper_types_ver_2_0/src/transfer_result.rs create mode 100644 casper_types_ver_2_0/src/uint.rs create mode 100644 casper_types_ver_2_0/src/uref.rs create mode 100644 casper_types_ver_2_0/src/validator_change.rs create mode 100644 casper_types_ver_2_0/tests/version_numbers.rs create mode 100644 event_sidecar/Cargo.toml rename {sidecar => event_sidecar}/src/admin_server.rs (90%) rename {sidecar => event_sidecar}/src/api_version_manager.rs (100%) rename {sidecar => event_sidecar}/src/database/database_errors.rs (100%) rename {sidecar => event_sidecar}/src/database/env_vars.rs (100%) rename {sidecar => event_sidecar}/src/database/errors.rs (100%) rename {sidecar => event_sidecar}/src/database/migration_manager.rs (100%) rename {sidecar => event_sidecar}/src/database/migration_manager/tests.rs (100%) rename {sidecar => event_sidecar}/src/database/mod.rs (82%) rename {sidecar => event_sidecar}/src/database/postgresql_database.rs (100%) rename {sidecar => event_sidecar}/src/database/postgresql_database/reader.rs (100%) rename {sidecar => event_sidecar}/src/database/postgresql_database/tests.rs (100%) rename {sidecar => 
event_sidecar}/src/database/postgresql_database/writer.rs (100%) rename {sidecar => event_sidecar}/src/database/reader_generator.rs (100%) rename {sidecar => event_sidecar}/src/database/sqlite_database.rs (100%) rename {sidecar => event_sidecar}/src/database/sqlite_database/reader.rs (100%) rename {sidecar => event_sidecar}/src/database/sqlite_database/tests.rs (100%) rename {sidecar => event_sidecar}/src/database/sqlite_database/writer.rs (100%) rename {sidecar => event_sidecar}/src/database/tests.rs (100%) rename {sidecar => event_sidecar}/src/database/types.rs (100%) rename {sidecar => event_sidecar}/src/database/writer_generator.rs (100%) rename {sidecar => event_sidecar}/src/event_stream_server.rs (100%) rename {sidecar => event_sidecar}/src/event_stream_server/config.rs (100%) rename {sidecar => event_sidecar}/src/event_stream_server/endpoint.rs (100%) rename {sidecar => event_sidecar}/src/event_stream_server/event_indexer.rs (100%) rename {sidecar => event_sidecar}/src/event_stream_server/http_server.rs (100%) rename {sidecar => event_sidecar}/src/event_stream_server/sse_server.rs (100%) rename {sidecar => event_sidecar}/src/event_stream_server/tests.rs (100%) create mode 100644 event_sidecar/src/lib.rs rename {sidecar => event_sidecar}/src/rest_server.rs (92%) rename {sidecar => event_sidecar}/src/rest_server/errors.rs (100%) rename {sidecar => event_sidecar}/src/rest_server/filters.rs (100%) rename {sidecar => event_sidecar}/src/rest_server/handlers.rs (100%) rename {sidecar => event_sidecar}/src/rest_server/openapi.rs (100%) rename {sidecar => event_sidecar}/src/rest_server/openapi/schema_transformation_visitor.rs (100%) rename {sidecar => event_sidecar}/src/rest_server/tests.rs (100%) rename {sidecar => event_sidecar}/src/sql.rs (100%) rename {sidecar => event_sidecar}/src/sql/tables.rs (100%) rename {sidecar => event_sidecar}/src/sql/tables/block_added.rs (100%) rename {sidecar => event_sidecar}/src/sql/tables/deploy_accepted.rs (100%) rename {sidecar 
=> event_sidecar}/src/sql/tables/deploy_event.rs (100%) rename {sidecar => event_sidecar}/src/sql/tables/deploy_expired.rs (100%) rename {sidecar => event_sidecar}/src/sql/tables/deploy_processed.rs (100%) rename {sidecar => event_sidecar}/src/sql/tables/event_log.rs (100%) rename {sidecar => event_sidecar}/src/sql/tables/event_type.rs (98%) rename {sidecar => event_sidecar}/src/sql/tables/fault.rs (100%) rename {sidecar => event_sidecar}/src/sql/tables/finality_signature.rs (100%) rename {sidecar => event_sidecar}/src/sql/tables/migration.rs (100%) rename {sidecar => event_sidecar}/src/sql/tables/shutdown.rs (98%) rename {sidecar => event_sidecar}/src/sql/tables/step.rs (100%) rename {sidecar => event_sidecar}/src/testing.rs (100%) rename {sidecar => event_sidecar}/src/testing/fake_database.rs (100%) rename {sidecar => event_sidecar}/src/testing/fake_event_stream.rs (100%) rename {sidecar => event_sidecar}/src/testing/mock_node.rs (100%) rename {sidecar => event_sidecar}/src/testing/raw_sse_events_utils.rs (100%) rename {sidecar => event_sidecar}/src/testing/shared.rs (100%) rename {sidecar => event_sidecar}/src/testing/simple_sse_server.rs (100%) rename {sidecar => event_sidecar}/src/testing/test_clock.rs (100%) rename {sidecar => event_sidecar}/src/testing/testing_config.rs (79%) rename {sidecar => event_sidecar}/src/tests.rs (100%) rename {sidecar => event_sidecar}/src/tests/integration_tests.rs (95%) rename {sidecar => event_sidecar}/src/tests/integration_tests_version_switch.rs (96%) rename {sidecar => event_sidecar}/src/tests/performance_tests.rs (97%) rename {sidecar => event_sidecar}/src/types.rs (100%) rename {sidecar => event_sidecar}/src/types/config.rs (69%) rename {sidecar => event_sidecar}/src/types/database.rs (93%) rename {sidecar => event_sidecar}/src/types/sse_events.rs (100%) rename {sidecar => event_sidecar}/src/utils.rs (86%) create mode 100644 json_rpc/CHANGELOG.md create mode 100644 json_rpc/Cargo.toml create mode 100644 json_rpc/README.md 
create mode 100644 json_rpc/src/error.rs create mode 100644 json_rpc/src/filters.rs create mode 100644 json_rpc/src/filters/tests.rs create mode 100644 json_rpc/src/filters/tests/base_filter_with_recovery_tests.rs create mode 100644 json_rpc/src/filters/tests/main_filter_with_recovery_tests.rs create mode 100644 json_rpc/src/lib.rs create mode 100644 json_rpc/src/rejections.rs create mode 100644 json_rpc/src/request.rs create mode 100644 json_rpc/src/request/params.rs create mode 100644 json_rpc/src/request_handlers.rs create mode 100644 json_rpc/src/response.rs rename EXAMPLE_NCTL_CONFIG.toml => resources/example_configs/EXAMPLE_NCTL_CONFIG.toml (80%) rename EXAMPLE_NCTL_POSTGRES_CONFIG.toml => resources/example_configs/EXAMPLE_NCTL_POSTGRES_CONFIG.toml (87%) rename EXAMPLE_NODE_CONFIG.toml => resources/example_configs/EXAMPLE_NODE_CONFIG.toml (84%) create mode 100644 resources/example_configs/default_rpc_only_config.toml rename resources/{default_config.toml => example_configs/default_sse_only_config.toml} (89%) create mode 100644 resources/test/rpc_schema.json create mode 100644 resources/test/schema_chainspec_bytes.json create mode 100644 resources/test/schema_rpc_schema.json create mode 100644 resources/test/schema_status.json create mode 100644 resources/test/schema_validator_changes.json create mode 100644 rpc_sidecar/Cargo.toml create mode 100644 rpc_sidecar/README.md create mode 100644 rpc_sidecar/build.rs create mode 100644 rpc_sidecar/src/config.rs create mode 100644 rpc_sidecar/src/http_server.rs create mode 100644 rpc_sidecar/src/lib.rs create mode 100644 rpc_sidecar/src/node_client.rs create mode 100644 rpc_sidecar/src/rpcs.rs create mode 100644 rpc_sidecar/src/rpcs/account.rs create mode 100644 rpc_sidecar/src/rpcs/chain.rs create mode 100644 rpc_sidecar/src/rpcs/chain/era_summary.rs create mode 100644 rpc_sidecar/src/rpcs/common.rs create mode 100644 rpc_sidecar/src/rpcs/docs.rs create mode 100644 rpc_sidecar/src/rpcs/error.rs create mode 100644 
rpc_sidecar/src/rpcs/error_code.rs create mode 100644 rpc_sidecar/src/rpcs/info.rs create mode 100644 rpc_sidecar/src/rpcs/speculative_exec.rs create mode 100644 rpc_sidecar/src/rpcs/state.rs create mode 100644 rpc_sidecar/src/speculative_exec_config.rs create mode 100644 rpc_sidecar/src/speculative_exec_server.rs create mode 100644 rpc_sidecar/src/testing/mod.rs create mode 100644 sidecar/src/config.rs create mode 100644 sidecar/src/config/speculative_exec_config.rs diff --git a/Cargo.lock b/Cargo.lock index 063222d8..a79891e8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -41,9 +41,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a" +checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01" dependencies = [ "cfg-if", "getrandom", @@ -92,6 +92,15 @@ dependencies = [ "ansitok", ] +[[package]] +name = "ansi_term" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" +dependencies = [ + "winapi", +] + [[package]] name = "ansitok" version = "0.2.0" @@ -152,9 +161,15 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.75" +version = "1.0.79" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" + +[[package]] +name = "arc-swap" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" +checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" [[package]] name = "archiver-rs" @@ -169,6 +184,12 @@ dependencies = [ "zip", ] +[[package]] +name = "array-init" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "3d62b7694a562cdf5a74227903507c56ab2cc8bdd1f781ed5cb4cf9c9f810bfc" + [[package]] name = "arrayvec" version = "0.5.2" @@ -216,20 +237,20 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 2.0.41", + "proc-macro2 1.0.75", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] name = "async-trait" -version = "0.1.74" +version = "0.1.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" +checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 2.0.41", + "proc-macro2 1.0.75", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] @@ -257,7 +278,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "edcdbedc2236483ab103a53415653d6b4442ea6141baf1ffa85df29635e88436" dependencies = [ "nix", - "rand 0.8.5", + "rand", +] + +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi 0.1.19", + "libc", + "winapi", ] [[package]] @@ -287,6 +319,12 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d27c3610c36aee21ce8ac510e6224498de4228ad772a171ed65643a24693a5a8" +[[package]] +name = "base16ct" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" + [[package]] name = "base64" version = "0.13.1" @@ -305,6 +343,21 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" 
+[[package]] +name = "bimap" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "230c5f1ca6a325a32553f8640d31ac9b49f2411e901e427570154868b46da4f7" + +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + [[package]] name = "bit-set" version = "0.5.3" @@ -335,37 +388,17 @@ dependencies = [ "serde", ] -[[package]] -name = "bitvec" -version = "0.18.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98fcd36dda4e17b7d7abc64cb549bf0201f4ab71e00700c798ca7e62ed3761fa" -dependencies = [ - "funty", - "radium", - "wyz", -] - [[package]] name = "blake2" version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0a4e37d16930f5459780f5621038b6382b9bb37c19016f39fb6b5808d831f174" dependencies = [ - "crypto-mac 0.8.0", + "crypto-mac", "digest 0.9.0", "opaque-debug", ] -[[package]] -name = "block-buffer" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" -dependencies = [ - "generic-array", -] - [[package]] name = "block-buffer" version = "0.10.4" @@ -396,6 +429,26 @@ dependencies = [ "alloc-stdlib", ] +[[package]] +name = "bstr" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c48f0051a4b4c5e0b6d365cd04af53aeaa209e3cc15ec2cdb69e73cc87fbd0dc" +dependencies = [ + "memchr", + "regex-automata 0.4.3", + "serde", +] + +[[package]] +name = "btoi" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dd6407f73a9b8b6162d8a2ef999fe6afd7cc15902ebf42c5cd296addf17e0ad" +dependencies = [ + "num-traits", +] + [[package]] name = "bumpalo" version = "3.14.0" @@ -408,6 +461,26 @@ version = "0.6.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "e1e5f035d16fc623ae5f74981db80a439803888314e3a555fd6f04acd51a3205" +[[package]] +name = "bytemuck" +version = "1.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "374d28ec25809ee0e23827c2ab573d729e293f281dfe393500e7ad618baa61c6" +dependencies = [ + "bytemuck_derive", +] + +[[package]] +name = "bytemuck_derive" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "965ab7eb5f8f97d2a083c799f3a1b994fc397b2fe2da5d1da1626ce15a39f2b1" +dependencies = [ + "proc-macro2 1.0.75", + "quote 1.0.35", + "syn 2.0.48", +] + [[package]] name = "byteorder" version = "1.5.0" @@ -480,7 +553,6 @@ dependencies = [ "casper-event-listener", "casper-event-types", "casper-types", - "clap", "colored", "derive-new", "eventsource-stream", @@ -496,8 +568,8 @@ dependencies = [ "once_cell", "pg-embed", "portpicker", - "pretty_assertions", - "rand 0.8.5", + "pretty_assertions 1.4.0", + "rand", "regex", "reqwest", "schemars", @@ -508,11 +580,9 @@ dependencies = [ "tabled", "tempfile", "thiserror", - "tikv-jemallocator", "tokio", "tokio-stream", "tokio-util", - "toml", "tower", "tracing", "tracing-subscriber", @@ -533,51 +603,189 @@ dependencies = [ "hex_fmt", "once_cell", "prometheus", - "rand 0.8.5", + "rand", "serde", "serde_json", "thiserror", "utoipa", ] +[[package]] +name = "casper-json-rpc" +version = "1.1.0" +dependencies = [ + "bytes", + "env_logger", + "futures", + "http", + "hyper", + "itertools 0.10.5", + "serde", + "serde_json", + "tokio", + "tracing", + "warp", +] + +[[package]] +name = "casper-rpc-sidecar" +version = "1.0.0" +dependencies = [ + "anyhow", + "assert-json-diff", + "async-trait", + "backtrace", + "base16", + "bincode", + "bytes", + "casper-json-rpc", + "casper-types-ver-2_0", + "datasize", + "futures", + "http", + "hyper", + "juliet", + "num_cpus", + "once_cell", + "portpicker", + "pretty_assertions 0.7.2", + "rand", + "regex", + 
"schemars", + "serde", + "serde_json", + "structopt", + "tempfile", + "thiserror", + "tokio", + "toml 0.5.11", + "tower", + "tracing", + "tracing-subscriber", + "vergen", + "warp", +] + +[[package]] +name = "casper-sidecar" +version = "1.0.0" +dependencies = [ + "anyhow", + "backtrace", + "casper-event-sidecar", + "casper-rpc-sidecar", + "clap 4.4.13", + "datasize", + "futures", + "num_cpus", + "serde", + "thiserror", + "tikv-jemallocator", + "tokio", + "toml 0.5.11", + "tracing", + "tracing-subscriber", +] + [[package]] name = "casper-types" +version = "4.0.1" +dependencies = [ + "base16", + "base64 0.13.1", + "bincode", + "bitflags 1.3.2", + "blake2", + "criterion", + "datasize", + "derp", + "ed25519-dalek", + "getrandom", + "hex", + "hex_fmt", + "humantime", + "k256", + "num", + "num-derive", + "num-integer", + "num-rational", + "num-traits", + "once_cell", + "openssl", + "pem", + "proptest", + "proptest-attr-macro", + "proptest-derive", + "rand", + "rand_pcg", + "schemars", + "serde", + "serde_bytes", + "serde_json", + "serde_test", + "strum 0.24.1", + "tempfile", + "thiserror", + "uint", + "untrusted 0.7.1", + "version-sync", +] + +[[package]] +name = "casper-types-ver-2_0" version = "3.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d65faf6ea346ce733206a51822cb4da2a76cee29308b0ee4c1f3cba756bdee5" dependencies = [ "base16", "base64 0.13.1", + "bincode", "bitflags 1.3.2", "blake2", + "criterion", + "datasize", + "derive_more", "derp", "ed25519-dalek", "getrandom", "hex", "hex_fmt", "humantime", + "itertools 0.10.5", "k256", + "libc", "num", "num-derive", "num-integer", "num-rational", "num-traits", "once_cell", + "openssl", "pem", "proptest", + "proptest-attr-macro", "proptest-derive", - "rand 0.8.5", + "rand", "rand_pcg", "schemars", "serde", + "serde-map-to-array", "serde_bytes", "serde_json", - "strum", + "serde_test", + "strum 0.24.1", + "tempfile", "thiserror", + "tracing", "uint", "untrusted 0.7.1", + "version-sync", ] 
+[[package]] +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + [[package]] name = "cc" version = "1.0.83" @@ -606,9 +814,24 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.11" +version = "2.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" +dependencies = [ + "ansi_term", + "atty", + "bitflags 1.3.2", + "strsim 0.8.0", + "textwrap", + "unicode-width", + "vec_map", +] + +[[package]] +name = "clap" +version = "4.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfaff671f6b22ca62406885ece523383b9b64022e341e53e009a62ebc47a45f2" +checksum = "52bdc885e4cacc7f7c9eedc1ef6da641603180c783c41a15c264944deeaab642" dependencies = [ "clap_builder", "clap_derive", @@ -616,14 +839,14 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.4.11" +version = "4.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a216b506622bb1d316cd51328dce24e07bdff4a6128a47c7e7fad11878d5adbb" +checksum = "fb7fb5e4e979aec3be7791562fcba452f94ad85e954da024396433e0e25a79e9" dependencies = [ "anstream", "anstyle", "clap_lex", - "strsim", + "strsim 0.10.0", ] [[package]] @@ -632,10 +855,10 @@ version = "4.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442" dependencies = [ - "heck", - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 2.0.41", + "heck 0.4.1", + "proc-macro2 1.0.75", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] @@ -644,6 +867,12 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1" +[[package]] +name = "clru" +version = "0.6.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8191fa7302e03607ff0e237d4246cc043ff5b3cb9409d995172ba3bea16b807" + [[package]] name = "colorchoice" version = "1.0.0" @@ -672,6 +901,12 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" +[[package]] +name = "convert_case" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" + [[package]] name = "core-foundation" version = "0.9.4" @@ -690,9 +925,9 @@ checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" [[package]] name = "cpufeatures" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce420fe07aecd3e67c5f910618fe65e94158f6dcc0adf44e00d69ce2bdfe0fd0" +checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" dependencies = [ "libc", ] @@ -721,11 +956,69 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "criterion" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f" +dependencies = [ + "atty", + "cast", + "clap 2.34.0", + "criterion-plot", + "csv", + "itertools 0.10.5", + "lazy_static", + "num-traits", + "oorandom", + "plotters", + "rayon", + "regex", + "serde", + "serde_cbor", + "serde_derive", + "serde_json", + "tinytemplate", + "walkdir", +] + +[[package]] +name = "criterion-plot" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2673cc8207403546f45f5fd319a974b1e6983ad1a3ee7e6041650013be041876" +dependencies = [ + "cast", + "itertools 0.10.5", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"fca89a0e215bab21874660c67903c5f143333cab1da83d041c7ded6053774751" +dependencies = [ + "cfg-if", + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e3681d554572a651dda4186cd47240627c3d0114d45a95f6ad27f2f22e7548d" +dependencies = [ + "autocfg", + "cfg-if", + "crossbeam-utils", +] + [[package]] name = "crossbeam-queue" -version = "0.3.9" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9bcf5bdbfdd6030fb4a1c497b5d5fc5921aa2f60d359a17e249c0e6df3de153" +checksum = "adc6598521bb5a83d491e8c1fe51db7296019d2ca3cb93cc6c2a20369a4d78a2" dependencies = [ "cfg-if", "crossbeam-utils", @@ -733,9 +1026,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.17" +version = "0.8.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06d96137f14f244c37f989d9fff8f95e6c18b918e71f36638f8c49112e4c78f" +checksum = "c3a430a770ebd84726f584a90ee7f020d28db52c6d02138900f22341f866d39c" dependencies = [ "cfg-if", ] @@ -746,6 +1039,18 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" +[[package]] +name = "crypto-bigint" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" +dependencies = [ + "generic-array", + "rand_core", + "subtle", + "zeroize", +] + [[package]] name = "crypto-common" version = "0.1.6" @@ -767,34 +1072,92 @@ dependencies = [ ] [[package]] -name = "crypto-mac" -version = "0.10.1" +name = "csv" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff07008ec701e8028e2ceb8f83f0e4274ee62bd2dbdc4fefff2e9a91824081a" +checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe" 
dependencies = [ - "generic-array", - "subtle", + "csv-core", + "itoa", + "ryu", + "serde", ] [[package]] -name = "curve25519-dalek" -version = "3.2.0" +name = "csv-core" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" +checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70" dependencies = [ - "byteorder", - "digest 0.9.0", - "rand_core 0.5.1", - "subtle", + "memchr", +] + +[[package]] +name = "ctor" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" +dependencies = [ + "quote 1.0.35", + "syn 1.0.109", +] + +[[package]] +name = "curve25519-dalek" +version = "4.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e89b8c6a2e4b1f45971ad09761aafb85514a84744b67a95e32c3cc1352d1f65c" +dependencies = [ + "cfg-if", + "cpufeatures", + "curve25519-dalek-derive", + "digest 0.10.7", + "fiat-crypto", + "platforms", + "rustc_version", + "subtle", "zeroize", ] +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" +dependencies = [ + "proc-macro2 1.0.75", + "quote 1.0.35", + "syn 2.0.48", +] + [[package]] name = "data-encoding" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" +[[package]] +name = "datasize" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e65c07d59e45d77a8bda53458c24a828893a99ac6cdd9c84111e09176ab739a2" +dependencies = [ + "datasize_derive", + "fake_instant", + "serde", +] + +[[package]] +name = "datasize_derive" +version = "0.2.15" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "613e4ee15899913285b7612004bbd490abd605be7b11d35afada5902fb6b91d5" +dependencies = [ + "proc-macro2 1.0.75", + "quote 1.0.35", + "syn 1.0.109", +] + [[package]] name = "der" version = "0.7.8" @@ -808,9 +1171,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.10" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8eb30d70a07a3b04884d2677f06bec33509dc67ca60d92949e5535352d3191dc" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" dependencies = [ "powerfmt", ] @@ -821,8 +1184,21 @@ version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3418329ca0ad70234b9735dc4ceed10af4df60eff9c8e7b06cb5e520d92c3535" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", + "proc-macro2 1.0.75", + "quote 1.0.35", + "syn 1.0.109", +] + +[[package]] +name = "derive_more" +version = "0.99.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" +dependencies = [ + "convert_case", + "proc-macro2 1.0.75", + "quote 1.0.35", + "rustc_version", "syn 1.0.109", ] @@ -856,7 +1232,7 @@ version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ - "block-buffer 0.10.4", + "block-buffer", "const-oid", "crypto-common", "subtle", @@ -909,6 +1285,12 @@ version = "0.15.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" +[[package]] +name = "dunce" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b" + [[package]] name = "dyn-clone" version = "1.0.16" @@ -917,34 +1299,38 @@ checksum = 
"545b22097d44f8a9581187cdf93de7a71e4722bf51200cfaba810865b49a495d" [[package]] name = "ecdsa" -version = "0.10.2" +version = "0.16.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41fbdb4ff710acb4db8ca29f93b897529ea6d6a45626d5183b47e012aa6ae7e4" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" dependencies = [ + "der", + "digest 0.10.7", "elliptic-curve", - "hmac 0.10.1", - "signature 1.2.2", + "rfc6979", + "signature", ] [[package]] name = "ed25519" -version = "1.2.0" +version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4620d40f6d2601794401d6dd95a5cf69b6c157852539470eeda433a99b3c0efc" +checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" dependencies = [ - "signature 1.2.2", + "pkcs8", + "signature", ] [[package]] name = "ed25519-dalek" -version = "1.0.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" +checksum = "1f628eaec48bfd21b865dc2950cfa014450c01d2fa2b69a86c2fd5844ec523c0" dependencies = [ "curve25519-dalek", "ed25519", - "rand 0.7.3", - "sha2 0.9.9", + "serde", + "sha2", + "subtle", "zeroize", ] @@ -959,17 +1345,18 @@ dependencies = [ [[package]] name = "elliptic-curve" -version = "0.8.5" +version = "0.13.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2db227e61a43a34915680bdda462ec0e212095518020a88a1f91acd16092c39" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" dependencies = [ - "bitvec", - "digest 0.9.0", + "base16ct", + "crypto-bigint", + "digest 0.10.7", "ff", - "funty", "generic-array", "group", - "rand_core 0.5.1", + "rand_core", + "sec1", "subtle", "zeroize", ] @@ -983,6 +1370,19 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "env_logger" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" +dependencies = [ + "atty", + "humantime", + "log", + "regex", + "termcolor", +] + [[package]] name = "equivalent" version = "1.0.1" @@ -1027,6 +1427,12 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "fake_instant" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3006df2e7bf21592b4983931164020b02f54eefdc1e35b2f70147858cc1e20ad" + [[package]] name = "fancy-regex" version = "0.11.0" @@ -1037,6 +1443,15 @@ dependencies = [ "regex", ] +[[package]] +name = "faster-hex" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2a2b11eda1d40935b26cf18f6833c526845ae8c41e58d09af6adeb6f0269183" +dependencies = [ + "serde", +] + [[package]] name = "fastrand" version = "2.0.1" @@ -1045,15 +1460,20 @@ checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" [[package]] name = "ff" -version = "0.8.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01646e077d4ebda82b73f1bca002ea1e91561a77df2431a9e79729bcc31950ef" +checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" dependencies = [ - "bitvec", - "rand_core 0.5.1", + "rand_core", "subtle", ] +[[package]] +name = "fiat-crypto" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "27573eac26f4dd11e2b1916c3fe1baa56407c83c71a773a8ba17ec0bca03b6b7" + [[package]] name = "filetime" version = "0.2.23" @@ -1134,159 +1554,650 @@ dependencies = [ ] [[package]] -name = "funty" -version = "1.1.0" +name = "futures" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + 
+[[package]] +name = "futures-channel" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" + +[[package]] +name = "futures-executor" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-intrusive" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a604f7a68fbf8103337523b1fadc8ade7361ee3f112f7c680ad179651616aed5" +dependencies = [ + "futures-core", + "lock_api", + "parking_lot 0.11.2", +] + +[[package]] +name = "futures-intrusive" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d930c203dd0b6ff06e0201a4a2fe9149b43c684fd4420555b26d21b1a02956f" +dependencies = [ + "futures-core", + "lock_api", + "parking_lot 0.12.1", +] + +[[package]] +name = "futures-io" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" + +[[package]] +name = "futures-macro" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +dependencies = [ + "proc-macro2 1.0.75", + "quote 1.0.35", + "syn 2.0.48", +] + +[[package]] +name = "futures-sink" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" + +[[package]] +name = "futures-task" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" + +[[package]] +name = "futures-util" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", + "zeroize", +] + +[[package]] +name = "getrandom" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "wasi", + "wasm-bindgen", +] + +[[package]] +name = "gimli" +version = "0.28.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" + +[[package]] +name = "gix" +version = "0.55.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "002667cd1ebb789313d0d0afe3d23b2821cf3b0e91605095f0e6d8751f0ceeea" +dependencies = [ + "gix-actor", + "gix-commitgraph", + "gix-config", + "gix-date", + "gix-diff", + "gix-discover", + "gix-features", + "gix-fs", + "gix-glob", + "gix-hash", + "gix-hashtable", + "gix-index", + "gix-lock", + "gix-macros", + "gix-object", + "gix-odb", + "gix-pack", + "gix-path", + "gix-ref", + "gix-refspec", + "gix-revision", + "gix-revwalk", + "gix-sec", + "gix-tempfile", + 
"gix-trace", + "gix-traverse", + "gix-url", + "gix-utils", + "gix-validate", + "once_cell", + "parking_lot 0.12.1", + "signal-hook", + "smallvec", + "thiserror", + "unicode-normalization", +] + +[[package]] +name = "gix-actor" +version = "0.28.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2eadca029ef716b4378f7afb19f7ee101fde9e58ba1f1445971315ac866db417" +dependencies = [ + "bstr", + "btoi", + "gix-date", + "itoa", + "thiserror", + "winnow", +] + +[[package]] +name = "gix-bitmap" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78b6cd0f246180034ddafac9b00a112f19178135b21eb031b3f79355891f7325" +dependencies = [ + "thiserror", +] + +[[package]] +name = "gix-chunk" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "003ec6deacf68076a0c157271a127e0bb2c031c1a41f7168cbe5d248d9b85c78" +dependencies = [ + "thiserror", +] + +[[package]] +name = "gix-commitgraph" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85a7007ba021f059803afaf6f8a48872422abc20550ac12ede6ddea2936cec36" +dependencies = [ + "bstr", + "gix-chunk", + "gix-features", + "gix-hash", + "memmap2 0.9.3", + "thiserror", +] + +[[package]] +name = "gix-config" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cae98c6b4c66c09379bc35274b172587d6b0ac369a416c39128ad8c6454f9bb" +dependencies = [ + "bstr", + "gix-config-value", + "gix-features", + "gix-glob", + "gix-path", + "gix-ref", + "gix-sec", + "memchr", + "once_cell", + "smallvec", + "thiserror", + "unicode-bom", + "winnow", +] + +[[package]] +name = "gix-config-value" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52e0be46f4cf1f8f9e88d0e3eb7b29718aff23889563249f379119bd1ab6910e" +dependencies = [ + "bitflags 2.4.1", + "bstr", + "gix-path", + "libc", + "thiserror", +] + +[[package]] 
+name = "gix-date" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" +checksum = "fb7f3dfb72bebe3449b5e642be64e3c6ccbe9821c8b8f19f487cf5bfbbf4067e" +dependencies = [ + "bstr", + "itoa", + "thiserror", + "time", +] + +[[package]] +name = "gix-diff" +version = "0.37.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "931394f69fb8c9ed6afc0aae3487bd869e936339bcc13ed8884472af072e0554" +dependencies = [ + "gix-hash", + "gix-object", + "thiserror", +] + +[[package]] +name = "gix-discover" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a45d5cf0321178883e38705ab2b098f625d609a7d4c391b33ac952eff2c490f2" +dependencies = [ + "bstr", + "dunce", + "gix-hash", + "gix-path", + "gix-ref", + "gix-sec", + "thiserror", +] + +[[package]] +name = "gix-features" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d46a4a5c6bb5bebec9c0d18b65ada20e6517dbd7cf855b87dd4bbdce3a771b2" +dependencies = [ + "crc32fast", + "flate2", + "gix-hash", + "gix-trace", + "libc", + "once_cell", + "prodash", + "sha1_smol", + "thiserror", + "walkdir", +] + +[[package]] +name = "gix-fs" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20e86eb040f5776a5ade092282e51cdcad398adb77d948b88d17583c2ae4e107" +dependencies = [ + "gix-features", +] + +[[package]] +name = "gix-glob" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5db19298c5eeea2961e5b3bf190767a2d1f09b8802aeb5f258e42276350aff19" +dependencies = [ + "bitflags 2.4.1", + "bstr", + "gix-features", + "gix-path", +] + +[[package]] +name = "gix-hash" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f8cf8c2266f63e582b7eb206799b63aa5fa68ee510ad349f637dfe2d0653de0" +dependencies = 
[ + "faster-hex", + "thiserror", +] + +[[package]] +name = "gix-hashtable" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "feb61880816d7ec4f0b20606b498147d480860ddd9133ba542628df2f548d3ca" +dependencies = [ + "gix-hash", + "hashbrown 0.14.3", + "parking_lot 0.12.1", +] + +[[package]] +name = "gix-index" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c83a4fcc121b2f2e109088f677f89f85e7a8ebf39e8e6659c0ae54d4283b1650" +dependencies = [ + "bitflags 2.4.1", + "bstr", + "btoi", + "filetime", + "gix-bitmap", + "gix-features", + "gix-fs", + "gix-hash", + "gix-lock", + "gix-object", + "gix-traverse", + "itoa", + "memmap2 0.7.1", + "smallvec", + "thiserror", +] + +[[package]] +name = "gix-lock" +version = "11.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e5c65e6a29830a435664891ced3f3c1af010f14900226019590ee0971a22f37" +dependencies = [ + "gix-tempfile", + "gix-utils", + "thiserror", +] + +[[package]] +name = "gix-macros" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d75e7ab728059f595f6ddc1ad8771b8d6a231971ae493d9d5948ecad366ee8bb" +dependencies = [ + "proc-macro2 1.0.75", + "quote 1.0.35", + "syn 2.0.48", +] + +[[package]] +name = "gix-object" +version = "0.38.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "740f2a44267f58770a1cb3a3d01d14e67b089c7136c48d4bddbb3cfd2bf86a51" +dependencies = [ + "bstr", + "btoi", + "gix-actor", + "gix-date", + "gix-features", + "gix-hash", + "gix-validate", + "itoa", + "smallvec", + "thiserror", + "winnow", +] + +[[package]] +name = "gix-odb" +version = "0.54.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8630b56cb80d8fa684d383dad006a66401ee8314e12fbf0e566ddad8c115143b" +dependencies = [ + "arc-swap", + "gix-date", + "gix-features", + "gix-hash", + "gix-object", + "gix-pack", + "gix-path", 
+ "gix-quote", + "parking_lot 0.12.1", + "tempfile", + "thiserror", +] [[package]] -name = "futures" -version = "0.3.29" +name = "gix-pack" +version = "0.44.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335" +checksum = "1431ba2e30deff1405920693d54ab231c88d7c240dd6ccc936ee223d8f8697c3" dependencies = [ - "futures-channel", - "futures-core", - "futures-executor", - "futures-io", - "futures-sink", - "futures-task", - "futures-util", + "clru", + "gix-chunk", + "gix-features", + "gix-hash", + "gix-hashtable", + "gix-object", + "gix-path", + "gix-tempfile", + "memmap2 0.7.1", + "parking_lot 0.12.1", + "smallvec", + "thiserror", ] [[package]] -name = "futures-channel" -version = "0.3.29" +name = "gix-path" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb" +checksum = "b8dd0998ab245f33d40ca2267e58d542fe54185ebd1dc41923346cf28d179fb6" dependencies = [ - "futures-core", - "futures-sink", + "bstr", + "gix-trace", + "home", + "once_cell", + "thiserror", ] [[package]] -name = "futures-core" -version = "0.3.29" +name = "gix-quote" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" +checksum = "9f7dc10303d73a960d10fb82f81188b036ac3e6b11b5795b20b1a60b51d1321f" +dependencies = [ + "bstr", + "btoi", + "thiserror", +] [[package]] -name = "futures-executor" -version = "0.3.29" +name = "gix-ref" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f4fb8693db0cf099eadcca0efe2a5a22e4550f98ed16aba6c48700da29597bc" +checksum = "0ec2f6d07ac88d2fb8007ee3fa3e801856fb9d82e7366ec0ca332eb2c9d74a52" dependencies = [ - "futures-core", - "futures-task", - "futures-util", + "gix-actor", + "gix-date", + "gix-features", + "gix-fs", + 
"gix-hash", + "gix-lock", + "gix-object", + "gix-path", + "gix-tempfile", + "gix-validate", + "memmap2 0.7.1", + "thiserror", + "winnow", ] [[package]] -name = "futures-intrusive" -version = "0.4.2" +name = "gix-refspec" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a604f7a68fbf8103337523b1fadc8ade7361ee3f112f7c680ad179651616aed5" +checksum = "ccb0974cc41dbdb43a180c7f67aa481e1c1e160fcfa8f4a55291fd1126c1a6e7" dependencies = [ - "futures-core", - "lock_api", - "parking_lot 0.11.2", + "bstr", + "gix-hash", + "gix-revision", + "gix-validate", + "smallvec", + "thiserror", ] [[package]] -name = "futures-intrusive" -version = "0.5.0" +name = "gix-revision" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d930c203dd0b6ff06e0201a4a2fe9149b43c684fd4420555b26d21b1a02956f" +checksum = "2ca97ac73459a7f3766aa4a5638a6e37d56d4c7962bc1986fbaf4883d0772588" dependencies = [ - "futures-core", - "lock_api", - "parking_lot 0.12.1", + "bstr", + "gix-date", + "gix-hash", + "gix-hashtable", + "gix-object", + "gix-revwalk", + "gix-trace", + "thiserror", ] [[package]] -name = "futures-io" -version = "0.3.29" +name = "gix-revwalk" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa" +checksum = "a16d8c892e4cd676d86f0265bf9d40cefd73d8d94f86b213b8b77d50e77efae0" +dependencies = [ + "gix-commitgraph", + "gix-date", + "gix-hash", + "gix-hashtable", + "gix-object", + "smallvec", + "thiserror", +] [[package]] -name = "futures-macro" -version = "0.3.29" +name = "gix-sec" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" +checksum = "78f6dce0c6683e2219e8169aac4b1c29e89540a8262fef7056b31d80d969408c" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 2.0.41", + 
"bitflags 2.4.1", + "gix-path", + "libc", + "windows", ] [[package]] -name = "futures-sink" -version = "0.3.29" +name = "gix-tempfile" +version = "11.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817" +checksum = "388dd29114a86ec69b28d1e26d6d63a662300ecf61ab3f4cc578f7d7dc9e7e23" +dependencies = [ + "gix-fs", + "libc", + "once_cell", + "parking_lot 0.12.1", + "signal-hook", + "signal-hook-registry", + "tempfile", +] [[package]] -name = "futures-task" -version = "0.3.29" +name = "gix-trace" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2" +checksum = "e8e1127ede0475b58f4fe9c0aaa0d9bb0bad2af90bbd93ccd307c8632b863d89" [[package]] -name = "futures-util" -version = "0.3.29" +name = "gix-traverse" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" +checksum = "14d050ec7d4e1bb76abf0636cf4104fb915b70e54e3ced9a4427c999100ff38a" dependencies = [ - "futures-channel", - "futures-core", - "futures-io", - "futures-macro", - "futures-sink", - "futures-task", - "memchr", - "pin-project-lite", - "pin-utils", - "slab", + "gix-commitgraph", + "gix-date", + "gix-hash", + "gix-hashtable", + "gix-object", + "gix-revwalk", + "smallvec", + "thiserror", ] [[package]] -name = "generic-array" -version = "0.14.7" +name = "gix-url" +version = "0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +checksum = "0c427a1a11ccfa53a4a2da47d9442c2241deee63a154bc15cc14b8312fbc4005" dependencies = [ - "typenum", - "version_check", + "bstr", + "gix-features", + "gix-path", + "home", + "thiserror", + "url", ] [[package]] -name = "getrandom" -version = "0.2.11" +name = "gix-utils" +version = 
"0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f" +checksum = "de6225e2de30b6e9bca2d9f1cc4731640fcef0fb3cabddceee366e7e85d3e94f" dependencies = [ - "cfg-if", - "js-sys", - "libc", - "wasi", - "wasm-bindgen", + "fastrand", ] [[package]] -name = "gimli" -version = "0.28.1" +name = "gix-validate" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" +checksum = "ac7cc36f496bd5d96cdca0f9289bb684480725d40db60f48194aa7723b883854" +dependencies = [ + "bstr", + "thiserror", +] [[package]] name = "group" -version = "0.8.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc11f9f5fbf1943b48ae7c2bf6846e7d827a512d1be4f23af708f5ca5d01dde1" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ "ff", - "rand_core 0.5.1", + "rand_core", "subtle", ] @@ -1309,6 +2220,12 @@ dependencies = [ "tracing", ] +[[package]] +name = "half" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" + [[package]] name = "hashbrown" version = "0.12.3" @@ -1321,7 +2238,7 @@ version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" dependencies = [ - "ahash 0.8.6", + "ahash 0.8.7", "allocator-api2", ] @@ -1358,6 +2275,15 @@ dependencies = [ "http", ] +[[package]] +name = "heck" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" +dependencies = [ + "unicode-segmentation", +] + [[package]] name = "heck" version = "0.4.1" @@ -1367,6 +2293,15 @@ dependencies = [ "unicode-segmentation", ] 
+[[package]] +name = "hermit-abi" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +dependencies = [ + "libc", +] + [[package]] name = "hermit-abi" version = "0.3.3" @@ -1401,17 +2336,7 @@ version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b5f8eb2ad728638ea2c7d47a21db23b7b58a72ed6a38256b8a1849f15fbbdf7" dependencies = [ - "hmac 0.12.1", -] - -[[package]] -name = "hmac" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1441c6b1e930e2817404b5046f1f989899143a12bf92de603b69f4e0aee1e15" -dependencies = [ - "crypto-mac 0.10.1", - "digest 0.9.0", + "hmac", ] [[package]] @@ -1543,13 +2468,13 @@ dependencies = [ [[package]] name = "inherent" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce243b1bfa62ffc028f1cc3b6034ec63d649f3031bc8a4fbbb004e1ac17d1f68" +checksum = "0122b7114117e64a63ac49f752a5ca4624d534c7b1c7de796ac196381cd2d947" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 2.0.41", + "proc-macro2 1.0.75", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] @@ -1576,7 +2501,7 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ - "hermit-abi", + "hermit-abi 0.3.3", "libc", "windows-sys 0.48.0", ] @@ -1644,11 +2569,11 @@ version = "0.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a071f4f7efc9a9118dfb627a0a94ef247986e1ab8606a4c806ae2b3aa3b6978" dependencies = [ - "ahash 0.8.6", + "ahash 0.8.7", "anyhow", "base64 0.21.5", "bytecount", - "clap", + "clap 4.4.13", "fancy-regex", "fraction", "getrandom", @@ -1668,16 +2593,34 @@ dependencies = [ "uuid", ] +[[package]] +name = "juliet" +version = "0.2.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "037077290fa87cd3a82b7bace2b3278c5e774d584e2626e1a356dced41f690a5" +dependencies = [ + "array-init", + "bimap", + "bytemuck", + "bytes", + "futures", + "once_cell", + "strum 0.25.0", + "thiserror", + "tokio", + "tracing", +] + [[package]] name = "k256" -version = "0.7.3" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4476a0808212a9e81ce802eb1a0cfc60e73aea296553bacc0fac7e1268bc572a" +checksum = "3f01b677d82ef7a676aa37e099defd83a28e15687112cafdd112d60236b6115b" dependencies = [ "cfg-if", "ecdsa", "elliptic-curve", - "sha2 0.9.9", + "sha2", ] [[package]] @@ -1762,6 +2705,15 @@ dependencies = [ "pkg-config", ] +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata 0.1.10", +] + [[package]] name = "md-5" version = "0.10.6" @@ -1774,9 +2726,27 @@ dependencies = [ [[package]] name = "memchr" -version = "2.6.4" +version = "2.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" + +[[package]] +name = "memmap2" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f49388d20533534cd19360ad3d6a7dadc885944aa802ba3995040c5ec11288c6" +dependencies = [ + "libc", +] + +[[package]] +name = "memmap2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" +checksum = "45fd3a57831bf88bc63f8cebc0cf956116276e97fef3966103e96416209f7c92" +dependencies = [ + "libc", +] [[package]] name = "mime" @@ -1831,7 +2801,7 @@ dependencies = [ "futures", "hyper", "log", - "rand 0.8.5", + "rand", "regex", "serde_json", "serde_urlencoded", @@ -1943,7 +2913,7 @@ 
dependencies = [ "num-integer", "num-iter", "num-traits", - "rand 0.8.5", + "rand", "smallvec", "zeroize", ] @@ -1969,8 +2939,8 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", + "proc-macro2 1.0.75", + "quote 1.0.35", "syn 1.0.109", ] @@ -2005,6 +2975,7 @@ dependencies = [ "num-bigint", "num-integer", "num-traits", + "serde", ] [[package]] @@ -2023,15 +2994,24 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi", + "hermit-abi 0.3.3", + "libc", +] + +[[package]] +name = "num_threads" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" +dependencies = [ "libc", ] [[package]] name = "object" -version = "0.32.1" +version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" dependencies = [ "memchr", ] @@ -2042,6 +3022,12 @@ version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +[[package]] +name = "oorandom" +version = "11.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" + [[package]] name = "opaque-debug" version = "0.3.0" @@ -2050,9 +3036,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.61" +version = "0.10.62" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "6b8419dc8cc6d866deb801274bba2e6f8f6108c1bb7fcc10ee5ab864931dbb45" +checksum = "8cde4d2d9200ad5909f8dac647e29482e07c3a35de8a13fce7c9c7747ad9f671" dependencies = [ "bitflags 2.4.1", "cfg-if", @@ -2069,9 +3055,9 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 2.0.41", + "proc-macro2 1.0.75", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] @@ -2082,9 +3068,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.97" +version = "0.9.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3eaad34cdd97d81de97964fc7f29e2d104f483840d906ef56daa1912338460b" +checksum = "c1665caf8ab2dc9aef43d1c0023bd904633a6a05cb30b0ad59bec2ae986e57a7" dependencies = [ "cc", "libc", @@ -2098,6 +3084,15 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" +[[package]] +name = "output_vt100" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "628223faebab4e3e40667ee0b2336d34a5b960ff60ea743ddfdbcf7770bcfb66" +dependencies = [ + "winapi", +] + [[package]] name = "overload" version = "0.1.1" @@ -2172,7 +3167,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7676374caaee8a325c9e7a2ae557f216c5563a171d6997b0ef8a65af35147700" dependencies = [ "base64ct", - "rand_core 0.6.4", + "rand_core", "subtle", ] @@ -2189,9 +3184,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" dependencies = [ "digest 0.10.7", - "hmac 0.12.1", + "hmac", "password-hash", - "sha2 0.10.8", + "sha2", ] [[package]] @@ -2254,9 +3249,9 @@ version = "1.1.3" source 
= "registry+https://github.com/rust-lang/crates.io-index" checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 2.0.41", + "proc-macro2 1.0.75", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] @@ -2294,9 +3289,43 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.27" +version = "0.3.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69d3587f8a9e599cc7ec2c00e331f71c4e69a5f9a4b8a6efd5b07466b9736f9a" + +[[package]] +name = "platforms" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "626dec3cac7cc0e1577a2ec3fc496277ec2baa084bebad95bb6fdbfae235f84c" + +[[package]] +name = "plotters" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2c224ba00d7cadd4d5c660deaf2098e5e80e07846537c51f9cfa4be50c1fd45" +dependencies = [ + "num-traits", + "plotters-backend", + "plotters-svg", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "plotters-backend" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" +checksum = "9e76628b4d3a7581389a35d5b6e2139607ad7c75b17aed325f210aa91f4a9609" + +[[package]] +name = "plotters-svg" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38f6d39893cca0701371e3c27294f09797214b86f1fb951b89ade8ec04e2abab" +dependencies = [ + "plotters-backend", +] [[package]] name = "portpicker" @@ -2304,7 +3333,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be97d76faf1bfab666e1375477b23fde79eccf0276e9b63b92a39d676a889ba9" dependencies = [ - "rand 0.8.5", + "rand", ] [[package]] @@ -2319,6 +3348,18 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +[[package]] +name = "pretty_assertions" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cab0e7c02cf376875e9335e0ba1da535775beb5450d21e1dffca068818ed98b" +dependencies = [ + "ansi_term", + "ctor", + "diff", + "output_vt100", +] + [[package]] name = "pretty_assertions" version = "1.4.0" @@ -2336,8 +3377,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.70", - "quote 1.0.33", + "proc-macro2 1.0.75", + "quote 1.0.35", "syn 1.0.109", "version_check", ] @@ -2348,8 +3389,8 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", + "proc-macro2 1.0.75", + "quote 1.0.35", "version_check", ] @@ -2364,9 +3405,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.70" +version = "1.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39278fbbf5fb4f646ce651690877f89d1c5811a3d4acb27700c1cb3cdb78fd3b" +checksum = "907a61bd0f64c2f29cd1cf1dc34d05176426a3f504a78010f08416ddb7b13708" dependencies = [ "unicode-ident", ] @@ -2384,6 +3425,12 @@ dependencies = [ "rustix 0.36.17", ] +[[package]] +name = "prodash" +version = "26.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "794b5bf8e2d19b53dcdcec3e4bba628e20f5b6062503ba89281fa7037dd7bbcf" + [[package]] name = "prometheus" version = "0.13.3" @@ -2412,15 +3459,26 @@ dependencies = [ "bitflags 2.4.1", "lazy_static", "num-traits", - "rand 0.8.5", - "rand_chacha 0.3.1", + "rand", + "rand_chacha", "rand_xorshift", - "regex-syntax", + "regex-syntax 0.8.2", "rusty-fork", "tempfile", "unarray", ] +[[package]] +name = 
"proptest-attr-macro" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fa06db3abc95f048e0afa371db5569b24912bb98a8e2e2e89c75c5b43bc2aa8" +dependencies = [ + "proc-macro2 1.0.75", + "quote 1.0.35", + "syn 1.0.109", +] + [[package]] name = "proptest-derive" version = "0.3.0" @@ -2438,6 +3496,17 @@ version = "2.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" +[[package]] +name = "pulldown-cmark" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a1a2f1f0a7ecff9c31abbe177637be0e97a0aef46cf8738ece09327985d998" +dependencies = [ + "bitflags 1.3.2", + "memchr", + "unicase", +] + [[package]] name = "quick-error" version = "1.2.3" @@ -2455,28 +3524,11 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.33" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" -dependencies = [ - "proc-macro2 1.0.70", -] - -[[package]] -name = "radium" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "def50a86306165861203e7f84ecffbbdfdea79f0e51039b33de1e952358c47ac" - -[[package]] -name = "rand" -version = "0.7.3" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" dependencies = [ - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc", + "proc-macro2 1.0.75", ] [[package]] @@ -2486,18 +3538,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_chacha" -version = 
"0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", + "rand_chacha", + "rand_core", ] [[package]] @@ -2507,31 +3549,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.4", + "rand_core", ] -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" - [[package]] name = "rand_core" version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core 0.5.1", + "getrandom", ] [[package]] @@ -2540,7 +3567,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59cad018caf63deb318e5a4586d99a24424a364f40f1e5778c29aca23f4fc73e" dependencies = [ - "rand_core 0.6.4", + "rand_core", ] [[package]] @@ -2549,7 +3576,27 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" dependencies = [ - "rand_core 0.6.4", + "rand_core", +] + +[[package]] +name = "rayon" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c27db03db7734835b3f53954b534c91069375ce6ccaa2e065441e07d9b6cdb1" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.12.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ce3fb6ad83f861aac485e76e1985cd109d9a3713802152be56c3b1f0e0658ed" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", ] [[package]] @@ -2589,8 +3636,17 @@ checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" dependencies = [ "aho-corasick", "memchr", - "regex-automata", - "regex-syntax", + "regex-automata 0.4.3", + "regex-syntax 0.8.2", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax 0.6.29", ] [[package]] @@ -2601,9 +3657,15 @@ checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" dependencies = [ "aho-corasick", "memchr", - "regex-syntax", + "regex-syntax 0.8.2", ] +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + [[package]] name = "regex-syntax" version = "0.8.2" @@ -2650,6 +3712,16 @@ dependencies = [ "winreg", ] +[[package]] +name = "rfc6979" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" +dependencies = [ + "hmac", + "subtle", +] + [[package]] name = "ring" version = "0.16.20" @@ -2692,8 +3764,8 @@ dependencies = [ "num-traits", "pkcs1", "pkcs8", - "rand_core 0.6.4", - "signature 2.2.0", + "rand_core", + "signature", "spki", "subtle", "zeroize", @@ -2716,11 +3788,11 @@ version = "6.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49b94b81e5b2c284684141a2fb9e2a31be90638caf040bf9afbc5a0416afe1ac" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", + "proc-macro2 1.0.75", + "quote 1.0.35", "rust-embed-utils", "shellexpand", - "syn 2.0.41", + 
"syn 2.0.48", "walkdir", ] @@ -2730,7 +3802,7 @@ version = "7.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d38ff6bf570dc3bb7100fce9f7b60c33fa71d80e88da3f2580df4ff2bdded74" dependencies = [ - "sha2 0.10.8", + "sha2", "walkdir", ] @@ -2740,6 +3812,15 @@ version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver", +] + [[package]] name = "rustix" version = "0.36.17" @@ -2823,18 +3904,18 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" +checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "schemars" -version = "0.8.5" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b82485a532ef0af18878ad4281f73e58161cdba1db7918176e9294f0ca5498a5" +checksum = "45a28f4c49489add4ce10783f7911893516f15afe45d015608d41faca6bc4d29" dependencies = [ "dyn-clone", "indexmap 1.9.3", @@ -2845,12 +3926,12 @@ dependencies = [ [[package]] name = "schemars_derive" -version = "0.8.5" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "791c2c848cff1abaeae34fef7e70da5f93171d9eea81ce0fe969a1df627a61a8" +checksum = "c767fd6fa65d9ccf9cf026122c1b555f2ef9a4f0cea69da4d7dbc3e258d30967" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", + "proc-macro2 1.0.75", + "quote 1.0.35", "serde_derive_internals", "syn 1.0.109", ] @@ -2879,9 +3960,9 @@ dependencies = [ 
[[package]] name = "sea-query" -version = "0.30.5" +version = "0.30.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e40446e3c048cec0802375f52462a05cc774b9ea6af1dffba6c646b7825e4cf9" +checksum = "a4a1feb0a26c02efedb049b22d3884e66f15a40c42b33dcbe49b46abc484c2bd" dependencies = [ "inherent", "sea-query-derive", @@ -2893,13 +3974,26 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25a82fcb49253abcb45cdcb2adf92956060ec0928635eb21b4f7a6d8f25ab0bc" dependencies = [ - "heck", - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 2.0.41", + "heck 0.4.1", + "proc-macro2 1.0.75", + "quote 1.0.35", + "syn 2.0.48", "thiserror", ] +[[package]] +name = "sec1" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" +dependencies = [ + "base16ct", + "der", + "generic-array", + "subtle", + "zeroize", +] + [[package]] name = "security-framework" version = "2.9.2" @@ -2923,51 +4017,77 @@ dependencies = [ "libc", ] +[[package]] +name = "semver" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0" + [[package]] name = "serde" -version = "1.0.193" +version = "1.0.194" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89" +checksum = "0b114498256798c94a0689e1a15fec6005dee8ac1f41de56404b67afc2a4b773" dependencies = [ "serde_derive", ] +[[package]] +name = "serde-map-to-array" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c14b52efc56c711e0dbae3f26e0cc233f5dac336c1bf0b07e1b7dc2dca3b2cc7" +dependencies = [ + "schemars", + "serde", +] + [[package]] name = "serde_bytes" -version = "0.11.12" +version = "0.11.14" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b8497c313fd43ab992087548117643f6fcd935cbf36f176ffda0aacf9591734" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_cbor" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab33ec92f677585af6d88c65593ae2375adde54efdbf16d597f2cbc7a6d368ff" +checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5" dependencies = [ + "half", "serde", ] [[package]] name = "serde_derive" -version = "1.0.193" +version = "1.0.194" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" +checksum = "a3385e45322e8f9931410f01b3031ec534c3947d0e94c18049af4d9f9907d4e0" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 2.0.41", + "proc-macro2 1.0.75", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] name = "serde_derive_internals" -version = "0.25.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dbab34ca63057a1f15280bdf3c39f2b1eb1b54c17e98360e511637aef7418c6" +checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", + "proc-macro2 1.0.75", + "quote 1.0.35", "syn 1.0.109", ] [[package]] name = "serde_json" -version = "1.0.108" +version = "1.0.111" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b" +checksum = "176e46fa42316f18edd598015a5166857fc835ec732f5215eac6b7bdbf0a84f4" dependencies = [ "indexmap 2.1.0", "itoa", @@ -2975,6 +4095,24 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_spanned" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" +dependencies = [ + "serde", +] + +[[package]] +name = 
"serde_test" +version = "1.0.176" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a2f49ace1498612d14f7e0b8245519584db8299541dfe31a06374a828d620ab" +dependencies = [ + "serde", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -2999,17 +4137,10 @@ dependencies = [ ] [[package]] -name = "sha2" -version = "0.9.9" +name = "sha1_smol" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if", - "cpufeatures", - "digest 0.9.0", - "opaque-debug", -] +checksum = "ae1a47186c03a32177042e55dbc5fd5aee900b8e0069a8d70fba96a9375cd012" [[package]] name = "sha2" @@ -3041,22 +4172,22 @@ dependencies = [ ] [[package]] -name = "signal-hook-registry" -version = "1.4.1" +name = "signal-hook" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +checksum = "8621587d4798caf8eb44879d42e56b9a93ea5dcd315a6487c357130095b62801" dependencies = [ "libc", + "signal-hook-registry", ] [[package]] -name = "signature" -version = "1.2.2" +name = "signal-hook-registry" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29f060a7d147e33490ec10da418795238fd7545bba241504d6b31a409f2e6210" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" dependencies = [ - "digest 0.9.0", - "rand_core 0.5.1", + "libc", ] [[package]] @@ -3066,14 +4197,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ "digest 0.10.7", - "rand_core 0.6.4", + "rand_core", ] [[package]] name = "similar" -version = "2.3.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2aeaf503862c419d66959f5d7ca015337d864e9c49485d771b732e2a20453597" +checksum = "32fea41aca09ee824cc9724996433064c89f7777e60762749a4170a14abbfa21" [[package]] name = "slab" @@ -3184,7 +4315,7 @@ dependencies = [ "hashlink", "hex", "hkdf", - "hmac 0.12.1", + "hmac", "indexmap 1.9.3", "itoa", "libc", @@ -3194,13 +4325,13 @@ dependencies = [ "once_cell", "paste", "percent-encoding", - "rand 0.8.5", + "rand", "rustls", "rustls-pemfile", "serde", "serde_json", "sha1", - "sha2 0.10.8", + "sha2", "smallvec", "sqlformat", "sqlx-rt", @@ -3218,7 +4349,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d84b0a3c3739e220d94b3239fd69fb1f74bc36e16643423bd99de3b43c21bfbd" dependencies = [ - "ahash 0.8.6", + "ahash 0.8.7", "atoi 2.0.0", "byteorder", "bytes", @@ -3243,7 +4374,7 @@ dependencies = [ "percent-encoding", "serde", "serde_json", - "sha2 0.10.8", + "sha2", "smallvec", "sqlformat", "thiserror", @@ -3261,11 +4392,11 @@ checksum = "9966e64ae989e7e575b19d7265cb79d7fc3cbbdf179835cb0d716f294c2049c9" dependencies = [ "dotenvy", "either", - "heck", + "heck 0.4.1", "once_cell", - "proc-macro2 1.0.70", - "quote 1.0.33", - "sha2 0.10.8", + "proc-macro2 1.0.75", + "quote 1.0.35", + "sha2", "sqlx-core 0.6.3", "sqlx-rt", "syn 1.0.109", @@ -3278,8 +4409,8 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89961c00dc4d7dffb7aee214964b065072bff69e36ddb9e2c107541f75e4f2a5" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", + "proc-macro2 1.0.75", + "quote 1.0.35", "sqlx-core 0.7.3", "sqlx-macros-core", "syn 1.0.109", @@ -3294,14 +4425,14 @@ dependencies = [ "atomic-write-file", "dotenvy", "either", - "heck", + "heck 0.4.1", "hex", "once_cell", - "proc-macro2 1.0.70", - "quote 1.0.33", + "proc-macro2 1.0.75", + "quote 1.0.35", "serde", "serde_json", - "sha2 0.10.8", + "sha2", "sqlx-core 0.7.3", "sqlx-mysql", "sqlx-postgres", @@ -3334,18 +4465,18 @@ dependencies = [ "generic-array", "hex", 
"hkdf", - "hmac 0.12.1", + "hmac", "itoa", "log", "md-5", "memchr", "once_cell", "percent-encoding", - "rand 0.8.5", + "rand", "rsa", "serde", "sha1", - "sha2 0.10.8", + "sha2", "smallvec", "sqlx-core 0.7.3", "stringprep", @@ -3373,18 +4504,18 @@ dependencies = [ "futures-util", "hex", "hkdf", - "hmac 0.12.1", + "hmac", "home", "itoa", "log", "md-5", "memchr", "once_cell", - "rand 0.8.5", + "rand", "serde", "serde_json", "sha1", - "sha2 0.10.8", + "sha2", "smallvec", "sqlx-core 0.7.3", "stringprep", @@ -3444,19 +4575,58 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "strsim" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" + [[package]] name = "strsim" version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +[[package]] +name = "structopt" +version = "0.3.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c6b5c64445ba8094a6ab0c3cd2ad323e07171012d9c98b0b15651daf1787a10" +dependencies = [ + "clap 2.34.0", + "lazy_static", + "structopt-derive", +] + +[[package]] +name = "structopt-derive" +version = "0.4.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" +dependencies = [ + "heck 0.3.3", + "proc-macro-error", + "proc-macro2 1.0.75", + "quote 1.0.35", + "syn 1.0.109", +] + [[package]] name = "strum" version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" dependencies = [ - "strum_macros", + "strum_macros 0.24.3", +] + +[[package]] +name = "strum" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" +dependencies = [ + "strum_macros 0.25.3", ] [[package]] @@ -3465,18 +4635,31 @@ version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ - "heck", - "proc-macro2 1.0.70", - "quote 1.0.33", + "heck 0.4.1", + "proc-macro2 1.0.75", + "quote 1.0.35", "rustversion", "syn 1.0.109", ] +[[package]] +name = "strum_macros" +version = "0.25.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" +dependencies = [ + "heck 0.4.1", + "proc-macro2 1.0.75", + "quote 1.0.35", + "rustversion", + "syn 2.0.48", +] + [[package]] name = "subtle" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" +checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" [[package]] name = "syn" @@ -3495,19 +4678,19 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", + "proc-macro2 1.0.75", + "quote 1.0.35", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.41" +version = "2.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44c8b28c477cc3bf0e7966561e3460130e1255f7a1cf71931075f1c5e7a7e269" +checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", + "proc-macro2 1.0.75", + "quote 1.0.35", "unicode-ident", ] @@ -3550,10 +4733,10 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "beca1b4eaceb4f2755df858b88d9b9315b7ccfd1ffd0d7a48a52602301f01a57" 
dependencies = [ - "heck", + "heck 0.4.1", "proc-macro-error", - "proc-macro2 1.0.70", - "quote 1.0.33", + "proc-macro2 1.0.75", + "quote 1.0.35", "syn 1.0.109", ] @@ -3570,35 +4753,53 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.8.1" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" +checksum = "01ce4141aa927a6d1bd34a041795abd0db1cccba5d5f24b009f694bdf3a1f3fa" dependencies = [ "cfg-if", "fastrand", "redox_syscall 0.4.1", "rustix 0.38.28", - "windows-sys 0.48.0", + "windows-sys 0.52.0", +] + +[[package]] +name = "termcolor" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff1bc3d3f05aff0403e8ac0d92ced918ec05b666a43f83297ccef5bea8a3d449" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "textwrap" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" +dependencies = [ + "unicode-width", ] [[package]] name = "thiserror" -version = "1.0.51" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f11c217e1416d6f036b870f14e0413d480dbf28edbee1f877abaf0206af43bb7" +checksum = "d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.51" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01742297787513b79cf8e29d1056ede1313e2420b7b3b15d0a768b4921f549df" +checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 2.0.41", + "proc-macro2 1.0.75", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] @@ -3638,6 +4839,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"f657ba42c3f86e7680e53c8cd3af8abbe56b5491790b46e22e19c0d57463583e" dependencies = [ "deranged", + "itoa", + "libc", + "num_threads", "powerfmt", "serde", "time-core", @@ -3659,6 +4863,16 @@ dependencies = [ "time-core", ] +[[package]] +name = "tinytemplate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +dependencies = [ + "serde", + "serde_json", +] + [[package]] name = "tinyvec" version = "1.6.0" @@ -3699,9 +4913,9 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 2.0.41", + "proc-macro2 1.0.75", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] @@ -3772,6 +4986,40 @@ dependencies = [ "serde", ] +[[package]] +name = "toml" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd79e69d3b627db300ff956027cc6c3798cef26d22526befdfcd12feeb6d2257" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.19.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" +dependencies = [ + "indexmap 2.1.0", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", +] + [[package]] name = "tower" version = "0.4.13" @@ -3818,9 +5066,9 @@ version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ - "proc-macro2 1.0.70", - "quote 
1.0.33", - "syn 2.0.41", + "proc-macro2 1.0.75", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] @@ -3844,18 +5092,35 @@ dependencies = [ "tracing-core", ] +[[package]] +name = "tracing-serde" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" +dependencies = [ + "serde", + "tracing-core", +] + [[package]] name = "tracing-subscriber" version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" dependencies = [ + "matchers", "nu-ansi-term", + "once_cell", + "regex", + "serde", + "serde_json", "sharded-slab", "smallvec", "thread_local", + "tracing", "tracing-core", "tracing-log", + "tracing-serde", ] [[package]] @@ -3876,7 +5141,7 @@ dependencies = [ "http", "httparse", "log", - "rand 0.8.5", + "rand", "sha1", "thiserror", "url", @@ -3922,6 +5187,12 @@ version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f2528f27a9eb2b21e69c95319b30bd0efd85d09c379741b0f78ea1d86be2416" +[[package]] +name = "unicode-bom" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7eec5d1121208364f6793f7d2e222bf75a915c19557537745b195b253dd64217" + [[package]] name = "unicode-ident" version = "1.0.12" @@ -4021,9 +5292,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05d96dcd6fc96f3df9b3280ef480770af1b7c5d14bc55192baa9b067976d920c" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 2.0.41", + "proc-macro2 1.0.75", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] @@ -4059,6 +5330,39 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" +[[package]] +name = "vec_map" +version = "0.8.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" + +[[package]] +name = "vergen" +version = "8.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1290fd64cc4e7d3c9b07d7f333ce0ce0007253e32870e632624835cc80b83939" +dependencies = [ + "anyhow", + "gix", + "rustversion", + "time", +] + +[[package]] +name = "version-sync" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "835169da0173ea373ddf5987632aac1f918967fbbe58195e304342282efa6089" +dependencies = [ + "proc-macro2 1.0.75", + "pulldown-cmark", + "regex", + "semver", + "syn 2.0.48", + "toml 0.7.8", + "url", +] + [[package]] name = "version_check" version = "0.9.4" @@ -4082,8 +5386,8 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d257817081c7dffcdbab24b9e62d2def62e2ff7d00b1c20062551e6cccc145ff" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", + "proc-macro2 1.0.75", + "quote 1.0.35", ] [[package]] @@ -4171,9 +5475,9 @@ dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 2.0.41", + "proc-macro2 1.0.75", + "quote 1.0.35", + "syn 2.0.48", "wasm-bindgen-shared", ] @@ -4195,7 +5499,7 @@ version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0162dbf37223cd2afce98f3d0785506dcb8d266223983e4b5b525859e6e182b2" dependencies = [ - "quote 1.0.33", + "quote 1.0.35", "wasm-bindgen-macro-support", ] @@ -4205,9 +5509,9 @@ version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 2.0.41", + "proc-macro2 1.0.75", + "quote 1.0.35", + "syn 2.0.48", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4307,6 +5611,25 @@ version = "0.4.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" +dependencies = [ + "windows-core", + "windows-targets 0.52.0", +] + +[[package]] +name = "windows-core" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +dependencies = [ + "windows-targets 0.52.0", +] + [[package]] name = "windows-sys" version = "0.45.0" @@ -4505,6 +5828,15 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" +[[package]] +name = "winnow" +version = "0.5.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8434aeec7b290e8da5c3f0d628cb0eac6cabcb31d14bb74f779a08109a5914d6" +dependencies = [ + "memchr", +] + [[package]] name = "winreg" version = "0.50.0" @@ -4515,17 +5847,11 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "wyz" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" - [[package]] name = "xattr" -version = "1.1.3" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7dae5072fe1f8db8f8d29059189ac175196e410e40ba42d5d4684ae2f750995" +checksum = "914566e6413e7fa959cc394fb30e563ba80f3541fbd40816d4c05a0fc3f2a0f1" dependencies = [ "libc", "linux-raw-sys 0.4.12", @@ -4549,22 +5875,22 @@ checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" [[package]] name = "zerocopy" -version = "0.7.31" +version = "0.7.32" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "1c4061bedbb353041c12f413700357bec76df2c7e2ca8e4df8bac24c6bf68e3d" +checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.31" +version = "0.7.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" +checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 2.0.41", + "proc-macro2 1.0.75", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] @@ -4572,20 +5898,6 @@ name = "zeroize" version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" -dependencies = [ - "zeroize_derive", -] - -[[package]] -name = "zeroize_derive" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" -dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 2.0.41", -] [[package]] name = "zip" @@ -4600,7 +5912,7 @@ dependencies = [ "crc32fast", "crossbeam-utils", "flate2", - "hmac 0.12.1", + "hmac", "pbkdf2", "sha1", "time", diff --git a/Cargo.toml b/Cargo.toml index d6ecb6e6..a6b57201 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,12 +1,30 @@ [workspace] resolver = "1" members = [ - "sidecar", + "casper_types", + "casper_types_ver_2_0", + "event_sidecar", + "json_rpc", "listener", - "types", + "rpc_sidecar", + "sidecar", + "types" ] [workspace.dependencies] -once_cell = "1.18.0" +anyhow = "1" async-stream = "0.3.4" +casper-types = { path = "./casper_types", version = "4.0.1" } +casper-types-ver-2_0 = { version = "3.0.0", path = "./casper_types_ver_2_0" } +casper-event-sidecar = { path = "./event_sidecar", version = "1.0.0" } +casper-rpc-sidecar = { path = "./rpc_sidecar", version = 
"1.0.0" } +datasize = "0.2.11" +futures = "0" futures-util = "0.3.28" +once_cell = "1.18.0" +thiserror = "1" +tokio = "1.23.1" +toml = "0.5.8" +tracing = { version = "0", default-features = false } +tracing-subscriber = "0" +serde = { version = "1", default-features = false } diff --git a/README.md b/README.md index 1ade2677..3969e77e 100644 --- a/README.md +++ b/README.md @@ -32,9 +32,9 @@ The SSE Sidecar service must be configured using a `.toml` file specified at run This repository contains several sample configuration files that can be used as examples and adjusted according to your scenario: -- [EXAMPLE_NCTL_CONFIG.toml](./EXAMPLE_NCTL_CONFIG.toml) - Configuration for connecting to nodes on a local NCTL network. This configuration is used in the unit and integration tests found in this repository -- [EXAMPLE_NCTL_POSTGRES_CONFIG.toml](./EXAMPLE_NCTL_POSTGRES_CONFIG.toml) - Configuration for using the PostgreSQL database and nodes on a local NCTL network -- [EXAMPLE_NODE_CONFIG.toml](./EXAMPLE_NODE_CONFIG.toml) - Configuration for connecting to live nodes on a Casper network and setting up an admin server +- [EXAMPLE_NCTL_CONFIG.toml](./resources/example_configs/EXAMPLE_NCTL_CONFIG.toml) - Configuration for connecting to nodes on a local NCTL network. This configuration is used in the unit and integration tests found in this repository +- [EXAMPLE_NCTL_POSTGRES_CONFIG.toml](./resources/example_configs/EXAMPLE_NCTL_POSTGRES_CONFIG.toml) - Configuration for using the PostgreSQL database and nodes on a local NCTL network +- [EXAMPLE_NODE_CONFIG.toml](./resources/example_configs/EXAMPLE_NODE_CONFIG.toml) - Configuration for connecting to live nodes on a Casper network and setting up an admin server Once you create the configuration file and are ready to run the Sidecar service, you must provide the configuration as an argument using the `-- --path-to-config` option as described [here](#running-the-sidecar). 
@@ -42,10 +42,10 @@ Once you create the configuration file and are ready to run the Sidecar service, The Sidecar can connect to Casper nodes with versions greater or equal to `1.5.2`. -The `node_connections` option configures the node (or multiple nodes) to which the Sidecar will connect and the parameters under which it will operate with that node. Connecting to multiple nodes requires multiple `[[connections]]` sections. +The `node_connections` option configures the node (or multiple nodes) to which the Sidecar will connect and the parameters under which it will operate with that node. Connecting to multiple nodes requires multiple `[[sse_server.connections]]` sections. ``` -[[connections]] +[[sse_server.connections]] ip_address = "127.0.0.1" sse_port = 18101 rest_port = 14101 @@ -57,7 +57,7 @@ connection_timeout_in_seconds = 3 no_message_timeout_in_seconds = 60 sleep_between_keep_alive_checks_in_seconds = 30 -[[connections]] +[[sse_server.connections]] ip_address = "127.0.0.1" sse_port = 18102 rest_port = 14102 @@ -69,7 +69,7 @@ connection_timeout_in_seconds = 3 no_message_timeout_in_seconds = 60 sleep_between_keep_alive_checks_in_seconds = 30 -[[connections]] +[[sse_server.connections]] ip_address = "127.0.0.1" sse_port = 18103 rest_port = 14103 @@ -83,8 +83,8 @@ sleep_between_keep_alive_checks_in_seconds = 30 ``` * `ip_address` - The IP address of the node to monitor. -* `sse_port` - The node's event stream (SSE) port. This [example configuration](EXAMPLE_NODE_CONFIG.toml) uses port `9999`. -* `rest_port` - The node's REST endpoint for status and metrics. This [example configuration](EXAMPLE_NODE_CONFIG.toml) uses port `8888`. +* `sse_port` - The node's event stream (SSE) port. This [example configuration](./resources/example_configs/EXAMPLE_NODE_CONFIG.toml) uses port `9999`. +* `rest_port` - The node's REST endpoint for status and metrics. This [example configuration](./resources/example_configs/EXAMPLE_NODE_CONFIG.toml) uses port `8888`. 
* `max_attempts` - The maximum number of attempts the Sidecar will make to connect to the node. If set to `0`, the Sidecar will not attempt to connect. * `delay_between_retries_in_seconds` - The delay between attempts to connect to the node. * `allow_partial_connection` - Determining whether the Sidecar will allow a partial connection to this node. @@ -180,7 +180,7 @@ max_connections_in_pool = 30 This information determines outbound connection criteria for the Sidecar's `rest_server`. ``` -[rest_server] +[rest_api_server] port = 18888 max_concurrent_requests = 50 max_requests_per_second = 50 @@ -193,7 +193,7 @@ request_timeout_in_seconds = 10 * `request_timeout_in_seconds` - The total time before a request times out. ``` -[event_stream_server] +[sse_server.event_stream_server] port = 19999 max_concurrent_subscribers = 100 event_stream_buffer_length = 5000 @@ -211,7 +211,7 @@ Additionally, there are the following two options: This optional section configures the Sidecar's administrative server. If this section is not specified, the Sidecar will not start an admin server. ``` -[admin_server] +[admin_api_server] port = 18887 max_concurrent_requests = 1 max_requests_per_second = 1 @@ -245,14 +245,14 @@ You can also run the performance tests using the following command: cargo test -- --include-ignored ``` -The [EXAMPLE_NCTL_CONFIG.toml](./EXAMPLE_NCTL_CONFIG.toml) file contains the configurations used for these tests. +The [EXAMPLE_NCTL_CONFIG.toml](./resources/example_configs/EXAMPLE_NCTL_CONFIG.toml) file contains the configurations used for these tests. ## Running the Sidecar After creating the configuration file, run the Sidecar using Cargo and point to the configuration file using the `--path-to-config` option, as shown below. The command needs to run with `root` privileges. 
```shell -sudo cargo run -- --path-to-config EXAMPLE_NODE_CONFIG.toml +sudo cargo run -- --path-to-config ./resources/example_configs/EXAMPLE_NODE_CONFIG.toml ``` The Sidecar application leverages tracing, which can be controlled by setting the `RUST_LOG` environment variable. @@ -260,7 +260,7 @@ The Sidecar application leverages tracing, which can be controlled by setting th The following command will run the sidecar application with the `INFO` log level. ``` -RUST_LOG=info cargo run -p casper-event-sidecar -- --path-to-config EXAMPLE_NCTL_CONFIG.toml +RUST_LOG=info cargo run -p casper-event-sidecar -- --path-to-config ./resources/example_configs/EXAMPLE_NCTL_CONFIG.toml ``` The log levels, listed in order of increasing verbosity, are: diff --git a/USAGE.md b/USAGE.md index ea2aeed2..5968f55c 100644 --- a/USAGE.md +++ b/USAGE.md @@ -35,7 +35,7 @@ curl -s http:///events/ - `PORT` - The port number where the Sidecar emits events - `TYPE` - The type of event emitted -Given this [example configuration](EXAMPLE_NODE_CONFIG.toml), here are the commands for each endpoint: +Given this [example configuration](./resources/example_configs/EXAMPLE_NODE_CONFIG.toml), here are the commands for each endpoint: - **Deploy events:** diff --git a/casper_types/CHANGELOG.md b/casper_types/CHANGELOG.md new file mode 100644 index 00000000..08b78b25 --- /dev/null +++ b/casper_types/CHANGELOG.md @@ -0,0 +1,200 @@ +# Changelog + +All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog]. + +[comment]: <> (Added: new features) +[comment]: <> (Changed: changes in existing functionality) +[comment]: <> (Deprecated: soon-to-be removed features) +[comment]: <> (Removed: now removed features) +[comment]: <> (Fixed: any bug fixes) +[comment]: <> (Security: in case of vulnerabilities) + + + +## 4.0.1 + +### Added +* Add a new `SyncHandling` enum, which allows a node to opt out of historical sync. 
+ +### Changed +* Update `k256` to version 0.13.1. + +### Removed +* Remove `ExecutionResult::successful_transfers`. + +### Security +* Update `ed25519-dalek` to version 2.0.0 as mitigation for [RUSTSEC-2022-0093](https://rustsec.org/advisories/RUSTSEC-2022-0093) + + + +## 3.0.0 + +### Added +* Add new `bytesrepr::Error::NotRepresentable` error variant that represents values that are not representable by the serialization format. +* Add new `Key::Unbond` key variant under which the new unbonding information (to support redelegation) is written. +* Add new `Key::ChainspecRegistry` key variant under which the `ChainspecRegistry` is written. +* Add new `Key::ChecksumRegistry` key variant under which a registry of checksums for a given block is written. There are two checksums in the registry, one for the execution results and the other for the approvals of all deploys in the block. +* Add new `StoredValue::Unbonding` variant to support redelegating. +* Add a new type `WithdrawPurses` which is meant to represent `UnbondingPurses` as they exist in current live networks. + +### Changed +* Extend `UnbondingPurse` to take a new field `new_validator` which represents the validator to whom tokens will be re-delegated. +* Increase `DICTIONARY_ITEM_KEY_MAX_LENGTH` to 128. +* Change prefix of formatted string representation of `ContractPackageHash` from "contract-package-wasm" to "contract-package-". Parsing from the old format is still supported. +* Apply `#[non_exhaustive]` to error enums. +* Change Debug output of `DeployHash` to hex-encoded string rather than a list of integers. + +### Fixed +* Fix some integer casts, where failure is now detected and reported via new error variant `NotRepresentable`. + + + +## 2.0.0 + +### Fixed +* Republish v1.6.0 as v2.0.0 due to missed breaking change in API (addition of new variant to `Key`). + + + +## 1.6.0 [YANKED] + +### Added +* Extend asymmetric key functionality, available via feature `std` (moved from `casper-nodes` crate). 
+* Provide `Timestamp` and `TimeDiff` types for time operations, with extended functionality available via feature `std` (moved from `casper-nodes` crate). +* Provide test-only functionality, in particular a seedable RNG `TestRng` which outputs its seed on test failure. Available via a new feature `testing`. +* Add new `Key::EraSummary` key variant under which the era summary info is written on each switch block execution. + +### Deprecated +* Deprecate `gens` feature: its functionality is included in the new `testing` feature. + + + +## 1.5.0 + +### Added +* Provide types and functionality to support improved access control inside execution engine. +* Provide `CLTyped` impl for `ContractPackage` to allow it to be passed into contracts. + +### Fixed +* Limit parsing of CLTyped objects to a maximum of 50 types deep. + + + +## 1.4.6 - 2021-12-29 + +### Changed +* Disable checksummed-hex encoding, but leave checksummed-hex decoding in place. + + + +## 1.4.5 - 2021-12-06 + +### Added +* Add function to `auction::MintProvider` trait to support minting into an existing purse. + +### Changed +* Change checksummed hex implementation to use 32 byte rather than 64 byte blake2b digests. + + + +## [1.4.4] - 2021-11-18 + +### Fixed +* Revert the accidental change to the `std` feature causing a broken build when this feature is enabled. + + + +## [1.4.3] - 2021-11-17 [YANKED] + + + +## [1.4.2] - 2021-11-13 [YANKED] + +### Added +* Add checksummed hex encoding following a scheme similar to [EIP-55](https://eips.ethereum.org/EIPS/eip-55). + + + +## [1.4.1] - 2021-10-23 + +No changes. + + + +## [1.4.0] - 2021-10-21 [YANKED] + +### Added +* Add `json-schema` feature, disabled by default, to enable many types to be used to produce JSON-schema data. +* Add implicit `datasize` feature, disabled by default, to enable many types to derive the `DataSize` trait. +* Add `StoredValue` types to this crate. + +### Changed +* Support building and testing using stable Rust. 
+* Allow longer hex string to be presented in `json` files. Current maximum is increased from 100 to 150 characters. +* Improve documentation and `Debug` impls for `ApiError`. + +### Deprecated +* Feature `std` is deprecated as it is now a no-op, since there is no benefit to linking the std lib via this crate. + + + +## [1.3.0] - 2021-07-19 + +### Changed +* Restrict summarization when JSON pretty-printing to contiguous long hex strings. +* Update pinned version of Rust to `nightly-2021-06-17`. + +### Removed +* Remove ability to clone `SecretKey`s. + + + +## [1.2.0] - 2021-05-27 + +### Changed +* Change to Apache 2.0 license. +* Return a `Result` from the constructor of `SecretKey` rather than potentially panicking. +* Improve `Key` error reporting and tests. + +### Fixed +* Fix `Key` deserialization. + + + +## [1.1.1] - 2021-04-19 + +No changes. + + + +## [1.1.0] - 2021-04-13 [YANKED] + +No changes. + + + +## [1.0.1] - 2021-04-08 + +No changes. + + + +## [1.0.0] - 2021-03-30 + +### Added +* Initial release of types for use by software compatible with Casper mainnet. 
+ + + +[Keep a Changelog]: https://keepachangelog.com/en/1.0.0 +[unreleased]: https://github.com/casper-network/casper-node/compare/24fc4027a...dev +[1.4.3]: https://github.com/casper-network/casper-node/compare/2be27b3f5...24fc4027a +[1.4.2]: https://github.com/casper-network/casper-node/compare/v1.4.1...2be27b3f5 +[1.4.1]: https://github.com/casper-network/casper-node/compare/v1.4.0...v1.4.1 +[1.4.0]: https://github.com/casper-network/casper-node/compare/v1.3.0...v1.4.0 +[1.3.0]: https://github.com/casper-network/casper-node/compare/v1.2.0...v1.3.0 +[1.2.0]: https://github.com/casper-network/casper-node/compare/v1.1.1...v1.2.0 +[1.1.1]: https://github.com/casper-network/casper-node/compare/v1.0.1...v1.1.1 +[1.1.0]: https://github.com/casper-network/casper-node/compare/v1.0.1...v1.1.1 +[1.0.1]: https://github.com/casper-network/casper-node/compare/v1.0.0...v1.0.1 +[1.0.0]: https://github.com/casper-network/casper-node/releases/tag/v1.0.0 diff --git a/casper_types/Cargo.toml b/casper_types/Cargo.toml new file mode 100644 index 00000000..5f11687d --- /dev/null +++ b/casper_types/Cargo.toml @@ -0,0 +1,77 @@ +[package] +name = "casper-types" +version = "4.0.1" # when updating, also update 'html_root_url' in lib.rs +authors = ["Fraser Hutchison "] +edition = "2021" +description = "Types shared by many casper crates for use on the Casper network." 
+readme = "README.md" +documentation = "https://docs.rs/casper-types" +homepage = "https://casperlabs.io" +repository = "https://github.com/CasperLabs/casper-node/tree/master/types" +license = "Apache-2.0" + +[dependencies] +base16 = { version = "0.2.1", default-features = false, features = ["alloc"] } +base64 = { version = "0.13.0", default-features = false } +bitflags = "1" +blake2 = { version = "0.9.0", default-features = false } +datasize = { workspace = true, optional = true } +derp = { version = "0.0.14", optional = true } +ed25519-dalek = { version = "2.0.0", default-features = false, features = ["alloc", "zeroize"] } +getrandom = { version = "0.2.0", features = ["rdrand"], optional = true } +hex = { version = "0.4.2", default-features = false, features = ["alloc"] } +hex_fmt = "0.3.0" +humantime = { version = "2", optional = true } +k256 = { version = "0.13.1", default-features = false, features = ["ecdsa", "sha256"] } +num = { version = "0.4.0", default-features = false, features = ["alloc"] } +num-derive = { version = "0.3.0", default-features = false } +num-integer = { version = "0.1.42", default-features = false } +num-rational = { version = "0.4.0", default-features = false } +num-traits = { version = "0.2.10", default-features = false } +once_cell = { workspace = true, optional = true } +pem = { version = "0.8.1", optional = true } +proptest = { version = "1.0.0", optional = true } +proptest-derive = { version = "0.3.0", optional = true } +rand = { version = "0.8.3", default-features = false, features = ["small_rng"] } +rand_pcg = { version = "0.3.0", optional = true } +schemars = { version = "=0.8.16", features = ["preserve_order"], optional = true } +serde = { workspace = true, default-features = false, features = ["alloc", "derive"] } +serde_bytes = { version = "0.11.5", default-features = false, features = ["alloc"] } +serde_json = { version = "1.0.59", default-features = false, features = ["alloc"] } +strum = { version = "0.24", features = 
["derive"], optional = true } +thiserror = { workspace = true, optional = true } +uint = { version = "0.9.0", default-features = false } +untrusted = { version = "0.7.1", optional = true } +version-sync = { version = "0.9", optional = true } + +[dev-dependencies] +bincode = "1.3.1" +criterion = "0.3.5" +derp = "0.0.14" +getrandom = "0.2.0" +humantime = "2" +once_cell = {workspace = true} +openssl = "0.10.32" +pem = "0.8.1" +proptest = "1.0.0" +proptest-derive = "0.3.0" +proptest-attr-macro = "1.0.0" +rand = "0.8.3" +rand_pcg = "0.3.0" +serde_json = "1" +serde_test = "1" +strum = { version = "0.24", features = ["derive"] } +tempfile = "3.4.0" +thiserror = { workspace = true } +untrusted = "0.7.1" + +[features] +json-schema = ["once_cell", "schemars"] +std = ["derp", "getrandom/std", "humantime", "once_cell", "pem", "serde_json/preserve_order", "thiserror", "untrusted"] +testing = ["proptest", "proptest-derive", "rand_pcg", "strum"] +# DEPRECATED - use "testing" instead of "gens". +gens = ["testing"] + +[[bench]] +name = "bytesrepr_bench" +harness = false diff --git a/casper_types/README.md b/casper_types/README.md new file mode 100644 index 00000000..46f14ea2 --- /dev/null +++ b/casper_types/README.md @@ -0,0 +1,22 @@ +# `casper-types` + +[![LOGO](https://raw.githubusercontent.com/casper-network/casper-node/master/images/casper-association-logo-primary.svg)](https://casper.network/) + +[![Build Status](https://drone-auto-casper-network.casperlabs.io/api/badges/casper-network/casper-node/status.svg?branch=dev)](http://drone-auto-casper-network.casperlabs.io/casper-network/casper-node) +[![Crates.io](https://img.shields.io/crates/v/casper-types)](https://crates.io/crates/casper-types) +[![Documentation](https://docs.rs/casper-types/badge.svg)](https://docs.rs/casper-types) +[![License](https://img.shields.io/badge/license-Apache-blue)](https://github.com/CasperLabs/casper-node/blob/master/LICENSE) + +Types shared by many casper crates for use on the Casper network. 
+ +## `no_std` + +The crate is `no_std` (using the `core` and `alloc` crates) unless any of the following features are enabled: + +* `json-schema` to enable many types to be used to produce JSON-schema data via the [`schemars`](https://crates.io/crates/schemars) crate +* `datasize` to enable many types to derive the [`DataSize`](https://github.com/casperlabs/datasize-rs) trait +* `gens` to enable many types to be produced in accordance with [`proptest`](https://crates.io/crates/proptest) usage for consumption within dependee crates' property testing suites + +## License + +Licensed under the [Apache License Version 2.0](https://github.com/casper-network/casper-node/blob/master/LICENSE). diff --git a/casper_types/benches/bytesrepr_bench.rs b/casper_types/benches/bytesrepr_bench.rs new file mode 100644 index 00000000..ac4e360e --- /dev/null +++ b/casper_types/benches/bytesrepr_bench.rs @@ -0,0 +1,894 @@ +use criterion::{black_box, criterion_group, criterion_main, Bencher, Criterion}; + +use std::{ + collections::{BTreeMap, BTreeSet}, + iter, +}; + +use casper_types::{ + account::{Account, AccountHash, ActionThresholds, AssociatedKeys, Weight}, + bytesrepr::{self, Bytes, FromBytes, ToBytes}, + contracts::{ContractPackageStatus, NamedKeys}, + system::auction::{Bid, Delegator, EraInfo, SeigniorageAllocation}, + AccessRights, CLType, CLTyped, CLValue, Contract, ContractHash, ContractPackage, + ContractPackageHash, ContractVersionKey, ContractWasmHash, DeployHash, DeployInfo, EntryPoint, + EntryPointAccess, EntryPointType, EntryPoints, Group, Key, Parameter, ProtocolVersion, + PublicKey, SecretKey, Transfer, TransferAddr, URef, KEY_HASH_LENGTH, TRANSFER_ADDR_LENGTH, + U128, U256, U512, UREF_ADDR_LENGTH, +}; + +static KB: usize = 1024; +static BATCH: usize = 4 * KB; + +const TEST_I32: i32 = 123_456_789; +const TEST_U128: U128 = U128([123_456_789, 0]); +const TEST_U256: U256 = U256([123_456_789, 0, 0, 0]); +const TEST_U512: U512 = U512([123_456_789, 0, 0, 0, 0, 0, 0, 0]); 
+const TEST_STR_1: &str = "String One"; +const TEST_STR_2: &str = "String Two"; + +fn prepare_vector(size: usize) -> Vec { + (0..size as i32).collect() +} + +fn serialize_vector_of_i32s(b: &mut Bencher) { + let data = prepare_vector(black_box(BATCH)); + b.iter(|| data.to_bytes()); +} + +fn deserialize_vector_of_i32s(b: &mut Bencher) { + let data = prepare_vector(black_box(BATCH)).to_bytes().unwrap(); + b.iter(|| { + let (res, _rem): (Vec, _) = FromBytes::from_bytes(&data).unwrap(); + res + }); +} + +fn serialize_vector_of_u8(b: &mut Bencher) { + // 0, 1, ... 254, 255, 0, 1, ... + let data: Bytes = prepare_vector(BATCH) + .into_iter() + .map(|value| value as u8) + .collect(); + b.iter(|| ToBytes::to_bytes(black_box(&data))); +} + +fn deserialize_vector_of_u8(b: &mut Bencher) { + // 0, 1, ... 254, 255, 0, 1, ... + let data: Vec = prepare_vector(BATCH) + .into_iter() + .map(|value| value as u8) + .collect::() + .to_bytes() + .unwrap(); + b.iter(|| Bytes::from_bytes(black_box(&data))) +} + +fn serialize_u8(b: &mut Bencher) { + b.iter(|| ToBytes::to_bytes(black_box(&129u8))); +} + +fn deserialize_u8(b: &mut Bencher) { + b.iter(|| u8::from_bytes(black_box(&[129u8]))); +} + +fn serialize_i32(b: &mut Bencher) { + b.iter(|| ToBytes::to_bytes(black_box(&1_816_142_132i32))); +} + +fn deserialize_i32(b: &mut Bencher) { + b.iter(|| i32::from_bytes(black_box(&[0x34, 0x21, 0x40, 0x6c]))); +} + +fn serialize_u64(b: &mut Bencher) { + b.iter(|| ToBytes::to_bytes(black_box(&14_157_907_845_468_752_670u64))); +} + +fn deserialize_u64(b: &mut Bencher) { + b.iter(|| u64::from_bytes(black_box(&[0x1e, 0x8b, 0xe1, 0x73, 0x2c, 0xfe, 0x7a, 0xc4]))); +} + +fn serialize_some_u64(b: &mut Bencher) { + let data = Some(14_157_907_845_468_752_670u64); + + b.iter(|| ToBytes::to_bytes(black_box(&data))); +} + +fn deserialize_some_u64(b: &mut Bencher) { + let data = Some(14_157_907_845_468_752_670u64); + let data = data.to_bytes().unwrap(); + + b.iter(|| Option::::from_bytes(&data)); +} + +fn 
serialize_none_u64(b: &mut Bencher) { + let data: Option = None; + + b.iter(|| ToBytes::to_bytes(black_box(&data))); +} + +fn deserialize_ok_u64(b: &mut Bencher) { + let data: Option = None; + let data = data.to_bytes().unwrap(); + b.iter(|| Option::::from_bytes(&data)); +} + +fn make_test_vec_of_vec8() -> Vec { + (0..4) + .map(|_v| { + // 0, 1, 2, ..., 254, 255 + let inner_vec = iter::repeat_with(|| 0..255u8) + .flatten() + // 4 times to create 4x 1024 bytes + .take(4) + .collect::>(); + Bytes::from(inner_vec) + }) + .collect() +} + +fn serialize_vector_of_vector_of_u8(b: &mut Bencher) { + let data = make_test_vec_of_vec8(); + b.iter(|| data.to_bytes()); +} + +fn deserialize_vector_of_vector_of_u8(b: &mut Bencher) { + let data = make_test_vec_of_vec8().to_bytes().unwrap(); + b.iter(|| Vec::::from_bytes(black_box(&data))); +} + +fn serialize_tree_map(b: &mut Bencher) { + let data = { + let mut res = BTreeMap::new(); + res.insert("asdf".to_string(), "zxcv".to_string()); + res.insert("qwer".to_string(), "rewq".to_string()); + res.insert("1234".to_string(), "5678".to_string()); + res + }; + + b.iter(|| ToBytes::to_bytes(black_box(&data))); +} + +fn deserialize_treemap(b: &mut Bencher) { + let data = { + let mut res = BTreeMap::new(); + res.insert("asdf".to_string(), "zxcv".to_string()); + res.insert("qwer".to_string(), "rewq".to_string()); + res.insert("1234".to_string(), "5678".to_string()); + res + }; + let data = data.to_bytes().unwrap(); + b.iter(|| BTreeMap::::from_bytes(black_box(&data))); +} + +fn serialize_string(b: &mut Bencher) { + let lorem = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua."; + let data = lorem.to_string(); + b.iter(|| ToBytes::to_bytes(black_box(&data))); +} + +fn deserialize_string(b: &mut Bencher) { + let lorem = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua."; + let data = 
lorem.to_bytes().unwrap(); + b.iter(|| String::from_bytes(&data)); +} + +fn serialize_vec_of_string(b: &mut Bencher) { + let lorem = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.".to_string(); + let array_of_lorem: Vec = lorem.split(' ').map(Into::into).collect(); + let data = array_of_lorem; + b.iter(|| ToBytes::to_bytes(black_box(&data))); +} + +fn deserialize_vec_of_string(b: &mut Bencher) { + let lorem = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.".to_string(); + let array_of_lorem: Vec = lorem.split(' ').map(Into::into).collect(); + let data = array_of_lorem.to_bytes().unwrap(); + + b.iter(|| Vec::::from_bytes(&data)); +} + +fn serialize_unit(b: &mut Bencher) { + b.iter(|| ToBytes::to_bytes(black_box(&()))) +} + +fn deserialize_unit(b: &mut Bencher) { + let data = ().to_bytes().unwrap(); + + b.iter(|| <()>::from_bytes(&data)) +} + +fn serialize_key_account(b: &mut Bencher) { + let account = Key::Account(AccountHash::new([0u8; 32])); + + b.iter(|| ToBytes::to_bytes(black_box(&account))) +} + +fn deserialize_key_account(b: &mut Bencher) { + let account = Key::Account(AccountHash::new([0u8; 32])); + let account_bytes = account.to_bytes().unwrap(); + + b.iter(|| Key::from_bytes(black_box(&account_bytes))) +} + +fn serialize_key_hash(b: &mut Bencher) { + let hash = Key::Hash([0u8; 32]); + b.iter(|| ToBytes::to_bytes(black_box(&hash))) +} + +fn deserialize_key_hash(b: &mut Bencher) { + let hash = Key::Hash([0u8; 32]); + let hash_bytes = hash.to_bytes().unwrap(); + + b.iter(|| Key::from_bytes(black_box(&hash_bytes))) +} + +fn serialize_key_uref(b: &mut Bencher) { + let uref = Key::URef(URef::new([0u8; 32], AccessRights::ADD_WRITE)); + b.iter(|| ToBytes::to_bytes(black_box(&uref))) +} + +fn deserialize_key_uref(b: &mut Bencher) { + let uref = Key::URef(URef::new([0u8; 32], AccessRights::ADD_WRITE)); + let 
uref_bytes = uref.to_bytes().unwrap(); + + b.iter(|| Key::from_bytes(black_box(&uref_bytes))) +} + +fn serialize_vec_of_keys(b: &mut Bencher) { + let keys: Vec = (0..32) + .map(|i| Key::URef(URef::new([i; 32], AccessRights::ADD_WRITE))) + .collect(); + b.iter(|| ToBytes::to_bytes(black_box(&keys))) +} + +fn deserialize_vec_of_keys(b: &mut Bencher) { + let keys: Vec = (0..32) + .map(|i| Key::URef(URef::new([i; 32], AccessRights::ADD_WRITE))) + .collect(); + let keys_bytes = keys.to_bytes().unwrap(); + b.iter(|| Vec::::from_bytes(black_box(&keys_bytes))); +} + +fn serialize_access_rights_read(b: &mut Bencher) { + b.iter(|| AccessRights::READ.to_bytes()); +} + +fn deserialize_access_rights_read(b: &mut Bencher) { + let data = AccessRights::READ.to_bytes().unwrap(); + b.iter(|| AccessRights::from_bytes(&data)); +} + +fn serialize_access_rights_write(b: &mut Bencher) { + b.iter(|| AccessRights::WRITE.to_bytes()); +} + +fn deserialize_access_rights_write(b: &mut Bencher) { + let data = AccessRights::WRITE.to_bytes().unwrap(); + b.iter(|| AccessRights::from_bytes(&data)); +} + +fn serialize_access_rights_add(b: &mut Bencher) { + b.iter(|| AccessRights::ADD.to_bytes()); +} + +fn deserialize_access_rights_add(b: &mut Bencher) { + let data = AccessRights::ADD.to_bytes().unwrap(); + b.iter(|| AccessRights::from_bytes(&data)); +} + +fn serialize_access_rights_read_add(b: &mut Bencher) { + b.iter(|| AccessRights::READ_ADD.to_bytes()); +} + +fn deserialize_access_rights_read_add(b: &mut Bencher) { + let data = AccessRights::READ_ADD.to_bytes().unwrap(); + b.iter(|| AccessRights::from_bytes(&data)); +} + +fn serialize_access_rights_read_write(b: &mut Bencher) { + b.iter(|| AccessRights::READ_WRITE.to_bytes()); +} + +fn deserialize_access_rights_read_write(b: &mut Bencher) { + let data = AccessRights::READ_WRITE.to_bytes().unwrap(); + b.iter(|| AccessRights::from_bytes(&data)); +} + +fn serialize_access_rights_add_write(b: &mut Bencher) { + b.iter(|| 
AccessRights::ADD_WRITE.to_bytes()); +} + +fn deserialize_access_rights_add_write(b: &mut Bencher) { + let data = AccessRights::ADD_WRITE.to_bytes().unwrap(); + b.iter(|| AccessRights::from_bytes(&data)); +} + +fn serialize_cl_value(raw_value: T) -> Vec { + CLValue::from_t(raw_value) + .expect("should create CLValue") + .to_bytes() + .expect("should serialize CLValue") +} + +fn benchmark_deserialization(b: &mut Bencher, raw_value: T) { + let serialized_value = serialize_cl_value(raw_value); + b.iter(|| { + let cl_value: CLValue = bytesrepr::deserialize_from_slice(&serialized_value).unwrap(); + let _raw_value: T = cl_value.into_t().unwrap(); + }); +} + +fn serialize_cl_value_int32(b: &mut Bencher) { + b.iter(|| serialize_cl_value(TEST_I32)); +} + +fn deserialize_cl_value_int32(b: &mut Bencher) { + benchmark_deserialization(b, TEST_I32); +} + +fn serialize_cl_value_uint128(b: &mut Bencher) { + b.iter(|| serialize_cl_value(TEST_U128)); +} + +fn deserialize_cl_value_uint128(b: &mut Bencher) { + benchmark_deserialization(b, TEST_U128); +} + +fn serialize_cl_value_uint256(b: &mut Bencher) { + b.iter(|| serialize_cl_value(TEST_U256)); +} + +fn deserialize_cl_value_uint256(b: &mut Bencher) { + benchmark_deserialization(b, TEST_U256); +} + +fn serialize_cl_value_uint512(b: &mut Bencher) { + b.iter(|| serialize_cl_value(TEST_U512)); +} + +fn deserialize_cl_value_uint512(b: &mut Bencher) { + benchmark_deserialization(b, TEST_U512); +} + +fn serialize_cl_value_bytearray(b: &mut Bencher) { + b.iter_with_setup( + || { + let vec: Vec = (0..255).collect(); + Bytes::from(vec) + }, + serialize_cl_value, + ); +} + +fn deserialize_cl_value_bytearray(b: &mut Bencher) { + let vec = (0..255).collect::>(); + let bytes: Bytes = vec.into(); + benchmark_deserialization(b, bytes); +} + +fn serialize_cl_value_listint32(b: &mut Bencher) { + b.iter(|| serialize_cl_value((0..1024).collect::>())); +} + +fn deserialize_cl_value_listint32(b: &mut Bencher) { + benchmark_deserialization(b, 
(0..1024).collect::>()); +} + +fn serialize_cl_value_string(b: &mut Bencher) { + b.iter(|| serialize_cl_value(TEST_STR_1.to_string())); +} + +fn deserialize_cl_value_string(b: &mut Bencher) { + benchmark_deserialization(b, TEST_STR_1.to_string()); +} + +fn serialize_cl_value_liststring(b: &mut Bencher) { + b.iter(|| serialize_cl_value(vec![TEST_STR_1.to_string(), TEST_STR_2.to_string()])); +} + +fn deserialize_cl_value_liststring(b: &mut Bencher) { + benchmark_deserialization(b, vec![TEST_STR_1.to_string(), TEST_STR_2.to_string()]); +} + +fn serialize_cl_value_namedkey(b: &mut Bencher) { + b.iter(|| { + serialize_cl_value(( + TEST_STR_1.to_string(), + Key::Account(AccountHash::new([0xffu8; 32])), + )) + }); +} + +fn deserialize_cl_value_namedkey(b: &mut Bencher) { + benchmark_deserialization( + b, + ( + TEST_STR_1.to_string(), + Key::Account(AccountHash::new([0xffu8; 32])), + ), + ); +} + +fn serialize_u128(b: &mut Bencher) { + let num_u128 = U128::default(); + b.iter(|| ToBytes::to_bytes(black_box(&num_u128))) +} + +fn deserialize_u128(b: &mut Bencher) { + let num_u128 = U128::default(); + let num_u128_bytes = num_u128.to_bytes().unwrap(); + + b.iter(|| U128::from_bytes(black_box(&num_u128_bytes))) +} + +fn serialize_u256(b: &mut Bencher) { + let num_u256 = U256::default(); + b.iter(|| ToBytes::to_bytes(black_box(&num_u256))) +} + +fn deserialize_u256(b: &mut Bencher) { + let num_u256 = U256::default(); + let num_u256_bytes = num_u256.to_bytes().unwrap(); + + b.iter(|| U256::from_bytes(black_box(&num_u256_bytes))) +} + +fn serialize_u512(b: &mut Bencher) { + let num_u512 = U512::default(); + b.iter(|| ToBytes::to_bytes(black_box(&num_u512))) +} + +fn deserialize_u512(b: &mut Bencher) { + let num_u512 = U512::default(); + let num_u512_bytes = num_u512.to_bytes().unwrap(); + + b.iter(|| U512::from_bytes(black_box(&num_u512_bytes))) +} + +fn sample_account(associated_keys_len: u8, named_keys_len: u8) -> Account { + let account_hash = AccountHash::default(); + let 
named_keys: NamedKeys = sample_named_keys(named_keys_len); + let main_purse = URef::default(); + let associated_keys = { + let mut tmp = AssociatedKeys::new(AccountHash::default(), Weight::new(1)); + (1..associated_keys_len).for_each(|i| { + tmp.add_key( + AccountHash::new([i; casper_types::account::ACCOUNT_HASH_LENGTH]), + Weight::new(1), + ) + .unwrap() + }); + tmp + }; + let action_thresholds = ActionThresholds::default(); + Account::new( + account_hash, + named_keys, + main_purse, + associated_keys, + action_thresholds, + ) +} + +fn serialize_account(b: &mut Bencher) { + let account = sample_account(10, 10); + b.iter(|| ToBytes::to_bytes(black_box(&account))); +} + +fn deserialize_account(b: &mut Bencher) { + let account = sample_account(10, 10); + let account_bytes = Account::to_bytes(&account).unwrap(); + b.iter(|| Account::from_bytes(black_box(&account_bytes)).unwrap()); +} + +fn serialize_contract(b: &mut Bencher) { + let contract = sample_contract(10, 10); + b.iter(|| ToBytes::to_bytes(black_box(&contract))); +} + +fn deserialize_contract(b: &mut Bencher) { + let contract = sample_contract(10, 10); + let contract_bytes = Contract::to_bytes(&contract).unwrap(); + b.iter(|| Contract::from_bytes(black_box(&contract_bytes)).unwrap()); +} + +fn sample_named_keys(len: u8) -> BTreeMap { + (0..len) + .map(|i| { + ( + format!("named-key-{}", i), + Key::Account(AccountHash::default()), + ) + }) + .collect() +} + +fn sample_contract(named_keys_len: u8, entry_points_len: u8) -> Contract { + let named_keys: NamedKeys = sample_named_keys(named_keys_len); + + let entry_points = { + let mut tmp = EntryPoints::default(); + (1..entry_points_len).for_each(|i| { + let args = vec![ + Parameter::new("first", CLType::U32), + Parameter::new("Foo", CLType::U32), + ]; + let entry_point = EntryPoint::new( + format!("test-{}", i), + args, + casper_types::CLType::U512, + EntryPointAccess::groups(&["Group 2"]), + EntryPointType::Contract, + ); + tmp.add_entry_point(entry_point); + }); 
+ tmp + }; + + casper_types::contracts::Contract::new( + ContractPackageHash::default(), + ContractWasmHash::default(), + named_keys, + entry_points, + ProtocolVersion::default(), + ) +} + +fn contract_version_key_fn(i: u8) -> ContractVersionKey { + ContractVersionKey::new(i as u32, i as u32) +} + +fn contract_hash_fn(i: u8) -> ContractHash { + ContractHash::new([i; KEY_HASH_LENGTH]) +} + +fn sample_map(key_fn: FK, value_fn: FV, count: u8) -> BTreeMap +where + FK: Fn(u8) -> K, + FV: Fn(u8) -> V, +{ + (0..count) + .map(|i| { + let key = key_fn(i); + let value = value_fn(i); + (key, value) + }) + .collect() +} + +fn sample_set(fun: F, count: u8) -> BTreeSet +where + F: Fn(u8) -> K, +{ + (0..count).map(fun).collect() +} + +fn sample_group(i: u8) -> Group { + Group::new(format!("group-{}", i)) +} + +fn sample_uref(i: u8) -> URef { + URef::new([i; UREF_ADDR_LENGTH], AccessRights::all()) +} + +fn sample_contract_package( + contract_versions_len: u8, + disabled_versions_len: u8, + groups_len: u8, +) -> ContractPackage { + let access_key = URef::default(); + let versions = sample_map( + contract_version_key_fn, + contract_hash_fn, + contract_versions_len, + ); + let disabled_versions = sample_set(contract_version_key_fn, disabled_versions_len); + let groups = sample_map(sample_group, |_| sample_set(sample_uref, 3), groups_len); + + ContractPackage::new( + access_key, + versions, + disabled_versions, + groups, + ContractPackageStatus::Locked, + ) +} + +fn serialize_contract_package(b: &mut Bencher) { + let contract = sample_contract_package(5, 1, 5); + b.iter(|| ContractPackage::to_bytes(black_box(&contract))); +} + +fn deserialize_contract_package(b: &mut Bencher) { + let contract_package = sample_contract_package(5, 1, 5); + let contract_bytes = ContractPackage::to_bytes(&contract_package).unwrap(); + b.iter(|| ContractPackage::from_bytes(black_box(&contract_bytes)).unwrap()); +} + +fn u32_to_pk(i: u32) -> PublicKey { + let mut sk_bytes = [0u8; 32]; + 
U256::from(i).to_big_endian(&mut sk_bytes); + let sk = SecretKey::ed25519_from_bytes(sk_bytes).unwrap(); + PublicKey::from(&sk) +} + +fn sample_delegators(delegators_len: u32) -> Vec { + (0..delegators_len) + .map(|i| { + let delegator_pk = u32_to_pk(i); + let staked_amount = U512::from_dec_str("123123123123123").unwrap(); + let bonding_purse = URef::default(); + let validator_pk = u32_to_pk(i); + Delegator::unlocked(delegator_pk, staked_amount, bonding_purse, validator_pk) + }) + .collect() +} + +fn sample_bid(delegators_len: u32) -> Bid { + let validator_public_key = PublicKey::System; + let bonding_purse = URef::default(); + let staked_amount = U512::from_dec_str("123123123123123").unwrap(); + let delegation_rate = 10u8; + let mut bid = Bid::unlocked( + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + ); + let new_delegators = sample_delegators(delegators_len); + + let curr_delegators = bid.delegators_mut(); + for delegator in new_delegators.into_iter() { + assert!(curr_delegators + .insert(delegator.delegator_public_key().clone(), delegator) + .is_none()); + } + bid +} + +fn serialize_bid(delegators_len: u32, b: &mut Bencher) { + let bid = sample_bid(delegators_len); + b.iter(|| Bid::to_bytes(black_box(&bid))); +} + +fn deserialize_bid(delegators_len: u32, b: &mut Bencher) { + let bid = sample_bid(delegators_len); + let bid_bytes = Bid::to_bytes(&bid).unwrap(); + b.iter(|| Bid::from_bytes(black_box(&bid_bytes))); +} + +fn sample_transfer() -> Transfer { + Transfer::new( + DeployHash::default(), + AccountHash::default(), + None, + URef::default(), + URef::default(), + U512::MAX, + U512::from_dec_str("123123123123").unwrap(), + Some(1u64), + ) +} + +fn serialize_transfer(b: &mut Bencher) { + let transfer = sample_transfer(); + b.iter(|| Transfer::to_bytes(&transfer)); +} + +fn deserialize_transfer(b: &mut Bencher) { + let transfer = sample_transfer(); + let transfer_bytes = transfer.to_bytes().unwrap(); + b.iter(|| 
Transfer::from_bytes(&transfer_bytes)); +} + +fn sample_deploy_info(transfer_len: u16) -> DeployInfo { + let transfers = (0..transfer_len) + .map(|i| { + let mut tmp = [0u8; TRANSFER_ADDR_LENGTH]; + U256::from(i).to_little_endian(&mut tmp); + TransferAddr::new(tmp) + }) + .collect::>(); + DeployInfo::new( + DeployHash::default(), + &transfers, + AccountHash::default(), + URef::default(), + U512::MAX, + ) +} + +fn serialize_deploy_info(b: &mut Bencher) { + let deploy_info = sample_deploy_info(1000); + b.iter(|| DeployInfo::to_bytes(&deploy_info)); +} + +fn deserialize_deploy_info(b: &mut Bencher) { + let deploy_info = sample_deploy_info(1000); + let deploy_bytes = deploy_info.to_bytes().unwrap(); + b.iter(|| DeployInfo::from_bytes(&deploy_bytes)); +} + +fn sample_era_info(delegators_len: u32) -> EraInfo { + let mut base = EraInfo::new(); + let delegations = (0..delegators_len).map(|i| { + let pk = u32_to_pk(i); + SeigniorageAllocation::delegator(pk.clone(), pk, U512::MAX) + }); + base.seigniorage_allocations_mut().extend(delegations); + base +} + +fn serialize_era_info(delegators_len: u32, b: &mut Bencher) { + let era_info = sample_era_info(delegators_len); + b.iter(|| EraInfo::to_bytes(&era_info)); +} + +fn deserialize_era_info(delegators_len: u32, b: &mut Bencher) { + let era_info = sample_era_info(delegators_len); + let era_info_bytes = era_info.to_bytes().unwrap(); + b.iter(|| EraInfo::from_bytes(&era_info_bytes)); +} + +fn bytesrepr_bench(c: &mut Criterion) { + c.bench_function("serialize_vector_of_i32s", serialize_vector_of_i32s); + c.bench_function("deserialize_vector_of_i32s", deserialize_vector_of_i32s); + c.bench_function("serialize_vector_of_u8", serialize_vector_of_u8); + c.bench_function("deserialize_vector_of_u8", deserialize_vector_of_u8); + c.bench_function("serialize_u8", serialize_u8); + c.bench_function("deserialize_u8", deserialize_u8); + c.bench_function("serialize_i32", serialize_i32); + c.bench_function("deserialize_i32", deserialize_i32); + 
c.bench_function("serialize_u64", serialize_u64); + c.bench_function("deserialize_u64", deserialize_u64); + c.bench_function("serialize_some_u64", serialize_some_u64); + c.bench_function("deserialize_some_u64", deserialize_some_u64); + c.bench_function("serialize_none_u64", serialize_none_u64); + c.bench_function("deserialize_ok_u64", deserialize_ok_u64); + c.bench_function( + "serialize_vector_of_vector_of_u8", + serialize_vector_of_vector_of_u8, + ); + c.bench_function( + "deserialize_vector_of_vector_of_u8", + deserialize_vector_of_vector_of_u8, + ); + c.bench_function("serialize_tree_map", serialize_tree_map); + c.bench_function("deserialize_treemap", deserialize_treemap); + c.bench_function("serialize_string", serialize_string); + c.bench_function("deserialize_string", deserialize_string); + c.bench_function("serialize_vec_of_string", serialize_vec_of_string); + c.bench_function("deserialize_vec_of_string", deserialize_vec_of_string); + c.bench_function("serialize_unit", serialize_unit); + c.bench_function("deserialize_unit", deserialize_unit); + c.bench_function("serialize_key_account", serialize_key_account); + c.bench_function("deserialize_key_account", deserialize_key_account); + c.bench_function("serialize_key_hash", serialize_key_hash); + c.bench_function("deserialize_key_hash", deserialize_key_hash); + c.bench_function("serialize_key_uref", serialize_key_uref); + c.bench_function("deserialize_key_uref", deserialize_key_uref); + c.bench_function("serialize_vec_of_keys", serialize_vec_of_keys); + c.bench_function("deserialize_vec_of_keys", deserialize_vec_of_keys); + c.bench_function("serialize_access_rights_read", serialize_access_rights_read); + c.bench_function( + "deserialize_access_rights_read", + deserialize_access_rights_read, + ); + c.bench_function( + "serialize_access_rights_write", + serialize_access_rights_write, + ); + c.bench_function( + "deserialize_access_rights_write", + deserialize_access_rights_write, + ); + 
c.bench_function("serialize_access_rights_add", serialize_access_rights_add); + c.bench_function( + "deserialize_access_rights_add", + deserialize_access_rights_add, + ); + c.bench_function( + "serialize_access_rights_read_add", + serialize_access_rights_read_add, + ); + c.bench_function( + "deserialize_access_rights_read_add", + deserialize_access_rights_read_add, + ); + c.bench_function( + "serialize_access_rights_read_write", + serialize_access_rights_read_write, + ); + c.bench_function( + "deserialize_access_rights_read_write", + deserialize_access_rights_read_write, + ); + c.bench_function( + "serialize_access_rights_add_write", + serialize_access_rights_add_write, + ); + c.bench_function( + "deserialize_access_rights_add_write", + deserialize_access_rights_add_write, + ); + c.bench_function("serialize_cl_value_int32", serialize_cl_value_int32); + c.bench_function("deserialize_cl_value_int32", deserialize_cl_value_int32); + c.bench_function("serialize_cl_value_uint128", serialize_cl_value_uint128); + c.bench_function("deserialize_cl_value_uint128", deserialize_cl_value_uint128); + c.bench_function("serialize_cl_value_uint256", serialize_cl_value_uint256); + c.bench_function("deserialize_cl_value_uint256", deserialize_cl_value_uint256); + c.bench_function("serialize_cl_value_uint512", serialize_cl_value_uint512); + c.bench_function("deserialize_cl_value_uint512", deserialize_cl_value_uint512); + c.bench_function("serialize_cl_value_bytearray", serialize_cl_value_bytearray); + c.bench_function( + "deserialize_cl_value_bytearray", + deserialize_cl_value_bytearray, + ); + c.bench_function("serialize_cl_value_listint32", serialize_cl_value_listint32); + c.bench_function( + "deserialize_cl_value_listint32", + deserialize_cl_value_listint32, + ); + c.bench_function("serialize_cl_value_string", serialize_cl_value_string); + c.bench_function("deserialize_cl_value_string", deserialize_cl_value_string); + c.bench_function( + "serialize_cl_value_liststring", + 
serialize_cl_value_liststring, + ); + c.bench_function( + "deserialize_cl_value_liststring", + deserialize_cl_value_liststring, + ); + c.bench_function("serialize_cl_value_namedkey", serialize_cl_value_namedkey); + c.bench_function( + "deserialize_cl_value_namedkey", + deserialize_cl_value_namedkey, + ); + c.bench_function("serialize_u128", serialize_u128); + c.bench_function("deserialize_u128", deserialize_u128); + c.bench_function("serialize_u256", serialize_u256); + c.bench_function("deserialize_u256", deserialize_u256); + c.bench_function("serialize_u512", serialize_u512); + c.bench_function("deserialize_u512", deserialize_u512); + c.bench_function("bytesrepr::serialize_account", serialize_account); + c.bench_function("bytesrepr::deserialize_account", deserialize_account); + c.bench_function("bytesrepr::serialize_contract", serialize_contract); + c.bench_function("bytesrepr::deserialize_contract", deserialize_contract); + c.bench_function( + "bytesrepr::serialize_contract_package", + serialize_contract_package, + ); + c.bench_function( + "bytesrepr::deserialize_contract_package", + deserialize_contract_package, + ); + c.bench_function("bytesrepr::serialize_bid_small", |b| serialize_bid(10, b)); + c.bench_function("bytesrepr::serialize_bid_medium", |b| serialize_bid(100, b)); + c.bench_function("bytesrepr::serialize_bid_big", |b| serialize_bid(1000, b)); + c.bench_function("bytesrepr::deserialize_bid_small", |b| { + deserialize_bid(10, b) + }); + c.bench_function("bytesrepr::deserialize_bid_medium", |b| { + deserialize_bid(100, b) + }); + c.bench_function("bytesrepr::deserialize_bid_big", |b| { + deserialize_bid(1000, b) + }); + c.bench_function("bytesrepr::serialize_transfer", serialize_transfer); + c.bench_function("bytesrepr::deserialize_transfer", deserialize_transfer); + c.bench_function("bytesrepr::serialize_deploy_info", serialize_deploy_info); + c.bench_function( + "bytesrepr::deserialize_deploy_info", + deserialize_deploy_info, + ); + 
c.bench_function("bytesrepr::serialize_era_info", |b| { + serialize_era_info(500, b) + }); + c.bench_function("bytesrepr::deserialize_era_info", |b| { + deserialize_era_info(500, b) + }); +} + +criterion_group!(benches, bytesrepr_bench); +criterion_main!(benches); diff --git a/casper_types/src/access_rights.rs b/casper_types/src/access_rights.rs new file mode 100644 index 00000000..e138f2f4 --- /dev/null +++ b/casper_types/src/access_rights.rs @@ -0,0 +1,422 @@ +use alloc::{ + collections::{btree_map::Entry, BTreeMap}, + vec::Vec, +}; +use core::fmt::{self, Display, Formatter}; + +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; + +use crate::{bytesrepr, Key, URef, URefAddr}; +pub use private::AccessRights; + +/// The number of bytes in a serialized [`AccessRights`]. +pub const ACCESS_RIGHTS_SERIALIZED_LENGTH: usize = 1; + +// Module exists only to restrict the scope of the following `#allow`. +#[allow(clippy::bad_bit_mask)] +mod private { + use bitflags::bitflags; + #[cfg(feature = "datasize")] + use datasize::DataSize; + + bitflags! { + /// A struct which behaves like a set of bitflags to define access rights associated with a + /// [`URef`](crate::URef). + #[allow(clippy::derived_hash_with_manual_eq)] + #[cfg_attr(feature = "datasize", derive(DataSize))] + pub struct AccessRights: u8 { + /// No permissions + const NONE = 0; + /// Permission to read the value under the associated `URef`. + const READ = 0b001; + /// Permission to write a value under the associated `URef`. + const WRITE = 0b010; + /// Permission to add to the value under the associated `URef`. + const ADD = 0b100; + /// Permission to read or add to the value under the associated `URef`. + const READ_ADD = Self::READ.bits | Self::ADD.bits; + /// Permission to read or write the value under the associated `URef`. 
+ const READ_WRITE = Self::READ.bits | Self::WRITE.bits; + /// Permission to add to, or write the value under the associated `URef`. + const ADD_WRITE = Self::ADD.bits | Self::WRITE.bits; + /// Permission to read, add to, or write the value under the associated `URef`. + const READ_ADD_WRITE = Self::READ.bits | Self::ADD.bits | Self::WRITE.bits; + } + } +} + +impl Default for AccessRights { + fn default() -> Self { + AccessRights::NONE + } +} + +impl AccessRights { + /// Returns `true` if the `READ` flag is set. + pub fn is_readable(self) -> bool { + self & AccessRights::READ == AccessRights::READ + } + + /// Returns `true` if the `WRITE` flag is set. + pub fn is_writeable(self) -> bool { + self & AccessRights::WRITE == AccessRights::WRITE + } + + /// Returns `true` if the `ADD` flag is set. + pub fn is_addable(self) -> bool { + self & AccessRights::ADD == AccessRights::ADD + } + + /// Returns `true` if no flags are set. + pub fn is_none(self) -> bool { + self == AccessRights::NONE + } +} + +impl Display for AccessRights { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match *self { + AccessRights::NONE => write!(f, "NONE"), + AccessRights::READ => write!(f, "READ"), + AccessRights::WRITE => write!(f, "WRITE"), + AccessRights::ADD => write!(f, "ADD"), + AccessRights::READ_ADD => write!(f, "READ_ADD"), + AccessRights::READ_WRITE => write!(f, "READ_WRITE"), + AccessRights::ADD_WRITE => write!(f, "ADD_WRITE"), + AccessRights::READ_ADD_WRITE => write!(f, "READ_ADD_WRITE"), + _ => write!(f, "UNKNOWN"), + } + } +} + +impl bytesrepr::ToBytes for AccessRights { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.bits().to_bytes() + } + + fn serialized_length(&self) -> usize { + ACCESS_RIGHTS_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + writer.push(self.bits()); + Ok(()) + } +} + +impl bytesrepr::FromBytes for AccessRights { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let 
(id, rem) = u8::from_bytes(bytes)?; + match AccessRights::from_bits(id) { + Some(rights) => Ok((rights, rem)), + None => Err(bytesrepr::Error::Formatting), + } + } +} + +impl Serialize for AccessRights { + fn serialize(&self, serializer: S) -> Result { + self.bits().serialize(serializer) + } +} + +impl<'de> Deserialize<'de> for AccessRights { + fn deserialize>(deserializer: D) -> Result { + let bits = u8::deserialize(deserializer)?; + AccessRights::from_bits(bits).ok_or_else(|| SerdeError::custom("invalid bits")) + } +} + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> AccessRights { + let mut result = AccessRights::NONE; + if rng.gen() { + result |= AccessRights::READ; + } + if rng.gen() { + result |= AccessRights::WRITE; + } + if rng.gen() { + result |= AccessRights::ADD; + } + result + } +} + +/// Used to indicate if a granted [`URef`] was already held by the context. +#[derive(Debug, PartialEq, Eq)] +pub enum GrantedAccess { + /// No new set of access rights were granted. + PreExisting, + /// A new set of access rights were granted. + Granted { + /// The address of the URef. + uref_addr: URefAddr, + /// The set of the newly granted access rights. + newly_granted_access_rights: AccessRights, + }, +} + +/// Access rights for a given runtime context. +#[derive(Debug, PartialEq, Eq)] +pub struct ContextAccessRights { + context_key: Key, + access_rights: BTreeMap, +} + +impl ContextAccessRights { + /// Creates a new instance of access rights from an iterator of URefs merging any duplicates, + /// taking the union of their rights. + pub fn new>(context_key: Key, uref_iter: T) -> Self { + let mut context_access_rights = ContextAccessRights { + context_key, + access_rights: BTreeMap::new(), + }; + context_access_rights.do_extend(uref_iter); + context_access_rights + } + + /// Returns the current context key. + pub fn context_key(&self) -> Key { + self.context_key + } + + /// Extends the current access rights from a given set of URefs. 
+ pub fn extend(&mut self, urefs: &[URef]) { + self.do_extend(urefs.iter().copied()) + } + + /// Extends the current access rights from a given set of URefs. + fn do_extend>(&mut self, uref_iter: T) { + for uref in uref_iter { + match self.access_rights.entry(uref.addr()) { + Entry::Occupied(rights) => { + *rights.into_mut() = rights.get().union(uref.access_rights()); + } + Entry::Vacant(rights) => { + rights.insert(uref.access_rights()); + } + } + } + } + + /// Checks whether given uref has enough access rights. + pub fn has_access_rights_to_uref(&self, uref: &URef) -> bool { + if let Some(known_rights) = self.access_rights.get(&uref.addr()) { + let rights_to_check = uref.access_rights(); + known_rights.contains(rights_to_check) + } else { + // URef is not known + false + } + } + + /// Grants access to a [`URef`]; unless access was pre-existing. + pub fn grant_access(&mut self, uref: URef) -> GrantedAccess { + match self.access_rights.entry(uref.addr()) { + Entry::Occupied(existing_rights) => { + let newly_granted_access_rights = + uref.access_rights().difference(*existing_rights.get()); + *existing_rights.into_mut() = existing_rights.get().union(uref.access_rights()); + if newly_granted_access_rights.is_none() { + GrantedAccess::PreExisting + } else { + GrantedAccess::Granted { + uref_addr: uref.addr(), + newly_granted_access_rights, + } + } + } + Entry::Vacant(rights) => { + rights.insert(uref.access_rights()); + GrantedAccess::Granted { + uref_addr: uref.addr(), + newly_granted_access_rights: uref.access_rights(), + } + } + } + } + + /// Remove access for a given `URef`. 
+ pub fn remove_access(&mut self, uref_addr: URefAddr, access_rights: AccessRights) { + if let Some(current_access_rights) = self.access_rights.get_mut(&uref_addr) { + current_access_rights.remove(access_rights) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::UREF_ADDR_LENGTH; + + const UREF_ADDRESS: [u8; UREF_ADDR_LENGTH] = [1; UREF_ADDR_LENGTH]; + const KEY: Key = Key::URef(URef::new(UREF_ADDRESS, AccessRights::empty())); + const UREF_NO_PERMISSIONS: URef = URef::new(UREF_ADDRESS, AccessRights::empty()); + const UREF_READ: URef = URef::new(UREF_ADDRESS, AccessRights::READ); + const UREF_ADD: URef = URef::new(UREF_ADDRESS, AccessRights::ADD); + const UREF_WRITE: URef = URef::new(UREF_ADDRESS, AccessRights::WRITE); + const UREF_READ_ADD: URef = URef::new(UREF_ADDRESS, AccessRights::READ_ADD); + const UREF_READ_ADD_WRITE: URef = URef::new(UREF_ADDRESS, AccessRights::READ_ADD_WRITE); + + fn test_readable(right: AccessRights, is_true: bool) { + assert_eq!(right.is_readable(), is_true) + } + + #[test] + fn test_is_readable() { + test_readable(AccessRights::READ, true); + test_readable(AccessRights::READ_ADD, true); + test_readable(AccessRights::READ_WRITE, true); + test_readable(AccessRights::READ_ADD_WRITE, true); + test_readable(AccessRights::ADD, false); + test_readable(AccessRights::ADD_WRITE, false); + test_readable(AccessRights::WRITE, false); + } + + fn test_writable(right: AccessRights, is_true: bool) { + assert_eq!(right.is_writeable(), is_true) + } + + #[test] + fn test_is_writable() { + test_writable(AccessRights::WRITE, true); + test_writable(AccessRights::READ_WRITE, true); + test_writable(AccessRights::ADD_WRITE, true); + test_writable(AccessRights::READ, false); + test_writable(AccessRights::ADD, false); + test_writable(AccessRights::READ_ADD, false); + test_writable(AccessRights::READ_ADD_WRITE, true); + } + + fn test_addable(right: AccessRights, is_true: bool) { + assert_eq!(right.is_addable(), is_true) + } + + #[test] + fn 
test_is_addable() { + test_addable(AccessRights::ADD, true); + test_addable(AccessRights::READ_ADD, true); + test_addable(AccessRights::READ_WRITE, false); + test_addable(AccessRights::ADD_WRITE, true); + test_addable(AccessRights::READ, false); + test_addable(AccessRights::WRITE, false); + test_addable(AccessRights::READ_ADD_WRITE, true); + } + + #[test] + fn should_check_has_access_rights_to_uref() { + let context_rights = ContextAccessRights::new(KEY, vec![UREF_READ_ADD]); + assert!(context_rights.has_access_rights_to_uref(&UREF_READ_ADD)); + assert!(context_rights.has_access_rights_to_uref(&UREF_READ)); + assert!(context_rights.has_access_rights_to_uref(&UREF_ADD)); + assert!(context_rights.has_access_rights_to_uref(&UREF_NO_PERMISSIONS)); + } + + #[test] + fn should_check_does_not_have_access_rights_to_uref() { + let context_rights = ContextAccessRights::new(KEY, vec![UREF_READ_ADD]); + assert!(!context_rights.has_access_rights_to_uref(&UREF_READ_ADD_WRITE)); + assert!(!context_rights + .has_access_rights_to_uref(&URef::new([2; UREF_ADDR_LENGTH], AccessRights::empty()))); + } + + #[test] + fn should_extend_access_rights() { + // Start with uref with no permissions. + let mut context_rights = ContextAccessRights::new(KEY, vec![UREF_NO_PERMISSIONS]); + let mut expected_rights = BTreeMap::new(); + expected_rights.insert(UREF_ADDRESS, AccessRights::empty()); + assert_eq!(context_rights.access_rights, expected_rights); + + // Extend with a READ_ADD: should merge to single READ_ADD. + context_rights.extend(&[UREF_READ_ADD]); + *expected_rights.get_mut(&UREF_ADDRESS).unwrap() = AccessRights::READ_ADD; + assert_eq!(context_rights.access_rights, expected_rights); + + // Extend with a READ: should have no observable effect. + context_rights.extend(&[UREF_READ]); + assert_eq!(context_rights.access_rights, expected_rights); + + // Extend with a WRITE: should merge to single READ_ADD_WRITE. 
+ context_rights.extend(&[UREF_WRITE]); + *expected_rights.get_mut(&UREF_ADDRESS).unwrap() = AccessRights::READ_ADD_WRITE; + assert_eq!(context_rights.access_rights, expected_rights); + } + + #[test] + fn should_perform_union_of_access_rights_in_new() { + let context_rights = + ContextAccessRights::new(KEY, vec![UREF_NO_PERMISSIONS, UREF_READ, UREF_ADD]); + + // Expect the three discrete URefs' rights to be unioned into READ_ADD. + let mut expected_rights = BTreeMap::new(); + expected_rights.insert(UREF_ADDRESS, AccessRights::READ_ADD); + assert_eq!(context_rights.access_rights, expected_rights); + } + + #[test] + fn should_grant_access_rights() { + let mut context_rights = ContextAccessRights::new(KEY, vec![UREF_READ_ADD]); + let granted_access = context_rights.grant_access(UREF_READ); + assert_eq!(granted_access, GrantedAccess::PreExisting); + let granted_access = context_rights.grant_access(UREF_READ_ADD_WRITE); + assert_eq!( + granted_access, + GrantedAccess::Granted { + uref_addr: UREF_ADDRESS, + newly_granted_access_rights: AccessRights::WRITE + } + ); + assert!(context_rights.has_access_rights_to_uref(&UREF_READ_ADD_WRITE)); + let new_uref = URef::new([3; 32], AccessRights::all()); + let granted_access = context_rights.grant_access(new_uref); + assert_eq!( + granted_access, + GrantedAccess::Granted { + uref_addr: new_uref.addr(), + newly_granted_access_rights: AccessRights::all() + } + ); + assert!(context_rights.has_access_rights_to_uref(&new_uref)); + } + + #[test] + fn should_remove_access_rights() { + let mut context_rights = ContextAccessRights::new(KEY, vec![UREF_READ_ADD_WRITE]); + assert!(context_rights.has_access_rights_to_uref(&UREF_READ_ADD_WRITE)); + + // Strip write access from the context rights. 
+ context_rights.remove_access(UREF_ADDRESS, AccessRights::WRITE); + assert!( + !context_rights.has_access_rights_to_uref(&UREF_READ_ADD_WRITE), + "Write access should have been removed" + ); + + // Strip the access again to ensure that the bit is not flipped back. + context_rights.remove_access(UREF_ADDRESS, AccessRights::WRITE); + assert!( + !context_rights.has_access_rights_to_uref(&UREF_READ_ADD_WRITE), + "Write access should not have been granted back" + ); + assert!( + context_rights.has_access_rights_to_uref(&UREF_READ_ADD), + "Read and add access should be preserved." + ); + + // Strip both read and add access from the context rights. + context_rights.remove_access(UREF_ADDRESS, AccessRights::READ_ADD); + assert!( + !context_rights.has_access_rights_to_uref(&UREF_READ_ADD), + "Read and add access should have been removed" + ); + assert!( + context_rights.has_access_rights_to_uref(&UREF_NO_PERMISSIONS), + "The access rights should be empty" + ); + } +} diff --git a/casper_types/src/account.rs b/casper_types/src/account.rs new file mode 100644 index 00000000..f07892f0 --- /dev/null +++ b/casper_types/src/account.rs @@ -0,0 +1,1013 @@ +//! Contains types and constants associated with user accounts. 
+ +mod account_hash; +pub mod action_thresholds; +mod action_type; +pub mod associated_keys; +mod error; +mod weight; + +use serde::Serialize; + +use alloc::{collections::BTreeSet, vec::Vec}; +use core::{ + convert::TryFrom, + fmt::{self, Debug, Display, Formatter}, + iter, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; + +pub use self::{ + account_hash::{AccountHash, ACCOUNT_HASH_FORMATTED_STRING_PREFIX, ACCOUNT_HASH_LENGTH}, + action_thresholds::ActionThresholds, + action_type::ActionType, + associated_keys::AssociatedKeys, + error::{FromStrError, SetThresholdFailure, TryFromIntError, TryFromSliceForAccountHashError}, + weight::{Weight, WEIGHT_SERIALIZED_LENGTH}, +}; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + contracts::NamedKeys, + crypto, AccessRights, ContextAccessRights, Key, URef, BLAKE2B_DIGEST_LENGTH, +}; + +/// Represents an Account in the global state. +#[derive(PartialEq, Eq, Clone, Debug, Serialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct Account { + account_hash: AccountHash, + named_keys: NamedKeys, + main_purse: URef, + associated_keys: AssociatedKeys, + action_thresholds: ActionThresholds, +} + +impl Account { + /// Creates a new account. + pub fn new( + account_hash: AccountHash, + named_keys: NamedKeys, + main_purse: URef, + associated_keys: AssociatedKeys, + action_thresholds: ActionThresholds, + ) -> Self { + Account { + account_hash, + named_keys, + main_purse, + associated_keys, + action_thresholds, + } + } + + /// An Account constructor with presets for associated_keys and action_thresholds. + /// + /// An account created with this method is valid and can be used as the target of a transaction. + /// It will be created with an [`AssociatedKeys`] with a [`Weight`] of 1, and a default + /// [`ActionThresholds`]. 
+ pub fn create(account: AccountHash, named_keys: NamedKeys, main_purse: URef) -> Self { + let associated_keys = AssociatedKeys::new(account, Weight::new(1)); + + let action_thresholds: ActionThresholds = Default::default(); + Account::new( + account, + named_keys, + main_purse, + associated_keys, + action_thresholds, + ) + } + + /// Extracts the access rights from the named keys and main purse of the account. + pub fn extract_access_rights(&self) -> ContextAccessRights { + let urefs_iter = self + .named_keys + .values() + .filter_map(|key| key.as_uref().copied()) + .chain(iter::once(self.main_purse)); + ContextAccessRights::new(Key::from(self.account_hash), urefs_iter) + } + + /// Appends named keys to an account's named_keys field. + pub fn named_keys_append(&mut self, keys: &mut NamedKeys) { + self.named_keys.append(keys); + } + + /// Returns named keys. + pub fn named_keys(&self) -> &NamedKeys { + &self.named_keys + } + + /// Returns a mutable reference to named keys. + pub fn named_keys_mut(&mut self) -> &mut NamedKeys { + &mut self.named_keys + } + + /// Returns account hash. + pub fn account_hash(&self) -> AccountHash { + self.account_hash + } + + /// Returns main purse. + pub fn main_purse(&self) -> URef { + self.main_purse + } + + /// Returns an [`AccessRights::ADD`]-only version of the main purse's [`URef`]. + pub fn main_purse_add_only(&self) -> URef { + URef::new(self.main_purse.addr(), AccessRights::ADD) + } + + /// Returns associated keys. + pub fn associated_keys(&self) -> &AssociatedKeys { + &self.associated_keys + } + + /// Returns action thresholds. + pub fn action_thresholds(&self) -> &ActionThresholds { + &self.action_thresholds + } + + /// Adds an associated key to an account. + pub fn add_associated_key( + &mut self, + account_hash: AccountHash, + weight: Weight, + ) -> Result<(), AddKeyFailure> { + self.associated_keys.add_key(account_hash, weight) + } + + /// Checks if removing given key would properly satisfy thresholds. 
+ fn can_remove_key(&self, account_hash: AccountHash) -> bool { + let total_weight_without = self + .associated_keys + .total_keys_weight_excluding(account_hash); + + // Returns true if the total weight calculated without given public key would be greater or + // equal to all of the thresholds. + total_weight_without >= *self.action_thresholds().deployment() + && total_weight_without >= *self.action_thresholds().key_management() + } + + /// Checks if adding a weight to a sum of all weights excluding the given key would make the + /// resulting value to fall below any of the thresholds on account. + fn can_update_key(&self, account_hash: AccountHash, weight: Weight) -> bool { + // Calculates total weight of all keys excluding the given key + let total_weight = self + .associated_keys + .total_keys_weight_excluding(account_hash); + + // Safely calculate new weight by adding the updated weight + let new_weight = total_weight.value().saturating_add(weight.value()); + + // Returns true if the new weight would be greater or equal to all of + // the thresholds. + new_weight >= self.action_thresholds().deployment().value() + && new_weight >= self.action_thresholds().key_management().value() + } + + /// Removes an associated key from an account. + /// + /// Verifies that removing the key will not cause the remaining weight to fall below any action + /// thresholds. + pub fn remove_associated_key( + &mut self, + account_hash: AccountHash, + ) -> Result<(), RemoveKeyFailure> { + if self.associated_keys.contains_key(&account_hash) { + // Check if removing this weight would fall below thresholds + if !self.can_remove_key(account_hash) { + return Err(RemoveKeyFailure::ThresholdViolation); + } + } + self.associated_keys.remove_key(&account_hash) + } + + /// Updates an associated key. + /// + /// Returns an error if the update would result in a violation of the key management thresholds. 
+ pub fn update_associated_key( + &mut self, + account_hash: AccountHash, + weight: Weight, + ) -> Result<(), UpdateKeyFailure> { + if let Some(current_weight) = self.associated_keys.get(&account_hash) { + if weight < *current_weight { + // New weight is smaller than current weight + if !self.can_update_key(account_hash, weight) { + return Err(UpdateKeyFailure::ThresholdViolation); + } + } + } + self.associated_keys.update_key(account_hash, weight) + } + + /// Sets a new action threshold for a given action type for the account. + /// + /// Returns an error if the new action threshold weight is greater than the total weight of the + /// account's associated keys. + pub fn set_action_threshold( + &mut self, + action_type: ActionType, + weight: Weight, + ) -> Result<(), SetThresholdFailure> { + // Verify if new threshold weight exceeds total weight of all associated + // keys. + self.can_set_threshold(weight)?; + // Set new weight for given action + self.action_thresholds.set_threshold(action_type, weight) + } + + /// Verifies if user can set action threshold. + pub fn can_set_threshold(&self, new_threshold: Weight) -> Result<(), SetThresholdFailure> { + let total_weight = self.associated_keys.total_keys_weight(); + if new_threshold > total_weight { + return Err(SetThresholdFailure::InsufficientTotalWeight); + } + Ok(()) + } + + /// Sets a new action threshold for a given action type for the account without checking against + /// the total weight of the associated keys. + /// + /// This should only be called when authorized by an administrator account. + /// + /// Returns an error if setting the action would cause the `ActionType::Deployment` threshold to + /// be greater than any of the other action types. 
+ pub fn set_action_threshold_unchecked( + &mut self, + action_type: ActionType, + threshold: Weight, + ) -> Result<(), SetThresholdFailure> { + self.action_thresholds.set_threshold(action_type, threshold) + } + + /// Checks whether all authorization keys are associated with this account. + pub fn can_authorize(&self, authorization_keys: &BTreeSet) -> bool { + !authorization_keys.is_empty() + && authorization_keys + .iter() + .all(|e| self.associated_keys.contains_key(e)) + } + + /// Checks whether the sum of the weights of all authorization keys is + /// greater or equal to deploy threshold. + pub fn can_deploy_with(&self, authorization_keys: &BTreeSet) -> bool { + let total_weight = self + .associated_keys + .calculate_keys_weight(authorization_keys); + + total_weight >= *self.action_thresholds().deployment() + } + + /// Checks whether the sum of the weights of all authorization keys is + /// greater or equal to key management threshold. + pub fn can_manage_keys_with(&self, authorization_keys: &BTreeSet) -> bool { + let total_weight = self + .associated_keys + .calculate_keys_weight(authorization_keys); + + total_weight >= *self.action_thresholds().key_management() + } +} + +impl ToBytes for Account { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + self.account_hash().write_bytes(&mut result)?; + self.named_keys().write_bytes(&mut result)?; + self.main_purse.write_bytes(&mut result)?; + self.associated_keys().write_bytes(&mut result)?; + self.action_thresholds().write_bytes(&mut result)?; + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.account_hash.serialized_length() + + self.named_keys.serialized_length() + + self.main_purse.serialized_length() + + self.associated_keys.serialized_length() + + self.action_thresholds.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.account_hash().write_bytes(writer)?; + 
self.named_keys().write_bytes(writer)?; + self.main_purse().write_bytes(writer)?; + self.associated_keys().write_bytes(writer)?; + self.action_thresholds().write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for Account { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (account_hash, rem) = AccountHash::from_bytes(bytes)?; + let (named_keys, rem) = NamedKeys::from_bytes(rem)?; + let (main_purse, rem) = URef::from_bytes(rem)?; + let (associated_keys, rem) = AssociatedKeys::from_bytes(rem)?; + let (action_thresholds, rem) = ActionThresholds::from_bytes(rem)?; + Ok(( + Account { + account_hash, + named_keys, + main_purse, + associated_keys, + action_thresholds, + }, + rem, + )) + } +} + +#[doc(hidden)] +#[deprecated( + since = "1.4.4", + note = "function moved to casper_types::crypto::blake2b" +)] +pub fn blake2b>(data: T) -> [u8; BLAKE2B_DIGEST_LENGTH] { + crypto::blake2b(data) +} + +/// Errors that can occur while adding a new [`AccountHash`] to an account's associated keys map. +#[derive(PartialEq, Eq, Debug, Copy, Clone)] +#[repr(i32)] +#[non_exhaustive] +pub enum AddKeyFailure { + /// There are already maximum [`AccountHash`]s associated with the given account. + MaxKeysLimit = 1, + /// The given [`AccountHash`] is already associated with the given account. + DuplicateKey = 2, + /// Caller doesn't have sufficient permissions to associate a new [`AccountHash`] with the + /// given account. 
+ PermissionDenied = 3, +} + +impl Display for AddKeyFailure { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + AddKeyFailure::MaxKeysLimit => formatter.write_str( + "Unable to add new associated key because maximum amount of keys is reached", + ), + AddKeyFailure::DuplicateKey => formatter + .write_str("Unable to add new associated key because given key already exists"), + AddKeyFailure::PermissionDenied => formatter + .write_str("Unable to add new associated key due to insufficient permissions"), + } + } +} + +// This conversion is not intended to be used by third party crates. +#[doc(hidden)] +impl TryFrom for AddKeyFailure { + type Error = TryFromIntError; + + fn try_from(value: i32) -> Result { + match value { + d if d == AddKeyFailure::MaxKeysLimit as i32 => Ok(AddKeyFailure::MaxKeysLimit), + d if d == AddKeyFailure::DuplicateKey as i32 => Ok(AddKeyFailure::DuplicateKey), + d if d == AddKeyFailure::PermissionDenied as i32 => Ok(AddKeyFailure::PermissionDenied), + _ => Err(TryFromIntError(())), + } + } +} + +/// Errors that can occur while removing a [`AccountHash`] from an account's associated keys map. +#[derive(Debug, Eq, PartialEq, Copy, Clone)] +#[repr(i32)] +#[non_exhaustive] +pub enum RemoveKeyFailure { + /// The given [`AccountHash`] is not associated with the given account. + MissingKey = 1, + /// Caller doesn't have sufficient permissions to remove an associated [`AccountHash`] from the + /// given account. + PermissionDenied = 2, + /// Removing the given associated [`AccountHash`] would cause the total weight of all remaining + /// `AccountHash`s to fall below one of the action thresholds for the given account. 
+ ThresholdViolation = 3, +} + +impl Display for RemoveKeyFailure { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + RemoveKeyFailure::MissingKey => { + formatter.write_str("Unable to remove a key that does not exist") + } + RemoveKeyFailure::PermissionDenied => formatter + .write_str("Unable to remove associated key due to insufficient permissions"), + RemoveKeyFailure::ThresholdViolation => formatter.write_str( + "Unable to remove a key which would violate action threshold constraints", + ), + } + } +} + +// This conversion is not intended to be used by third party crates. +#[doc(hidden)] +impl TryFrom for RemoveKeyFailure { + type Error = TryFromIntError; + + fn try_from(value: i32) -> Result { + match value { + d if d == RemoveKeyFailure::MissingKey as i32 => Ok(RemoveKeyFailure::MissingKey), + d if d == RemoveKeyFailure::PermissionDenied as i32 => { + Ok(RemoveKeyFailure::PermissionDenied) + } + d if d == RemoveKeyFailure::ThresholdViolation as i32 => { + Ok(RemoveKeyFailure::ThresholdViolation) + } + _ => Err(TryFromIntError(())), + } + } +} + +/// Errors that can occur while updating the [`Weight`] of a [`AccountHash`] in an account's +/// associated keys map. +#[derive(PartialEq, Eq, Debug, Copy, Clone)] +#[repr(i32)] +#[non_exhaustive] +pub enum UpdateKeyFailure { + /// The given [`AccountHash`] is not associated with the given account. + MissingKey = 1, + /// Caller doesn't have sufficient permissions to update an associated [`AccountHash`] from the + /// given account. + PermissionDenied = 2, + /// Updating the [`Weight`] of the given associated [`AccountHash`] would cause the total + /// weight of all `AccountHash`s to fall below one of the action thresholds for the given + /// account. 
+ ThresholdViolation = 3, +} + +impl Display for UpdateKeyFailure { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + UpdateKeyFailure::MissingKey => formatter.write_str( + "Unable to update the value under an associated key that does not exist", + ), + UpdateKeyFailure::PermissionDenied => formatter + .write_str("Unable to update associated key due to insufficient permissions"), + UpdateKeyFailure::ThresholdViolation => formatter.write_str( + "Unable to update weight that would fall below any of action thresholds", + ), + } + } +} + +// This conversion is not intended to be used by third party crates. +#[doc(hidden)] +impl TryFrom for UpdateKeyFailure { + type Error = TryFromIntError; + + fn try_from(value: i32) -> Result { + match value { + d if d == UpdateKeyFailure::MissingKey as i32 => Ok(UpdateKeyFailure::MissingKey), + d if d == UpdateKeyFailure::PermissionDenied as i32 => { + Ok(UpdateKeyFailure::PermissionDenied) + } + d if d == UpdateKeyFailure::ThresholdViolation as i32 => { + Ok(UpdateKeyFailure::ThresholdViolation) + } + _ => Err(TryFromIntError(())), + } + } +} + +#[doc(hidden)] +#[cfg(any(feature = "testing", feature = "gens", test))] +pub mod gens { + use proptest::prelude::*; + + use crate::{ + account::{ + action_thresholds::gens::action_thresholds_arb, + associated_keys::gens::associated_keys_arb, Account, Weight, + }, + gens::{account_hash_arb, named_keys_arb, uref_arb}, + }; + + prop_compose! 
{ + pub fn account_arb()( + account_hash in account_hash_arb(), + urefs in named_keys_arb(3), + purse in uref_arb(), + thresholds in action_thresholds_arb(), + mut associated_keys in associated_keys_arb(), + ) -> Account { + associated_keys.add_key(account_hash, Weight::new(1)).unwrap(); + Account::new( + account_hash, + urefs, + purse, + associated_keys, + thresholds, + ) + } + } +} + +#[cfg(test)] +mod tests { + use crate::{ + account::{ + Account, AccountHash, ActionThresholds, ActionType, AssociatedKeys, RemoveKeyFailure, + SetThresholdFailure, UpdateKeyFailure, Weight, + }, + contracts::NamedKeys, + AccessRights, URef, + }; + use std::{collections::BTreeSet, convert::TryFrom, iter::FromIterator, vec::Vec}; + + use super::*; + + #[test] + fn account_hash_from_slice() { + let bytes: Vec = (0..32).collect(); + let account_hash = AccountHash::try_from(&bytes[..]).expect("should create account hash"); + assert_eq!(&bytes, &account_hash.as_bytes()); + } + + #[test] + fn account_hash_from_slice_too_small() { + let _account_hash = + AccountHash::try_from(&[0u8; 31][..]).expect_err("should not create account hash"); + } + + #[test] + fn account_hash_from_slice_too_big() { + let _account_hash = + AccountHash::try_from(&[0u8; 33][..]).expect_err("should not create account hash"); + } + + #[test] + fn try_from_i32_for_set_threshold_failure() { + let max_valid_value_for_variant = SetThresholdFailure::InsufficientTotalWeight as i32; + assert_eq!( + Err(TryFromIntError(())), + SetThresholdFailure::try_from(max_valid_value_for_variant + 1), + "Did you forget to update `SetThresholdFailure::try_from` for a new variant of \ + `SetThresholdFailure`, or `max_valid_value_for_variant` in this test?" 
+ ); + } + + #[test] + fn try_from_i32_for_add_key_failure() { + let max_valid_value_for_variant = AddKeyFailure::PermissionDenied as i32; + assert_eq!( + Err(TryFromIntError(())), + AddKeyFailure::try_from(max_valid_value_for_variant + 1), + "Did you forget to update `AddKeyFailure::try_from` for a new variant of \ + `AddKeyFailure`, or `max_valid_value_for_variant` in this test?" + ); + } + + #[test] + fn try_from_i32_for_remove_key_failure() { + let max_valid_value_for_variant = RemoveKeyFailure::ThresholdViolation as i32; + assert_eq!( + Err(TryFromIntError(())), + RemoveKeyFailure::try_from(max_valid_value_for_variant + 1), + "Did you forget to update `RemoveKeyFailure::try_from` for a new variant of \ + `RemoveKeyFailure`, or `max_valid_value_for_variant` in this test?" + ); + } + + #[test] + fn try_from_i32_for_update_key_failure() { + let max_valid_value_for_variant = UpdateKeyFailure::ThresholdViolation as i32; + assert_eq!( + Err(TryFromIntError(())), + UpdateKeyFailure::try_from(max_valid_value_for_variant + 1), + "Did you forget to update `UpdateKeyFailure::try_from` for a new variant of \ + `UpdateKeyFailure`, or `max_valid_value_for_variant` in this test?" 
+ ); + } + + #[test] + fn account_hash_from_str() { + let account_hash = AccountHash([3; 32]); + let encoded = account_hash.to_formatted_string(); + let decoded = AccountHash::from_formatted_str(&encoded).unwrap(); + assert_eq!(account_hash, decoded); + + let invalid_prefix = + "accounthash-0000000000000000000000000000000000000000000000000000000000000000"; + assert!(AccountHash::from_formatted_str(invalid_prefix).is_err()); + + let invalid_prefix = + "account-hash0000000000000000000000000000000000000000000000000000000000000000"; + assert!(AccountHash::from_formatted_str(invalid_prefix).is_err()); + + let short_addr = + "account-hash-00000000000000000000000000000000000000000000000000000000000000"; + assert!(AccountHash::from_formatted_str(short_addr).is_err()); + + let long_addr = + "account-hash-000000000000000000000000000000000000000000000000000000000000000000"; + assert!(AccountHash::from_formatted_str(long_addr).is_err()); + + let invalid_hex = + "account-hash-000000000000000000000000000000000000000000000000000000000000000g"; + assert!(AccountHash::from_formatted_str(invalid_hex).is_err()); + } + + #[test] + fn account_hash_serde_roundtrip() { + let account_hash = AccountHash([255; 32]); + let serialized = bincode::serialize(&account_hash).unwrap(); + let decoded = bincode::deserialize(&serialized).unwrap(); + assert_eq!(account_hash, decoded); + } + + #[test] + fn account_hash_json_roundtrip() { + let account_hash = AccountHash([255; 32]); + let json_string = serde_json::to_string_pretty(&account_hash).unwrap(); + let decoded = serde_json::from_str(&json_string).unwrap(); + assert_eq!(account_hash, decoded); + } + + #[test] + fn associated_keys_can_authorize_keys() { + let key_1 = AccountHash::new([0; 32]); + let key_2 = AccountHash::new([1; 32]); + let key_3 = AccountHash::new([2; 32]); + let mut keys = AssociatedKeys::default(); + + keys.add_key(key_2, Weight::new(2)) + .expect("should add key_1"); + keys.add_key(key_1, Weight::new(1)) + .expect("should add 
key_1"); + keys.add_key(key_3, Weight::new(3)) + .expect("should add key_1"); + + let account = Account::new( + AccountHash::new([0u8; 32]), + NamedKeys::new(), + URef::new([0u8; 32], AccessRights::READ_ADD_WRITE), + keys, + // deploy: 33 (3*11) + ActionThresholds::new(Weight::new(33), Weight::new(48)) + .expect("should create thresholds"), + ); + + assert!(account.can_authorize(&BTreeSet::from_iter(vec![key_3, key_2, key_1]))); + assert!(account.can_authorize(&BTreeSet::from_iter(vec![key_1, key_3, key_2]))); + + assert!(account.can_authorize(&BTreeSet::from_iter(vec![key_1, key_2]))); + assert!(account.can_authorize(&BTreeSet::from_iter(vec![key_1]))); + + assert!(!account.can_authorize(&BTreeSet::from_iter(vec![ + key_1, + key_2, + AccountHash::new([42; 32]) + ]))); + assert!(!account.can_authorize(&BTreeSet::from_iter(vec![ + AccountHash::new([42; 32]), + key_1, + key_2 + ]))); + assert!(!account.can_authorize(&BTreeSet::from_iter(vec![ + AccountHash::new([43; 32]), + AccountHash::new([44; 32]), + AccountHash::new([42; 32]) + ]))); + assert!(!account.can_authorize(&BTreeSet::new())); + } + + #[test] + fn account_can_deploy_with() { + let associated_keys = { + let mut res = AssociatedKeys::new(AccountHash::new([1u8; 32]), Weight::new(1)); + res.add_key(AccountHash::new([2u8; 32]), Weight::new(11)) + .expect("should add key 1"); + res.add_key(AccountHash::new([3u8; 32]), Weight::new(11)) + .expect("should add key 2"); + res.add_key(AccountHash::new([4u8; 32]), Weight::new(11)) + .expect("should add key 3"); + res + }; + let account = Account::new( + AccountHash::new([0u8; 32]), + NamedKeys::new(), + URef::new([0u8; 32], AccessRights::READ_ADD_WRITE), + associated_keys, + // deploy: 33 (3*11) + ActionThresholds::new(Weight::new(33), Weight::new(48)) + .expect("should create thresholds"), + ); + + // sum: 22, required 33 - can't deploy + assert!(!account.can_deploy_with(&BTreeSet::from_iter(vec![ + AccountHash::new([3u8; 32]), + AccountHash::new([2u8; 32]), + ]))); 
+ + // sum: 33, required 33 - can deploy + assert!(account.can_deploy_with(&BTreeSet::from_iter(vec![ + AccountHash::new([4u8; 32]), + AccountHash::new([3u8; 32]), + AccountHash::new([2u8; 32]), + ]))); + + // sum: 34, required 33 - can deploy + assert!(account.can_deploy_with(&BTreeSet::from_iter(vec![ + AccountHash::new([2u8; 32]), + AccountHash::new([1u8; 32]), + AccountHash::new([4u8; 32]), + AccountHash::new([3u8; 32]), + ]))); + } + + #[test] + fn account_can_manage_keys_with() { + let associated_keys = { + let mut res = AssociatedKeys::new(AccountHash::new([1u8; 32]), Weight::new(1)); + res.add_key(AccountHash::new([2u8; 32]), Weight::new(11)) + .expect("should add key 1"); + res.add_key(AccountHash::new([3u8; 32]), Weight::new(11)) + .expect("should add key 2"); + res.add_key(AccountHash::new([4u8; 32]), Weight::new(11)) + .expect("should add key 3"); + res + }; + let account = Account::new( + AccountHash::new([0u8; 32]), + NamedKeys::new(), + URef::new([0u8; 32], AccessRights::READ_ADD_WRITE), + associated_keys, + // deploy: 33 (3*11) + ActionThresholds::new(Weight::new(11), Weight::new(33)) + .expect("should create thresholds"), + ); + + // sum: 22, required 33 - can't manage + assert!(!account.can_manage_keys_with(&BTreeSet::from_iter(vec![ + AccountHash::new([3u8; 32]), + AccountHash::new([2u8; 32]), + ]))); + + // sum: 33, required 33 - can manage + assert!(account.can_manage_keys_with(&BTreeSet::from_iter(vec![ + AccountHash::new([4u8; 32]), + AccountHash::new([3u8; 32]), + AccountHash::new([2u8; 32]), + ]))); + + // sum: 34, required 33 - can manage + assert!(account.can_manage_keys_with(&BTreeSet::from_iter(vec![ + AccountHash::new([2u8; 32]), + AccountHash::new([1u8; 32]), + AccountHash::new([4u8; 32]), + AccountHash::new([3u8; 32]), + ]))); + } + + #[test] + fn set_action_threshold_higher_than_total_weight() { + let identity_key = AccountHash::new([1u8; 32]); + let key_1 = AccountHash::new([2u8; 32]); + let key_2 = AccountHash::new([3u8; 32]); + 
let key_3 = AccountHash::new([4u8; 32]); + let associated_keys = { + let mut res = AssociatedKeys::new(identity_key, Weight::new(1)); + res.add_key(key_1, Weight::new(2)) + .expect("should add key 1"); + res.add_key(key_2, Weight::new(3)) + .expect("should add key 2"); + res.add_key(key_3, Weight::new(4)) + .expect("should add key 3"); + res + }; + let mut account = Account::new( + AccountHash::new([0u8; 32]), + NamedKeys::new(), + URef::new([0u8; 32], AccessRights::READ_ADD_WRITE), + associated_keys, + // deploy: 33 (3*11) + ActionThresholds::new(Weight::new(33), Weight::new(48)) + .expect("should create thresholds"), + ); + + assert_eq!( + account + .set_action_threshold(ActionType::Deployment, Weight::new(1 + 2 + 3 + 4 + 1)) + .unwrap_err(), + SetThresholdFailure::InsufficientTotalWeight, + ); + assert_eq!( + account + .set_action_threshold(ActionType::Deployment, Weight::new(1 + 2 + 3 + 4 + 245)) + .unwrap_err(), + SetThresholdFailure::InsufficientTotalWeight, + ) + } + + #[test] + fn remove_key_would_violate_action_thresholds() { + let identity_key = AccountHash::new([1u8; 32]); + let key_1 = AccountHash::new([2u8; 32]); + let key_2 = AccountHash::new([3u8; 32]); + let key_3 = AccountHash::new([4u8; 32]); + let associated_keys = { + let mut res = AssociatedKeys::new(identity_key, Weight::new(1)); + res.add_key(key_1, Weight::new(2)) + .expect("should add key 1"); + res.add_key(key_2, Weight::new(3)) + .expect("should add key 2"); + res.add_key(key_3, Weight::new(4)) + .expect("should add key 3"); + res + }; + let mut account = Account::new( + AccountHash::new([0u8; 32]), + NamedKeys::new(), + URef::new([0u8; 32], AccessRights::READ_ADD_WRITE), + associated_keys, + // deploy: 33 (3*11) + ActionThresholds::new(Weight::new(1 + 2 + 3 + 4), Weight::new(1 + 2 + 3 + 4 + 5)) + .expect("should create thresholds"), + ); + + assert_eq!( + account.remove_associated_key(key_3).unwrap_err(), + RemoveKeyFailure::ThresholdViolation, + ) + } + + #[test] + fn 
updating_key_would_violate_action_thresholds() { + let identity_key = AccountHash::new([1u8; 32]); + let identity_key_weight = Weight::new(1); + let key_1 = AccountHash::new([2u8; 32]); + let key_1_weight = Weight::new(2); + let key_2 = AccountHash::new([3u8; 32]); + let key_2_weight = Weight::new(3); + let key_3 = AccountHash::new([4u8; 32]); + let key_3_weight = Weight::new(4); + let associated_keys = { + let mut res = AssociatedKeys::new(identity_key, identity_key_weight); + res.add_key(key_1, key_1_weight).expect("should add key 1"); + res.add_key(key_2, key_2_weight).expect("should add key 2"); + res.add_key(key_3, key_3_weight).expect("should add key 3"); + // 1 + 2 + 3 + 4 + res + }; + + let deployment_threshold = Weight::new( + identity_key_weight.value() + + key_1_weight.value() + + key_2_weight.value() + + key_3_weight.value(), + ); + let key_management_threshold = Weight::new(deployment_threshold.value() + 1); + let mut account = Account::new( + identity_key, + NamedKeys::new(), + URef::new([0u8; 32], AccessRights::READ_ADD_WRITE), + associated_keys, + // deploy: 33 (3*11) + ActionThresholds::new(deployment_threshold, key_management_threshold) + .expect("should create thresholds"), + ); + + // Decreases by 3 + assert_eq!( + account + .clone() + .update_associated_key(key_3, Weight::new(1)) + .unwrap_err(), + UpdateKeyFailure::ThresholdViolation, + ); + + // increase total weight (12) + account + .update_associated_key(identity_key, Weight::new(3)) + .unwrap(); + + // variant a) decrease total weight by 1 (total 11) + account + .clone() + .update_associated_key(key_3, Weight::new(3)) + .unwrap(); + // variant b) decrease total weight by 3 (total 9) - fail + assert_eq!( + account + .update_associated_key(key_3, Weight::new(1)) + .unwrap_err(), + UpdateKeyFailure::ThresholdViolation + ); + } + + #[test] + fn overflowing_should_allow_removal() { + let identity_key = AccountHash::new([42; 32]); + let key_1 = AccountHash::new([2u8; 32]); + let key_2 = 
AccountHash::new([3u8; 32]); + + let associated_keys = { + // Identity + let mut res = AssociatedKeys::new(identity_key, Weight::new(1)); + + // Spare key + res.add_key(key_1, Weight::new(2)) + .expect("should add key 1"); + // Big key + res.add_key(key_2, Weight::new(255)) + .expect("should add key 2"); + + res + }; + + let mut account = Account::new( + identity_key, + NamedKeys::new(), + URef::new([0u8; 32], AccessRights::READ_ADD_WRITE), + associated_keys, + ActionThresholds::new(Weight::new(1), Weight::new(254)) + .expect("should create thresholds"), + ); + + account.remove_associated_key(key_1).expect("should work") + } + + #[test] + fn overflowing_should_allow_updating() { + let identity_key = AccountHash::new([1; 32]); + let identity_key_weight = Weight::new(1); + let key_1 = AccountHash::new([2u8; 32]); + let key_1_weight = Weight::new(3); + let key_2 = AccountHash::new([3u8; 32]); + let key_2_weight = Weight::new(255); + let deployment_threshold = Weight::new(1); + let key_management_threshold = Weight::new(254); + + let associated_keys = { + // Identity + let mut res = AssociatedKeys::new(identity_key, identity_key_weight); + + // Spare key + res.add_key(key_1, key_1_weight).expect("should add key 1"); + // Big key + res.add_key(key_2, key_2_weight).expect("should add key 2"); + + res + }; + + let mut account = Account::new( + identity_key, + NamedKeys::new(), + URef::new([0u8; 32], AccessRights::READ_ADD_WRITE), + associated_keys, + ActionThresholds::new(deployment_threshold, key_management_threshold) + .expect("should create thresholds"), + ); + + // decrease so total weight would be changed from 1 + 3 + 255 to 1 + 1 + 255 + account + .update_associated_key(key_1, Weight::new(1)) + .expect("should work"); + } + + #[test] + fn should_extract_access_rights() { + const MAIN_PURSE: URef = URef::new([2; 32], AccessRights::READ_ADD_WRITE); + const OTHER_UREF: URef = URef::new([3; 32], AccessRights::READ); + + let account_hash = AccountHash::new([1u8; 32]); + 
let mut named_keys = NamedKeys::new(); + named_keys.insert("a".to_string(), Key::URef(OTHER_UREF)); + let associated_keys = AssociatedKeys::new(account_hash, Weight::new(1)); + let account = Account::new( + account_hash, + named_keys, + MAIN_PURSE, + associated_keys, + ActionThresholds::new(Weight::new(1), Weight::new(1)) + .expect("should create thresholds"), + ); + + let actual_access_rights = account.extract_access_rights(); + + let expected_access_rights = + ContextAccessRights::new(Key::from(account_hash), vec![MAIN_PURSE, OTHER_UREF]); + assert_eq!(actual_access_rights, expected_access_rights) + } +} + +#[cfg(test)] +mod proptests { + use proptest::prelude::*; + + use crate::bytesrepr; + + use super::*; + + proptest! { + #[test] + fn test_value_account(acct in gens::account_arb()) { + bytesrepr::test_serialization_roundtrip(&acct); + } + } +} diff --git a/casper_types/src/account/account_hash.rs b/casper_types/src/account/account_hash.rs new file mode 100644 index 00000000..5c798be5 --- /dev/null +++ b/casper_types/src/account/account_hash.rs @@ -0,0 +1,218 @@ +use alloc::{string::String, vec::Vec}; +use core::{ + convert::{From, TryFrom}, + fmt::{Debug, Display, Formatter}, +}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +#[cfg(feature = "json-schema")] +use schemars::{gen::SchemaGenerator, schema::Schema, JsonSchema}; +use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; + +use super::FromStrError; +use crate::{ + bytesrepr::{Error, FromBytes, ToBytes}, + checksummed_hex, crypto, CLType, CLTyped, PublicKey, BLAKE2B_DIGEST_LENGTH, +}; + +/// The length in bytes of a [`AccountHash`]. +pub const ACCOUNT_HASH_LENGTH: usize = 32; +/// The prefix applied to the hex-encoded `AccountHash` to produce a formatted string +/// representation. 
+pub const ACCOUNT_HASH_FORMATTED_STRING_PREFIX: &str = "account-hash-"; + +/// A newtype wrapping an array which contains the raw bytes of +/// the AccountHash, a hash of Public Key and Algorithm +#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct AccountHash(pub [u8; ACCOUNT_HASH_LENGTH]); + +impl AccountHash { + /// Constructs a new `AccountHash` instance from the raw bytes of an Public Key Account Hash. + pub const fn new(value: [u8; ACCOUNT_HASH_LENGTH]) -> AccountHash { + AccountHash(value) + } + + /// Returns the raw bytes of the account hash as an array. + pub fn value(&self) -> [u8; ACCOUNT_HASH_LENGTH] { + self.0 + } + + /// Returns the raw bytes of the account hash as a `slice`. + pub fn as_bytes(&self) -> &[u8] { + &self.0 + } + + /// Formats the `AccountHash` for users getting and putting. + pub fn to_formatted_string(self) -> String { + format!( + "{}{}", + ACCOUNT_HASH_FORMATTED_STRING_PREFIX, + base16::encode_lower(&self.0), + ) + } + + /// Parses a string formatted as per `Self::to_formatted_string()` into an `AccountHash`. + pub fn from_formatted_str(input: &str) -> Result { + let remainder = input + .strip_prefix(ACCOUNT_HASH_FORMATTED_STRING_PREFIX) + .ok_or(FromStrError::InvalidPrefix)?; + let bytes = + <[u8; ACCOUNT_HASH_LENGTH]>::try_from(checksummed_hex::decode(remainder)?.as_ref())?; + Ok(AccountHash(bytes)) + } + + /// Parses a `PublicKey` and outputs the corresponding account hash. 
+ pub fn from_public_key( + public_key: &PublicKey, + blake2b_hash_fn: impl Fn(Vec) -> [u8; BLAKE2B_DIGEST_LENGTH], + ) -> Self { + const SYSTEM_LOWERCASE: &str = "system"; + const ED25519_LOWERCASE: &str = "ed25519"; + const SECP256K1_LOWERCASE: &str = "secp256k1"; + + let algorithm_name = match public_key { + PublicKey::System => SYSTEM_LOWERCASE, + PublicKey::Ed25519(_) => ED25519_LOWERCASE, + PublicKey::Secp256k1(_) => SECP256K1_LOWERCASE, + }; + let public_key_bytes: Vec = public_key.into(); + + // Prepare preimage based on the public key parameters. + let preimage = { + let mut data = Vec::with_capacity(algorithm_name.len() + public_key_bytes.len() + 1); + data.extend(algorithm_name.as_bytes()); + data.push(0); + data.extend(public_key_bytes); + data + }; + // Hash the preimage data using blake2b256 and return it. + let digest = blake2b_hash_fn(preimage); + Self::new(digest) + } +} + +#[cfg(feature = "json-schema")] +impl JsonSchema for AccountHash { + fn schema_name() -> String { + String::from("AccountHash") + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let schema = gen.subschema_for::(); + let mut schema_object = schema.into_object(); + schema_object.metadata().description = Some("Hex-encoded account hash.".to_string()); + schema_object.into() + } +} + +impl Serialize for AccountHash { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + self.to_formatted_string().serialize(serializer) + } else { + self.0.serialize(serializer) + } + } +} + +impl<'de> Deserialize<'de> for AccountHash { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let formatted_string = String::deserialize(deserializer)?; + AccountHash::from_formatted_str(&formatted_string).map_err(SerdeError::custom) + } else { + let bytes = <[u8; ACCOUNT_HASH_LENGTH]>::deserialize(deserializer)?; + Ok(AccountHash(bytes)) + } + } +} + +impl TryFrom<&[u8]> for AccountHash { + type Error = 
TryFromSliceForAccountHashError; + + fn try_from(bytes: &[u8]) -> Result { + <[u8; ACCOUNT_HASH_LENGTH]>::try_from(bytes) + .map(AccountHash::new) + .map_err(|_| TryFromSliceForAccountHashError(())) + } +} + +impl TryFrom<&alloc::vec::Vec> for AccountHash { + type Error = TryFromSliceForAccountHashError; + + fn try_from(bytes: &Vec) -> Result { + <[u8; ACCOUNT_HASH_LENGTH]>::try_from(bytes as &[u8]) + .map(AccountHash::new) + .map_err(|_| TryFromSliceForAccountHashError(())) + } +} + +impl From<&PublicKey> for AccountHash { + fn from(public_key: &PublicKey) -> Self { + AccountHash::from_public_key(public_key, crypto::blake2b) + } +} + +impl Display for AccountHash { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + write!(f, "{}", base16::encode_lower(&self.0)) + } +} + +impl Debug for AccountHash { + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + write!(f, "AccountHash({})", base16::encode_lower(&self.0)) + } +} + +impl CLTyped for AccountHash { + fn cl_type() -> CLType { + CLType::ByteArray(ACCOUNT_HASH_LENGTH as u32) + } +} + +impl ToBytes for AccountHash { + #[inline(always)] + fn to_bytes(&self) -> Result, Error> { + self.0.to_bytes() + } + + #[inline(always)] + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + #[inline(always)] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.extend_from_slice(&self.0); + Ok(()) + } +} + +impl FromBytes for AccountHash { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (bytes, rem) = FromBytes::from_bytes(bytes)?; + Ok((AccountHash::new(bytes), rem)) + } +} + +impl AsRef<[u8]> for AccountHash { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + +/// Associated error type of `TryFrom<&[u8]>` for [`AccountHash`]. 
+#[derive(Debug)] +pub struct TryFromSliceForAccountHashError(()); + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> AccountHash { + AccountHash::new(rng.gen()) + } +} diff --git a/casper_types/src/account/action_thresholds.rs b/casper_types/src/account/action_thresholds.rs new file mode 100644 index 00000000..48eb21b3 --- /dev/null +++ b/casper_types/src/account/action_thresholds.rs @@ -0,0 +1,170 @@ +//! This module contains types and functions for managing action thresholds. + +use alloc::vec::Vec; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +use crate::{ + account::{ActionType, SetThresholdFailure, Weight, WEIGHT_SERIALIZED_LENGTH}, + bytesrepr::{self, Error, FromBytes, ToBytes}, +}; + +/// Thresholds that have to be met when executing an action of a certain type. +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct ActionThresholds { + /// Threshold for deploy execution. + pub deployment: Weight, + /// Threshold for managing action threshold. + pub key_management: Weight, +} + +impl ActionThresholds { + /// Creates new ActionThresholds object with provided weights + /// + /// Requires deployment threshold to be lower than or equal to + /// key management threshold. + pub fn new( + deployment: Weight, + key_management: Weight, + ) -> Result { + if deployment > key_management { + return Err(SetThresholdFailure::DeploymentThreshold); + } + Ok(ActionThresholds { + deployment, + key_management, + }) + } + /// Sets new threshold for [ActionType::Deployment]. + /// Should return an error if setting new threshold for `action_type` breaks + /// one of the invariants. Currently, invariant is that + /// `ActionType::Deployment` threshold shouldn't be higher than any + /// other, which should be checked both when increasing `Deployment` + /// threshold and decreasing the other. 
+ pub fn set_deployment_threshold( + &mut self, + new_threshold: Weight, + ) -> Result<(), SetThresholdFailure> { + if new_threshold > self.key_management { + Err(SetThresholdFailure::DeploymentThreshold) + } else { + self.deployment = new_threshold; + Ok(()) + } + } + + /// Sets new threshold for [ActionType::KeyManagement]. + pub fn set_key_management_threshold( + &mut self, + new_threshold: Weight, + ) -> Result<(), SetThresholdFailure> { + if self.deployment > new_threshold { + Err(SetThresholdFailure::KeyManagementThreshold) + } else { + self.key_management = new_threshold; + Ok(()) + } + } + + /// Returns the deployment action threshold. + pub fn deployment(&self) -> &Weight { + &self.deployment + } + + /// Returns key management action threshold. + pub fn key_management(&self) -> &Weight { + &self.key_management + } + + /// Unified function that takes an action type, and changes appropriate + /// threshold defined by the [ActionType] variants. + pub fn set_threshold( + &mut self, + action_type: ActionType, + new_threshold: Weight, + ) -> Result<(), SetThresholdFailure> { + match action_type { + ActionType::Deployment => self.set_deployment_threshold(new_threshold), + ActionType::KeyManagement => self.set_key_management_threshold(new_threshold), + } + } +} + +impl Default for ActionThresholds { + fn default() -> Self { + ActionThresholds { + deployment: Weight::new(1), + key_management: Weight::new(1), + } + } +} + +impl ToBytes for ActionThresholds { + fn to_bytes(&self) -> Result, Error> { + let mut result = bytesrepr::unchecked_allocate_buffer(self); + result.append(&mut self.deployment.to_bytes()?); + result.append(&mut self.key_management.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + 2 * WEIGHT_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.deployment().write_bytes(writer)?; + self.key_management().write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for 
ActionThresholds { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (deployment, rem) = Weight::from_bytes(bytes)?; + let (key_management, rem) = Weight::from_bytes(rem)?; + let ret = ActionThresholds { + deployment, + key_management, + }; + Ok((ret, rem)) + } +} + +#[doc(hidden)] +#[cfg(any(feature = "testing", feature = "gens", test))] +pub mod gens { + use proptest::prelude::*; + + use super::ActionThresholds; + + pub fn action_thresholds_arb() -> impl Strategy { + Just(Default::default()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn should_create_new_action_thresholds() { + let action_thresholds = ActionThresholds::new(Weight::new(1), Weight::new(42)).unwrap(); + assert_eq!(*action_thresholds.deployment(), Weight::new(1)); + assert_eq!(*action_thresholds.key_management(), Weight::new(42)); + } + + #[test] + fn should_not_create_action_thresholds_with_invalid_deployment_threshold() { + // deployment cant be greater than key management + assert!(ActionThresholds::new(Weight::new(5), Weight::new(1)).is_err()); + } + + #[test] + fn serialization_roundtrip() { + let action_thresholds = ActionThresholds::new(Weight::new(1), Weight::new(42)).unwrap(); + bytesrepr::test_serialization_roundtrip(&action_thresholds); + } +} diff --git a/casper_types/src/account/action_type.rs b/casper_types/src/account/action_type.rs new file mode 100644 index 00000000..2a4862a5 --- /dev/null +++ b/casper_types/src/account/action_type.rs @@ -0,0 +1,32 @@ +use core::convert::TryFrom; + +use super::TryFromIntError; + +/// The various types of action which can be performed in the context of a given account. +#[repr(u32)] +pub enum ActionType { + /// Represents performing a deploy. + Deployment = 0, + /// Represents changing the associated keys (i.e. map of [`AccountHash`](super::AccountHash)s + /// to [`Weight`](super::Weight)s) or action thresholds (i.e. 
the total + /// [`Weight`](super::Weight)s of signing [`AccountHash`](super::AccountHash)s required to + /// perform various actions). + KeyManagement = 1, +} + +// This conversion is not intended to be used by third party crates. +#[doc(hidden)] +impl TryFrom for ActionType { + type Error = TryFromIntError; + + fn try_from(value: u32) -> Result { + // This doesn't use `num_derive` traits such as FromPrimitive and ToPrimitive + // that helps to automatically create `from_u32` and `to_u32`. This approach + // gives better control over generated code. + match value { + d if d == ActionType::Deployment as u32 => Ok(ActionType::Deployment), + d if d == ActionType::KeyManagement as u32 => Ok(ActionType::KeyManagement), + _ => Err(TryFromIntError(())), + } + } +} diff --git a/casper_types/src/account/associated_keys.rs b/casper_types/src/account/associated_keys.rs new file mode 100644 index 00000000..698fa071 --- /dev/null +++ b/casper_types/src/account/associated_keys.rs @@ -0,0 +1,360 @@ +//! This module contains types and functions for working with keys associated with an account. + +use alloc::{ + collections::{btree_map::Entry, BTreeMap, BTreeSet}, + vec::Vec, +}; + +use core::convert::TryInto; +#[cfg(feature = "datasize")] +use datasize::DataSize; + +use serde::{Deserialize, Serialize}; + +use crate::{ + account::{AccountHash, AddKeyFailure, RemoveKeyFailure, UpdateKeyFailure, Weight}, + bytesrepr::{self, Error, FromBytes, ToBytes}, +}; + +/// A mapping that represents the association of a [`Weight`] with an [`AccountHash`]. +#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct AssociatedKeys(BTreeMap); + +impl AssociatedKeys { + /// Constructs a new AssociatedKeys. + pub fn new(key: AccountHash, weight: Weight) -> AssociatedKeys { + let mut bt: BTreeMap = BTreeMap::new(); + bt.insert(key, weight); + AssociatedKeys(bt) + } + + /// Adds new AssociatedKey to the set. 
+ /// Returns true if added successfully, false otherwise. + pub fn add_key(&mut self, key: AccountHash, weight: Weight) -> Result<(), AddKeyFailure> { + match self.0.entry(key) { + Entry::Vacant(entry) => { + entry.insert(weight); + } + Entry::Occupied(_) => return Err(AddKeyFailure::DuplicateKey), + } + Ok(()) + } + + /// Removes key from the associated keys set. + /// Returns true if value was found in the set prior to the removal, false + /// otherwise. + pub fn remove_key(&mut self, key: &AccountHash) -> Result<(), RemoveKeyFailure> { + self.0 + .remove(key) + .map(|_| ()) + .ok_or(RemoveKeyFailure::MissingKey) + } + + /// Adds new AssociatedKey to the set. + /// Returns true if added successfully, false otherwise. + pub fn update_key(&mut self, key: AccountHash, weight: Weight) -> Result<(), UpdateKeyFailure> { + match self.0.entry(key) { + Entry::Vacant(_) => { + return Err(UpdateKeyFailure::MissingKey); + } + Entry::Occupied(mut entry) => { + *entry.get_mut() = weight; + } + } + Ok(()) + } + + /// Returns the weight of an account hash. + pub fn get(&self, key: &AccountHash) -> Option<&Weight> { + self.0.get(key) + } + + /// Returns `true` if a given key exists. + pub fn contains_key(&self, key: &AccountHash) -> bool { + self.0.contains_key(key) + } + + /// Returns an iterator over the account hash and the weights. + pub fn iter(&self) -> impl Iterator { + self.0.iter() + } + + /// Returns the count of the associated keys. + pub fn len(&self) -> usize { + self.0.len() + } + + /// Returns `true` if the associated keys are empty. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Helper method that calculates weight for keys that comes from any + /// source. + /// + /// This method is not concerned about uniqueness of the passed iterable. 
+ /// Uniqueness is determined based on the input collection properties, + /// which is either BTreeSet (in [`AssociatedKeys::calculate_keys_weight`]) + /// or BTreeMap (in [`AssociatedKeys::total_keys_weight`]). + fn calculate_any_keys_weight<'a>(&self, keys: impl Iterator) -> Weight { + let total = keys + .filter_map(|key| self.0.get(key)) + .fold(0u8, |acc, w| acc.saturating_add(w.value())); + + Weight::new(total) + } + + /// Calculates total weight of authorization keys provided by an argument + pub fn calculate_keys_weight(&self, authorization_keys: &BTreeSet) -> Weight { + self.calculate_any_keys_weight(authorization_keys.iter()) + } + + /// Calculates total weight of all authorization keys + pub fn total_keys_weight(&self) -> Weight { + self.calculate_any_keys_weight(self.0.keys()) + } + + /// Calculates total weight of all authorization keys excluding a given key + pub fn total_keys_weight_excluding(&self, account_hash: AccountHash) -> Weight { + self.calculate_any_keys_weight(self.0.keys().filter(|&&element| element != account_hash)) + } +} + +impl From> for AssociatedKeys { + fn from(associated_keys: BTreeMap) -> Self { + Self(associated_keys) + } +} + +impl From for BTreeMap { + fn from(associated_keys: AssociatedKeys) -> Self { + associated_keys.0 + } +} + +impl ToBytes for AssociatedKeys { + fn to_bytes(&self) -> Result, Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + let length_32: u32 = self + .0 + .len() + .try_into() + .map_err(|_| Error::NotRepresentable)?; + writer.extend_from_slice(&length_32.to_le_bytes()); + for (key, weight) in self.0.iter() { + key.write_bytes(writer)?; + weight.write_bytes(writer)?; + } + Ok(()) + } +} + +impl FromBytes for AssociatedKeys { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (associated_keys, rem) = FromBytes::from_bytes(bytes)?; + 
Ok((AssociatedKeys(associated_keys), rem)) + } +} + +#[doc(hidden)] +#[cfg(any(feature = "testing", feature = "gens", test))] +pub mod gens { + use proptest::prelude::*; + + use crate::gens::{account_hash_arb, weight_arb}; + + use super::AssociatedKeys; + + pub fn associated_keys_arb() -> impl Strategy { + proptest::collection::btree_map(account_hash_arb(), weight_arb(), 10).prop_map(|keys| { + let mut associated_keys = AssociatedKeys::default(); + keys.into_iter().for_each(|(k, v)| { + associated_keys.add_key(k, v).unwrap(); + }); + associated_keys + }) + } +} + +#[cfg(test)] +mod tests { + use std::{collections::BTreeSet, iter::FromIterator}; + + use crate::{ + account::{AccountHash, AddKeyFailure, Weight, ACCOUNT_HASH_LENGTH}, + bytesrepr, + }; + + use super::*; + + #[test] + fn associated_keys_add() { + let mut keys = + AssociatedKeys::new(AccountHash::new([0u8; ACCOUNT_HASH_LENGTH]), Weight::new(1)); + let new_pk = AccountHash::new([1u8; ACCOUNT_HASH_LENGTH]); + let new_pk_weight = Weight::new(2); + assert!(keys.add_key(new_pk, new_pk_weight).is_ok()); + assert_eq!(keys.get(&new_pk), Some(&new_pk_weight)) + } + + #[test] + fn associated_keys_add_duplicate() { + let pk = AccountHash::new([0u8; ACCOUNT_HASH_LENGTH]); + let weight = Weight::new(1); + let mut keys = AssociatedKeys::new(pk, weight); + assert_eq!( + keys.add_key(pk, Weight::new(10)), + Err(AddKeyFailure::DuplicateKey) + ); + assert_eq!(keys.get(&pk), Some(&weight)); + } + + #[test] + fn associated_keys_remove() { + let pk = AccountHash::new([0u8; ACCOUNT_HASH_LENGTH]); + let weight = Weight::new(1); + let mut keys = AssociatedKeys::new(pk, weight); + assert!(keys.remove_key(&pk).is_ok()); + assert!(keys + .remove_key(&AccountHash::new([1u8; ACCOUNT_HASH_LENGTH])) + .is_err()); + } + + #[test] + fn associated_keys_update() { + let pk1 = AccountHash::new([0u8; ACCOUNT_HASH_LENGTH]); + let pk2 = AccountHash::new([1u8; ACCOUNT_HASH_LENGTH]); + let weight = Weight::new(1); + let mut keys = 
AssociatedKeys::new(pk1, weight); + assert!(matches!( + keys.update_key(pk2, Weight::new(2)) + .expect_err("should get error"), + UpdateKeyFailure::MissingKey + )); + keys.add_key(pk2, Weight::new(1)).unwrap(); + assert_eq!(keys.get(&pk2), Some(&Weight::new(1))); + keys.update_key(pk2, Weight::new(2)).unwrap(); + assert_eq!(keys.get(&pk2), Some(&Weight::new(2))); + } + + #[test] + fn associated_keys_calculate_keys_once() { + let key_1 = AccountHash::new([0; 32]); + let key_2 = AccountHash::new([1; 32]); + let key_3 = AccountHash::new([2; 32]); + let mut keys = AssociatedKeys::default(); + + keys.add_key(key_2, Weight::new(2)) + .expect("should add key_1"); + keys.add_key(key_1, Weight::new(1)) + .expect("should add key_1"); + keys.add_key(key_3, Weight::new(3)) + .expect("should add key_1"); + + assert_eq!( + keys.calculate_keys_weight(&BTreeSet::from_iter(vec![ + key_1, key_2, key_3, key_1, key_2, key_3, + ])), + Weight::new(1 + 2 + 3) + ); + } + + #[test] + fn associated_keys_total_weight() { + let associated_keys = { + let mut res = AssociatedKeys::new(AccountHash::new([1u8; 32]), Weight::new(1)); + res.add_key(AccountHash::new([2u8; 32]), Weight::new(11)) + .expect("should add key 1"); + res.add_key(AccountHash::new([3u8; 32]), Weight::new(12)) + .expect("should add key 2"); + res.add_key(AccountHash::new([4u8; 32]), Weight::new(13)) + .expect("should add key 3"); + res + }; + assert_eq!( + associated_keys.total_keys_weight(), + Weight::new(1 + 11 + 12 + 13) + ); + } + + #[test] + fn associated_keys_total_weight_excluding() { + let identity_key = AccountHash::new([1u8; 32]); + let identity_key_weight = Weight::new(1); + + let key_1 = AccountHash::new([2u8; 32]); + let key_1_weight = Weight::new(11); + + let key_2 = AccountHash::new([3u8; 32]); + let key_2_weight = Weight::new(12); + + let key_3 = AccountHash::new([4u8; 32]); + let key_3_weight = Weight::new(13); + + let associated_keys = { + let mut res = AssociatedKeys::new(identity_key, identity_key_weight); 
+ res.add_key(key_1, key_1_weight).expect("should add key 1"); + res.add_key(key_2, key_2_weight).expect("should add key 2"); + res.add_key(key_3, key_3_weight).expect("should add key 3"); + res + }; + assert_eq!( + associated_keys.total_keys_weight_excluding(key_2), + Weight::new(identity_key_weight.value() + key_1_weight.value() + key_3_weight.value()) + ); + } + + #[test] + fn overflowing_keys_weight() { + let identity_key = AccountHash::new([1u8; 32]); + let key_1 = AccountHash::new([2u8; 32]); + let key_2 = AccountHash::new([3u8; 32]); + let key_3 = AccountHash::new([4u8; 32]); + + let identity_key_weight = Weight::new(250); + let weight_1 = Weight::new(1); + let weight_2 = Weight::new(2); + let weight_3 = Weight::new(3); + + let saturated_weight = Weight::new(u8::max_value()); + + let associated_keys = { + let mut res = AssociatedKeys::new(identity_key, identity_key_weight); + + res.add_key(key_1, weight_1).expect("should add key 1"); + res.add_key(key_2, weight_2).expect("should add key 2"); + res.add_key(key_3, weight_3).expect("should add key 3"); + res + }; + + assert_eq!( + associated_keys.calculate_keys_weight(&BTreeSet::from_iter(vec![ + identity_key, // 250 + key_1, // 251 + key_2, // 253 + key_3, // 256 - error + ])), + saturated_weight, + ); + } + + #[test] + fn serialization_roundtrip() { + let mut keys = AssociatedKeys::default(); + keys.add_key(AccountHash::new([1; 32]), Weight::new(1)) + .unwrap(); + keys.add_key(AccountHash::new([2; 32]), Weight::new(2)) + .unwrap(); + keys.add_key(AccountHash::new([3; 32]), Weight::new(3)) + .unwrap(); + bytesrepr::test_serialization_roundtrip(&keys); + } +} diff --git a/casper_types/src/account/error.rs b/casper_types/src/account/error.rs new file mode 100644 index 00000000..36b9cb7f --- /dev/null +++ b/casper_types/src/account/error.rs @@ -0,0 +1,110 @@ +use core::{ + array::TryFromSliceError, + convert::TryFrom, + fmt::{self, Display, Formatter}, +}; + +// This error type is not intended to be used by third 
party crates. +#[doc(hidden)] +#[derive(Debug, Eq, PartialEq)] +pub struct TryFromIntError(pub(super) ()); + +/// Error returned when decoding an `AccountHash` from a formatted string. +#[derive(Debug)] +#[non_exhaustive] +pub enum FromStrError { + /// The prefix is invalid. + InvalidPrefix, + /// The hash is not valid hex. + Hex(base16::DecodeError), + /// The hash is the wrong length. + Hash(TryFromSliceError), +} + +impl From for FromStrError { + fn from(error: base16::DecodeError) -> Self { + FromStrError::Hex(error) + } +} + +impl From for FromStrError { + fn from(error: TryFromSliceError) -> Self { + FromStrError::Hash(error) + } +} + +impl Display for FromStrError { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match self { + FromStrError::InvalidPrefix => write!(f, "prefix is not 'account-hash-'"), + FromStrError::Hex(error) => { + write!(f, "failed to decode address portion from hex: {}", error) + } + FromStrError::Hash(error) => write!(f, "address portion is wrong length: {}", error), + } + } +} + +/// Errors that can occur while changing action thresholds (i.e. the total +/// [`Weight`](super::Weight)s of signing [`AccountHash`](super::AccountHash)s required to perform +/// various actions) on an account. +#[repr(i32)] +#[derive(Debug, PartialEq, Eq, Copy, Clone)] +#[non_exhaustive] +pub enum SetThresholdFailure { + /// Setting the key-management threshold to a value lower than the deployment threshold is + /// disallowed. + KeyManagementThreshold = 1, + /// Setting the deployment threshold to a value greater than any other threshold is disallowed. + DeploymentThreshold = 2, + /// Caller doesn't have sufficient permissions to set new thresholds. + PermissionDeniedError = 3, + /// Setting a threshold to a value greater than the total weight of associated keys is + /// disallowed. + InsufficientTotalWeight = 4, +} + +// This conversion is not intended to be used by third party crates. 
+#[doc(hidden)] +impl TryFrom for SetThresholdFailure { + type Error = TryFromIntError; + + fn try_from(value: i32) -> Result { + match value { + d if d == SetThresholdFailure::KeyManagementThreshold as i32 => { + Ok(SetThresholdFailure::KeyManagementThreshold) + } + d if d == SetThresholdFailure::DeploymentThreshold as i32 => { + Ok(SetThresholdFailure::DeploymentThreshold) + } + d if d == SetThresholdFailure::PermissionDeniedError as i32 => { + Ok(SetThresholdFailure::PermissionDeniedError) + } + d if d == SetThresholdFailure::InsufficientTotalWeight as i32 => { + Ok(SetThresholdFailure::InsufficientTotalWeight) + } + _ => Err(TryFromIntError(())), + } + } +} + +impl Display for SetThresholdFailure { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + SetThresholdFailure::KeyManagementThreshold => formatter + .write_str("New threshold should be greater than or equal to deployment threshold"), + SetThresholdFailure::DeploymentThreshold => formatter.write_str( + "New threshold should be lower than or equal to key management threshold", + ), + SetThresholdFailure::PermissionDeniedError => formatter + .write_str("Unable to set action threshold due to insufficient permissions"), + SetThresholdFailure::InsufficientTotalWeight => formatter.write_str( + "New threshold should be lower or equal than total weight of associated keys", + ), + } + } +} + +/// Associated error type of `TryFrom<&[u8]>` for [`AccountHash`](super::AccountHash). 
+#[derive(Debug)] +pub struct TryFromSliceForAccountHashError(()); diff --git a/casper_types/src/account/weight.rs b/casper_types/src/account/weight.rs new file mode 100644 index 00000000..b27d7737 --- /dev/null +++ b/casper_types/src/account/weight.rs @@ -0,0 +1,62 @@ +use alloc::vec::Vec; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, Error, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + CLType, CLTyped, +}; + +/// The number of bytes in a serialized [`Weight`]. +pub const WEIGHT_SERIALIZED_LENGTH: usize = U8_SERIALIZED_LENGTH; + +/// The weight attributed to a given [`AccountHash`](super::AccountHash) in an account's associated +/// keys. +#[derive(PartialOrd, Ord, PartialEq, Eq, Clone, Copy, Debug, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct Weight(u8); + +impl Weight { + /// Maximum possible weight. + pub const MAX: Weight = Weight(u8::MAX); + + /// Constructs a new `Weight`. + pub const fn new(weight: u8) -> Weight { + Weight(weight) + } + + /// Returns the value of `self` as a `u8`. + pub fn value(self) -> u8 { + self.0 + } +} + +impl ToBytes for Weight { + fn to_bytes(&self) -> Result, Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + WEIGHT_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + writer.push(self.0); + Ok(()) + } +} + +impl FromBytes for Weight { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (byte, rem) = u8::from_bytes(bytes)?; + Ok((Weight::new(byte), rem)) + } +} + +impl CLTyped for Weight { + fn cl_type() -> CLType { + CLType::U8 + } +} diff --git a/casper_types/src/api_error.rs b/casper_types/src/api_error.rs new file mode 100644 index 00000000..eb1da1a1 --- /dev/null +++ b/casper_types/src/api_error.rs @@ -0,0 +1,874 @@ +//! Contains [`ApiError`] and associated helper functions. 
+ +use core::{ + convert::TryFrom, + fmt::{self, Debug, Formatter}, +}; + +use crate::{ + account::{ + AddKeyFailure, RemoveKeyFailure, SetThresholdFailure, TryFromIntError, + TryFromSliceForAccountHashError, UpdateKeyFailure, + }, + bytesrepr, contracts, + system::{auction, handle_payment, mint}, + CLValueError, +}; + +/// All `Error` variants defined in this library other than `Error::User` will convert to a `u32` +/// value less than or equal to `RESERVED_ERROR_MAX`. +const RESERVED_ERROR_MAX: u32 = u16::MAX as u32; // 0..=65535 + +/// Handle Payment errors will have this value added to them when being converted to a `u32`. +const POS_ERROR_OFFSET: u32 = RESERVED_ERROR_MAX - u8::MAX as u32; // 65280..=65535 + +/// Mint errors will have this value added to them when being converted to a `u32`. +const MINT_ERROR_OFFSET: u32 = (POS_ERROR_OFFSET - 1) - u8::MAX as u32; // 65024..=65279 + +/// Contract header errors will have this value added to them when being converted to a `u32`. +const HEADER_ERROR_OFFSET: u32 = (MINT_ERROR_OFFSET - 1) - u8::MAX as u32; // 64768..=65023 + +/// Contract header errors will have this value added to them when being converted to a `u32`. +const AUCTION_ERROR_OFFSET: u32 = (HEADER_ERROR_OFFSET - 1) - u8::MAX as u32; // 64512..=64767 + +/// Minimum value of user error's inclusive range. +const USER_ERROR_MIN: u32 = RESERVED_ERROR_MAX + 1; + +/// Maximum value of user error's inclusive range. +const USER_ERROR_MAX: u32 = 2 * RESERVED_ERROR_MAX + 1; + +/// Minimum value of Mint error's inclusive range. +const MINT_ERROR_MIN: u32 = MINT_ERROR_OFFSET; + +/// Maximum value of Mint error's inclusive range. +const MINT_ERROR_MAX: u32 = POS_ERROR_OFFSET - 1; + +/// Minimum value of Handle Payment error's inclusive range. +const HP_ERROR_MIN: u32 = POS_ERROR_OFFSET; + +/// Maximum value of Handle Payment error's inclusive range. +const HP_ERROR_MAX: u32 = RESERVED_ERROR_MAX; + +/// Minimum value of contract header error's inclusive range. 
+const HEADER_ERROR_MIN: u32 = HEADER_ERROR_OFFSET; + +/// Maximum value of contract header error's inclusive range. +const HEADER_ERROR_MAX: u32 = HEADER_ERROR_OFFSET + u8::MAX as u32; + +/// Minimum value of an auction contract error's inclusive range. +const AUCTION_ERROR_MIN: u32 = AUCTION_ERROR_OFFSET; + +/// Maximum value of an auction contract error's inclusive range. +const AUCTION_ERROR_MAX: u32 = AUCTION_ERROR_OFFSET + u8::MAX as u32; + +/// Errors which can be encountered while running a smart contract. +/// +/// An `ApiError` can be converted to a `u32` in order to be passed via the execution engine's +/// `ext_ffi::casper_revert()` function. This means the information each variant can convey is +/// limited. +/// +/// The variants are split into numeric ranges as follows: +/// +/// | Inclusive range | Variant(s) | +/// | ----------------| ----------------------------------------------------------------| +/// | [1, 64511] | all except reserved system contract error ranges defined below. | +/// | [64512, 64767] | `Auction` | +/// | [64768, 65023] | `ContractHeader` | +/// | [65024, 65279] | `Mint` | +/// | [65280, 65535] | `HandlePayment` | +/// | [65536, 131071] | `User` | +/// +/// Users can specify a C-style enum and implement `From` to ease usage of +/// `casper_contract::runtime::revert()`, e.g. 
+/// ``` +/// use casper_types::ApiError; +/// +/// #[repr(u16)] +/// enum FailureCode { +/// Zero = 0, // 65,536 as an ApiError::User +/// One, // 65,537 as an ApiError::User +/// Two // 65,538 as an ApiError::User +/// } +/// +/// impl From for ApiError { +/// fn from(code: FailureCode) -> Self { +/// ApiError::User(code as u16) +/// } +/// } +/// +/// assert_eq!(ApiError::User(1), FailureCode::One.into()); +/// assert_eq!(65_536, u32::from(ApiError::from(FailureCode::Zero))); +/// assert_eq!(65_538, u32::from(ApiError::from(FailureCode::Two))); +/// ``` +#[derive(Copy, Clone, PartialEq, Eq)] +#[non_exhaustive] +pub enum ApiError { + /// Optional data was unexpectedly `None`. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(1), ApiError::None); + /// ``` + None, + /// Specified argument not provided. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(2), ApiError::MissingArgument); + /// ``` + MissingArgument, + /// Argument not of correct type. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(3), ApiError::InvalidArgument); + /// ``` + InvalidArgument, + /// Failed to deserialize a value. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(4), ApiError::Deserialize); + /// ``` + Deserialize, + /// `casper_contract::storage::read()` returned an error. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(5), ApiError::Read); + /// ``` + Read, + /// The given key returned a `None` value. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(6), ApiError::ValueNotFound); + /// ``` + ValueNotFound, + /// Failed to find a specified contract. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(7), ApiError::ContractNotFound); + /// ``` + ContractNotFound, + /// A call to `casper_contract::runtime::get_key()` returned a failure. 
+ /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(8), ApiError::GetKey); + /// ``` + GetKey, + /// The [`Key`](crate::Key) variant was not as expected. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(9), ApiError::UnexpectedKeyVariant); + /// ``` + UnexpectedKeyVariant, + /// Obsolete error variant (we no longer have ContractRef). + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(10), ApiError::UnexpectedContractRefVariant); + /// ``` + UnexpectedContractRefVariant, // TODO: this variant is not used any longer and can be removed + /// Invalid purse name given. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(11), ApiError::InvalidPurseName); + /// ``` + InvalidPurseName, + /// Invalid purse retrieved. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(12), ApiError::InvalidPurse); + /// ``` + InvalidPurse, + /// Failed to upgrade contract at [`URef`](crate::URef). + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(13), ApiError::UpgradeContractAtURef); + /// ``` + UpgradeContractAtURef, + /// Failed to transfer motes. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(14), ApiError::Transfer); + /// ``` + Transfer, + /// The given [`URef`](crate::URef) has no access rights. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(15), ApiError::NoAccessRights); + /// ``` + NoAccessRights, + /// A given type could not be constructed from a [`CLValue`](crate::CLValue). + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(16), ApiError::CLTypeMismatch); + /// ``` + CLTypeMismatch, + /// Early end of stream while deserializing. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(17), ApiError::EarlyEndOfStream); + /// ``` + EarlyEndOfStream, + /// Formatting error while deserializing. 
+ /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(18), ApiError::Formatting); + /// ``` + Formatting, + /// Not all input bytes were consumed in [`deserialize`](crate::bytesrepr::deserialize). + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(19), ApiError::LeftOverBytes); + /// ``` + LeftOverBytes, + /// Out of memory error. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(20), ApiError::OutOfMemory); + /// ``` + OutOfMemory, + /// There are already maximum [`AccountHash`](crate::account::AccountHash)s associated with the + /// given account. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(21), ApiError::MaxKeysLimit); + /// ``` + MaxKeysLimit, + /// The given [`AccountHash`](crate::account::AccountHash) is already associated with the given + /// account. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(22), ApiError::DuplicateKey); + /// ``` + DuplicateKey, + /// Caller doesn't have sufficient permissions to perform the given action. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(23), ApiError::PermissionDenied); + /// ``` + PermissionDenied, + /// The given [`AccountHash`](crate::account::AccountHash) is not associated with the given + /// account. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(24), ApiError::MissingKey); + /// ``` + MissingKey, + /// Removing/updating the given associated [`AccountHash`](crate::account::AccountHash) would + /// cause the total [`Weight`](crate::account::Weight) of all remaining `AccountHash`s to + /// fall below one of the action thresholds for the given account. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(25), ApiError::ThresholdViolation); + /// ``` + ThresholdViolation, + /// Setting the key-management threshold to a value lower than the deployment threshold is + /// disallowed. 
+ /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(26), ApiError::KeyManagementThreshold); + /// ``` + KeyManagementThreshold, + /// Setting the deployment threshold to a value greater than any other threshold is disallowed. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(27), ApiError::DeploymentThreshold); + /// ``` + DeploymentThreshold, + /// Setting a threshold to a value greater than the total weight of associated keys is + /// disallowed. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(28), ApiError::InsufficientTotalWeight); + /// ``` + InsufficientTotalWeight, + /// The given `u32` doesn't map to a [`SystemContractType`](crate::system::SystemContractType). + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(29), ApiError::InvalidSystemContract); + /// ``` + InvalidSystemContract, + /// Failed to create a new purse. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(30), ApiError::PurseNotCreated); + /// ``` + PurseNotCreated, + /// An unhandled value, likely representing a bug in the code. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(31), ApiError::Unhandled); + /// ``` + Unhandled, + /// The provided buffer is too small to complete an operation. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(32), ApiError::BufferTooSmall); + /// ``` + BufferTooSmall, + /// No data available in the host buffer. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(33), ApiError::HostBufferEmpty); + /// ``` + HostBufferEmpty, + /// The host buffer has been set to a value and should be consumed first by a read operation. 
+ /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(34), ApiError::HostBufferFull); + /// ``` + HostBufferFull, + /// Could not lay out an array in memory + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(35), ApiError::AllocLayout); + /// ``` + AllocLayout, + /// The `dictionary_item_key` length exceeds the maximum length. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(36), ApiError::DictionaryItemKeyExceedsLength); + /// ``` + DictionaryItemKeyExceedsLength, + /// The `dictionary_item_key` is invalid. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(37), ApiError::InvalidDictionaryItemKey); + /// ``` + InvalidDictionaryItemKey, + /// Unable to retrieve the requested system contract hash. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(38), ApiError::MissingSystemContractHash); + /// ``` + MissingSystemContractHash, + /// Exceeded a recursion depth limit. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(39), ApiError::ExceededRecursionDepth); + /// ``` + ExceededRecursionDepth, + /// Attempt to serialize a value that does not have a serialized representation. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(40), ApiError::NonRepresentableSerialization); + /// ``` + NonRepresentableSerialization, + /// Error specific to Auction contract. See + /// [casper_types::system::auction::Error](crate::system::auction::Error). + /// ``` + /// # use casper_types::ApiError; + /// for code in 64512..=64767 { + /// assert!(matches!(ApiError::from(code), ApiError::AuctionError(_auction_error))); + /// } + /// ``` + AuctionError(u8), + /// Contract header errors. See [casper_types::contracts::Error](crate::contracts::Error). 
+ /// + /// ``` + /// # use casper_types::ApiError; + /// for code in 64768..=65023 { + /// assert!(matches!(ApiError::from(code), ApiError::ContractHeader(_contract_header_error))); + /// } + /// ``` + ContractHeader(u8), + /// Error specific to Mint contract. See + /// [casper_types::system::mint::Error](crate::system::mint::Error). + /// ``` + /// # use casper_types::ApiError; + /// for code in 65024..=65279 { + /// assert!(matches!(ApiError::from(code), ApiError::Mint(_mint_error))); + /// } + /// ``` + Mint(u8), + /// Error specific to Handle Payment contract. See + /// [casper_types::system::handle_payment](crate::system::handle_payment::Error). + /// ``` + /// # use casper_types::ApiError; + /// for code in 65280..=65535 { + /// assert!(matches!(ApiError::from(code), ApiError::HandlePayment(_handle_payment_error))); + /// } + /// ``` + HandlePayment(u8), + /// User-specified error code. The internal `u16` value is added to `u16::MAX as u32 + 1` when + /// an `Error::User` is converted to a `u32`. 
+ /// ``` + /// # use casper_types::ApiError; + /// for code in 65536..131071 { + /// assert!(matches!(ApiError::from(code), ApiError::User(_))); + /// } + /// ``` + User(u16), +} + +impl From for ApiError { + fn from(error: bytesrepr::Error) -> Self { + match error { + bytesrepr::Error::EarlyEndOfStream => ApiError::EarlyEndOfStream, + bytesrepr::Error::Formatting => ApiError::Formatting, + bytesrepr::Error::LeftOverBytes => ApiError::LeftOverBytes, + bytesrepr::Error::OutOfMemory => ApiError::OutOfMemory, + bytesrepr::Error::NotRepresentable => ApiError::NonRepresentableSerialization, + bytesrepr::Error::ExceededRecursionDepth => ApiError::ExceededRecursionDepth, + } + } +} + +impl From for ApiError { + fn from(error: AddKeyFailure) -> Self { + match error { + AddKeyFailure::MaxKeysLimit => ApiError::MaxKeysLimit, + AddKeyFailure::DuplicateKey => ApiError::DuplicateKey, + AddKeyFailure::PermissionDenied => ApiError::PermissionDenied, + } + } +} + +impl From for ApiError { + fn from(error: UpdateKeyFailure) -> Self { + match error { + UpdateKeyFailure::MissingKey => ApiError::MissingKey, + UpdateKeyFailure::PermissionDenied => ApiError::PermissionDenied, + UpdateKeyFailure::ThresholdViolation => ApiError::ThresholdViolation, + } + } +} + +impl From for ApiError { + fn from(error: RemoveKeyFailure) -> Self { + match error { + RemoveKeyFailure::MissingKey => ApiError::MissingKey, + RemoveKeyFailure::PermissionDenied => ApiError::PermissionDenied, + RemoveKeyFailure::ThresholdViolation => ApiError::ThresholdViolation, + } + } +} + +impl From for ApiError { + fn from(error: SetThresholdFailure) -> Self { + match error { + SetThresholdFailure::KeyManagementThreshold => ApiError::KeyManagementThreshold, + SetThresholdFailure::DeploymentThreshold => ApiError::DeploymentThreshold, + SetThresholdFailure::PermissionDeniedError => ApiError::PermissionDenied, + SetThresholdFailure::InsufficientTotalWeight => ApiError::InsufficientTotalWeight, + } + } +} + +impl From for 
ApiError { + fn from(error: CLValueError) -> Self { + match error { + CLValueError::Serialization(bytesrepr_error) => bytesrepr_error.into(), + CLValueError::Type(_) => ApiError::CLTypeMismatch, + } + } +} + +impl From for ApiError { + fn from(error: contracts::Error) -> Self { + ApiError::ContractHeader(error as u8) + } +} + +impl From for ApiError { + fn from(error: auction::Error) -> Self { + ApiError::AuctionError(error as u8) + } +} + +// This conversion is not intended to be used by third party crates. +#[doc(hidden)] +impl From for ApiError { + fn from(_error: TryFromIntError) -> Self { + ApiError::Unhandled + } +} + +impl From for ApiError { + fn from(_error: TryFromSliceForAccountHashError) -> Self { + ApiError::Deserialize + } +} + +impl From for ApiError { + fn from(error: mint::Error) -> Self { + ApiError::Mint(error as u8) + } +} + +impl From for ApiError { + fn from(error: handle_payment::Error) -> Self { + ApiError::HandlePayment(error as u8) + } +} + +impl From for u32 { + fn from(error: ApiError) -> Self { + match error { + ApiError::None => 1, + ApiError::MissingArgument => 2, + ApiError::InvalidArgument => 3, + ApiError::Deserialize => 4, + ApiError::Read => 5, + ApiError::ValueNotFound => 6, + ApiError::ContractNotFound => 7, + ApiError::GetKey => 8, + ApiError::UnexpectedKeyVariant => 9, + ApiError::UnexpectedContractRefVariant => 10, + ApiError::InvalidPurseName => 11, + ApiError::InvalidPurse => 12, + ApiError::UpgradeContractAtURef => 13, + ApiError::Transfer => 14, + ApiError::NoAccessRights => 15, + ApiError::CLTypeMismatch => 16, + ApiError::EarlyEndOfStream => 17, + ApiError::Formatting => 18, + ApiError::LeftOverBytes => 19, + ApiError::OutOfMemory => 20, + ApiError::MaxKeysLimit => 21, + ApiError::DuplicateKey => 22, + ApiError::PermissionDenied => 23, + ApiError::MissingKey => 24, + ApiError::ThresholdViolation => 25, + ApiError::KeyManagementThreshold => 26, + ApiError::DeploymentThreshold => 27, + ApiError::InsufficientTotalWeight 
=> 28, + ApiError::InvalidSystemContract => 29, + ApiError::PurseNotCreated => 30, + ApiError::Unhandled => 31, + ApiError::BufferTooSmall => 32, + ApiError::HostBufferEmpty => 33, + ApiError::HostBufferFull => 34, + ApiError::AllocLayout => 35, + ApiError::DictionaryItemKeyExceedsLength => 36, + ApiError::InvalidDictionaryItemKey => 37, + ApiError::MissingSystemContractHash => 38, + ApiError::ExceededRecursionDepth => 39, + ApiError::NonRepresentableSerialization => 40, + ApiError::AuctionError(value) => AUCTION_ERROR_OFFSET + u32::from(value), + ApiError::ContractHeader(value) => HEADER_ERROR_OFFSET + u32::from(value), + ApiError::Mint(value) => MINT_ERROR_OFFSET + u32::from(value), + ApiError::HandlePayment(value) => POS_ERROR_OFFSET + u32::from(value), + ApiError::User(value) => RESERVED_ERROR_MAX + 1 + u32::from(value), + } + } +} + +impl From for ApiError { + fn from(value: u32) -> ApiError { + match value { + 1 => ApiError::None, + 2 => ApiError::MissingArgument, + 3 => ApiError::InvalidArgument, + 4 => ApiError::Deserialize, + 5 => ApiError::Read, + 6 => ApiError::ValueNotFound, + 7 => ApiError::ContractNotFound, + 8 => ApiError::GetKey, + 9 => ApiError::UnexpectedKeyVariant, + 10 => ApiError::UnexpectedContractRefVariant, + 11 => ApiError::InvalidPurseName, + 12 => ApiError::InvalidPurse, + 13 => ApiError::UpgradeContractAtURef, + 14 => ApiError::Transfer, + 15 => ApiError::NoAccessRights, + 16 => ApiError::CLTypeMismatch, + 17 => ApiError::EarlyEndOfStream, + 18 => ApiError::Formatting, + 19 => ApiError::LeftOverBytes, + 20 => ApiError::OutOfMemory, + 21 => ApiError::MaxKeysLimit, + 22 => ApiError::DuplicateKey, + 23 => ApiError::PermissionDenied, + 24 => ApiError::MissingKey, + 25 => ApiError::ThresholdViolation, + 26 => ApiError::KeyManagementThreshold, + 27 => ApiError::DeploymentThreshold, + 28 => ApiError::InsufficientTotalWeight, + 29 => ApiError::InvalidSystemContract, + 30 => ApiError::PurseNotCreated, + 31 => ApiError::Unhandled, + 32 => 
ApiError::BufferTooSmall, + 33 => ApiError::HostBufferEmpty, + 34 => ApiError::HostBufferFull, + 35 => ApiError::AllocLayout, + 36 => ApiError::DictionaryItemKeyExceedsLength, + 37 => ApiError::InvalidDictionaryItemKey, + 38 => ApiError::MissingSystemContractHash, + 39 => ApiError::ExceededRecursionDepth, + 40 => ApiError::NonRepresentableSerialization, + USER_ERROR_MIN..=USER_ERROR_MAX => ApiError::User(value as u16), + HP_ERROR_MIN..=HP_ERROR_MAX => ApiError::HandlePayment(value as u8), + MINT_ERROR_MIN..=MINT_ERROR_MAX => ApiError::Mint(value as u8), + HEADER_ERROR_MIN..=HEADER_ERROR_MAX => ApiError::ContractHeader(value as u8), + AUCTION_ERROR_MIN..=AUCTION_ERROR_MAX => ApiError::AuctionError(value as u8), + _ => ApiError::Unhandled, + } + } +} + +impl Debug for ApiError { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match self { + ApiError::None => write!(f, "ApiError::None")?, + ApiError::MissingArgument => write!(f, "ApiError::MissingArgument")?, + ApiError::InvalidArgument => write!(f, "ApiError::InvalidArgument")?, + ApiError::Deserialize => write!(f, "ApiError::Deserialize")?, + ApiError::Read => write!(f, "ApiError::Read")?, + ApiError::ValueNotFound => write!(f, "ApiError::ValueNotFound")?, + ApiError::ContractNotFound => write!(f, "ApiError::ContractNotFound")?, + ApiError::GetKey => write!(f, "ApiError::GetKey")?, + ApiError::UnexpectedKeyVariant => write!(f, "ApiError::UnexpectedKeyVariant")?, + ApiError::UnexpectedContractRefVariant => { + write!(f, "ApiError::UnexpectedContractRefVariant")? 
+ } + ApiError::InvalidPurseName => write!(f, "ApiError::InvalidPurseName")?, + ApiError::InvalidPurse => write!(f, "ApiError::InvalidPurse")?, + ApiError::UpgradeContractAtURef => write!(f, "ApiError::UpgradeContractAtURef")?, + ApiError::Transfer => write!(f, "ApiError::Transfer")?, + ApiError::NoAccessRights => write!(f, "ApiError::NoAccessRights")?, + ApiError::CLTypeMismatch => write!(f, "ApiError::CLTypeMismatch")?, + ApiError::EarlyEndOfStream => write!(f, "ApiError::EarlyEndOfStream")?, + ApiError::Formatting => write!(f, "ApiError::Formatting")?, + ApiError::LeftOverBytes => write!(f, "ApiError::LeftOverBytes")?, + ApiError::OutOfMemory => write!(f, "ApiError::OutOfMemory")?, + ApiError::MaxKeysLimit => write!(f, "ApiError::MaxKeysLimit")?, + ApiError::DuplicateKey => write!(f, "ApiError::DuplicateKey")?, + ApiError::PermissionDenied => write!(f, "ApiError::PermissionDenied")?, + ApiError::MissingKey => write!(f, "ApiError::MissingKey")?, + ApiError::ThresholdViolation => write!(f, "ApiError::ThresholdViolation")?, + ApiError::KeyManagementThreshold => write!(f, "ApiError::KeyManagementThreshold")?, + ApiError::DeploymentThreshold => write!(f, "ApiError::DeploymentThreshold")?, + ApiError::InsufficientTotalWeight => write!(f, "ApiError::InsufficientTotalWeight")?, + ApiError::InvalidSystemContract => write!(f, "ApiError::InvalidSystemContract")?, + ApiError::PurseNotCreated => write!(f, "ApiError::PurseNotCreated")?, + ApiError::Unhandled => write!(f, "ApiError::Unhandled")?, + ApiError::BufferTooSmall => write!(f, "ApiError::BufferTooSmall")?, + ApiError::HostBufferEmpty => write!(f, "ApiError::HostBufferEmpty")?, + ApiError::HostBufferFull => write!(f, "ApiError::HostBufferFull")?, + ApiError::AllocLayout => write!(f, "ApiError::AllocLayout")?, + ApiError::DictionaryItemKeyExceedsLength => { + write!(f, "ApiError::DictionaryItemKeyTooLarge")? 
+ } + ApiError::InvalidDictionaryItemKey => write!(f, "ApiError::InvalidDictionaryItemKey")?, + ApiError::MissingSystemContractHash => write!(f, "ApiError::MissingContractHash")?, + ApiError::NonRepresentableSerialization => { + write!(f, "ApiError::NonRepresentableSerialization")? + } + ApiError::ExceededRecursionDepth => write!(f, "ApiError::ExceededRecursionDepth")?, + ApiError::AuctionError(value) => write!( + f, + "ApiError::AuctionError({:?})", + auction::Error::try_from(*value).map_err(|_err| fmt::Error)? + )?, + ApiError::ContractHeader(value) => write!( + f, + "ApiError::ContractHeader({:?})", + contracts::Error::try_from(*value).map_err(|_err| fmt::Error)? + )?, + ApiError::Mint(value) => write!( + f, + "ApiError::Mint({:?})", + mint::Error::try_from(*value).map_err(|_err| fmt::Error)? + )?, + ApiError::HandlePayment(value) => write!( + f, + "ApiError::HandlePayment({:?})", + handle_payment::Error::try_from(*value).map_err(|_err| fmt::Error)? + )?, + ApiError::User(value) => write!(f, "ApiError::User({})", value)?, + } + write!(f, " [{}]", u32::from(*self)) + } +} + +impl fmt::Display for ApiError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + ApiError::User(value) => write!(f, "User error: {}", value), + ApiError::ContractHeader(value) => write!(f, "Contract header error: {}", value), + ApiError::Mint(value) => write!(f, "Mint error: {}", value), + ApiError::HandlePayment(value) => write!(f, "Handle Payment error: {}", value), + _ => ::fmt(self, f), + } + } +} + +// This function is not intended to be used by third party crates. +#[doc(hidden)] +pub fn i32_from(result: Result<(), T>) -> i32 +where + ApiError: From, +{ + match result { + Ok(()) => 0, + Err(error) => { + let api_error = ApiError::from(error); + u32::from(api_error) as i32 + } + } +} + +/// Converts an `i32` to a `Result<(), ApiError>`, where `0` represents `Ok(())`, and all other +/// inputs are mapped to `Err(ApiError::)`. 
The full list of mappings can be found in the +/// [docs for `ApiError`](ApiError#mappings). +pub fn result_from(value: i32) -> Result<(), ApiError> { + match value { + 0 => Ok(()), + _ => Err(ApiError::from(value as u32)), + } +} + +#[cfg(test)] +mod tests { + use std::{i32, u16, u8}; + + use super::*; + + fn round_trip(result: Result<(), ApiError>) { + let code = i32_from(result); + assert_eq!(result, result_from(code)); + } + + #[test] + fn error_values() { + assert_eq!(65_024_u32, u32::from(ApiError::Mint(0))); // MINT_ERROR_OFFSET == 65,024 + assert_eq!(65_279_u32, u32::from(ApiError::Mint(u8::MAX))); + assert_eq!(65_280_u32, u32::from(ApiError::HandlePayment(0))); // POS_ERROR_OFFSET == 65,280 + assert_eq!(65_535_u32, u32::from(ApiError::HandlePayment(u8::MAX))); + assert_eq!(65_536_u32, u32::from(ApiError::User(0))); // u16::MAX + 1 + assert_eq!(131_071_u32, u32::from(ApiError::User(u16::MAX))); // 2 * u16::MAX + 1 + } + + #[test] + fn error_descriptions_getkey() { + assert_eq!("ApiError::GetKey [8]", &format!("{:?}", ApiError::GetKey)); + assert_eq!("ApiError::GetKey [8]", &format!("{}", ApiError::GetKey)); + } + + #[test] + fn error_descriptions_contract_header() { + assert_eq!( + "ApiError::ContractHeader(PreviouslyUsedVersion) [64769]", + &format!( + "{:?}", + ApiError::ContractHeader(contracts::Error::PreviouslyUsedVersion as u8) + ) + ); + assert_eq!( + "Contract header error: 0", + &format!("{}", ApiError::ContractHeader(0)) + ); + assert_eq!( + "Contract header error: 255", + &format!("{}", ApiError::ContractHeader(u8::MAX)) + ); + } + + #[test] + fn error_descriptions_mint() { + assert_eq!( + "ApiError::Mint(InsufficientFunds) [65024]", + &format!("{:?}", ApiError::Mint(0)) + ); + assert_eq!("Mint error: 0", &format!("{}", ApiError::Mint(0))); + assert_eq!("Mint error: 255", &format!("{}", ApiError::Mint(u8::MAX))); + } + + #[test] + fn error_descriptions_handle_payment() { + assert_eq!( + "ApiError::HandlePayment(NotBonded) [65280]", + &format!( + 
"{:?}", + ApiError::HandlePayment(handle_payment::Error::NotBonded as u8) + ) + ); + } + #[test] + fn error_descriptions_handle_payment_display() { + assert_eq!( + "Handle Payment error: 0", + &format!( + "{}", + ApiError::HandlePayment(handle_payment::Error::NotBonded as u8) + ) + ); + } + + #[test] + fn error_descriptions_user_errors() { + assert_eq!( + "ApiError::User(0) [65536]", + &format!("{:?}", ApiError::User(0)) + ); + + assert_eq!("User error: 0", &format!("{}", ApiError::User(0))); + assert_eq!( + "ApiError::User(65535) [131071]", + &format!("{:?}", ApiError::User(u16::MAX)) + ); + assert_eq!( + "User error: 65535", + &format!("{}", ApiError::User(u16::MAX)) + ); + } + + #[test] + fn error_edge_cases() { + assert_eq!(Err(ApiError::Unhandled), result_from(i32::MAX)); + assert_eq!( + Err(ApiError::ContractHeader(255)), + result_from(MINT_ERROR_OFFSET as i32 - 1) + ); + assert_eq!(Err(ApiError::Unhandled), result_from(-1)); + assert_eq!(Err(ApiError::Unhandled), result_from(i32::MIN)); + } + + #[test] + fn error_round_trips() { + round_trip(Ok(())); + round_trip(Err(ApiError::None)); + round_trip(Err(ApiError::MissingArgument)); + round_trip(Err(ApiError::InvalidArgument)); + round_trip(Err(ApiError::Deserialize)); + round_trip(Err(ApiError::Read)); + round_trip(Err(ApiError::ValueNotFound)); + round_trip(Err(ApiError::ContractNotFound)); + round_trip(Err(ApiError::GetKey)); + round_trip(Err(ApiError::UnexpectedKeyVariant)); + round_trip(Err(ApiError::UnexpectedContractRefVariant)); + round_trip(Err(ApiError::InvalidPurseName)); + round_trip(Err(ApiError::InvalidPurse)); + round_trip(Err(ApiError::UpgradeContractAtURef)); + round_trip(Err(ApiError::Transfer)); + round_trip(Err(ApiError::NoAccessRights)); + round_trip(Err(ApiError::CLTypeMismatch)); + round_trip(Err(ApiError::EarlyEndOfStream)); + round_trip(Err(ApiError::Formatting)); + round_trip(Err(ApiError::LeftOverBytes)); + round_trip(Err(ApiError::OutOfMemory)); + 
round_trip(Err(ApiError::MaxKeysLimit)); + round_trip(Err(ApiError::DuplicateKey)); + round_trip(Err(ApiError::PermissionDenied)); + round_trip(Err(ApiError::MissingKey)); + round_trip(Err(ApiError::ThresholdViolation)); + round_trip(Err(ApiError::KeyManagementThreshold)); + round_trip(Err(ApiError::DeploymentThreshold)); + round_trip(Err(ApiError::InsufficientTotalWeight)); + round_trip(Err(ApiError::InvalidSystemContract)); + round_trip(Err(ApiError::PurseNotCreated)); + round_trip(Err(ApiError::Unhandled)); + round_trip(Err(ApiError::BufferTooSmall)); + round_trip(Err(ApiError::HostBufferEmpty)); + round_trip(Err(ApiError::HostBufferFull)); + round_trip(Err(ApiError::AllocLayout)); + round_trip(Err(ApiError::NonRepresentableSerialization)); + round_trip(Err(ApiError::ContractHeader(0))); + round_trip(Err(ApiError::ContractHeader(u8::MAX))); + round_trip(Err(ApiError::Mint(0))); + round_trip(Err(ApiError::Mint(u8::MAX))); + round_trip(Err(ApiError::HandlePayment(0))); + round_trip(Err(ApiError::HandlePayment(u8::MAX))); + round_trip(Err(ApiError::User(0))); + round_trip(Err(ApiError::User(u16::MAX))); + round_trip(Err(ApiError::AuctionError(0))); + round_trip(Err(ApiError::AuctionError(u8::MAX))); + } +} diff --git a/casper_types/src/block_time.rs b/casper_types/src/block_time.rs new file mode 100644 index 00000000..4122f7ca --- /dev/null +++ b/casper_types/src/block_time.rs @@ -0,0 +1,47 @@ +use alloc::vec::Vec; + +use crate::bytesrepr::{Error, FromBytes, ToBytes, U64_SERIALIZED_LENGTH}; + +/// The number of bytes in a serialized [`BlockTime`]. +pub const BLOCKTIME_SERIALIZED_LENGTH: usize = U64_SERIALIZED_LENGTH; + +/// A newtype wrapping a [`u64`] which represents the block time. +#[derive(Clone, Copy, Default, Debug, PartialEq, Eq, PartialOrd)] +pub struct BlockTime(u64); + +impl BlockTime { + /// Constructs a `BlockTime`. + pub fn new(value: u64) -> Self { + BlockTime(value) + } + + /// Saturating integer subtraction. 
Computes `self - other`, saturating at `0` instead of + /// overflowing. + #[must_use] + pub fn saturating_sub(self, other: BlockTime) -> Self { + BlockTime(self.0.saturating_sub(other.0)) + } +} + +impl From for u64 { + fn from(blocktime: BlockTime) -> Self { + blocktime.0 + } +} + +impl ToBytes for BlockTime { + fn to_bytes(&self) -> Result, Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + BLOCKTIME_SERIALIZED_LENGTH + } +} + +impl FromBytes for BlockTime { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (time, rem) = FromBytes::from_bytes(bytes)?; + Ok((BlockTime::new(time), rem)) + } +} diff --git a/casper_types/src/bytesrepr.rs b/casper_types/src/bytesrepr.rs new file mode 100644 index 00000000..136dd19a --- /dev/null +++ b/casper_types/src/bytesrepr.rs @@ -0,0 +1,1594 @@ +//! Contains serialization and deserialization code for types used throughout the system. +mod bytes; + +use alloc::{ + alloc::{alloc, Layout}, + collections::{BTreeMap, BTreeSet, VecDeque}, + str, + string::String, + vec, + vec::Vec, +}; +#[cfg(debug_assertions)] +use core::any; +use core::{ + convert::TryInto, + fmt::{self, Display, Formatter}, + mem, + ptr::NonNull, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use num_integer::Integer; +use num_rational::Ratio; +use serde::{Deserialize, Serialize}; + +pub use bytes::Bytes; + +/// The number of bytes in a serialized `()`. +pub const UNIT_SERIALIZED_LENGTH: usize = 0; +/// The number of bytes in a serialized `bool`. +pub const BOOL_SERIALIZED_LENGTH: usize = 1; +/// The number of bytes in a serialized `i32`. +pub const I32_SERIALIZED_LENGTH: usize = mem::size_of::(); +/// The number of bytes in a serialized `i64`. +pub const I64_SERIALIZED_LENGTH: usize = mem::size_of::(); +/// The number of bytes in a serialized `u8`. +pub const U8_SERIALIZED_LENGTH: usize = mem::size_of::(); +/// The number of bytes in a serialized `u16`. 
+pub const U16_SERIALIZED_LENGTH: usize = mem::size_of::(); +/// The number of bytes in a serialized `u32`. +pub const U32_SERIALIZED_LENGTH: usize = mem::size_of::(); +/// The number of bytes in a serialized `u64`. +pub const U64_SERIALIZED_LENGTH: usize = mem::size_of::(); +/// The number of bytes in a serialized [`U128`](crate::U128). +pub const U128_SERIALIZED_LENGTH: usize = mem::size_of::(); +/// The number of bytes in a serialized [`U256`](crate::U256). +pub const U256_SERIALIZED_LENGTH: usize = U128_SERIALIZED_LENGTH * 2; +/// The number of bytes in a serialized [`U512`](crate::U512). +pub const U512_SERIALIZED_LENGTH: usize = U256_SERIALIZED_LENGTH * 2; +/// The tag representing a `None` value. +pub const OPTION_NONE_TAG: u8 = 0; +/// The tag representing a `Some` value. +pub const OPTION_SOME_TAG: u8 = 1; +/// The tag representing an `Err` value. +pub const RESULT_ERR_TAG: u8 = 0; +/// The tag representing an `Ok` value. +pub const RESULT_OK_TAG: u8 = 1; + +/// A type which can be serialized to a `Vec`. +pub trait ToBytes { + /// Serializes `&self` to a `Vec`. + fn to_bytes(&self) -> Result, Error>; + /// Consumes `self` and serializes to a `Vec`. + fn into_bytes(self) -> Result, Error> + where + Self: Sized, + { + self.to_bytes() + } + /// Returns the length of the `Vec` which would be returned from a successful call to + /// `to_bytes()` or `into_bytes()`. The data is not actually serialized, so this call is + /// relatively cheap. + fn serialized_length(&self) -> usize; + + /// Writes `&self` into a mutable `writer`. + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.extend(self.to_bytes()?); + Ok(()) + } +} + +/// A type which can be deserialized from a `Vec`. +pub trait FromBytes: Sized { + /// Deserializes the slice into `Self`. + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error>; + + /// Deserializes the `Vec` into `Self`. 
+ fn from_vec(bytes: Vec) -> Result<(Self, Vec), Error> { + Self::from_bytes(bytes.as_slice()).map(|(x, remainder)| (x, Vec::from(remainder))) + } +} + +/// Returns a `Vec` initialized with sufficient capacity to hold `to_be_serialized` after +/// serialization. +pub fn unchecked_allocate_buffer(to_be_serialized: &T) -> Vec { + let serialized_length = to_be_serialized.serialized_length(); + Vec::with_capacity(serialized_length) +} + +/// Returns a `Vec` initialized with sufficient capacity to hold `to_be_serialized` after +/// serialization, or an error if the capacity would exceed `u32::max_value()`. +pub fn allocate_buffer(to_be_serialized: &T) -> Result, Error> { + let serialized_length = to_be_serialized.serialized_length(); + if serialized_length > u32::max_value() as usize { + return Err(Error::OutOfMemory); + } + Ok(Vec::with_capacity(serialized_length)) +} + +/// Serialization and deserialization errors. +#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[repr(u8)] +#[non_exhaustive] +pub enum Error { + /// Early end of stream while deserializing. + EarlyEndOfStream = 0, + /// Formatting error while deserializing. + Formatting, + /// Not all input bytes were consumed in [`deserialize`]. + LeftOverBytes, + /// Out of memory error. + OutOfMemory, + /// No serialized representation is available for a value. + NotRepresentable, + /// Exceeded a recursion depth limit. 
+ ExceededRecursionDepth, +} + +impl Display for Error { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + Error::EarlyEndOfStream => { + formatter.write_str("Deserialization error: early end of stream") + } + Error::Formatting => formatter.write_str("Deserialization error: formatting"), + Error::LeftOverBytes => formatter.write_str("Deserialization error: left-over bytes"), + Error::OutOfMemory => formatter.write_str("Serialization error: out of memory"), + Error::NotRepresentable => { + formatter.write_str("Serialization error: value is not representable.") + } + Error::ExceededRecursionDepth => formatter.write_str("exceeded recursion depth"), + } + } +} + +/// Deserializes `bytes` into an instance of `T`. +/// +/// Returns an error if the bytes cannot be deserialized into `T` or if not all of the input bytes +/// are consumed in the operation. +pub fn deserialize(bytes: Vec) -> Result { + let (t, remainder) = T::from_bytes(&bytes)?; + if remainder.is_empty() { + Ok(t) + } else { + Err(Error::LeftOverBytes) + } +} + +/// Deserializes a slice of bytes into an instance of `T`. +/// +/// Returns an error if the bytes cannot be deserialized into `T` or if not all of the input bytes +/// are consumed in the operation. +pub fn deserialize_from_slice, O: FromBytes>(bytes: I) -> Result { + let (t, remainder) = O::from_bytes(bytes.as_ref())?; + if remainder.is_empty() { + Ok(t) + } else { + Err(Error::LeftOverBytes) + } +} + +/// Serializes `t` into a `Vec`. +pub fn serialize(t: impl ToBytes) -> Result, Error> { + t.into_bytes() +} + +/// Safely splits the slice at the given point. 
+pub(crate) fn safe_split_at(bytes: &[u8], n: usize) -> Result<(&[u8], &[u8]), Error> { + if n > bytes.len() { + Err(Error::EarlyEndOfStream) + } else { + Ok(bytes.split_at(n)) + } +} + +impl ToBytes for () { + fn to_bytes(&self) -> Result, Error> { + Ok(Vec::new()) + } + + fn serialized_length(&self) -> usize { + UNIT_SERIALIZED_LENGTH + } +} + +impl FromBytes for () { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + Ok(((), bytes)) + } +} + +impl ToBytes for bool { + fn to_bytes(&self) -> Result, Error> { + u8::from(*self).to_bytes() + } + + fn serialized_length(&self) -> usize { + BOOL_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.push(*self as u8); + Ok(()) + } +} + +impl FromBytes for bool { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + match bytes.split_first() { + None => Err(Error::EarlyEndOfStream), + Some((byte, rem)) => match byte { + 1 => Ok((true, rem)), + 0 => Ok((false, rem)), + _ => Err(Error::Formatting), + }, + } + } +} + +impl ToBytes for u8 { + fn to_bytes(&self) -> Result, Error> { + Ok(vec![*self]) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.push(*self); + Ok(()) + } +} + +impl FromBytes for u8 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + match bytes.split_first() { + None => Err(Error::EarlyEndOfStream), + Some((byte, rem)) => Ok((*byte, rem)), + } + } +} + +impl ToBytes for i32 { + fn to_bytes(&self) -> Result, Error> { + Ok(self.to_le_bytes().to_vec()) + } + + fn serialized_length(&self) -> usize { + I32_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.extend_from_slice(&self.to_le_bytes()); + Ok(()) + } +} + +impl FromBytes for i32 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let mut result = [0u8; I32_SERIALIZED_LENGTH]; + let (bytes, remainder) = 
safe_split_at(bytes, I32_SERIALIZED_LENGTH)?; + result.copy_from_slice(bytes); + Ok((::from_le_bytes(result), remainder)) + } +} + +impl ToBytes for i64 { + fn to_bytes(&self) -> Result, Error> { + Ok(self.to_le_bytes().to_vec()) + } + + fn serialized_length(&self) -> usize { + I64_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.extend_from_slice(&self.to_le_bytes()); + Ok(()) + } +} + +impl FromBytes for i64 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let mut result = [0u8; I64_SERIALIZED_LENGTH]; + let (bytes, remainder) = safe_split_at(bytes, I64_SERIALIZED_LENGTH)?; + result.copy_from_slice(bytes); + Ok((::from_le_bytes(result), remainder)) + } +} + +impl ToBytes for u16 { + fn to_bytes(&self) -> Result, Error> { + Ok(self.to_le_bytes().to_vec()) + } + + fn serialized_length(&self) -> usize { + U16_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.extend_from_slice(&self.to_le_bytes()); + Ok(()) + } +} + +impl FromBytes for u16 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let mut result = [0u8; U16_SERIALIZED_LENGTH]; + let (bytes, remainder) = safe_split_at(bytes, U16_SERIALIZED_LENGTH)?; + result.copy_from_slice(bytes); + Ok((::from_le_bytes(result), remainder)) + } +} + +impl ToBytes for u32 { + fn to_bytes(&self) -> Result, Error> { + Ok(self.to_le_bytes().to_vec()) + } + + fn serialized_length(&self) -> usize { + U32_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.extend_from_slice(&self.to_le_bytes()); + Ok(()) + } +} + +impl FromBytes for u32 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let mut result = [0u8; U32_SERIALIZED_LENGTH]; + let (bytes, remainder) = safe_split_at(bytes, U32_SERIALIZED_LENGTH)?; + result.copy_from_slice(bytes); + Ok((::from_le_bytes(result), remainder)) + } +} + +impl ToBytes for u64 { + fn to_bytes(&self) -> 
Result, Error> { + Ok(self.to_le_bytes().to_vec()) + } + + fn serialized_length(&self) -> usize { + U64_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.extend_from_slice(&self.to_le_bytes()); + Ok(()) + } +} + +impl FromBytes for u64 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let mut result = [0u8; U64_SERIALIZED_LENGTH]; + let (bytes, remainder) = safe_split_at(bytes, U64_SERIALIZED_LENGTH)?; + result.copy_from_slice(bytes); + Ok((::from_le_bytes(result), remainder)) + } +} + +impl ToBytes for String { + fn to_bytes(&self) -> Result, Error> { + let bytes = self.as_bytes(); + u8_slice_to_bytes(bytes) + } + + fn serialized_length(&self) -> usize { + u8_slice_serialized_length(self.as_bytes()) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + write_u8_slice(self.as_bytes(), writer)?; + Ok(()) + } +} + +impl FromBytes for String { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (size, remainder) = u32::from_bytes(bytes)?; + let (str_bytes, remainder) = safe_split_at(remainder, size as usize)?; + let result = String::from_utf8(str_bytes.to_vec()).map_err(|_| Error::Formatting)?; + Ok((result, remainder)) + } +} + +fn ensure_efficient_serialization() { + #[cfg(debug_assertions)] + debug_assert_ne!( + any::type_name::(), + any::type_name::(), + "You should use Bytes newtype wrapper for efficiency" + ); +} + +fn iterator_serialized_length<'a, T: 'a + ToBytes>(ts: impl Iterator) -> usize { + U32_SERIALIZED_LENGTH + ts.map(ToBytes::serialized_length).sum::() +} + +impl ToBytes for Vec { + fn to_bytes(&self) -> Result, Error> { + ensure_efficient_serialization::(); + + let mut result = try_vec_with_capacity(self.serialized_length())?; + let length_32: u32 = self.len().try_into().map_err(|_| Error::NotRepresentable)?; + result.append(&mut length_32.to_bytes()?); + + for item in self.iter() { + result.append(&mut item.to_bytes()?); + } + + Ok(result) + } + + 
fn into_bytes(self) -> Result, Error> { + ensure_efficient_serialization::(); + + let mut result = allocate_buffer(&self)?; + let length_32: u32 = self.len().try_into().map_err(|_| Error::NotRepresentable)?; + result.append(&mut length_32.to_bytes()?); + + for item in self { + result.append(&mut item.into_bytes()?); + } + + Ok(result) + } + + fn serialized_length(&self) -> usize { + iterator_serialized_length(self.iter()) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + let length_32: u32 = self.len().try_into().map_err(|_| Error::NotRepresentable)?; + writer.extend_from_slice(&length_32.to_le_bytes()); + for item in self.iter() { + item.write_bytes(writer)?; + } + Ok(()) + } +} + +// TODO Replace `try_vec_with_capacity` with `Vec::try_reserve_exact` once it's in stable. +fn try_vec_with_capacity(capacity: usize) -> Result, Error> { + // see https://doc.rust-lang.org/src/alloc/raw_vec.rs.html#75-98 + let elem_size = mem::size_of::(); + let alloc_size = capacity.checked_mul(elem_size).ok_or(Error::OutOfMemory)?; + + let ptr = if alloc_size == 0 { + NonNull::::dangling() + } else { + let align = mem::align_of::(); + let layout = Layout::from_size_align(alloc_size, align).map_err(|_| Error::OutOfMemory)?; + let raw_ptr = unsafe { alloc(layout) }; + let non_null_ptr = NonNull::::new(raw_ptr).ok_or(Error::OutOfMemory)?; + non_null_ptr.cast() + }; + unsafe { Ok(Vec::from_raw_parts(ptr.as_ptr(), 0, capacity)) } +} + +fn vec_from_vec(bytes: Vec) -> Result<(Vec, Vec), Error> { + ensure_efficient_serialization::(); + + Vec::::from_bytes(bytes.as_slice()).map(|(x, remainder)| (x, Vec::from(remainder))) +} + +impl FromBytes for Vec { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + ensure_efficient_serialization::(); + + let (count, mut stream) = u32::from_bytes(bytes)?; + + let mut result = try_vec_with_capacity(count as usize)?; + for _ in 0..count { + let (value, remainder) = T::from_bytes(stream)?; + result.push(value); + stream = 
remainder; + } + + Ok((result, stream)) + } + + fn from_vec(bytes: Vec) -> Result<(Self, Vec), Error> { + vec_from_vec(bytes) + } +} + +impl ToBytes for VecDeque { + fn to_bytes(&self) -> Result, Error> { + let (slice1, slice2) = self.as_slices(); + let mut result = allocate_buffer(self)?; + let length_32: u32 = self.len().try_into().map_err(|_| Error::NotRepresentable)?; + result.append(&mut length_32.to_bytes()?); + for item in slice1.iter().chain(slice2.iter()) { + result.append(&mut item.to_bytes()?); + } + Ok(result) + } + + fn into_bytes(self) -> Result, Error> { + let vec: Vec = self.into(); + vec.to_bytes() + } + + fn serialized_length(&self) -> usize { + let (slice1, slice2) = self.as_slices(); + iterator_serialized_length(slice1.iter().chain(slice2.iter())) + } +} + +impl FromBytes for VecDeque { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (vec, bytes) = Vec::from_bytes(bytes)?; + Ok((VecDeque::from(vec), bytes)) + } + + fn from_vec(bytes: Vec) -> Result<(Self, Vec), Error> { + let (vec, bytes) = vec_from_vec(bytes)?; + Ok((VecDeque::from(vec), bytes)) + } +} + +impl ToBytes for [u8; COUNT] { + #[inline(always)] + fn to_bytes(&self) -> Result, Error> { + Ok(self.to_vec()) + } + + #[inline(always)] + fn serialized_length(&self) -> usize { + COUNT + } + + #[inline(always)] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.extend_from_slice(self); + Ok(()) + } +} + +impl FromBytes for [u8; COUNT] { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (bytes, rem) = safe_split_at(bytes, COUNT)?; + // SAFETY: safe_split_at makes sure `bytes` is exactly `COUNT` bytes. 
+ let ptr = bytes.as_ptr() as *const [u8; COUNT]; + let result = unsafe { *ptr }; + Ok((result, rem)) + } +} + +impl ToBytes for BTreeSet { + fn to_bytes(&self) -> Result, Error> { + let mut result = allocate_buffer(self)?; + + let num_keys: u32 = self.len().try_into().map_err(|_| Error::NotRepresentable)?; + result.append(&mut num_keys.to_bytes()?); + + for value in self.iter() { + result.append(&mut value.to_bytes()?); + } + + Ok(result) + } + + fn serialized_length(&self) -> usize { + U32_SERIALIZED_LENGTH + self.iter().map(|v| v.serialized_length()).sum::() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + let length_32: u32 = self.len().try_into().map_err(|_| Error::NotRepresentable)?; + writer.extend_from_slice(&length_32.to_le_bytes()); + for value in self.iter() { + value.write_bytes(writer)?; + } + Ok(()) + } +} + +impl FromBytes for BTreeSet { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (num_keys, mut stream) = u32::from_bytes(bytes)?; + let mut result = BTreeSet::new(); + for _ in 0..num_keys { + let (v, rem) = V::from_bytes(stream)?; + result.insert(v); + stream = rem; + } + Ok((result, stream)) + } +} + +impl ToBytes for BTreeMap +where + K: ToBytes, + V: ToBytes, +{ + fn to_bytes(&self) -> Result, Error> { + let mut result = allocate_buffer(self)?; + + let num_keys: u32 = self.len().try_into().map_err(|_| Error::NotRepresentable)?; + result.append(&mut num_keys.to_bytes()?); + + for (key, value) in self.iter() { + result.append(&mut key.to_bytes()?); + result.append(&mut value.to_bytes()?); + } + + Ok(result) + } + + fn serialized_length(&self) -> usize { + U32_SERIALIZED_LENGTH + + self + .iter() + .map(|(key, value)| key.serialized_length() + value.serialized_length()) + .sum::() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + let length_32: u32 = self.len().try_into().map_err(|_| Error::NotRepresentable)?; + writer.extend_from_slice(&length_32.to_le_bytes()); + for (key, 
value) in self.iter() { + key.write_bytes(writer)?; + value.write_bytes(writer)?; + } + Ok(()) + } +} + +impl FromBytes for BTreeMap +where + K: FromBytes + Ord, + V: FromBytes, +{ + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (num_keys, mut stream) = u32::from_bytes(bytes)?; + let mut result = BTreeMap::new(); + for _ in 0..num_keys { + let (k, rem) = K::from_bytes(stream)?; + let (v, rem) = V::from_bytes(rem)?; + result.insert(k, v); + stream = rem; + } + Ok((result, stream)) + } +} + +impl ToBytes for Option { + fn to_bytes(&self) -> Result, Error> { + match self { + None => Ok(vec![OPTION_NONE_TAG]), + Some(v) => { + let mut result = allocate_buffer(self)?; + result.push(OPTION_SOME_TAG); + + let mut value = v.to_bytes()?; + result.append(&mut value); + + Ok(result) + } + } + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + Some(v) => v.serialized_length(), + None => 0, + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + match self { + None => writer.push(OPTION_NONE_TAG), + Some(v) => { + writer.push(OPTION_SOME_TAG); + v.write_bytes(writer)?; + } + }; + Ok(()) + } +} + +impl FromBytes for Option { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (tag, rem) = u8::from_bytes(bytes)?; + match tag { + OPTION_NONE_TAG => Ok((None, rem)), + OPTION_SOME_TAG => { + let (t, rem) = T::from_bytes(rem)?; + Ok((Some(t), rem)) + } + _ => Err(Error::Formatting), + } + } +} + +impl ToBytes for Result { + fn to_bytes(&self) -> Result, Error> { + let mut result = allocate_buffer(self)?; + let (variant, mut value) = match self { + Err(error) => (RESULT_ERR_TAG, error.to_bytes()?), + Ok(result) => (RESULT_OK_TAG, result.to_bytes()?), + }; + result.push(variant); + result.append(&mut value); + Ok(result) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + Ok(ok) => ok.serialized_length(), + Err(error) => error.serialized_length(), + } + 
} + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + match self { + Err(error) => { + writer.push(RESULT_ERR_TAG); + error.write_bytes(writer)?; + } + Ok(result) => { + writer.push(RESULT_OK_TAG); + result.write_bytes(writer)?; + } + }; + Ok(()) + } +} + +impl FromBytes for Result { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (variant, rem) = u8::from_bytes(bytes)?; + match variant { + RESULT_ERR_TAG => { + let (value, rem) = E::from_bytes(rem)?; + Ok((Err(value), rem)) + } + RESULT_OK_TAG => { + let (value, rem) = T::from_bytes(rem)?; + Ok((Ok(value), rem)) + } + _ => Err(Error::Formatting), + } + } +} + +impl ToBytes for (T1,) { + fn to_bytes(&self) -> Result, Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for (T1,) { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (t1, remainder) = T1::from_bytes(bytes)?; + Ok(((t1,), remainder)) + } +} + +impl ToBytes for (T1, T2) { + fn to_bytes(&self) -> Result, Error> { + let mut result = allocate_buffer(self)?; + result.append(&mut self.0.to_bytes()?); + result.append(&mut self.1.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + self.1.serialized_length() + } +} + +impl FromBytes for (T1, T2) { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (t1, remainder) = T1::from_bytes(bytes)?; + let (t2, remainder) = T2::from_bytes(remainder)?; + Ok(((t1, t2), remainder)) + } +} + +impl ToBytes for (T1, T2, T3) { + fn to_bytes(&self) -> Result, Error> { + let mut result = allocate_buffer(self)?; + result.append(&mut self.0.to_bytes()?); + result.append(&mut self.1.to_bytes()?); + result.append(&mut self.2.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + self.1.serialized_length() + self.2.serialized_length() + } +} + +impl FromBytes for (T1, T2, T3) { + fn 
from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (t1, remainder) = T1::from_bytes(bytes)?; + let (t2, remainder) = T2::from_bytes(remainder)?; + let (t3, remainder) = T3::from_bytes(remainder)?; + Ok(((t1, t2, t3), remainder)) + } +} + +impl ToBytes for (T1, T2, T3, T4) { + fn to_bytes(&self) -> Result, Error> { + let mut result = allocate_buffer(self)?; + result.append(&mut self.0.to_bytes()?); + result.append(&mut self.1.to_bytes()?); + result.append(&mut self.2.to_bytes()?); + result.append(&mut self.3.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + + self.1.serialized_length() + + self.2.serialized_length() + + self.3.serialized_length() + } +} + +impl FromBytes for (T1, T2, T3, T4) { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (t1, remainder) = T1::from_bytes(bytes)?; + let (t2, remainder) = T2::from_bytes(remainder)?; + let (t3, remainder) = T3::from_bytes(remainder)?; + let (t4, remainder) = T4::from_bytes(remainder)?; + Ok(((t1, t2, t3, t4), remainder)) + } +} + +impl ToBytes + for (T1, T2, T3, T4, T5) +{ + fn to_bytes(&self) -> Result, Error> { + let mut result = allocate_buffer(self)?; + result.append(&mut self.0.to_bytes()?); + result.append(&mut self.1.to_bytes()?); + result.append(&mut self.2.to_bytes()?); + result.append(&mut self.3.to_bytes()?); + result.append(&mut self.4.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + + self.1.serialized_length() + + self.2.serialized_length() + + self.3.serialized_length() + + self.4.serialized_length() + } +} + +impl FromBytes + for (T1, T2, T3, T4, T5) +{ + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (t1, remainder) = T1::from_bytes(bytes)?; + let (t2, remainder) = T2::from_bytes(remainder)?; + let (t3, remainder) = T3::from_bytes(remainder)?; + let (t4, remainder) = T4::from_bytes(remainder)?; + let (t5, remainder) = 
T5::from_bytes(remainder)?; + Ok(((t1, t2, t3, t4, t5), remainder)) + } +} + +impl ToBytes + for (T1, T2, T3, T4, T5, T6) +{ + fn to_bytes(&self) -> Result, Error> { + let mut result = allocate_buffer(self)?; + result.append(&mut self.0.to_bytes()?); + result.append(&mut self.1.to_bytes()?); + result.append(&mut self.2.to_bytes()?); + result.append(&mut self.3.to_bytes()?); + result.append(&mut self.4.to_bytes()?); + result.append(&mut self.5.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + + self.1.serialized_length() + + self.2.serialized_length() + + self.3.serialized_length() + + self.4.serialized_length() + + self.5.serialized_length() + } +} + +impl + FromBytes for (T1, T2, T3, T4, T5, T6) +{ + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (t1, remainder) = T1::from_bytes(bytes)?; + let (t2, remainder) = T2::from_bytes(remainder)?; + let (t3, remainder) = T3::from_bytes(remainder)?; + let (t4, remainder) = T4::from_bytes(remainder)?; + let (t5, remainder) = T5::from_bytes(remainder)?; + let (t6, remainder) = T6::from_bytes(remainder)?; + Ok(((t1, t2, t3, t4, t5, t6), remainder)) + } +} + +impl + ToBytes for (T1, T2, T3, T4, T5, T6, T7) +{ + fn to_bytes(&self) -> Result, Error> { + let mut result = allocate_buffer(self)?; + result.append(&mut self.0.to_bytes()?); + result.append(&mut self.1.to_bytes()?); + result.append(&mut self.2.to_bytes()?); + result.append(&mut self.3.to_bytes()?); + result.append(&mut self.4.to_bytes()?); + result.append(&mut self.5.to_bytes()?); + result.append(&mut self.6.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + + self.1.serialized_length() + + self.2.serialized_length() + + self.3.serialized_length() + + self.4.serialized_length() + + self.5.serialized_length() + + self.6.serialized_length() + } +} + +impl< + T1: FromBytes, + T2: FromBytes, + T3: FromBytes, + T4: FromBytes, + T5: FromBytes, + 
T6: FromBytes, + T7: FromBytes, + > FromBytes for (T1, T2, T3, T4, T5, T6, T7) +{ + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (t1, remainder) = T1::from_bytes(bytes)?; + let (t2, remainder) = T2::from_bytes(remainder)?; + let (t3, remainder) = T3::from_bytes(remainder)?; + let (t4, remainder) = T4::from_bytes(remainder)?; + let (t5, remainder) = T5::from_bytes(remainder)?; + let (t6, remainder) = T6::from_bytes(remainder)?; + let (t7, remainder) = T7::from_bytes(remainder)?; + Ok(((t1, t2, t3, t4, t5, t6, t7), remainder)) + } +} + +impl< + T1: ToBytes, + T2: ToBytes, + T3: ToBytes, + T4: ToBytes, + T5: ToBytes, + T6: ToBytes, + T7: ToBytes, + T8: ToBytes, + > ToBytes for (T1, T2, T3, T4, T5, T6, T7, T8) +{ + fn to_bytes(&self) -> Result, Error> { + let mut result = allocate_buffer(self)?; + result.append(&mut self.0.to_bytes()?); + result.append(&mut self.1.to_bytes()?); + result.append(&mut self.2.to_bytes()?); + result.append(&mut self.3.to_bytes()?); + result.append(&mut self.4.to_bytes()?); + result.append(&mut self.5.to_bytes()?); + result.append(&mut self.6.to_bytes()?); + result.append(&mut self.7.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + + self.1.serialized_length() + + self.2.serialized_length() + + self.3.serialized_length() + + self.4.serialized_length() + + self.5.serialized_length() + + self.6.serialized_length() + + self.7.serialized_length() + } +} + +impl< + T1: FromBytes, + T2: FromBytes, + T3: FromBytes, + T4: FromBytes, + T5: FromBytes, + T6: FromBytes, + T7: FromBytes, + T8: FromBytes, + > FromBytes for (T1, T2, T3, T4, T5, T6, T7, T8) +{ + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (t1, remainder) = T1::from_bytes(bytes)?; + let (t2, remainder) = T2::from_bytes(remainder)?; + let (t3, remainder) = T3::from_bytes(remainder)?; + let (t4, remainder) = T4::from_bytes(remainder)?; + let (t5, remainder) = T5::from_bytes(remainder)?; + let 
(t6, remainder) = T6::from_bytes(remainder)?; + let (t7, remainder) = T7::from_bytes(remainder)?; + let (t8, remainder) = T8::from_bytes(remainder)?; + Ok(((t1, t2, t3, t4, t5, t6, t7, t8), remainder)) + } +} + +impl< + T1: ToBytes, + T2: ToBytes, + T3: ToBytes, + T4: ToBytes, + T5: ToBytes, + T6: ToBytes, + T7: ToBytes, + T8: ToBytes, + T9: ToBytes, + > ToBytes for (T1, T2, T3, T4, T5, T6, T7, T8, T9) +{ + fn to_bytes(&self) -> Result, Error> { + let mut result = allocate_buffer(self)?; + result.append(&mut self.0.to_bytes()?); + result.append(&mut self.1.to_bytes()?); + result.append(&mut self.2.to_bytes()?); + result.append(&mut self.3.to_bytes()?); + result.append(&mut self.4.to_bytes()?); + result.append(&mut self.5.to_bytes()?); + result.append(&mut self.6.to_bytes()?); + result.append(&mut self.7.to_bytes()?); + result.append(&mut self.8.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + + self.1.serialized_length() + + self.2.serialized_length() + + self.3.serialized_length() + + self.4.serialized_length() + + self.5.serialized_length() + + self.6.serialized_length() + + self.7.serialized_length() + + self.8.serialized_length() + } +} + +impl< + T1: FromBytes, + T2: FromBytes, + T3: FromBytes, + T4: FromBytes, + T5: FromBytes, + T6: FromBytes, + T7: FromBytes, + T8: FromBytes, + T9: FromBytes, + > FromBytes for (T1, T2, T3, T4, T5, T6, T7, T8, T9) +{ + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (t1, remainder) = T1::from_bytes(bytes)?; + let (t2, remainder) = T2::from_bytes(remainder)?; + let (t3, remainder) = T3::from_bytes(remainder)?; + let (t4, remainder) = T4::from_bytes(remainder)?; + let (t5, remainder) = T5::from_bytes(remainder)?; + let (t6, remainder) = T6::from_bytes(remainder)?; + let (t7, remainder) = T7::from_bytes(remainder)?; + let (t8, remainder) = T8::from_bytes(remainder)?; + let (t9, remainder) = T9::from_bytes(remainder)?; + Ok(((t1, t2, t3, t4, t5, t6, t7, 
t8, t9), remainder)) + } +} + +impl< + T1: ToBytes, + T2: ToBytes, + T3: ToBytes, + T4: ToBytes, + T5: ToBytes, + T6: ToBytes, + T7: ToBytes, + T8: ToBytes, + T9: ToBytes, + T10: ToBytes, + > ToBytes for (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10) +{ + fn to_bytes(&self) -> Result, Error> { + let mut result = allocate_buffer(self)?; + result.append(&mut self.0.to_bytes()?); + result.append(&mut self.1.to_bytes()?); + result.append(&mut self.2.to_bytes()?); + result.append(&mut self.3.to_bytes()?); + result.append(&mut self.4.to_bytes()?); + result.append(&mut self.5.to_bytes()?); + result.append(&mut self.6.to_bytes()?); + result.append(&mut self.7.to_bytes()?); + result.append(&mut self.8.to_bytes()?); + result.append(&mut self.9.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + + self.1.serialized_length() + + self.2.serialized_length() + + self.3.serialized_length() + + self.4.serialized_length() + + self.5.serialized_length() + + self.6.serialized_length() + + self.7.serialized_length() + + self.8.serialized_length() + + self.9.serialized_length() + } +} + +impl< + T1: FromBytes, + T2: FromBytes, + T3: FromBytes, + T4: FromBytes, + T5: FromBytes, + T6: FromBytes, + T7: FromBytes, + T8: FromBytes, + T9: FromBytes, + T10: FromBytes, + > FromBytes for (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10) +{ + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (t1, remainder) = T1::from_bytes(bytes)?; + let (t2, remainder) = T2::from_bytes(remainder)?; + let (t3, remainder) = T3::from_bytes(remainder)?; + let (t4, remainder) = T4::from_bytes(remainder)?; + let (t5, remainder) = T5::from_bytes(remainder)?; + let (t6, remainder) = T6::from_bytes(remainder)?; + let (t7, remainder) = T7::from_bytes(remainder)?; + let (t8, remainder) = T8::from_bytes(remainder)?; + let (t9, remainder) = T9::from_bytes(remainder)?; + let (t10, remainder) = T10::from_bytes(remainder)?; + Ok(((t1, t2, t3, t4, t5, t6, t7, t8, t9, 
t10), remainder)) + } +} + +impl ToBytes for str { + #[inline] + fn to_bytes(&self) -> Result, Error> { + u8_slice_to_bytes(self.as_bytes()) + } + + #[inline] + fn serialized_length(&self) -> usize { + u8_slice_serialized_length(self.as_bytes()) + } + + #[inline] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + write_u8_slice(self.as_bytes(), writer)?; + Ok(()) + } +} + +impl ToBytes for &str { + #[inline(always)] + fn to_bytes(&self) -> Result, Error> { + (*self).to_bytes() + } + + #[inline(always)] + fn serialized_length(&self) -> usize { + (*self).serialized_length() + } + + #[inline] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + write_u8_slice(self.as_bytes(), writer)?; + Ok(()) + } +} + +impl ToBytes for &T +where + T: ToBytes, +{ + fn to_bytes(&self) -> Result, Error> { + (*self).to_bytes() + } + + fn serialized_length(&self) -> usize { + (*self).serialized_length() + } +} + +impl ToBytes for Ratio +where + T: Clone + Integer + ToBytes, +{ + fn to_bytes(&self) -> Result, Error> { + if self.denom().is_zero() { + return Err(Error::Formatting); + } + (self.numer().clone(), self.denom().clone()).into_bytes() + } + + fn serialized_length(&self) -> usize { + (self.numer().clone(), self.denom().clone()).serialized_length() + } +} + +impl FromBytes for Ratio +where + T: Clone + FromBytes + Integer, +{ + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let ((numer, denom), rem): ((T, T), &[u8]) = FromBytes::from_bytes(bytes)?; + if denom.is_zero() { + return Err(Error::Formatting); + } + Ok((Ratio::new(numer, denom), rem)) + } +} + +/// Serializes a slice of bytes with a length prefix. +/// +/// This function is serializing a slice of bytes with an addition of a 4 byte length prefix. +/// +/// For safety you should prefer to use [`vec_u8_to_bytes`]. For efficiency reasons you should also +/// avoid using serializing Vec. 
+fn u8_slice_to_bytes(bytes: &[u8]) -> Result, Error> { + let serialized_length = u8_slice_serialized_length(bytes); + let mut vec = try_vec_with_capacity(serialized_length)?; + let length_prefix: u32 = bytes + .len() + .try_into() + .map_err(|_| Error::NotRepresentable)?; + let length_prefix_bytes = length_prefix.to_le_bytes(); + vec.extend_from_slice(&length_prefix_bytes); + vec.extend_from_slice(bytes); + Ok(vec) +} + +fn write_u8_slice(bytes: &[u8], writer: &mut Vec) -> Result<(), Error> { + let length_32: u32 = bytes + .len() + .try_into() + .map_err(|_| Error::NotRepresentable)?; + writer.extend_from_slice(&length_32.to_le_bytes()); + writer.extend_from_slice(bytes); + Ok(()) +} + +/// Serializes a vector of bytes with a length prefix. +/// +/// For efficiency you should avoid serializing Vec. +#[allow(clippy::ptr_arg)] +#[inline] +pub(crate) fn vec_u8_to_bytes(vec: &Vec) -> Result, Error> { + u8_slice_to_bytes(vec.as_slice()) +} + +/// Returns serialized length of serialized slice of bytes. +/// +/// This function adds a length prefix in the beginning. +#[inline(always)] +fn u8_slice_serialized_length(bytes: &[u8]) -> usize { + U32_SERIALIZED_LENGTH + bytes.len() +} + +#[allow(clippy::ptr_arg)] +#[inline] +pub(crate) fn vec_u8_serialized_length(vec: &Vec) -> usize { + u8_slice_serialized_length(vec.as_slice()) +} + +// This test helper is not intended to be used by third party crates. 
+#[doc(hidden)] +/// Returns `true` if a we can serialize and then deserialize a value +pub fn test_serialization_roundtrip(t: &T) +where + T: alloc::fmt::Debug + ToBytes + FromBytes + PartialEq, +{ + let serialized = ToBytes::to_bytes(t).expect("Unable to serialize data"); + assert_eq!( + serialized.len(), + t.serialized_length(), + "\nLength of serialized data: {},\nserialized_length() yielded: {},\nserialized data: {:?}, t is {:?}", + serialized.len(), + t.serialized_length(), + serialized, + t + ); + let mut written_bytes = vec![]; + t.write_bytes(&mut written_bytes) + .expect("Unable to serialize data via write_bytes"); + assert_eq!(serialized, written_bytes); + + let deserialized_from_slice = + deserialize_from_slice(&serialized).expect("Unable to deserialize data"); + // assert!(*t == deserialized); + assert_eq!(*t, deserialized_from_slice); + + let deserialized = deserialize::(serialized).expect("Unable to deserialize data"); + assert_eq!(*t, deserialized); +} +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn should_not_serialize_zero_denominator() { + let malicious = Ratio::new_raw(1, 0); + assert_eq!(malicious.to_bytes().unwrap_err(), Error::Formatting); + } + + #[test] + fn should_not_deserialize_zero_denominator() { + let malicious_bytes = (1u64, 0u64).to_bytes().unwrap(); + let result: Result, Error> = super::deserialize(malicious_bytes); + assert_eq!(result.unwrap_err(), Error::Formatting); + } + + #[test] + fn should_have_generic_tobytes_impl_for_borrowed_types() { + struct NonCopyable; + + impl ToBytes for NonCopyable { + fn to_bytes(&self) -> Result, Error> { + Ok(vec![1, 2, 3]) + } + + fn serialized_length(&self) -> usize { + 3 + } + } + + let noncopyable: &NonCopyable = &NonCopyable; + + assert_eq!(noncopyable.to_bytes().unwrap(), vec![1, 2, 3]); + assert_eq!(noncopyable.serialized_length(), 3); + assert_eq!(noncopyable.into_bytes().unwrap(), vec![1, 2, 3]); + } + + #[cfg(debug_assertions)] + #[test] + #[should_panic(expected = "You 
should use Bytes newtype wrapper for efficiency")] + fn should_fail_to_serialize_slice_of_u8() { + let bytes = b"0123456789".to_vec(); + bytes.to_bytes().unwrap(); + } +} + +#[cfg(test)] +mod proptests { + use std::collections::VecDeque; + + use proptest::{collection::vec, prelude::*}; + + use crate::{ + bytesrepr::{self, bytes::gens::bytes_arb, ToBytes}, + gens::*, + }; + + proptest! { + #[test] + fn test_bool(u in any::()) { + bytesrepr::test_serialization_roundtrip(&u); + } + + #[test] + fn test_u8(u in any::()) { + bytesrepr::test_serialization_roundtrip(&u); + } + + #[test] + fn test_u16(u in any::()) { + bytesrepr::test_serialization_roundtrip(&u); + } + + #[test] + fn test_u32(u in any::()) { + bytesrepr::test_serialization_roundtrip(&u); + } + + #[test] + fn test_i32(u in any::()) { + bytesrepr::test_serialization_roundtrip(&u); + } + + #[test] + fn test_u64(u in any::()) { + bytesrepr::test_serialization_roundtrip(&u); + } + + #[test] + fn test_i64(u in any::()) { + bytesrepr::test_serialization_roundtrip(&u); + } + + #[test] + fn test_u8_slice_32(s in u8_slice_32()) { + bytesrepr::test_serialization_roundtrip(&s); + } + + #[test] + fn test_vec_u8(u in bytes_arb(1..100)) { + bytesrepr::test_serialization_roundtrip(&u); + } + + #[test] + fn test_vec_i32(u in vec(any::(), 1..100)) { + bytesrepr::test_serialization_roundtrip(&u); + } + + #[test] + fn test_vecdeque_i32((front, back) in (vec(any::(), 1..100), vec(any::(), 1..100))) { + let mut vec_deque = VecDeque::new(); + for f in front { + vec_deque.push_front(f); + } + for f in back { + vec_deque.push_back(f); + } + bytesrepr::test_serialization_roundtrip(&vec_deque); + } + + #[test] + fn test_vec_vec_u8(u in vec(bytes_arb(1..100), 10)) { + bytesrepr::test_serialization_roundtrip(&u); + } + + #[test] + fn test_uref_map(m in named_keys_arb(20)) { + bytesrepr::test_serialization_roundtrip(&m); + } + + #[test] + fn test_array_u8_32(arr in any::<[u8; 32]>()) { + bytesrepr::test_serialization_roundtrip(&arr); + 
} + + #[test] + fn test_string(s in "\\PC*") { + bytesrepr::test_serialization_roundtrip(&s); + } + + #[test] + fn test_str(s in "\\PC*") { + let not_a_string_object = s.as_str(); + not_a_string_object.to_bytes().expect("should serialize a str"); + } + + #[test] + fn test_option(o in proptest::option::of(key_arb())) { + bytesrepr::test_serialization_roundtrip(&o); + } + + #[test] + fn test_unit(unit in Just(())) { + bytesrepr::test_serialization_roundtrip(&unit); + } + + #[test] + fn test_u128_serialization(u in u128_arb()) { + bytesrepr::test_serialization_roundtrip(&u); + } + + #[test] + fn test_u256_serialization(u in u256_arb()) { + bytesrepr::test_serialization_roundtrip(&u); + } + + #[test] + fn test_u512_serialization(u in u512_arb()) { + bytesrepr::test_serialization_roundtrip(&u); + } + + #[test] + fn test_key_serialization(key in key_arb()) { + bytesrepr::test_serialization_roundtrip(&key); + } + + #[test] + fn test_cl_value_serialization(cl_value in cl_value_arb()) { + bytesrepr::test_serialization_roundtrip(&cl_value); + } + + #[test] + fn test_access_rights(access_right in access_rights_arb()) { + bytesrepr::test_serialization_roundtrip(&access_right); + } + + #[test] + fn test_uref(uref in uref_arb()) { + bytesrepr::test_serialization_roundtrip(&uref); + } + + #[test] + fn test_account_hash(pk in account_hash_arb()) { + bytesrepr::test_serialization_roundtrip(&pk); + } + + #[test] + fn test_result(result in result_arb()) { + bytesrepr::test_serialization_roundtrip(&result); + } + + #[test] + fn test_phase_serialization(phase in phase_arb()) { + bytesrepr::test_serialization_roundtrip(&phase); + } + + #[test] + fn test_protocol_version(protocol_version in protocol_version_arb()) { + bytesrepr::test_serialization_roundtrip(&protocol_version); + } + + #[test] + fn test_sem_ver(sem_ver in sem_ver_arb()) { + bytesrepr::test_serialization_roundtrip(&sem_ver); + } + + #[test] + fn test_tuple1(t in (any::(),)) { + bytesrepr::test_serialization_roundtrip(&t); 
+ } + + #[test] + fn test_tuple2(t in (any::(),any::())) { + bytesrepr::test_serialization_roundtrip(&t); + } + + #[test] + fn test_tuple3(t in (any::(),any::(),any::())) { + bytesrepr::test_serialization_roundtrip(&t); + } + + #[test] + fn test_tuple4(t in (any::(),any::(),any::(), any::())) { + bytesrepr::test_serialization_roundtrip(&t); + } + #[test] + fn test_tuple5(t in (any::(),any::(),any::(), any::(), any::())) { + bytesrepr::test_serialization_roundtrip(&t); + } + #[test] + fn test_tuple6(t in (any::(),any::(),any::(), any::(), any::(), any::())) { + bytesrepr::test_serialization_roundtrip(&t); + } + #[test] + fn test_tuple7(t in (any::(),any::(),any::(), any::(), any::(), any::(), any::())) { + bytesrepr::test_serialization_roundtrip(&t); + } + #[test] + fn test_tuple8(t in (any::(),any::(),any::(), any::(), any::(), any::(), any::(), any::())) { + bytesrepr::test_serialization_roundtrip(&t); + } + #[test] + fn test_tuple9(t in (any::(),any::(),any::(), any::(), any::(), any::(), any::(), any::(), any::())) { + bytesrepr::test_serialization_roundtrip(&t); + } + #[test] + fn test_tuple10(t in (any::(),any::(),any::(), any::(), any::(), any::(), any::(), any::(), any::(), any::())) { + bytesrepr::test_serialization_roundtrip(&t); + } + #[test] + fn test_ratio_u64(t in (any::(), 1..u64::max_value())) { + bytesrepr::test_serialization_roundtrip(&t); + } + } +} diff --git a/casper_types/src/bytesrepr/bytes.rs b/casper_types/src/bytesrepr/bytes.rs new file mode 100644 index 00000000..4ecf9747 --- /dev/null +++ b/casper_types/src/bytesrepr/bytes.rs @@ -0,0 +1,389 @@ +use alloc::{ + string::String, + vec::{IntoIter, Vec}, +}; +use core::{ + cmp, fmt, + iter::FromIterator, + ops::{Deref, Index, Range, RangeFrom, RangeFull, RangeTo}, + slice, +}; + +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +use serde::{ + de::{Error as SerdeError, SeqAccess, Visitor}, + Deserialize, Deserializer, Serialize, Serializer, +}; + +use super::{Error, FromBytes, 
ToBytes}; +use crate::{checksummed_hex, CLType, CLTyped}; + +/// A newtype wrapper for bytes that has efficient serialization routines. +#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Debug, Default, Hash)] +pub struct Bytes(Vec); + +impl Bytes { + /// Constructs a new, empty vector of bytes. + pub fn new() -> Bytes { + Bytes::default() + } + + /// Returns reference to inner container. + #[inline] + pub fn inner_bytes(&self) -> &Vec { + &self.0 + } + + /// Extracts a slice containing the entire vector. + pub fn as_slice(&self) -> &[u8] { + self + } +} + +impl Deref for Bytes { + type Target = [u8]; + + fn deref(&self) -> &Self::Target { + self.0.deref() + } +} + +impl From> for Bytes { + fn from(vec: Vec) -> Self { + Self(vec) + } +} + +impl From for Vec { + fn from(bytes: Bytes) -> Self { + bytes.0 + } +} + +impl From<&[u8]> for Bytes { + fn from(bytes: &[u8]) -> Self { + Self(bytes.to_vec()) + } +} + +impl CLTyped for Bytes { + fn cl_type() -> CLType { + >::cl_type() + } +} + +impl AsRef<[u8]> for Bytes { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + +impl ToBytes for Bytes { + #[inline(always)] + fn to_bytes(&self) -> Result, Error> { + super::vec_u8_to_bytes(&self.0) + } + + #[inline(always)] + fn into_bytes(self) -> Result, Error> { + super::vec_u8_to_bytes(&self.0) + } + + #[inline(always)] + fn serialized_length(&self) -> usize { + super::vec_u8_serialized_length(&self.0) + } + + #[inline(always)] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + super::write_u8_slice(self.as_slice(), writer) + } +} + +impl FromBytes for Bytes { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), super::Error> { + let (size, remainder) = u32::from_bytes(bytes)?; + let (result, remainder) = super::safe_split_at(remainder, size as usize)?; + Ok((Bytes(result.to_vec()), remainder)) + } + + fn from_vec(stream: Vec) -> Result<(Self, Vec), Error> { + let (size, mut stream) = u32::from_vec(stream)?; + + if size as usize > stream.len() { + 
Err(Error::EarlyEndOfStream) + } else { + let remainder = stream.split_off(size as usize); + Ok((Bytes(stream), remainder)) + } + } +} + +impl Index for Bytes { + type Output = u8; + + fn index(&self, index: usize) -> &u8 { + let Bytes(ref dat) = self; + &dat[index] + } +} + +impl Index> for Bytes { + type Output = [u8]; + + fn index(&self, index: Range) -> &[u8] { + let Bytes(dat) = self; + &dat[index] + } +} + +impl Index> for Bytes { + type Output = [u8]; + + fn index(&self, index: RangeTo) -> &[u8] { + let Bytes(dat) = self; + &dat[index] + } +} + +impl Index> for Bytes { + type Output = [u8]; + + fn index(&self, index: RangeFrom) -> &[u8] { + let Bytes(dat) = self; + &dat[index] + } +} + +impl Index for Bytes { + type Output = [u8]; + + fn index(&self, _: RangeFull) -> &[u8] { + let Bytes(dat) = self; + &dat[..] + } +} + +impl FromIterator for Bytes { + #[inline] + fn from_iter>(iter: I) -> Bytes { + let vec = Vec::from_iter(iter); + Bytes(vec) + } +} + +impl<'a> IntoIterator for &'a Bytes { + type Item = &'a u8; + + type IntoIter = slice::Iter<'a, u8>; + + fn into_iter(self) -> Self::IntoIter { + self.0.iter() + } +} + +impl IntoIterator for Bytes { + type Item = u8; + + type IntoIter = IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +#[cfg(feature = "datasize")] +impl datasize::DataSize for Bytes { + const IS_DYNAMIC: bool = true; + + const STATIC_HEAP_SIZE: usize = 0; + + fn estimate_heap_size(&self) -> usize { + self.0.capacity() * std::mem::size_of::() + } +} + +const RANDOM_BYTES_MAX_LENGTH: usize = 100; + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> Bytes { + let len = rng.gen_range(0..RANDOM_BYTES_MAX_LENGTH); + let mut result = Vec::with_capacity(len); + for _ in 0..len { + result.push(rng.gen()); + } + result.into() + } +} + +struct BytesVisitor; + +impl<'de> Visitor<'de> for BytesVisitor { + type Value = Bytes; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + 
formatter.write_str("byte array") + } + + fn visit_seq(self, mut visitor: V) -> Result + where + V: SeqAccess<'de>, + { + let len = cmp::min(visitor.size_hint().unwrap_or(0), 4096); + let mut bytes = Vec::with_capacity(len); + + while let Some(b) = visitor.next_element()? { + bytes.push(b); + } + + Ok(Bytes::from(bytes)) + } + + fn visit_bytes(self, v: &[u8]) -> Result + where + E: SerdeError, + { + Ok(Bytes::from(v)) + } + + fn visit_byte_buf(self, v: Vec) -> Result + where + E: SerdeError, + { + Ok(Bytes::from(v)) + } + + fn visit_str(self, v: &str) -> Result + where + E: SerdeError, + { + Ok(Bytes::from(v.as_bytes())) + } + + fn visit_string(self, v: String) -> Result + where + E: SerdeError, + { + Ok(Bytes::from(v.into_bytes())) + } +} + +impl<'de> Deserialize<'de> for Bytes { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + if deserializer.is_human_readable() { + let hex_string = String::deserialize(deserializer)?; + checksummed_hex::decode(hex_string) + .map(Bytes) + .map_err(SerdeError::custom) + } else { + let bytes = deserializer.deserialize_byte_buf(BytesVisitor)?; + Ok(bytes) + } + } +} + +impl Serialize for Bytes { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + if serializer.is_human_readable() { + base16::encode_lower(&self.0).serialize(serializer) + } else { + serializer.serialize_bytes(&self.0) + } + } +} + +#[cfg(test)] +mod tests { + use crate::bytesrepr::{self, Error, FromBytes, ToBytes, U32_SERIALIZED_LENGTH}; + use alloc::vec::Vec; + + use serde_json::json; + use serde_test::{assert_tokens, Configure, Token}; + + use super::Bytes; + + const TRUTH: &[u8] = &[0xde, 0xad, 0xbe, 0xef]; + + #[test] + fn vec_u8_from_bytes() { + let data: Bytes = vec![1, 2, 3, 4, 5].into(); + let data_bytes = data.to_bytes().unwrap(); + assert!(Bytes::from_bytes(&data_bytes[..U32_SERIALIZED_LENGTH / 2]).is_err()); + assert!(Bytes::from_bytes(&data_bytes[..U32_SERIALIZED_LENGTH]).is_err()); + 
assert!(Bytes::from_bytes(&data_bytes[..U32_SERIALIZED_LENGTH + 2]).is_err()); + } + + #[test] + fn should_serialize_deserialize_bytes() { + let data: Bytes = vec![1, 2, 3, 4, 5].into(); + bytesrepr::test_serialization_roundtrip(&data); + } + + #[test] + fn should_fail_to_serialize_deserialize_malicious_bytes() { + let data: Bytes = vec![1, 2, 3, 4, 5].into(); + let mut serialized = data.to_bytes().expect("should serialize data"); + serialized = serialized[..serialized.len() - 1].to_vec(); + let res: Result<(_, &[u8]), Error> = Bytes::from_bytes(&serialized); + assert_eq!(res.unwrap_err(), Error::EarlyEndOfStream); + } + + #[test] + fn should_serialize_deserialize_bytes_and_keep_rem() { + let data: Bytes = vec![1, 2, 3, 4, 5].into(); + let expected_rem: Vec = vec![6, 7, 8, 9, 10]; + let mut serialized = data.to_bytes().expect("should serialize data"); + serialized.extend(&expected_rem); + let (deserialized, rem): (Bytes, &[u8]) = + FromBytes::from_bytes(&serialized).expect("should deserialize data"); + assert_eq!(data, deserialized); + assert_eq!(&rem, &expected_rem); + } + + #[test] + fn should_ser_de_human_readable() { + let truth = vec![0xde, 0xad, 0xbe, 0xef]; + + let bytes_ser: Bytes = truth.clone().into(); + + let json_object = serde_json::to_value(bytes_ser).unwrap(); + assert_eq!(json_object, json!("deadbeef")); + + let bytes_de: Bytes = serde_json::from_value(json_object).unwrap(); + assert_eq!(bytes_de, Bytes::from(truth)); + } + + #[test] + fn should_ser_de_readable() { + let truth: Bytes = TRUTH.into(); + assert_tokens(&truth.readable(), &[Token::Str("deadbeef")]); + } + + #[test] + fn should_ser_de_compact() { + let truth: Bytes = TRUTH.into(); + assert_tokens(&truth.compact(), &[Token::Bytes(TRUTH)]); + } +} + +#[cfg(test)] +pub mod gens { + use super::Bytes; + use proptest::{ + collection::{vec, SizeRange}, + prelude::*, + }; + + pub fn bytes_arb(size: impl Into) -> impl Strategy { + vec(any::(), size).prop_map(Bytes::from) + } +} diff --git 
a/casper_types/src/checksummed_hex.rs b/casper_types/src/checksummed_hex.rs new file mode 100644 index 00000000..165acd3a --- /dev/null +++ b/casper_types/src/checksummed_hex.rs @@ -0,0 +1,241 @@ +//! Checksummed hex encoding following an [EIP-55][1]-like scheme. +//! +//! [1]: https://eips.ethereum.org/EIPS/eip-55 + +use alloc::vec::Vec; +use core::ops::RangeInclusive; + +use base16; + +use crate::crypto; + +/// The number of input bytes, at or below which [`decode`] will checksum-decode the output. +pub const SMALL_BYTES_COUNT: usize = 75; + +const HEX_CHARS: [char; 22] = [ + '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'A', 'B', 'C', + 'D', 'E', 'F', +]; + +/// Takes a slice of bytes and breaks it up into a vector of *nibbles* (ie, 4-bit values) +/// represented as `u8`s. +fn bytes_to_nibbles<'a, T: 'a + AsRef<[u8]>>(input: &'a T) -> impl Iterator + 'a { + input + .as_ref() + .iter() + .flat_map(move |byte| [4, 0].iter().map(move |offset| (byte >> offset) & 0x0f)) +} + +/// Takes a slice of bytes and outputs an infinite cyclic stream of bits for those bytes. +fn bytes_to_bits_cycle(bytes: Vec) -> impl Iterator { + bytes + .into_iter() + .cycle() + .flat_map(move |byte| (0..8usize).map(move |offset| ((byte >> offset) & 0x01) == 0x01)) +} + +/// Returns the bytes encoded as hexadecimal with mixed-case based checksums following a scheme +/// similar to [EIP-55](https://eips.ethereum.org/EIPS/eip-55). +/// +/// Key differences: +/// - Works on any length of data, not just 20-byte addresses +/// - Uses Blake2b hashes rather than Keccak +/// - Uses hash bits rather than nibbles +fn encode_iter<'a, T: 'a + AsRef<[u8]>>(input: &'a T) -> impl Iterator + 'a { + let nibbles = bytes_to_nibbles(input); + let mut hash_bits = bytes_to_bits_cycle(crypto::blake2b(input.as_ref()).to_vec()); + nibbles.map(move |mut nibble| { + // Base 16 numbers greater than 10 are represented by the ascii characters a through f. 
+ if nibble >= 10 && hash_bits.next().unwrap_or(true) { + // We are using nibble to index HEX_CHARS, so adding 6 to nibble gives us the index + // of the uppercase character. HEX_CHARS[10] == 'a', HEX_CHARS[16] == 'A'. + nibble += 6; + } + HEX_CHARS[nibble as usize] + }) +} + +/// Returns true if all chars in a string are uppercase or lowercase. +/// Returns false if the string is mixed case or if there are no alphabetic chars. +fn string_is_same_case>(s: T) -> bool { + const LOWER_RANGE: RangeInclusive = b'a'..=b'f'; + const UPPER_RANGE: RangeInclusive = b'A'..=b'F'; + + let mut chars = s + .as_ref() + .iter() + .filter(|c| LOWER_RANGE.contains(c) || UPPER_RANGE.contains(c)); + + match chars.next() { + Some(first) => { + let is_upper = UPPER_RANGE.contains(first); + chars.all(|c| UPPER_RANGE.contains(c) == is_upper) + } + None => { + // String has no actual characters. + true + } + } +} + +/// Decodes a mixed-case hexadecimal string, verifying that it conforms to the checksum scheme +/// similar to scheme in [EIP-55][1]. +/// +/// Key differences: +/// - Works on any length of (decoded) data up to `SMALL_BYTES_COUNT`, not just 20-byte addresses +/// - Uses Blake2b hashes rather than Keccak +/// - Uses hash bits rather than nibbles +/// +/// For backward compatibility: if the hex string is all uppercase or all lowercase, the check is +/// skipped. +/// +/// [1]: https://eips.ethereum.org/EIPS/eip-55 +pub fn decode>(input: T) -> Result, base16::DecodeError> { + let bytes = base16::decode(input.as_ref())?; + + // If the string was not small or not mixed case, don't verify the checksum. 
+ if bytes.len() > SMALL_BYTES_COUNT || string_is_same_case(input.as_ref()) { + return Ok(bytes); + } + + encode_iter(&bytes) + .zip(input.as_ref().iter()) + .enumerate() + .try_for_each(|(index, (expected_case_hex_char, &input_hex_char))| { + if expected_case_hex_char as u8 == input_hex_char { + Ok(()) + } else { + Err(base16::DecodeError::InvalidByte { + index, + byte: expected_case_hex_char as u8, + }) + } + })?; + Ok(bytes) +} + +#[cfg(test)] +mod tests { + use alloc::string::String; + + use proptest::{ + collection::vec, + prelude::{any, prop_assert, prop_assert_eq}, + }; + use proptest_attr_macro::proptest; + + use super::*; + + #[test] + fn should_decode_empty_input() { + let input = String::new(); + let actual = decode(input).unwrap(); + assert!(actual.is_empty()); + } + + #[test] + fn string_is_same_case_true_when_same_case() { + let input = "aaaaaaaaaaa"; + assert!(string_is_same_case(input)); + + let input = "AAAAAAAAAAA"; + assert!(string_is_same_case(input)); + } + + #[test] + fn string_is_same_case_false_when_mixed_case() { + let input = "aAaAaAaAaAa"; + assert!(!string_is_same_case(input)); + } + + #[test] + fn string_is_same_case_no_alphabetic_chars_in_string() { + let input = "424242424242"; + assert!(string_is_same_case(input)); + } + + #[test] + fn should_checksum_decode_only_if_small() { + let input = [255; SMALL_BYTES_COUNT]; + let small_encoded: String = encode_iter(&input).collect(); + assert_eq!(input.to_vec(), decode(&small_encoded).unwrap()); + + assert!(decode("A1a2").is_err()); + + let large_encoded = format!("A1{}", small_encoded); + assert!(decode(large_encoded).is_ok()); + } + + #[proptest] + fn hex_roundtrip(input: Vec) { + prop_assert_eq!( + &input, + &decode(encode_iter(&input).collect::()).expect("Failed to decode input.") + ); + } + + proptest::proptest! 
{ + #[test] + fn should_fail_on_invalid_checksum(input in vec(any::(), 0..75)) { + let encoded: String = encode_iter(&input).collect(); + + // Swap the case of the first letter in the checksum hex-encoded value. + let mut expected_error = None; + let mutated: String = encoded + .char_indices() + .map(|(index, mut c)| { + if expected_error.is_some() || c.is_ascii_digit() { + return c; + } + expected_error = Some(base16::DecodeError::InvalidByte { + index, + byte: c as u8, + }); + if c.is_ascii_uppercase() { + c.make_ascii_lowercase(); + } else { + c.make_ascii_uppercase(); + } + c + }) + .collect(); + + // If the encoded form is now all the same case or digits, just return. + if string_is_same_case(&mutated) { + return Ok(()); + } + + // Assert we can still decode to original input using `base16::decode`. + prop_assert_eq!( + input, + base16::decode(&mutated).expect("Failed to decode input.") + ); + + // Assert decoding using `checksummed_hex::decode` returns the expected error. + prop_assert_eq!(expected_error.unwrap(), decode(&mutated).unwrap_err()) + } + } + + #[proptest] + fn hex_roundtrip_sanity(input: Vec) { + prop_assert!(decode(encode_iter(&input).collect::()).is_ok()) + } + + #[proptest] + fn is_same_case_uppercase(input: String) { + let input = input.to_uppercase(); + prop_assert!(string_is_same_case(input)); + } + + #[proptest] + fn is_same_case_lowercase(input: String) { + let input = input.to_lowercase(); + prop_assert!(string_is_same_case(input)); + } + + #[proptest] + fn is_not_same_case(input: String) { + let input = format!("aA{}", input); + prop_assert!(!string_is_same_case(input)); + } +} diff --git a/casper_types/src/cl_type.rs b/casper_types/src/cl_type.rs new file mode 100644 index 00000000..b49b4ac5 --- /dev/null +++ b/casper_types/src/cl_type.rs @@ -0,0 +1,779 @@ +// TODO - remove once schemars stops causing warning. 
+#![allow(clippy::field_reassign_with_default)] + +use alloc::{ + boxed::Box, + collections::{BTreeMap, BTreeSet, VecDeque}, + string::String, + vec::Vec, +}; +use core::mem; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use num_rational::Ratio; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + Key, URef, U128, U256, U512, +}; + +// This must be less than 300 in order to avoid a stack overflow when deserializing. +pub(crate) const CL_TYPE_RECURSION_DEPTH: u8 = 50; + +const CL_TYPE_TAG_BOOL: u8 = 0; +const CL_TYPE_TAG_I32: u8 = 1; +const CL_TYPE_TAG_I64: u8 = 2; +const CL_TYPE_TAG_U8: u8 = 3; +const CL_TYPE_TAG_U32: u8 = 4; +const CL_TYPE_TAG_U64: u8 = 5; +const CL_TYPE_TAG_U128: u8 = 6; +const CL_TYPE_TAG_U256: u8 = 7; +const CL_TYPE_TAG_U512: u8 = 8; +const CL_TYPE_TAG_UNIT: u8 = 9; +const CL_TYPE_TAG_STRING: u8 = 10; +const CL_TYPE_TAG_KEY: u8 = 11; +const CL_TYPE_TAG_UREF: u8 = 12; +const CL_TYPE_TAG_OPTION: u8 = 13; +const CL_TYPE_TAG_LIST: u8 = 14; +const CL_TYPE_TAG_BYTE_ARRAY: u8 = 15; +const CL_TYPE_TAG_RESULT: u8 = 16; +const CL_TYPE_TAG_MAP: u8 = 17; +const CL_TYPE_TAG_TUPLE1: u8 = 18; +const CL_TYPE_TAG_TUPLE2: u8 = 19; +const CL_TYPE_TAG_TUPLE3: u8 = 20; +const CL_TYPE_TAG_ANY: u8 = 21; +const CL_TYPE_TAG_PUBLIC_KEY: u8 = 22; + +/// Casper types, i.e. types which can be stored and manipulated by smart contracts. +/// +/// Provides a description of the underlying data type of a [`CLValue`](crate::CLValue). +#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub enum CLType { + /// `bool` primitive. + Bool, + /// `i32` primitive. + I32, + /// `i64` primitive. + I64, + /// `u8` primitive. + U8, + /// `u32` primitive. + U32, + /// `u64` primitive. 
+ U64, + /// [`U128`] large unsigned integer type. + U128, + /// [`U256`] large unsigned integer type. + U256, + /// [`U512`] large unsigned integer type. + U512, + /// `()` primitive. + Unit, + /// `String` primitive. + String, + /// [`Key`] system type. + Key, + /// [`URef`] system type. + URef, + /// [`PublicKey`](crate::PublicKey) system type. + PublicKey, + /// `Option` of a `CLType`. + #[cfg_attr(feature = "datasize", data_size(skip))] + Option(Box), + /// Variable-length list of a single `CLType` (comparable to a `Vec`). + #[cfg_attr(feature = "datasize", data_size(skip))] + List(Box), + /// Fixed-length list of a single `CLType` (comparable to a Rust array). + ByteArray(u32), + /// `Result` with `Ok` and `Err` variants of `CLType`s. + #[allow(missing_docs)] // generated docs are explicit enough. + #[cfg_attr(feature = "datasize", data_size(skip))] + Result { ok: Box, err: Box }, + /// Map with keys of a single `CLType` and values of a single `CLType`. + #[allow(missing_docs)] // generated docs are explicit enough. + #[cfg_attr(feature = "datasize", data_size(skip))] + Map { + key: Box, + value: Box, + }, + /// 1-ary tuple of a `CLType`. + #[cfg_attr(feature = "datasize", data_size(skip))] + Tuple1([Box; 1]), + /// 2-ary tuple of `CLType`s. + #[cfg_attr(feature = "datasize", data_size(skip))] + Tuple2([Box; 2]), + /// 3-ary tuple of `CLType`s. + #[cfg_attr(feature = "datasize", data_size(skip))] + Tuple3([Box; 3]), + /// Unspecified type. + Any, +} + +impl CLType { + /// The `len()` of the `Vec` resulting from `self.to_bytes()`. 
+ pub fn serialized_length(&self) -> usize { + mem::size_of::() + + match self { + CLType::Bool + | CLType::I32 + | CLType::I64 + | CLType::U8 + | CLType::U32 + | CLType::U64 + | CLType::U128 + | CLType::U256 + | CLType::U512 + | CLType::Unit + | CLType::String + | CLType::Key + | CLType::URef + | CLType::PublicKey + | CLType::Any => 0, + CLType::Option(cl_type) | CLType::List(cl_type) => cl_type.serialized_length(), + CLType::ByteArray(list_len) => list_len.serialized_length(), + CLType::Result { ok, err } => ok.serialized_length() + err.serialized_length(), + CLType::Map { key, value } => key.serialized_length() + value.serialized_length(), + CLType::Tuple1(cl_type_array) => serialized_length_of_cl_tuple_type(cl_type_array), + CLType::Tuple2(cl_type_array) => serialized_length_of_cl_tuple_type(cl_type_array), + CLType::Tuple3(cl_type_array) => serialized_length_of_cl_tuple_type(cl_type_array), + } + } + + /// Returns `true` if the [`CLType`] is [`Option`]. + pub fn is_option(&self) -> bool { + matches!(self, Self::Option(..)) + } +} + +/// Returns the `CLType` describing a "named key" on the system, i.e. a `(String, Key)`. 
+pub fn named_key_type() -> CLType { + CLType::Tuple2([Box::new(CLType::String), Box::new(CLType::Key)]) +} + +impl CLType { + pub(crate) fn append_bytes(&self, stream: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + CLType::Bool => stream.push(CL_TYPE_TAG_BOOL), + CLType::I32 => stream.push(CL_TYPE_TAG_I32), + CLType::I64 => stream.push(CL_TYPE_TAG_I64), + CLType::U8 => stream.push(CL_TYPE_TAG_U8), + CLType::U32 => stream.push(CL_TYPE_TAG_U32), + CLType::U64 => stream.push(CL_TYPE_TAG_U64), + CLType::U128 => stream.push(CL_TYPE_TAG_U128), + CLType::U256 => stream.push(CL_TYPE_TAG_U256), + CLType::U512 => stream.push(CL_TYPE_TAG_U512), + CLType::Unit => stream.push(CL_TYPE_TAG_UNIT), + CLType::String => stream.push(CL_TYPE_TAG_STRING), + CLType::Key => stream.push(CL_TYPE_TAG_KEY), + CLType::URef => stream.push(CL_TYPE_TAG_UREF), + CLType::PublicKey => stream.push(CL_TYPE_TAG_PUBLIC_KEY), + CLType::Option(cl_type) => { + stream.push(CL_TYPE_TAG_OPTION); + cl_type.append_bytes(stream)?; + } + CLType::List(cl_type) => { + stream.push(CL_TYPE_TAG_LIST); + cl_type.append_bytes(stream)?; + } + CLType::ByteArray(len) => { + stream.push(CL_TYPE_TAG_BYTE_ARRAY); + stream.append(&mut len.to_bytes()?); + } + CLType::Result { ok, err } => { + stream.push(CL_TYPE_TAG_RESULT); + ok.append_bytes(stream)?; + err.append_bytes(stream)?; + } + CLType::Map { key, value } => { + stream.push(CL_TYPE_TAG_MAP); + key.append_bytes(stream)?; + value.append_bytes(stream)?; + } + CLType::Tuple1(cl_type_array) => { + serialize_cl_tuple_type(CL_TYPE_TAG_TUPLE1, cl_type_array, stream)? + } + CLType::Tuple2(cl_type_array) => { + serialize_cl_tuple_type(CL_TYPE_TAG_TUPLE2, cl_type_array, stream)? + } + CLType::Tuple3(cl_type_array) => { + serialize_cl_tuple_type(CL_TYPE_TAG_TUPLE3, cl_type_array, stream)? 
+ } + CLType::Any => stream.push(CL_TYPE_TAG_ANY), + } + Ok(()) + } +} + +impl FromBytes for CLType { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + depth_limited_from_bytes(0, bytes) + } +} + +fn depth_limited_from_bytes(depth: u8, bytes: &[u8]) -> Result<(CLType, &[u8]), bytesrepr::Error> { + if depth >= CL_TYPE_RECURSION_DEPTH { + return Err(bytesrepr::Error::ExceededRecursionDepth); + } + let depth = depth + 1; + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + CL_TYPE_TAG_BOOL => Ok((CLType::Bool, remainder)), + CL_TYPE_TAG_I32 => Ok((CLType::I32, remainder)), + CL_TYPE_TAG_I64 => Ok((CLType::I64, remainder)), + CL_TYPE_TAG_U8 => Ok((CLType::U8, remainder)), + CL_TYPE_TAG_U32 => Ok((CLType::U32, remainder)), + CL_TYPE_TAG_U64 => Ok((CLType::U64, remainder)), + CL_TYPE_TAG_U128 => Ok((CLType::U128, remainder)), + CL_TYPE_TAG_U256 => Ok((CLType::U256, remainder)), + CL_TYPE_TAG_U512 => Ok((CLType::U512, remainder)), + CL_TYPE_TAG_UNIT => Ok((CLType::Unit, remainder)), + CL_TYPE_TAG_STRING => Ok((CLType::String, remainder)), + CL_TYPE_TAG_KEY => Ok((CLType::Key, remainder)), + CL_TYPE_TAG_UREF => Ok((CLType::URef, remainder)), + CL_TYPE_TAG_PUBLIC_KEY => Ok((CLType::PublicKey, remainder)), + CL_TYPE_TAG_OPTION => { + let (inner_type, remainder) = depth_limited_from_bytes(depth, remainder)?; + let cl_type = CLType::Option(Box::new(inner_type)); + Ok((cl_type, remainder)) + } + CL_TYPE_TAG_LIST => { + let (inner_type, remainder) = depth_limited_from_bytes(depth, remainder)?; + let cl_type = CLType::List(Box::new(inner_type)); + Ok((cl_type, remainder)) + } + CL_TYPE_TAG_BYTE_ARRAY => { + let (len, remainder) = u32::from_bytes(remainder)?; + let cl_type = CLType::ByteArray(len); + Ok((cl_type, remainder)) + } + CL_TYPE_TAG_RESULT => { + let (ok_type, remainder) = depth_limited_from_bytes(depth, remainder)?; + let (err_type, remainder) = depth_limited_from_bytes(depth, remainder)?; + let cl_type = CLType::Result { + ok: 
Box::new(ok_type), + err: Box::new(err_type), + }; + Ok((cl_type, remainder)) + } + CL_TYPE_TAG_MAP => { + let (key_type, remainder) = depth_limited_from_bytes(depth, remainder)?; + let (value_type, remainder) = depth_limited_from_bytes(depth, remainder)?; + let cl_type = CLType::Map { + key: Box::new(key_type), + value: Box::new(value_type), + }; + Ok((cl_type, remainder)) + } + CL_TYPE_TAG_TUPLE1 => { + let (mut inner_types, remainder) = parse_cl_tuple_types(depth, 1, remainder)?; + // NOTE: Assumed safe as `parse_cl_tuple_types` is expected to have exactly 1 + // element + let cl_type = CLType::Tuple1([inner_types.pop_front().unwrap()]); + Ok((cl_type, remainder)) + } + CL_TYPE_TAG_TUPLE2 => { + let (mut inner_types, remainder) = parse_cl_tuple_types(depth, 2, remainder)?; + // NOTE: Assumed safe as `parse_cl_tuple_types` is expected to have exactly 2 + // elements + let cl_type = CLType::Tuple2([ + inner_types.pop_front().unwrap(), + inner_types.pop_front().unwrap(), + ]); + Ok((cl_type, remainder)) + } + CL_TYPE_TAG_TUPLE3 => { + let (mut inner_types, remainder) = parse_cl_tuple_types(depth, 3, remainder)?; + // NOTE: Assumed safe as `parse_cl_tuple_types` is expected to have exactly 3 + // elements + let cl_type = CLType::Tuple3([ + inner_types.pop_front().unwrap(), + inner_types.pop_front().unwrap(), + inner_types.pop_front().unwrap(), + ]); + Ok((cl_type, remainder)) + } + CL_TYPE_TAG_ANY => Ok((CLType::Any, remainder)), + _ => Err(bytesrepr::Error::Formatting), + } +} + +fn serialize_cl_tuple_type<'a, T: IntoIterator>>( + tag: u8, + cl_type_array: T, + stream: &mut Vec, +) -> Result<(), bytesrepr::Error> { + stream.push(tag); + for cl_type in cl_type_array { + cl_type.append_bytes(stream)?; + } + Ok(()) +} + +fn parse_cl_tuple_types( + depth: u8, + count: usize, + mut bytes: &[u8], +) -> Result<(VecDeque>, &[u8]), bytesrepr::Error> { + let mut cl_types = VecDeque::with_capacity(count); + for _ in 0..count { + let (cl_type, remainder) = 
depth_limited_from_bytes(depth, bytes)?; + cl_types.push_back(Box::new(cl_type)); + bytes = remainder; + } + + Ok((cl_types, bytes)) +} + +fn serialized_length_of_cl_tuple_type<'a, T: IntoIterator>>( + cl_type_array: T, +) -> usize { + cl_type_array + .into_iter() + .map(|cl_type| cl_type.serialized_length()) + .sum() +} + +/// A type which can be described as a [`CLType`]. +pub trait CLTyped { + /// The `CLType` of `Self`. + fn cl_type() -> CLType; +} + +impl CLTyped for bool { + fn cl_type() -> CLType { + CLType::Bool + } +} + +impl CLTyped for i32 { + fn cl_type() -> CLType { + CLType::I32 + } +} + +impl CLTyped for i64 { + fn cl_type() -> CLType { + CLType::I64 + } +} + +impl CLTyped for u8 { + fn cl_type() -> CLType { + CLType::U8 + } +} + +impl CLTyped for u32 { + fn cl_type() -> CLType { + CLType::U32 + } +} + +impl CLTyped for u64 { + fn cl_type() -> CLType { + CLType::U64 + } +} + +impl CLTyped for U128 { + fn cl_type() -> CLType { + CLType::U128 + } +} + +impl CLTyped for U256 { + fn cl_type() -> CLType { + CLType::U256 + } +} + +impl CLTyped for U512 { + fn cl_type() -> CLType { + CLType::U512 + } +} + +impl CLTyped for () { + fn cl_type() -> CLType { + CLType::Unit + } +} + +impl CLTyped for String { + fn cl_type() -> CLType { + CLType::String + } +} + +impl CLTyped for &str { + fn cl_type() -> CLType { + CLType::String + } +} + +impl CLTyped for Key { + fn cl_type() -> CLType { + CLType::Key + } +} + +impl CLTyped for URef { + fn cl_type() -> CLType { + CLType::URef + } +} + +impl CLTyped for Option { + fn cl_type() -> CLType { + CLType::Option(Box::new(T::cl_type())) + } +} + +impl CLTyped for Vec { + fn cl_type() -> CLType { + CLType::List(Box::new(T::cl_type())) + } +} + +impl CLTyped for BTreeSet { + fn cl_type() -> CLType { + CLType::List(Box::new(T::cl_type())) + } +} + +impl CLTyped for &T { + fn cl_type() -> CLType { + T::cl_type() + } +} + +impl CLTyped for [u8; COUNT] { + fn cl_type() -> CLType { + CLType::ByteArray(COUNT as u32) + } +} + 
+impl CLTyped for Result { + fn cl_type() -> CLType { + let ok = Box::new(T::cl_type()); + let err = Box::new(E::cl_type()); + CLType::Result { ok, err } + } +} + +impl CLTyped for BTreeMap { + fn cl_type() -> CLType { + let key = Box::new(K::cl_type()); + let value = Box::new(V::cl_type()); + CLType::Map { key, value } + } +} + +impl CLTyped for (T1,) { + fn cl_type() -> CLType { + CLType::Tuple1([Box::new(T1::cl_type())]) + } +} + +impl CLTyped for (T1, T2) { + fn cl_type() -> CLType { + CLType::Tuple2([Box::new(T1::cl_type()), Box::new(T2::cl_type())]) + } +} + +impl CLTyped for (T1, T2, T3) { + fn cl_type() -> CLType { + CLType::Tuple3([ + Box::new(T1::cl_type()), + Box::new(T2::cl_type()), + Box::new(T3::cl_type()), + ]) + } +} + +impl CLTyped for Ratio { + fn cl_type() -> CLType { + <(T, T)>::cl_type() + } +} + +#[cfg(test)] +mod tests { + use std::{fmt::Debug, iter, string::ToString}; + + use super::*; + use crate::{ + bytesrepr::{FromBytes, ToBytes}, + AccessRights, CLValue, + }; + + fn round_trip(value: &T) { + let cl_value = CLValue::from_t(value.clone()).unwrap(); + + let serialized_cl_value = cl_value.to_bytes().unwrap(); + assert_eq!(serialized_cl_value.len(), cl_value.serialized_length()); + let parsed_cl_value: CLValue = bytesrepr::deserialize(serialized_cl_value).unwrap(); + assert_eq!(cl_value, parsed_cl_value); + + let parsed_value = CLValue::into_t(cl_value).unwrap(); + assert_eq!(*value, parsed_value); + } + + #[test] + fn bool_should_work() { + round_trip(&true); + round_trip(&false); + } + + #[test] + fn u8_should_work() { + round_trip(&1u8); + } + + #[test] + fn u32_should_work() { + round_trip(&1u32); + } + + #[test] + fn i32_should_work() { + round_trip(&-1i32); + } + + #[test] + fn u64_should_work() { + round_trip(&1u64); + } + + #[test] + fn i64_should_work() { + round_trip(&-1i64); + } + + #[test] + fn u128_should_work() { + round_trip(&U128::one()); + } + + #[test] + fn u256_should_work() { + round_trip(&U256::one()); + } + + #[test] + 
fn u512_should_work() { + round_trip(&U512::one()); + } + + #[test] + fn unit_should_work() { + round_trip(&()); + } + + #[test] + fn string_should_work() { + round_trip(&String::from("abc")); + } + + #[test] + fn key_should_work() { + let key = Key::URef(URef::new([0u8; 32], AccessRights::READ_ADD_WRITE)); + round_trip(&key); + } + + #[test] + fn uref_should_work() { + let uref = URef::new([0u8; 32], AccessRights::READ_ADD_WRITE); + round_trip(&uref); + } + + #[test] + fn option_of_cl_type_should_work() { + let x: Option = Some(-1); + let y: Option = None; + + round_trip(&x); + round_trip(&y); + } + + #[test] + fn vec_of_cl_type_should_work() { + let vec = vec![String::from("a"), String::from("b")]; + round_trip(&vec); + } + + #[test] + #[allow(clippy::cognitive_complexity)] + fn small_array_of_u8_should_work() { + macro_rules! test_small_array { + ($($N:literal)+) => { + $( + let mut array: [u8; $N] = Default::default(); + for i in 0..$N { + array[i] = i as u8; + } + round_trip(&array); + )+ + } + } + + test_small_array! { + 1 2 3 4 5 6 7 8 9 + 10 11 12 13 14 15 16 17 18 19 + 20 21 22 23 24 25 26 27 28 29 + 30 31 32 + } + } + + #[test] + fn large_array_of_cl_type_should_work() { + macro_rules! test_large_array { + ($($N:literal)+) => { + $( + let array = { + let mut tmp = [0u8; $N]; + for i in 0..$N { + tmp[i] = i as u8; + } + tmp + }; + + let cl_value = CLValue::from_t(array.clone()).unwrap(); + + let serialized_cl_value = cl_value.to_bytes().unwrap(); + let parsed_cl_value: CLValue = bytesrepr::deserialize(serialized_cl_value).unwrap(); + assert_eq!(cl_value, parsed_cl_value); + + let parsed_value: [u8; $N] = CLValue::into_t(cl_value).unwrap(); + for i in 0..$N { + assert_eq!(array[i], parsed_value[i]); + } + )+ + } + } + + test_large_array! 
{ 64 128 256 512 } + } + + #[test] + fn result_of_cl_type_should_work() { + let x: Result<(), String> = Ok(()); + let y: Result<(), String> = Err(String::from("Hello, world!")); + + round_trip(&x); + round_trip(&y); + } + + #[test] + fn map_of_cl_type_should_work() { + let mut map: BTreeMap = BTreeMap::new(); + map.insert(String::from("abc"), 1); + map.insert(String::from("xyz"), 2); + + round_trip(&map); + } + + #[test] + fn tuple_1_should_work() { + let x = (-1i32,); + + round_trip(&x); + } + + #[test] + fn tuple_2_should_work() { + let x = (-1i32, String::from("a")); + + round_trip(&x); + } + + #[test] + fn tuple_3_should_work() { + let x = (-1i32, 1u32, String::from("a")); + + round_trip(&x); + } + + #[test] + fn parsing_nested_tuple_1_cltype_should_not_stack_overflow() { + // The bytesrepr representation of the CLType for a + // nested (((...((),),...),),) looks like: + // [18, 18, 18, ..., 9] + + for i in 1..1000 { + let bytes = iter::repeat(CL_TYPE_TAG_TUPLE1) + .take(i) + .chain(iter::once(CL_TYPE_TAG_UNIT)) + .collect(); + match bytesrepr::deserialize(bytes) { + Ok(parsed_cltype) => assert!(matches!(parsed_cltype, CLType::Tuple1(_))), + Err(error) => assert_eq!(error, bytesrepr::Error::ExceededRecursionDepth), + } + } + } + + #[test] + fn parsing_nested_tuple_1_value_should_not_stack_overflow() { + // The bytesrepr representation of the CLValue for a + // nested (((...((),),...),),) looks like: + // [0, 0, 0, 0, 18, 18, 18, ..., 18, 9] + + for i in 1..1000 { + let bytes = iter::repeat(0) + .take(4) + .chain(iter::repeat(CL_TYPE_TAG_TUPLE1).take(i)) + .chain(iter::once(CL_TYPE_TAG_UNIT)) + .collect(); + match bytesrepr::deserialize::(bytes) { + Ok(parsed_clvalue) => { + assert!(matches!(parsed_clvalue.cl_type(), CLType::Tuple1(_))) + } + Err(error) => assert_eq!(error, bytesrepr::Error::ExceededRecursionDepth), + } + } + } + + #[test] + fn any_should_work() { + #[derive(PartialEq, Debug, Clone)] + struct Any(String); + + impl CLTyped for Any { + fn 
cl_type() -> CLType { + CLType::Any + } + } + + impl ToBytes for Any { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + } + + impl FromBytes for Any { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (inner, remainder) = String::from_bytes(bytes)?; + Ok((Any(inner), remainder)) + } + } + + let any = Any("Any test".to_string()); + round_trip(&any); + } + + #[test] + fn should_have_cltype_of_ref_to_cltyped() { + assert_eq!(>::cl_type(), >::cl_type()) + } +} diff --git a/casper_types/src/cl_value.rs b/casper_types/src/cl_value.rs new file mode 100644 index 00000000..1dc1bee5 --- /dev/null +++ b/casper_types/src/cl_value.rs @@ -0,0 +1,1197 @@ +// TODO - remove once schemars stops causing warning. +#![allow(clippy::field_reassign_with_default)] + +use alloc::{string::String, vec::Vec}; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::{gen::SchemaGenerator, schema::Schema, JsonSchema}; +use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; +use serde_json::Value; + +use crate::{ + bytesrepr::{self, Bytes, FromBytes, ToBytes, U32_SERIALIZED_LENGTH}, + checksummed_hex, CLType, CLTyped, +}; + +mod jsonrepr; + +/// Error while converting a [`CLValue`] into a given type. +#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct CLTypeMismatch { + /// The [`CLType`] into which the `CLValue` was being converted. + pub expected: CLType, + /// The actual underlying [`CLType`] of this `CLValue`, i.e. the type from which it was + /// constructed. 
+ pub found: CLType, +} + +impl Display for CLTypeMismatch { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!( + f, + "Expected {:?} but found {:?}.", + self.expected, self.found + ) + } +} + +/// Error relating to [`CLValue`] operations. +#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub enum CLValueError { + /// An error while serializing or deserializing the underlying data. + Serialization(bytesrepr::Error), + /// A type mismatch while trying to convert a [`CLValue`] into a given type. + Type(CLTypeMismatch), +} + +impl From for CLValueError { + fn from(error: bytesrepr::Error) -> Self { + CLValueError::Serialization(error) + } +} + +impl Display for CLValueError { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + CLValueError::Serialization(error) => write!(formatter, "CLValue error: {}", error), + CLValueError::Type(error) => write!(formatter, "Type mismatch: {}", error), + } + } +} + +/// A Casper value, i.e. a value which can be stored and manipulated by smart contracts. +/// +/// It holds the underlying data as a type-erased, serialized `Vec` and also holds the +/// [`CLType`] of the underlying data as a separate member. +#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct CLValue { + cl_type: CLType, + bytes: Bytes, +} + +impl CLValue { + /// Constructs a `CLValue` from `t`. + pub fn from_t(t: T) -> Result { + let bytes = t.into_bytes()?; + + Ok(CLValue { + cl_type: T::cl_type(), + bytes: bytes.into(), + }) + } + + /// Consumes and converts `self` back into its underlying type. + pub fn into_t(self) -> Result { + let expected = T::cl_type(); + + if self.cl_type == expected { + Ok(bytesrepr::deserialize_from_slice(&self.bytes)?) 
+ } else { + Err(CLValueError::Type(CLTypeMismatch { + expected, + found: self.cl_type, + })) + } + } + + /// A convenience method to create CLValue for a unit. + pub fn unit() -> Self { + CLValue::from_components(CLType::Unit, Vec::new()) + } + + // This is only required in order to implement `TryFrom for CLValue` (i.e. the + // conversion from the Protobuf `CLValue`) in a separate module to this one. + #[doc(hidden)] + pub fn from_components(cl_type: CLType, bytes: Vec) -> Self { + Self { + cl_type, + bytes: bytes.into(), + } + } + + // This is only required in order to implement `From for state::CLValue` (i.e. the + // conversion to the Protobuf `CLValue`) in a separate module to this one. + #[doc(hidden)] + pub fn destructure(self) -> (CLType, Bytes) { + (self.cl_type, self.bytes) + } + + /// The [`CLType`] of the underlying data. + pub fn cl_type(&self) -> &CLType { + &self.cl_type + } + + /// Returns a reference to the serialized form of the underlying value held in this `CLValue`. + pub fn inner_bytes(&self) -> &Vec { + self.bytes.inner_bytes() + } + + /// Returns the length of the `Vec` yielded after calling `self.to_bytes()`. + /// + /// Note, this method doesn't actually serialize `self`, and hence is relatively cheap. 
+ pub fn serialized_length(&self) -> usize { + self.cl_type.serialized_length() + U32_SERIALIZED_LENGTH + self.bytes.len() + } +} + +impl ToBytes for CLValue { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.clone().into_bytes() + } + + fn into_bytes(self) -> Result, bytesrepr::Error> { + let mut result = self.bytes.into_bytes()?; + self.cl_type.append_bytes(&mut result)?; + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.bytes.serialized_length() + self.cl_type.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.bytes.write_bytes(writer)?; + self.cl_type.append_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for CLValue { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (bytes, remainder) = FromBytes::from_bytes(bytes)?; + let (cl_type, remainder) = FromBytes::from_bytes(remainder)?; + let cl_value = CLValue { cl_type, bytes }; + Ok((cl_value, remainder)) + } +} + +/// We need to implement `JsonSchema` for `CLValue` as though it is a `CLValueJson`. +#[cfg(feature = "json-schema")] +impl JsonSchema for CLValue { + fn schema_name() -> String { + "CLValue".to_string() + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + ::json_schema(gen) + } +} + +/// A Casper value, i.e. a value which can be stored and manipulated by smart contracts. +/// +/// It holds the underlying data as a type-erased, serialized `Vec` and also holds the CLType of +/// the underlying data as a separate member. +/// +/// The `parsed` field, representing the original value, is a convenience only available when a +/// CLValue is encoded to JSON, and can always be set to null if preferred. 
+#[derive(Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[cfg_attr(feature = "json-schema", schemars(rename = "CLValue"))] +struct CLValueJson { + cl_type: CLType, + bytes: String, + parsed: Option, +} + +impl Serialize for CLValue { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + CLValueJson { + cl_type: self.cl_type.clone(), + bytes: base16::encode_lower(&self.bytes), + parsed: jsonrepr::cl_value_to_json(self), + } + .serialize(serializer) + } else { + (&self.cl_type, &self.bytes).serialize(serializer) + } + } +} + +impl<'de> Deserialize<'de> for CLValue { + fn deserialize>(deserializer: D) -> Result { + let (cl_type, bytes) = if deserializer.is_human_readable() { + let json = CLValueJson::deserialize(deserializer)?; + ( + json.cl_type.clone(), + checksummed_hex::decode(&json.bytes).map_err(D::Error::custom)?, + ) + } else { + <(CLType, Vec)>::deserialize(deserializer)? + }; + Ok(CLValue { + cl_type, + bytes: bytes.into(), + }) + } +} + +#[cfg(test)] +mod tests { + use alloc::string::ToString; + + #[cfg(feature = "json-schema")] + use schemars::schema_for; + + use super::*; + use crate::{ + account::{AccountHash, ACCOUNT_HASH_LENGTH}, + key::KEY_HASH_LENGTH, + AccessRights, DeployHash, Key, PublicKey, TransferAddr, URef, DEPLOY_HASH_LENGTH, + TRANSFER_ADDR_LENGTH, U128, U256, U512, UREF_ADDR_LENGTH, + }; + + #[cfg(feature = "json-schema")] + #[test] + fn json_schema() { + let json_clvalue_schema = schema_for!(CLValueJson); + let clvalue_schema = schema_for!(CLValue); + assert_eq!(json_clvalue_schema, clvalue_schema); + } + + #[test] + fn serde_roundtrip() { + let cl_value = CLValue::from_t(true).unwrap(); + let serialized = bincode::serialize(&cl_value).unwrap(); + let decoded = bincode::deserialize(&serialized).unwrap(); + assert_eq!(cl_value, decoded); + } + + #[test] + fn json_roundtrip() { + let cl_value = CLValue::from_t(true).unwrap(); + let 
json_string = serde_json::to_string_pretty(&cl_value).unwrap(); + let decoded = serde_json::from_str(&json_string).unwrap(); + assert_eq!(cl_value, decoded); + } + + fn check_to_json(value: T, expected: &str) { + let cl_value = CLValue::from_t(value).unwrap(); + let cl_value_as_json = serde_json::to_string(&cl_value).unwrap(); + // Remove the `serialized_bytes` field: + // Split the string at `,"serialized_bytes":`. + let pattern = r#","bytes":""#; + let start_index = cl_value_as_json.find(pattern).unwrap(); + let (start, end) = cl_value_as_json.split_at(start_index); + // Find the end of the value of the `bytes` field, and split there. + let mut json_without_serialize_bytes = start.to_string(); + for (index, char) in end.char_indices().skip(pattern.len()) { + if char == '"' { + let (_to_remove, to_keep) = end.split_at(index + 1); + json_without_serialize_bytes.push_str(to_keep); + break; + } + } + assert_eq!(json_without_serialize_bytes, expected); + } + + mod simple_types { + use super::*; + use crate::crypto::SecretKey; + + #[test] + fn bool_cl_value_should_encode_to_json() { + check_to_json(true, r#"{"cl_type":"Bool","parsed":true}"#); + check_to_json(false, r#"{"cl_type":"Bool","parsed":false}"#); + } + + #[test] + fn i32_cl_value_should_encode_to_json() { + check_to_json( + i32::min_value(), + r#"{"cl_type":"I32","parsed":-2147483648}"#, + ); + check_to_json(0_i32, r#"{"cl_type":"I32","parsed":0}"#); + check_to_json(i32::max_value(), r#"{"cl_type":"I32","parsed":2147483647}"#); + } + + #[test] + fn i64_cl_value_should_encode_to_json() { + check_to_json( + i64::min_value(), + r#"{"cl_type":"I64","parsed":-9223372036854775808}"#, + ); + check_to_json(0_i64, r#"{"cl_type":"I64","parsed":0}"#); + check_to_json( + i64::max_value(), + r#"{"cl_type":"I64","parsed":9223372036854775807}"#, + ); + } + + #[test] + fn u8_cl_value_should_encode_to_json() { + check_to_json(0_u8, r#"{"cl_type":"U8","parsed":0}"#); + check_to_json(u8::max_value(), 
r#"{"cl_type":"U8","parsed":255}"#); + } + + #[test] + fn u32_cl_value_should_encode_to_json() { + check_to_json(0_u32, r#"{"cl_type":"U32","parsed":0}"#); + check_to_json(u32::max_value(), r#"{"cl_type":"U32","parsed":4294967295}"#); + } + + #[test] + fn u64_cl_value_should_encode_to_json() { + check_to_json(0_u64, r#"{"cl_type":"U64","parsed":0}"#); + check_to_json( + u64::max_value(), + r#"{"cl_type":"U64","parsed":18446744073709551615}"#, + ); + } + + #[test] + fn u128_cl_value_should_encode_to_json() { + check_to_json(U128::zero(), r#"{"cl_type":"U128","parsed":"0"}"#); + check_to_json( + U128::max_value(), + r#"{"cl_type":"U128","parsed":"340282366920938463463374607431768211455"}"#, + ); + } + + #[test] + fn u256_cl_value_should_encode_to_json() { + check_to_json(U256::zero(), r#"{"cl_type":"U256","parsed":"0"}"#); + check_to_json( + U256::max_value(), + r#"{"cl_type":"U256","parsed":"115792089237316195423570985008687907853269984665640564039457584007913129639935"}"#, + ); + } + + #[test] + fn u512_cl_value_should_encode_to_json() { + check_to_json(U512::zero(), r#"{"cl_type":"U512","parsed":"0"}"#); + check_to_json( + U512::max_value(), + r#"{"cl_type":"U512","parsed":"13407807929942597099574024998205846127479365820592393377723561443721764030073546976801874298166903427690031858186486050853753882811946569946433649006084095"}"#, + ); + } + + #[test] + fn unit_cl_value_should_encode_to_json() { + check_to_json((), r#"{"cl_type":"Unit","parsed":null}"#); + } + + #[test] + fn string_cl_value_should_encode_to_json() { + check_to_json(String::new(), r#"{"cl_type":"String","parsed":""}"#); + check_to_json( + "test string".to_string(), + r#"{"cl_type":"String","parsed":"test string"}"#, + ); + } + + #[test] + fn key_cl_value_should_encode_to_json() { + let key_account = Key::Account(AccountHash::new([1; ACCOUNT_HASH_LENGTH])); + check_to_json( + key_account, + 
r#"{"cl_type":"Key","parsed":{"Account":"account-hash-0101010101010101010101010101010101010101010101010101010101010101"}}"#, + ); + + let key_hash = Key::Hash([2; KEY_HASH_LENGTH]); + check_to_json( + key_hash, + r#"{"cl_type":"Key","parsed":{"Hash":"hash-0202020202020202020202020202020202020202020202020202020202020202"}}"#, + ); + + let key_uref = Key::URef(URef::new([3; UREF_ADDR_LENGTH], AccessRights::READ)); + check_to_json( + key_uref, + r#"{"cl_type":"Key","parsed":{"URef":"uref-0303030303030303030303030303030303030303030303030303030303030303-001"}}"#, + ); + + let key_transfer = Key::Transfer(TransferAddr::new([4; TRANSFER_ADDR_LENGTH])); + check_to_json( + key_transfer, + r#"{"cl_type":"Key","parsed":{"Transfer":"transfer-0404040404040404040404040404040404040404040404040404040404040404"}}"#, + ); + + let key_deploy_info = Key::DeployInfo(DeployHash::new([5; DEPLOY_HASH_LENGTH])); + check_to_json( + key_deploy_info, + r#"{"cl_type":"Key","parsed":{"DeployInfo":"deploy-0505050505050505050505050505050505050505050505050505050505050505"}}"#, + ); + } + + #[test] + fn uref_cl_value_should_encode_to_json() { + let uref = URef::new([6; UREF_ADDR_LENGTH], AccessRights::READ_ADD_WRITE); + check_to_json( + uref, + r#"{"cl_type":"URef","parsed":"uref-0606060606060606060606060606060606060606060606060606060606060606-007"}"#, + ); + } + + #[test] + fn public_key_cl_value_should_encode_to_json() { + check_to_json( + PublicKey::from( + &SecretKey::ed25519_from_bytes([7; SecretKey::ED25519_LENGTH]).unwrap(), + ), + r#"{"cl_type":"PublicKey","parsed":"01ea4a6c63e29c520abef5507b132ec5f9954776aebebe7b92421eea691446d22c"}"#, + ); + check_to_json( + PublicKey::from( + &SecretKey::secp256k1_from_bytes([8; SecretKey::SECP256K1_LENGTH]).unwrap(), + ), + r#"{"cl_type":"PublicKey","parsed":"0203f991f944d1e1954a7fc8b9bf62e0d78f015f4c07762d505e20e6c45260a3661b"}"#, + ); + } + } + + mod option { + use super::*; + use crate::crypto::SecretKey; + + #[test] + fn 
bool_cl_value_should_encode_to_json() { + check_to_json(Some(true), r#"{"cl_type":{"Option":"Bool"},"parsed":true}"#); + check_to_json( + Some(false), + r#"{"cl_type":{"Option":"Bool"},"parsed":false}"#, + ); + check_to_json( + Option::::None, + r#"{"cl_type":{"Option":"Bool"},"parsed":null}"#, + ); + } + + #[test] + fn i32_cl_value_should_encode_to_json() { + check_to_json( + Some(i32::min_value()), + r#"{"cl_type":{"Option":"I32"},"parsed":-2147483648}"#, + ); + check_to_json(Some(0_i32), r#"{"cl_type":{"Option":"I32"},"parsed":0}"#); + check_to_json( + Some(i32::max_value()), + r#"{"cl_type":{"Option":"I32"},"parsed":2147483647}"#, + ); + check_to_json( + Option::::None, + r#"{"cl_type":{"Option":"I32"},"parsed":null}"#, + ); + } + + #[test] + fn i64_cl_value_should_encode_to_json() { + check_to_json( + Some(i64::min_value()), + r#"{"cl_type":{"Option":"I64"},"parsed":-9223372036854775808}"#, + ); + check_to_json(Some(0_i64), r#"{"cl_type":{"Option":"I64"},"parsed":0}"#); + check_to_json( + Some(i64::max_value()), + r#"{"cl_type":{"Option":"I64"},"parsed":9223372036854775807}"#, + ); + check_to_json( + Option::::None, + r#"{"cl_type":{"Option":"I64"},"parsed":null}"#, + ); + } + + #[test] + fn u8_cl_value_should_encode_to_json() { + check_to_json(Some(0_u8), r#"{"cl_type":{"Option":"U8"},"parsed":0}"#); + check_to_json( + Some(u8::max_value()), + r#"{"cl_type":{"Option":"U8"},"parsed":255}"#, + ); + check_to_json( + Option::::None, + r#"{"cl_type":{"Option":"U8"},"parsed":null}"#, + ); + } + + #[test] + fn u32_cl_value_should_encode_to_json() { + check_to_json(Some(0_u32), r#"{"cl_type":{"Option":"U32"},"parsed":0}"#); + check_to_json( + Some(u32::max_value()), + r#"{"cl_type":{"Option":"U32"},"parsed":4294967295}"#, + ); + check_to_json( + Option::::None, + r#"{"cl_type":{"Option":"U32"},"parsed":null}"#, + ); + } + + #[test] + fn u64_cl_value_should_encode_to_json() { + check_to_json(Some(0_u64), r#"{"cl_type":{"Option":"U64"},"parsed":0}"#); + check_to_json( 
+ Some(u64::max_value()), + r#"{"cl_type":{"Option":"U64"},"parsed":18446744073709551615}"#, + ); + check_to_json( + Option::::None, + r#"{"cl_type":{"Option":"U64"},"parsed":null}"#, + ); + } + + #[test] + fn u128_cl_value_should_encode_to_json() { + check_to_json( + Some(U128::zero()), + r#"{"cl_type":{"Option":"U128"},"parsed":"0"}"#, + ); + check_to_json( + Some(U128::max_value()), + r#"{"cl_type":{"Option":"U128"},"parsed":"340282366920938463463374607431768211455"}"#, + ); + check_to_json( + Option::::None, + r#"{"cl_type":{"Option":"U128"},"parsed":null}"#, + ); + } + + #[test] + fn u256_cl_value_should_encode_to_json() { + check_to_json( + Some(U256::zero()), + r#"{"cl_type":{"Option":"U256"},"parsed":"0"}"#, + ); + check_to_json( + Some(U256::max_value()), + r#"{"cl_type":{"Option":"U256"},"parsed":"115792089237316195423570985008687907853269984665640564039457584007913129639935"}"#, + ); + check_to_json( + Option::::None, + r#"{"cl_type":{"Option":"U256"},"parsed":null}"#, + ); + } + + #[test] + fn u512_cl_value_should_encode_to_json() { + check_to_json( + Some(U512::zero()), + r#"{"cl_type":{"Option":"U512"},"parsed":"0"}"#, + ); + check_to_json( + Some(U512::max_value()), + r#"{"cl_type":{"Option":"U512"},"parsed":"13407807929942597099574024998205846127479365820592393377723561443721764030073546976801874298166903427690031858186486050853753882811946569946433649006084095"}"#, + ); + check_to_json( + Option::::None, + r#"{"cl_type":{"Option":"U512"},"parsed":null}"#, + ); + } + + #[test] + fn unit_cl_value_should_encode_to_json() { + check_to_json(Some(()), r#"{"cl_type":{"Option":"Unit"},"parsed":null}"#); + check_to_json( + Option::<()>::None, + r#"{"cl_type":{"Option":"Unit"},"parsed":null}"#, + ); + } + + #[test] + fn string_cl_value_should_encode_to_json() { + check_to_json( + Some(String::new()), + r#"{"cl_type":{"Option":"String"},"parsed":""}"#, + ); + check_to_json( + Some("test string".to_string()), + r#"{"cl_type":{"Option":"String"},"parsed":"test 
string"}"#, + ); + check_to_json( + Option::::None, + r#"{"cl_type":{"Option":"String"},"parsed":null}"#, + ); + } + + #[test] + fn key_cl_value_should_encode_to_json() { + let key_account = Key::Account(AccountHash::new([1; ACCOUNT_HASH_LENGTH])); + check_to_json( + Some(key_account), + r#"{"cl_type":{"Option":"Key"},"parsed":{"Account":"account-hash-0101010101010101010101010101010101010101010101010101010101010101"}}"#, + ); + + let key_hash = Key::Hash([2; KEY_HASH_LENGTH]); + check_to_json( + Some(key_hash), + r#"{"cl_type":{"Option":"Key"},"parsed":{"Hash":"hash-0202020202020202020202020202020202020202020202020202020202020202"}}"#, + ); + + let key_uref = Key::URef(URef::new([3; UREF_ADDR_LENGTH], AccessRights::READ)); + check_to_json( + Some(key_uref), + r#"{"cl_type":{"Option":"Key"},"parsed":{"URef":"uref-0303030303030303030303030303030303030303030303030303030303030303-001"}}"#, + ); + + let key_transfer = Key::Transfer(TransferAddr::new([4; TRANSFER_ADDR_LENGTH])); + check_to_json( + Some(key_transfer), + r#"{"cl_type":{"Option":"Key"},"parsed":{"Transfer":"transfer-0404040404040404040404040404040404040404040404040404040404040404"}}"#, + ); + + let key_deploy_info = Key::DeployInfo(DeployHash::new([5; DEPLOY_HASH_LENGTH])); + check_to_json( + Some(key_deploy_info), + r#"{"cl_type":{"Option":"Key"},"parsed":{"DeployInfo":"deploy-0505050505050505050505050505050505050505050505050505050505050505"}}"#, + ); + + check_to_json( + Option::::None, + r#"{"cl_type":{"Option":"Key"},"parsed":null}"#, + ) + } + + #[test] + fn uref_cl_value_should_encode_to_json() { + let uref = URef::new([6; UREF_ADDR_LENGTH], AccessRights::READ_ADD_WRITE); + check_to_json( + Some(uref), + r#"{"cl_type":{"Option":"URef"},"parsed":"uref-0606060606060606060606060606060606060606060606060606060606060606-007"}"#, + ); + check_to_json( + Option::::None, + r#"{"cl_type":{"Option":"URef"},"parsed":null}"#, + ) + } + + #[test] + fn public_key_cl_value_should_encode_to_json() { + check_to_json( + 
Some(PublicKey::from( + &SecretKey::ed25519_from_bytes([7; SecretKey::ED25519_LENGTH]).unwrap(), + )), + r#"{"cl_type":{"Option":"PublicKey"},"parsed":"01ea4a6c63e29c520abef5507b132ec5f9954776aebebe7b92421eea691446d22c"}"#, + ); + check_to_json( + Some(PublicKey::from( + &SecretKey::secp256k1_from_bytes([8; SecretKey::SECP256K1_LENGTH]).unwrap(), + )), + r#"{"cl_type":{"Option":"PublicKey"},"parsed":"0203f991f944d1e1954a7fc8b9bf62e0d78f015f4c07762d505e20e6c45260a3661b"}"#, + ); + check_to_json( + Option::::None, + r#"{"cl_type":{"Option":"PublicKey"},"parsed":null}"#, + ) + } + } + + mod result { + use super::*; + use crate::crypto::SecretKey; + + #[test] + fn bool_cl_value_should_encode_to_json() { + check_to_json( + Result::::Ok(true), + r#"{"cl_type":{"Result":{"ok":"Bool","err":"I32"}},"parsed":{"Ok":true}}"#, + ); + check_to_json( + Result::::Ok(true), + r#"{"cl_type":{"Result":{"ok":"Bool","err":"U32"}},"parsed":{"Ok":true}}"#, + ); + check_to_json( + Result::::Ok(true), + r#"{"cl_type":{"Result":{"ok":"Bool","err":"Unit"}},"parsed":{"Ok":true}}"#, + ); + check_to_json( + Result::::Ok(true), + r#"{"cl_type":{"Result":{"ok":"Bool","err":"String"}},"parsed":{"Ok":true}}"#, + ); + check_to_json( + Result::::Err(-1), + r#"{"cl_type":{"Result":{"ok":"Bool","err":"I32"}},"parsed":{"Err":-1}}"#, + ); + check_to_json( + Result::::Err(1), + r#"{"cl_type":{"Result":{"ok":"Bool","err":"U32"}},"parsed":{"Err":1}}"#, + ); + check_to_json( + Result::::Err(()), + r#"{"cl_type":{"Result":{"ok":"Bool","err":"Unit"}},"parsed":{"Err":null}}"#, + ); + check_to_json( + Result::::Err("e".to_string()), + r#"{"cl_type":{"Result":{"ok":"Bool","err":"String"}},"parsed":{"Err":"e"}}"#, + ); + } + + #[test] + fn i32_cl_value_should_encode_to_json() { + check_to_json( + Result::::Ok(-1), + r#"{"cl_type":{"Result":{"ok":"I32","err":"I32"}},"parsed":{"Ok":-1}}"#, + ); + check_to_json( + Result::::Ok(-1), + r#"{"cl_type":{"Result":{"ok":"I32","err":"U32"}},"parsed":{"Ok":-1}}"#, + ); + 
check_to_json( + Result::::Ok(-1), + r#"{"cl_type":{"Result":{"ok":"I32","err":"Unit"}},"parsed":{"Ok":-1}}"#, + ); + check_to_json( + Result::::Ok(-1), + r#"{"cl_type":{"Result":{"ok":"I32","err":"String"}},"parsed":{"Ok":-1}}"#, + ); + check_to_json( + Result::::Err(-1), + r#"{"cl_type":{"Result":{"ok":"I32","err":"I32"}},"parsed":{"Err":-1}}"#, + ); + check_to_json( + Result::::Err(1), + r#"{"cl_type":{"Result":{"ok":"I32","err":"U32"}},"parsed":{"Err":1}}"#, + ); + check_to_json( + Result::::Err(()), + r#"{"cl_type":{"Result":{"ok":"I32","err":"Unit"}},"parsed":{"Err":null}}"#, + ); + check_to_json( + Result::::Err("e".to_string()), + r#"{"cl_type":{"Result":{"ok":"I32","err":"String"}},"parsed":{"Err":"e"}}"#, + ); + } + + #[test] + fn i64_cl_value_should_encode_to_json() { + check_to_json( + Result::::Ok(-1), + r#"{"cl_type":{"Result":{"ok":"I64","err":"I32"}},"parsed":{"Ok":-1}}"#, + ); + check_to_json( + Result::::Ok(-1), + r#"{"cl_type":{"Result":{"ok":"I64","err":"U32"}},"parsed":{"Ok":-1}}"#, + ); + check_to_json( + Result::::Ok(-1), + r#"{"cl_type":{"Result":{"ok":"I64","err":"Unit"}},"parsed":{"Ok":-1}}"#, + ); + check_to_json( + Result::::Ok(-1), + r#"{"cl_type":{"Result":{"ok":"I64","err":"String"}},"parsed":{"Ok":-1}}"#, + ); + check_to_json( + Result::::Err(-1), + r#"{"cl_type":{"Result":{"ok":"I64","err":"I32"}},"parsed":{"Err":-1}}"#, + ); + check_to_json( + Result::::Err(1), + r#"{"cl_type":{"Result":{"ok":"I64","err":"U32"}},"parsed":{"Err":1}}"#, + ); + check_to_json( + Result::::Err(()), + r#"{"cl_type":{"Result":{"ok":"I64","err":"Unit"}},"parsed":{"Err":null}}"#, + ); + check_to_json( + Result::::Err("e".to_string()), + r#"{"cl_type":{"Result":{"ok":"I64","err":"String"}},"parsed":{"Err":"e"}}"#, + ); + } + + #[test] + fn u8_cl_value_should_encode_to_json() { + check_to_json( + Result::::Ok(1), + r#"{"cl_type":{"Result":{"ok":"U8","err":"I32"}},"parsed":{"Ok":1}}"#, + ); + check_to_json( + Result::::Ok(1), + 
r#"{"cl_type":{"Result":{"ok":"U8","err":"U32"}},"parsed":{"Ok":1}}"#, + ); + check_to_json( + Result::::Ok(1), + r#"{"cl_type":{"Result":{"ok":"U8","err":"Unit"}},"parsed":{"Ok":1}}"#, + ); + check_to_json( + Result::::Ok(1), + r#"{"cl_type":{"Result":{"ok":"U8","err":"String"}},"parsed":{"Ok":1}}"#, + ); + check_to_json( + Result::::Err(-1), + r#"{"cl_type":{"Result":{"ok":"U8","err":"I32"}},"parsed":{"Err":-1}}"#, + ); + check_to_json( + Result::::Err(1), + r#"{"cl_type":{"Result":{"ok":"U8","err":"U32"}},"parsed":{"Err":1}}"#, + ); + check_to_json( + Result::::Err(()), + r#"{"cl_type":{"Result":{"ok":"U8","err":"Unit"}},"parsed":{"Err":null}}"#, + ); + check_to_json( + Result::::Err("e".to_string()), + r#"{"cl_type":{"Result":{"ok":"U8","err":"String"}},"parsed":{"Err":"e"}}"#, + ); + } + + #[test] + fn u32_cl_value_should_encode_to_json() { + check_to_json( + Result::::Ok(1), + r#"{"cl_type":{"Result":{"ok":"U32","err":"I32"}},"parsed":{"Ok":1}}"#, + ); + check_to_json( + Result::::Ok(1), + r#"{"cl_type":{"Result":{"ok":"U32","err":"U32"}},"parsed":{"Ok":1}}"#, + ); + check_to_json( + Result::::Ok(1), + r#"{"cl_type":{"Result":{"ok":"U32","err":"Unit"}},"parsed":{"Ok":1}}"#, + ); + check_to_json( + Result::::Ok(1), + r#"{"cl_type":{"Result":{"ok":"U32","err":"String"}},"parsed":{"Ok":1}}"#, + ); + check_to_json( + Result::::Err(-1), + r#"{"cl_type":{"Result":{"ok":"U32","err":"I32"}},"parsed":{"Err":-1}}"#, + ); + check_to_json( + Result::::Err(1), + r#"{"cl_type":{"Result":{"ok":"U32","err":"U32"}},"parsed":{"Err":1}}"#, + ); + check_to_json( + Result::::Err(()), + r#"{"cl_type":{"Result":{"ok":"U32","err":"Unit"}},"parsed":{"Err":null}}"#, + ); + check_to_json( + Result::::Err("e".to_string()), + r#"{"cl_type":{"Result":{"ok":"U32","err":"String"}},"parsed":{"Err":"e"}}"#, + ); + } + + #[test] + fn u64_cl_value_should_encode_to_json() { + check_to_json( + Result::::Ok(1), + r#"{"cl_type":{"Result":{"ok":"U64","err":"I32"}},"parsed":{"Ok":1}}"#, + ); + 
check_to_json( + Result::::Ok(1), + r#"{"cl_type":{"Result":{"ok":"U64","err":"U32"}},"parsed":{"Ok":1}}"#, + ); + check_to_json( + Result::::Ok(1), + r#"{"cl_type":{"Result":{"ok":"U64","err":"Unit"}},"parsed":{"Ok":1}}"#, + ); + check_to_json( + Result::::Ok(1), + r#"{"cl_type":{"Result":{"ok":"U64","err":"String"}},"parsed":{"Ok":1}}"#, + ); + check_to_json( + Result::::Err(-1), + r#"{"cl_type":{"Result":{"ok":"U64","err":"I32"}},"parsed":{"Err":-1}}"#, + ); + check_to_json( + Result::::Err(1), + r#"{"cl_type":{"Result":{"ok":"U64","err":"U32"}},"parsed":{"Err":1}}"#, + ); + check_to_json( + Result::::Err(()), + r#"{"cl_type":{"Result":{"ok":"U64","err":"Unit"}},"parsed":{"Err":null}}"#, + ); + check_to_json( + Result::::Err("e".to_string()), + r#"{"cl_type":{"Result":{"ok":"U64","err":"String"}},"parsed":{"Err":"e"}}"#, + ); + } + + #[test] + fn u128_cl_value_should_encode_to_json() { + check_to_json( + Result::::Ok(1.into()), + r#"{"cl_type":{"Result":{"ok":"U128","err":"I32"}},"parsed":{"Ok":"1"}}"#, + ); + check_to_json( + Result::::Ok(1.into()), + r#"{"cl_type":{"Result":{"ok":"U128","err":"U32"}},"parsed":{"Ok":"1"}}"#, + ); + check_to_json( + Result::::Ok(1.into()), + r#"{"cl_type":{"Result":{"ok":"U128","err":"Unit"}},"parsed":{"Ok":"1"}}"#, + ); + check_to_json( + Result::::Ok(1.into()), + r#"{"cl_type":{"Result":{"ok":"U128","err":"String"}},"parsed":{"Ok":"1"}}"#, + ); + check_to_json( + Result::::Err(-1), + r#"{"cl_type":{"Result":{"ok":"U128","err":"I32"}},"parsed":{"Err":-1}}"#, + ); + check_to_json( + Result::::Err(1), + r#"{"cl_type":{"Result":{"ok":"U128","err":"U32"}},"parsed":{"Err":1}}"#, + ); + check_to_json( + Result::::Err(()), + r#"{"cl_type":{"Result":{"ok":"U128","err":"Unit"}},"parsed":{"Err":null}}"#, + ); + check_to_json( + Result::::Err("e".to_string()), + r#"{"cl_type":{"Result":{"ok":"U128","err":"String"}},"parsed":{"Err":"e"}}"#, + ); + } + + #[test] + fn u256_cl_value_should_encode_to_json() { + check_to_json( + 
Result::::Ok(1.into()), + r#"{"cl_type":{"Result":{"ok":"U256","err":"I32"}},"parsed":{"Ok":"1"}}"#, + ); + check_to_json( + Result::::Ok(1.into()), + r#"{"cl_type":{"Result":{"ok":"U256","err":"U32"}},"parsed":{"Ok":"1"}}"#, + ); + check_to_json( + Result::::Ok(1.into()), + r#"{"cl_type":{"Result":{"ok":"U256","err":"Unit"}},"parsed":{"Ok":"1"}}"#, + ); + check_to_json( + Result::::Ok(1.into()), + r#"{"cl_type":{"Result":{"ok":"U256","err":"String"}},"parsed":{"Ok":"1"}}"#, + ); + check_to_json( + Result::::Err(-1), + r#"{"cl_type":{"Result":{"ok":"U256","err":"I32"}},"parsed":{"Err":-1}}"#, + ); + check_to_json( + Result::::Err(1), + r#"{"cl_type":{"Result":{"ok":"U256","err":"U32"}},"parsed":{"Err":1}}"#, + ); + check_to_json( + Result::::Err(()), + r#"{"cl_type":{"Result":{"ok":"U256","err":"Unit"}},"parsed":{"Err":null}}"#, + ); + check_to_json( + Result::::Err("e".to_string()), + r#"{"cl_type":{"Result":{"ok":"U256","err":"String"}},"parsed":{"Err":"e"}}"#, + ); + } + + #[test] + fn u512_cl_value_should_encode_to_json() { + check_to_json( + Result::::Ok(1.into()), + r#"{"cl_type":{"Result":{"ok":"U512","err":"I32"}},"parsed":{"Ok":"1"}}"#, + ); + check_to_json( + Result::::Ok(1.into()), + r#"{"cl_type":{"Result":{"ok":"U512","err":"U32"}},"parsed":{"Ok":"1"}}"#, + ); + check_to_json( + Result::::Ok(1.into()), + r#"{"cl_type":{"Result":{"ok":"U512","err":"Unit"}},"parsed":{"Ok":"1"}}"#, + ); + check_to_json( + Result::::Ok(1.into()), + r#"{"cl_type":{"Result":{"ok":"U512","err":"String"}},"parsed":{"Ok":"1"}}"#, + ); + check_to_json( + Result::::Err(-1), + r#"{"cl_type":{"Result":{"ok":"U512","err":"I32"}},"parsed":{"Err":-1}}"#, + ); + check_to_json( + Result::::Err(1), + r#"{"cl_type":{"Result":{"ok":"U512","err":"U32"}},"parsed":{"Err":1}}"#, + ); + check_to_json( + Result::::Err(()), + r#"{"cl_type":{"Result":{"ok":"U512","err":"Unit"}},"parsed":{"Err":null}}"#, + ); + check_to_json( + Result::::Err("e".to_string()), + 
r#"{"cl_type":{"Result":{"ok":"U512","err":"String"}},"parsed":{"Err":"e"}}"#, + ); + } + + #[test] + fn unit_cl_value_should_encode_to_json() { + check_to_json( + Result::<(), i32>::Ok(()), + r#"{"cl_type":{"Result":{"ok":"Unit","err":"I32"}},"parsed":{"Ok":null}}"#, + ); + check_to_json( + Result::<(), u32>::Ok(()), + r#"{"cl_type":{"Result":{"ok":"Unit","err":"U32"}},"parsed":{"Ok":null}}"#, + ); + check_to_json( + Result::<(), ()>::Ok(()), + r#"{"cl_type":{"Result":{"ok":"Unit","err":"Unit"}},"parsed":{"Ok":null}}"#, + ); + check_to_json( + Result::<(), String>::Ok(()), + r#"{"cl_type":{"Result":{"ok":"Unit","err":"String"}},"parsed":{"Ok":null}}"#, + ); + check_to_json( + Result::<(), i32>::Err(-1), + r#"{"cl_type":{"Result":{"ok":"Unit","err":"I32"}},"parsed":{"Err":-1}}"#, + ); + check_to_json( + Result::<(), u32>::Err(1), + r#"{"cl_type":{"Result":{"ok":"Unit","err":"U32"}},"parsed":{"Err":1}}"#, + ); + check_to_json( + Result::<(), ()>::Err(()), + r#"{"cl_type":{"Result":{"ok":"Unit","err":"Unit"}},"parsed":{"Err":null}}"#, + ); + check_to_json( + Result::<(), String>::Err("e".to_string()), + r#"{"cl_type":{"Result":{"ok":"Unit","err":"String"}},"parsed":{"Err":"e"}}"#, + ); + } + + #[test] + fn string_cl_value_should_encode_to_json() { + check_to_json( + Result::::Ok("test string".to_string()), + r#"{"cl_type":{"Result":{"ok":"String","err":"I32"}},"parsed":{"Ok":"test string"}}"#, + ); + check_to_json( + Result::::Ok("test string".to_string()), + r#"{"cl_type":{"Result":{"ok":"String","err":"U32"}},"parsed":{"Ok":"test string"}}"#, + ); + check_to_json( + Result::::Ok("test string".to_string()), + r#"{"cl_type":{"Result":{"ok":"String","err":"Unit"}},"parsed":{"Ok":"test string"}}"#, + ); + check_to_json( + Result::::Ok("test string".to_string()), + r#"{"cl_type":{"Result":{"ok":"String","err":"String"}},"parsed":{"Ok":"test string"}}"#, + ); + check_to_json( + Result::::Err(-1), + 
r#"{"cl_type":{"Result":{"ok":"String","err":"I32"}},"parsed":{"Err":-1}}"#, + ); + check_to_json( + Result::::Err(1), + r#"{"cl_type":{"Result":{"ok":"String","err":"U32"}},"parsed":{"Err":1}}"#, + ); + check_to_json( + Result::::Err(()), + r#"{"cl_type":{"Result":{"ok":"String","err":"Unit"}},"parsed":{"Err":null}}"#, + ); + check_to_json( + Result::::Err("e".to_string()), + r#"{"cl_type":{"Result":{"ok":"String","err":"String"}},"parsed":{"Err":"e"}}"#, + ); + } + + #[test] + fn key_cl_value_should_encode_to_json() { + let key = Key::Hash([2; KEY_HASH_LENGTH]); + check_to_json( + Result::::Ok(key), + r#"{"cl_type":{"Result":{"ok":"Key","err":"I32"}},"parsed":{"Ok":{"Hash":"hash-0202020202020202020202020202020202020202020202020202020202020202"}}}"#, + ); + check_to_json( + Result::::Ok(key), + r#"{"cl_type":{"Result":{"ok":"Key","err":"U32"}},"parsed":{"Ok":{"Hash":"hash-0202020202020202020202020202020202020202020202020202020202020202"}}}"#, + ); + check_to_json( + Result::::Ok(key), + r#"{"cl_type":{"Result":{"ok":"Key","err":"Unit"}},"parsed":{"Ok":{"Hash":"hash-0202020202020202020202020202020202020202020202020202020202020202"}}}"#, + ); + check_to_json( + Result::::Ok(key), + r#"{"cl_type":{"Result":{"ok":"Key","err":"String"}},"parsed":{"Ok":{"Hash":"hash-0202020202020202020202020202020202020202020202020202020202020202"}}}"#, + ); + check_to_json( + Result::::Err(-1), + r#"{"cl_type":{"Result":{"ok":"Key","err":"I32"}},"parsed":{"Err":-1}}"#, + ); + check_to_json( + Result::::Err(1), + r#"{"cl_type":{"Result":{"ok":"Key","err":"U32"}},"parsed":{"Err":1}}"#, + ); + check_to_json( + Result::::Err(()), + r#"{"cl_type":{"Result":{"ok":"Key","err":"Unit"}},"parsed":{"Err":null}}"#, + ); + check_to_json( + Result::::Err("e".to_string()), + r#"{"cl_type":{"Result":{"ok":"Key","err":"String"}},"parsed":{"Err":"e"}}"#, + ); + } + + #[test] + fn uref_cl_value_should_encode_to_json() { + let uref = URef::new([6; UREF_ADDR_LENGTH], AccessRights::READ_ADD_WRITE); + 
check_to_json( + Result::::Ok(uref), + r#"{"cl_type":{"Result":{"ok":"URef","err":"I32"}},"parsed":{"Ok":"uref-0606060606060606060606060606060606060606060606060606060606060606-007"}}"#, + ); + check_to_json( + Result::::Ok(uref), + r#"{"cl_type":{"Result":{"ok":"URef","err":"U32"}},"parsed":{"Ok":"uref-0606060606060606060606060606060606060606060606060606060606060606-007"}}"#, + ); + check_to_json( + Result::::Ok(uref), + r#"{"cl_type":{"Result":{"ok":"URef","err":"Unit"}},"parsed":{"Ok":"uref-0606060606060606060606060606060606060606060606060606060606060606-007"}}"#, + ); + check_to_json( + Result::::Ok(uref), + r#"{"cl_type":{"Result":{"ok":"URef","err":"String"}},"parsed":{"Ok":"uref-0606060606060606060606060606060606060606060606060606060606060606-007"}}"#, + ); + check_to_json( + Result::::Err(-1), + r#"{"cl_type":{"Result":{"ok":"URef","err":"I32"}},"parsed":{"Err":-1}}"#, + ); + check_to_json( + Result::::Err(1), + r#"{"cl_type":{"Result":{"ok":"URef","err":"U32"}},"parsed":{"Err":1}}"#, + ); + check_to_json( + Result::::Err(()), + r#"{"cl_type":{"Result":{"ok":"URef","err":"Unit"}},"parsed":{"Err":null}}"#, + ); + check_to_json( + Result::::Err("e".to_string()), + r#"{"cl_type":{"Result":{"ok":"URef","err":"String"}},"parsed":{"Err":"e"}}"#, + ); + } + + #[test] + fn public_key_cl_value_should_encode_to_json() { + let secret_key = + SecretKey::secp256k1_from_bytes([8; SecretKey::SECP256K1_LENGTH]).unwrap(); + let public_key = PublicKey::from(&secret_key); + check_to_json( + Result::::Ok(public_key.clone()), + r#"{"cl_type":{"Result":{"ok":"PublicKey","err":"I32"}},"parsed":{"Ok":"0203f991f944d1e1954a7fc8b9bf62e0d78f015f4c07762d505e20e6c45260a3661b"}}"#, + ); + check_to_json( + Result::::Ok(public_key.clone()), + r#"{"cl_type":{"Result":{"ok":"PublicKey","err":"U32"}},"parsed":{"Ok":"0203f991f944d1e1954a7fc8b9bf62e0d78f015f4c07762d505e20e6c45260a3661b"}}"#, + ); + check_to_json( + Result::::Ok(public_key.clone()), + 
r#"{"cl_type":{"Result":{"ok":"PublicKey","err":"Unit"}},"parsed":{"Ok":"0203f991f944d1e1954a7fc8b9bf62e0d78f015f4c07762d505e20e6c45260a3661b"}}"#, + ); + check_to_json( + Result::::Ok(public_key), + r#"{"cl_type":{"Result":{"ok":"PublicKey","err":"String"}},"parsed":{"Ok":"0203f991f944d1e1954a7fc8b9bf62e0d78f015f4c07762d505e20e6c45260a3661b"}}"#, + ); + check_to_json( + Result::::Err(-1), + r#"{"cl_type":{"Result":{"ok":"PublicKey","err":"I32"}},"parsed":{"Err":-1}}"#, + ); + check_to_json( + Result::::Err(1), + r#"{"cl_type":{"Result":{"ok":"PublicKey","err":"U32"}},"parsed":{"Err":1}}"#, + ); + check_to_json( + Result::::Err(()), + r#"{"cl_type":{"Result":{"ok":"PublicKey","err":"Unit"}},"parsed":{"Err":null}}"#, + ); + check_to_json( + Result::::Err("e".to_string()), + r#"{"cl_type":{"Result":{"ok":"PublicKey","err":"String"}},"parsed":{"Err":"e"}}"#, + ); + } + } +} diff --git a/casper_types/src/cl_value/jsonrepr.rs b/casper_types/src/cl_value/jsonrepr.rs new file mode 100644 index 00000000..1b3b3e28 --- /dev/null +++ b/casper_types/src/cl_value/jsonrepr.rs @@ -0,0 +1,272 @@ +use alloc::{string::String, vec, vec::Vec}; + +use serde::Serialize; +use serde_json::{json, Value}; + +use crate::{ + bytesrepr::{self, FromBytes, OPTION_NONE_TAG, OPTION_SOME_TAG, RESULT_ERR_TAG, RESULT_OK_TAG}, + cl_type::CL_TYPE_RECURSION_DEPTH, + CLType, CLValue, Key, PublicKey, URef, U128, U256, U512, +}; + +/// Returns a best-effort attempt to convert the `CLValue` into a meaningful JSON value. 
+pub fn cl_value_to_json(cl_value: &CLValue) -> Option { + depth_limited_to_json(0, cl_value.cl_type(), cl_value.inner_bytes()).and_then( + |(json_value, remainder)| { + if remainder.is_empty() { + Some(json_value) + } else { + None + } + }, + ) +} + +fn depth_limited_to_json<'a>( + depth: u8, + cl_type: &CLType, + bytes: &'a [u8], +) -> Option<(Value, &'a [u8])> { + if depth >= CL_TYPE_RECURSION_DEPTH { + return None; + } + let depth = depth + 1; + + match cl_type { + CLType::Bool => simple_type_to_json::(bytes), + CLType::I32 => simple_type_to_json::(bytes), + CLType::I64 => simple_type_to_json::(bytes), + CLType::U8 => simple_type_to_json::(bytes), + CLType::U32 => simple_type_to_json::(bytes), + CLType::U64 => simple_type_to_json::(bytes), + CLType::U128 => simple_type_to_json::(bytes), + CLType::U256 => simple_type_to_json::(bytes), + CLType::U512 => simple_type_to_json::(bytes), + CLType::Unit => simple_type_to_json::<()>(bytes), + CLType::String => simple_type_to_json::(bytes), + CLType::Key => simple_type_to_json::(bytes), + CLType::URef => simple_type_to_json::(bytes), + CLType::PublicKey => simple_type_to_json::(bytes), + CLType::Option(inner_cl_type) => { + let (variant, remainder) = u8::from_bytes(bytes).ok()?; + match variant { + OPTION_NONE_TAG => Some((Value::Null, remainder)), + OPTION_SOME_TAG => Some(depth_limited_to_json(depth, inner_cl_type, remainder)?), + _ => None, + } + } + CLType::List(inner_cl_type) => { + let (count, mut stream) = u32::from_bytes(bytes).ok()?; + let mut result: Vec = Vec::new(); + for _ in 0..count { + let (value, remainder) = depth_limited_to_json(depth, inner_cl_type, stream)?; + result.push(value); + stream = remainder; + } + Some((json!(result), stream)) + } + CLType::ByteArray(length) => { + let (bytes, remainder) = bytesrepr::safe_split_at(bytes, *length as usize).ok()?; + let hex_encoded_bytes = base16::encode_lower(&bytes); + Some((json![hex_encoded_bytes], remainder)) + } + CLType::Result { ok, err } => { + let 
(variant, remainder) = u8::from_bytes(bytes).ok()?; + match variant { + RESULT_ERR_TAG => { + let (value, remainder) = depth_limited_to_json(depth, err, remainder)?; + Some((json!({ "Err": value }), remainder)) + } + RESULT_OK_TAG => { + let (value, remainder) = depth_limited_to_json(depth, ok, remainder)?; + Some((json!({ "Ok": value }), remainder)) + } + _ => None, + } + } + CLType::Map { key, value } => { + let (num_keys, mut stream) = u32::from_bytes(bytes).ok()?; + let mut result: Vec = Vec::new(); + for _ in 0..num_keys { + let (k, remainder) = depth_limited_to_json(depth, key, stream)?; + let (v, remainder) = depth_limited_to_json(depth, value, remainder)?; + result.push(json!({"key": k, "value": v})); + stream = remainder; + } + Some((json!(result), stream)) + } + CLType::Tuple1(arr) => { + let (t1, remainder) = depth_limited_to_json(depth, &arr[0], bytes)?; + Some((json!([t1]), remainder)) + } + CLType::Tuple2(arr) => { + let (t1, remainder) = depth_limited_to_json(depth, &arr[0], bytes)?; + let (t2, remainder) = depth_limited_to_json(depth, &arr[1], remainder)?; + Some((json!([t1, t2]), remainder)) + } + CLType::Tuple3(arr) => { + let (t1, remainder) = depth_limited_to_json(depth, &arr[0], bytes)?; + let (t2, remainder) = depth_limited_to_json(depth, &arr[1], remainder)?; + let (t3, remainder) = depth_limited_to_json(depth, &arr[2], remainder)?; + Some((json!([t1, t2, t3]), remainder)) + } + CLType::Any => None, + } +} + +fn simple_type_to_json(bytes: &[u8]) -> Option<(Value, &[u8])> { + let (value, remainder) = T::from_bytes(bytes).ok()?; + Some((json!(value), remainder)) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{bytesrepr::ToBytes, AsymmetricType, CLTyped, SecretKey}; + use alloc::collections::BTreeMap; + + fn test_value(value: T) { + let cl_value = CLValue::from_t(value.clone()).unwrap(); + let cl_value_as_json: Value = cl_value_to_json(&cl_value).unwrap(); + let expected = json!(value); + assert_eq!(cl_value_as_json, expected); + } 
+ + #[test] + fn list_of_ints_to_json_value() { + test_value::>(vec![]); + test_value(vec![10u32, 12u32]); + } + + #[test] + fn list_of_bools_to_json_value() { + test_value(vec![true, false]); + } + + #[test] + fn list_of_string_to_json_value() { + test_value(vec!["rust", "python"]); + } + + #[test] + fn list_of_public_keys_to_json_value() { + let a = PublicKey::from( + &SecretKey::secp256k1_from_bytes([3; SecretKey::SECP256K1_LENGTH]).unwrap(), + ); + let b = PublicKey::from( + &SecretKey::ed25519_from_bytes([3; SecretKey::ED25519_LENGTH]).unwrap(), + ); + let a_hex = a.to_hex(); + let b_hex = b.to_hex(); + let cl_value = CLValue::from_t(vec![a, b]).unwrap(); + let cl_value_as_json: Value = cl_value_to_json(&cl_value).unwrap(); + let expected = json!([a_hex, b_hex]); + assert_eq!(cl_value_as_json, expected); + } + + #[test] + fn list_of_list_of_public_keys_to_json_value() { + let a = PublicKey::from( + &SecretKey::secp256k1_from_bytes([3; SecretKey::SECP256K1_LENGTH]).unwrap(), + ); + let b = PublicKey::from( + &SecretKey::ed25519_from_bytes([3; PublicKey::ED25519_LENGTH]).unwrap(), + ); + let c = PublicKey::from( + &SecretKey::ed25519_from_bytes([6; PublicKey::ED25519_LENGTH]).unwrap(), + ); + let a_hex = a.to_hex(); + let b_hex = b.to_hex(); + let c_hex = c.to_hex(); + let cl_value = CLValue::from_t(vec![vec![a, b], vec![c]]).unwrap(); + let cl_value_as_json: Value = cl_value_to_json(&cl_value).unwrap(); + let expected = json!([[a_hex, b_hex], [c_hex]]); + assert_eq!(cl_value_as_json, expected); + } + + #[test] + fn map_of_string_to_list_of_ints_to_json_value() { + let key1 = String::from("first"); + let key2 = String::from("second"); + let value1 = vec![]; + let value2 = vec![1, 2, 3]; + let mut map: BTreeMap> = BTreeMap::new(); + map.insert(key1.clone(), value1.clone()); + map.insert(key2.clone(), value2.clone()); + let cl_value = CLValue::from_t(map).unwrap(); + let cl_value_as_json: Value = cl_value_to_json(&cl_value).unwrap(); + let expected = json!([ + { 
"key": key1, "value": value1 }, + { "key": key2, "value": value2 } + ]); + assert_eq!(cl_value_as_json, expected); + } + + #[test] + fn option_some_of_lists_to_json_value() { + test_value(Some(vec![1, 2, 3])); + } + + #[test] + fn option_none_to_json_value() { + test_value(Option::::None); + } + + #[test] + fn bytes_to_json_value() { + let bytes = [1_u8, 2]; + let cl_value = CLValue::from_t(bytes).unwrap(); + let cl_value_as_json = cl_value_to_json(&cl_value).unwrap(); + let expected = json!(base16::encode_lower(&bytes)); + assert_eq!(cl_value_as_json, expected); + } + + #[test] + fn result_ok_to_json_value() { + test_value(Result::, String>::Ok(vec![1, 2, 3])); + } + + #[test] + fn result_error_to_json_value() { + test_value(Result::, String>::Err(String::from("Upsss"))); + } + + #[test] + fn tuples_to_json_value() { + let v1 = String::from("Hello"); + let v2 = vec![1, 2, 3]; + let v3 = 1u8; + + test_value((v1.clone(),)); + test_value((v1.clone(), v2.clone())); + test_value((v1, v2, v3)); + } + + #[test] + fn json_encoding_nested_tuple_1_value_should_not_stack_overflow() { + // Returns a CLType corresponding to (((...(cl_type,),...),),) nested in tuples to + // `depth_limit`. 
+ fn wrap_in_tuple1(cl_type: CLType, current_depth: usize, depth_limit: usize) -> CLType { + if current_depth == depth_limit { + return cl_type; + } + wrap_in_tuple1( + CLType::Tuple1([Box::new(cl_type)]), + current_depth + 1, + depth_limit, + ) + } + + for depth_limit in &[1, CL_TYPE_RECURSION_DEPTH as usize] { + let cl_type = wrap_in_tuple1(CLType::Unit, 1, *depth_limit); + let cl_value = CLValue::from_components(cl_type, vec![]); + assert!(cl_value_to_json(&cl_value).is_some()); + } + + for depth_limit in &[CL_TYPE_RECURSION_DEPTH as usize + 1, 1000] { + let cl_type = wrap_in_tuple1(CLType::Unit, 1, *depth_limit); + let cl_value = CLValue::from_components(cl_type, vec![]); + assert!(cl_value_to_json(&cl_value).is_none()); + } + } +} diff --git a/casper_types/src/contract_wasm.rs b/casper_types/src/contract_wasm.rs new file mode 100644 index 00000000..aaca3817 --- /dev/null +++ b/casper_types/src/contract_wasm.rs @@ -0,0 +1,372 @@ +use alloc::{format, string::String, vec::Vec}; +use core::{ + array::TryFromSliceError, + convert::TryFrom, + fmt::{self, Debug, Display, Formatter}, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::{gen::SchemaGenerator, schema::Schema, JsonSchema}; +use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; + +use crate::{ + account, + account::TryFromSliceForAccountHashError, + bytesrepr::{Bytes, Error, FromBytes, ToBytes}, + checksummed_hex, uref, CLType, CLTyped, HashAddr, +}; + +const CONTRACT_WASM_MAX_DISPLAY_LEN: usize = 16; +const KEY_HASH_LENGTH: usize = 32; +const WASM_STRING_PREFIX: &str = "contract-wasm-"; + +/// Associated error type of `TryFrom<&[u8]>` for `ContractWasmHash`. 
+#[derive(Debug)] +pub struct TryFromSliceForContractHashError(()); + +#[derive(Debug)] +#[non_exhaustive] +pub enum FromStrError { + InvalidPrefix, + Hex(base16::DecodeError), + Account(TryFromSliceForAccountHashError), + Hash(TryFromSliceError), + AccountHash(account::FromStrError), + URef(uref::FromStrError), +} + +impl From for FromStrError { + fn from(error: base16::DecodeError) -> Self { + FromStrError::Hex(error) + } +} + +impl From for FromStrError { + fn from(error: TryFromSliceForAccountHashError) -> Self { + FromStrError::Account(error) + } +} + +impl From for FromStrError { + fn from(error: TryFromSliceError) -> Self { + FromStrError::Hash(error) + } +} + +impl From for FromStrError { + fn from(error: account::FromStrError) -> Self { + FromStrError::AccountHash(error) + } +} + +impl From for FromStrError { + fn from(error: uref::FromStrError) -> Self { + FromStrError::URef(error) + } +} + +impl Display for FromStrError { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match self { + FromStrError::InvalidPrefix => write!(f, "invalid prefix"), + FromStrError::Hex(error) => write!(f, "decode from hex: {}", error), + FromStrError::Account(error) => write!(f, "account from string error: {:?}", error), + FromStrError::Hash(error) => write!(f, "hash from string error: {}", error), + FromStrError::AccountHash(error) => { + write!(f, "account hash from string error: {:?}", error) + } + FromStrError::URef(error) => write!(f, "uref from string error: {:?}", error), + } + } +} + +/// A newtype wrapping a `HashAddr` which is the raw bytes of +/// the ContractWasmHash +#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct ContractWasmHash(HashAddr); + +impl ContractWasmHash { + /// Constructs a new `ContractWasmHash` from the raw bytes of the contract wasm hash. 
+ pub const fn new(value: HashAddr) -> ContractWasmHash { + ContractWasmHash(value) + } + + /// Returns the raw bytes of the contract hash as an array. + pub fn value(&self) -> HashAddr { + self.0 + } + + /// Returns the raw bytes of the contract hash as a `slice`. + pub fn as_bytes(&self) -> &[u8] { + &self.0 + } + + /// Formats the `ContractWasmHash` for users getting and putting. + pub fn to_formatted_string(self) -> String { + format!("{}{}", WASM_STRING_PREFIX, base16::encode_lower(&self.0),) + } + + /// Parses a string formatted as per `Self::to_formatted_string()` into a + /// `ContractWasmHash`. + pub fn from_formatted_str(input: &str) -> Result { + let remainder = input + .strip_prefix(WASM_STRING_PREFIX) + .ok_or(FromStrError::InvalidPrefix)?; + let bytes = HashAddr::try_from(checksummed_hex::decode(remainder)?.as_ref())?; + Ok(ContractWasmHash(bytes)) + } +} + +impl Display for ContractWasmHash { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + write!(f, "{}", base16::encode_lower(&self.0)) + } +} + +impl Debug for ContractWasmHash { + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + write!(f, "ContractWasmHash({})", base16::encode_lower(&self.0)) + } +} + +impl CLTyped for ContractWasmHash { + fn cl_type() -> CLType { + CLType::ByteArray(KEY_HASH_LENGTH as u32) + } +} + +impl ToBytes for ContractWasmHash { + #[inline(always)] + fn to_bytes(&self) -> Result, Error> { + self.0.to_bytes() + } + + #[inline(always)] + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + #[inline(always)] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + self.0.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for ContractWasmHash { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (bytes, rem) = FromBytes::from_bytes(bytes)?; + Ok((ContractWasmHash::new(bytes), rem)) + } +} + +impl From<[u8; 32]> for ContractWasmHash { + fn from(bytes: [u8; 32]) -> Self { + ContractWasmHash(bytes) + } 
+} + +impl Serialize for ContractWasmHash { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + self.to_formatted_string().serialize(serializer) + } else { + self.0.serialize(serializer) + } + } +} + +impl<'de> Deserialize<'de> for ContractWasmHash { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let formatted_string = String::deserialize(deserializer)?; + ContractWasmHash::from_formatted_str(&formatted_string).map_err(SerdeError::custom) + } else { + let bytes = HashAddr::deserialize(deserializer)?; + Ok(ContractWasmHash(bytes)) + } + } +} + +impl AsRef<[u8]> for ContractWasmHash { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + +impl TryFrom<&[u8]> for ContractWasmHash { + type Error = TryFromSliceForContractHashError; + + fn try_from(bytes: &[u8]) -> Result { + HashAddr::try_from(bytes) + .map(ContractWasmHash::new) + .map_err(|_| TryFromSliceForContractHashError(())) + } +} + +impl TryFrom<&Vec> for ContractWasmHash { + type Error = TryFromSliceForContractHashError; + + fn try_from(bytes: &Vec) -> Result { + HashAddr::try_from(bytes as &[u8]) + .map(ContractWasmHash::new) + .map_err(|_| TryFromSliceForContractHashError(())) + } +} + +#[cfg(feature = "json-schema")] +impl JsonSchema for ContractWasmHash { + fn schema_name() -> String { + String::from("ContractWasmHash") + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let schema = gen.subschema_for::(); + let mut schema_object = schema.into_object(); + schema_object.metadata().description = + Some("The hash address of the contract wasm".to_string()); + schema_object.into() + } +} + +/// A container for contract's WASM bytes. 
+#[derive(PartialEq, Eq, Clone, Serialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct ContractWasm { + bytes: Bytes, +} + +impl Debug for ContractWasm { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + if self.bytes.len() > CONTRACT_WASM_MAX_DISPLAY_LEN { + write!( + f, + "ContractWasm(0x{}...)", + base16::encode_lower(&self.bytes[..CONTRACT_WASM_MAX_DISPLAY_LEN]) + ) + } else { + write!(f, "ContractWasm(0x{})", base16::encode_lower(&self.bytes)) + } + } +} + +impl ContractWasm { + /// Creates new WASM object from bytes. + pub fn new(bytes: Vec) -> Self { + ContractWasm { + bytes: bytes.into(), + } + } + + /// Consumes instance of [`ContractWasm`] and returns its bytes. + pub fn take_bytes(self) -> Vec { + self.bytes.into() + } + + /// Returns a slice of contained WASM bytes. + pub fn bytes(&self) -> &[u8] { + self.bytes.as_ref() + } +} + +impl ToBytes for ContractWasm { + fn to_bytes(&self) -> Result, Error> { + self.bytes.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.bytes.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + self.bytes.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for ContractWasm { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (bytes, rem1) = FromBytes::from_bytes(bytes)?; + Ok((ContractWasm { bytes }, rem1)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + #[test] + fn test_debug_repr_of_short_wasm() { + const SIZE: usize = 8; + let wasm_bytes = vec![0; SIZE]; + let contract_wasm = ContractWasm::new(wasm_bytes); + // String output is less than the bytes itself + assert_eq!( + format!("{:?}", contract_wasm), + "ContractWasm(0x0000000000000000)" + ); + } + + #[test] + fn test_debug_repr_of_long_wasm() { + const SIZE: usize = 65; + let wasm_bytes = vec![0; SIZE]; + let contract_wasm = ContractWasm::new(wasm_bytes); + // String output is less than the bytes itself + assert_eq!( + format!("{:?}", 
contract_wasm), + "ContractWasm(0x00000000000000000000000000000000...)" + ); + } + + #[test] + fn contract_wasm_hash_from_slice() { + let bytes: Vec = (0..32).collect(); + let contract_hash = + HashAddr::try_from(&bytes[..]).expect("should create contract wasm hash"); + let contract_hash = ContractWasmHash::new(contract_hash); + assert_eq!(&bytes, &contract_hash.as_bytes()); + } + + #[test] + fn contract_wasm_hash_from_str() { + let contract_hash = ContractWasmHash([3; 32]); + let encoded = contract_hash.to_formatted_string(); + let decoded = ContractWasmHash::from_formatted_str(&encoded).unwrap(); + assert_eq!(contract_hash, decoded); + + let invalid_prefix = + "contractwasm-0000000000000000000000000000000000000000000000000000000000000000"; + assert!(ContractWasmHash::from_formatted_str(invalid_prefix).is_err()); + + let short_addr = + "contract-wasm-00000000000000000000000000000000000000000000000000000000000000"; + assert!(ContractWasmHash::from_formatted_str(short_addr).is_err()); + + let long_addr = + "contract-wasm-000000000000000000000000000000000000000000000000000000000000000000"; + assert!(ContractWasmHash::from_formatted_str(long_addr).is_err()); + + let invalid_hex = + "contract-wasm-000000000000000000000000000000000000000000000000000000000000000g"; + assert!(ContractWasmHash::from_formatted_str(invalid_hex).is_err()); + } + + #[test] + fn contract_wasm_hash_serde_roundtrip() { + let contract_hash = ContractWasmHash([255; 32]); + let serialized = bincode::serialize(&contract_hash).unwrap(); + let deserialized = bincode::deserialize(&serialized).unwrap(); + assert_eq!(contract_hash, deserialized) + } + + #[test] + fn contract_wasm_hash_json_roundtrip() { + let contract_hash = ContractWasmHash([255; 32]); + let json_string = serde_json::to_string_pretty(&contract_hash).unwrap(); + let decoded = serde_json::from_str(&json_string).unwrap(); + assert_eq!(contract_hash, decoded) + } +} diff --git a/casper_types/src/contracts.rs b/casper_types/src/contracts.rs 
new file mode 100644 index 00000000..4c39a798 --- /dev/null +++ b/casper_types/src/contracts.rs @@ -0,0 +1,2106 @@ +//! Data types for supporting contract headers feature. +// TODO - remove once schemars stops causing warning. +#![allow(clippy::field_reassign_with_default)] + +use alloc::{ + collections::{BTreeMap, BTreeSet}, + format, + string::{String, ToString}, + vec::Vec, +}; +use core::{ + array::TryFromSliceError, + convert::TryFrom, + fmt::{self, Debug, Display, Formatter}, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::{gen::SchemaGenerator, schema::Schema, JsonSchema}; +use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; + +use crate::{ + account, + account::TryFromSliceForAccountHashError, + bytesrepr::{self, FromBytes, ToBytes, U32_SERIALIZED_LENGTH}, + checksummed_hex, + contract_wasm::ContractWasmHash, + uref, + uref::URef, + CLType, CLTyped, ContextAccessRights, HashAddr, Key, ProtocolVersion, KEY_HASH_LENGTH, +}; + +/// Maximum number of distinct user groups. +pub const MAX_GROUPS: u8 = 10; +/// Maximum number of URefs which can be assigned across all user groups. +pub const MAX_TOTAL_UREFS: usize = 100; + +const CONTRACT_STRING_PREFIX: &str = "contract-"; +const PACKAGE_STRING_PREFIX: &str = "contract-package-"; +// We need to support the legacy prefix of "contract-package-wasm". +const PACKAGE_STRING_LEGACY_EXTRA_PREFIX: &str = "wasm"; + +/// Set of errors which may happen when working with contract headers. +#[derive(Debug, PartialEq, Eq)] +#[repr(u8)] +#[non_exhaustive] +pub enum Error { + /// Attempt to override an existing or previously existing version with a + /// new header (this is not allowed to ensure immutability of a given + /// version). 
+ /// ``` + /// # use casper_types::contracts::Error; + /// assert_eq!(1, Error::PreviouslyUsedVersion as u8); + /// ``` + PreviouslyUsedVersion = 1, + /// Attempted to disable a contract that does not exist. + /// ``` + /// # use casper_types::contracts::Error; + /// assert_eq!(2, Error::ContractNotFound as u8); + /// ``` + ContractNotFound = 2, + /// Attempted to create a user group which already exists (use the update + /// function to change an existing user group). + /// ``` + /// # use casper_types::contracts::Error; + /// assert_eq!(3, Error::GroupAlreadyExists as u8); + /// ``` + GroupAlreadyExists = 3, + /// Attempted to add a new user group which exceeds the allowed maximum + /// number of groups. + /// ``` + /// # use casper_types::contracts::Error; + /// assert_eq!(4, Error::MaxGroupsExceeded as u8); + /// ``` + MaxGroupsExceeded = 4, + /// Attempted to add a new URef to a group, which resulted in the total + /// number of URefs across all user groups to exceed the allowed maximum. + /// ``` + /// # use casper_types::contracts::Error; + /// assert_eq!(5, Error::MaxTotalURefsExceeded as u8); + /// ``` + MaxTotalURefsExceeded = 5, + /// Attempted to remove a URef from a group, which does not exist in the + /// group. + /// ``` + /// # use casper_types::contracts::Error; + /// assert_eq!(6, Error::GroupDoesNotExist as u8); + /// ``` + GroupDoesNotExist = 6, + /// Attempted to remove unknown URef from the group. + /// ``` + /// # use casper_types::contracts::Error; + /// assert_eq!(7, Error::UnableToRemoveURef as u8); + /// ``` + UnableToRemoveURef = 7, + /// Group is use by at least one active contract. + /// ``` + /// # use casper_types::contracts::Error; + /// assert_eq!(8, Error::GroupInUse as u8); + /// ``` + GroupInUse = 8, + /// URef already exists in given group. 
+ /// ``` + /// # use casper_types::contracts::Error; + /// assert_eq!(9, Error::URefAlreadyExists as u8); + /// ``` + URefAlreadyExists = 9, +} + +impl TryFrom for Error { + type Error = (); + + fn try_from(value: u8) -> Result { + let error = match value { + v if v == Self::PreviouslyUsedVersion as u8 => Self::PreviouslyUsedVersion, + v if v == Self::ContractNotFound as u8 => Self::ContractNotFound, + v if v == Self::GroupAlreadyExists as u8 => Self::GroupAlreadyExists, + v if v == Self::MaxGroupsExceeded as u8 => Self::MaxGroupsExceeded, + v if v == Self::MaxTotalURefsExceeded as u8 => Self::MaxTotalURefsExceeded, + v if v == Self::GroupDoesNotExist as u8 => Self::GroupDoesNotExist, + v if v == Self::UnableToRemoveURef as u8 => Self::UnableToRemoveURef, + v if v == Self::GroupInUse as u8 => Self::GroupInUse, + v if v == Self::URefAlreadyExists as u8 => Self::URefAlreadyExists, + _ => return Err(()), + }; + Ok(error) + } +} + +/// Associated error type of `TryFrom<&[u8]>` for `ContractHash`. +#[derive(Debug)] +pub struct TryFromSliceForContractHashError(()); + +impl Display for TryFromSliceForContractHashError { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!(f, "failed to retrieve from slice") + } +} + +/// An error from parsing a formatted contract string +#[derive(Debug)] +#[non_exhaustive] +pub enum FromStrError { + /// Invalid formatted string prefix. + InvalidPrefix, + /// Error when decoding a hex string + Hex(base16::DecodeError), + /// Error when parsing an account + Account(TryFromSliceForAccountHashError), + /// Error when parsing the hash. + Hash(TryFromSliceError), + /// Error when parsing an account hash. + AccountHash(account::FromStrError), + /// Error when parsing an uref. 
+ URef(uref::FromStrError), +} + +impl From for FromStrError { + fn from(error: base16::DecodeError) -> Self { + FromStrError::Hex(error) + } +} + +impl From for FromStrError { + fn from(error: TryFromSliceForAccountHashError) -> Self { + FromStrError::Account(error) + } +} + +impl From for FromStrError { + fn from(error: TryFromSliceError) -> Self { + FromStrError::Hash(error) + } +} + +impl From for FromStrError { + fn from(error: account::FromStrError) -> Self { + FromStrError::AccountHash(error) + } +} + +impl From for FromStrError { + fn from(error: uref::FromStrError) -> Self { + FromStrError::URef(error) + } +} + +impl Display for FromStrError { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match self { + FromStrError::InvalidPrefix => write!(f, "invalid prefix"), + FromStrError::Hex(error) => write!(f, "decode from hex: {}", error), + FromStrError::Account(error) => write!(f, "account from string error: {:?}", error), + FromStrError::Hash(error) => write!(f, "hash from string error: {}", error), + FromStrError::AccountHash(error) => { + write!(f, "account hash from string error: {:?}", error) + } + FromStrError::URef(error) => write!(f, "uref from string error: {:?}", error), + } + } +} + +/// A (labelled) "user group". Each method of a versioned contract may be +/// associated with one or more user groups which are allowed to call it. +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct Group(String); + +impl Group { + /// Basic constructor + pub fn new>(s: T) -> Self { + Group(s.into()) + } + + /// Retrieves underlying name. 
+ pub fn value(&self) -> &str { + &self.0 + } +} + +impl From for String { + fn from(group: Group) -> Self { + group.0 + } +} + +impl ToBytes for Group { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.value().write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for Group { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + String::from_bytes(bytes).map(|(label, bytes)| (Group(label), bytes)) + } +} + +/// Automatically incremented value for a contract version within a major `ProtocolVersion`. +pub type ContractVersion = u32; + +/// Within each discrete major `ProtocolVersion`, contract version resets to this value. +pub const CONTRACT_INITIAL_VERSION: ContractVersion = 1; + +/// Major element of `ProtocolVersion` a `ContractVersion` is compatible with. +pub type ProtocolVersionMajor = u32; + +/// Major element of `ProtocolVersion` combined with `ContractVersion`. +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct ContractVersionKey(ProtocolVersionMajor, ContractVersion); + +impl ContractVersionKey { + /// Returns a new instance of ContractVersionKey with provided values. + pub fn new( + protocol_version_major: ProtocolVersionMajor, + contract_version: ContractVersion, + ) -> Self { + Self(protocol_version_major, contract_version) + } + + /// Returns the major element of the protocol version this contract is compatible with. + pub fn protocol_version_major(self) -> ProtocolVersionMajor { + self.0 + } + + /// Returns the contract version within the protocol major version. 
+ pub fn contract_version(self) -> ContractVersion { + self.1 + } +} + +impl From for (ProtocolVersionMajor, ContractVersion) { + fn from(contract_version_key: ContractVersionKey) -> Self { + (contract_version_key.0, contract_version_key.1) + } +} + +/// Serialized length of `ContractVersionKey`. +pub const CONTRACT_VERSION_KEY_SERIALIZED_LENGTH: usize = + U32_SERIALIZED_LENGTH + U32_SERIALIZED_LENGTH; + +impl ToBytes for ContractVersionKey { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret = bytesrepr::unchecked_allocate_buffer(self); + ret.append(&mut self.0.to_bytes()?); + ret.append(&mut self.1.to_bytes()?); + Ok(ret) + } + + fn serialized_length(&self) -> usize { + CONTRACT_VERSION_KEY_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer)?; + self.1.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for ContractVersionKey { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (major, rem): (u32, &[u8]) = FromBytes::from_bytes(bytes)?; + let (contract, rem): (ContractVersion, &[u8]) = FromBytes::from_bytes(rem)?; + Ok((ContractVersionKey::new(major, contract), rem)) + } +} + +impl fmt::Display for ContractVersionKey { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}.{}", self.0, self.1) + } +} + +/// Collection of contract versions. +pub type ContractVersions = BTreeMap; + +/// Collection of disabled contract versions. The runtime will not permit disabled +/// contract versions to be executed. +pub type DisabledVersions = BTreeSet; + +/// Collection of named groups. +pub type Groups = BTreeMap>; + +/// A newtype wrapping a `HashAddr` which references a [`Contract`] in the global state. 
+#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct ContractHash(HashAddr); + +impl ContractHash { + /// Constructs a new `ContractHash` from the raw bytes of the contract hash. + pub const fn new(value: HashAddr) -> ContractHash { + ContractHash(value) + } + + /// Returns the raw bytes of the contract hash as an array. + pub fn value(&self) -> HashAddr { + self.0 + } + + /// Returns the raw bytes of the contract hash as a `slice`. + pub fn as_bytes(&self) -> &[u8] { + &self.0 + } + + /// Formats the `ContractHash` for users getting and putting. + pub fn to_formatted_string(self) -> String { + format!( + "{}{}", + CONTRACT_STRING_PREFIX, + base16::encode_lower(&self.0), + ) + } + + /// Parses a string formatted as per `Self::to_formatted_string()` into a + /// `ContractHash`. + pub fn from_formatted_str(input: &str) -> Result { + let remainder = input + .strip_prefix(CONTRACT_STRING_PREFIX) + .ok_or(FromStrError::InvalidPrefix)?; + let bytes = HashAddr::try_from(checksummed_hex::decode(remainder)?.as_ref())?; + Ok(ContractHash(bytes)) + } +} + +impl Display for ContractHash { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + write!(f, "{}", base16::encode_lower(&self.0)) + } +} + +impl Debug for ContractHash { + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + write!(f, "ContractHash({})", base16::encode_lower(&self.0)) + } +} + +impl CLTyped for ContractHash { + fn cl_type() -> CLType { + CLType::ByteArray(KEY_HASH_LENGTH as u32) + } +} + +impl ToBytes for ContractHash { + #[inline(always)] + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + #[inline(always)] + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + #[inline(always)] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + writer.extend_from_slice(&self.0); + Ok(()) + } +} + +impl FromBytes for ContractHash { + fn 
from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (bytes, rem) = FromBytes::from_bytes(bytes)?; + Ok((ContractHash::new(bytes), rem)) + } +} + +impl From<[u8; 32]> for ContractHash { + fn from(bytes: [u8; 32]) -> Self { + ContractHash(bytes) + } +} + +impl Serialize for ContractHash { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + self.to_formatted_string().serialize(serializer) + } else { + self.0.serialize(serializer) + } + } +} + +impl<'de> Deserialize<'de> for ContractHash { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let formatted_string = String::deserialize(deserializer)?; + ContractHash::from_formatted_str(&formatted_string).map_err(SerdeError::custom) + } else { + let bytes = HashAddr::deserialize(deserializer)?; + Ok(ContractHash(bytes)) + } + } +} + +impl AsRef<[u8]> for ContractHash { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + +impl TryFrom<&[u8]> for ContractHash { + type Error = TryFromSliceForContractHashError; + + fn try_from(bytes: &[u8]) -> Result { + HashAddr::try_from(bytes) + .map(ContractHash::new) + .map_err(|_| TryFromSliceForContractHashError(())) + } +} + +impl TryFrom<&Vec> for ContractHash { + type Error = TryFromSliceForContractHashError; + + fn try_from(bytes: &Vec) -> Result { + HashAddr::try_from(bytes as &[u8]) + .map(ContractHash::new) + .map_err(|_| TryFromSliceForContractHashError(())) + } +} + +#[cfg(feature = "json-schema")] +impl JsonSchema for ContractHash { + fn schema_name() -> String { + String::from("ContractHash") + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let schema = gen.subschema_for::(); + let mut schema_object = schema.into_object(); + schema_object.metadata().description = Some("The hash address of the contract".to_string()); + schema_object.into() + } +} + +/// A newtype wrapping a `HashAddr` which references a [`ContractPackage`] in the global state. 
+#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct ContractPackageHash(HashAddr); + +impl ContractPackageHash { + /// Constructs a new `ContractPackageHash` from the raw bytes of the contract package hash. + pub const fn new(value: HashAddr) -> ContractPackageHash { + ContractPackageHash(value) + } + + /// Returns the raw bytes of the contract hash as an array. + pub fn value(&self) -> HashAddr { + self.0 + } + + /// Returns the raw bytes of the contract hash as a `slice`. + pub fn as_bytes(&self) -> &[u8] { + &self.0 + } + + /// Formats the `ContractPackageHash` for users getting and putting. + pub fn to_formatted_string(self) -> String { + format!("{}{}", PACKAGE_STRING_PREFIX, base16::encode_lower(&self.0),) + } + + /// Parses a string formatted as per `Self::to_formatted_string()` into a + /// `ContractPackageHash`. + pub fn from_formatted_str(input: &str) -> Result { + let remainder = input + .strip_prefix(PACKAGE_STRING_PREFIX) + .ok_or(FromStrError::InvalidPrefix)?; + + let hex_addr = remainder + .strip_prefix(PACKAGE_STRING_LEGACY_EXTRA_PREFIX) + .unwrap_or(remainder); + + let bytes = HashAddr::try_from(checksummed_hex::decode(hex_addr)?.as_ref())?; + Ok(ContractPackageHash(bytes)) + } +} + +impl Display for ContractPackageHash { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + write!(f, "{}", base16::encode_lower(&self.0)) + } +} + +impl Debug for ContractPackageHash { + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + write!(f, "ContractPackageHash({})", base16::encode_lower(&self.0)) + } +} + +impl CLTyped for ContractPackageHash { + fn cl_type() -> CLType { + CLType::ByteArray(KEY_HASH_LENGTH as u32) + } +} + +impl ToBytes for ContractPackageHash { + #[inline(always)] + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + #[inline(always)] + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + 
#[inline(always)] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + writer.extend_from_slice(&self.0); + Ok(()) + } +} + +impl FromBytes for ContractPackageHash { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (bytes, rem) = FromBytes::from_bytes(bytes)?; + Ok((ContractPackageHash::new(bytes), rem)) + } +} + +impl From<[u8; 32]> for ContractPackageHash { + fn from(bytes: [u8; 32]) -> Self { + ContractPackageHash(bytes) + } +} + +impl Serialize for ContractPackageHash { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + self.to_formatted_string().serialize(serializer) + } else { + self.0.serialize(serializer) + } + } +} + +impl<'de> Deserialize<'de> for ContractPackageHash { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let formatted_string = String::deserialize(deserializer)?; + ContractPackageHash::from_formatted_str(&formatted_string).map_err(SerdeError::custom) + } else { + let bytes = HashAddr::deserialize(deserializer)?; + Ok(ContractPackageHash(bytes)) + } + } +} + +impl AsRef<[u8]> for ContractPackageHash { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + +impl TryFrom<&[u8]> for ContractPackageHash { + type Error = TryFromSliceForContractHashError; + + fn try_from(bytes: &[u8]) -> Result { + HashAddr::try_from(bytes) + .map(ContractPackageHash::new) + .map_err(|_| TryFromSliceForContractHashError(())) + } +} + +impl TryFrom<&Vec> for ContractPackageHash { + type Error = TryFromSliceForContractHashError; + + fn try_from(bytes: &Vec) -> Result { + HashAddr::try_from(bytes as &[u8]) + .map(ContractPackageHash::new) + .map_err(|_| TryFromSliceForContractHashError(())) + } +} + +#[cfg(feature = "json-schema")] +impl JsonSchema for ContractPackageHash { + fn schema_name() -> String { + String::from("ContractPackageHash") + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let schema = 
gen.subschema_for::(); + let mut schema_object = schema.into_object(); + schema_object.metadata().description = + Some("The hash address of the contract package".to_string()); + schema_object.into() + } +} + +/// A enum to determine the lock status of the contract package. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum ContractPackageStatus { + /// The package is locked and cannot be versioned. + Locked, + /// The package is unlocked and can be versioned. + Unlocked, +} + +impl ContractPackageStatus { + /// Create a new status flag based on a boolean value + pub fn new(is_locked: bool) -> Self { + if is_locked { + ContractPackageStatus::Locked + } else { + ContractPackageStatus::Unlocked + } + } +} + +impl Default for ContractPackageStatus { + fn default() -> Self { + Self::Unlocked + } +} + +impl ToBytes for ContractPackageStatus { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + match self { + ContractPackageStatus::Unlocked => result.append(&mut false.to_bytes()?), + ContractPackageStatus::Locked => result.append(&mut true.to_bytes()?), + } + Ok(result) + } + + fn serialized_length(&self) -> usize { + match self { + ContractPackageStatus::Unlocked => false.serialized_length(), + ContractPackageStatus::Locked => true.serialized_length(), + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + ContractPackageStatus::Locked => writer.push(u8::from(true)), + ContractPackageStatus::Unlocked => writer.push(u8::from(false)), + } + Ok(()) + } +} + +impl FromBytes for ContractPackageStatus { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (val, bytes) = bool::from_bytes(bytes)?; + let status = ContractPackageStatus::new(val); + Ok((status, bytes)) + } +} + +/// Contract definition, metadata, and 
security container. +#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct ContractPackage { + /// Key used to add or disable versions + access_key: URef, + /// All versions (enabled & disabled) + versions: ContractVersions, + /// Disabled versions + disabled_versions: DisabledVersions, + /// Mapping maintaining the set of URefs associated with each "user + /// group". This can be used to control access to methods in a particular + /// version of the contract. A method is callable by any context which + /// "knows" any of the URefs associated with the method's user group. + groups: Groups, + /// A flag that determines whether a contract is locked + lock_status: ContractPackageStatus, +} + +impl CLTyped for ContractPackage { + fn cl_type() -> CLType { + CLType::Any + } +} + +impl ContractPackage { + /// Create new `ContractPackage` (with no versions) from given access key. + pub fn new( + access_key: URef, + versions: ContractVersions, + disabled_versions: DisabledVersions, + groups: Groups, + lock_status: ContractPackageStatus, + ) -> Self { + ContractPackage { + access_key, + versions, + disabled_versions, + groups, + lock_status, + } + } + + /// Get the access key for this contract. + pub fn access_key(&self) -> URef { + self.access_key + } + + /// Get the mutable group definitions for this contract. + pub fn groups_mut(&mut self) -> &mut Groups { + &mut self.groups + } + + /// Get the group definitions for this contract. + pub fn groups(&self) -> &Groups { + &self.groups + } + + /// Adds new group to this contract. 
+ pub fn add_group(&mut self, group: Group, urefs: BTreeSet) { + let v = self.groups.entry(group).or_default(); + v.extend(urefs) + } + + /// Lookup the contract hash for a given contract version (if present) + pub fn lookup_contract_hash( + &self, + contract_version_key: ContractVersionKey, + ) -> Option<&ContractHash> { + if !self.is_version_enabled(contract_version_key) { + return None; + } + self.versions.get(&contract_version_key) + } + + /// Returns `true` if the given contract version exists and is enabled. + pub fn is_version_enabled(&self, contract_version_key: ContractVersionKey) -> bool { + !self.disabled_versions.contains(&contract_version_key) + && self.versions.contains_key(&contract_version_key) + } + + /// Returns `true` if the given contract hash exists and is enabled. + pub fn is_contract_enabled(&self, contract_hash: &ContractHash) -> bool { + match self.find_contract_version_key_by_hash(contract_hash) { + Some(version_key) => !self.disabled_versions.contains(version_key), + None => false, + } + } + + /// Insert a new contract version; the next sequential version number will be issued. + pub fn insert_contract_version( + &mut self, + protocol_version_major: ProtocolVersionMajor, + contract_hash: ContractHash, + ) -> ContractVersionKey { + let contract_version = self.next_contract_version_for(protocol_version_major); + let key = ContractVersionKey::new(protocol_version_major, contract_version); + self.versions.insert(key, contract_hash); + key + } + + /// Disable the contract version corresponding to the given hash (if it exists). 
+ pub fn disable_contract_version(&mut self, contract_hash: ContractHash) -> Result<(), Error> { + let contract_version_key = self + .find_contract_version_key_by_hash(&contract_hash) + .copied() + .ok_or(Error::ContractNotFound)?; + + if !self.disabled_versions.contains(&contract_version_key) { + self.disabled_versions.insert(contract_version_key); + } + + Ok(()) + } + + /// Enable the contract version corresponding to the given hash (if it exists). + pub fn enable_contract_version(&mut self, contract_hash: ContractHash) -> Result<(), Error> { + let contract_version_key = self + .find_contract_version_key_by_hash(&contract_hash) + .copied() + .ok_or(Error::ContractNotFound)?; + + self.disabled_versions.remove(&contract_version_key); + + Ok(()) + } + + fn find_contract_version_key_by_hash( + &self, + contract_hash: &ContractHash, + ) -> Option<&ContractVersionKey> { + self.versions + .iter() + .filter_map(|(k, v)| if v == contract_hash { Some(k) } else { None }) + .next() + } + + /// Returns reference to all of this contract's versions. + pub fn versions(&self) -> &ContractVersions { + &self.versions + } + + /// Returns all of this contract's enabled contract versions. + pub fn enabled_versions(&self) -> ContractVersions { + let mut ret = ContractVersions::new(); + for version in &self.versions { + if !self.is_version_enabled(*version.0) { + continue; + } + ret.insert(*version.0, *version.1); + } + ret + } + + /// Returns mutable reference to all of this contract's versions (enabled and disabled). + pub fn versions_mut(&mut self) -> &mut ContractVersions { + &mut self.versions + } + + /// Consumes the object and returns all of this contract's versions (enabled and disabled). + pub fn take_versions(self) -> ContractVersions { + self.versions + } + + /// Returns all of this contract's disabled versions. + pub fn disabled_versions(&self) -> &DisabledVersions { + &self.disabled_versions + } + + /// Returns mut reference to all of this contract's disabled versions. 
+ pub fn disabled_versions_mut(&mut self) -> &mut DisabledVersions { + &mut self.disabled_versions + } + + /// Removes a group from this contract (if it exists). + pub fn remove_group(&mut self, group: &Group) -> bool { + self.groups.remove(group).is_some() + } + + /// Gets the next available contract version for the given protocol version + fn next_contract_version_for(&self, protocol_version: ProtocolVersionMajor) -> ContractVersion { + let current_version = self + .versions + .keys() + .rev() + .find_map(|&contract_version_key| { + if contract_version_key.protocol_version_major() == protocol_version { + Some(contract_version_key.contract_version()) + } else { + None + } + }) + .unwrap_or(0); + + current_version + 1 + } + + /// Return the contract version key for the newest enabled contract version. + pub fn current_contract_version(&self) -> Option { + self.enabled_versions().keys().next_back().copied() + } + + /// Return the contract hash for the newest enabled contract version. + pub fn current_contract_hash(&self) -> Option { + self.enabled_versions().values().next_back().copied() + } + + /// Return the lock status of the contract package. 
+ pub fn is_locked(&self) -> bool { + match self.lock_status { + ContractPackageStatus::Unlocked => false, + ContractPackageStatus::Locked => true, + } + } + + /// Return the package status itself + pub fn get_lock_status(&self) -> ContractPackageStatus { + self.lock_status.clone() + } +} + +impl ToBytes for ContractPackage { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + self.access_key().write_bytes(&mut result)?; + self.versions().write_bytes(&mut result)?; + self.disabled_versions().write_bytes(&mut result)?; + self.groups().write_bytes(&mut result)?; + self.lock_status.write_bytes(&mut result)?; + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.access_key.serialized_length() + + self.versions.serialized_length() + + self.disabled_versions.serialized_length() + + self.groups.serialized_length() + + self.lock_status.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.access_key().write_bytes(writer)?; + self.versions().write_bytes(writer)?; + self.disabled_versions().write_bytes(writer)?; + self.groups().write_bytes(writer)?; + self.lock_status.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for ContractPackage { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (access_key, bytes) = URef::from_bytes(bytes)?; + let (versions, bytes) = ContractVersions::from_bytes(bytes)?; + let (disabled_versions, bytes) = DisabledVersions::from_bytes(bytes)?; + let (groups, bytes) = Groups::from_bytes(bytes)?; + let (lock_status, bytes) = ContractPackageStatus::from_bytes(bytes)?; + let result = ContractPackage { + access_key, + versions, + disabled_versions, + groups, + lock_status, + }; + + Ok((result, bytes)) + } +} + +/// Type alias for a container used inside [`EntryPoints`]. 
+pub type EntryPointsMap = BTreeMap; + +/// Collection of named entry points +#[derive(Debug, Clone, PartialEq, Eq, Serialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct EntryPoints(EntryPointsMap); + +impl Default for EntryPoints { + fn default() -> Self { + let mut entry_points = EntryPoints::new(); + let entry_point = EntryPoint::default(); + entry_points.add_entry_point(entry_point); + entry_points + } +} + +impl ToBytes for EntryPoints { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for EntryPoints { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (entry_points_map, rem) = EntryPointsMap::from_bytes(bytes)?; + Ok((EntryPoints(entry_points_map), rem)) + } +} + +impl EntryPoints { + /// Creates empty instance of [`EntryPoints`]. + pub fn new() -> EntryPoints { + EntryPoints(EntryPointsMap::new()) + } + + /// Adds new [`EntryPoint`]. + pub fn add_entry_point(&mut self, entry_point: EntryPoint) { + self.0.insert(entry_point.name().to_string(), entry_point); + } + + /// Checks if given [`EntryPoint`] exists. + pub fn has_entry_point(&self, entry_point_name: &str) -> bool { + self.0.contains_key(entry_point_name) + } + + /// Gets an existing [`EntryPoint`] by its name. + pub fn get(&self, entry_point_name: &str) -> Option<&EntryPoint> { + self.0.get(entry_point_name) + } + + /// Returns iterator for existing entry point names. + pub fn keys(&self) -> impl Iterator { + self.0.keys() + } + + /// Takes all entry points. + pub fn take_entry_points(self) -> Vec { + self.0.into_values().collect() + } + + /// Returns the length of the entry points + pub fn len(&self) -> usize { + self.0.len() + } + + /// Checks if the `EntryPoints` is empty. 
+ pub fn is_empty(&self) -> bool { + self.0.is_empty() + } +} + +impl From> for EntryPoints { + fn from(entry_points: Vec) -> EntryPoints { + let entries = entry_points + .into_iter() + .map(|entry_point| (String::from(entry_point.name()), entry_point)) + .collect(); + EntryPoints(entries) + } +} + +/// Collection of named keys +pub type NamedKeys = BTreeMap; + +/// Methods and type signatures supported by a contract. +#[derive(Debug, Clone, PartialEq, Eq, Serialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct Contract { + contract_package_hash: ContractPackageHash, + contract_wasm_hash: ContractWasmHash, + named_keys: NamedKeys, + entry_points: EntryPoints, + protocol_version: ProtocolVersion, +} + +impl From + for ( + ContractPackageHash, + ContractWasmHash, + NamedKeys, + EntryPoints, + ProtocolVersion, + ) +{ + fn from(contract: Contract) -> Self { + ( + contract.contract_package_hash, + contract.contract_wasm_hash, + contract.named_keys, + contract.entry_points, + contract.protocol_version, + ) + } +} + +impl Contract { + /// `Contract` constructor. + pub fn new( + contract_package_hash: ContractPackageHash, + contract_wasm_hash: ContractWasmHash, + named_keys: NamedKeys, + entry_points: EntryPoints, + protocol_version: ProtocolVersion, + ) -> Self { + Contract { + contract_package_hash, + contract_wasm_hash, + named_keys, + entry_points, + protocol_version, + } + } + + /// Hash for accessing contract package + pub fn contract_package_hash(&self) -> ContractPackageHash { + self.contract_package_hash + } + + /// Hash for accessing contract WASM + pub fn contract_wasm_hash(&self) -> ContractWasmHash { + self.contract_wasm_hash + } + + /// Checks whether there is a method with the given name + pub fn has_entry_point(&self, name: &str) -> bool { + self.entry_points.has_entry_point(name) + } + + /// Returns the type signature for the given `method`. 
+ pub fn entry_point(&self, method: &str) -> Option<&EntryPoint> { + self.entry_points.get(method) + } + + /// Get the protocol version this header is targeting. + pub fn protocol_version(&self) -> ProtocolVersion { + self.protocol_version + } + + /// Adds new entry point + pub fn add_entry_point>(&mut self, entry_point: EntryPoint) { + self.entry_points.add_entry_point(entry_point); + } + + /// Hash for accessing contract bytes + pub fn contract_wasm_key(&self) -> Key { + self.contract_wasm_hash.into() + } + + /// Returns immutable reference to methods + pub fn entry_points(&self) -> &EntryPoints { + &self.entry_points + } + + /// Takes `named_keys` + pub fn take_named_keys(self) -> NamedKeys { + self.named_keys + } + + /// Returns a reference to `named_keys` + pub fn named_keys(&self) -> &NamedKeys { + &self.named_keys + } + + /// Appends `keys` to `named_keys` + pub fn named_keys_append(&mut self, keys: &mut NamedKeys) { + self.named_keys.append(keys); + } + + /// Removes given named key. + pub fn remove_named_key(&mut self, key: &str) -> Option { + self.named_keys.remove(key) + } + + /// Set protocol_version. + pub fn set_protocol_version(&mut self, protocol_version: ProtocolVersion) { + self.protocol_version = protocol_version; + } + + /// Determines if `Contract` is compatible with a given `ProtocolVersion`. + pub fn is_compatible_protocol_version(&self, protocol_version: ProtocolVersion) -> bool { + self.protocol_version.value().major == protocol_version.value().major + } + + /// Extracts the access rights from the named keys of the contract. 
+ pub fn extract_access_rights(&self, contract_hash: ContractHash) -> ContextAccessRights { + let urefs_iter = self + .named_keys + .values() + .filter_map(|key| key.as_uref().copied()); + ContextAccessRights::new(contract_hash.into(), urefs_iter) + } +} + +impl ToBytes for Contract { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + self.contract_package_hash().write_bytes(&mut result)?; + self.contract_wasm_hash().write_bytes(&mut result)?; + self.named_keys().write_bytes(&mut result)?; + self.entry_points().write_bytes(&mut result)?; + self.protocol_version().write_bytes(&mut result)?; + Ok(result) + } + + fn serialized_length(&self) -> usize { + ToBytes::serialized_length(&self.entry_points) + + ToBytes::serialized_length(&self.contract_package_hash) + + ToBytes::serialized_length(&self.contract_wasm_hash) + + ToBytes::serialized_length(&self.protocol_version) + + ToBytes::serialized_length(&self.named_keys) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.contract_package_hash().write_bytes(writer)?; + self.contract_wasm_hash().write_bytes(writer)?; + self.named_keys().write_bytes(writer)?; + self.entry_points().write_bytes(writer)?; + self.protocol_version().write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for Contract { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (contract_package_hash, bytes) = FromBytes::from_bytes(bytes)?; + let (contract_wasm_hash, bytes) = FromBytes::from_bytes(bytes)?; + let (named_keys, bytes) = NamedKeys::from_bytes(bytes)?; + let (entry_points, bytes) = EntryPoints::from_bytes(bytes)?; + let (protocol_version, bytes) = ProtocolVersion::from_bytes(bytes)?; + Ok(( + Contract { + contract_package_hash, + contract_wasm_hash, + named_keys, + entry_points, + protocol_version, + }, + bytes, + )) + } +} + +impl Default for Contract { + fn default() -> Self { + Contract { + named_keys: 
NamedKeys::default(), + entry_points: EntryPoints::default(), + contract_wasm_hash: [0; KEY_HASH_LENGTH].into(), + contract_package_hash: [0; KEY_HASH_LENGTH].into(), + protocol_version: ProtocolVersion::V1_0_0, + } + } +} + +/// Context of method execution +#[repr(u8)] +#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum EntryPointType { + /// Runs as session code + Session = 0, + /// Runs within contract's context + Contract = 1, +} + +impl ToBytes for EntryPointType { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + (*self as u8).to_bytes() + } + + fn serialized_length(&self) -> usize { + 1 + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + writer.push(*self as u8); + Ok(()) + } +} + +impl FromBytes for EntryPointType { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (value, bytes) = u8::from_bytes(bytes)?; + match value { + 0 => Ok((EntryPointType::Session, bytes)), + 1 => Ok((EntryPointType::Contract, bytes)), + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +/// Default name for an entry point +pub const DEFAULT_ENTRY_POINT_NAME: &str = "call"; + +/// Default name for an installer entry point +pub const ENTRY_POINT_NAME_INSTALL: &str = "install"; + +/// Default name for an upgrade entry point +pub const UPGRADE_ENTRY_POINT_NAME: &str = "upgrade"; + +/// Collection of entry point parameters. +pub type Parameters = Vec; + +/// Type signature of a method. Order of arguments matter since can be +/// referenced by index as well as name. 
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct EntryPoint { + name: String, + args: Parameters, + ret: CLType, + access: EntryPointAccess, + entry_point_type: EntryPointType, +} + +impl From for (String, Parameters, CLType, EntryPointAccess, EntryPointType) { + fn from(entry_point: EntryPoint) -> Self { + ( + entry_point.name, + entry_point.args, + entry_point.ret, + entry_point.access, + entry_point.entry_point_type, + ) + } +} + +impl EntryPoint { + /// `EntryPoint` constructor. + pub fn new>( + name: T, + args: Parameters, + ret: CLType, + access: EntryPointAccess, + entry_point_type: EntryPointType, + ) -> Self { + EntryPoint { + name: name.into(), + args, + ret, + access, + entry_point_type, + } + } + + /// Create a default [`EntryPoint`] with specified name. + pub fn default_with_name>(name: T) -> Self { + EntryPoint { + name: name.into(), + ..Default::default() + } + } + + /// Get name. + pub fn name(&self) -> &str { + &self.name + } + + /// Get access enum. + pub fn access(&self) -> &EntryPointAccess { + &self.access + } + + /// Get the arguments for this method. + pub fn args(&self) -> &[Parameter] { + self.args.as_slice() + } + + /// Get the return type. 
+ pub fn ret(&self) -> &CLType { + &self.ret + } + + /// Obtains entry point + pub fn entry_point_type(&self) -> EntryPointType { + self.entry_point_type + } +} + +impl Default for EntryPoint { + /// constructor for a public session `EntryPoint` that takes no args and returns `Unit` + fn default() -> Self { + EntryPoint { + name: DEFAULT_ENTRY_POINT_NAME.to_string(), + args: Vec::new(), + ret: CLType::Unit, + access: EntryPointAccess::Public, + entry_point_type: EntryPointType::Session, + } + } +} + +impl ToBytes for EntryPoint { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + result.append(&mut self.name.to_bytes()?); + result.append(&mut self.args.to_bytes()?); + self.ret.append_bytes(&mut result)?; + result.append(&mut self.access.to_bytes()?); + result.append(&mut self.entry_point_type.to_bytes()?); + + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.name.serialized_length() + + self.args.serialized_length() + + self.ret.serialized_length() + + self.access.serialized_length() + + self.entry_point_type.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.name().write_bytes(writer)?; + self.args.write_bytes(writer)?; + self.ret.append_bytes(writer)?; + self.access().write_bytes(writer)?; + self.entry_point_type().write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for EntryPoint { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (name, bytes) = String::from_bytes(bytes)?; + let (args, bytes) = Vec::::from_bytes(bytes)?; + let (ret, bytes) = CLType::from_bytes(bytes)?; + let (access, bytes) = EntryPointAccess::from_bytes(bytes)?; + let (entry_point_type, bytes) = EntryPointType::from_bytes(bytes)?; + + Ok(( + EntryPoint { + name, + args, + ret, + access, + entry_point_type, + }, + bytes, + )) + } +} + +/// Enum describing the possible access control options for a contract entry +/// point (method). 
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum EntryPointAccess { + /// Anyone can call this method (no access controls). + Public, + /// Only users from the listed groups may call this method. Note: if the + /// list is empty then this method is not callable from outside the + /// contract. + Groups(Vec), +} + +const ENTRYPOINTACCESS_PUBLIC_TAG: u8 = 1; +const ENTRYPOINTACCESS_GROUPS_TAG: u8 = 2; + +impl EntryPointAccess { + /// Constructor for access granted to only listed groups. + pub fn groups(labels: &[&str]) -> Self { + let list: Vec = labels.iter().map(|s| Group(String::from(*s))).collect(); + EntryPointAccess::Groups(list) + } +} + +impl ToBytes for EntryPointAccess { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + + match self { + EntryPointAccess::Public => { + result.push(ENTRYPOINTACCESS_PUBLIC_TAG); + } + EntryPointAccess::Groups(groups) => { + result.push(ENTRYPOINTACCESS_GROUPS_TAG); + result.append(&mut groups.to_bytes()?); + } + } + Ok(result) + } + + fn serialized_length(&self) -> usize { + match self { + EntryPointAccess::Public => 1, + EntryPointAccess::Groups(groups) => 1 + groups.serialized_length(), + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + EntryPointAccess::Public => { + writer.push(ENTRYPOINTACCESS_PUBLIC_TAG); + } + EntryPointAccess::Groups(groups) => { + writer.push(ENTRYPOINTACCESS_GROUPS_TAG); + groups.write_bytes(writer)?; + } + } + Ok(()) + } +} + +impl FromBytes for EntryPointAccess { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, bytes) = u8::from_bytes(bytes)?; + + match tag { + ENTRYPOINTACCESS_PUBLIC_TAG => Ok((EntryPointAccess::Public, bytes)), + ENTRYPOINTACCESS_GROUPS_TAG => { + let (groups, bytes) = Vec::::from_bytes(bytes)?; + let 
result = EntryPointAccess::Groups(groups); + Ok((result, bytes)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +/// Parameter to a method +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct Parameter { + name: String, + cl_type: CLType, +} + +impl Parameter { + /// `Parameter` constructor. + pub fn new>(name: T, cl_type: CLType) -> Self { + Parameter { + name: name.into(), + cl_type, + } + } + + /// Get the type of this argument. + pub fn cl_type(&self) -> &CLType { + &self.cl_type + } + + /// Get a reference to the parameter's name. + pub fn name(&self) -> &str { + &self.name + } +} + +impl From for (String, CLType) { + fn from(parameter: Parameter) -> Self { + (parameter.name, parameter.cl_type) + } +} + +impl ToBytes for Parameter { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = ToBytes::to_bytes(&self.name)?; + self.cl_type.append_bytes(&mut result)?; + + Ok(result) + } + + fn serialized_length(&self) -> usize { + ToBytes::serialized_length(&self.name) + self.cl_type.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.name.write_bytes(writer)?; + self.cl_type.append_bytes(writer) + } +} + +impl FromBytes for Parameter { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (name, bytes) = String::from_bytes(bytes)?; + let (cl_type, bytes) = CLType::from_bytes(bytes)?; + + Ok((Parameter { name, cl_type }, bytes)) + } +} + +#[cfg(test)] +mod tests { + use std::iter::FromIterator; + + use super::*; + use crate::{AccessRights, URef, UREF_ADDR_LENGTH}; + use alloc::borrow::ToOwned; + + const CONTRACT_HASH_V1: ContractHash = ContractHash::new([42; 32]); + const CONTRACT_HASH_V2: ContractHash = ContractHash::new([84; 32]); + + fn make_contract_package() -> ContractPackage { + let mut contract_package = 
ContractPackage::new( + URef::new([0; 32], AccessRights::NONE), + ContractVersions::default(), + DisabledVersions::default(), + Groups::default(), + ContractPackageStatus::default(), + ); + + // add groups + { + let group_urefs = { + let mut ret = BTreeSet::new(); + ret.insert(URef::new([1; 32], AccessRights::READ)); + ret + }; + + contract_package + .groups_mut() + .insert(Group::new("Group 1"), group_urefs.clone()); + + contract_package + .groups_mut() + .insert(Group::new("Group 2"), group_urefs); + } + + // add entry_points + let _entry_points = { + let mut ret = BTreeMap::new(); + let entrypoint = EntryPoint::new( + "method0".to_string(), + vec![], + CLType::U32, + EntryPointAccess::groups(&["Group 2"]), + EntryPointType::Session, + ); + ret.insert(entrypoint.name().to_owned(), entrypoint); + let entrypoint = EntryPoint::new( + "method1".to_string(), + vec![Parameter::new("Foo", CLType::U32)], + CLType::U32, + EntryPointAccess::groups(&["Group 1"]), + EntryPointType::Session, + ); + ret.insert(entrypoint.name().to_owned(), entrypoint); + ret + }; + + let _contract_package_hash = [41; 32]; + let _contract_wasm_hash = [43; 32]; + let _named_keys = NamedKeys::new(); + let protocol_version = ProtocolVersion::V1_0_0; + + let v1 = contract_package + .insert_contract_version(protocol_version.value().major, CONTRACT_HASH_V1); + let v2 = contract_package + .insert_contract_version(protocol_version.value().major, CONTRACT_HASH_V2); + + assert!(v2 > v1); + + contract_package + } + + #[test] + fn next_contract_version() { + let major = 1; + let mut contract_package = ContractPackage::new( + URef::new([0; 32], AccessRights::NONE), + ContractVersions::default(), + DisabledVersions::default(), + Groups::default(), + ContractPackageStatus::default(), + ); + assert_eq!(contract_package.next_contract_version_for(major), 1); + + let next_version = contract_package.insert_contract_version(major, [123; 32].into()); + assert_eq!(next_version, ContractVersionKey::new(major, 1)); + 
assert_eq!(contract_package.next_contract_version_for(major), 2); + let next_version_2 = contract_package.insert_contract_version(major, [124; 32].into()); + assert_eq!(next_version_2, ContractVersionKey::new(major, 2)); + + let major = 2; + assert_eq!(contract_package.next_contract_version_for(major), 1); + let next_version_3 = contract_package.insert_contract_version(major, [42; 32].into()); + assert_eq!(next_version_3, ContractVersionKey::new(major, 1)); + } + + #[test] + fn roundtrip_serialization() { + let contract_package = make_contract_package(); + let bytes = contract_package.to_bytes().expect("should serialize"); + let (decoded_package, rem) = + ContractPackage::from_bytes(&bytes).expect("should deserialize"); + assert_eq!(contract_package, decoded_package); + assert_eq!(rem.len(), 0); + } + + #[test] + fn should_remove_group() { + let mut contract_package = make_contract_package(); + + assert!(!contract_package.remove_group(&Group::new("Non-existent group"))); + assert!(contract_package.remove_group(&Group::new("Group 1"))); + assert!(!contract_package.remove_group(&Group::new("Group 1"))); // Group no longer exists + } + + #[test] + fn should_disable_and_enable_contract_version() { + const NEW_CONTRACT_HASH: ContractHash = ContractHash::new([123; 32]); + + let mut contract_package = make_contract_package(); + + assert!( + !contract_package.is_contract_enabled(&NEW_CONTRACT_HASH), + "nonexisting contract contract should return false" + ); + + assert_eq!( + contract_package.current_contract_version(), + Some(ContractVersionKey(1, 2)) + ); + assert_eq!( + contract_package.current_contract_hash(), + Some(CONTRACT_HASH_V2) + ); + + assert_eq!( + contract_package.versions(), + &BTreeMap::from_iter([ + (ContractVersionKey(1, 1), CONTRACT_HASH_V1), + (ContractVersionKey(1, 2), CONTRACT_HASH_V2) + ]), + ); + assert_eq!( + contract_package.enabled_versions(), + BTreeMap::from_iter([ + (ContractVersionKey(1, 1), CONTRACT_HASH_V1), + (ContractVersionKey(1, 2), 
CONTRACT_HASH_V2) + ]), + ); + + assert!(!contract_package.is_contract_enabled(&NEW_CONTRACT_HASH)); + + assert_eq!( + contract_package.disable_contract_version(NEW_CONTRACT_HASH), + Err(Error::ContractNotFound), + "should return contract not found error" + ); + + assert!( + !contract_package.is_contract_enabled(&NEW_CONTRACT_HASH), + "disabling missing contract shouldnt change outcome" + ); + + let next_version = contract_package.insert_contract_version(1, NEW_CONTRACT_HASH); + assert!( + contract_package.is_version_enabled(next_version), + "version should exist and be enabled" + ); + assert!( + contract_package.is_contract_enabled(&NEW_CONTRACT_HASH), + "contract should be enabled" + ); + + assert_eq!( + contract_package.disable_contract_version(NEW_CONTRACT_HASH), + Ok(()), + "should be able to disable version" + ); + assert!(!contract_package.is_contract_enabled(&NEW_CONTRACT_HASH)); + + assert_eq!( + contract_package.lookup_contract_hash(next_version), + None, + "should not return disabled contract version" + ); + + assert!( + !contract_package.is_version_enabled(next_version), + "version should not be enabled" + ); + + assert_eq!( + contract_package.current_contract_version(), + Some(ContractVersionKey(1, 2)) + ); + assert_eq!( + contract_package.current_contract_hash(), + Some(CONTRACT_HASH_V2) + ); + assert_eq!( + contract_package.versions(), + &BTreeMap::from_iter([ + (ContractVersionKey(1, 1), CONTRACT_HASH_V1), + (ContractVersionKey(1, 2), CONTRACT_HASH_V2), + (next_version, NEW_CONTRACT_HASH), + ]), + ); + assert_eq!( + contract_package.enabled_versions(), + BTreeMap::from_iter([ + (ContractVersionKey(1, 1), CONTRACT_HASH_V1), + (ContractVersionKey(1, 2), CONTRACT_HASH_V2), + ]), + ); + assert_eq!( + contract_package.disabled_versions(), + &BTreeSet::from_iter([next_version]), + ); + + assert_eq!( + contract_package.current_contract_version(), + Some(ContractVersionKey(1, 2)) + ); + assert_eq!( + contract_package.current_contract_hash(), + 
Some(CONTRACT_HASH_V2) + ); + + assert_eq!( + contract_package.disable_contract_version(CONTRACT_HASH_V2), + Ok(()), + "should be able to disable version 2" + ); + + assert_eq!( + contract_package.enabled_versions(), + BTreeMap::from_iter([(ContractVersionKey(1, 1), CONTRACT_HASH_V1),]), + ); + + assert_eq!( + contract_package.current_contract_version(), + Some(ContractVersionKey(1, 1)) + ); + assert_eq!( + contract_package.current_contract_hash(), + Some(CONTRACT_HASH_V1) + ); + + assert_eq!( + contract_package.disabled_versions(), + &BTreeSet::from_iter([next_version, ContractVersionKey(1, 2)]), + ); + + assert_eq!( + contract_package.enable_contract_version(CONTRACT_HASH_V2), + Ok(()), + ); + + assert_eq!( + contract_package.enabled_versions(), + BTreeMap::from_iter([ + (ContractVersionKey(1, 1), CONTRACT_HASH_V1), + (ContractVersionKey(1, 2), CONTRACT_HASH_V2), + ]), + ); + + assert_eq!( + contract_package.disabled_versions(), + &BTreeSet::from_iter([next_version]) + ); + + assert_eq!( + contract_package.current_contract_hash(), + Some(CONTRACT_HASH_V2) + ); + + assert_eq!( + contract_package.enable_contract_version(NEW_CONTRACT_HASH), + Ok(()), + ); + + assert_eq!( + contract_package.enable_contract_version(NEW_CONTRACT_HASH), + Ok(()), + "enabling a contract twice should be a noop" + ); + + assert_eq!( + contract_package.enabled_versions(), + BTreeMap::from_iter([ + (ContractVersionKey(1, 1), CONTRACT_HASH_V1), + (ContractVersionKey(1, 2), CONTRACT_HASH_V2), + (next_version, NEW_CONTRACT_HASH), + ]), + ); + + assert_eq!(contract_package.disabled_versions(), &BTreeSet::new(),); + + assert_eq!( + contract_package.current_contract_hash(), + Some(NEW_CONTRACT_HASH) + ); + } + + #[test] + fn should_not_allow_to_enable_non_existing_version() { + let mut contract_package = make_contract_package(); + + assert_eq!( + contract_package.enable_contract_version(ContractHash::default()), + Err(Error::ContractNotFound), + ); + } + + #[test] + fn contract_hash_from_slice() { 
+ let bytes: Vec = (0..32).collect(); + let contract_hash = HashAddr::try_from(&bytes[..]).expect("should create contract hash"); + let contract_hash = ContractHash::new(contract_hash); + assert_eq!(&bytes, &contract_hash.as_bytes()); + } + + #[test] + fn contract_package_hash_from_slice() { + let bytes: Vec = (0..32).collect(); + let contract_hash = HashAddr::try_from(&bytes[..]).expect("should create contract hash"); + let contract_hash = ContractPackageHash::new(contract_hash); + assert_eq!(&bytes, &contract_hash.as_bytes()); + } + + #[test] + fn contract_hash_from_str() { + let contract_hash = ContractHash([3; 32]); + let encoded = contract_hash.to_formatted_string(); + let decoded = ContractHash::from_formatted_str(&encoded).unwrap(); + assert_eq!(contract_hash, decoded); + + let invalid_prefix = + "contract--0000000000000000000000000000000000000000000000000000000000000000"; + assert!(ContractHash::from_formatted_str(invalid_prefix).is_err()); + + let short_addr = "contract-00000000000000000000000000000000000000000000000000000000000000"; + assert!(ContractHash::from_formatted_str(short_addr).is_err()); + + let long_addr = + "contract-000000000000000000000000000000000000000000000000000000000000000000"; + assert!(ContractHash::from_formatted_str(long_addr).is_err()); + + let invalid_hex = + "contract-000000000000000000000000000000000000000000000000000000000000000g"; + assert!(ContractHash::from_formatted_str(invalid_hex).is_err()); + } + + #[test] + fn contract_package_hash_from_str() { + let contract_package_hash = ContractPackageHash([3; 32]); + let encoded = contract_package_hash.to_formatted_string(); + let decoded = ContractPackageHash::from_formatted_str(&encoded).unwrap(); + assert_eq!(contract_package_hash, decoded); + + let invalid_prefix = + "contract-package0000000000000000000000000000000000000000000000000000000000000000"; + assert!(matches!( + ContractPackageHash::from_formatted_str(invalid_prefix).unwrap_err(), + FromStrError::InvalidPrefix + )); + 
+ let short_addr = + "contract-package-00000000000000000000000000000000000000000000000000000000000000"; + assert!(matches!( + ContractPackageHash::from_formatted_str(short_addr).unwrap_err(), + FromStrError::Hash(_) + )); + + let long_addr = + "contract-package-000000000000000000000000000000000000000000000000000000000000000000"; + assert!(matches!( + ContractPackageHash::from_formatted_str(long_addr).unwrap_err(), + FromStrError::Hash(_) + )); + + let invalid_hex = + "contract-package-000000000000000000000000000000000000000000000000000000000000000g"; + assert!(matches!( + ContractPackageHash::from_formatted_str(invalid_hex).unwrap_err(), + FromStrError::Hex(_) + )); + } + + #[test] + fn contract_package_hash_from_legacy_str() { + let contract_package_hash = ContractPackageHash([3; 32]); + let hex_addr = contract_package_hash.to_string(); + let legacy_encoded = format!("contract-package-wasm{}", hex_addr); + let decoded_from_legacy = ContractPackageHash::from_formatted_str(&legacy_encoded) + .expect("should accept legacy prefixed string"); + assert_eq!( + contract_package_hash, decoded_from_legacy, + "decoded_from_legacy should equal decoded" + ); + + let invalid_prefix = + "contract-packagewasm0000000000000000000000000000000000000000000000000000000000000000"; + assert!(matches!( + ContractPackageHash::from_formatted_str(invalid_prefix).unwrap_err(), + FromStrError::InvalidPrefix + )); + + let short_addr = + "contract-package-wasm00000000000000000000000000000000000000000000000000000000000000"; + assert!(matches!( + ContractPackageHash::from_formatted_str(short_addr).unwrap_err(), + FromStrError::Hash(_) + )); + + let long_addr = + "contract-package-wasm000000000000000000000000000000000000000000000000000000000000000000"; + assert!(matches!( + ContractPackageHash::from_formatted_str(long_addr).unwrap_err(), + FromStrError::Hash(_) + )); + + let invalid_hex = + "contract-package-wasm000000000000000000000000000000000000000000000000000000000000000g"; + assert!(matches!( 
+ ContractPackageHash::from_formatted_str(invalid_hex).unwrap_err(), + FromStrError::Hex(_) + )); + } + + #[test] + fn contract_hash_serde_roundtrip() { + let contract_hash = ContractHash([255; 32]); + let serialized = bincode::serialize(&contract_hash).unwrap(); + let deserialized = bincode::deserialize(&serialized).unwrap(); + assert_eq!(contract_hash, deserialized) + } + + #[test] + fn contract_hash_json_roundtrip() { + let contract_hash = ContractHash([255; 32]); + let json_string = serde_json::to_string_pretty(&contract_hash).unwrap(); + let decoded = serde_json::from_str(&json_string).unwrap(); + assert_eq!(contract_hash, decoded) + } + + #[test] + fn contract_package_hash_serde_roundtrip() { + let contract_hash = ContractPackageHash([255; 32]); + let serialized = bincode::serialize(&contract_hash).unwrap(); + let deserialized = bincode::deserialize(&serialized).unwrap(); + assert_eq!(contract_hash, deserialized) + } + + #[test] + fn contract_package_hash_json_roundtrip() { + let contract_hash = ContractPackageHash([255; 32]); + let json_string = serde_json::to_string_pretty(&contract_hash).unwrap(); + let decoded = serde_json::from_str(&json_string).unwrap(); + assert_eq!(contract_hash, decoded) + } + + #[test] + fn should_extract_access_rights() { + let contract_hash = ContractHash([255; 32]); + let uref = URef::new([84; UREF_ADDR_LENGTH], AccessRights::READ_ADD); + let uref_r = URef::new([42; UREF_ADDR_LENGTH], AccessRights::READ); + let uref_a = URef::new([42; UREF_ADDR_LENGTH], AccessRights::ADD); + let uref_w = URef::new([42; UREF_ADDR_LENGTH], AccessRights::WRITE); + let mut named_keys = NamedKeys::new(); + named_keys.insert("a".to_string(), Key::URef(uref_r)); + named_keys.insert("b".to_string(), Key::URef(uref_a)); + named_keys.insert("c".to_string(), Key::URef(uref_w)); + named_keys.insert("d".to_string(), Key::URef(uref)); + let contract = Contract::new( + ContractPackageHash::new([254; 32]), + ContractWasmHash::new([253; 32]), + named_keys, + 
EntryPoints::default(), + ProtocolVersion::V1_0_0, + ); + let access_rights = contract.extract_access_rights(contract_hash); + let expected_uref = URef::new([42; UREF_ADDR_LENGTH], AccessRights::READ_ADD_WRITE); + assert!( + access_rights.has_access_rights_to_uref(&uref), + "urefs in named keys should be included in access rights" + ); + assert!( + access_rights.has_access_rights_to_uref(&expected_uref), + "multiple access right bits to the same uref should coalesce" + ); + } +} + +#[cfg(test)] +mod prop_tests { + use proptest::prelude::*; + + use crate::{bytesrepr, gens}; + + proptest! { + // #![proptest_config(ProptestConfig { + // cases: 1024, + // .. ProptestConfig::default() + // })] + + #[test] + fn test_value_contract(contract in gens::contract_arb()) { + bytesrepr::test_serialization_roundtrip(&contract); + } + + #[test] + fn test_value_contract_package(contract_pkg in gens::contract_package_arb()) { + bytesrepr::test_serialization_roundtrip(&contract_pkg); + } + } +} diff --git a/casper_types/src/crypto.rs b/casper_types/src/crypto.rs new file mode 100644 index 00000000..fbcd172c --- /dev/null +++ b/casper_types/src/crypto.rs @@ -0,0 +1,35 @@ +//! 
Cryptographic types and operations on them + +mod asymmetric_key; +mod error; + +use blake2::{ + digest::{Update, VariableOutput}, + VarBlake2b, +}; + +use crate::key::BLAKE2B_DIGEST_LENGTH; +#[cfg(any(feature = "std", test))] +pub use asymmetric_key::generate_ed25519_keypair; +#[cfg(any(feature = "testing", feature = "gens", test))] +pub use asymmetric_key::gens; +pub use asymmetric_key::{ + sign, verify, AsymmetricType, PublicKey, SecretKey, Signature, ED25519_TAG, SECP256K1_TAG, + SYSTEM_ACCOUNT, SYSTEM_TAG, +}; +pub use error::Error; +#[cfg(any(feature = "std", test))] +pub use error::ErrorExt; + +#[doc(hidden)] +pub fn blake2b>(data: T) -> [u8; BLAKE2B_DIGEST_LENGTH] { + let mut result = [0; BLAKE2B_DIGEST_LENGTH]; + // NOTE: Assumed safe as `BLAKE2B_DIGEST_LENGTH` is a valid value for a hasher + let mut hasher = VarBlake2b::new(BLAKE2B_DIGEST_LENGTH).expect("should create hasher"); + + hasher.update(data); + hasher.finalize_variable(|slice| { + result.copy_from_slice(slice); + }); + result +} diff --git a/casper_types/src/crypto/asymmetric_key.rs b/casper_types/src/crypto/asymmetric_key.rs new file mode 100644 index 00000000..5c82289f --- /dev/null +++ b/casper_types/src/crypto/asymmetric_key.rs @@ -0,0 +1,1274 @@ +//! 
Asymmetric key types and methods on them + +use alloc::{ + format, + string::{String, ToString}, + vec::Vec, +}; +use core::{ + cmp::Ordering, + convert::TryFrom, + fmt::{self, Debug, Display, Formatter}, + hash::{Hash, Hasher}, + iter, + marker::Copy, +}; +#[cfg(any(feature = "std", test))] +use std::path::Path; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "std", test))] +use derp::{Der, Tag}; +use ed25519_dalek::{ + Signature as Ed25519Signature, SigningKey as Ed25519SecretKey, + VerifyingKey as Ed25519PublicKey, PUBLIC_KEY_LENGTH as ED25519_PUBLIC_KEY_LENGTH, + SECRET_KEY_LENGTH as ED25519_SECRET_KEY_LENGTH, SIGNATURE_LENGTH as ED25519_SIGNATURE_LENGTH, +}; +use hex_fmt::HexFmt; +use k256::ecdsa::{ + signature::{Signer, Verifier}, + Signature as Secp256k1Signature, SigningKey as Secp256k1SecretKey, + VerifyingKey as Secp256k1PublicKey, +}; +#[cfg(any(feature = "std", test))] +use once_cell::sync::Lazy; +#[cfg(any(feature = "std", test))] +use pem::Pem; +#[cfg(any(all(feature = "std", feature = "testing"), test))] +use rand::{Rng, RngCore}; +#[cfg(feature = "json-schema")] +use schemars::{gen::SchemaGenerator, schema::Schema, JsonSchema}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; +#[cfg(feature = "json-schema")] +use serde_json::json; +#[cfg(any(feature = "std", test))] +use untrusted::Input; + +#[cfg(any(all(feature = "std", feature = "testing"), test))] +use crate::testing::TestRng; +use crate::{ + account::AccountHash, + bytesrepr, + bytesrepr::{FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + checksummed_hex, + crypto::Error, + CLType, CLTyped, Tagged, +}; +#[cfg(any(feature = "std", test))] +use crate::{ + crypto::ErrorExt, + file_utils::{read_file, write_file, write_private_file}, +}; + +#[cfg(any(feature = "testing", test))] +pub mod gens; +#[cfg(test)] +mod tests; + +const TAG_LENGTH: usize = U8_SERIALIZED_LENGTH; + +/// Tag for system variant. 
+pub const SYSTEM_TAG: u8 = 0; +const SYSTEM: &str = "System"; + +/// Tag for ed25519 variant. +pub const ED25519_TAG: u8 = 1; +const ED25519: &str = "Ed25519"; + +/// Tag for secp256k1 variant. +pub const SECP256K1_TAG: u8 = 2; +const SECP256K1: &str = "Secp256k1"; + +const SECP256K1_SECRET_KEY_LENGTH: usize = 32; +const SECP256K1_COMPRESSED_PUBLIC_KEY_LENGTH: usize = 33; +const SECP256K1_SIGNATURE_LENGTH: usize = 64; + +/// Public key for system account. +pub const SYSTEM_ACCOUNT: PublicKey = PublicKey::System; + +// See https://www.secg.org/sec1-v2.pdf#subsection.C.4 +#[cfg(any(feature = "std", test))] +const EC_PUBLIC_KEY_OBJECT_IDENTIFIER: [u8; 7] = [42, 134, 72, 206, 61, 2, 1]; + +// See https://tools.ietf.org/html/rfc8410#section-10.3 +#[cfg(any(feature = "std", test))] +const ED25519_OBJECT_IDENTIFIER: [u8; 3] = [43, 101, 112]; +#[cfg(any(feature = "std", test))] +const ED25519_PEM_SECRET_KEY_TAG: &str = "PRIVATE KEY"; +#[cfg(any(feature = "std", test))] +const ED25519_PEM_PUBLIC_KEY_TAG: &str = "PUBLIC KEY"; + +// Ref? +#[cfg(any(feature = "std", test))] +const SECP256K1_OBJECT_IDENTIFIER: [u8; 5] = [43, 129, 4, 0, 10]; +#[cfg(any(feature = "std", test))] +const SECP256K1_PEM_SECRET_KEY_TAG: &str = "EC PRIVATE KEY"; +#[cfg(any(feature = "std", test))] +const SECP256K1_PEM_PUBLIC_KEY_TAG: &str = "PUBLIC KEY"; + +#[cfg(any(feature = "std", test))] +static ED25519_SECRET_KEY: Lazy = Lazy::new(|| { + let bytes = [15u8; SecretKey::ED25519_LENGTH]; + SecretKey::ed25519_from_bytes(bytes).unwrap() +}); + +#[cfg(any(feature = "std", test))] +static ED25519_PUBLIC_KEY: Lazy = Lazy::new(|| { + let bytes = [15u8; SecretKey::ED25519_LENGTH]; + let secret_key = SecretKey::ed25519_from_bytes(bytes).unwrap(); + PublicKey::from(&secret_key) +}); + +/// Operations on asymmetric cryptographic type. +pub trait AsymmetricType<'a> +where + Self: 'a + Sized + Tagged, + Vec: From<&'a Self>, +{ + /// Converts `self` to hex, where the first byte represents the algorithm tag. 
+ fn to_hex(&'a self) -> String { + let bytes = iter::once(self.tag()) + .chain(Vec::::from(self)) + .collect::>(); + base16::encode_lower(&bytes) + } + + /// Tries to decode `Self` from its hex-representation. The hex format should be as produced + /// by `AsymmetricType::to_hex()`. + fn from_hex>(input: A) -> Result { + if input.as_ref().len() < 2 { + return Err(Error::AsymmetricKey( + "failed to decode from hex: too short".to_string(), + )); + } + + let (tag_hex, key_hex) = input.as_ref().split_at(2); + + let tag = checksummed_hex::decode(tag_hex)?; + let key_bytes = checksummed_hex::decode(key_hex)?; + + match tag[0] { + SYSTEM_TAG => { + if key_bytes.is_empty() { + Ok(Self::system()) + } else { + Err(Error::AsymmetricKey( + "failed to decode from hex: invalid system variant".to_string(), + )) + } + } + ED25519_TAG => Self::ed25519_from_bytes(&key_bytes), + SECP256K1_TAG => Self::secp256k1_from_bytes(&key_bytes), + _ => Err(Error::AsymmetricKey(format!( + "failed to decode from hex: invalid tag. Expected {}, {} or {}, got {}", + SYSTEM_TAG, ED25519_TAG, SECP256K1_TAG, tag[0] + ))), + } + } + + /// Constructs a new system variant. + fn system() -> Self; + + /// Constructs a new ed25519 variant from a byte slice. + fn ed25519_from_bytes>(bytes: T) -> Result; + + /// Constructs a new secp256k1 variant from a byte slice. + fn secp256k1_from_bytes>(bytes: T) -> Result; +} + +/// A secret or private asymmetric key. +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[non_exhaustive] +pub enum SecretKey { + /// System secret key. + System, + /// Ed25519 secret key. + #[cfg_attr(feature = "datasize", data_size(skip))] + // Manually verified to have no data on the heap. + Ed25519(Ed25519SecretKey), + /// secp256k1 secret key. + #[cfg_attr(feature = "datasize", data_size(skip))] + Secp256k1(Secp256k1SecretKey), +} + +impl SecretKey { + /// The length in bytes of a system secret key. 
+ pub const SYSTEM_LENGTH: usize = 0; + + /// The length in bytes of an Ed25519 secret key. + pub const ED25519_LENGTH: usize = ED25519_SECRET_KEY_LENGTH; + + /// The length in bytes of a secp256k1 secret key. + pub const SECP256K1_LENGTH: usize = SECP256K1_SECRET_KEY_LENGTH; + + /// Constructs a new system variant. + pub fn system() -> Self { + SecretKey::System + } + + /// Constructs a new ed25519 variant from a byte slice. + pub fn ed25519_from_bytes>(bytes: T) -> Result { + Ok(SecretKey::Ed25519(Ed25519SecretKey::try_from( + bytes.as_ref(), + )?)) + } + + /// Constructs a new secp256k1 variant from a byte slice. + pub fn secp256k1_from_bytes>(bytes: T) -> Result { + Ok(SecretKey::Secp256k1( + Secp256k1SecretKey::from_slice(bytes.as_ref()).map_err(|_| Error::SignatureError)?, + )) + } + + fn variant_name(&self) -> &str { + match self { + SecretKey::System => SYSTEM, + SecretKey::Ed25519(_) => ED25519, + SecretKey::Secp256k1(_) => SECP256K1, + } + } +} + +#[cfg(any(feature = "std", test))] +impl SecretKey { + /// Generates a new ed25519 variant using the system's secure random number generator. + pub fn generate_ed25519() -> Result { + let mut bytes = [0u8; Self::ED25519_LENGTH]; + getrandom::getrandom(&mut bytes[..])?; + SecretKey::ed25519_from_bytes(bytes).map_err(Into::into) + } + + /// Generates a new secp256k1 variant using the system's secure random number generator. + pub fn generate_secp256k1() -> Result { + let mut bytes = [0u8; Self::SECP256K1_LENGTH]; + getrandom::getrandom(&mut bytes[..])?; + SecretKey::secp256k1_from_bytes(bytes).map_err(Into::into) + } + + /// Attempts to write the key bytes to the configured file path. + pub fn to_file>(&self, file: P) -> Result<(), ErrorExt> { + write_private_file(file, self.to_pem()?).map_err(ErrorExt::SecretKeySave) + } + + /// Attempts to read the key bytes from configured file path. 
+ pub fn from_file>(file: P) -> Result { + let data = read_file(file).map_err(ErrorExt::SecretKeyLoad)?; + Self::from_pem(data) + } + + /// DER encodes a key. + pub fn to_der(&self) -> Result, ErrorExt> { + match self { + SecretKey::System => Err(Error::System(String::from("to_der")).into()), + SecretKey::Ed25519(secret_key) => { + // See https://tools.ietf.org/html/rfc8410#section-10.3 + let mut key_bytes = vec![]; + let mut der = Der::new(&mut key_bytes); + der.octet_string(&secret_key.to_bytes())?; + + let mut encoded = vec![]; + der = Der::new(&mut encoded); + der.sequence(|der| { + der.integer(&[0])?; + der.sequence(|der| der.oid(&ED25519_OBJECT_IDENTIFIER))?; + der.octet_string(&key_bytes) + })?; + Ok(encoded) + } + SecretKey::Secp256k1(secret_key) => { + // See https://www.secg.org/sec1-v2.pdf#subsection.C.4 + let mut oid_bytes = vec![]; + let mut der = Der::new(&mut oid_bytes); + der.oid(&SECP256K1_OBJECT_IDENTIFIER)?; + + let mut encoded = vec![]; + der = Der::new(&mut encoded); + der.sequence(|der| { + der.integer(&[1])?; + der.octet_string(secret_key.to_bytes().as_slice())?; + der.element(Tag::ContextSpecificConstructed0, &oid_bytes) + })?; + Ok(encoded) + } + } + } + + /// Decodes a key from a DER-encoded slice. + pub fn from_der>(input: T) -> Result { + let input = Input::from(input.as_ref()); + + let (key_type_tag, raw_bytes) = input.read_all(derp::Error::Read, |input| { + derp::nested(input, Tag::Sequence, |input| { + // Safe to ignore the first value which should be an integer. + let version_slice = + derp::expect_tag_and_get_value(input, Tag::Integer)?.as_slice_less_safe(); + if version_slice.len() != 1 { + return Err(derp::Error::NonZeroUnusedBits); + } + let version = version_slice[0]; + + // Read the next value. + let (tag, value) = derp::read_tag_and_get_value(input)?; + if tag == Tag::Sequence as u8 { + // Expecting an Ed25519 key. 
+ if version != 0 { + return Err(derp::Error::WrongValue); + } + + // The sequence should have one element: an object identifier defining Ed25519. + let object_identifier = value.read_all(derp::Error::Read, |input| { + derp::expect_tag_and_get_value(input, Tag::Oid) + })?; + if object_identifier.as_slice_less_safe() != ED25519_OBJECT_IDENTIFIER { + return Err(derp::Error::WrongValue); + } + + // The third and final value should be the raw bytes of the secret key as an + // octet string in an octet string. + let raw_bytes = derp::nested(input, Tag::OctetString, |input| { + derp::expect_tag_and_get_value(input, Tag::OctetString) + })? + .as_slice_less_safe(); + + return Ok((ED25519_TAG, raw_bytes)); + } else if tag == Tag::OctetString as u8 { + // Expecting a secp256k1 key. + if version != 1 { + return Err(derp::Error::WrongValue); + } + + // The octet string is the secret key. + let raw_bytes = value.as_slice_less_safe(); + + // The object identifier is next. + let parameter0 = + derp::expect_tag_and_get_value(input, Tag::ContextSpecificConstructed0)?; + let object_identifier = parameter0.read_all(derp::Error::Read, |input| { + derp::expect_tag_and_get_value(input, Tag::Oid) + })?; + if object_identifier.as_slice_less_safe() != SECP256K1_OBJECT_IDENTIFIER { + return Err(derp::Error::WrongValue); + } + + // There might be an optional public key as the final value, but we're not + // interested in parsing that. Read it to ensure `input.read_all` doesn't fail + // with unused bytes error. 
+ let _ = derp::read_tag_and_get_value(input); + + return Ok((SECP256K1_TAG, raw_bytes)); + } + + Err(derp::Error::WrongValue) + }) + })?; + + match key_type_tag { + SYSTEM_TAG => Err(Error::AsymmetricKey("cannot construct variant".to_string()).into()), + ED25519_TAG => SecretKey::ed25519_from_bytes(raw_bytes).map_err(Into::into), + SECP256K1_TAG => SecretKey::secp256k1_from_bytes(raw_bytes).map_err(Into::into), + _ => Err(Error::AsymmetricKey("unknown type tag".to_string()).into()), + } + } + + /// PEM encodes a key. + pub fn to_pem(&self) -> Result { + let tag = match self { + SecretKey::System => return Err(Error::System(String::from("to_pem")).into()), + SecretKey::Ed25519(_) => ED25519_PEM_SECRET_KEY_TAG.to_string(), + SecretKey::Secp256k1(_) => SECP256K1_PEM_SECRET_KEY_TAG.to_string(), + }; + let contents = self.to_der()?; + let pem = Pem { tag, contents }; + Ok(pem::encode(&pem)) + } + + /// Decodes a key from a PEM-encoded slice. + pub fn from_pem>(input: T) -> Result { + let pem = pem::parse(input)?; + + let secret_key = Self::from_der(&pem.contents)?; + + let bad_tag = |expected_tag: &str| { + ErrorExt::FromPem(format!( + "invalid tag: expected {}, got {}", + expected_tag, pem.tag + )) + }; + + match secret_key { + SecretKey::System => return Err(Error::System(String::from("from_pem")).into()), + SecretKey::Ed25519(_) => { + if pem.tag != ED25519_PEM_SECRET_KEY_TAG { + return Err(bad_tag(ED25519_PEM_SECRET_KEY_TAG)); + } + } + SecretKey::Secp256k1(_) => { + if pem.tag != SECP256K1_PEM_SECRET_KEY_TAG { + return Err(bad_tag(SECP256K1_PEM_SECRET_KEY_TAG)); + } + } + } + + Ok(secret_key) + } + + /// Generates a random instance using a `TestRng`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + if rng.gen() { + Self::random_ed25519(rng) + } else { + Self::random_secp256k1(rng) + } + } + + /// Generates a random ed25519 instance using a `TestRng`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random_ed25519(rng: &mut TestRng) -> Self { + let mut bytes = [0u8; Self::ED25519_LENGTH]; + rng.fill_bytes(&mut bytes[..]); + SecretKey::ed25519_from_bytes(bytes).unwrap() + } + + /// Generates a random secp256k1 instance using a `TestRng`. + #[cfg(any(feature = "testing", test))] + pub fn random_secp256k1(rng: &mut TestRng) -> Self { + let mut bytes = [0u8; Self::SECP256K1_LENGTH]; + rng.fill_bytes(&mut bytes[..]); + SecretKey::secp256k1_from_bytes(bytes).unwrap() + } + + /// Returns an example value for documentation purposes. + pub fn doc_example() -> &'static Self { + &ED25519_SECRET_KEY + } +} + +impl Debug for SecretKey { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!(formatter, "SecretKey::{}", self.variant_name()) + } +} + +impl Display for SecretKey { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + ::fmt(self, formatter) + } +} + +impl Tagged for SecretKey { + fn tag(&self) -> u8 { + match self { + SecretKey::System => SYSTEM_TAG, + SecretKey::Ed25519(_) => ED25519_TAG, + SecretKey::Secp256k1(_) => SECP256K1_TAG, + } + } +} + +/// A public asymmetric key. +#[derive(Clone, Eq, PartialEq)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[non_exhaustive] +pub enum PublicKey { + /// System public key. + System, + /// Ed25519 public key. + #[cfg_attr(feature = "datasize", data_size(skip))] + Ed25519(Ed25519PublicKey), + /// secp256k1 public key. + #[cfg_attr(feature = "datasize", data_size(skip))] + Secp256k1(Secp256k1PublicKey), +} + +impl PublicKey { + /// The length in bytes of a system public key. + pub const SYSTEM_LENGTH: usize = 0; + + /// The length in bytes of an Ed25519 public key. + pub const ED25519_LENGTH: usize = ED25519_PUBLIC_KEY_LENGTH; + + /// The length in bytes of a secp256k1 public key. + pub const SECP256K1_LENGTH: usize = SECP256K1_COMPRESSED_PUBLIC_KEY_LENGTH; + + /// Creates an `AccountHash` from a given `PublicKey` instance. 
+ pub fn to_account_hash(&self) -> AccountHash { + AccountHash::from(self) + } + + /// Returns `true` if this public key is of the `System` variant. + pub fn is_system(&self) -> bool { + matches!(self, PublicKey::System) + } + + fn variant_name(&self) -> &str { + match self { + PublicKey::System => SYSTEM, + PublicKey::Ed25519(_) => ED25519, + PublicKey::Secp256k1(_) => SECP256K1, + } + } +} + +#[cfg(any(feature = "std", test))] +impl PublicKey { + /// Generates a new ed25519 variant using the system's secure random number generator. + pub fn generate_ed25519() -> Result { + let mut bytes = [0u8; Self::ED25519_LENGTH]; + getrandom::getrandom(&mut bytes[..]).expect("RNG failure!"); + PublicKey::ed25519_from_bytes(bytes).map_err(Into::into) + } + + /// Generates a new secp256k1 variant using the system's secure random number generator. + pub fn generate_secp256k1() -> Result { + let mut bytes = [0u8; Self::SECP256K1_LENGTH]; + getrandom::getrandom(&mut bytes[..]).expect("RNG failure!"); + PublicKey::secp256k1_from_bytes(bytes).map_err(Into::into) + } + + /// Attempts to write the key bytes to the configured file path. + pub fn to_file>(&self, file: P) -> Result<(), ErrorExt> { + write_file(file, self.to_pem()?).map_err(ErrorExt::PublicKeySave) + } + + /// Attempts to read the key bytes from configured file path. + pub fn from_file>(file: P) -> Result { + let data = read_file(file).map_err(ErrorExt::PublicKeyLoad)?; + Self::from_pem(data) + } + + /// DER encodes a key. 
+ pub fn to_der(&self) -> Result, ErrorExt> { + match self { + PublicKey::System => Err(Error::System(String::from("to_der")).into()), + PublicKey::Ed25519(public_key) => { + // See https://tools.ietf.org/html/rfc8410#section-10.1 + let mut encoded = vec![]; + let mut der = Der::new(&mut encoded); + der.sequence(|der| { + der.sequence(|der| der.oid(&ED25519_OBJECT_IDENTIFIER))?; + der.bit_string(0, public_key.as_ref()) + })?; + Ok(encoded) + } + PublicKey::Secp256k1(public_key) => { + // See https://www.secg.org/sec1-v2.pdf#subsection.C.3 + let mut encoded = vec![]; + let mut der = Der::new(&mut encoded); + der.sequence(|der| { + der.sequence(|der| { + der.oid(&EC_PUBLIC_KEY_OBJECT_IDENTIFIER)?; + der.oid(&SECP256K1_OBJECT_IDENTIFIER) + })?; + der.bit_string(0, public_key.to_encoded_point(true).as_ref()) + })?; + Ok(encoded) + } + } + } + + /// Decodes a key from a DER-encoded slice. + pub fn from_der>(input: T) -> Result { + let input = Input::from(input.as_ref()); + + let mut key_type_tag = ED25519_TAG; + let raw_bytes = input.read_all(derp::Error::Read, |input| { + derp::nested(input, Tag::Sequence, |input| { + derp::nested(input, Tag::Sequence, |input| { + // Read the first value. + let object_identifier = + derp::expect_tag_and_get_value(input, Tag::Oid)?.as_slice_less_safe(); + if object_identifier == ED25519_OBJECT_IDENTIFIER { + key_type_tag = ED25519_TAG; + Ok(()) + } else if object_identifier == EC_PUBLIC_KEY_OBJECT_IDENTIFIER { + // Assert the next object identifier is the secp256k1 ID. 
+ let next_object_identifier = + derp::expect_tag_and_get_value(input, Tag::Oid)?.as_slice_less_safe(); + if next_object_identifier != SECP256K1_OBJECT_IDENTIFIER { + return Err(derp::Error::WrongValue); + } + + key_type_tag = SECP256K1_TAG; + Ok(()) + } else { + Err(derp::Error::WrongValue) + } + })?; + Ok(derp::bit_string_with_no_unused_bits(input)?.as_slice_less_safe()) + }) + })?; + + match key_type_tag { + ED25519_TAG => PublicKey::ed25519_from_bytes(raw_bytes).map_err(Into::into), + SECP256K1_TAG => PublicKey::secp256k1_from_bytes(raw_bytes).map_err(Into::into), + _ => unreachable!(), + } + } + + /// PEM encodes a key. + pub fn to_pem(&self) -> Result { + let tag = match self { + PublicKey::System => return Err(Error::System(String::from("to_pem")).into()), + PublicKey::Ed25519(_) => ED25519_PEM_PUBLIC_KEY_TAG.to_string(), + PublicKey::Secp256k1(_) => SECP256K1_PEM_PUBLIC_KEY_TAG.to_string(), + }; + let contents = self.to_der()?; + let pem = Pem { tag, contents }; + Ok(pem::encode(&pem)) + } + + /// Decodes a key from a PEM-encoded slice. + pub fn from_pem>(input: T) -> Result { + let pem = pem::parse(input)?; + let public_key = Self::from_der(&pem.contents)?; + let bad_tag = |expected_tag: &str| { + ErrorExt::FromPem(format!( + "invalid tag: expected {}, got {}", + expected_tag, pem.tag + )) + }; + match public_key { + PublicKey::System => return Err(Error::System(String::from("from_pem")).into()), + PublicKey::Ed25519(_) => { + if pem.tag != ED25519_PEM_PUBLIC_KEY_TAG { + return Err(bad_tag(ED25519_PEM_PUBLIC_KEY_TAG)); + } + } + PublicKey::Secp256k1(_) => { + if pem.tag != SECP256K1_PEM_PUBLIC_KEY_TAG { + return Err(bad_tag(SECP256K1_PEM_PUBLIC_KEY_TAG)); + } + } + } + Ok(public_key) + } + + /// Generates a random instance using a `TestRng`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + let secret_key = SecretKey::random(rng); + PublicKey::from(&secret_key) + } + + /// Generates a random ed25519 instance using a `TestRng`. + #[cfg(any(feature = "testing", test))] + pub fn random_ed25519(rng: &mut TestRng) -> Self { + let secret_key = SecretKey::random_ed25519(rng); + PublicKey::from(&secret_key) + } + + /// Generates a random secp256k1 instance using a `TestRng`. + #[cfg(any(feature = "testing", test))] + pub fn random_secp256k1(rng: &mut TestRng) -> Self { + let secret_key = SecretKey::random_secp256k1(rng); + PublicKey::from(&secret_key) + } + + /// Returns an example value for documentation purposes. + pub fn doc_example() -> &'static Self { + &ED25519_PUBLIC_KEY + } +} + +impl AsymmetricType<'_> for PublicKey { + fn system() -> Self { + PublicKey::System + } + + fn ed25519_from_bytes>(bytes: T) -> Result { + Ok(PublicKey::Ed25519(Ed25519PublicKey::try_from( + bytes.as_ref(), + )?)) + } + + fn secp256k1_from_bytes>(bytes: T) -> Result { + Ok(PublicKey::Secp256k1( + Secp256k1PublicKey::from_sec1_bytes(bytes.as_ref()) + .map_err(|_| Error::SignatureError)?, + )) + } +} + +impl From<&SecretKey> for PublicKey { + fn from(secret_key: &SecretKey) -> PublicKey { + match secret_key { + SecretKey::System => PublicKey::System, + SecretKey::Ed25519(secret_key) => PublicKey::Ed25519(secret_key.into()), + SecretKey::Secp256k1(secret_key) => PublicKey::Secp256k1(secret_key.into()), + } + } +} + +impl From<&PublicKey> for Vec { + fn from(public_key: &PublicKey) -> Self { + match public_key { + PublicKey::System => Vec::new(), + PublicKey::Ed25519(key) => key.to_bytes().into(), + PublicKey::Secp256k1(key) => key.to_encoded_point(true).as_ref().into(), + } + } +} + +impl From for Vec { + fn from(public_key: PublicKey) -> Self { + Vec::::from(&public_key) + } +} + +impl Debug for PublicKey { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!( + 
formatter, + "PublicKey::{}({})", + self.variant_name(), + base16::encode_lower(&Into::>::into(self)) + ) + } +} + +impl Display for PublicKey { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!( + formatter, + "PubKey::{}({:10})", + self.variant_name(), + HexFmt(Into::>::into(self)) + ) + } +} + +impl PartialOrd for PublicKey { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for PublicKey { + fn cmp(&self, other: &Self) -> Ordering { + let self_tag = self.tag(); + let other_tag = other.tag(); + if self_tag == other_tag { + Into::>::into(self).cmp(&Into::>::into(other)) + } else { + self_tag.cmp(&other_tag) + } + } +} + +// This implementation of `Hash` agrees with the derived `PartialEq`. It's required since +// `ed25519_dalek::PublicKey` doesn't implement `Hash`. +#[allow(clippy::derived_hash_with_manual_eq)] +impl Hash for PublicKey { + fn hash(&self, state: &mut H) { + self.tag().hash(state); + Into::>::into(self).hash(state); + } +} + +impl Tagged for PublicKey { + fn tag(&self) -> u8 { + match self { + PublicKey::System => SYSTEM_TAG, + PublicKey::Ed25519(_) => ED25519_TAG, + PublicKey::Secp256k1(_) => SECP256K1_TAG, + } + } +} + +impl ToBytes for PublicKey { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + TAG_LENGTH + + match self { + PublicKey::System => Self::SYSTEM_LENGTH, + PublicKey::Ed25519(_) => Self::ED25519_LENGTH, + PublicKey::Secp256k1(_) => Self::SECP256K1_LENGTH, + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + PublicKey::System => writer.push(SYSTEM_TAG), + PublicKey::Ed25519(public_key) => { + writer.push(ED25519_TAG); + writer.extend_from_slice(public_key.as_bytes()); + } + PublicKey::Secp256k1(public_key) => { + writer.push(SECP256K1_TAG); + 
writer.extend_from_slice(public_key.to_encoded_point(true).as_ref()); + } + } + Ok(()) + } +} + +impl FromBytes for PublicKey { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + SYSTEM_TAG => Ok((PublicKey::System, remainder)), + ED25519_TAG => { + let (raw_bytes, remainder): ([u8; Self::ED25519_LENGTH], _) = + FromBytes::from_bytes(remainder)?; + let public_key = Self::ed25519_from_bytes(raw_bytes) + .map_err(|_error| bytesrepr::Error::Formatting)?; + Ok((public_key, remainder)) + } + SECP256K1_TAG => { + let (raw_bytes, remainder): ([u8; Self::SECP256K1_LENGTH], _) = + FromBytes::from_bytes(remainder)?; + let public_key = Self::secp256k1_from_bytes(raw_bytes) + .map_err(|_error| bytesrepr::Error::Formatting)?; + Ok((public_key, remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +impl Serialize for PublicKey { + fn serialize(&self, serializer: S) -> Result { + detail::serialize(self, serializer) + } +} + +impl<'de> Deserialize<'de> for PublicKey { + fn deserialize>(deserializer: D) -> Result { + detail::deserialize(deserializer) + } +} + +#[cfg(feature = "json-schema")] +impl JsonSchema for PublicKey { + fn schema_name() -> String { + String::from("PublicKey") + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let schema = gen.subschema_for::(); + let mut schema_object = schema.into_object(); + schema_object.metadata().description = Some( + "Hex-encoded cryptographic public key, including the algorithm tag prefix.".to_string(), + ); + schema_object.metadata().examples = vec![ + json!({ + "name": "SystemPublicKey", + "description": "A pseudo public key, used for example when the system proposes an \ + immediate switch block after a network upgrade rather than a specific validator. 
\ + Its hex-encoded value is always '00', as is the corresponding pseudo signature's", + "value": "00" + }), + json!({ + "name": "Ed25519PublicKey", + "description": "An Ed25519 public key. Its hex-encoded value begins '01' and is \ + followed by 64 characters", + "value": "018a88e3dd7409f195fd52db2d3cba5d72ca6709bf1d94121bf3748801b40f6f5c" + }), + json!({ + "name": "Secp256k1PublicKey", + "description": "A secp256k1 public key. Its hex-encoded value begins '02' and is \ + followed by 66 characters", + "value": "0203408e9526316fd1f8def480dd45b2cc72ffd732771c9ceb5d92ffa4051e6ee084" + }), + ]; + schema_object.into() + } +} + +impl CLTyped for PublicKey { + fn cl_type() -> CLType { + CLType::PublicKey + } +} + +/// A signature of given data. +#[derive(Clone, Copy)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[non_exhaustive] +pub enum Signature { + /// System signature. Cannot be verified. + System, + /// Ed25519 signature. + #[cfg_attr(feature = "datasize", data_size(skip))] + Ed25519(Ed25519Signature), + /// Secp256k1 signature. + #[cfg_attr(feature = "datasize", data_size(skip))] + Secp256k1(Secp256k1Signature), +} + +impl Signature { + /// The length in bytes of a system signature, + pub const SYSTEM_LENGTH: usize = 0; + + /// The length in bytes of an Ed25519 signature, + pub const ED25519_LENGTH: usize = ED25519_SIGNATURE_LENGTH; + + /// The length in bytes of a secp256k1 signature + pub const SECP256K1_LENGTH: usize = SECP256K1_SIGNATURE_LENGTH; + + /// Constructs a new Ed25519 variant from a byte array. + pub fn ed25519(bytes: [u8; Self::ED25519_LENGTH]) -> Result { + let signature = Ed25519Signature::from_bytes(&bytes); + Ok(Signature::Ed25519(signature)) + } + + /// Constructs a new secp256k1 variant from a byte array. 
+ pub fn secp256k1(bytes: [u8; Self::SECP256K1_LENGTH]) -> Result { + let signature = Secp256k1Signature::try_from(&bytes[..]).map_err(|_| { + Error::AsymmetricKey(format!( + "failed to construct secp256k1 signature from {:?}", + &bytes[..] + )) + })?; + + Ok(Signature::Secp256k1(signature)) + } + + fn variant_name(&self) -> &str { + match self { + Signature::System => SYSTEM, + Signature::Ed25519(_) => ED25519, + Signature::Secp256k1(_) => SECP256K1, + } + } +} + +impl AsymmetricType<'_> for Signature { + fn system() -> Self { + Signature::System + } + + fn ed25519_from_bytes>(bytes: T) -> Result { + let signature = Ed25519Signature::try_from(bytes.as_ref()).map_err(|_| { + Error::AsymmetricKey(format!( + "failed to construct Ed25519 signature from {:?}", + bytes.as_ref() + )) + })?; + Ok(Signature::Ed25519(signature)) + } + + fn secp256k1_from_bytes>(bytes: T) -> Result { + let signature = Secp256k1Signature::try_from(bytes.as_ref()).map_err(|_| { + Error::AsymmetricKey(format!( + "failed to construct secp256k1 signature from {:?}", + bytes.as_ref() + )) + })?; + Ok(Signature::Secp256k1(signature)) + } +} + +impl Debug for Signature { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!( + formatter, + "Signature::{}({})", + self.variant_name(), + base16::encode_lower(&Into::>::into(*self)) + ) + } +} + +impl Display for Signature { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!( + formatter, + "Sig::{}({:10})", + self.variant_name(), + HexFmt(Into::>::into(*self)) + ) + } +} + +impl PartialOrd for Signature { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for Signature { + fn cmp(&self, other: &Self) -> Ordering { + let self_tag = self.tag(); + let other_tag = other.tag(); + if self_tag == other_tag { + Into::>::into(*self).cmp(&Into::>::into(*other)) + } else { + self_tag.cmp(&other_tag) + } + } +} + +impl PartialEq for Signature { + fn eq(&self, other: &Self) -> bool { 
+ self.tag() == other.tag() && Into::>::into(*self) == Into::>::into(*other) + } +} + +impl Eq for Signature {} + +impl Hash for Signature { + fn hash(&self, state: &mut H) { + self.tag().hash(state); + Into::>::into(*self).hash(state); + } +} + +impl Tagged for Signature { + fn tag(&self) -> u8 { + match self { + Signature::System => SYSTEM_TAG, + Signature::Ed25519(_) => ED25519_TAG, + Signature::Secp256k1(_) => SECP256K1_TAG, + } + } +} + +impl ToBytes for Signature { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + TAG_LENGTH + + match self { + Signature::System => Self::SYSTEM_LENGTH, + Signature::Ed25519(_) => Self::ED25519_LENGTH, + Signature::Secp256k1(_) => Self::SECP256K1_LENGTH, + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + Signature::System => { + writer.push(SYSTEM_TAG); + } + Signature::Ed25519(signature) => { + writer.push(ED25519_TAG); + writer.extend(signature.to_bytes()); + } + Signature::Secp256k1(signature) => { + writer.push(SECP256K1_TAG); + writer.extend_from_slice(&signature.to_bytes()); + } + } + Ok(()) + } +} + +impl FromBytes for Signature { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + SYSTEM_TAG => Ok((Signature::System, remainder)), + ED25519_TAG => { + let (raw_bytes, remainder): ([u8; Self::ED25519_LENGTH], _) = + FromBytes::from_bytes(remainder)?; + let public_key = + Self::ed25519(raw_bytes).map_err(|_error| bytesrepr::Error::Formatting)?; + Ok((public_key, remainder)) + } + SECP256K1_TAG => { + let (raw_bytes, remainder): ([u8; Self::SECP256K1_LENGTH], _) = + FromBytes::from_bytes(remainder)?; + let public_key = + Self::secp256k1(raw_bytes).map_err(|_error| bytesrepr::Error::Formatting)?; + Ok((public_key, remainder)) + } + _ => 
Err(bytesrepr::Error::Formatting), + } + } +} + +impl Serialize for Signature { + fn serialize(&self, serializer: S) -> Result { + detail::serialize(self, serializer) + } +} + +impl<'de> Deserialize<'de> for Signature { + fn deserialize>(deserializer: D) -> Result { + detail::deserialize(deserializer) + } +} + +impl From<&Signature> for Vec { + fn from(signature: &Signature) -> Self { + match signature { + Signature::System => Vec::new(), + Signature::Ed25519(signature) => signature.to_bytes().into(), + Signature::Secp256k1(signature) => (*signature.to_bytes()).into(), + } + } +} + +impl From for Vec { + fn from(signature: Signature) -> Self { + Vec::::from(&signature) + } +} + +#[cfg(feature = "json-schema")] +impl JsonSchema for Signature { + fn schema_name() -> String { + String::from("Signature") + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let schema = gen.subschema_for::(); + let mut schema_object = schema.into_object(); + schema_object.metadata().description = Some( + "Hex-encoded cryptographic signature, including the algorithm tag prefix.".to_string(), + ); + schema_object.into() + } +} + +/// Signs the given message using the given key pair. +pub fn sign>( + message: T, + secret_key: &SecretKey, + public_key: &PublicKey, +) -> Signature { + match (secret_key, public_key) { + (SecretKey::System, PublicKey::System) => { + panic!("cannot create signature with system keys",) + } + (SecretKey::Ed25519(secret_key), PublicKey::Ed25519(_public_key)) => { + let signature = secret_key.sign(message.as_ref()); + Signature::Ed25519(signature) + } + (SecretKey::Secp256k1(secret_key), PublicKey::Secp256k1(_public_key)) => { + let signer = secret_key; + let signature: Secp256k1Signature = signer + .try_sign(message.as_ref()) + .expect("should create signature"); + Signature::Secp256k1(signature) + } + _ => panic!("secret and public key types must match"), + } +} + +/// Verifies the signature of the given message against the given public key. 
+pub fn verify>( + message: T, + signature: &Signature, + public_key: &PublicKey, +) -> Result<(), Error> { + match (signature, public_key) { + (Signature::System, _) => Err(Error::AsymmetricKey(String::from( + "signatures based on the system key cannot be verified", + ))), + (Signature::Ed25519(signature), PublicKey::Ed25519(public_key)) => public_key + .verify_strict(message.as_ref(), signature) + .map_err(|_| Error::AsymmetricKey(String::from("failed to verify Ed25519 signature"))), + (Signature::Secp256k1(signature), PublicKey::Secp256k1(public_key)) => { + let verifier: &Secp256k1PublicKey = public_key; + verifier + .verify(message.as_ref(), signature) + .map_err(|error| { + Error::AsymmetricKey(format!("failed to verify secp256k1 signature: {}", error)) + }) + } + _ => Err(Error::AsymmetricKey(format!( + "type mismatch between {} and {}", + signature, public_key + ))), + } +} + +/// Generates an Ed25519 keypair using the operating system's cryptographically secure random number +/// generator. +#[cfg(any(feature = "std", test))] +pub fn generate_ed25519_keypair() -> (SecretKey, PublicKey) { + let secret_key = SecretKey::generate_ed25519().unwrap(); + let public_key = PublicKey::from(&secret_key); + (secret_key, public_key) +} + +mod detail { + use alloc::{string::String, vec::Vec}; + + use serde::{de::Error as _deError, Deserialize, Deserializer, Serialize, Serializer}; + + use super::{PublicKey, Signature}; + use crate::AsymmetricType; + + /// Used to serialize and deserialize asymmetric key types where the (de)serializer is not a + /// human-readable type. + /// + /// The wrapped contents are the result of calling `t_as_ref()` on the type. 
+ #[derive(Serialize, Deserialize)] + pub(super) enum AsymmetricTypeAsBytes { + System, + Ed25519(Vec), + Secp256k1(Vec), + } + + impl From<&PublicKey> for AsymmetricTypeAsBytes { + fn from(public_key: &PublicKey) -> Self { + match public_key { + PublicKey::System => AsymmetricTypeAsBytes::System, + key @ PublicKey::Ed25519(_) => AsymmetricTypeAsBytes::Ed25519(key.into()), + key @ PublicKey::Secp256k1(_) => AsymmetricTypeAsBytes::Secp256k1(key.into()), + } + } + } + + impl From<&Signature> for AsymmetricTypeAsBytes { + fn from(signature: &Signature) -> Self { + match signature { + Signature::System => AsymmetricTypeAsBytes::System, + key @ Signature::Ed25519(_) => AsymmetricTypeAsBytes::Ed25519(key.into()), + key @ Signature::Secp256k1(_) => AsymmetricTypeAsBytes::Secp256k1(key.into()), + } + } + } + + pub(super) fn serialize<'a, T, S>(value: &'a T, serializer: S) -> Result + where + T: AsymmetricType<'a>, + Vec: From<&'a T>, + S: Serializer, + AsymmetricTypeAsBytes: From<&'a T>, + { + if serializer.is_human_readable() { + return value.to_hex().serialize(serializer); + } + + AsymmetricTypeAsBytes::from(value).serialize(serializer) + } + + pub(super) fn deserialize<'a, 'de, T, D>(deserializer: D) -> Result + where + T: AsymmetricType<'a>, + Vec: From<&'a T>, + D: Deserializer<'de>, + { + if deserializer.is_human_readable() { + let hex_string = String::deserialize(deserializer)?; + let value = T::from_hex(hex_string.as_bytes()).map_err(D::Error::custom)?; + return Ok(value); + } + + let as_bytes = AsymmetricTypeAsBytes::deserialize(deserializer)?; + match as_bytes { + AsymmetricTypeAsBytes::System => Ok(T::system()), + AsymmetricTypeAsBytes::Ed25519(raw_bytes) => { + T::ed25519_from_bytes(raw_bytes).map_err(D::Error::custom) + } + AsymmetricTypeAsBytes::Secp256k1(raw_bytes) => { + T::secp256k1_from_bytes(raw_bytes).map_err(D::Error::custom) + } + } + } +} diff --git a/casper_types/src/crypto/asymmetric_key/gens.rs b/casper_types/src/crypto/asymmetric_key/gens.rs new 
file mode 100644 index 00000000..2316133a --- /dev/null +++ b/casper_types/src/crypto/asymmetric_key/gens.rs @@ -0,0 +1,44 @@ +//! Generators for asymmetric key types + +use core::convert::TryInto; + +use proptest::{ + collection, + prelude::{Arbitrary, Just, Strategy}, + prop_oneof, +}; + +use crate::{crypto::SecretKey, PublicKey}; + +/// Creates an arbitrary [`PublicKey`] +pub fn public_key_arb() -> impl Strategy { + prop_oneof![ + Just(PublicKey::System), + collection::vec(::arbitrary(), SecretKey::ED25519_LENGTH).prop_map(|bytes| { + let byte_array: [u8; SecretKey::ED25519_LENGTH] = bytes.try_into().unwrap(); + let secret_key = SecretKey::ed25519_from_bytes(byte_array).unwrap(); + PublicKey::from(&secret_key) + }), + collection::vec(::arbitrary(), SecretKey::SECP256K1_LENGTH).prop_map(|bytes| { + let bytes_array: [u8; SecretKey::SECP256K1_LENGTH] = bytes.try_into().unwrap(); + let secret_key = SecretKey::secp256k1_from_bytes(bytes_array).unwrap(); + PublicKey::from(&secret_key) + }) + ] +} + +/// Returns a strategy for creating random [`PublicKey`] instances but NOT system variant. 
+pub fn public_key_arb_no_system() -> impl Strategy { + prop_oneof![ + collection::vec(::arbitrary(), SecretKey::ED25519_LENGTH).prop_map(|bytes| { + let byte_array: [u8; SecretKey::ED25519_LENGTH] = bytes.try_into().unwrap(); + let secret_key = SecretKey::ed25519_from_bytes(byte_array).unwrap(); + PublicKey::from(&secret_key) + }), + collection::vec(::arbitrary(), SecretKey::SECP256K1_LENGTH).prop_map(|bytes| { + let bytes_array: [u8; SecretKey::SECP256K1_LENGTH] = bytes.try_into().unwrap(); + let secret_key = SecretKey::secp256k1_from_bytes(bytes_array).unwrap(); + PublicKey::from(&secret_key) + }) + ] +} diff --git a/casper_types/src/crypto/asymmetric_key/tests.rs b/casper_types/src/crypto/asymmetric_key/tests.rs new file mode 100644 index 00000000..be7132da --- /dev/null +++ b/casper_types/src/crypto/asymmetric_key/tests.rs @@ -0,0 +1,862 @@ +use std::{ + cmp::Ordering, + collections::hash_map::DefaultHasher, + hash::{Hash, Hasher}, + iter, +}; + +use rand::RngCore; + +use k256::elliptic_curve::sec1::ToEncodedPoint; +use openssl::pkey::{PKey, Private, Public}; + +use super::*; +use crate::{ + bytesrepr, checksummed_hex, crypto::SecretKey, testing::TestRng, AsymmetricType, PublicKey, + Tagged, +}; + +#[test] +fn can_construct_ed25519_keypair_from_zeroes() { + let bytes = [0; SecretKey::ED25519_LENGTH]; + let secret_key = SecretKey::ed25519_from_bytes(bytes).unwrap(); + let _public_key: PublicKey = (&secret_key).into(); +} + +#[test] +#[should_panic] +fn cannot_construct_secp256k1_keypair_from_zeroes() { + let bytes = [0; SecretKey::SECP256K1_LENGTH]; + let secret_key = SecretKey::secp256k1_from_bytes(bytes).unwrap(); + let _public_key: PublicKey = (&secret_key).into(); +} + +#[test] +fn can_construct_ed25519_keypair_from_ones() { + let bytes = [1; SecretKey::ED25519_LENGTH]; + let secret_key = SecretKey::ed25519_from_bytes(bytes).unwrap(); + let _public_key: PublicKey = (&secret_key).into(); +} + +#[test] +fn can_construct_secp256k1_keypair_from_ones() { + let 
bytes = [1; SecretKey::SECP256K1_LENGTH]; + let secret_key = SecretKey::secp256k1_from_bytes(bytes).unwrap(); + let _public_key: PublicKey = (&secret_key).into(); +} + +type OpenSSLSecretKey = PKey; +type OpenSSLPublicKey = PKey; + +// `SecretKey` does not implement `PartialEq`, so just compare derived `PublicKey`s. +fn assert_secret_keys_equal(lhs: &SecretKey, rhs: &SecretKey) { + assert_eq!(PublicKey::from(lhs), PublicKey::from(rhs)); +} + +fn secret_key_der_roundtrip(secret_key: SecretKey) { + let der_encoded = secret_key.to_der().unwrap(); + let decoded = SecretKey::from_der(&der_encoded).unwrap(); + assert_secret_keys_equal(&secret_key, &decoded); + assert_eq!(secret_key.tag(), decoded.tag()); + + // Ensure malformed encoded version fails to decode. + SecretKey::from_der(&der_encoded[1..]).unwrap_err(); +} + +fn secret_key_pem_roundtrip(secret_key: SecretKey) { + let pem_encoded = secret_key.to_pem().unwrap(); + let decoded = SecretKey::from_pem(pem_encoded.as_bytes()).unwrap(); + assert_secret_keys_equal(&secret_key, &decoded); + assert_eq!(secret_key.tag(), decoded.tag()); + + // Check PEM-encoded can be decoded by openssl. + let _ = OpenSSLSecretKey::private_key_from_pem(pem_encoded.as_bytes()).unwrap(); + + // Ensure malformed encoded version fails to decode. 
+ SecretKey::from_pem(&pem_encoded[1..]).unwrap_err(); +} + +fn known_secret_key_to_pem(expected_key: &SecretKey, known_key_pem: &str, expected_tag: u8) { + let decoded = SecretKey::from_pem(known_key_pem.as_bytes()).unwrap(); + assert_secret_keys_equal(expected_key, &decoded); + assert_eq!(expected_tag, decoded.tag()); +} + +fn secret_key_file_roundtrip(secret_key: SecretKey) { + let tempdir = tempfile::tempdir().unwrap(); + let path = tempdir.path().join("test_secret_key.pem"); + + secret_key.to_file(&path).unwrap(); + let decoded = SecretKey::from_file(&path).unwrap(); + assert_secret_keys_equal(&secret_key, &decoded); + assert_eq!(secret_key.tag(), decoded.tag()); +} + +fn public_key_serialization_roundtrip(public_key: PublicKey) { + // Try to/from bincode. + let serialized = bincode::serialize(&public_key).unwrap(); + let deserialized = bincode::deserialize(&serialized).unwrap(); + assert_eq!(public_key, deserialized); + assert_eq!(public_key.tag(), deserialized.tag()); + + // Try to/from JSON. + let serialized = serde_json::to_vec_pretty(&public_key).unwrap(); + let deserialized = serde_json::from_slice(&serialized).unwrap(); + assert_eq!(public_key, deserialized); + assert_eq!(public_key.tag(), deserialized.tag()); + + // Using bytesrepr. + bytesrepr::test_serialization_roundtrip(&public_key); +} + +fn public_key_der_roundtrip(public_key: PublicKey) { + let der_encoded = public_key.to_der().unwrap(); + let decoded = PublicKey::from_der(&der_encoded).unwrap(); + assert_eq!(public_key, decoded); + + // Check DER-encoded can be decoded by openssl. + let _ = OpenSSLPublicKey::public_key_from_der(&der_encoded).unwrap(); + + // Ensure malformed encoded version fails to decode. 
+ PublicKey::from_der(&der_encoded[1..]).unwrap_err(); +} + +fn public_key_pem_roundtrip(public_key: PublicKey) { + let pem_encoded = public_key.to_pem().unwrap(); + let decoded = PublicKey::from_pem(pem_encoded.as_bytes()).unwrap(); + assert_eq!(public_key, decoded); + assert_eq!(public_key.tag(), decoded.tag()); + + // Check PEM-encoded can be decoded by openssl. + let _ = OpenSSLPublicKey::public_key_from_pem(pem_encoded.as_bytes()).unwrap(); + + // Ensure malformed encoded version fails to decode. + PublicKey::from_pem(&pem_encoded[1..]).unwrap_err(); +} + +fn known_public_key_to_pem(known_key_hex: &str, known_key_pem: &str) { + let key_bytes = checksummed_hex::decode(known_key_hex).unwrap(); + let decoded = PublicKey::from_pem(known_key_pem.as_bytes()).unwrap(); + assert_eq!(key_bytes, Into::>::into(decoded)); +} + +fn public_key_file_roundtrip(public_key: PublicKey) { + let tempdir = tempfile::tempdir().unwrap(); + let path = tempdir.path().join("test_public_key.pem"); + + public_key.to_file(&path).unwrap(); + let decoded = PublicKey::from_file(&path).unwrap(); + assert_eq!(public_key, decoded); +} + +fn public_key_hex_roundtrip(public_key: PublicKey) { + let hex_encoded = public_key.to_hex(); + let decoded = PublicKey::from_hex(&hex_encoded).unwrap(); + assert_eq!(public_key, decoded); + assert_eq!(public_key.tag(), decoded.tag()); + + // Ensure malformed encoded version fails to decode. + PublicKey::from_hex(&hex_encoded[..1]).unwrap_err(); + PublicKey::from_hex(&hex_encoded[1..]).unwrap_err(); +} + +fn signature_serialization_roundtrip(signature: Signature) { + // Try to/from bincode. + let serialized = bincode::serialize(&signature).unwrap(); + let deserialized: Signature = bincode::deserialize(&serialized).unwrap(); + assert_eq!(signature, deserialized); + assert_eq!(signature.tag(), deserialized.tag()); + + // Try to/from JSON. 
+ let serialized = serde_json::to_vec_pretty(&signature).unwrap(); + let deserialized = serde_json::from_slice(&serialized).unwrap(); + assert_eq!(signature, deserialized); + assert_eq!(signature.tag(), deserialized.tag()); + + // Try to/from using bytesrepr. + let serialized = bytesrepr::serialize(signature).unwrap(); + let deserialized = bytesrepr::deserialize(serialized).unwrap(); + assert_eq!(signature, deserialized); + assert_eq!(signature.tag(), deserialized.tag()) +} + +fn signature_hex_roundtrip(signature: Signature) { + let hex_encoded = signature.to_hex(); + let decoded = Signature::from_hex(hex_encoded.as_bytes()).unwrap(); + assert_eq!(signature, decoded); + assert_eq!(signature.tag(), decoded.tag()); + + // Ensure malformed encoded version fails to decode. + Signature::from_hex(&hex_encoded[..1]).unwrap_err(); + Signature::from_hex(&hex_encoded[1..]).unwrap_err(); +} + +fn hash(data: &T) -> u64 { + let mut hasher = DefaultHasher::new(); + data.hash(&mut hasher); + hasher.finish() +} + +fn check_ord_and_hash(low: T, high: T) { + #[allow(clippy::redundant_clone)] + let low_copy = low.clone(); + + assert_eq!(hash(&low), hash(&low_copy)); + assert_ne!(hash(&low), hash(&high)); + + assert_eq!(Ordering::Less, low.cmp(&high)); + assert_eq!(Some(Ordering::Less), low.partial_cmp(&high)); + + assert_eq!(Ordering::Greater, high.cmp(&low)); + assert_eq!(Some(Ordering::Greater), high.partial_cmp(&low)); + + assert_eq!(Ordering::Equal, low.cmp(&low_copy)); + assert_eq!(Some(Ordering::Equal), low.partial_cmp(&low_copy)); +} + +mod system { + use std::path::Path; + + use super::{sign, verify}; + use crate::crypto::{AsymmetricType, PublicKey, SecretKey, Signature}; + + #[test] + fn secret_key_to_der_should_error() { + assert!(SecretKey::system().to_der().is_err()); + } + + #[test] + fn secret_key_to_pem_should_error() { + assert!(SecretKey::system().to_pem().is_err()); + } + + #[test] + fn secret_key_to_file_should_error() { + 
assert!(SecretKey::system().to_file(Path::new("/dev/null")).is_err()); + } + + #[test] + fn public_key_serialization_roundtrip() { + super::public_key_serialization_roundtrip(PublicKey::system()); + } + + #[test] + fn public_key_to_der_should_error() { + assert!(PublicKey::system().to_der().is_err()); + } + + #[test] + fn public_key_to_pem_should_error() { + assert!(PublicKey::system().to_pem().is_err()); + } + + #[test] + fn public_key_to_file_should_error() { + assert!(PublicKey::system().to_file(Path::new("/dev/null")).is_err()); + } + + #[test] + fn public_key_to_and_from_hex() { + super::public_key_hex_roundtrip(PublicKey::system()); + } + + #[test] + #[should_panic] + fn sign_should_panic() { + sign([], &SecretKey::system(), &PublicKey::system()); + } + + #[test] + fn signature_to_and_from_hex() { + super::signature_hex_roundtrip(Signature::system()); + } + + #[test] + fn public_key_to_account_hash() { + assert_ne!( + PublicKey::system().to_account_hash().as_ref(), + Into::>::into(PublicKey::system()) + ); + } + + #[test] + fn verify_should_error() { + assert!(verify([], &Signature::system(), &PublicKey::system()).is_err()); + } + + #[test] + fn bytesrepr_roundtrip_signature() { + crate::bytesrepr::test_serialization_roundtrip(&Signature::system()); + } +} + +mod ed25519 { + use rand::Rng; + + use super::*; + use crate::ED25519_TAG; + + const SECRET_KEY_LENGTH: usize = SecretKey::ED25519_LENGTH; + const PUBLIC_KEY_LENGTH: usize = PublicKey::ED25519_LENGTH; + const SIGNATURE_LENGTH: usize = Signature::ED25519_LENGTH; + + #[test] + fn secret_key_from_bytes() { + // Secret key should be `SecretKey::ED25519_LENGTH` bytes. + let bytes = [0; SECRET_KEY_LENGTH + 1]; + assert!(SecretKey::ed25519_from_bytes(&bytes[..]).is_err()); + assert!(SecretKey::ed25519_from_bytes(&bytes[2..]).is_err()); + + // Check the same bytes but of the right length succeeds. 
+ assert!(SecretKey::ed25519_from_bytes(&bytes[1..]).is_ok()); + } + + #[test] + fn secret_key_to_and_from_der() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_ed25519(&mut rng); + let der_encoded = secret_key.to_der().unwrap(); + secret_key_der_roundtrip(secret_key); + + // Check DER-encoded can be decoded by openssl. + let _ = OpenSSLSecretKey::private_key_from_der(&der_encoded).unwrap(); + } + + #[test] + fn secret_key_to_and_from_pem() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_ed25519(&mut rng); + secret_key_pem_roundtrip(secret_key); + } + + #[test] + fn known_secret_key_to_pem() { + // Example values taken from https://tools.ietf.org/html/rfc8410#section-10.3 + const KNOWN_KEY_PEM: &str = r#"-----BEGIN PRIVATE KEY----- +MC4CAQAwBQYDK2VwBCIEINTuctv5E1hK1bbY8fdp+K06/nwoy/HU++CXqI9EdVhC +-----END PRIVATE KEY-----"#; + let key_bytes = + base16::decode("d4ee72dbf913584ad5b6d8f1f769f8ad3afe7c28cbf1d4fbe097a88f44755842") + .unwrap(); + let expected_key = SecretKey::ed25519_from_bytes(key_bytes).unwrap(); + super::known_secret_key_to_pem(&expected_key, KNOWN_KEY_PEM, ED25519_TAG); + } + + #[test] + fn secret_key_to_and_from_file() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_ed25519(&mut rng); + secret_key_file_roundtrip(secret_key); + } + + #[test] + fn public_key_serialization_roundtrip() { + let mut rng = TestRng::new(); + let public_key = PublicKey::random_ed25519(&mut rng); + super::public_key_serialization_roundtrip(public_key); + } + + #[test] + fn public_key_from_bytes() { + // Public key should be `PublicKey::ED25519_LENGTH` bytes. Create vec with an extra + // byte. 
+ let mut rng = TestRng::new(); + let public_key = PublicKey::random_ed25519(&mut rng); + let bytes: Vec = iter::once(rng.gen()) + .chain(Into::>::into(public_key)) + .collect::>(); + + assert!(PublicKey::ed25519_from_bytes(&bytes[..]).is_err()); + assert!(PublicKey::ed25519_from_bytes(&bytes[2..]).is_err()); + + // Check the same bytes but of the right length succeeds. + assert!(PublicKey::ed25519_from_bytes(&bytes[1..]).is_ok()); + } + + #[test] + fn public_key_to_and_from_der() { + let mut rng = TestRng::new(); + let public_key = PublicKey::random_ed25519(&mut rng); + public_key_der_roundtrip(public_key); + } + + #[test] + fn public_key_to_and_from_pem() { + let mut rng = TestRng::new(); + let public_key = PublicKey::random_ed25519(&mut rng); + public_key_pem_roundtrip(public_key); + } + + #[test] + fn known_public_key_to_pem() { + // Example values taken from https://tools.ietf.org/html/rfc8410#section-10.1 + const KNOWN_KEY_HEX: &str = + "19bf44096984cdfe8541bac167dc3b96c85086aa30b6b6cb0c5c38ad703166e1"; + const KNOWN_KEY_PEM: &str = r#"-----BEGIN PUBLIC KEY----- +MCowBQYDK2VwAyEAGb9ECWmEzf6FQbrBZ9w7lshQhqowtrbLDFw4rXAxZuE= +-----END PUBLIC KEY-----"#; + super::known_public_key_to_pem(KNOWN_KEY_HEX, KNOWN_KEY_PEM); + } + + #[test] + fn public_key_to_and_from_file() { + let mut rng = TestRng::new(); + let public_key = PublicKey::random_ed25519(&mut rng); + public_key_file_roundtrip(public_key); + } + + #[test] + fn public_key_to_and_from_hex() { + let mut rng = TestRng::new(); + let public_key = PublicKey::random_ed25519(&mut rng); + public_key_hex_roundtrip(public_key); + } + + #[test] + fn signature_serialization_roundtrip() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_ed25519(&mut rng); + let public_key = PublicKey::from(&secret_key); + let data = b"data"; + let signature = sign(data, &secret_key, &public_key); + super::signature_serialization_roundtrip(signature); + } + + #[test] + fn signature_from_bytes() { + // Signature should 
be `Signature::ED25519_LENGTH` bytes. + let bytes = [2; SIGNATURE_LENGTH + 1]; + assert!(Signature::ed25519_from_bytes(&bytes[..]).is_err()); + assert!(Signature::ed25519_from_bytes(&bytes[2..]).is_err()); + + // Check the same bytes but of the right length succeeds. + assert!(Signature::ed25519_from_bytes(&bytes[1..]).is_ok()); + } + + #[test] + fn signature_key_to_and_from_hex() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_ed25519(&mut rng); + let public_key = PublicKey::from(&secret_key); + let data = b"data"; + let signature = sign(data, &secret_key, &public_key); + signature_hex_roundtrip(signature); + } + + #[test] + fn public_key_traits() { + let public_key_low = PublicKey::ed25519_from_bytes([1; PUBLIC_KEY_LENGTH]).unwrap(); + let public_key_high = PublicKey::ed25519_from_bytes([3; PUBLIC_KEY_LENGTH]).unwrap(); + check_ord_and_hash(public_key_low, public_key_high) + } + + #[test] + fn public_key_to_account_hash() { + let public_key_high = PublicKey::ed25519_from_bytes([255; PUBLIC_KEY_LENGTH]).unwrap(); + assert_ne!( + public_key_high.to_account_hash().as_ref(), + Into::>::into(public_key_high) + ); + } + + #[test] + fn signature_traits() { + let signature_low = Signature::ed25519([1; SIGNATURE_LENGTH]).unwrap(); + let signature_high = Signature::ed25519([3; SIGNATURE_LENGTH]).unwrap(); + check_ord_and_hash(signature_low, signature_high) + } + + #[test] + fn sign_and_verify() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_ed25519(&mut rng); + + let public_key = PublicKey::from(&secret_key); + let other_public_key = PublicKey::random_ed25519(&mut rng); + let wrong_type_public_key = PublicKey::random_secp256k1(&mut rng); + + let message = b"message"; + let signature = sign(message, &secret_key, &public_key); + + assert!(verify(message, &signature, &public_key).is_ok()); + assert!(verify(message, &signature, &other_public_key).is_err()); + assert!(verify(message, &signature, &wrong_type_public_key).is_err()); + 
assert!(verify(&message[1..], &signature, &public_key).is_err()); + } + + #[test] + fn bytesrepr_roundtrip_signature() { + let mut rng = TestRng::new(); + let ed25519_secret_key = SecretKey::random_ed25519(&mut rng); + let public_key = PublicKey::from(&ed25519_secret_key); + let data = b"data"; + let signature = sign(data, &ed25519_secret_key, &public_key); + bytesrepr::test_serialization_roundtrip(&signature); + } + + #[test] + fn validate_known_signature() { + // In the event that this test fails, we need to consider pinning the version of the + // `ed25519-dalek` crate to maintain backwards compatibility with existing data on the + // Casper network. + + // Values taken from: + // https://github.com/dalek-cryptography/ed25519-dalek/blob/925eb9ea56192053c9eb93b9d30d1b9419eee128/TESTVECTORS#L62 + let secret_key_hex = "bf5ba5d6a49dd5ef7b4d5d7d3e4ecc505c01f6ccee4c54b5ef7b40af6a454140"; + let public_key_hex = "1be034f813017b900d8990af45fad5b5214b573bd303ef7a75ef4b8c5c5b9842"; + let message_hex = + "16152c2e037b1c0d3219ced8e0674aee6b57834b55106c5344625322da638ecea2fc9a424a05ee9512\ + d48fcf75dd8bd4691b3c10c28ec98ee1afa5b863d1c36795ed18105db3a9aabd9d2b4c1747adbaf1a56\ + ffcc0c533c1c0faef331cdb79d961fa39f880a1b8b1164741822efb15a7259a465bef212855751fab66\ + a897bfa211abe0ea2f2e1cd8a11d80e142cde1263eec267a3138ae1fcf4099db0ab53d64f336f4bcd7a\ + 363f6db112c0a2453051a0006f813aaf4ae948a2090619374fa58052409c28ef76225687df3cb2d1b0b\ + fb43b09f47f1232f790e6d8dea759e57942099f4c4bd3390f28afc2098244961465c643fc8b29766af2\ + bcbc5440b86e83608cfc937be98bb4827fd5e6b689adc2e26513db531076a6564396255a09975b7034d\ + ac06461b255642e3a7ed75fa9fc265011f5f6250382a84ac268d63ba64"; + let signature_hex = + "279cace6fdaf3945e3837df474b28646143747632bede93e7a66f5ca291d2c24978512ca0cb8827c8c\ + 322685bd605503a5ec94dbae61bbdcae1e49650602bc07"; + + let secret_key_bytes = base16::decode(secret_key_hex).unwrap(); + let public_key_bytes = base16::decode(public_key_hex).unwrap(); + let message_bytes = 
base16::decode(message_hex).unwrap(); + let signature_bytes = base16::decode(signature_hex).unwrap(); + + let secret_key = SecretKey::ed25519_from_bytes(secret_key_bytes).unwrap(); + let public_key = PublicKey::ed25519_from_bytes(public_key_bytes).unwrap(); + assert_eq!(public_key, PublicKey::from(&secret_key)); + + let signature = Signature::ed25519_from_bytes(signature_bytes).unwrap(); + assert_eq!(sign(&message_bytes, &secret_key, &public_key), signature); + assert!(verify(&message_bytes, &signature, &public_key).is_ok()); + } +} + +mod secp256k1 { + use rand::Rng; + + use super::*; + use crate::SECP256K1_TAG; + + const SECRET_KEY_LENGTH: usize = SecretKey::SECP256K1_LENGTH; + const SIGNATURE_LENGTH: usize = Signature::SECP256K1_LENGTH; + + #[test] + fn secret_key_from_bytes() { + // Secret key should be `SecretKey::SECP256K1_LENGTH` bytes. + // The k256 library will ensure that a byte stream of a length not equal to + // `SECP256K1_LENGTH` will fail due to an assertion internal to the library. + // We can check that invalid byte streams e.g [0;32] does not generate a valid key. + let bytes = [0; SECRET_KEY_LENGTH]; + assert!(SecretKey::secp256k1_from_bytes(&bytes[..]).is_err()); + + // Check that a valid byte stream produces a valid key + let bytes = [1; SECRET_KEY_LENGTH]; + assert!(SecretKey::secp256k1_from_bytes(&bytes[..]).is_ok()); + } + + #[test] + fn secret_key_to_and_from_der() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_secp256k1(&mut rng); + secret_key_der_roundtrip(secret_key); + } + + #[test] + fn secret_key_to_and_from_pem() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_secp256k1(&mut rng); + secret_key_pem_roundtrip(secret_key); + } + + #[test] + fn known_secret_key_to_pem() { + // Example values taken from Python client. 
+ const KNOWN_KEY_PEM: &str = r#"-----BEGIN EC PRIVATE KEY----- +MHQCAQEEIL3fqaMKAfXSK1D2PnVVbZlZ7jTv133nukq4+95s6kmcoAcGBSuBBAAK +oUQDQgAEQI6VJjFv0fje9IDdRbLMcv/XMnccnOtdkv+kBR5u4ISEAkuc2TFWQHX0 +Yj9oTB9fx9+vvQdxJOhMtu46kGo0Uw== +-----END EC PRIVATE KEY-----"#; + let key_bytes = + base16::decode("bddfa9a30a01f5d22b50f63e75556d9959ee34efd77de7ba4ab8fbde6cea499c") + .unwrap(); + let expected_key = SecretKey::secp256k1_from_bytes(key_bytes).unwrap(); + super::known_secret_key_to_pem(&expected_key, KNOWN_KEY_PEM, SECP256K1_TAG); + } + + #[test] + fn secret_key_to_and_from_file() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_secp256k1(&mut rng); + secret_key_file_roundtrip(secret_key); + } + + #[test] + fn public_key_serialization_roundtrip() { + let mut rng = TestRng::new(); + let public_key = PublicKey::random_secp256k1(&mut rng); + super::public_key_serialization_roundtrip(public_key); + } + + #[test] + fn public_key_from_bytes() { + // Public key should be `PublicKey::SECP256K1_LENGTH` bytes. Create vec with an extra + // byte. + let mut rng = TestRng::new(); + let public_key = PublicKey::random_secp256k1(&mut rng); + let bytes: Vec = iter::once(rng.gen()) + .chain(Into::>::into(public_key)) + .collect::>(); + + assert!(PublicKey::secp256k1_from_bytes(&bytes[..]).is_err()); + assert!(PublicKey::secp256k1_from_bytes(&bytes[2..]).is_err()); + + // Check the same bytes but of the right length succeeds. + assert!(PublicKey::secp256k1_from_bytes(&bytes[1..]).is_ok()); + } + + #[test] + fn public_key_to_and_from_der() { + let mut rng = TestRng::new(); + let public_key = PublicKey::random_secp256k1(&mut rng); + public_key_der_roundtrip(public_key); + } + + #[test] + fn public_key_to_and_from_pem() { + let mut rng = TestRng::new(); + let public_key = PublicKey::random_secp256k1(&mut rng); + public_key_pem_roundtrip(public_key); + } + + #[test] + fn known_public_key_to_pem() { + // Example values taken from Python client. 
+ const KNOWN_KEY_HEX: &str = + "03408e9526316fd1f8def480dd45b2cc72ffd732771c9ceb5d92ffa4051e6ee084"; + const KNOWN_KEY_PEM: &str = r#"-----BEGIN PUBLIC KEY----- +MFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEQI6VJjFv0fje9IDdRbLMcv/XMnccnOtd +kv+kBR5u4ISEAkuc2TFWQHX0Yj9oTB9fx9+vvQdxJOhMtu46kGo0Uw== +-----END PUBLIC KEY-----"#; + super::known_public_key_to_pem(KNOWN_KEY_HEX, KNOWN_KEY_PEM); + } + + #[test] + fn public_key_to_and_from_file() { + let mut rng = TestRng::new(); + let public_key = PublicKey::random_secp256k1(&mut rng); + public_key_file_roundtrip(public_key); + } + + #[test] + fn public_key_to_and_from_hex() { + let mut rng = TestRng::new(); + let public_key = PublicKey::random_secp256k1(&mut rng); + public_key_hex_roundtrip(public_key); + } + + #[test] + fn signature_serialization_roundtrip() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_secp256k1(&mut rng); + let public_key = PublicKey::from(&secret_key); + let data = b"data"; + let signature = sign(data, &secret_key, &public_key); + super::signature_serialization_roundtrip(signature); + } + + #[test] + fn bytesrepr_roundtrip_signature() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_secp256k1(&mut rng); + let public_key = PublicKey::from(&secret_key); + let data = b"data"; + let signature = sign(data, &secret_key, &public_key); + bytesrepr::test_serialization_roundtrip(&signature); + } + + #[test] + fn signature_from_bytes() { + // Signature should be `Signature::SECP256K1_LENGTH` bytes. + let bytes = [2; SIGNATURE_LENGTH + 1]; + assert!(Signature::secp256k1_from_bytes(&bytes[..]).is_err()); + assert!(Signature::secp256k1_from_bytes(&bytes[2..]).is_err()); + + // Check the same bytes but of the right length succeeds. 
+ assert!(Signature::secp256k1_from_bytes(&bytes[1..]).is_ok()); + } + + #[test] + fn signature_key_to_and_from_hex() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_secp256k1(&mut rng); + let public_key = PublicKey::from(&secret_key); + let data = b"data"; + let signature = sign(data, &secret_key, &public_key); + signature_hex_roundtrip(signature); + } + + #[test] + fn public_key_traits() { + let mut rng = TestRng::new(); + let public_key1 = PublicKey::random_secp256k1(&mut rng); + let public_key2 = PublicKey::random_secp256k1(&mut rng); + if Into::>::into(public_key1.clone()) < Into::>::into(public_key2.clone()) { + check_ord_and_hash(public_key1, public_key2) + } else { + check_ord_and_hash(public_key2, public_key1) + } + } + + #[test] + fn public_key_to_account_hash() { + let mut rng = TestRng::new(); + let public_key = PublicKey::random_secp256k1(&mut rng); + assert_ne!( + public_key.to_account_hash().as_ref(), + Into::>::into(public_key) + ); + } + + #[test] + fn signature_traits() { + let signature_low = Signature::secp256k1([1; SIGNATURE_LENGTH]).unwrap(); + let signature_high = Signature::secp256k1([3; SIGNATURE_LENGTH]).unwrap(); + check_ord_and_hash(signature_low, signature_high) + } + + #[test] + fn validate_known_signature() { + // In the event that this test fails, we need to consider pinning the version of the + // `k256` crate to maintain backwards compatibility with existing data on the Casper + // network. 
+ let secret_key_hex = "833fe62409237b9d62ec77587520911e9a759cec1d19755b7da901b96dca3d42"; + let public_key_hex = "028e24fd9654f12c793d3d376c15f7abe53e0fbd537884a3a98d10d2dc6d513b4e"; + let message_hex = "616263"; + let signature_hex = "8016162860f0795154643d15c5ab5bb840d8c695d6de027421755579ea7f2a4629b7e0c88fc3428669a6a89496f426181b73f10c6c8a05ac8f49d6cb5032eb89"; + + let secret_key_bytes = base16::decode(secret_key_hex).unwrap(); + let public_key_bytes = base16::decode(public_key_hex).unwrap(); + let message_bytes = base16::decode(message_hex).unwrap(); + let signature_bytes = base16::decode(signature_hex).unwrap(); + + let secret_key = SecretKey::secp256k1_from_bytes(secret_key_bytes).unwrap(); + let public_key = PublicKey::secp256k1_from_bytes(public_key_bytes).unwrap(); + assert_eq!(public_key, PublicKey::from(&secret_key)); + + let signature = Signature::secp256k1_from_bytes(signature_bytes).unwrap(); + assert_eq!(sign(&message_bytes, &secret_key, &public_key), signature); + assert!(verify(&message_bytes, &signature, &public_key).is_ok()); + } +} + +#[test] +fn public_key_traits() { + let system_key = PublicKey::system(); + let mut rng = TestRng::new(); + let ed25519_public_key = PublicKey::random_ed25519(&mut rng); + let secp256k1_public_key = PublicKey::random_secp256k1(&mut rng); + check_ord_and_hash(ed25519_public_key.clone(), secp256k1_public_key.clone()); + check_ord_and_hash(system_key.clone(), ed25519_public_key); + check_ord_and_hash(system_key, secp256k1_public_key); +} + +#[test] +fn signature_traits() { + let system_sig = Signature::system(); + let ed25519_sig = Signature::ed25519([3; Signature::ED25519_LENGTH]).unwrap(); + let secp256k1_sig = Signature::secp256k1([1; Signature::SECP256K1_LENGTH]).unwrap(); + check_ord_and_hash(ed25519_sig, secp256k1_sig); + check_ord_and_hash(system_sig, ed25519_sig); + check_ord_and_hash(system_sig, secp256k1_sig); +} + +#[test] +fn sign_and_verify() { + let mut rng = TestRng::new(); + let ed25519_secret_key = 
SecretKey::random_ed25519(&mut rng); + let secp256k1_secret_key = SecretKey::random_secp256k1(&mut rng); + + let ed25519_public_key = PublicKey::from(&ed25519_secret_key); + let secp256k1_public_key = PublicKey::from(&secp256k1_secret_key); + + let other_ed25519_public_key = PublicKey::random_ed25519(&mut rng); + let other_secp256k1_public_key = PublicKey::random_secp256k1(&mut rng); + + let message = b"message"; + let ed25519_signature = sign(message, &ed25519_secret_key, &ed25519_public_key); + let secp256k1_signature = sign(message, &secp256k1_secret_key, &secp256k1_public_key); + + assert!(verify(message, &ed25519_signature, &ed25519_public_key).is_ok()); + assert!(verify(message, &secp256k1_signature, &secp256k1_public_key).is_ok()); + + assert!(verify(message, &ed25519_signature, &other_ed25519_public_key).is_err()); + assert!(verify(message, &secp256k1_signature, &other_secp256k1_public_key).is_err()); + + assert!(verify(message, &ed25519_signature, &secp256k1_public_key).is_err()); + assert!(verify(message, &secp256k1_signature, &ed25519_public_key).is_err()); + + assert!(verify(&message[1..], &ed25519_signature, &ed25519_public_key).is_err()); + assert!(verify(&message[1..], &secp256k1_signature, &secp256k1_public_key).is_err()); +} + +#[test] +fn should_construct_secp256k1_from_uncompressed_bytes() { + let mut rng = TestRng::new(); + + let mut secret_key_bytes = [0u8; SecretKey::SECP256K1_LENGTH]; + rng.fill_bytes(&mut secret_key_bytes[..]); + + // Construct a secp256k1 secret key and use that to construct a public key. + let secp256k1_secret_key = k256::SecretKey::from_slice(&secret_key_bytes).unwrap(); + let secp256k1_public_key = secp256k1_secret_key.public_key(); + + // Construct a CL secret key and public key from that (which will be a compressed key). 
+ let secret_key = SecretKey::secp256k1_from_bytes(secret_key_bytes).unwrap(); + let public_key = PublicKey::from(&secret_key); + assert_eq!( + Into::>::into(public_key.clone()).len(), + PublicKey::SECP256K1_LENGTH + ); + assert_ne!( + secp256k1_public_key + .to_encoded_point(false) + .as_bytes() + .len(), + PublicKey::SECP256K1_LENGTH + ); + + // Construct a CL public key from uncompressed public key bytes and ensure it's compressed. + let from_uncompressed_bytes = + PublicKey::secp256k1_from_bytes(secp256k1_public_key.to_encoded_point(false).as_bytes()) + .unwrap(); + assert_eq!(public_key, from_uncompressed_bytes); + + // Construct a CL public key from the uncompressed one's hex representation and ensure it's + // compressed. + let uncompressed_hex = { + let tag_bytes = vec![0x02u8]; + base16::encode_lower(&tag_bytes) + + &base16::encode_lower(&secp256k1_public_key.to_encoded_point(false).as_bytes()) + }; + + format!( + "02{}", + base16::encode_lower(secp256k1_public_key.to_encoded_point(false).as_bytes()) + .to_lowercase() + ); + let from_uncompressed_hex = PublicKey::from_hex(uncompressed_hex).unwrap(); + assert_eq!(public_key, from_uncompressed_hex); +} + +#[test] +fn generate_ed25519_should_generate_an_ed25519_key() { + let secret_key = SecretKey::generate_ed25519().unwrap(); + assert!(matches!(secret_key, SecretKey::Ed25519(_))) +} + +#[test] +fn generate_secp256k1_should_generate_an_secp256k1_key() { + let secret_key = SecretKey::generate_secp256k1().unwrap(); + assert!(matches!(secret_key, SecretKey::Secp256k1(_))) +} diff --git a/casper_types/src/crypto/error.rs b/casper_types/src/crypto/error.rs new file mode 100644 index 00000000..6750e61f --- /dev/null +++ b/casper_types/src/crypto/error.rs @@ -0,0 +1,111 @@ +use alloc::string::String; +use core::fmt::Debug; +#[cfg(not(any(feature = "std", test)))] +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use ed25519_dalek::ed25519::Error as SignatureError; 
+#[cfg(any(feature = "std", test))] +use pem::PemError; +#[cfg(any(feature = "std", test))] +use thiserror::Error; + +#[cfg(any(feature = "std", test))] +use crate::file_utils::{ReadFileError, WriteFileError}; + +/// Cryptographic errors. +#[derive(Clone, PartialEq, Eq, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(any(feature = "std", test), derive(Error))] +#[non_exhaustive] +pub enum Error { + /// Error resulting from creating or using asymmetric key types. + #[cfg_attr(any(feature = "std", test), error("asymmetric key error: {0}"))] + AsymmetricKey(String), + + /// Error resulting when decoding a type from a hex-encoded representation. + #[cfg_attr(feature = "datasize", data_size(skip))] + #[cfg_attr(any(feature = "std", test), error("parsing from hex: {0}"))] + FromHex(base16::DecodeError), + + /// Error resulting when decoding a type from a base64 representation. + #[cfg_attr(feature = "datasize", data_size(skip))] + #[cfg_attr(any(feature = "std", test), error("decoding error: {0}"))] + FromBase64(base64::DecodeError), + + /// Signature error. + #[cfg_attr(any(feature = "std", test), error("error in signature"))] + SignatureError, + + /// Error trying to manipulate the system key. + #[cfg_attr( + any(feature = "std", test), + error("invalid operation on system key: {0}") + )] + System(String), +} + +#[cfg(not(any(feature = "std", test)))] +impl Display for Error { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + Debug::fmt(self, formatter) + } +} + +impl From for Error { + fn from(error: base16::DecodeError) -> Self { + Error::FromHex(error) + } +} + +impl From for Error { + fn from(_error: SignatureError) -> Self { + Error::SignatureError + } +} + +/// Cryptographic errors extended with some additional variants. +#[cfg(any(feature = "std", test))] +#[derive(Debug, Error)] +#[non_exhaustive] +pub enum ErrorExt { + /// A basic crypto error. 
+ #[error("crypto error: {0:?}")] + CryptoError(#[from] Error), + + /// Error trying to read a secret key. + #[error("secret key load failed: {0}")] + SecretKeyLoad(ReadFileError), + + /// Error trying to read a public key. + #[error("public key load failed: {0}")] + PublicKeyLoad(ReadFileError), + + /// Error trying to write a secret key. + #[error("secret key save failed: {0}")] + SecretKeySave(WriteFileError), + + /// Error trying to write a public key. + #[error("public key save failed: {0}")] + PublicKeySave(WriteFileError), + + /// Pem format error. + #[error("pem error: {0}")] + FromPem(String), + + /// DER format error. + #[error("der error: {0}")] + FromDer(#[from] derp::Error), + + /// Error in getting random bytes from the system's preferred random number source. + #[error("failed to get random bytes: {0}")] + GetRandomBytes(#[from] getrandom::Error), +} + +#[cfg(any(feature = "std", test))] +impl From for ErrorExt { + fn from(error: PemError) -> Self { + ErrorExt::FromPem(error.to_string()) + } +} diff --git a/casper_types/src/deploy_info.rs b/casper_types/src/deploy_info.rs new file mode 100644 index 00000000..5108f5db --- /dev/null +++ b/casper_types/src/deploy_info.rs @@ -0,0 +1,172 @@ +// TODO - remove once schemars stops causing warning. +#![allow(clippy::field_reassign_with_default)] + +use alloc::vec::Vec; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + account::AccountHash, + bytesrepr::{self, FromBytes, ToBytes}, + DeployHash, TransferAddr, URef, U512, +}; + +/// Information relating to the given Deploy. +#[derive(Debug, Clone, Ord, PartialOrd, Eq, PartialEq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct DeployInfo { + /// The relevant Deploy. 
+ pub deploy_hash: DeployHash, + /// Transfers performed by the Deploy. + pub transfers: Vec, + /// Account identifier of the creator of the Deploy. + pub from: AccountHash, + /// Source purse used for payment of the Deploy. + pub source: URef, + /// Gas cost of executing the Deploy. + pub gas: U512, +} + +impl DeployInfo { + /// Creates a [`DeployInfo`]. + pub fn new( + deploy_hash: DeployHash, + transfers: &[TransferAddr], + from: AccountHash, + source: URef, + gas: U512, + ) -> Self { + let transfers = transfers.to_vec(); + DeployInfo { + deploy_hash, + transfers, + from, + source, + gas, + } + } +} + +impl FromBytes for DeployInfo { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (deploy_hash, rem) = DeployHash::from_bytes(bytes)?; + let (transfers, rem) = Vec::::from_bytes(rem)?; + let (from, rem) = AccountHash::from_bytes(rem)?; + let (source, rem) = URef::from_bytes(rem)?; + let (gas, rem) = U512::from_bytes(rem)?; + Ok(( + DeployInfo { + deploy_hash, + transfers, + from, + source, + gas, + }, + rem, + )) + } +} + +impl ToBytes for DeployInfo { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + self.deploy_hash.write_bytes(&mut result)?; + self.transfers.write_bytes(&mut result)?; + self.from.write_bytes(&mut result)?; + self.source.write_bytes(&mut result)?; + self.gas.write_bytes(&mut result)?; + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.deploy_hash.serialized_length() + + self.transfers.serialized_length() + + self.from.serialized_length() + + self.source.serialized_length() + + self.gas.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.deploy_hash.write_bytes(writer)?; + self.transfers.write_bytes(writer)?; + self.from.write_bytes(writer)?; + self.source.write_bytes(writer)?; + self.gas.write_bytes(writer)?; + Ok(()) + } +} + +/// Generators for a `Deploy` +#[cfg(any(feature = 
"testing", feature = "gens", test))] +pub(crate) mod gens { + use alloc::vec::Vec; + + use proptest::{ + array, + collection::{self, SizeRange}, + prelude::{Arbitrary, Strategy}, + }; + + use crate::{ + account::AccountHash, + gens::{u512_arb, uref_arb}, + DeployHash, DeployInfo, TransferAddr, + }; + + pub fn deploy_hash_arb() -> impl Strategy { + array::uniform32(::arbitrary()).prop_map(DeployHash::new) + } + + pub fn transfer_addr_arb() -> impl Strategy { + array::uniform32(::arbitrary()).prop_map(TransferAddr::new) + } + + pub fn transfers_arb(size: impl Into) -> impl Strategy> { + collection::vec(transfer_addr_arb(), size) + } + + pub fn account_hash_arb() -> impl Strategy { + array::uniform32(::arbitrary()).prop_map(AccountHash::new) + } + + /// Creates an arbitrary `Deploy` + pub fn deploy_info_arb() -> impl Strategy { + let transfers_length_range = 0..5; + ( + deploy_hash_arb(), + transfers_arb(transfers_length_range), + account_hash_arb(), + uref_arb(), + u512_arb(), + ) + .prop_map(|(deploy_hash, transfers, from, source, gas)| DeployInfo { + deploy_hash, + transfers, + from, + source, + gas, + }) + } +} + +#[cfg(test)] +mod tests { + use proptest::prelude::*; + + use crate::bytesrepr; + + use super::gens; + + proptest! { + #[test] + fn test_serialization_roundtrip(deploy_info in gens::deploy_info_arb()) { + bytesrepr::test_serialization_roundtrip(&deploy_info) + } + } +} diff --git a/casper_types/src/era_id.rs b/casper_types/src/era_id.rs new file mode 100644 index 00000000..9fc35cc3 --- /dev/null +++ b/casper_types/src/era_id.rs @@ -0,0 +1,241 @@ +// TODO - remove once schemars stops causing warning. 
+#![allow(clippy::field_reassign_with_default)] + +use alloc::vec::Vec; +use core::{ + fmt::{self, Debug, Display, Formatter}, + num::ParseIntError, + ops::{Add, AddAssign, Sub}, + str::FromStr, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + CLType, CLTyped, +}; + +/// Era ID newtype. +#[derive( + Debug, Default, Clone, Copy, Hash, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[cfg_attr(feature = "testing", derive(proptest_derive::Arbitrary))] +#[serde(deny_unknown_fields)] +pub struct EraId(u64); + +impl EraId { + /// Maximum possible value an [`EraId`] can hold. + pub const MAX: EraId = EraId(u64::max_value()); + + /// Creates new [`EraId`] instance. + pub const fn new(value: u64) -> EraId { + EraId(value) + } + + /// Returns an iterator over era IDs of `num_eras` future eras starting from current. + pub fn iter(&self, num_eras: u64) -> impl Iterator { + let current_era_id = self.0; + (current_era_id..current_era_id + num_eras).map(EraId) + } + + /// Returns an iterator over era IDs of `num_eras` future eras starting from current, plus the + /// provided one. + pub fn iter_inclusive(&self, num_eras: u64) -> impl Iterator { + let current_era_id = self.0; + (current_era_id..=current_era_id + num_eras).map(EraId) + } + + /// Returns a successor to current era. + /// + /// For `u64::MAX`, this returns `u64::MAX` again: We want to make sure this doesn't panic, and + /// that era number will never be reached in practice. + #[must_use] + pub fn successor(self) -> EraId { + EraId::from(self.0.saturating_add(1)) + } + + /// Returns the predecessor to current era, or `None` if genesis. 
+ #[must_use] + pub fn predecessor(self) -> Option { + self.0.checked_sub(1).map(EraId) + } + + /// Returns the current era plus `x`, or `None` if that would overflow + pub fn checked_add(&self, x: u64) -> Option { + self.0.checked_add(x).map(EraId) + } + + /// Returns the current era minus `x`, or `None` if that would be less than `0`. + pub fn checked_sub(&self, x: u64) -> Option { + self.0.checked_sub(x).map(EraId) + } + + /// Returns the current era minus `x`, or `0` if that would be less than `0`. + #[must_use] + pub fn saturating_sub(&self, x: u64) -> EraId { + EraId::from(self.0.saturating_sub(x)) + } + + /// Returns the current era plus `x`, or [`EraId::MAX`] if overflow would occur. + #[must_use] + pub fn saturating_add(self, rhs: u64) -> EraId { + EraId(self.0.saturating_add(rhs)) + } + + /// Returns the current era times `x`, or [`EraId::MAX`] if overflow would occur. + #[must_use] + pub fn saturating_mul(&self, x: u64) -> EraId { + EraId::from(self.0.saturating_mul(x)) + } + + /// Returns whether this is era 0. + pub fn is_genesis(&self) -> bool { + self.0 == 0 + } + + /// Returns little endian bytes. + pub fn to_le_bytes(self) -> [u8; 8] { + self.0.to_le_bytes() + } + + /// Returns a raw value held by this [`EraId`] instance. + /// + /// You should prefer [`From`] trait implementations over this method where possible. + pub fn value(self) -> u64 { + self.0 + } +} + +impl FromStr for EraId { + type Err = ParseIntError; + + fn from_str(s: &str) -> Result { + u64::from_str(s).map(EraId) + } +} + +impl Add for EraId { + type Output = EraId; + + #[allow(clippy::arithmetic_side_effects)] // The caller must make sure this doesn't overflow. + fn add(self, x: u64) -> EraId { + EraId::from(self.0 + x) + } +} + +impl AddAssign for EraId { + fn add_assign(&mut self, x: u64) { + self.0 += x; + } +} + +impl Sub for EraId { + type Output = EraId; + + #[allow(clippy::arithmetic_side_effects)] // The caller must make sure this doesn't overflow. 
+ fn sub(self, x: u64) -> EraId { + EraId::from(self.0 - x) + } +} + +impl Display for EraId { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "era {}", self.0) + } +} + +impl From for u64 { + fn from(era_id: EraId) -> Self { + era_id.value() + } +} + +impl From for EraId { + fn from(era_id: u64) -> Self { + EraId(era_id) + } +} + +impl ToBytes for EraId { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + #[inline(always)] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for EraId { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (id_value, remainder) = u64::from_bytes(bytes)?; + let era_id = EraId::from(id_value); + Ok((era_id, remainder)) + } +} + +impl CLTyped for EraId { + fn cl_type() -> CLType { + CLType::U64 + } +} + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> EraId { + EraId(rng.gen_range(0..1_000_000)) + } +} + +#[cfg(test)] +mod tests { + use proptest::prelude::*; + + use super::*; + use crate::gens::era_id_arb; + + #[test] + fn should_calculate_correct_inclusive_future_eras() { + let auction_delay = 3; + + let current_era = EraId::from(42); + + let window: Vec = current_era.iter_inclusive(auction_delay).collect(); + assert_eq!(window.len(), auction_delay as usize + 1); + assert_eq!(window.first(), Some(¤t_era)); + assert_eq!( + window.iter().next_back(), + Some(&(current_era + auction_delay)) + ); + } + + #[test] + fn should_have_valid_genesis_era_id() { + let expected_initial_era_id = EraId::from(0); + assert!(expected_initial_era_id.is_genesis()); + assert!(!expected_initial_era_id.successor().is_genesis()) + } + + proptest! 
{ + #[test] + fn bytesrepr_roundtrip(era_id in era_id_arb()) { + bytesrepr::test_serialization_roundtrip(&era_id); + } + } +} diff --git a/casper_types/src/execution_result.rs b/casper_types/src/execution_result.rs new file mode 100644 index 00000000..87788fc9 --- /dev/null +++ b/casper_types/src/execution_result.rs @@ -0,0 +1,814 @@ +//! This file provides types to allow conversion from an EE `ExecutionResult` into a similar type +//! which can be serialized to a valid binary or JSON representation. +//! +//! It is stored as metadata related to a given deploy, and made available to clients via the +//! JSON-RPC API. + +// TODO - remove once schemars stops causing warning. +#![allow(clippy::field_reassign_with_default)] + +use core::convert::TryFrom; + +use alloc::{ + boxed::Box, + format, + string::{String, ToString}, + vec, + vec::Vec, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use num::{FromPrimitive, ToPrimitive}; +use num_derive::{FromPrimitive, ToPrimitive}; +#[cfg(feature = "json-schema")] +use once_cell::sync::Lazy; +use rand::{ + distributions::{Distribution, Standard}, + seq::SliceRandom, + Rng, +}; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(feature = "json-schema")] +use crate::KEY_HASH_LENGTH; +use crate::{ + account::AccountHash, + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + system::auction::{Bid, EraInfo, UnbondingPurse, WithdrawPurse}, + CLValue, DeployInfo, NamedKey, Transfer, TransferAddr, U128, U256, U512, +}; + +#[derive(FromPrimitive, ToPrimitive, Debug)] +#[repr(u8)] +enum ExecutionResultTag { + Failure = 0, + Success = 1, +} + +impl TryFrom for ExecutionResultTag { + type Error = bytesrepr::Error; + + fn try_from(value: u8) -> Result { + FromPrimitive::from_u8(value).ok_or(bytesrepr::Error::Formatting) + } +} + +#[derive(FromPrimitive, ToPrimitive, Debug)] +#[repr(u8)] +enum OpTag { + Read = 0, + Write = 1, + Add = 2, + NoOp = 3, + Delete = 4, 
+} + +impl TryFrom for OpTag { + type Error = bytesrepr::Error; + + fn try_from(value: u8) -> Result { + FromPrimitive::from_u8(value).ok_or(bytesrepr::Error::Formatting) + } +} + +#[derive(FromPrimitive, ToPrimitive, Debug)] +#[repr(u8)] +enum TransformTag { + Identity = 0, + WriteCLValue = 1, + WriteAccount = 2, + WriteContractWasm = 3, + WriteContract = 4, + WriteContractPackage = 5, + WriteDeployInfo = 6, + WriteTransfer = 7, + WriteEraInfo = 8, + WriteBid = 9, + WriteWithdraw = 10, + AddInt32 = 11, + AddUInt64 = 12, + AddUInt128 = 13, + AddUInt256 = 14, + AddUInt512 = 15, + AddKeys = 16, + Failure = 17, + WriteUnbonding = 18, + Prune = 19, +} + +impl TryFrom for TransformTag { + type Error = bytesrepr::Error; + + fn try_from(value: u8) -> Result { + FromPrimitive::from_u8(value).ok_or(bytesrepr::Error::Formatting) + } +} + +#[cfg(feature = "json-schema")] +static EXECUTION_RESULT: Lazy = Lazy::new(|| { + let operations = vec![ + Operation { + key: "account-hash-2c4a11c062a8a337bfc97e27fd66291caeb2c65865dcb5d3ef3759c4c97efecb" + .to_string(), + kind: OpKind::Write, + }, + Operation { + key: "deploy-af684263911154d26fa05be9963171802801a0b6aff8f199b7391eacb8edc9e1" + .to_string(), + kind: OpKind::Read, + }, + ]; + + let transforms = vec![ + TransformEntry { + key: "uref-2c4a11c062a8a337bfc97e27fd66291caeb2c65865dcb5d3ef3759c4c97efecb-007" + .to_string(), + transform: Transform::AddUInt64(8u64), + }, + TransformEntry { + key: "deploy-af684263911154d26fa05be9963171802801a0b6aff8f199b7391eacb8edc9e1" + .to_string(), + transform: Transform::Identity, + }, + ]; + + let effect = ExecutionEffect { + operations, + transforms, + }; + + let transfers = vec![ + TransferAddr::new([89; KEY_HASH_LENGTH]), + TransferAddr::new([130; KEY_HASH_LENGTH]), + ]; + + ExecutionResult::Success { + effect, + transfers, + cost: U512::from(123_456), + } +}); + +/// The result of executing a single deploy. 
+#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub enum ExecutionResult { + /// The result of a failed execution. + Failure { + /// The effect of executing the deploy. + effect: ExecutionEffect, + /// A record of Transfers performed while executing the deploy. + transfers: Vec, + /// The cost of executing the deploy. + cost: U512, + /// The error message associated with executing the deploy. + error_message: String, + }, + /// The result of a successful execution. + Success { + /// The effect of executing the deploy. + effect: ExecutionEffect, + /// A record of Transfers performed while executing the deploy. + transfers: Vec, + /// The cost of executing the deploy. + cost: U512, + }, +} + +impl ExecutionResult { + // This method is not intended to be used by third party crates. + #[doc(hidden)] + #[cfg(feature = "json-schema")] + pub fn example() -> &'static Self { + &EXECUTION_RESULT + } + + fn tag(&self) -> ExecutionResultTag { + match self { + ExecutionResult::Failure { + effect: _, + transfers: _, + cost: _, + error_message: _, + } => ExecutionResultTag::Failure, + ExecutionResult::Success { + effect: _, + transfers: _, + cost: _, + } => ExecutionResultTag::Success, + } + } +} + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> ExecutionResult { + let op_count = rng.gen_range(0..6); + let mut operations = Vec::new(); + for _ in 0..op_count { + let op = [OpKind::Read, OpKind::Add, OpKind::NoOp, OpKind::Write] + .choose(rng) + .unwrap(); + operations.push(Operation { + key: rng.gen::().to_string(), + kind: *op, + }); + } + + let transform_count = rng.gen_range(0..6); + let mut transforms = Vec::new(); + for _ in 0..transform_count { + transforms.push(TransformEntry { + key: rng.gen::().to_string(), + transform: rng.gen(), + }); + } + + let execution_effect = 
ExecutionEffect::new(transforms); + + let transfer_count = rng.gen_range(0..6); + let mut transfers = vec![]; + for _ in 0..transfer_count { + transfers.push(TransferAddr::new(rng.gen())) + } + + if rng.gen() { + ExecutionResult::Failure { + effect: execution_effect, + transfers, + cost: rng.gen::().into(), + error_message: format!("Error message {}", rng.gen::()), + } + } else { + ExecutionResult::Success { + effect: execution_effect, + transfers, + cost: rng.gen::().into(), + } + } + } +} + +// TODO[goral09]: Add `write_bytes` impl. +impl ToBytes for ExecutionResult { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + let tag_byte = self.tag().to_u8().ok_or(bytesrepr::Error::Formatting)?; + buffer.push(tag_byte); + match self { + ExecutionResult::Failure { + effect, + transfers, + cost, + error_message, + } => { + buffer.extend(effect.to_bytes()?); + buffer.extend(transfers.to_bytes()?); + buffer.extend(cost.to_bytes()?); + buffer.extend(error_message.to_bytes()?); + } + ExecutionResult::Success { + effect, + transfers, + cost, + } => { + buffer.extend(effect.to_bytes()?); + buffer.extend(transfers.to_bytes()?); + buffer.extend(cost.to_bytes()?); + } + } + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + ExecutionResult::Failure { + effect: execution_effect, + transfers, + cost, + error_message, + } => { + execution_effect.serialized_length() + + transfers.serialized_length() + + cost.serialized_length() + + error_message.serialized_length() + } + ExecutionResult::Success { + effect: execution_effect, + transfers, + cost, + } => { + execution_effect.serialized_length() + + transfers.serialized_length() + + cost.serialized_length() + } + } + } +} + +impl FromBytes for ExecutionResult { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match TryFrom::try_from(tag)? 
{ + ExecutionResultTag::Failure => { + let (effect, remainder) = ExecutionEffect::from_bytes(remainder)?; + let (transfers, remainder) = Vec::::from_bytes(remainder)?; + let (cost, remainder) = U512::from_bytes(remainder)?; + let (error_message, remainder) = String::from_bytes(remainder)?; + let execution_result = ExecutionResult::Failure { + effect, + transfers, + cost, + error_message, + }; + Ok((execution_result, remainder)) + } + ExecutionResultTag::Success => { + let (execution_effect, remainder) = ExecutionEffect::from_bytes(remainder)?; + let (transfers, remainder) = Vec::::from_bytes(remainder)?; + let (cost, remainder) = U512::from_bytes(remainder)?; + let execution_result = ExecutionResult::Success { + effect: execution_effect, + transfers, + cost, + }; + Ok((execution_result, remainder)) + } + } + } +} + +/// The journal of execution transforms from a single deploy. +#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Default, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct ExecutionEffect { + /// The resulting operations. + pub operations: Vec, + /// The journal of execution transforms. + pub transforms: Vec, +} + +impl ExecutionEffect { + /// Constructor for [`ExecutionEffect`]. + pub fn new(transforms: Vec) -> Self { + Self { + transforms, + operations: Default::default(), + } + } +} + +// TODO[goral09]: Add `write_bytes` impl. 
+impl ToBytes for ExecutionEffect { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.extend(self.operations.to_bytes()?); + buffer.extend(self.transforms.to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.operations.serialized_length() + self.transforms.serialized_length() + } +} + +impl FromBytes for ExecutionEffect { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (operations, remainder) = Vec::::from_bytes(bytes)?; + let (transforms, remainder) = Vec::::from_bytes(remainder)?; + let json_execution_journal = ExecutionEffect { + operations, + transforms, + }; + Ok((json_execution_journal, remainder)) + } +} + +/// An operation performed while executing a deploy. +#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct Operation { + /// The formatted string of the `Key`. + pub key: String, + /// The type of operation. + pub kind: OpKind, +} + +// TODO[goral09]: Add `write_bytes` impl. +impl ToBytes for Operation { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.extend(self.key.to_bytes()?); + buffer.extend(self.kind.to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.key.serialized_length() + self.kind.serialized_length() + } +} + +impl FromBytes for Operation { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (key, remainder) = String::from_bytes(bytes)?; + let (kind, remainder) = OpKind::from_bytes(remainder)?; + let operation = Operation { key, kind }; + Ok((operation, remainder)) + } +} + +/// The type of operation performed while executing a deploy. 
+#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub enum OpKind { + /// A read operation. + Read, + /// A write operation. + Write, + /// An addition. + Add, + /// An operation which has no effect. + NoOp, + /// A delete operation. + Delete, +} + +impl OpKind { + fn tag(&self) -> OpTag { + match self { + OpKind::Read => OpTag::Read, + OpKind::Write => OpTag::Write, + OpKind::Add => OpTag::Add, + OpKind::NoOp => OpTag::NoOp, + OpKind::Delete => OpTag::Delete, + } + } +} + +// TODO[goral09]: Add `write_bytes` impl. +impl ToBytes for OpKind { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let tag_bytes = self.tag().to_u8().ok_or(bytesrepr::Error::Formatting)?; + tag_bytes.to_bytes() + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + } +} + +impl FromBytes for OpKind { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match TryFrom::try_from(tag)? { + OpTag::Read => Ok((OpKind::Read, remainder)), + OpTag::Write => Ok((OpKind::Write, remainder)), + OpTag::Add => Ok((OpKind::Add, remainder)), + OpTag::NoOp => Ok((OpKind::NoOp, remainder)), + OpTag::Delete => Ok((OpKind::Delete, remainder)), + } + } +} + +/// A transformation performed while executing a deploy. +#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct TransformEntry { + /// The formatted string of the `Key`. + pub key: String, + /// The transformation. + pub transform: Transform, +} + +// TODO[goral09]: Add `write_bytes`. 
+impl ToBytes for TransformEntry { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.extend(self.key.to_bytes()?); + buffer.extend(self.transform.to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.key.serialized_length() + self.transform.serialized_length() + } +} + +impl FromBytes for TransformEntry { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (key, remainder) = String::from_bytes(bytes)?; + let (transform, remainder) = Transform::from_bytes(remainder)?; + let transform_entry = TransformEntry { key, transform }; + Ok((transform_entry, remainder)) + } +} + +/// The actual transformation performed while executing a deploy. +#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub enum Transform { + /// A transform having no effect. + Identity, + /// Writes the given CLValue to global state. + WriteCLValue(CLValue), + /// Writes the given Account to global state. + WriteAccount(AccountHash), + /// Writes a smart contract as Wasm to global state. + WriteContractWasm, + /// Writes a smart contract to global state. + WriteContract, + /// Writes a smart contract package to global state. + WriteContractPackage, + /// Writes the given DeployInfo to global state. + WriteDeployInfo(DeployInfo), + /// Writes the given EraInfo to global state. + WriteEraInfo(EraInfo), + /// Writes the given Transfer to global state. + WriteTransfer(Transfer), + /// Writes the given Bid to global state. + WriteBid(Box), + /// Writes the given Withdraw to global state. + WriteWithdraw(Vec), + /// Adds the given `i32`. + AddInt32(i32), + /// Adds the given `u64`. + AddUInt64(u64), + /// Adds the given `U128`. + AddUInt128(U128), + /// Adds the given `U256`. + AddUInt256(U256), + /// Adds the given `U512`. 
+ AddUInt512(U512), + /// Adds the given collection of named keys. + AddKeys(Vec), + /// A failed transformation, containing an error message. + Failure(String), + /// Writes the given Unbonding to global state. + WriteUnbonding(Vec), + /// Prunes a key. + Prune, +} + +impl Transform { + fn tag(&self) -> TransformTag { + match self { + Transform::Identity => TransformTag::Identity, + Transform::WriteCLValue(_) => TransformTag::WriteCLValue, + Transform::WriteAccount(_) => TransformTag::WriteAccount, + Transform::WriteContractWasm => TransformTag::WriteContractWasm, + Transform::WriteContract => TransformTag::WriteContract, + Transform::WriteContractPackage => TransformTag::WriteContractPackage, + Transform::WriteDeployInfo(_) => TransformTag::WriteDeployInfo, + Transform::WriteEraInfo(_) => TransformTag::WriteEraInfo, + Transform::WriteTransfer(_) => TransformTag::WriteTransfer, + Transform::WriteBid(_) => TransformTag::WriteBid, + Transform::WriteWithdraw(_) => TransformTag::WriteWithdraw, + Transform::AddInt32(_) => TransformTag::AddInt32, + Transform::AddUInt64(_) => TransformTag::AddUInt64, + Transform::AddUInt128(_) => TransformTag::AddUInt128, + Transform::AddUInt256(_) => TransformTag::AddUInt256, + Transform::AddUInt512(_) => TransformTag::AddUInt512, + Transform::AddKeys(_) => TransformTag::AddKeys, + Transform::Failure(_) => TransformTag::Failure, + Transform::WriteUnbonding(_) => TransformTag::WriteUnbonding, + Transform::Prune => TransformTag::Prune, + } + } +} + +// TODO[goral09]: Add `write_bytes` impl. 
+impl ToBytes for Transform { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + let tag_bytes = self.tag().to_u8().ok_or(bytesrepr::Error::Formatting)?; + buffer.insert(0, tag_bytes); + match self { + Transform::Identity => {} + Transform::WriteCLValue(value) => { + buffer.extend(value.to_bytes()?); + } + Transform::WriteAccount(account_hash) => { + buffer.extend(account_hash.to_bytes()?); + } + Transform::WriteContractWasm => {} + Transform::WriteContract => {} + Transform::WriteContractPackage => {} + Transform::WriteDeployInfo(deploy_info) => { + buffer.extend(deploy_info.to_bytes()?); + } + Transform::WriteEraInfo(era_info) => { + buffer.extend(era_info.to_bytes()?); + } + Transform::WriteTransfer(transfer) => { + buffer.extend(transfer.to_bytes()?); + } + Transform::WriteBid(bid) => { + buffer.extend(bid.to_bytes()?); + } + Transform::WriteWithdraw(unbonding_purses) => { + buffer.extend(unbonding_purses.to_bytes()?); + } + Transform::AddInt32(value) => { + buffer.extend(value.to_bytes()?); + } + Transform::AddUInt64(value) => { + buffer.extend(value.to_bytes()?); + } + Transform::AddUInt128(value) => { + buffer.extend(value.to_bytes()?); + } + Transform::AddUInt256(value) => { + buffer.extend(value.to_bytes()?); + } + Transform::AddUInt512(value) => { + buffer.extend(value.to_bytes()?); + } + Transform::AddKeys(value) => { + buffer.extend(value.to_bytes()?); + } + Transform::Failure(value) => { + buffer.extend(value.to_bytes()?); + } + Transform::WriteUnbonding(value) => { + buffer.extend(value.to_bytes()?); + } + Transform::Prune => {} + } + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + let body_len = match self { + Transform::WriteCLValue(value) => value.serialized_length(), + Transform::WriteAccount(value) => value.serialized_length(), + Transform::WriteDeployInfo(value) => value.serialized_length(), + Transform::WriteEraInfo(value) => value.serialized_length(), + 
Transform::WriteTransfer(value) => value.serialized_length(), + Transform::AddInt32(value) => value.serialized_length(), + Transform::AddUInt64(value) => value.serialized_length(), + Transform::AddUInt128(value) => value.serialized_length(), + Transform::AddUInt256(value) => value.serialized_length(), + Transform::AddUInt512(value) => value.serialized_length(), + Transform::AddKeys(value) => value.serialized_length(), + Transform::Failure(value) => value.serialized_length(), + Transform::Identity + | Transform::WriteContractWasm + | Transform::WriteContract + | Transform::WriteContractPackage => 0, + Transform::WriteBid(value) => value.serialized_length(), + Transform::WriteWithdraw(value) => value.serialized_length(), + Transform::WriteUnbonding(value) => value.serialized_length(), + Transform::Prune => 0, + }; + U8_SERIALIZED_LENGTH + body_len + } +} + +impl FromBytes for Transform { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match TryFrom::try_from(tag)? 
{ + TransformTag::Identity => Ok((Transform::Identity, remainder)), + TransformTag::WriteCLValue => { + let (cl_value, remainder) = CLValue::from_bytes(remainder)?; + Ok((Transform::WriteCLValue(cl_value), remainder)) + } + TransformTag::WriteAccount => { + let (account_hash, remainder) = AccountHash::from_bytes(remainder)?; + Ok((Transform::WriteAccount(account_hash), remainder)) + } + TransformTag::WriteContractWasm => Ok((Transform::WriteContractWasm, remainder)), + TransformTag::WriteContract => Ok((Transform::WriteContract, remainder)), + TransformTag::WriteContractPackage => Ok((Transform::WriteContractPackage, remainder)), + TransformTag::WriteDeployInfo => { + let (deploy_info, remainder) = DeployInfo::from_bytes(remainder)?; + Ok((Transform::WriteDeployInfo(deploy_info), remainder)) + } + TransformTag::WriteEraInfo => { + let (era_info, remainder) = EraInfo::from_bytes(remainder)?; + Ok((Transform::WriteEraInfo(era_info), remainder)) + } + TransformTag::WriteTransfer => { + let (transfer, remainder) = Transfer::from_bytes(remainder)?; + Ok((Transform::WriteTransfer(transfer), remainder)) + } + TransformTag::AddInt32 => { + let (value_i32, remainder) = i32::from_bytes(remainder)?; + Ok((Transform::AddInt32(value_i32), remainder)) + } + TransformTag::AddUInt64 => { + let (value_u64, remainder) = u64::from_bytes(remainder)?; + Ok((Transform::AddUInt64(value_u64), remainder)) + } + TransformTag::AddUInt128 => { + let (value_u128, remainder) = U128::from_bytes(remainder)?; + Ok((Transform::AddUInt128(value_u128), remainder)) + } + TransformTag::AddUInt256 => { + let (value_u256, remainder) = U256::from_bytes(remainder)?; + Ok((Transform::AddUInt256(value_u256), remainder)) + } + TransformTag::AddUInt512 => { + let (value_u512, remainder) = U512::from_bytes(remainder)?; + Ok((Transform::AddUInt512(value_u512), remainder)) + } + TransformTag::AddKeys => { + let (value, remainder) = Vec::::from_bytes(remainder)?; + Ok((Transform::AddKeys(value), remainder)) + } + 
TransformTag::Failure => { + let (value, remainder) = String::from_bytes(remainder)?; + Ok((Transform::Failure(value), remainder)) + } + TransformTag::WriteBid => { + let (bid, remainder) = Bid::from_bytes(remainder)?; + Ok((Transform::WriteBid(Box::new(bid)), remainder)) + } + TransformTag::WriteWithdraw => { + let (withdraw_purses, remainder) = + as FromBytes>::from_bytes(remainder)?; + Ok((Transform::WriteWithdraw(withdraw_purses), remainder)) + } + TransformTag::WriteUnbonding => { + let (unbonding_purses, remainder) = + as FromBytes>::from_bytes(remainder)?; + Ok((Transform::WriteUnbonding(unbonding_purses), remainder)) + } + TransformTag::Prune => Ok((Transform::Prune, remainder)), + } + } +} + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> Transform { + // TODO - include WriteDeployInfo and WriteTransfer as options + match rng.gen_range(0..14) { + 0 => Transform::Identity, + 1 => Transform::WriteCLValue(CLValue::from_t(true).unwrap()), + 2 => Transform::WriteAccount(AccountHash::new(rng.gen())), + 3 => Transform::WriteContractWasm, + 4 => Transform::WriteContract, + 5 => Transform::WriteContractPackage, + 6 => Transform::AddInt32(rng.gen()), + 7 => Transform::AddUInt64(rng.gen()), + 8 => Transform::AddUInt128(rng.gen::().into()), + 9 => Transform::AddUInt256(rng.gen::().into()), + 10 => Transform::AddUInt512(rng.gen::().into()), + 11 => { + let mut named_keys = Vec::new(); + for _ in 0..rng.gen_range(1..6) { + named_keys.push(NamedKey { + name: rng.gen::().to_string(), + key: rng.gen::().to_string(), + }); + } + Transform::AddKeys(named_keys) + } + 12 => Transform::Failure(rng.gen::().to_string()), + 13 => Transform::Prune, + _ => unreachable!(), + } + } +} + +#[cfg(test)] +mod tests { + use rand::{rngs::SmallRng, Rng, SeedableRng}; + + use super::*; + + fn get_rng() -> SmallRng { + let mut seed = [0u8; 32]; + getrandom::getrandom(seed.as_mut()).unwrap(); + SmallRng::from_seed(seed) + } + + #[test] + fn bytesrepr_test_transform() { + 
let mut rng = get_rng(); + let transform: Transform = rng.gen(); + bytesrepr::test_serialization_roundtrip(&transform); + } + + #[test] + fn bytesrepr_test_execution_result() { + let mut rng = get_rng(); + let execution_result: ExecutionResult = rng.gen(); + bytesrepr::test_serialization_roundtrip(&execution_result); + } +} diff --git a/casper_types/src/file_utils.rs b/casper_types/src/file_utils.rs new file mode 100644 index 00000000..775a7315 --- /dev/null +++ b/casper_types/src/file_utils.rs @@ -0,0 +1,77 @@ +//! Utilities for handling reading from and writing to files. + +use std::{ + fs, + io::{self, Write}, + os::unix::fs::OpenOptionsExt, + path::{Path, PathBuf}, +}; + +use thiserror::Error; + +/// Error reading a file. +#[derive(Debug, Error)] +#[error("could not read '{0}': {error}", .path.display())] +pub struct ReadFileError { + /// Path that failed to be read. + path: PathBuf, + /// The underlying OS error. + #[source] + error: io::Error, +} + +/// Error writing a file +#[derive(Debug, Error)] +#[error("could not write to '{0}': {error}", .path.display())] +pub struct WriteFileError { + /// Path that failed to be written to. + path: PathBuf, + /// The underlying OS error. + #[source] + error: io::Error, +} + +/// Read complete at `path` into memory. +/// +/// Wraps `fs::read`, but preserves the filename for better error printing. +pub fn read_file<P: AsRef<Path>>(filename: P) -> Result<Vec<u8>, ReadFileError> { + let path = filename.as_ref(); + fs::read(path).map_err(|error| ReadFileError { + path: path.to_owned(), + error, + }) +} + +/// Write data to `path`. +/// +/// Wraps `fs::write`, but preserves the filename for better error printing. +pub(crate) fn write_file<P: AsRef<Path>, B: AsRef<[u8]>>( + filename: P, + data: B, +) -> Result<(), WriteFileError> { + let path = filename.as_ref(); + fs::write(path, data.as_ref()).map_err(|error| WriteFileError { + path: path.to_owned(), + error, + }) +} + +/// Writes data to `path`, ensuring only the owner can read or write it.
+/// +/// Otherwise functions like [`write_file`]. +pub(crate) fn write_private_file<P: AsRef<Path>, B: AsRef<[u8]>>( + filename: P, + data: B, +) -> Result<(), WriteFileError> { + let path = filename.as_ref(); + fs::OpenOptions::new() + .write(true) + .create(true) + .mode(0o600) + .open(path) + .and_then(|mut file| file.write_all(data.as_ref())) + .map_err(|error| WriteFileError { + path: path.to_owned(), + error, + }) +} diff --git a/casper_types/src/gas.rs b/casper_types/src/gas.rs new file mode 100644 index 00000000..0d0d1a40 --- /dev/null +++ b/casper_types/src/gas.rs @@ -0,0 +1,232 @@ +//! The `gas` module is used for working with Gas including converting to and from Motes. + +use core::{ + fmt, + iter::Sum, + ops::{Add, AddAssign, Div, Mul, Sub}, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use num::Zero; +use serde::{Deserialize, Serialize}; + +use crate::{Motes, U512}; + +/// The `Gas` struct represents a `U512` amount of gas. +#[derive(Debug, Default, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct Gas(U512); + +impl Gas { + /// Constructs a new `Gas`. + pub fn new(value: U512) -> Self { + Gas(value) + } + + /// Returns the inner `U512` value. + pub fn value(&self) -> U512 { + self.0 + } + + /// Converts the given `motes` to `Gas` by dividing them by `conv_rate`. + /// + /// Returns `None` if `conv_rate == 0`. + pub fn from_motes(motes: Motes, conv_rate: u64) -> Option<Self> { + motes + .value() + .checked_div(U512::from(conv_rate)) + .map(Self::new) + } + + /// Checked integer addition. Computes `self + rhs`, returning `None` if overflow occurred. + pub fn checked_add(&self, rhs: Self) -> Option<Self> { + self.0.checked_add(rhs.value()).map(Self::new) + } + + /// Checked integer subtraction. Computes `self - rhs`, returning `None` if overflow occurred.
+ pub fn checked_sub(&self, rhs: Self) -> Option<Self> { + self.0.checked_sub(rhs.value()).map(Self::new) + } +} + +impl fmt::Display for Gas { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:?}", self.0) + } +} + +impl Add for Gas { + type Output = Gas; + + fn add(self, rhs: Self) -> Self::Output { + let val = self.value() + rhs.value(); + Gas::new(val) + } +} + +impl Sub for Gas { + type Output = Gas; + + fn sub(self, rhs: Self) -> Self::Output { + let val = self.value() - rhs.value(); + Gas::new(val) + } +} + +impl Div for Gas { + type Output = Gas; + + fn div(self, rhs: Self) -> Self::Output { + let val = self.value() / rhs.value(); + Gas::new(val) + } +} + +impl Mul for Gas { + type Output = Gas; + + fn mul(self, rhs: Self) -> Self::Output { + let val = self.value() * rhs.value(); + Gas::new(val) + } +} + +impl AddAssign for Gas { + fn add_assign(&mut self, rhs: Self) { + self.0 += rhs.0 + } +} + +impl Zero for Gas { + fn zero() -> Self { + Gas::new(U512::zero()) + } + + fn is_zero(&self) -> bool { + self.0.is_zero() + } +} + +impl Sum for Gas { + fn sum<I: Iterator<Item = Gas>>(iter: I) -> Self { + iter.fold(Gas::zero(), Add::add) + } +} + +impl From<u32> for Gas { + fn from(gas: u32) -> Self { + let gas_u512: U512 = gas.into(); + Gas::new(gas_u512) + } +} + +impl From<u64> for Gas { + fn from(gas: u64) -> Self { + let gas_u512: U512 = gas.into(); + Gas::new(gas_u512) + } +} + +#[cfg(test)] +mod tests { + use crate::U512; + + use crate::{Gas, Motes}; + + #[test] + fn should_be_able_to_get_instance_of_gas() { + let initial_value = 1; + let gas = Gas::new(U512::from(initial_value)); + assert_eq!( + initial_value, + gas.value().as_u64(), + "should have equal value" + ) + } + + #[test] + fn should_be_able_to_compare_two_instances_of_gas() { + let left_gas = Gas::new(U512::from(1)); + let right_gas = Gas::new(U512::from(1)); + assert_eq!(left_gas, right_gas, "should be equal"); + let right_gas = Gas::new(U512::from(2)); + assert_ne!(left_gas, right_gas, "should not be equal") + }
+ + #[test] + fn should_be_able_to_add_two_instances_of_gas() { + let left_gas = Gas::new(U512::from(1)); + let right_gas = Gas::new(U512::from(1)); + let expected_gas = Gas::new(U512::from(2)); + assert_eq!((left_gas + right_gas), expected_gas, "should be equal") + } + + #[test] + fn should_be_able_to_subtract_two_instances_of_gas() { + let left_gas = Gas::new(U512::from(1)); + let right_gas = Gas::new(U512::from(1)); + let expected_gas = Gas::new(U512::from(0)); + assert_eq!((left_gas - right_gas), expected_gas, "should be equal") + } + + #[test] + fn should_be_able_to_multiply_two_instances_of_gas() { + let left_gas = Gas::new(U512::from(100)); + let right_gas = Gas::new(U512::from(10)); + let expected_gas = Gas::new(U512::from(1000)); + assert_eq!((left_gas * right_gas), expected_gas, "should be equal") + } + + #[test] + fn should_be_able_to_divide_two_instances_of_gas() { + let left_gas = Gas::new(U512::from(1000)); + let right_gas = Gas::new(U512::from(100)); + let expected_gas = Gas::new(U512::from(10)); + assert_eq!((left_gas / right_gas), expected_gas, "should be equal") + } + + #[test] + fn should_be_able_to_convert_from_mote() { + let mote = Motes::new(U512::from(100)); + let gas = Gas::from_motes(mote, 10).expect("should have gas"); + let expected_gas = Gas::new(U512::from(10)); + assert_eq!(gas, expected_gas, "should be equal") + } + + #[test] + fn should_be_able_to_default() { + let gas = Gas::default(); + let expected_gas = Gas::new(U512::from(0)); + assert_eq!(gas, expected_gas, "should be equal") + } + + #[test] + fn should_be_able_to_compare_relative_value() { + let left_gas = Gas::new(U512::from(100)); + let right_gas = Gas::new(U512::from(10)); + assert!(left_gas > right_gas, "should be gt"); + let right_gas = Gas::new(U512::from(100)); + assert!(left_gas >= right_gas, "should be gte"); + assert!(left_gas <= right_gas, "should be lte"); + let left_gas = Gas::new(U512::from(10)); + assert!(left_gas < right_gas, "should be lt"); + } + + #[test] + 
fn should_default() { + let left_gas = Gas::new(U512::from(0)); + let right_gas = Gas::default(); + assert_eq!(left_gas, right_gas, "should be equal"); + let u512 = U512::zero(); + assert_eq!(left_gas.value(), u512, "should be equal"); + } + + #[test] + fn should_support_checked_div_from_motes() { + let motes = Motes::new(U512::zero()); + let conv_rate = 0; + let maybe = Gas::from_motes(motes, conv_rate); + assert!(maybe.is_none(), "should be none due to divide by zero"); + } +} diff --git a/casper_types/src/gens.rs b/casper_types/src/gens.rs new file mode 100644 index 00000000..94b3733c --- /dev/null +++ b/casper_types/src/gens.rs @@ -0,0 +1,531 @@ +//! Contains functions for generating arbitrary values for use by +//! [`Proptest`](https://crates.io/crates/proptest). +#![allow(missing_docs)] + +use alloc::{boxed::Box, string::String, vec}; + +use proptest::{ + array, bits, bool, + collection::{self, SizeRange}, + option, + prelude::*, + result, +}; + +use crate::{ + account::{gens::account_arb, AccountHash, Weight}, + contracts::{ + ContractPackageStatus, ContractVersions, DisabledVersions, Groups, NamedKeys, Parameters, + }, + crypto::gens::public_key_arb_no_system, + system::auction::{ + gens::era_info_arb, Bid, DelegationRate, Delegator, UnbondingPurse, WithdrawPurse, + DELEGATION_RATE_DENOMINATOR, + }, + transfer::TransferAddr, + AccessRights, CLType, CLValue, Contract, ContractHash, ContractPackage, ContractVersionKey, + ContractWasm, EntryPoint, EntryPointAccess, EntryPointType, EntryPoints, EraId, Group, Key, + NamedArg, Parameter, Phase, ProtocolVersion, SemVer, StoredValue, URef, U128, U256, U512, +}; + +use crate::deploy_info::gens::{deploy_hash_arb, transfer_addr_arb}; +pub use crate::{deploy_info::gens::deploy_info_arb, transfer::gens::transfer_arb}; + +pub fn u8_slice_32() -> impl Strategy { + collection::vec(any::(), 32).prop_map(|b| { + let mut res = [0u8; 32]; + res.clone_from_slice(b.as_slice()); + res + }) +} + +pub fn u2_slice_32() -> impl 
Strategy { + array::uniform32(any::()).prop_map(|mut arr| { + for byte in arr.iter_mut() { + *byte &= 0b11; + } + arr + }) +} + +pub fn named_keys_arb(depth: usize) -> impl Strategy { + collection::btree_map("\\PC*", key_arb(), depth) +} + +pub fn access_rights_arb() -> impl Strategy { + prop_oneof![ + Just(AccessRights::NONE), + Just(AccessRights::READ), + Just(AccessRights::ADD), + Just(AccessRights::WRITE), + Just(AccessRights::READ_ADD), + Just(AccessRights::READ_WRITE), + Just(AccessRights::ADD_WRITE), + Just(AccessRights::READ_ADD_WRITE), + ] +} + +pub fn phase_arb() -> impl Strategy { + prop_oneof![ + Just(Phase::Payment), + Just(Phase::Session), + Just(Phase::FinalizePayment), + ] +} + +pub fn uref_arb() -> impl Strategy { + (array::uniform32(bits::u8::ANY), access_rights_arb()) + .prop_map(|(id, access_rights)| URef::new(id, access_rights)) +} + +pub fn era_id_arb() -> impl Strategy { + any::().prop_map(EraId::from) +} + +pub fn key_arb() -> impl Strategy { + prop_oneof![ + account_hash_arb().prop_map(Key::Account), + u8_slice_32().prop_map(Key::Hash), + uref_arb().prop_map(Key::URef), + transfer_addr_arb().prop_map(Key::Transfer), + deploy_hash_arb().prop_map(Key::DeployInfo), + era_id_arb().prop_map(Key::EraInfo), + uref_arb().prop_map(|uref| Key::Balance(uref.addr())), + account_hash_arb().prop_map(Key::Bid), + account_hash_arb().prop_map(Key::Withdraw), + u8_slice_32().prop_map(Key::Dictionary), + Just(Key::EraSummary), + ] +} + +pub fn colliding_key_arb() -> impl Strategy { + prop_oneof![ + u2_slice_32().prop_map(|bytes| Key::Account(AccountHash::new(bytes))), + u2_slice_32().prop_map(Key::Hash), + u2_slice_32().prop_map(|bytes| Key::URef(URef::new(bytes, AccessRights::NONE))), + u2_slice_32().prop_map(|bytes| Key::Transfer(TransferAddr::new(bytes))), + u2_slice_32().prop_map(Key::Dictionary), + ] +} + +pub fn account_hash_arb() -> impl Strategy { + u8_slice_32().prop_map(AccountHash::new) +} + +pub fn weight_arb() -> impl Strategy { + 
any::().prop_map(Weight::new) +} + +pub fn sem_ver_arb() -> impl Strategy { + (any::(), any::(), any::()) + .prop_map(|(major, minor, patch)| SemVer::new(major, minor, patch)) +} + +pub fn protocol_version_arb() -> impl Strategy { + sem_ver_arb().prop_map(ProtocolVersion::new) +} + +pub fn u128_arb() -> impl Strategy { + collection::vec(any::(), 0..16).prop_map(|b| U128::from_little_endian(b.as_slice())) +} + +pub fn u256_arb() -> impl Strategy { + collection::vec(any::(), 0..32).prop_map(|b| U256::from_little_endian(b.as_slice())) +} + +pub fn u512_arb() -> impl Strategy { + prop_oneof![ + 1 => Just(U512::zero()), + 8 => collection::vec(any::(), 0..64).prop_map(|b| U512::from_little_endian(b.as_slice())), + 1 => Just(U512::MAX), + ] +} + +pub fn cl_simple_type_arb() -> impl Strategy { + prop_oneof![ + Just(CLType::Bool), + Just(CLType::I32), + Just(CLType::I64), + Just(CLType::U8), + Just(CLType::U32), + Just(CLType::U64), + Just(CLType::U128), + Just(CLType::U256), + Just(CLType::U512), + Just(CLType::Unit), + Just(CLType::String), + Just(CLType::Key), + Just(CLType::URef), + ] +} + +pub fn cl_type_arb() -> impl Strategy { + cl_simple_type_arb().prop_recursive(4, 16, 8, |element| { + prop_oneof![ + // We want to produce basic types too + element.clone(), + // For complex type + element + .clone() + .prop_map(|val| CLType::Option(Box::new(val))), + element.clone().prop_map(|val| CLType::List(Box::new(val))), + // Realistic Result type generator: ok is anything recursive, err is simple type + (element.clone(), cl_simple_type_arb()).prop_map(|(ok, err)| CLType::Result { + ok: Box::new(ok), + err: Box::new(err) + }), + // Realistic Map type generator: key is simple type, value is complex recursive type + (cl_simple_type_arb(), element.clone()).prop_map(|(key, value)| CLType::Map { + key: Box::new(key), + value: Box::new(value) + }), + // Various tuples + element + .clone() + .prop_map(|cl_type| CLType::Tuple1([Box::new(cl_type)])), + (element.clone(), 
element.clone()).prop_map(|(cl_type1, cl_type2)| CLType::Tuple2([ + Box::new(cl_type1), + Box::new(cl_type2) + ])), + (element.clone(), element.clone(), element).prop_map( + |(cl_type1, cl_type2, cl_type3)| CLType::Tuple3([ + Box::new(cl_type1), + Box::new(cl_type2), + Box::new(cl_type3) + ]) + ), + ] + }) +} + +pub fn cl_value_arb() -> impl Strategy { + // If compiler brings you here it most probably means you've added a variant to `CLType` enum + // but forgot to add generator for it. + let stub: Option = None; + if let Some(cl_type) = stub { + match cl_type { + CLType::Bool + | CLType::I32 + | CLType::I64 + | CLType::U8 + | CLType::U32 + | CLType::U64 + | CLType::U128 + | CLType::U256 + | CLType::U512 + | CLType::Unit + | CLType::String + | CLType::Key + | CLType::URef + | CLType::PublicKey + | CLType::Option(_) + | CLType::List(_) + | CLType::ByteArray(..) + | CLType::Result { .. } + | CLType::Map { .. } + | CLType::Tuple1(_) + | CLType::Tuple2(_) + | CLType::Tuple3(_) + | CLType::Any => (), + } + }; + + prop_oneof![ + Just(CLValue::from_t(()).expect("should create CLValue")), + any::().prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + any::().prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + any::().prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + any::().prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + any::().prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + any::().prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + u128_arb().prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + u256_arb().prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + u512_arb().prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + key_arb().prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + uref_arb().prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + ".*".prop_map(|x: String| 
CLValue::from_t(x).expect("should create CLValue")), + option::of(any::()).prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + collection::vec(uref_arb(), 0..100) + .prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + result::maybe_err(key_arb(), ".*") + .prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + collection::btree_map(".*", u512_arb(), 0..100) + .prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + (any::()).prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + (any::(), any::()) + .prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + (any::(), any::(), any::()) + .prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + // Fixed lists of any size + any::().prop_map(|len| CLValue::from_t([len; 32]).expect("should create CLValue")), + ] +} + +pub fn result_arb() -> impl Strategy> { + result::maybe_ok(any::(), any::()) +} + +pub fn named_args_arb() -> impl Strategy { + (".*", cl_value_arb()).prop_map(|(name, value)| NamedArg::new(name, value)) +} + +pub fn group_arb() -> impl Strategy { + ".*".prop_map(Group::new) +} + +pub fn entry_point_access_arb() -> impl Strategy { + prop_oneof![ + Just(EntryPointAccess::Public), + collection::vec(group_arb(), 0..32).prop_map(EntryPointAccess::Groups), + ] +} + +pub fn entry_point_type_arb() -> impl Strategy { + prop_oneof![ + Just(EntryPointType::Session), + Just(EntryPointType::Contract), + ] +} + +pub fn parameter_arb() -> impl Strategy { + (".*", cl_type_arb()).prop_map(|(name, cl_type)| Parameter::new(name, cl_type)) +} + +pub fn parameters_arb() -> impl Strategy { + collection::vec(parameter_arb(), 0..10) +} + +pub fn entry_point_arb() -> impl Strategy { + ( + ".*", + parameters_arb(), + entry_point_type_arb(), + entry_point_access_arb(), + cl_type_arb(), + ) + .prop_map( + |(name, parameters, entry_point_type, entry_point_access, ret)| { + EntryPoint::new(name, parameters, ret, entry_point_access, 
entry_point_type) + }, + ) +} + +pub fn entry_points_arb() -> impl Strategy { + collection::vec(entry_point_arb(), 1..10).prop_map(EntryPoints::from) +} + +pub fn contract_arb() -> impl Strategy { + ( + protocol_version_arb(), + entry_points_arb(), + u8_slice_32(), + u8_slice_32(), + named_keys_arb(20), + ) + .prop_map( + |( + protocol_version, + entry_points, + contract_package_hash_arb, + contract_wasm_hash, + named_keys, + )| { + Contract::new( + contract_package_hash_arb.into(), + contract_wasm_hash.into(), + named_keys, + entry_points, + protocol_version, + ) + }, + ) +} + +pub fn contract_wasm_arb() -> impl Strategy { + collection::vec(any::(), 1..1000).prop_map(ContractWasm::new) +} + +pub fn contract_version_key_arb() -> impl Strategy { + (1..32u32, 1..1000u32) + .prop_map(|(major, contract_ver)| ContractVersionKey::new(major, contract_ver)) +} + +pub fn contract_versions_arb() -> impl Strategy { + collection::btree_map( + contract_version_key_arb(), + u8_slice_32().prop_map(ContractHash::new), + 1..5, + ) +} + +pub fn disabled_versions_arb() -> impl Strategy { + collection::btree_set(contract_version_key_arb(), 0..5) +} + +pub fn groups_arb() -> impl Strategy { + collection::btree_map(group_arb(), collection::btree_set(uref_arb(), 1..10), 0..5) +} + +pub fn contract_package_arb() -> impl Strategy { + ( + uref_arb(), + contract_versions_arb(), + disabled_versions_arb(), + groups_arb(), + ) + .prop_map(|(access_key, versions, disabled_versions, groups)| { + ContractPackage::new( + access_key, + versions, + disabled_versions, + groups, + ContractPackageStatus::default(), + ) + }) +} + +fn delegator_arb() -> impl Strategy { + ( + public_key_arb_no_system(), + u512_arb(), + uref_arb(), + public_key_arb_no_system(), + ) + .prop_map( + |(delegator_pk, staked_amount, bonding_purse, validator_pk)| { + Delegator::unlocked(delegator_pk, staked_amount, bonding_purse, validator_pk) + }, + ) +} + +fn delegation_rate_arb() -> impl Strategy { + 
0..=DELEGATION_RATE_DENOMINATOR // Maximum, allowed value for delegation rate. +} + +pub(crate) fn bid_arb(delegations_len: impl Into) -> impl Strategy { + ( + public_key_arb_no_system(), + uref_arb(), + u512_arb(), + delegation_rate_arb(), + bool::ANY, + collection::vec(delegator_arb(), delegations_len), + ) + .prop_map( + |( + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + is_locked, + new_delegators, + )| { + let mut bid = if is_locked { + Bid::locked( + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + 1u64, + ) + } else { + Bid::unlocked( + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + ) + }; + let delegators = bid.delegators_mut(); + new_delegators.into_iter().for_each(|delegator| { + assert!(delegators + .insert(delegator.delegator_public_key().clone(), delegator) + .is_none()); + }); + bid + }, + ) +} + +fn withdraw_arb() -> impl Strategy { + ( + uref_arb(), + public_key_arb_no_system(), + public_key_arb_no_system(), + era_id_arb(), + u512_arb(), + ) + .prop_map(|(bonding_purse, validator_pk, unbonder_pk, era, amount)| { + WithdrawPurse::new(bonding_purse, validator_pk, unbonder_pk, era, amount) + }) +} + +fn withdraws_arb(size: impl Into) -> impl Strategy> { + collection::vec(withdraw_arb(), size) +} + +fn unbonding_arb() -> impl Strategy { + ( + uref_arb(), + public_key_arb_no_system(), + public_key_arb_no_system(), + era_id_arb(), + u512_arb(), + option::of(public_key_arb_no_system()), + ) + .prop_map( + |( + bonding_purse, + validator_public_key, + unbonder_public_key, + era, + amount, + new_validator, + )| { + UnbondingPurse::new( + bonding_purse, + validator_public_key, + unbonder_public_key, + era, + amount, + new_validator, + ) + }, + ) +} + +fn unbondings_arb(size: impl Into) -> impl Strategy> { + collection::vec(unbonding_arb(), size) +} + +pub fn stored_value_arb() -> impl Strategy { + prop_oneof![ + cl_value_arb().prop_map(StoredValue::CLValue), + 
account_arb().prop_map(StoredValue::Account), + contract_wasm_arb().prop_map(StoredValue::ContractWasm), + contract_arb().prop_map(StoredValue::Contract), + contract_package_arb().prop_map(StoredValue::ContractPackage), + transfer_arb().prop_map(StoredValue::Transfer), + deploy_info_arb().prop_map(StoredValue::DeployInfo), + era_info_arb(1..10).prop_map(StoredValue::EraInfo), + bid_arb(0..100).prop_map(|bid| StoredValue::Bid(Box::new(bid))), + withdraws_arb(1..50).prop_map(StoredValue::Withdraw), + unbondings_arb(1..50).prop_map(StoredValue::Unbonding) + ] + .prop_map(|stored_value| + // The following match statement is here only to make sure + // we don't forget to update the generator when a new variant is added. + match stored_value { + StoredValue::CLValue(_) => stored_value, + StoredValue::Account(_) => stored_value, + StoredValue::ContractWasm(_) => stored_value, + StoredValue::Contract(_) => stored_value, + StoredValue::ContractPackage(_) => stored_value, + StoredValue::Transfer(_) => stored_value, + StoredValue::DeployInfo(_) => stored_value, + StoredValue::EraInfo(_) => stored_value, + StoredValue::Bid(_) => stored_value, + StoredValue::Withdraw(_) => stored_value, + StoredValue::Unbonding(_) => stored_value, + }) +} diff --git a/casper_types/src/json_pretty_printer.rs b/casper_types/src/json_pretty_printer.rs new file mode 100644 index 00000000..3648d38c --- /dev/null +++ b/casper_types/src/json_pretty_printer.rs @@ -0,0 +1,291 @@ +extern crate alloc; + +use alloc::{format, string::String, vec::Vec}; + +use serde::Serialize; +use serde_json::{json, Value}; + +const MAX_STRING_LEN: usize = 150; + +/// Represents the information about a substring found in a string. +#[derive(Debug)] +struct SubstringSpec { + /// Index of the first character. + start_index: usize, + /// Length of the substring. + length: usize, +} + +impl SubstringSpec { + /// Constructs a new StringSpec with the given start index and length. 
+ fn new(start_index: usize, length: usize) -> Self { + Self { + start_index, + length, + } + } +} + +/// Serializes the given data structure as a pretty-printed `String` of JSON using +/// `serde_json::to_string_pretty()`, but after first reducing any large hex-string values. +/// +/// A large hex-string is one containing only hex characters and which is over `MAX_STRING_LEN`. +/// Such hex-strings will be replaced by an indication of the number of chars redacted, for example +/// `[130 hex chars]`. +pub fn json_pretty_print(value: &T) -> serde_json::Result +where + T: ?Sized + Serialize, +{ + let mut json_value = json!(value); + shorten_string_field(&mut json_value); + + serde_json::to_string_pretty(&json_value) +} + +/// Searches the given string for all occurrences of hex substrings +/// that are longer than the specified `max_len`. +fn find_hex_strings_longer_than(string: &str, max_len: usize) -> Vec { + let mut ranges_to_remove = Vec::new(); + let mut start_index = 0; + let mut contiguous_hex_count = 0; + + // Record all large hex-strings' start positions and lengths. + for (index, char) in string.char_indices() { + if char.is_ascii_hexdigit() { + if contiguous_hex_count == 0 { + // This is the start of a new hex-string. + start_index = index; + } + contiguous_hex_count += 1; + } else if contiguous_hex_count != 0 { + // This is the end of a hex-string: if it's too long, record it. + if contiguous_hex_count > max_len { + ranges_to_remove.push(SubstringSpec::new(start_index, contiguous_hex_count)); + } + contiguous_hex_count = 0; + } + } + // If the string contains a large hex-string at the end, record it now. + if contiguous_hex_count > max_len { + ranges_to_remove.push(SubstringSpec::new(start_index, contiguous_hex_count)); + } + ranges_to_remove +} + +fn shorten_string_field(value: &mut Value) { + match value { + Value::String(string) => { + // Iterate over the ranges to remove from last to first so each + // replacement start index remains valid. 
+ find_hex_strings_longer_than(string, MAX_STRING_LEN) + .into_iter() + .rev() + .for_each( + |SubstringSpec { + start_index, + length, + }| { + let range = start_index..(start_index + length); + string.replace_range(range, &format!("[{} hex chars]", length)); + }, + ) + } + Value::Array(values) => { + for value in values { + shorten_string_field(value); + } + } + Value::Object(map) => { + for map_value in map.values_mut() { + shorten_string_field(map_value); + } + } + Value::Null | Value::Bool(_) | Value::Number(_) => {} + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn hex_string(length: usize) -> String { + "0123456789abcdef".chars().cycle().take(length).collect() + } + + impl PartialEq<(usize, usize)> for SubstringSpec { + fn eq(&self, other: &(usize, usize)) -> bool { + self.start_index == other.0 && self.length == other.1 + } + } + + #[test] + fn finds_hex_strings_longer_than() { + const TESTING_LEN: usize = 3; + + let input = "01234"; + let expected = vec![(0, 5)]; + let actual = find_hex_strings_longer_than(input, TESTING_LEN); + assert_eq!(actual, expected); + + let input = "01234-0123"; + let expected = vec![(0, 5), (6, 4)]; + let actual = find_hex_strings_longer_than(input, TESTING_LEN); + assert_eq!(actual, expected); + + let input = "012-34-0123"; + let expected = vec![(7, 4)]; + let actual = find_hex_strings_longer_than(input, TESTING_LEN); + assert_eq!(actual, expected); + + let input = "012-34-01-23"; + let expected: Vec<(usize, usize)> = vec![]; + let actual = find_hex_strings_longer_than(input, TESTING_LEN); + assert_eq!(actual, expected); + + let input = "0"; + let expected: Vec<(usize, usize)> = vec![]; + let actual = find_hex_strings_longer_than(input, TESTING_LEN); + assert_eq!(actual, expected); + + let input = ""; + let expected: Vec<(usize, usize)> = vec![]; + let actual = find_hex_strings_longer_than(input, TESTING_LEN); + assert_eq!(actual, expected); + } + + #[test] + fn respects_length() { + let input = "I like beef"; + let 
expected = vec![(7, 4)]; + let actual = find_hex_strings_longer_than(input, 3); + assert_eq!(actual, expected); + + let input = "I like beef"; + let expected: Vec<(usize, usize)> = vec![]; + let actual = find_hex_strings_longer_than(input, 1000); + assert_eq!(actual, expected); + } + + #[test] + fn should_shorten_long_strings() { + let max_unshortened_hex_string = hex_string(MAX_STRING_LEN); + let long_hex_string = hex_string(MAX_STRING_LEN + 1); + let long_non_hex_string: String = "g".repeat(MAX_STRING_LEN + 1); + let long_hex_substring = format!("a-{}-b", hex_string(MAX_STRING_LEN + 1)); + let multiple_long_hex_substrings = + format!("a: {0}, b: {0}, c: {0}", hex_string(MAX_STRING_LEN + 1)); + + let mut long_strings: Vec = vec![]; + for i in 1..=5 { + long_strings.push("a".repeat(MAX_STRING_LEN + i)); + } + let value = json!({ + "field_1": Option::::None, + "field_2": true, + "field_3": 123, + "field_4": max_unshortened_hex_string, + "field_5": ["short string value", long_hex_string], + "field_6": { + "f1": Option::::None, + "f2": false, + "f3": -123, + "f4": long_non_hex_string, + "f5": ["short string value", long_hex_substring], + "f6": { + "final long string": multiple_long_hex_substrings + } + } + }); + + let expected = r#"{ + "field_1": null, + "field_2": true, + "field_3": 123, + "field_4": "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef012345", + "field_5": [ + "short string value", + "[151 hex chars]" + ], + "field_6": { + "f1": null, + "f2": false, + "f3": -123, + "f4": "ggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg", + "f5": [ + "short string value", + "a-[151 hex chars]-b" + ], + "f6": { + "final long string": "a: [151 hex chars], b: [151 hex chars], c: [151 hex chars]" + } + } +}"#; + + let output = json_pretty_print(&value).unwrap(); + assert_eq!( + 
output, expected, + "Actual:\n{}\nExpected:\n{}\n", + output, expected + ); + } + + #[test] + fn should_not_modify_short_strings() { + let max_string: String = "a".repeat(MAX_STRING_LEN); + let value = json!({ + "field_1": Option::::None, + "field_2": true, + "field_3": 123, + "field_4": max_string, + "field_5": [ + "short string value", + "another short string" + ], + "field_6": { + "f1": Option::::None, + "f2": false, + "f3": -123, + "f4": "short", + "f5": [ + "short string value", + "another short string" + ], + "f6": { + "final string": "the last short string" + } + } + }); + + let expected = serde_json::to_string_pretty(&value).unwrap(); + let output = json_pretty_print(&value).unwrap(); + assert_eq!( + output, expected, + "Actual:\n{}\nExpected:\n{}\n", + output, expected + ); + } + + #[test] + /// Ref: https://github.com/casper-network/casper-node/issues/1456 + fn regression_1456() { + let long_string = r#"state query failed: ValueNotFound("Failed to find base key at path: Key::Account(72698d4dc715a28347b15920b09b4f0f1d633be5a33f4686d06992415b0825e2)")"#; + assert_eq!(long_string.len(), 148); + + let value = json!({ + "code": -32003, + "message": long_string, + }); + + let expected = r#"{ + "code": -32003, + "message": "state query failed: ValueNotFound(\"Failed to find base key at path: Key::Account(72698d4dc715a28347b15920b09b4f0f1d633be5a33f4686d06992415b0825e2)\")" +}"#; + + let output = json_pretty_print(&value).unwrap(); + assert_eq!( + output, expected, + "Actual:\n{}\nExpected:\n{}\n", + output, expected + ); + } +} diff --git a/casper_types/src/key.rs b/casper_types/src/key.rs new file mode 100644 index 00000000..addede02 --- /dev/null +++ b/casper_types/src/key.rs @@ -0,0 +1,1458 @@ +//! Key types. 
+ +use alloc::{ + format, + string::{String, ToString}, + vec::Vec, +}; + +use core::{ + convert::TryFrom, + fmt::{self, Debug, Display, Formatter}, + str::FromStr, +}; + +use blake2::{ + digest::{Update, VariableOutput}, + VarBlake2b, +}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; + +use crate::{ + account::{self, AccountHash, ACCOUNT_HASH_LENGTH}, + bytesrepr::{self, Error, FromBytes, ToBytes, U64_SERIALIZED_LENGTH}, + checksummed_hex, + contract_wasm::ContractWasmHash, + contracts::{ContractHash, ContractPackageHash}, + uref::{self, URef, URefAddr, UREF_SERIALIZED_LENGTH}, + DeployHash, EraId, Tagged, TransferAddr, TransferFromStrError, DEPLOY_HASH_LENGTH, + TRANSFER_ADDR_LENGTH, UREF_ADDR_LENGTH, +}; + +const HASH_PREFIX: &str = "hash-"; +const DEPLOY_INFO_PREFIX: &str = "deploy-"; +const ERA_INFO_PREFIX: &str = "era-"; +const BALANCE_PREFIX: &str = "balance-"; +const BID_PREFIX: &str = "bid-"; +const WITHDRAW_PREFIX: &str = "withdraw-"; +const DICTIONARY_PREFIX: &str = "dictionary-"; +const UNBOND_PREFIX: &str = "unbond-"; +const SYSTEM_CONTRACT_REGISTRY_PREFIX: &str = "system-contract-registry-"; +const ERA_SUMMARY_PREFIX: &str = "era-summary-"; +const CHAINSPEC_REGISTRY_PREFIX: &str = "chainspec-registry-"; +const CHECKSUM_REGISTRY_PREFIX: &str = "checksum-registry-"; + +/// The number of bytes in a Blake2b hash +pub const BLAKE2B_DIGEST_LENGTH: usize = 32; +/// The number of bytes in a [`Key::Hash`]. +pub const KEY_HASH_LENGTH: usize = 32; +/// The number of bytes in a [`Key::Transfer`]. +pub const KEY_TRANSFER_LENGTH: usize = TRANSFER_ADDR_LENGTH; +/// The number of bytes in a [`Key::DeployInfo`]. +pub const KEY_DEPLOY_INFO_LENGTH: usize = DEPLOY_HASH_LENGTH; +/// The number of bytes in a [`Key::Dictionary`]. 
+pub const KEY_DICTIONARY_LENGTH: usize = 32; +/// The maximum length for a `dictionary_item_key`. +pub const DICTIONARY_ITEM_KEY_MAX_LENGTH: usize = 128; +const PADDING_BYTES: [u8; 32] = [0u8; 32]; +const KEY_ID_SERIALIZED_LENGTH: usize = 1; +// u8 used to determine the ID +const KEY_HASH_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + KEY_HASH_LENGTH; +const KEY_UREF_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + UREF_SERIALIZED_LENGTH; +const KEY_TRANSFER_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + KEY_TRANSFER_LENGTH; +const KEY_DEPLOY_INFO_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + KEY_DEPLOY_INFO_LENGTH; +const KEY_ERA_INFO_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + U64_SERIALIZED_LENGTH; +const KEY_BALANCE_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + UREF_ADDR_LENGTH; +const KEY_BID_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + KEY_HASH_LENGTH; +const KEY_WITHDRAW_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + KEY_HASH_LENGTH; +const KEY_UNBOND_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + KEY_HASH_LENGTH; +const KEY_DICTIONARY_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + KEY_DICTIONARY_LENGTH; +const KEY_SYSTEM_CONTRACT_REGISTRY_SERIALIZED_LENGTH: usize = + KEY_ID_SERIALIZED_LENGTH + PADDING_BYTES.len(); +const KEY_ERA_SUMMARY_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + PADDING_BYTES.len(); +const KEY_CHAINSPEC_REGISTRY_SERIALIZED_LENGTH: usize = + KEY_ID_SERIALIZED_LENGTH + PADDING_BYTES.len(); +const KEY_CHECKSUM_REGISTRY_SERIALIZED_LENGTH: usize = + KEY_ID_SERIALIZED_LENGTH + PADDING_BYTES.len(); + +/// An alias for [`Key`]s hash variant. +pub type HashAddr = [u8; KEY_HASH_LENGTH]; + +/// An alias for [`Key`]s dictionary variant. 
+pub type DictionaryAddr = [u8; KEY_DICTIONARY_LENGTH]; + +#[allow(missing_docs)] +#[derive(Debug, Copy, Clone, PartialOrd, Ord, PartialEq, Eq, Hash)] +#[repr(u8)] +pub enum KeyTag { + Account = 0, + Hash = 1, + URef = 2, + Transfer = 3, + DeployInfo = 4, + EraInfo = 5, + Balance = 6, + Bid = 7, + Withdraw = 8, + Dictionary = 9, + SystemContractRegistry = 10, + EraSummary = 11, + Unbond = 12, + ChainspecRegistry = 13, + ChecksumRegistry = 14, +} + +/// The type under which data (e.g. [`CLValue`](crate::CLValue)s, smart contracts, user accounts) +/// are indexed on the network. +#[repr(C)] +#[derive(PartialEq, Eq, Clone, Copy, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub enum Key { + /// A `Key` under which a user account is stored. + Account(AccountHash), + /// A `Key` under which a smart contract is stored and which is the pseudo-hash of the + /// contract. + Hash(HashAddr), + /// A `Key` which is a [`URef`], under which most types of data can be stored. + URef(URef), + /// A `Key` under which we store a transfer. + Transfer(TransferAddr), + /// A `Key` under which we store a deploy info. + DeployInfo(DeployHash), + /// A `Key` under which we store an era info. + EraInfo(EraId), + /// A `Key` under which we store a purse balance. + Balance(URefAddr), + /// A `Key` under which we store bid information + Bid(AccountHash), + /// A `Key` under which we store withdraw information. + Withdraw(AccountHash), + /// A `Key` variant whose value is derived by hashing [`URef`]s address and arbitrary data. + Dictionary(DictionaryAddr), + /// A `Key` variant under which system contract hashes are stored. + SystemContractRegistry, + /// A `Key` under which we store current era info. + EraSummary, + /// A `Key` under which we store unbond information. + Unbond(AccountHash), + /// A `Key` variant under which chainspec and other hashes are stored. + ChainspecRegistry, + /// A `Key` variant under which we store a registry of checksums. 
+ ChecksumRegistry, +} + +/// Errors produced when converting a `String` into a `Key`. +#[derive(Debug)] +#[non_exhaustive] +pub enum FromStrError { + /// Account parse error. + Account(account::FromStrError), + /// Hash parse error. + Hash(String), + /// URef parse error. + URef(uref::FromStrError), + /// Transfer parse error. + Transfer(TransferFromStrError), + /// DeployInfo parse error. + DeployInfo(String), + /// EraInfo parse error. + EraInfo(String), + /// Balance parse error. + Balance(String), + /// Bid parse error. + Bid(String), + /// Withdraw parse error. + Withdraw(String), + /// Dictionary parse error. + Dictionary(String), + /// System contract registry parse error. + SystemContractRegistry(String), + /// Era summary parse error. + EraSummary(String), + /// Unbond parse error. + Unbond(String), + /// Chainspec registry error. + ChainspecRegistry(String), + /// Checksum registry error. + ChecksumRegistry(String), + /// Unknown prefix. + UnknownPrefix, +} + +impl From for FromStrError { + fn from(error: account::FromStrError) -> Self { + FromStrError::Account(error) + } +} + +impl From for FromStrError { + fn from(error: TransferFromStrError) -> Self { + FromStrError::Transfer(error) + } +} + +impl From for FromStrError { + fn from(error: uref::FromStrError) -> Self { + FromStrError::URef(error) + } +} + +impl Display for FromStrError { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match self { + FromStrError::Account(error) => write!(f, "account-key from string error: {}", error), + FromStrError::Hash(error) => write!(f, "hash-key from string error: {}", error), + FromStrError::URef(error) => write!(f, "uref-key from string error: {}", error), + FromStrError::Transfer(error) => write!(f, "transfer-key from string error: {}", error), + FromStrError::DeployInfo(error) => { + write!(f, "deploy-info-key from string error: {}", error) + } + FromStrError::EraInfo(error) => write!(f, "era-info-key from string error: {}", error), + 
FromStrError::Balance(error) => write!(f, "balance-key from string error: {}", error), + FromStrError::Bid(error) => write!(f, "bid-key from string error: {}", error), + FromStrError::Withdraw(error) => write!(f, "withdraw-key from string error: {}", error), + FromStrError::Dictionary(error) => { + write!(f, "dictionary-key from string error: {}", error) + } + FromStrError::SystemContractRegistry(error) => { + write!( + f, + "system-contract-registry-key from string error: {}", + error + ) + } + FromStrError::EraSummary(error) => { + write!(f, "era-summary-key from string error: {}", error) + } + FromStrError::Unbond(error) => { + write!(f, "unbond-key from string error: {}", error) + } + FromStrError::ChainspecRegistry(error) => { + write!(f, "chainspec-registry-key from string error: {}", error) + } + FromStrError::ChecksumRegistry(error) => { + write!(f, "checksum-registry-key from string error: {}", error) + } + FromStrError::UnknownPrefix => write!(f, "unknown prefix for key"), + } + } +} + +impl Key { + // This method is not intended to be used by third party crates. 
+ #[doc(hidden)] + pub fn type_string(&self) -> String { + match self { + Key::Account(_) => String::from("Key::Account"), + Key::Hash(_) => String::from("Key::Hash"), + Key::URef(_) => String::from("Key::URef"), + Key::Transfer(_) => String::from("Key::Transfer"), + Key::DeployInfo(_) => String::from("Key::DeployInfo"), + Key::EraInfo(_) => String::from("Key::EraInfo"), + Key::Balance(_) => String::from("Key::Balance"), + Key::Bid(_) => String::from("Key::Bid"), + Key::Withdraw(_) => String::from("Key::Unbond"), + Key::Dictionary(_) => String::from("Key::Dictionary"), + Key::SystemContractRegistry => String::from("Key::SystemContractRegistry"), + Key::EraSummary => String::from("Key::EraSummary"), + Key::Unbond(_) => String::from("Key::Unbond"), + Key::ChainspecRegistry => String::from("Key::ChainspecRegistry"), + Key::ChecksumRegistry => String::from("Key::ChecksumRegistry"), + } + } + + /// Returns the maximum size a [`Key`] can be serialized into. + pub const fn max_serialized_length() -> usize { + KEY_UREF_SERIALIZED_LENGTH + } + + /// If `self` is of type [`Key::URef`], returns `self` with the + /// [`AccessRights`](crate::AccessRights) stripped from the wrapped [`URef`], otherwise + /// returns `self` unmodified. + #[must_use] + pub fn normalize(self) -> Key { + match self { + Key::URef(uref) => Key::URef(uref.remove_access_rights()), + other => other, + } + } + + /// Returns a human-readable version of `self`, with the inner bytes encoded to Base16. 
+ pub fn to_formatted_string(self) -> String { + match self { + Key::Account(account_hash) => account_hash.to_formatted_string(), + Key::Hash(addr) => format!("{}{}", HASH_PREFIX, base16::encode_lower(&addr)), + Key::URef(uref) => uref.to_formatted_string(), + Key::Transfer(transfer_addr) => transfer_addr.to_formatted_string(), + Key::DeployInfo(addr) => { + format!( + "{}{}", + DEPLOY_INFO_PREFIX, + base16::encode_lower(addr.as_bytes()) + ) + } + Key::EraInfo(era_id) => { + format!("{}{}", ERA_INFO_PREFIX, era_id.value()) + } + Key::Balance(uref_addr) => { + format!("{}{}", BALANCE_PREFIX, base16::encode_lower(&uref_addr)) + } + Key::Bid(account_hash) => { + format!("{}{}", BID_PREFIX, base16::encode_lower(&account_hash)) + } + Key::Withdraw(account_hash) => { + format!("{}{}", WITHDRAW_PREFIX, base16::encode_lower(&account_hash)) + } + Key::Dictionary(dictionary_addr) => { + format!( + "{}{}", + DICTIONARY_PREFIX, + base16::encode_lower(&dictionary_addr) + ) + } + Key::SystemContractRegistry => { + format!( + "{}{}", + SYSTEM_CONTRACT_REGISTRY_PREFIX, + base16::encode_lower(&PADDING_BYTES) + ) + } + Key::EraSummary => { + format!( + "{}{}", + ERA_SUMMARY_PREFIX, + base16::encode_lower(&PADDING_BYTES) + ) + } + Key::Unbond(account_hash) => { + format!("{}{}", UNBOND_PREFIX, base16::encode_lower(&account_hash)) + } + Key::ChainspecRegistry => { + format!( + "{}{}", + CHAINSPEC_REGISTRY_PREFIX, + base16::encode_lower(&PADDING_BYTES) + ) + } + Key::ChecksumRegistry => { + format!( + "{}{}", + CHECKSUM_REGISTRY_PREFIX, + base16::encode_lower(&PADDING_BYTES) + ) + } + } + } + + /// Parses a string formatted as per `Self::to_formatted_string()` into a `Key`. 
+ pub fn from_formatted_str(input: &str) -> Result { + match AccountHash::from_formatted_str(input) { + Ok(account_hash) => return Ok(Key::Account(account_hash)), + Err(account::FromStrError::InvalidPrefix) => {} + Err(error) => return Err(error.into()), + } + + if let Some(hex) = input.strip_prefix(HASH_PREFIX) { + let addr = checksummed_hex::decode(hex) + .map_err(|error| FromStrError::Hash(error.to_string()))?; + let hash_addr = HashAddr::try_from(addr.as_ref()) + .map_err(|error| FromStrError::Hash(error.to_string()))?; + return Ok(Key::Hash(hash_addr)); + } + + if let Some(hex) = input.strip_prefix(DEPLOY_INFO_PREFIX) { + let hash = checksummed_hex::decode(hex) + .map_err(|error| FromStrError::DeployInfo(error.to_string()))?; + let hash_array = <[u8; DEPLOY_HASH_LENGTH]>::try_from(hash.as_ref()) + .map_err(|error| FromStrError::DeployInfo(error.to_string()))?; + return Ok(Key::DeployInfo(DeployHash::new(hash_array))); + } + + match TransferAddr::from_formatted_str(input) { + Ok(transfer_addr) => return Ok(Key::Transfer(transfer_addr)), + Err(TransferFromStrError::InvalidPrefix) => {} + Err(error) => return Err(error.into()), + } + + match URef::from_formatted_str(input) { + Ok(uref) => return Ok(Key::URef(uref)), + Err(uref::FromStrError::InvalidPrefix) => {} + Err(error) => return Err(error.into()), + } + + if let Some(era_summary_padding) = input.strip_prefix(ERA_SUMMARY_PREFIX) { + let padded_bytes = checksummed_hex::decode(era_summary_padding) + .map_err(|error| FromStrError::EraSummary(error.to_string()))?; + let _padding: [u8; 32] = TryFrom::try_from(padded_bytes.as_ref()).map_err(|_| { + FromStrError::EraSummary("Failed to deserialize era summary key".to_string()) + })?; + return Ok(Key::EraSummary); + } + + if let Some(era_id_str) = input.strip_prefix(ERA_INFO_PREFIX) { + let era_id = EraId::from_str(era_id_str) + .map_err(|error| FromStrError::EraInfo(error.to_string()))?; + return Ok(Key::EraInfo(era_id)); + } + + if let Some(hex) = 
input.strip_prefix(BALANCE_PREFIX) { + let addr = checksummed_hex::decode(hex) + .map_err(|error| FromStrError::Balance(error.to_string()))?; + let uref_addr = URefAddr::try_from(addr.as_ref()) + .map_err(|error| FromStrError::Balance(error.to_string()))?; + return Ok(Key::Balance(uref_addr)); + } + + if let Some(hex) = input.strip_prefix(BID_PREFIX) { + let hash = checksummed_hex::decode(hex) + .map_err(|error| FromStrError::Bid(error.to_string()))?; + let account_hash = <[u8; ACCOUNT_HASH_LENGTH]>::try_from(hash.as_ref()) + .map_err(|error| FromStrError::Bid(error.to_string()))?; + return Ok(Key::Bid(AccountHash::new(account_hash))); + } + + if let Some(hex) = input.strip_prefix(WITHDRAW_PREFIX) { + let hash = checksummed_hex::decode(hex) + .map_err(|error| FromStrError::Withdraw(error.to_string()))?; + let account_hash = <[u8; ACCOUNT_HASH_LENGTH]>::try_from(hash.as_ref()) + .map_err(|error| FromStrError::Withdraw(error.to_string()))?; + return Ok(Key::Withdraw(AccountHash::new(account_hash))); + } + + if let Some(hex) = input.strip_prefix(UNBOND_PREFIX) { + let hash = checksummed_hex::decode(hex) + .map_err(|error| FromStrError::Unbond(error.to_string()))?; + let account_hash = <[u8; ACCOUNT_HASH_LENGTH]>::try_from(hash.as_ref()) + .map_err(|error| FromStrError::Unbond(error.to_string()))?; + return Ok(Key::Unbond(AccountHash::new(account_hash))); + } + + if let Some(dictionary_addr) = input.strip_prefix(DICTIONARY_PREFIX) { + let dictionary_addr_bytes = checksummed_hex::decode(dictionary_addr) + .map_err(|error| FromStrError::Dictionary(error.to_string()))?; + let addr = DictionaryAddr::try_from(dictionary_addr_bytes.as_ref()) + .map_err(|error| FromStrError::Dictionary(error.to_string()))?; + return Ok(Key::Dictionary(addr)); + } + + if let Some(registry_address) = input.strip_prefix(SYSTEM_CONTRACT_REGISTRY_PREFIX) { + let padded_bytes = checksummed_hex::decode(registry_address) + .map_err(|error| FromStrError::SystemContractRegistry(error.to_string()))?; + 
let _padding: [u8; 32] = TryFrom::try_from(padded_bytes.as_ref()).map_err(|_| { + FromStrError::SystemContractRegistry( + "Failed to deserialize system registry key".to_string(), + ) + })?; + return Ok(Key::SystemContractRegistry); + } + + if let Some(registry_address) = input.strip_prefix(CHAINSPEC_REGISTRY_PREFIX) { + let padded_bytes = checksummed_hex::decode(registry_address) + .map_err(|error| FromStrError::ChainspecRegistry(error.to_string()))?; + let _padding: [u8; 32] = TryFrom::try_from(padded_bytes.as_ref()).map_err(|_| { + FromStrError::ChainspecRegistry( + "Failed to deserialize chainspec registry key".to_string(), + ) + })?; + return Ok(Key::ChainspecRegistry); + } + + if let Some(registry_address) = input.strip_prefix(CHECKSUM_REGISTRY_PREFIX) { + let padded_bytes = checksummed_hex::decode(registry_address) + .map_err(|error| FromStrError::ChecksumRegistry(error.to_string()))?; + let _padding: [u8; 32] = TryFrom::try_from(padded_bytes.as_ref()).map_err(|_| { + FromStrError::ChecksumRegistry( + "Failed to deserialize checksum registry key".to_string(), + ) + })?; + return Ok(Key::ChecksumRegistry); + } + + Err(FromStrError::UnknownPrefix) + } + + /// Returns the inner bytes of `self` if `self` is of type [`Key::Account`], otherwise returns + /// `None`. + pub fn into_account(self) -> Option { + match self { + Key::Account(bytes) => Some(bytes), + _ => None, + } + } + + /// Returns the inner bytes of `self` if `self` is of type [`Key::Hash`], otherwise returns + /// `None`. + pub fn into_hash(self) -> Option { + match self { + Key::Hash(hash) => Some(hash), + _ => None, + } + } + + /// Returns a reference to the inner [`URef`] if `self` is of type [`Key::URef`], otherwise + /// returns `None`. + pub fn as_uref(&self) -> Option<&URef> { + match self { + Key::URef(uref) => Some(uref), + _ => None, + } + } + + /// Returns a reference to the inner [`URef`] if `self` is of type [`Key::URef`], otherwise + /// returns `None`. 
+ pub fn as_uref_mut(&mut self) -> Option<&mut URef> { + match self { + Key::URef(uref) => Some(uref), + _ => None, + } + } + + /// Returns a reference to the inner `URefAddr` if `self` is of type [`Key::Balance`], + /// otherwise returns `None`. + pub fn as_balance(&self) -> Option<&URefAddr> { + if let Self::Balance(v) = self { + Some(v) + } else { + None + } + } + + /// Returns the inner [`URef`] if `self` is of type [`Key::URef`], otherwise returns `None`. + pub fn into_uref(self) -> Option { + match self { + Key::URef(uref) => Some(uref), + _ => None, + } + } + + /// Returns a reference to the inner [`DictionaryAddr`] if `self` is of type + /// [`Key::Dictionary`], otherwise returns `None`. + pub fn as_dictionary(&self) -> Option<&DictionaryAddr> { + match self { + Key::Dictionary(v) => Some(v), + _ => None, + } + } + + /// Casts a [`Key::URef`] to a [`Key::Hash`] + pub fn uref_to_hash(&self) -> Option { + let uref = self.as_uref()?; + let addr = uref.addr(); + Some(Key::Hash(addr)) + } + + /// Casts a [`Key::Withdraw`] to a [`Key::Unbond`] + pub fn withdraw_to_unbond(&self) -> Option { + if let Key::Withdraw(account_hash) = self { + return Some(Key::Unbond(*account_hash)); + } + None + } + + /// Creates a new [`Key::Dictionary`] variant based on a `seed_uref` and a `dictionary_item_key` + /// bytes. + pub fn dictionary(seed_uref: URef, dictionary_item_key: &[u8]) -> Key { + // NOTE: Expect below is safe because the length passed is supported. + let mut hasher = VarBlake2b::new(BLAKE2B_DIGEST_LENGTH).expect("should create hasher"); + hasher.update(seed_uref.addr().as_ref()); + hasher.update(dictionary_item_key); + // NOTE: Assumed safe as size of `HashAddr` equals to the output provided by hasher. + let mut addr = HashAddr::default(); + hasher.finalize_variable(|hash| addr.clone_from_slice(hash)); + Key::Dictionary(addr) + } + + /// Returns true if the key is of type [`Key::Dictionary`]. 
+ pub fn is_dictionary_key(&self) -> bool { + if let Key::Dictionary(_) = self { + return true; + } + false + } + + /// Returns a reference to the inner [`AccountHash`] if `self` is of type + /// [`Key::Withdraw`], otherwise returns `None`. + pub fn as_withdraw(&self) -> Option<&AccountHash> { + if let Self::Withdraw(v) = self { + Some(v) + } else { + None + } + } +} + +impl Display for Key { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match self { + Key::Account(account_hash) => write!(f, "Key::Account({})", account_hash), + Key::Hash(addr) => write!(f, "Key::Hash({})", base16::encode_lower(&addr)), + Key::URef(uref) => write!(f, "Key::{}", uref), /* Display impl for URef will append */ + Key::Transfer(transfer_addr) => write!(f, "Key::Transfer({})", transfer_addr), + Key::DeployInfo(addr) => write!( + f, + "Key::DeployInfo({})", + base16::encode_lower(addr.as_bytes()) + ), + Key::EraInfo(era_id) => write!(f, "Key::EraInfo({})", era_id), + Key::Balance(uref_addr) => { + write!(f, "Key::Balance({})", base16::encode_lower(uref_addr)) + } + Key::Bid(account_hash) => write!(f, "Key::Bid({})", account_hash), + Key::Withdraw(account_hash) => write!(f, "Key::Withdraw({})", account_hash), + Key::Dictionary(addr) => { + write!(f, "Key::Dictionary({})", base16::encode_lower(addr)) + } + Key::SystemContractRegistry => write!( + f, + "Key::SystemContractRegistry({})", + base16::encode_lower(&PADDING_BYTES) + ), + Key::EraSummary => write!( + f, + "Key::EraSummary({})", + base16::encode_lower(&PADDING_BYTES), + ), + Key::Unbond(account_hash) => write!(f, "Key::Unbond({})", account_hash), + Key::ChainspecRegistry => write!( + f, + "Key::ChainspecRegistry({})", + base16::encode_lower(&PADDING_BYTES) + ), + Key::ChecksumRegistry => { + write!( + f, + "Key::ChecksumRegistry({})", + base16::encode_lower(&PADDING_BYTES) + ) + } + } + } +} + +impl Debug for Key { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!(f, "{}", self) + } +} + +impl Tagged for Key { + fn 
tag(&self) -> KeyTag { + match self { + Key::Account(_) => KeyTag::Account, + Key::Hash(_) => KeyTag::Hash, + Key::URef(_) => KeyTag::URef, + Key::Transfer(_) => KeyTag::Transfer, + Key::DeployInfo(_) => KeyTag::DeployInfo, + Key::EraInfo(_) => KeyTag::EraInfo, + Key::Balance(_) => KeyTag::Balance, + Key::Bid(_) => KeyTag::Bid, + Key::Withdraw(_) => KeyTag::Withdraw, + Key::Dictionary(_) => KeyTag::Dictionary, + Key::SystemContractRegistry => KeyTag::SystemContractRegistry, + Key::EraSummary => KeyTag::EraSummary, + Key::Unbond(_) => KeyTag::Unbond, + Key::ChainspecRegistry => KeyTag::ChainspecRegistry, + Key::ChecksumRegistry => KeyTag::ChecksumRegistry, + } + } +} + +impl Tagged for Key { + fn tag(&self) -> u8 { + let key_tag: KeyTag = self.tag(); + key_tag as u8 + } +} + +impl From for Key { + fn from(uref: URef) -> Key { + Key::URef(uref) + } +} + +impl From for Key { + fn from(account_hash: AccountHash) -> Key { + Key::Account(account_hash) + } +} + +impl From for Key { + fn from(transfer_addr: TransferAddr) -> Key { + Key::Transfer(transfer_addr) + } +} + +impl From for Key { + fn from(contract_hash: ContractHash) -> Key { + Key::Hash(contract_hash.value()) + } +} + +impl From for Key { + fn from(wasm_hash: ContractWasmHash) -> Key { + Key::Hash(wasm_hash.value()) + } +} + +impl From for Key { + fn from(package_hash: ContractPackageHash) -> Key { + Key::Hash(package_hash.value()) + } +} + +impl ToBytes for Key { + fn to_bytes(&self) -> Result, Error> { + let mut result = bytesrepr::unchecked_allocate_buffer(self); + self.write_bytes(&mut result)?; + Ok(result) + } + + fn serialized_length(&self) -> usize { + match self { + Key::Account(account_hash) => { + KEY_ID_SERIALIZED_LENGTH + account_hash.serialized_length() + } + Key::Hash(_) => KEY_HASH_SERIALIZED_LENGTH, + Key::URef(_) => KEY_UREF_SERIALIZED_LENGTH, + Key::Transfer(_) => KEY_TRANSFER_SERIALIZED_LENGTH, + Key::DeployInfo(_) => KEY_DEPLOY_INFO_SERIALIZED_LENGTH, + Key::EraInfo(_) => 
KEY_ERA_INFO_SERIALIZED_LENGTH, + Key::Balance(_) => KEY_BALANCE_SERIALIZED_LENGTH, + Key::Bid(_) => KEY_BID_SERIALIZED_LENGTH, + Key::Withdraw(_) => KEY_WITHDRAW_SERIALIZED_LENGTH, + Key::Dictionary(_) => KEY_DICTIONARY_SERIALIZED_LENGTH, + Key::SystemContractRegistry => KEY_SYSTEM_CONTRACT_REGISTRY_SERIALIZED_LENGTH, + Key::EraSummary => KEY_ERA_SUMMARY_SERIALIZED_LENGTH, + Key::Unbond(_) => KEY_UNBOND_SERIALIZED_LENGTH, + Key::ChainspecRegistry => KEY_CHAINSPEC_REGISTRY_SERIALIZED_LENGTH, + Key::ChecksumRegistry => KEY_CHECKSUM_REGISTRY_SERIALIZED_LENGTH, + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.push(self.tag()); + match self { + Key::Account(account_hash) => account_hash.write_bytes(writer), + Key::Hash(hash) => hash.write_bytes(writer), + Key::URef(uref) => uref.write_bytes(writer), + Key::Transfer(addr) => addr.write_bytes(writer), + Key::DeployInfo(deploy_hash) => deploy_hash.write_bytes(writer), + Key::EraInfo(era_id) => era_id.write_bytes(writer), + Key::Balance(uref_addr) => uref_addr.write_bytes(writer), + Key::Bid(account_hash) => account_hash.write_bytes(writer), + Key::Withdraw(account_hash) => account_hash.write_bytes(writer), + Key::Dictionary(addr) => addr.write_bytes(writer), + Key::Unbond(account_hash) => account_hash.write_bytes(writer), + Key::SystemContractRegistry + | Key::EraSummary + | Key::ChainspecRegistry + | Key::ChecksumRegistry => PADDING_BYTES.write_bytes(writer), + } + } +} + +impl FromBytes for Key { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + tag if tag == KeyTag::Account as u8 => { + let (account_hash, rem) = AccountHash::from_bytes(remainder)?; + Ok((Key::Account(account_hash), rem)) + } + tag if tag == KeyTag::Hash as u8 => { + let (hash, rem) = HashAddr::from_bytes(remainder)?; + Ok((Key::Hash(hash), rem)) + } + tag if tag == KeyTag::URef as u8 => { + let (uref, rem) = URef::from_bytes(remainder)?; + 
Ok((Key::URef(uref), rem)) + } + tag if tag == KeyTag::Transfer as u8 => { + let (transfer_addr, rem) = TransferAddr::from_bytes(remainder)?; + Ok((Key::Transfer(transfer_addr), rem)) + } + tag if tag == KeyTag::DeployInfo as u8 => { + let (deploy_hash, rem) = DeployHash::from_bytes(remainder)?; + Ok((Key::DeployInfo(deploy_hash), rem)) + } + tag if tag == KeyTag::EraInfo as u8 => { + let (era_id, rem) = EraId::from_bytes(remainder)?; + Ok((Key::EraInfo(era_id), rem)) + } + tag if tag == KeyTag::Balance as u8 => { + let (uref_addr, rem) = URefAddr::from_bytes(remainder)?; + Ok((Key::Balance(uref_addr), rem)) + } + tag if tag == KeyTag::Bid as u8 => { + let (account_hash, rem) = AccountHash::from_bytes(remainder)?; + Ok((Key::Bid(account_hash), rem)) + } + tag if tag == KeyTag::Withdraw as u8 => { + let (account_hash, rem) = AccountHash::from_bytes(remainder)?; + Ok((Key::Withdraw(account_hash), rem)) + } + tag if tag == KeyTag::Dictionary as u8 => { + let (addr, rem) = DictionaryAddr::from_bytes(remainder)?; + Ok((Key::Dictionary(addr), rem)) + } + tag if tag == KeyTag::SystemContractRegistry as u8 => { + let (_, rem) = <[u8; 32]>::from_bytes(remainder)?; + Ok((Key::SystemContractRegistry, rem)) + } + tag if tag == KeyTag::EraSummary as u8 => { + let (_, rem) = <[u8; 32]>::from_bytes(remainder)?; + Ok((Key::EraSummary, rem)) + } + tag if tag == KeyTag::Unbond as u8 => { + let (account_hash, rem) = AccountHash::from_bytes(remainder)?; + Ok((Key::Unbond(account_hash), rem)) + } + tag if tag == KeyTag::ChainspecRegistry as u8 => { + let (_, rem) = <[u8; 32]>::from_bytes(remainder)?; + Ok((Key::ChainspecRegistry, rem)) + } + tag if tag == KeyTag::ChecksumRegistry as u8 => { + let (_, rem) = <[u8; 32]>::from_bytes(remainder)?; + Ok((Key::ChecksumRegistry, rem)) + } + _ => Err(Error::Formatting), + } + } +} + +#[allow(dead_code)] +fn please_add_to_distribution_impl(key: Key) { + // If you've been forced to come here, you likely need to add your variant to the + // 
`Distribution` impl for `Key`. + match key { + Key::Account(_) => unimplemented!(), + Key::Hash(_) => unimplemented!(), + Key::URef(_) => unimplemented!(), + Key::Transfer(_) => unimplemented!(), + Key::DeployInfo(_) => unimplemented!(), + Key::EraInfo(_) => unimplemented!(), + Key::Balance(_) => unimplemented!(), + Key::Bid(_) => unimplemented!(), + Key::Withdraw(_) => unimplemented!(), + Key::Dictionary(_) => unimplemented!(), + Key::SystemContractRegistry => unimplemented!(), + Key::EraSummary => unimplemented!(), + Key::Unbond(_) => unimplemented!(), + Key::ChainspecRegistry => unimplemented!(), + Key::ChecksumRegistry => unimplemented!(), + } +} + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> Key { + match rng.gen_range(0..=14) { + 0 => Key::Account(rng.gen()), + 1 => Key::Hash(rng.gen()), + 2 => Key::URef(rng.gen()), + 3 => Key::Transfer(rng.gen()), + 4 => Key::DeployInfo(rng.gen()), + 5 => Key::EraInfo(rng.gen()), + 6 => Key::Balance(rng.gen()), + 7 => Key::Bid(rng.gen()), + 8 => Key::Withdraw(rng.gen()), + 9 => Key::Dictionary(rng.gen()), + 10 => Key::SystemContractRegistry, + 11 => Key::EraSummary, + 12 => Key::Unbond(rng.gen()), + 13 => Key::ChainspecRegistry, + 14 => Key::ChecksumRegistry, + _ => unreachable!(), + } + } +} + +mod serde_helpers { + use super::*; + + #[derive(Serialize, Deserialize)] + pub(super) enum HumanReadable { + Account(String), + Hash(String), + URef(String), + Transfer(String), + DeployInfo(String), + EraInfo(String), + Balance(String), + Bid(String), + Withdraw(String), + Dictionary(String), + SystemContractRegistry(String), + EraSummary(String), + Unbond(String), + ChainspecRegistry(String), + ChecksumRegistry(String), + } + + impl From<&Key> for HumanReadable { + fn from(key: &Key) -> Self { + let formatted_string = key.to_formatted_string(); + match key { + Key::Account(_) => HumanReadable::Account(formatted_string), + Key::Hash(_) => HumanReadable::Hash(formatted_string), + Key::URef(_) => 
HumanReadable::URef(formatted_string), + Key::Transfer(_) => HumanReadable::Transfer(formatted_string), + Key::DeployInfo(_) => HumanReadable::DeployInfo(formatted_string), + Key::EraInfo(_) => HumanReadable::EraInfo(formatted_string), + Key::Balance(_) => HumanReadable::Balance(formatted_string), + Key::Bid(_) => HumanReadable::Bid(formatted_string), + Key::Withdraw(_) => HumanReadable::Withdraw(formatted_string), + Key::Dictionary(_) => HumanReadable::Dictionary(formatted_string), + Key::SystemContractRegistry => { + HumanReadable::SystemContractRegistry(formatted_string) + } + Key::EraSummary => HumanReadable::EraSummary(formatted_string), + Key::Unbond(_) => HumanReadable::Unbond(formatted_string), + Key::ChainspecRegistry => HumanReadable::ChainspecRegistry(formatted_string), + Key::ChecksumRegistry => HumanReadable::ChecksumRegistry(formatted_string), + } + } + } + + impl TryFrom for Key { + type Error = FromStrError; + + fn try_from(helper: HumanReadable) -> Result { + match helper { + HumanReadable::Account(formatted_string) + | HumanReadable::Hash(formatted_string) + | HumanReadable::URef(formatted_string) + | HumanReadable::Transfer(formatted_string) + | HumanReadable::DeployInfo(formatted_string) + | HumanReadable::EraInfo(formatted_string) + | HumanReadable::Balance(formatted_string) + | HumanReadable::Bid(formatted_string) + | HumanReadable::Withdraw(formatted_string) + | HumanReadable::Dictionary(formatted_string) + | HumanReadable::SystemContractRegistry(formatted_string) + | HumanReadable::EraSummary(formatted_string) + | HumanReadable::Unbond(formatted_string) + | HumanReadable::ChainspecRegistry(formatted_string) + | HumanReadable::ChecksumRegistry(formatted_string) => { + Key::from_formatted_str(&formatted_string) + } + } + } + } + + #[derive(Serialize)] + pub(super) enum BinarySerHelper<'a> { + Account(&'a AccountHash), + Hash(&'a HashAddr), + URef(&'a URef), + Transfer(&'a TransferAddr), + DeployInfo(&'a DeployHash), + EraInfo(&'a EraId), + 
Balance(&'a URefAddr), + Bid(&'a AccountHash), + Withdraw(&'a AccountHash), + Dictionary(&'a HashAddr), + SystemContractRegistry, + EraSummary, + Unbond(&'a AccountHash), + ChainspecRegistry, + ChecksumRegistry, + } + + impl<'a> From<&'a Key> for BinarySerHelper<'a> { + fn from(key: &'a Key) -> Self { + match key { + Key::Account(account_hash) => BinarySerHelper::Account(account_hash), + Key::Hash(hash_addr) => BinarySerHelper::Hash(hash_addr), + Key::URef(uref) => BinarySerHelper::URef(uref), + Key::Transfer(transfer_addr) => BinarySerHelper::Transfer(transfer_addr), + Key::DeployInfo(deploy_hash) => BinarySerHelper::DeployInfo(deploy_hash), + Key::EraInfo(era_id) => BinarySerHelper::EraInfo(era_id), + Key::Balance(uref_addr) => BinarySerHelper::Balance(uref_addr), + Key::Bid(account_hash) => BinarySerHelper::Bid(account_hash), + Key::Withdraw(account_hash) => BinarySerHelper::Withdraw(account_hash), + Key::Dictionary(addr) => BinarySerHelper::Dictionary(addr), + Key::SystemContractRegistry => BinarySerHelper::SystemContractRegistry, + Key::EraSummary => BinarySerHelper::EraSummary, + Key::Unbond(account_hash) => BinarySerHelper::Unbond(account_hash), + Key::ChainspecRegistry => BinarySerHelper::ChainspecRegistry, + Key::ChecksumRegistry => BinarySerHelper::ChecksumRegistry, + } + } + } + + #[derive(Deserialize)] + pub(super) enum BinaryDeserHelper { + Account(AccountHash), + Hash(HashAddr), + URef(URef), + Transfer(TransferAddr), + DeployInfo(DeployHash), + EraInfo(EraId), + Balance(URefAddr), + Bid(AccountHash), + Withdraw(AccountHash), + Dictionary(DictionaryAddr), + SystemContractRegistry, + EraSummary, + Unbond(AccountHash), + ChainspecRegistry, + ChecksumRegistry, + } + + impl From for Key { + fn from(helper: BinaryDeserHelper) -> Self { + match helper { + BinaryDeserHelper::Account(account_hash) => Key::Account(account_hash), + BinaryDeserHelper::Hash(hash_addr) => Key::Hash(hash_addr), + BinaryDeserHelper::URef(uref) => Key::URef(uref), + 
BinaryDeserHelper::Transfer(transfer_addr) => Key::Transfer(transfer_addr), + BinaryDeserHelper::DeployInfo(deploy_hash) => Key::DeployInfo(deploy_hash), + BinaryDeserHelper::EraInfo(era_id) => Key::EraInfo(era_id), + BinaryDeserHelper::Balance(uref_addr) => Key::Balance(uref_addr), + BinaryDeserHelper::Bid(account_hash) => Key::Bid(account_hash), + BinaryDeserHelper::Withdraw(account_hash) => Key::Withdraw(account_hash), + BinaryDeserHelper::Dictionary(addr) => Key::Dictionary(addr), + BinaryDeserHelper::SystemContractRegistry => Key::SystemContractRegistry, + BinaryDeserHelper::EraSummary => Key::EraSummary, + BinaryDeserHelper::Unbond(account_hash) => Key::Unbond(account_hash), + BinaryDeserHelper::ChainspecRegistry => Key::ChainspecRegistry, + BinaryDeserHelper::ChecksumRegistry => Key::ChecksumRegistry, + } + } + } +} + +impl Serialize for Key { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + serde_helpers::HumanReadable::from(self).serialize(serializer) + } else { + serde_helpers::BinarySerHelper::from(self).serialize(serializer) + } + } +} + +impl<'de> Deserialize<'de> for Key { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let human_readable = serde_helpers::HumanReadable::deserialize(deserializer)?; + Key::try_from(human_readable).map_err(SerdeError::custom) + } else { + let binary_helper = serde_helpers::BinaryDeserHelper::deserialize(deserializer)?; + Ok(Key::from(binary_helper)) + } + } +} + +#[cfg(test)] +mod tests { + use std::string::ToString; + + use serde_json::json; + + use super::*; + use crate::{ + account::ACCOUNT_HASH_FORMATTED_STRING_PREFIX, + bytesrepr::{Error, FromBytes}, + transfer::TRANSFER_ADDR_FORMATTED_STRING_PREFIX, + uref::UREF_FORMATTED_STRING_PREFIX, + AccessRights, URef, + }; + + const ACCOUNT_KEY: Key = Key::Account(AccountHash::new([42; 32])); + const HASH_KEY: Key = Key::Hash([42; 32]); + const UREF_KEY: Key = Key::URef(URef::new([42; 32], 
AccessRights::READ)); + const TRANSFER_KEY: Key = Key::Transfer(TransferAddr::new([42; 32])); + const DEPLOY_INFO_KEY: Key = Key::DeployInfo(DeployHash::new([42; 32])); + const ERA_INFO_KEY: Key = Key::EraInfo(EraId::new(42)); + const BALANCE_KEY: Key = Key::Balance([42; 32]); + const BID_KEY: Key = Key::Bid(AccountHash::new([42; 32])); + const WITHDRAW_KEY: Key = Key::Withdraw(AccountHash::new([42; 32])); + const DICTIONARY_KEY: Key = Key::Dictionary([42; 32]); + const SYSTEM_CONTRACT_REGISTRY_KEY: Key = Key::SystemContractRegistry; + const ERA_SUMMARY_KEY: Key = Key::EraSummary; + const UNBOND_KEY: Key = Key::Unbond(AccountHash::new([42; 32])); + const CHAINSPEC_REGISTRY_KEY: Key = Key::ChainspecRegistry; + const CHECKSUM_REGISTRY_KEY: Key = Key::ChecksumRegistry; + const KEYS: &[Key] = &[ + ACCOUNT_KEY, + HASH_KEY, + UREF_KEY, + TRANSFER_KEY, + DEPLOY_INFO_KEY, + ERA_INFO_KEY, + BALANCE_KEY, + BID_KEY, + WITHDRAW_KEY, + DICTIONARY_KEY, + SYSTEM_CONTRACT_REGISTRY_KEY, + ERA_SUMMARY_KEY, + UNBOND_KEY, + CHAINSPEC_REGISTRY_KEY, + CHECKSUM_REGISTRY_KEY, + ]; + const HEX_STRING: &str = "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a"; + + fn test_readable(right: AccessRights, is_true: bool) { + assert_eq!(right.is_readable(), is_true) + } + + #[test] + fn test_is_readable() { + test_readable(AccessRights::READ, true); + test_readable(AccessRights::READ_ADD, true); + test_readable(AccessRights::READ_WRITE, true); + test_readable(AccessRights::READ_ADD_WRITE, true); + test_readable(AccessRights::ADD, false); + test_readable(AccessRights::ADD_WRITE, false); + test_readable(AccessRights::WRITE, false); + } + + fn test_writable(right: AccessRights, is_true: bool) { + assert_eq!(right.is_writeable(), is_true) + } + + #[test] + fn test_is_writable() { + test_writable(AccessRights::WRITE, true); + test_writable(AccessRights::READ_WRITE, true); + test_writable(AccessRights::ADD_WRITE, true); + test_writable(AccessRights::READ, false); + 
test_writable(AccessRights::ADD, false); + test_writable(AccessRights::READ_ADD, false); + test_writable(AccessRights::READ_ADD_WRITE, true); + } + + fn test_addable(right: AccessRights, is_true: bool) { + assert_eq!(right.is_addable(), is_true) + } + + #[test] + fn test_is_addable() { + test_addable(AccessRights::ADD, true); + test_addable(AccessRights::READ_ADD, true); + test_addable(AccessRights::READ_WRITE, false); + test_addable(AccessRights::ADD_WRITE, true); + test_addable(AccessRights::READ, false); + test_addable(AccessRights::WRITE, false); + test_addable(AccessRights::READ_ADD_WRITE, true); + } + + #[test] + fn should_display_key() { + assert_eq!( + format!("{}", ACCOUNT_KEY), + format!("Key::Account({})", HEX_STRING) + ); + assert_eq!( + format!("{}", HASH_KEY), + format!("Key::Hash({})", HEX_STRING) + ); + assert_eq!( + format!("{}", UREF_KEY), + format!("Key::URef({}, READ)", HEX_STRING) + ); + assert_eq!( + format!("{}", TRANSFER_KEY), + format!("Key::Transfer({})", HEX_STRING) + ); + assert_eq!( + format!("{}", DEPLOY_INFO_KEY), + format!("Key::DeployInfo({})", HEX_STRING) + ); + assert_eq!( + format!("{}", ERA_INFO_KEY), + "Key::EraInfo(era 42)".to_string() + ); + assert_eq!( + format!("{}", BALANCE_KEY), + format!("Key::Balance({})", HEX_STRING) + ); + assert_eq!(format!("{}", BID_KEY), format!("Key::Bid({})", HEX_STRING)); + assert_eq!( + format!("{}", WITHDRAW_KEY), + format!("Key::Withdraw({})", HEX_STRING) + ); + assert_eq!( + format!("{}", DICTIONARY_KEY), + format!("Key::Dictionary({})", HEX_STRING) + ); + assert_eq!( + format!("{}", SYSTEM_CONTRACT_REGISTRY_KEY), + format!( + "Key::SystemContractRegistry({})", + base16::encode_lower(&PADDING_BYTES) + ) + ); + assert_eq!( + format!("{}", ERA_SUMMARY_KEY), + format!("Key::EraSummary({})", base16::encode_lower(&PADDING_BYTES)) + ); + assert_eq!( + format!("{}", UNBOND_KEY), + format!("Key::Unbond({})", HEX_STRING) + ); + assert_eq!( + format!("{}", CHAINSPEC_REGISTRY_KEY), + format!( + 
"Key::ChainspecRegistry({})", + base16::encode_lower(&PADDING_BYTES) + ) + ); + assert_eq!( + format!("{}", CHECKSUM_REGISTRY_KEY), + format!( + "Key::ChecksumRegistry({})", + base16::encode_lower(&PADDING_BYTES), + ) + ); + } + + #[test] + fn abuse_vec_key() { + // Prefix is 2^32-1 = shouldn't allocate that much + let bytes: Vec = vec![255, 255, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; + let res: Result<(Vec, &[u8]), _> = FromBytes::from_bytes(&bytes); + #[cfg(target_os = "linux")] + assert_eq!(res.expect_err("should fail"), Error::OutOfMemory); + #[cfg(target_os = "macos")] + assert_eq!(res.expect_err("should fail"), Error::EarlyEndOfStream); + } + + #[test] + fn check_key_account_getters() { + let account = [42; 32]; + let account_hash = AccountHash::new(account); + let key1 = Key::Account(account_hash); + assert_eq!(key1.into_account(), Some(account_hash)); + assert!(key1.into_hash().is_none()); + assert!(key1.as_uref().is_none()); + } + + #[test] + fn check_key_hash_getters() { + let hash = [42; KEY_HASH_LENGTH]; + let key1 = Key::Hash(hash); + assert!(key1.into_account().is_none()); + assert_eq!(key1.into_hash(), Some(hash)); + assert!(key1.as_uref().is_none()); + } + + #[test] + fn check_key_uref_getters() { + let uref = URef::new([42; 32], AccessRights::READ_ADD_WRITE); + let key1 = Key::URef(uref); + assert!(key1.into_account().is_none()); + assert!(key1.into_hash().is_none()); + assert_eq!(key1.as_uref(), Some(&uref)); + } + + #[test] + fn key_max_serialized_length() { + let mut got_max = false; + for key in KEYS { + assert!(key.serialized_length() <= Key::max_serialized_length()); + if key.serialized_length() == Key::max_serialized_length() { + got_max = true; + } + } + assert!( + got_max, + "None of the Key variants has a serialized_length equal to \ + Key::max_serialized_length(), so Key::max_serialized_length() should be reduced" + ); + } + + #[test] + fn should_parse_key_from_str() { + for key in KEYS { + let string = key.to_formatted_string(); + let 
parsed_key = Key::from_formatted_str(&string).unwrap(); + assert_eq!(parsed_key, *key, "{string} (key = {key:?})"); + } + } + + #[test] + fn should_fail_to_parse_key_from_str() { + assert!( + Key::from_formatted_str(ACCOUNT_HASH_FORMATTED_STRING_PREFIX) + .unwrap_err() + .to_string() + .starts_with("account-key from string error: ") + ); + assert!(Key::from_formatted_str(HASH_PREFIX) + .unwrap_err() + .to_string() + .starts_with("hash-key from string error: ")); + assert!(Key::from_formatted_str(UREF_FORMATTED_STRING_PREFIX) + .unwrap_err() + .to_string() + .starts_with("uref-key from string error: ")); + assert!( + Key::from_formatted_str(TRANSFER_ADDR_FORMATTED_STRING_PREFIX) + .unwrap_err() + .to_string() + .starts_with("transfer-key from string error: ") + ); + assert!(Key::from_formatted_str(DEPLOY_INFO_PREFIX) + .unwrap_err() + .to_string() + .starts_with("deploy-info-key from string error: ")); + assert!(Key::from_formatted_str(ERA_INFO_PREFIX) + .unwrap_err() + .to_string() + .starts_with("era-info-key from string error: ")); + assert!(Key::from_formatted_str(BALANCE_PREFIX) + .unwrap_err() + .to_string() + .starts_with("balance-key from string error: ")); + assert!(Key::from_formatted_str(BID_PREFIX) + .unwrap_err() + .to_string() + .starts_with("bid-key from string error: ")); + assert!(Key::from_formatted_str(WITHDRAW_PREFIX) + .unwrap_err() + .to_string() + .starts_with("withdraw-key from string error: ")); + assert!(Key::from_formatted_str(DICTIONARY_PREFIX) + .unwrap_err() + .to_string() + .starts_with("dictionary-key from string error: ")); + assert!(Key::from_formatted_str(SYSTEM_CONTRACT_REGISTRY_PREFIX) + .unwrap_err() + .to_string() + .starts_with("system-contract-registry-key from string error: ")); + assert!(Key::from_formatted_str(ERA_SUMMARY_PREFIX) + .unwrap_err() + .to_string() + .starts_with("era-summary-key from string error")); + assert!(Key::from_formatted_str(UNBOND_PREFIX) + .unwrap_err() + .to_string() + .starts_with("unbond-key from 
string error: ")); + assert!(Key::from_formatted_str(CHAINSPEC_REGISTRY_PREFIX) + .unwrap_err() + .to_string() + .starts_with("chainspec-registry-key from string error: ")); + assert!(Key::from_formatted_str(CHECKSUM_REGISTRY_PREFIX) + .unwrap_err() + .to_string() + .starts_with("checksum-registry-key from string error: ")); + let invalid_prefix = "a-0000000000000000000000000000000000000000000000000000000000000000"; + assert_eq!( + Key::from_formatted_str(invalid_prefix) + .unwrap_err() + .to_string(), + "unknown prefix for key" + ); + + let missing_hyphen_prefix = + "hash0000000000000000000000000000000000000000000000000000000000000000"; + assert_eq!( + Key::from_formatted_str(missing_hyphen_prefix) + .unwrap_err() + .to_string(), + "unknown prefix for key" + ); + + let no_prefix = "0000000000000000000000000000000000000000000000000000000000000000"; + assert_eq!( + Key::from_formatted_str(no_prefix).unwrap_err().to_string(), + "unknown prefix for key" + ); + } + + #[test] + fn key_to_json() { + let expected_json = &[ + json!({ "Account": format!("account-hash-{}", HEX_STRING) }), + json!({ "Hash": format!("hash-{}", HEX_STRING) }), + json!({ "URef": format!("uref-{}-001", HEX_STRING) }), + json!({ "Transfer": format!("transfer-{}", HEX_STRING) }), + json!({ "DeployInfo": format!("deploy-{}", HEX_STRING) }), + json!({ "EraInfo": "era-42" }), + json!({ "Balance": format!("balance-{}", HEX_STRING) }), + json!({ "Bid": format!("bid-{}", HEX_STRING) }), + json!({ "Withdraw": format!("withdraw-{}", HEX_STRING) }), + json!({ "Dictionary": format!("dictionary-{}", HEX_STRING) }), + json!({ + "SystemContractRegistry": + format!( + "system-contract-registry-{}", + base16::encode_lower(&PADDING_BYTES) + ) + }), + json!({ + "EraSummary": format!("era-summary-{}", base16::encode_lower(&PADDING_BYTES)) + }), + json!({ "Unbond": format!("unbond-{}", HEX_STRING) }), + json!({ + "ChainspecRegistry": + format!( + "chainspec-registry-{}", + base16::encode_lower(&PADDING_BYTES) + ) + 
}), + json!({ + "ChecksumRegistry": + format!("checksum-registry-{}", base16::encode_lower(&PADDING_BYTES)) + }), + ]; + + assert_eq!( + KEYS.len(), + expected_json.len(), + "There should be exactly one expected JSON string per test key" + ); + + for (key, expected_json_key) in KEYS.iter().zip(expected_json.iter()) { + assert_eq!(serde_json::to_value(key).unwrap(), *expected_json_key); + } + } + + #[test] + fn serialization_roundtrip_bincode() { + for key in KEYS { + let encoded = bincode::serialize(key).unwrap(); + let decoded = bincode::deserialize(&encoded).unwrap(); + assert_eq!(key, &decoded); + } + } + + #[test] + fn serialization_roundtrip_json() { + let round_trip = |key: &Key| { + let encoded = serde_json::to_value(key).unwrap(); + let decoded = serde_json::from_value(encoded).unwrap(); + assert_eq!(key, &decoded); + }; + + for key in KEYS { + round_trip(key); + } + + let zeros = [0; BLAKE2B_DIGEST_LENGTH]; + + round_trip(&Key::Account(AccountHash::new(zeros))); + round_trip(&Key::Hash(zeros)); + round_trip(&Key::URef(URef::new(zeros, AccessRights::READ))); + round_trip(&Key::Transfer(TransferAddr::new(zeros))); + round_trip(&Key::DeployInfo(DeployHash::new(zeros))); + round_trip(&Key::EraInfo(EraId::from(0))); + round_trip(&Key::Balance(URef::new(zeros, AccessRights::READ).addr())); + round_trip(&Key::Bid(AccountHash::new(zeros))); + round_trip(&Key::Withdraw(AccountHash::new(zeros))); + round_trip(&Key::Dictionary(zeros)); + round_trip(&Key::SystemContractRegistry); + round_trip(&Key::EraSummary); + round_trip(&Key::Unbond(AccountHash::new(zeros))); + round_trip(&Key::ChainspecRegistry); + round_trip(&Key::ChecksumRegistry); + } +} diff --git a/casper_types/src/lib.rs b/casper_types/src/lib.rs new file mode 100644 index 00000000..c2aeac55 --- /dev/null +++ b/casper_types/src/lib.rs @@ -0,0 +1,113 @@ +//! Types used to allow creation of Wasm contracts and tests for use on the Casper Platform. 
+ +#![cfg_attr( + not(any( + feature = "json-schema", + feature = "datasize", + feature = "std", + feature = "testing", + test, + )), + no_std +)] +#![doc(html_root_url = "https://docs.rs/casper-types/4.0.1")] +#![doc( + html_favicon_url = "https://raw.githubusercontent.com/casper-network/casper-node/blob/dev/images/Casper_Logo_Favicon_48.png", + html_logo_url = "https://raw.githubusercontent.com/casper-network/casper-node/blob/dev/images/Casper_Logo_Favicon.png", + test(attr(forbid(warnings))) +)] +#![warn(missing_docs)] + +#[cfg_attr(not(test), macro_use)] +extern crate alloc; + +mod access_rights; +pub mod account; +pub mod api_error; +mod block_time; +pub mod bytesrepr; +pub mod checksummed_hex; +mod cl_type; +mod cl_value; +mod contract_wasm; +pub mod contracts; +pub mod crypto; +mod deploy_info; +mod era_id; +mod execution_result; +#[cfg(any(feature = "std", test))] +pub mod file_utils; +mod gas; +#[cfg(any(feature = "testing", feature = "gens", test))] +pub mod gens; +mod json_pretty_printer; +mod key; +mod motes; +mod named_key; +mod phase; +mod protocol_version; +pub mod runtime_args; +mod semver; +mod stored_value; +pub mod system; +mod tagged; +#[cfg(any(feature = "testing", test))] +pub mod testing; +mod timestamp; +mod transfer; +mod transfer_result; +mod uint; +mod uref; + +pub use access_rights::{ + AccessRights, ContextAccessRights, GrantedAccess, ACCESS_RIGHTS_SERIALIZED_LENGTH, +}; +#[doc(inline)] +pub use api_error::ApiError; +pub use block_time::{BlockTime, BLOCKTIME_SERIALIZED_LENGTH}; +pub use cl_type::{named_key_type, CLType, CLTyped}; +pub use cl_value::{CLTypeMismatch, CLValue, CLValueError}; +pub use contract_wasm::{ContractWasm, ContractWasmHash}; +#[doc(inline)] +pub use contracts::{ + Contract, ContractHash, ContractPackage, ContractPackageHash, ContractVersion, + ContractVersionKey, EntryPoint, EntryPointAccess, EntryPointType, EntryPoints, Group, + Parameter, +}; +pub use crypto::*; +pub use deploy_info::DeployInfo; +pub use 
execution_result::{ + ExecutionEffect, ExecutionResult, OpKind, Operation, Transform, TransformEntry, +}; +pub use gas::Gas; +pub use json_pretty_printer::json_pretty_print; +#[doc(inline)] +pub use key::{ + DictionaryAddr, FromStrError as KeyFromStrError, HashAddr, Key, KeyTag, BLAKE2B_DIGEST_LENGTH, + DICTIONARY_ITEM_KEY_MAX_LENGTH, KEY_DICTIONARY_LENGTH, KEY_HASH_LENGTH, +}; +pub use motes::Motes; +pub use named_key::NamedKey; +pub use phase::{Phase, PHASE_SERIALIZED_LENGTH}; +pub use protocol_version::{ProtocolVersion, VersionCheckResult}; +#[doc(inline)] +pub use runtime_args::{NamedArg, RuntimeArgs}; +pub use semver::{ParseSemVerError, SemVer, SEM_VER_SERIALIZED_LENGTH}; +pub use stored_value::{StoredValue, TypeMismatch as StoredValueTypeMismatch}; +pub use tagged::Tagged; +#[cfg(any(feature = "std", test))] +pub use timestamp::serde_option_time_diff; +pub use timestamp::{TimeDiff, Timestamp}; +pub use transfer::{ + DeployHash, FromStrError as TransferFromStrError, Transfer, TransferAddr, DEPLOY_HASH_LENGTH, + TRANSFER_ADDR_LENGTH, +}; +pub use transfer_result::{TransferResult, TransferredTo}; +pub use uref::{ + FromStrError as URefFromStrError, URef, URefAddr, UREF_ADDR_LENGTH, UREF_SERIALIZED_LENGTH, +}; + +pub use crate::{ + era_id::EraId, + uint::{UIntParseError, U128, U256, U512}, +}; diff --git a/casper_types/src/motes.rs b/casper_types/src/motes.rs new file mode 100644 index 00000000..8008a81c --- /dev/null +++ b/casper_types/src/motes.rs @@ -0,0 +1,248 @@ +//! The `motes` module is used for working with Motes. + +use alloc::vec::Vec; +use core::{ + fmt, + iter::Sum, + ops::{Add, Div, Mul, Sub}, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use num::Zero; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + Gas, U512, +}; + +/// A struct representing a number of `Motes`. 
+#[derive(Debug, Default, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct Motes(U512); + +impl Motes { + /// Constructs a new `Motes`. + pub fn new(value: U512) -> Motes { + Motes(value) + } + + /// Checked integer addition. Computes `self + rhs`, returning `None` if overflow occurred. + pub fn checked_add(&self, rhs: Self) -> Option { + self.0.checked_add(rhs.value()).map(Self::new) + } + + /// Checked integer subtraction. Computes `self - rhs`, returning `None` if underflow occurred. + pub fn checked_sub(&self, rhs: Self) -> Option { + self.0.checked_sub(rhs.value()).map(Self::new) + } + + /// Returns the inner `U512` value. + pub fn value(&self) -> U512 { + self.0 + } + + /// Converts the given `gas` to `Motes` by multiplying them by `conv_rate`. + /// + /// Returns `None` if an arithmetic overflow occurred. + pub fn from_gas(gas: Gas, conv_rate: u64) -> Option { + gas.value() + .checked_mul(U512::from(conv_rate)) + .map(Self::new) + } +} + +impl fmt::Display for Motes { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:?}", self.0) + } +} + +impl Add for Motes { + type Output = Motes; + + fn add(self, rhs: Self) -> Self::Output { + let val = self.value() + rhs.value(); + Motes::new(val) + } +} + +impl Sub for Motes { + type Output = Motes; + + fn sub(self, rhs: Self) -> Self::Output { + let val = self.value() - rhs.value(); + Motes::new(val) + } +} + +impl Div for Motes { + type Output = Motes; + + fn div(self, rhs: Self) -> Self::Output { + let val = self.value() / rhs.value(); + Motes::new(val) + } +} + +impl Mul for Motes { + type Output = Motes; + + fn mul(self, rhs: Self) -> Self::Output { + let val = self.value() * rhs.value(); + Motes::new(val) + } +} + +impl Zero for Motes { + fn zero() -> Self { + Motes::new(U512::zero()) + } + + fn is_zero(&self) -> bool { + self.0.is_zero() + } +} + +impl Sum for Motes { + fn sum>(iter: I) -> Self { + 
iter.fold(Motes::zero(), Add::add) + } +} + +impl ToBytes for Motes { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for Motes { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (value, remainder) = FromBytes::from_bytes(bytes)?; + Ok((Motes::new(value), remainder)) + } +} + +#[cfg(test)] +mod tests { + use crate::U512; + + use crate::{Gas, Motes}; + + #[test] + fn should_be_able_to_get_instance_of_motes() { + let initial_value = 1; + let motes = Motes::new(U512::from(initial_value)); + assert_eq!( + initial_value, + motes.value().as_u64(), + "should have equal value" + ) + } + + #[test] + fn should_be_able_to_compare_two_instances_of_motes() { + let left_motes = Motes::new(U512::from(1)); + let right_motes = Motes::new(U512::from(1)); + assert_eq!(left_motes, right_motes, "should be equal"); + let right_motes = Motes::new(U512::from(2)); + assert_ne!(left_motes, right_motes, "should not be equal") + } + + #[test] + fn should_be_able_to_add_two_instances_of_motes() { + let left_motes = Motes::new(U512::from(1)); + let right_motes = Motes::new(U512::from(1)); + let expected_motes = Motes::new(U512::from(2)); + assert_eq!( + (left_motes + right_motes), + expected_motes, + "should be equal" + ) + } + + #[test] + fn should_be_able_to_subtract_two_instances_of_motes() { + let left_motes = Motes::new(U512::from(1)); + let right_motes = Motes::new(U512::from(1)); + let expected_motes = Motes::new(U512::from(0)); + assert_eq!( + (left_motes - right_motes), + expected_motes, + "should be equal" + ) + } + + #[test] + fn should_be_able_to_multiply_two_instances_of_motes() { + let left_motes = Motes::new(U512::from(100)); + let right_motes = Motes::new(U512::from(10)); + let expected_motes = Motes::new(U512::from(1000)); + assert_eq!( + (left_motes * right_motes), + expected_motes, + "should be equal" + ) + } + + #[test] 
+ fn should_be_able_to_divide_two_instances_of_motes() { + let left_motes = Motes::new(U512::from(1000)); + let right_motes = Motes::new(U512::from(100)); + let expected_motes = Motes::new(U512::from(10)); + assert_eq!( + (left_motes / right_motes), + expected_motes, + "should be equal" + ) + } + + #[test] + fn should_be_able_to_convert_from_motes() { + let gas = Gas::new(U512::from(100)); + let motes = Motes::from_gas(gas, 10).expect("should have value"); + let expected_motes = Motes::new(U512::from(1000)); + assert_eq!(motes, expected_motes, "should be equal") + } + + #[test] + fn should_be_able_to_default() { + let motes = Motes::default(); + let expected_motes = Motes::new(U512::from(0)); + assert_eq!(motes, expected_motes, "should be equal") + } + + #[test] + fn should_be_able_to_compare_relative_value() { + let left_motes = Motes::new(U512::from(100)); + let right_motes = Motes::new(U512::from(10)); + assert!(left_motes > right_motes, "should be gt"); + let right_motes = Motes::new(U512::from(100)); + assert!(left_motes >= right_motes, "should be gte"); + assert!(left_motes <= right_motes, "should be lte"); + let left_motes = Motes::new(U512::from(10)); + assert!(left_motes < right_motes, "should be lt"); + } + + #[test] + fn should_default() { + let left_motes = Motes::new(U512::from(0)); + let right_motes = Motes::default(); + assert_eq!(left_motes, right_motes, "should be equal"); + let u512 = U512::zero(); + assert_eq!(left_motes.value(), u512, "should be equal"); + } + + #[test] + fn should_support_checked_mul_from_gas() { + let gas = Gas::new(U512::MAX); + let conv_rate = 10; + let maybe = Motes::from_gas(gas, conv_rate); + assert!(maybe.is_none(), "should be none due to overflow"); + } +} diff --git a/casper_types/src/named_key.rs b/casper_types/src/named_key.rs new file mode 100644 index 00000000..29214a52 --- /dev/null +++ b/casper_types/src/named_key.rs @@ -0,0 +1,46 @@ +// TODO - remove once schemars stops causing warning. 
+#![allow(clippy::field_reassign_with_default)] + +use alloc::{string::String, vec::Vec}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::bytesrepr::{self, FromBytes, ToBytes}; + +/// A named key. +#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Default, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct NamedKey { + /// The name of the entry. + pub name: String, + /// The value of the entry: a casper `Key` type. + pub key: String, +} + +impl ToBytes for NamedKey { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.extend(self.name.to_bytes()?); + buffer.extend(self.key.to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.name.serialized_length() + self.key.serialized_length() + } +} + +impl FromBytes for NamedKey { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (name, remainder) = String::from_bytes(bytes)?; + let (key, remainder) = String::from_bytes(remainder)?; + let named_key = NamedKey { name, key }; + Ok((named_key, remainder)) + } +} diff --git a/casper_types/src/phase.rs b/casper_types/src/phase.rs new file mode 100644 index 00000000..35586889 --- /dev/null +++ b/casper_types/src/phase.rs @@ -0,0 +1,56 @@ +// Can be removed once https://github.com/rust-lang/rustfmt/issues/3362 is resolved. +#[rustfmt::skip] +use alloc::vec; +use alloc::vec::Vec; + +use num_derive::{FromPrimitive, ToPrimitive}; +use num_traits::{FromPrimitive, ToPrimitive}; + +use crate::{ + bytesrepr::{Error, FromBytes, ToBytes}, + CLType, CLTyped, +}; + +/// The number of bytes in a serialized [`Phase`]. +pub const PHASE_SERIALIZED_LENGTH: usize = 1; + +/// The phase in which a given contract is executing. 
+#[derive(Debug, PartialEq, Eq, Clone, Copy, FromPrimitive, ToPrimitive)] +#[repr(u8)] +pub enum Phase { + /// Set while committing the genesis or upgrade configurations. + System = 0, + /// Set while executing the payment code of a deploy. + Payment = 1, + /// Set while executing the session code of a deploy. + Session = 2, + /// Set while finalizing payment at the end of a deploy. + FinalizePayment = 3, +} + +impl ToBytes for Phase { + fn to_bytes(&self) -> Result, Error> { + // NOTE: Assumed safe as [`Phase`] is represented as u8. + let id = self.to_u8().expect("Phase is represented as a u8"); + + Ok(vec![id]) + } + + fn serialized_length(&self) -> usize { + PHASE_SERIALIZED_LENGTH + } +} + +impl FromBytes for Phase { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (id, rest) = u8::from_bytes(bytes)?; + let phase = FromPrimitive::from_u8(id).ok_or(Error::Formatting)?; + Ok((phase, rest)) + } +} + +impl CLTyped for Phase { + fn cl_type() -> CLType { + CLType::U8 + } +} diff --git a/casper_types/src/protocol_version.rs b/casper_types/src/protocol_version.rs new file mode 100644 index 00000000..fe889f1c --- /dev/null +++ b/casper_types/src/protocol_version.rs @@ -0,0 +1,550 @@ +use alloc::{format, string::String, vec::Vec}; +use core::{convert::TryFrom, fmt, str::FromStr}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; + +use crate::{ + bytesrepr::{Error, FromBytes, ToBytes}, + ParseSemVerError, SemVer, +}; + +/// A newtype wrapping a [`SemVer`] which represents a Casper Platform protocol version. +#[derive(Copy, Clone, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct ProtocolVersion(SemVer); + +/// The result of [`ProtocolVersion::check_next_version`]. 
+#[derive(Debug, PartialEq, Eq)] +pub enum VersionCheckResult { + /// Upgrade possible. + Valid { + /// Is this a major protocol version upgrade? + is_major_version: bool, + }, + /// Upgrade is invalid. + Invalid, +} + +impl VersionCheckResult { + /// Checks if given version result is invalid. + /// + /// Invalid means that a given version can not be followed. + pub fn is_invalid(&self) -> bool { + matches!(self, VersionCheckResult::Invalid) + } + + /// Checks if given version is a major protocol version upgrade. + pub fn is_major_version(&self) -> bool { + match self { + VersionCheckResult::Valid { is_major_version } => *is_major_version, + VersionCheckResult::Invalid => false, + } + } +} + +impl ProtocolVersion { + /// Version 1.0.0. + pub const V1_0_0: ProtocolVersion = ProtocolVersion(SemVer { + major: 1, + minor: 0, + patch: 0, + }); + + /// Constructs a new `ProtocolVersion` from `version`. + pub const fn new(version: SemVer) -> ProtocolVersion { + ProtocolVersion(version) + } + + /// Constructs a new `ProtocolVersion` from the given semver parts. + pub const fn from_parts(major: u32, minor: u32, patch: u32) -> ProtocolVersion { + let sem_ver = SemVer::new(major, minor, patch); + Self::new(sem_ver) + } + + /// Returns the inner [`SemVer`]. + pub fn value(&self) -> SemVer { + self.0 + } + + /// Checks if next version can be followed. + pub fn check_next_version(&self, next: &ProtocolVersion) -> VersionCheckResult { + // Protocol major versions should increase monotonically by 1. + let major_bumped = self.0.major.saturating_add(1); + if next.0.major < self.0.major || next.0.major > major_bumped { + return VersionCheckResult::Invalid; + } + + if next.0.major == major_bumped { + return VersionCheckResult::Valid { + is_major_version: true, + }; + } + + // Covers the equal major versions + debug_assert_eq!(next.0.major, self.0.major); + + if next.0.minor < self.0.minor { + // Protocol minor versions within the same major version should not go backwards. 
+ return VersionCheckResult::Invalid; + } + + if next.0.minor > self.0.minor { + return VersionCheckResult::Valid { + is_major_version: false, + }; + } + + // Code belows covers equal minor versions + debug_assert_eq!(next.0.minor, self.0.minor); + + // Protocol patch versions should increase monotonically but can be skipped. + if next.0.patch <= self.0.patch { + return VersionCheckResult::Invalid; + } + + VersionCheckResult::Valid { + is_major_version: false, + } + } + + /// Checks if given protocol version is compatible with current one. + /// + /// Two protocol versions with different major version are considered to be incompatible. + pub fn is_compatible_with(&self, version: &ProtocolVersion) -> bool { + self.0.major == version.0.major + } +} + +impl ToBytes for ProtocolVersion { + fn to_bytes(&self) -> Result, Error> { + self.value().to_bytes() + } + + fn serialized_length(&self) -> usize { + self.value().serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.extend(self.0.major.to_le_bytes()); + writer.extend(self.0.minor.to_le_bytes()); + writer.extend(self.0.patch.to_le_bytes()); + Ok(()) + } +} + +impl FromBytes for ProtocolVersion { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (version, rem) = SemVer::from_bytes(bytes)?; + let protocol_version = ProtocolVersion::new(version); + Ok((protocol_version, rem)) + } +} + +impl FromStr for ProtocolVersion { + type Err = ParseSemVerError; + + fn from_str(s: &str) -> Result { + let version = SemVer::try_from(s)?; + Ok(ProtocolVersion::new(version)) + } +} + +impl Serialize for ProtocolVersion { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + let str = format!("{}.{}.{}", self.0.major, self.0.minor, self.0.patch); + String::serialize(&str, serializer) + } else { + self.0.serialize(serializer) + } + } +} + +impl<'de> Deserialize<'de> for ProtocolVersion { + fn deserialize>(deserializer: D) -> Result { + let 
semver = if deserializer.is_human_readable() { + let value_as_string = String::deserialize(deserializer)?; + SemVer::try_from(value_as_string.as_str()).map_err(SerdeError::custom)? + } else { + SemVer::deserialize(deserializer)? + }; + Ok(ProtocolVersion(semver)) + } +} + +#[cfg(feature = "json-schema")] +impl JsonSchema for ProtocolVersion { + fn schema_name() -> String { + String::from("ProtocolVersion") + } + + fn json_schema(gen: &mut schemars::gen::SchemaGenerator) -> schemars::schema::Schema { + let schema = gen.subschema_for::(); + let mut schema_object = schema.into_object(); + schema_object.metadata().description = Some("Casper Platform protocol version".to_string()); + schema_object.into() + } +} + +impl fmt::Display for ProtocolVersion { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0.fmt(f) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::SemVer; + + #[test] + fn should_follow_version_with_optional_code() { + let value = VersionCheckResult::Valid { + is_major_version: false, + }; + assert!(!value.is_invalid()); + assert!(!value.is_major_version()); + } + + #[test] + fn should_follow_version_with_required_code() { + let value = VersionCheckResult::Valid { + is_major_version: true, + }; + assert!(!value.is_invalid()); + assert!(value.is_major_version()); + } + + #[test] + fn should_not_follow_version_with_invalid_code() { + let value = VersionCheckResult::Invalid; + assert!(value.is_invalid()); + assert!(!value.is_major_version()); + } + + #[test] + fn should_be_able_to_get_instance() { + let initial_value = SemVer::new(1, 0, 0); + let item = ProtocolVersion::new(initial_value); + assert_eq!(initial_value, item.value(), "should have equal value") + } + + #[test] + fn should_be_able_to_compare_two_instances() { + let lhs = ProtocolVersion::new(SemVer::new(1, 0, 0)); + let rhs = ProtocolVersion::new(SemVer::new(1, 0, 0)); + assert_eq!(lhs, rhs, "should be equal"); + let rhs = ProtocolVersion::new(SemVer::new(2, 0, 0)); + 
assert_ne!(lhs, rhs, "should not be equal") + } + + #[test] + fn should_be_able_to_default() { + let defaulted = ProtocolVersion::default(); + let expected = ProtocolVersion::new(SemVer::new(0, 0, 0)); + assert_eq!(defaulted, expected, "should be equal") + } + + #[test] + fn should_be_able_to_compare_relative_value() { + let lhs = ProtocolVersion::new(SemVer::new(2, 0, 0)); + let rhs = ProtocolVersion::new(SemVer::new(1, 0, 0)); + assert!(lhs > rhs, "should be gt"); + let rhs = ProtocolVersion::new(SemVer::new(2, 0, 0)); + assert!(lhs >= rhs, "should be gte"); + assert!(lhs <= rhs, "should be lte"); + let lhs = ProtocolVersion::new(SemVer::new(1, 0, 0)); + assert!(lhs < rhs, "should be lt"); + } + + #[test] + fn should_follow_major_version_upgrade() { + // If the upgrade protocol version is lower than or the same as EE's current in-use protocol + // version the upgrade is rejected and an error is returned; this includes the special case + // of a defaulted protocol version ( 0.0.0 ). + let prev = ProtocolVersion::new(SemVer::new(1, 0, 0)); + let next = ProtocolVersion::new(SemVer::new(2, 0, 0)); + assert!( + prev.check_next_version(&next).is_major_version(), + "should be major version" + ); + } + + #[test] + fn should_reject_if_major_version_decreases() { + let prev = ProtocolVersion::new(SemVer::new(10, 0, 0)); + let next = ProtocolVersion::new(SemVer::new(9, 0, 0)); + // Major version must not decrease ... + assert_eq!(prev.check_next_version(&next), VersionCheckResult::Invalid); + } + + #[test] + fn should_check_follows_minor_version_upgrade() { + // [major version] may remain the same in the case of a minor or patch version increase. 
+ + // Minor version must not decrease within the same major version + let prev = ProtocolVersion::new(SemVer::new(1, 1, 0)); + let next = ProtocolVersion::new(SemVer::new(1, 2, 0)); + + let value = prev.check_next_version(&next); + assert!(!value.is_invalid(), "should be valid"); + assert!(!value.is_major_version(), "should not be a major version"); + } + + #[test] + fn should_not_care_if_minor_bump_resets_patch() { + let prev = ProtocolVersion::new(SemVer::new(1, 2, 0)); + let next = ProtocolVersion::new(SemVer::new(1, 3, 1)); + assert_eq!( + prev.check_next_version(&next), + VersionCheckResult::Valid { + is_major_version: false + } + ); + + let prev = ProtocolVersion::new(SemVer::new(1, 20, 42)); + let next = ProtocolVersion::new(SemVer::new(1, 30, 43)); + assert_eq!( + prev.check_next_version(&next), + VersionCheckResult::Valid { + is_major_version: false + } + ); + } + + #[test] + fn should_not_care_if_major_bump_resets_minor_or_patch() { + // A major version increase resets both the minor and patch versions to ( 0.0 ). + let prev = ProtocolVersion::new(SemVer::new(1, 0, 0)); + let next = ProtocolVersion::new(SemVer::new(2, 1, 0)); + assert_eq!( + prev.check_next_version(&next), + VersionCheckResult::Valid { + is_major_version: true + } + ); + + let next = ProtocolVersion::new(SemVer::new(2, 0, 1)); + assert_eq!( + prev.check_next_version(&next), + VersionCheckResult::Valid { + is_major_version: true + } + ); + + let next = ProtocolVersion::new(SemVer::new(2, 1, 1)); + assert_eq!( + prev.check_next_version(&next), + VersionCheckResult::Valid { + is_major_version: true + } + ); + } + + #[test] + fn should_reject_patch_version_rollback() { + // Patch version must not decrease or remain the same within the same major and minor + // version pair, but may skip. 
+ let prev = ProtocolVersion::new(SemVer::new(1, 0, 42)); + let next = ProtocolVersion::new(SemVer::new(1, 0, 41)); + assert_eq!(prev.check_next_version(&next), VersionCheckResult::Invalid); + let next = ProtocolVersion::new(SemVer::new(1, 0, 13)); + assert_eq!(prev.check_next_version(&next), VersionCheckResult::Invalid); + } + + #[test] + fn should_accept_patch_version_update_with_optional_code() { + let prev = ProtocolVersion::new(SemVer::new(1, 0, 0)); + let next = ProtocolVersion::new(SemVer::new(1, 0, 1)); + let value = prev.check_next_version(&next); + assert!(!value.is_invalid(), "should be valid"); + assert!(!value.is_major_version(), "should not be a major version"); + + let prev = ProtocolVersion::new(SemVer::new(1, 0, 8)); + let next = ProtocolVersion::new(SemVer::new(1, 0, 42)); + let value = prev.check_next_version(&next); + assert!(!value.is_invalid(), "should be valid"); + assert!(!value.is_major_version(), "should not be a major version"); + } + + #[test] + fn should_accept_minor_version_update_with_optional_code() { + // installer is optional for minor bump + let prev = ProtocolVersion::new(SemVer::new(1, 0, 0)); + let next = ProtocolVersion::new(SemVer::new(1, 1, 0)); + let value = prev.check_next_version(&next); + assert!(!value.is_invalid(), "should be valid"); + assert!(!value.is_major_version(), "should not be a major version"); + + let prev = ProtocolVersion::new(SemVer::new(3, 98, 0)); + let next = ProtocolVersion::new(SemVer::new(3, 99, 0)); + let value = prev.check_next_version(&next); + assert!(!value.is_invalid(), "should be valid"); + assert!(!value.is_major_version(), "should not be a major version"); + } + + #[test] + fn should_allow_skip_minor_version_within_major_version() { + let prev = ProtocolVersion::new(SemVer::new(1, 1, 0)); + + let next = ProtocolVersion::new(SemVer::new(1, 3, 0)); + assert_eq!( + prev.check_next_version(&next), + VersionCheckResult::Valid { + is_major_version: false + } + ); + + let next = 
ProtocolVersion::new(SemVer::new(1, 7, 0)); + assert_eq!( + prev.check_next_version(&next), + VersionCheckResult::Valid { + is_major_version: false + } + ); + } + + #[test] + fn should_allow_skip_patch_version_within_minor_version() { + let prev = ProtocolVersion::new(SemVer::new(1, 1, 0)); + + let next = ProtocolVersion::new(SemVer::new(1, 1, 2)); + assert_eq!( + prev.check_next_version(&next), + VersionCheckResult::Valid { + is_major_version: false + } + ); + } + + #[test] + fn should_allow_skipped_minor_and_patch_on_major_bump() { + // skip minor + let prev = ProtocolVersion::new(SemVer::new(1, 0, 0)); + let next = ProtocolVersion::new(SemVer::new(2, 1, 0)); + assert_eq!( + prev.check_next_version(&next), + VersionCheckResult::Valid { + is_major_version: true + } + ); + + // skip patch + let prev = ProtocolVersion::new(SemVer::new(1, 0, 0)); + let next = ProtocolVersion::new(SemVer::new(2, 0, 1)); + assert_eq!( + prev.check_next_version(&next), + VersionCheckResult::Valid { + is_major_version: true + } + ); + + // skip many minors and patches + let prev = ProtocolVersion::new(SemVer::new(1, 0, 0)); + let next = ProtocolVersion::new(SemVer::new(2, 3, 10)); + assert_eq!( + prev.check_next_version(&next), + VersionCheckResult::Valid { + is_major_version: true + } + ); + } + + #[test] + fn should_allow_code_on_major_update() { + // major upgrade requires installer to be present + let prev = ProtocolVersion::new(SemVer::new(1, 0, 0)); + let next = ProtocolVersion::new(SemVer::new(2, 0, 0)); + assert!( + prev.check_next_version(&next).is_major_version(), + "should be major version" + ); + + let prev = ProtocolVersion::new(SemVer::new(2, 99, 99)); + let next = ProtocolVersion::new(SemVer::new(3, 0, 0)); + assert!( + prev.check_next_version(&next).is_major_version(), + "should be major version" + ); + } + + #[test] + fn should_not_skip_major_version() { + // can bump only by 1 + let prev = ProtocolVersion::new(SemVer::new(1, 0, 0)); + let next = 
ProtocolVersion::new(SemVer::new(3, 0, 0)); + assert_eq!(prev.check_next_version(&next), VersionCheckResult::Invalid); + } + + #[test] + fn should_reject_major_version_rollback() { + // can bump forward + let prev = ProtocolVersion::new(SemVer::new(2, 0, 0)); + let next = ProtocolVersion::new(SemVer::new(0, 0, 0)); + assert_eq!(prev.check_next_version(&next), VersionCheckResult::Invalid); + } + + #[test] + fn should_check_same_version_is_invalid() { + for ver in &[ + ProtocolVersion::from_parts(1, 0, 0), + ProtocolVersion::from_parts(1, 2, 0), + ProtocolVersion::from_parts(1, 2, 3), + ] { + assert_eq!(ver.check_next_version(ver), VersionCheckResult::Invalid); + } + } + + #[test] + fn should_not_be_compatible_with_different_major_version() { + let current = ProtocolVersion::from_parts(1, 2, 3); + let other = ProtocolVersion::from_parts(2, 5, 6); + assert!(!current.is_compatible_with(&other)); + + let current = ProtocolVersion::from_parts(1, 0, 0); + let other = ProtocolVersion::from_parts(2, 0, 0); + assert!(!current.is_compatible_with(&other)); + } + + #[test] + fn should_be_compatible_with_equal_major_version_backwards() { + let current = ProtocolVersion::from_parts(1, 99, 99); + let other = ProtocolVersion::from_parts(1, 0, 0); + assert!(current.is_compatible_with(&other)); + } + + #[test] + fn should_be_compatible_with_equal_major_version_forwards() { + let current = ProtocolVersion::from_parts(1, 0, 0); + let other = ProtocolVersion::from_parts(1, 99, 99); + assert!(current.is_compatible_with(&other)); + } + + #[test] + fn should_serialize_to_json_properly() { + let protocol_version = ProtocolVersion::from_parts(1, 1, 1); + let json = serde_json::to_string(&protocol_version).unwrap(); + let expected = "\"1.1.1\""; + assert_eq!(json, expected); + } + + #[test] + fn serialize_roundtrip() { + let protocol_version = ProtocolVersion::from_parts(1, 1, 1); + let serialized_json = serde_json::to_string(&protocol_version).unwrap(); + assert_eq!( + protocol_version, + 
serde_json::from_str(&serialized_json).unwrap() + ); + + let serialized_bincode = bincode::serialize(&protocol_version).unwrap(); + assert_eq!( + protocol_version, + bincode::deserialize(&serialized_bincode).unwrap() + ); + } +} diff --git a/casper_types/src/runtime_args.rs b/casper_types/src/runtime_args.rs new file mode 100644 index 00000000..271de625 --- /dev/null +++ b/casper_types/src/runtime_args.rs @@ -0,0 +1,368 @@ +//! Home of RuntimeArgs for calling contracts + +// TODO - remove once schemars stops causing warning. +#![allow(clippy::field_reassign_with_default)] + +use alloc::{collections::BTreeMap, string::String, vec::Vec}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, Error, FromBytes, ToBytes}, + CLType, CLTyped, CLValue, CLValueError, U512, +}; +/// Named arguments to a contract. +#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct NamedArg(String, CLValue); + +impl NamedArg { + /// Returns a new `NamedArg`. + pub fn new(name: String, value: CLValue) -> Self { + NamedArg(name, value) + } + + /// Returns the name of the named arg. + pub fn name(&self) -> &str { + &self.0 + } + + /// Returns the value of the named arg. + pub fn cl_value(&self) -> &CLValue { + &self.1 + } + + /// Returns a mutable reference to the value of the named arg. 
+ pub fn cl_value_mut(&mut self) -> &mut CLValue { + &mut self.1 + } +} + +impl From<(String, CLValue)> for NamedArg { + fn from((name, value): (String, CLValue)) -> NamedArg { + NamedArg(name, value) + } +} + +impl ToBytes for NamedArg { + fn to_bytes(&self) -> Result, Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + result.append(&mut self.0.to_bytes()?); + result.append(&mut self.1.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + self.1.serialized_length() + } +} + +impl FromBytes for NamedArg { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (name, remainder) = String::from_bytes(bytes)?; + let (cl_value, remainder) = CLValue::from_bytes(remainder)?; + Ok((NamedArg(name, cl_value), remainder)) + } +} + +/// Represents a collection of arguments passed to a smart contract. +#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize, Debug, Default)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct RuntimeArgs(Vec); + +impl RuntimeArgs { + /// Create an empty [`RuntimeArgs`] instance. + pub fn new() -> RuntimeArgs { + RuntimeArgs::default() + } + + /// A wrapper that lets you easily and safely create runtime arguments. + /// + /// This method is useful when you have to construct a [`RuntimeArgs`] with multiple entries, + /// but error handling at given call site would require to have a match statement for each + /// [`RuntimeArgs::insert`] call. With this method you can use ? operator inside the closure and + /// then handle single result. When `try_block` will be stabilized this method could be + /// deprecated in favor of using those blocks. + pub fn try_new(func: F) -> Result + where + F: FnOnce(&mut RuntimeArgs) -> Result<(), CLValueError>, + { + let mut runtime_args = RuntimeArgs::new(); + func(&mut runtime_args)?; + Ok(runtime_args) + } + + /// Gets an argument by its name. 
+ pub fn get(&self, name: &str) -> Option<&CLValue> { + self.0.iter().find_map(|NamedArg(named_name, named_value)| { + if named_name == name { + Some(named_value) + } else { + None + } + }) + } + + /// Gets the length of the collection. + pub fn len(&self) -> usize { + self.0.len() + } + + /// Returns `true` if the collection of arguments is empty. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Inserts a new named argument into the collection. + pub fn insert(&mut self, key: K, value: V) -> Result<(), CLValueError> + where + K: Into, + V: CLTyped + ToBytes, + { + let cl_value = CLValue::from_t(value)?; + self.0.push(NamedArg(key.into(), cl_value)); + Ok(()) + } + + /// Inserts a new named argument into the collection. + pub fn insert_cl_value(&mut self, key: K, cl_value: CLValue) + where + K: Into, + { + self.0.push(NamedArg(key.into(), cl_value)); + } + + /// Returns all the values of the named args. + pub fn to_values(&self) -> Vec<&CLValue> { + self.0.iter().map(|NamedArg(_name, value)| value).collect() + } + + /// Returns an iterator of references over all arguments in insertion order. + pub fn named_args(&self) -> impl Iterator { + self.0.iter() + } + + /// Returns an iterator of mutable references over all arguments in insertion order. + pub fn named_args_mut(&mut self) -> impl Iterator { + self.0.iter_mut() + } + + /// Returns the numeric value of `name` arg from the runtime arguments or defaults to + /// 0 if that arg doesn't exist or is not an integer type. + /// + /// Supported [`CLType`]s for numeric conversions are U64, and U512. + /// + /// Returns an error if parsing the arg fails. 
+ pub fn try_get_number(&self, name: &str) -> Result { + let amount_arg = match self.get(name) { + None => return Ok(U512::zero()), + Some(arg) => arg, + }; + match amount_arg.cl_type() { + CLType::U512 => amount_arg.clone().into_t::(), + CLType::U64 => amount_arg.clone().into_t::().map(U512::from), + _ => Ok(U512::zero()), + } + } +} + +impl From> for RuntimeArgs { + fn from(values: Vec) -> Self { + RuntimeArgs(values) + } +} + +impl From> for RuntimeArgs { + fn from(cl_values: BTreeMap) -> RuntimeArgs { + RuntimeArgs(cl_values.into_iter().map(NamedArg::from).collect()) + } +} + +impl From for BTreeMap { + fn from(args: RuntimeArgs) -> BTreeMap { + let mut map = BTreeMap::new(); + for named in args.0 { + map.insert(named.0, named.1); + } + map + } +} + +impl ToBytes for RuntimeArgs { + fn to_bytes(&self) -> Result, Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for RuntimeArgs { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (args, remainder) = Vec::::from_bytes(bytes)?; + Ok((RuntimeArgs(args), remainder)) + } +} + +/// Macro that makes it easier to construct named arguments. +/// +/// NOTE: This macro does not propagate possible errors that could occur while creating a +/// [`crate::CLValue`]. For such cases creating [`RuntimeArgs`] manually is recommended. +/// +/// # Example usage +/// ``` +/// use casper_types::{RuntimeArgs, runtime_args}; +/// let _named_args = runtime_args! { +/// "foo" => 42, +/// "bar" => "Hello, world!" +/// }; +/// ``` +#[macro_export] +macro_rules! 
runtime_args { + () => (RuntimeArgs::new()); + ( $($key:expr => $value:expr,)+ ) => (runtime_args!($($key => $value),+)); + ( $($key:expr => $value:expr),* ) => { + { + let mut named_args = RuntimeArgs::new(); + $( + named_args.insert($key, $value).unwrap(); + )* + named_args + } + }; +} + +#[cfg(test)] +mod tests { + use super::*; + + const ARG_AMOUNT: &str = "amount"; + + #[test] + fn test_runtime_args() { + let arg1 = CLValue::from_t(1).unwrap(); + let arg2 = CLValue::from_t("Foo").unwrap(); + let arg3 = CLValue::from_t(Some(1)).unwrap(); + let args = { + let mut map = BTreeMap::new(); + map.insert("bar".into(), arg2.clone()); + map.insert("foo".into(), arg1.clone()); + map.insert("qwer".into(), arg3.clone()); + map + }; + let runtime_args = RuntimeArgs::from(args); + assert_eq!(runtime_args.get("qwer"), Some(&arg3)); + assert_eq!(runtime_args.get("foo"), Some(&arg1)); + assert_eq!(runtime_args.get("bar"), Some(&arg2)); + assert_eq!(runtime_args.get("aaa"), None); + + // Ensure macro works + + let runtime_args_2 = runtime_args! { + "bar" => "Foo", + "foo" => 1i32, + "qwer" => Some(1i32), + }; + assert_eq!(runtime_args, runtime_args_2); + } + + #[test] + fn empty_macro() { + assert_eq!(runtime_args! {}, RuntimeArgs::new()); + } + + #[test] + fn btreemap_compat() { + // This test assumes same serialization format as BTreeMap + let runtime_args_1 = runtime_args! { + "bar" => "Foo", + "foo" => 1i32, + "qwer" => Some(1i32), + }; + let tagless = runtime_args_1.to_bytes().unwrap().to_vec(); + + let mut runtime_args_2 = BTreeMap::new(); + runtime_args_2.insert(String::from("bar"), CLValue::from_t("Foo").unwrap()); + runtime_args_2.insert(String::from("foo"), CLValue::from_t(1i32).unwrap()); + runtime_args_2.insert(String::from("qwer"), CLValue::from_t(Some(1i32)).unwrap()); + + assert_eq!(tagless, runtime_args_2.to_bytes().unwrap()); + } + + #[test] + fn named_serialization_roundtrip() { + let args = runtime_args! 
{ + "foo" => 1i32, + }; + bytesrepr::test_serialization_roundtrip(&args); + } + + #[test] + fn should_create_args_with() { + let res = RuntimeArgs::try_new(|runtime_args| { + runtime_args.insert(String::from("foo"), 123)?; + runtime_args.insert(String::from("bar"), 456)?; + Ok(()) + }); + + let expected = runtime_args! { + "foo" => 123, + "bar" => 456, + }; + assert!(matches!(res, Ok(args) if expected == args)); + } + + #[test] + fn try_get_number_should_work() { + let mut args = RuntimeArgs::new(); + args.insert(ARG_AMOUNT, 0u64).expect("is ok"); + assert_eq!(args.try_get_number(ARG_AMOUNT).unwrap(), U512::zero()); + + let mut args = RuntimeArgs::new(); + args.insert(ARG_AMOUNT, U512::zero()).expect("is ok"); + assert_eq!(args.try_get_number(ARG_AMOUNT).unwrap(), U512::zero()); + + let args = RuntimeArgs::new(); + assert_eq!(args.try_get_number(ARG_AMOUNT).unwrap(), U512::zero()); + + let hundred = 100u64; + + let mut args = RuntimeArgs::new(); + let input = U512::from(hundred); + args.insert(ARG_AMOUNT, input).expect("is ok"); + assert_eq!(args.try_get_number(ARG_AMOUNT).unwrap(), input); + + let mut args = RuntimeArgs::new(); + args.insert(ARG_AMOUNT, hundred).expect("is ok"); + assert_eq!( + args.try_get_number(ARG_AMOUNT).unwrap(), + U512::from(hundred) + ); + } + + #[test] + fn try_get_number_should_return_zero_for_non_numeric_type() { + let mut args = RuntimeArgs::new(); + args.insert(ARG_AMOUNT, "Non-numeric-string").unwrap(); + assert_eq!( + args.try_get_number(ARG_AMOUNT).expect("should get amount"), + U512::zero() + ); + } + + #[test] + fn try_get_number_should_return_zero_if_amount_is_missing() { + let args = RuntimeArgs::new(); + assert_eq!( + args.try_get_number(ARG_AMOUNT).expect("should get amount"), + U512::zero() + ); + } +} diff --git a/casper_types/src/semver.rs b/casper_types/src/semver.rs new file mode 100644 index 00000000..5feafe53 --- /dev/null +++ b/casper_types/src/semver.rs @@ -0,0 +1,152 @@ +use alloc::vec::Vec; +use core::{ + 
convert::TryFrom, + fmt::{self, Display, Formatter}, + num::ParseIntError, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +use crate::bytesrepr::{self, Error, FromBytes, ToBytes, U32_SERIALIZED_LENGTH}; + +/// Length of SemVer when serialized +pub const SEM_VER_SERIALIZED_LENGTH: usize = 3 * U32_SERIALIZED_LENGTH; + +/// A struct for semantic versioning. +#[derive( + Copy, Clone, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct SemVer { + /// Major version. + pub major: u32, + /// Minor version. + pub minor: u32, + /// Patch version. + pub patch: u32, +} + +impl SemVer { + /// Version 1.0.0. + pub const V1_0_0: SemVer = SemVer { + major: 1, + minor: 0, + patch: 0, + }; + + /// Constructs a new `SemVer` from the given semver parts. + pub const fn new(major: u32, minor: u32, patch: u32) -> SemVer { + SemVer { + major, + minor, + patch, + } + } +} + +impl ToBytes for SemVer { + fn to_bytes(&self) -> Result, Error> { + let mut ret = bytesrepr::unchecked_allocate_buffer(self); + ret.append(&mut self.major.to_bytes()?); + ret.append(&mut self.minor.to_bytes()?); + ret.append(&mut self.patch.to_bytes()?); + Ok(ret) + } + + fn serialized_length(&self) -> usize { + SEM_VER_SERIALIZED_LENGTH + } +} + +impl FromBytes for SemVer { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (major, rem): (u32, &[u8]) = FromBytes::from_bytes(bytes)?; + let (minor, rem): (u32, &[u8]) = FromBytes::from_bytes(rem)?; + let (patch, rem): (u32, &[u8]) = FromBytes::from_bytes(rem)?; + Ok((SemVer::new(major, minor, patch), rem)) + } +} + +impl Display for SemVer { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!(f, "{}.{}.{}", self.major, self.minor, self.patch) + } +} + +/// Parsing error when creating a SemVer. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum ParseSemVerError { + /// Invalid version format. 
+ InvalidVersionFormat, + /// Error parsing an integer. + ParseIntError(ParseIntError), +} + +impl Display for ParseSemVerError { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + ParseSemVerError::InvalidVersionFormat => formatter.write_str("invalid version format"), + ParseSemVerError::ParseIntError(error) => error.fmt(formatter), + } + } +} + +impl From for ParseSemVerError { + fn from(error: ParseIntError) -> ParseSemVerError { + ParseSemVerError::ParseIntError(error) + } +} + +impl TryFrom<&str> for SemVer { + type Error = ParseSemVerError; + fn try_from(value: &str) -> Result { + let tokens: Vec<&str> = value.split('.').collect(); + if tokens.len() != 3 { + return Err(ParseSemVerError::InvalidVersionFormat); + } + + Ok(SemVer { + major: tokens[0].parse()?, + minor: tokens[1].parse()?, + patch: tokens[2].parse()?, + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use core::convert::TryInto; + + #[test] + fn should_compare_semver_versions() { + assert!(SemVer::new(0, 0, 0) < SemVer::new(1, 2, 3)); + assert!(SemVer::new(1, 1, 0) < SemVer::new(1, 2, 0)); + assert!(SemVer::new(1, 0, 0) < SemVer::new(1, 2, 0)); + assert!(SemVer::new(1, 0, 0) < SemVer::new(1, 2, 3)); + assert!(SemVer::new(1, 2, 0) < SemVer::new(1, 2, 3)); + assert!(SemVer::new(1, 2, 3) == SemVer::new(1, 2, 3)); + assert!(SemVer::new(1, 2, 3) >= SemVer::new(1, 2, 3)); + assert!(SemVer::new(1, 2, 3) <= SemVer::new(1, 2, 3)); + assert!(SemVer::new(2, 0, 0) >= SemVer::new(1, 99, 99)); + assert!(SemVer::new(2, 0, 0) > SemVer::new(1, 99, 99)); + } + + #[test] + fn parse_from_string() { + let ver1: SemVer = "100.20.3".try_into().expect("should parse"); + assert_eq!(ver1, SemVer::new(100, 20, 3)); + let ver2: SemVer = "0.0.1".try_into().expect("should parse"); + assert_eq!(ver2, SemVer::new(0, 0, 1)); + + assert!(SemVer::try_from("1.a.2.3").is_err()); + assert!(SemVer::try_from("1. 
2.3").is_err()); + assert!(SemVer::try_from("12345124361461.0.1").is_err()); + assert!(SemVer::try_from("1.2.3.4").is_err()); + assert!(SemVer::try_from("1.2").is_err()); + assert!(SemVer::try_from("1").is_err()); + assert!(SemVer::try_from("0").is_err()); + } +} diff --git a/casper_types/src/stored_value.rs b/casper_types/src/stored_value.rs new file mode 100644 index 00000000..d8190078 --- /dev/null +++ b/casper_types/src/stored_value.rs @@ -0,0 +1,464 @@ +mod type_mismatch; + +use alloc::{ + boxed::Box, + string::{String, ToString}, + vec::Vec, +}; +use core::{convert::TryFrom, fmt::Debug}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use serde::{de, ser, Deserialize, Deserializer, Serialize, Serializer}; +use serde_bytes::ByteBuf; + +use crate::{ + account::Account, + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + contracts::ContractPackage, + system::auction::{Bid, EraInfo, UnbondingPurse, WithdrawPurse}, + CLValue, Contract, ContractWasm, DeployInfo, Transfer, +}; +pub use type_mismatch::TypeMismatch; + +#[allow(clippy::large_enum_variant)] +#[repr(u8)] +enum Tag { + CLValue = 0, + Account = 1, + ContractWasm = 2, + Contract = 3, + ContractPackage = 4, + Transfer = 5, + DeployInfo = 6, + EraInfo = 7, + Bid = 8, + Withdraw = 9, + Unbonding = 10, +} + +#[allow(clippy::large_enum_variant)] +#[derive(Eq, PartialEq, Clone, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +/// StoredValue represents all possible variants of values stored in Global State. +pub enum StoredValue { + /// Variant that stores [`CLValue`]. + CLValue(CLValue), + /// Variant that stores [`Account`]. + Account(Account), + /// Variant that stores [`ContractWasm`]. + ContractWasm(ContractWasm), + /// Variant that stores [`Contract`]. + Contract(Contract), + /// Variant that stores [`ContractPackage`]. + ContractPackage(ContractPackage), + /// Variant that stores [`Transfer`]. + Transfer(Transfer), + /// Variant that stores [`DeployInfo`]. 
+ DeployInfo(DeployInfo), + /// Variant that stores [`EraInfo`]. + EraInfo(EraInfo), + /// Variant that stores [`Bid`]. + Bid(Box), + /// Variant that stores withdraw information. + Withdraw(Vec), + /// Variant that stores unbonding information. + Unbonding(Vec), +} + +impl StoredValue { + /// Returns a wrapped [`CLValue`] if this is a `CLValue` variant. + pub fn as_cl_value(&self) -> Option<&CLValue> { + match self { + StoredValue::CLValue(cl_value) => Some(cl_value), + _ => None, + } + } + + /// Returns a wrapped [`Account`] if this is an `Account` variant. + pub fn as_account(&self) -> Option<&Account> { + match self { + StoredValue::Account(account) => Some(account), + _ => None, + } + } + + /// Returns a wrapped [`Contract`] if this is a `Contract` variant. + pub fn as_contract(&self) -> Option<&Contract> { + match self { + StoredValue::Contract(contract) => Some(contract), + _ => None, + } + } + + /// Returns a wrapped [`ContractWasm`] if this is a `ContractWasm` variant. + pub fn as_contract_wasm(&self) -> Option<&ContractWasm> { + match self { + StoredValue::ContractWasm(contract_wasm) => Some(contract_wasm), + _ => None, + } + } + + /// Returns a wrapped [`ContractPackage`] if this is a `ContractPackage` variant. + pub fn as_contract_package(&self) -> Option<&ContractPackage> { + match self { + StoredValue::ContractPackage(contract_package) => Some(contract_package), + _ => None, + } + } + + /// Returns a wrapped [`DeployInfo`] if this is a `DeployInfo` variant. + pub fn as_deploy_info(&self) -> Option<&DeployInfo> { + match self { + StoredValue::DeployInfo(deploy_info) => Some(deploy_info), + _ => None, + } + } + + /// Returns a wrapped [`EraInfo`] if this is a `EraInfo` variant. + pub fn as_era_info(&self) -> Option<&EraInfo> { + match self { + StoredValue::EraInfo(era_info) => Some(era_info), + _ => None, + } + } + + /// Returns a wrapped [`Bid`] if this is a `Bid` variant. 
+ pub fn as_bid(&self) -> Option<&Bid> { + match self { + StoredValue::Bid(bid) => Some(bid), + _ => None, + } + } + + /// Returns a wrapped list of [`WithdrawPurse`]s if this is a `Withdraw` variant. + pub fn as_withdraw(&self) -> Option<&Vec> { + match self { + StoredValue::Withdraw(withdraw_purses) => Some(withdraw_purses), + _ => None, + } + } + + /// Returns a wrapped list of [`UnbondingPurse`]s if this is a `Unbonding` variant. + pub fn as_unbonding(&self) -> Option<&Vec> { + match self { + StoredValue::Unbonding(unbonding_purses) => Some(unbonding_purses), + _ => None, + } + } + + /// Returns the type name of the [`StoredValue`] enum variant. + /// + /// For [`CLValue`] variants it will return the name of the [`CLType`](crate::cl_type::CLType) + pub fn type_name(&self) -> String { + match self { + StoredValue::CLValue(cl_value) => format!("{:?}", cl_value.cl_type()), + StoredValue::Account(_) => "Account".to_string(), + StoredValue::ContractWasm(_) => "ContractWasm".to_string(), + StoredValue::Contract(_) => "Contract".to_string(), + StoredValue::ContractPackage(_) => "ContractPackage".to_string(), + StoredValue::Transfer(_) => "Transfer".to_string(), + StoredValue::DeployInfo(_) => "DeployInfo".to_string(), + StoredValue::EraInfo(_) => "EraInfo".to_string(), + StoredValue::Bid(_) => "Bid".to_string(), + StoredValue::Withdraw(_) => "Withdraw".to_string(), + StoredValue::Unbonding(_) => "Unbonding".to_string(), + } + } + + fn tag(&self) -> Tag { + match self { + StoredValue::CLValue(_) => Tag::CLValue, + StoredValue::Account(_) => Tag::Account, + StoredValue::ContractWasm(_) => Tag::ContractWasm, + StoredValue::Contract(_) => Tag::Contract, + StoredValue::ContractPackage(_) => Tag::ContractPackage, + StoredValue::Transfer(_) => Tag::Transfer, + StoredValue::DeployInfo(_) => Tag::DeployInfo, + StoredValue::EraInfo(_) => Tag::EraInfo, + StoredValue::Bid(_) => Tag::Bid, + StoredValue::Withdraw(_) => Tag::Withdraw, + StoredValue::Unbonding(_) => Tag::Unbonding, + 
} + } +} + +impl From for StoredValue { + fn from(value: CLValue) -> StoredValue { + StoredValue::CLValue(value) + } +} +impl From for StoredValue { + fn from(value: Account) -> StoredValue { + StoredValue::Account(value) + } +} +impl From for StoredValue { + fn from(value: ContractWasm) -> StoredValue { + StoredValue::ContractWasm(value) + } +} +impl From for StoredValue { + fn from(value: Contract) -> StoredValue { + StoredValue::Contract(value) + } +} +impl From for StoredValue { + fn from(value: ContractPackage) -> StoredValue { + StoredValue::ContractPackage(value) + } +} +impl From for StoredValue { + fn from(bid: Bid) -> StoredValue { + StoredValue::Bid(Box::new(bid)) + } +} + +impl TryFrom for CLValue { + type Error = TypeMismatch; + + fn try_from(stored_value: StoredValue) -> Result { + let type_name = stored_value.type_name(); + match stored_value { + StoredValue::CLValue(cl_value) => Ok(cl_value), + StoredValue::ContractPackage(contract_package) => Ok(CLValue::from_t(contract_package) + .map_err(|_error| TypeMismatch::new("ContractPackage".to_string(), type_name))?), + _ => Err(TypeMismatch::new("CLValue".to_string(), type_name)), + } + } +} + +impl TryFrom for Account { + type Error = TypeMismatch; + + fn try_from(stored_value: StoredValue) -> Result { + match stored_value { + StoredValue::Account(account) => Ok(account), + _ => Err(TypeMismatch::new( + "Account".to_string(), + stored_value.type_name(), + )), + } + } +} + +impl TryFrom for ContractWasm { + type Error = TypeMismatch; + + fn try_from(stored_value: StoredValue) -> Result { + match stored_value { + StoredValue::ContractWasm(contract_wasm) => Ok(contract_wasm), + _ => Err(TypeMismatch::new( + "ContractWasm".to_string(), + stored_value.type_name(), + )), + } + } +} + +impl TryFrom for ContractPackage { + type Error = TypeMismatch; + + fn try_from(stored_value: StoredValue) -> Result { + match stored_value { + StoredValue::ContractPackage(contract_package) => Ok(contract_package), + _ => 
Err(TypeMismatch::new( + "ContractPackage".to_string(), + stored_value.type_name(), + )), + } + } +} + +impl TryFrom for Contract { + type Error = TypeMismatch; + + fn try_from(stored_value: StoredValue) -> Result { + match stored_value { + StoredValue::Contract(contract) => Ok(contract), + _ => Err(TypeMismatch::new( + "Contract".to_string(), + stored_value.type_name(), + )), + } + } +} + +impl TryFrom for Transfer { + type Error = TypeMismatch; + + fn try_from(value: StoredValue) -> Result { + match value { + StoredValue::Transfer(transfer) => Ok(transfer), + _ => Err(TypeMismatch::new("Transfer".to_string(), value.type_name())), + } + } +} + +impl TryFrom for DeployInfo { + type Error = TypeMismatch; + + fn try_from(value: StoredValue) -> Result { + match value { + StoredValue::DeployInfo(deploy_info) => Ok(deploy_info), + _ => Err(TypeMismatch::new( + "DeployInfo".to_string(), + value.type_name(), + )), + } + } +} + +impl TryFrom for EraInfo { + type Error = TypeMismatch; + + fn try_from(value: StoredValue) -> Result { + match value { + StoredValue::EraInfo(era_info) => Ok(era_info), + _ => Err(TypeMismatch::new("EraInfo".to_string(), value.type_name())), + } + } +} + +impl ToBytes for StoredValue { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + let (tag, mut serialized_data) = match self { + StoredValue::CLValue(cl_value) => (Tag::CLValue, cl_value.to_bytes()?), + StoredValue::Account(account) => (Tag::Account, account.to_bytes()?), + StoredValue::ContractWasm(contract_wasm) => { + (Tag::ContractWasm, contract_wasm.to_bytes()?) + } + StoredValue::Contract(contract_header) => (Tag::Contract, contract_header.to_bytes()?), + StoredValue::ContractPackage(contract_package) => { + (Tag::ContractPackage, contract_package.to_bytes()?) 
+ } + StoredValue::Transfer(transfer) => (Tag::Transfer, transfer.to_bytes()?), + StoredValue::DeployInfo(deploy_info) => (Tag::DeployInfo, deploy_info.to_bytes()?), + StoredValue::EraInfo(era_info) => (Tag::EraInfo, era_info.to_bytes()?), + StoredValue::Bid(bid) => (Tag::Bid, bid.to_bytes()?), + StoredValue::Withdraw(withdraw_purses) => (Tag::Withdraw, withdraw_purses.to_bytes()?), + StoredValue::Unbonding(unbonding_purses) => { + (Tag::Unbonding, unbonding_purses.to_bytes()?) + } + }; + result.push(tag as u8); + result.append(&mut serialized_data); + Ok(result) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + StoredValue::CLValue(cl_value) => cl_value.serialized_length(), + StoredValue::Account(account) => account.serialized_length(), + StoredValue::ContractWasm(contract_wasm) => contract_wasm.serialized_length(), + StoredValue::Contract(contract_header) => contract_header.serialized_length(), + StoredValue::ContractPackage(contract_package) => { + contract_package.serialized_length() + } + StoredValue::Transfer(transfer) => transfer.serialized_length(), + StoredValue::DeployInfo(deploy_info) => deploy_info.serialized_length(), + StoredValue::EraInfo(era_info) => era_info.serialized_length(), + StoredValue::Bid(bid) => bid.serialized_length(), + StoredValue::Withdraw(withdraw_purses) => withdraw_purses.serialized_length(), + StoredValue::Unbonding(unbonding_purses) => unbonding_purses.serialized_length(), + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + writer.push(self.tag() as u8); + match self { + StoredValue::CLValue(cl_value) => cl_value.write_bytes(writer)?, + StoredValue::Account(account) => account.write_bytes(writer)?, + StoredValue::ContractWasm(contract_wasm) => contract_wasm.write_bytes(writer)?, + StoredValue::Contract(contract_header) => contract_header.write_bytes(writer)?, + StoredValue::ContractPackage(contract_package) => { + contract_package.write_bytes(writer)? 
+ } + StoredValue::Transfer(transfer) => transfer.write_bytes(writer)?, + StoredValue::DeployInfo(deploy_info) => deploy_info.write_bytes(writer)?, + StoredValue::EraInfo(era_info) => era_info.write_bytes(writer)?, + StoredValue::Bid(bid) => bid.write_bytes(writer)?, + StoredValue::Withdraw(unbonding_purses) => unbonding_purses.write_bytes(writer)?, + StoredValue::Unbonding(unbonding_purses) => unbonding_purses.write_bytes(writer)?, + }; + Ok(()) + } +} + +impl FromBytes for StoredValue { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder): (u8, &[u8]) = FromBytes::from_bytes(bytes)?; + match tag { + tag if tag == Tag::CLValue as u8 => CLValue::from_bytes(remainder) + .map(|(cl_value, remainder)| (StoredValue::CLValue(cl_value), remainder)), + tag if tag == Tag::Account as u8 => Account::from_bytes(remainder) + .map(|(account, remainder)| (StoredValue::Account(account), remainder)), + tag if tag == Tag::ContractWasm as u8 => { + ContractWasm::from_bytes(remainder).map(|(contract_wasm, remainder)| { + (StoredValue::ContractWasm(contract_wasm), remainder) + }) + } + tag if tag == Tag::ContractPackage as u8 => { + ContractPackage::from_bytes(remainder).map(|(contract_package, remainder)| { + (StoredValue::ContractPackage(contract_package), remainder) + }) + } + tag if tag == Tag::Contract as u8 => Contract::from_bytes(remainder) + .map(|(contract, remainder)| (StoredValue::Contract(contract), remainder)), + tag if tag == Tag::Transfer as u8 => Transfer::from_bytes(remainder) + .map(|(transfer, remainder)| (StoredValue::Transfer(transfer), remainder)), + tag if tag == Tag::DeployInfo as u8 => DeployInfo::from_bytes(remainder) + .map(|(deploy_info, remainder)| (StoredValue::DeployInfo(deploy_info), remainder)), + tag if tag == Tag::EraInfo as u8 => EraInfo::from_bytes(remainder) + .map(|(deploy_info, remainder)| (StoredValue::EraInfo(deploy_info), remainder)), + tag if tag == Tag::Bid as u8 => Bid::from_bytes(remainder) + 
.map(|(bid, remainder)| (StoredValue::Bid(Box::new(bid)), remainder)), + tag if tag == Tag::Withdraw as u8 => { + Vec::::from_bytes(remainder).map(|(withdraw_purses, remainder)| { + (StoredValue::Withdraw(withdraw_purses), remainder) + }) + } + tag if tag == Tag::Unbonding as u8 => { + Vec::::from_bytes(remainder).map(|(unbonding_purses, remainder)| { + (StoredValue::Unbonding(unbonding_purses), remainder) + }) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +impl Serialize for StoredValue { + fn serialize(&self, serializer: S) -> Result { + // The JSON representation of a StoredValue is just its bytesrepr + // While this makes it harder to inspect, it makes deterministic representation simple. + let bytes = self + .to_bytes() + .map_err(|error| ser::Error::custom(format!("{:?}", error)))?; + ByteBuf::from(bytes).serialize(serializer) + } +} + +impl<'de> Deserialize<'de> for StoredValue { + fn deserialize>(deserializer: D) -> Result { + let bytes = ByteBuf::deserialize(deserializer)?.into_vec(); + bytesrepr::deserialize::(bytes) + .map_err(|error| de::Error::custom(format!("{:?}", error))) + } +} + +#[cfg(test)] +mod tests { + use proptest::proptest; + + use crate::{bytesrepr, gens}; + + proptest! { + #[test] + fn serialization_roundtrip(v in gens::stored_value_arb()) { + bytesrepr::test_serialization_roundtrip(&v); + } + } +} diff --git a/casper_types/src/stored_value/type_mismatch.rs b/casper_types/src/stored_value/type_mismatch.rs new file mode 100644 index 00000000..cd59b766 --- /dev/null +++ b/casper_types/src/stored_value/type_mismatch.rs @@ -0,0 +1,30 @@ +use alloc::string::String; +use core::fmt::{self, Display, Formatter}; + +use serde::{Deserialize, Serialize}; + +#[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)] +/// An error struct representing a type mismatch in [`StoredValue`](crate::StoredValue) operations. +pub struct TypeMismatch { + /// The name of the expected type. + expected: String, + /// The actual type found. 
+ found: String, +} + +impl TypeMismatch { + /// Creates a new `TypeMismatch`. + pub fn new(expected: String, found: String) -> TypeMismatch { + TypeMismatch { expected, found } + } +} + +impl Display for TypeMismatch { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "Type mismatch. Expected {} but found {}.", + self.expected, self.found + ) + } +} diff --git a/casper_types/src/system.rs b/casper_types/src/system.rs new file mode 100644 index 00000000..cdae3f6f --- /dev/null +++ b/casper_types/src/system.rs @@ -0,0 +1,14 @@ +//! System modules, formerly known as "system contracts" +pub mod auction; +mod call_stack_element; +mod error; +pub mod handle_payment; +pub mod mint; +pub mod standard_payment; +mod system_contract_type; + +pub use call_stack_element::{CallStackElement, CallStackElementTag}; +pub use error::Error; +pub use system_contract_type::{ + SystemContractType, AUCTION, HANDLE_PAYMENT, MINT, STANDARD_PAYMENT, +}; diff --git a/casper_types/src/system/auction.rs b/casper_types/src/system/auction.rs new file mode 100644 index 00000000..5831ab24 --- /dev/null +++ b/casper_types/src/system/auction.rs @@ -0,0 +1,53 @@ +//! Contains implementation of a Auction contract functionality. 
+mod bid; +mod constants; +mod delegator; +mod entry_points; +mod era_info; +mod error; +mod seigniorage_recipient; +mod unbonding_purse; +mod withdraw_purse; + +use alloc::{collections::BTreeMap, vec::Vec}; + +pub use bid::{Bid, VESTING_SCHEDULE_LENGTH_MILLIS}; +pub use constants::*; +pub use delegator::Delegator; +pub use entry_points::auction_entry_points; +pub use era_info::{EraInfo, SeigniorageAllocation}; +pub use error::Error; +pub use seigniorage_recipient::SeigniorageRecipient; +pub use unbonding_purse::UnbondingPurse; +pub use withdraw_purse::WithdrawPurse; + +#[cfg(any(feature = "testing", test))] +pub(crate) mod gens { + pub use super::era_info::gens::*; +} + +use crate::{account::AccountHash, EraId, PublicKey, U512}; + +/// Representation of delegation rate of tokens. Range from 0..=100. +pub type DelegationRate = u8; + +/// Validators mapped to their bids. +pub type Bids = BTreeMap; + +/// Weights of validators. "Weight" in this context means a sum of their stakes. +pub type ValidatorWeights = BTreeMap; + +/// List of era validators +pub type EraValidators = BTreeMap; + +/// Collection of seigniorage recipients. +pub type SeigniorageRecipients = BTreeMap; + +/// Snapshot of `SeigniorageRecipients` for a given era. +pub type SeigniorageRecipientsSnapshot = BTreeMap; + +/// Validators and delegators mapped to their unbonding purses. +pub type UnbondingPurses = BTreeMap>; + +/// Validators and delegators mapped to their withdraw purses. +pub type WithdrawPurses = BTreeMap>; diff --git a/casper_types/src/system/auction/bid.rs b/casper_types/src/system/auction/bid.rs new file mode 100644 index 00000000..ca5f7625 --- /dev/null +++ b/casper_types/src/system/auction/bid.rs @@ -0,0 +1,554 @@ +// TODO - remove once schemars stops causing warning. 
+#![allow(clippy::field_reassign_with_default)] + +mod vesting; + +use alloc::{collections::BTreeMap, vec::Vec}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + system::auction::{DelegationRate, Delegator, Error}, + CLType, CLTyped, PublicKey, URef, U512, +}; + +pub use vesting::{VestingSchedule, VESTING_SCHEDULE_LENGTH_MILLIS}; + +/// An entry in the validator map. +#[derive(PartialEq, Eq, Debug, Serialize, Deserialize, Clone)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct Bid { + /// Validator public key + validator_public_key: PublicKey, + /// The purse that was used for bonding. + bonding_purse: URef, + /// The amount of tokens staked by a validator (not including delegators). + staked_amount: U512, + /// Delegation rate + delegation_rate: DelegationRate, + /// Vesting schedule for a genesis validator. `None` if non-genesis validator. + vesting_schedule: Option, + /// This validator's delegators, indexed by their public keys + delegators: BTreeMap, + /// `true` if validator has been "evicted" + inactive: bool, +} + +impl Bid { + /// Creates new instance of a bid with locked funds. + pub fn locked( + validator_public_key: PublicKey, + bonding_purse: URef, + staked_amount: U512, + delegation_rate: DelegationRate, + release_timestamp_millis: u64, + ) -> Self { + let vesting_schedule = Some(VestingSchedule::new(release_timestamp_millis)); + let delegators = BTreeMap::new(); + let inactive = false; + Self { + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + vesting_schedule, + delegators, + inactive, + } + } + + /// Creates new instance of a bid with unlocked funds. 
+ pub fn unlocked( + validator_public_key: PublicKey, + bonding_purse: URef, + staked_amount: U512, + delegation_rate: DelegationRate, + ) -> Self { + let vesting_schedule = None; + let delegators = BTreeMap::new(); + let inactive = false; + Self { + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + vesting_schedule, + delegators, + inactive, + } + } + + /// Creates a new inactive instance of a bid with 0 staked amount. + pub fn empty(validator_public_key: PublicKey, bonding_purse: URef) -> Self { + let vesting_schedule = None; + let delegators = BTreeMap::new(); + let inactive = true; + let staked_amount = 0.into(); + let delegation_rate = Default::default(); + Self { + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + vesting_schedule, + delegators, + inactive, + } + } + + /// Gets the validator public key of the provided bid + pub fn validator_public_key(&self) -> &PublicKey { + &self.validator_public_key + } + + /// Gets the bonding purse of the provided bid + pub fn bonding_purse(&self) -> &URef { + &self.bonding_purse + } + + /// Checks if a bid is still locked under a vesting schedule. + /// + /// Returns true if a timestamp falls below the initial lockup period + 91 days release + /// schedule, otherwise false. + pub fn is_locked(&self, timestamp_millis: u64) -> bool { + self.is_locked_with_vesting_schedule(timestamp_millis, VESTING_SCHEDULE_LENGTH_MILLIS) + } + + /// Checks if a bid is still locked under a vesting schedule. + /// + /// Returns true if a timestamp falls below the initial lockup period + 91 days release + /// schedule, otherwise false. 
+ pub fn is_locked_with_vesting_schedule( + &self, + timestamp_millis: u64, + vesting_schedule_period_millis: u64, + ) -> bool { + match &self.vesting_schedule { + Some(vesting_schedule) => { + vesting_schedule.is_vesting(timestamp_millis, vesting_schedule_period_millis) + } + None => false, + } + } + + /// Gets the staked amount of the provided bid + pub fn staked_amount(&self) -> &U512 { + &self.staked_amount + } + + /// Gets the staked amount of the provided bid + pub fn staked_amount_mut(&mut self) -> &mut U512 { + &mut self.staked_amount + } + + /// Gets the delegation rate of the provided bid + pub fn delegation_rate(&self) -> &DelegationRate { + &self.delegation_rate + } + + /// Returns a reference to the vesting schedule of the provided bid. `None` if a non-genesis + /// validator. + pub fn vesting_schedule(&self) -> Option<&VestingSchedule> { + self.vesting_schedule.as_ref() + } + + /// Returns a mutable reference to the vesting schedule of the provided bid. `None` if a + /// non-genesis validator. 
+ pub fn vesting_schedule_mut(&mut self) -> Option<&mut VestingSchedule> { + self.vesting_schedule.as_mut() + } + + /// Returns a reference to the delegators of the provided bid + pub fn delegators(&self) -> &BTreeMap { + &self.delegators + } + + /// Returns a mutable reference to the delegators of the provided bid + pub fn delegators_mut(&mut self) -> &mut BTreeMap { + &mut self.delegators + } + + /// Returns `true` if validator is inactive + pub fn inactive(&self) -> bool { + self.inactive + } + + /// Decreases the stake of the provided bid + pub fn decrease_stake( + &mut self, + amount: U512, + era_end_timestamp_millis: u64, + ) -> Result { + let updated_staked_amount = self + .staked_amount + .checked_sub(amount) + .ok_or(Error::UnbondTooLarge)?; + + let vesting_schedule = match self.vesting_schedule.as_ref() { + Some(vesting_schedule) => vesting_schedule, + None => { + self.staked_amount = updated_staked_amount; + return Ok(updated_staked_amount); + } + }; + + match vesting_schedule.locked_amount(era_end_timestamp_millis) { + Some(locked_amount) if updated_staked_amount < locked_amount => { + Err(Error::ValidatorFundsLocked) + } + None => { + // If `None`, then the locked amounts table has yet to be initialized (likely + // pre-90 day mark) + Err(Error::ValidatorFundsLocked) + } + Some(_) => { + self.staked_amount = updated_staked_amount; + Ok(updated_staked_amount) + } + } + } + + /// Increases the stake of the provided bid + pub fn increase_stake(&mut self, amount: U512) -> Result { + let updated_staked_amount = self + .staked_amount + .checked_add(amount) + .ok_or(Error::InvalidAmount)?; + + self.staked_amount = updated_staked_amount; + + Ok(updated_staked_amount) + } + + /// Updates the delegation rate of the provided bid + pub fn with_delegation_rate(&mut self, delegation_rate: DelegationRate) -> &mut Self { + self.delegation_rate = delegation_rate; + self + } + + /// Initializes the vesting schedule of provided bid if the provided timestamp is greater 
than + /// or equal to the bid's initial release timestamp and the bid is owned by a genesis + /// validator. This method initializes with default 14 week vesting schedule. + /// + /// Returns `true` if the provided bid's vesting schedule was initialized. + pub fn process(&mut self, timestamp_millis: u64) -> bool { + self.process_with_vesting_schedule(timestamp_millis, VESTING_SCHEDULE_LENGTH_MILLIS) + } + + /// Initializes the vesting schedule of provided bid if the provided timestamp is greater than + /// or equal to the bid's initial release timestamp and the bid is owned by a genesis + /// validator. + /// + /// Returns `true` if the provided bid's vesting schedule was initialized. + pub fn process_with_vesting_schedule( + &mut self, + timestamp_millis: u64, + vesting_schedule_period_millis: u64, + ) -> bool { + // Put timestamp-sensitive processing logic in here + let staked_amount = self.staked_amount; + let vesting_schedule = match self.vesting_schedule_mut() { + Some(vesting_schedule) => vesting_schedule, + None => return false, + }; + if timestamp_millis < vesting_schedule.initial_release_timestamp_millis() { + return false; + } + + let mut initialized = false; + + if vesting_schedule.initialize_with_schedule(staked_amount, vesting_schedule_period_millis) + { + initialized = true; + } + + for delegator in self.delegators_mut().values_mut() { + let staked_amount = *delegator.staked_amount(); + if let Some(vesting_schedule) = delegator.vesting_schedule_mut() { + if timestamp_millis >= vesting_schedule.initial_release_timestamp_millis() + && vesting_schedule + .initialize_with_schedule(staked_amount, vesting_schedule_period_millis) + { + initialized = true; + } + } + } + + initialized + } + + /// Sets given bid's `inactive` field to `false` + pub fn activate(&mut self) -> bool { + self.inactive = false; + false + } + + /// Sets given bid's `inactive` field to `true` + pub fn deactivate(&mut self) -> bool { + self.inactive = true; + true + } + + /// Returns 
the total staked amount of validator + all delegators + pub fn total_staked_amount(&self) -> Result { + self.delegators + .iter() + .try_fold(U512::zero(), |a, (_, b)| a.checked_add(*b.staked_amount())) + .and_then(|delegators_sum| delegators_sum.checked_add(*self.staked_amount())) + .ok_or(Error::InvalidAmount) + } +} + +impl CLTyped for Bid { + fn cl_type() -> CLType { + CLType::Any + } +} + +impl ToBytes for Bid { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + self.validator_public_key.write_bytes(&mut result)?; + self.bonding_purse.write_bytes(&mut result)?; + self.staked_amount.write_bytes(&mut result)?; + self.delegation_rate.write_bytes(&mut result)?; + self.vesting_schedule.write_bytes(&mut result)?; + self.delegators().write_bytes(&mut result)?; + self.inactive.write_bytes(&mut result)?; + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.validator_public_key.serialized_length() + + self.bonding_purse.serialized_length() + + self.staked_amount.serialized_length() + + self.delegation_rate.serialized_length() + + self.vesting_schedule.serialized_length() + + self.delegators.serialized_length() + + self.inactive.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.validator_public_key.write_bytes(writer)?; + self.bonding_purse.write_bytes(writer)?; + self.staked_amount.write_bytes(writer)?; + self.delegation_rate.write_bytes(writer)?; + self.vesting_schedule.write_bytes(writer)?; + self.delegators().write_bytes(writer)?; + self.inactive.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for Bid { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (validator_public_key, bytes) = FromBytes::from_bytes(bytes)?; + let (bonding_purse, bytes) = FromBytes::from_bytes(bytes)?; + let (staked_amount, bytes) = FromBytes::from_bytes(bytes)?; + let (delegation_rate, bytes) = FromBytes::from_bytes(bytes)?; + 
let (vesting_schedule, bytes) = FromBytes::from_bytes(bytes)?; + let (delegators, bytes) = FromBytes::from_bytes(bytes)?; + let (inactive, bytes) = FromBytes::from_bytes(bytes)?; + Ok(( + Bid { + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + vesting_schedule, + delegators, + inactive, + }, + bytes, + )) + } +} + +#[cfg(test)] +mod tests { + use alloc::collections::BTreeMap; + + use crate::{ + bytesrepr, + system::auction::{bid::VestingSchedule, Bid, DelegationRate, Delegator}, + AccessRights, PublicKey, SecretKey, URef, U512, + }; + + const WEEK_MILLIS: u64 = 7 * 24 * 60 * 60 * 1000; + const TEST_VESTING_SCHEDULE_LENGTH_MILLIS: u64 = 7 * WEEK_MILLIS; + + #[test] + fn serialization_roundtrip() { + let founding_validator = Bid { + validator_public_key: PublicKey::from( + &SecretKey::ed25519_from_bytes([0u8; SecretKey::ED25519_LENGTH]).unwrap(), + ), + bonding_purse: URef::new([42; 32], AccessRights::READ_ADD_WRITE), + staked_amount: U512::one(), + delegation_rate: DelegationRate::max_value(), + vesting_schedule: Some(VestingSchedule::default()), + delegators: BTreeMap::default(), + inactive: true, + }; + bytesrepr::test_serialization_roundtrip(&founding_validator); + } + + #[test] + fn should_immediately_initialize_unlock_amounts() { + const TIMESTAMP_MILLIS: u64 = 0; + + let validator_pk: PublicKey = (&SecretKey::ed25519_from_bytes([42; 32]).unwrap()).into(); + + let validator_release_timestamp = TIMESTAMP_MILLIS; + let vesting_schedule_period_millis = TIMESTAMP_MILLIS; + let validator_bonding_purse = URef::new([42; 32], AccessRights::ADD); + let validator_staked_amount = U512::from(1000); + let validator_delegation_rate = 0; + + let mut bid = Bid::locked( + validator_pk, + validator_bonding_purse, + validator_staked_amount, + validator_delegation_rate, + validator_release_timestamp, + ); + + assert!(bid.process_with_vesting_schedule( + validator_release_timestamp, + vesting_schedule_period_millis, + )); + 
assert!(!bid.is_locked_with_vesting_schedule( + validator_release_timestamp, + vesting_schedule_period_millis + )); + } + + #[test] + fn should_initialize_delegators_different_timestamps() { + const TIMESTAMP_MILLIS: u64 = WEEK_MILLIS; + + let validator_pk: PublicKey = (&SecretKey::ed25519_from_bytes([42; 32]).unwrap()).into(); + + let delegator_1_pk: PublicKey = (&SecretKey::ed25519_from_bytes([43; 32]).unwrap()).into(); + let delegator_2_pk: PublicKey = (&SecretKey::ed25519_from_bytes([44; 32]).unwrap()).into(); + + let validator_release_timestamp = TIMESTAMP_MILLIS; + let validator_bonding_purse = URef::new([42; 32], AccessRights::ADD); + let validator_staked_amount = U512::from(1000); + let validator_delegation_rate = 0; + + let delegator_1_release_timestamp = TIMESTAMP_MILLIS + 1; + let delegator_1_bonding_purse = URef::new([52; 32], AccessRights::ADD); + let delegator_1_staked_amount = U512::from(2000); + + let delegator_2_release_timestamp = TIMESTAMP_MILLIS + 2; + let delegator_2_bonding_purse = URef::new([62; 32], AccessRights::ADD); + let delegator_2_staked_amount = U512::from(3000); + + let delegator_1 = Delegator::locked( + delegator_1_pk.clone(), + delegator_1_staked_amount, + delegator_1_bonding_purse, + validator_pk.clone(), + delegator_1_release_timestamp, + ); + + let delegator_2 = Delegator::locked( + delegator_2_pk.clone(), + delegator_2_staked_amount, + delegator_2_bonding_purse, + validator_pk.clone(), + delegator_2_release_timestamp, + ); + + let mut bid = Bid::locked( + validator_pk, + validator_bonding_purse, + validator_staked_amount, + validator_delegation_rate, + validator_release_timestamp, + ); + + assert!(!bid.process_with_vesting_schedule( + validator_release_timestamp - 1, + TEST_VESTING_SCHEDULE_LENGTH_MILLIS + )); + + { + let delegators = bid.delegators_mut(); + + delegators.insert(delegator_1_pk.clone(), delegator_1); + delegators.insert(delegator_2_pk.clone(), delegator_2); + } + + assert!(bid.process_with_vesting_schedule( + 
delegator_1_release_timestamp, + TEST_VESTING_SCHEDULE_LENGTH_MILLIS + )); + + let delegator_1_updated_1 = bid.delegators().get(&delegator_1_pk).cloned().unwrap(); + assert!(delegator_1_updated_1 + .vesting_schedule() + .unwrap() + .locked_amounts() + .is_some()); + + let delegator_2_updated_1 = bid.delegators().get(&delegator_2_pk).cloned().unwrap(); + assert!(delegator_2_updated_1 + .vesting_schedule() + .unwrap() + .locked_amounts() + .is_none()); + + assert!(bid.process_with_vesting_schedule( + delegator_2_release_timestamp, + TEST_VESTING_SCHEDULE_LENGTH_MILLIS + )); + + let delegator_1_updated_2 = bid.delegators().get(&delegator_1_pk).cloned().unwrap(); + assert!(delegator_1_updated_2 + .vesting_schedule() + .unwrap() + .locked_amounts() + .is_some()); + // Delegator 1 is already initialized and did not change after 2nd Bid::process + assert_eq!(delegator_1_updated_1, delegator_1_updated_2); + + let delegator_2_updated_2 = bid.delegators().get(&delegator_2_pk).cloned().unwrap(); + assert!(delegator_2_updated_2 + .vesting_schedule() + .unwrap() + .locked_amounts() + .is_some()); + + // Delegator 2 is different compared to first Bid::process + assert_ne!(delegator_2_updated_1, delegator_2_updated_2); + + // Validator initialized, and all delegators initialized + assert!(!bid.process_with_vesting_schedule( + delegator_2_release_timestamp + 1, + TEST_VESTING_SCHEDULE_LENGTH_MILLIS + )); + } +} + +#[cfg(test)] +mod prop_tests { + use proptest::prelude::*; + + use crate::{bytesrepr, gens}; + + proptest! { + #[test] + fn test_value_bid(bid in gens::bid_arb(1..100)) { + bytesrepr::test_serialization_roundtrip(&bid); + } + } +} diff --git a/casper_types/src/system/auction/bid/vesting.rs b/casper_types/src/system/auction/bid/vesting.rs new file mode 100644 index 00000000..6d59f27c --- /dev/null +++ b/casper_types/src/system/auction/bid/vesting.rs @@ -0,0 +1,523 @@ +// TODO - remove once schemars stops causing warning. 
+#![allow(clippy::field_reassign_with_default)] + +use alloc::vec::Vec; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, Error, FromBytes, ToBytes}, + U512, +}; + +const DAY_MILLIS: usize = 24 * 60 * 60 * 1000; +const DAYS_IN_WEEK: usize = 7; +const WEEK_MILLIS: usize = DAYS_IN_WEEK * DAY_MILLIS; + +/// Length of total vesting schedule in days. +const VESTING_SCHEDULE_LENGTH_DAYS: usize = 91; +/// Length of total vesting schedule expressed in days. +pub const VESTING_SCHEDULE_LENGTH_MILLIS: u64 = + VESTING_SCHEDULE_LENGTH_DAYS as u64 * DAY_MILLIS as u64; +/// 91 days / 7 days in a week = 13 weeks +const LOCKED_AMOUNTS_MAX_LENGTH: usize = (VESTING_SCHEDULE_LENGTH_DAYS / DAYS_IN_WEEK) + 1; + +#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct VestingSchedule { + initial_release_timestamp_millis: u64, + locked_amounts: Option<[U512; LOCKED_AMOUNTS_MAX_LENGTH]>, +} + +fn vesting_schedule_period_to_weeks(vesting_schedule_period_millis: u64) -> usize { + debug_assert_ne!(DAY_MILLIS, 0); + debug_assert_ne!(DAYS_IN_WEEK, 0); + vesting_schedule_period_millis as usize / DAY_MILLIS / DAYS_IN_WEEK +} + +impl VestingSchedule { + pub fn new(initial_release_timestamp_millis: u64) -> Self { + let locked_amounts = None; + VestingSchedule { + initial_release_timestamp_millis, + locked_amounts, + } + } + + /// Initializes vesting schedule with a configured amount of weekly releases. + /// + /// Returns `false` if already initialized. + /// + /// # Panics + /// + /// Panics if `vesting_schedule_period_millis` represents more than 13 weeks. 
+ pub fn initialize_with_schedule( + &mut self, + staked_amount: U512, + vesting_schedule_period_millis: u64, + ) -> bool { + if self.locked_amounts.is_some() { + return false; + } + + let locked_amounts_length = + vesting_schedule_period_to_weeks(vesting_schedule_period_millis); + + assert!( + locked_amounts_length < LOCKED_AMOUNTS_MAX_LENGTH, + "vesting schedule period must be less than {} weeks", + LOCKED_AMOUNTS_MAX_LENGTH, + ); + + if locked_amounts_length == 0 || vesting_schedule_period_millis == 0 { + // Zero weeks means instant unlock of staked amount. + self.locked_amounts = Some(Default::default()); + return true; + } + + let release_period: U512 = U512::from(locked_amounts_length + 1); + let weekly_release = staked_amount / release_period; + + let mut locked_amounts = [U512::zero(); LOCKED_AMOUNTS_MAX_LENGTH]; + let mut remaining_locked = staked_amount; + + for locked_amount in locked_amounts.iter_mut().take(locked_amounts_length) { + remaining_locked -= weekly_release; + *locked_amount = remaining_locked; + } + + assert_eq!( + locked_amounts.get(locked_amounts_length), + Some(&U512::zero()), + "first element after the schedule should be zero" + ); + + self.locked_amounts = Some(locked_amounts); + true + } + + /// Initializes weekly release for a fixed amount of 14 weeks period. + /// + /// Returns `false` if already initialized. 
+ pub fn initialize(&mut self, staked_amount: U512) -> bool { + self.initialize_with_schedule(staked_amount, VESTING_SCHEDULE_LENGTH_MILLIS) + } + + pub fn initial_release_timestamp_millis(&self) -> u64 { + self.initial_release_timestamp_millis + } + + pub fn locked_amounts(&self) -> Option<&[U512]> { + let locked_amounts = self.locked_amounts.as_ref()?; + Some(locked_amounts.as_slice()) + } + + pub fn locked_amount(&self, timestamp_millis: u64) -> Option { + let locked_amounts = self.locked_amounts()?; + + let index = { + let index_timestamp = + timestamp_millis.checked_sub(self.initial_release_timestamp_millis)?; + (index_timestamp as usize).checked_div(WEEK_MILLIS)? + }; + + let locked_amount = locked_amounts.get(index).cloned().unwrap_or_default(); + + Some(locked_amount) + } + + /// Checks if this vesting schedule is still under the vesting + pub(crate) fn is_vesting( + &self, + timestamp_millis: u64, + vesting_schedule_period_millis: u64, + ) -> bool { + let vested_period = match self.locked_amounts() { + Some(locked_amounts) => { + let vesting_weeks = locked_amounts + .iter() + .position(|amount| amount.is_zero()) + .expect("vesting schedule should always have zero at the end"); // SAFETY: at least one zero is guaranteed by `initialize_with_schedule` method + + let vesting_weeks_millis = + (vesting_weeks as u64).saturating_mul(WEEK_MILLIS as u64); + + self.initial_release_timestamp_millis() + .saturating_add(vesting_weeks_millis) + } + None => { + // Uninitialized yet but we know this will be the configured period of time. 
+ self.initial_release_timestamp_millis() + .saturating_add(vesting_schedule_period_millis) + } + }; + + timestamp_millis < vested_period + } +} + +impl ToBytes for [U512; LOCKED_AMOUNTS_MAX_LENGTH] { + fn to_bytes(&self) -> Result, Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut result)?; + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.iter().map(ToBytes::serialized_length).sum::() + } + + #[inline] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + for amount in self { + amount.write_bytes(writer)?; + } + Ok(()) + } +} + +impl FromBytes for [U512; LOCKED_AMOUNTS_MAX_LENGTH] { + fn from_bytes(mut bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let mut result = [U512::zero(); LOCKED_AMOUNTS_MAX_LENGTH]; + for value in &mut result { + let (amount, rem) = FromBytes::from_bytes(bytes)?; + *value = amount; + bytes = rem; + } + Ok((result, bytes)) + } +} + +impl ToBytes for VestingSchedule { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + result.append(&mut self.initial_release_timestamp_millis.to_bytes()?); + result.append(&mut self.locked_amounts.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.initial_release_timestamp_millis.serialized_length() + + self.locked_amounts.serialized_length() + } +} + +impl FromBytes for VestingSchedule { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (initial_release_timestamp_millis, bytes) = FromBytes::from_bytes(bytes)?; + let (locked_amounts, bytes) = FromBytes::from_bytes(bytes)?; + Ok(( + VestingSchedule { + initial_release_timestamp_millis, + locked_amounts, + }, + bytes, + )) + } +} + +/// Generators for [`VestingSchedule`] +#[cfg(test)] +mod gens { + use proptest::{ + array, option, + prelude::{Arbitrary, Strategy}, + }; + + use super::VestingSchedule; + use crate::gens::u512_arb; + + pub fn vesting_schedule_arb() -> 
impl Strategy { + (::arbitrary(), option::of(array::uniform14(u512_arb()))).prop_map( + |(initial_release_timestamp_millis, locked_amounts)| VestingSchedule { + initial_release_timestamp_millis, + locked_amounts, + }, + ) + } +} + +#[cfg(test)] +mod tests { + use proptest::{prop_assert, proptest}; + + use crate::{ + bytesrepr, + gens::u512_arb, + system::auction::bid::{ + vesting::{gens::vesting_schedule_arb, vesting_schedule_period_to_weeks, WEEK_MILLIS}, + VestingSchedule, + }, + U512, + }; + + use super::*; + + /// Default lock-in period of 90 days + const DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS: u64 = 90 * DAY_MILLIS as u64; + const RELEASE_TIMESTAMP: u64 = DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; + const STAKE: u64 = 140; + + const DEFAULT_VESTING_SCHEDULE_PERIOD_MILLIS: u64 = 91 * DAY_MILLIS as u64; + const LOCKED_AMOUNTS_LENGTH: usize = + (DEFAULT_VESTING_SCHEDULE_PERIOD_MILLIS as usize / WEEK_MILLIS) + 1; + + #[test] + #[should_panic = "vesting schedule period must be less than"] + fn test_vesting_schedule_exceeding_the_maximum_should_not_panic() { + let future_date = 98 * DAY_MILLIS as u64; + let mut vesting_schedule = VestingSchedule::new(RELEASE_TIMESTAMP); + vesting_schedule.initialize_with_schedule(U512::from(STAKE), future_date); + + assert_eq!(vesting_schedule.locked_amount(0), None); + assert_eq!(vesting_schedule.locked_amount(RELEASE_TIMESTAMP - 1), None); + } + + #[test] + fn test_locked_amount_check_should_not_panic() { + let mut vesting_schedule = VestingSchedule::new(RELEASE_TIMESTAMP); + vesting_schedule.initialize(U512::from(STAKE)); + + assert_eq!(vesting_schedule.locked_amount(0), None); + assert_eq!(vesting_schedule.locked_amount(RELEASE_TIMESTAMP - 1), None); + } + + #[test] + fn test_locked_with_zero_length_schedule_should_not_panic() { + let mut vesting_schedule = VestingSchedule::new(RELEASE_TIMESTAMP); + vesting_schedule.initialize_with_schedule(U512::from(STAKE), 0); + + assert_eq!(vesting_schedule.locked_amount(0), None); + 
assert_eq!(vesting_schedule.locked_amount(RELEASE_TIMESTAMP - 1), None); + } + + #[test] + fn test_locked_amount() { + let mut vesting_schedule = VestingSchedule::new(RELEASE_TIMESTAMP); + vesting_schedule.initialize(U512::from(STAKE)); + + let mut timestamp = RELEASE_TIMESTAMP; + + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(130)) + ); + + timestamp = RELEASE_TIMESTAMP + WEEK_MILLIS as u64 - 1; + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(130)) + ); + + timestamp = RELEASE_TIMESTAMP + WEEK_MILLIS as u64; + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(120)) + ); + + timestamp = RELEASE_TIMESTAMP + WEEK_MILLIS as u64 + 1; + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(120)) + ); + + timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 2) - 1; + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(120)) + ); + + timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 2); + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(110)) + ); + + timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 2) + 1; + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(110)) + ); + + timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 3) - 1; + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(110)) + ); + + timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 3); + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(100)) + ); + + timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 3) + 1; + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(100)) + ); + + timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 12) - 1; + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(20)) + ); + + timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 12); + assert_eq!( + 
vesting_schedule.locked_amount(timestamp), + Some(U512::from(10)) + ); + + timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 12) + 1; + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(10)) + ); + + timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 13) - 1; + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(10)) + ); + + timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 13); + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(0)) + ); + + timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 13) + 1; + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(0)) + ); + + timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 14) - 1; + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(0)) + ); + + timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 14); + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(0)) + ); + + timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 14) + 1; + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(0)) + ); + } + + fn vested_amounts_match_initial_stake( + initial_stake: U512, + release_timestamp: u64, + vesting_schedule_length: u64, + ) -> bool { + let mut vesting_schedule = VestingSchedule::new(release_timestamp); + vesting_schedule.initialize_with_schedule(initial_stake, vesting_schedule_length); + + let mut total_vested_amounts = U512::zero(); + + for i in 0..LOCKED_AMOUNTS_LENGTH { + let timestamp = release_timestamp + (WEEK_MILLIS * i) as u64; + if let Some(locked_amount) = vesting_schedule.locked_amount(timestamp) { + let current_vested_amount = initial_stake - locked_amount - total_vested_amounts; + total_vested_amounts += current_vested_amount + } + } + + total_vested_amounts == initial_stake + } + + #[test] + fn vested_amounts_conserve_stake() { + let stake = U512::from(1000); + assert!(vested_amounts_match_initial_stake( + stake, + 
DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, + DEFAULT_VESTING_SCHEDULE_PERIOD_MILLIS, + )) + } + + #[test] + fn is_vesting_with_default_schedule() { + let initial_stake = U512::from(1000u64); + let release_timestamp = DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; + let mut vesting_schedule = VestingSchedule::new(release_timestamp); + + let is_vesting_before: Vec = (0..LOCKED_AMOUNTS_LENGTH + 1) + .map(|i| { + vesting_schedule.is_vesting( + release_timestamp + (WEEK_MILLIS * i) as u64, + DEFAULT_VESTING_SCHEDULE_PERIOD_MILLIS, + ) + }) + .collect(); + + assert_eq!( + is_vesting_before, + vec![ + true, true, true, true, true, true, true, true, true, true, true, true, true, + false, // week after is always set to zero + false + ] + ); + vesting_schedule.initialize(initial_stake); + + let is_vesting_after: Vec = (0..LOCKED_AMOUNTS_LENGTH + 1) + .map(|i| { + vesting_schedule.is_vesting( + release_timestamp + (WEEK_MILLIS * i) as u64, + DEFAULT_VESTING_SCHEDULE_PERIOD_MILLIS, + ) + }) + .collect(); + + assert_eq!( + is_vesting_after, + vec![ + true, true, true, true, true, true, true, true, true, true, true, true, true, + false, // week after is always set to zero + false, + ] + ); + } + + #[test] + fn should_calculate_vesting_schedule_period_to_weeks() { + let thirteen_weeks_millis = 13 * 7 * DAY_MILLIS as u64; + assert_eq!(vesting_schedule_period_to_weeks(thirteen_weeks_millis), 13,); + + assert_eq!(vesting_schedule_period_to_weeks(0), 0); + assert_eq!( + vesting_schedule_period_to_weeks(u64::MAX), + 30_500_568_904usize + ); + } + + proptest! 
{ + #[test] + fn prop_total_vested_amounts_conserve_stake(stake in u512_arb()) { + prop_assert!(vested_amounts_match_initial_stake( + stake, + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, + DEFAULT_VESTING_SCHEDULE_PERIOD_MILLIS, + )) + } + + #[test] + fn prop_serialization_roundtrip(vesting_schedule in vesting_schedule_arb()) { + bytesrepr::test_serialization_roundtrip(&vesting_schedule) + } + } +} diff --git a/casper_types/src/system/auction/constants.rs b/casper_types/src/system/auction/constants.rs new file mode 100644 index 00000000..e54e1f4d --- /dev/null +++ b/casper_types/src/system/auction/constants.rs @@ -0,0 +1,98 @@ +use crate::EraId; + +use super::DelegationRate; + +/// Initial value of era id we start at genesis. +pub const INITIAL_ERA_ID: EraId = EraId::new(0); + +/// Initial value of era end timestamp. +pub const INITIAL_ERA_END_TIMESTAMP_MILLIS: u64 = 0; + +/// Delegation rate is a fraction between 0-1. Validator sets the delegation rate +/// in integer terms, which is then divided by the denominator to obtain the fraction. +pub const DELEGATION_RATE_DENOMINATOR: DelegationRate = 100; + +/// We use one trillion as a block reward unit because it's large enough to allow precise +/// fractions, and small enough for many block rewards to fit into a u64. +pub const BLOCK_REWARD: u64 = 1_000_000_000_000; + +/// Named constant for `amount`. +pub const ARG_AMOUNT: &str = "amount"; +/// Named constant for `delegation_rate`. +pub const ARG_DELEGATION_RATE: &str = "delegation_rate"; +/// Named constant for `account_hash`. +pub const ARG_PUBLIC_KEY: &str = "public_key"; +/// Named constant for `validator`. +pub const ARG_VALIDATOR: &str = "validator"; +/// Named constant for `delegator`. +pub const ARG_DELEGATOR: &str = "delegator"; +/// Named constant for `validator_purse`. +pub const ARG_VALIDATOR_PURSE: &str = "validator_purse"; +/// Named constant for `validator_keys`. 
/// Named constant for `validator_keys`.
pub const ARG_VALIDATOR_KEYS: &str = "validator_keys";
/// Named constant for `validator_public_keys`.
pub const ARG_VALIDATOR_PUBLIC_KEYS: &str = "validator_public_keys";
/// Named constant for `new_validator`.
pub const ARG_NEW_VALIDATOR: &str = "new_validator";
/// Named constant for `era_id`.
pub const ARG_ERA_ID: &str = "era_id";
/// Named constant for `reward_factors`.
pub const ARG_REWARD_FACTORS: &str = "reward_factors";
/// Named constant for `validator_public_key`.
pub const ARG_VALIDATOR_PUBLIC_KEY: &str = "validator_public_key";
/// Named constant for `delegator_public_key`.
pub const ARG_DELEGATOR_PUBLIC_KEY: &str = "delegator_public_key";
/// Named constant for `validator_slots` argument.
pub const ARG_VALIDATOR_SLOTS: &str = VALIDATOR_SLOTS_KEY;
/// Named constant for `mint_contract_package_hash`
pub const ARG_MINT_CONTRACT_PACKAGE_HASH: &str = "mint_contract_package_hash";
/// Named constant for `genesis_validators`
pub const ARG_GENESIS_VALIDATORS: &str = "genesis_validators";
/// Named constant of `auction_delay`
pub const ARG_AUCTION_DELAY: &str = "auction_delay";
/// Named constant for `locked_funds_period`
pub const ARG_LOCKED_FUNDS_PERIOD: &str = "locked_funds_period";
/// Named constant for `unbonding_delay`
pub const ARG_UNBONDING_DELAY: &str = "unbonding_delay";
/// Named constant for `era_end_timestamp_millis`;
pub const ARG_ERA_END_TIMESTAMP_MILLIS: &str = "era_end_timestamp_millis";
/// Named constant for `evicted_validators`;
pub const ARG_EVICTED_VALIDATORS: &str = "evicted_validators";

/// Named constant for method `get_era_validators`.
pub const METHOD_GET_ERA_VALIDATORS: &str = "get_era_validators";
/// Named constant for method `add_bid`.
pub const METHOD_ADD_BID: &str = "add_bid";
/// Named constant for method `withdraw_bid`.
pub const METHOD_WITHDRAW_BID: &str = "withdraw_bid";
/// Named constant for method `delegate`.
pub const METHOD_DELEGATE: &str = "delegate";
/// Named constant for method `undelegate`.
pub const METHOD_UNDELEGATE: &str = "undelegate";
/// Named constant for method `redelegate`.
pub const METHOD_REDELEGATE: &str = "redelegate";
/// Named constant for method `run_auction`.
pub const METHOD_RUN_AUCTION: &str = "run_auction";
/// Named constant for method `slash`.
pub const METHOD_SLASH: &str = "slash";
/// Named constant for method `distribute`.
pub const METHOD_DISTRIBUTE: &str = "distribute";
/// Named constant for method `read_era_id`.
pub const METHOD_READ_ERA_ID: &str = "read_era_id";
/// Named constant for method `activate_bid`.
pub const METHOD_ACTIVATE_BID: &str = "activate_bid";

/// Storage for `EraId`.
pub const ERA_ID_KEY: &str = "era_id";
/// Storage for era-end timestamp.
pub const ERA_END_TIMESTAMP_MILLIS_KEY: &str = "era_end_timestamp_millis";
/// Storage for `SeigniorageRecipientsSnapshot`.
pub const SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY: &str = "seigniorage_recipients_snapshot";
/// Total validator slots allowed.
pub const VALIDATOR_SLOTS_KEY: &str = "validator_slots";
/// Amount of auction delay.
pub const AUCTION_DELAY_KEY: &str = "auction_delay";
/// Default lock period for new bid entries represented in eras.
pub const LOCKED_FUNDS_PERIOD_KEY: &str = "locked_funds_period";
/// Unbonding delay expressed in eras.
pub const UNBONDING_DELAY_KEY: &str = "unbonding_delay";

// --- casper_types/src/system/auction/delegator.rs ---
// TODO - remove once schemars stops causing warning.
+#![allow(clippy::field_reassign_with_default)] + +use alloc::vec::Vec; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + system::auction::{bid::VestingSchedule, Error}, + CLType, CLTyped, PublicKey, URef, U512, +}; + +/// Represents a party delegating their stake to a validator (or "delegatee") +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct Delegator { + delegator_public_key: PublicKey, + staked_amount: U512, + bonding_purse: URef, + validator_public_key: PublicKey, + vesting_schedule: Option, +} + +impl Delegator { + /// Creates a new [`Delegator`] + pub fn unlocked( + delegator_public_key: PublicKey, + staked_amount: U512, + bonding_purse: URef, + validator_public_key: PublicKey, + ) -> Self { + let vesting_schedule = None; + Delegator { + delegator_public_key, + staked_amount, + bonding_purse, + validator_public_key, + vesting_schedule, + } + } + + /// Creates new instance of a [`Delegator`] with locked funds. + pub fn locked( + delegator_public_key: PublicKey, + staked_amount: U512, + bonding_purse: URef, + validator_public_key: PublicKey, + release_timestamp_millis: u64, + ) -> Self { + let vesting_schedule = Some(VestingSchedule::new(release_timestamp_millis)); + Delegator { + delegator_public_key, + staked_amount, + bonding_purse, + validator_public_key, + vesting_schedule, + } + } + + /// Returns public key of the delegator. 
+ pub fn delegator_public_key(&self) -> &PublicKey { + &self.delegator_public_key + } + + /// Returns the staked amount + pub fn staked_amount(&self) -> &U512 { + &self.staked_amount + } + + /// Returns the mutable staked amount + pub fn staked_amount_mut(&mut self) -> &mut U512 { + &mut self.staked_amount + } + + /// Returns the bonding purse + pub fn bonding_purse(&self) -> &URef { + &self.bonding_purse + } + + /// Returns delegatee + pub fn validator_public_key(&self) -> &PublicKey { + &self.validator_public_key + } + + /// Decreases the stake of the provided bid + pub fn decrease_stake( + &mut self, + amount: U512, + era_end_timestamp_millis: u64, + ) -> Result { + let updated_staked_amount = self + .staked_amount + .checked_sub(amount) + .ok_or(Error::InvalidAmount)?; + + let vesting_schedule = match self.vesting_schedule.as_ref() { + Some(vesting_schedule) => vesting_schedule, + None => { + self.staked_amount = updated_staked_amount; + return Ok(updated_staked_amount); + } + }; + + match vesting_schedule.locked_amount(era_end_timestamp_millis) { + Some(locked_amount) if updated_staked_amount < locked_amount => { + Err(Error::DelegatorFundsLocked) + } + None => { + // If `None`, then the locked amounts table has yet to be initialized (likely + // pre-90 day mark) + Err(Error::DelegatorFundsLocked) + } + Some(_) => { + self.staked_amount = updated_staked_amount; + Ok(updated_staked_amount) + } + } + } + + /// Increases the stake of the provided bid + pub fn increase_stake(&mut self, amount: U512) -> Result { + let updated_staked_amount = self + .staked_amount + .checked_add(amount) + .ok_or(Error::InvalidAmount)?; + + self.staked_amount = updated_staked_amount; + + Ok(updated_staked_amount) + } + + /// Returns a reference to the vesting schedule of the provided + /// delegator bid. `None` if a non-genesis validator. 
+ pub fn vesting_schedule(&self) -> Option<&VestingSchedule> { + self.vesting_schedule.as_ref() + } + + /// Returns a mutable reference to the vesting schedule of the provided + /// delegator bid. `None` if a non-genesis validator. + pub fn vesting_schedule_mut(&mut self) -> Option<&mut VestingSchedule> { + self.vesting_schedule.as_mut() + } +} + +impl CLTyped for Delegator { + fn cl_type() -> CLType { + CLType::Any + } +} + +impl ToBytes for Delegator { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.extend(self.delegator_public_key.to_bytes()?); + buffer.extend(self.staked_amount.to_bytes()?); + buffer.extend(self.bonding_purse.to_bytes()?); + buffer.extend(self.validator_public_key.to_bytes()?); + buffer.extend(self.vesting_schedule.to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.delegator_public_key.serialized_length() + + self.staked_amount.serialized_length() + + self.bonding_purse.serialized_length() + + self.validator_public_key.serialized_length() + + self.vesting_schedule.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.delegator_public_key.write_bytes(writer)?; + self.staked_amount.write_bytes(writer)?; + self.bonding_purse.write_bytes(writer)?; + self.validator_public_key.write_bytes(writer)?; + self.vesting_schedule.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for Delegator { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (delegator_public_key, bytes) = PublicKey::from_bytes(bytes)?; + let (staked_amount, bytes) = U512::from_bytes(bytes)?; + let (bonding_purse, bytes) = URef::from_bytes(bytes)?; + let (validator_public_key, bytes) = PublicKey::from_bytes(bytes)?; + let (vesting_schedule, bytes) = FromBytes::from_bytes(bytes)?; + Ok(( + Delegator { + delegator_public_key, + staked_amount, + bonding_purse, + validator_public_key, + vesting_schedule, + 
}, + bytes, + )) + } +} + +#[cfg(test)] +mod tests { + use crate::{ + bytesrepr, system::auction::Delegator, AccessRights, PublicKey, SecretKey, URef, U512, + }; + + #[test] + fn serialization_roundtrip() { + let staked_amount = U512::one(); + let bonding_purse = URef::new([42; 32], AccessRights::READ_ADD_WRITE); + let delegator_public_key: PublicKey = PublicKey::from( + &SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(), + ); + + let validator_public_key: PublicKey = PublicKey::from( + &SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]).unwrap(), + ); + let unlocked_delegator = Delegator::unlocked( + delegator_public_key.clone(), + staked_amount, + bonding_purse, + validator_public_key.clone(), + ); + bytesrepr::test_serialization_roundtrip(&unlocked_delegator); + + let release_timestamp_millis = 42; + let locked_delegator = Delegator::locked( + delegator_public_key, + staked_amount, + bonding_purse, + validator_public_key, + release_timestamp_millis, + ); + bytesrepr::test_serialization_roundtrip(&locked_delegator); + } +} diff --git a/casper_types/src/system/auction/entry_points.rs b/casper_types/src/system/auction/entry_points.rs new file mode 100644 index 00000000..69915711 --- /dev/null +++ b/casper_types/src/system/auction/entry_points.rs @@ -0,0 +1,146 @@ +use alloc::boxed::Box; + +use crate::{ + system::auction::{ + DelegationRate, ValidatorWeights, ARG_AMOUNT, ARG_DELEGATION_RATE, ARG_DELEGATOR, + ARG_ERA_END_TIMESTAMP_MILLIS, ARG_NEW_VALIDATOR, ARG_PUBLIC_KEY, ARG_REWARD_FACTORS, + ARG_VALIDATOR, ARG_VALIDATOR_PUBLIC_KEY, METHOD_ACTIVATE_BID, METHOD_ADD_BID, + METHOD_DELEGATE, METHOD_DISTRIBUTE, METHOD_GET_ERA_VALIDATORS, METHOD_READ_ERA_ID, + METHOD_REDELEGATE, METHOD_RUN_AUCTION, METHOD_SLASH, METHOD_UNDELEGATE, + METHOD_WITHDRAW_BID, + }, + CLType, CLTyped, EntryPoint, EntryPointAccess, EntryPointType, EntryPoints, Parameter, + PublicKey, U512, +}; + +/// Creates auction contract entry points. 
+pub fn auction_entry_points() -> EntryPoints { + let mut entry_points = EntryPoints::new(); + + let entry_point = EntryPoint::new( + METHOD_GET_ERA_VALIDATORS, + vec![], + Option::::cl_type(), + EntryPointAccess::Public, + EntryPointType::Contract, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntryPoint::new( + METHOD_ADD_BID, + vec![ + Parameter::new(ARG_PUBLIC_KEY, PublicKey::cl_type()), + Parameter::new(ARG_DELEGATION_RATE, DelegationRate::cl_type()), + Parameter::new(ARG_AMOUNT, U512::cl_type()), + ], + U512::cl_type(), + EntryPointAccess::Public, + EntryPointType::Contract, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntryPoint::new( + METHOD_WITHDRAW_BID, + vec![ + Parameter::new(ARG_PUBLIC_KEY, PublicKey::cl_type()), + Parameter::new(ARG_AMOUNT, U512::cl_type()), + ], + U512::cl_type(), + EntryPointAccess::Public, + EntryPointType::Contract, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntryPoint::new( + METHOD_DELEGATE, + vec![ + Parameter::new(ARG_DELEGATOR, PublicKey::cl_type()), + Parameter::new(ARG_VALIDATOR, PublicKey::cl_type()), + Parameter::new(ARG_AMOUNT, U512::cl_type()), + ], + U512::cl_type(), + EntryPointAccess::Public, + EntryPointType::Contract, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntryPoint::new( + METHOD_UNDELEGATE, + vec![ + Parameter::new(ARG_DELEGATOR, PublicKey::cl_type()), + Parameter::new(ARG_VALIDATOR, PublicKey::cl_type()), + Parameter::new(ARG_AMOUNT, U512::cl_type()), + ], + U512::cl_type(), + EntryPointAccess::Public, + EntryPointType::Contract, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntryPoint::new( + METHOD_REDELEGATE, + vec![ + Parameter::new(ARG_DELEGATOR, PublicKey::cl_type()), + Parameter::new(ARG_VALIDATOR, PublicKey::cl_type()), + Parameter::new(ARG_AMOUNT, U512::cl_type()), + Parameter::new(ARG_NEW_VALIDATOR, PublicKey::cl_type()), + ], + U512::cl_type(), + 
EntryPointAccess::Public, + EntryPointType::Contract, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntryPoint::new( + METHOD_RUN_AUCTION, + vec![Parameter::new(ARG_ERA_END_TIMESTAMP_MILLIS, u64::cl_type())], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Contract, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntryPoint::new( + METHOD_SLASH, + vec![], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Contract, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntryPoint::new( + METHOD_DISTRIBUTE, + vec![Parameter::new( + ARG_REWARD_FACTORS, + CLType::Map { + key: Box::new(CLType::PublicKey), + value: Box::new(CLType::U64), + }, + )], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Contract, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntryPoint::new( + METHOD_READ_ERA_ID, + vec![], + CLType::U64, + EntryPointAccess::Public, + EntryPointType::Contract, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntryPoint::new( + METHOD_ACTIVATE_BID, + vec![Parameter::new(ARG_VALIDATOR_PUBLIC_KEY, CLType::PublicKey)], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Contract, + ); + entry_points.add_entry_point(entry_point); + + entry_points +} diff --git a/casper_types/src/system/auction/era_info.rs b/casper_types/src/system/auction/era_info.rs new file mode 100644 index 00000000..ea69dd16 --- /dev/null +++ b/casper_types/src/system/auction/era_info.rs @@ -0,0 +1,314 @@ +// TODO - remove once schemars stops causing warning. 
+#![allow(clippy::field_reassign_with_default)] + +use alloc::{boxed::Box, vec::Vec}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + CLType, CLTyped, PublicKey, U512, +}; + +const SEIGNIORAGE_ALLOCATION_VALIDATOR_TAG: u8 = 0; +const SEIGNIORAGE_ALLOCATION_DELEGATOR_TAG: u8 = 1; + +/// Information about a seigniorage allocation +#[derive(Debug, Clone, Ord, PartialOrd, Eq, PartialEq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub enum SeigniorageAllocation { + /// Info about a seigniorage allocation for a validator + Validator { + /// Validator's public key + validator_public_key: PublicKey, + /// Allocated amount + amount: U512, + }, + /// Info about a seigniorage allocation for a delegator + Delegator { + /// Delegator's public key + delegator_public_key: PublicKey, + /// Validator's public key + validator_public_key: PublicKey, + /// Allocated amount + amount: U512, + }, +} + +impl SeigniorageAllocation { + /// Constructs a [`SeigniorageAllocation::Validator`] + pub const fn validator(validator_public_key: PublicKey, amount: U512) -> Self { + SeigniorageAllocation::Validator { + validator_public_key, + amount, + } + } + + /// Constructs a [`SeigniorageAllocation::Delegator`] + pub const fn delegator( + delegator_public_key: PublicKey, + validator_public_key: PublicKey, + amount: U512, + ) -> Self { + SeigniorageAllocation::Delegator { + delegator_public_key, + validator_public_key, + amount, + } + } + + /// Returns the amount for a given seigniorage allocation + pub fn amount(&self) -> &U512 { + match self { + SeigniorageAllocation::Validator { amount, .. } => amount, + SeigniorageAllocation::Delegator { amount, .. 
} => amount, + } + } + + fn tag(&self) -> u8 { + match self { + SeigniorageAllocation::Validator { .. } => SEIGNIORAGE_ALLOCATION_VALIDATOR_TAG, + SeigniorageAllocation::Delegator { .. } => SEIGNIORAGE_ALLOCATION_DELEGATOR_TAG, + } + } +} + +impl ToBytes for SeigniorageAllocation { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.tag().serialized_length() + + match self { + SeigniorageAllocation::Validator { + validator_public_key, + amount, + } => validator_public_key.serialized_length() + amount.serialized_length(), + SeigniorageAllocation::Delegator { + delegator_public_key, + validator_public_key, + amount, + } => { + delegator_public_key.serialized_length() + + validator_public_key.serialized_length() + + amount.serialized_length() + } + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + writer.push(self.tag()); + match self { + SeigniorageAllocation::Validator { + validator_public_key, + amount, + } => { + validator_public_key.write_bytes(writer)?; + amount.write_bytes(writer)?; + } + SeigniorageAllocation::Delegator { + delegator_public_key, + validator_public_key, + amount, + } => { + delegator_public_key.write_bytes(writer)?; + validator_public_key.write_bytes(writer)?; + amount.write_bytes(writer)?; + } + } + Ok(()) + } +} + +impl FromBytes for SeigniorageAllocation { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, rem) = ::from_bytes(bytes)?; + match tag { + SEIGNIORAGE_ALLOCATION_VALIDATOR_TAG => { + let (validator_public_key, rem) = PublicKey::from_bytes(rem)?; + let (amount, rem) = U512::from_bytes(rem)?; + Ok(( + SeigniorageAllocation::validator(validator_public_key, amount), + rem, + )) + } + SEIGNIORAGE_ALLOCATION_DELEGATOR_TAG => { + let (delegator_public_key, rem) = PublicKey::from_bytes(rem)?; + let 
(validator_public_key, rem) = PublicKey::from_bytes(rem)?; + let (amount, rem) = U512::from_bytes(rem)?; + Ok(( + SeigniorageAllocation::delegator( + delegator_public_key, + validator_public_key, + amount, + ), + rem, + )) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +impl CLTyped for SeigniorageAllocation { + fn cl_type() -> CLType { + CLType::Any + } +} + +/// Auction metadata. Intended to be recorded at each era. +#[derive(Debug, Default, Clone, Ord, PartialOrd, Eq, PartialEq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct EraInfo { + seigniorage_allocations: Vec, +} + +impl EraInfo { + /// Constructs a [`EraInfo`]. + pub fn new() -> Self { + let seigniorage_allocations = Vec::new(); + EraInfo { + seigniorage_allocations, + } + } + + /// Returns a reference to the seigniorage allocations collection + pub fn seigniorage_allocations(&self) -> &Vec { + &self.seigniorage_allocations + } + + /// Returns a mutable reference to the seigniorage allocations collection + pub fn seigniorage_allocations_mut(&mut self) -> &mut Vec { + &mut self.seigniorage_allocations + } + + /// Returns all seigniorage allocations that match the provided public key + /// using the following criteria: + /// * If the match candidate is a validator allocation, the provided public key is matched + /// against the validator public key. + /// * If the match candidate is a delegator allocation, the provided public key is matched + /// against the delegator public key. + pub fn select(&self, public_key: PublicKey) -> impl Iterator { + self.seigniorage_allocations + .iter() + .filter(move |allocation| match allocation { + SeigniorageAllocation::Validator { + validator_public_key, + .. + } => public_key == *validator_public_key, + SeigniorageAllocation::Delegator { + delegator_public_key, + .. 
+ } => public_key == *delegator_public_key, + }) + } +} + +impl ToBytes for EraInfo { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + self.seigniorage_allocations().write_bytes(&mut result)?; + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.seigniorage_allocations.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.seigniorage_allocations().write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for EraInfo { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (seigniorage_allocations, rem) = Vec::::from_bytes(bytes)?; + Ok(( + EraInfo { + seigniorage_allocations, + }, + rem, + )) + } +} + +impl CLTyped for EraInfo { + fn cl_type() -> CLType { + CLType::List(Box::new(SeigniorageAllocation::cl_type())) + } +} + +/// Generators for [`SeigniorageAllocation`] and [`EraInfo`] +#[cfg(any(feature = "testing", feature = "gens", test))] +pub mod gens { + use proptest::{ + collection::{self, SizeRange}, + prelude::Strategy, + prop_oneof, + }; + + use crate::{ + crypto::gens::public_key_arb, + gens::u512_arb, + system::auction::{EraInfo, SeigniorageAllocation}, + }; + + fn seigniorage_allocation_validator_arb() -> impl Strategy { + (public_key_arb(), u512_arb()).prop_map(|(validator_public_key, amount)| { + SeigniorageAllocation::validator(validator_public_key, amount) + }) + } + + fn seigniorage_allocation_delegator_arb() -> impl Strategy { + (public_key_arb(), public_key_arb(), u512_arb()).prop_map( + |(delegator_public_key, validator_public_key, amount)| { + SeigniorageAllocation::delegator(delegator_public_key, validator_public_key, amount) + }, + ) + } + + /// Creates an arbitrary [`SeignorageAllocation`](crate::system::auction::SeigniorageAllocation) + pub fn seigniorage_allocation_arb() -> impl Strategy { + prop_oneof![ + seigniorage_allocation_validator_arb(), + seigniorage_allocation_delegator_arb() + ] + 
} + + /// Creates an arbitrary [`EraInfo`] + pub fn era_info_arb(size: impl Into) -> impl Strategy { + collection::vec(seigniorage_allocation_arb(), size).prop_map(|allocations| { + let mut era_info = EraInfo::new(); + *era_info.seigniorage_allocations_mut() = allocations; + era_info + }) + } +} + +#[cfg(test)] +mod tests { + use proptest::prelude::*; + + use crate::bytesrepr; + + use super::gens; + + proptest! { + #[test] + fn test_serialization_roundtrip(era_info in gens::era_info_arb(0..32)) { + bytesrepr::test_serialization_roundtrip(&era_info) + } + } +} diff --git a/casper_types/src/system/auction/error.rs b/casper_types/src/system/auction/error.rs new file mode 100644 index 00000000..00bd1741 --- /dev/null +++ b/casper_types/src/system/auction/error.rs @@ -0,0 +1,543 @@ +//! Home of the Auction contract's [`enum@Error`] type. +use alloc::vec::Vec; +use core::{ + convert::{TryFrom, TryInto}, + fmt::{self, Display, Formatter}, + result, +}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + CLType, CLTyped, +}; + +/// Errors which can occur while executing the Auction contract. +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] +#[cfg_attr(test, derive(strum::EnumIter))] +#[repr(u8)] +#[non_exhaustive] +pub enum Error { + /// Unable to find named key in the contract's named keys. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(0, Error::MissingKey as u8); + /// ``` + MissingKey = 0, + /// Given named key contains invalid variant. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(1, Error::InvalidKeyVariant as u8); + /// ``` + InvalidKeyVariant = 1, + /// Value under an uref does not exist. This means the installer contract didn't work properly. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(2, Error::MissingValue as u8); + /// ``` + MissingValue = 2, + /// ABI serialization issue while reading or writing. 
+ /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(3, Error::Serialization as u8); + /// ``` + Serialization = 3, + /// Triggered when contract was unable to transfer desired amount of tokens into a bid purse. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(4, Error::TransferToBidPurse as u8); + /// ``` + TransferToBidPurse = 4, + /// User passed invalid amount of tokens which might result in wrong values after calculation. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(5, Error::InvalidAmount as u8); + /// ``` + InvalidAmount = 5, + /// Unable to find a bid by account hash in `active_bids` map. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(6, Error::BidNotFound as u8); + /// ``` + BidNotFound = 6, + /// Validator's account hash was not found in the map. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(7, Error::ValidatorNotFound as u8); + /// ``` + ValidatorNotFound = 7, + /// Delegator's account hash was not found in the map. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(8, Error::DelegatorNotFound as u8); + /// ``` + DelegatorNotFound = 8, + /// Storage problem. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(9, Error::Storage as u8); + /// ``` + Storage = 9, + /// Raised when system is unable to bond. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(10, Error::Bonding as u8); + /// ``` + Bonding = 10, + /// Raised when system is unable to unbond. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(11, Error::Unbonding as u8); + /// ``` + Unbonding = 11, + /// Raised when Mint contract is unable to release founder stake. 
+ /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(12, Error::ReleaseFounderStake as u8); + /// ``` + ReleaseFounderStake = 12, + /// Raised when the system is unable to determine purse balance. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(13, Error::GetBalance as u8); + /// ``` + GetBalance = 13, + /// Raised when an entry point is called from invalid account context. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(14, Error::InvalidContext as u8); + /// ``` + InvalidContext = 14, + /// Raised whenever a validator's funds are still locked in but an attempt to withdraw was + /// made. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(15, Error::ValidatorFundsLocked as u8); + /// ``` + ValidatorFundsLocked = 15, + /// Raised when caller is not the system account. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(16, Error::InvalidCaller as u8); + /// ``` + InvalidCaller = 16, + /// Raised when function is supplied a public key that does match the caller's or does not have + /// an associated account. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(17, Error::InvalidPublicKey as u8); + /// ``` + InvalidPublicKey = 17, + /// Validator is not not bonded. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(18, Error::BondNotFound as u8); + /// ``` + BondNotFound = 18, + /// Unable to create purse. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(19, Error::CreatePurseFailed as u8); + /// ``` + CreatePurseFailed = 19, + /// Attempted to unbond an amount which was too large. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(20, Error::UnbondTooLarge as u8); + /// ``` + UnbondTooLarge = 20, + /// Attempted to bond with a stake which was too small. 
+ /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(21, Error::BondTooSmall as u8); + /// ``` + BondTooSmall = 21, + /// Raised when rewards are to be distributed to delegators, but the validator has no + /// delegations. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(22, Error::MissingDelegations as u8); + /// ``` + MissingDelegations = 22, + /// The validators returned by the consensus component should match + /// current era validators when distributing rewards. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(23, Error::MismatchedEraValidators as u8); + /// ``` + MismatchedEraValidators = 23, + /// Failed to mint reward tokens. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(24, Error::MintReward as u8); + /// ``` + MintReward = 24, + /// Invalid number of validator slots. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(25, Error::InvalidValidatorSlotsValue as u8); + /// ``` + InvalidValidatorSlotsValue = 25, + /// Failed to reduce total supply. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(26, Error::MintReduceTotalSupply as u8); + /// ``` + MintReduceTotalSupply = 26, + /// Triggered when contract was unable to transfer desired amount of tokens into a delegators + /// purse. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(27, Error::TransferToDelegatorPurse as u8); + /// ``` + TransferToDelegatorPurse = 27, + /// Triggered when contract was unable to perform a transfer to distribute validators reward. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(28, Error::ValidatorRewardTransfer as u8); + /// ``` + ValidatorRewardTransfer = 28, + /// Triggered when contract was unable to perform a transfer to distribute delegators rewards. 
+ /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(29, Error::DelegatorRewardTransfer as u8); + /// ``` + DelegatorRewardTransfer = 29, + /// Failed to transfer desired amount while withdrawing delegators reward. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(30, Error::WithdrawDelegatorReward as u8); + /// ``` + WithdrawDelegatorReward = 30, + /// Failed to transfer desired amount while withdrawing validators reward. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(31, Error::WithdrawValidatorReward as u8); + /// ``` + WithdrawValidatorReward = 31, + /// Failed to transfer desired amount into unbonding purse. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(32, Error::TransferToUnbondingPurse as u8); + /// ``` + TransferToUnbondingPurse = 32, + /// Failed to record era info. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(33, Error::RecordEraInfo as u8); + /// ``` + RecordEraInfo = 33, + /// Failed to create a [`crate::CLValue`]. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(34, Error::CLValue as u8); + /// ``` + CLValue = 34, + /// Missing seigniorage recipients for given era. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(35, Error::MissingSeigniorageRecipients as u8); + /// ``` + MissingSeigniorageRecipients = 35, + /// Failed to transfer funds. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(36, Error::Transfer as u8); + /// ``` + Transfer = 36, + /// Delegation rate exceeds rate. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(37, Error::DelegationRateTooLarge as u8); + /// ``` + DelegationRateTooLarge = 37, + /// Raised whenever a delegator's funds are still locked in but an attempt to undelegate was + /// made. 
+ /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(38, Error::DelegatorFundsLocked as u8); + /// ``` + DelegatorFundsLocked = 38, + /// An arithmetic overflow has occurred. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(39, Error::ArithmeticOverflow as u8); + /// ``` + ArithmeticOverflow = 39, + /// Execution exceeded the gas limit. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(40, Error::GasLimit as u8); + /// ``` + GasLimit = 40, + /// Too many frames on the runtime stack. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(41, Error::RuntimeStackOverflow as u8); + /// ``` + RuntimeStackOverflow = 41, + /// An error that is raised when there is an error in the mint contract that cannot + /// be mapped to a specific auction error. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(42, Error::MintError as u8); + /// ``` + MintError = 42, + /// The validator has exceeded the maximum amount of delegators allowed. + /// NOTE: This variant is no longer in use. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(43, Error::ExceededDelegatorSizeLimit as u8); + /// ``` + ExceededDelegatorSizeLimit = 43, + /// The global delegator capacity for the auction has been reached. + /// NOTE: This variant is no longer in use. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(44, Error::GlobalDelegatorCapacityReached as u8); + /// ``` + GlobalDelegatorCapacityReached = 44, + /// The delegated amount is below the minimum allowed. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(45, Error::DelegationAmountTooSmall as u8); + /// ``` + DelegationAmountTooSmall = 45, + /// Runtime stack error. 
+ /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(46, Error::RuntimeStack as u8); + /// ``` + RuntimeStack = 46, + /// An error that is raised on private chain only when a `disable_auction_bids` flag is set to + /// `true`. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(47, Error::AuctionBidsDisabled as u8); + /// ``` + AuctionBidsDisabled = 47, + /// Error getting accumulation purse. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(48, Error::GetAccumulationPurse as u8); + /// ``` + GetAccumulationPurse = 48, + /// Failed to transfer desired amount into administrators account. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(49, Error::TransferToAdministrator as u8); + /// ``` + TransferToAdministrator = 49, +} + +impl Display for Error { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + Error::MissingKey => formatter.write_str("Missing key"), + Error::InvalidKeyVariant => formatter.write_str("Invalid key variant"), + Error::MissingValue => formatter.write_str("Missing value"), + Error::Serialization => formatter.write_str("Serialization error"), + Error::TransferToBidPurse => formatter.write_str("Transfer to bid purse error"), + Error::InvalidAmount => formatter.write_str("Invalid amount"), + Error::BidNotFound => formatter.write_str("Bid not found"), + Error::ValidatorNotFound => formatter.write_str("Validator not found"), + Error::DelegatorNotFound => formatter.write_str("Delegator not found"), + Error::Storage => formatter.write_str("Storage error"), + Error::Bonding => formatter.write_str("Bonding error"), + Error::Unbonding => formatter.write_str("Unbonding error"), + Error::ReleaseFounderStake => formatter.write_str("Unable to release founder stake"), + Error::GetBalance => formatter.write_str("Unable to get purse balance"), + Error::InvalidContext => formatter.write_str("Invalid context"), + Error::ValidatorFundsLocked 
=> formatter.write_str("Validator's funds are locked"), + Error::InvalidCaller => formatter.write_str("Function must be called by system account"), + Error::InvalidPublicKey => formatter.write_str("Supplied public key does not match caller's public key or has no associated account"), + Error::BondNotFound => formatter.write_str("Validator's bond not found"), + Error::CreatePurseFailed => formatter.write_str("Unable to create purse"), + Error::UnbondTooLarge => formatter.write_str("Unbond is too large"), + Error::BondTooSmall => formatter.write_str("Bond is too small"), + Error::MissingDelegations => formatter.write_str("Validators has not received any delegations"), + Error::MismatchedEraValidators => formatter.write_str("Mismatched era validator sets to distribute rewards"), + Error::MintReward => formatter.write_str("Failed to mint rewards"), + Error::InvalidValidatorSlotsValue => formatter.write_str("Invalid number of validator slots"), + Error::MintReduceTotalSupply => formatter.write_str("Failed to reduce total supply"), + Error::TransferToDelegatorPurse => formatter.write_str("Transfer to delegators purse error"), + Error::ValidatorRewardTransfer => formatter.write_str("Reward transfer to validator error"), + Error::DelegatorRewardTransfer => formatter.write_str("Rewards transfer to delegator error"), + Error::WithdrawDelegatorReward => formatter.write_str("Withdraw delegator reward error"), + Error::WithdrawValidatorReward => formatter.write_str("Withdraw validator reward error"), + Error::TransferToUnbondingPurse => formatter.write_str("Transfer to unbonding purse error"), + Error::RecordEraInfo => formatter.write_str("Record era info error"), + Error::CLValue => formatter.write_str("CLValue error"), + Error::MissingSeigniorageRecipients => formatter.write_str("Missing seigniorage recipients for given era"), + Error::Transfer => formatter.write_str("Transfer error"), + Error::DelegationRateTooLarge => formatter.write_str("Delegation rate too large"), + 
Error::DelegatorFundsLocked => formatter.write_str("Delegator's funds are locked"), + Error::ArithmeticOverflow => formatter.write_str("Arithmetic overflow"), + Error::GasLimit => formatter.write_str("Execution exceeded the gas limit"), + Error::RuntimeStackOverflow => formatter.write_str("Runtime stack overflow"), + Error::MintError => formatter.write_str("An error in the mint contract execution"), + Error::ExceededDelegatorSizeLimit => formatter.write_str("The amount of delegators per validator has been exceeded"), + Error::GlobalDelegatorCapacityReached => formatter.write_str("The global delegator capacity has been reached"), + Error::DelegationAmountTooSmall => formatter.write_str("The delegated amount is below the minimum allowed"), + Error::RuntimeStack => formatter.write_str("Runtime stack error"), + Error::AuctionBidsDisabled => formatter.write_str("Auction bids are disabled"), + Error::GetAccumulationPurse => formatter.write_str("Get accumulation purse error"), + Error::TransferToAdministrator => formatter.write_str("Transfer to administrator error"), + } + } +} + +impl CLTyped for Error { + fn cl_type() -> CLType { + CLType::U8 + } +} + +// This error type is not intended to be used by third party crates. +#[doc(hidden)] +#[derive(Debug, PartialEq, Eq)] +pub struct TryFromU8ForError(()); + +// This conversion is not intended to be used by third party crates. 
+#[doc(hidden)] +impl TryFrom for Error { + type Error = TryFromU8ForError; + + fn try_from(value: u8) -> result::Result { + match value { + d if d == Error::MissingKey as u8 => Ok(Error::MissingKey), + d if d == Error::InvalidKeyVariant as u8 => Ok(Error::InvalidKeyVariant), + d if d == Error::MissingValue as u8 => Ok(Error::MissingValue), + d if d == Error::Serialization as u8 => Ok(Error::Serialization), + d if d == Error::TransferToBidPurse as u8 => Ok(Error::TransferToBidPurse), + d if d == Error::InvalidAmount as u8 => Ok(Error::InvalidAmount), + d if d == Error::BidNotFound as u8 => Ok(Error::BidNotFound), + d if d == Error::ValidatorNotFound as u8 => Ok(Error::ValidatorNotFound), + d if d == Error::DelegatorNotFound as u8 => Ok(Error::DelegatorNotFound), + d if d == Error::Storage as u8 => Ok(Error::Storage), + d if d == Error::Bonding as u8 => Ok(Error::Bonding), + d if d == Error::Unbonding as u8 => Ok(Error::Unbonding), + d if d == Error::ReleaseFounderStake as u8 => Ok(Error::ReleaseFounderStake), + d if d == Error::GetBalance as u8 => Ok(Error::GetBalance), + d if d == Error::InvalidContext as u8 => Ok(Error::InvalidContext), + d if d == Error::ValidatorFundsLocked as u8 => Ok(Error::ValidatorFundsLocked), + d if d == Error::InvalidCaller as u8 => Ok(Error::InvalidCaller), + d if d == Error::InvalidPublicKey as u8 => Ok(Error::InvalidPublicKey), + d if d == Error::BondNotFound as u8 => Ok(Error::BondNotFound), + d if d == Error::CreatePurseFailed as u8 => Ok(Error::CreatePurseFailed), + d if d == Error::UnbondTooLarge as u8 => Ok(Error::UnbondTooLarge), + d if d == Error::BondTooSmall as u8 => Ok(Error::BondTooSmall), + d if d == Error::MissingDelegations as u8 => Ok(Error::MissingDelegations), + d if d == Error::MismatchedEraValidators as u8 => Ok(Error::MismatchedEraValidators), + d if d == Error::MintReward as u8 => Ok(Error::MintReward), + d if d == Error::InvalidValidatorSlotsValue as u8 => { + Ok(Error::InvalidValidatorSlotsValue) + } + d if d == 
Error::MintReduceTotalSupply as u8 => Ok(Error::MintReduceTotalSupply), + d if d == Error::TransferToDelegatorPurse as u8 => Ok(Error::TransferToDelegatorPurse), + d if d == Error::ValidatorRewardTransfer as u8 => Ok(Error::ValidatorRewardTransfer), + d if d == Error::DelegatorRewardTransfer as u8 => Ok(Error::DelegatorRewardTransfer), + d if d == Error::WithdrawDelegatorReward as u8 => Ok(Error::WithdrawDelegatorReward), + d if d == Error::WithdrawValidatorReward as u8 => Ok(Error::WithdrawValidatorReward), + d if d == Error::TransferToUnbondingPurse as u8 => Ok(Error::TransferToUnbondingPurse), + + d if d == Error::RecordEraInfo as u8 => Ok(Error::RecordEraInfo), + d if d == Error::CLValue as u8 => Ok(Error::CLValue), + d if d == Error::MissingSeigniorageRecipients as u8 => { + Ok(Error::MissingSeigniorageRecipients) + } + d if d == Error::Transfer as u8 => Ok(Error::Transfer), + d if d == Error::DelegationRateTooLarge as u8 => Ok(Error::DelegationRateTooLarge), + d if d == Error::DelegatorFundsLocked as u8 => Ok(Error::DelegatorFundsLocked), + d if d == Error::ArithmeticOverflow as u8 => Ok(Error::ArithmeticOverflow), + d if d == Error::GasLimit as u8 => Ok(Error::GasLimit), + d if d == Error::RuntimeStackOverflow as u8 => Ok(Error::RuntimeStackOverflow), + d if d == Error::MintError as u8 => Ok(Error::MintError), + d if d == Error::ExceededDelegatorSizeLimit as u8 => { + Ok(Error::ExceededDelegatorSizeLimit) + } + d if d == Error::GlobalDelegatorCapacityReached as u8 => { + Ok(Error::GlobalDelegatorCapacityReached) + } + d if d == Error::DelegationAmountTooSmall as u8 => Ok(Error::DelegationAmountTooSmall), + d if d == Error::RuntimeStack as u8 => Ok(Error::RuntimeStack), + d if d == Error::AuctionBidsDisabled as u8 => Ok(Error::AuctionBidsDisabled), + d if d == Error::GetAccumulationPurse as u8 => Ok(Error::GetAccumulationPurse), + d if d == Error::TransferToAdministrator as u8 => Ok(Error::TransferToAdministrator), + _ => Err(TryFromU8ForError(())), + } + } 
+} + +impl ToBytes for Error { + fn to_bytes(&self) -> result::Result, bytesrepr::Error> { + let value = *self as u8; + value.to_bytes() + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + } +} + +impl FromBytes for Error { + fn from_bytes(bytes: &[u8]) -> result::Result<(Self, &[u8]), bytesrepr::Error> { + let (value, rem): (u8, _) = FromBytes::from_bytes(bytes)?; + let error: Error = value + .try_into() + // In case an Error variant is unable to be determined it would return an + // Error::Formatting as if its unable to be correctly deserialized. + .map_err(|_| bytesrepr::Error::Formatting)?; + Ok((error, rem)) + } +} + +impl From for Error { + fn from(_: bytesrepr::Error) -> Self { + Error::Serialization + } +} + +// This error type is not intended to be used by third party crates. +#[doc(hidden)] +pub enum PurseLookupError { + KeyNotFound, + KeyUnexpectedType, +} + +impl From for Error { + fn from(error: PurseLookupError) -> Self { + match error { + PurseLookupError::KeyNotFound => Error::MissingKey, + PurseLookupError::KeyUnexpectedType => Error::InvalidKeyVariant, + } + } +} + +#[cfg(test)] +mod tests { + use strum::IntoEnumIterator; + + use super::Error; + + #[test] + fn error_forward_trips() { + for expected_error_variant in Error::iter() { + assert_eq!( + Error::try_from(expected_error_variant as u8), + Ok(expected_error_variant) + ) + } + } + + #[test] + fn error_backward_trips() { + for u8 in 0..=u8::max_value() { + match Error::try_from(u8) { + Ok(error_variant) => { + assert_eq!(u8, error_variant as u8, "Error code mismatch") + } + Err(_) => continue, + }; + } + } +} diff --git a/casper_types/src/system/auction/seigniorage_recipient.rs b/casper_types/src/system/auction/seigniorage_recipient.rs new file mode 100644 index 00000000..4387ca25 --- /dev/null +++ b/casper_types/src/system/auction/seigniorage_recipient.rs @@ -0,0 +1,196 @@ +use alloc::{collections::BTreeMap, vec::Vec}; + +use crate::{ + bytesrepr::{self, FromBytes, 
ToBytes}, + system::auction::{Bid, DelegationRate}, + CLType, CLTyped, PublicKey, U512, +}; + +/// The seigniorage recipient details. +#[derive(Default, PartialEq, Eq, Clone, Debug)] +pub struct SeigniorageRecipient { + /// Validator stake (not including delegators) + stake: U512, + /// Delegation rate of a seigniorage recipient. + delegation_rate: DelegationRate, + /// Delegators and their bids. + delegator_stake: BTreeMap, +} + +impl SeigniorageRecipient { + /// Creates a new SeigniorageRecipient + pub fn new( + stake: U512, + delegation_rate: DelegationRate, + delegator_stake: BTreeMap, + ) -> Self { + Self { + stake, + delegation_rate, + delegator_stake, + } + } + + /// Returns stake of the provided recipient + pub fn stake(&self) -> &U512 { + &self.stake + } + + /// Returns delegation rate of the provided recipient + pub fn delegation_rate(&self) -> &DelegationRate { + &self.delegation_rate + } + + /// Returns delegators of the provided recipient and their stake + pub fn delegator_stake(&self) -> &BTreeMap { + &self.delegator_stake + } + + /// Calculates total stake, including delegators' total stake + pub fn total_stake(&self) -> Option { + self.delegator_total_stake()?.checked_add(self.stake) + } + + /// Calculates total stake for all delegators + pub fn delegator_total_stake(&self) -> Option { + let mut total_stake: U512 = U512::zero(); + for stake in self.delegator_stake.values() { + total_stake = total_stake.checked_add(*stake)?; + } + Some(total_stake) + } +} + +impl CLTyped for SeigniorageRecipient { + fn cl_type() -> CLType { + CLType::Any + } +} + +impl ToBytes for SeigniorageRecipient { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + result.extend(self.stake.to_bytes()?); + result.extend(self.delegation_rate.to_bytes()?); + result.extend(self.delegator_stake.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.stake.serialized_length() + + 
self.delegation_rate.serialized_length() + + self.delegator_stake.serialized_length() + } +} + +impl FromBytes for SeigniorageRecipient { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (stake, bytes) = FromBytes::from_bytes(bytes)?; + let (delegation_rate, bytes) = FromBytes::from_bytes(bytes)?; + let (delegator_stake, bytes) = FromBytes::from_bytes(bytes)?; + Ok(( + SeigniorageRecipient { + stake, + delegation_rate, + delegator_stake, + }, + bytes, + )) + } +} + +impl From<&Bid> for SeigniorageRecipient { + fn from(bid: &Bid) -> Self { + let delegator_stake = bid + .delegators() + .iter() + .map(|(public_key, delegator)| (public_key.clone(), *delegator.staked_amount())) + .collect(); + Self { + stake: *bid.staked_amount(), + delegation_rate: *bid.delegation_rate(), + delegator_stake, + } + } +} + +#[cfg(test)] +mod tests { + use alloc::collections::BTreeMap; + use core::iter::FromIterator; + + use crate::{ + bytesrepr, + system::auction::{DelegationRate, SeigniorageRecipient}, + PublicKey, SecretKey, U512, + }; + + #[test] + fn serialization_roundtrip() { + let delegator_1_key = PublicKey::from( + &SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(), + ); + let delegator_2_key = PublicKey::from( + &SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]).unwrap(), + ); + let delegator_3_key = PublicKey::from( + &SecretKey::ed25519_from_bytes([44; SecretKey::ED25519_LENGTH]).unwrap(), + ); + let seigniorage_recipient = SeigniorageRecipient { + stake: U512::max_value(), + delegation_rate: DelegationRate::max_value(), + delegator_stake: BTreeMap::from_iter(vec![ + (delegator_1_key, U512::max_value()), + (delegator_2_key, U512::max_value()), + (delegator_3_key, U512::zero()), + ]), + }; + bytesrepr::test_serialization_roundtrip(&seigniorage_recipient); + } + + #[test] + fn test_overflow_in_delegation_rate() { + let delegator_1_key = PublicKey::from( + &SecretKey::ed25519_from_bytes([42; 
SecretKey::ED25519_LENGTH]).unwrap(), + ); + let delegator_2_key = PublicKey::from( + &SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]).unwrap(), + ); + let delegator_3_key = PublicKey::from( + &SecretKey::ed25519_from_bytes([44; SecretKey::ED25519_LENGTH]).unwrap(), + ); + let seigniorage_recipient = SeigniorageRecipient { + stake: U512::max_value(), + delegation_rate: DelegationRate::max_value(), + delegator_stake: BTreeMap::from_iter(vec![ + (delegator_1_key, U512::max_value()), + (delegator_2_key, U512::max_value()), + (delegator_3_key, U512::zero()), + ]), + }; + assert_eq!(seigniorage_recipient.total_stake(), None) + } + + #[test] + fn test_overflow_in_delegation_total_stake() { + let delegator_1_key = PublicKey::from( + &SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(), + ); + let delegator_2_key = PublicKey::from( + &SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]).unwrap(), + ); + let delegator_3_key = PublicKey::from( + &SecretKey::ed25519_from_bytes([44; SecretKey::ED25519_LENGTH]).unwrap(), + ); + let seigniorage_recipient = SeigniorageRecipient { + stake: U512::max_value(), + delegation_rate: DelegationRate::max_value(), + delegator_stake: BTreeMap::from_iter(vec![ + (delegator_1_key, U512::max_value()), + (delegator_2_key, U512::max_value()), + (delegator_3_key, U512::max_value()), + ]), + }; + assert_eq!(seigniorage_recipient.delegator_total_stake(), None) + } +} diff --git a/casper_types/src/system/auction/unbonding_purse.rs b/casper_types/src/system/auction/unbonding_purse.rs new file mode 100644 index 00000000..1f36d828 --- /dev/null +++ b/casper_types/src/system/auction/unbonding_purse.rs @@ -0,0 +1,236 @@ +// TODO - remove once schemars stops causing warning. 
+#![allow(clippy::field_reassign_with_default)] + +use alloc::vec::Vec; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + CLType, CLTyped, EraId, PublicKey, URef, U512, +}; + +use super::WithdrawPurse; + +/// Unbonding purse. +#[derive(PartialEq, Eq, Debug, Serialize, Deserialize, Clone)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct UnbondingPurse { + /// Bonding Purse + bonding_purse: URef, + /// Validators public key. + validator_public_key: PublicKey, + /// Unbonders public key. + unbonder_public_key: PublicKey, + /// Era in which this unbonding request was created. + era_of_creation: EraId, + /// Unbonding Amount. + amount: U512, + /// The validator public key to re-delegate to. + new_validator: Option, +} + +impl UnbondingPurse { + /// Creates [`UnbondingPurse`] instance for an unbonding request. + pub const fn new( + bonding_purse: URef, + validator_public_key: PublicKey, + unbonder_public_key: PublicKey, + era_of_creation: EraId, + amount: U512, + new_validator: Option, + ) -> Self { + Self { + bonding_purse, + validator_public_key, + unbonder_public_key, + era_of_creation, + amount, + new_validator, + } + } + + /// Checks if given request is made by a validator by checking if public key of unbonder is same + /// as a key owned by validator. + pub fn is_validator(&self) -> bool { + self.validator_public_key == self.unbonder_public_key + } + + /// Returns bonding purse used to make this unbonding request. + pub fn bonding_purse(&self) -> &URef { + &self.bonding_purse + } + + /// Returns public key of validator. + pub fn validator_public_key(&self) -> &PublicKey { + &self.validator_public_key + } + + /// Returns public key of unbonder. 
+ /// + /// For withdrawal requests that originated from validator's public key through `withdraw_bid` + /// entrypoint this is equal to [`UnbondingPurse::validator_public_key`] and + /// [`UnbondingPurse::is_validator`] is `true`. + pub fn unbonder_public_key(&self) -> &PublicKey { + &self.unbonder_public_key + } + + /// Returns era which was used to create this unbonding request. + pub fn era_of_creation(&self) -> EraId { + self.era_of_creation + } + + /// Returns unbonding amount. + pub fn amount(&self) -> &U512 { + &self.amount + } + + /// Returns the public key for the new validator. + pub fn new_validator(&self) -> &Option { + &self.new_validator + } +} + +impl ToBytes for UnbondingPurse { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + result.extend(&self.bonding_purse.to_bytes()?); + result.extend(&self.validator_public_key.to_bytes()?); + result.extend(&self.unbonder_public_key.to_bytes()?); + result.extend(&self.era_of_creation.to_bytes()?); + result.extend(&self.amount.to_bytes()?); + result.extend(&self.new_validator.to_bytes()?); + Ok(result) + } + fn serialized_length(&self) -> usize { + self.bonding_purse.serialized_length() + + self.validator_public_key.serialized_length() + + self.unbonder_public_key.serialized_length() + + self.era_of_creation.serialized_length() + + self.amount.serialized_length() + + self.new_validator.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.bonding_purse.write_bytes(writer)?; + self.validator_public_key.write_bytes(writer)?; + self.unbonder_public_key.write_bytes(writer)?; + self.era_of_creation.write_bytes(writer)?; + self.amount.write_bytes(writer)?; + self.new_validator.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for UnbondingPurse { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (bonding_purse, remainder) = FromBytes::from_bytes(bytes)?; + let 
(validator_public_key, remainder) = FromBytes::from_bytes(remainder)?; + let (unbonder_public_key, remainder) = FromBytes::from_bytes(remainder)?; + let (era_of_creation, remainder) = FromBytes::from_bytes(remainder)?; + let (amount, remainder) = FromBytes::from_bytes(remainder)?; + let (new_validator, remainder) = Option::::from_bytes(remainder)?; + + Ok(( + UnbondingPurse { + bonding_purse, + validator_public_key, + unbonder_public_key, + era_of_creation, + amount, + new_validator, + }, + remainder, + )) + } +} + +impl CLTyped for UnbondingPurse { + fn cl_type() -> CLType { + CLType::Any + } +} + +impl From for UnbondingPurse { + fn from(withdraw_purse: WithdrawPurse) -> Self { + UnbondingPurse::new( + withdraw_purse.bonding_purse, + withdraw_purse.validator_public_key, + withdraw_purse.unbonder_public_key, + withdraw_purse.era_of_creation, + withdraw_purse.amount, + None, + ) + } +} + +#[cfg(test)] +mod tests { + use crate::{ + bytesrepr, system::auction::UnbondingPurse, AccessRights, EraId, PublicKey, SecretKey, + URef, U512, + }; + + const BONDING_PURSE: URef = URef::new([14; 32], AccessRights::READ_ADD_WRITE); + const ERA_OF_WITHDRAWAL: EraId = EraId::MAX; + + fn validator_public_key() -> PublicKey { + let secret_key = SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) + } + + fn unbonder_public_key() -> PublicKey { + let secret_key = SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) + } + + fn amount() -> U512 { + U512::max_value() - 1 + } + + #[test] + fn serialization_roundtrip_for_unbonding_purse() { + let unbonding_purse = UnbondingPurse { + bonding_purse: BONDING_PURSE, + validator_public_key: validator_public_key(), + unbonder_public_key: unbonder_public_key(), + era_of_creation: ERA_OF_WITHDRAWAL, + amount: amount(), + new_validator: None, + }; + + bytesrepr::test_serialization_roundtrip(&unbonding_purse); + } + + #[test] + fn 
should_be_validator_condition_for_unbonding_purse() { + let validator_unbonding_purse = UnbondingPurse::new( + BONDING_PURSE, + validator_public_key(), + validator_public_key(), + ERA_OF_WITHDRAWAL, + amount(), + None, + ); + assert!(validator_unbonding_purse.is_validator()); + } + + #[test] + fn should_be_delegator_condition_for_unbonding_purse() { + let delegator_unbonding_purse = UnbondingPurse::new( + BONDING_PURSE, + validator_public_key(), + unbonder_public_key(), + ERA_OF_WITHDRAWAL, + amount(), + None, + ); + assert!(!delegator_unbonding_purse.is_validator()); + } +} diff --git a/casper_types/src/system/auction/withdraw_purse.rs b/casper_types/src/system/auction/withdraw_purse.rs new file mode 100644 index 00000000..b79ee1e5 --- /dev/null +++ b/casper_types/src/system/auction/withdraw_purse.rs @@ -0,0 +1,195 @@ +// TODO - remove once schemars stops causing warning. +#![allow(clippy::field_reassign_with_default)] + +use alloc::vec::Vec; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + CLType, CLTyped, EraId, PublicKey, URef, U512, +}; + +/// A withdraw purse, a legacy structure. +#[derive(PartialEq, Eq, Debug, Serialize, Deserialize, Clone)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct WithdrawPurse { + /// Bonding Purse + pub(crate) bonding_purse: URef, + /// Validators public key. + pub(crate) validator_public_key: PublicKey, + /// Unbonders public key. + pub(crate) unbonder_public_key: PublicKey, + /// Era in which this unbonding request was created. + pub(crate) era_of_creation: EraId, + /// Unbonding Amount. + pub(crate) amount: U512, +} + +impl WithdrawPurse { + /// Creates [`WithdrawPurse`] instance for an unbonding request. 
+ pub const fn new( + bonding_purse: URef, + validator_public_key: PublicKey, + unbonder_public_key: PublicKey, + era_of_creation: EraId, + amount: U512, + ) -> Self { + Self { + bonding_purse, + validator_public_key, + unbonder_public_key, + era_of_creation, + amount, + } + } + + /// Checks if given request is made by a validator by checking if public key of unbonder is same + /// as a key owned by validator. + pub fn is_validator(&self) -> bool { + self.validator_public_key == self.unbonder_public_key + } + + /// Returns bonding purse used to make this unbonding request. + pub fn bonding_purse(&self) -> &URef { + &self.bonding_purse + } + + /// Returns public key of validator. + pub fn validator_public_key(&self) -> &PublicKey { + &self.validator_public_key + } + + /// Returns public key of unbonder. + /// + /// For withdrawal requests that originated from validator's public key through `withdraw_bid` + /// entrypoint this is equal to [`WithdrawPurse::validator_public_key`] and + /// [`WithdrawPurse::is_validator`] is `true`. + pub fn unbonder_public_key(&self) -> &PublicKey { + &self.unbonder_public_key + } + + /// Returns era which was used to create this unbonding request. + pub fn era_of_creation(&self) -> EraId { + self.era_of_creation + } + + /// Returns unbonding amount. 
+ pub fn amount(&self) -> &U512 { + &self.amount + } +} + +impl ToBytes for WithdrawPurse { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + result.extend(&self.bonding_purse.to_bytes()?); + result.extend(&self.validator_public_key.to_bytes()?); + result.extend(&self.unbonder_public_key.to_bytes()?); + result.extend(&self.era_of_creation.to_bytes()?); + result.extend(&self.amount.to_bytes()?); + + Ok(result) + } + fn serialized_length(&self) -> usize { + self.bonding_purse.serialized_length() + + self.validator_public_key.serialized_length() + + self.unbonder_public_key.serialized_length() + + self.era_of_creation.serialized_length() + + self.amount.serialized_length() + } +} + +impl FromBytes for WithdrawPurse { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (bonding_purse, remainder) = FromBytes::from_bytes(bytes)?; + let (validator_public_key, remainder) = FromBytes::from_bytes(remainder)?; + let (unbonder_public_key, remainder) = FromBytes::from_bytes(remainder)?; + let (era_of_creation, remainder) = FromBytes::from_bytes(remainder)?; + let (amount, remainder) = FromBytes::from_bytes(remainder)?; + + Ok(( + WithdrawPurse { + bonding_purse, + validator_public_key, + unbonder_public_key, + era_of_creation, + amount, + }, + remainder, + )) + } +} + +impl CLTyped for WithdrawPurse { + fn cl_type() -> CLType { + CLType::Any + } +} + +#[cfg(test)] +mod tests { + use crate::{bytesrepr, AccessRights, EraId, PublicKey, SecretKey, URef, U512}; + + use super::WithdrawPurse; + + const BONDING_PURSE: URef = URef::new([41; 32], AccessRights::READ_ADD_WRITE); + const ERA_OF_WITHDRAWAL: EraId = EraId::MAX; + + fn validator_public_key() -> PublicKey { + let secret_key = SecretKey::ed25519_from_bytes([44; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) + } + + fn unbonder_public_key() -> PublicKey { + let secret_key = SecretKey::ed25519_from_bytes([45; 
SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) + } + + fn amount() -> U512 { + U512::max_value() - 1 + } + + #[test] + fn serialization_roundtrip_for_withdraw_purse() { + let withdraw_purse = WithdrawPurse { + bonding_purse: BONDING_PURSE, + validator_public_key: validator_public_key(), + unbonder_public_key: unbonder_public_key(), + era_of_creation: ERA_OF_WITHDRAWAL, + amount: amount(), + }; + + bytesrepr::test_serialization_roundtrip(&withdraw_purse); + } + + #[test] + fn should_be_validator_condition_for_withdraw_purse() { + let validator_withdraw_purse = WithdrawPurse::new( + BONDING_PURSE, + validator_public_key(), + validator_public_key(), + ERA_OF_WITHDRAWAL, + amount(), + ); + assert!(validator_withdraw_purse.is_validator()); + } + + #[test] + fn should_be_delegator_condition_for_withdraw_purse() { + let delegator_withdraw_purse = WithdrawPurse::new( + BONDING_PURSE, + validator_public_key(), + unbonder_public_key(), + ERA_OF_WITHDRAWAL, + amount(), + ); + assert!(!delegator_withdraw_purse.is_validator()); + } +} diff --git a/casper_types/src/system/call_stack_element.rs b/casper_types/src/system/call_stack_element.rs new file mode 100644 index 00000000..e0741f0c --- /dev/null +++ b/casper_types/src/system/call_stack_element.rs @@ -0,0 +1,194 @@ +use alloc::vec::Vec; + +use num_derive::{FromPrimitive, ToPrimitive}; +use num_traits::FromPrimitive; + +use crate::{ + account::AccountHash, + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + CLType, CLTyped, ContractHash, ContractPackageHash, +}; + +/// Tag representing variants of CallStackElement for purposes of serialization. +#[derive(FromPrimitive, ToPrimitive)] +#[repr(u8)] +pub enum CallStackElementTag { + /// Session tag. + Session = 0, + /// StoredSession tag. + StoredSession, + /// StoredContract tag. + StoredContract, +} + +/// Represents the origin of a sub-call. 
+#[derive(Clone, Debug, PartialEq, Eq)] +pub enum CallStackElement { + /// Session + Session { + /// The account hash of the caller + account_hash: AccountHash, + }, + /// Effectively an EntryPointType::Session - stored access to a session. + StoredSession { + /// The account hash of the caller + account_hash: AccountHash, + /// The contract package hash + contract_package_hash: ContractPackageHash, + /// The contract hash + contract_hash: ContractHash, + }, + /// Contract + StoredContract { + /// The contract package hash + contract_package_hash: ContractPackageHash, + /// The contract hash + contract_hash: ContractHash, + }, +} + +impl CallStackElement { + /// Creates a [`CallStackElement::Session`]. This represents a call into session code, and + /// should only ever happen once in a call stack. + pub fn session(account_hash: AccountHash) -> Self { + CallStackElement::Session { account_hash } + } + + /// Creates a [`'CallStackElement::StoredContract`]. This represents a call into a contract with + /// `EntryPointType::Contract`. + pub fn stored_contract( + contract_package_hash: ContractPackageHash, + contract_hash: ContractHash, + ) -> Self { + CallStackElement::StoredContract { + contract_package_hash, + contract_hash, + } + } + + /// Creates a [`'CallStackElement::StoredSession`]. This represents a call into a contract with + /// `EntryPointType::Session`. + pub fn stored_session( + account_hash: AccountHash, + contract_package_hash: ContractPackageHash, + contract_hash: ContractHash, + ) -> Self { + CallStackElement::StoredSession { + account_hash, + contract_package_hash, + contract_hash, + } + } + + /// Gets the tag from self. + pub fn tag(&self) -> CallStackElementTag { + match self { + CallStackElement::Session { .. } => CallStackElementTag::Session, + CallStackElement::StoredSession { .. } => CallStackElementTag::StoredSession, + CallStackElement::StoredContract { .. 
} => CallStackElementTag::StoredContract, + } + } + + /// Gets the [`ContractHash`] for both stored session and stored contract variants. + pub fn contract_hash(&self) -> Option<&ContractHash> { + match self { + CallStackElement::Session { .. } => None, + CallStackElement::StoredSession { contract_hash, .. } + | CallStackElement::StoredContract { contract_hash, .. } => Some(contract_hash), + } + } +} + +impl ToBytes for CallStackElement { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + result.push(self.tag() as u8); + match self { + CallStackElement::Session { account_hash } => { + result.append(&mut account_hash.to_bytes()?) + } + CallStackElement::StoredSession { + account_hash, + contract_package_hash, + contract_hash, + } => { + result.append(&mut account_hash.to_bytes()?); + result.append(&mut contract_package_hash.to_bytes()?); + result.append(&mut contract_hash.to_bytes()?); + } + CallStackElement::StoredContract { + contract_package_hash, + contract_hash, + } => { + result.append(&mut contract_package_hash.to_bytes()?); + result.append(&mut contract_hash.to_bytes()?); + } + }; + Ok(result) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + CallStackElement::Session { account_hash } => account_hash.serialized_length(), + CallStackElement::StoredSession { + account_hash, + contract_package_hash, + contract_hash, + } => { + account_hash.serialized_length() + + contract_package_hash.serialized_length() + + contract_hash.serialized_length() + } + CallStackElement::StoredContract { + contract_package_hash, + contract_hash, + } => contract_package_hash.serialized_length() + contract_hash.serialized_length(), + } + } +} + +impl FromBytes for CallStackElement { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder): (u8, &[u8]) = FromBytes::from_bytes(bytes)?; + let tag = 
CallStackElementTag::from_u8(tag).ok_or(bytesrepr::Error::Formatting)?; + match tag { + CallStackElementTag::Session => { + let (account_hash, remainder) = AccountHash::from_bytes(remainder)?; + Ok((CallStackElement::Session { account_hash }, remainder)) + } + CallStackElementTag::StoredSession => { + let (account_hash, remainder) = AccountHash::from_bytes(remainder)?; + let (contract_package_hash, remainder) = + ContractPackageHash::from_bytes(remainder)?; + let (contract_hash, remainder) = ContractHash::from_bytes(remainder)?; + Ok(( + CallStackElement::StoredSession { + account_hash, + contract_package_hash, + contract_hash, + }, + remainder, + )) + } + CallStackElementTag::StoredContract => { + let (contract_package_hash, remainder) = + ContractPackageHash::from_bytes(remainder)?; + let (contract_hash, remainder) = ContractHash::from_bytes(remainder)?; + Ok(( + CallStackElement::StoredContract { + contract_package_hash, + contract_hash, + }, + remainder, + )) + } + } + } +} + +impl CLTyped for CallStackElement { + fn cl_type() -> CLType { + CLType::Any + } +} diff --git a/casper_types/src/system/error.rs b/casper_types/src/system/error.rs new file mode 100644 index 00000000..c63e3f58 --- /dev/null +++ b/casper_types/src/system/error.rs @@ -0,0 +1,43 @@ +use core::fmt::{self, Display, Formatter}; + +use crate::system::{auction, handle_payment, mint}; + +/// An aggregate enum error with variants for each system contract's error. +#[derive(Debug, Copy, Clone)] +#[non_exhaustive] +pub enum Error { + /// Contains a [`mint::Error`]. + Mint(mint::Error), + /// Contains a [`handle_payment::Error`]. + HandlePayment(handle_payment::Error), + /// Contains a [`auction::Error`]. 
+ Auction(auction::Error), +} + +impl From for Error { + fn from(error: mint::Error) -> Error { + Error::Mint(error) + } +} + +impl From for Error { + fn from(error: handle_payment::Error) -> Error { + Error::HandlePayment(error) + } +} + +impl From for Error { + fn from(error: auction::Error) -> Error { + Error::Auction(error) + } +} + +impl Display for Error { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + Error::Mint(error) => write!(formatter, "Mint error: {}", error), + Error::HandlePayment(error) => write!(formatter, "HandlePayment error: {}", error), + Error::Auction(error) => write!(formatter, "Auction error: {}", error), + } + } +} diff --git a/casper_types/src/system/handle_payment.rs b/casper_types/src/system/handle_payment.rs new file mode 100644 index 00000000..1b12f3ec --- /dev/null +++ b/casper_types/src/system/handle_payment.rs @@ -0,0 +1,8 @@ +//! Contains implementation of a Handle Payment contract functionality. +mod constants; +mod entry_points; +mod error; + +pub use constants::*; +pub use entry_points::handle_payment_entry_points; +pub use error::Error; diff --git a/casper_types/src/system/handle_payment/constants.rs b/casper_types/src/system/handle_payment/constants.rs new file mode 100644 index 00000000..ef0feedd --- /dev/null +++ b/casper_types/src/system/handle_payment/constants.rs @@ -0,0 +1,37 @@ +/// Named constant for `purse`. +pub const ARG_PURSE: &str = "purse"; +/// Named constant for `amount`. +pub const ARG_AMOUNT: &str = "amount"; +/// Named constant for `source`. +pub const ARG_ACCOUNT: &str = "account"; +/// Named constant for `target`. +pub const ARG_TARGET: &str = "target"; + +/// Named constant for method `get_payment_purse`. +pub const METHOD_GET_PAYMENT_PURSE: &str = "get_payment_purse"; +/// Named constant for method `set_refund_purse`. +pub const METHOD_SET_REFUND_PURSE: &str = "set_refund_purse"; +/// Named constant for method `get_refund_purse`. 
+pub const METHOD_GET_REFUND_PURSE: &str = "get_refund_purse"; +/// Named constant for method `finalize_payment`. +pub const METHOD_FINALIZE_PAYMENT: &str = "finalize_payment"; +/// Named constant for method `distribute_accumulated_fees`. +pub const METHOD_DISTRIBUTE_ACCUMULATED_FEES: &str = "distribute_accumulated_fees"; + +/// Storage for handle payment contract hash. +pub const CONTRACT_HASH_KEY: &str = "contract_hash"; + +/// Storage for handle payment access key. +pub const CONTRACT_ACCESS_KEY: &str = "access_key"; + +/// The uref name where the Handle Payment accepts payment for computation on behalf of validators. +pub const PAYMENT_PURSE_KEY: &str = "payment_purse"; + +/// The uref name where the Handle Payment will refund unused payment back to the user. The uref +/// this name corresponds to is set by the user. +pub const REFUND_PURSE_KEY: &str = "refund_purse"; +/// Storage for handle payment accumulation purse key. +/// +/// This purse is used when `fee_elimination` config is set to `Accumulate` which makes sense for +/// some private chains. +pub const ACCUMULATION_PURSE_KEY: &str = "accumulation_purse"; diff --git a/casper_types/src/system/handle_payment/entry_points.rs b/casper_types/src/system/handle_payment/entry_points.rs new file mode 100644 index 00000000..9f5c032e --- /dev/null +++ b/casper_types/src/system/handle_payment/entry_points.rs @@ -0,0 +1,66 @@ +use alloc::boxed::Box; + +use crate::{ + system::handle_payment::{ + ARG_ACCOUNT, ARG_AMOUNT, ARG_PURSE, METHOD_FINALIZE_PAYMENT, METHOD_GET_PAYMENT_PURSE, + METHOD_GET_REFUND_PURSE, METHOD_SET_REFUND_PURSE, + }, + CLType, EntryPoint, EntryPointAccess, EntryPointType, EntryPoints, Parameter, +}; + +use super::METHOD_DISTRIBUTE_ACCUMULATED_FEES; + +/// Creates handle payment contract entry points. 
+pub fn handle_payment_entry_points() -> EntryPoints { + let mut entry_points = EntryPoints::new(); + + let get_payment_purse = EntryPoint::new( + METHOD_GET_PAYMENT_PURSE, + vec![], + CLType::URef, + EntryPointAccess::Public, + EntryPointType::Contract, + ); + entry_points.add_entry_point(get_payment_purse); + + let set_refund_purse = EntryPoint::new( + METHOD_SET_REFUND_PURSE, + vec![Parameter::new(ARG_PURSE, CLType::URef)], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Contract, + ); + entry_points.add_entry_point(set_refund_purse); + + let get_refund_purse = EntryPoint::new( + METHOD_GET_REFUND_PURSE, + vec![], + CLType::Option(Box::new(CLType::URef)), + EntryPointAccess::Public, + EntryPointType::Contract, + ); + entry_points.add_entry_point(get_refund_purse); + + let finalize_payment = EntryPoint::new( + METHOD_FINALIZE_PAYMENT, + vec![ + Parameter::new(ARG_AMOUNT, CLType::U512), + Parameter::new(ARG_ACCOUNT, CLType::ByteArray(32)), + ], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Contract, + ); + entry_points.add_entry_point(finalize_payment); + + let distribute_accumulated_fees = EntryPoint::new( + METHOD_DISTRIBUTE_ACCUMULATED_FEES, + vec![], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Contract, + ); + entry_points.add_entry_point(distribute_accumulated_fees); + + entry_points +} diff --git a/casper_types/src/system/handle_payment/error.rs b/casper_types/src/system/handle_payment/error.rs new file mode 100644 index 00000000..77867a36 --- /dev/null +++ b/casper_types/src/system/handle_payment/error.rs @@ -0,0 +1,424 @@ +//! Home of the Handle Payment contract's [`enum@Error`] type. +use alloc::vec::Vec; +use core::{ + convert::TryFrom, + fmt::{self, Display, Formatter}, + result, +}; + +use crate::{ + bytesrepr::{self, ToBytes, U8_SERIALIZED_LENGTH}, + CLType, CLTyped, +}; + +/// Errors which can occur while executing the Handle Payment contract. +// TODO: Split this up into user errors vs. system errors. 
+#[derive(Debug, Copy, Clone, PartialEq, Eq)] +#[repr(u8)] +#[non_exhaustive] +pub enum Error { + // ===== User errors ===== + /// The given validator is not bonded. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(0, Error::NotBonded as u8); + /// ``` + NotBonded = 0, + /// There are too many bonding or unbonding attempts already enqueued to allow more. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(1, Error::TooManyEventsInQueue as u8); + /// ``` + TooManyEventsInQueue = 1, + /// At least one validator must remain bonded. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(2, Error::CannotUnbondLastValidator as u8); + /// ``` + CannotUnbondLastValidator = 2, + /// Failed to bond or unbond as this would have resulted in exceeding the maximum allowed + /// difference between the largest and smallest stakes. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(3, Error::SpreadTooHigh as u8); + /// ``` + SpreadTooHigh = 3, + /// The given validator already has a bond or unbond attempt enqueued. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(4, Error::MultipleRequests as u8); + /// ``` + MultipleRequests = 4, + /// Attempted to bond with a stake which was too small. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(5, Error::BondTooSmall as u8); + /// ``` + BondTooSmall = 5, + /// Attempted to bond with a stake which was too large. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(6, Error::BondTooLarge as u8); + /// ``` + BondTooLarge = 6, + /// Attempted to unbond an amount which was too large. 
+ /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(7, Error::UnbondTooLarge as u8); + /// ``` + UnbondTooLarge = 7, + /// While bonding, the transfer from source purse to the Handle Payment internal purse failed. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(8, Error::BondTransferFailed as u8); + /// ``` + BondTransferFailed = 8, + /// While unbonding, the transfer from the Handle Payment internal purse to the destination + /// purse failed. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(9, Error::UnbondTransferFailed as u8); + /// ``` + UnbondTransferFailed = 9, + // ===== System errors ===== + /// Internal error: a [`BlockTime`](crate::BlockTime) was unexpectedly out of sequence. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(10, Error::TimeWentBackwards as u8); + /// ``` + TimeWentBackwards = 10, + /// Internal error: stakes were unexpectedly empty. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(11, Error::StakesNotFound as u8); + /// ``` + StakesNotFound = 11, + /// Internal error: the Handle Payment contract's payment purse wasn't found. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(12, Error::PaymentPurseNotFound as u8); + /// ``` + PaymentPurseNotFound = 12, + /// Internal error: the Handle Payment contract's payment purse key was the wrong type. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(13, Error::PaymentPurseKeyUnexpectedType as u8); + /// ``` + PaymentPurseKeyUnexpectedType = 13, + /// Internal error: couldn't retrieve the balance for the Handle Payment contract's payment + /// purse. 
+ /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(14, Error::PaymentPurseBalanceNotFound as u8); + /// ``` + PaymentPurseBalanceNotFound = 14, + /// Internal error: the Handle Payment contract's bonding purse wasn't found. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(15, Error::BondingPurseNotFound as u8); + /// ``` + BondingPurseNotFound = 15, + /// Internal error: the Handle Payment contract's bonding purse key was the wrong type. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(16, Error::BondingPurseKeyUnexpectedType as u8); + /// ``` + BondingPurseKeyUnexpectedType = 16, + /// Internal error: the Handle Payment contract's refund purse key was the wrong type. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(17, Error::RefundPurseKeyUnexpectedType as u8); + /// ``` + RefundPurseKeyUnexpectedType = 17, + /// Internal error: the Handle Payment contract's rewards purse wasn't found. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(18, Error::RewardsPurseNotFound as u8); + /// ``` + RewardsPurseNotFound = 18, + /// Internal error: the Handle Payment contract's rewards purse key was the wrong type. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(19, Error::RewardsPurseKeyUnexpectedType as u8); + /// ``` + RewardsPurseKeyUnexpectedType = 19, + // TODO: Put these in their own enum, and wrap them separately in `BondingError` and + // `UnbondingError`. + /// Internal error: failed to deserialize the stake's key. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(20, Error::StakesKeyDeserializationFailed as u8); + /// ``` + StakesKeyDeserializationFailed = 20, + /// Internal error: failed to deserialize the stake's balance. 
+ /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(21, Error::StakesDeserializationFailed as u8); + /// ``` + StakesDeserializationFailed = 21, + /// The invoked Handle Payment function can only be called by system contracts, but was called + /// by a user contract. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(22, Error::SystemFunctionCalledByUserAccount as u8); + /// ``` + SystemFunctionCalledByUserAccount = 22, + /// Internal error: while finalizing payment, the amount spent exceeded the amount available. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(23, Error::InsufficientPaymentForAmountSpent as u8); + /// ``` + InsufficientPaymentForAmountSpent = 23, + /// Internal error: while finalizing payment, failed to pay the validators (the transfer from + /// the Handle Payment contract's payment purse to rewards purse failed). + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(24, Error::FailedTransferToRewardsPurse as u8); + /// ``` + FailedTransferToRewardsPurse = 24, + /// Internal error: while finalizing payment, failed to refund the caller's purse (the transfer + /// from the Handle Payment contract's payment purse to refund purse or account's main purse + /// failed). + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(25, Error::FailedTransferToAccountPurse as u8); + /// ``` + FailedTransferToAccountPurse = 25, + /// Handle Payment contract's "set_refund_purse" method can only be called by the payment code + /// of a deploy, but was called by the session code. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(26, Error::SetRefundPurseCalledOutsidePayment as u8); + /// ``` + SetRefundPurseCalledOutsidePayment = 26, + /// Raised when the system is unable to determine purse balance. 
+ /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(27, Error::GetBalance as u8); + /// ``` + GetBalance = 27, + /// Raised when the system is unable to put named key. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(28, Error::PutKey as u8); + /// ``` + PutKey = 28, + /// Raised when the system is unable to remove given named key. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(29, Error::RemoveKey as u8); + /// ``` + RemoveKey = 29, + /// Failed to transfer funds. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(30, Error::Transfer as u8); + /// ``` + Transfer = 30, + /// An arithmetic overflow occurred + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(31, Error::ArithmeticOverflow as u8); + /// ``` + ArithmeticOverflow = 31, + // NOTE: These variants below will be removed once support for WASM system contracts will be + // dropped. + #[doc(hidden)] + GasLimit = 32, + /// Refund purse is a payment purse. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(33, Error::RefundPurseIsPaymentPurse as u8); + /// ``` + RefundPurseIsPaymentPurse = 33, + /// Error raised while reducing total supply on the mint system contract. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(34, Error::ReduceTotalSupply as u8); + /// ``` + ReduceTotalSupply = 34, + /// Error writing to a storage. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(35, Error::Storage as u8); + /// ``` + Storage = 35, + /// Internal error: the Handle Payment contract's accumulation purse wasn't found. 
+ /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(36, Error::AccumulationPurseNotFound as u8); + /// ``` + AccumulationPurseNotFound = 36, + /// Internal error: the Handle Payment contract's accumulation purse key was the wrong type. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(37, Error::AccumulationPurseKeyUnexpectedType as u8); + /// ``` + AccumulationPurseKeyUnexpectedType = 37, +} + +impl Display for Error { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + Error::NotBonded => formatter.write_str("Not bonded"), + Error::TooManyEventsInQueue => formatter.write_str("Too many events in queue"), + Error::CannotUnbondLastValidator => formatter.write_str("Cannot unbond last validator"), + Error::SpreadTooHigh => formatter.write_str("Spread is too high"), + Error::MultipleRequests => formatter.write_str("Multiple requests"), + Error::BondTooSmall => formatter.write_str("Bond is too small"), + Error::BondTooLarge => formatter.write_str("Bond is too large"), + Error::UnbondTooLarge => formatter.write_str("Unbond is too large"), + Error::BondTransferFailed => formatter.write_str("Bond transfer failed"), + Error::UnbondTransferFailed => formatter.write_str("Unbond transfer failed"), + Error::TimeWentBackwards => formatter.write_str("Time went backwards"), + Error::StakesNotFound => formatter.write_str("Stakes not found"), + Error::PaymentPurseNotFound => formatter.write_str("Payment purse not found"), + Error::PaymentPurseKeyUnexpectedType => { + formatter.write_str("Payment purse has unexpected type") + } + Error::PaymentPurseBalanceNotFound => { + formatter.write_str("Payment purse balance not found") + } + Error::BondingPurseNotFound => formatter.write_str("Bonding purse not found"), + Error::BondingPurseKeyUnexpectedType => { + formatter.write_str("Bonding purse key has unexpected type") + } + Error::RefundPurseKeyUnexpectedType => { + formatter.write_str("Refund purse 
key has unexpected type") + } + Error::RewardsPurseNotFound => formatter.write_str("Rewards purse not found"), + Error::RewardsPurseKeyUnexpectedType => { + formatter.write_str("Rewards purse has unexpected type") + } + Error::StakesKeyDeserializationFailed => { + formatter.write_str("Failed to deserialize stake's key") + } + Error::StakesDeserializationFailed => { + formatter.write_str("Failed to deserialize stake's balance") + } + Error::SystemFunctionCalledByUserAccount => { + formatter.write_str("System function was called by user account") + } + Error::InsufficientPaymentForAmountSpent => { + formatter.write_str("Insufficient payment for amount spent") + } + Error::FailedTransferToRewardsPurse => { + formatter.write_str("Transfer to rewards purse has failed") + } + Error::FailedTransferToAccountPurse => { + formatter.write_str("Transfer to account's purse failed") + } + Error::SetRefundPurseCalledOutsidePayment => { + formatter.write_str("Set refund purse was called outside payment") + } + Error::GetBalance => formatter.write_str("Unable to get purse balance"), + Error::PutKey => formatter.write_str("Unable to put named key"), + Error::RemoveKey => formatter.write_str("Unable to remove named key"), + Error::Transfer => formatter.write_str("Failed to transfer funds"), + Error::ArithmeticOverflow => formatter.write_str("Arithmetic overflow"), + Error::GasLimit => formatter.write_str("GasLimit"), + Error::RefundPurseIsPaymentPurse => { + formatter.write_str("Refund purse is a payment purse.") + } + Error::ReduceTotalSupply => formatter.write_str("Failed to reduce total supply."), + Error::Storage => formatter.write_str("Failed to write to storage."), + Error::AccumulationPurseNotFound => formatter.write_str("Accumulation purse not found"), + Error::AccumulationPurseKeyUnexpectedType => { + formatter.write_str("Accumulation purse has unexpected type") + } + } + } +} + +impl TryFrom for Error { + type Error = (); + + fn try_from(value: u8) -> Result { + let error = 
match value { + v if v == Error::NotBonded as u8 => Error::NotBonded, + v if v == Error::TooManyEventsInQueue as u8 => Error::TooManyEventsInQueue, + v if v == Error::CannotUnbondLastValidator as u8 => Error::CannotUnbondLastValidator, + v if v == Error::SpreadTooHigh as u8 => Error::SpreadTooHigh, + v if v == Error::MultipleRequests as u8 => Error::MultipleRequests, + v if v == Error::BondTooSmall as u8 => Error::BondTooSmall, + v if v == Error::BondTooLarge as u8 => Error::BondTooLarge, + v if v == Error::UnbondTooLarge as u8 => Error::UnbondTooLarge, + v if v == Error::BondTransferFailed as u8 => Error::BondTransferFailed, + v if v == Error::UnbondTransferFailed as u8 => Error::UnbondTransferFailed, + v if v == Error::TimeWentBackwards as u8 => Error::TimeWentBackwards, + v if v == Error::StakesNotFound as u8 => Error::StakesNotFound, + v if v == Error::PaymentPurseNotFound as u8 => Error::PaymentPurseNotFound, + v if v == Error::PaymentPurseKeyUnexpectedType as u8 => { + Error::PaymentPurseKeyUnexpectedType + } + v if v == Error::PaymentPurseBalanceNotFound as u8 => { + Error::PaymentPurseBalanceNotFound + } + v if v == Error::BondingPurseNotFound as u8 => Error::BondingPurseNotFound, + v if v == Error::BondingPurseKeyUnexpectedType as u8 => { + Error::BondingPurseKeyUnexpectedType + } + v if v == Error::RefundPurseKeyUnexpectedType as u8 => { + Error::RefundPurseKeyUnexpectedType + } + v if v == Error::RewardsPurseNotFound as u8 => Error::RewardsPurseNotFound, + v if v == Error::RewardsPurseKeyUnexpectedType as u8 => { + Error::RewardsPurseKeyUnexpectedType + } + v if v == Error::StakesKeyDeserializationFailed as u8 => { + Error::StakesKeyDeserializationFailed + } + v if v == Error::StakesDeserializationFailed as u8 => { + Error::StakesDeserializationFailed + } + v if v == Error::SystemFunctionCalledByUserAccount as u8 => { + Error::SystemFunctionCalledByUserAccount + } + v if v == Error::InsufficientPaymentForAmountSpent as u8 => { + 
Error::InsufficientPaymentForAmountSpent + } + v if v == Error::FailedTransferToRewardsPurse as u8 => { + Error::FailedTransferToRewardsPurse + } + v if v == Error::FailedTransferToAccountPurse as u8 => { + Error::FailedTransferToAccountPurse + } + v if v == Error::SetRefundPurseCalledOutsidePayment as u8 => { + Error::SetRefundPurseCalledOutsidePayment + } + + v if v == Error::GetBalance as u8 => Error::GetBalance, + v if v == Error::PutKey as u8 => Error::PutKey, + v if v == Error::RemoveKey as u8 => Error::RemoveKey, + v if v == Error::Transfer as u8 => Error::Transfer, + v if v == Error::ArithmeticOverflow as u8 => Error::ArithmeticOverflow, + v if v == Error::GasLimit as u8 => Error::GasLimit, + v if v == Error::RefundPurseIsPaymentPurse as u8 => Error::RefundPurseIsPaymentPurse, + v if v == Error::ReduceTotalSupply as u8 => Error::ReduceTotalSupply, + v if v == Error::Storage as u8 => Error::Storage, + v if v == Error::AccumulationPurseNotFound as u8 => Error::AccumulationPurseNotFound, + v if v == Error::AccumulationPurseKeyUnexpectedType as u8 => { + Error::AccumulationPurseKeyUnexpectedType + } + _ => return Err(()), + }; + Ok(error) + } +} + +impl CLTyped for Error { + fn cl_type() -> CLType { + CLType::U8 + } +} + +impl ToBytes for Error { + fn to_bytes(&self) -> result::Result, bytesrepr::Error> { + let value = *self as u8; + value.to_bytes() + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + } +} diff --git a/casper_types/src/system/mint.rs b/casper_types/src/system/mint.rs new file mode 100644 index 00000000..4a7e58a1 --- /dev/null +++ b/casper_types/src/system/mint.rs @@ -0,0 +1,8 @@ +//! Contains implementation of a Mint contract functionality. 
+mod constants;
+mod entry_points;
+mod error;
+
+pub use constants::*;
+pub use entry_points::mint_entry_points;
+pub use error::Error;
diff --git a/casper_types/src/system/mint/constants.rs b/casper_types/src/system/mint/constants.rs
new file mode 100644
index 00000000..cffada44
--- /dev/null
+++ b/casper_types/src/system/mint/constants.rs
@@ -0,0 +1,40 @@
+/// Named constant for `purse`.
+pub const ARG_PURSE: &str = "purse";
+/// Named constant for `amount`.
+pub const ARG_AMOUNT: &str = "amount";
+/// Named constant for `id`.
+pub const ARG_ID: &str = "id";
+/// Named constant for `to`.
+pub const ARG_TO: &str = "to";
+/// Named constant for `source`.
+pub const ARG_SOURCE: &str = "source";
+/// Named constant for `target`.
+pub const ARG_TARGET: &str = "target";
+/// Named constant for `round_seigniorage_rate` used in installer.
+pub const ARG_ROUND_SEIGNIORAGE_RATE: &str = "round_seigniorage_rate";
+
+/// Named constant for method `mint`.
+pub const METHOD_MINT: &str = "mint";
+/// Named constant for method `reduce_total_supply`.
+pub const METHOD_REDUCE_TOTAL_SUPPLY: &str = "reduce_total_supply";
+/// Named constant for (synthetic) method `create`
+pub const METHOD_CREATE: &str = "create";
+/// Named constant for method `balance`.
+pub const METHOD_BALANCE: &str = "balance";
+/// Named constant for method `transfer`.
+pub const METHOD_TRANSFER: &str = "transfer";
+/// Named constant for method `read_base_round_reward`.
+pub const METHOD_READ_BASE_ROUND_REWARD: &str = "read_base_round_reward";
+/// Named constant for method `mint_into_existing_purse`.
+pub const METHOD_MINT_INTO_EXISTING_PURSE: &str = "mint_into_existing_purse";
+
+/// Storage for mint contract hash.
+pub const HASH_KEY: &str = "mint_hash";
+/// Storage for mint access key.
+pub const ACCESS_KEY: &str = "mint_access";
+/// Storage for base round reward key.
+pub const BASE_ROUND_REWARD_KEY: &str = "mint_base_round_reward";
+/// Storage for mint total supply key.
+pub const TOTAL_SUPPLY_KEY: &str = "total_supply"; +/// Storage for mint round seigniorage rate. +pub const ROUND_SEIGNIORAGE_RATE_KEY: &str = "round_seigniorage_rate"; diff --git a/casper_types/src/system/mint/entry_points.rs b/casper_types/src/system/mint/entry_points.rs new file mode 100644 index 00000000..bbc82c20 --- /dev/null +++ b/casper_types/src/system/mint/entry_points.rs @@ -0,0 +1,102 @@ +use alloc::boxed::Box; + +use crate::{ + contracts::Parameters, + system::mint::{ + ARG_AMOUNT, ARG_ID, ARG_PURSE, ARG_SOURCE, ARG_TARGET, ARG_TO, METHOD_BALANCE, + METHOD_CREATE, METHOD_MINT, METHOD_MINT_INTO_EXISTING_PURSE, METHOD_READ_BASE_ROUND_REWARD, + METHOD_REDUCE_TOTAL_SUPPLY, METHOD_TRANSFER, + }, + CLType, EntryPoint, EntryPointAccess, EntryPointType, EntryPoints, Parameter, +}; + +/// Returns entry points for a mint system contract. +pub fn mint_entry_points() -> EntryPoints { + let mut entry_points = EntryPoints::new(); + + let entry_point = EntryPoint::new( + METHOD_MINT, + vec![Parameter::new(ARG_AMOUNT, CLType::U512)], + CLType::Result { + ok: Box::new(CLType::URef), + err: Box::new(CLType::U8), + }, + EntryPointAccess::Public, + EntryPointType::Contract, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntryPoint::new( + METHOD_REDUCE_TOTAL_SUPPLY, + vec![Parameter::new(ARG_AMOUNT, CLType::U512)], + CLType::Result { + ok: Box::new(CLType::Unit), + err: Box::new(CLType::U8), + }, + EntryPointAccess::Public, + EntryPointType::Contract, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntryPoint::new( + METHOD_CREATE, + Parameters::new(), + CLType::URef, + EntryPointAccess::Public, + EntryPointType::Contract, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntryPoint::new( + METHOD_BALANCE, + vec![Parameter::new(ARG_PURSE, CLType::URef)], + CLType::Option(Box::new(CLType::U512)), + EntryPointAccess::Public, + EntryPointType::Contract, + ); + entry_points.add_entry_point(entry_point); + 
+ let entry_point = EntryPoint::new( + METHOD_TRANSFER, + vec![ + Parameter::new(ARG_TO, CLType::Option(Box::new(CLType::ByteArray(32)))), + Parameter::new(ARG_SOURCE, CLType::URef), + Parameter::new(ARG_TARGET, CLType::URef), + Parameter::new(ARG_AMOUNT, CLType::U512), + Parameter::new(ARG_ID, CLType::Option(Box::new(CLType::U64))), + ], + CLType::Result { + ok: Box::new(CLType::Unit), + err: Box::new(CLType::U8), + }, + EntryPointAccess::Public, + EntryPointType::Contract, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntryPoint::new( + METHOD_READ_BASE_ROUND_REWARD, + Parameters::new(), + CLType::U512, + EntryPointAccess::Public, + EntryPointType::Contract, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntryPoint::new( + METHOD_MINT_INTO_EXISTING_PURSE, + vec![ + Parameter::new(ARG_AMOUNT, CLType::U512), + Parameter::new(ARG_PURSE, CLType::URef), + ], + CLType::Result { + ok: Box::new(CLType::Unit), + err: Box::new(CLType::U8), + }, + EntryPointAccess::Public, + EntryPointType::Contract, + ); + entry_points.add_entry_point(entry_point); + + entry_points +} diff --git a/casper_types/src/system/mint/error.rs b/casper_types/src/system/mint/error.rs new file mode 100644 index 00000000..db327a40 --- /dev/null +++ b/casper_types/src/system/mint/error.rs @@ -0,0 +1,298 @@ +//! Home of the Mint contract's [`enum@Error`] type. + +use alloc::vec::Vec; +use core::{ + convert::{TryFrom, TryInto}, + fmt::{self, Display, Formatter}, +}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + CLType, CLTyped, +}; + +/// Errors which can occur while executing the Mint contract. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +#[repr(u8)] +#[non_exhaustive] +pub enum Error { + /// Insufficient funds to complete the transfer. + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(0, Error::InsufficientFunds as u8); + /// ``` + InsufficientFunds = 0, + /// Source purse not found. 
+ /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(1, Error::SourceNotFound as u8); + /// ``` + SourceNotFound = 1, + /// Destination purse not found. + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(2, Error::DestNotFound as u8); + /// ``` + DestNotFound = 2, + /// The given [`URef`](crate::URef) does not reference the account holder's purse, or such a + /// `URef` does not have the required [`AccessRights`](crate::AccessRights). + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(3, Error::InvalidURef as u8); + /// ``` + InvalidURef = 3, + /// The source purse is not writeable (see [`URef::is_writeable`](crate::URef::is_writeable)), + /// or the destination purse is not addable (see + /// [`URef::is_addable`](crate::URef::is_addable)). + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(4, Error::InvalidAccessRights as u8); + /// ``` + InvalidAccessRights = 4, + /// Tried to create a new purse with a non-zero initial balance. + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(5, Error::InvalidNonEmptyPurseCreation as u8); + /// ``` + InvalidNonEmptyPurseCreation = 5, + /// Failed to read from local or global storage. + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(6, Error::Storage as u8); + /// ``` + Storage = 6, + /// Purse not found while trying to get balance. + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(7, Error::PurseNotFound as u8); + /// ``` + PurseNotFound = 7, + /// Unable to obtain a key by its name. + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(8, Error::MissingKey as u8); + /// ``` + MissingKey = 8, + /// Total supply not found. + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(9, Error::TotalSupplyNotFound as u8); + /// ``` + TotalSupplyNotFound = 9, + /// Failed to record transfer. 
+ /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(10, Error::RecordTransferFailure as u8); + /// ``` + RecordTransferFailure = 10, + /// Invalid attempt to reduce total supply. + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(11, Error::InvalidTotalSupplyReductionAttempt as u8); + /// ``` + InvalidTotalSupplyReductionAttempt = 11, + /// Failed to create new uref. + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(12, Error::NewURef as u8); + /// ``` + NewURef = 12, + /// Failed to put key. + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(13, Error::PutKey as u8); + /// ``` + PutKey = 13, + /// Failed to write to dictionary. + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(14, Error::WriteDictionary as u8); + /// ``` + WriteDictionary = 14, + /// Failed to create a [`crate::CLValue`]. + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(15, Error::CLValue as u8); + /// ``` + CLValue = 15, + /// Failed to serialize data. + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(16, Error::Serialize as u8); + /// ``` + Serialize = 16, + /// Source and target purse [`crate::URef`]s are equal. + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(17, Error::EqualSourceAndTarget as u8); + /// ``` + EqualSourceAndTarget = 17, + /// An arithmetic overflow has occurred. + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(18, Error::ArithmeticOverflow as u8); + /// ``` + ArithmeticOverflow = 18, + + // NOTE: These variants below will be removed once support for WASM system contracts will be + // dropped. + #[doc(hidden)] + GasLimit = 19, + + /// Raised when an entry point is called from invalid account context. + InvalidContext = 20, + + /// Session code tried to transfer more CSPR than user approved. 
+    /// ```
+    /// # use casper_types::system::mint::Error;
+    /// assert_eq!(21, Error::UnapprovedSpendingAmount as u8);
+    /// ```
+    UnapprovedSpendingAmount = 21,
+
+    /// Failed to transfer tokens on a private chain.
+    /// ```
+    /// # use casper_types::system::mint::Error;
+    /// assert_eq!(22, Error::DisabledUnrestrictedTransfers as u8);
+    /// ```
+    DisabledUnrestrictedTransfers = 22,
+
+    #[cfg(test)]
+    #[doc(hidden)]
+    Sentinel,
+}
+
+/// Used for testing; this should be guaranteed to be the maximum valid value of [`Error`] enum.
+#[cfg(test)]
+const MAX_ERROR_VALUE: u8 = Error::Sentinel as u8;
+
+impl CLTyped for Error {
+    fn cl_type() -> CLType {
+        CLType::U8
+    }
+}
+
+// This error type is not intended to be used by third party crates.
+#[doc(hidden)]
+pub struct TryFromU8ForError(());
+
+// This conversion is not intended to be used by third party crates.
+#[doc(hidden)]
+impl TryFrom<u8> for Error {
+    type Error = TryFromU8ForError;
+
+    fn try_from(value: u8) -> Result<Self, Self::Error> {
+        match value {
+            d if d == Error::InsufficientFunds as u8 => Ok(Error::InsufficientFunds),
+            d if d == Error::SourceNotFound as u8 => Ok(Error::SourceNotFound),
+            d if d == Error::DestNotFound as u8 => Ok(Error::DestNotFound),
+            d if d == Error::InvalidURef as u8 => Ok(Error::InvalidURef),
+            d if d == Error::InvalidAccessRights as u8 => Ok(Error::InvalidAccessRights),
+            d if d == Error::InvalidNonEmptyPurseCreation as u8 => {
+                Ok(Error::InvalidNonEmptyPurseCreation)
+            }
+            d if d == Error::Storage as u8 => Ok(Error::Storage),
+            d if d == Error::PurseNotFound as u8 => Ok(Error::PurseNotFound),
+            d if d == Error::MissingKey as u8 => Ok(Error::MissingKey),
+            d if d == Error::TotalSupplyNotFound as u8 => Ok(Error::TotalSupplyNotFound),
+            d if d == Error::RecordTransferFailure as u8 => Ok(Error::RecordTransferFailure),
+            d if d == Error::InvalidTotalSupplyReductionAttempt as u8 => {
+                Ok(Error::InvalidTotalSupplyReductionAttempt)
+            }
+            d if d == Error::NewURef as u8 => Ok(Error::NewURef),
+            d if d == Error::PutKey as u8
=> Ok(Error::PutKey), + d if d == Error::WriteDictionary as u8 => Ok(Error::WriteDictionary), + d if d == Error::CLValue as u8 => Ok(Error::CLValue), + d if d == Error::Serialize as u8 => Ok(Error::Serialize), + d if d == Error::EqualSourceAndTarget as u8 => Ok(Error::EqualSourceAndTarget), + d if d == Error::ArithmeticOverflow as u8 => Ok(Error::ArithmeticOverflow), + d if d == Error::GasLimit as u8 => Ok(Error::GasLimit), + d if d == Error::InvalidContext as u8 => Ok(Error::InvalidContext), + d if d == Error::UnapprovedSpendingAmount as u8 => Ok(Error::UnapprovedSpendingAmount), + d if d == Error::DisabledUnrestrictedTransfers as u8 => { + Ok(Error::DisabledUnrestrictedTransfers) + } + _ => Err(TryFromU8ForError(())), + } + } +} + +impl ToBytes for Error { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let value = *self as u8; + value.to_bytes() + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + } +} + +impl FromBytes for Error { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (value, rem): (u8, _) = FromBytes::from_bytes(bytes)?; + let error: Error = value + .try_into() + // In case an Error variant is unable to be determined it would return an + // Error::Formatting as if its unable to be correctly deserialized. 
+ .map_err(|_| bytesrepr::Error::Formatting)?; + Ok((error, rem)) + } +} + +impl Display for Error { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + Error::InsufficientFunds => formatter.write_str("Insufficient funds"), + Error::SourceNotFound => formatter.write_str("Source not found"), + Error::DestNotFound => formatter.write_str("Destination not found"), + Error::InvalidURef => formatter.write_str("Invalid URef"), + Error::InvalidAccessRights => formatter.write_str("Invalid AccessRights"), + Error::InvalidNonEmptyPurseCreation => { + formatter.write_str("Invalid non-empty purse creation") + } + Error::Storage => formatter.write_str("Storage error"), + Error::PurseNotFound => formatter.write_str("Purse not found"), + Error::MissingKey => formatter.write_str("Missing key"), + Error::TotalSupplyNotFound => formatter.write_str("Total supply not found"), + Error::RecordTransferFailure => formatter.write_str("Failed to record transfer"), + Error::InvalidTotalSupplyReductionAttempt => { + formatter.write_str("Invalid attempt to reduce total supply") + } + Error::NewURef => formatter.write_str("Failed to create new uref"), + Error::PutKey => formatter.write_str("Failed to put key"), + Error::WriteDictionary => formatter.write_str("Failed to write dictionary"), + Error::CLValue => formatter.write_str("Failed to create a CLValue"), + Error::Serialize => formatter.write_str("Failed to serialize data"), + Error::EqualSourceAndTarget => formatter.write_str("Invalid target purse"), + Error::ArithmeticOverflow => formatter.write_str("Arithmetic overflow has occurred"), + Error::GasLimit => formatter.write_str("GasLimit"), + Error::InvalidContext => formatter.write_str("Invalid context"), + Error::UnapprovedSpendingAmount => formatter.write_str("Unapproved spending amount"), + Error::DisabledUnrestrictedTransfers => { + formatter.write_str("Disabled unrestricted transfers") + } + #[cfg(test)] + Error::Sentinel => formatter.write_str("Sentinel 
error"), + } + } +} + +#[cfg(test)] +mod tests { + use super::{Error, TryFromU8ForError, MAX_ERROR_VALUE}; + + #[test] + fn error_round_trips() { + for i in 0..=u8::max_value() { + match Error::try_from(i) { + Ok(error) if i < MAX_ERROR_VALUE => assert_eq!(error as u8, i), + Ok(error) => panic!( + "value of variant {:?} ({}) exceeds MAX_ERROR_VALUE ({})", + error, i, MAX_ERROR_VALUE + ), + Err(TryFromU8ForError(())) if i >= MAX_ERROR_VALUE => (), + Err(TryFromU8ForError(())) => { + panic!("missing conversion from u8 to error value: {}", i) + } + } + } + } +} diff --git a/casper_types/src/system/standard_payment.rs b/casper_types/src/system/standard_payment.rs new file mode 100644 index 00000000..92c3fab3 --- /dev/null +++ b/casper_types/src/system/standard_payment.rs @@ -0,0 +1,6 @@ +//! Contains implementation of a standard payment contract implementation. +mod constants; +mod entry_points; + +pub use constants::*; +pub use entry_points::standard_payment_entry_points; diff --git a/casper_types/src/system/standard_payment/constants.rs b/casper_types/src/system/standard_payment/constants.rs new file mode 100644 index 00000000..9bd88784 --- /dev/null +++ b/casper_types/src/system/standard_payment/constants.rs @@ -0,0 +1,10 @@ +/// Named constant for `amount`. +pub const ARG_AMOUNT: &str = "amount"; + +/// Named constant for method `pay`. +pub const METHOD_PAY: &str = "pay"; + +/// Storage for standard payment contract hash. +pub const HASH_KEY: &str = "standard_payment_hash"; +/// Storage for standard payment access key. 
+pub const ACCESS_KEY: &str = "standard_payment_access"; diff --git a/casper_types/src/system/standard_payment/entry_points.rs b/casper_types/src/system/standard_payment/entry_points.rs new file mode 100644 index 00000000..3eeaed52 --- /dev/null +++ b/casper_types/src/system/standard_payment/entry_points.rs @@ -0,0 +1,25 @@ +use alloc::{boxed::Box, string::ToString}; + +use crate::{ + system::standard_payment::{ARG_AMOUNT, METHOD_PAY}, + CLType, EntryPoint, EntryPointAccess, EntryPointType, EntryPoints, Parameter, +}; + +/// Creates standard payment contract entry points. +pub fn standard_payment_entry_points() -> EntryPoints { + let mut entry_points = EntryPoints::new(); + + let entry_point = EntryPoint::new( + METHOD_PAY.to_string(), + vec![Parameter::new(ARG_AMOUNT, CLType::U512)], + CLType::Result { + ok: Box::new(CLType::Unit), + err: Box::new(CLType::U32), + }, + EntryPointAccess::Public, + EntryPointType::Session, + ); + entry_points.add_entry_point(entry_point); + + entry_points +} diff --git a/casper_types/src/system/system_contract_type.rs b/casper_types/src/system/system_contract_type.rs new file mode 100644 index 00000000..7709f6d9 --- /dev/null +++ b/casper_types/src/system/system_contract_type.rs @@ -0,0 +1,171 @@ +//! Home of system contract type enum. + +use alloc::string::{String, ToString}; +use core::{ + convert::TryFrom, + fmt::{self, Display, Formatter}, +}; + +use crate::{ApiError, EntryPoints}; + +use super::{ + auction::auction_entry_points, handle_payment::handle_payment_entry_points, + mint::mint_entry_points, standard_payment::standard_payment_entry_points, +}; + +/// System contract types. +/// +/// Used by converting to a `u32` and passing as the `system_contract_index` argument of +/// `ext_ffi::casper_get_system_contract()`. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum SystemContractType { + /// Mint contract. + Mint, + /// Handle Payment contract. + HandlePayment, + /// Standard Payment contract. 
+ StandardPayment, + /// Auction contract. + Auction, +} + +/// Name of mint system contract +pub const MINT: &str = "mint"; +/// Name of handle payment system contract +pub const HANDLE_PAYMENT: &str = "handle payment"; +/// Name of standard payment system contract +pub const STANDARD_PAYMENT: &str = "standard payment"; +/// Name of auction system contract +pub const AUCTION: &str = "auction"; + +impl SystemContractType { + /// Returns the name of the system contract. + pub fn contract_name(&self) -> String { + match self { + SystemContractType::Mint => MINT.to_string(), + SystemContractType::HandlePayment => HANDLE_PAYMENT.to_string(), + SystemContractType::StandardPayment => STANDARD_PAYMENT.to_string(), + SystemContractType::Auction => AUCTION.to_string(), + } + } + + /// Returns the entrypoint of the system contract. + pub fn contract_entry_points(&self) -> EntryPoints { + match self { + SystemContractType::Mint => mint_entry_points(), + SystemContractType::HandlePayment => handle_payment_entry_points(), + SystemContractType::StandardPayment => standard_payment_entry_points(), + SystemContractType::Auction => auction_entry_points(), + } + } +} + +impl From for u32 { + fn from(system_contract_type: SystemContractType) -> u32 { + match system_contract_type { + SystemContractType::Mint => 0, + SystemContractType::HandlePayment => 1, + SystemContractType::StandardPayment => 2, + SystemContractType::Auction => 3, + } + } +} + +// This conversion is not intended to be used by third party crates. 
+#[doc(hidden)]
+impl TryFrom<u32> for SystemContractType {
+    type Error = ApiError;
+    fn try_from(value: u32) -> Result<SystemContractType, Self::Error> {
+        match value {
+            0 => Ok(SystemContractType::Mint),
+            1 => Ok(SystemContractType::HandlePayment),
+            2 => Ok(SystemContractType::StandardPayment),
+            3 => Ok(SystemContractType::Auction),
+            _ => Err(ApiError::InvalidSystemContract),
+        }
+    }
+}
+
+impl Display for SystemContractType {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        match *self {
+            SystemContractType::Mint => write!(f, "{}", MINT),
+            SystemContractType::HandlePayment => write!(f, "{}", HANDLE_PAYMENT),
+            SystemContractType::StandardPayment => write!(f, "{}", STANDARD_PAYMENT),
+            SystemContractType::Auction => write!(f, "{}", AUCTION),
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::string::ToString;
+
+    use super::*;
+
+    #[test]
+    fn get_index_of_mint_contract() {
+        let index: u32 = SystemContractType::Mint.into();
+        assert_eq!(index, 0u32);
+        assert_eq!(SystemContractType::Mint.to_string(), MINT);
+    }
+
+    #[test]
+    fn get_index_of_handle_payment_contract() {
+        let index: u32 = SystemContractType::HandlePayment.into();
+        assert_eq!(index, 1u32);
+        assert_eq!(
+            SystemContractType::HandlePayment.to_string(),
+            HANDLE_PAYMENT
+        );
+    }
+
+    #[test]
+    fn get_index_of_standard_payment_contract() {
+        let index: u32 = SystemContractType::StandardPayment.into();
+        assert_eq!(index, 2u32);
+        assert_eq!(
+            SystemContractType::StandardPayment.to_string(),
+            STANDARD_PAYMENT
+        );
+    }
+
+    #[test]
+    fn get_index_of_auction_contract() {
+        let index: u32 = SystemContractType::Auction.into();
+        assert_eq!(index, 3u32);
+        assert_eq!(SystemContractType::Auction.to_string(), AUCTION);
+    }
+
+    #[test]
+    fn create_mint_variant_from_int() {
+        let mint = SystemContractType::try_from(0).ok().unwrap();
+        assert_eq!(mint, SystemContractType::Mint);
+    }
+
+    #[test]
+    fn create_handle_payment_variant_from_int() {
+        let handle_payment = SystemContractType::try_from(1).ok().unwrap();
assert_eq!(handle_payment, SystemContractType::HandlePayment); + } + + #[test] + fn create_standard_payment_variant_from_int() { + let handle_payment = SystemContractType::try_from(2).ok().unwrap(); + assert_eq!(handle_payment, SystemContractType::StandardPayment); + } + + #[test] + fn create_auction_variant_from_int() { + let auction = SystemContractType::try_from(3).ok().unwrap(); + assert_eq!(auction, SystemContractType::Auction); + } + + #[test] + fn create_unknown_system_contract_variant() { + assert!(SystemContractType::try_from(4).is_err()); + assert!(SystemContractType::try_from(5).is_err()); + assert!(SystemContractType::try_from(10).is_err()); + assert!(SystemContractType::try_from(u32::max_value()).is_err()); + } +} diff --git a/casper_types/src/tagged.rs b/casper_types/src/tagged.rs new file mode 100644 index 00000000..deddfe83 --- /dev/null +++ b/casper_types/src/tagged.rs @@ -0,0 +1,5 @@ +/// The quality of having a tag +pub trait Tagged { + /// Returns the tag of a given object + fn tag(&self) -> T; +} diff --git a/casper_types/src/testing.rs b/casper_types/src/testing.rs new file mode 100644 index 00000000..9bbb0e2b --- /dev/null +++ b/casper_types/src/testing.rs @@ -0,0 +1,174 @@ +//! An RNG for testing purposes. +use std::{ + cell::RefCell, + cmp, env, + fmt::{self, Debug, Display, Formatter}, + thread, +}; + +use rand::{self, CryptoRng, Error, Rng, RngCore, SeedableRng}; +use rand_pcg::Pcg64Mcg; + +thread_local! { + static THIS_THREAD_HAS_RNG: RefCell = RefCell::new(false); +} + +const CL_TEST_SEED: &str = "CL_TEST_SEED"; + +type Seed = ::Seed; // [u8; 16] + +/// A fast, seedable pseudorandom number generator for use in tests which prints the seed if the +/// thread in which it is created panics. +/// +/// Only one `TestRng` is permitted per thread. 
+pub struct TestRng { + seed: Seed, + rng: Pcg64Mcg, +} + +impl TestRng { + /// Constructs a new `TestRng` using a seed generated from the env var `CL_TEST_SEED` if set or + /// from cryptographically secure random data if not. + /// + /// Note that `new()` or `default()` should only be called once per test. If a test needs to + /// spawn multiple threads each with their own `TestRng`, then use `new()` to create a single, + /// master `TestRng`, then use it to create a seed per child thread. The child `TestRng`s can + /// then be constructed in their own threads via `from_seed()`. + /// + /// # Panics + /// + /// Panics if a `TestRng` has already been created on this thread. + pub fn new() -> Self { + Self::set_flag_or_panic(); + + let mut seed = Seed::default(); + match env::var(CL_TEST_SEED) { + Ok(seed_as_hex) => { + base16::decode_slice(&seed_as_hex, &mut seed).unwrap_or_else(|error| { + THIS_THREAD_HAS_RNG.with(|flag| { + *flag.borrow_mut() = false; + }); + panic!("can't parse '{}' as a TestRng seed: {}", seed_as_hex, error) + }); + } + Err(_) => { + rand::thread_rng().fill(&mut seed); + } + }; + + let rng = Pcg64Mcg::from_seed(seed); + + TestRng { seed, rng } + } + + /// Constructs a new `TestRng` using `seed`. This should be used in cases where a test needs to + /// spawn multiple threads each with their own `TestRng`. A single, master `TestRng` should be + /// constructed before any child threads are spawned, and that one should be used to create + /// seeds for the child threads' `TestRng`s. + /// + /// # Panics + /// + /// Panics if a `TestRng` has already been created on this thread. + pub fn from_seed(seed: Seed) -> Self { + Self::set_flag_or_panic(); + let rng = Pcg64Mcg::from_seed(seed); + TestRng { seed, rng } + } + + fn set_flag_or_panic() { + THIS_THREAD_HAS_RNG.with(|flag| { + if *flag.borrow() { + panic!("cannot create multiple TestRngs on the same thread"); + } + *flag.borrow_mut() = true; + }); + } + + /// Creates a child RNG. 
+ /// + /// The resulting RNG is seeded from `self` deterministically. + pub fn create_child(&mut self) -> Self { + let seed = self.gen(); + let rng = Pcg64Mcg::from_seed(seed); + TestRng { seed, rng } + } +} + +impl Default for TestRng { + fn default() -> Self { + TestRng::new() + } +} + +impl Display for TestRng { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "TestRng seed: {}", + base16::encode_lower(&self.seed) + ) + } +} + +impl Debug for TestRng { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + Display::fmt(self, formatter) + } +} + +impl Drop for TestRng { + fn drop(&mut self) { + if thread::panicking() { + let line_1 = format!("Thread: {}", thread::current().name().unwrap_or("unnamed")); + let line_2 = "To reproduce failure, try running with env var:"; + let line_3 = format!("{}={}", CL_TEST_SEED, base16::encode_lower(&self.seed)); + let max_length = cmp::max(line_1.len(), line_2.len()); + let border = "=".repeat(max_length); + println!( + "\n{}\n{}\n{}\n{}\n{}\n", + border, line_1, line_2, line_3, border + ); + } + } +} + +impl SeedableRng for TestRng { + type Seed = ::Seed; + + fn from_seed(seed: Self::Seed) -> Self { + Self::from_seed(seed) + } +} + +impl RngCore for TestRng { + fn next_u32(&mut self) -> u32 { + self.rng.next_u32() + } + + fn next_u64(&mut self) -> u64 { + self.rng.next_u64() + } + + fn fill_bytes(&mut self, dest: &mut [u8]) { + self.rng.fill_bytes(dest) + } + + fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { + self.rng.try_fill_bytes(dest) + } +} + +impl CryptoRng for TestRng {} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + #[should_panic(expected = "cannot create multiple TestRngs on the same thread")] + fn second_test_rng_in_thread_should_panic() { + let _test_rng1 = TestRng::new(); + let seed = [1; 16]; + let _test_rng2 = TestRng::from_seed(seed); + } +} diff --git a/casper_types/src/timestamp.rs b/casper_types/src/timestamp.rs new file mode 100644 
index 00000000..563beb69 --- /dev/null +++ b/casper_types/src/timestamp.rs @@ -0,0 +1,472 @@ +// TODO - remove once schemars stops causing warning. +#![allow(clippy::field_reassign_with_default)] + +use alloc::vec::Vec; +use core::{ + ops::{Add, AddAssign, Div, Mul, Rem, Shl, Shr, Sub, SubAssign}, + time::Duration, +}; +#[cfg(any(feature = "std", test))] +use std::{ + fmt::{self, Display, Formatter}, + str::FromStr, + time::SystemTime, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "std", test))] +use humantime::{DurationError, TimestampError}; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +#[cfg(any(feature = "std", test))] +use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; + +use crate::bytesrepr::{self, FromBytes, ToBytes}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; + +/// A timestamp type, representing a concrete moment in time. +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(with = "String", description = "Timestamp formatted as per RFC 3339") +)] +pub struct Timestamp(u64); + +impl Timestamp { + /// The maximum value a timestamp can have. + pub const MAX: Timestamp = Timestamp(u64::MAX); + + #[cfg(any(feature = "std", test))] + /// Returns the timestamp of the current moment. + pub fn now() -> Self { + let millis = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_millis() as u64; + Timestamp(millis) + } + + #[cfg(any(feature = "std", test))] + /// Returns the time that has elapsed since this timestamp. + pub fn elapsed(&self) -> TimeDiff { + TimeDiff(Timestamp::now().0.saturating_sub(self.0)) + } + + /// Returns a zero timestamp. 
+ pub fn zero() -> Self { + Timestamp(0) + } + + /// Returns the timestamp as the number of milliseconds since the Unix epoch + pub fn millis(&self) -> u64 { + self.0 + } + + /// Returns the difference between `self` and `other`, or `0` if `self` is earlier than `other`. + pub fn saturating_diff(self, other: Timestamp) -> TimeDiff { + TimeDiff(self.0.saturating_sub(other.0)) + } + + /// Returns the difference between `self` and `other`, or `0` if that would be before the epoch. + #[must_use] + pub fn saturating_sub(self, other: TimeDiff) -> Timestamp { + Timestamp(self.0.saturating_sub(other.0)) + } + + /// Returns the sum of `self` and `other`, or the maximum possible value if that would be + /// exceeded. + #[must_use] + pub fn saturating_add(self, other: TimeDiff) -> Timestamp { + Timestamp(self.0.saturating_add(other.0)) + } + + /// Returns the number of trailing zeros in the number of milliseconds since the epoch. + pub fn trailing_zeros(&self) -> u8 { + self.0.trailing_zeros() as u8 + } +} + +#[cfg(any(feature = "testing", test))] +impl Timestamp { + /// Generates a random instance using a `TestRng`. 
+ pub fn random(rng: &mut TestRng) -> Self { + Timestamp(1_596_763_000_000 + rng.gen_range(200_000..1_000_000)) + } + + /// Checked subtraction for timestamps + pub fn checked_sub(self, other: TimeDiff) -> Option { + self.0.checked_sub(other.0).map(Timestamp) + } +} + +#[cfg(any(feature = "std", test))] +impl Display for Timestamp { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match SystemTime::UNIX_EPOCH.checked_add(Duration::from_millis(self.0)) { + Some(system_time) => write!(f, "{}", humantime::format_rfc3339_millis(system_time)) + .or_else(|e| write!(f, "Invalid timestamp: {}: {}", e, self.0)), + None => write!(f, "invalid Timestamp: {} ms after the Unix epoch", self.0), + } + } +} + +#[cfg(any(feature = "std", test))] +impl FromStr for Timestamp { + type Err = TimestampError; + + fn from_str(value: &str) -> Result { + let system_time = humantime::parse_rfc3339_weak(value)?; + let inner = system_time + .duration_since(SystemTime::UNIX_EPOCH) + .map_err(|_| TimestampError::OutOfRange)? 
+ .as_millis() as u64; + Ok(Timestamp(inner)) + } +} + +impl Add for Timestamp { + type Output = Timestamp; + + fn add(self, diff: TimeDiff) -> Timestamp { + Timestamp(self.0 + diff.0) + } +} + +impl AddAssign for Timestamp { + fn add_assign(&mut self, rhs: TimeDiff) { + self.0 += rhs.0; + } +} + +#[cfg(any(feature = "testing", test))] +impl std::ops::Sub for Timestamp { + type Output = Timestamp; + + fn sub(self, diff: TimeDiff) -> Timestamp { + Timestamp(self.0 - diff.0) + } +} + +impl Rem for Timestamp { + type Output = TimeDiff; + + fn rem(self, diff: TimeDiff) -> TimeDiff { + TimeDiff(self.0 % diff.0) + } +} + +impl Shl for Timestamp +where + u64: Shl, +{ + type Output = Timestamp; + + fn shl(self, rhs: T) -> Timestamp { + Timestamp(self.0 << rhs) + } +} + +impl Shr for Timestamp +where + u64: Shr, +{ + type Output = Timestamp; + + fn shr(self, rhs: T) -> Timestamp { + Timestamp(self.0 >> rhs) + } +} + +#[cfg(any(feature = "std", test))] +impl Serialize for Timestamp { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + self.to_string().serialize(serializer) + } else { + self.0.serialize(serializer) + } + } +} + +#[cfg(any(feature = "std", test))] +impl<'de> Deserialize<'de> for Timestamp { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let value_as_string = String::deserialize(deserializer)?; + Timestamp::from_str(&value_as_string).map_err(SerdeError::custom) + } else { + let inner = u64::deserialize(deserializer)?; + Ok(Timestamp(inner)) + } + } +} + +impl ToBytes for Timestamp { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for Timestamp { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + u64::from_bytes(bytes).map(|(inner, remainder)| (Timestamp(inner), remainder)) + } +} + +impl From for Timestamp { + fn 
from(milliseconds_since_epoch: u64) -> Timestamp { + Timestamp(milliseconds_since_epoch) + } +} + +/// A time difference between two timestamps. +#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(with = "String", description = "Human-readable duration.") +)] +pub struct TimeDiff(u64); + +#[cfg(any(feature = "std", test))] +impl Display for TimeDiff { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!(f, "{}", humantime::format_duration(Duration::from(*self))) + } +} + +#[cfg(any(feature = "std", test))] +impl FromStr for TimeDiff { + type Err = DurationError; + + fn from_str(value: &str) -> Result { + let inner = humantime::parse_duration(value)?.as_millis() as u64; + Ok(TimeDiff(inner)) + } +} + +impl TimeDiff { + /// Returns the time difference as the number of milliseconds since the Unix epoch + pub fn millis(&self) -> u64 { + self.0 + } + + /// Creates a new time difference from seconds. + pub const fn from_seconds(seconds: u32) -> Self { + TimeDiff(seconds as u64 * 1_000) + } + + /// Creates a new time difference from milliseconds. + pub const fn from_millis(millis: u64) -> Self { + TimeDiff(millis) + } + + /// Returns the sum, or `TimeDiff(u64::MAX)` if it would overflow. + #[must_use] + pub fn saturating_add(self, rhs: u64) -> Self { + TimeDiff(self.0.saturating_add(rhs)) + } + + /// Returns the product, or `TimeDiff(u64::MAX)` if it would overflow. + #[must_use] + pub fn saturating_mul(self, rhs: u64) -> Self { + TimeDiff(self.0.saturating_mul(rhs)) + } + + /// Returns the product, or `None` if it would overflow. 
+ #[must_use] + pub fn checked_mul(self, rhs: u64) -> Option { + Some(TimeDiff(self.0.checked_mul(rhs)?)) + } +} + +impl Add for TimeDiff { + type Output = TimeDiff; + + fn add(self, rhs: TimeDiff) -> TimeDiff { + TimeDiff(self.0 + rhs.0) + } +} + +impl AddAssign for TimeDiff { + fn add_assign(&mut self, rhs: TimeDiff) { + self.0 += rhs.0; + } +} + +impl Sub for TimeDiff { + type Output = TimeDiff; + + fn sub(self, rhs: TimeDiff) -> TimeDiff { + TimeDiff(self.0 - rhs.0) + } +} + +impl SubAssign for TimeDiff { + fn sub_assign(&mut self, rhs: TimeDiff) { + self.0 -= rhs.0; + } +} + +impl Mul for TimeDiff { + type Output = TimeDiff; + + fn mul(self, rhs: u64) -> TimeDiff { + TimeDiff(self.0 * rhs) + } +} + +impl Div for TimeDiff { + type Output = TimeDiff; + + fn div(self, rhs: u64) -> TimeDiff { + TimeDiff(self.0 / rhs) + } +} + +impl Div for TimeDiff { + type Output = u64; + + fn div(self, rhs: TimeDiff) -> u64 { + self.0 / rhs.0 + } +} + +impl From for Duration { + fn from(diff: TimeDiff) -> Duration { + Duration::from_millis(diff.0) + } +} + +#[cfg(any(feature = "std", test))] +impl Serialize for TimeDiff { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + self.to_string().serialize(serializer) + } else { + self.0.serialize(serializer) + } + } +} + +#[cfg(any(feature = "std", test))] +impl<'de> Deserialize<'de> for TimeDiff { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let value_as_string = String::deserialize(deserializer)?; + TimeDiff::from_str(&value_as_string).map_err(SerdeError::custom) + } else { + let inner = u64::deserialize(deserializer)?; + Ok(TimeDiff(inner)) + } + } +} + +impl ToBytes for TimeDiff { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for TimeDiff { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + 
u64::from_bytes(bytes).map(|(inner, remainder)| (TimeDiff(inner), remainder)) + } +} + +impl From for TimeDiff { + fn from(duration: Duration) -> TimeDiff { + TimeDiff(duration.as_millis() as u64) + } +} + +/// A module for the `[serde(with = serde_option_time_diff)]` attribute, to serialize and +/// deserialize `Option` treating `None` as 0. +#[cfg(any(feature = "std", test))] +pub mod serde_option_time_diff { + use super::*; + + /// Serializes an `Option`, using `0` if the value is `None`. + pub fn serialize( + maybe_td: &Option, + serializer: S, + ) -> Result { + maybe_td + .unwrap_or_else(|| TimeDiff::from_millis(0)) + .serialize(serializer) + } + + /// Deserializes an `Option`, returning `None` if the value is `0`. + pub fn deserialize<'de, D: Deserializer<'de>>( + deserializer: D, + ) -> Result, D::Error> { + let td = TimeDiff::deserialize(deserializer)?; + if td.0 == 0 { + Ok(None) + } else { + Ok(Some(td)) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn timestamp_serialization_roundtrip() { + let timestamp = Timestamp::now(); + + let timestamp_as_string = timestamp.to_string(); + assert_eq!( + timestamp, + Timestamp::from_str(×tamp_as_string).unwrap() + ); + + let serialized_json = serde_json::to_string(×tamp).unwrap(); + assert_eq!(timestamp, serde_json::from_str(&serialized_json).unwrap()); + + let serialized_bincode = bincode::serialize(×tamp).unwrap(); + assert_eq!( + timestamp, + bincode::deserialize(&serialized_bincode).unwrap() + ); + + bytesrepr::test_serialization_roundtrip(×tamp); + } + + #[test] + fn timediff_serialization_roundtrip() { + let mut rng = TestRng::new(); + let timediff = TimeDiff(rng.gen()); + + let timediff_as_string = timediff.to_string(); + assert_eq!(timediff, TimeDiff::from_str(&timediff_as_string).unwrap()); + + let serialized_json = serde_json::to_string(&timediff).unwrap(); + assert_eq!(timediff, serde_json::from_str(&serialized_json).unwrap()); + + let serialized_bincode = 
bincode::serialize(&timediff).unwrap(); + assert_eq!(timediff, bincode::deserialize(&serialized_bincode).unwrap()); + + bytesrepr::test_serialization_roundtrip(&timediff); + } + + #[test] + fn does_not_crash_for_big_timestamp_value() { + assert!(Timestamp::MAX.to_string().starts_with("Invalid timestamp:")); + } +} diff --git a/casper_types/src/transfer.rs b/casper_types/src/transfer.rs new file mode 100644 index 00000000..23f51df8 --- /dev/null +++ b/casper_types/src/transfer.rs @@ -0,0 +1,506 @@ +// TODO - remove once schemars stops causing warning. +#![allow(clippy::field_reassign_with_default)] + +use alloc::{format, string::String, vec::Vec}; +use core::{ + array::TryFromSliceError, + convert::TryFrom, + fmt::{self, Debug, Display, Formatter}, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +#[cfg(feature = "json-schema")] +use schemars::{gen::SchemaGenerator, schema::Schema, JsonSchema}; +use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; + +use crate::{ + account::AccountHash, + bytesrepr::{self, FromBytes, ToBytes}, + checksummed_hex, CLType, CLTyped, URef, U512, +}; + +/// The length of a deploy hash. +pub const DEPLOY_HASH_LENGTH: usize = 32; +/// The length of a transfer address. +pub const TRANSFER_ADDR_LENGTH: usize = 32; +pub(super) const TRANSFER_ADDR_FORMATTED_STRING_PREFIX: &str = "transfer-"; + +/// A newtype wrapping a [u8; [DEPLOY_HASH_LENGTH]] which is the raw bytes of the +/// deploy hash. +#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct DeployHash([u8; DEPLOY_HASH_LENGTH]); + +impl DeployHash { + /// Constructs a new `DeployHash` instance from the raw bytes of a deploy hash. + pub const fn new(value: [u8; DEPLOY_HASH_LENGTH]) -> DeployHash { + DeployHash(value) + } + + /// Returns the raw bytes of the deploy hash as an array. 
+ pub fn value(&self) -> [u8; DEPLOY_HASH_LENGTH] { + self.0 + } + + /// Returns the raw bytes of the deploy hash as a `slice`. + pub fn as_bytes(&self) -> &[u8] { + &self.0 + } +} + +#[cfg(feature = "json-schema")] +impl JsonSchema for DeployHash { + fn schema_name() -> String { + String::from("DeployHash") + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let schema = gen.subschema_for::(); + let mut schema_object = schema.into_object(); + schema_object.metadata().description = Some("Hex-encoded deploy hash.".to_string()); + schema_object.into() + } +} + +impl ToBytes for DeployHash { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for DeployHash { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + <[u8; DEPLOY_HASH_LENGTH]>::from_bytes(bytes) + .map(|(inner, remainder)| (DeployHash(inner), remainder)) + } +} + +impl Serialize for DeployHash { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + base16::encode_lower(&self.0).serialize(serializer) + } else { + self.0.serialize(serializer) + } + } +} + +impl<'de> Deserialize<'de> for DeployHash { + fn deserialize>(deserializer: D) -> Result { + let bytes = if deserializer.is_human_readable() { + let hex_string = String::deserialize(deserializer)?; + let vec_bytes = + checksummed_hex::decode(hex_string.as_bytes()).map_err(SerdeError::custom)?; + <[u8; DEPLOY_HASH_LENGTH]>::try_from(vec_bytes.as_ref()).map_err(SerdeError::custom)? + } else { + <[u8; DEPLOY_HASH_LENGTH]>::deserialize(deserializer)? 
+ }; + Ok(DeployHash(bytes)) + } +} + +impl Debug for DeployHash { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!(formatter, "DeployHash({})", base16::encode_lower(&self.0)) + } +} + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> DeployHash { + DeployHash::new(rng.gen()) + } +} + +/// Represents a transfer from one purse to another +#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Serialize, Deserialize, Default)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct Transfer { + /// Deploy that created the transfer + pub deploy_hash: DeployHash, + /// Account from which transfer was executed + pub from: AccountHash, + /// Account to which funds are transferred + pub to: Option, + /// Source purse + pub source: URef, + /// Target purse + pub target: URef, + /// Transfer amount + pub amount: U512, + /// Gas + pub gas: U512, + /// User-defined id + pub id: Option, +} + +impl Transfer { + /// Creates a [`Transfer`]. 
+ #[allow(clippy::too_many_arguments)] + pub fn new( + deploy_hash: DeployHash, + from: AccountHash, + to: Option, + source: URef, + target: URef, + amount: U512, + gas: U512, + id: Option, + ) -> Self { + Transfer { + deploy_hash, + from, + to, + source, + target, + amount, + gas, + id, + } + } +} + +impl FromBytes for Transfer { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (deploy_hash, rem) = FromBytes::from_bytes(bytes)?; + let (from, rem) = AccountHash::from_bytes(rem)?; + let (to, rem) = >::from_bytes(rem)?; + let (source, rem) = URef::from_bytes(rem)?; + let (target, rem) = URef::from_bytes(rem)?; + let (amount, rem) = U512::from_bytes(rem)?; + let (gas, rem) = U512::from_bytes(rem)?; + let (id, rem) = >::from_bytes(rem)?; + Ok(( + Transfer { + deploy_hash, + from, + to, + source, + target, + amount, + gas, + id, + }, + rem, + )) + } +} + +impl ToBytes for Transfer { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + self.deploy_hash.write_bytes(&mut result)?; + self.from.write_bytes(&mut result)?; + self.to.write_bytes(&mut result)?; + self.source.write_bytes(&mut result)?; + self.target.write_bytes(&mut result)?; + self.amount.write_bytes(&mut result)?; + self.gas.write_bytes(&mut result)?; + self.id.write_bytes(&mut result)?; + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.deploy_hash.serialized_length() + + self.from.serialized_length() + + self.to.serialized_length() + + self.source.serialized_length() + + self.target.serialized_length() + + self.amount.serialized_length() + + self.gas.serialized_length() + + self.id.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.deploy_hash.write_bytes(writer)?; + self.from.write_bytes(writer)?; + self.to.write_bytes(writer)?; + self.source.write_bytes(writer)?; + self.target.write_bytes(writer)?; + self.amount.write_bytes(writer)?; + 
self.gas.write_bytes(writer)?; + self.id.write_bytes(writer)?; + Ok(()) + } +} + +/// Error returned when decoding a `TransferAddr` from a formatted string. +#[derive(Debug)] +#[non_exhaustive] +pub enum FromStrError { + /// The prefix is invalid. + InvalidPrefix, + /// The address is not valid hex. + Hex(base16::DecodeError), + /// The slice is the wrong length. + Length(TryFromSliceError), +} + +impl From for FromStrError { + fn from(error: base16::DecodeError) -> Self { + FromStrError::Hex(error) + } +} + +impl From for FromStrError { + fn from(error: TryFromSliceError) -> Self { + FromStrError::Length(error) + } +} + +impl Display for FromStrError { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match self { + FromStrError::InvalidPrefix => write!(f, "prefix is not 'transfer-'"), + FromStrError::Hex(error) => { + write!(f, "failed to decode address portion from hex: {}", error) + } + FromStrError::Length(error) => write!(f, "address portion is wrong length: {}", error), + } + } +} + +/// A newtype wrapping a [u8; [TRANSFER_ADDR_LENGTH]] which is the raw bytes of the +/// transfer address. +#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct TransferAddr([u8; TRANSFER_ADDR_LENGTH]); + +impl TransferAddr { + /// Constructs a new `TransferAddr` instance from the raw bytes. + pub const fn new(value: [u8; TRANSFER_ADDR_LENGTH]) -> TransferAddr { + TransferAddr(value) + } + + /// Returns the raw bytes of the transfer address as an array. + pub fn value(&self) -> [u8; TRANSFER_ADDR_LENGTH] { + self.0 + } + + /// Returns the raw bytes of the transfer address as a `slice`. + pub fn as_bytes(&self) -> &[u8] { + &self.0 + } + + /// Formats the `TransferAddr` as a prefixed, hex-encoded string. 
+ pub fn to_formatted_string(self) -> String { + format!( + "{}{}", + TRANSFER_ADDR_FORMATTED_STRING_PREFIX, + base16::encode_lower(&self.0), + ) + } + + /// Parses a string formatted as per `Self::to_formatted_string()` into a `TransferAddr`. + pub fn from_formatted_str(input: &str) -> Result { + let remainder = input + .strip_prefix(TRANSFER_ADDR_FORMATTED_STRING_PREFIX) + .ok_or(FromStrError::InvalidPrefix)?; + let bytes = + <[u8; TRANSFER_ADDR_LENGTH]>::try_from(checksummed_hex::decode(remainder)?.as_ref())?; + Ok(TransferAddr(bytes)) + } +} + +#[cfg(feature = "json-schema")] +impl JsonSchema for TransferAddr { + fn schema_name() -> String { + String::from("TransferAddr") + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let schema = gen.subschema_for::(); + let mut schema_object = schema.into_object(); + schema_object.metadata().description = Some("Hex-encoded transfer address.".to_string()); + schema_object.into() + } +} + +impl Serialize for TransferAddr { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + self.to_formatted_string().serialize(serializer) + } else { + self.0.serialize(serializer) + } + } +} + +impl<'de> Deserialize<'de> for TransferAddr { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let formatted_string = String::deserialize(deserializer)?; + TransferAddr::from_formatted_str(&formatted_string).map_err(SerdeError::custom) + } else { + let bytes = <[u8; TRANSFER_ADDR_LENGTH]>::deserialize(deserializer)?; + Ok(TransferAddr(bytes)) + } + } +} + +impl Display for TransferAddr { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + write!(f, "{}", base16::encode_lower(&self.0)) + } +} + +impl Debug for TransferAddr { + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + write!(f, "TransferAddr({})", base16::encode_lower(&self.0)) + } +} + +impl CLTyped for TransferAddr { + fn cl_type() -> CLType { + CLType::ByteArray(TRANSFER_ADDR_LENGTH 
as u32) + } +} + +impl ToBytes for TransferAddr { + #[inline(always)] + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + #[inline(always)] + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + #[inline(always)] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for TransferAddr { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (bytes, remainder) = FromBytes::from_bytes(bytes)?; + Ok((TransferAddr::new(bytes), remainder)) + } +} + +impl AsRef<[u8]> for TransferAddr { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> TransferAddr { + TransferAddr::new(rng.gen()) + } +} + +/// Generators for [`Transfer`] +#[cfg(any(feature = "testing", feature = "gens", test))] +pub mod gens { + use proptest::prelude::{prop::option, Arbitrary, Strategy}; + + use crate::{ + deploy_info::gens::{account_hash_arb, deploy_hash_arb}, + gens::{u512_arb, uref_arb}, + Transfer, + }; + + /// Creates an arbitrary [`Transfer`] + pub fn transfer_arb() -> impl Strategy { + ( + deploy_hash_arb(), + account_hash_arb(), + option::of(account_hash_arb()), + uref_arb(), + uref_arb(), + u512_arb(), + u512_arb(), + option::of(::arbitrary()), + ) + .prop_map(|(deploy_hash, from, to, source, target, amount, gas, id)| { + Transfer { + deploy_hash, + from, + to, + source, + target, + amount, + gas, + id, + } + }) + } +} + +#[cfg(test)] +mod tests { + use proptest::prelude::*; + + use crate::bytesrepr; + + use super::*; + + proptest! 
{ + #[test] + fn test_serialization_roundtrip(transfer in gens::transfer_arb()) { + bytesrepr::test_serialization_roundtrip(&transfer) + } + } + + #[test] + fn transfer_addr_from_str() { + let transfer_address = TransferAddr([4; 32]); + let encoded = transfer_address.to_formatted_string(); + let decoded = TransferAddr::from_formatted_str(&encoded).unwrap(); + assert_eq!(transfer_address, decoded); + + let invalid_prefix = + "transfe-0000000000000000000000000000000000000000000000000000000000000000"; + assert!(TransferAddr::from_formatted_str(invalid_prefix).is_err()); + + let invalid_prefix = + "transfer0000000000000000000000000000000000000000000000000000000000000000"; + assert!(TransferAddr::from_formatted_str(invalid_prefix).is_err()); + + let short_addr = "transfer-00000000000000000000000000000000000000000000000000000000000000"; + assert!(TransferAddr::from_formatted_str(short_addr).is_err()); + + let long_addr = + "transfer-000000000000000000000000000000000000000000000000000000000000000000"; + assert!(TransferAddr::from_formatted_str(long_addr).is_err()); + + let invalid_hex = + "transfer-000000000000000000000000000000000000000000000000000000000000000g"; + assert!(TransferAddr::from_formatted_str(invalid_hex).is_err()); + } + + #[test] + fn transfer_addr_serde_roundtrip() { + let transfer_address = TransferAddr([255; 32]); + let serialized = bincode::serialize(&transfer_address).unwrap(); + let decoded = bincode::deserialize(&serialized).unwrap(); + assert_eq!(transfer_address, decoded); + } + + #[test] + fn transfer_addr_json_roundtrip() { + let transfer_address = TransferAddr([255; 32]); + let json_string = serde_json::to_string_pretty(&transfer_address).unwrap(); + let decoded = serde_json::from_str(&json_string).unwrap(); + assert_eq!(transfer_address, decoded); + } +} diff --git a/casper_types/src/transfer_result.rs b/casper_types/src/transfer_result.rs new file mode 100644 index 00000000..ba9ce66b --- /dev/null +++ b/casper_types/src/transfer_result.rs @@ 
-0,0 +1,39 @@ +use core::fmt::Debug; + +use crate::ApiError; + +/// The result of an attempt to transfer between purses. +pub type TransferResult = Result; + +/// The result of a successful transfer between purses. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +#[repr(i32)] +pub enum TransferredTo { + /// The destination account already existed. + ExistingAccount = 0, + /// The destination account was created. + NewAccount = 1, +} + +impl TransferredTo { + /// Converts an `i32` to a [`TransferResult`], where: + /// * `0` represents `Ok(TransferredTo::ExistingAccount)`, + /// * `1` represents `Ok(TransferredTo::NewAccount)`, + /// * all other inputs are mapped to `Err(ApiError::Transfer)`. + pub fn result_from(value: i32) -> TransferResult { + match value { + x if x == TransferredTo::ExistingAccount as i32 => Ok(TransferredTo::ExistingAccount), + x if x == TransferredTo::NewAccount as i32 => Ok(TransferredTo::NewAccount), + _ => Err(ApiError::Transfer), + } + } + + // This conversion is not intended to be used by third party crates. 
+ #[doc(hidden)] + pub fn i32_from(result: TransferResult) -> i32 { + match result { + Ok(transferred_to) => transferred_to as i32, + Err(_) => 2, + } + } +} diff --git a/casper_types/src/uint.rs b/casper_types/src/uint.rs new file mode 100644 index 00000000..bdb30a45 --- /dev/null +++ b/casper_types/src/uint.rs @@ -0,0 +1,1001 @@ +use alloc::{ + format, + string::{String, ToString}, + vec::Vec, +}; +use core::{ + fmt::{self, Formatter}, + iter::Sum, + ops::Add, +}; + +use num_integer::Integer; +use num_traits::{ + AsPrimitive, Bounded, CheckedAdd, CheckedMul, CheckedSub, Num, One, Unsigned, WrappingAdd, + WrappingSub, Zero, +}; +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +use serde::{ + de::{self, Deserialize, Deserializer, MapAccess, SeqAccess, Visitor}, + ser::{Serialize, SerializeStruct, Serializer}, +}; + +use crate::bytesrepr::{self, Error, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}; + +#[allow( + clippy::assign_op_pattern, + clippy::ptr_offset_with_cast, + clippy::manual_range_contains, + clippy::range_plus_one, + clippy::transmute_ptr_to_ptr, + clippy::reversed_empty_ranges +)] +mod macro_code { + #[cfg(feature = "datasize")] + use datasize::DataSize; + use uint::construct_uint; + + construct_uint! { + #[cfg_attr(feature = "datasize", derive(DataSize))] + pub struct U512(8); + } + construct_uint! { + #[cfg_attr(feature = "datasize", derive(DataSize))] + pub struct U256(4); + } + construct_uint! { + #[cfg_attr(feature = "datasize", derive(DataSize))] + pub struct U128(2); + } +} + +pub use self::macro_code::{U128, U256, U512}; + +/// Error type for parsing [`U128`], [`U256`], [`U512`] from a string. +#[derive(Debug)] +#[non_exhaustive] +pub enum UIntParseError { + /// Contains the parsing error from the `uint` crate, which only supports base-10 parsing. + FromDecStr(uint::FromDecStrErr), + /// Parsing was attempted on a string representing the number in some base other than 10. 
+ /// + /// Note: a general radix may be supported in the future. + InvalidRadix, +} + +macro_rules! impl_traits_for_uint { + ($type:ident, $total_bytes:expr, $test_mod:ident) => { + impl Serialize for $type { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + return self.to_string().serialize(serializer); + } + + let mut buffer = [0u8; $total_bytes]; + self.to_little_endian(&mut buffer); + let non_zero_bytes: Vec = buffer + .iter() + .rev() + .skip_while(|b| **b == 0) + .cloned() + .collect(); + let num_bytes = non_zero_bytes.len(); + + let mut state = serializer.serialize_struct("bigint", num_bytes + 1)?; + state.serialize_field("", &(num_bytes as u8))?; + + for byte in non_zero_bytes.into_iter().rev() { + state.serialize_field("", &byte)?; + } + state.end() + } + } + + impl<'de> Deserialize<'de> for $type { + fn deserialize>(deserializer: D) -> Result { + struct BigNumVisitor; + + impl<'de> Visitor<'de> for BigNumVisitor { + type Value = $type; + + fn expecting(&self, formatter: &mut Formatter) -> fmt::Result { + formatter.write_str("bignum struct") + } + + fn visit_seq>( + self, + mut sequence: V, + ) -> Result<$type, V::Error> { + let length: u8 = sequence + .next_element()? + .ok_or_else(|| de::Error::invalid_length(0, &self))?; + let mut buffer = [0u8; $total_bytes]; + for index in 0..length as usize { + let value = sequence + .next_element()? + .ok_or_else(|| de::Error::invalid_length(index + 1, &self))?; + buffer[index as usize] = value; + } + let result = $type::from_little_endian(&buffer); + Ok(result) + } + + fn visit_map>(self, mut map: V) -> Result<$type, V::Error> { + let _length_key: u8 = map + .next_key()? + .ok_or_else(|| de::Error::missing_field("length"))?; + let length: u8 = map + .next_value() + .map_err(|_| de::Error::invalid_length(0, &self))?; + let mut buffer = [0u8; $total_bytes]; + for index in 0..length { + let _byte_key: u8 = map + .next_key()? 
+ .ok_or_else(|| de::Error::missing_field("byte"))?; + let value = map.next_value().map_err(|_| { + de::Error::invalid_length(index as usize + 1, &self) + })?; + buffer[index as usize] = value; + } + let result = $type::from_little_endian(&buffer); + Ok(result) + } + } + + const FIELDS: &'static [&'static str] = &[ + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", + "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27", + "28", "29", "30", "31", "32", "33", "34", "35", "36", "37", "38", "39", "40", + "41", "42", "43", "44", "45", "46", "47", "48", "49", "50", "51", "52", "53", + "54", "55", "56", "57", "58", "59", "60", "61", "62", "63", "64", + ]; + + if deserializer.is_human_readable() { + let decimal_string = String::deserialize(deserializer)?; + return Self::from_dec_str(&decimal_string) + .map_err(|error| de::Error::custom(format!("{:?}", error))); + } + + deserializer.deserialize_struct("bigint", FIELDS, BigNumVisitor) + } + } + + impl ToBytes for $type { + fn to_bytes(&self) -> Result, Error> { + let mut buf = [0u8; $total_bytes]; + self.to_little_endian(&mut buf); + let mut non_zero_bytes: Vec = + buf.iter().rev().skip_while(|b| **b == 0).cloned().collect(); + let num_bytes = non_zero_bytes.len() as u8; + non_zero_bytes.push(num_bytes); + non_zero_bytes.reverse(); + Ok(non_zero_bytes) + } + + fn serialized_length(&self) -> usize { + let mut buf = [0u8; $total_bytes]; + self.to_little_endian(&mut buf); + let non_zero_bytes = buf.iter().rev().skip_while(|b| **b == 0).count(); + U8_SERIALIZED_LENGTH + non_zero_bytes + } + } + + impl FromBytes for $type { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (num_bytes, rem): (u8, &[u8]) = FromBytes::from_bytes(bytes)?; + + if num_bytes > $total_bytes { + Err(Error::Formatting) + } else { + let (value, rem) = bytesrepr::safe_split_at(rem, num_bytes as usize)?; + let result = $type::from_little_endian(value); + Ok((result, rem)) + } + } + } + 
+ // Trait implementations for unifying U* as numeric types + impl Zero for $type { + fn zero() -> Self { + $type::zero() + } + + fn is_zero(&self) -> bool { + self.is_zero() + } + } + + impl One for $type { + fn one() -> Self { + $type::one() + } + } + + // Requires Zero and One to be implemented + impl Num for $type { + type FromStrRadixErr = UIntParseError; + fn from_str_radix(str: &str, radix: u32) -> Result { + if radix == 10 { + $type::from_dec_str(str).map_err(UIntParseError::FromDecStr) + } else { + // TODO: other radix parsing + Err(UIntParseError::InvalidRadix) + } + } + } + + // Requires Num to be implemented + impl Unsigned for $type {} + + // Additional numeric trait, which also holds for these types + impl Bounded for $type { + fn min_value() -> Self { + $type::zero() + } + + fn max_value() -> Self { + $type::MAX + } + } + + // Instead of implementing arbitrary methods we can use existing traits from num_trait + // crate. + impl WrappingAdd for $type { + fn wrapping_add(&self, other: &$type) -> $type { + self.overflowing_add(*other).0 + } + } + + impl WrappingSub for $type { + fn wrapping_sub(&self, other: &$type) -> $type { + self.overflowing_sub(*other).0 + } + } + + impl CheckedMul for $type { + fn checked_mul(&self, v: &$type) -> Option<$type> { + $type::checked_mul(*self, *v) + } + } + + impl CheckedSub for $type { + fn checked_sub(&self, v: &$type) -> Option<$type> { + $type::checked_sub(*self, *v) + } + } + + impl CheckedAdd for $type { + fn checked_add(&self, v: &$type) -> Option<$type> { + $type::checked_add(*self, *v) + } + } + + impl Integer for $type { + /// Unsigned integer division. Returns the same result as `div` (`/`). + #[inline] + fn div_floor(&self, other: &Self) -> Self { + *self / *other + } + + /// Unsigned integer modulo operation. Returns the same result as `rem` (`%`). 
+ #[inline] + fn mod_floor(&self, other: &Self) -> Self { + *self % *other + } + + /// Calculates the Greatest Common Divisor (GCD) of the number and `other` + #[inline] + fn gcd(&self, other: &Self) -> Self { + let zero = Self::zero(); + // Use Stein's algorithm + let mut m = *self; + let mut n = *other; + if m == zero || n == zero { + return m | n; + } + + // find common factors of 2 + let shift = (m | n).trailing_zeros(); + + // divide n and m by 2 until odd + m >>= m.trailing_zeros(); + n >>= n.trailing_zeros(); + + while m != n { + if m > n { + m -= n; + m >>= m.trailing_zeros(); + } else { + n -= m; + n >>= n.trailing_zeros(); + } + } + m << shift + } + + /// Calculates the Lowest Common Multiple (LCM) of the number and `other`. + #[inline] + fn lcm(&self, other: &Self) -> Self { + self.gcd_lcm(other).1 + } + + /// Calculates the Greatest Common Divisor (GCD) and + /// Lowest Common Multiple (LCM) of the number and `other`. + #[inline] + fn gcd_lcm(&self, other: &Self) -> (Self, Self) { + if self.is_zero() && other.is_zero() { + return (Self::zero(), Self::zero()); + } + let gcd = self.gcd(other); + let lcm = *self * (*other / gcd); + (gcd, lcm) + } + + /// Deprecated, use `is_multiple_of` instead. + #[inline] + fn divides(&self, other: &Self) -> bool { + self.is_multiple_of(other) + } + + /// Returns `true` if the number is a multiple of `other`. + #[inline] + fn is_multiple_of(&self, other: &Self) -> bool { + *self % *other == $type::zero() + } + + /// Returns `true` if the number is divisible by `2`. + #[inline] + fn is_even(&self) -> bool { + (self.0[0]) & 1 == 0 + } + + /// Returns `true` if the number is not divisible by `2`. + #[inline] + fn is_odd(&self) -> bool { + !self.is_even() + } + + /// Simultaneous truncated integer division and modulus. 
+ #[inline] + fn div_rem(&self, other: &Self) -> (Self, Self) { + (*self / *other, *self % *other) + } + } + + impl AsPrimitive<$type> for i32 { + fn as_(self) -> $type { + if self >= 0 { + $type::from(self as u32) + } else { + let abs = 0u32.wrapping_sub(self as u32); + $type::zero().wrapping_sub(&$type::from(abs)) + } + } + } + + impl AsPrimitive<$type> for i64 { + fn as_(self) -> $type { + if self >= 0 { + $type::from(self as u64) + } else { + let abs = 0u64.wrapping_sub(self as u64); + $type::zero().wrapping_sub(&$type::from(abs)) + } + } + } + + impl AsPrimitive<$type> for u8 { + fn as_(self) -> $type { + $type::from(self) + } + } + + impl AsPrimitive<$type> for u32 { + fn as_(self) -> $type { + $type::from(self) + } + } + + impl AsPrimitive<$type> for u64 { + fn as_(self) -> $type { + $type::from(self) + } + } + + impl AsPrimitive for $type { + fn as_(self) -> i32 { + self.0[0] as i32 + } + } + + impl AsPrimitive for $type { + fn as_(self) -> i64 { + self.0[0] as i64 + } + } + + impl AsPrimitive for $type { + fn as_(self) -> u8 { + self.0[0] as u8 + } + } + + impl AsPrimitive for $type { + fn as_(self) -> u32 { + self.0[0] as u32 + } + } + + impl AsPrimitive for $type { + fn as_(self) -> u64 { + self.0[0] + } + } + + impl Sum for $type { + fn sum>(iter: I) -> Self { + iter.fold($type::zero(), Add::add) + } + } + + impl Distribution<$type> for Standard { + fn sample(&self, rng: &mut R) -> $type { + let mut raw_bytes = [0u8; $total_bytes]; + rng.fill_bytes(raw_bytes.as_mut()); + $type::from(raw_bytes) + } + } + + #[cfg(feature = "json-schema")] + impl schemars::JsonSchema for $type { + fn schema_name() -> String { + format!("U{}", $total_bytes * 8) + } + + fn json_schema(gen: &mut schemars::gen::SchemaGenerator) -> schemars::schema::Schema { + let schema = gen.subschema_for::(); + let mut schema_object = schema.into_object(); + schema_object.metadata().description = Some(format!( + "Decimal representation of a {}-bit integer.", + $total_bytes * 8 + )); + 
schema_object.into() + } + } + + #[cfg(test)] + mod $test_mod { + use super::*; + + #[test] + fn test_div_mod_floor() { + assert_eq!($type::from(10).div_floor(&$type::from(3)), $type::from(3)); + assert_eq!($type::from(10).mod_floor(&$type::from(3)), $type::from(1)); + assert_eq!( + $type::from(10).div_mod_floor(&$type::from(3)), + ($type::from(3), $type::from(1)) + ); + assert_eq!($type::from(5).div_floor(&$type::from(5)), $type::from(1)); + assert_eq!($type::from(5).mod_floor(&$type::from(5)), $type::from(0)); + assert_eq!( + $type::from(5).div_mod_floor(&$type::from(5)), + ($type::from(1), $type::from(0)) + ); + assert_eq!($type::from(3).div_floor(&$type::from(7)), $type::from(0)); + assert_eq!($type::from(3).mod_floor(&$type::from(7)), $type::from(3)); + assert_eq!( + $type::from(3).div_mod_floor(&$type::from(7)), + ($type::from(0), $type::from(3)) + ); + } + + #[test] + fn test_gcd() { + assert_eq!($type::from(10).gcd(&$type::from(2)), $type::from(2)); + assert_eq!($type::from(10).gcd(&$type::from(3)), $type::from(1)); + assert_eq!($type::from(0).gcd(&$type::from(3)), $type::from(3)); + assert_eq!($type::from(3).gcd(&$type::from(3)), $type::from(3)); + assert_eq!($type::from(56).gcd(&$type::from(42)), $type::from(14)); + assert_eq!( + $type::MAX.gcd(&($type::MAX / $type::from(2))), + $type::from(1) + ); + assert_eq!($type::from(15).gcd(&$type::from(17)), $type::from(1)); + } + + #[test] + fn test_lcm() { + assert_eq!($type::from(1).lcm(&$type::from(0)), $type::from(0)); + assert_eq!($type::from(0).lcm(&$type::from(1)), $type::from(0)); + assert_eq!($type::from(1).lcm(&$type::from(1)), $type::from(1)); + assert_eq!($type::from(8).lcm(&$type::from(9)), $type::from(72)); + assert_eq!($type::from(11).lcm(&$type::from(5)), $type::from(55)); + assert_eq!($type::from(15).lcm(&$type::from(17)), $type::from(255)); + assert_eq!($type::from(4).lcm(&$type::from(8)), $type::from(8)); + } + + #[test] + fn test_is_multiple_of() { + 
assert!($type::from(6).is_multiple_of(&$type::from(6))); + assert!($type::from(6).is_multiple_of(&$type::from(3))); + assert!($type::from(6).is_multiple_of(&$type::from(1))); + assert!(!$type::from(3).is_multiple_of(&$type::from(5))) + } + + #[test] + fn is_even() { + assert_eq!($type::from(0).is_even(), true); + assert_eq!($type::from(1).is_even(), false); + assert_eq!($type::from(2).is_even(), true); + assert_eq!($type::from(3).is_even(), false); + assert_eq!($type::from(4).is_even(), true); + } + + #[test] + fn is_odd() { + assert_eq!($type::from(0).is_odd(), false); + assert_eq!($type::from(1).is_odd(), true); + assert_eq!($type::from(2).is_odd(), false); + assert_eq!($type::from(3).is_odd(), true); + assert_eq!($type::from(4).is_odd(), false); + } + + #[test] + #[should_panic] + fn overflow_mul_test() { + let _ = $type::MAX * $type::from(2); + } + + #[test] + #[should_panic] + fn overflow_add_test() { + let _ = $type::MAX + $type::from(1); + } + + #[test] + #[should_panic] + fn underflow_sub_test() { + let _ = $type::zero() - $type::from(1); + } + } + }; +} + +impl_traits_for_uint!(U128, 16, u128_test); +impl_traits_for_uint!(U256, 32, u256_test); +impl_traits_for_uint!(U512, 64, u512_test); + +impl AsPrimitive for U128 { + fn as_(self) -> U128 { + self + } +} + +impl AsPrimitive for U128 { + fn as_(self) -> U256 { + let mut result = U256::zero(); + result.0[..2].clone_from_slice(&self.0[..2]); + result + } +} + +impl AsPrimitive for U128 { + fn as_(self) -> U512 { + let mut result = U512::zero(); + result.0[..2].clone_from_slice(&self.0[..2]); + result + } +} + +impl AsPrimitive for U256 { + fn as_(self) -> U128 { + let mut result = U128::zero(); + result.0[..2].clone_from_slice(&self.0[..2]); + result + } +} + +impl AsPrimitive for U256 { + fn as_(self) -> U256 { + self + } +} + +impl AsPrimitive for U256 { + fn as_(self) -> U512 { + let mut result = U512::zero(); + result.0[..4].clone_from_slice(&self.0[..4]); + result + } +} + +impl AsPrimitive for U512 { 
+ fn as_(self) -> U128 { + let mut result = U128::zero(); + result.0[..2].clone_from_slice(&self.0[..2]); + result + } +} + +impl AsPrimitive for U512 { + fn as_(self) -> U256 { + let mut result = U256::zero(); + result.0[..4].clone_from_slice(&self.0[..4]); + result + } +} + +impl AsPrimitive for U512 { + fn as_(self) -> U512 { + self + } +} + +#[cfg(test)] +mod tests { + use std::fmt::Debug; + + use serde::de::DeserializeOwned; + + use super::*; + + fn check_as_i32>(expected: i32, input: T) { + assert_eq!(expected, input.as_()); + } + + fn check_as_i64>(expected: i64, input: T) { + assert_eq!(expected, input.as_()); + } + + fn check_as_u8>(expected: u8, input: T) { + assert_eq!(expected, input.as_()); + } + + fn check_as_u32>(expected: u32, input: T) { + assert_eq!(expected, input.as_()); + } + + fn check_as_u64>(expected: u64, input: T) { + assert_eq!(expected, input.as_()); + } + + fn check_as_u128>(expected: U128, input: T) { + assert_eq!(expected, input.as_()); + } + + fn check_as_u256>(expected: U256, input: T) { + assert_eq!(expected, input.as_()); + } + + fn check_as_u512>(expected: U512, input: T) { + assert_eq!(expected, input.as_()); + } + + #[test] + fn as_primitive_from_i32() { + let mut input = 0_i32; + check_as_i32(0, input); + check_as_i64(0, input); + check_as_u8(0, input); + check_as_u32(0, input); + check_as_u64(0, input); + check_as_u128(U128::zero(), input); + check_as_u256(U256::zero(), input); + check_as_u512(U512::zero(), input); + + input = i32::max_value() - 1; + check_as_i32(input, input); + check_as_i64(i64::from(input), input); + check_as_u8(input as u8, input); + check_as_u32(input as u32, input); + check_as_u64(input as u64, input); + check_as_u128(U128::from(input), input); + check_as_u256(U256::from(input), input); + check_as_u512(U512::from(input), input); + + input = i32::min_value() + 1; + check_as_i32(input, input); + check_as_i64(i64::from(input), input); + check_as_u8(input as u8, input); + check_as_u32(input as u32, input); 
+ check_as_u64(input as u64, input); + // i32::min_value() is -1 - i32::max_value() + check_as_u128( + U128::zero().wrapping_sub(&U128::from(i32::max_value())), + input, + ); + check_as_u256( + U256::zero().wrapping_sub(&U256::from(i32::max_value())), + input, + ); + check_as_u512( + U512::zero().wrapping_sub(&U512::from(i32::max_value())), + input, + ); + } + + #[test] + fn as_primitive_from_i64() { + let mut input = 0_i64; + check_as_i32(0, input); + check_as_i64(0, input); + check_as_u8(0, input); + check_as_u32(0, input); + check_as_u64(0, input); + check_as_u128(U128::zero(), input); + check_as_u256(U256::zero(), input); + check_as_u512(U512::zero(), input); + + input = i64::max_value() - 1; + check_as_i32(input as i32, input); + check_as_i64(input, input); + check_as_u8(input as u8, input); + check_as_u32(input as u32, input); + check_as_u64(input as u64, input); + check_as_u128(U128::from(input), input); + check_as_u256(U256::from(input), input); + check_as_u512(U512::from(input), input); + + input = i64::min_value() + 1; + check_as_i32(input as i32, input); + check_as_i64(input, input); + check_as_u8(input as u8, input); + check_as_u32(input as u32, input); + check_as_u64(input as u64, input); + // i64::min_value() is (-1 - i64::max_value()) + check_as_u128( + U128::zero().wrapping_sub(&U128::from(i64::max_value())), + input, + ); + check_as_u256( + U256::zero().wrapping_sub(&U256::from(i64::max_value())), + input, + ); + check_as_u512( + U512::zero().wrapping_sub(&U512::from(i64::max_value())), + input, + ); + } + + #[test] + fn as_primitive_from_u8() { + let mut input = 0_u8; + check_as_i32(0, input); + check_as_i64(0, input); + check_as_u8(0, input); + check_as_u32(0, input); + check_as_u64(0, input); + check_as_u128(U128::zero(), input); + check_as_u256(U256::zero(), input); + check_as_u512(U512::zero(), input); + + input = u8::max_value() - 1; + check_as_i32(i32::from(input), input); + check_as_i64(i64::from(input), input); + check_as_u8(input, input); 
+ check_as_u32(u32::from(input), input); + check_as_u64(u64::from(input), input); + check_as_u128(U128::from(input), input); + check_as_u256(U256::from(input), input); + check_as_u512(U512::from(input), input); + } + + #[test] + fn as_primitive_from_u32() { + let mut input = 0_u32; + check_as_i32(0, input); + check_as_i64(0, input); + check_as_u8(0, input); + check_as_u32(0, input); + check_as_u64(0, input); + check_as_u128(U128::zero(), input); + check_as_u256(U256::zero(), input); + check_as_u512(U512::zero(), input); + + input = u32::max_value() - 1; + check_as_i32(input as i32, input); + check_as_i64(i64::from(input), input); + check_as_u8(input as u8, input); + check_as_u32(input, input); + check_as_u64(u64::from(input), input); + check_as_u128(U128::from(input), input); + check_as_u256(U256::from(input), input); + check_as_u512(U512::from(input), input); + } + + #[test] + fn as_primitive_from_u64() { + let mut input = 0_u64; + check_as_i32(0, input); + check_as_i64(0, input); + check_as_u8(0, input); + check_as_u32(0, input); + check_as_u64(0, input); + check_as_u128(U128::zero(), input); + check_as_u256(U256::zero(), input); + check_as_u512(U512::zero(), input); + + input = u64::max_value() - 1; + check_as_i32(input as i32, input); + check_as_i64(input as i64, input); + check_as_u8(input as u8, input); + check_as_u32(input as u32, input); + check_as_u64(input, input); + check_as_u128(U128::from(input), input); + check_as_u256(U256::from(input), input); + check_as_u512(U512::from(input), input); + } + + fn make_little_endian_arrays(little_endian_bytes: &[u8]) -> ([u8; 4], [u8; 8]) { + let le_32 = { + let mut le_32 = [0; 4]; + le_32.copy_from_slice(&little_endian_bytes[..4]); + le_32 + }; + + let le_64 = { + let mut le_64 = [0; 8]; + le_64.copy_from_slice(&little_endian_bytes[..8]); + le_64 + }; + + (le_32, le_64) + } + + #[test] + fn as_primitive_from_u128() { + let mut input = U128::zero(); + check_as_i32(0, input); + check_as_i64(0, input); + check_as_u8(0, 
input); + check_as_u32(0, input); + check_as_u64(0, input); + check_as_u128(U128::zero(), input); + check_as_u256(U256::zero(), input); + check_as_u512(U512::zero(), input); + + input = U128::max_value() - 1; + + let mut little_endian_bytes = [0_u8; 64]; + input.to_little_endian(&mut little_endian_bytes[..16]); + let (le_32, le_64) = make_little_endian_arrays(&little_endian_bytes); + + check_as_i32(i32::from_le_bytes(le_32), input); + check_as_i64(i64::from_le_bytes(le_64), input); + check_as_u8(little_endian_bytes[0], input); + check_as_u32(u32::from_le_bytes(le_32), input); + check_as_u64(u64::from_le_bytes(le_64), input); + check_as_u128(U128::from_little_endian(&little_endian_bytes[..16]), input); + check_as_u256(U256::from_little_endian(&little_endian_bytes[..32]), input); + check_as_u512(U512::from_little_endian(&little_endian_bytes), input); + } + + #[test] + fn as_primitive_from_u256() { + let mut input = U256::zero(); + check_as_i32(0, input); + check_as_i64(0, input); + check_as_u8(0, input); + check_as_u32(0, input); + check_as_u64(0, input); + check_as_u128(U128::zero(), input); + check_as_u256(U256::zero(), input); + check_as_u512(U512::zero(), input); + + input = U256::max_value() - 1; + + let mut little_endian_bytes = [0_u8; 64]; + input.to_little_endian(&mut little_endian_bytes[..32]); + let (le_32, le_64) = make_little_endian_arrays(&little_endian_bytes); + + check_as_i32(i32::from_le_bytes(le_32), input); + check_as_i64(i64::from_le_bytes(le_64), input); + check_as_u8(little_endian_bytes[0], input); + check_as_u32(u32::from_le_bytes(le_32), input); + check_as_u64(u64::from_le_bytes(le_64), input); + check_as_u128(U128::from_little_endian(&little_endian_bytes[..16]), input); + check_as_u256(U256::from_little_endian(&little_endian_bytes[..32]), input); + check_as_u512(U512::from_little_endian(&little_endian_bytes), input); + } + + #[test] + fn as_primitive_from_u512() { + let mut input = U512::zero(); + check_as_i32(0, input); + check_as_i64(0, 
input); + check_as_u8(0, input); + check_as_u32(0, input); + check_as_u64(0, input); + check_as_u128(U128::zero(), input); + check_as_u256(U256::zero(), input); + check_as_u512(U512::zero(), input); + + input = U512::max_value() - 1; + + let mut little_endian_bytes = [0_u8; 64]; + input.to_little_endian(&mut little_endian_bytes); + let (le_32, le_64) = make_little_endian_arrays(&little_endian_bytes); + + check_as_i32(i32::from_le_bytes(le_32), input); + check_as_i64(i64::from_le_bytes(le_64), input); + check_as_u8(little_endian_bytes[0], input); + check_as_u32(u32::from_le_bytes(le_32), input); + check_as_u64(u64::from_le_bytes(le_64), input); + check_as_u128(U128::from_little_endian(&little_endian_bytes[..16]), input); + check_as_u256(U256::from_little_endian(&little_endian_bytes[..32]), input); + check_as_u512(U512::from_little_endian(&little_endian_bytes), input); + } + + #[test] + fn wrapping_test_u512() { + let max = U512::max_value(); + let value = max.wrapping_add(&1.into()); + assert_eq!(value, 0.into()); + + let min = U512::min_value(); + let value = min.wrapping_sub(&1.into()); + assert_eq!(value, U512::max_value()); + } + + #[test] + fn wrapping_test_u256() { + let max = U256::max_value(); + let value = max.wrapping_add(&1.into()); + assert_eq!(value, 0.into()); + + let min = U256::min_value(); + let value = min.wrapping_sub(&1.into()); + assert_eq!(value, U256::max_value()); + } + + #[test] + fn wrapping_test_u128() { + let max = U128::max_value(); + let value = max.wrapping_add(&1.into()); + assert_eq!(value, 0.into()); + + let min = U128::min_value(); + let value = min.wrapping_sub(&1.into()); + assert_eq!(value, U128::max_value()); + } + + fn serde_roundtrip(value: T) { + { + let serialized = bincode::serialize(&value).unwrap(); + let deserialized = bincode::deserialize(serialized.as_slice()).unwrap(); + assert_eq!(value, deserialized); + } + { + let serialized = serde_json::to_string_pretty(&value).unwrap(); + let deserialized = 
serde_json::from_str(&serialized).unwrap(); + assert_eq!(value, deserialized); + } + } + + #[test] + fn serde_roundtrip_u512() { + serde_roundtrip(U512::min_value()); + serde_roundtrip(U512::from(1)); + serde_roundtrip(U512::from(u64::max_value())); + serde_roundtrip(U512::max_value()); + } + + #[test] + fn serde_roundtrip_u256() { + serde_roundtrip(U256::min_value()); + serde_roundtrip(U256::from(1)); + serde_roundtrip(U256::from(u64::max_value())); + serde_roundtrip(U256::max_value()); + } + + #[test] + fn serde_roundtrip_u128() { + serde_roundtrip(U128::min_value()); + serde_roundtrip(U128::from(1)); + serde_roundtrip(U128::from(u64::max_value())); + serde_roundtrip(U128::max_value()); + } +} diff --git a/casper_types/src/uref.rs b/casper_types/src/uref.rs new file mode 100644 index 00000000..be673e5d --- /dev/null +++ b/casper_types/src/uref.rs @@ -0,0 +1,427 @@ +// TODO - remove once schemars stops causing warning. +#![allow(clippy::field_reassign_with_default)] + +use alloc::{format, string::String, vec::Vec}; +use core::{ + array::TryFromSliceError, + convert::TryFrom, + fmt::{self, Debug, Display, Formatter}, + num::ParseIntError, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +#[cfg(feature = "json-schema")] +use schemars::{gen::SchemaGenerator, schema::Schema, JsonSchema}; +use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; + +use crate::{ + bytesrepr, + bytesrepr::{Error, FromBytes}, + checksummed_hex, AccessRights, ApiError, Key, ACCESS_RIGHTS_SERIALIZED_LENGTH, +}; + +/// The number of bytes in a [`URef`] address. +pub const UREF_ADDR_LENGTH: usize = 32; + +/// The number of bytes in a serialized [`URef`] where the [`AccessRights`] are not `None`. 
+pub const UREF_SERIALIZED_LENGTH: usize = UREF_ADDR_LENGTH + ACCESS_RIGHTS_SERIALIZED_LENGTH; + +pub(super) const UREF_FORMATTED_STRING_PREFIX: &str = "uref-"; + +/// The address of a `URef` (unforgeable reference) on the network. +pub type URefAddr = [u8; UREF_ADDR_LENGTH]; + +/// Error while parsing a URef from a formatted string. +#[derive(Debug)] +#[non_exhaustive] +pub enum FromStrError { + /// Prefix is not "uref-". + InvalidPrefix, + /// No access rights as suffix. + MissingSuffix, + /// Access rights are invalid. + InvalidAccessRights, + /// Failed to decode address portion of URef. + Hex(base16::DecodeError), + /// Failed to parse an int. + Int(ParseIntError), + /// The address portion is the wrong length. + Address(TryFromSliceError), +} + +impl From for FromStrError { + fn from(error: base16::DecodeError) -> Self { + FromStrError::Hex(error) + } +} + +impl From for FromStrError { + fn from(error: ParseIntError) -> Self { + FromStrError::Int(error) + } +} + +impl From for FromStrError { + fn from(error: TryFromSliceError) -> Self { + FromStrError::Address(error) + } +} + +impl Display for FromStrError { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match self { + FromStrError::InvalidPrefix => write!(f, "prefix is not 'uref-'"), + FromStrError::MissingSuffix => write!(f, "no access rights as suffix"), + FromStrError::InvalidAccessRights => write!(f, "invalid access rights"), + FromStrError::Hex(error) => { + write!(f, "failed to decode address portion from hex: {}", error) + } + FromStrError::Int(error) => write!(f, "failed to parse an int: {}", error), + FromStrError::Address(error) => { + write!(f, "address portion is the wrong length: {}", error) + } + } + } +} + +/// Represents an unforgeable reference, containing an address in the network's global storage and +/// the [`AccessRights`] of the reference. +/// +/// A `URef` can be used to index entities such as [`CLValue`](crate::CLValue)s, or smart contracts. 
+#[derive(Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord, Default)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct URef(URefAddr, AccessRights); + +impl URef { + /// Constructs a [`URef`] from an address and access rights. + pub const fn new(address: URefAddr, access_rights: AccessRights) -> Self { + URef(address, access_rights) + } + + /// Returns the address of this [`URef`]. + pub fn addr(&self) -> URefAddr { + self.0 + } + + /// Returns the access rights of this [`URef`]. + pub fn access_rights(&self) -> AccessRights { + self.1 + } + + /// Returns a new [`URef`] with the same address and updated access rights. + #[must_use] + pub fn with_access_rights(self, access_rights: AccessRights) -> Self { + URef(self.0, access_rights) + } + + /// Removes the access rights from this [`URef`]. + #[must_use] + pub fn remove_access_rights(self) -> Self { + URef(self.0, AccessRights::NONE) + } + + /// Returns `true` if the access rights are `Some` and + /// [`is_readable`](AccessRights::is_readable) is `true` for them. + #[must_use] + pub fn is_readable(self) -> bool { + self.1.is_readable() + } + + /// Returns a new [`URef`] with the same address and [`AccessRights::READ`] permission. + #[must_use] + pub fn into_read(self) -> URef { + URef(self.0, AccessRights::READ) + } + + /// Returns a new [`URef`] with the same address and [`AccessRights::WRITE`] permission. + #[must_use] + pub fn into_write(self) -> URef { + URef(self.0, AccessRights::WRITE) + } + + /// Returns a new [`URef`] with the same address and [`AccessRights::ADD`] permission. + #[must_use] + pub fn into_add(self) -> URef { + URef(self.0, AccessRights::ADD) + } + + /// Returns a new [`URef`] with the same address and [`AccessRights::READ_ADD_WRITE`] + /// permission. + #[must_use] + pub fn into_read_add_write(self) -> URef { + URef(self.0, AccessRights::READ_ADD_WRITE) + } + + /// Returns a new [`URef`] with the same address and [`AccessRights::READ_WRITE`] + /// permission. 
+ #[must_use] + pub fn into_read_write(self) -> URef { + URef(self.0, AccessRights::READ_WRITE) + } + + /// Returns `true` if the access rights are `Some` and + /// [`is_writeable`](AccessRights::is_writeable) is `true` for them. + pub fn is_writeable(self) -> bool { + self.1.is_writeable() + } + + /// Returns `true` if the access rights are `Some` and [`is_addable`](AccessRights::is_addable) + /// is `true` for them. + pub fn is_addable(self) -> bool { + self.1.is_addable() + } + + /// Formats the address and access rights of the [`URef`] in a unique way that could be used as + /// a name when storing the given `URef` in a global state. + pub fn to_formatted_string(self) -> String { + // Extract bits as numerical value, with no flags marked as 0. + let access_rights_bits = self.access_rights().bits(); + // Access rights is represented as octal, which means that max value of u8 can + // be represented as maximum of 3 octal digits. + format!( + "{}{}-{:03o}", + UREF_FORMATTED_STRING_PREFIX, + base16::encode_lower(&self.addr()), + access_rights_bits + ) + } + + /// Parses a string formatted as per `Self::to_formatted_string()` into a `URef`. + pub fn from_formatted_str(input: &str) -> Result { + let remainder = input + .strip_prefix(UREF_FORMATTED_STRING_PREFIX) + .ok_or(FromStrError::InvalidPrefix)?; + let parts = remainder.splitn(2, '-').collect::>(); + if parts.len() != 2 { + return Err(FromStrError::MissingSuffix); + } + let addr = URefAddr::try_from(checksummed_hex::decode(parts[0])?.as_ref())?; + let access_rights_value = u8::from_str_radix(parts[1], 8)?; + let access_rights = AccessRights::from_bits(access_rights_value) + .ok_or(FromStrError::InvalidAccessRights)?; + Ok(URef(addr, access_rights)) + } + + /// Removes specific access rights from this URef if present. 
+ pub fn disable_access_rights(&mut self, access_rights: AccessRights) { + self.1.remove(access_rights) + } +} + +#[cfg(feature = "json-schema")] +impl JsonSchema for URef { + fn schema_name() -> String { + String::from("URef") + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let schema = gen.subschema_for::(); + let mut schema_object = schema.into_object(); + schema_object.metadata().description = Some(String::from("Hex-encoded, formatted URef.")); + schema_object.into() + } +} + +impl Display for URef { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + let addr = self.addr(); + let access_rights = self.access_rights(); + write!( + f, + "URef({}, {})", + base16::encode_lower(&addr), + access_rights + ) + } +} + +impl Debug for URef { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!(f, "{}", self) + } +} + +impl bytesrepr::ToBytes for URef { + fn to_bytes(&self) -> Result, Error> { + let mut result = bytesrepr::unchecked_allocate_buffer(self); + result.append(&mut self.0.to_bytes()?); + result.append(&mut self.1.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + UREF_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), self::Error> { + writer.extend_from_slice(&self.0); + self.1.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for URef { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (id, rem) = FromBytes::from_bytes(bytes)?; + let (access_rights, rem) = FromBytes::from_bytes(rem)?; + Ok((URef(id, access_rights), rem)) + } +} + +impl Serialize for URef { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + self.to_formatted_string().serialize(serializer) + } else { + (self.0, self.1).serialize(serializer) + } + } +} + +impl<'de> Deserialize<'de> for URef { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let formatted_string = String::deserialize(deserializer)?; + 
URef::from_formatted_str(&formatted_string).map_err(D::Error::custom) + } else { + let (address, access_rights) = <(URefAddr, AccessRights)>::deserialize(deserializer)?; + Ok(URef(address, access_rights)) + } + } +} + +impl TryFrom for URef { + type Error = ApiError; + + fn try_from(key: Key) -> Result { + if let Key::URef(uref) = key { + Ok(uref) + } else { + Err(ApiError::UnexpectedKeyVariant) + } + } +} + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> URef { + URef::new(rng.gen(), rng.gen()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn uref_as_string() { + // Since we are putting URefs to named_keys map keyed by the label that + // `as_string()` returns, any changes to the string representation of + // that type cannot break the format. + let addr_array = [0u8; 32]; + let uref_a = URef::new(addr_array, AccessRights::READ); + assert_eq!( + uref_a.to_formatted_string(), + "uref-0000000000000000000000000000000000000000000000000000000000000000-001" + ); + let uref_b = URef::new(addr_array, AccessRights::WRITE); + assert_eq!( + uref_b.to_formatted_string(), + "uref-0000000000000000000000000000000000000000000000000000000000000000-002" + ); + + let uref_c = uref_b.remove_access_rights(); + assert_eq!( + uref_c.to_formatted_string(), + "uref-0000000000000000000000000000000000000000000000000000000000000000-000" + ); + } + + fn round_trip(uref: URef) { + let string = uref.to_formatted_string(); + let parsed_uref = URef::from_formatted_str(&string).unwrap(); + assert_eq!(uref, parsed_uref); + } + + #[test] + fn uref_from_str() { + round_trip(URef::new([0; 32], AccessRights::NONE)); + round_trip(URef::new([255; 32], AccessRights::READ_ADD_WRITE)); + + let invalid_prefix = + "ref-0000000000000000000000000000000000000000000000000000000000000000-000"; + assert!(URef::from_formatted_str(invalid_prefix).is_err()); + + let invalid_prefix = + "uref0000000000000000000000000000000000000000000000000000000000000000-000"; + 
assert!(URef::from_formatted_str(invalid_prefix).is_err()); + + let short_addr = "uref-00000000000000000000000000000000000000000000000000000000000000-000"; + assert!(URef::from_formatted_str(short_addr).is_err()); + + let long_addr = + "uref-000000000000000000000000000000000000000000000000000000000000000000-000"; + assert!(URef::from_formatted_str(long_addr).is_err()); + + let invalid_hex = + "uref-000000000000000000000000000000000000000000000000000000000000000g-000"; + assert!(URef::from_formatted_str(invalid_hex).is_err()); + + let invalid_suffix_separator = + "uref-0000000000000000000000000000000000000000000000000000000000000000:000"; + assert!(URef::from_formatted_str(invalid_suffix_separator).is_err()); + + let invalid_suffix = + "uref-0000000000000000000000000000000000000000000000000000000000000000-abc"; + assert!(URef::from_formatted_str(invalid_suffix).is_err()); + + let invalid_access_rights = + "uref-0000000000000000000000000000000000000000000000000000000000000000-200"; + assert!(URef::from_formatted_str(invalid_access_rights).is_err()); + } + + #[test] + fn serde_roundtrip() { + let uref = URef::new([255; 32], AccessRights::READ_ADD_WRITE); + let serialized = bincode::serialize(&uref).unwrap(); + let decoded = bincode::deserialize(&serialized).unwrap(); + assert_eq!(uref, decoded); + } + + #[test] + fn json_roundtrip() { + let uref = URef::new([255; 32], AccessRights::READ_ADD_WRITE); + let json_string = serde_json::to_string_pretty(&uref).unwrap(); + let decoded = serde_json::from_str(&json_string).unwrap(); + assert_eq!(uref, decoded); + } + + #[test] + fn should_disable_access_rights() { + let mut uref = URef::new([255; 32], AccessRights::READ_ADD_WRITE); + assert!(uref.is_writeable()); + uref.disable_access_rights(AccessRights::WRITE); + assert_eq!(uref.access_rights(), AccessRights::READ_ADD); + + uref.disable_access_rights(AccessRights::WRITE); + assert!( + !uref.is_writeable(), + "Disabling access bit twice should be a noop" + ); + + 
assert_eq!(uref.access_rights(), AccessRights::READ_ADD); + + uref.disable_access_rights(AccessRights::READ_ADD); + assert_eq!(uref.access_rights(), AccessRights::NONE); + + uref.disable_access_rights(AccessRights::READ_ADD); + assert_eq!(uref.access_rights(), AccessRights::NONE); + + uref.disable_access_rights(AccessRights::NONE); + assert_eq!(uref.access_rights(), AccessRights::NONE); + } +} diff --git a/casper_types/tests/version_numbers.rs b/casper_types/tests/version_numbers.rs new file mode 100644 index 00000000..5787cf50 --- /dev/null +++ b/casper_types/tests/version_numbers.rs @@ -0,0 +1,5 @@ +#[cfg(feature = "version-sync")] +#[test] +fn test_html_root_url() { + version_sync::assert_html_root_url_updated!("src/lib.rs"); +} diff --git a/casper_types_ver_2_0/CHANGELOG.md b/casper_types_ver_2_0/CHANGELOG.md new file mode 100644 index 00000000..a50736b6 --- /dev/null +++ b/casper_types_ver_2_0/CHANGELOG.md @@ -0,0 +1,204 @@ +# Changelog + +All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog]. + +[comment]: <> (Added: new features) +[comment]: <> (Changed: changes in existing functionality) +[comment]: <> (Deprecated: soon-to-be removed features) +[comment]: <> (Removed: now removed features) +[comment]: <> (Fixed: any bug fixes) +[comment]: <> (Security: in case of vulnerabilities) + + + +## [Unreleased] (node 2.0) + +### Added +* Add new `EntryPointType::Install`, `EntryPointType::Normal`, `EntryPointAccess::Abstract` variants to support implementation of a factory pattern. + + + +## [Unreleased] (node 1.5.4) + +### Added +* Add a new `SyncHandling` enum, which allows a node to opt out of historical sync. + +### Changed +* Update `k256` to version 0.13.1. 
+ +### Security +* Update `ed25519-dalek` to version 2.0.0 as mitigation for [RUSTSEC-2022-0093](https://rustsec.org/advisories/RUSTSEC-2022-0093) + + + +## 3.0.0 + +### Added +* Add new `bytesrepr::Error::NotRepresentable` error variant that represents values that are not representable by the serialization format. +* Add new `Key::Unbond` key variant under which the new unbonding information (to support redelegation) is written. +* Add new `Key::ChainspecRegistry` key variant under which the `ChainspecRegistry` is written. +* Add new `Key::ChecksumRegistry` key variant under which a registry of checksums for a given block is written. There are two checksums in the registry, one for the execution results and the other for the approvals of all deploys in the block. +* Add new `StoredValue::Unbonding` variant to support redelegating. +* Add a new type `WithdrawPurses` which is meant to represent `UnbondingPurses` as they exist in current live networks. + +### Changed +* Extend `UnbondingPurse` to take a new field `new_validator` which represents the validator to whom tokens will be re-delegated. +* Increase `DICTIONARY_ITEM_KEY_MAX_LENGTH` to 128. +* Change prefix of formatted string representation of `ContractPackageHash` from "contract-package-wasm" to "contract-package-". Parsing from the old format is still supported. +* Apply `#[non_exhaustive]` to error enums. +* Change Debug output of `DeployHash` to hex-encoded string rather than a list of integers. + +### Fixed +* Fix some integer casts, where failure is now detected and reported via new error variant `NotRepresentable`. + + + +## 2.0.0 + +### Fixed +* Republish v1.6.0 as v2.0.0 due to missed breaking change in API (addition of new variant to `Key`). + + + +## 1.6.0 [YANKED] + +### Added +* Extend asymmetric key functionality, available via feature `std` (moved from `casper-nodes` crate). 
+* Provide `Timestamp` and `TimeDiff` types for time operations, with extended functionality available via feature `std` (moved from `casper-nodes` crate). +* Provide test-only functionality, in particular a seedable RNG `TestRng` which outputs its seed on test failure. Available via a new feature `testing`. +* Add new `Key::EraSummary` key variant under which the era summary info is written on each switch block execution. + +### Deprecated +* Deprecate `gens` feature: its functionality is included in the new `testing` feature. + + + +## 1.5.0 + +### Added +* Provide types and functionality to support improved access control inside execution engine. +* Provide `CLTyped` impl for `ContractPackage` to allow it to be passed into contracts. + +### Fixed +* Limit parsing of CLTyped objects to a maximum of 50 types deep. + + + +## 1.4.6 - 2021-12-29 + +### Changed +* Disable checksummed-hex encoding, but leave checksummed-hex decoding in place. + + + +## 1.4.5 - 2021-12-06 + +### Added +* Add function to `auction::MintProvider` trait to support minting into an existing purse. + +### Changed +* Change checksummed hex implementation to use 32 byte rather than 64 byte blake2b digests. + + + +## [1.4.4] - 2021-11-18 + +### Fixed +* Revert the accidental change to the `std` feature causing a broken build when this feature is enabled. + + + +## [1.4.3] - 2021-11-17 [YANKED] + + + +## [1.4.2] - 2021-11-13 [YANKED] + +### Added +* Add checksummed hex encoding following a scheme similar to [EIP-55](https://eips.ethereum.org/EIPS/eip-55). + + + +## [1.4.1] - 2021-10-23 + +No changes. + + + +## [1.4.0] - 2021-10-21 [YANKED] + +### Added +* Add `json-schema` feature, disabled by default, to enable many types to be used to produce JSON-schema data. +* Add implicit `datasize` feature, disabled by default, to enable many types to derive the `DataSize` trait. +* Add `StoredValue` types to this crate. + +### Changed +* Support building and testing using stable Rust. 
+* Allow longer hex string to be presented in `json` files. Current maximum is increased from 100 to 150 characters. +* Improve documentation and `Debug` impls for `ApiError`. + +### Deprecated +* Feature `std` is deprecated as it is now a no-op, since there is no benefit to linking the std lib via this crate. + + + +## [1.3.0] - 2021-07-19 + +### Changed +* Restrict summarization when JSON pretty-printing to contiguous long hex strings. +* Update pinned version of Rust to `nightly-2021-06-17`. + +### Removed +* Remove ability to clone `SecretKey`s. + + + +## [1.2.0] - 2021-05-27 + +### Changed +* Change to Apache 2.0 license. +* Return a `Result` from the constructor of `SecretKey` rather than potentially panicking. +* Improve `Key` error reporting and tests. + +### Fixed +* Fix `Key` deserialization. + + + +## [1.1.1] - 2021-04-19 + +No changes. + + + +## [1.1.0] - 2021-04-13 [YANKED] + +No changes. + + + +## [1.0.1] - 2021-04-08 + +No changes. + + + +## [1.0.0] - 2021-03-30 + +### Added +* Initial release of types for use by software compatible with Casper mainnet. 
+ + + +[Keep a Changelog]: https://keepachangelog.com/en/1.0.0 +[unreleased]: https://github.com/casper-network/casper-node/compare/24fc4027a...dev +[1.4.3]: https://github.com/casper-network/casper-node/compare/2be27b3f5...24fc4027a +[1.4.2]: https://github.com/casper-network/casper-node/compare/v1.4.1...2be27b3f5 +[1.4.1]: https://github.com/casper-network/casper-node/compare/v1.4.0...v1.4.1 +[1.4.0]: https://github.com/casper-network/casper-node/compare/v1.3.0...v1.4.0 +[1.3.0]: https://github.com/casper-network/casper-node/compare/v1.2.0...v1.3.0 +[1.2.0]: https://github.com/casper-network/casper-node/compare/v1.1.1...v1.2.0 +[1.1.1]: https://github.com/casper-network/casper-node/compare/v1.0.1...v1.1.1 +[1.1.0]: https://github.com/casper-network/casper-node/compare/v1.0.1...v1.1.1 +[1.0.1]: https://github.com/casper-network/casper-node/compare/v1.0.0...v1.0.1 +[1.0.0]: https://github.com/casper-network/casper-node/releases/tag/v1.0.0 diff --git a/casper_types_ver_2_0/Cargo.toml b/casper_types_ver_2_0/Cargo.toml new file mode 100644 index 00000000..6e19e08f --- /dev/null +++ b/casper_types_ver_2_0/Cargo.toml @@ -0,0 +1,89 @@ +[package] +name = "casper-types-ver-2_0" +version = "3.0.0" # when updating, also update 'html_root_url' in lib.rs +authors = ["Fraser Hutchison "] +edition = "2018" +description = "Types shared by many casper crates for use on the Casper network." 
+readme = "README.md" +documentation = "https://docs.rs/casper-types" +homepage = "https://casperlabs.io" +repository = "https://github.com/CasperLabs/casper-node/tree/master/types" +license = "Apache-2.0" + +[dependencies] +base16 = { version = "0.2.1", default-features = false, features = ["alloc"] } +base64 = { version = "0.13.0", default-features = false } +bincode = { version = "1.3.1", optional = true } +bitflags = "1" +blake2 = { version = "0.9.0", default-features = false } +datasize = { workspace = true, optional = true } +derive_more = "0.99.17" +derp = { version = "0.0.14", optional = true } +ed25519-dalek = { version = "2.0.0", default-features = false, features = ["alloc", "zeroize"] } +getrandom = { version = "0.2.0", features = ["rdrand"], optional = true } +hex = { version = "0.4.2", default-features = false, features = ["alloc"] } +hex_fmt = "0.3.0" +humantime = { version = "2", optional = true } +itertools = { version = "0.10.3", default-features = false } +k256 = { version = "0.13.1", default-features = false, features = ["ecdsa", "sha256"] } +libc = { version = "0.2.146", optional = true, default-features = false } +num = { version = "0.4.0", default-features = false, features = ["alloc"] } +num-derive = { version = "0.3.0", default-features = false } +num-integer = { version = "0.1.42", default-features = false } +num-rational = { version = "0.4.0", default-features = false, features = ["serde"] } +num-traits = { version = "0.2.10", default-features = false } +once_cell = { workspace = true, optional = true } +pem = { version = "0.8.1", optional = true } +proptest = { version = "1.0.0", optional = true } +proptest-derive = { version = "0.3.0", optional = true } +rand = { version = "0.8.3", default-features = false, features = ["small_rng"] } +rand_pcg = { version = "0.3.0", optional = true } +schemars = { version = "0.8.16", features = ["preserve_order"], optional = true } +serde = { workspace = true, default-features = false, features = 
["alloc", "derive"] } +serde-map-to-array = "1.1.0" +serde_bytes = { version = "0.11.5", default-features = false, features = ["alloc"] } +serde_json = { version = "1.0.59", default-features = false, features = ["alloc"] } +strum = { version = "0.24", features = ["derive"], optional = true } +thiserror = { workspace = true, optional = true } +tracing = { workspace = true, default-features = false } +uint = { version = "0.9.0", default-features = false } +untrusted = { version = "0.7.1", optional = true } +version-sync = { version = "0.9", optional = true } + +[dev-dependencies] +base16 = { version = "0.2.1", features = ["std"] } +bincode = "1.3.1" +criterion = "0.3.5" +derp = "0.0.14" +getrandom = "0.2.0" +humantime = "2" +once_cell = { workspace = true } +openssl = "0.10.55" +pem = "0.8.1" +proptest = "1.0.0" +proptest-attr-macro = "1.0.0" +proptest-derive = "0.3.0" +rand = "0.8.3" +rand_pcg = "0.3.0" +serde_json = "1" +serde_test = "1" +strum = { version = "0.24", features = ["derive"] } +tempfile = "3.4.0" +thiserror = { workspace = true } +untrusted = "0.7.1" + +[features] +json-schema = ["once_cell", "schemars", "serde-map-to-array/json-schema"] +std = ["base16/std", "derp", "getrandom/std", "humantime", "itertools/use_std", "libc", "once_cell", "pem", "serde_json/preserve_order", "thiserror", "untrusted"] +testing = ["proptest", "proptest-derive", "rand/default", "rand_pcg", "strum", "bincode"] +# DEPRECATED - use "testing" instead of "gens". 
+gens = ["testing"] + +[[bench]] +name = "bytesrepr_bench" +harness = false +required-features = ["testing"] + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] diff --git a/casper_types_ver_2_0/README.md b/casper_types_ver_2_0/README.md new file mode 100644 index 00000000..46f14ea2 --- /dev/null +++ b/casper_types_ver_2_0/README.md @@ -0,0 +1,22 @@ +# `casper-types` + +[![LOGO](https://raw.githubusercontent.com/casper-network/casper-node/master/images/casper-association-logo-primary.svg)](https://casper.network/) + +[![Build Status](https://drone-auto-casper-network.casperlabs.io/api/badges/casper-network/casper-node/status.svg?branch=dev)](http://drone-auto-casper-network.casperlabs.io/casper-network/casper-node) +[![Crates.io](https://img.shields.io/crates/v/casper-types)](https://crates.io/crates/casper-types) +[![Documentation](https://docs.rs/casper-types/badge.svg)](https://docs.rs/casper-types) +[![License](https://img.shields.io/badge/license-Apache-blue)](https://github.com/CasperLabs/casper-node/blob/master/LICENSE) + +Types shared by many casper crates for use on the Casper network. + +## `no_std` + +The crate is `no_std` (using the `core` and `alloc` crates) unless any of the following features are enabled: + +* `json-schema` to enable many types to be used to produce JSON-schema data via the [`schemars`](https://crates.io/crates/schemars) crate +* `datasize` to enable many types to derive the [`DataSize`](https://github.com/casperlabs/datasize-rs) trait +* `gens` to enable many types to be produced in accordance with [`proptest`](https://crates.io/crates/proptest) usage for consumption within dependee crates' property testing suites + +## License + +Licensed under the [Apache License Version 2.0](https://github.com/casper-network/casper-node/blob/master/LICENSE). 
diff --git a/casper_types_ver_2_0/benches/bytesrepr_bench.rs b/casper_types_ver_2_0/benches/bytesrepr_bench.rs new file mode 100644 index 00000000..491cecba --- /dev/null +++ b/casper_types_ver_2_0/benches/bytesrepr_bench.rs @@ -0,0 +1,872 @@ +use criterion::{black_box, criterion_group, criterion_main, Bencher, Criterion}; + +use std::{ + collections::{BTreeMap, BTreeSet}, + iter, +}; + +use casper_types_ver_2_0::{ + account::AccountHash, + addressable_entity::{ + ActionThresholds, AddressableEntity, AssociatedKeys, MessageTopics, NamedKeys, + }, + bytesrepr::{self, Bytes, FromBytes, ToBytes}, + package::{PackageKind, PackageStatus}, + system::auction::{Bid, Delegator, EraInfo, SeigniorageAllocation}, + AccessRights, AddressableEntityHash, ByteCodeHash, CLType, CLTyped, CLValue, DeployHash, + DeployInfo, EntityVersionKey, EntityVersions, EntryPoint, EntryPointAccess, EntryPointType, + EntryPoints, Group, Groups, Key, Package, PackageHash, Parameter, ProtocolVersion, PublicKey, + SecretKey, Transfer, TransferAddr, URef, KEY_HASH_LENGTH, TRANSFER_ADDR_LENGTH, U128, U256, + U512, UREF_ADDR_LENGTH, +}; + +static KB: usize = 1024; +static BATCH: usize = 4 * KB; + +const TEST_I32: i32 = 123_456_789; +const TEST_U128: U128 = U128([123_456_789, 0]); +const TEST_U256: U256 = U256([123_456_789, 0, 0, 0]); +const TEST_U512: U512 = U512([123_456_789, 0, 0, 0, 0, 0, 0, 0]); +const TEST_STR_1: &str = "String One"; +const TEST_STR_2: &str = "String Two"; + +fn prepare_vector(size: usize) -> Vec { + (0..size as i32).collect() +} + +fn serialize_vector_of_i32s(b: &mut Bencher) { + let data = prepare_vector(black_box(BATCH)); + b.iter(|| data.to_bytes()); +} + +fn deserialize_vector_of_i32s(b: &mut Bencher) { + let data = prepare_vector(black_box(BATCH)).to_bytes().unwrap(); + b.iter(|| { + let (res, _rem): (Vec, _) = FromBytes::from_bytes(&data).unwrap(); + res + }); +} + +fn serialize_vector_of_u8(b: &mut Bencher) { + // 0, 1, ... 254, 255, 0, 1, ... 
+ let data: Bytes = prepare_vector(BATCH) + .into_iter() + .map(|value| value as u8) + .collect(); + b.iter(|| ToBytes::to_bytes(black_box(&data))); +} + +fn deserialize_vector_of_u8(b: &mut Bencher) { + // 0, 1, ... 254, 255, 0, 1, ... + let data: Vec = prepare_vector(BATCH) + .into_iter() + .map(|value| value as u8) + .collect::() + .to_bytes() + .unwrap(); + b.iter(|| Bytes::from_bytes(black_box(&data))) +} + +fn serialize_u8(b: &mut Bencher) { + b.iter(|| ToBytes::to_bytes(black_box(&129u8))); +} + +fn deserialize_u8(b: &mut Bencher) { + b.iter(|| u8::from_bytes(black_box(&[129u8]))); +} + +fn serialize_i32(b: &mut Bencher) { + b.iter(|| ToBytes::to_bytes(black_box(&1_816_142_132i32))); +} + +fn deserialize_i32(b: &mut Bencher) { + b.iter(|| i32::from_bytes(black_box(&[0x34, 0x21, 0x40, 0x6c]))); +} + +fn serialize_u64(b: &mut Bencher) { + b.iter(|| ToBytes::to_bytes(black_box(&14_157_907_845_468_752_670u64))); +} + +fn deserialize_u64(b: &mut Bencher) { + b.iter(|| u64::from_bytes(black_box(&[0x1e, 0x8b, 0xe1, 0x73, 0x2c, 0xfe, 0x7a, 0xc4]))); +} + +fn serialize_some_u64(b: &mut Bencher) { + let data = Some(14_157_907_845_468_752_670u64); + + b.iter(|| ToBytes::to_bytes(black_box(&data))); +} + +fn deserialize_some_u64(b: &mut Bencher) { + let data = Some(14_157_907_845_468_752_670u64); + let data = data.to_bytes().unwrap(); + + b.iter(|| Option::::from_bytes(&data)); +} + +fn serialize_none_u64(b: &mut Bencher) { + let data: Option = None; + + b.iter(|| ToBytes::to_bytes(black_box(&data))); +} + +fn deserialize_ok_u64(b: &mut Bencher) { + let data: Option = None; + let data = data.to_bytes().unwrap(); + b.iter(|| Option::::from_bytes(&data)); +} + +fn make_test_vec_of_vec8() -> Vec { + (0..4) + .map(|_v| { + // 0, 1, 2, ..., 254, 255 + let inner_vec = iter::repeat_with(|| 0..255u8) + .flatten() + // 4 times to create 4x 1024 bytes + .take(4) + .collect::>(); + Bytes::from(inner_vec) + }) + .collect() +} + +fn serialize_vector_of_vector_of_u8(b: &mut Bencher) 
{ + let data = make_test_vec_of_vec8(); + b.iter(|| data.to_bytes()); +} + +fn deserialize_vector_of_vector_of_u8(b: &mut Bencher) { + let data = make_test_vec_of_vec8().to_bytes().unwrap(); + b.iter(|| Vec::::from_bytes(black_box(&data))); +} + +fn serialize_tree_map(b: &mut Bencher) { + let data = { + let mut res = BTreeMap::new(); + res.insert("asdf".to_string(), "zxcv".to_string()); + res.insert("qwer".to_string(), "rewq".to_string()); + res.insert("1234".to_string(), "5678".to_string()); + res + }; + + b.iter(|| ToBytes::to_bytes(black_box(&data))); +} + +fn deserialize_treemap(b: &mut Bencher) { + let data = { + let mut res = BTreeMap::new(); + res.insert("asdf".to_string(), "zxcv".to_string()); + res.insert("qwer".to_string(), "rewq".to_string()); + res.insert("1234".to_string(), "5678".to_string()); + res + }; + let data = data.to_bytes().unwrap(); + b.iter(|| BTreeMap::::from_bytes(black_box(&data))); +} + +fn serialize_string(b: &mut Bencher) { + let lorem = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua."; + let data = lorem.to_string(); + b.iter(|| ToBytes::to_bytes(black_box(&data))); +} + +fn deserialize_string(b: &mut Bencher) { + let lorem = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua."; + let data = lorem.to_bytes().unwrap(); + b.iter(|| String::from_bytes(&data)); +} + +fn serialize_vec_of_string(b: &mut Bencher) { + let lorem = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.".to_string(); + let array_of_lorem: Vec = lorem.split(' ').map(Into::into).collect(); + let data = array_of_lorem; + b.iter(|| ToBytes::to_bytes(black_box(&data))); +} + +fn deserialize_vec_of_string(b: &mut Bencher) { + let lorem = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore 
magna aliqua.".to_string(); + let array_of_lorem: Vec = lorem.split(' ').map(Into::into).collect(); + let data = array_of_lorem.to_bytes().unwrap(); + + b.iter(|| Vec::::from_bytes(&data)); +} + +fn serialize_unit(b: &mut Bencher) { + b.iter(|| ToBytes::to_bytes(black_box(&()))) +} + +fn deserialize_unit(b: &mut Bencher) { + let data = ().to_bytes().unwrap(); + + b.iter(|| <()>::from_bytes(&data)) +} + +fn serialize_key_account(b: &mut Bencher) { + let account = Key::Account(AccountHash::new([0u8; 32])); + + b.iter(|| ToBytes::to_bytes(black_box(&account))) +} + +fn deserialize_key_account(b: &mut Bencher) { + let account = Key::Account(AccountHash::new([0u8; 32])); + let account_bytes = account.to_bytes().unwrap(); + + b.iter(|| Key::from_bytes(black_box(&account_bytes))) +} + +fn serialize_key_hash(b: &mut Bencher) { + let hash = Key::Hash([0u8; 32]); + b.iter(|| ToBytes::to_bytes(black_box(&hash))) +} + +fn deserialize_key_hash(b: &mut Bencher) { + let hash = Key::Hash([0u8; 32]); + let hash_bytes = hash.to_bytes().unwrap(); + + b.iter(|| Key::from_bytes(black_box(&hash_bytes))) +} + +fn serialize_key_uref(b: &mut Bencher) { + let uref = Key::URef(URef::new([0u8; 32], AccessRights::ADD_WRITE)); + b.iter(|| ToBytes::to_bytes(black_box(&uref))) +} + +fn deserialize_key_uref(b: &mut Bencher) { + let uref = Key::URef(URef::new([0u8; 32], AccessRights::ADD_WRITE)); + let uref_bytes = uref.to_bytes().unwrap(); + + b.iter(|| Key::from_bytes(black_box(&uref_bytes))) +} + +fn serialize_vec_of_keys(b: &mut Bencher) { + let keys: Vec = (0..32) + .map(|i| Key::URef(URef::new([i; 32], AccessRights::ADD_WRITE))) + .collect(); + b.iter(|| ToBytes::to_bytes(black_box(&keys))) +} + +fn deserialize_vec_of_keys(b: &mut Bencher) { + let keys: Vec = (0..32) + .map(|i| Key::URef(URef::new([i; 32], AccessRights::ADD_WRITE))) + .collect(); + let keys_bytes = keys.to_bytes().unwrap(); + b.iter(|| Vec::::from_bytes(black_box(&keys_bytes))); +} + +fn serialize_access_rights_read(b: &mut 
Bencher) { + b.iter(|| AccessRights::READ.to_bytes()); +} + +fn deserialize_access_rights_read(b: &mut Bencher) { + let data = AccessRights::READ.to_bytes().unwrap(); + b.iter(|| AccessRights::from_bytes(&data)); +} + +fn serialize_access_rights_write(b: &mut Bencher) { + b.iter(|| AccessRights::WRITE.to_bytes()); +} + +fn deserialize_access_rights_write(b: &mut Bencher) { + let data = AccessRights::WRITE.to_bytes().unwrap(); + b.iter(|| AccessRights::from_bytes(&data)); +} + +fn serialize_access_rights_add(b: &mut Bencher) { + b.iter(|| AccessRights::ADD.to_bytes()); +} + +fn deserialize_access_rights_add(b: &mut Bencher) { + let data = AccessRights::ADD.to_bytes().unwrap(); + b.iter(|| AccessRights::from_bytes(&data)); +} + +fn serialize_access_rights_read_add(b: &mut Bencher) { + b.iter(|| AccessRights::READ_ADD.to_bytes()); +} + +fn deserialize_access_rights_read_add(b: &mut Bencher) { + let data = AccessRights::READ_ADD.to_bytes().unwrap(); + b.iter(|| AccessRights::from_bytes(&data)); +} + +fn serialize_access_rights_read_write(b: &mut Bencher) { + b.iter(|| AccessRights::READ_WRITE.to_bytes()); +} + +fn deserialize_access_rights_read_write(b: &mut Bencher) { + let data = AccessRights::READ_WRITE.to_bytes().unwrap(); + b.iter(|| AccessRights::from_bytes(&data)); +} + +fn serialize_access_rights_add_write(b: &mut Bencher) { + b.iter(|| AccessRights::ADD_WRITE.to_bytes()); +} + +fn deserialize_access_rights_add_write(b: &mut Bencher) { + let data = AccessRights::ADD_WRITE.to_bytes().unwrap(); + b.iter(|| AccessRights::from_bytes(&data)); +} + +fn serialize_cl_value(raw_value: T) -> Vec { + CLValue::from_t(raw_value) + .expect("should create CLValue") + .to_bytes() + .expect("should serialize CLValue") +} + +fn benchmark_deserialization(b: &mut Bencher, raw_value: T) { + let serialized_value = serialize_cl_value(raw_value); + b.iter(|| { + let cl_value: CLValue = bytesrepr::deserialize_from_slice(&serialized_value).unwrap(); + let _raw_value: T = 
cl_value.into_t().unwrap(); + }); +} + +fn serialize_cl_value_int32(b: &mut Bencher) { + b.iter(|| serialize_cl_value(TEST_I32)); +} + +fn deserialize_cl_value_int32(b: &mut Bencher) { + benchmark_deserialization(b, TEST_I32); +} + +fn serialize_cl_value_uint128(b: &mut Bencher) { + b.iter(|| serialize_cl_value(TEST_U128)); +} + +fn deserialize_cl_value_uint128(b: &mut Bencher) { + benchmark_deserialization(b, TEST_U128); +} + +fn serialize_cl_value_uint256(b: &mut Bencher) { + b.iter(|| serialize_cl_value(TEST_U256)); +} + +fn deserialize_cl_value_uint256(b: &mut Bencher) { + benchmark_deserialization(b, TEST_U256); +} + +fn serialize_cl_value_uint512(b: &mut Bencher) { + b.iter(|| serialize_cl_value(TEST_U512)); +} + +fn deserialize_cl_value_uint512(b: &mut Bencher) { + benchmark_deserialization(b, TEST_U512); +} + +fn serialize_cl_value_bytearray(b: &mut Bencher) { + b.iter_with_setup( + || { + let vec: Vec = (0..255).collect(); + Bytes::from(vec) + }, + serialize_cl_value, + ); +} + +fn deserialize_cl_value_bytearray(b: &mut Bencher) { + let vec = (0..255).collect::>(); + let bytes: Bytes = vec.into(); + benchmark_deserialization(b, bytes); +} + +fn serialize_cl_value_listint32(b: &mut Bencher) { + b.iter(|| serialize_cl_value((0..1024).collect::>())); +} + +fn deserialize_cl_value_listint32(b: &mut Bencher) { + benchmark_deserialization(b, (0..1024).collect::>()); +} + +fn serialize_cl_value_string(b: &mut Bencher) { + b.iter(|| serialize_cl_value(TEST_STR_1.to_string())); +} + +fn deserialize_cl_value_string(b: &mut Bencher) { + benchmark_deserialization(b, TEST_STR_1.to_string()); +} + +fn serialize_cl_value_liststring(b: &mut Bencher) { + b.iter(|| serialize_cl_value(vec![TEST_STR_1.to_string(), TEST_STR_2.to_string()])); +} + +fn deserialize_cl_value_liststring(b: &mut Bencher) { + benchmark_deserialization(b, vec![TEST_STR_1.to_string(), TEST_STR_2.to_string()]); +} + +fn serialize_cl_value_namedkey(b: &mut Bencher) { + b.iter(|| { + serialize_cl_value(( 
+ TEST_STR_1.to_string(), + Key::Account(AccountHash::new([0xffu8; 32])), + )) + }); +} + +fn deserialize_cl_value_namedkey(b: &mut Bencher) { + benchmark_deserialization( + b, + ( + TEST_STR_1.to_string(), + Key::Account(AccountHash::new([0xffu8; 32])), + ), + ); +} + +fn serialize_u128(b: &mut Bencher) { + let num_u128 = U128::default(); + b.iter(|| ToBytes::to_bytes(black_box(&num_u128))) +} + +fn deserialize_u128(b: &mut Bencher) { + let num_u128 = U128::default(); + let num_u128_bytes = num_u128.to_bytes().unwrap(); + + b.iter(|| U128::from_bytes(black_box(&num_u128_bytes))) +} + +fn serialize_u256(b: &mut Bencher) { + let num_u256 = U256::default(); + b.iter(|| ToBytes::to_bytes(black_box(&num_u256))) +} + +fn deserialize_u256(b: &mut Bencher) { + let num_u256 = U256::default(); + let num_u256_bytes = num_u256.to_bytes().unwrap(); + + b.iter(|| U256::from_bytes(black_box(&num_u256_bytes))) +} + +fn serialize_u512(b: &mut Bencher) { + let num_u512 = U512::default(); + b.iter(|| ToBytes::to_bytes(black_box(&num_u512))) +} + +fn deserialize_u512(b: &mut Bencher) { + let num_u512 = U512::default(); + let num_u512_bytes = num_u512.to_bytes().unwrap(); + + b.iter(|| U512::from_bytes(black_box(&num_u512_bytes))) +} + +fn serialize_contract(b: &mut Bencher) { + let contract = sample_contract(10, 10); + b.iter(|| ToBytes::to_bytes(black_box(&contract))); +} + +fn deserialize_contract(b: &mut Bencher) { + let contract = sample_contract(10, 10); + let contract_bytes = AddressableEntity::to_bytes(&contract).unwrap(); + b.iter(|| AddressableEntity::from_bytes(black_box(&contract_bytes)).unwrap()); +} + +fn sample_named_keys(len: u8) -> NamedKeys { + NamedKeys::from( + (0..len) + .map(|i| { + ( + format!("named-key-{}", i), + Key::Account(AccountHash::default()), + ) + }) + .collect::>(), + ) +} + +fn sample_contract(named_keys_len: u8, entry_points_len: u8) -> AddressableEntity { + let named_keys: NamedKeys = sample_named_keys(named_keys_len); + + let entry_points = { + 
let mut tmp = EntryPoints::new_with_default_entry_point(); + (1..entry_points_len).for_each(|i| { + let args = vec![ + Parameter::new("first", CLType::U32), + Parameter::new("Foo", CLType::U32), + ]; + let entry_point = EntryPoint::new( + format!("test-{}", i), + args, + casper_types_ver_2_0::CLType::U512, + EntryPointAccess::groups(&["Group 2"]), + EntryPointType::AddressableEntity, + ); + tmp.add_entry_point(entry_point); + }); + tmp + }; + + casper_types_ver_2_0::addressable_entity::AddressableEntity::new( + PackageHash::default(), + ByteCodeHash::default(), + named_keys, + entry_points, + ProtocolVersion::default(), + URef::default(), + AssociatedKeys::default(), + ActionThresholds::default(), + MessageTopics::default(), + ) +} + +fn contract_version_key_fn(i: u8) -> EntityVersionKey { + EntityVersionKey::new(i as u32, i as u32) +} + +fn contract_hash_fn(i: u8) -> AddressableEntityHash { + AddressableEntityHash::new([i; KEY_HASH_LENGTH]) +} + +fn sample_map(key_fn: FK, value_fn: FV, count: u8) -> BTreeMap +where + FK: Fn(u8) -> K, + FV: Fn(u8) -> V, +{ + (0..count) + .map(|i| { + let key = key_fn(i); + let value = value_fn(i); + (key, value) + }) + .collect() +} + +fn sample_set(fun: F, count: u8) -> BTreeSet +where + F: Fn(u8) -> K, +{ + (0..count).map(fun).collect() +} + +fn sample_group(i: u8) -> Group { + Group::new(format!("group-{}", i)) +} + +fn sample_uref(i: u8) -> URef { + URef::new([i; UREF_ADDR_LENGTH], AccessRights::all()) +} + +fn sample_contract_package( + contract_versions_len: u8, + disabled_versions_len: u8, + groups_len: u8, +) -> Package { + let access_key = URef::default(); + let versions = EntityVersions::from(sample_map( + contract_version_key_fn, + contract_hash_fn, + contract_versions_len, + )); + let disabled_versions = sample_set(contract_version_key_fn, disabled_versions_len); + let groups = Groups::from(sample_map( + sample_group, + |_| sample_set(sample_uref, 3), + groups_len, + )); + + Package::new( + access_key, + versions, + 
disabled_versions, + groups, + PackageStatus::Locked, + PackageKind::SmartContract, + ) +} + +fn serialize_contract_package(b: &mut Bencher) { + let contract = sample_contract_package(5, 1, 5); + b.iter(|| Package::to_bytes(black_box(&contract))); +} + +fn deserialize_contract_package(b: &mut Bencher) { + let contract_package = sample_contract_package(5, 1, 5); + let contract_bytes = Package::to_bytes(&contract_package).unwrap(); + b.iter(|| Package::from_bytes(black_box(&contract_bytes)).unwrap()); +} + +fn u32_to_pk(i: u32) -> PublicKey { + let mut sk_bytes = [0u8; 32]; + U256::from(i).to_big_endian(&mut sk_bytes); + let sk = SecretKey::ed25519_from_bytes(sk_bytes).unwrap(); + PublicKey::from(&sk) +} + +fn sample_delegators(delegators_len: u32) -> Vec { + (0..delegators_len) + .map(|i| { + let delegator_pk = u32_to_pk(i); + let staked_amount = U512::from_dec_str("123123123123123").unwrap(); + let bonding_purse = URef::default(); + let validator_pk = u32_to_pk(i); + Delegator::unlocked(delegator_pk, staked_amount, bonding_purse, validator_pk) + }) + .collect() +} + +fn sample_bid(delegators_len: u32) -> Bid { + let validator_public_key = PublicKey::System; + let bonding_purse = URef::default(); + let staked_amount = U512::from_dec_str("123123123123123").unwrap(); + let delegation_rate = 10u8; + let mut bid = Bid::unlocked( + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + ); + let new_delegators = sample_delegators(delegators_len); + + let curr_delegators = bid.delegators_mut(); + for delegator in new_delegators.into_iter() { + assert!(curr_delegators + .insert(delegator.delegator_public_key().clone(), delegator) + .is_none()); + } + bid +} + +fn serialize_bid(delegators_len: u32, b: &mut Bencher) { + let bid = sample_bid(delegators_len); + b.iter(|| Bid::to_bytes(black_box(&bid))); +} + +fn deserialize_bid(delegators_len: u32, b: &mut Bencher) { + let bid = sample_bid(delegators_len); + let bid_bytes = Bid::to_bytes(&bid).unwrap(); + 
b.iter(|| Bid::from_bytes(black_box(&bid_bytes))); +} + +fn sample_transfer() -> Transfer { + Transfer::new( + DeployHash::default(), + AccountHash::default(), + None, + URef::default(), + URef::default(), + U512::MAX, + U512::from_dec_str("123123123123").unwrap(), + Some(1u64), + ) +} + +fn serialize_transfer(b: &mut Bencher) { + let transfer = sample_transfer(); + b.iter(|| Transfer::to_bytes(&transfer)); +} + +fn deserialize_transfer(b: &mut Bencher) { + let transfer = sample_transfer(); + let transfer_bytes = transfer.to_bytes().unwrap(); + b.iter(|| Transfer::from_bytes(&transfer_bytes)); +} + +fn sample_deploy_info(transfer_len: u16) -> DeployInfo { + let transfers = (0..transfer_len) + .map(|i| { + let mut tmp = [0u8; TRANSFER_ADDR_LENGTH]; + U256::from(i).to_little_endian(&mut tmp); + TransferAddr::new(tmp) + }) + .collect::>(); + DeployInfo::new( + DeployHash::default(), + &transfers, + AccountHash::default(), + URef::default(), + U512::MAX, + ) +} + +fn serialize_deploy_info(b: &mut Bencher) { + let deploy_info = sample_deploy_info(1000); + b.iter(|| DeployInfo::to_bytes(&deploy_info)); +} + +fn deserialize_deploy_info(b: &mut Bencher) { + let deploy_info = sample_deploy_info(1000); + let deploy_bytes = deploy_info.to_bytes().unwrap(); + b.iter(|| DeployInfo::from_bytes(&deploy_bytes)); +} + +fn sample_era_info(delegators_len: u32) -> EraInfo { + let mut base = EraInfo::new(); + let delegations = (0..delegators_len).map(|i| { + let pk = u32_to_pk(i); + SeigniorageAllocation::delegator(pk.clone(), pk, U512::MAX) + }); + base.seigniorage_allocations_mut().extend(delegations); + base +} + +fn serialize_era_info(delegators_len: u32, b: &mut Bencher) { + let era_info = sample_era_info(delegators_len); + b.iter(|| EraInfo::to_bytes(&era_info)); +} + +fn deserialize_era_info(delegators_len: u32, b: &mut Bencher) { + let era_info = sample_era_info(delegators_len); + let era_info_bytes = era_info.to_bytes().unwrap(); + b.iter(|| 
EraInfo::from_bytes(&era_info_bytes)); +} + +fn bytesrepr_bench(c: &mut Criterion) { + c.bench_function("serialize_vector_of_i32s", serialize_vector_of_i32s); + c.bench_function("deserialize_vector_of_i32s", deserialize_vector_of_i32s); + c.bench_function("serialize_vector_of_u8", serialize_vector_of_u8); + c.bench_function("deserialize_vector_of_u8", deserialize_vector_of_u8); + c.bench_function("serialize_u8", serialize_u8); + c.bench_function("deserialize_u8", deserialize_u8); + c.bench_function("serialize_i32", serialize_i32); + c.bench_function("deserialize_i32", deserialize_i32); + c.bench_function("serialize_u64", serialize_u64); + c.bench_function("deserialize_u64", deserialize_u64); + c.bench_function("serialize_some_u64", serialize_some_u64); + c.bench_function("deserialize_some_u64", deserialize_some_u64); + c.bench_function("serialize_none_u64", serialize_none_u64); + c.bench_function("deserialize_ok_u64", deserialize_ok_u64); + c.bench_function( + "serialize_vector_of_vector_of_u8", + serialize_vector_of_vector_of_u8, + ); + c.bench_function( + "deserialize_vector_of_vector_of_u8", + deserialize_vector_of_vector_of_u8, + ); + c.bench_function("serialize_tree_map", serialize_tree_map); + c.bench_function("deserialize_treemap", deserialize_treemap); + c.bench_function("serialize_string", serialize_string); + c.bench_function("deserialize_string", deserialize_string); + c.bench_function("serialize_vec_of_string", serialize_vec_of_string); + c.bench_function("deserialize_vec_of_string", deserialize_vec_of_string); + c.bench_function("serialize_unit", serialize_unit); + c.bench_function("deserialize_unit", deserialize_unit); + c.bench_function("serialize_key_account", serialize_key_account); + c.bench_function("deserialize_key_account", deserialize_key_account); + c.bench_function("serialize_key_hash", serialize_key_hash); + c.bench_function("deserialize_key_hash", deserialize_key_hash); + c.bench_function("serialize_key_uref", serialize_key_uref); + 
c.bench_function("deserialize_key_uref", deserialize_key_uref); + c.bench_function("serialize_vec_of_keys", serialize_vec_of_keys); + c.bench_function("deserialize_vec_of_keys", deserialize_vec_of_keys); + c.bench_function("serialize_access_rights_read", serialize_access_rights_read); + c.bench_function( + "deserialize_access_rights_read", + deserialize_access_rights_read, + ); + c.bench_function( + "serialize_access_rights_write", + serialize_access_rights_write, + ); + c.bench_function( + "deserialize_access_rights_write", + deserialize_access_rights_write, + ); + c.bench_function("serialize_access_rights_add", serialize_access_rights_add); + c.bench_function( + "deserialize_access_rights_add", + deserialize_access_rights_add, + ); + c.bench_function( + "serialize_access_rights_read_add", + serialize_access_rights_read_add, + ); + c.bench_function( + "deserialize_access_rights_read_add", + deserialize_access_rights_read_add, + ); + c.bench_function( + "serialize_access_rights_read_write", + serialize_access_rights_read_write, + ); + c.bench_function( + "deserialize_access_rights_read_write", + deserialize_access_rights_read_write, + ); + c.bench_function( + "serialize_access_rights_add_write", + serialize_access_rights_add_write, + ); + c.bench_function( + "deserialize_access_rights_add_write", + deserialize_access_rights_add_write, + ); + c.bench_function("serialize_cl_value_int32", serialize_cl_value_int32); + c.bench_function("deserialize_cl_value_int32", deserialize_cl_value_int32); + c.bench_function("serialize_cl_value_uint128", serialize_cl_value_uint128); + c.bench_function("deserialize_cl_value_uint128", deserialize_cl_value_uint128); + c.bench_function("serialize_cl_value_uint256", serialize_cl_value_uint256); + c.bench_function("deserialize_cl_value_uint256", deserialize_cl_value_uint256); + c.bench_function("serialize_cl_value_uint512", serialize_cl_value_uint512); + c.bench_function("deserialize_cl_value_uint512", deserialize_cl_value_uint512); + 
c.bench_function("serialize_cl_value_bytearray", serialize_cl_value_bytearray); + c.bench_function( + "deserialize_cl_value_bytearray", + deserialize_cl_value_bytearray, + ); + c.bench_function("serialize_cl_value_listint32", serialize_cl_value_listint32); + c.bench_function( + "deserialize_cl_value_listint32", + deserialize_cl_value_listint32, + ); + c.bench_function("serialize_cl_value_string", serialize_cl_value_string); + c.bench_function("deserialize_cl_value_string", deserialize_cl_value_string); + c.bench_function( + "serialize_cl_value_liststring", + serialize_cl_value_liststring, + ); + c.bench_function( + "deserialize_cl_value_liststring", + deserialize_cl_value_liststring, + ); + c.bench_function("serialize_cl_value_namedkey", serialize_cl_value_namedkey); + c.bench_function( + "deserialize_cl_value_namedkey", + deserialize_cl_value_namedkey, + ); + c.bench_function("serialize_u128", serialize_u128); + c.bench_function("deserialize_u128", deserialize_u128); + c.bench_function("serialize_u256", serialize_u256); + c.bench_function("deserialize_u256", deserialize_u256); + c.bench_function("serialize_u512", serialize_u512); + c.bench_function("deserialize_u512", deserialize_u512); + // c.bench_function("bytesrepr::serialize_account", serialize_account); + // c.bench_function("bytesrepr::deserialize_account", deserialize_account); + c.bench_function("bytesrepr::serialize_contract", serialize_contract); + c.bench_function("bytesrepr::deserialize_contract", deserialize_contract); + c.bench_function( + "bytesrepr::serialize_contract_package", + serialize_contract_package, + ); + c.bench_function( + "bytesrepr::deserialize_contract_package", + deserialize_contract_package, + ); + c.bench_function("bytesrepr::serialize_bid_small", |b| serialize_bid(10, b)); + c.bench_function("bytesrepr::serialize_bid_medium", |b| serialize_bid(100, b)); + c.bench_function("bytesrepr::serialize_bid_big", |b| serialize_bid(1000, b)); + 
c.bench_function("bytesrepr::deserialize_bid_small", |b| { + deserialize_bid(10, b) + }); + c.bench_function("bytesrepr::deserialize_bid_medium", |b| { + deserialize_bid(100, b) + }); + c.bench_function("bytesrepr::deserialize_bid_big", |b| { + deserialize_bid(1000, b) + }); + c.bench_function("bytesrepr::serialize_transfer", serialize_transfer); + c.bench_function("bytesrepr::deserialize_transfer", deserialize_transfer); + c.bench_function("bytesrepr::serialize_deploy_info", serialize_deploy_info); + c.bench_function( + "bytesrepr::deserialize_deploy_info", + deserialize_deploy_info, + ); + c.bench_function("bytesrepr::serialize_era_info", |b| { + serialize_era_info(500, b) + }); + c.bench_function("bytesrepr::deserialize_era_info", |b| { + deserialize_era_info(500, b) + }); +} + +criterion_group!(benches, bytesrepr_bench); +criterion_main!(benches); diff --git a/casper_types_ver_2_0/src/access_rights.rs b/casper_types_ver_2_0/src/access_rights.rs new file mode 100644 index 00000000..dd12ea68 --- /dev/null +++ b/casper_types_ver_2_0/src/access_rights.rs @@ -0,0 +1,421 @@ +// This allow was added so that bitflags! macro won't fail on clippy +#![allow(clippy::bad_bit_mask)] +use alloc::{ + collections::{btree_map::Entry, BTreeMap}, + vec::Vec, +}; +use core::fmt::{self, Display, Formatter}; + +use bitflags::bitflags; +#[cfg(feature = "datasize")] +use datasize::DataSize; +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; + +use crate::{bytesrepr, AddressableEntityHash, URef, URefAddr}; + +/// The number of bytes in a serialized [`AccessRights`]. +pub const ACCESS_RIGHTS_SERIALIZED_LENGTH: usize = 1; + +bitflags! { + /// A struct which behaves like a set of bitflags to define access rights associated with a + /// [`URef`](crate::URef). 
+ + #[cfg_attr(feature = "datasize", derive(DataSize))] + pub struct AccessRights: u8 { + /// No permissions + const NONE = 0; + /// Permission to read the value under the associated `URef`. + const READ = 0b001; + /// Permission to write a value under the associated `URef`. + const WRITE = 0b010; + /// Permission to add to the value under the associated `URef`. + const ADD = 0b100; + /// Permission to read or add to the value under the associated `URef`. + const READ_ADD = Self::READ.bits() | Self::ADD.bits(); + /// Permission to read or write the value under the associated `URef`. + const READ_WRITE = Self::READ.bits() | Self::WRITE.bits(); + /// Permission to add to, or write the value under the associated `URef`. + const ADD_WRITE = Self::ADD.bits() | Self::WRITE.bits(); + /// Permission to read, add to, or write the value under the associated `URef`. + const READ_ADD_WRITE = Self::READ.bits() | Self::ADD.bits() | Self::WRITE.bits(); + } +} + +impl Default for AccessRights { + fn default() -> Self { + AccessRights::NONE + } +} + +impl AccessRights { + /// Returns `true` if the `READ` flag is set. + pub fn is_readable(self) -> bool { + self & AccessRights::READ == AccessRights::READ + } + + /// Returns `true` if the `WRITE` flag is set. + pub fn is_writeable(self) -> bool { + self & AccessRights::WRITE == AccessRights::WRITE + } + + /// Returns `true` if the `ADD` flag is set. + pub fn is_addable(self) -> bool { + self & AccessRights::ADD == AccessRights::ADD + } + + /// Returns `true` if no flags are set. 
+ pub fn is_none(self) -> bool { + self == AccessRights::NONE + } +} + +impl Display for AccessRights { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match *self { + AccessRights::NONE => write!(f, "NONE"), + AccessRights::READ => write!(f, "READ"), + AccessRights::WRITE => write!(f, "WRITE"), + AccessRights::ADD => write!(f, "ADD"), + AccessRights::READ_ADD => write!(f, "READ_ADD"), + AccessRights::READ_WRITE => write!(f, "READ_WRITE"), + AccessRights::ADD_WRITE => write!(f, "ADD_WRITE"), + AccessRights::READ_ADD_WRITE => write!(f, "READ_ADD_WRITE"), + _ => write!(f, "UNKNOWN"), + } + } +} + +impl bytesrepr::ToBytes for AccessRights { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.bits().to_bytes() + } + + fn serialized_length(&self) -> usize { + ACCESS_RIGHTS_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + writer.push(self.bits()); + Ok(()) + } +} + +impl bytesrepr::FromBytes for AccessRights { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (id, rem) = u8::from_bytes(bytes)?; + match AccessRights::from_bits(id) { + Some(rights) => Ok((rights, rem)), + None => Err(bytesrepr::Error::Formatting), + } + } +} + +impl Serialize for AccessRights { + fn serialize(&self, serializer: S) -> Result { + self.bits().serialize(serializer) + } +} + +impl<'de> Deserialize<'de> for AccessRights { + fn deserialize>(deserializer: D) -> Result { + let bits = u8::deserialize(deserializer)?; + AccessRights::from_bits(bits).ok_or_else(|| SerdeError::custom("invalid bits")) + } +} + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> AccessRights { + let mut result = AccessRights::NONE; + if rng.gen() { + result |= AccessRights::READ; + } + if rng.gen() { + result |= AccessRights::WRITE; + } + if rng.gen() { + result |= AccessRights::ADD; + } + result + } +} + +/// Used to indicate if a granted [`URef`] was already held by the context. 
+#[derive(Debug, PartialEq, Eq)] +pub enum GrantedAccess { + /// No new set of access rights were granted. + PreExisting, + /// A new set of access rights were granted. + Granted { + /// The address of the URef. + uref_addr: URefAddr, + /// The set of the newly granted access rights. + newly_granted_access_rights: AccessRights, + }, +} + +/// Access rights for a given runtime context. +#[derive(Debug, PartialEq, Eq)] +pub struct ContextAccessRights { + context_entity_hash: AddressableEntityHash, + access_rights: BTreeMap, +} + +impl ContextAccessRights { + /// Creates a new instance of access rights from an iterator of URefs merging any duplicates, + /// taking the union of their rights. + pub fn new>( + context_entity_hash: AddressableEntityHash, + uref_iter: T, + ) -> Self { + let mut context_access_rights = ContextAccessRights { + context_entity_hash, + access_rights: BTreeMap::new(), + }; + context_access_rights.do_extend(uref_iter); + context_access_rights + } + + /// Returns the current context key. + pub fn context_key(&self) -> AddressableEntityHash { + self.context_entity_hash + } + + /// Extends the current access rights from a given set of URefs. + pub fn extend(&mut self, urefs: &[URef]) { + self.do_extend(urefs.iter().copied()) + } + + /// Extends the current access rights from a given set of URefs. + fn do_extend>(&mut self, uref_iter: T) { + for uref in uref_iter { + match self.access_rights.entry(uref.addr()) { + Entry::Occupied(rights) => { + *rights.into_mut() = rights.get().union(uref.access_rights()); + } + Entry::Vacant(rights) => { + rights.insert(uref.access_rights()); + } + } + } + } + + /// Checks whether given uref has enough access rights. 
+ pub fn has_access_rights_to_uref(&self, uref: &URef) -> bool { + if let Some(known_rights) = self.access_rights.get(&uref.addr()) { + let rights_to_check = uref.access_rights(); + known_rights.contains(rights_to_check) + } else { + // URef is not known + false + } + } + + /// Grants access to a [`URef`]; unless access was pre-existing. + pub fn grant_access(&mut self, uref: URef) -> GrantedAccess { + match self.access_rights.entry(uref.addr()) { + Entry::Occupied(existing_rights) => { + let newly_granted_access_rights = + uref.access_rights().difference(*existing_rights.get()); + *existing_rights.into_mut() = existing_rights.get().union(uref.access_rights()); + if newly_granted_access_rights.is_none() { + GrantedAccess::PreExisting + } else { + GrantedAccess::Granted { + uref_addr: uref.addr(), + newly_granted_access_rights, + } + } + } + Entry::Vacant(rights) => { + rights.insert(uref.access_rights()); + GrantedAccess::Granted { + uref_addr: uref.addr(), + newly_granted_access_rights: uref.access_rights(), + } + } + } + } + + /// Remove access for a given `URef`. 
+ pub fn remove_access(&mut self, uref_addr: URefAddr, access_rights: AccessRights) { + if let Some(current_access_rights) = self.access_rights.get_mut(&uref_addr) { + current_access_rights.remove(access_rights) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::UREF_ADDR_LENGTH; + + const ENTITY_HASH: AddressableEntityHash = AddressableEntityHash::new([1u8; 32]); + const UREF_ADDRESS: [u8; UREF_ADDR_LENGTH] = [1; UREF_ADDR_LENGTH]; + const UREF_NO_PERMISSIONS: URef = URef::new(UREF_ADDRESS, AccessRights::empty()); + const UREF_READ: URef = URef::new(UREF_ADDRESS, AccessRights::READ); + const UREF_ADD: URef = URef::new(UREF_ADDRESS, AccessRights::ADD); + const UREF_WRITE: URef = URef::new(UREF_ADDRESS, AccessRights::WRITE); + const UREF_READ_ADD: URef = URef::new(UREF_ADDRESS, AccessRights::READ_ADD); + const UREF_READ_ADD_WRITE: URef = URef::new(UREF_ADDRESS, AccessRights::READ_ADD_WRITE); + + fn test_readable(right: AccessRights, is_true: bool) { + assert_eq!(right.is_readable(), is_true) + } + + #[test] + fn test_is_readable() { + test_readable(AccessRights::READ, true); + test_readable(AccessRights::READ_ADD, true); + test_readable(AccessRights::READ_WRITE, true); + test_readable(AccessRights::READ_ADD_WRITE, true); + test_readable(AccessRights::ADD, false); + test_readable(AccessRights::ADD_WRITE, false); + test_readable(AccessRights::WRITE, false); + } + + fn test_writable(right: AccessRights, is_true: bool) { + assert_eq!(right.is_writeable(), is_true) + } + + #[test] + fn test_is_writable() { + test_writable(AccessRights::WRITE, true); + test_writable(AccessRights::READ_WRITE, true); + test_writable(AccessRights::ADD_WRITE, true); + test_writable(AccessRights::READ, false); + test_writable(AccessRights::ADD, false); + test_writable(AccessRights::READ_ADD, false); + test_writable(AccessRights::READ_ADD_WRITE, true); + } + + fn test_addable(right: AccessRights, is_true: bool) { + assert_eq!(right.is_addable(), is_true) + } + + #[test] + fn 
test_is_addable() { + test_addable(AccessRights::ADD, true); + test_addable(AccessRights::READ_ADD, true); + test_addable(AccessRights::READ_WRITE, false); + test_addable(AccessRights::ADD_WRITE, true); + test_addable(AccessRights::READ, false); + test_addable(AccessRights::WRITE, false); + test_addable(AccessRights::READ_ADD_WRITE, true); + } + + #[test] + fn should_check_has_access_rights_to_uref() { + let context_rights = ContextAccessRights::new(ENTITY_HASH, vec![UREF_READ_ADD]); + assert!(context_rights.has_access_rights_to_uref(&UREF_READ_ADD)); + assert!(context_rights.has_access_rights_to_uref(&UREF_READ)); + assert!(context_rights.has_access_rights_to_uref(&UREF_ADD)); + assert!(context_rights.has_access_rights_to_uref(&UREF_NO_PERMISSIONS)); + } + + #[test] + fn should_check_does_not_have_access_rights_to_uref() { + let context_rights = ContextAccessRights::new(ENTITY_HASH, vec![UREF_READ_ADD]); + assert!(!context_rights.has_access_rights_to_uref(&UREF_READ_ADD_WRITE)); + assert!(!context_rights + .has_access_rights_to_uref(&URef::new([2; UREF_ADDR_LENGTH], AccessRights::empty()))); + } + + #[test] + fn should_extend_access_rights() { + // Start with uref with no permissions. + let mut context_rights = ContextAccessRights::new(ENTITY_HASH, vec![UREF_NO_PERMISSIONS]); + let mut expected_rights = BTreeMap::new(); + expected_rights.insert(UREF_ADDRESS, AccessRights::empty()); + assert_eq!(context_rights.access_rights, expected_rights); + + // Extend with a READ_ADD: should merge to single READ_ADD. + context_rights.extend(&[UREF_READ_ADD]); + *expected_rights.get_mut(&UREF_ADDRESS).unwrap() = AccessRights::READ_ADD; + assert_eq!(context_rights.access_rights, expected_rights); + + // Extend with a READ: should have no observable effect. + context_rights.extend(&[UREF_READ]); + assert_eq!(context_rights.access_rights, expected_rights); + + // Extend with a WRITE: should merge to single READ_ADD_WRITE. 
+ context_rights.extend(&[UREF_WRITE]); + *expected_rights.get_mut(&UREF_ADDRESS).unwrap() = AccessRights::READ_ADD_WRITE; + assert_eq!(context_rights.access_rights, expected_rights); + } + + #[test] + fn should_perform_union_of_access_rights_in_new() { + let context_rights = + ContextAccessRights::new(ENTITY_HASH, vec![UREF_NO_PERMISSIONS, UREF_READ, UREF_ADD]); + + // Expect the three discrete URefs' rights to be unioned into READ_ADD. + let mut expected_rights = BTreeMap::new(); + expected_rights.insert(UREF_ADDRESS, AccessRights::READ_ADD); + assert_eq!(context_rights.access_rights, expected_rights); + } + + #[test] + fn should_grant_access_rights() { + let mut context_rights = ContextAccessRights::new(ENTITY_HASH, vec![UREF_READ_ADD]); + let granted_access = context_rights.grant_access(UREF_READ); + assert_eq!(granted_access, GrantedAccess::PreExisting); + let granted_access = context_rights.grant_access(UREF_READ_ADD_WRITE); + assert_eq!( + granted_access, + GrantedAccess::Granted { + uref_addr: UREF_ADDRESS, + newly_granted_access_rights: AccessRights::WRITE + } + ); + assert!(context_rights.has_access_rights_to_uref(&UREF_READ_ADD_WRITE)); + let new_uref = URef::new([3; 32], AccessRights::all()); + let granted_access = context_rights.grant_access(new_uref); + assert_eq!( + granted_access, + GrantedAccess::Granted { + uref_addr: new_uref.addr(), + newly_granted_access_rights: AccessRights::all() + } + ); + assert!(context_rights.has_access_rights_to_uref(&new_uref)); + } + + #[test] + fn should_remove_access_rights() { + let mut context_rights = ContextAccessRights::new(ENTITY_HASH, vec![UREF_READ_ADD_WRITE]); + assert!(context_rights.has_access_rights_to_uref(&UREF_READ_ADD_WRITE)); + + // Strip write access from the context rights. 
+ context_rights.remove_access(UREF_ADDRESS, AccessRights::WRITE); + assert!( + !context_rights.has_access_rights_to_uref(&UREF_READ_ADD_WRITE), + "Write access should have been removed" + ); + + // Strip the access again to ensure that the bit is not flipped back. + context_rights.remove_access(UREF_ADDRESS, AccessRights::WRITE); + assert!( + !context_rights.has_access_rights_to_uref(&UREF_READ_ADD_WRITE), + "Write access should not have been granted back" + ); + assert!( + context_rights.has_access_rights_to_uref(&UREF_READ_ADD), + "Read and add access should be preserved." + ); + + // Strip both read and add access from the context rights. + context_rights.remove_access(UREF_ADDRESS, AccessRights::READ_ADD); + assert!( + !context_rights.has_access_rights_to_uref(&UREF_READ_ADD), + "Read and add access should have been removed" + ); + assert!( + context_rights.has_access_rights_to_uref(&UREF_NO_PERMISSIONS), + "The access rights should be empty" + ); + } +} diff --git a/casper_types_ver_2_0/src/account.rs b/casper_types_ver_2_0/src/account.rs new file mode 100644 index 00000000..51641191 --- /dev/null +++ b/casper_types_ver_2_0/src/account.rs @@ -0,0 +1,857 @@ +//! Contains types and constants associated with user accounts. 
+ +mod account_hash; +pub mod action_thresholds; +mod action_type; +pub mod associated_keys; +mod error; +mod weight; + +use serde::{Deserialize, Serialize}; + +use alloc::{collections::BTreeSet, vec::Vec}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use once_cell::sync::Lazy; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; + +pub use self::{ + account_hash::{AccountHash, ACCOUNT_HASH_FORMATTED_STRING_PREFIX, ACCOUNT_HASH_LENGTH}, + action_thresholds::ActionThresholds, + action_type::ActionType, + associated_keys::AssociatedKeys, + error::FromStrError, + weight::Weight, +}; + +use crate::{ + addressable_entity::{ + AddKeyFailure, NamedKeys, RemoveKeyFailure, SetThresholdFailure, UpdateKeyFailure, + }, + bytesrepr::{self, FromBytes, ToBytes}, + crypto, AccessRights, Key, URef, BLAKE2B_DIGEST_LENGTH, +}; +#[cfg(feature = "json-schema")] +use crate::{PublicKey, SecretKey}; + +#[cfg(feature = "json-schema")] +static ACCOUNT: Lazy = Lazy::new(|| { + let secret_key = SecretKey::ed25519_from_bytes([0; 32]).unwrap(); + let account_hash = PublicKey::from(&secret_key).to_account_hash(); + let main_purse = URef::from_formatted_str( + "uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-007", + ) + .unwrap(); + let mut named_keys = NamedKeys::new(); + named_keys.insert("main_purse".to_string(), Key::URef(main_purse)); + let weight = Weight::new(1); + let associated_keys = AssociatedKeys::new(account_hash, weight); + let action_thresholds = ActionThresholds::new(weight, weight).unwrap(); + Account { + account_hash, + named_keys, + main_purse, + associated_keys, + action_thresholds, + } +}); + +/// Represents an Account in the global state. 
+#[derive(PartialEq, Eq, Clone, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct Account { + account_hash: AccountHash, + named_keys: NamedKeys, + main_purse: URef, + associated_keys: AssociatedKeys, + action_thresholds: ActionThresholds, +} + +impl Account { + /// Creates a new account. + pub fn new( + account_hash: AccountHash, + named_keys: NamedKeys, + main_purse: URef, + associated_keys: AssociatedKeys, + action_thresholds: ActionThresholds, + ) -> Self { + Account { + account_hash, + named_keys, + main_purse, + associated_keys, + action_thresholds, + } + } + + /// An Account constructor with presets for associated_keys and action_thresholds. + /// + /// An account created with this method is valid and can be used as the target of a transaction. + /// It will be created with an [`AssociatedKeys`] with a [`Weight`] of 1, and a default + /// [`ActionThresholds`]. + pub fn create(account: AccountHash, named_keys: NamedKeys, main_purse: URef) -> Self { + let associated_keys = AssociatedKeys::new(account, Weight::new(1)); + + let action_thresholds: ActionThresholds = Default::default(); + Account::new( + account, + named_keys, + main_purse, + associated_keys, + action_thresholds, + ) + } + + /// Appends named keys to an account's named_keys field. + pub fn named_keys_append(&mut self, keys: NamedKeys) { + self.named_keys.append(keys); + } + + /// Returns named keys. + pub fn named_keys(&self) -> &NamedKeys { + &self.named_keys + } + + /// Removes the key under the given name from named keys. + pub fn remove_named_key(&mut self, name: &str) -> Option { + self.named_keys.remove(name) + } + + /// Returns account hash. + pub fn account_hash(&self) -> AccountHash { + self.account_hash + } + + /// Returns main purse. 
+ pub fn main_purse(&self) -> URef { + self.main_purse + } + + /// Returns an [`AccessRights::ADD`]-only version of the main purse's [`URef`]. + pub fn main_purse_add_only(&self) -> URef { + URef::new(self.main_purse.addr(), AccessRights::ADD) + } + + /// Returns associated keys. + pub fn associated_keys(&self) -> &AssociatedKeys { + &self.associated_keys + } + + /// Returns action thresholds. + pub fn action_thresholds(&self) -> &ActionThresholds { + &self.action_thresholds + } + + /// Adds an associated key to an account. + pub fn add_associated_key( + &mut self, + account_hash: AccountHash, + weight: Weight, + ) -> Result<(), AddKeyFailure> { + self.associated_keys.add_key(account_hash, weight) + } + + /// Checks if removing given key would properly satisfy thresholds. + fn can_remove_key(&self, account_hash: AccountHash) -> bool { + let total_weight_without = self + .associated_keys + .total_keys_weight_excluding(account_hash); + + // Returns true if the total weight calculated without given public key would be greater or + // equal to all of the thresholds. + total_weight_without >= *self.action_thresholds().deployment() + && total_weight_without >= *self.action_thresholds().key_management() + } + + /// Checks if adding a weight to a sum of all weights excluding the given key would make the + /// resulting value to fall below any of the thresholds on account. + fn can_update_key(&self, account_hash: AccountHash, weight: Weight) -> bool { + // Calculates total weight of all keys excluding the given key + let total_weight = self + .associated_keys + .total_keys_weight_excluding(account_hash); + + // Safely calculate new weight by adding the updated weight + let new_weight = total_weight.value().saturating_add(weight.value()); + + // Returns true if the new weight would be greater or equal to all of + // the thresholds. 
+ new_weight >= self.action_thresholds().deployment().value() + && new_weight >= self.action_thresholds().key_management().value() + } + + /// Removes an associated key from an account. + /// + /// Verifies that removing the key will not cause the remaining weight to fall below any action + /// thresholds. + pub fn remove_associated_key( + &mut self, + account_hash: AccountHash, + ) -> Result<(), RemoveKeyFailure> { + if self.associated_keys.contains_key(&account_hash) { + // Check if removing this weight would fall below thresholds + if !self.can_remove_key(account_hash) { + return Err(RemoveKeyFailure::ThresholdViolation); + } + } + self.associated_keys.remove_key(&account_hash) + } + + /// Updates an associated key. + /// + /// Returns an error if the update would result in a violation of the key management thresholds. + pub fn update_associated_key( + &mut self, + account_hash: AccountHash, + weight: Weight, + ) -> Result<(), UpdateKeyFailure> { + if let Some(current_weight) = self.associated_keys.get(&account_hash) { + if weight < *current_weight { + // New weight is smaller than current weight + if !self.can_update_key(account_hash, weight) { + return Err(UpdateKeyFailure::ThresholdViolation); + } + } + } + self.associated_keys.update_key(account_hash, weight) + } + + /// Sets a new action threshold for a given action type for the account. + /// + /// Returns an error if the new action threshold weight is greater than the total weight of the + /// account's associated keys. + pub fn set_action_threshold( + &mut self, + action_type: ActionType, + weight: Weight, + ) -> Result<(), SetThresholdFailure> { + // Verify if new threshold weight exceeds total weight of all associated + // keys. + self.can_set_threshold(weight)?; + // Set new weight for given action + self.action_thresholds.set_threshold(action_type, weight) + } + + /// Verifies if user can set action threshold. 
+ pub fn can_set_threshold(&self, new_threshold: Weight) -> Result<(), SetThresholdFailure> { + let total_weight = self.associated_keys.total_keys_weight(); + if new_threshold > total_weight { + return Err(SetThresholdFailure::InsufficientTotalWeight); + } + Ok(()) + } + + /// Checks whether all authorization keys are associated with this account. + pub fn can_authorize(&self, authorization_keys: &BTreeSet) -> bool { + !authorization_keys.is_empty() + && authorization_keys + .iter() + .all(|e| self.associated_keys.contains_key(e)) + } + + /// Checks whether the sum of the weights of all authorization keys is + /// greater or equal to deploy threshold. + pub fn can_deploy_with(&self, authorization_keys: &BTreeSet) -> bool { + let total_weight = self + .associated_keys + .calculate_keys_weight(authorization_keys); + + total_weight >= *self.action_thresholds().deployment() + } + + /// Checks whether the sum of the weights of all authorization keys is + /// greater or equal to key management threshold. + pub fn can_manage_keys_with(&self, authorization_keys: &BTreeSet) -> bool { + let total_weight = self + .associated_keys + .calculate_keys_weight(authorization_keys); + + total_weight >= *self.action_thresholds().key_management() + } + + // This method is not intended to be used by third party crates. 
+ #[doc(hidden)] + #[cfg(feature = "json-schema")] + pub fn example() -> &'static Self { + &ACCOUNT + } +} + +impl ToBytes for Account { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + self.account_hash().write_bytes(&mut result)?; + self.named_keys().write_bytes(&mut result)?; + self.main_purse.write_bytes(&mut result)?; + self.associated_keys().write_bytes(&mut result)?; + self.action_thresholds().write_bytes(&mut result)?; + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.account_hash.serialized_length() + + self.named_keys.serialized_length() + + self.main_purse.serialized_length() + + self.associated_keys.serialized_length() + + self.action_thresholds.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.account_hash().write_bytes(writer)?; + self.named_keys().write_bytes(writer)?; + self.main_purse().write_bytes(writer)?; + self.associated_keys().write_bytes(writer)?; + self.action_thresholds().write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for Account { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (account_hash, rem) = AccountHash::from_bytes(bytes)?; + let (named_keys, rem) = NamedKeys::from_bytes(rem)?; + let (main_purse, rem) = URef::from_bytes(rem)?; + let (associated_keys, rem) = AssociatedKeys::from_bytes(rem)?; + let (action_thresholds, rem) = ActionThresholds::from_bytes(rem)?; + Ok(( + Account { + account_hash, + named_keys, + main_purse, + associated_keys, + action_thresholds, + }, + rem, + )) + } +} + +#[doc(hidden)] +#[deprecated( + since = "1.4.4", + note = "function moved to casper_types_ver_2_0::crypto::blake2b" +)] +pub fn blake2b>(data: T) -> [u8; BLAKE2B_DIGEST_LENGTH] { + crypto::blake2b(data) +} + +#[doc(hidden)] +#[cfg(any(feature = "testing", feature = "gens", test))] +pub mod gens { + use proptest::prelude::*; + + use crate::{ + 
account::{associated_keys::gens::account_associated_keys_arb, Account, Weight}, + gens::{account_hash_arb, named_keys_arb, uref_arb}, + }; + + use super::action_thresholds::gens::account_action_thresholds_arb; + + prop_compose! { + pub fn account_arb()( + account_hash in account_hash_arb(), + urefs in named_keys_arb(3), + purse in uref_arb(), + thresholds in account_action_thresholds_arb(), + mut associated_keys in account_associated_keys_arb(), + ) -> Account { + associated_keys.add_key(account_hash, Weight::new(1)).unwrap(); + Account::new( + account_hash, + urefs, + purse, + associated_keys, + thresholds, + ) + } + } +} + +#[cfg(test)] +mod tests { + use crate::{ + account::{ + Account, AccountHash, ActionThresholds, ActionType, AssociatedKeys, RemoveKeyFailure, + UpdateKeyFailure, Weight, + }, + addressable_entity::{NamedKeys, TryFromIntError}, + AccessRights, URef, + }; + use std::{collections::BTreeSet, convert::TryFrom, iter::FromIterator, vec::Vec}; + + use super::*; + + #[test] + fn account_hash_from_slice() { + let bytes: Vec = (0..32).collect(); + let account_hash = AccountHash::try_from(&bytes[..]).expect( + "should create account +hash", + ); + assert_eq!(&bytes, &account_hash.as_bytes()); + } + + #[test] + fn account_hash_from_slice_too_small() { + let _account_hash = + AccountHash::try_from(&[0u8; 31][..]).expect_err("should not create account hash"); + } + + #[test] + fn account_hash_from_slice_too_big() { + let _account_hash = + AccountHash::try_from(&[0u8; 33][..]).expect_err("should not create account hash"); + } + + #[test] + fn try_from_i32_for_set_threshold_failure() { + let max_valid_value_for_variant = SetThresholdFailure::InsufficientTotalWeight as i32; + assert_eq!( + Err(TryFromIntError(())), + SetThresholdFailure::try_from(max_valid_value_for_variant + 1), + "Did you forget to update `SetThresholdFailure::try_from` for a new variant of \ + `SetThresholdFailure`, or `max_valid_value_for_variant` in this test?" 
+ ); + } + + #[test] + fn try_from_i32_for_add_key_failure() { + let max_valid_value_for_variant = AddKeyFailure::PermissionDenied as i32; + assert_eq!( + Err(TryFromIntError(())), + AddKeyFailure::try_from(max_valid_value_for_variant + 1), + "Did you forget to update `AddKeyFailure::try_from` for a new variant of \ + `AddKeyFailure`, or `max_valid_value_for_variant` in this test?" + ); + } + + #[test] + fn try_from_i32_for_remove_key_failure() { + let max_valid_value_for_variant = RemoveKeyFailure::ThresholdViolation as i32; + assert_eq!( + Err(TryFromIntError(())), + RemoveKeyFailure::try_from(max_valid_value_for_variant + 1), + "Did you forget to update `RemoveKeyFailure::try_from` for a new variant of \ + `RemoveKeyFailure`, or `max_valid_value_for_variant` in this test?" + ); + } + + #[test] + fn try_from_i32_for_update_key_failure() { + let max_valid_value_for_variant = UpdateKeyFailure::ThresholdViolation as i32; + assert_eq!( + Err(TryFromIntError(())), + UpdateKeyFailure::try_from(max_valid_value_for_variant + 1), + "Did you forget to update `UpdateKeyFailure::try_from` for a new variant of \ + `UpdateKeyFailure`, or `max_valid_value_for_variant` in this test?" 
+ ); + } + + #[test] + fn account_hash_from_str() { + let account_hash = AccountHash([3; 32]); + let encoded = account_hash.to_formatted_string(); + let decoded = AccountHash::from_formatted_str(&encoded).unwrap(); + assert_eq!(account_hash, decoded); + + let invalid_prefix = + "accounthash-0000000000000000000000000000000000000000000000000000000000000000"; + assert!(AccountHash::from_formatted_str(invalid_prefix).is_err()); + + let invalid_prefix = + "account-hash0000000000000000000000000000000000000000000000000000000000000000"; + assert!(AccountHash::from_formatted_str(invalid_prefix).is_err()); + + let short_addr = + "account-hash-00000000000000000000000000000000000000000000000000000000000000"; + assert!(AccountHash::from_formatted_str(short_addr).is_err()); + + let long_addr = + "account-hash-000000000000000000000000000000000000000000000000000000000000000000"; + assert!(AccountHash::from_formatted_str(long_addr).is_err()); + + let invalid_hex = + "account-hash-000000000000000000000000000000000000000000000000000000000000000g"; + assert!(AccountHash::from_formatted_str(invalid_hex).is_err()); + } + + #[test] + fn account_hash_serde_roundtrip() { + let account_hash = AccountHash([255; 32]); + let serialized = bincode::serialize(&account_hash).unwrap(); + let decoded = bincode::deserialize(&serialized).unwrap(); + assert_eq!(account_hash, decoded); + } + + #[test] + fn account_hash_json_roundtrip() { + let account_hash = AccountHash([255; 32]); + let json_string = serde_json::to_string_pretty(&account_hash).unwrap(); + let decoded = serde_json::from_str(&json_string).unwrap(); + assert_eq!(account_hash, decoded); + } + + #[test] + fn associated_keys_can_authorize_keys() { + let key_1 = AccountHash::new([0; 32]); + let key_2 = AccountHash::new([1; 32]); + let key_3 = AccountHash::new([2; 32]); + let mut keys = AssociatedKeys::default(); + + keys.add_key(key_2, Weight::new(2)) + .expect("should add key_1"); + keys.add_key(key_1, Weight::new(1)) + .expect("should add 
key_1"); + keys.add_key(key_3, Weight::new(3)) + .expect("should add key_1"); + + let account = Account::new( + AccountHash::new([0u8; 32]), + NamedKeys::new(), + URef::new([0u8; 32], AccessRights::READ_ADD_WRITE), + keys, + // deploy: 33 (3*11) + ActionThresholds::new(Weight::new(33), Weight::new(48)) + .expect("should create thresholds"), + ); + + assert!(account.can_authorize(&BTreeSet::from_iter(vec![key_3, key_2, key_1]))); + assert!(account.can_authorize(&BTreeSet::from_iter(vec![key_1, key_3, key_2]))); + + assert!(account.can_authorize(&BTreeSet::from_iter(vec![key_1, key_2]))); + assert!(account.can_authorize(&BTreeSet::from_iter(vec![key_1]))); + + assert!(!account.can_authorize(&BTreeSet::from_iter(vec![ + key_1, + key_2, + AccountHash::new([42; 32]) + ]))); + assert!(!account.can_authorize(&BTreeSet::from_iter(vec![ + AccountHash::new([42; 32]), + key_1, + key_2 + ]))); + assert!(!account.can_authorize(&BTreeSet::from_iter(vec![ + AccountHash::new([43; 32]), + AccountHash::new([44; 32]), + AccountHash::new([42; 32]) + ]))); + assert!(!account.can_authorize(&BTreeSet::new())); + } + + #[test] + fn account_can_deploy_with() { + let associated_keys = { + let mut res = AssociatedKeys::new(AccountHash::new([1u8; 32]), Weight::new(1)); + res.add_key(AccountHash::new([2u8; 32]), Weight::new(11)) + .expect("should add key 1"); + res.add_key(AccountHash::new([3u8; 32]), Weight::new(11)) + .expect("should add key 2"); + res.add_key(AccountHash::new([4u8; 32]), Weight::new(11)) + .expect("should add key 3"); + res + }; + let account = Account::new( + AccountHash::new([0u8; 32]), + NamedKeys::new(), + URef::new([0u8; 32], AccessRights::READ_ADD_WRITE), + associated_keys, + // deploy: 33 (3*11) + ActionThresholds::new(Weight::new(33), Weight::new(48)) + .expect("should create thresholds"), + ); + + // sum: 22, required 33 - can't deploy + assert!(!account.can_deploy_with(&BTreeSet::from_iter(vec![ + AccountHash::new([3u8; 32]), + AccountHash::new([2u8; 32]), + ]))); 
+ + // sum: 33, required 33 - can deploy + assert!(account.can_deploy_with(&BTreeSet::from_iter(vec![ + AccountHash::new([4u8; 32]), + AccountHash::new([3u8; 32]), + AccountHash::new([2u8; 32]), + ]))); + + // sum: 34, required 33 - can deploy + assert!(account.can_deploy_with(&BTreeSet::from_iter(vec![ + AccountHash::new([2u8; 32]), + AccountHash::new([1u8; 32]), + AccountHash::new([4u8; 32]), + AccountHash::new([3u8; 32]), + ]))); + } + + #[test] + fn account_can_manage_keys_with() { + let associated_keys = { + let mut res = AssociatedKeys::new(AccountHash::new([1u8; 32]), Weight::new(1)); + res.add_key(AccountHash::new([2u8; 32]), Weight::new(11)) + .expect("should add key 1"); + res.add_key(AccountHash::new([3u8; 32]), Weight::new(11)) + .expect("should add key 2"); + res.add_key(AccountHash::new([4u8; 32]), Weight::new(11)) + .expect("should add key 3"); + res + }; + let account = Account::new( + AccountHash::new([0u8; 32]), + NamedKeys::new(), + URef::new([0u8; 32], AccessRights::READ_ADD_WRITE), + associated_keys, + // deploy: 33 (3*11) + ActionThresholds::new(Weight::new(11), Weight::new(33)) + .expect("should create thresholds"), + ); + + // sum: 22, required 33 - can't manage + assert!(!account.can_manage_keys_with(&BTreeSet::from_iter(vec![ + AccountHash::new([3u8; 32]), + AccountHash::new([2u8; 32]), + ]))); + + // sum: 33, required 33 - can manage + assert!(account.can_manage_keys_with(&BTreeSet::from_iter(vec![ + AccountHash::new([4u8; 32]), + AccountHash::new([3u8; 32]), + AccountHash::new([2u8; 32]), + ]))); + + // sum: 34, required 33 - can manage + assert!(account.can_manage_keys_with(&BTreeSet::from_iter(vec![ + AccountHash::new([2u8; 32]), + AccountHash::new([1u8; 32]), + AccountHash::new([4u8; 32]), + AccountHash::new([3u8; 32]), + ]))); + } + + #[test] + fn set_action_threshold_higher_than_total_weight() { + let identity_key = AccountHash::new([1u8; 32]); + let key_1 = AccountHash::new([2u8; 32]); + let key_2 = AccountHash::new([3u8; 32]); + 
let key_3 = AccountHash::new([4u8; 32]); + let associated_keys = { + let mut res = AssociatedKeys::new(identity_key, Weight::new(1)); + res.add_key(key_1, Weight::new(2)) + .expect("should add key 1"); + res.add_key(key_2, Weight::new(3)) + .expect("should add key 2"); + res.add_key(key_3, Weight::new(4)) + .expect("should add key 3"); + res + }; + let mut account = Account::new( + AccountHash::new([0u8; 32]), + NamedKeys::new(), + URef::new([0u8; 32], AccessRights::READ_ADD_WRITE), + associated_keys, + // deploy: 33 (3*11) + ActionThresholds::new(Weight::new(33), Weight::new(48)) + .expect("should create thresholds"), + ); + + assert_eq!( + account + .set_action_threshold(ActionType::Deployment, Weight::new(1 + 2 + 3 + 4 + 1)) + .unwrap_err(), + SetThresholdFailure::InsufficientTotalWeight, + ); + assert_eq!( + account + .set_action_threshold(ActionType::Deployment, Weight::new(1 + 2 + 3 + 4 + 245)) + .unwrap_err(), + SetThresholdFailure::InsufficientTotalWeight, + ) + } + + #[test] + fn remove_key_would_violate_action_thresholds() { + let identity_key = AccountHash::new([1u8; 32]); + let key_1 = AccountHash::new([2u8; 32]); + let key_2 = AccountHash::new([3u8; 32]); + let key_3 = AccountHash::new([4u8; 32]); + let associated_keys = { + let mut res = AssociatedKeys::new(identity_key, Weight::new(1)); + res.add_key(key_1, Weight::new(2)) + .expect("should add key 1"); + res.add_key(key_2, Weight::new(3)) + .expect("should add key 2"); + res.add_key(key_3, Weight::new(4)) + .expect("should add key 3"); + res + }; + let mut account = Account::new( + AccountHash::new([0u8; 32]), + NamedKeys::new(), + URef::new([0u8; 32], AccessRights::READ_ADD_WRITE), + associated_keys, + // deploy: 33 (3*11) + ActionThresholds::new(Weight::new(1 + 2 + 3 + 4), Weight::new(1 + 2 + 3 + 4 + 5)) + .expect("should create thresholds"), + ); + + assert_eq!( + account.remove_associated_key(key_3).unwrap_err(), + RemoveKeyFailure::ThresholdViolation, + ) + } + + #[test] + fn 
updating_key_would_violate_action_thresholds() { + let identity_key = AccountHash::new([1u8; 32]); + let identity_key_weight = Weight::new(1); + let key_1 = AccountHash::new([2u8; 32]); + let key_1_weight = Weight::new(2); + let key_2 = AccountHash::new([3u8; 32]); + let key_2_weight = Weight::new(3); + let key_3 = AccountHash::new([4u8; 32]); + let key_3_weight = Weight::new(4); + let associated_keys = { + let mut res = AssociatedKeys::new(identity_key, identity_key_weight); + res.add_key(key_1, key_1_weight).expect("should add key 1"); + res.add_key(key_2, key_2_weight).expect("should add key 2"); + res.add_key(key_3, key_3_weight).expect("should add key 3"); + // 1 + 2 + 3 + 4 + res + }; + + let deployment_threshold = Weight::new( + identity_key_weight.value() + + key_1_weight.value() + + key_2_weight.value() + + key_3_weight.value(), + ); + let key_management_threshold = Weight::new(deployment_threshold.value() + 1); + let mut account = Account::new( + identity_key, + NamedKeys::new(), + URef::new([0u8; 32], AccessRights::READ_ADD_WRITE), + associated_keys, + // deploy: 33 (3*11) + ActionThresholds::new(deployment_threshold, key_management_threshold) + .expect("should create thresholds"), + ); + + // Decreases by 3 + assert_eq!( + account + .clone() + .update_associated_key(key_3, Weight::new(1)) + .unwrap_err(), + UpdateKeyFailure::ThresholdViolation, + ); + + // increase total weight (12) + account + .update_associated_key(identity_key, Weight::new(3)) + .unwrap(); + + // variant a) decrease total weight by 1 (total 11) + account + .clone() + .update_associated_key(key_3, Weight::new(3)) + .unwrap(); + // variant b) decrease total weight by 3 (total 9) - fail + assert_eq!( + account + .update_associated_key(key_3, Weight::new(1)) + .unwrap_err(), + UpdateKeyFailure::ThresholdViolation + ); + } + + #[test] + fn overflowing_should_allow_removal() { + let identity_key = AccountHash::new([42; 32]); + let key_1 = AccountHash::new([2u8; 32]); + let key_2 = 
AccountHash::new([3u8; 32]); + + let associated_keys = { + // Identity + let mut res = AssociatedKeys::new(identity_key, Weight::new(1)); + + // Spare key + res.add_key(key_1, Weight::new(2)) + .expect("should add key 1"); + // Big key + res.add_key(key_2, Weight::new(255)) + .expect("should add key 2"); + + res + }; + + let mut account = Account::new( + identity_key, + NamedKeys::new(), + URef::new([0u8; 32], AccessRights::READ_ADD_WRITE), + associated_keys, + ActionThresholds::new(Weight::new(1), Weight::new(254)) + .expect("should create thresholds"), + ); + + account.remove_associated_key(key_1).expect("should work") + } + + #[test] + fn overflowing_should_allow_updating() { + let identity_key = AccountHash::new([1; 32]); + let identity_key_weight = Weight::new(1); + let key_1 = AccountHash::new([2u8; 32]); + let key_1_weight = Weight::new(3); + let key_2 = AccountHash::new([3u8; 32]); + let key_2_weight = Weight::new(255); + let deployment_threshold = Weight::new(1); + let key_management_threshold = Weight::new(254); + + let associated_keys = { + // Identity + let mut res = AssociatedKeys::new(identity_key, identity_key_weight); + + // Spare key + res.add_key(key_1, key_1_weight).expect("should add key 1"); + // Big key + res.add_key(key_2, key_2_weight).expect("should add key 2"); + + res + }; + + let mut account = Account::new( + identity_key, + NamedKeys::new(), + URef::new([0u8; 32], AccessRights::READ_ADD_WRITE), + associated_keys, + ActionThresholds::new(deployment_threshold, key_management_threshold) + .expect("should create thresholds"), + ); + + // decrease so total weight would be changed from 1 + 3 + 255 to 1 + 1 + 255 + account + .update_associated_key(key_1, Weight::new(1)) + .expect("should work"); + } +} + +#[cfg(test)] +mod proptests { + use proptest::prelude::*; + + use crate::bytesrepr; + + use super::*; + + proptest! 
{ + #[test] + fn test_value_account(acct in gens::account_arb()) { + bytesrepr::test_serialization_roundtrip(&acct); + } + } +} diff --git a/casper_types_ver_2_0/src/account/account_hash.rs b/casper_types_ver_2_0/src/account/account_hash.rs new file mode 100644 index 00000000..1e4ff6d1 --- /dev/null +++ b/casper_types_ver_2_0/src/account/account_hash.rs @@ -0,0 +1,212 @@ +use alloc::{string::String, vec::Vec}; +use core::{ + convert::{From, TryFrom}, + fmt::{Debug, Display, Formatter}, +}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; + +use crate::{ + addressable_entity::FromStrError, + bytesrepr::{Error, FromBytes, ToBytes}, + checksummed_hex, crypto, CLType, CLTyped, PublicKey, BLAKE2B_DIGEST_LENGTH, +}; + +/// The length in bytes of a [`AccountHash`]. +pub const ACCOUNT_HASH_LENGTH: usize = 32; +/// The prefix applied to the hex-encoded `AccountHash` to produce a formatted string +/// representation. +pub const ACCOUNT_HASH_FORMATTED_STRING_PREFIX: &str = "account-hash-"; + +/// A newtype wrapping an array which contains the raw bytes of +/// the AccountHash, a hash of Public Key and Algorithm +#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "Account hash as a formatted string.") +)] +pub struct AccountHash( + #[cfg_attr(feature = "json-schema", schemars(skip, with = "String"))] + pub [u8; ACCOUNT_HASH_LENGTH], +); + +impl AccountHash { + /// Constructs a new `AccountHash` instance from the raw bytes of an Public Key Account Hash. 
+ pub const fn new(value: [u8; ACCOUNT_HASH_LENGTH]) -> AccountHash { + AccountHash(value) + } + + /// Returns the raw bytes of the account hash as an array. + pub fn value(&self) -> [u8; ACCOUNT_HASH_LENGTH] { + self.0 + } + + /// Returns the raw bytes of the account hash as a `slice`. + pub fn as_bytes(&self) -> &[u8] { + &self.0 + } + + /// Formats the `AccountHash` for users getting and putting. + pub fn to_formatted_string(self) -> String { + format!( + "{}{}", + ACCOUNT_HASH_FORMATTED_STRING_PREFIX, + base16::encode_lower(&self.0), + ) + } + + /// Parses a string formatted as per `Self::to_formatted_string()` into an `AccountHash`. + pub fn from_formatted_str(input: &str) -> Result { + let remainder = input + .strip_prefix(ACCOUNT_HASH_FORMATTED_STRING_PREFIX) + .ok_or(FromStrError::InvalidPrefix)?; + let bytes = + <[u8; ACCOUNT_HASH_LENGTH]>::try_from(checksummed_hex::decode(remainder)?.as_ref())?; + Ok(AccountHash(bytes)) + } + + /// Parses a `PublicKey` and outputs the corresponding account hash. + pub fn from_public_key( + public_key: &PublicKey, + blake2b_hash_fn: impl Fn(Vec) -> [u8; BLAKE2B_DIGEST_LENGTH], + ) -> Self { + const SYSTEM_LOWERCASE: &str = "system"; + const ED25519_LOWERCASE: &str = "ed25519"; + const SECP256K1_LOWERCASE: &str = "secp256k1"; + + let algorithm_name = match public_key { + PublicKey::System => SYSTEM_LOWERCASE, + PublicKey::Ed25519(_) => ED25519_LOWERCASE, + PublicKey::Secp256k1(_) => SECP256K1_LOWERCASE, + }; + let public_key_bytes: Vec = public_key.into(); + + // Prepare preimage based on the public key parameters. + let preimage = { + let mut data = Vec::with_capacity(algorithm_name.len() + public_key_bytes.len() + 1); + data.extend(algorithm_name.as_bytes()); + data.push(0); + data.extend(public_key_bytes); + data + }; + // Hash the preimage data using blake2b256 and return it. 
+ let digest = blake2b_hash_fn(preimage); + Self::new(digest) + } +} + +impl Serialize for AccountHash { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + self.to_formatted_string().serialize(serializer) + } else { + self.0.serialize(serializer) + } + } +} + +impl<'de> Deserialize<'de> for AccountHash { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let formatted_string = String::deserialize(deserializer)?; + AccountHash::from_formatted_str(&formatted_string).map_err(SerdeError::custom) + } else { + let bytes = <[u8; ACCOUNT_HASH_LENGTH]>::deserialize(deserializer)?; + Ok(AccountHash(bytes)) + } + } +} + +impl TryFrom<&[u8]> for AccountHash { + type Error = TryFromSliceForAccountHashError; + + fn try_from(bytes: &[u8]) -> Result { + <[u8; ACCOUNT_HASH_LENGTH]>::try_from(bytes) + .map(AccountHash::new) + .map_err(|_| TryFromSliceForAccountHashError(())) + } +} + +impl TryFrom<&alloc::vec::Vec> for AccountHash { + type Error = TryFromSliceForAccountHashError; + + fn try_from(bytes: &Vec) -> Result { + <[u8; ACCOUNT_HASH_LENGTH]>::try_from(bytes as &[u8]) + .map(AccountHash::new) + .map_err(|_| TryFromSliceForAccountHashError(())) + } +} + +impl From<&PublicKey> for AccountHash { + fn from(public_key: &PublicKey) -> Self { + AccountHash::from_public_key(public_key, crypto::blake2b) + } +} + +impl Display for AccountHash { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + write!(f, "{}", base16::encode_lower(&self.0)) + } +} + +impl Debug for AccountHash { + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + write!(f, "AccountHash({})", base16::encode_lower(&self.0)) + } +} + +impl CLTyped for AccountHash { + fn cl_type() -> CLType { + CLType::ByteArray(ACCOUNT_HASH_LENGTH as u32) + } +} + +impl ToBytes for AccountHash { + #[inline(always)] + fn to_bytes(&self) -> Result, Error> { + self.0.to_bytes() + } + + #[inline(always)] + fn serialized_length(&self) -> usize { + 
self.0.serialized_length() + } + + #[inline(always)] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.extend_from_slice(&self.0); + Ok(()) + } +} + +impl FromBytes for AccountHash { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (bytes, rem) = FromBytes::from_bytes(bytes)?; + Ok((AccountHash::new(bytes), rem)) + } +} + +impl AsRef<[u8]> for AccountHash { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + +/// Associated error type of `TryFrom<&[u8]>` for [`AccountHash`]. +#[derive(Debug)] +pub struct TryFromSliceForAccountHashError(()); + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> AccountHash { + AccountHash::new(rng.gen()) + } +} diff --git a/casper_types_ver_2_0/src/account/action_thresholds.rs b/casper_types_ver_2_0/src/account/action_thresholds.rs new file mode 100644 index 00000000..ce2e492c --- /dev/null +++ b/casper_types_ver_2_0/src/account/action_thresholds.rs @@ -0,0 +1,175 @@ +//! This module contains types and functions for managing action thresholds. + +use alloc::vec::Vec; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + account::{ActionType, SetThresholdFailure, Weight}, + addressable_entity::WEIGHT_SERIALIZED_LENGTH, + bytesrepr::{self, Error, FromBytes, ToBytes}, +}; + +/// Thresholds that have to be met when executing an action of a certain type. +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[cfg_attr(feature = "json-schema", schemars(rename = "AccountActionThresholds"))] +pub struct ActionThresholds { + /// Threshold for deploy execution. + pub deployment: Weight, + /// Threshold for managing action threshold. 
+ pub key_management: Weight, +} + +impl ActionThresholds { + /// Creates new ActionThresholds object with provided weights + /// + /// Requires deployment threshold to be lower than or equal to + /// key management threshold. + pub fn new( + deployment: Weight, + key_management: Weight, + ) -> Result { + if deployment > key_management { + return Err(SetThresholdFailure::DeploymentThreshold); + } + Ok(ActionThresholds { + deployment, + key_management, + }) + } + /// Sets new threshold for [ActionType::Deployment]. + /// Should return an error if setting new threshold for `action_type` breaks + /// one of the invariants. Currently, invariant is that + /// `ActionType::Deployment` threshold shouldn't be higher than any + /// other, which should be checked both when increasing `Deployment` + /// threshold and decreasing the other. + pub fn set_deployment_threshold( + &mut self, + new_threshold: Weight, + ) -> Result<(), SetThresholdFailure> { + if new_threshold > self.key_management { + Err(SetThresholdFailure::DeploymentThreshold) + } else { + self.deployment = new_threshold; + Ok(()) + } + } + + /// Sets new threshold for [ActionType::KeyManagement]. + pub fn set_key_management_threshold( + &mut self, + new_threshold: Weight, + ) -> Result<(), SetThresholdFailure> { + if self.deployment > new_threshold { + Err(SetThresholdFailure::KeyManagementThreshold) + } else { + self.key_management = new_threshold; + Ok(()) + } + } + + /// Returns the deployment action threshold. + pub fn deployment(&self) -> &Weight { + &self.deployment + } + + /// Returns key management action threshold. + pub fn key_management(&self) -> &Weight { + &self.key_management + } + + /// Unified function that takes an action type, and changes appropriate + /// threshold defined by the [ActionType] variants. 
+ pub fn set_threshold( + &mut self, + action_type: ActionType, + new_threshold: Weight, + ) -> Result<(), SetThresholdFailure> { + match action_type { + ActionType::Deployment => self.set_deployment_threshold(new_threshold), + ActionType::KeyManagement => self.set_key_management_threshold(new_threshold), + } + } +} + +impl Default for ActionThresholds { + fn default() -> Self { + ActionThresholds { + deployment: Weight::new(1), + key_management: Weight::new(1), + } + } +} + +impl ToBytes for ActionThresholds { + fn to_bytes(&self) -> Result, Error> { + let mut result = bytesrepr::unchecked_allocate_buffer(self); + result.append(&mut self.deployment.to_bytes()?); + result.append(&mut self.key_management.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + 2 * WEIGHT_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.deployment().write_bytes(writer)?; + self.key_management().write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for ActionThresholds { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (deployment, rem) = Weight::from_bytes(bytes)?; + let (key_management, rem) = Weight::from_bytes(rem)?; + let ret = ActionThresholds { + deployment, + key_management, + }; + Ok((ret, rem)) + } +} + +#[doc(hidden)] +#[cfg(any(feature = "testing", feature = "gens", test))] +pub mod gens { + use proptest::prelude::*; + + use super::ActionThresholds; + + pub fn account_action_thresholds_arb() -> impl Strategy { + Just(Default::default()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn should_create_new_action_thresholds() { + let action_thresholds = ActionThresholds::new(Weight::new(1), Weight::new(42)).unwrap(); + assert_eq!(*action_thresholds.deployment(), Weight::new(1)); + assert_eq!(*action_thresholds.key_management(), Weight::new(42)); + } + + #[test] + fn should_not_create_action_thresholds_with_invalid_deployment_threshold() { + // deployment cant 
be greater than key management + assert!(ActionThresholds::new(Weight::new(5), Weight::new(1)).is_err()); + } + + #[test] + fn serialization_roundtrip() { + let action_thresholds = ActionThresholds::new(Weight::new(1), Weight::new(42)).unwrap(); + bytesrepr::test_serialization_roundtrip(&action_thresholds); + } +} diff --git a/casper_types_ver_2_0/src/account/action_type.rs b/casper_types_ver_2_0/src/account/action_type.rs new file mode 100644 index 00000000..65848f79 --- /dev/null +++ b/casper_types_ver_2_0/src/account/action_type.rs @@ -0,0 +1,32 @@ +use core::convert::TryFrom; + +use crate::addressable_entity::TryFromIntError; + +/// The various types of action which can be performed in the context of a given account. +#[repr(u32)] +pub enum ActionType { + /// Represents performing a deploy. + Deployment = 0, + /// Represents changing the associated keys (i.e. map of [`AccountHash`](super::AccountHash)s + /// to [`Weight`](super::Weight)s) or action thresholds (i.e. the total + /// [`Weight`](super::Weight)s of signing [`AccountHash`](super::AccountHash)s required to + /// perform various actions). + KeyManagement = 1, +} + +// This conversion is not intended to be used by third party crates. +#[doc(hidden)] +impl TryFrom for ActionType { + type Error = TryFromIntError; + + fn try_from(value: u32) -> Result { + // This doesn't use `num_derive` traits such as FromPrimitive and ToPrimitive + // that helps to automatically create `from_u32` and `to_u32`. This approach + // gives better control over generated code. 
+ match value { + d if d == ActionType::Deployment as u32 => Ok(ActionType::Deployment), + d if d == ActionType::KeyManagement as u32 => Ok(ActionType::KeyManagement), + _ => Err(TryFromIntError(())), + } + } +} diff --git a/casper_types_ver_2_0/src/account/associated_keys.rs b/casper_types_ver_2_0/src/account/associated_keys.rs new file mode 100644 index 00000000..aa7d3e91 --- /dev/null +++ b/casper_types_ver_2_0/src/account/associated_keys.rs @@ -0,0 +1,381 @@ +//! This module contains types and functions for working with keys associated with an account. + +use alloc::{ + collections::{btree_map::Entry, BTreeMap, BTreeSet}, + vec::Vec, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +#[cfg(feature = "json-schema")] +use serde_map_to_array::KeyValueJsonSchema; +use serde_map_to_array::{BTreeMapToArray, KeyValueLabels}; + +use crate::{ + account::{AccountHash, Weight}, + addressable_entity::{AddKeyFailure, RemoveKeyFailure, UpdateKeyFailure}, + bytesrepr::{self, FromBytes, ToBytes}, +}; + +/// A collection of weighted public keys (represented as account hashes) associated with an account. +#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[cfg_attr(feature = "json-schema", schemars(rename = "AccountAssociatedKeys"))] +#[serde(deny_unknown_fields)] +#[rustfmt::skip] +pub struct AssociatedKeys( + #[serde(with = "BTreeMapToArray::")] + BTreeMap, +); + +impl AssociatedKeys { + /// Constructs a new AssociatedKeys. + pub fn new(key: AccountHash, weight: Weight) -> AssociatedKeys { + let mut bt: BTreeMap = BTreeMap::new(); + bt.insert(key, weight); + AssociatedKeys(bt) + } + + /// Adds a new AssociatedKey to the set. + /// + /// Returns true if added successfully, false otherwise. 
+ pub fn add_key(&mut self, key: AccountHash, weight: Weight) -> Result<(), AddKeyFailure> { + match self.0.entry(key) { + Entry::Vacant(entry) => { + entry.insert(weight); + } + Entry::Occupied(_) => return Err(AddKeyFailure::DuplicateKey), + } + Ok(()) + } + + /// Removes key from the associated keys set. + /// Returns true if value was found in the set prior to the removal, false + /// otherwise. + pub fn remove_key(&mut self, key: &AccountHash) -> Result<(), RemoveKeyFailure> { + self.0 + .remove(key) + .map(|_| ()) + .ok_or(RemoveKeyFailure::MissingKey) + } + + /// Adds new AssociatedKey to the set. + /// Returns true if added successfully, false otherwise. + pub fn update_key(&mut self, key: AccountHash, weight: Weight) -> Result<(), UpdateKeyFailure> { + match self.0.entry(key) { + Entry::Vacant(_) => { + return Err(UpdateKeyFailure::MissingKey); + } + Entry::Occupied(mut entry) => { + *entry.get_mut() = weight; + } + } + Ok(()) + } + + /// Returns the weight of an account hash. + pub fn get(&self, key: &AccountHash) -> Option<&Weight> { + self.0.get(key) + } + + /// Returns `true` if a given key exists. + pub fn contains_key(&self, key: &AccountHash) -> bool { + self.0.contains_key(key) + } + + /// Returns an iterator over the account hash and the weights. + pub fn iter(&self) -> impl Iterator { + self.0.iter() + } + + /// Returns the count of the associated keys. + pub fn len(&self) -> usize { + self.0.len() + } + + /// Returns `true` if the associated keys are empty. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Helper method that calculates weight for keys that comes from any + /// source. + /// + /// This method is not concerned about uniqueness of the passed iterable. + /// Uniqueness is determined based on the input collection properties, + /// which is either BTreeSet (in [`AssociatedKeys::calculate_keys_weight`]) + /// or BTreeMap (in [`AssociatedKeys::total_keys_weight`]). 
+ fn calculate_any_keys_weight<'a>(&self, keys: impl Iterator) -> Weight { + let total = keys + .filter_map(|key| self.0.get(key)) + .fold(0u8, |acc, w| acc.saturating_add(w.value())); + + Weight::new(total) + } + + /// Calculates total weight of authorization keys provided by an argument + pub fn calculate_keys_weight(&self, authorization_keys: &BTreeSet) -> Weight { + self.calculate_any_keys_weight(authorization_keys.iter()) + } + + /// Calculates total weight of all authorization keys + pub fn total_keys_weight(&self) -> Weight { + self.calculate_any_keys_weight(self.0.keys()) + } + + /// Calculates total weight of all authorization keys excluding a given key + pub fn total_keys_weight_excluding(&self, account_hash: AccountHash) -> Weight { + self.calculate_any_keys_weight(self.0.keys().filter(|&&element| element != account_hash)) + } +} + +impl From> for AssociatedKeys { + fn from(associated_keys: BTreeMap) -> Self { + Self(associated_keys) + } +} + +impl From for BTreeMap { + fn from(associated_keys: AssociatedKeys) -> Self { + associated_keys.0 + } +} + +impl ToBytes for AssociatedKeys { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } +} + +impl FromBytes for AssociatedKeys { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (associated_keys, rem) = FromBytes::from_bytes(bytes)?; + Ok((AssociatedKeys(associated_keys), rem)) + } +} + +struct Labels; + +impl KeyValueLabels for Labels { + const KEY: &'static str = "account_hash"; + const VALUE: &'static str = "weight"; +} + +#[cfg(feature = "json-schema")] +impl KeyValueJsonSchema for Labels { + const JSON_SCHEMA_KV_NAME: Option<&'static str> = Some("AssociatedKey"); + const JSON_SCHEMA_KV_DESCRIPTION: Option<&'static str> = Some("A weighted public key."); + const 
JSON_SCHEMA_KEY_DESCRIPTION: Option<&'static str> = + Some("The account hash of the public key."); + const JSON_SCHEMA_VALUE_DESCRIPTION: Option<&'static str> = + Some("The weight assigned to the public key."); +} + +#[doc(hidden)] +#[cfg(any(feature = "testing", feature = "gens", test))] +pub mod gens { + use proptest::prelude::*; + + use crate::gens::{account_hash_arb, account_weight_arb}; + + use super::AssociatedKeys; + + pub fn account_associated_keys_arb() -> impl Strategy { + proptest::collection::btree_map(account_hash_arb(), account_weight_arb(), 10).prop_map( + |keys| { + let mut associated_keys = AssociatedKeys::default(); + keys.into_iter().for_each(|(k, v)| { + associated_keys.add_key(k, v).unwrap(); + }); + associated_keys + }, + ) + } +} + +#[cfg(test)] +mod tests { + use std::{collections::BTreeSet, iter::FromIterator}; + + use crate::{ + account::{AccountHash, Weight, ACCOUNT_HASH_LENGTH}, + bytesrepr, + }; + + use super::*; + + #[test] + fn associated_keys_add() { + let mut keys = + AssociatedKeys::new(AccountHash::new([0u8; ACCOUNT_HASH_LENGTH]), Weight::new(1)); + let new_pk = AccountHash::new([1u8; ACCOUNT_HASH_LENGTH]); + let new_pk_weight = Weight::new(2); + assert!(keys.add_key(new_pk, new_pk_weight).is_ok()); + assert_eq!(keys.get(&new_pk), Some(&new_pk_weight)) + } + + #[test] + fn associated_keys_add_duplicate() { + let pk = AccountHash::new([0u8; ACCOUNT_HASH_LENGTH]); + let weight = Weight::new(1); + let mut keys = AssociatedKeys::new(pk, weight); + assert_eq!( + keys.add_key(pk, Weight::new(10)), + Err(AddKeyFailure::DuplicateKey) + ); + assert_eq!(keys.get(&pk), Some(&weight)); + } + + #[test] + fn associated_keys_remove() { + let pk = AccountHash::new([0u8; ACCOUNT_HASH_LENGTH]); + let weight = Weight::new(1); + let mut keys = AssociatedKeys::new(pk, weight); + assert!(keys.remove_key(&pk).is_ok()); + assert!(keys + .remove_key(&AccountHash::new([1u8; ACCOUNT_HASH_LENGTH])) + .is_err()); + } + + #[test] + fn associated_keys_update() 
{ + let pk1 = AccountHash::new([0u8; ACCOUNT_HASH_LENGTH]); + let pk2 = AccountHash::new([1u8; ACCOUNT_HASH_LENGTH]); + let weight = Weight::new(1); + let mut keys = AssociatedKeys::new(pk1, weight); + assert!(matches!( + keys.update_key(pk2, Weight::new(2)) + .expect_err("should get error"), + UpdateKeyFailure::MissingKey + )); + keys.add_key(pk2, Weight::new(1)).unwrap(); + assert_eq!(keys.get(&pk2), Some(&Weight::new(1))); + keys.update_key(pk2, Weight::new(2)).unwrap(); + assert_eq!(keys.get(&pk2), Some(&Weight::new(2))); + } + + #[test] + fn associated_keys_calculate_keys_once() { + let key_1 = AccountHash::new([0; 32]); + let key_2 = AccountHash::new([1; 32]); + let key_3 = AccountHash::new([2; 32]); + let mut keys = AssociatedKeys::default(); + + keys.add_key(key_2, Weight::new(2)) + .expect("should add key_1"); + keys.add_key(key_1, Weight::new(1)) + .expect("should add key_1"); + keys.add_key(key_3, Weight::new(3)) + .expect("should add key_1"); + + assert_eq!( + keys.calculate_keys_weight(&BTreeSet::from_iter(vec![ + key_1, key_2, key_3, key_1, key_2, key_3, + ])), + Weight::new(1 + 2 + 3) + ); + } + + #[test] + fn associated_keys_total_weight() { + let associated_keys = { + let mut res = AssociatedKeys::new(AccountHash::new([1u8; 32]), Weight::new(1)); + res.add_key(AccountHash::new([2u8; 32]), Weight::new(11)) + .expect("should add key 1"); + res.add_key(AccountHash::new([3u8; 32]), Weight::new(12)) + .expect("should add key 2"); + res.add_key(AccountHash::new([4u8; 32]), Weight::new(13)) + .expect("should add key 3"); + res + }; + assert_eq!( + associated_keys.total_keys_weight(), + Weight::new(1 + 11 + 12 + 13) + ); + } + + #[test] + fn associated_keys_total_weight_excluding() { + let identity_key = AccountHash::new([1u8; 32]); + let identity_key_weight = Weight::new(1); + + let key_1 = AccountHash::new([2u8; 32]); + let key_1_weight = Weight::new(11); + + let key_2 = AccountHash::new([3u8; 32]); + let key_2_weight = Weight::new(12); + + let key_3 = 
AccountHash::new([4u8; 32]); + let key_3_weight = Weight::new(13); + + let associated_keys = { + let mut res = AssociatedKeys::new(identity_key, identity_key_weight); + res.add_key(key_1, key_1_weight).expect("should add key 1"); + res.add_key(key_2, key_2_weight).expect("should add key 2"); + res.add_key(key_3, key_3_weight).expect("should add key 3"); + res + }; + assert_eq!( + associated_keys.total_keys_weight_excluding(key_2), + Weight::new(identity_key_weight.value() + key_1_weight.value() + key_3_weight.value()) + ); + } + + #[test] + fn overflowing_keys_weight() { + let identity_key = AccountHash::new([1u8; 32]); + let key_1 = AccountHash::new([2u8; 32]); + let key_2 = AccountHash::new([3u8; 32]); + let key_3 = AccountHash::new([4u8; 32]); + + let identity_key_weight = Weight::new(250); + let weight_1 = Weight::new(1); + let weight_2 = Weight::new(2); + let weight_3 = Weight::new(3); + + let saturated_weight = Weight::new(u8::max_value()); + + let associated_keys = { + let mut res = AssociatedKeys::new(identity_key, identity_key_weight); + + res.add_key(key_1, weight_1).expect("should add key 1"); + res.add_key(key_2, weight_2).expect("should add key 2"); + res.add_key(key_3, weight_3).expect("should add key 3"); + res + }; + + assert_eq!( + associated_keys.calculate_keys_weight(&BTreeSet::from_iter(vec![ + identity_key, // 250 + key_1, // 251 + key_2, // 253 + key_3, // 256 - error + ])), + saturated_weight, + ); + } + + #[test] + fn serialization_roundtrip() { + let mut keys = AssociatedKeys::default(); + keys.add_key(AccountHash::new([1; 32]), Weight::new(1)) + .unwrap(); + keys.add_key(AccountHash::new([2; 32]), Weight::new(2)) + .unwrap(); + keys.add_key(AccountHash::new([3; 32]), Weight::new(3)) + .unwrap(); + bytesrepr::test_serialization_roundtrip(&keys); + } +} diff --git a/casper_types_ver_2_0/src/account/error.rs b/casper_types_ver_2_0/src/account/error.rs new file mode 100644 index 00000000..35195fc7 --- /dev/null +++ 
b/casper_types_ver_2_0/src/account/error.rs @@ -0,0 +1,43 @@ +use core::{ + array::TryFromSliceError, + fmt::{self, Display, Formatter}, +}; + +/// Error returned when decoding an `AccountHash` from a formatted string. +#[derive(Debug)] +#[non_exhaustive] +pub enum FromStrError { + /// The prefix is invalid. + InvalidPrefix, + /// The hash is not valid hex. + Hex(base16::DecodeError), + /// The hash is the wrong length. + Hash(TryFromSliceError), +} + +impl From for FromStrError { + fn from(error: base16::DecodeError) -> Self { + FromStrError::Hex(error) + } +} + +impl From for FromStrError { + fn from(error: TryFromSliceError) -> Self { + FromStrError::Hash(error) + } +} + +impl Display for FromStrError { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match self { + FromStrError::InvalidPrefix => write!(f, "prefix is not 'account-hash-'"), + FromStrError::Hex(error) => { + write!(f, "failed to decode address portion from hex: {}", error) + } + FromStrError::Hash(error) => write!(f, "address portion is wrong length: {}", error), + } + } +} +/// Associated error type of `TryFrom<&[u8]>` for [`AccountHash`](super::AccountHash). +#[derive(Debug)] +pub struct TryFromSliceForAccountHashError(()); diff --git a/casper_types_ver_2_0/src/account/weight.rs b/casper_types_ver_2_0/src/account/weight.rs new file mode 100644 index 00000000..f9c87035 --- /dev/null +++ b/casper_types_ver_2_0/src/account/weight.rs @@ -0,0 +1,69 @@ +use alloc::vec::Vec; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; + +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, Error, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + CLType, CLTyped, +}; + +/// The number of bytes in a serialized [`Weight`]. +pub const WEIGHT_SERIALIZED_LENGTH: usize = U8_SERIALIZED_LENGTH; + +/// The weight associated with public keys in an account's associated keys. 
+#[derive(PartialOrd, Ord, PartialEq, Eq, Clone, Copy, Debug, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[cfg_attr( + feature = "json-schema", + schemars(rename = "AccountAssociatedKeyWeight") +)] +pub struct Weight(u8); + +impl Weight { + /// Maximum possible weight. + pub const MAX: Weight = Weight(u8::MAX); + + /// Constructs a new `Weight`. + pub const fn new(weight: u8) -> Weight { + Weight(weight) + } + + /// Returns the value of `self` as a `u8`. + pub fn value(self) -> u8 { + self.0 + } +} + +impl ToBytes for Weight { + fn to_bytes(&self) -> Result, Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + WEIGHT_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + writer.push(self.0); + Ok(()) + } +} + +impl FromBytes for Weight { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (byte, rem) = u8::from_bytes(bytes)?; + Ok((Weight::new(byte), rem)) + } +} + +impl CLTyped for Weight { + fn cl_type() -> CLType { + CLType::U8 + } +} diff --git a/casper_types_ver_2_0/src/addressable_entity.rs b/casper_types_ver_2_0/src/addressable_entity.rs new file mode 100644 index 00000000..11f69c4c --- /dev/null +++ b/casper_types_ver_2_0/src/addressable_entity.rs @@ -0,0 +1,1714 @@ +//! Data types for supporting contract headers feature. +// TODO - remove once schemars stops causing warning. 
+#![allow(clippy::field_reassign_with_default)] + +pub mod action_thresholds; +mod action_type; +pub mod associated_keys; +mod error; +mod named_keys; +mod weight; + +use alloc::{ + collections::{btree_map::Entry, BTreeMap, BTreeSet}, + format, + string::{String, ToString}, + vec::Vec, +}; +use core::{ + array::TryFromSliceError, + convert::TryFrom, + fmt::{self, Debug, Display, Formatter}, + iter, +}; +use num_derive::FromPrimitive; +use num_traits::FromPrimitive; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; +#[cfg(feature = "json-schema")] +use serde_map_to_array::KeyValueJsonSchema; +use serde_map_to_array::{BTreeMapToArray, KeyValueLabels}; + +pub use self::{ + action_thresholds::ActionThresholds, + action_type::ActionType, + associated_keys::AssociatedKeys, + error::{ + FromAccountHashStrError, SetThresholdFailure, TryFromIntError, + TryFromSliceForAccountHashError, + }, + named_keys::NamedKeys, + weight::{Weight, WEIGHT_SERIALIZED_LENGTH}, +}; + +use crate::{ + account::{Account, AccountHash}, + byte_code::ByteCodeHash, + bytesrepr::{self, FromBytes, ToBytes}, + checksummed_hex, + contract_messages::TopicNameHash, + contracts::{Contract, ContractHash}, + key::ByteCodeAddr, + uref::{self, URef}, + AccessRights, ApiError, CLType, CLTyped, ContextAccessRights, Group, HashAddr, Key, + PackageHash, ProtocolVersion, KEY_HASH_LENGTH, +}; + +/// Maximum number of distinct user groups. +pub const MAX_GROUPS: u8 = 10; +/// Maximum number of URefs which can be assigned across all user groups. +pub const MAX_TOTAL_UREFS: usize = 100; + +/// The tag for Contract Packages associated with Wasm stored on chain. +pub const PACKAGE_KIND_WASM_TAG: u8 = 0; +/// The tag for Contract Package associated with a native contract implementation. 
+pub const PACKAGE_KIND_SYSTEM_CONTRACT_TAG: u8 = 1; +/// The tag for Contract Package associated with an Account hash. +pub const PACKAGE_KIND_ACCOUNT_TAG: u8 = 2; +/// The tag for Contract Packages associated with legacy packages. +pub const PACKAGE_KIND_LEGACY_TAG: u8 = 3; + +const ADDRESSABLE_ENTITY_STRING_PREFIX: &str = "addressable-entity-"; + +/// Set of errors which may happen when working with contract headers. +#[derive(Debug, PartialEq, Eq)] +#[repr(u8)] +#[non_exhaustive] +pub enum Error { + /// Attempt to override an existing or previously existing version with a + /// new header (this is not allowed to ensure immutability of a given + /// version). + /// ``` + /// # use casper_types_ver_2_0::addressable_entity::Error; + /// assert_eq!(1, Error::PreviouslyUsedVersion as u8); + /// ``` + PreviouslyUsedVersion = 1, + /// Attempted to disable a contract that does not exist. + /// ``` + /// # use casper_types_ver_2_0::addressable_entity::Error; + /// assert_eq!(2, Error::EntityNotFound as u8); + /// ``` + EntityNotFound = 2, + /// Attempted to create a user group which already exists (use the update + /// function to change an existing user group). + /// ``` + /// # use casper_types_ver_2_0::addressable_entity::Error; + /// assert_eq!(3, Error::GroupAlreadyExists as u8); + /// ``` + GroupAlreadyExists = 3, + /// Attempted to add a new user group which exceeds the allowed maximum + /// number of groups. + /// ``` + /// # use casper_types_ver_2_0::addressable_entity::Error; + /// assert_eq!(4, Error::MaxGroupsExceeded as u8); + /// ``` + MaxGroupsExceeded = 4, + /// Attempted to add a new URef to a group, which resulted in the total + /// number of URefs across all user groups to exceed the allowed maximum. + /// ``` + /// # use casper_types_ver_2_0::addressable_entity::Error; + /// assert_eq!(5, Error::MaxTotalURefsExceeded as u8); + /// ``` + MaxTotalURefsExceeded = 5, + /// Attempted to remove a URef from a group, which does not exist in the + /// group. 
+ /// ``` + /// # use casper_types_ver_2_0::addressable_entity::Error; + /// assert_eq!(6, Error::GroupDoesNotExist as u8); + /// ``` + GroupDoesNotExist = 6, + /// Attempted to remove unknown URef from the group. + /// ``` + /// # use casper_types_ver_2_0::addressable_entity::Error; + /// assert_eq!(7, Error::UnableToRemoveURef as u8); + /// ``` + UnableToRemoveURef = 7, + /// Group is use by at least one active contract. + /// ``` + /// # use casper_types_ver_2_0::addressable_entity::Error; + /// assert_eq!(8, Error::GroupInUse as u8); + /// ``` + GroupInUse = 8, + /// URef already exists in given group. + /// ``` + /// # use casper_types_ver_2_0::addressable_entity::Error; + /// assert_eq!(9, Error::URefAlreadyExists as u8); + /// ``` + URefAlreadyExists = 9, +} + +impl TryFrom for Error { + type Error = (); + + fn try_from(value: u8) -> Result { + let error = match value { + v if v == Self::PreviouslyUsedVersion as u8 => Self::PreviouslyUsedVersion, + v if v == Self::EntityNotFound as u8 => Self::EntityNotFound, + v if v == Self::GroupAlreadyExists as u8 => Self::GroupAlreadyExists, + v if v == Self::MaxGroupsExceeded as u8 => Self::MaxGroupsExceeded, + v if v == Self::MaxTotalURefsExceeded as u8 => Self::MaxTotalURefsExceeded, + v if v == Self::GroupDoesNotExist as u8 => Self::GroupDoesNotExist, + v if v == Self::UnableToRemoveURef as u8 => Self::UnableToRemoveURef, + v if v == Self::GroupInUse as u8 => Self::GroupInUse, + v if v == Self::URefAlreadyExists as u8 => Self::URefAlreadyExists, + _ => return Err(()), + }; + Ok(error) + } +} + +/// Associated error type of `TryFrom<&[u8]>` for `ContractHash`. 
+#[derive(Debug)] +pub struct TryFromSliceForContractHashError(()); + +impl Display for TryFromSliceForContractHashError { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!(f, "failed to retrieve from slice") + } +} + +/// An error from parsing a formatted contract string +#[derive(Debug)] +#[non_exhaustive] +pub enum FromStrError { + /// Invalid formatted string prefix. + InvalidPrefix, + /// Error when decoding a hex string + Hex(base16::DecodeError), + /// Error when parsing an account + Account(TryFromSliceForAccountHashError), + /// Error when parsing the hash. + Hash(TryFromSliceError), + /// Error when parsing an uref. + URef(uref::FromStrError), +} + +impl From for FromStrError { + fn from(error: base16::DecodeError) -> Self { + FromStrError::Hex(error) + } +} + +impl From for FromStrError { + fn from(error: TryFromSliceError) -> Self { + FromStrError::Hash(error) + } +} + +impl From for FromStrError { + fn from(error: uref::FromStrError) -> Self { + FromStrError::URef(error) + } +} + +impl Display for FromStrError { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match self { + FromStrError::InvalidPrefix => write!(f, "invalid prefix"), + FromStrError::Hex(error) => write!(f, "decode from hex: {}", error), + FromStrError::Hash(error) => write!(f, "hash from string error: {}", error), + FromStrError::URef(error) => write!(f, "uref from string error: {:?}", error), + FromStrError::Account(error) => { + write!(f, "account hash from string error: {:?}", error) + } + } + } +} + +/// A newtype wrapping a `HashAddr` which references an [`AddressableEntity`] in the global state. 
+#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "The hex-encoded address of the addressable entity.") +)] +pub struct AddressableEntityHash( + #[cfg_attr(feature = "json-schema", schemars(skip, with = "String"))] HashAddr, +); + +impl AddressableEntityHash { + /// Constructs a new `AddressableEntityHash` from the raw bytes of the contract hash. + pub const fn new(value: HashAddr) -> AddressableEntityHash { + AddressableEntityHash(value) + } + + /// Returns the raw bytes of the contract hash as an array. + pub fn value(&self) -> HashAddr { + self.0 + } + + /// Returns the raw bytes of the contract hash as a `slice`. + pub fn as_bytes(&self) -> &[u8] { + &self.0 + } + + /// Formats the `AddressableEntityHash` for users getting and putting. + pub fn to_formatted_string(self) -> String { + format!( + "{}{}", + ADDRESSABLE_ENTITY_STRING_PREFIX, + base16::encode_lower(&self.0), + ) + } + + /// Parses a string formatted as per `Self::to_formatted_string()` into a + /// `AddressableEntityHash`. 
+ pub fn from_formatted_str(input: &str) -> Result { + let remainder = input + .strip_prefix(ADDRESSABLE_ENTITY_STRING_PREFIX) + .ok_or(FromStrError::InvalidPrefix)?; + let bytes = HashAddr::try_from(checksummed_hex::decode(remainder)?.as_ref())?; + Ok(AddressableEntityHash(bytes)) + } +} + +impl From for AddressableEntityHash { + fn from(contract_hash: ContractHash) -> Self { + AddressableEntityHash::new(contract_hash.value()) + } +} + +impl Display for AddressableEntityHash { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + write!(f, "{}", base16::encode_lower(&self.0)) + } +} + +impl Debug for AddressableEntityHash { + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + write!( + f, + "AddressableEntityHash({})", + base16::encode_lower(&self.0) + ) + } +} + +impl CLTyped for AddressableEntityHash { + fn cl_type() -> CLType { + CLType::ByteArray(KEY_HASH_LENGTH as u32) + } +} + +impl ToBytes for AddressableEntityHash { + #[inline(always)] + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + #[inline(always)] + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + #[inline(always)] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + writer.extend_from_slice(&self.0); + Ok(()) + } +} + +impl FromBytes for AddressableEntityHash { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (bytes, rem) = FromBytes::from_bytes(bytes)?; + Ok((AddressableEntityHash::new(bytes), rem)) + } +} + +impl From<[u8; 32]> for AddressableEntityHash { + fn from(bytes: [u8; 32]) -> Self { + AddressableEntityHash(bytes) + } +} + +impl TryFrom for AddressableEntityHash { + type Error = ApiError; + + fn try_from(value: Key) -> Result { + if let Key::AddressableEntity(_, entity_addr) = value { + Ok(AddressableEntityHash::new(entity_addr)) + } else { + Err(ApiError::Formatting) + } + } +} + +impl Serialize for AddressableEntityHash { + fn serialize(&self, serializer: S) 
-> Result { + if serializer.is_human_readable() { + self.to_formatted_string().serialize(serializer) + } else { + self.0.serialize(serializer) + } + } +} + +impl<'de> Deserialize<'de> for AddressableEntityHash { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let formatted_string = String::deserialize(deserializer)?; + AddressableEntityHash::from_formatted_str(&formatted_string).map_err(SerdeError::custom) + } else { + let bytes = HashAddr::deserialize(deserializer)?; + Ok(AddressableEntityHash(bytes)) + } + } +} + +impl AsRef<[u8]> for AddressableEntityHash { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + +impl TryFrom<&[u8]> for AddressableEntityHash { + type Error = TryFromSliceForContractHashError; + + fn try_from(bytes: &[u8]) -> Result { + HashAddr::try_from(bytes) + .map(AddressableEntityHash::new) + .map_err(|_| TryFromSliceForContractHashError(())) + } +} + +impl TryFrom<&Vec> for AddressableEntityHash { + type Error = TryFromSliceForContractHashError; + + fn try_from(bytes: &Vec) -> Result { + HashAddr::try_from(bytes as &[u8]) + .map(AddressableEntityHash::new) + .map_err(|_| TryFromSliceForContractHashError(())) + } +} + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> AddressableEntityHash { + AddressableEntityHash(rng.gen()) + } +} + +/// Errors that can occur while adding a new [`AccountHash`] to an account's associated keys map. +#[derive(PartialEq, Eq, Debug, Copy, Clone)] +#[repr(i32)] +#[non_exhaustive] +pub enum AddKeyFailure { + /// There are already maximum [`AccountHash`]s associated with the given account. + MaxKeysLimit = 1, + /// The given [`AccountHash`] is already associated with the given account. + DuplicateKey = 2, + /// Caller doesn't have sufficient permissions to associate a new [`AccountHash`] with the + /// given account. 
+ PermissionDenied = 3, +} + +impl Display for AddKeyFailure { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + AddKeyFailure::MaxKeysLimit => formatter.write_str( + "Unable to add new associated key because maximum amount of keys is reached", + ), + AddKeyFailure::DuplicateKey => formatter + .write_str("Unable to add new associated key because given key already exists"), + AddKeyFailure::PermissionDenied => formatter + .write_str("Unable to add new associated key due to insufficient permissions"), + } + } +} + +// This conversion is not intended to be used by third party crates. +#[doc(hidden)] +impl TryFrom for AddKeyFailure { + type Error = TryFromIntError; + + fn try_from(value: i32) -> Result { + match value { + d if d == AddKeyFailure::MaxKeysLimit as i32 => Ok(AddKeyFailure::MaxKeysLimit), + d if d == AddKeyFailure::DuplicateKey as i32 => Ok(AddKeyFailure::DuplicateKey), + d if d == AddKeyFailure::PermissionDenied as i32 => Ok(AddKeyFailure::PermissionDenied), + _ => Err(TryFromIntError(())), + } + } +} + +/// Errors that can occur while removing a [`AccountHash`] from an account's associated keys map. +#[derive(Debug, Eq, PartialEq, Copy, Clone)] +#[repr(i32)] +#[non_exhaustive] +pub enum RemoveKeyFailure { + /// The given [`AccountHash`] is not associated with the given account. + MissingKey = 1, + /// Caller doesn't have sufficient permissions to remove an associated [`AccountHash`] from the + /// given account. + PermissionDenied = 2, + /// Removing the given associated [`AccountHash`] would cause the total weight of all remaining + /// `AccountHash`s to fall below one of the action thresholds for the given account. 
+ ThresholdViolation = 3, +} + +impl Display for RemoveKeyFailure { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + RemoveKeyFailure::MissingKey => { + formatter.write_str("Unable to remove a key that does not exist") + } + RemoveKeyFailure::PermissionDenied => formatter + .write_str("Unable to remove associated key due to insufficient permissions"), + RemoveKeyFailure::ThresholdViolation => formatter.write_str( + "Unable to remove a key which would violate action threshold constraints", + ), + } + } +} + +// This conversion is not intended to be used by third party crates. +#[doc(hidden)] +impl TryFrom for RemoveKeyFailure { + type Error = TryFromIntError; + + fn try_from(value: i32) -> Result { + match value { + d if d == RemoveKeyFailure::MissingKey as i32 => Ok(RemoveKeyFailure::MissingKey), + d if d == RemoveKeyFailure::PermissionDenied as i32 => { + Ok(RemoveKeyFailure::PermissionDenied) + } + d if d == RemoveKeyFailure::ThresholdViolation as i32 => { + Ok(RemoveKeyFailure::ThresholdViolation) + } + _ => Err(TryFromIntError(())), + } + } +} + +/// Errors that can occur while updating the [`Weight`] of a [`AccountHash`] in an account's +/// associated keys map. +#[derive(PartialEq, Eq, Debug, Copy, Clone)] +#[repr(i32)] +#[non_exhaustive] +pub enum UpdateKeyFailure { + /// The given [`AccountHash`] is not associated with the given account. + MissingKey = 1, + /// Caller doesn't have sufficient permissions to update an associated [`AccountHash`] from the + /// given account. + PermissionDenied = 2, + /// Updating the [`Weight`] of the given associated [`AccountHash`] would cause the total + /// weight of all `AccountHash`s to fall below one of the action thresholds for the given + /// account. 
+ ThresholdViolation = 3, +} + +impl Display for UpdateKeyFailure { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + UpdateKeyFailure::MissingKey => formatter.write_str( + "Unable to update the value under an associated key that does not exist", + ), + UpdateKeyFailure::PermissionDenied => formatter + .write_str("Unable to update associated key due to insufficient permissions"), + UpdateKeyFailure::ThresholdViolation => formatter.write_str( + "Unable to update weight that would fall below any of action thresholds", + ), + } + } +} + +// This conversion is not intended to be used by third party crates. +#[doc(hidden)] +impl TryFrom for UpdateKeyFailure { + type Error = TryFromIntError; + + fn try_from(value: i32) -> Result { + match value { + d if d == UpdateKeyFailure::MissingKey as i32 => Ok(UpdateKeyFailure::MissingKey), + d if d == UpdateKeyFailure::PermissionDenied as i32 => { + Ok(UpdateKeyFailure::PermissionDenied) + } + d if d == UpdateKeyFailure::ThresholdViolation as i32 => { + Ok(UpdateKeyFailure::ThresholdViolation) + } + _ => Err(TryFromIntError(())), + } + } +} + +/// Collection of named entry points. 
+#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(transparent, deny_unknown_fields)] +pub struct EntryPoints( + #[serde(with = "BTreeMapToArray::")] + BTreeMap, +); + +impl ToBytes for EntryPoints { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } +} + +impl FromBytes for EntryPoints { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (entry_points_map, remainder) = BTreeMap::::from_bytes(bytes)?; + Ok((EntryPoints(entry_points_map), remainder)) + } +} + +impl Default for EntryPoints { + fn default() -> Self { + let mut entry_points = EntryPoints::new(); + let entry_point = EntryPoint::default(); + entry_points.add_entry_point(entry_point); + entry_points + } +} + +impl EntryPoints { + /// Constructs a new, empty `EntryPoints`. + pub const fn new() -> EntryPoints { + EntryPoints(BTreeMap::::new()) + } + + /// Constructs a new `EntryPoints` with a single entry for the default `EntryPoint`. + pub fn new_with_default_entry_point() -> Self { + let mut entry_points = EntryPoints::new(); + let entry_point = EntryPoint::default(); + entry_points.add_entry_point(entry_point); + entry_points + } + + /// Adds new [`EntryPoint`]. + pub fn add_entry_point(&mut self, entry_point: EntryPoint) { + self.0.insert(entry_point.name().to_string(), entry_point); + } + + /// Checks if given [`EntryPoint`] exists. + pub fn has_entry_point(&self, entry_point_name: &str) -> bool { + self.0.contains_key(entry_point_name) + } + + /// Gets an existing [`EntryPoint`] by its name. 
+ pub fn get(&self, entry_point_name: &str) -> Option<&EntryPoint> { + self.0.get(entry_point_name) + } + + /// Returns iterator for existing entry point names. + pub fn keys(&self) -> impl Iterator { + self.0.keys() + } + + /// Takes all entry points. + pub fn take_entry_points(self) -> Vec { + self.0.into_values().collect() + } + + /// Returns the length of the entry points + pub fn len(&self) -> usize { + self.0.len() + } + + /// Checks if the `EntryPoints` is empty. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Checks if any of the entry points are of the type Session. + pub fn contains_stored_session(&self) -> bool { + self.0 + .values() + .any(|entry_point| entry_point.entry_point_type == EntryPointType::Session) + } +} + +impl From> for EntryPoints { + fn from(entry_points: Vec) -> EntryPoints { + let entries = entry_points + .into_iter() + .map(|entry_point| (String::from(entry_point.name()), entry_point)) + .collect(); + EntryPoints(entries) + } +} + +struct EntryPointLabels; + +impl KeyValueLabels for EntryPointLabels { + const KEY: &'static str = "name"; + const VALUE: &'static str = "entry_point"; +} + +#[cfg(feature = "json-schema")] +impl KeyValueJsonSchema for EntryPointLabels { + const JSON_SCHEMA_KV_NAME: Option<&'static str> = Some("NamedEntryPoint"); +} + +/// Collection of named message topics. 
+#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug, Default)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(transparent, deny_unknown_fields)] +pub struct MessageTopics( + #[serde(with = "BTreeMapToArray::")] + BTreeMap, +); + +impl ToBytes for MessageTopics { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } +} + +impl FromBytes for MessageTopics { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (message_topics_map, remainder) = BTreeMap::::from_bytes(bytes)?; + Ok((MessageTopics(message_topics_map), remainder)) + } +} + +impl MessageTopics { + /// Adds new message topic by topic name. + pub fn add_topic( + &mut self, + topic_name: &str, + topic_name_hash: TopicNameHash, + ) -> Result<(), MessageTopicError> { + if self.0.len() >= u32::MAX as usize { + return Err(MessageTopicError::MaxTopicsExceeded); + } + + match self.0.entry(topic_name.to_string()) { + Entry::Vacant(entry) => { + entry.insert(topic_name_hash); + Ok(()) + } + Entry::Occupied(_) => Err(MessageTopicError::DuplicateTopic), + } + } + + /// Checks if given topic name exists. + pub fn has_topic(&self, topic_name: &str) -> bool { + self.0.contains_key(topic_name) + } + + /// Gets the topic hash from the collection by its topic name. + pub fn get(&self, topic_name: &str) -> Option<&TopicNameHash> { + self.0.get(topic_name) + } + + /// Returns the length of the message topics. + pub fn len(&self) -> usize { + self.0.len() + } + + /// Returns true if no message topics are registered. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Returns an iterator over the topic name and its hash. 
+ pub fn iter(&self) -> impl Iterator { + self.0.iter() + } +} + +struct MessageTopicLabels; + +impl KeyValueLabels for MessageTopicLabels { + const KEY: &'static str = "topic_name"; + const VALUE: &'static str = "topic_name_hash"; +} + +#[cfg(feature = "json-schema")] +impl KeyValueJsonSchema for MessageTopicLabels { + const JSON_SCHEMA_KV_NAME: Option<&'static str> = Some("MessageTopic"); +} + +impl From> for MessageTopics { + fn from(topics: BTreeMap) -> MessageTopics { + MessageTopics(topics) + } +} + +/// Errors that can occur while adding a new topic. +#[derive(PartialEq, Eq, Debug, Clone)] +#[non_exhaustive] +pub enum MessageTopicError { + /// Topic already exists. + DuplicateTopic, + /// Maximum number of topics exceeded. + MaxTopicsExceeded, + /// Topic name size exceeded. + TopicNameSizeExceeded, +} + +/// Methods and type signatures supported by a contract. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct AddressableEntity { + package_hash: PackageHash, + byte_code_hash: ByteCodeHash, + named_keys: NamedKeys, + entry_points: EntryPoints, + protocol_version: ProtocolVersion, + main_purse: URef, + associated_keys: AssociatedKeys, + action_thresholds: ActionThresholds, + message_topics: MessageTopics, +} + +impl From + for ( + PackageHash, + ByteCodeHash, + NamedKeys, + EntryPoints, + ProtocolVersion, + URef, + AssociatedKeys, + ActionThresholds, + ) +{ + fn from(entity: AddressableEntity) -> Self { + ( + entity.package_hash, + entity.byte_code_hash, + entity.named_keys, + entity.entry_points, + entity.protocol_version, + entity.main_purse, + entity.associated_keys, + entity.action_thresholds, + ) + } +} + +impl AddressableEntity { + /// `AddressableEntity` constructor. 
+ #[allow(clippy::too_many_arguments)] + pub fn new( + package_hash: PackageHash, + byte_code_hash: ByteCodeHash, + named_keys: NamedKeys, + entry_points: EntryPoints, + protocol_version: ProtocolVersion, + main_purse: URef, + associated_keys: AssociatedKeys, + action_thresholds: ActionThresholds, + message_topics: MessageTopics, + ) -> Self { + AddressableEntity { + package_hash, + byte_code_hash, + named_keys, + entry_points, + protocol_version, + main_purse, + action_thresholds, + associated_keys, + message_topics, + } + } + + /// Hash for accessing contract package + pub fn package_hash(&self) -> PackageHash { + self.package_hash + } + + /// Hash for accessing contract WASM + pub fn byte_code_hash(&self) -> ByteCodeHash { + self.byte_code_hash + } + + /// Checks whether there is a method with the given name + pub fn has_entry_point(&self, name: &str) -> bool { + self.entry_points.has_entry_point(name) + } + + /// Returns the type signature for the given `method`. + pub fn entry_point(&self, method: &str) -> Option<&EntryPoint> { + self.entry_points.get(method) + } + + /// Get the protocol version this header is targeting. + pub fn protocol_version(&self) -> ProtocolVersion { + self.protocol_version + } + + /// Returns main purse. + pub fn main_purse(&self) -> URef { + self.main_purse + } + + /// Returns an [`AccessRights::ADD`]-only version of the main purse's [`URef`]. + pub fn main_purse_add_only(&self) -> URef { + URef::new(self.main_purse.addr(), AccessRights::ADD) + } + + /// Returns associated keys. + pub fn associated_keys(&self) -> &AssociatedKeys { + &self.associated_keys + } + + /// Returns action thresholds. + pub fn action_thresholds(&self) -> &ActionThresholds { + &self.action_thresholds + } + + /// Adds an associated key to an addressable entity. 
+ pub fn add_associated_key( + &mut self, + account_hash: AccountHash, + weight: Weight, + ) -> Result<(), AddKeyFailure> { + self.associated_keys.add_key(account_hash, weight) + } + + /// Checks if removing given key would properly satisfy thresholds. + fn can_remove_key(&self, account_hash: AccountHash) -> bool { + let total_weight_without = self + .associated_keys + .total_keys_weight_excluding(account_hash); + + // Returns true if the total weight calculated without given public key would be greater or + // equal to all of the thresholds. + total_weight_without >= *self.action_thresholds().deployment() + && total_weight_without >= *self.action_thresholds().key_management() + } + + /// Checks if adding a weight to a sum of all weights excluding the given key would make the + /// resulting value to fall below any of the thresholds on account. + fn can_update_key(&self, account_hash: AccountHash, weight: Weight) -> bool { + // Calculates total weight of all keys excluding the given key + let total_weight = self + .associated_keys + .total_keys_weight_excluding(account_hash); + + // Safely calculate new weight by adding the updated weight + let new_weight = total_weight.value().saturating_add(weight.value()); + + // Returns true if the new weight would be greater or equal to all of + // the thresholds. + new_weight >= self.action_thresholds().deployment().value() + && new_weight >= self.action_thresholds().key_management().value() + } + + /// Removes an associated key from an addressable entity. + /// + /// Verifies that removing the key will not cause the remaining weight to fall below any action + /// thresholds. 
+ pub fn remove_associated_key( + &mut self, + account_hash: AccountHash, + ) -> Result<(), RemoveKeyFailure> { + if self.associated_keys.contains_key(&account_hash) { + // Check if removing this weight would fall below thresholds + if !self.can_remove_key(account_hash) { + return Err(RemoveKeyFailure::ThresholdViolation); + } + } + self.associated_keys.remove_key(&account_hash) + } + + /// Updates an associated key. + /// + /// Returns an error if the update would result in a violation of the key management thresholds. + pub fn update_associated_key( + &mut self, + account_hash: AccountHash, + weight: Weight, + ) -> Result<(), UpdateKeyFailure> { + if let Some(current_weight) = self.associated_keys.get(&account_hash) { + if weight < *current_weight { + // New weight is smaller than current weight + if !self.can_update_key(account_hash, weight) { + return Err(UpdateKeyFailure::ThresholdViolation); + } + } + } + self.associated_keys.update_key(account_hash, weight) + } + + /// Sets new action threshold for a given action type for the addressable entity. + /// + /// Returns an error if the new action threshold weight is greater than the total weight of the + /// account's associated keys. + pub fn set_action_threshold( + &mut self, + action_type: ActionType, + weight: Weight, + ) -> Result<(), SetThresholdFailure> { + // Verify if new threshold weight exceeds total weight of all associated + // keys. + self.can_set_threshold(weight)?; + // Set new weight for given action + self.action_thresholds.set_threshold(action_type, weight) + } + + /// Sets a new action threshold for a given action type for the account without checking against + /// the total weight of the associated keys. + /// + /// This should only be called when authorized by an administrator account. + /// + /// Returns an error if setting the action would cause the `ActionType::Deployment` threshold to + /// be greater than any of the other action types. 
+    pub fn set_action_threshold_unchecked(
+        &mut self,
+        action_type: ActionType,
+        threshold: Weight,
+    ) -> Result<(), SetThresholdFailure> {
+        self.action_thresholds.set_threshold(action_type, threshold)
+    }
+
+    /// Verifies if user can set action threshold.
+    pub fn can_set_threshold(&self, new_threshold: Weight) -> Result<(), SetThresholdFailure> {
+        let total_weight = self.associated_keys.total_keys_weight();
+        if new_threshold > total_weight {
+            return Err(SetThresholdFailure::InsufficientTotalWeight);
+        }
+        Ok(())
+    }
+
+    /// Checks whether all authorization keys are associated with this addressable entity.
+    pub fn can_authorize(&self, authorization_keys: &BTreeSet<AccountHash>) -> bool {
+        !authorization_keys.is_empty()
+            && authorization_keys
+                .iter()
+                .any(|e| self.associated_keys.contains_key(e))
+    }
+
+    /// Checks whether the sum of the weights of all authorization keys is
+    /// greater or equal to deploy threshold.
+    pub fn can_deploy_with(&self, authorization_keys: &BTreeSet<AccountHash>) -> bool {
+        let total_weight = self
+            .associated_keys
+            .calculate_keys_weight(authorization_keys);
+
+        total_weight >= *self.action_thresholds().deployment()
+    }
+
+    /// Checks whether the sum of the weights of all authorization keys is
+    /// greater or equal to key management threshold.
+    pub fn can_manage_keys_with(&self, authorization_keys: &BTreeSet<AccountHash>) -> bool {
+        let total_weight = self
+            .associated_keys
+            .calculate_keys_weight(authorization_keys);
+
+        total_weight >= *self.action_thresholds().key_management()
+    }
+
+    /// Checks whether the sum of the weights of all authorization keys is
+    /// greater or equal to upgrade management threshold.
+    pub fn can_upgrade_with(&self, authorization_keys: &BTreeSet<AccountHash>) -> bool {
+        let total_weight = self
+            .associated_keys
+            .calculate_keys_weight(authorization_keys);
+
+        total_weight >= *self.action_thresholds().upgrade_management()
+    }
+
+    /// Adds new entry point
+    pub fn add_entry_point(&mut self, entry_point: EntryPoint) {
+        self.entry_points.add_entry_point(entry_point);
+    }
+
+    /// Addr for accessing wasm bytes
+    pub fn byte_code_addr(&self) -> ByteCodeAddr {
+        self.byte_code_hash.value()
+    }
+
+    /// Returns immutable reference to methods
+    pub fn entry_points(&self) -> &EntryPoints {
+        &self.entry_points
+    }
+
+    /// Returns a reference to the message topics
+    pub fn message_topics(&self) -> &MessageTopics {
+        &self.message_topics
+    }
+
+    /// Adds a new message topic to the entity
+    pub fn add_message_topic(
+        &mut self,
+        topic_name: &str,
+        topic_name_hash: TopicNameHash,
+    ) -> Result<(), MessageTopicError> {
+        self.message_topics.add_topic(topic_name, topic_name_hash)
+    }
+
+    /// Takes `named_keys`
+    pub fn take_named_keys(self) -> NamedKeys {
+        self.named_keys
+    }
+
+    /// Returns a reference to `named_keys`
+    pub fn named_keys(&self) -> &NamedKeys {
+        &self.named_keys
+    }
+
+    /// Appends `keys` to `named_keys`
+    pub fn named_keys_append(&mut self, keys: NamedKeys) {
+        self.named_keys.append(keys);
+    }
+
+    /// Removes given named key.
+    pub fn remove_named_key(&mut self, key: &str) -> Option<Key> {
+        self.named_keys.remove(key)
+    }
+
+    /// Set protocol_version.
+    pub fn set_protocol_version(&mut self, protocol_version: ProtocolVersion) {
+        self.protocol_version = protocol_version;
+    }
+
+    /// Determines if `AddressableEntity` is compatible with a given `ProtocolVersion`.
+    pub fn is_compatible_protocol_version(&self, protocol_version: ProtocolVersion) -> bool {
+        self.protocol_version.value().major == protocol_version.value().major
+    }
+
+    /// Extracts the access rights from the named keys of the addressable entity.
+ pub fn extract_access_rights(&self, entity_hash: AddressableEntityHash) -> ContextAccessRights { + let urefs_iter = self + .named_keys + .keys() + .filter_map(|key| key.as_uref().copied()) + .chain(iter::once(self.main_purse)); + ContextAccessRights::new(entity_hash, urefs_iter) + } + + /// Update the byte code hash for a given Entity associated with an Account. + pub fn update_session_entity( + self, + byte_code_hash: ByteCodeHash, + entry_points: EntryPoints, + ) -> Self { + Self { + package_hash: self.package_hash, + byte_code_hash, + named_keys: self.named_keys, + entry_points, + protocol_version: self.protocol_version, + main_purse: self.main_purse, + associated_keys: self.associated_keys, + action_thresholds: self.action_thresholds, + message_topics: self.message_topics, + } + } +} + +impl ToBytes for AddressableEntity { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + self.package_hash().write_bytes(&mut result)?; + self.byte_code_hash().write_bytes(&mut result)?; + self.named_keys().write_bytes(&mut result)?; + self.entry_points().write_bytes(&mut result)?; + self.protocol_version().write_bytes(&mut result)?; + self.main_purse().write_bytes(&mut result)?; + self.associated_keys().write_bytes(&mut result)?; + self.action_thresholds().write_bytes(&mut result)?; + self.message_topics().write_bytes(&mut result)?; + Ok(result) + } + + fn serialized_length(&self) -> usize { + ToBytes::serialized_length(&self.entry_points) + + ToBytes::serialized_length(&self.package_hash) + + ToBytes::serialized_length(&self.byte_code_hash) + + ToBytes::serialized_length(&self.protocol_version) + + ToBytes::serialized_length(&self.named_keys) + + ToBytes::serialized_length(&self.main_purse) + + ToBytes::serialized_length(&self.associated_keys) + + ToBytes::serialized_length(&self.action_thresholds) + + ToBytes::serialized_length(&self.message_topics) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), 
bytesrepr::Error> { + self.package_hash().write_bytes(writer)?; + self.byte_code_hash().write_bytes(writer)?; + self.named_keys().write_bytes(writer)?; + self.entry_points().write_bytes(writer)?; + self.protocol_version().write_bytes(writer)?; + self.main_purse().write_bytes(writer)?; + self.associated_keys().write_bytes(writer)?; + self.action_thresholds().write_bytes(writer)?; + self.message_topics().write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for AddressableEntity { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (package_hash, bytes) = PackageHash::from_bytes(bytes)?; + let (contract_wasm_hash, bytes) = ByteCodeHash::from_bytes(bytes)?; + let (named_keys, bytes) = NamedKeys::from_bytes(bytes)?; + let (entry_points, bytes) = EntryPoints::from_bytes(bytes)?; + let (protocol_version, bytes) = ProtocolVersion::from_bytes(bytes)?; + let (main_purse, bytes) = URef::from_bytes(bytes)?; + let (associated_keys, bytes) = AssociatedKeys::from_bytes(bytes)?; + let (action_thresholds, bytes) = ActionThresholds::from_bytes(bytes)?; + let (message_topics, bytes) = MessageTopics::from_bytes(bytes)?; + Ok(( + AddressableEntity { + package_hash, + byte_code_hash: contract_wasm_hash, + named_keys, + entry_points, + protocol_version, + main_purse, + associated_keys, + action_thresholds, + message_topics, + }, + bytes, + )) + } +} + +impl Default for AddressableEntity { + fn default() -> Self { + AddressableEntity { + named_keys: NamedKeys::new(), + entry_points: EntryPoints::new_with_default_entry_point(), + byte_code_hash: [0; KEY_HASH_LENGTH].into(), + package_hash: [0; KEY_HASH_LENGTH].into(), + protocol_version: ProtocolVersion::V1_0_0, + main_purse: URef::default(), + action_thresholds: ActionThresholds::default(), + associated_keys: AssociatedKeys::default(), + message_topics: MessageTopics::default(), + } + } +} + +impl From for AddressableEntity { + fn from(value: Contract) -> Self { + AddressableEntity::new( + 
PackageHash::new(value.contract_package_hash().value()), + ByteCodeHash::new(value.contract_wasm_hash().value()), + value.named_keys().clone(), + value.entry_points().clone(), + value.protocol_version(), + URef::default(), + AssociatedKeys::default(), + ActionThresholds::default(), + MessageTopics::default(), + ) + } +} + +impl From for AddressableEntity { + fn from(value: Account) -> Self { + AddressableEntity::new( + PackageHash::default(), + ByteCodeHash::new([0u8; 32]), + value.named_keys().clone(), + EntryPoints::new(), + ProtocolVersion::default(), + value.main_purse(), + value.associated_keys().clone().into(), + value.action_thresholds().clone().into(), + MessageTopics::default(), + ) + } +} + +/// Context of method execution +/// +/// Most significant bit represents version i.e. +/// - 0b0 -> 0.x/1.x (session & contracts) +/// - 0b1 -> 2.x and later (introduced installer, utility entry points) +#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, FromPrimitive)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum EntryPointType { + /// Runs as session code (caller) + /// Deprecated, retained to allow read back of legacy stored session. + Session = 0b00000000, + /// Runs within called entity's context (called) + AddressableEntity = 0b00000001, + /// This entry point is intended to extract a subset of bytecode. + /// Runs within called entity's context (called) + Factory = 0b10000000, +} + +impl EntryPointType { + /// Checks if entry point type is introduced before 2.0. + /// + /// This method checks if there is a bit pattern for entry point types introduced in 2.0. + /// + /// If this bit is missing, that means given entry point type was defined in pre-2.0 world. + pub fn is_legacy_pattern(&self) -> bool { + (*self as u8) & 0b10000000 == 0 + } + + /// Get the bit pattern. 
+ pub fn bits(self) -> u8 { + self as u8 + } + + /// Returns true if entry point type is invalid for the context. + pub fn is_invalid_context(&self) -> bool { + match self { + EntryPointType::Session => true, + EntryPointType::AddressableEntity | EntryPointType::Factory => false, + } + } +} + +impl ToBytes for EntryPointType { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.bits().to_bytes() + } + + fn serialized_length(&self) -> usize { + 1 + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + writer.push(self.bits()); + Ok(()) + } +} + +impl FromBytes for EntryPointType { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (value, bytes) = u8::from_bytes(bytes)?; + let entry_point_type = + EntryPointType::from_u8(value).ok_or(bytesrepr::Error::Formatting)?; + Ok((entry_point_type, bytes)) + } +} + +/// Default name for an entry point. +pub const DEFAULT_ENTRY_POINT_NAME: &str = "call"; + +/// Name for an installer entry point. +pub const INSTALL_ENTRY_POINT_NAME: &str = "install"; + +/// Name for an upgrade entry point. +pub const UPGRADE_ENTRY_POINT_NAME: &str = "upgrade"; + +/// Collection of entry point parameters. +pub type Parameters = Vec; + +/// Type signature of a method. Order of arguments matter since can be +/// referenced by index as well as name. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct EntryPoint { + name: String, + args: Parameters, + ret: CLType, + access: EntryPointAccess, + entry_point_type: EntryPointType, +} + +impl From for (String, Parameters, CLType, EntryPointAccess, EntryPointType) { + fn from(entry_point: EntryPoint) -> Self { + ( + entry_point.name, + entry_point.args, + entry_point.ret, + entry_point.access, + entry_point.entry_point_type, + ) + } +} + +impl EntryPoint { + /// `EntryPoint` constructor. 
+ pub fn new>( + name: T, + args: Parameters, + ret: CLType, + access: EntryPointAccess, + entry_point_type: EntryPointType, + ) -> Self { + EntryPoint { + name: name.into(), + args, + ret, + access, + entry_point_type, + } + } + + /// Create a default [`EntryPoint`] with specified name. + pub fn default_with_name>(name: T) -> Self { + EntryPoint { + name: name.into(), + ..Default::default() + } + } + + /// Get name. + pub fn name(&self) -> &str { + &self.name + } + + /// Get access enum. + pub fn access(&self) -> &EntryPointAccess { + &self.access + } + + /// Get the arguments for this method. + pub fn args(&self) -> &[Parameter] { + self.args.as_slice() + } + + /// Get the return type. + pub fn ret(&self) -> &CLType { + &self.ret + } + + /// Obtains entry point + pub fn entry_point_type(&self) -> EntryPointType { + self.entry_point_type + } +} + +impl Default for EntryPoint { + /// constructor for a public session `EntryPoint` that takes no args and returns `Unit` + fn default() -> Self { + EntryPoint { + name: DEFAULT_ENTRY_POINT_NAME.to_string(), + args: Vec::new(), + ret: CLType::Unit, + access: EntryPointAccess::Public, + entry_point_type: EntryPointType::Session, + } + } +} + +impl ToBytes for EntryPoint { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.name.serialized_length() + + self.args.serialized_length() + + self.ret.serialized_length() + + self.access.serialized_length() + + self.entry_point_type.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.name.write_bytes(writer)?; + self.args.write_bytes(writer)?; + self.ret.append_bytes(writer)?; + self.access.write_bytes(writer)?; + self.entry_point_type.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for EntryPoint { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), 
bytesrepr::Error> { + let (name, bytes) = String::from_bytes(bytes)?; + let (args, bytes) = Vec::::from_bytes(bytes)?; + let (ret, bytes) = CLType::from_bytes(bytes)?; + let (access, bytes) = EntryPointAccess::from_bytes(bytes)?; + let (entry_point_type, bytes) = EntryPointType::from_bytes(bytes)?; + + Ok(( + EntryPoint { + name, + args, + ret, + access, + entry_point_type, + }, + bytes, + )) + } +} + +/// Enum describing the possible access control options for a contract entry +/// point (method). +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum EntryPointAccess { + /// Anyone can call this method (no access controls). + Public, + /// Only users from the listed groups may call this method. Note: if the + /// list is empty then this method is not callable from outside the + /// contract. + Groups(Vec), + /// Can't be accessed directly but are kept in the derived wasm bytes. + Template, +} + +const ENTRYPOINTACCESS_PUBLIC_TAG: u8 = 1; +const ENTRYPOINTACCESS_GROUPS_TAG: u8 = 2; +const ENTRYPOINTACCESS_ABSTRACT_TAG: u8 = 3; + +impl EntryPointAccess { + /// Constructor for access granted to only listed groups. 
+ pub fn groups(labels: &[&str]) -> Self { + let list: Vec = labels + .iter() + .map(|s| Group::new(String::from(*s))) + .collect(); + EntryPointAccess::Groups(list) + } +} + +impl ToBytes for EntryPointAccess { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + + match self { + EntryPointAccess::Public => { + result.push(ENTRYPOINTACCESS_PUBLIC_TAG); + } + EntryPointAccess::Groups(groups) => { + result.push(ENTRYPOINTACCESS_GROUPS_TAG); + result.append(&mut groups.to_bytes()?); + } + EntryPointAccess::Template => { + result.push(ENTRYPOINTACCESS_ABSTRACT_TAG); + } + } + Ok(result) + } + + fn serialized_length(&self) -> usize { + match self { + EntryPointAccess::Public => 1, + EntryPointAccess::Groups(groups) => 1 + groups.serialized_length(), + EntryPointAccess::Template => 1, + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + EntryPointAccess::Public => { + writer.push(ENTRYPOINTACCESS_PUBLIC_TAG); + } + EntryPointAccess::Groups(groups) => { + writer.push(ENTRYPOINTACCESS_GROUPS_TAG); + groups.write_bytes(writer)?; + } + EntryPointAccess::Template => { + writer.push(ENTRYPOINTACCESS_ABSTRACT_TAG); + } + } + Ok(()) + } +} + +impl FromBytes for EntryPointAccess { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, bytes) = u8::from_bytes(bytes)?; + + match tag { + ENTRYPOINTACCESS_PUBLIC_TAG => Ok((EntryPointAccess::Public, bytes)), + ENTRYPOINTACCESS_GROUPS_TAG => { + let (groups, bytes) = Vec::::from_bytes(bytes)?; + let result = EntryPointAccess::Groups(groups); + Ok((result, bytes)) + } + ENTRYPOINTACCESS_ABSTRACT_TAG => Ok((EntryPointAccess::Template, bytes)), + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +/// Parameter to a method +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub 
struct Parameter { + name: String, + cl_type: CLType, +} + +impl Parameter { + /// `Parameter` constructor. + pub fn new>(name: T, cl_type: CLType) -> Self { + Parameter { + name: name.into(), + cl_type, + } + } + + /// Get the type of this argument. + pub fn cl_type(&self) -> &CLType { + &self.cl_type + } + + /// Get a reference to the parameter's name. + pub fn name(&self) -> &str { + &self.name + } +} + +impl From for (String, CLType) { + fn from(parameter: Parameter) -> Self { + (parameter.name, parameter.cl_type) + } +} + +impl ToBytes for Parameter { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = ToBytes::to_bytes(&self.name)?; + self.cl_type.append_bytes(&mut result)?; + + Ok(result) + } + + fn serialized_length(&self) -> usize { + ToBytes::serialized_length(&self.name) + self.cl_type.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.name.write_bytes(writer)?; + self.cl_type.append_bytes(writer) + } +} + +impl FromBytes for Parameter { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (name, bytes) = String::from_bytes(bytes)?; + let (cl_type, bytes) = CLType::from_bytes(bytes)?; + + Ok((Parameter { name, cl_type }, bytes)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{AccessRights, URef, UREF_ADDR_LENGTH}; + + #[test] + fn entity_hash_from_slice() { + let bytes: Vec = (0..32).collect(); + let entity_hash = HashAddr::try_from(&bytes[..]).expect("should create contract hash"); + let entity_hash = AddressableEntityHash::new(entity_hash); + assert_eq!(&bytes, &entity_hash.as_bytes()); + } + + #[test] + fn entity_hash_from_str() { + let entity_hash = AddressableEntityHash([3; 32]); + let encoded = entity_hash.to_formatted_string(); + let decoded = AddressableEntityHash::from_formatted_str(&encoded).unwrap(); + assert_eq!(entity_hash, decoded); + + let invalid_prefix = + 
"addressable-entity--0000000000000000000000000000000000000000000000000000000000000000"; + assert!(AddressableEntityHash::from_formatted_str(invalid_prefix).is_err()); + + let short_addr = + "addressable-entity-00000000000000000000000000000000000000000000000000000000000000"; + assert!(AddressableEntityHash::from_formatted_str(short_addr).is_err()); + + let long_addr = + "addressable-entity-000000000000000000000000000000000000000000000000000000000000000000"; + assert!(AddressableEntityHash::from_formatted_str(long_addr).is_err()); + + let invalid_hex = + "addressable-entity-000000000000000000000000000000000000000000000000000000000000000g"; + assert!(AddressableEntityHash::from_formatted_str(invalid_hex).is_err()); + } + + #[test] + fn entity_hash_serde_roundtrip() { + let entity_hash = AddressableEntityHash([255; 32]); + let serialized = bincode::serialize(&entity_hash).unwrap(); + let deserialized = bincode::deserialize(&serialized).unwrap(); + assert_eq!(entity_hash, deserialized) + } + + #[test] + fn entity_hash_json_roundtrip() { + let entity_hash = AddressableEntityHash([255; 32]); + let json_string = serde_json::to_string_pretty(&entity_hash).unwrap(); + let decoded = serde_json::from_str(&json_string).unwrap(); + assert_eq!(entity_hash, decoded) + } + + #[test] + fn should_extract_access_rights() { + const MAIN_PURSE: URef = URef::new([2; 32], AccessRights::READ_ADD_WRITE); + + let entity_hash = AddressableEntityHash([255; 32]); + let uref = URef::new([84; UREF_ADDR_LENGTH], AccessRights::READ_ADD); + let uref_r = URef::new([42; UREF_ADDR_LENGTH], AccessRights::READ); + let uref_a = URef::new([42; UREF_ADDR_LENGTH], AccessRights::ADD); + let uref_w = URef::new([42; UREF_ADDR_LENGTH], AccessRights::WRITE); + let mut named_keys = NamedKeys::new(); + named_keys.insert("a".to_string(), Key::URef(uref_r)); + named_keys.insert("b".to_string(), Key::URef(uref_a)); + named_keys.insert("c".to_string(), Key::URef(uref_w)); + named_keys.insert("d".to_string(), 
Key::URef(uref)); + let associated_keys = AssociatedKeys::new(AccountHash::new([254; 32]), Weight::new(1)); + let contract = AddressableEntity::new( + PackageHash::new([254; 32]), + ByteCodeHash::new([253; 32]), + named_keys, + EntryPoints::new_with_default_entry_point(), + ProtocolVersion::V1_0_0, + MAIN_PURSE, + associated_keys, + ActionThresholds::new(Weight::new(1), Weight::new(1), Weight::new(1)) + .expect("should create thresholds"), + MessageTopics::default(), + ); + let access_rights = contract.extract_access_rights(entity_hash); + let expected_uref = URef::new([42; UREF_ADDR_LENGTH], AccessRights::READ_ADD_WRITE); + assert!( + access_rights.has_access_rights_to_uref(&uref), + "urefs in named keys should be included in access rights" + ); + assert!( + access_rights.has_access_rights_to_uref(&expected_uref), + "multiple access right bits to the same uref should coalesce" + ); + } +} + +#[cfg(test)] +mod prop_tests { + use proptest::prelude::*; + + use crate::{bytesrepr, gens}; + + proptest! { + #[test] + fn test_value_contract(contract in gens::addressable_entity_arb()) { + bytesrepr::test_serialization_roundtrip(&contract); + } + } +} diff --git a/casper_types_ver_2_0/src/addressable_entity/action_thresholds.rs b/casper_types_ver_2_0/src/addressable_entity/action_thresholds.rs new file mode 100644 index 00000000..4d6d58b9 --- /dev/null +++ b/casper_types_ver_2_0/src/addressable_entity/action_thresholds.rs @@ -0,0 +1,212 @@ +//! This module contains types and functions for managing action thresholds. 
+ +use alloc::vec::Vec; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + account::ActionThresholds as AccountActionThresholds, + addressable_entity::{ActionType, SetThresholdFailure, Weight, WEIGHT_SERIALIZED_LENGTH}, + bytesrepr::{self, Error, FromBytes, ToBytes}, +}; + +/// Thresholds that have to be met when executing an action of a certain type. +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[cfg_attr(feature = "json-schema", schemars(rename = "EntityActionThresholds"))] +pub struct ActionThresholds { + /// Threshold for deploy execution. + pub deployment: Weight, + /// Threshold for upgrading contracts. + pub upgrade_management: Weight, + /// Threshold for managing action threshold. + pub key_management: Weight, +} + +impl ActionThresholds { + /// Creates new ActionThresholds object with provided weights + /// + /// Requires deployment threshold to be lower than or equal to + /// key management threshold. + pub fn new( + deployment: Weight, + upgrade_management: Weight, + key_management: Weight, + ) -> Result { + if deployment > key_management { + return Err(SetThresholdFailure::DeploymentThreshold); + } + Ok(ActionThresholds { + deployment, + upgrade_management, + key_management, + }) + } + /// Sets new threshold for [ActionType::Deployment]. + /// Should return an error if setting new threshold for `action_type` breaks + /// one of the invariants. Currently, invariant is that + /// `ActionType::Deployment` threshold shouldn't be higher than any + /// other, which should be checked both when increasing `Deployment` + /// threshold and decreasing the other. 
+ pub fn set_deployment_threshold( + &mut self, + new_threshold: Weight, + ) -> Result<(), SetThresholdFailure> { + if new_threshold > self.key_management { + Err(SetThresholdFailure::DeploymentThreshold) + } else { + self.deployment = new_threshold; + Ok(()) + } + } + + /// Sets new threshold for [ActionType::KeyManagement]. + pub fn set_key_management_threshold( + &mut self, + new_threshold: Weight, + ) -> Result<(), SetThresholdFailure> { + if self.deployment > new_threshold { + Err(SetThresholdFailure::KeyManagementThreshold) + } else { + self.key_management = new_threshold; + Ok(()) + } + } + + /// Sets new threshold for [ActionType::UpgradeManagement]. + pub fn set_upgrade_management_threshold( + &mut self, + upgrade_management: Weight, + ) -> Result<(), SetThresholdFailure> { + self.upgrade_management = upgrade_management; + Ok(()) + } + + /// Returns the deployment action threshold. + pub fn deployment(&self) -> &Weight { + &self.deployment + } + + /// Returns key management action threshold. + pub fn key_management(&self) -> &Weight { + &self.key_management + } + + /// Returns the upgrade management action threshold. + pub fn upgrade_management(&self) -> &Weight { + &self.upgrade_management + } + + /// Unified function that takes an action type, and changes appropriate + /// threshold defined by the [ActionType] variants. 
+ pub fn set_threshold( + &mut self, + action_type: ActionType, + new_threshold: Weight, + ) -> Result<(), SetThresholdFailure> { + match action_type { + ActionType::Deployment => self.set_deployment_threshold(new_threshold), + ActionType::KeyManagement => self.set_key_management_threshold(new_threshold), + ActionType::UpgradeManagement => self.set_upgrade_management_threshold(new_threshold), + } + } +} + +impl Default for ActionThresholds { + fn default() -> Self { + ActionThresholds { + deployment: Weight::new(1), + upgrade_management: Weight::new(1), + key_management: Weight::new(1), + } + } +} + +impl From for ActionThresholds { + fn from(value: AccountActionThresholds) -> Self { + Self { + deployment: Weight::new(value.deployment.value()), + key_management: Weight::new(value.key_management.value()), + upgrade_management: Weight::new(1), + } + } +} + +impl ToBytes for ActionThresholds { + fn to_bytes(&self) -> Result, Error> { + let mut result = bytesrepr::unchecked_allocate_buffer(self); + result.append(&mut self.deployment.to_bytes()?); + result.append(&mut self.upgrade_management.to_bytes()?); + result.append(&mut self.key_management.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + 3 * WEIGHT_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.deployment().write_bytes(writer)?; + self.upgrade_management().write_bytes(writer)?; + self.key_management().write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for ActionThresholds { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (deployment, rem) = Weight::from_bytes(bytes)?; + let (upgrade_management, rem) = Weight::from_bytes(rem)?; + let (key_management, rem) = Weight::from_bytes(rem)?; + let ret = ActionThresholds { + deployment, + upgrade_management, + key_management, + }; + Ok((ret, rem)) + } +} + +#[doc(hidden)] +#[cfg(any(feature = "testing", feature = "gens", test))] +pub mod gens { + use 
proptest::prelude::*;
+
+    use super::ActionThresholds;
+
+    pub fn action_thresholds_arb() -> impl Strategy<Value = ActionThresholds> {
+        Just(Default::default())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn should_create_new_action_thresholds() {
+        let action_thresholds =
+            ActionThresholds::new(Weight::new(1), Weight::new(1), Weight::new(42)).unwrap();
+        assert_eq!(*action_thresholds.deployment(), Weight::new(1));
+        assert_eq!(*action_thresholds.upgrade_management(), Weight::new(1));
+        assert_eq!(*action_thresholds.key_management(), Weight::new(42));
+    }
+
+    #[test]
+    fn should_not_create_action_thresholds_with_invalid_deployment_threshold() {
+        // deployment cant be greater than key management
+        assert!(ActionThresholds::new(Weight::new(5), Weight::new(1), Weight::new(1)).is_err());
+    }
+
+    #[test]
+    fn serialization_roundtrip() {
+        let action_thresholds =
+            ActionThresholds::new(Weight::new(1), Weight::new(1), Weight::new(42)).unwrap();
+        bytesrepr::test_serialization_roundtrip(&action_thresholds);
+    }
+}
diff --git a/casper_types_ver_2_0/src/addressable_entity/action_type.rs b/casper_types_ver_2_0/src/addressable_entity/action_type.rs
new file mode 100644
index 00000000..2a627309
--- /dev/null
+++ b/casper_types_ver_2_0/src/addressable_entity/action_type.rs
@@ -0,0 +1,38 @@
+use core::convert::TryFrom;
+
+use super::TryFromIntError;
+
+/// The various types of action which can be performed in the context of a given account.
+#[repr(u32)]
+pub enum ActionType {
+    /// Represents performing a deploy.
+    Deployment = 0,
+    /// Represents changing the associated keys (i.e. map of [`AccountHash`](super::AccountHash)s
+    /// to [`Weight`](super::Weight)s) or action thresholds (i.e. the total
+    /// [`Weight`](super::Weight)s of signing [`AccountHash`](super::AccountHash)s required to
+    /// perform various actions).
+    KeyManagement = 1,
+    /// Represents changing the associated keys (i.e.
map of [`AccountHash`](super::AccountHash)s + /// to [`Weight`](super::Weight)s) or action thresholds (i.e. the total + /// [`Weight`](super::Weight)s of signing [`AccountHash`](super::AccountHash)s required to + /// upgrade the addressable entity. + UpgradeManagement = 2, +} + +// This conversion is not intended to be used by third party crates. +#[doc(hidden)] +impl TryFrom for ActionType { + type Error = TryFromIntError; + + fn try_from(value: u32) -> Result { + // This doesn't use `num_derive` traits such as FromPrimitive and ToPrimitive + // that helps to automatically create `from_u32` and `to_u32`. This approach + // gives better control over generated code. + match value { + d if d == ActionType::Deployment as u32 => Ok(ActionType::Deployment), + d if d == ActionType::KeyManagement as u32 => Ok(ActionType::KeyManagement), + d if d == ActionType::UpgradeManagement as u32 => Ok(ActionType::UpgradeManagement), + _ => Err(TryFromIntError(())), + } + } +} diff --git a/casper_types_ver_2_0/src/addressable_entity/associated_keys.rs b/casper_types_ver_2_0/src/addressable_entity/associated_keys.rs new file mode 100644 index 00000000..9f8ae2ac --- /dev/null +++ b/casper_types_ver_2_0/src/addressable_entity/associated_keys.rs @@ -0,0 +1,386 @@ +//! This module contains types and functions for working with keys associated with an account. 
+ +use alloc::{ + collections::{btree_map::Entry, BTreeMap, BTreeSet}, + vec::Vec, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +#[cfg(feature = "json-schema")] +use serde_map_to_array::KeyValueJsonSchema; +use serde_map_to_array::{BTreeMapToArray, KeyValueLabels}; + +use crate::{ + account::{AccountHash, AssociatedKeys as AccountAssociatedKeys}, + addressable_entity::{AddKeyFailure, RemoveKeyFailure, UpdateKeyFailure, Weight}, + bytesrepr::{self, FromBytes, ToBytes}, +}; + +/// A collection of weighted public keys (represented as account hashes) associated with an account. +#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[cfg_attr(feature = "json-schema", schemars(rename = "EntityAssociatedKeys"))] +#[serde(deny_unknown_fields)] +#[rustfmt::skip] +pub struct AssociatedKeys( + #[serde(with = "BTreeMapToArray::")] + BTreeMap, +); + +impl AssociatedKeys { + /// Constructs a new AssociatedKeys. + pub fn new(key: AccountHash, weight: Weight) -> AssociatedKeys { + let mut bt: BTreeMap = BTreeMap::new(); + bt.insert(key, weight); + AssociatedKeys(bt) + } + + /// Adds a new AssociatedKey to the set. + /// + /// Returns true if added successfully, false otherwise. + pub fn add_key(&mut self, key: AccountHash, weight: Weight) -> Result<(), AddKeyFailure> { + match self.0.entry(key) { + Entry::Vacant(entry) => { + entry.insert(weight); + } + Entry::Occupied(_) => return Err(AddKeyFailure::DuplicateKey), + } + Ok(()) + } + + /// Removes key from the associated keys set. + /// Returns true if value was found in the set prior to the removal, false + /// otherwise. 
+ pub fn remove_key(&mut self, key: &AccountHash) -> Result<(), RemoveKeyFailure> { + self.0 + .remove(key) + .map(|_| ()) + .ok_or(RemoveKeyFailure::MissingKey) + } + + /// Adds new AssociatedKey to the set. + /// Returns true if added successfully, false otherwise. + pub fn update_key(&mut self, key: AccountHash, weight: Weight) -> Result<(), UpdateKeyFailure> { + match self.0.entry(key) { + Entry::Vacant(_) => { + return Err(UpdateKeyFailure::MissingKey); + } + Entry::Occupied(mut entry) => { + *entry.get_mut() = weight; + } + } + Ok(()) + } + + /// Returns the weight of an account hash. + pub fn get(&self, key: &AccountHash) -> Option<&Weight> { + self.0.get(key) + } + + /// Returns `true` if a given key exists. + pub fn contains_key(&self, key: &AccountHash) -> bool { + self.0.contains_key(key) + } + + /// Returns an iterator over the account hash and the weights. + pub fn iter(&self) -> impl Iterator { + self.0.iter() + } + + /// Returns the count of the associated keys. + pub fn len(&self) -> usize { + self.0.len() + } + + /// Returns `true` if the associated keys are empty. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Helper method that calculates weight for keys that comes from any + /// source. + /// + /// This method is not concerned about uniqueness of the passed iterable. + /// Uniqueness is determined based on the input collection properties, + /// which is either BTreeSet (in [`AssociatedKeys::calculate_keys_weight`]) + /// or BTreeMap (in [`AssociatedKeys::total_keys_weight`]). 
+ fn calculate_any_keys_weight<'a>(&self, keys: impl Iterator) -> Weight { + let total = keys + .filter_map(|key| self.0.get(key)) + .fold(0u8, |acc, w| acc.saturating_add(w.value())); + + Weight::new(total) + } + + /// Calculates total weight of authorization keys provided by an argument + pub fn calculate_keys_weight(&self, authorization_keys: &BTreeSet) -> Weight { + self.calculate_any_keys_weight(authorization_keys.iter()) + } + + /// Calculates total weight of all authorization keys + pub fn total_keys_weight(&self) -> Weight { + self.calculate_any_keys_weight(self.0.keys()) + } + + /// Calculates total weight of all authorization keys excluding a given key + pub fn total_keys_weight_excluding(&self, account_hash: AccountHash) -> Weight { + self.calculate_any_keys_weight(self.0.keys().filter(|&&element| element != account_hash)) + } +} + +impl From> for AssociatedKeys { + fn from(associated_keys: BTreeMap) -> Self { + Self(associated_keys) + } +} + +impl ToBytes for AssociatedKeys { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } +} + +impl FromBytes for AssociatedKeys { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (associated_keys, rem) = FromBytes::from_bytes(bytes)?; + Ok((AssociatedKeys(associated_keys), rem)) + } +} + +impl From for AssociatedKeys { + fn from(value: AccountAssociatedKeys) -> Self { + let mut associated_keys = AssociatedKeys::default(); + for (account_hash, weight) in value.iter() { + associated_keys + .0 + .insert(*account_hash, Weight::new(weight.value())); + } + associated_keys + } +} + +struct Labels; + +impl KeyValueLabels for Labels { + const KEY: &'static str = "account_hash"; + const VALUE: &'static str = "weight"; +} + +#[cfg(feature = "json-schema")] +impl KeyValueJsonSchema for 
Labels { + const JSON_SCHEMA_KV_NAME: Option<&'static str> = Some("AssociatedKey"); + const JSON_SCHEMA_KV_DESCRIPTION: Option<&'static str> = Some("A weighted public key."); + const JSON_SCHEMA_KEY_DESCRIPTION: Option<&'static str> = + Some("The account hash of the public key."); + const JSON_SCHEMA_VALUE_DESCRIPTION: Option<&'static str> = + Some("The weight assigned to the public key."); +} + +#[doc(hidden)] +#[cfg(any(feature = "testing", feature = "gens", test))] +pub mod gens { + use proptest::prelude::*; + + use crate::gens::{account_hash_arb, weight_arb}; + + use super::AssociatedKeys; + + pub fn associated_keys_arb() -> impl Strategy { + proptest::collection::btree_map(account_hash_arb(), weight_arb(), 10).prop_map(|keys| { + let mut associated_keys = AssociatedKeys::default(); + keys.into_iter().for_each(|(k, v)| { + associated_keys.add_key(k, v).unwrap(); + }); + associated_keys + }) + } +} + +#[cfg(test)] +mod tests { + use std::{collections::BTreeSet, iter::FromIterator}; + + use crate::{ + account::{AccountHash, ACCOUNT_HASH_LENGTH}, + addressable_entity::{AddKeyFailure, Weight}, + bytesrepr, + }; + + use super::*; + + #[test] + fn associated_keys_add() { + let mut keys = + AssociatedKeys::new(AccountHash::new([0u8; ACCOUNT_HASH_LENGTH]), Weight::new(1)); + let new_pk = AccountHash::new([1u8; ACCOUNT_HASH_LENGTH]); + let new_pk_weight = Weight::new(2); + assert!(keys.add_key(new_pk, new_pk_weight).is_ok()); + assert_eq!(keys.get(&new_pk), Some(&new_pk_weight)) + } + + #[test] + fn associated_keys_add_duplicate() { + let pk = AccountHash::new([0u8; ACCOUNT_HASH_LENGTH]); + let weight = Weight::new(1); + let mut keys = AssociatedKeys::new(pk, weight); + assert_eq!( + keys.add_key(pk, Weight::new(10)), + Err(AddKeyFailure::DuplicateKey) + ); + assert_eq!(keys.get(&pk), Some(&weight)); + } + + #[test] + fn associated_keys_remove() { + let pk = AccountHash::new([0u8; ACCOUNT_HASH_LENGTH]); + let weight = Weight::new(1); + let mut keys = 
AssociatedKeys::new(pk, weight); + assert!(keys.remove_key(&pk).is_ok()); + assert!(keys + .remove_key(&AccountHash::new([1u8; ACCOUNT_HASH_LENGTH])) + .is_err()); + } + + #[test] + fn associated_keys_update() { + let pk1 = AccountHash::new([0u8; ACCOUNT_HASH_LENGTH]); + let pk2 = AccountHash::new([1u8; ACCOUNT_HASH_LENGTH]); + let weight = Weight::new(1); + let mut keys = AssociatedKeys::new(pk1, weight); + assert!(matches!( + keys.update_key(pk2, Weight::new(2)) + .expect_err("should get error"), + UpdateKeyFailure::MissingKey + )); + keys.add_key(pk2, Weight::new(1)).unwrap(); + assert_eq!(keys.get(&pk2), Some(&Weight::new(1))); + keys.update_key(pk2, Weight::new(2)).unwrap(); + assert_eq!(keys.get(&pk2), Some(&Weight::new(2))); + } + + #[test] + fn associated_keys_calculate_keys_once() { + let key_1 = AccountHash::new([0; 32]); + let key_2 = AccountHash::new([1; 32]); + let key_3 = AccountHash::new([2; 32]); + let mut keys = AssociatedKeys::default(); + + keys.add_key(key_2, Weight::new(2)) + .expect("should add key_1"); + keys.add_key(key_1, Weight::new(1)) + .expect("should add key_1"); + keys.add_key(key_3, Weight::new(3)) + .expect("should add key_1"); + + assert_eq!( + keys.calculate_keys_weight(&BTreeSet::from_iter(vec![ + key_1, key_2, key_3, key_1, key_2, key_3, + ])), + Weight::new(1 + 2 + 3) + ); + } + + #[test] + fn associated_keys_total_weight() { + let associated_keys = { + let mut res = AssociatedKeys::new(AccountHash::new([1u8; 32]), Weight::new(1)); + res.add_key(AccountHash::new([2u8; 32]), Weight::new(11)) + .expect("should add key 1"); + res.add_key(AccountHash::new([3u8; 32]), Weight::new(12)) + .expect("should add key 2"); + res.add_key(AccountHash::new([4u8; 32]), Weight::new(13)) + .expect("should add key 3"); + res + }; + assert_eq!( + associated_keys.total_keys_weight(), + Weight::new(1 + 11 + 12 + 13) + ); + } + + #[test] + fn associated_keys_total_weight_excluding() { + let identity_key = AccountHash::new([1u8; 32]); + let 
identity_key_weight = Weight::new(1); + + let key_1 = AccountHash::new([2u8; 32]); + let key_1_weight = Weight::new(11); + + let key_2 = AccountHash::new([3u8; 32]); + let key_2_weight = Weight::new(12); + + let key_3 = AccountHash::new([4u8; 32]); + let key_3_weight = Weight::new(13); + + let associated_keys = { + let mut res = AssociatedKeys::new(identity_key, identity_key_weight); + res.add_key(key_1, key_1_weight).expect("should add key 1"); + res.add_key(key_2, key_2_weight).expect("should add key 2"); + res.add_key(key_3, key_3_weight).expect("should add key 3"); + res + }; + assert_eq!( + associated_keys.total_keys_weight_excluding(key_2), + Weight::new(identity_key_weight.value() + key_1_weight.value() + key_3_weight.value()) + ); + } + + #[test] + fn overflowing_keys_weight() { + let identity_key = AccountHash::new([1u8; 32]); + let key_1 = AccountHash::new([2u8; 32]); + let key_2 = AccountHash::new([3u8; 32]); + let key_3 = AccountHash::new([4u8; 32]); + + let identity_key_weight = Weight::new(250); + let weight_1 = Weight::new(1); + let weight_2 = Weight::new(2); + let weight_3 = Weight::new(3); + + let saturated_weight = Weight::new(u8::max_value()); + + let associated_keys = { + let mut res = AssociatedKeys::new(identity_key, identity_key_weight); + + res.add_key(key_1, weight_1).expect("should add key 1"); + res.add_key(key_2, weight_2).expect("should add key 2"); + res.add_key(key_3, weight_3).expect("should add key 3"); + res + }; + + assert_eq!( + associated_keys.calculate_keys_weight(&BTreeSet::from_iter(vec![ + identity_key, // 250 + key_1, // 251 + key_2, // 253 + key_3, // 256 - error + ])), + saturated_weight, + ); + } + + #[test] + fn serialization_roundtrip() { + let mut keys = AssociatedKeys::default(); + keys.add_key(AccountHash::new([1; 32]), Weight::new(1)) + .unwrap(); + keys.add_key(AccountHash::new([2; 32]), Weight::new(2)) + .unwrap(); + keys.add_key(AccountHash::new([3; 32]), Weight::new(3)) + .unwrap(); + 
bytesrepr::test_serialization_roundtrip(&keys); + } +} diff --git a/casper_types_ver_2_0/src/addressable_entity/error.rs b/casper_types_ver_2_0/src/addressable_entity/error.rs new file mode 100644 index 00000000..f4a75866 --- /dev/null +++ b/casper_types_ver_2_0/src/addressable_entity/error.rs @@ -0,0 +1,112 @@ +use core::{ + array::TryFromSliceError, + convert::TryFrom, + fmt::{self, Display, Formatter}, +}; + +// This error type is not intended to be used by third party crates. +#[doc(hidden)] +#[derive(Debug, Eq, PartialEq)] +pub struct TryFromIntError(pub ()); + +/// Error returned when decoding an `AccountHash` from a formatted string. +#[derive(Debug)] +#[non_exhaustive] +pub enum FromAccountHashStrError { + /// The prefix is invalid. + InvalidPrefix, + /// The hash is not valid hex. + Hex(base16::DecodeError), + /// The hash is the wrong length. + Hash(TryFromSliceError), +} + +impl From for FromAccountHashStrError { + fn from(error: base16::DecodeError) -> Self { + FromAccountHashStrError::Hex(error) + } +} + +impl From for FromAccountHashStrError { + fn from(error: TryFromSliceError) -> Self { + FromAccountHashStrError::Hash(error) + } +} + +impl Display for FromAccountHashStrError { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match self { + FromAccountHashStrError::InvalidPrefix => write!(f, "prefix is not 'account-hash-'"), + FromAccountHashStrError::Hex(error) => { + write!(f, "failed to decode address portion from hex: {}", error) + } + FromAccountHashStrError::Hash(error) => { + write!(f, "address portion is wrong length: {}", error) + } + } + } +} + +/// Errors that can occur while changing action thresholds (i.e. the total +/// [`Weight`](super::Weight)s of signing [`AccountHash`](super::AccountHash)s required to perform +/// various actions) on an account. 
+#[repr(i32)] +#[derive(Debug, PartialEq, Eq, Copy, Clone)] +#[non_exhaustive] +pub enum SetThresholdFailure { + /// Setting the key-management threshold to a value lower than the deployment threshold is + /// disallowed. + KeyManagementThreshold = 1, + /// Setting the deployment threshold to a value greater than any other threshold is disallowed. + DeploymentThreshold = 2, + /// Caller doesn't have sufficient permissions to set new thresholds. + PermissionDeniedError = 3, + /// Setting a threshold to a value greater than the total weight of associated keys is + /// disallowed. + InsufficientTotalWeight = 4, +} + +// This conversion is not intended to be used by third party crates. +#[doc(hidden)] +impl TryFrom for SetThresholdFailure { + type Error = TryFromIntError; + + fn try_from(value: i32) -> Result { + match value { + d if d == SetThresholdFailure::KeyManagementThreshold as i32 => { + Ok(SetThresholdFailure::KeyManagementThreshold) + } + d if d == SetThresholdFailure::DeploymentThreshold as i32 => { + Ok(SetThresholdFailure::DeploymentThreshold) + } + d if d == SetThresholdFailure::PermissionDeniedError as i32 => { + Ok(SetThresholdFailure::PermissionDeniedError) + } + d if d == SetThresholdFailure::InsufficientTotalWeight as i32 => { + Ok(SetThresholdFailure::InsufficientTotalWeight) + } + _ => Err(TryFromIntError(())), + } + } +} + +impl Display for SetThresholdFailure { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + SetThresholdFailure::KeyManagementThreshold => formatter + .write_str("New threshold should be greater than or equal to deployment threshold"), + SetThresholdFailure::DeploymentThreshold => formatter.write_str( + "New threshold should be lower than or equal to key management threshold", + ), + SetThresholdFailure::PermissionDeniedError => formatter + .write_str("Unable to set action threshold due to insufficient permissions"), + SetThresholdFailure::InsufficientTotalWeight => formatter.write_str( + "New threshold 
should be lower or equal than total weight of associated keys", + ), + } + } +} + +/// Associated error type of `TryFrom<&[u8]>` for [`AccountHash`](super::AccountHash). +#[derive(Debug)] +pub struct TryFromSliceForAccountHashError(()); diff --git a/casper_types_ver_2_0/src/addressable_entity/named_keys.rs b/casper_types_ver_2_0/src/addressable_entity/named_keys.rs new file mode 100644 index 00000000..37a0bcd0 --- /dev/null +++ b/casper_types_ver_2_0/src/addressable_entity/named_keys.rs @@ -0,0 +1,166 @@ +use alloc::{collections::BTreeMap, string::String, vec::Vec}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +use serde_map_to_array::{BTreeMapToArray, KeyValueLabels}; + +#[cfg(feature = "json-schema")] +use crate::execution::execution_result_v1::NamedKey; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + CLType, CLTyped, Key, +}; + +/// A collection of named keys. +#[derive(Clone, Eq, PartialEq, Default, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +#[rustfmt::skip] +pub struct NamedKeys( + #[serde(with = "BTreeMapToArray::")] + #[cfg_attr(feature = "json-schema", schemars(with = "Vec"))] + BTreeMap, +); + +impl NamedKeys { + /// Constructs a new, empty `NamedKeys`. + pub const fn new() -> Self { + NamedKeys(BTreeMap::new()) + } + + /// Consumes `self`, returning the wrapped map. + pub fn into_inner(self) -> BTreeMap { + self.0 + } + + /// Inserts a named key. + /// + /// If the map did not have this name present, `None` is returned. If the map did have this + /// name present, the `Key` is updated, and the old `Key` is returned. + pub fn insert(&mut self, name: String, key: Key) -> Option { + self.0.insert(name, key) + } + + /// Moves all elements from `other` into `self`. 
+ pub fn append(&mut self, mut other: Self) { + self.0.append(&mut other.0) + } + + /// Removes a named `Key`, returning the `Key` if it existed in the collection. + pub fn remove(&mut self, name: &str) -> Option { + self.0.remove(name) + } + + /// Returns a reference to the `Key` under the given `name` if any. + pub fn get(&self, name: &str) -> Option<&Key> { + self.0.get(name) + } + + /// Returns `true` if the named `Key` exists in the collection. + pub fn contains(&self, name: &str) -> bool { + self.0.contains_key(name) + } + + /// Returns an iterator over the names. + pub fn names(&self) -> impl Iterator { + self.0.keys() + } + + /// Returns an iterator over the `Key`s (i.e. the map's values). + pub fn keys(&self) -> impl Iterator { + self.0.values() + } + + /// Returns a mutable iterator over the `Key`s (i.e. the map's values). + pub fn keys_mut(&mut self) -> impl Iterator { + self.0.values_mut() + } + + /// Returns an iterator over the name-key pairs. + pub fn iter(&self) -> impl Iterator { + self.0.iter() + } + + /// Returns the number of named `Key`s. + pub fn len(&self) -> usize { + self.0.len() + } + + /// Returns `true` if there are no named `Key`s. 
+ pub fn is_empty(&self) -> bool { + self.0.is_empty() + } +} + +impl From> for NamedKeys { + fn from(value: BTreeMap) -> Self { + NamedKeys(value) + } +} + +impl ToBytes for NamedKeys { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for NamedKeys { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (named_keys, remainder) = BTreeMap::::from_bytes(bytes)?; + Ok((NamedKeys(named_keys), remainder)) + } +} + +impl CLTyped for NamedKeys { + fn cl_type() -> CLType { + BTreeMap::::cl_type() + } +} + +struct Labels; + +impl KeyValueLabels for Labels { + const KEY: &'static str = "name"; + const VALUE: &'static str = "key"; +} + +#[cfg(test)] +mod tests { + use rand::Rng; + + use super::*; + use crate::testing::TestRng; + + /// `NamedKeys` was previously (pre node v2.0.0) just an alias for `BTreeMap`. + /// Check if we serialize as the old form, that can deserialize to the new. 
+ #[test] + fn should_be_backwards_compatible() { + let rng = &mut TestRng::new(); + let mut named_keys = NamedKeys::new(); + assert!(named_keys.insert("a".to_string(), rng.gen()).is_none()); + assert!(named_keys.insert("bb".to_string(), rng.gen()).is_none()); + assert!(named_keys.insert("ccc".to_string(), rng.gen()).is_none()); + + let serialized_old = bincode::serialize(&named_keys.0).unwrap(); + let parsed_new = bincode::deserialize(&serialized_old).unwrap(); + assert_eq!(named_keys, parsed_new); + + let serialized_old = bytesrepr::serialize(&named_keys.0).unwrap(); + let parsed_new = bytesrepr::deserialize(serialized_old).unwrap(); + assert_eq!(named_keys, parsed_new); + } +} diff --git a/casper_types_ver_2_0/src/addressable_entity/weight.rs b/casper_types_ver_2_0/src/addressable_entity/weight.rs new file mode 100644 index 00000000..ee2f0343 --- /dev/null +++ b/casper_types_ver_2_0/src/addressable_entity/weight.rs @@ -0,0 +1,66 @@ +use alloc::vec::Vec; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; + +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, Error, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + CLType, CLTyped, +}; + +/// The number of bytes in a serialized [`Weight`]. +pub const WEIGHT_SERIALIZED_LENGTH: usize = U8_SERIALIZED_LENGTH; + +/// The weight associated with public keys in an account's associated keys. +#[derive(PartialOrd, Ord, PartialEq, Eq, Clone, Copy, Debug, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[cfg_attr( + feature = "json-schema", + schemars(rename = "EntityAssociatedKeyWeight") +)] +pub struct Weight(u8); + +impl Weight { + /// Constructs a new `Weight`. + pub const fn new(weight: u8) -> Weight { + Weight(weight) + } + + /// Returns the value of `self` as a `u8`. 
+ pub fn value(self) -> u8 { + self.0 + } +} + +impl ToBytes for Weight { + fn to_bytes(&self) -> Result, Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + WEIGHT_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + writer.push(self.0); + Ok(()) + } +} + +impl FromBytes for Weight { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (byte, rem) = u8::from_bytes(bytes)?; + Ok((Weight::new(byte), rem)) + } +} + +impl CLTyped for Weight { + fn cl_type() -> CLType { + CLType::U8 + } +} diff --git a/casper_types_ver_2_0/src/api_error.rs b/casper_types_ver_2_0/src/api_error.rs new file mode 100644 index 00000000..2c1a1d59 --- /dev/null +++ b/casper_types_ver_2_0/src/api_error.rs @@ -0,0 +1,949 @@ +//! Contains [`ApiError`] and associated helper functions. + +use core::{ + convert::TryFrom, + fmt::{self, Debug, Formatter}, +}; + +use crate::{ + addressable_entity::{ + self, AddKeyFailure, MessageTopicError, RemoveKeyFailure, SetThresholdFailure, + TryFromIntError, TryFromSliceForAccountHashError, UpdateKeyFailure, + }, + bytesrepr, + system::{auction, handle_payment, mint}, + CLValueError, +}; + +/// All `Error` variants defined in this library other than `Error::User` will convert to a `u32` +/// value less than or equal to `RESERVED_ERROR_MAX`. +const RESERVED_ERROR_MAX: u32 = u16::MAX as u32; // 0..=65535 + +/// Handle Payment errors will have this value added to them when being converted to a `u32`. +const POS_ERROR_OFFSET: u32 = RESERVED_ERROR_MAX - u8::MAX as u32; // 65280..=65535 + +/// Mint errors will have this value added to them when being converted to a `u32`. +const MINT_ERROR_OFFSET: u32 = (POS_ERROR_OFFSET - 1) - u8::MAX as u32; // 65024..=65279 + +/// Contract header errors will have this value added to them when being converted to a `u32`. 
+const HEADER_ERROR_OFFSET: u32 = (MINT_ERROR_OFFSET - 1) - u8::MAX as u32; // 64768..=65023 + +/// Contract header errors will have this value added to them when being converted to a `u32`. +const AUCTION_ERROR_OFFSET: u32 = (HEADER_ERROR_OFFSET - 1) - u8::MAX as u32; // 64512..=64767 + +/// Minimum value of user error's inclusive range. +const USER_ERROR_MIN: u32 = RESERVED_ERROR_MAX + 1; + +/// Maximum value of user error's inclusive range. +const USER_ERROR_MAX: u32 = 2 * RESERVED_ERROR_MAX + 1; + +/// Minimum value of Mint error's inclusive range. +const MINT_ERROR_MIN: u32 = MINT_ERROR_OFFSET; + +/// Maximum value of Mint error's inclusive range. +const MINT_ERROR_MAX: u32 = POS_ERROR_OFFSET - 1; + +/// Minimum value of Handle Payment error's inclusive range. +const HP_ERROR_MIN: u32 = POS_ERROR_OFFSET; + +/// Maximum value of Handle Payment error's inclusive range. +const HP_ERROR_MAX: u32 = RESERVED_ERROR_MAX; + +/// Minimum value of contract header error's inclusive range. +const HEADER_ERROR_MIN: u32 = HEADER_ERROR_OFFSET; + +/// Maximum value of contract header error's inclusive range. +const HEADER_ERROR_MAX: u32 = HEADER_ERROR_OFFSET + u8::MAX as u32; + +/// Minimum value of an auction contract error's inclusive range. +const AUCTION_ERROR_MIN: u32 = AUCTION_ERROR_OFFSET; + +/// Maximum value of an auction contract error's inclusive range. +const AUCTION_ERROR_MAX: u32 = AUCTION_ERROR_OFFSET + u8::MAX as u32; + +/// Errors which can be encountered while running a smart contract. +/// +/// An `ApiError` can be converted to a `u32` in order to be passed via the execution engine's +/// `ext_ffi::casper_revert()` function. This means the information each variant can convey is +/// limited. 
+/// +/// The variants are split into numeric ranges as follows: +/// +/// | Inclusive range | Variant(s) | +/// | ----------------| ----------------------------------------------------------------| +/// | [1, 64511] | all except reserved system contract error ranges defined below. | +/// | [64512, 64767] | `Auction` | +/// | [64768, 65023] | `ContractHeader` | +/// | [65024, 65279] | `Mint` | +/// | [65280, 65535] | `HandlePayment` | +/// | [65536, 131071] | `User` | +/// +/// Users can specify a C-style enum and implement `From` to ease usage of +/// `casper_contract::runtime::revert()`, e.g. +/// ``` +/// use casper_types_ver_2_0::ApiError; +/// +/// #[repr(u16)] +/// enum FailureCode { +/// Zero = 0, // 65,536 as an ApiError::User +/// One, // 65,537 as an ApiError::User +/// Two // 65,538 as an ApiError::User +/// } +/// +/// impl From for ApiError { +/// fn from(code: FailureCode) -> Self { +/// ApiError::User(code as u16) +/// } +/// } +/// +/// assert_eq!(ApiError::User(1), FailureCode::One.into()); +/// assert_eq!(65_536, u32::from(ApiError::from(FailureCode::Zero))); +/// assert_eq!(65_538, u32::from(ApiError::from(FailureCode::Two))); +/// ``` +#[derive(Copy, Clone, PartialEq, Eq)] +#[non_exhaustive] +pub enum ApiError { + /// Optional data was unexpectedly `None`. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(1), ApiError::None); + /// ``` + None, + /// Specified argument not provided. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(2), ApiError::MissingArgument); + /// ``` + MissingArgument, + /// Argument not of correct type. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(3), ApiError::InvalidArgument); + /// ``` + InvalidArgument, + /// Failed to deserialize a value. 
+ /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(4), ApiError::Deserialize); + /// ``` + Deserialize, + /// `casper_contract::storage::read()` returned an error. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(5), ApiError::Read); + /// ``` + Read, + /// The given key returned a `None` value. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(6), ApiError::ValueNotFound); + /// ``` + ValueNotFound, + /// Failed to find a specified contract. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(7), ApiError::ContractNotFound); + /// ``` + ContractNotFound, + /// A call to `casper_contract::runtime::get_key()` returned a failure. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(8), ApiError::GetKey); + /// ``` + GetKey, + /// The [`Key`](crate::Key) variant was not as expected. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(9), ApiError::UnexpectedKeyVariant); + /// ``` + UnexpectedKeyVariant, + /// Obsolete error variant (we no longer have ContractRef). + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(10), ApiError::UnexpectedContractRefVariant); + /// ``` + UnexpectedContractRefVariant, // TODO: this variant is not used any longer and can be removed + /// Invalid purse name given. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(11), ApiError::InvalidPurseName); + /// ``` + InvalidPurseName, + /// Invalid purse retrieved. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(12), ApiError::InvalidPurse); + /// ``` + InvalidPurse, + /// Failed to upgrade contract at [`URef`](crate::URef). 
+ /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(13), ApiError::UpgradeContractAtURef); + /// ``` + UpgradeContractAtURef, + /// Failed to transfer motes. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(14), ApiError::Transfer); + /// ``` + Transfer, + /// The given [`URef`](crate::URef) has no access rights. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(15), ApiError::NoAccessRights); + /// ``` + NoAccessRights, + /// A given type could not be constructed from a [`CLValue`](crate::CLValue). + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(16), ApiError::CLTypeMismatch); + /// ``` + CLTypeMismatch, + /// Early end of stream while deserializing. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(17), ApiError::EarlyEndOfStream); + /// ``` + EarlyEndOfStream, + /// Formatting error while deserializing. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(18), ApiError::Formatting); + /// ``` + Formatting, + /// Not all input bytes were consumed in [`deserialize`](crate::bytesrepr::deserialize). + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(19), ApiError::LeftOverBytes); + /// ``` + LeftOverBytes, + /// Out of memory error. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(20), ApiError::OutOfMemory); + /// ``` + OutOfMemory, + /// There are already maximum [`AccountHash`](crate::account::AccountHash)s associated with the + /// given account. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(21), ApiError::MaxKeysLimit); + /// ``` + MaxKeysLimit, + /// The given [`AccountHash`](crate::account::AccountHash) is already associated with the given + /// account. 
+ /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(22), ApiError::DuplicateKey); + /// ``` + DuplicateKey, + /// Caller doesn't have sufficient permissions to perform the given action. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(23), ApiError::PermissionDenied); + /// ``` + PermissionDenied, + /// The given [`AccountHash`](crate::account::AccountHash) is not associated with the given + /// account. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(24), ApiError::MissingKey); + /// ``` + MissingKey, + /// Removing/updating the given associated [`AccountHash`](crate::account::AccountHash) would + /// cause the total [`Weight`](addressable_entity::Weight) of all remaining `AccountHash`s to + /// fall below one of the action thresholds for the given account. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(25), ApiError::ThresholdViolation); + /// ``` + ThresholdViolation, + /// Setting the key-management threshold to a value lower than the deployment threshold is + /// disallowed. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(26), ApiError::KeyManagementThreshold); + /// ``` + KeyManagementThreshold, + /// Setting the deployment threshold to a value greater than any other threshold is disallowed. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(27), ApiError::DeploymentThreshold); + /// ``` + DeploymentThreshold, + /// Setting a threshold to a value greater than the total weight of associated keys is + /// disallowed. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(28), ApiError::InsufficientTotalWeight); + /// ``` + InsufficientTotalWeight, + /// The given `u32` doesn't map to a [`SystemContractType`](crate::system::SystemEntityType). 
+ /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(29), ApiError::InvalidSystemContract); + /// ``` + InvalidSystemContract, + /// Failed to create a new purse. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(30), ApiError::PurseNotCreated); + /// ``` + PurseNotCreated, + /// An unhandled value, likely representing a bug in the code. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(31), ApiError::Unhandled); + /// ``` + Unhandled, + /// The provided buffer is too small to complete an operation. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(32), ApiError::BufferTooSmall); + /// ``` + BufferTooSmall, + /// No data available in the host buffer. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(33), ApiError::HostBufferEmpty); + /// ``` + HostBufferEmpty, + /// The host buffer has been set to a value and should be consumed first by a read operation. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(34), ApiError::HostBufferFull); + /// ``` + HostBufferFull, + /// Could not lay out an array in memory + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(35), ApiError::AllocLayout); + /// ``` + AllocLayout, + /// The `dictionary_item_key` length exceeds the maximum length. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(36), ApiError::DictionaryItemKeyExceedsLength); + /// ``` + DictionaryItemKeyExceedsLength, + /// The `dictionary_item_key` is invalid. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(37), ApiError::InvalidDictionaryItemKey); + /// ``` + InvalidDictionaryItemKey, + /// Unable to retrieve the requested system contract hash. 
+ /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(38), ApiError::MissingSystemContractHash); + /// ``` + MissingSystemContractHash, + /// Exceeded a recursion depth limit. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(39), ApiError::ExceededRecursionDepth); + /// ``` + ExceededRecursionDepth, + /// Attempt to serialize a value that does not have a serialized representation. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(40), ApiError::NonRepresentableSerialization); + /// ``` + NonRepresentableSerialization, + /// Error specific to Auction contract. See + /// [casper_types_ver_2_0::system::auction::Error](crate::system::auction::Error). + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// for code in 64512..=64767 { + /// assert!(matches!(ApiError::from(code), ApiError::AuctionError(_auction_error))); + /// } + /// ``` + AuctionError(u8), + /// Contract header errors. See + /// [casper_types_ver_2_0::contracts::Error](crate::addressable_entity::Error). + /// + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// for code in 64768..=65023 { + /// assert!(matches!(ApiError::from(code), ApiError::ContractHeader(_contract_header_error))); + /// } + /// ``` + ContractHeader(u8), + /// Error specific to Mint contract. See + /// [casper_types_ver_2_0::system::mint::Error](crate::system::mint::Error). + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// for code in 65024..=65279 { + /// assert!(matches!(ApiError::from(code), ApiError::Mint(_mint_error))); + /// } + /// ``` + Mint(u8), + /// Error specific to Handle Payment contract. See + /// [casper_types_ver_2_0::system::handle_payment](crate::system::handle_payment::Error). 
+ /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// for code in 65280..=65535 { + /// assert!(matches!(ApiError::from(code), ApiError::HandlePayment(_handle_payment_error))); + /// } + /// ``` + HandlePayment(u8), + /// User-specified error code. The internal `u16` value is added to `u16::MAX as u32 + 1` when + /// an `Error::User` is converted to a `u32`. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// for code in 65536..131071 { + /// assert!(matches!(ApiError::from(code), ApiError::User(_))); + /// } + /// ``` + User(u16), + /// The message topic is already registered. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(41), ApiError::MessageTopicAlreadyRegistered); + /// ``` + MessageTopicAlreadyRegistered, + /// The maximum number of allowed message topics was exceeded. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(42), ApiError::MaxTopicsNumberExceeded); + /// ``` + MaxTopicsNumberExceeded, + /// The maximum size for the topic name was exceeded. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(43), ApiError::MaxTopicNameSizeExceeded); + /// ``` + MaxTopicNameSizeExceeded, + /// The message topic is not registered. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(44), ApiError::MessageTopicNotRegistered); + /// ``` + MessageTopicNotRegistered, + /// The message topic is full and cannot accept new messages. + /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(45), ApiError::MessageTopicFull); + /// ``` + MessageTopicFull, + /// The message topic is full and cannot accept new messages. 
+ /// ``` + /// # use casper_types_ver_2_0::ApiError; + /// assert_eq!(ApiError::from(46), ApiError::MessageTooLarge); + /// ``` + MessageTooLarge, +} + +impl From for ApiError { + fn from(error: bytesrepr::Error) -> Self { + match error { + bytesrepr::Error::EarlyEndOfStream => ApiError::EarlyEndOfStream, + bytesrepr::Error::Formatting => ApiError::Formatting, + bytesrepr::Error::LeftOverBytes => ApiError::LeftOverBytes, + bytesrepr::Error::OutOfMemory => ApiError::OutOfMemory, + bytesrepr::Error::NotRepresentable => ApiError::NonRepresentableSerialization, + bytesrepr::Error::ExceededRecursionDepth => ApiError::ExceededRecursionDepth, + } + } +} + +impl From for ApiError { + fn from(error: AddKeyFailure) -> Self { + match error { + AddKeyFailure::MaxKeysLimit => ApiError::MaxKeysLimit, + AddKeyFailure::DuplicateKey => ApiError::DuplicateKey, + AddKeyFailure::PermissionDenied => ApiError::PermissionDenied, + } + } +} + +impl From for ApiError { + fn from(error: UpdateKeyFailure) -> Self { + match error { + UpdateKeyFailure::MissingKey => ApiError::MissingKey, + UpdateKeyFailure::PermissionDenied => ApiError::PermissionDenied, + UpdateKeyFailure::ThresholdViolation => ApiError::ThresholdViolation, + } + } +} + +impl From for ApiError { + fn from(error: RemoveKeyFailure) -> Self { + match error { + RemoveKeyFailure::MissingKey => ApiError::MissingKey, + RemoveKeyFailure::PermissionDenied => ApiError::PermissionDenied, + RemoveKeyFailure::ThresholdViolation => ApiError::ThresholdViolation, + } + } +} + +impl From for ApiError { + fn from(error: SetThresholdFailure) -> Self { + match error { + SetThresholdFailure::KeyManagementThreshold => ApiError::KeyManagementThreshold, + SetThresholdFailure::DeploymentThreshold => ApiError::DeploymentThreshold, + SetThresholdFailure::PermissionDeniedError => ApiError::PermissionDenied, + SetThresholdFailure::InsufficientTotalWeight => ApiError::InsufficientTotalWeight, + } + } +} + +impl From for ApiError { + fn from(error: 
CLValueError) -> Self { + match error { + CLValueError::Serialization(bytesrepr_error) => bytesrepr_error.into(), + CLValueError::Type(_) => ApiError::CLTypeMismatch, + } + } +} + +impl From for ApiError { + fn from(error: addressable_entity::Error) -> Self { + ApiError::ContractHeader(error as u8) + } +} + +impl From for ApiError { + fn from(error: auction::Error) -> Self { + ApiError::AuctionError(error as u8) + } +} + +// This conversion is not intended to be used by third party crates. +#[doc(hidden)] +impl From for ApiError { + fn from(_error: TryFromIntError) -> Self { + ApiError::Unhandled + } +} + +impl From for ApiError { + fn from(_error: TryFromSliceForAccountHashError) -> Self { + ApiError::Deserialize + } +} + +impl From for ApiError { + fn from(error: mint::Error) -> Self { + ApiError::Mint(error as u8) + } +} + +impl From for ApiError { + fn from(error: handle_payment::Error) -> Self { + ApiError::HandlePayment(error as u8) + } +} + +impl From for ApiError { + fn from(error: MessageTopicError) -> Self { + match error { + MessageTopicError::DuplicateTopic => ApiError::MessageTopicAlreadyRegistered, + MessageTopicError::MaxTopicsExceeded => ApiError::MaxTopicsNumberExceeded, + MessageTopicError::TopicNameSizeExceeded => ApiError::MaxTopicNameSizeExceeded, + } + } +} + +impl From for u32 { + fn from(error: ApiError) -> Self { + match error { + ApiError::None => 1, + ApiError::MissingArgument => 2, + ApiError::InvalidArgument => 3, + ApiError::Deserialize => 4, + ApiError::Read => 5, + ApiError::ValueNotFound => 6, + ApiError::ContractNotFound => 7, + ApiError::GetKey => 8, + ApiError::UnexpectedKeyVariant => 9, + ApiError::UnexpectedContractRefVariant => 10, + ApiError::InvalidPurseName => 11, + ApiError::InvalidPurse => 12, + ApiError::UpgradeContractAtURef => 13, + ApiError::Transfer => 14, + ApiError::NoAccessRights => 15, + ApiError::CLTypeMismatch => 16, + ApiError::EarlyEndOfStream => 17, + ApiError::Formatting => 18, + ApiError::LeftOverBytes => 
19, + ApiError::OutOfMemory => 20, + ApiError::MaxKeysLimit => 21, + ApiError::DuplicateKey => 22, + ApiError::PermissionDenied => 23, + ApiError::MissingKey => 24, + ApiError::ThresholdViolation => 25, + ApiError::KeyManagementThreshold => 26, + ApiError::DeploymentThreshold => 27, + ApiError::InsufficientTotalWeight => 28, + ApiError::InvalidSystemContract => 29, + ApiError::PurseNotCreated => 30, + ApiError::Unhandled => 31, + ApiError::BufferTooSmall => 32, + ApiError::HostBufferEmpty => 33, + ApiError::HostBufferFull => 34, + ApiError::AllocLayout => 35, + ApiError::DictionaryItemKeyExceedsLength => 36, + ApiError::InvalidDictionaryItemKey => 37, + ApiError::MissingSystemContractHash => 38, + ApiError::ExceededRecursionDepth => 39, + ApiError::NonRepresentableSerialization => 40, + ApiError::MessageTopicAlreadyRegistered => 41, + ApiError::MaxTopicsNumberExceeded => 42, + ApiError::MaxTopicNameSizeExceeded => 43, + ApiError::MessageTopicNotRegistered => 44, + ApiError::MessageTopicFull => 45, + ApiError::MessageTooLarge => 46, + ApiError::AuctionError(value) => AUCTION_ERROR_OFFSET + u32::from(value), + ApiError::ContractHeader(value) => HEADER_ERROR_OFFSET + u32::from(value), + ApiError::Mint(value) => MINT_ERROR_OFFSET + u32::from(value), + ApiError::HandlePayment(value) => POS_ERROR_OFFSET + u32::from(value), + ApiError::User(value) => RESERVED_ERROR_MAX + 1 + u32::from(value), + } + } +} + +impl From for ApiError { + fn from(value: u32) -> ApiError { + match value { + 1 => ApiError::None, + 2 => ApiError::MissingArgument, + 3 => ApiError::InvalidArgument, + 4 => ApiError::Deserialize, + 5 => ApiError::Read, + 6 => ApiError::ValueNotFound, + 7 => ApiError::ContractNotFound, + 8 => ApiError::GetKey, + 9 => ApiError::UnexpectedKeyVariant, + 10 => ApiError::UnexpectedContractRefVariant, + 11 => ApiError::InvalidPurseName, + 12 => ApiError::InvalidPurse, + 13 => ApiError::UpgradeContractAtURef, + 14 => ApiError::Transfer, + 15 => ApiError::NoAccessRights, + 16 
=> ApiError::CLTypeMismatch, + 17 => ApiError::EarlyEndOfStream, + 18 => ApiError::Formatting, + 19 => ApiError::LeftOverBytes, + 20 => ApiError::OutOfMemory, + 21 => ApiError::MaxKeysLimit, + 22 => ApiError::DuplicateKey, + 23 => ApiError::PermissionDenied, + 24 => ApiError::MissingKey, + 25 => ApiError::ThresholdViolation, + 26 => ApiError::KeyManagementThreshold, + 27 => ApiError::DeploymentThreshold, + 28 => ApiError::InsufficientTotalWeight, + 29 => ApiError::InvalidSystemContract, + 30 => ApiError::PurseNotCreated, + 31 => ApiError::Unhandled, + 32 => ApiError::BufferTooSmall, + 33 => ApiError::HostBufferEmpty, + 34 => ApiError::HostBufferFull, + 35 => ApiError::AllocLayout, + 36 => ApiError::DictionaryItemKeyExceedsLength, + 37 => ApiError::InvalidDictionaryItemKey, + 38 => ApiError::MissingSystemContractHash, + 39 => ApiError::ExceededRecursionDepth, + 40 => ApiError::NonRepresentableSerialization, + 41 => ApiError::MessageTopicAlreadyRegistered, + 42 => ApiError::MaxTopicsNumberExceeded, + 43 => ApiError::MaxTopicNameSizeExceeded, + 44 => ApiError::MessageTopicNotRegistered, + 45 => ApiError::MessageTopicFull, + 46 => ApiError::MessageTooLarge, + USER_ERROR_MIN..=USER_ERROR_MAX => ApiError::User(value as u16), + HP_ERROR_MIN..=HP_ERROR_MAX => ApiError::HandlePayment(value as u8), + MINT_ERROR_MIN..=MINT_ERROR_MAX => ApiError::Mint(value as u8), + HEADER_ERROR_MIN..=HEADER_ERROR_MAX => ApiError::ContractHeader(value as u8), + AUCTION_ERROR_MIN..=AUCTION_ERROR_MAX => ApiError::AuctionError(value as u8), + _ => ApiError::Unhandled, + } + } +} + +impl Debug for ApiError { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match self { + ApiError::None => write!(f, "ApiError::None")?, + ApiError::MissingArgument => write!(f, "ApiError::MissingArgument")?, + ApiError::InvalidArgument => write!(f, "ApiError::InvalidArgument")?, + ApiError::Deserialize => write!(f, "ApiError::Deserialize")?, + ApiError::Read => write!(f, "ApiError::Read")?, + 
ApiError::ValueNotFound => write!(f, "ApiError::ValueNotFound")?, + ApiError::ContractNotFound => write!(f, "ApiError::ContractNotFound")?, + ApiError::GetKey => write!(f, "ApiError::GetKey")?, + ApiError::UnexpectedKeyVariant => write!(f, "ApiError::UnexpectedKeyVariant")?, + ApiError::UnexpectedContractRefVariant => { + write!(f, "ApiError::UnexpectedContractRefVariant")? + } + ApiError::InvalidPurseName => write!(f, "ApiError::InvalidPurseName")?, + ApiError::InvalidPurse => write!(f, "ApiError::InvalidPurse")?, + ApiError::UpgradeContractAtURef => write!(f, "ApiError::UpgradeContractAtURef")?, + ApiError::Transfer => write!(f, "ApiError::Transfer")?, + ApiError::NoAccessRights => write!(f, "ApiError::NoAccessRights")?, + ApiError::CLTypeMismatch => write!(f, "ApiError::CLTypeMismatch")?, + ApiError::EarlyEndOfStream => write!(f, "ApiError::EarlyEndOfStream")?, + ApiError::Formatting => write!(f, "ApiError::Formatting")?, + ApiError::LeftOverBytes => write!(f, "ApiError::LeftOverBytes")?, + ApiError::OutOfMemory => write!(f, "ApiError::OutOfMemory")?, + ApiError::MaxKeysLimit => write!(f, "ApiError::MaxKeysLimit")?, + ApiError::DuplicateKey => write!(f, "ApiError::DuplicateKey")?, + ApiError::PermissionDenied => write!(f, "ApiError::PermissionDenied")?, + ApiError::MissingKey => write!(f, "ApiError::MissingKey")?, + ApiError::ThresholdViolation => write!(f, "ApiError::ThresholdViolation")?, + ApiError::KeyManagementThreshold => write!(f, "ApiError::KeyManagementThreshold")?, + ApiError::DeploymentThreshold => write!(f, "ApiError::DeploymentThreshold")?, + ApiError::InsufficientTotalWeight => write!(f, "ApiError::InsufficientTotalWeight")?, + ApiError::InvalidSystemContract => write!(f, "ApiError::InvalidSystemContract")?, + ApiError::PurseNotCreated => write!(f, "ApiError::PurseNotCreated")?, + ApiError::Unhandled => write!(f, "ApiError::Unhandled")?, + ApiError::BufferTooSmall => write!(f, "ApiError::BufferTooSmall")?, + ApiError::HostBufferEmpty => write!(f, 
"ApiError::HostBufferEmpty")?, + ApiError::HostBufferFull => write!(f, "ApiError::HostBufferFull")?, + ApiError::AllocLayout => write!(f, "ApiError::AllocLayout")?, + ApiError::DictionaryItemKeyExceedsLength => { + write!(f, "ApiError::DictionaryItemKeyTooLarge")? + } + ApiError::InvalidDictionaryItemKey => write!(f, "ApiError::InvalidDictionaryItemKey")?, + ApiError::MissingSystemContractHash => write!(f, "ApiError::MissingContractHash")?, + ApiError::NonRepresentableSerialization => { + write!(f, "ApiError::NonRepresentableSerialization")? + } + ApiError::MessageTopicAlreadyRegistered => { + write!(f, "ApiError::MessageTopicAlreadyRegistered")? + } + ApiError::MaxTopicsNumberExceeded => write!(f, "ApiError::MaxTopicsNumberExceeded")?, + ApiError::MaxTopicNameSizeExceeded => write!(f, "ApiError::MaxTopicNameSizeExceeded")?, + ApiError::MessageTopicNotRegistered => { + write!(f, "ApiError::MessageTopicNotRegistered")? + } + ApiError::MessageTopicFull => write!(f, "ApiError::MessageTopicFull")?, + ApiError::MessageTooLarge => write!(f, "ApiError::MessageTooLarge")?, + ApiError::ExceededRecursionDepth => write!(f, "ApiError::ExceededRecursionDepth")?, + ApiError::AuctionError(value) => write!( + f, + "ApiError::AuctionError({:?})", + auction::Error::try_from(*value).map_err(|_err| fmt::Error)? + )?, + ApiError::ContractHeader(value) => write!( + f, + "ApiError::ContractHeader({:?})", + addressable_entity::Error::try_from(*value).map_err(|_err| fmt::Error)? + )?, + ApiError::Mint(value) => write!( + f, + "ApiError::Mint({:?})", + mint::Error::try_from(*value).map_err(|_err| fmt::Error)? + )?, + ApiError::HandlePayment(value) => write!( + f, + "ApiError::HandlePayment({:?})", + handle_payment::Error::try_from(*value).map_err(|_err| fmt::Error)? 
+ )?, + ApiError::User(value) => write!(f, "ApiError::User({})", value)?, + } + write!(f, " [{}]", u32::from(*self)) + } +} + +impl fmt::Display for ApiError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + ApiError::User(value) => write!(f, "User error: {}", value), + ApiError::ContractHeader(value) => write!(f, "Contract header error: {}", value), + ApiError::Mint(value) => write!(f, "Mint error: {}", value), + ApiError::HandlePayment(value) => write!(f, "Handle Payment error: {}", value), + _ => ::fmt(self, f), + } + } +} + +// This function is not intended to be used by third party crates. +#[doc(hidden)] +pub fn i32_from(result: Result<(), T>) -> i32 +where + ApiError: From, +{ + match result { + Ok(()) => 0, + Err(error) => { + let api_error = ApiError::from(error); + u32::from(api_error) as i32 + } + } +} + +/// Converts an `i32` to a `Result<(), ApiError>`, where `0` represents `Ok(())`, and all other +/// inputs are mapped to `Err(ApiError::)`. The full list of mappings can be found in the +/// [docs for `ApiError`](ApiError#mappings). 
+pub fn result_from(value: i32) -> Result<(), ApiError> { + match value { + 0 => Ok(()), + _ => Err(ApiError::from(value as u32)), + } +} + +#[cfg(test)] +mod tests { + use std::{i32, u16, u8}; + + use super::*; + + fn round_trip(result: Result<(), ApiError>) { + let code = i32_from(result); + assert_eq!(result, result_from(code)); + } + + #[test] + fn error_values() { + assert_eq!(65_024_u32, u32::from(ApiError::Mint(0))); // MINT_ERROR_OFFSET == 65,024 + assert_eq!(65_279_u32, u32::from(ApiError::Mint(u8::MAX))); + assert_eq!(65_280_u32, u32::from(ApiError::HandlePayment(0))); // POS_ERROR_OFFSET == 65,280 + assert_eq!(65_535_u32, u32::from(ApiError::HandlePayment(u8::MAX))); + assert_eq!(65_536_u32, u32::from(ApiError::User(0))); // u16::MAX + 1 + assert_eq!(131_071_u32, u32::from(ApiError::User(u16::MAX))); // 2 * u16::MAX + 1 + } + + #[test] + fn error_descriptions_getkey() { + assert_eq!("ApiError::GetKey [8]", &format!("{:?}", ApiError::GetKey)); + assert_eq!("ApiError::GetKey [8]", &format!("{}", ApiError::GetKey)); + } + + #[test] + fn error_descriptions_contract_header() { + assert_eq!( + "ApiError::ContractHeader(PreviouslyUsedVersion) [64769]", + &format!( + "{:?}", + ApiError::ContractHeader(addressable_entity::Error::PreviouslyUsedVersion as u8) + ) + ); + assert_eq!( + "Contract header error: 0", + &format!("{}", ApiError::ContractHeader(0)) + ); + assert_eq!( + "Contract header error: 255", + &format!("{}", ApiError::ContractHeader(u8::MAX)) + ); + } + + #[test] + fn error_descriptions_mint() { + assert_eq!( + "ApiError::Mint(InsufficientFunds) [65024]", + &format!("{:?}", ApiError::Mint(0)) + ); + assert_eq!("Mint error: 0", &format!("{}", ApiError::Mint(0))); + assert_eq!("Mint error: 255", &format!("{}", ApiError::Mint(u8::MAX))); + } + + #[test] + fn error_descriptions_handle_payment() { + assert_eq!( + "ApiError::HandlePayment(NotBonded) [65280]", + &format!( + "{:?}", + ApiError::HandlePayment(handle_payment::Error::NotBonded as u8) + ) + ); + 
} + #[test] + fn error_descriptions_handle_payment_display() { + assert_eq!( + "Handle Payment error: 0", + &format!( + "{}", + ApiError::HandlePayment(handle_payment::Error::NotBonded as u8) + ) + ); + } + + #[test] + fn error_descriptions_user_errors() { + assert_eq!( + "ApiError::User(0) [65536]", + &format!("{:?}", ApiError::User(0)) + ); + + assert_eq!("User error: 0", &format!("{}", ApiError::User(0))); + assert_eq!( + "ApiError::User(65535) [131071]", + &format!("{:?}", ApiError::User(u16::MAX)) + ); + assert_eq!( + "User error: 65535", + &format!("{}", ApiError::User(u16::MAX)) + ); + } + + #[test] + fn error_edge_cases() { + assert_eq!(Err(ApiError::Unhandled), result_from(i32::MAX)); + assert_eq!( + Err(ApiError::ContractHeader(255)), + result_from(MINT_ERROR_OFFSET as i32 - 1) + ); + assert_eq!(Err(ApiError::Unhandled), result_from(-1)); + assert_eq!(Err(ApiError::Unhandled), result_from(i32::MIN)); + } + + #[test] + fn error_round_trips() { + round_trip(Ok(())); + round_trip(Err(ApiError::None)); + round_trip(Err(ApiError::MissingArgument)); + round_trip(Err(ApiError::InvalidArgument)); + round_trip(Err(ApiError::Deserialize)); + round_trip(Err(ApiError::Read)); + round_trip(Err(ApiError::ValueNotFound)); + round_trip(Err(ApiError::ContractNotFound)); + round_trip(Err(ApiError::GetKey)); + round_trip(Err(ApiError::UnexpectedKeyVariant)); + round_trip(Err(ApiError::UnexpectedContractRefVariant)); + round_trip(Err(ApiError::InvalidPurseName)); + round_trip(Err(ApiError::InvalidPurse)); + round_trip(Err(ApiError::UpgradeContractAtURef)); + round_trip(Err(ApiError::Transfer)); + round_trip(Err(ApiError::NoAccessRights)); + round_trip(Err(ApiError::CLTypeMismatch)); + round_trip(Err(ApiError::EarlyEndOfStream)); + round_trip(Err(ApiError::Formatting)); + round_trip(Err(ApiError::LeftOverBytes)); + round_trip(Err(ApiError::OutOfMemory)); + round_trip(Err(ApiError::MaxKeysLimit)); + round_trip(Err(ApiError::DuplicateKey)); + 
round_trip(Err(ApiError::PermissionDenied)); + round_trip(Err(ApiError::MissingKey)); + round_trip(Err(ApiError::ThresholdViolation)); + round_trip(Err(ApiError::KeyManagementThreshold)); + round_trip(Err(ApiError::DeploymentThreshold)); + round_trip(Err(ApiError::InsufficientTotalWeight)); + round_trip(Err(ApiError::InvalidSystemContract)); + round_trip(Err(ApiError::PurseNotCreated)); + round_trip(Err(ApiError::Unhandled)); + round_trip(Err(ApiError::BufferTooSmall)); + round_trip(Err(ApiError::HostBufferEmpty)); + round_trip(Err(ApiError::HostBufferFull)); + round_trip(Err(ApiError::AllocLayout)); + round_trip(Err(ApiError::NonRepresentableSerialization)); + round_trip(Err(ApiError::ContractHeader(0))); + round_trip(Err(ApiError::ContractHeader(u8::MAX))); + round_trip(Err(ApiError::Mint(0))); + round_trip(Err(ApiError::Mint(u8::MAX))); + round_trip(Err(ApiError::HandlePayment(0))); + round_trip(Err(ApiError::HandlePayment(u8::MAX))); + round_trip(Err(ApiError::User(0))); + round_trip(Err(ApiError::User(u16::MAX))); + round_trip(Err(ApiError::AuctionError(0))); + round_trip(Err(ApiError::AuctionError(u8::MAX))); + round_trip(Err(ApiError::MessageTopicAlreadyRegistered)); + round_trip(Err(ApiError::MaxTopicsNumberExceeded)); + round_trip(Err(ApiError::MaxTopicNameSizeExceeded)); + round_trip(Err(ApiError::MessageTopicNotRegistered)); + round_trip(Err(ApiError::MessageTopicFull)); + round_trip(Err(ApiError::MessageTooLarge)); + } +} diff --git a/casper_types_ver_2_0/src/auction_state.rs b/casper_types_ver_2_0/src/auction_state.rs new file mode 100644 index 00000000..85fa32ef --- /dev/null +++ b/casper_types_ver_2_0/src/auction_state.rs @@ -0,0 +1,203 @@ +use alloc::collections::{btree_map::Entry, BTreeMap}; + +use alloc::vec::Vec; +#[cfg(feature = "json-schema")] +use once_cell::sync::Lazy; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +#[cfg(feature = "json-schema")] +use serde_map_to_array::KeyValueJsonSchema; 
+use serde_map_to_array::{BTreeMapToArray, KeyValueLabels}; + +use crate::{ + system::auction::{Bid, BidKind, EraValidators, Staking, ValidatorBid}, + Digest, EraId, PublicKey, U512, +}; + +#[cfg(feature = "json-schema")] +static ERA_VALIDATORS: Lazy = Lazy::new(|| { + use crate::SecretKey; + + let secret_key_1 = SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(); + let public_key_1 = PublicKey::from(&secret_key_1); + + let mut validator_weights = BTreeMap::new(); + validator_weights.insert(public_key_1, U512::from(10)); + + let mut era_validators = BTreeMap::new(); + era_validators.insert(EraId::from(10u64), validator_weights); + + era_validators +}); +#[cfg(feature = "json-schema")] +static AUCTION_INFO: Lazy = Lazy::new(|| { + use crate::{ + system::auction::{DelegationRate, Delegator}, + AccessRights, SecretKey, URef, + }; + use num_traits::Zero; + + let state_root_hash = Digest::from([11; Digest::LENGTH]); + let validator_secret_key = + SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(); + let validator_public_key = PublicKey::from(&validator_secret_key); + + let mut bids = vec![]; + let validator_bid = ValidatorBid::unlocked( + validator_public_key.clone(), + URef::new([250; 32], AccessRights::READ_ADD_WRITE), + U512::from(20), + DelegationRate::zero(), + ); + bids.push(BidKind::Validator(Box::new(validator_bid))); + + let delegator_secret_key = + SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]).unwrap(); + let delegator_public_key = PublicKey::from(&delegator_secret_key); + let delegator_bid = Delegator::unlocked( + delegator_public_key, + U512::from(10), + URef::new([251; 32], AccessRights::READ_ADD_WRITE), + validator_public_key, + ); + bids.push(BidKind::Delegator(Box::new(delegator_bid))); + + let height: u64 = 10; + let era_validators = ERA_VALIDATORS.clone(); + AuctionState::new(state_root_hash, height, era_validators, bids) +}); + +/// A validator's weight. 
+#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct JsonValidatorWeights { + public_key: PublicKey, + weight: U512, +} + +/// The validators for the given era. +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct JsonEraValidators { + era_id: EraId, + validator_weights: Vec, +} + +/// Data structure summarizing auction contract data. +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct AuctionState { + /// Global state hash. + pub state_root_hash: Digest, + /// Block height. + pub block_height: u64, + /// Era validators. + pub era_validators: Vec, + /// All bids. + #[serde(with = "BTreeMapToArray::")] + bids: BTreeMap, +} + +impl AuctionState { + /// Create new instance of `AuctionState` + pub fn new( + state_root_hash: Digest, + block_height: u64, + era_validators: EraValidators, + bids: Vec, + ) -> Self { + let mut json_era_validators: Vec = Vec::new(); + for (era_id, validator_weights) in era_validators.iter() { + let mut json_validator_weights: Vec = Vec::new(); + for (public_key, weight) in validator_weights.iter() { + json_validator_weights.push(JsonValidatorWeights { + public_key: public_key.clone(), + weight: *weight, + }); + } + json_era_validators.push(JsonEraValidators { + era_id: *era_id, + validator_weights: json_validator_weights, + }); + } + + let staking = { + let mut staking: Staking = BTreeMap::new(); + for bid_kind in bids.iter().filter(|x| x.is_unified()) { + if let BidKind::Unified(bid) = bid_kind { + let public_key = bid.validator_public_key().clone(); + let validator_bid = ValidatorBid::unlocked( + bid.validator_public_key().clone(), + *bid.bonding_purse(), + *bid.staked_amount(), + *bid.delegation_rate(), + ); 
+ staking.insert(public_key, (validator_bid, bid.delegators().clone())); + } + } + + for bid_kind in bids.iter().filter(|x| x.is_validator()) { + if let BidKind::Validator(validator_bid) = bid_kind { + let public_key = validator_bid.validator_public_key().clone(); + staking.insert(public_key, (*validator_bid.clone(), BTreeMap::new())); + } + } + + for bid_kind in bids.iter().filter(|x| x.is_delegator()) { + if let BidKind::Delegator(delegator_bid) = bid_kind { + let validator_public_key = delegator_bid.validator_public_key().clone(); + if let Entry::Occupied(mut occupant) = + staking.entry(validator_public_key.clone()) + { + let (_, delegators) = occupant.get_mut(); + delegators.insert( + delegator_bid.delegator_public_key().clone(), + *delegator_bid.clone(), + ); + } + } + } + staking + }; + + let mut bids: BTreeMap = BTreeMap::new(); + for (public_key, (validator_bid, delegators)) in staking { + let bid = Bid::from_non_unified(validator_bid, delegators); + bids.insert(public_key, bid); + } + + AuctionState { + state_root_hash, + block_height, + era_validators: json_era_validators, + bids, + } + } + + // This method is not intended to be used by third party crates. 
+ #[doc(hidden)] + #[cfg(feature = "json-schema")] + pub fn example() -> &'static Self { + &AUCTION_INFO + } +} + +struct BidLabels; + +impl KeyValueLabels for BidLabels { + const KEY: &'static str = "public_key"; + const VALUE: &'static str = "bid"; +} + +#[cfg(feature = "json-schema")] +impl KeyValueJsonSchema for BidLabels { + const JSON_SCHEMA_KV_NAME: Option<&'static str> = Some("PublicKeyAndBid"); + const JSON_SCHEMA_KV_DESCRIPTION: Option<&'static str> = + Some("A bid associated with the given public key."); + const JSON_SCHEMA_KEY_DESCRIPTION: Option<&'static str> = Some("The public key of the bidder."); + const JSON_SCHEMA_VALUE_DESCRIPTION: Option<&'static str> = Some("The bid details."); +} diff --git a/casper_types_ver_2_0/src/binary_port.rs b/casper_types_ver_2_0/src/binary_port.rs new file mode 100644 index 00000000..42fc4a9f --- /dev/null +++ b/casper_types_ver_2_0/src/binary_port.rs @@ -0,0 +1,66 @@ +//! The binary port. +mod binary_request; +mod binary_response; +mod binary_response_and_request; +mod binary_response_header; +mod error_code; +mod get_all_values_result; +mod get_request; +mod global_state_query_result; +mod information_request; +mod minimal_block_info; +#[cfg(any(feature = "std", test))] +mod node_status; +mod payload_type; +mod record_id; +mod state_request; +mod type_wrappers; + +pub use binary_request::{BinaryRequest, BinaryRequestHeader, BinaryRequestTag}; +pub use binary_response::BinaryResponse; +pub use binary_response_and_request::BinaryResponseAndRequest; +pub use binary_response_header::BinaryResponseHeader; +pub use error_code::ErrorCode; +pub use get_all_values_result::GetAllValuesResult; +pub use get_request::GetRequest; +pub use global_state_query_result::GlobalStateQueryResult; +pub use information_request::{InformationRequest, InformationRequestTag}; +#[cfg(any(feature = "std", test))] +pub use minimal_block_info::MinimalBlockInfo; +#[cfg(any(feature = "std", test))] +pub use node_status::NodeStatus; +pub use 
payload_type::{PayloadEntity, PayloadType}; +pub use record_id::RecordId; +pub use state_request::GlobalStateRequest; +pub use type_wrappers::{ + ConsensusStatus, ConsensusValidatorChanges, GetTrieFullResult, LastProgress, NetworkName, + SpeculativeExecutionResult, TransactionWithExecutionInfo, Uptime, +}; + +use alloc::vec::Vec; + +/// Stores raw bytes from the DB along with the flag indicating whether data come from legacy or +/// current version of the DB. +#[derive(Debug)] +pub struct DbRawBytesSpec { + is_legacy: bool, + raw_bytes: Vec, +} + +impl DbRawBytesSpec { + /// Creates a variant indicating that raw bytes are coming from the legacy database. + pub fn new_legacy(raw_bytes: &[u8]) -> Self { + Self { + is_legacy: true, + raw_bytes: raw_bytes.to_vec(), + } + } + + /// Creates a variant indicating that raw bytes are coming from the current database. + pub fn new_current(raw_bytes: &[u8]) -> Self { + Self { + is_legacy: false, + raw_bytes: raw_bytes.to_vec(), + } + } +} diff --git a/casper_types_ver_2_0/src/binary_port/binary_request.rs b/casper_types_ver_2_0/src/binary_port/binary_request.rs new file mode 100644 index 00000000..a123a80c --- /dev/null +++ b/casper_types_ver_2_0/src/binary_port/binary_request.rs @@ -0,0 +1,297 @@ +use core::convert::TryFrom; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + BlockHeader, Digest, ProtocolVersion, Timestamp, Transaction, +}; +use alloc::vec::Vec; + +use super::get_request::GetRequest; + +#[cfg(test)] +use rand::Rng; + +#[cfg(test)] +use crate::{testing::TestRng, Block, TestBlockV1Builder}; + +/// The header of a binary request. +#[derive(Debug, PartialEq)] +pub struct BinaryRequestHeader { + protocol_version: ProtocolVersion, + type_tag: u8, +} + +impl BinaryRequestHeader { + /// Creates new binary request header. 
+ pub fn new(protocol_version: ProtocolVersion, type_tag: BinaryRequestTag) -> Self { + Self { + protocol_version, + type_tag: type_tag.into(), + } + } + + /// Returns the protocol version of the request. + pub fn protocol_version(&self) -> ProtocolVersion { + self.protocol_version + } + + /// Returns the type tag of the request. + pub fn type_tag(&self) -> u8 { + self.type_tag + } +} + +impl ToBytes for BinaryRequestHeader { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.protocol_version.write_bytes(writer)?; + self.type_tag.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + self.protocol_version.serialized_length() + self.type_tag.serialized_length() + } +} + +impl FromBytes for BinaryRequestHeader { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (protocol_version, remainder) = FromBytes::from_bytes(bytes)?; + let (type_tag, remainder) = u8::from_bytes(remainder)?; + Ok(( + BinaryRequestHeader { + protocol_version, + type_tag, + }, + remainder, + )) + } +} + +/// A request to the binary access interface. +#[derive(Debug, PartialEq)] +pub enum BinaryRequest { + /// Request to get data from the node + Get(GetRequest), + /// Request to add a transaction into a blockchain. + TryAcceptTransaction { + /// Transaction to be handled. + transaction: Transaction, + }, + /// Request to execute a transaction speculatively. + TrySpeculativeExec { + /// State root on top of which to execute deploy. + state_root_hash: Digest, + /// Block time. + block_time: Timestamp, + /// Protocol version used when creating the original block. + protocol_version: ProtocolVersion, + /// Transaction to execute. + transaction: Transaction, + /// Block header of block at which we should perform speculative execution. 
+ speculative_exec_at_block: BlockHeader, + }, +} + +impl BinaryRequest { + /// Returns the type tag of the request. + pub fn tag(&self) -> BinaryRequestTag { + match self { + BinaryRequest::Get(_) => BinaryRequestTag::Get, + BinaryRequest::TryAcceptTransaction { .. } => BinaryRequestTag::TryAcceptTransaction, + BinaryRequest::TrySpeculativeExec { .. } => BinaryRequestTag::TrySpeculativeExec, + } + } + + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + match BinaryRequestTag::random(rng) { + BinaryRequestTag::Get => Self::Get(GetRequest::random(rng)), + BinaryRequestTag::TryAcceptTransaction => Self::TryAcceptTransaction { + transaction: Transaction::random(rng), + }, + BinaryRequestTag::TrySpeculativeExec => { + let block_v1 = TestBlockV1Builder::new().build(rng); + let block = Block::V1(block_v1); + + Self::TrySpeculativeExec { + state_root_hash: Digest::random(rng), + block_time: Timestamp::random(rng), + protocol_version: ProtocolVersion::from_parts(rng.gen(), rng.gen(), rng.gen()), + transaction: Transaction::random(rng), + speculative_exec_at_block: block.take_header(), + } + } + } + } +} + +impl ToBytes for BinaryRequest { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + BinaryRequest::Get(inner) => inner.write_bytes(writer), + BinaryRequest::TryAcceptTransaction { transaction } => transaction.write_bytes(writer), + BinaryRequest::TrySpeculativeExec { + transaction, + state_root_hash, + block_time, + protocol_version, + speculative_exec_at_block, + } => { + transaction.write_bytes(writer)?; + state_root_hash.write_bytes(writer)?; + block_time.write_bytes(writer)?; + protocol_version.write_bytes(writer)?; + speculative_exec_at_block.write_bytes(writer) + } + } + } + + fn serialized_length(&self) -> usize { + match self { + 
BinaryRequest::Get(inner) => inner.serialized_length(), + BinaryRequest::TryAcceptTransaction { transaction } => transaction.serialized_length(), + BinaryRequest::TrySpeculativeExec { + transaction, + state_root_hash, + block_time, + protocol_version, + speculative_exec_at_block, + } => { + transaction.serialized_length() + + state_root_hash.serialized_length() + + block_time.serialized_length() + + protocol_version.serialized_length() + + speculative_exec_at_block.serialized_length() + } + } + } +} + +impl TryFrom<(BinaryRequestTag, &[u8])> for BinaryRequest { + type Error = bytesrepr::Error; + + fn try_from((tag, bytes): (BinaryRequestTag, &[u8])) -> Result { + let (req, remainder) = match tag { + BinaryRequestTag::Get => { + let (get_request, remainder) = FromBytes::from_bytes(bytes)?; + (BinaryRequest::Get(get_request), remainder) + } + BinaryRequestTag::TryAcceptTransaction => { + let (transaction, remainder) = FromBytes::from_bytes(bytes)?; + ( + BinaryRequest::TryAcceptTransaction { transaction }, + remainder, + ) + } + BinaryRequestTag::TrySpeculativeExec => { + let (transaction, remainder) = FromBytes::from_bytes(bytes)?; + let (state_root_hash, remainder) = FromBytes::from_bytes(remainder)?; + let (block_time, remainder) = FromBytes::from_bytes(remainder)?; + let (protocol_version, remainder) = FromBytes::from_bytes(remainder)?; + let (speculative_exec_at_block, remainder) = FromBytes::from_bytes(remainder)?; + ( + BinaryRequest::TrySpeculativeExec { + transaction, + state_root_hash, + block_time, + protocol_version, + speculative_exec_at_block, + }, + remainder, + ) + } + }; + if !remainder.is_empty() { + return Err(bytesrepr::Error::LeftOverBytes); + } + Ok(req) + } +} + +/// The type tag of a binary request. +#[derive(Debug, PartialEq)] +#[repr(u8)] +pub enum BinaryRequestTag { + /// Request to get data from the node + Get = 0, + /// Request to add a transaction into a blockchain. 
+ TryAcceptTransaction = 1, + /// Request to execute a transaction speculatively. + TrySpeculativeExec = 2, +} + +impl BinaryRequestTag { + /// Creates a random `BinaryRequestTag`. + #[cfg(test)] + pub fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..3) { + 0 => BinaryRequestTag::Get, + 1 => BinaryRequestTag::TryAcceptTransaction, + 2 => BinaryRequestTag::TrySpeculativeExec, + _ => unreachable!(), + } + } +} + +impl TryFrom for BinaryRequestTag { + type Error = InvalidBinaryRequestTag; + + fn try_from(value: u8) -> Result { + match value { + 0 => Ok(BinaryRequestTag::Get), + 1 => Ok(BinaryRequestTag::TryAcceptTransaction), + 2 => Ok(BinaryRequestTag::TrySpeculativeExec), + _ => Err(InvalidBinaryRequestTag(value)), + } + } +} + +impl From for u8 { + fn from(value: BinaryRequestTag) -> Self { + value as u8 + } +} + +/// Error raised when trying to convert an invalid u8 into a `BinaryRequestTag`. +pub struct InvalidBinaryRequestTag(u8); + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn header_bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + for tag in [ + BinaryRequestTag::Get, + BinaryRequestTag::TryAcceptTransaction, + BinaryRequestTag::TrySpeculativeExec, + ] { + let version = ProtocolVersion::from_parts(rng.gen(), rng.gen(), rng.gen()); + let val = BinaryRequestHeader::new(version, tag); + bytesrepr::test_serialization_roundtrip(&val); + } + } + + #[test] + fn request_bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = BinaryRequest::random(rng); + let bytes = val.to_bytes().expect("should serialize"); + assert_eq!(BinaryRequest::try_from((val.tag(), &bytes[..])), Ok(val)); + } +} diff --git a/casper_types_ver_2_0/src/binary_port/binary_response.rs b/casper_types_ver_2_0/src/binary_port/binary_response.rs new file mode 100644 index 00000000..f821bc3b --- /dev/null +++ b/casper_types_ver_2_0/src/binary_port/binary_response.rs @@ -0,0 +1,177 @@ +use crate::{ + bytesrepr::{self, 
Bytes, FromBytes, ToBytes}, + ProtocolVersion, +}; +use alloc::vec::Vec; + +#[cfg(test)] +use crate::testing::TestRng; + +use super::{ + binary_response_header::BinaryResponseHeader, + payload_type::{PayloadEntity, PayloadType}, + record_id::RecordId, + DbRawBytesSpec, ErrorCode, +}; + +/// The response used in the binary port protocol. +#[derive(Debug, PartialEq)] +pub struct BinaryResponse { + /// Header of the binary response. + header: BinaryResponseHeader, + /// The response. + payload: Vec, +} + +impl BinaryResponse { + /// Creates new empty binary response. + pub fn new_empty(protocol_version: ProtocolVersion) -> Self { + Self { + header: BinaryResponseHeader::new(None, protocol_version), + payload: vec![], + } + } + + /// Creates new binary response with error code. + pub fn new_error(error: ErrorCode, protocol_version: ProtocolVersion) -> Self { + BinaryResponse { + header: BinaryResponseHeader::new_error(error, protocol_version), + payload: vec![], + } + } + + /// Creates new binary response from raw DB bytes. + pub fn from_db_raw_bytes( + record_id: RecordId, + spec: Option, + protocol_version: ProtocolVersion, + ) -> Self { + match spec { + Some(DbRawBytesSpec { + is_legacy, + raw_bytes, + }) => BinaryResponse { + header: BinaryResponseHeader::new( + Some(PayloadType::new_from_record_id(record_id, is_legacy)), + protocol_version, + ), + payload: raw_bytes, + }, + None => BinaryResponse { + header: BinaryResponseHeader::new_error(ErrorCode::NotFound, protocol_version), + payload: vec![], + }, + } + } + + /// Creates a new binary response from a value. 
+ pub fn from_value(val: V, protocol_version: ProtocolVersion) -> Self + where + V: ToBytes + PayloadEntity, + { + ToBytes::to_bytes(&val).map_or( + BinaryResponse::new_error(ErrorCode::InternalError, protocol_version), + |payload| BinaryResponse { + payload, + header: BinaryResponseHeader::new(Some(V::PAYLOAD_TYPE), protocol_version), + }, + ) + } + + /// Creates a new binary response from an optional value. + pub fn from_option(opt: Option, protocol_version: ProtocolVersion) -> Self + where + V: ToBytes + PayloadEntity, + { + match opt { + Some(val) => Self::from_value(val, protocol_version), + None => Self::new_empty(protocol_version), + } + } + + /// Returns true if response is success. + pub fn is_success(&self) -> bool { + self.header.is_success() + } + + /// Returns the error code. + pub fn error_code(&self) -> u8 { + self.header.error_code() + } + + /// Returns the payload type of the response. + pub fn returned_data_type_tag(&self) -> Option { + self.header.returned_data_type_tag() + } + + /// Returns true if the response means that data has not been found. + pub fn is_not_found(&self) -> bool { + self.header.is_not_found() + } + + /// Returns the payload. + pub fn payload(&self) -> &[u8] { + self.payload.as_ref() + } + + /// Returns the protocol version. 
+ pub fn protocol_version(&self) -> ProtocolVersion { + self.header.protocol_version() + } + + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + Self { + header: BinaryResponseHeader::random(rng), + payload: rng.random_vec(64..128), + } + } +} + +impl ToBytes for BinaryResponse { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + let BinaryResponse { header, payload } = self; + + header.write_bytes(writer)?; + payload.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + self.header.serialized_length() + self.payload.serialized_length() + } +} + +impl FromBytes for BinaryResponse { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (header, remainder) = FromBytes::from_bytes(bytes)?; + let (payload, remainder) = Bytes::from_bytes(remainder)?; + + Ok(( + BinaryResponse { + header, + payload: payload.into(), + }, + remainder, + )) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = BinaryResponse::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/casper_types_ver_2_0/src/binary_port/binary_response_and_request.rs b/casper_types_ver_2_0/src/binary_port/binary_response_and_request.rs new file mode 100644 index 00000000..78d4785d --- /dev/null +++ b/casper_types_ver_2_0/src/binary_port/binary_response_and_request.rs @@ -0,0 +1,155 @@ +use crate::bytesrepr::{self, Bytes, FromBytes, ToBytes}; + +use super::binary_response::BinaryResponse; +#[cfg(any(feature = "testing", test))] +use super::payload_type::PayloadEntity; +use alloc::vec::Vec; + +#[cfg(any(feature = "testing", test))] +use super::record_id::RecordId; +#[cfg(any(feature = "testing", test))] +use crate::ProtocolVersion; 
+ +#[cfg(test)] +use crate::testing::TestRng; + +/// The binary response along with the original binary request attached. +#[derive(Debug, PartialEq)] +pub struct BinaryResponseAndRequest { + /// The original request (as serialized bytes). + original_request: Vec, + /// The response. + response: BinaryResponse, +} + +impl BinaryResponseAndRequest { + /// Creates new binary response with the original request attached. + pub fn new(data: BinaryResponse, original_request: &[u8]) -> Self { + Self { + original_request: original_request.to_vec(), + response: data, + } + } + + /// Returns a new binary response with specified data and no original request. + #[cfg(any(feature = "testing", test))] + pub fn new_test_response( + record_id: RecordId, + data: &A, + protocol_version: ProtocolVersion, + ) -> BinaryResponseAndRequest { + use super::DbRawBytesSpec; + + let response = BinaryResponse::from_db_raw_bytes( + record_id, + Some(DbRawBytesSpec::new_current(&data.to_bytes().unwrap())), + protocol_version, + ); + Self::new(response, &[]) + } + + /// Returns a new binary response with specified legacy data and no original request. + #[cfg(any(feature = "testing", test))] + pub fn new_legacy_test_response( + record_id: RecordId, + data: &A, + protocol_version: ProtocolVersion, + ) -> BinaryResponseAndRequest { + use super::DbRawBytesSpec; + + let response = BinaryResponse::from_db_raw_bytes( + record_id, + Some(DbRawBytesSpec::new_legacy( + &bincode::serialize(data).unwrap(), + )), + protocol_version, + ); + Self::new(response, &[]) + } + + /// Returns true if response is success. + pub fn is_success(&self) -> bool { + self.response.is_success() + } + + /// Returns the error code. 
+ pub fn error_code(&self) -> u8 { + self.response.error_code() + } + + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + Self { + original_request: rng.random_vec(64..128), + response: BinaryResponse::random(rng), + } + } + + /// Returns serialized bytes representing the original request. + pub fn original_request(&self) -> &[u8] { + self.original_request.as_ref() + } + + /// Returns the inner binary response. + pub fn response(&self) -> &BinaryResponse { + &self.response + } +} + +impl ToBytes for BinaryResponseAndRequest { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + let BinaryResponseAndRequest { + original_request, + response, + } = self; + + original_request.write_bytes(writer)?; + response.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + self.original_request.serialized_length() + self.response.serialized_length() + } +} + +impl FromBytes for BinaryResponseAndRequest { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (original_request, remainder) = Bytes::from_bytes(bytes)?; + let (response, remainder) = FromBytes::from_bytes(remainder)?; + + Ok(( + BinaryResponseAndRequest { + original_request: original_request.into(), + response, + }, + remainder, + )) + } +} + +impl From for BinaryResponse { + fn from(response_and_request: BinaryResponseAndRequest) -> Self { + let BinaryResponseAndRequest { response, .. 
} = response_and_request; + response + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = BinaryResponseAndRequest::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/casper_types_ver_2_0/src/binary_port/binary_response_header.rs b/casper_types_ver_2_0/src/binary_port/binary_response_header.rs new file mode 100644 index 00000000..025a9068 --- /dev/null +++ b/casper_types_ver_2_0/src/binary_port/binary_response_header.rs @@ -0,0 +1,134 @@ +#[cfg(test)] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + ProtocolVersion, +}; +use alloc::vec::Vec; +#[cfg(test)] +use rand::Rng; + +use super::{ErrorCode, PayloadType}; + +/// Header of the binary response. +#[derive(Debug, PartialEq)] +pub struct BinaryResponseHeader { + protocol_version: ProtocolVersion, + error: u8, + returned_data_type_tag: Option, +} + +impl BinaryResponseHeader { + /// Creates new binary response header representing success. + pub fn new(returned_data_type: Option, protocol_version: ProtocolVersion) -> Self { + Self { + protocol_version, + error: ErrorCode::NoError as u8, + returned_data_type_tag: returned_data_type.map(|ty| ty as u8), + } + } + + /// Creates new binary response header representing error. + pub fn new_error(error: ErrorCode, protocol_version: ProtocolVersion) -> Self { + Self { + protocol_version, + error: error as u8, + returned_data_type_tag: None, + } + } + + /// Returns the type of the returned data. + pub fn returned_data_type_tag(&self) -> Option { + self.returned_data_type_tag + } + + /// Returns the error code. + pub fn error_code(&self) -> u8 { + self.error + } + + /// Returns true if the response represents success. + pub fn is_success(&self) -> bool { + self.error == ErrorCode::NoError as u8 + } + + /// Returns true if the response indicates the data was not found. 
+ pub fn is_not_found(&self) -> bool { + self.error == ErrorCode::NotFound as u8 + } + + /// Returns the protocol version. + pub fn protocol_version(&self) -> ProtocolVersion { + self.protocol_version + } + + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + let protocol_version = ProtocolVersion::from_parts(rng.gen(), rng.gen(), rng.gen()); + let error = rng.gen(); + let returned_data_type_tag = if rng.gen() { None } else { Some(rng.gen()) }; + + BinaryResponseHeader { + protocol_version, + error, + returned_data_type_tag, + } + } +} + +impl ToBytes for BinaryResponseHeader { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + let Self { + protocol_version, + error, + returned_data_type_tag, + } = self; + + protocol_version.write_bytes(writer)?; + error.write_bytes(writer)?; + returned_data_type_tag.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + self.protocol_version.serialized_length() + + self.error.serialized_length() + + self.returned_data_type_tag.serialized_length() + } +} + +impl FromBytes for BinaryResponseHeader { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (protocol_version, remainder) = FromBytes::from_bytes(bytes)?; + let (error, remainder) = FromBytes::from_bytes(remainder)?; + let (returned_data_type_tag, remainder) = FromBytes::from_bytes(remainder)?; + + Ok(( + BinaryResponseHeader { + protocol_version, + error, + returned_data_type_tag, + }, + remainder, + )) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = BinaryResponseHeader::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/casper_types_ver_2_0/src/binary_port/error_code.rs 
b/casper_types_ver_2_0/src/binary_port/error_code.rs new file mode 100644 index 00000000..76920537 --- /dev/null +++ b/casper_types_ver_2_0/src/binary_port/error_code.rs @@ -0,0 +1,79 @@ +use core::{convert::TryFrom, fmt}; + +/// The error code indicating the result of handling the binary request. +#[derive(Debug, Clone)] +#[cfg_attr(feature = "std", derive(thiserror::Error))] +#[repr(u8)] +pub enum ErrorCode { + /// Request executed correctly. + #[cfg_attr(feature = "std", error("request executed correctly"))] + NoError = 0, + /// This function is disabled. + #[cfg_attr(feature = "std", error("this function is disabled"))] + FunctionDisabled = 1, + /// Data not found. + #[cfg_attr(feature = "std", error("data not found"))] + NotFound = 2, + /// Root not found. + #[cfg_attr(feature = "std", error("root not found"))] + RootNotFound = 3, + /// Invalid deploy item variant. + #[cfg_attr(feature = "std", error("invalid deploy item variant"))] + InvalidDeployItemVariant = 4, + /// Wasm preprocessing. + #[cfg_attr(feature = "std", error("wasm preprocessing"))] + WasmPreprocessing = 5, + /// Invalid protocol version. + #[cfg_attr(feature = "std", error("unsupported protocol version"))] + UnsupportedProtocolVersion = 6, + /// Invalid transaction. + #[cfg_attr(feature = "std", error("invalid transaction"))] + InvalidTransaction = 7, + /// Internal error. + #[cfg_attr(feature = "std", error("internal error"))] + InternalError = 8, + /// The query to global state failed. + #[cfg_attr(feature = "std", error("the query to global state failed"))] + QueryFailedToExecute = 9, + /// Bad request. + #[cfg_attr(feature = "std", error("bad request"))] + BadRequest = 10, + /// Received an unsupported type of request. 
+ #[cfg_attr(feature = "std", error("unsupported request"))] + UnsupportedRequest = 11, +} + +impl TryFrom for ErrorCode { + type Error = UnknownErrorCode; + + fn try_from(value: u8) -> Result { + match value { + 0 => Ok(ErrorCode::NoError), + 1 => Ok(ErrorCode::FunctionDisabled), + 2 => Ok(ErrorCode::NotFound), + 3 => Ok(ErrorCode::RootNotFound), + 4 => Ok(ErrorCode::InvalidDeployItemVariant), + 5 => Ok(ErrorCode::WasmPreprocessing), + 6 => Ok(ErrorCode::UnsupportedProtocolVersion), + 7 => Ok(ErrorCode::InvalidTransaction), + 8 => Ok(ErrorCode::InternalError), + 9 => Ok(ErrorCode::QueryFailedToExecute), + 10 => Ok(ErrorCode::BadRequest), + 11 => Ok(ErrorCode::UnsupportedRequest), + _ => Err(UnknownErrorCode), + } + } +} + +/// Error indicating that the error code is unknown. +#[derive(Debug, Clone, Copy)] +pub struct UnknownErrorCode; + +impl fmt::Display for UnknownErrorCode { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "unknown node error code") + } +} + +#[cfg(feature = "std")] +impl std::error::Error for UnknownErrorCode {} diff --git a/casper_types_ver_2_0/src/binary_port/get_all_values_result.rs b/casper_types_ver_2_0/src/binary_port/get_all_values_result.rs new file mode 100644 index 00000000..3ddada4a --- /dev/null +++ b/casper_types_ver_2_0/src/binary_port/get_all_values_result.rs @@ -0,0 +1,15 @@ +use alloc::vec::Vec; + +use crate::StoredValue; + +/// Represents a result of a `get_all_values` request. +#[derive(Debug, PartialEq)] +pub enum GetAllValuesResult { + /// Invalid state root hash. + RootNotFound, + /// Contains values returned from the global state. + Success { + /// Current values. 
+ values: Vec, + }, +} diff --git a/casper_types_ver_2_0/src/binary_port/get_request.rs b/casper_types_ver_2_0/src/binary_port/get_request.rs new file mode 100644 index 00000000..01fb8f23 --- /dev/null +++ b/casper_types_ver_2_0/src/binary_port/get_request.rs @@ -0,0 +1,146 @@ +use crate::bytesrepr::{self, Bytes, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}; +use alloc::vec::Vec; + +#[cfg(test)] +use rand::Rng; + +#[cfg(test)] +use crate::testing::TestRng; + +use super::state_request::GlobalStateRequest; + +const RECORD_TAG: u8 = 0; +const INFORMATION_TAG: u8 = 1; +const STATE_TAG: u8 = 2; + +/// A request to get data from the node. +#[derive(Clone, Debug, PartialEq)] +pub enum GetRequest { + /// Retrieves a record from the node. + Record { + /// Type tag of the record to retrieve. + record_type_tag: u16, + /// Key encoded into bytes. + key: Vec, + }, + /// Retrieves information from the node. + Information { + /// Type tag of the information to retrieve. + info_type_tag: u16, + /// Key encoded into bytes. + key: Vec, + }, + /// Retrieves data from the global state. 
+ State(GlobalStateRequest), +} + +impl GetRequest { + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..3) { + 0 => GetRequest::Record { + record_type_tag: rng.gen(), + key: rng.random_vec(16..32), + }, + 1 => GetRequest::Information { + info_type_tag: rng.gen(), + key: rng.random_vec(16..32), + }, + 2 => GetRequest::State(GlobalStateRequest::random(rng)), + _ => unreachable!(), + } + } +} + +impl ToBytes for GetRequest { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + GetRequest::Record { + record_type_tag, + key, + } => { + RECORD_TAG.write_bytes(writer)?; + record_type_tag.write_bytes(writer)?; + key.write_bytes(writer) + } + GetRequest::Information { info_type_tag, key } => { + INFORMATION_TAG.write_bytes(writer)?; + info_type_tag.write_bytes(writer)?; + key.write_bytes(writer) + } + GetRequest::State(req) => { + STATE_TAG.write_bytes(writer)?; + req.write_bytes(writer) + } + } + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + GetRequest::Record { + record_type_tag, + key, + } => record_type_tag.serialized_length() + key.serialized_length(), + GetRequest::Information { info_type_tag, key } => { + info_type_tag.serialized_length() + key.serialized_length() + } + GetRequest::State(req) => req.serialized_length(), + } + } +} + +impl FromBytes for GetRequest { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = FromBytes::from_bytes(bytes)?; + match tag { + RECORD_TAG => { + let (record_type_tag, remainder) = FromBytes::from_bytes(remainder)?; + let (key, remainder) = Bytes::from_bytes(remainder)?; + Ok(( + GetRequest::Record { + record_type_tag, + key: key.into(), + }, + remainder, + )) + } + INFORMATION_TAG => { + let (info_type_tag, 
remainder) = FromBytes::from_bytes(remainder)?; + let (key, remainder) = Bytes::from_bytes(remainder)?; + Ok(( + GetRequest::Information { + info_type_tag, + key: key.into(), + }, + remainder, + )) + } + STATE_TAG => { + let (req, remainder) = FromBytes::from_bytes(remainder)?; + Ok((GetRequest::State(req), remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = GetRequest::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/casper_types_ver_2_0/src/binary_port/global_state_query_result.rs b/casper_types_ver_2_0/src/binary_port/global_state_query_result.rs new file mode 100644 index 00000000..07619201 --- /dev/null +++ b/casper_types_ver_2_0/src/binary_port/global_state_query_result.rs @@ -0,0 +1,99 @@ +//! The result of the query for the global state value. + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + StoredValue, +}; +use alloc::{string::String, vec::Vec}; + +#[cfg(test)] +use crate::testing::TestRng; + +#[cfg(test)] +use crate::{ByteCode, ByteCodeKind}; + +/// Carries the successful result of the global state query. +#[derive(Debug, PartialEq, Clone)] +pub struct GlobalStateQueryResult { + /// Stored value. + value: StoredValue, + /// Proof. + merkle_proof: String, +} + +impl GlobalStateQueryResult { + /// Creates the global state query result. + pub fn new(value: StoredValue, merkle_proof: String) -> Self { + Self { + value, + merkle_proof, + } + } + + /// Returns the stored value and the merkle proof. + pub fn into_inner(self) -> (StoredValue, String) { + (self.value, self.merkle_proof) + } + + #[cfg(test)] + pub(crate) fn random_invalid(rng: &mut TestRng) -> Self { + // Note: This does NOT create a logically-valid struct. Instance created by this function + // should be used in `bytesrepr` tests only. 
+ Self { + value: StoredValue::ByteCode(ByteCode::new( + ByteCodeKind::V1CasperWasm, + rng.random_vec(10..20), + )), + merkle_proof: rng.random_string(10..20), + } + } +} + +impl ToBytes for GlobalStateQueryResult { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + let GlobalStateQueryResult { + value, + merkle_proof, + } = self; + value.write_bytes(writer)?; + merkle_proof.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + self.value.serialized_length() + self.merkle_proof.serialized_length() + } +} + +impl FromBytes for GlobalStateQueryResult { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (value, remainder) = FromBytes::from_bytes(bytes)?; + let (merkle_proof, remainder) = FromBytes::from_bytes(remainder)?; + Ok(( + GlobalStateQueryResult { + value, + merkle_proof, + }, + remainder, + )) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = GlobalStateQueryResult::random_invalid(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/casper_types_ver_2_0/src/binary_port/information_request.rs b/casper_types_ver_2_0/src/binary_port/information_request.rs new file mode 100644 index 00000000..79756aba --- /dev/null +++ b/casper_types_ver_2_0/src/binary_port/information_request.rs @@ -0,0 +1,370 @@ +use alloc::vec::Vec; +use core::convert::TryFrom; + +#[cfg(test)] +use rand::Rng; + +#[cfg(test)] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + BlockIdentifier, TransactionHash, +}; + +use super::GetRequest; + +/// Request for information from the node. 
+#[derive(Clone, Debug, PartialEq)] +pub enum InformationRequest { + /// Returns the block header by an identifier, no identifier indicates the latest block. + BlockHeader(Option), + /// Returns the signed block by an identifier, no identifier indicates the latest block. + SignedBlock(Option), + /// Returns a transaction with approvals and execution info for a given hash. + Transaction(TransactionHash), + /// Returns connected peers. + Peers, + /// Returns node uptime. + Uptime, + /// Returns last progress of the sync process. + LastProgress, + /// Returns current state of the main reactor. + ReactorState, + /// Returns network name. + NetworkName, + /// Returns consensus validator changes. + ConsensusValidatorChanges, + /// Returns status of the BlockSynchronizer. + BlockSynchronizerStatus, + /// Returns the available block range. + AvailableBlockRange, + /// Returns info about next upgrade. + NextUpgrade, + /// Returns consensus status. + ConsensusStatus, + /// Returns chainspec raw bytes. + ChainspecRawBytes, + /// Returns the status information of the node. + NodeStatus, +} + +impl InformationRequest { + /// Returns the tag of the request. 
+ pub fn tag(&self) -> InformationRequestTag { + match self { + InformationRequest::BlockHeader(_) => InformationRequestTag::BlockHeader, + InformationRequest::SignedBlock(_) => InformationRequestTag::SignedBlock, + InformationRequest::Transaction(_) => InformationRequestTag::Transaction, + InformationRequest::Peers => InformationRequestTag::Peers, + InformationRequest::Uptime => InformationRequestTag::Uptime, + InformationRequest::LastProgress => InformationRequestTag::LastProgress, + InformationRequest::ReactorState => InformationRequestTag::ReactorState, + InformationRequest::NetworkName => InformationRequestTag::NetworkName, + InformationRequest::ConsensusValidatorChanges => { + InformationRequestTag::ConsensusValidatorChanges + } + InformationRequest::BlockSynchronizerStatus => { + InformationRequestTag::BlockSynchronizerStatus + } + InformationRequest::AvailableBlockRange => InformationRequestTag::AvailableBlockRange, + InformationRequest::NextUpgrade => InformationRequestTag::NextUpgrade, + InformationRequest::ConsensusStatus => InformationRequestTag::ConsensusStatus, + InformationRequest::ChainspecRawBytes => InformationRequestTag::ChainspecRawBytes, + InformationRequest::NodeStatus => InformationRequestTag::NodeStatus, + } + } + + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + match InformationRequestTag::random(rng) { + InformationRequestTag::BlockHeader => { + if rng.gen() { + InformationRequest::BlockHeader(None) + } else { + InformationRequest::BlockHeader(Some(BlockIdentifier::random(rng))) + } + } + InformationRequestTag::SignedBlock => { + if rng.gen() { + InformationRequest::SignedBlock(None) + } else { + InformationRequest::SignedBlock(Some(BlockIdentifier::random(rng))) + } + } + InformationRequestTag::Transaction => { + InformationRequest::Transaction(TransactionHash::random(rng)) + } + InformationRequestTag::Peers => InformationRequest::Peers, + InformationRequestTag::Uptime => InformationRequest::Uptime, + 
InformationRequestTag::LastProgress => InformationRequest::LastProgress, + InformationRequestTag::ReactorState => InformationRequest::ReactorState, + InformationRequestTag::NetworkName => InformationRequest::NetworkName, + InformationRequestTag::ConsensusValidatorChanges => { + InformationRequest::ConsensusValidatorChanges + } + InformationRequestTag::BlockSynchronizerStatus => { + InformationRequest::BlockSynchronizerStatus + } + InformationRequestTag::AvailableBlockRange => InformationRequest::AvailableBlockRange, + InformationRequestTag::NextUpgrade => InformationRequest::NextUpgrade, + InformationRequestTag::ConsensusStatus => InformationRequest::ConsensusStatus, + InformationRequestTag::ChainspecRawBytes => InformationRequest::ChainspecRawBytes, + InformationRequestTag::NodeStatus => InformationRequest::NodeStatus, + } + } +} + +impl ToBytes for InformationRequest { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + InformationRequest::BlockHeader(block_identifier) => { + block_identifier.write_bytes(writer) + } + InformationRequest::SignedBlock(block_identifier) => { + block_identifier.write_bytes(writer) + } + InformationRequest::Transaction(transaction_hash) => { + transaction_hash.write_bytes(writer) + } + InformationRequest::Peers + | InformationRequest::Uptime + | InformationRequest::LastProgress + | InformationRequest::ReactorState + | InformationRequest::NetworkName + | InformationRequest::ConsensusValidatorChanges + | InformationRequest::BlockSynchronizerStatus + | InformationRequest::AvailableBlockRange + | InformationRequest::NextUpgrade + | InformationRequest::ConsensusStatus + | InformationRequest::ChainspecRawBytes + | InformationRequest::NodeStatus => Ok(()), + } + } + + fn serialized_length(&self) -> usize { + match self { + 
InformationRequest::BlockHeader(block_identifier) => { + block_identifier.serialized_length() + } + InformationRequest::SignedBlock(block_identifier) => { + block_identifier.serialized_length() + } + InformationRequest::Transaction(transaction_hash) => { + transaction_hash.serialized_length() + } + InformationRequest::Peers + | InformationRequest::Uptime + | InformationRequest::LastProgress + | InformationRequest::ReactorState + | InformationRequest::NetworkName + | InformationRequest::ConsensusValidatorChanges + | InformationRequest::BlockSynchronizerStatus + | InformationRequest::AvailableBlockRange + | InformationRequest::NextUpgrade + | InformationRequest::ConsensusStatus + | InformationRequest::ChainspecRawBytes + | InformationRequest::NodeStatus => 0, + } + } +} + +impl TryFrom<(InformationRequestTag, &[u8])> for InformationRequest { + type Error = bytesrepr::Error; + + fn try_from((tag, key_bytes): (InformationRequestTag, &[u8])) -> Result { + let (req, remainder) = match tag { + InformationRequestTag::BlockHeader => { + let (block_identifier, remainder) = FromBytes::from_bytes(key_bytes)?; + (InformationRequest::BlockHeader(block_identifier), remainder) + } + InformationRequestTag::SignedBlock => { + let (block_identifier, remainder) = FromBytes::from_bytes(key_bytes)?; + (InformationRequest::SignedBlock(block_identifier), remainder) + } + InformationRequestTag::Transaction => { + let (transaction_hash, remainder) = FromBytes::from_bytes(key_bytes)?; + (InformationRequest::Transaction(transaction_hash), remainder) + } + InformationRequestTag::Peers => (InformationRequest::Peers, key_bytes), + InformationRequestTag::Uptime => (InformationRequest::Uptime, key_bytes), + InformationRequestTag::LastProgress => (InformationRequest::LastProgress, key_bytes), + InformationRequestTag::ReactorState => (InformationRequest::ReactorState, key_bytes), + InformationRequestTag::NetworkName => (InformationRequest::NetworkName, key_bytes), + 
InformationRequestTag::ConsensusValidatorChanges => { + (InformationRequest::ConsensusValidatorChanges, key_bytes) + } + InformationRequestTag::BlockSynchronizerStatus => { + (InformationRequest::BlockSynchronizerStatus, key_bytes) + } + InformationRequestTag::AvailableBlockRange => { + (InformationRequest::AvailableBlockRange, key_bytes) + } + InformationRequestTag::NextUpgrade => (InformationRequest::NextUpgrade, key_bytes), + InformationRequestTag::ConsensusStatus => { + (InformationRequest::ConsensusStatus, key_bytes) + } + InformationRequestTag::ChainspecRawBytes => { + (InformationRequest::ChainspecRawBytes, key_bytes) + } + InformationRequestTag::NodeStatus => (InformationRequest::NodeStatus, key_bytes), + }; + if !remainder.is_empty() { + return Err(bytesrepr::Error::LeftOverBytes); + } + Ok(req) + } +} + +impl TryFrom for GetRequest { + type Error = bytesrepr::Error; + + fn try_from(request: InformationRequest) -> Result { + Ok(GetRequest::Information { + info_type_tag: request.tag().into(), + key: request.to_bytes()?, + }) + } +} + +/// Identifier of an information request. +#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] +#[repr(u16)] +pub enum InformationRequestTag { + /// Block header request. + BlockHeader = 0, + /// Signed block request. + SignedBlock = 1, + /// Transaction request. + Transaction = 2, + /// Peers request. + Peers = 3, + /// Uptime request. + Uptime = 4, + /// Last progress request. + LastProgress = 5, + /// Reactor state request. + ReactorState = 6, + /// Network name request. + NetworkName = 7, + /// Consensus validator changes request. + ConsensusValidatorChanges = 8, + /// Block synchronizer status request. + BlockSynchronizerStatus = 9, + /// Available block range request. + AvailableBlockRange = 10, + /// Next upgrade request. + NextUpgrade = 11, + /// Consensus status request. + ConsensusStatus = 12, + /// Chainspec raw bytes request. + ChainspecRawBytes = 13, + /// Node status request. 
+ NodeStatus = 14, +} + +impl InformationRequestTag { + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..15) { + 0 => InformationRequestTag::BlockHeader, + 1 => InformationRequestTag::SignedBlock, + 2 => InformationRequestTag::Transaction, + 3 => InformationRequestTag::Peers, + 4 => InformationRequestTag::Uptime, + 5 => InformationRequestTag::LastProgress, + 6 => InformationRequestTag::ReactorState, + 7 => InformationRequestTag::NetworkName, + 8 => InformationRequestTag::ConsensusValidatorChanges, + 9 => InformationRequestTag::BlockSynchronizerStatus, + 10 => InformationRequestTag::AvailableBlockRange, + 11 => InformationRequestTag::NextUpgrade, + 12 => InformationRequestTag::ConsensusStatus, + 13 => InformationRequestTag::ChainspecRawBytes, + 14 => InformationRequestTag::NodeStatus, + _ => unreachable!(), + } + } +} + +impl TryFrom for InformationRequestTag { + type Error = UnknownInformationRequestTag; + + fn try_from(value: u16) -> Result { + match value { + 0 => Ok(InformationRequestTag::BlockHeader), + 1 => Ok(InformationRequestTag::SignedBlock), + 2 => Ok(InformationRequestTag::Transaction), + 3 => Ok(InformationRequestTag::Peers), + 4 => Ok(InformationRequestTag::Uptime), + 5 => Ok(InformationRequestTag::LastProgress), + 6 => Ok(InformationRequestTag::ReactorState), + 7 => Ok(InformationRequestTag::NetworkName), + 8 => Ok(InformationRequestTag::ConsensusValidatorChanges), + 9 => Ok(InformationRequestTag::BlockSynchronizerStatus), + 10 => Ok(InformationRequestTag::AvailableBlockRange), + 11 => Ok(InformationRequestTag::NextUpgrade), + 12 => Ok(InformationRequestTag::ConsensusStatus), + 13 => Ok(InformationRequestTag::ChainspecRawBytes), + 14 => Ok(InformationRequestTag::NodeStatus), + _ => Err(UnknownInformationRequestTag(value)), + } + } +} + +impl From for u16 { + fn from(value: InformationRequestTag) -> Self { + value as u16 + } +} + +/// Error returned when trying to convert a `u16` into an `InformationRequestTag`.
+#[derive(Debug, PartialEq, Eq)] +pub struct UnknownInformationRequestTag(u16); + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn tag_roundtrip() { + for tag in [ + InformationRequestTag::BlockHeader, + InformationRequestTag::SignedBlock, + InformationRequestTag::Transaction, + InformationRequestTag::Peers, + InformationRequestTag::Uptime, + InformationRequestTag::LastProgress, + InformationRequestTag::ReactorState, + InformationRequestTag::NetworkName, + InformationRequestTag::ConsensusValidatorChanges, + InformationRequestTag::BlockSynchronizerStatus, + InformationRequestTag::AvailableBlockRange, + InformationRequestTag::NextUpgrade, + InformationRequestTag::ConsensusStatus, + InformationRequestTag::ChainspecRawBytes, + InformationRequestTag::NodeStatus, + ] { + let value = u16::from(tag); + assert_eq!(InformationRequestTag::try_from(value), Ok(tag)); + } + } + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = InformationRequest::random(rng); + let bytes = val.to_bytes().expect("should serialize"); + assert_eq!( + InformationRequest::try_from((val.tag(), &bytes[..])), + Ok(val) + ); + } +} diff --git a/casper_types_ver_2_0/src/binary_port/minimal_block_info.rs b/casper_types_ver_2_0/src/binary_port/minimal_block_info.rs new file mode 100644 index 00000000..7e470895 --- /dev/null +++ b/casper_types_ver_2_0/src/binary_port/minimal_block_info.rs @@ -0,0 +1,123 @@ +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + Block, BlockHash, Digest, EraId, PublicKey, Timestamp, +}; +use alloc::vec::Vec; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +#[cfg(any(feature = "std", test))] +use serde::{Deserialize, Serialize}; + +#[cfg(test)] +use rand::Rng; + +#[cfg(test)] +use crate::testing::TestRng; + +/// Minimal info about a `Block` needed to satisfy the node status request. 
+#[derive(Debug, PartialEq, Eq)] +#[cfg_attr(any(feature = "std", test), derive(Serialize, Deserialize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[cfg_attr(any(feature = "std", test), serde(deny_unknown_fields))] +pub struct MinimalBlockInfo { + hash: BlockHash, + timestamp: Timestamp, + era_id: EraId, + height: u64, + state_root_hash: Digest, + creator: PublicKey, +} + +impl MinimalBlockInfo { + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + Self { + hash: BlockHash::random(rng), + timestamp: Timestamp::random(rng), + era_id: EraId::random(rng), + height: rng.gen(), + state_root_hash: Digest::random(rng), + creator: PublicKey::random(rng), + } + } +} + +impl FromBytes for MinimalBlockInfo { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (hash, remainder) = BlockHash::from_bytes(bytes)?; + let (timestamp, remainder) = Timestamp::from_bytes(remainder)?; + let (era_id, remainder) = EraId::from_bytes(remainder)?; + let (height, remainder) = u64::from_bytes(remainder)?; + let (state_root_hash, remainder) = Digest::from_bytes(remainder)?; + let (creator, remainder) = PublicKey::from_bytes(remainder)?; + Ok(( + MinimalBlockInfo { + hash, + timestamp, + era_id, + height, + state_root_hash, + creator, + }, + remainder, + )) + } +} + +impl ToBytes for MinimalBlockInfo { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.hash.write_bytes(writer)?; + self.timestamp.write_bytes(writer)?; + self.era_id.write_bytes(writer)?; + self.height.write_bytes(writer)?; + self.state_root_hash.write_bytes(writer)?; + self.creator.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + self.hash.serialized_length() + + self.timestamp.serialized_length() + + self.era_id.serialized_length() + + 
self.height.serialized_length() + + self.state_root_hash.serialized_length() + + self.creator.serialized_length() + } +} + +impl From for MinimalBlockInfo { + fn from(block: Block) -> Self { + let proposer = match &block { + Block::V1(v1) => v1.proposer().clone(), + Block::V2(v2) => v2.proposer().clone(), + }; + + MinimalBlockInfo { + hash: *block.hash(), + timestamp: block.timestamp(), + era_id: block.era_id(), + height: block.height(), + state_root_hash: *block.state_root_hash(), + creator: proposer, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = MinimalBlockInfo::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/casper_types_ver_2_0/src/binary_port/node_status.rs b/casper_types_ver_2_0/src/binary_port/node_status.rs new file mode 100644 index 00000000..fb255f8e --- /dev/null +++ b/casper_types_ver_2_0/src/binary_port/node_status.rs @@ -0,0 +1,173 @@ +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + AvailableBlockRange, BlockSynchronizerStatus, Digest, NextUpgrade, Peers, PublicKey, + ReactorState, TimeDiff, Timestamp, +}; +use alloc::{string::String, vec::Vec}; + +#[cfg(test)] +use rand::Rng; + +#[cfg(test)] +use crate::testing::TestRng; + +use super::MinimalBlockInfo; + +/// Status information about the node. +#[derive(Debug, PartialEq)] +pub struct NodeStatus { + /// The node ID and network address of each connected peer. + pub peers: Peers, + /// The compiled node version. + pub build_version: String, + /// The chainspec name. + pub chainspec_name: String, + /// The state root hash of the lowest block in the available block range. + pub starting_state_root_hash: Digest, + /// The minimal info of the last block from the linear chain. + pub last_added_block_info: Option, + /// Our public signing key. + pub our_public_signing_key: Option, + /// The next round length if this node is a validator. 
+ pub round_length: Option, + /// Information about the next scheduled upgrade. + pub next_upgrade: Option, + /// Time that passed since the node has started. + pub uptime: TimeDiff, + /// The current state of node reactor. + pub reactor_state: ReactorState, + /// Timestamp of the last recorded progress in the reactor. + pub last_progress: Timestamp, + /// The available block range in storage. + pub available_block_range: AvailableBlockRange, + /// The status of the block synchronizer builders. + pub block_sync: BlockSynchronizerStatus, +} + +impl NodeStatus { + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + Self { + peers: Peers::random(rng), + build_version: rng.random_string(5..10), + chainspec_name: rng.random_string(5..10), + starting_state_root_hash: Digest::random(rng), + last_added_block_info: rng.gen::().then_some(MinimalBlockInfo::random(rng)), + our_public_signing_key: rng.gen::().then_some(PublicKey::random(rng)), + round_length: rng + .gen::() + .then_some(TimeDiff::from_millis(rng.gen())), + next_upgrade: rng.gen::().then_some(NextUpgrade::random(rng)), + uptime: TimeDiff::from_millis(rng.gen()), + reactor_state: ReactorState::random(rng), + last_progress: Timestamp::random(rng), + available_block_range: AvailableBlockRange::random(rng), + block_sync: BlockSynchronizerStatus::random(rng), + } + } +} + +impl FromBytes for NodeStatus { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (peers, remainder) = FromBytes::from_bytes(bytes)?; + let (build_version, remainder) = String::from_bytes(remainder)?; + let (chainspec_name, remainder) = String::from_bytes(remainder)?; + let (starting_state_root_hash, remainder) = Digest::from_bytes(remainder)?; + let (last_added_block_info, remainder) = Option::::from_bytes(remainder)?; + let (our_public_signing_key, remainder) = Option::::from_bytes(remainder)?; + let (round_length, remainder) = Option::::from_bytes(remainder)?; + let (next_upgrade, remainder) = 
Option::::from_bytes(remainder)?; + let (uptime, remainder) = TimeDiff::from_bytes(remainder)?; + let (reactor_state, remainder) = ReactorState::from_bytes(remainder)?; + let (last_progress, remainder) = Timestamp::from_bytes(remainder)?; + let (available_block_range, remainder) = AvailableBlockRange::from_bytes(remainder)?; + let (block_sync, remainder) = BlockSynchronizerStatus::from_bytes(remainder)?; + Ok(( + NodeStatus { + peers, + build_version, + chainspec_name, + starting_state_root_hash, + last_added_block_info, + our_public_signing_key, + round_length, + next_upgrade, + uptime, + reactor_state, + last_progress, + available_block_range, + block_sync, + }, + remainder, + )) + } +} + +impl ToBytes for NodeStatus { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + let NodeStatus { + peers, + build_version, + chainspec_name, + starting_state_root_hash, + last_added_block_info, + our_public_signing_key, + round_length, + next_upgrade, + uptime, + reactor_state, + last_progress, + available_block_range, + block_sync, + } = self; + peers.write_bytes(writer)?; + build_version.write_bytes(writer)?; + chainspec_name.write_bytes(writer)?; + starting_state_root_hash.write_bytes(writer)?; + last_added_block_info.write_bytes(writer)?; + our_public_signing_key.write_bytes(writer)?; + round_length.write_bytes(writer)?; + next_upgrade.write_bytes(writer)?; + uptime.write_bytes(writer)?; + reactor_state.write_bytes(writer)?; + last_progress.write_bytes(writer)?; + available_block_range.write_bytes(writer)?; + block_sync.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + self.peers.serialized_length() + + self.build_version.serialized_length() + + self.chainspec_name.serialized_length() + + self.starting_state_root_hash.serialized_length() + + 
self.last_added_block_info.serialized_length() + + self.our_public_signing_key.serialized_length() + + self.round_length.serialized_length() + + self.next_upgrade.serialized_length() + + self.uptime.serialized_length() + + self.reactor_state.serialized_length() + + self.last_progress.serialized_length() + + self.available_block_range.serialized_length() + + self.block_sync.serialized_length() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = NodeStatus::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/casper_types_ver_2_0/src/binary_port/payload_type.rs b/casper_types_ver_2_0/src/binary_port/payload_type.rs new file mode 100644 index 00000000..059c8419 --- /dev/null +++ b/casper_types_ver_2_0/src/binary_port/payload_type.rs @@ -0,0 +1,510 @@ +//! The payload type. + +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; + +#[cfg(test)] +use rand::Rng; + +use alloc::vec::Vec; +use core::{convert::TryFrom, fmt}; + +#[cfg(test)] +use crate::testing::TestRng; + +#[cfg(any(feature = "std", test))] +use super::NodeStatus; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + execution::{ExecutionResult, ExecutionResultV1}, + AvailableBlockRange, BlockBody, BlockBodyV1, BlockHeader, BlockHeaderV1, BlockSignatures, + BlockSynchronizerStatus, Deploy, FinalizedApprovals, FinalizedDeployApprovals, Peers, + ReactorState, SignedBlock, StoredValue, Transaction, Transfer, +}; +#[cfg(any(feature = "std", test))] +use crate::{ChainspecRawBytes, NextUpgrade}; + +use super::{ + global_state_query_result::GlobalStateQueryResult, + record_id::RecordId, + type_wrappers::{ + ConsensusStatus, ConsensusValidatorChanges, GetTrieFullResult, LastProgress, NetworkName, + SpeculativeExecutionResult, + }, + TransactionWithExecutionInfo, Uptime, +}; + +/// A type of the payload being returned in a binary response. 
+#[derive(Debug, Copy, Clone, PartialEq, Eq)] +#[repr(u8)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum PayloadType { + /// Legacy version of the block header. + BlockHeaderV1, + /// Block header. + BlockHeader, + /// Legacy version of the block body. + BlockBodyV1, + /// Block body. + BlockBody, + /// Legacy version of the approvals hashes. + ApprovalsHashesV1, + /// Approvals hashes. + ApprovalsHashes, + /// Block signatures. + BlockSignatures, + /// Deploy. + Deploy, + /// Transaction. + Transaction, + /// Legacy version of the execution result. + ExecutionResultV1, + /// Execution result. + ExecutionResult, + /// Transfers. + Transfers, + /// Finalized deploy approvals. + FinalizedDeployApprovals, + /// Finalized approvals. + FinalizedApprovals, + /// Block with signatures. + SignedBlock, + /// Transaction with approvals and execution info. + TransactionWithExecutionInfo, + /// Peers. + Peers, + /// Last progress. + LastProgress, + /// State of the reactor. + ReactorState, + /// Network name. + NetworkName, + /// Consensus validator changes. + ConsensusValidatorChanges, // return type in `effects.rs` will be turned into dedicated type. + /// Status of the block synchronizer. + BlockSynchronizerStatus, + /// Available block range. + AvailableBlockRange, + /// Information about the next network upgrade. + NextUpgrade, + /// Consensus status. + ConsensusStatus, // return type in `effects.rs` will be turned into dedicated type. + /// Chainspec represented as raw bytes. + ChainspecRawBytes, + /// Uptime. + Uptime, + /// Result of checking if given block is in the highest available block range. + HighestBlockSequenceCheckResult, + /// Result of the speculative execution. + SpeculativeExecutionResult, + /// Result of querying global state. + GlobalStateQueryResult, + /// Result of querying global state for all values under a specified key. + StoredValues, + /// Result of querying global state for a full trie. 
+ GetTrieFullResult, + /// Node status. + NodeStatus, +} + +impl PayloadType { + pub(crate) fn new_from_record_id(record_id: RecordId, is_legacy: bool) -> Self { + match (is_legacy, record_id) { + (true, RecordId::BlockHeader) => Self::BlockHeaderV1, + (true, RecordId::BlockBody) => Self::BlockBodyV1, + (true, RecordId::ApprovalsHashes) => Self::ApprovalsHashesV1, + (true, RecordId::BlockMetadata) => Self::BlockSignatures, + (true, RecordId::Transaction) => Self::Deploy, + (true, RecordId::ExecutionResult) => Self::ExecutionResultV1, + (true, RecordId::Transfer) => Self::Transfers, + (true, RecordId::FinalizedTransactionApprovals) => Self::FinalizedDeployApprovals, + (false, RecordId::BlockHeader) => Self::BlockHeader, + (false, RecordId::BlockBody) => Self::BlockBody, + (false, RecordId::ApprovalsHashes) => Self::ApprovalsHashes, + (false, RecordId::BlockMetadata) => Self::BlockSignatures, + (false, RecordId::Transaction) => Self::Transaction, + (false, RecordId::ExecutionResult) => Self::ExecutionResult, + (false, RecordId::Transfer) => Self::Transfers, + (false, RecordId::FinalizedTransactionApprovals) => Self::FinalizedApprovals, + } + } + + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + Self::try_from(rng.gen_range(0..33)).unwrap() + } +} + +impl TryFrom for PayloadType { + type Error = (); + + fn try_from(v: u8) -> Result { + match v { + x if x == PayloadType::BlockHeaderV1 as u8 => Ok(PayloadType::BlockHeaderV1), + x if x == PayloadType::BlockHeader as u8 => Ok(PayloadType::BlockHeader), + x if x == PayloadType::BlockBodyV1 as u8 => Ok(PayloadType::BlockBodyV1), + x if x == PayloadType::BlockBody as u8 => Ok(PayloadType::BlockBody), + x if x == PayloadType::ApprovalsHashesV1 as u8 => Ok(PayloadType::ApprovalsHashesV1), + x if x == PayloadType::ApprovalsHashes as u8 => Ok(PayloadType::ApprovalsHashes), + x if x == PayloadType::BlockSignatures as u8 => Ok(PayloadType::BlockSignatures), + x if x == PayloadType::Deploy as u8 => 
Ok(PayloadType::Deploy), + x if x == PayloadType::Transaction as u8 => Ok(PayloadType::Transaction), + x if x == PayloadType::ExecutionResultV1 as u8 => Ok(PayloadType::ExecutionResultV1), + x if x == PayloadType::ExecutionResult as u8 => Ok(PayloadType::ExecutionResult), + x if x == PayloadType::Transfers as u8 => Ok(PayloadType::Transfers), + x if x == PayloadType::FinalizedDeployApprovals as u8 => { + Ok(PayloadType::FinalizedDeployApprovals) + } + x if x == PayloadType::FinalizedApprovals as u8 => Ok(PayloadType::FinalizedApprovals), + x if x == PayloadType::Peers as u8 => Ok(PayloadType::Peers), + x if x == PayloadType::LastProgress as u8 => Ok(PayloadType::LastProgress), + x if x == PayloadType::ReactorState as u8 => Ok(PayloadType::ReactorState), + x if x == PayloadType::NetworkName as u8 => Ok(PayloadType::NetworkName), + x if x == PayloadType::ConsensusValidatorChanges as u8 => { + Ok(PayloadType::ConsensusValidatorChanges) + } + x if x == PayloadType::BlockSynchronizerStatus as u8 => { + Ok(PayloadType::BlockSynchronizerStatus) + } + x if x == PayloadType::AvailableBlockRange as u8 => { + Ok(PayloadType::AvailableBlockRange) + } + x if x == PayloadType::NextUpgrade as u8 => Ok(PayloadType::NextUpgrade), + x if x == PayloadType::ConsensusStatus as u8 => Ok(PayloadType::ConsensusStatus), + x if x == PayloadType::ChainspecRawBytes as u8 => Ok(PayloadType::ChainspecRawBytes), + x if x == PayloadType::Uptime as u8 => Ok(PayloadType::Uptime), + x if x == PayloadType::HighestBlockSequenceCheckResult as u8 => { + Ok(PayloadType::HighestBlockSequenceCheckResult) + } + x if x == PayloadType::SpeculativeExecutionResult as u8 => { + Ok(PayloadType::SpeculativeExecutionResult) + } + x if x == PayloadType::GlobalStateQueryResult as u8 => { + Ok(PayloadType::GlobalStateQueryResult) + } + x if x == PayloadType::StoredValues as u8 => Ok(PayloadType::StoredValues), + x if x == PayloadType::GetTrieFullResult as u8 => Ok(PayloadType::GetTrieFullResult), + x if x == 
PayloadType::NodeStatus as u8 => Ok(PayloadType::NodeStatus), + _ => Err(()), + } + } +} + +impl From for u8 { + fn from(value: PayloadType) -> Self { + value as u8 + } +} + +impl fmt::Display for PayloadType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + PayloadType::BlockHeaderV1 => write!(f, "BlockHeaderV1"), + PayloadType::BlockHeader => write!(f, "BlockHeader"), + PayloadType::BlockBodyV1 => write!(f, "BlockBodyV1"), + PayloadType::BlockBody => write!(f, "BlockBody"), + PayloadType::ApprovalsHashesV1 => write!(f, "ApprovalsHashesV1"), + PayloadType::ApprovalsHashes => write!(f, "ApprovalsHashes"), + PayloadType::BlockSignatures => write!(f, "BlockSignatures"), + PayloadType::Deploy => write!(f, "Deploy"), + PayloadType::Transaction => write!(f, "Transaction"), + PayloadType::ExecutionResultV1 => write!(f, "ExecutionResultV1"), + PayloadType::ExecutionResult => write!(f, "ExecutionResult"), + PayloadType::Transfers => write!(f, "Transfers"), + PayloadType::FinalizedDeployApprovals => write!(f, "FinalizedDeployApprovals"), + PayloadType::FinalizedApprovals => write!(f, "FinalizedApprovals"), + PayloadType::SignedBlock => write!(f, "SignedBlock"), + PayloadType::TransactionWithExecutionInfo => write!(f, "TransactionWithExecutionInfo"), + PayloadType::Peers => write!(f, "Peers"), + PayloadType::LastProgress => write!(f, "LastProgress"), + PayloadType::ReactorState => write!(f, "ReactorState"), + PayloadType::NetworkName => write!(f, "NetworkName"), + PayloadType::ConsensusValidatorChanges => write!(f, "ConsensusValidatorChanges"), + PayloadType::BlockSynchronizerStatus => write!(f, "BlockSynchronizerStatus"), + PayloadType::AvailableBlockRange => write!(f, "AvailableBlockRange"), + PayloadType::NextUpgrade => write!(f, "NextUpgrade"), + PayloadType::ConsensusStatus => write!(f, "ConsensusStatus"), + PayloadType::ChainspecRawBytes => write!(f, "ChainspecRawBytes"), + PayloadType::Uptime => write!(f, "Uptime"), + 
PayloadType::HighestBlockSequenceCheckResult => { + write!(f, "HighestBlockSequenceCheckResult") + } + PayloadType::SpeculativeExecutionResult => write!(f, "SpeculativeExecutionResult"), + PayloadType::GlobalStateQueryResult => write!(f, "GlobalStateQueryResult"), + PayloadType::StoredValues => write!(f, "StoredValues"), + PayloadType::GetTrieFullResult => write!(f, "GetTrieFullResult"), + PayloadType::NodeStatus => write!(f, "NodeStatus"), + } + } +} + +const BLOCK_HEADER_V1_TAG: u8 = 0; +const BLOCK_HEADER_TAG: u8 = 1; +const BLOCK_BODY_V1_TAG: u8 = 2; +const BLOCK_BODY_TAG: u8 = 3; +const APPROVALS_HASHES_TAG: u8 = 4; +const APPROVALS_HASHES_V1: u8 = 5; +const BLOCK_SIGNATURES_TAG: u8 = 6; +const DEPLOY_TAG: u8 = 7; +const TRANSACTION_TAG: u8 = 8; +const EXECUTION_RESULT_V1_TAG: u8 = 9; +const EXECUTION_RESULT_TAG: u8 = 10; +const TRANSFERS_TAG: u8 = 11; +const FINALIZED_DEPLOY_APPROVALS_TAG: u8 = 12; +const FINALIZED_APPROVALS_TAG: u8 = 13; +const SIGNED_BLOCK_TAG: u8 = 14; +const TRANSACTION_WITH_EXECUTION_INFO_TAG: u8 = 15; +const PEERS_TAG: u8 = 16; +const UPTIME_TAG: u8 = 17; +const LAST_PROGRESS_TAG: u8 = 18; +const REACTOR_STATE_TAG: u8 = 19; +const NETWORK_NAME_TAG: u8 = 20; +const CONSENSUS_VALIDATOR_CHANGES_TAG: u8 = 21; +const BLOCK_SYNCHRONIZER_STATUS_TAG: u8 = 22; +const AVAILABLE_BLOCK_RANGE_TAG: u8 = 23; +const NEXT_UPGRADE_TAG: u8 = 24; +const CONSENSUS_STATUS_TAG: u8 = 25; +const CHAINSPEC_RAW_BYTES_TAG: u8 = 26; +const HIGHEST_BLOCK_SEQUENCE_CHECK_RESULT_TAG: u8 = 27; +const SPECULATIVE_EXECUTION_RESULT_TAG: u8 = 28; +const GLOBAL_STATE_QUERY_RESULT_TAG: u8 = 29; +const STORED_VALUES_TAG: u8 = 30; +const GET_TRIE_FULL_RESULT_TAG: u8 = 31; +const NODE_STATUS_TAG: u8 = 32; + +impl ToBytes for PayloadType { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match 
self { + PayloadType::BlockHeaderV1 => BLOCK_HEADER_V1_TAG, + PayloadType::BlockHeader => BLOCK_HEADER_TAG, + PayloadType::BlockBodyV1 => BLOCK_BODY_V1_TAG, + PayloadType::BlockBody => BLOCK_BODY_TAG, + PayloadType::ApprovalsHashes => APPROVALS_HASHES_TAG, + PayloadType::ApprovalsHashesV1 => APPROVALS_HASHES_V1, + PayloadType::BlockSignatures => BLOCK_SIGNATURES_TAG, + PayloadType::Deploy => DEPLOY_TAG, + PayloadType::Transaction => TRANSACTION_TAG, + PayloadType::ExecutionResultV1 => EXECUTION_RESULT_V1_TAG, + PayloadType::ExecutionResult => EXECUTION_RESULT_TAG, + PayloadType::Transfers => TRANSFERS_TAG, + PayloadType::FinalizedDeployApprovals => FINALIZED_DEPLOY_APPROVALS_TAG, + PayloadType::FinalizedApprovals => FINALIZED_APPROVALS_TAG, + PayloadType::Peers => PEERS_TAG, + PayloadType::SignedBlock => SIGNED_BLOCK_TAG, + PayloadType::TransactionWithExecutionInfo => TRANSACTION_WITH_EXECUTION_INFO_TAG, + PayloadType::LastProgress => LAST_PROGRESS_TAG, + PayloadType::ReactorState => REACTOR_STATE_TAG, + PayloadType::NetworkName => NETWORK_NAME_TAG, + PayloadType::ConsensusValidatorChanges => CONSENSUS_VALIDATOR_CHANGES_TAG, + PayloadType::BlockSynchronizerStatus => BLOCK_SYNCHRONIZER_STATUS_TAG, + PayloadType::AvailableBlockRange => AVAILABLE_BLOCK_RANGE_TAG, + PayloadType::NextUpgrade => NEXT_UPGRADE_TAG, + PayloadType::ConsensusStatus => CONSENSUS_STATUS_TAG, + PayloadType::ChainspecRawBytes => CHAINSPEC_RAW_BYTES_TAG, + PayloadType::Uptime => UPTIME_TAG, + PayloadType::HighestBlockSequenceCheckResult => HIGHEST_BLOCK_SEQUENCE_CHECK_RESULT_TAG, + PayloadType::SpeculativeExecutionResult => SPECULATIVE_EXECUTION_RESULT_TAG, + PayloadType::GlobalStateQueryResult => GLOBAL_STATE_QUERY_RESULT_TAG, + PayloadType::StoredValues => STORED_VALUES_TAG, + PayloadType::GetTrieFullResult => GET_TRIE_FULL_RESULT_TAG, + PayloadType::NodeStatus => NODE_STATUS_TAG, + } + .write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + } +} + +impl 
FromBytes for PayloadType { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = FromBytes::from_bytes(bytes)?; + let record_id = match tag { + BLOCK_HEADER_V1_TAG => PayloadType::BlockHeaderV1, + BLOCK_HEADER_TAG => PayloadType::BlockHeader, + BLOCK_BODY_V1_TAG => PayloadType::BlockBodyV1, + BLOCK_BODY_TAG => PayloadType::BlockBody, + APPROVALS_HASHES_TAG => PayloadType::ApprovalsHashes, + APPROVALS_HASHES_V1 => PayloadType::ApprovalsHashesV1, + BLOCK_SIGNATURES_TAG => PayloadType::BlockSignatures, + DEPLOY_TAG => PayloadType::Deploy, + TRANSACTION_TAG => PayloadType::Transaction, + EXECUTION_RESULT_V1_TAG => PayloadType::ExecutionResultV1, + EXECUTION_RESULT_TAG => PayloadType::ExecutionResult, + TRANSFERS_TAG => PayloadType::Transfers, + FINALIZED_DEPLOY_APPROVALS_TAG => PayloadType::FinalizedDeployApprovals, + FINALIZED_APPROVALS_TAG => PayloadType::FinalizedApprovals, + PEERS_TAG => PayloadType::Peers, + SIGNED_BLOCK_TAG => PayloadType::SignedBlock, + TRANSACTION_WITH_EXECUTION_INFO_TAG => PayloadType::TransactionWithExecutionInfo, + LAST_PROGRESS_TAG => PayloadType::LastProgress, + REACTOR_STATE_TAG => PayloadType::ReactorState, + NETWORK_NAME_TAG => PayloadType::NetworkName, + CONSENSUS_VALIDATOR_CHANGES_TAG => PayloadType::ConsensusValidatorChanges, + BLOCK_SYNCHRONIZER_STATUS_TAG => PayloadType::BlockSynchronizerStatus, + AVAILABLE_BLOCK_RANGE_TAG => PayloadType::AvailableBlockRange, + NEXT_UPGRADE_TAG => PayloadType::NextUpgrade, + CONSENSUS_STATUS_TAG => PayloadType::ConsensusStatus, + CHAINSPEC_RAW_BYTES_TAG => PayloadType::ChainspecRawBytes, + UPTIME_TAG => PayloadType::Uptime, + HIGHEST_BLOCK_SEQUENCE_CHECK_RESULT_TAG => PayloadType::HighestBlockSequenceCheckResult, + SPECULATIVE_EXECUTION_RESULT_TAG => PayloadType::SpeculativeExecutionResult, + GLOBAL_STATE_QUERY_RESULT_TAG => PayloadType::GlobalStateQueryResult, + STORED_VALUES_TAG => PayloadType::StoredValues, + GET_TRIE_FULL_RESULT_TAG => 
PayloadType::GetTrieFullResult, + NODE_STATUS_TAG => PayloadType::NodeStatus, + _ => return Err(bytesrepr::Error::Formatting), + }; + Ok((record_id, remainder)) + } +} + +/// Represents an entity that can be sent as a payload. +pub trait PayloadEntity { + /// Returns the payload type of the entity. + const PAYLOAD_TYPE: PayloadType; +} + +impl PayloadEntity for Transaction { + const PAYLOAD_TYPE: PayloadType = PayloadType::Transaction; +} + +impl PayloadEntity for Deploy { + const PAYLOAD_TYPE: PayloadType = PayloadType::Deploy; +} + +impl PayloadEntity for BlockHeader { + const PAYLOAD_TYPE: PayloadType = PayloadType::BlockHeader; +} + +impl PayloadEntity for BlockHeaderV1 { + const PAYLOAD_TYPE: PayloadType = PayloadType::BlockHeaderV1; +} + +impl PayloadEntity for BlockBody { + const PAYLOAD_TYPE: PayloadType = PayloadType::BlockBody; +} + +impl PayloadEntity for BlockBodyV1 { + const PAYLOAD_TYPE: PayloadType = PayloadType::BlockBodyV1; +} + +impl PayloadEntity for ExecutionResult { + const PAYLOAD_TYPE: PayloadType = PayloadType::ExecutionResult; +} + +impl PayloadEntity for FinalizedApprovals { + const PAYLOAD_TYPE: PayloadType = PayloadType::FinalizedApprovals; +} + +impl PayloadEntity for FinalizedDeployApprovals { + const PAYLOAD_TYPE: PayloadType = PayloadType::FinalizedDeployApprovals; +} + +impl PayloadEntity for ExecutionResultV1 { + const PAYLOAD_TYPE: PayloadType = PayloadType::ExecutionResultV1; +} + +impl PayloadEntity for SignedBlock { + const PAYLOAD_TYPE: PayloadType = PayloadType::SignedBlock; +} + +impl PayloadEntity for TransactionWithExecutionInfo { + const PAYLOAD_TYPE: PayloadType = PayloadType::TransactionWithExecutionInfo; +} + +impl PayloadEntity for Peers { + const PAYLOAD_TYPE: PayloadType = PayloadType::Peers; +} + +impl PayloadEntity for BlockSignatures { + const PAYLOAD_TYPE: PayloadType = PayloadType::BlockSignatures; +} + +impl PayloadEntity for Vec { + const PAYLOAD_TYPE: PayloadType = PayloadType::Transfers; +} + +impl 
PayloadEntity for AvailableBlockRange { + const PAYLOAD_TYPE: PayloadType = PayloadType::AvailableBlockRange; +} + +#[cfg(any(feature = "std", test))] +impl PayloadEntity for ChainspecRawBytes { + const PAYLOAD_TYPE: PayloadType = PayloadType::ChainspecRawBytes; +} + +impl PayloadEntity for ConsensusValidatorChanges { + const PAYLOAD_TYPE: PayloadType = PayloadType::ConsensusValidatorChanges; +} + +impl PayloadEntity for GlobalStateQueryResult { + const PAYLOAD_TYPE: PayloadType = PayloadType::GlobalStateQueryResult; +} + +impl PayloadEntity for Vec { + const PAYLOAD_TYPE: PayloadType = PayloadType::StoredValues; +} + +impl PayloadEntity for GetTrieFullResult { + const PAYLOAD_TYPE: PayloadType = PayloadType::GetTrieFullResult; +} + +impl PayloadEntity for SpeculativeExecutionResult { + const PAYLOAD_TYPE: PayloadType = PayloadType::SpeculativeExecutionResult; +} + +#[cfg(any(feature = "std", test))] +impl PayloadEntity for NodeStatus { + const PAYLOAD_TYPE: PayloadType = PayloadType::NodeStatus; +} + +#[cfg(any(feature = "std", test))] +impl PayloadEntity for NextUpgrade { + const PAYLOAD_TYPE: PayloadType = PayloadType::NextUpgrade; +} + +impl PayloadEntity for Uptime { + const PAYLOAD_TYPE: PayloadType = PayloadType::Uptime; +} + +impl PayloadEntity for LastProgress { + const PAYLOAD_TYPE: PayloadType = PayloadType::LastProgress; +} + +impl PayloadEntity for ReactorState { + const PAYLOAD_TYPE: PayloadType = PayloadType::ReactorState; +} + +impl PayloadEntity for NetworkName { + const PAYLOAD_TYPE: PayloadType = PayloadType::NetworkName; +} + +impl PayloadEntity for BlockSynchronizerStatus { + const PAYLOAD_TYPE: PayloadType = PayloadType::BlockSynchronizerStatus; +} + +impl PayloadEntity for ConsensusStatus { + const PAYLOAD_TYPE: PayloadType = PayloadType::ConsensusStatus; +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = 
PayloadType::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/casper_types_ver_2_0/src/binary_port/record_id.rs b/casper_types_ver_2_0/src/binary_port/record_id.rs new file mode 100644 index 00000000..f7ef6dfe --- /dev/null +++ b/casper_types_ver_2_0/src/binary_port/record_id.rs @@ -0,0 +1,105 @@ +use core::convert::TryFrom; + +#[cfg(test)] +use rand::Rng; +use serde::Serialize; + +#[cfg(test)] +use crate::testing::TestRng; + +/// An identifier of a record type. +#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, Serialize)] +#[repr(u16)] +pub enum RecordId { + /// Refers to `BlockHeader` record. + BlockHeader = 0, + /// Refers to `BlockBody` record. + BlockBody = 1, + /// Refers to `ApprovalsHashes` record. + ApprovalsHashes = 2, + /// Refers to `BlockMetadata` record. + BlockMetadata = 3, + /// Refers to `Transaction` record. + Transaction = 4, + /// Refers to `ExecutionResult` record. + ExecutionResult = 5, + /// Refers to `Transfer` record. + Transfer = 6, + /// Refers to `FinalizedTransactionApprovals` record. 
+ FinalizedTransactionApprovals = 7, +} + +impl RecordId { + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..8) { + 0 => RecordId::BlockHeader, + 1 => RecordId::BlockBody, + 2 => RecordId::ApprovalsHashes, + 3 => RecordId::BlockMetadata, + 4 => RecordId::Transaction, + 5 => RecordId::ExecutionResult, + 6 => RecordId::Transfer, + 7 => RecordId::FinalizedTransactionApprovals, + _ => unreachable!(), + } + } +} + +impl TryFrom for RecordId { + type Error = UnknownRecordId; + + fn try_from(value: u16) -> Result { + match value { + 0 => Ok(RecordId::BlockHeader), + 1 => Ok(RecordId::BlockBody), + 2 => Ok(RecordId::ApprovalsHashes), + 3 => Ok(RecordId::BlockMetadata), + 4 => Ok(RecordId::Transaction), + 5 => Ok(RecordId::ExecutionResult), + 6 => Ok(RecordId::Transfer), + 7 => Ok(RecordId::FinalizedTransactionApprovals), + _ => Err(UnknownRecordId(value)), + } + } +} + +impl From for u16 { + fn from(value: RecordId) -> Self { + value as u16 + } +} + +impl core::fmt::Display for RecordId { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self { + RecordId::BlockHeader => write!(f, "BlockHeader"), + RecordId::BlockBody => write!(f, "BlockBody"), + RecordId::ApprovalsHashes => write!(f, "ApprovalsHashes"), + RecordId::BlockMetadata => write!(f, "BlockMetadata"), + RecordId::Transaction => write!(f, "Transaction"), + RecordId::ExecutionResult => write!(f, "ExecutionResult"), + RecordId::Transfer => write!(f, "Transfer"), + RecordId::FinalizedTransactionApprovals => write!(f, "FinalizedTransactionApprovals"), + } + } +} + +/// Error returned when trying to convert a `u16` into a `RecordId`. 
+#[derive(Debug, PartialEq, Eq)] +pub struct UnknownRecordId(u16); + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn tag_roundtrip() { + let rng = &mut TestRng::new(); + + let val = RecordId::random(rng); + let tag = u16::from(val); + assert_eq!(RecordId::try_from(tag), Ok(val)); + } +} diff --git a/casper_types_ver_2_0/src/binary_port/state_request.rs b/casper_types_ver_2_0/src/binary_port/state_request.rs new file mode 100644 index 00000000..fddb86dc --- /dev/null +++ b/casper_types_ver_2_0/src/binary_port/state_request.rs @@ -0,0 +1,186 @@ +use alloc::string::String; +use alloc::vec::Vec; + +#[cfg(test)] +use rand::Rng; + +#[cfg(test)] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + Digest, GlobalStateIdentifier, Key, KeyTag, +}; + +const ITEM_TAG: u8 = 0; +const ALL_ITEMS_TAG: u8 = 1; +const TRIE_TAG: u8 = 2; + +/// A request to get data from the global state. +#[derive(Clone, Debug, PartialEq)] +pub enum GlobalStateRequest { + /// Gets an item from the global state. + Item { + /// Global state identifier, `None` means "latest block state". + state_identifier: Option, + /// Key under which data is stored. + base_key: Key, + /// Path under which the value is stored. + path: Vec, + }, + /// Get all items under the given key tag. + AllItems { + /// Global state identifier, `None` means "latest block state". + state_identifier: Option, + /// Key tag + key_tag: KeyTag, + }, + /// Get a trie by its Digest. + Trie { + /// A trie key. 
+ trie_key: Digest, + }, +} + +impl GlobalStateRequest { + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..3) { + 0 => { + let path_count = rng.gen_range(10..20); + let state_identifier = if rng.gen() { + Some(GlobalStateIdentifier::random(rng)) + } else { + None + }; + GlobalStateRequest::Item { + state_identifier, + base_key: rng.gen(), + path: std::iter::repeat_with(|| rng.random_string(32..64)) + .take(path_count) + .collect(), + } + } + 1 => { + let state_identifier = if rng.gen() { + Some(GlobalStateIdentifier::random(rng)) + } else { + None + }; + GlobalStateRequest::AllItems { + state_identifier, + key_tag: KeyTag::random(rng), + } + } + 2 => GlobalStateRequest::Trie { + trie_key: Digest::random(rng), + }, + _ => unreachable!(), + } + } +} + +impl ToBytes for GlobalStateRequest { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + GlobalStateRequest::Item { + state_identifier, + base_key, + path, + } => { + ITEM_TAG.write_bytes(writer)?; + state_identifier.write_bytes(writer)?; + base_key.write_bytes(writer)?; + path.write_bytes(writer) + } + GlobalStateRequest::AllItems { + state_identifier, + key_tag, + } => { + ALL_ITEMS_TAG.write_bytes(writer)?; + state_identifier.write_bytes(writer)?; + key_tag.write_bytes(writer) + } + GlobalStateRequest::Trie { trie_key } => { + TRIE_TAG.write_bytes(writer)?; + trie_key.write_bytes(writer) + } + } + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + GlobalStateRequest::Item { + state_identifier, + base_key, + path, + } => { + state_identifier.serialized_length() + + base_key.serialized_length() + + path.serialized_length() + } + GlobalStateRequest::AllItems { + state_identifier, + key_tag, + } => state_identifier.serialized_length() + 
key_tag.serialized_length(), + GlobalStateRequest::Trie { trie_key } => trie_key.serialized_length(), + } + } +} + +impl FromBytes for GlobalStateRequest { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + ITEM_TAG => { + let (state_identifier, remainder) = FromBytes::from_bytes(remainder)?; + let (base_key, remainder) = FromBytes::from_bytes(remainder)?; + let (path, remainder) = FromBytes::from_bytes(remainder)?; + Ok(( + GlobalStateRequest::Item { + state_identifier, + base_key, + path, + }, + remainder, + )) + } + ALL_ITEMS_TAG => { + let (state_identifier, remainder) = FromBytes::from_bytes(remainder)?; + let (key_tag, remainder) = FromBytes::from_bytes(remainder)?; + Ok(( + GlobalStateRequest::AllItems { + state_identifier, + key_tag, + }, + remainder, + )) + } + TRIE_TAG => { + let (trie_key, remainder) = Digest::from_bytes(remainder)?; + Ok((GlobalStateRequest::Trie { trie_key }, remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = GlobalStateRequest::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/casper_types_ver_2_0/src/binary_port/type_wrappers.rs b/casper_types_ver_2_0/src/binary_port/type_wrappers.rs new file mode 100644 index 00000000..cd4f92fc --- /dev/null +++ b/casper_types_ver_2_0/src/binary_port/type_wrappers.rs @@ -0,0 +1,349 @@ +use core::{convert::TryFrom, num::TryFromIntError, time::Duration}; + +use alloc::{ + collections::BTreeMap, + string::{String, ToString}, + vec::Vec, +}; +#[cfg(feature = "datasize")] +use datasize::DataSize; + +use crate::{ + bytesrepr::{self, Bytes, FromBytes, ToBytes}, + contract_messages::Messages, + execution::ExecutionResultV2, + EraId, ExecutionInfo, PublicKey, TimeDiff, Timestamp, Transaction, ValidatorChange, 
+}; + +// `bytesrepr` implementations for type wrappers are repetitive, hence this macro helper. We should +// get rid of this after we introduce the proper "bytesrepr-derive" proc macro. +macro_rules! impl_bytesrepr_for_type_wrapper { + ($t:ident) => { + impl ToBytes for $t { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } + } + + impl FromBytes for $t { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (inner, remainder) = FromBytes::from_bytes(bytes)?; + Ok(($t(inner), remainder)) + } + } + }; +} + +/// Type representing uptime. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct Uptime(u64); + +impl Uptime { + /// Constructs new uptime. + pub fn new(value: u64) -> Self { + Self(value) + } + + /// Retrieve the inner value. + pub fn into_inner(self) -> u64 { + self.0 + } +} + +impl From for Duration { + fn from(uptime: Uptime) -> Self { + Duration::from_secs(uptime.0) + } +} + +impl TryFrom for TimeDiff { + type Error = TryFromIntError; + + fn try_from(uptime: Uptime) -> Result { + u32::try_from(uptime.0).map(TimeDiff::from_seconds) + } +} + +/// Type representing changes in consensus validators. +#[derive(Debug, PartialEq, Eq)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct ConsensusValidatorChanges(BTreeMap>); + +impl ConsensusValidatorChanges { + /// Constructs new consensus validator changes. + pub fn new(value: BTreeMap>) -> Self { + Self(value) + } + + /// Retrieve the inner value. + pub fn into_inner(self) -> BTreeMap> { + self.0 + } +} + +impl From for BTreeMap> { + fn from(consensus_validator_changes: ConsensusValidatorChanges) -> Self { + consensus_validator_changes.0 + } +} + +/// Type representing network name. 
+#[derive(Debug, PartialEq, Eq)] +pub struct NetworkName(String); + +impl NetworkName { + /// Constructs new network name. + pub fn new(value: impl ToString) -> Self { + Self(value.to_string()) + } + + /// Retrieve the inner value. + pub fn into_inner(self) -> String { + self.0 + } +} + +impl From for String { + fn from(network_name: NetworkName) -> Self { + network_name.0 + } +} + +/// Type representing last progress of the sync process. +#[derive(Debug, PartialEq, Eq)] +pub struct LastProgress(Timestamp); + +impl LastProgress { + /// Constructs new last progress. + pub fn new(value: Timestamp) -> Self { + Self(value) + } + + /// Retrieve the inner value. + pub fn into_inner(self) -> Timestamp { + self.0 + } +} + +impl From for Timestamp { + fn from(last_progress: LastProgress) -> Self { + last_progress.0 + } +} + +/// Type representing results of the speculative execution. +#[derive(Debug, PartialEq, Eq)] +pub struct SpeculativeExecutionResult(Option<(ExecutionResultV2, Messages)>); + +impl SpeculativeExecutionResult { + /// Constructs new speculative execution result. + pub fn new(value: Option<(ExecutionResultV2, Messages)>) -> Self { + Self(value) + } + + /// Returns the inner value. + pub fn into_inner(self) -> Option<(ExecutionResultV2, Messages)> { + self.0 + } +} + +/// Type representing results of the get full trie request. +#[derive(Debug, PartialEq, Eq)] +pub struct GetTrieFullResult(Option); + +impl GetTrieFullResult { + /// Constructs new get trie result. + pub fn new(value: Option) -> Self { + Self(value) + } + + /// Returns the inner value. + pub fn into_inner(self) -> Option { + self.0 + } +} + +/// Describes the consensus status. +#[derive(Debug, PartialEq, Eq)] +pub struct ConsensusStatus { + validator_public_key: PublicKey, + round_length: Option, +} + +impl ConsensusStatus { + /// Constructs new consensus status. 
+ pub fn new(validator_public_key: PublicKey, round_length: Option) -> Self { + Self { + validator_public_key, + round_length, + } + } + + /// Returns the validator public key. + pub fn validator_public_key(&self) -> &PublicKey { + &self.validator_public_key + } + + /// Returns the round length. + pub fn round_length(&self) -> Option { + self.round_length + } +} + +impl ToBytes for ConsensusStatus { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.validator_public_key.serialized_length() + self.round_length.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.validator_public_key.write_bytes(writer)?; + self.round_length.write_bytes(writer) + } +} + +impl FromBytes for ConsensusStatus { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (validator_public_key, remainder) = FromBytes::from_bytes(bytes)?; + let (round_length, remainder) = FromBytes::from_bytes(remainder)?; + Ok(( + ConsensusStatus::new(validator_public_key, round_length), + remainder, + )) + } +} + +/// A transaction with execution info. +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct TransactionWithExecutionInfo { + transaction: Transaction, + execution_info: Option, +} + +impl TransactionWithExecutionInfo { + /// Constructs new transaction with execution info. + pub fn new(transaction: Transaction, execution_info: Option) -> Self { + Self { + transaction, + execution_info, + } + } + + /// Converts `self` into the transaction and execution info. 
+ pub fn into_inner(self) -> (Transaction, Option) { + (self.transaction, self.execution_info) + } +} + +impl ToBytes for TransactionWithExecutionInfo { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.transaction.write_bytes(writer)?; + self.execution_info.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + self.transaction.serialized_length() + self.execution_info.serialized_length() + } +} + +impl FromBytes for TransactionWithExecutionInfo { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (transaction, remainder) = FromBytes::from_bytes(bytes)?; + let (execution_info, remainder) = FromBytes::from_bytes(remainder)?; + Ok(( + TransactionWithExecutionInfo::new(transaction, execution_info), + remainder, + )) + } +} + +impl_bytesrepr_for_type_wrapper!(Uptime); +impl_bytesrepr_for_type_wrapper!(ConsensusValidatorChanges); +impl_bytesrepr_for_type_wrapper!(NetworkName); +impl_bytesrepr_for_type_wrapper!(LastProgress); +impl_bytesrepr_for_type_wrapper!(SpeculativeExecutionResult); +impl_bytesrepr_for_type_wrapper!(GetTrieFullResult); + +#[cfg(test)] +mod tests { + use core::iter::FromIterator; + use rand::Rng; + + use super::*; + use crate::testing::TestRng; + + #[test] + fn uptime_roundtrip() { + let rng = &mut TestRng::new(); + bytesrepr::test_serialization_roundtrip(&Uptime::new(rng.gen())); + } + + #[test] + fn consensus_validator_changes_roundtrip() { + let rng = &mut TestRng::new(); + let map = BTreeMap::from_iter([( + PublicKey::random(rng), + vec![(EraId::random(rng), ValidatorChange::random(rng))], + )]); + bytesrepr::test_serialization_roundtrip(&ConsensusValidatorChanges::new(map)); + } + + #[test] + fn network_name_roundtrip() { + let rng = &mut TestRng::new(); + 
bytesrepr::test_serialization_roundtrip(&NetworkName::new(rng.random_string(5..20))); + } + + #[test] + fn last_progress_roundtrip() { + let rng = &mut TestRng::new(); + bytesrepr::test_serialization_roundtrip(&LastProgress::new(Timestamp::random(rng))); + } + + #[test] + fn speculative_execution_result_roundtrip() { + let rng = &mut TestRng::new(); + if rng.gen_bool(0.5) { + bytesrepr::test_serialization_roundtrip(&SpeculativeExecutionResult::new(None)); + } else { + bytesrepr::test_serialization_roundtrip(&SpeculativeExecutionResult::new(Some(( + ExecutionResultV2::random(rng), + rng.random_vec(0..20), + )))); + } + } + + #[test] + fn get_trie_full_result_roundtrip() { + let rng = &mut TestRng::new(); + bytesrepr::test_serialization_roundtrip(&GetTrieFullResult::new(rng.gen())); + } + + #[test] + fn consensus_status_roundtrip() { + let rng = &mut TestRng::new(); + bytesrepr::test_serialization_roundtrip(&ConsensusStatus::new( + PublicKey::random(rng), + Some(TimeDiff::from_millis(rng.gen())), + )); + } +} diff --git a/casper_types_ver_2_0/src/block.rs b/casper_types_ver_2_0/src/block.rs new file mode 100644 index 00000000..1e84169d --- /dev/null +++ b/casper_types_ver_2_0/src/block.rs @@ -0,0 +1,494 @@ +mod available_block_range; +mod block_body; +mod block_hash; +mod block_hash_and_height; +mod block_header; +mod block_identifier; +mod block_signatures; +mod block_sync_status; +mod block_v1; +mod block_v2; +mod era_end; +mod finality_signature; +mod finality_signature_id; +mod json_compatibility; +mod rewarded_signatures; +mod rewards; +mod signed_block; +mod signed_block_header; + +#[cfg(any(feature = "testing", test))] +mod test_block_builder { + pub mod test_block_v1_builder; + pub mod test_block_v2_builder; +} + +use alloc::{boxed::Box, vec::Vec}; +use core::fmt::{self, Display, Formatter}; +#[cfg(feature = "json-schema")] +use once_cell::sync::Lazy; +#[cfg(feature = "std")] +use std::error::Error as StdError; + +#[cfg(feature = "datasize")] +use 
datasize::DataSize; + +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; + +use crate::{ + bytesrepr, + bytesrepr::{FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + Digest, EraId, ProtocolVersion, PublicKey, Timestamp, +}; +pub use available_block_range::AvailableBlockRange; +pub use block_body::{BlockBody, BlockBodyV1, BlockBodyV2}; +pub use block_hash::BlockHash; +pub use block_hash_and_height::BlockHashAndHeight; +pub use block_header::{BlockHeader, BlockHeaderV1, BlockHeaderV2}; +pub use block_identifier::BlockIdentifier; +pub use block_signatures::{BlockSignatures, BlockSignaturesMergeError}; +pub use block_sync_status::{BlockSyncStatus, BlockSynchronizerStatus}; +pub use block_v1::BlockV1; +pub use block_v2::BlockV2; +pub use era_end::{EraEnd, EraEndV1, EraEndV2, EraReport}; +pub use finality_signature::FinalitySignature; +pub use finality_signature_id::FinalitySignatureId; +#[cfg(all(feature = "std", feature = "json-schema"))] +pub use json_compatibility::JsonBlockWithSignatures; +pub use rewarded_signatures::{RewardedSignatures, SingleBlockRewardedSignatures}; +pub use rewards::Rewards; +pub use signed_block::SignedBlock; +pub use signed_block_header::{SignedBlockHeader, SignedBlockHeaderValidationError}; +#[cfg(any(feature = "testing", test))] +pub use test_block_builder::{ + test_block_v1_builder::TestBlockV1Builder, + test_block_v2_builder::TestBlockV2Builder as TestBlockBuilder, +}; + +#[cfg(feature = "json-schema")] +static BLOCK: Lazy = Lazy::new(|| BlockV2::example().into()); + +/// An error that can arise when validating a block's cryptographic integrity using its hashes. +#[derive(Clone, Eq, PartialEq, Debug)] +#[cfg_attr(any(feature = "std", test), derive(serde::Serialize))] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[non_exhaustive] +pub enum BlockValidationError { + /// Problem serializing some of a block's data into bytes. 
+ Bytesrepr(bytesrepr::Error), + /// The provided block's hash is not the same as the actual hash of the block. + UnexpectedBlockHash { + /// The block with the incorrect block hash. + block: Box, + /// The actual hash of the block. + actual_block_hash: BlockHash, + }, + /// The body hash in the header is not the same as the actual hash of the body of the block. + UnexpectedBodyHash { + /// The block with the header containing the incorrect block body hash. + block: Box, + /// The actual hash of the block's body. + actual_block_body_hash: Digest, + }, + /// The header version does not match the body version. + IncompatibleVersions, +} + +impl Display for BlockValidationError { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + BlockValidationError::Bytesrepr(error) => { + write!(formatter, "error validating block: {}", error) + } + BlockValidationError::UnexpectedBlockHash { + block, + actual_block_hash, + } => { + write!( + formatter, + "block has incorrect block hash - actual block hash: {:?}, block: {:?}", + actual_block_hash, block + ) + } + BlockValidationError::UnexpectedBodyHash { + block, + actual_block_body_hash, + } => { + write!( + formatter, + "block header has incorrect body hash - actual body hash: {:?}, block: {:?}", + actual_block_body_hash, block + ) + } + BlockValidationError::IncompatibleVersions => { + write!(formatter, "block body and header versions do not match") + } + } + } +} + +impl From for BlockValidationError { + fn from(error: bytesrepr::Error) -> Self { + BlockValidationError::Bytesrepr(error) + } +} + +#[cfg(feature = "std")] +impl StdError for BlockValidationError { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + match self { + BlockValidationError::Bytesrepr(error) => Some(error), + BlockValidationError::UnexpectedBlockHash { .. } + | BlockValidationError::UnexpectedBodyHash { .. 
} + | BlockValidationError::IncompatibleVersions => None, + } + } +} + +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub enum BlockConversionError { + DifferentVersion { expected_version: u8 }, +} + +#[cfg(feature = "std")] +impl Display for BlockConversionError { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + BlockConversionError::DifferentVersion { expected_version } => { + write!( + f, + "Could not convert a block to the expected version {}", + expected_version + ) + } + } + } +} + +const TAG_LENGTH: usize = U8_SERIALIZED_LENGTH; + +/// Tag for block body v1. +const BLOCK_V1_TAG: u8 = 0; +/// Tag for block body v2. +const BLOCK_V2_TAG: u8 = 1; + +/// A block after execution. +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + any(feature = "std", feature = "json-schema", test), + derive(serde::Serialize, serde::Deserialize) +)] +#[derive(Clone, Debug, PartialEq, Eq)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum Block { + /// The legacy, initial version of the block. + #[cfg_attr( + any(feature = "std", feature = "json-schema", test), + serde(rename = "Version1") + )] + V1(BlockV1), + /// The version 2 of the block. + #[cfg_attr( + any(feature = "std", feature = "json-schema", test), + serde(rename = "Version2") + )] + V2(BlockV2), +} + +impl Block { + // This method is not intended to be used by third party crates. 
+ #[doc(hidden)] + pub fn new_from_header_and_body( + block_header: BlockHeader, + block_body: BlockBody, + ) -> Result> { + let hash = block_header.block_hash(); + let block = match (block_body, block_header) { + (BlockBody::V1(body), BlockHeader::V1(header)) => { + Ok(Block::V1(BlockV1 { hash, header, body })) + } + (BlockBody::V2(body), BlockHeader::V2(header)) => { + Ok(Block::V2(BlockV2 { hash, header, body })) + } + _ => Err(BlockValidationError::IncompatibleVersions), + }?; + + block.verify()?; + Ok(block) + } + + /// Clones the header, put it in the versioning enum, and returns it. + pub fn clone_header(&self) -> BlockHeader { + match self { + Block::V1(v1) => BlockHeader::V1(v1.header().clone()), + Block::V2(v2) => BlockHeader::V2(v2.header().clone()), + } + } + + /// Returns the block's header, consuming `self`. + pub fn take_header(self) -> BlockHeader { + match self { + Block::V1(v1) => BlockHeader::V1(v1.take_header()), + Block::V2(v2) => BlockHeader::V2(v2.take_header()), + } + } + + /// Returns the timestamp from when the block was proposed. + pub fn timestamp(&self) -> Timestamp { + match self { + Block::V1(v1) => v1.header.timestamp(), + Block::V2(v2) => v2.header.timestamp(), + } + } + + /// Returns the protocol version of the network from when this block was created. + pub fn protocol_version(&self) -> ProtocolVersion { + match self { + Block::V1(v1) => v1.header.protocol_version(), + Block::V2(v2) => v2.header.protocol_version(), + } + } + + /// The hash of this block's header. + pub fn hash(&self) -> &BlockHash { + match self { + Block::V1(v1) => v1.hash(), + Block::V2(v2) => v2.hash(), + } + } + + /// Returns the hash of the block's body. + pub fn body_hash(&self) -> &Digest { + match self { + Block::V1(v1) => v1.header().body_hash(), + Block::V2(v2) => v2.header().body_hash(), + } + } + + /// Returns a random bit needed for initializing a future era. 
+ pub fn random_bit(&self) -> bool { + match self { + Block::V1(v1) => v1.header().random_bit(), + Block::V2(v2) => v2.header().random_bit(), + } + } + + /// Returns a seed needed for initializing a future era. + pub fn accumulated_seed(&self) -> &Digest { + match self { + Block::V1(v1) => v1.accumulated_seed(), + Block::V2(v2) => v2.accumulated_seed(), + } + } + + /// Returns the parent block's hash. + pub fn parent_hash(&self) -> &BlockHash { + match self { + Block::V1(v1) => v1.parent_hash(), + Block::V2(v2) => v2.parent_hash(), + } + } + + /// Returns the public key of the validator which proposed the block. + pub fn proposer(&self) -> &PublicKey { + match self { + Block::V1(v1) => v1.proposer(), + Block::V2(v2) => v2.proposer(), + } + } + + /// Clone the body and wrap is up in the versioned `Body`. + pub fn clone_body(&self) -> BlockBody { + match self { + Block::V1(v1) => BlockBody::V1(v1.body().clone()), + Block::V2(v2) => BlockBody::V2(v2.body().clone()), + } + } + + /// Check the integrity of a block by hashing its body and header + pub fn verify(&self) -> Result<(), BlockValidationError> { + match self { + Block::V1(v1) => v1.verify(), + Block::V2(v2) => v2.verify(), + } + } + + /// Returns the height of this block, i.e. the number of ancestors. + pub fn height(&self) -> u64 { + match self { + Block::V1(v1) => v1.header.height(), + Block::V2(v2) => v2.header.height(), + } + } + + /// Returns the era ID in which this block was created. + pub fn era_id(&self) -> EraId { + match self { + Block::V1(v1) => v1.era_id(), + Block::V2(v2) => v2.era_id(), + } + } + + /// Clones the era end, put it in the versioning enum, and returns it. + pub fn clone_era_end(&self) -> Option { + match self { + Block::V1(v1) => v1.header().era_end().cloned().map(EraEnd::V1), + Block::V2(v2) => v2.header().era_end().cloned().map(EraEnd::V2), + } + } + + /// Returns `true` if this block is the last one in the current era. 
+ pub fn is_switch_block(&self) -> bool { + match self { + Block::V1(v1) => v1.header.is_switch_block(), + Block::V2(v2) => v2.header.is_switch_block(), + } + } + + /// Returns `true` if this block is the first block of the chain, the genesis block. + pub fn is_genesis(&self) -> bool { + match self { + Block::V1(v1) => v1.header.is_genesis(), + Block::V2(v2) => v2.header.is_genesis(), + } + } + + /// Returns the root hash of global state after the deploys in this block have been executed. + pub fn state_root_hash(&self) -> &Digest { + match self { + Block::V1(v1) => v1.header.state_root_hash(), + Block::V2(v2) => v2.header.state_root_hash(), + } + } + + /// List of identifiers for finality signatures for a particular past block. + pub fn rewarded_signatures(&self) -> &RewardedSignatures { + match self { + Block::V1(_v1) => &rewarded_signatures::EMPTY, + Block::V2(v2) => v2.body.rewarded_signatures(), + } + } + + // This method is not intended to be used by third party crates. + #[doc(hidden)] + #[cfg(feature = "json-schema")] + pub fn example() -> &'static Self { + &BLOCK + } +} + +impl Display for Block { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!( + formatter, + "executed block #{}, {}, timestamp {}, {}, parent {}, post-state hash {}, body hash \ + {}, random bit {}, protocol version: {}", + self.height(), + self.hash(), + self.timestamp(), + self.era_id(), + self.parent_hash().inner(), + self.state_root_hash(), + self.body_hash(), + self.random_bit(), + self.protocol_version() + )?; + if let Some(era_end) = self.clone_era_end() { + write!(formatter, ", era_end: {}", era_end)?; + } + Ok(()) + } +} + +impl ToBytes for Block { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + match self { + Block::V1(v1) => { + buffer.insert(0, BLOCK_V1_TAG); + buffer.extend(v1.to_bytes()?); + } + Block::V2(v2) => { + buffer.insert(0, BLOCK_V2_TAG); + buffer.extend(v2.to_bytes()?); + } + } + 
Ok(buffer) + } + + fn serialized_length(&self) -> usize { + TAG_LENGTH + + match self { + Block::V1(v1) => v1.serialized_length(), + Block::V2(v2) => v2.serialized_length(), + } + } +} + +impl FromBytes for Block { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + BLOCK_V1_TAG => { + let (body, remainder): (BlockV1, _) = FromBytes::from_bytes(remainder)?; + Ok((Self::V1(body), remainder)) + } + BLOCK_V2_TAG => { + let (body, remainder): (BlockV2, _) = FromBytes::from_bytes(remainder)?; + Ok((Self::V2(body), remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +impl From<&BlockV2> for Block { + fn from(block: &BlockV2) -> Self { + Block::V2(block.clone()) + } +} + +impl From for Block { + fn from(block: BlockV2) -> Self { + Block::V2(block) + } +} + +impl From<&BlockV1> for Block { + fn from(block: &BlockV1) -> Self { + Block::V1(block.clone()) + } +} + +impl From for Block { + fn from(block: BlockV1) -> Self { + Block::V1(block) + } +} + +#[cfg(all(feature = "std", feature = "json-schema"))] +impl From for Block { + fn from(block_with_signatures: JsonBlockWithSignatures) -> Self { + block_with_signatures.block + } +} + +#[cfg(test)] +mod tests { + use crate::{bytesrepr, testing::TestRng}; + + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let block_v1 = TestBlockV1Builder::new().build(rng); + let block = Block::V1(block_v1); + bytesrepr::test_serialization_roundtrip(&block); + + let block_v2 = TestBlockBuilder::new().build(rng); + let block = Block::V2(block_v2); + bytesrepr::test_serialization_roundtrip(&block); + } +} diff --git a/casper_types_ver_2_0/src/block/available_block_range.rs b/casper_types_ver_2_0/src/block/available_block_range.rs new file mode 100644 index 00000000..99c2fe32 --- /dev/null +++ b/casper_types_ver_2_0/src/block/available_block_range.rs @@ -0,0 +1,110 @@ +use core::fmt::{self, Display, 
Formatter}; + +use alloc::vec::Vec; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::bytesrepr::{self, FromBytes, ToBytes}; + +#[cfg(test)] +use rand::Rng; + +#[cfg(test)] +use crate::testing::TestRng; + +/// An unbroken, inclusive range of blocks. +#[derive(Copy, Clone, PartialOrd, Ord, PartialEq, Eq, Hash, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct AvailableBlockRange { + /// The inclusive lower bound of the range. + low: u64, + /// The inclusive upper bound of the range. + high: u64, +} + +impl AvailableBlockRange { + /// An `AvailableRange` of [0, 0]. + pub const RANGE_0_0: AvailableBlockRange = AvailableBlockRange { low: 0, high: 0 }; + + /// Constructs a new `AvailableBlockRange` with the given limits. + pub fn new(low: u64, high: u64) -> Self { + assert!( + low <= high, + "cannot construct available block range with low > high" + ); + AvailableBlockRange { low, high } + } + + /// Returns `true` if `height` is within the range. + pub fn contains(&self, height: u64) -> bool { + height >= self.low && height <= self.high + } + + /// Returns the low value. + pub fn low(&self) -> u64 { + self.low + } + + /// Returns the high value. 
+ pub fn high(&self) -> u64 { + self.high + } + + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + let low = rng.gen::() as u64; + let high = low + rng.gen::() as u64; + Self { low, high } + } +} + +impl Display for AvailableBlockRange { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!( + formatter, + "available block range [{}, {}]", + self.low, self.high + ) + } +} + +impl ToBytes for AvailableBlockRange { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.low.write_bytes(writer)?; + self.high.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + self.low.serialized_length() + self.high.serialized_length() + } +} + +impl FromBytes for AvailableBlockRange { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (low, remainder) = u64::from_bytes(bytes)?; + let (high, remainder) = u64::from_bytes(remainder)?; + Ok((AvailableBlockRange { low, high }, remainder)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = AvailableBlockRange::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/casper_types_ver_2_0/src/block/block_body.rs b/casper_types_ver_2_0/src/block/block_body.rs new file mode 100644 index 00000000..5fa8f574 --- /dev/null +++ b/casper_types_ver_2_0/src/block/block_body.rs @@ -0,0 +1,115 @@ +mod block_body_v1; +mod block_body_v2; + +pub use block_body_v1::BlockBodyV1; +pub use block_body_v2::BlockBodyV2; + +use alloc::vec::Vec; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +use crate::bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}; + 
+const TAG_LENGTH: usize = U8_SERIALIZED_LENGTH; + +/// Tag for block body v1. +pub const BLOCK_BODY_V1_TAG: u8 = 0; +/// Tag for block body v2. +pub const BLOCK_BODY_V2_TAG: u8 = 1; + +/// The versioned body portion of a block. It encapsulates different variants of the BlockBody +/// struct. +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(any(feature = "testing", test), derive(PartialEq))] +#[derive(Clone, Serialize, Deserialize, Debug)] +pub enum BlockBody { + /// The legacy, initial version of the body portion of a block. + #[serde(rename = "Version1")] + V1(BlockBodyV1), + /// The version 2 of the body portion of a block, which includes the + /// `past_finality_signatures`. + #[serde(rename = "Version2")] + V2(BlockBodyV2), +} + +impl Display for BlockBody { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + BlockBody::V1(v1) => Display::fmt(&v1, formatter), + BlockBody::V2(v2) => Display::fmt(&v2, formatter), + } + } +} + +impl From for BlockBody { + fn from(body: BlockBodyV1) -> Self { + BlockBody::V1(body) + } +} + +impl From<&BlockBodyV2> for BlockBody { + fn from(body: &BlockBodyV2) -> Self { + BlockBody::V2(body.clone()) + } +} + +impl ToBytes for BlockBody { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + match self { + BlockBody::V1(v1) => { + buffer.insert(0, BLOCK_BODY_V1_TAG); + buffer.extend(v1.to_bytes()?); + } + BlockBody::V2(v2) => { + buffer.insert(0, BLOCK_BODY_V2_TAG); + buffer.extend(v2.to_bytes()?); + } + } + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + TAG_LENGTH + + match self { + BlockBody::V1(v1) => v1.serialized_length(), + BlockBody::V2(v2) => v2.serialized_length(), + } + } +} + +impl FromBytes for BlockBody { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + BLOCK_BODY_V1_TAG => { + let (body, remainder): (BlockBodyV1, _) = 
FromBytes::from_bytes(remainder)?; + Ok((Self::V1(body), remainder)) + } + BLOCK_BODY_V2_TAG => { + let (body, remainder): (BlockBodyV2, _) = FromBytes::from_bytes(remainder)?; + Ok((Self::V2(body), remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use crate::{bytesrepr, testing::TestRng, TestBlockBuilder, TestBlockV1Builder}; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let block_body_v1 = TestBlockV1Builder::new().build_versioned(rng).clone_body(); + bytesrepr::test_serialization_roundtrip(&block_body_v1); + + let block_body_v2 = TestBlockBuilder::new().build_versioned(rng).clone_body(); + bytesrepr::test_serialization_roundtrip(&block_body_v2); + } +} diff --git a/casper_types_ver_2_0/src/block/block_body/block_body_v1.rs b/casper_types_ver_2_0/src/block/block_body/block_body_v1.rs new file mode 100644 index 00000000..e32ab4b9 --- /dev/null +++ b/casper_types_ver_2_0/src/block/block_body/block_body_v1.rs @@ -0,0 +1,160 @@ +use alloc::vec::Vec; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "once_cell", test))] +use once_cell::sync::OnceCell; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + DeployHash, Digest, PublicKey, +}; + +/// The body portion of a block. Version 1. +#[derive(Clone, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct BlockBodyV1 { + /// The public key of the validator which proposed the block. + pub(super) proposer: PublicKey, + /// The deploy hashes of the non-transfer deploys within the block. + pub(super) deploy_hashes: Vec, + /// The deploy hashes of the transfers within the block. 
+ pub(super) transfer_hashes: Vec, + #[serde(skip)] + #[cfg_attr( + all(any(feature = "once_cell", test), feature = "datasize"), + data_size(skip) + )] + #[cfg(any(feature = "once_cell", test))] + pub(super) hash: OnceCell, +} + +impl BlockBodyV1 { + /// Constructs a new `BlockBody`. + pub(crate) fn new( + proposer: PublicKey, + deploy_hashes: Vec, + transfer_hashes: Vec, + ) -> Self { + BlockBodyV1 { + proposer, + deploy_hashes, + transfer_hashes, + #[cfg(any(feature = "once_cell", test))] + hash: OnceCell::new(), + } + } + + /// Returns the public key of the validator which proposed the block. + pub fn proposer(&self) -> &PublicKey { + &self.proposer + } + + /// Returns the deploy hashes of the non-transfer deploys within the block. + pub fn deploy_hashes(&self) -> &[DeployHash] { + &self.deploy_hashes + } + + /// Returns the deploy hashes of the transfers within the block. + pub fn transfer_hashes(&self) -> &[DeployHash] { + &self.transfer_hashes + } + + /// Returns the deploy and transfer hashes in the order in which they were executed. + pub fn deploy_and_transfer_hashes(&self) -> impl Iterator { + self.deploy_hashes() + .iter() + .chain(self.transfer_hashes().iter()) + } + + /// Returns the body hash, i.e. the hash of the body's serialized bytes. + pub fn hash(&self) -> Digest { + #[cfg(any(feature = "once_cell", test))] + return *self.hash.get_or_init(|| self.compute_hash()); + + #[cfg(not(any(feature = "once_cell", test)))] + self.compute_hash() + } + + fn compute_hash(&self) -> Digest { + let serialized_body = self + .to_bytes() + .unwrap_or_else(|error| panic!("should serialize block body: {}", error)); + Digest::hash(serialized_body) + } +} + +impl PartialEq for BlockBodyV1 { + fn eq(&self, other: &BlockBodyV1) -> bool { + // Destructure to make sure we don't accidentally omit fields. 
+ #[cfg(any(feature = "once_cell", test))] + let BlockBodyV1 { + proposer, + deploy_hashes, + transfer_hashes, + hash: _, + } = self; + #[cfg(not(any(feature = "once_cell", test)))] + let BlockBodyV1 { + proposer, + deploy_hashes, + transfer_hashes, + } = self; + *proposer == other.proposer + && *deploy_hashes == other.deploy_hashes + && *transfer_hashes == other.transfer_hashes + } +} + +impl Display for BlockBodyV1 { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "block body proposed by {}, {} deploys, {} transfers", + self.proposer, + self.deploy_hashes.len(), + self.transfer_hashes.len() + ) + } +} + +impl ToBytes for BlockBodyV1 { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.proposer.write_bytes(writer)?; + self.deploy_hashes.write_bytes(writer)?; + self.transfer_hashes.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.proposer.serialized_length() + + self.deploy_hashes.serialized_length() + + self.transfer_hashes.serialized_length() + } +} + +impl FromBytes for BlockBodyV1 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (proposer, bytes) = PublicKey::from_bytes(bytes)?; + let (deploy_hashes, bytes) = Vec::::from_bytes(bytes)?; + let (transfer_hashes, bytes) = Vec::::from_bytes(bytes)?; + let body = BlockBodyV1 { + proposer, + deploy_hashes, + transfer_hashes, + #[cfg(any(feature = "once_cell", test))] + hash: OnceCell::new(), + }; + Ok((body, bytes)) + } +} diff --git a/casper_types_ver_2_0/src/block/block_body/block_body_v2.rs b/casper_types_ver_2_0/src/block/block_body/block_body_v2.rs new file mode 100644 index 00000000..a417f022 --- /dev/null +++ b/casper_types_ver_2_0/src/block/block_body/block_body_v2.rs @@ -0,0 +1,214 @@ +use alloc::vec::Vec; +use 
core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "once_cell", test))] +use once_cell::sync::OnceCell; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + block::RewardedSignatures, + bytesrepr::{self, FromBytes, ToBytes}, + Digest, PublicKey, TransactionHash, +}; + +/// The body portion of a block. Version 2. +#[derive(Clone, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct BlockBodyV2 { + /// The public key of the validator which proposed the block. + pub(super) proposer: PublicKey, + /// The hashes of the transfer transactions within the block. + pub(super) transfer: Vec, + /// The hashes of the non-transfer, native transactions within the block. + pub(super) staking: Vec, + /// The hashes of the installer/upgrader transactions within the block. + pub(super) install_upgrade: Vec, + /// The hashes of all other transactions within the block. + pub(super) standard: Vec, + /// List of identifiers for finality signatures for a particular past block. + pub(super) rewarded_signatures: RewardedSignatures, + #[serde(skip)] + #[cfg_attr( + all(any(feature = "once_cell", test), feature = "datasize"), + data_size(skip) + )] + #[cfg(any(feature = "once_cell", test))] + pub(super) hash: OnceCell, +} + +impl BlockBodyV2 { + /// Constructs a new `BlockBodyV2`. + pub(crate) fn new( + proposer: PublicKey, + transfer: Vec, + staking: Vec, + install_upgrade: Vec, + standard: Vec, + rewarded_signatures: RewardedSignatures, + ) -> Self { + BlockBodyV2 { + proposer, + transfer, + staking, + install_upgrade, + standard, + rewarded_signatures, + #[cfg(any(feature = "once_cell", test))] + hash: OnceCell::new(), + } + } + + /// Returns the public key of the validator which proposed the block. 
+ pub fn proposer(&self) -> &PublicKey { + &self.proposer + } + + /// Returns the hashes of the transfer transactions within the block. + pub fn transfer(&self) -> impl Iterator { + self.transfer.iter() + } + + /// Returns the hashes of the non-transfer, native transactions within the block. + pub fn staking(&self) -> impl Iterator { + self.staking.iter() + } + + /// Returns the hashes of the installer/upgrader transactions within the block. + pub fn install_upgrade(&self) -> impl Iterator { + self.install_upgrade.iter() + } + + /// Returns the hashes of all other transactions within the block. + pub fn standard(&self) -> impl Iterator { + self.standard.iter() + } + + /// Returns all of the transaction hashes in the order in which they were executed. + pub fn all_transactions(&self) -> impl Iterator { + self.transfer() + .chain(self.staking()) + .chain(self.install_upgrade()) + .chain(self.standard()) + } + + /// Returns the body hash, i.e. the hash of the body's serialized bytes. + pub fn hash(&self) -> Digest { + #[cfg(any(feature = "once_cell", test))] + return *self.hash.get_or_init(|| self.compute_hash()); + + #[cfg(not(any(feature = "once_cell", test)))] + self.compute_hash() + } + + fn compute_hash(&self) -> Digest { + let serialized_body = self + .to_bytes() + .unwrap_or_else(|error| panic!("should serialize block body: {}", error)); + Digest::hash(serialized_body) + } + + /// Return the list of identifiers for finality signatures for a particular past block. + pub fn rewarded_signatures(&self) -> &RewardedSignatures { + &self.rewarded_signatures + } +} + +impl PartialEq for BlockBodyV2 { + fn eq(&self, other: &BlockBodyV2) -> bool { + // Destructure to make sure we don't accidentally omit fields. 
+ #[cfg(any(feature = "once_cell", test))] + let BlockBodyV2 { + proposer, + transfer, + staking, + install_upgrade, + standard, + rewarded_signatures, + hash: _, + } = self; + #[cfg(not(any(feature = "once_cell", test)))] + let BlockBodyV2 { + proposer, + transfer, + staking, + install_upgrade, + standard, + rewarded_signatures, + } = self; + *proposer == other.proposer + && *transfer == other.transfer + && *staking == other.staking + && *install_upgrade == other.install_upgrade + && *standard == other.standard + && *rewarded_signatures == other.rewarded_signatures + } +} + +impl Display for BlockBodyV2 { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "block body proposed by {}, {} transfers, {} non-transfer-native, {} \ + installer/upgraders, {} others", + self.proposer, + self.transfer.len(), + self.staking.len(), + self.install_upgrade.len(), + self.standard.len() + ) + } +} + +impl ToBytes for BlockBodyV2 { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.proposer.write_bytes(writer)?; + self.transfer.write_bytes(writer)?; + self.staking.write_bytes(writer)?; + self.install_upgrade.write_bytes(writer)?; + self.standard.write_bytes(writer)?; + self.rewarded_signatures.write_bytes(writer)?; + Ok(()) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.proposer.serialized_length() + + self.transfer.serialized_length() + + self.staking.serialized_length() + + self.install_upgrade.serialized_length() + + self.standard.serialized_length() + + self.rewarded_signatures.serialized_length() + } +} + +impl FromBytes for BlockBodyV2 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (proposer, bytes) = PublicKey::from_bytes(bytes)?; + let (transfer, bytes) = Vec::::from_bytes(bytes)?; + let (staking, bytes) = 
Vec::::from_bytes(bytes)?; + let (install_upgrade, bytes) = Vec::::from_bytes(bytes)?; + let (standard, bytes) = Vec::::from_bytes(bytes)?; + let (rewarded_signatures, bytes) = RewardedSignatures::from_bytes(bytes)?; + let body = BlockBodyV2 { + proposer, + transfer, + staking, + install_upgrade, + standard, + rewarded_signatures, + #[cfg(any(feature = "once_cell", test))] + hash: OnceCell::new(), + }; + Ok((body, bytes)) + } +} diff --git a/casper_types_ver_2_0/src/block/block_hash.rs b/casper_types_ver_2_0/src/block/block_hash.rs new file mode 100644 index 00000000..f6906c33 --- /dev/null +++ b/casper_types_ver_2_0/src/block/block_hash.rs @@ -0,0 +1,131 @@ +use alloc::vec::Vec; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use once_cell::sync::Lazy; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(doc)] +use super::Block; +#[cfg(doc)] +use super::BlockV2; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + Digest, +}; + +#[cfg(feature = "json-schema")] +static BLOCK_HASH: Lazy = + Lazy::new(|| BlockHash::new(Digest::from([7; BlockHash::LENGTH]))); + +/// The cryptographic hash of a [`Block`]. +#[derive( + Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug, Default, +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "Hex-encoded cryptographic hash of a block.") +)] +#[serde(deny_unknown_fields)] +pub struct BlockHash(Digest); + +impl BlockHash { + /// The number of bytes in a `BlockHash` digest. + pub const LENGTH: usize = Digest::LENGTH; + + /// Constructs a new `BlockHash`. 
+ pub fn new(hash: Digest) -> Self { + BlockHash(hash) + } + + /// Returns the wrapped inner digest. + pub fn inner(&self) -> &Digest { + &self.0 + } + + // This method is not intended to be used by third party crates. + #[doc(hidden)] + #[cfg(feature = "json-schema")] + pub fn example() -> &'static Self { + &BLOCK_HASH + } + + /// Returns a new `DeployHash` directly initialized with the provided bytes; no hashing is done. + #[cfg(any(feature = "testing", test))] + pub const fn from_raw(raw_digest: [u8; Self::LENGTH]) -> Self { + BlockHash(Digest::from_raw(raw_digest)) + } + + /// Returns a random `DeployHash`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + let hash = rng.gen::<[u8; Self::LENGTH]>().into(); + BlockHash(hash) + } +} + +impl From for BlockHash { + fn from(digest: Digest) -> Self { + Self(digest) + } +} + +impl From for Digest { + fn from(block_hash: BlockHash) -> Self { + block_hash.0 + } +} + +impl Display for BlockHash { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!(formatter, "block-hash({})", self.0) + } +} + +impl AsRef<[u8]> for BlockHash { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + +impl ToBytes for BlockHash { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for BlockHash { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + Digest::from_bytes(bytes).map(|(inner, remainder)| (BlockHash(inner), remainder)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let hash = BlockHash::random(rng); + bytesrepr::test_serialization_roundtrip(&hash); + } +} diff --git a/casper_types_ver_2_0/src/block/block_hash_and_height.rs 
b/casper_types_ver_2_0/src/block/block_hash_and_height.rs new file mode 100644 index 00000000..b9a48796 --- /dev/null +++ b/casper_types_ver_2_0/src/block/block_hash_and_height.rs @@ -0,0 +1,114 @@ +use core::fmt::{self, Display, Formatter}; + +use alloc::vec::Vec; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use super::BlockHash; +#[cfg(doc)] +use super::BlockV2; +use crate::bytesrepr::{self, FromBytes, ToBytes}; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; + +/// The block hash and height of a given block. +#[derive( + Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug, Default, +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct BlockHashAndHeight { + /// The hash of the block. + block_hash: BlockHash, + /// The height of the block. + block_height: u64, +} + +impl BlockHashAndHeight { + /// Constructs a new `BlockHashAndHeight`. + pub fn new(block_hash: BlockHash, block_height: u64) -> Self { + Self { + block_hash, + block_height, + } + } + + /// Returns the hash of the block. + pub fn block_hash(&self) -> &BlockHash { + &self.block_hash + } + + /// Returns the height of the block. + pub fn block_height(&self) -> u64 { + self.block_height + } + + /// Returns a random `BlockHashAndHeight`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + Self { + block_hash: BlockHash::random(rng), + block_height: rng.gen(), + } + } +} + +impl Display for BlockHashAndHeight { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!( + formatter, + "{}, height {} ", + self.block_hash, self.block_height + ) + } +} + +impl ToBytes for BlockHashAndHeight { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.block_hash.write_bytes(writer)?; + self.block_height.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + self.block_hash.serialized_length() + self.block_height.serialized_length() + } +} + +impl FromBytes for BlockHashAndHeight { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (block_hash, remainder) = BlockHash::from_bytes(bytes)?; + let (block_height, remainder) = u64::from_bytes(remainder)?; + Ok(( + BlockHashAndHeight { + block_hash, + block_height, + }, + remainder, + )) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = BlockHashAndHeight::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/casper_types_ver_2_0/src/block/block_header.rs b/casper_types_ver_2_0/src/block/block_header.rs new file mode 100644 index 00000000..8c683a57 --- /dev/null +++ b/casper_types_ver_2_0/src/block/block_header.rs @@ -0,0 +1,287 @@ +mod block_header_v1; +mod block_header_v2; + +pub use block_header_v1::BlockHeaderV1; +pub use block_header_v2::BlockHeaderV2; + +use alloc::{collections::BTreeMap, vec::Vec}; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "std")] +use crate::ProtocolConfig; +#[cfg(feature = "datasize")] +use 
datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +#[cfg(any(feature = "std", test))] +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + BlockHash, Digest, EraEnd, EraId, ProtocolVersion, PublicKey, Timestamp, U512, +}; + +const TAG_LENGTH: usize = U8_SERIALIZED_LENGTH; + +/// Tag for block header v1. +pub const BLOCK_HEADER_V1_TAG: u8 = 0; +/// Tag for block header v2. +pub const BLOCK_HEADER_V2_TAG: u8 = 1; + +/// The versioned header portion of a block. It encapsulates different variants of the BlockHeader +/// struct. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(any(feature = "std", test), derive(Serialize, Deserialize))] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum BlockHeader { + /// The legacy, initial version of the header portion of a block. + #[cfg_attr(any(feature = "std", test), serde(rename = "Version1"))] + V1(BlockHeaderV1), + /// The version 2 of the header portion of a block. + #[cfg_attr(any(feature = "std", test), serde(rename = "Version2"))] + V2(BlockHeaderV2), +} + +impl BlockHeader { + /// Returns the hash of this block header. + pub fn block_hash(&self) -> BlockHash { + match self { + BlockHeader::V1(v1) => v1.block_hash(), + BlockHeader::V2(v2) => v2.block_hash(), + } + } + + /// Returns the parent block's hash. + pub fn parent_hash(&self) -> &BlockHash { + match self { + BlockHeader::V1(v1) => v1.parent_hash(), + BlockHeader::V2(v2) => v2.parent_hash(), + } + } + + /// Returns the root hash of global state after the deploys in this block have been executed. + pub fn state_root_hash(&self) -> &Digest { + match self { + BlockHeader::V1(v1) => v1.state_root_hash(), + BlockHeader::V2(v2) => v2.state_root_hash(), + } + } + + /// Returns the hash of the block's body. 
+ pub fn body_hash(&self) -> &Digest { + match self { + BlockHeader::V1(v1) => v1.body_hash(), + BlockHeader::V2(v2) => v2.body_hash(), + } + } + + /// Returns a random bit needed for initializing a future era. + pub fn random_bit(&self) -> bool { + match self { + BlockHeader::V1(v1) => v1.random_bit(), + BlockHeader::V2(v2) => v2.random_bit(), + } + } + + /// Returns a seed needed for initializing a future era. + pub fn accumulated_seed(&self) -> &Digest { + match self { + BlockHeader::V1(v1) => v1.accumulated_seed(), + BlockHeader::V2(v2) => v2.accumulated_seed(), + } + } + + /// Returns the `EraEnd` of a block if it is a switch block. + pub fn clone_era_end(&self) -> Option { + match self { + BlockHeader::V1(v1) => v1.era_end().map(|ee| ee.clone().into()), + BlockHeader::V2(v2) => v2.era_end().map(|ee| ee.clone().into()), + } + } + + /// Returns equivocators if the header is of a switch block. + pub fn maybe_equivocators(&self) -> Option<&[PublicKey]> { + match self { + BlockHeader::V1(v1) => v1.era_end().map(|ee| ee.equivocators()), + BlockHeader::V2(v2) => v2.era_end().map(|ee| ee.equivocators()), + } + } + + /// Returns equivocators if the header is of a switch block. + pub fn maybe_inactive_validators(&self) -> Option<&[PublicKey]> { + match self { + BlockHeader::V1(v1) => v1.era_end().map(|ee| ee.inactive_validators()), + BlockHeader::V2(v2) => v2.era_end().map(|ee| ee.inactive_validators()), + } + } + + /// Returns the timestamp from when the block was proposed. + pub fn timestamp(&self) -> Timestamp { + match self { + BlockHeader::V1(v1) => v1.timestamp(), + BlockHeader::V2(v2) => v2.timestamp(), + } + } + + /// Returns the era ID in which this block was created. + pub fn era_id(&self) -> EraId { + match self { + BlockHeader::V1(v1) => v1.era_id(), + BlockHeader::V2(v2) => v2.era_id(), + } + } + + /// Returns the era ID in which the next block would be created (i.e. this block's era ID, or + /// its successor if this is a switch block). 
+ pub fn next_block_era_id(&self) -> EraId { + match self { + BlockHeader::V1(v1) => v1.next_block_era_id(), + BlockHeader::V2(v2) => v2.next_block_era_id(), + } + } + + /// Returns the height of this block, i.e. the number of ancestors. + pub fn height(&self) -> u64 { + match self { + BlockHeader::V1(v1) => v1.height(), + BlockHeader::V2(v2) => v2.height(), + } + } + + /// Returns the protocol version of the network from when this block was created. + pub fn protocol_version(&self) -> ProtocolVersion { + match self { + BlockHeader::V1(v1) => v1.protocol_version(), + BlockHeader::V2(v2) => v2.protocol_version(), + } + } + + /// Returns `true` if this block is the last one in the current era. + pub fn is_switch_block(&self) -> bool { + match self { + BlockHeader::V1(v1) => v1.is_switch_block(), + BlockHeader::V2(v2) => v2.is_switch_block(), + } + } + + /// Returns the validators for the upcoming era and their respective weights (if this is a + /// switch block). + pub fn next_era_validator_weights(&self) -> Option<&BTreeMap> { + match self { + BlockHeader::V1(v1) => v1.next_era_validator_weights(), + BlockHeader::V2(v2) => v2.next_era_validator_weights(), + } + } + + /// Returns `true` if this block is the Genesis block, i.e. has height 0 and era 0. + pub fn is_genesis(&self) -> bool { + match self { + BlockHeader::V1(v1) => v1.is_genesis(), + BlockHeader::V2(v2) => v2.is_genesis(), + } + } + + /// Returns `true` if this block belongs to the last block before the upgrade to the + /// current protocol version. + #[cfg(feature = "std")] + pub fn is_last_block_before_activation(&self, protocol_config: &ProtocolConfig) -> bool { + match self { + BlockHeader::V1(v1) => v1.is_last_block_before_activation(protocol_config), + BlockHeader::V2(v2) => v2.is_last_block_before_activation(protocol_config), + } + } + + // This method is not intended to be used by third party crates. + // + // Sets the block hash without recomputing it. Must only be called with the correct hash. 
+ #[doc(hidden)] + #[cfg(any(feature = "once_cell", test))] + pub fn set_block_hash(&self, block_hash: BlockHash) { + match self { + BlockHeader::V1(v1) => v1.set_block_hash(block_hash), + BlockHeader::V2(v2) => v2.set_block_hash(block_hash), + } + } +} + +impl Display for BlockHeader { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + BlockHeader::V1(v1) => Display::fmt(&v1, formatter), + BlockHeader::V2(v2) => Display::fmt(&v2, formatter), + } + } +} + +impl From for BlockHeader { + fn from(header: BlockHeaderV1) -> Self { + BlockHeader::V1(header) + } +} + +impl From for BlockHeader { + fn from(header: BlockHeaderV2) -> Self { + BlockHeader::V2(header) + } +} + +impl ToBytes for BlockHeader { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + match self { + BlockHeader::V1(v1) => { + buffer.insert(0, BLOCK_HEADER_V1_TAG); + buffer.extend(v1.to_bytes()?); + } + BlockHeader::V2(v2) => { + buffer.insert(0, BLOCK_HEADER_V2_TAG); + buffer.extend(v2.to_bytes()?); + } + } + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + TAG_LENGTH + + match self { + BlockHeader::V1(v1) => v1.serialized_length(), + BlockHeader::V2(v2) => v2.serialized_length(), + } + } +} + +impl FromBytes for BlockHeader { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + BLOCK_HEADER_V1_TAG => { + let (header, remainder): (BlockHeaderV1, _) = FromBytes::from_bytes(remainder)?; + Ok((Self::V1(header), remainder)) + } + BLOCK_HEADER_V2_TAG => { + let (header, remainder): (BlockHeaderV2, _) = FromBytes::from_bytes(remainder)?; + Ok((Self::V2(header), remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use crate::{bytesrepr, testing::TestRng, TestBlockBuilder, TestBlockV1Builder}; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let 
block_header_v1 = TestBlockV1Builder::new() + .build_versioned(rng) + .clone_header(); + bytesrepr::test_serialization_roundtrip(&block_header_v1); + + let block_header_v2 = TestBlockBuilder::new().build_versioned(rng).clone_header(); + bytesrepr::test_serialization_roundtrip(&block_header_v2); + } +} diff --git a/casper_types_ver_2_0/src/block/block_header/block_header_v1.rs b/casper_types_ver_2_0/src/block/block_header/block_header_v1.rs new file mode 100644 index 00000000..7fb64818 --- /dev/null +++ b/casper_types_ver_2_0/src/block/block_header/block_header_v1.rs @@ -0,0 +1,372 @@ +use alloc::{collections::BTreeMap, vec::Vec}; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use once_cell::sync::Lazy; +#[cfg(any(feature = "once_cell", test))] +use once_cell::sync::OnceCell; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +#[cfg(any(feature = "std", test))] +use serde::{Deserialize, Serialize}; + +use crate::{ + block::{BlockHash, EraEndV1}, + bytesrepr::{self, FromBytes, ToBytes}, + Digest, EraId, ProtocolVersion, PublicKey, Timestamp, U512, +}; +#[cfg(feature = "std")] +use crate::{ActivationPoint, ProtocolConfig}; + +#[cfg(feature = "json-schema")] +static BLOCK_HEADER_V1: Lazy = Lazy::new(|| { + let parent_hash = BlockHash::new(Digest::from([7; Digest::LENGTH])); + let state_root_hash = Digest::from([8; Digest::LENGTH]); + let random_bit = true; + let era_end = Some(EraEndV1::example().clone()); + let timestamp = *Timestamp::example(); + let era_id = EraId::from(1); + let height: u64 = 10; + let protocol_version = ProtocolVersion::V1_0_0; + let accumulated_seed = Digest::hash_pair(Digest::from([9; Digest::LENGTH]), [random_bit as u8]); + let body_hash = Digest::from([5; Digest::LENGTH]); + BlockHeaderV1::new( + parent_hash, + state_root_hash, + body_hash, + random_bit, + accumulated_seed, + era_end, + timestamp, + era_id, + height, + protocol_version, + 
#[cfg(any(feature = "once_cell", test))] + OnceCell::new(), + ) +}); + +/// The header portion of a block. +#[derive(Clone, Debug, Eq)] +#[cfg_attr(any(feature = "std", test), derive(Serialize, Deserialize))] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct BlockHeaderV1 { + /// The parent block's hash. + pub(super) parent_hash: BlockHash, + /// The root hash of global state after the deploys in this block have been executed. + pub(super) state_root_hash: Digest, + /// The hash of the block's body. + pub(super) body_hash: Digest, + /// A random bit needed for initializing a future era. + pub(super) random_bit: bool, + /// A seed needed for initializing a future era. + pub(super) accumulated_seed: Digest, + /// The `EraEnd` of a block if it is a switch block. + pub(super) era_end: Option, + /// The timestamp from when the block was proposed. + pub(super) timestamp: Timestamp, + /// The era ID in which this block was created. + pub(super) era_id: EraId, + /// The height of this block, i.e. the number of ancestors. + pub(super) height: u64, + /// The protocol version of the network from when this block was created. + pub(super) protocol_version: ProtocolVersion, + #[cfg_attr(any(all(feature = "std", feature = "once_cell"), test), serde(skip))] + #[cfg_attr( + all(any(feature = "once_cell", test), feature = "datasize"), + data_size(skip) + )] + #[cfg(any(feature = "once_cell", test))] + pub(super) block_hash: OnceCell, +} + +impl BlockHeaderV1 { + /// Returns the hash of this block header. + pub fn block_hash(&self) -> BlockHash { + #[cfg(any(feature = "once_cell", test))] + return *self.block_hash.get_or_init(|| self.compute_block_hash()); + + #[cfg(not(any(feature = "once_cell", test)))] + self.compute_block_hash() + } + + /// Returns the parent block's hash. 
+ pub fn parent_hash(&self) -> &BlockHash { + &self.parent_hash + } + + /// Returns the root hash of global state after the deploys in this block have been executed. + pub fn state_root_hash(&self) -> &Digest { + &self.state_root_hash + } + + /// Returns the hash of the block's body. + pub fn body_hash(&self) -> &Digest { + &self.body_hash + } + + /// Returns a random bit needed for initializing a future era. + pub fn random_bit(&self) -> bool { + self.random_bit + } + + /// Returns a seed needed for initializing a future era. + pub fn accumulated_seed(&self) -> &Digest { + &self.accumulated_seed + } + + /// Returns the `EraEnd` of a block if it is a switch block. + pub fn era_end(&self) -> Option<&EraEndV1> { + self.era_end.as_ref() + } + + /// Returns the timestamp from when the block was proposed. + pub fn timestamp(&self) -> Timestamp { + self.timestamp + } + + /// Returns the era ID in which this block was created. + pub fn era_id(&self) -> EraId { + self.era_id + } + + /// Returns the era ID in which the next block would be created (i.e. this block's era ID, or + /// its successor if this is a switch block). + pub fn next_block_era_id(&self) -> EraId { + if self.era_end.is_some() { + self.era_id.successor() + } else { + self.era_id + } + } + + /// Returns the height of this block, i.e. the number of ancestors. + pub fn height(&self) -> u64 { + self.height + } + + /// Returns the protocol version of the network from when this block was created. + pub fn protocol_version(&self) -> ProtocolVersion { + self.protocol_version + } + + /// Returns `true` if this block is the last one in the current era. + pub fn is_switch_block(&self) -> bool { + self.era_end.is_some() + } + + /// Returns the validators for the upcoming era and their respective weights (if this is a + /// switch block). 
+ pub fn next_era_validator_weights(&self) -> Option<&BTreeMap> { + self.era_end + .as_ref() + .map(|era_end| era_end.next_era_validator_weights()) + } + + /// Returns `true` if this block is the Genesis block, i.e. has height 0 and era 0. + pub fn is_genesis(&self) -> bool { + self.era_id().is_genesis() && self.height() == 0 + } + + /// Returns `true` if this block belongs to the last block before the upgrade to the + /// current protocol version. + #[cfg(feature = "std")] + pub fn is_last_block_before_activation(&self, protocol_config: &ProtocolConfig) -> bool { + protocol_config.version > self.protocol_version + && self.is_switch_block() + && ActivationPoint::EraId(self.next_block_era_id()) == protocol_config.activation_point + } + + pub(crate) fn compute_block_hash(&self) -> BlockHash { + let serialized_header = self + .to_bytes() + .unwrap_or_else(|error| panic!("should serialize block header: {}", error)); + BlockHash::new(Digest::hash(serialized_header)) + } + + // This method is not intended to be used by third party crates. + #[doc(hidden)] + #[allow(clippy::too_many_arguments)] + pub fn new( + parent_hash: BlockHash, + state_root_hash: Digest, + body_hash: Digest, + random_bit: bool, + accumulated_seed: Digest, + era_end: Option, + timestamp: Timestamp, + era_id: EraId, + height: u64, + protocol_version: ProtocolVersion, + #[cfg(any(feature = "once_cell", test))] block_hash: OnceCell, + ) -> Self { + BlockHeaderV1 { + parent_hash, + state_root_hash, + body_hash, + random_bit, + accumulated_seed, + era_end, + timestamp, + era_id, + height, + protocol_version, + #[cfg(any(feature = "once_cell", test))] + block_hash, + } + } + + // This method is not intended to be used by third party crates. + // + // Sets the block hash without recomputing it. Must only be called with the correct hash. 
+ #[doc(hidden)] + #[cfg(any(feature = "once_cell", test))] + pub fn set_block_hash(&self, block_hash: BlockHash) { + self.block_hash.get_or_init(|| block_hash); + } + + // This method is not intended to be used by third party crates. + #[doc(hidden)] + #[cfg(feature = "json-schema")] + pub fn example() -> &'static Self { + &BLOCK_HEADER_V1 + } + + #[cfg(test)] + pub(crate) fn set_body_hash(&mut self, new_body_hash: Digest) { + self.body_hash = new_body_hash; + } +} + +impl PartialEq for BlockHeaderV1 { + fn eq(&self, other: &BlockHeaderV1) -> bool { + // Destructure to make sure we don't accidentally omit fields. + #[cfg(any(feature = "once_cell", test))] + let BlockHeaderV1 { + parent_hash, + state_root_hash, + body_hash, + random_bit, + accumulated_seed, + era_end, + timestamp, + era_id, + height, + protocol_version, + block_hash: _, + } = self; + #[cfg(not(any(feature = "once_cell", test)))] + let BlockHeaderV1 { + parent_hash, + state_root_hash, + body_hash, + random_bit, + accumulated_seed, + era_end, + timestamp, + era_id, + height, + protocol_version, + } = self; + *parent_hash == other.parent_hash + && *state_root_hash == other.state_root_hash + && *body_hash == other.body_hash + && *random_bit == other.random_bit + && *accumulated_seed == other.accumulated_seed + && *era_end == other.era_end + && *timestamp == other.timestamp + && *era_id == other.era_id + && *height == other.height + && *protocol_version == other.protocol_version + } +} + +impl Display for BlockHeaderV1 { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "block header #{}, {}, timestamp {}, {}, parent {}, post-state hash {}, body hash {}, \ + random bit {}, protocol version: {}", + self.height, + self.block_hash(), + self.timestamp, + self.era_id, + self.parent_hash.inner(), + self.state_root_hash, + self.body_hash, + self.random_bit, + self.protocol_version, + )?; + if let Some(era_end) = &self.era_end { + write!(formatter, ", era_end: {}", era_end)?; 
+ } + Ok(()) + } +} + +impl ToBytes for BlockHeaderV1 { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.parent_hash.write_bytes(writer)?; + self.state_root_hash.write_bytes(writer)?; + self.body_hash.write_bytes(writer)?; + self.random_bit.write_bytes(writer)?; + self.accumulated_seed.write_bytes(writer)?; + self.era_end.write_bytes(writer)?; + self.timestamp.write_bytes(writer)?; + self.era_id.write_bytes(writer)?; + self.height.write_bytes(writer)?; + self.protocol_version.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.parent_hash.serialized_length() + + self.state_root_hash.serialized_length() + + self.body_hash.serialized_length() + + self.random_bit.serialized_length() + + self.accumulated_seed.serialized_length() + + self.era_end.serialized_length() + + self.timestamp.serialized_length() + + self.era_id.serialized_length() + + self.height.serialized_length() + + self.protocol_version.serialized_length() + } +} + +impl FromBytes for BlockHeaderV1 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (parent_hash, remainder) = BlockHash::from_bytes(bytes)?; + let (state_root_hash, remainder) = Digest::from_bytes(remainder)?; + let (body_hash, remainder) = Digest::from_bytes(remainder)?; + let (random_bit, remainder) = bool::from_bytes(remainder)?; + let (accumulated_seed, remainder) = Digest::from_bytes(remainder)?; + let (era_end, remainder) = Option::from_bytes(remainder)?; + let (timestamp, remainder) = Timestamp::from_bytes(remainder)?; + let (era_id, remainder) = EraId::from_bytes(remainder)?; + let (height, remainder) = u64::from_bytes(remainder)?; + let (protocol_version, remainder) = ProtocolVersion::from_bytes(remainder)?; + let block_header = BlockHeaderV1 { + parent_hash, + state_root_hash, + body_hash, + 
random_bit, + accumulated_seed, + era_end, + timestamp, + era_id, + height, + protocol_version, + #[cfg(any(feature = "once_cell", test))] + block_hash: OnceCell::new(), + }; + Ok((block_header, remainder)) + } +} diff --git a/casper_types_ver_2_0/src/block/block_header/block_header_v2.rs b/casper_types_ver_2_0/src/block/block_header/block_header_v2.rs new file mode 100644 index 00000000..14d11bac --- /dev/null +++ b/casper_types_ver_2_0/src/block/block_header/block_header_v2.rs @@ -0,0 +1,371 @@ +use alloc::{collections::BTreeMap, vec::Vec}; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use once_cell::sync::Lazy; +#[cfg(any(feature = "once_cell", test))] +use once_cell::sync::OnceCell; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +#[cfg(any(feature = "std", test))] +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + BlockHash, Digest, EraEndV2, EraId, ProtocolVersion, PublicKey, Timestamp, U512, +}; +#[cfg(feature = "std")] +use crate::{ActivationPoint, ProtocolConfig}; + +#[cfg(feature = "json-schema")] +static BLOCK_HEADER_V2: Lazy = Lazy::new(|| { + let parent_hash = BlockHash::new(Digest::from([7; Digest::LENGTH])); + let state_root_hash = Digest::from([8; Digest::LENGTH]); + let random_bit = true; + let era_end = Some(EraEndV2::example().clone()); + let timestamp = *Timestamp::example(); + let era_id = EraId::from(1); + let height: u64 = 10; + let protocol_version = ProtocolVersion::V1_0_0; + let accumulated_seed = Digest::hash_pair(Digest::from([9; Digest::LENGTH]), [random_bit as u8]); + let body_hash = Digest::from([5; Digest::LENGTH]); + BlockHeaderV2::new( + parent_hash, + state_root_hash, + body_hash, + random_bit, + accumulated_seed, + era_end, + timestamp, + era_id, + height, + protocol_version, + #[cfg(any(feature = "once_cell", test))] + OnceCell::new(), + ) +}); + +/// The header portion of a block. 
+#[derive(Clone, Debug, Eq)] +#[cfg_attr(any(feature = "std", test), derive(Serialize, Deserialize))] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct BlockHeaderV2 { + /// The parent block's hash. + pub(super) parent_hash: BlockHash, + /// The root hash of global state after the deploys in this block have been executed. + pub(super) state_root_hash: Digest, + /// The hash of the block's body. + pub(super) body_hash: Digest, + /// A random bit needed for initializing a future era. + pub(super) random_bit: bool, + /// A seed needed for initializing a future era. + pub(super) accumulated_seed: Digest, + /// The `EraEnd` of a block if it is a switch block. + pub(super) era_end: Option, + /// The timestamp from when the block was proposed. + pub(super) timestamp: Timestamp, + /// The era ID in which this block was created. + pub(super) era_id: EraId, + /// The height of this block, i.e. the number of ancestors. + pub(super) height: u64, + /// The protocol version of the network from when this block was created. + pub(super) protocol_version: ProtocolVersion, + #[cfg_attr(any(all(feature = "std", feature = "once_cell"), test), serde(skip))] + #[cfg_attr( + all(any(feature = "once_cell", test), feature = "datasize"), + data_size(skip) + )] + #[cfg(any(feature = "once_cell", test))] + pub(super) block_hash: OnceCell, +} + +impl BlockHeaderV2 { + /// Returns the hash of this block header. + pub fn block_hash(&self) -> BlockHash { + #[cfg(any(feature = "once_cell", test))] + return *self.block_hash.get_or_init(|| self.compute_block_hash()); + + #[cfg(not(any(feature = "once_cell", test)))] + self.compute_block_hash() + } + + /// Returns the parent block's hash. + pub fn parent_hash(&self) -> &BlockHash { + &self.parent_hash + } + + /// Returns the root hash of global state after the deploys in this block have been executed. 
+ pub fn state_root_hash(&self) -> &Digest { + &self.state_root_hash + } + + /// Returns the hash of the block's body. + pub fn body_hash(&self) -> &Digest { + &self.body_hash + } + + /// Returns a random bit needed for initializing a future era. + pub fn random_bit(&self) -> bool { + self.random_bit + } + + /// Returns a seed needed for initializing a future era. + pub fn accumulated_seed(&self) -> &Digest { + &self.accumulated_seed + } + + /// Returns the `EraEnd` of a block if it is a switch block. + pub fn era_end(&self) -> Option<&EraEndV2> { + self.era_end.as_ref() + } + + /// Returns the timestamp from when the block was proposed. + pub fn timestamp(&self) -> Timestamp { + self.timestamp + } + + /// Returns the era ID in which this block was created. + pub fn era_id(&self) -> EraId { + self.era_id + } + + /// Returns the era ID in which the next block would be created (i.e. this block's era ID, or + /// its successor if this is a switch block). + pub fn next_block_era_id(&self) -> EraId { + if self.era_end.is_some() { + self.era_id.successor() + } else { + self.era_id + } + } + + /// Returns the height of this block, i.e. the number of ancestors. + pub fn height(&self) -> u64 { + self.height + } + + /// Returns the protocol version of the network from when this block was created. + pub fn protocol_version(&self) -> ProtocolVersion { + self.protocol_version + } + + /// Returns `true` if this block is the last one in the current era. + pub fn is_switch_block(&self) -> bool { + self.era_end.is_some() + } + + /// Returns the validators for the upcoming era and their respective weights (if this is a + /// switch block). + pub fn next_era_validator_weights(&self) -> Option<&BTreeMap> { + self.era_end + .as_ref() + .map(|era_end| era_end.next_era_validator_weights()) + } + + /// Returns `true` if this block is the Genesis block, i.e. has height 0 and era 0. 
+ pub fn is_genesis(&self) -> bool { + self.era_id().is_genesis() && self.height() == 0 + } + + /// Returns `true` if this block belongs to the last block before the upgrade to the + /// current protocol version. + #[cfg(feature = "std")] + pub fn is_last_block_before_activation(&self, protocol_config: &ProtocolConfig) -> bool { + protocol_config.version > self.protocol_version + && self.is_switch_block() + && ActivationPoint::EraId(self.next_block_era_id()) == protocol_config.activation_point + } + + pub(crate) fn compute_block_hash(&self) -> BlockHash { + let serialized_header = self + .to_bytes() + .unwrap_or_else(|error| panic!("should serialize block header: {}", error)); + BlockHash::new(Digest::hash(serialized_header)) + } + + // This method is not intended to be used by third party crates. + #[doc(hidden)] + #[allow(clippy::too_many_arguments)] + pub fn new( + parent_hash: BlockHash, + state_root_hash: Digest, + body_hash: Digest, + random_bit: bool, + accumulated_seed: Digest, + era_end: Option, + timestamp: Timestamp, + era_id: EraId, + height: u64, + protocol_version: ProtocolVersion, + #[cfg(any(feature = "once_cell", test))] block_hash: OnceCell, + ) -> Self { + BlockHeaderV2 { + parent_hash, + state_root_hash, + body_hash, + random_bit, + accumulated_seed, + era_end, + timestamp, + era_id, + height, + protocol_version, + #[cfg(any(feature = "once_cell", test))] + block_hash, + } + } + + // This method is not intended to be used by third party crates. + // + // Sets the block hash without recomputing it. Must only be called with the correct hash. + #[doc(hidden)] + #[cfg(any(feature = "once_cell", test))] + pub fn set_block_hash(&self, block_hash: BlockHash) { + self.block_hash.get_or_init(|| block_hash); + } + + // This method is not intended to be used by third party crates. 
+ #[doc(hidden)] + #[cfg(feature = "json-schema")] + pub fn example() -> &'static Self { + &BLOCK_HEADER_V2 + } + + #[cfg(test)] + pub(crate) fn set_body_hash(&mut self, new_body_hash: Digest) { + self.body_hash = new_body_hash; + } +} + +impl PartialEq for BlockHeaderV2 { + fn eq(&self, other: &BlockHeaderV2) -> bool { + // Destructure to make sure we don't accidentally omit fields. + #[cfg(any(feature = "once_cell", test))] + let BlockHeaderV2 { + parent_hash, + state_root_hash, + body_hash, + random_bit, + accumulated_seed, + era_end, + timestamp, + era_id, + height, + protocol_version, + block_hash: _, + } = self; + #[cfg(not(any(feature = "once_cell", test)))] + let BlockHeaderV2 { + parent_hash, + state_root_hash, + body_hash, + random_bit, + accumulated_seed, + era_end, + timestamp, + era_id, + height, + protocol_version, + } = self; + *parent_hash == other.parent_hash + && *state_root_hash == other.state_root_hash + && *body_hash == other.body_hash + && *random_bit == other.random_bit + && *accumulated_seed == other.accumulated_seed + && *era_end == other.era_end + && *timestamp == other.timestamp + && *era_id == other.era_id + && *height == other.height + && *protocol_version == other.protocol_version + } +} + +impl Display for BlockHeaderV2 { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "block header #{}, {}, timestamp {}, {}, parent {}, post-state hash {}, body hash {}, \ + random bit {}, protocol version: {}", + self.height, + self.block_hash(), + self.timestamp, + self.era_id, + self.parent_hash.inner(), + self.state_root_hash, + self.body_hash, + self.random_bit, + self.protocol_version, + )?; + if let Some(era_end) = &self.era_end { + write!(formatter, ", era_end: {}", era_end)?; + } + Ok(()) + } +} + +impl ToBytes for BlockHeaderV2 { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.parent_hash.write_bytes(writer)?; + self.state_root_hash.write_bytes(writer)?; + 
self.body_hash.write_bytes(writer)?; + self.random_bit.write_bytes(writer)?; + self.accumulated_seed.write_bytes(writer)?; + self.era_end.write_bytes(writer)?; + self.timestamp.write_bytes(writer)?; + self.era_id.write_bytes(writer)?; + self.height.write_bytes(writer)?; + self.protocol_version.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.parent_hash.serialized_length() + + self.state_root_hash.serialized_length() + + self.body_hash.serialized_length() + + self.random_bit.serialized_length() + + self.accumulated_seed.serialized_length() + + self.era_end.serialized_length() + + self.timestamp.serialized_length() + + self.era_id.serialized_length() + + self.height.serialized_length() + + self.protocol_version.serialized_length() + } +} + +impl FromBytes for BlockHeaderV2 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (parent_hash, remainder) = BlockHash::from_bytes(bytes)?; + let (state_root_hash, remainder) = Digest::from_bytes(remainder)?; + let (body_hash, remainder) = Digest::from_bytes(remainder)?; + let (random_bit, remainder) = bool::from_bytes(remainder)?; + let (accumulated_seed, remainder) = Digest::from_bytes(remainder)?; + let (era_end, remainder) = Option::from_bytes(remainder)?; + let (timestamp, remainder) = Timestamp::from_bytes(remainder)?; + let (era_id, remainder) = EraId::from_bytes(remainder)?; + let (height, remainder) = u64::from_bytes(remainder)?; + let (protocol_version, remainder) = ProtocolVersion::from_bytes(remainder)?; + let block_header = BlockHeaderV2 { + parent_hash, + state_root_hash, + body_hash, + random_bit, + accumulated_seed, + era_end, + timestamp, + era_id, + height, + protocol_version, + #[cfg(any(feature = "once_cell", test))] + block_hash: OnceCell::new(), + }; + Ok((block_header, remainder)) + } +} diff 
--git a/casper_types_ver_2_0/src/block/block_identifier.rs b/casper_types_ver_2_0/src/block/block_identifier.rs new file mode 100644 index 00000000..02508bdd --- /dev/null +++ b/casper_types_ver_2_0/src/block/block_identifier.rs @@ -0,0 +1,138 @@ +use alloc::vec::Vec; +use core::num::ParseIntError; +#[cfg(test)] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(test)] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + BlockHash, Digest, DigestError, +}; + +const HASH_TAG: u8 = 0; +const HEIGHT_TAG: u8 = 1; + +/// Identifier for possible ways to retrieve a block. +#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub enum BlockIdentifier { + /// Identify and retrieve the block with its hash. + Hash(BlockHash), + /// Identify and retrieve the block with its height. 
+ Height(u64), +} + +impl BlockIdentifier { + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..1) { + 0 => Self::Hash(BlockHash::random(rng)), + 1 => Self::Height(rng.gen()), + _ => panic!(), + } + } +} + +impl FromBytes for BlockIdentifier { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + match bytes.split_first() { + Some((&HASH_TAG, rem)) => { + let (hash, rem) = FromBytes::from_bytes(rem)?; + Ok((BlockIdentifier::Hash(hash), rem)) + } + Some((&HEIGHT_TAG, rem)) => { + let (height, rem) = FromBytes::from_bytes(rem)?; + Ok((BlockIdentifier::Height(height), rem)) + } + Some(_) | None => Err(bytesrepr::Error::Formatting), + } + } +} + +impl ToBytes for BlockIdentifier { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + BlockIdentifier::Hash(hash) => { + writer.push(HASH_TAG); + hash.write_bytes(writer)?; + } + BlockIdentifier::Height(height) => { + writer.push(HEIGHT_TAG); + height.write_bytes(writer)?; + } + } + Ok(()) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + BlockIdentifier::Hash(hash) => hash.serialized_length(), + BlockIdentifier::Height(height) => height.serialized_length(), + } + } +} + +impl core::str::FromStr for BlockIdentifier { + type Err = ParseBlockIdentifierError; + + fn from_str(maybe_block_identifier: &str) -> Result { + if maybe_block_identifier.is_empty() { + return Err(ParseBlockIdentifierError::EmptyString); + } + + if maybe_block_identifier.len() == (Digest::LENGTH * 2) { + let hash = Digest::from_hex(maybe_block_identifier) + .map_err(ParseBlockIdentifierError::FromHexError)?; + Ok(BlockIdentifier::Hash(BlockHash::new(hash))) + } else { + let height = maybe_block_identifier + .parse() + 
.map_err(ParseBlockIdentifierError::ParseIntError)?; + Ok(BlockIdentifier::Height(height)) + } + } +} + +/// Represents errors that can arise when parsing a [`BlockIdentifier`]. +#[derive(Debug)] +#[cfg_attr(feature = "std", derive(thiserror::Error))] +pub enum ParseBlockIdentifierError { + /// String was empty. + #[cfg_attr( + feature = "std", + error("Empty string is not a valid block identifier.") + )] + EmptyString, + /// Couldn't parse a height value. + #[cfg_attr(feature = "std", error("Unable to parse height from string. {0}"))] + ParseIntError(ParseIntError), + /// Couldn't parse a blake2bhash. + #[cfg_attr(feature = "std", error("Unable to parse digest from string. {0}"))] + FromHexError(DigestError), +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = BlockIdentifier::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/casper_types_ver_2_0/src/block/block_signatures.rs b/casper_types_ver_2_0/src/block/block_signatures.rs new file mode 100644 index 00000000..63060652 --- /dev/null +++ b/casper_types_ver_2_0/src/block/block_signatures.rs @@ -0,0 +1,248 @@ +use alloc::collections::BTreeMap; +use alloc::vec::Vec; +use core::fmt::{self, Display, Formatter}; +#[cfg(feature = "std")] +use std::error::Error as StdError; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "once_cell", test))] +use once_cell::sync::OnceCell; +#[cfg(any(feature = "std", test))] +use serde::{Deserialize, Serialize}; + +use super::{BlockHash, FinalitySignature}; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + crypto, EraId, PublicKey, Signature, +}; + +/// An error returned during an attempt to merge two incompatible [`BlockSignatures`]. +#[derive(Copy, Clone, Eq, PartialEq, Debug)] +#[non_exhaustive] +pub enum BlockSignaturesMergeError { + /// A mismatch between block hashes. 
+ BlockHashMismatch { + /// The `self` hash. + self_hash: BlockHash, + /// The `other` hash. + other_hash: BlockHash, + }, + /// A mismatch between era IDs. + EraIdMismatch { + /// The `self` era ID. + self_era_id: EraId, + /// The `other` era ID. + other_era_id: EraId, + }, +} + +impl Display for BlockSignaturesMergeError { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + BlockSignaturesMergeError::BlockHashMismatch { + self_hash, + other_hash, + } => { + write!( + formatter, + "mismatch between block hashes while merging block signatures - self: {}, \ + other: {}", + self_hash, other_hash + ) + } + BlockSignaturesMergeError::EraIdMismatch { + self_era_id, + other_era_id, + } => { + write!( + formatter, + "mismatch between era ids while merging block signatures - self: {}, other: \ + {}", + self_era_id, other_era_id + ) + } + } + } +} + +#[cfg(feature = "std")] +impl StdError for BlockSignaturesMergeError {} + +/// A collection of signatures for a single block, along with the associated block's hash and era +/// ID. +#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Debug)] +#[cfg_attr(any(feature = "std", test), derive(Serialize, Deserialize))] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct BlockSignatures { + /// The block hash. + pub(super) block_hash: BlockHash, + /// The era ID in which this block was created. + pub(super) era_id: EraId, + /// The proofs of the block, i.e. a collection of validators' signatures of the block hash. + pub(super) proofs: BTreeMap, +} + +impl BlockSignatures { + /// Constructs a new `BlockSignatures`. + pub fn new(block_hash: BlockHash, era_id: EraId) -> Self { + BlockSignatures { + block_hash, + era_id, + proofs: BTreeMap::new(), + } + } + + /// Returns the block hash of the associated block. + pub fn block_hash(&self) -> &BlockHash { + &self.block_hash + } + + /// Returns the era id of the associated block. 
+ pub fn era_id(&self) -> EraId { + self.era_id + } + + /// Returns the finality signature associated with the given public key, if available. + pub fn finality_signature(&self, public_key: &PublicKey) -> Option { + self.proofs + .get(public_key) + .map(|signature| FinalitySignature { + block_hash: self.block_hash, + era_id: self.era_id, + signature: *signature, + public_key: public_key.clone(), + #[cfg(any(feature = "once_cell", test))] + is_verified: OnceCell::new(), + }) + } + + /// Returns `true` if there is a signature associated with the given public key. + pub fn has_finality_signature(&self, public_key: &PublicKey) -> bool { + self.proofs.contains_key(public_key) + } + + /// Returns an iterator over all the signatures. + pub fn finality_signatures(&self) -> impl Iterator + '_ { + self.proofs + .iter() + .map(move |(public_key, signature)| FinalitySignature { + block_hash: self.block_hash, + era_id: self.era_id, + signature: *signature, + public_key: public_key.clone(), + #[cfg(any(feature = "once_cell", test))] + is_verified: OnceCell::new(), + }) + } + + /// Returns an iterator over all the validator public keys. + pub fn signers(&self) -> impl Iterator + '_ { + self.proofs.keys() + } + + /// Returns the number of signatures in the collection. + pub fn len(&self) -> usize { + self.proofs.len() + } + + /// Returns `true` if there are no signatures in the collection. + pub fn is_empty(&self) -> bool { + self.proofs.is_empty() + } + + /// Inserts a new signature. + pub fn insert_signature(&mut self, finality_signature: FinalitySignature) { + let _ = self + .proofs + .insert(finality_signature.public_key, finality_signature.signature); + } + + /// Merges the collection of signatures in `other` into `self`. + /// + /// Returns an error if the block hashes or era IDs do not match. 
+ pub fn merge(&mut self, mut other: Self) -> Result<(), BlockSignaturesMergeError> { + if self.block_hash != other.block_hash { + return Err(BlockSignaturesMergeError::BlockHashMismatch { + self_hash: self.block_hash, + other_hash: other.block_hash, + }); + } + + if self.era_id != other.era_id { + return Err(BlockSignaturesMergeError::EraIdMismatch { + self_era_id: self.era_id, + other_era_id: other.era_id, + }); + } + + self.proofs.append(&mut other.proofs); + + Ok(()) + } + + /// Returns `Ok` if and only if all the signatures are cryptographically valid. + pub fn is_verified(&self) -> Result<(), crypto::Error> { + for (public_key, signature) in self.proofs.iter() { + let signature = FinalitySignature { + block_hash: self.block_hash, + era_id: self.era_id, + signature: *signature, + public_key: public_key.clone(), + #[cfg(any(feature = "once_cell", test))] + is_verified: OnceCell::new(), + }; + signature.is_verified()?; + } + Ok(()) + } +} + +impl FromBytes for BlockSignatures { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), crate::bytesrepr::Error> { + let (block_hash, bytes) = FromBytes::from_bytes(bytes)?; + let (era_id, bytes) = FromBytes::from_bytes(bytes)?; + let (proofs, bytes) = FromBytes::from_bytes(bytes)?; + Ok(( + BlockSignatures { + block_hash, + era_id, + proofs, + }, + bytes, + )) + } +} + +impl ToBytes for BlockSignatures { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buf = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buf)?; + Ok(buf) + } + + fn write_bytes(&self, bytes: &mut Vec) -> Result<(), crate::bytesrepr::Error> { + self.block_hash.write_bytes(bytes)?; + self.era_id.write_bytes(bytes)?; + self.proofs.write_bytes(bytes)?; + Ok(()) + } + + fn serialized_length(&self) -> usize { + self.block_hash.serialized_length() + + self.era_id.serialized_length() + + self.proofs.serialized_length() + } +} + +impl Display for BlockSignatures { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + 
formatter, + "block signatures for {} in {} with {} proofs", + self.block_hash, + self.era_id, + self.proofs.len() + ) + } +} diff --git a/casper_types_ver_2_0/src/block/block_sync_status.rs b/casper_types_ver_2_0/src/block/block_sync_status.rs new file mode 100644 index 00000000..6c842824 --- /dev/null +++ b/casper_types_ver_2_0/src/block/block_sync_status.rs @@ -0,0 +1,212 @@ +use alloc::{string::String, vec::Vec}; +#[cfg(feature = "json-schema")] +use once_cell::sync::Lazy; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + BlockHash, +}; + +#[cfg(test)] +use rand::Rng; + +#[cfg(test)] +use crate::testing::TestRng; + +#[cfg(feature = "json-schema")] +static BLOCK_SYNCHRONIZER_STATUS: Lazy = Lazy::new(|| { + use crate::Digest; + + BlockSynchronizerStatus::new( + Some(BlockSyncStatus { + block_hash: BlockHash::new( + Digest::from_hex( + "16ddf28e2b3d2e17f4cef36f8b58827eca917af225d139b0c77df3b4a67dc55e", + ) + .unwrap(), + ), + block_height: Some(40), + acquisition_state: "have strict finality(40) for: block hash 16dd..c55e".to_string(), + }), + Some(BlockSyncStatus { + block_hash: BlockHash::new( + Digest::from_hex( + "59907b1e32a9158169c4d89d9ce5ac9164fc31240bfcfb0969227ece06d74983", + ) + .unwrap(), + ), + block_height: Some(6701), + acquisition_state: "have block body(6701) for: block hash 5990..4983".to_string(), + }), + ) +}); + +/// The status of syncing an individual block. +#[derive(Clone, Default, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct BlockSyncStatus { + /// The block hash. + block_hash: BlockHash, + /// The height of the block, if known. + block_height: Option, + /// The state of acquisition of the data associated with the block. + acquisition_state: String, +} + +impl BlockSyncStatus { + /// Constructs a new `BlockSyncStatus`. 
+ pub fn new( + block_hash: BlockHash, + block_height: Option, + acquisition_state: String, + ) -> Self { + Self { + block_hash, + block_height, + acquisition_state, + } + } + + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + Self { + block_hash: BlockHash::random(rng), + block_height: rng.gen::().then_some(rng.gen()), + acquisition_state: rng.random_string(10..20), + } + } +} + +impl ToBytes for BlockSyncStatus { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.block_hash.write_bytes(writer)?; + self.block_height.write_bytes(writer)?; + self.acquisition_state.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + self.block_hash.serialized_length() + + self.block_height.serialized_length() + + self.acquisition_state.serialized_length() + } +} + +impl FromBytes for BlockSyncStatus { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (block_hash, remainder) = BlockHash::from_bytes(bytes)?; + let (block_height, remainder) = Option::::from_bytes(remainder)?; + let (acquisition_state, remainder) = String::from_bytes(remainder)?; + Ok(( + BlockSyncStatus { + block_hash, + block_height, + acquisition_state, + }, + remainder, + )) + } +} + +/// The status of the block synchronizer. +#[derive(Clone, Default, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct BlockSynchronizerStatus { + /// The status of syncing a historical block, if any. + historical: Option, + /// The status of syncing a forward block, if any. + forward: Option, +} + +impl BlockSynchronizerStatus { + /// Constructs a new `BlockSynchronizerStatus`. 
+ pub fn new(historical: Option, forward: Option) -> Self { + Self { + historical, + forward, + } + } + + /// Returns an example `BlockSynchronizerStatus`. + #[cfg(feature = "json-schema")] + pub fn example() -> &'static Self { + &BLOCK_SYNCHRONIZER_STATUS + } + + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + let historical = rng.gen::().then_some(BlockSyncStatus::random(rng)); + let forward = rng.gen::().then_some(BlockSyncStatus::random(rng)); + Self { + historical, + forward, + } + } + + /// Returns status of the historical block sync. + #[cfg(any(feature = "testing", test))] + pub fn historical(&self) -> &Option { + &self.historical + } + + /// Returns status of the forward block sync. + #[cfg(any(feature = "testing", test))] + pub fn forward(&self) -> &Option { + &self.forward + } +} + +impl ToBytes for BlockSynchronizerStatus { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.historical.write_bytes(writer)?; + self.forward.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + self.historical.serialized_length() + self.forward.serialized_length() + } +} + +impl FromBytes for BlockSynchronizerStatus { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (historical, remainder) = Option::::from_bytes(bytes)?; + let (forward, remainder) = Option::::from_bytes(remainder)?; + Ok(( + BlockSynchronizerStatus { + historical, + forward, + }, + remainder, + )) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = BlockSyncStatus::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/casper_types_ver_2_0/src/block/block_v1.rs b/casper_types_ver_2_0/src/block/block_v1.rs new 
file mode 100644 index 00000000..9592be34 --- /dev/null +++ b/casper_types_ver_2_0/src/block/block_v1.rs @@ -0,0 +1,367 @@ +#[cfg(any(all(feature = "std", feature = "testing"), test))] +use alloc::collections::BTreeMap; +use alloc::{boxed::Box, vec::Vec}; +use core::fmt::{self, Display, Formatter}; +#[cfg(any(all(feature = "std", feature = "testing"), test))] +use core::iter; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +#[cfg(any(feature = "std", test))] +use serde::{Deserialize, Serialize}; + +#[cfg(any(feature = "once_cell", test))] +use once_cell::sync::OnceCell; +#[cfg(any(all(feature = "std", feature = "testing"), test))] +use rand::Rng; + +#[cfg(any(all(feature = "std", feature = "testing"), test))] +use crate::U512; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + Block, BlockBodyV1, BlockHash, BlockHeaderV1, BlockValidationError, DeployHash, Digest, + EraEndV1, EraId, ProtocolVersion, PublicKey, Timestamp, +}; +#[cfg(any(all(feature = "std", feature = "testing"), test))] +use crate::{testing::TestRng, EraReport}; + +/// A block after execution, with the resulting global state root hash. This is the core component +/// of the Casper linear blockchain. Version 1. +#[cfg_attr(any(feature = "std", test), derive(Serialize, Deserialize))] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[derive(Clone, Eq, PartialEq, Debug)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct BlockV1 { + /// The block hash identifying this block. + pub(super) hash: BlockHash, + /// The header portion of the block. + pub(super) header: BlockHeaderV1, + /// The body portion of the block. + pub(super) body: BlockBodyV1, +} + +impl BlockV1 { + // This method is not intended to be used by third party crates. 
+ #[doc(hidden)] + #[allow(clippy::too_many_arguments)] + pub fn new( + parent_hash: BlockHash, + parent_seed: Digest, + state_root_hash: Digest, + random_bit: bool, + era_end: Option, + timestamp: Timestamp, + era_id: EraId, + height: u64, + protocol_version: ProtocolVersion, + proposer: PublicKey, + deploy_hashes: Vec, + transfer_hashes: Vec, + ) -> Self { + let body = BlockBodyV1::new(proposer, deploy_hashes, transfer_hashes); + let body_hash = body.hash(); + let accumulated_seed = Digest::hash_pair(parent_seed, [random_bit as u8]); + let header = BlockHeaderV1::new( + parent_hash, + state_root_hash, + body_hash, + random_bit, + accumulated_seed, + era_end, + timestamp, + era_id, + height, + protocol_version, + #[cfg(any(feature = "once_cell", test))] + OnceCell::new(), + ); + Self::new_from_header_and_body(header, body) + } + + // This method is not intended to be used by third party crates. + #[doc(hidden)] + pub fn new_from_header_and_body(header: BlockHeaderV1, body: BlockBodyV1) -> Self { + let hash = header.block_hash(); + BlockV1 { hash, header, body } + } + + /// Returns the `BlockHash` identifying this block. + pub fn hash(&self) -> &BlockHash { + &self.hash + } + + /// Returns the block's header. + pub fn header(&self) -> &BlockHeaderV1 { + &self.header + } + + /// Returns the block's header, consuming `self`. + pub fn take_header(self) -> BlockHeaderV1 { + self.header + } + + /// Returns the block's body. + pub fn body(&self) -> &BlockBodyV1 { + &self.body + } + + /// Returns the parent block's hash. + pub fn parent_hash(&self) -> &BlockHash { + self.header.parent_hash() + } + + /// Returns the root hash of global state after the deploys in this block have been executed. + pub fn state_root_hash(&self) -> &Digest { + self.header.state_root_hash() + } + + /// Returns the hash of the block's body. + pub fn body_hash(&self) -> &Digest { + self.header.body_hash() + } + + /// Returns a random bit needed for initializing a future era. 
+ pub fn random_bit(&self) -> bool { + self.header.random_bit() + } + + /// Returns a seed needed for initializing a future era. + pub fn accumulated_seed(&self) -> &Digest { + self.header.accumulated_seed() + } + + /// Returns the `EraEnd` of a block if it is a switch block. + pub fn era_end(&self) -> Option<&EraEndV1> { + self.header.era_end() + } + + /// Returns the timestamp from when the block was proposed. + pub fn timestamp(&self) -> Timestamp { + self.header.timestamp() + } + + /// Returns the era ID in which this block was created. + pub fn era_id(&self) -> EraId { + self.header.era_id() + } + + /// Returns the height of this block, i.e. the number of ancestors. + pub fn height(&self) -> u64 { + self.header.height() + } + + /// Returns the protocol version of the network from when this block was created. + pub fn protocol_version(&self) -> ProtocolVersion { + self.header.protocol_version() + } + + /// Returns `true` if this block is the last one in the current era. + pub fn is_switch_block(&self) -> bool { + self.header.is_switch_block() + } + + /// Returns `true` if this block is the Genesis block, i.e. has height 0 and era 0. + pub fn is_genesis(&self) -> bool { + self.header.is_genesis() + } + + /// Returns the public key of the validator which proposed the block. + pub fn proposer(&self) -> &PublicKey { + self.body.proposer() + } + + /// Returns the deploy hashes within the block. + pub fn deploy_hashes(&self) -> &[DeployHash] { + self.body.deploy_hashes() + } + + /// Returns the transfer hashes within the block. + pub fn transfer_hashes(&self) -> &[DeployHash] { + self.body.transfer_hashes() + } + + /// Returns the deploy and transfer hashes in the order in which they were executed. 
+ pub fn deploy_and_transfer_hashes(&self) -> impl Iterator { + self.deploy_hashes() + .iter() + .chain(self.transfer_hashes().iter()) + } + + /// Returns `Ok` if and only if the block's provided block hash and body hash are identical to + /// those generated by hashing the appropriate input data. + pub fn verify(&self) -> Result<(), BlockValidationError> { + let actual_block_header_hash = self.header().block_hash(); + if *self.hash() != actual_block_header_hash { + return Err(BlockValidationError::UnexpectedBlockHash { + block: Box::new(Block::V1(self.clone())), + actual_block_hash: actual_block_header_hash, + }); + } + + let actual_block_body_hash = self.body.hash(); + if *self.header.body_hash() != actual_block_body_hash { + return Err(BlockValidationError::UnexpectedBodyHash { + block: Box::new(Block::V1(self.clone())), + actual_block_body_hash, + }); + } + + Ok(()) + } + + /// Returns a random block, but using the provided values. + /// + /// If `deploy_hashes_iter` is empty, a few random deploy hashes will be added to the + /// `deploy_hashes` and `transfer_hashes` fields of the body. Otherwise, the provided deploy + /// hashes will populate the `deploy_hashes` field and `transfer_hashes` will be empty. 
+ #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_with_specifics>( + rng: &mut TestRng, + era_id: EraId, + height: u64, + protocol_version: ProtocolVersion, + is_switch: bool, + deploy_hashes_iter: I, + ) -> Self { + let parent_hash = BlockHash::random(rng); + let parent_seed = Digest::random(rng); + let state_root_hash = Digest::random(rng); + let random_bit = rng.gen(); + let era_end = is_switch.then(|| { + let mut next_era_validator_weights = BTreeMap::new(); + for i in 1_u64..6 { + let _ = next_era_validator_weights.insert(PublicKey::random(rng), U512::from(i)); + } + EraEndV1::new(EraReport::random(rng), next_era_validator_weights) + }); + let timestamp = Timestamp::now(); + let proposer = PublicKey::random(rng); + let mut deploy_hashes: Vec = deploy_hashes_iter.into_iter().collect(); + let mut transfer_hashes: Vec = vec![]; + if deploy_hashes.is_empty() { + let count = rng.gen_range(0..6); + deploy_hashes = iter::repeat_with(|| DeployHash::random(rng)) + .take(count) + .collect(); + let count = rng.gen_range(0..6); + transfer_hashes = iter::repeat_with(|| DeployHash::random(rng)) + .take(count) + .collect(); + } + + BlockV1::new( + parent_hash, + parent_seed, + state_root_hash, + random_bit, + era_end, + timestamp, + era_id, + height, + protocol_version, + proposer, + deploy_hashes, + transfer_hashes, + ) + } +} + +impl Display for BlockV1 { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!( + formatter, + "executed block #{}, {}, timestamp {}, {}, parent {}, post-state hash {}, body hash \ + {}, random bit {}, protocol version: {}", + self.height(), + self.hash(), + self.timestamp(), + self.era_id(), + self.parent_hash().inner(), + self.state_root_hash(), + self.body_hash(), + self.random_bit(), + self.protocol_version() + )?; + if let Some(era_end) = self.era_end() { + write!(formatter, ", era_end: {}", era_end)?; + } + Ok(()) + } +} + +impl ToBytes for BlockV1 { + fn write_bytes(&self, writer: &mut Vec) 
-> Result<(), bytesrepr::Error> { + self.hash.write_bytes(writer)?; + self.header.write_bytes(writer)?; + self.body.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.hash.serialized_length() + + self.header.serialized_length() + + self.body.serialized_length() + } +} + +impl FromBytes for BlockV1 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (hash, remainder) = BlockHash::from_bytes(bytes)?; + let (header, remainder) = BlockHeaderV1::from_bytes(remainder)?; + let (body, remainder) = BlockBodyV1::from_bytes(remainder)?; + let block = BlockV1 { hash, header, body }; + Ok((block, remainder)) + } +} + +#[cfg(test)] +mod tests { + use crate::{Block, TestBlockV1Builder}; + + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let block = TestBlockV1Builder::new().build(rng); + bytesrepr::test_serialization_roundtrip(&block); + } + + #[test] + fn block_check_bad_body_hash_sad_path() { + let rng = &mut TestRng::new(); + + let mut block = TestBlockV1Builder::new().build(rng); + let bogus_block_body_hash = Digest::hash([0xde, 0xad, 0xbe, 0xef]); + block.header.set_body_hash(bogus_block_body_hash); + block.hash = block.header.block_hash(); + + let expected_error = BlockValidationError::UnexpectedBodyHash { + block: Box::new(Block::V1(block.clone())), + actual_block_body_hash: block.body.hash(), + }; + assert_eq!(block.verify(), Err(expected_error)); + } + + #[test] + fn block_check_bad_block_hash_sad_path() { + let rng = &mut TestRng::new(); + + let mut block = TestBlockV1Builder::new().build(rng); + let bogus_block_hash = BlockHash::from(Digest::hash([0xde, 0xad, 0xbe, 0xef])); + block.hash = bogus_block_hash; + + let expected_error = BlockValidationError::UnexpectedBlockHash { + block: Box::new(Block::V1(block.clone())), 
+ actual_block_hash: block.header.block_hash(), + }; + assert_eq!(block.verify(), Err(expected_error)); + } +} diff --git a/casper_types_ver_2_0/src/block/block_v2.rs b/casper_types_ver_2_0/src/block/block_v2.rs new file mode 100644 index 00000000..c80f9213 --- /dev/null +++ b/casper_types_ver_2_0/src/block/block_v2.rs @@ -0,0 +1,411 @@ +use alloc::{boxed::Box, vec::Vec}; + +use core::{ + convert::TryFrom, + fmt::{self, Display, Formatter}, +}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use once_cell::sync::Lazy; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +#[cfg(any(feature = "std", test))] +use serde::{Deserialize, Serialize}; + +#[cfg(any(feature = "once_cell", test))] +use once_cell::sync::OnceCell; + +use super::{Block, BlockBodyV2, BlockConversionError, RewardedSignatures}; +#[cfg(any(all(feature = "std", feature = "testing"), test))] +use crate::testing::TestRng; +#[cfg(feature = "json-schema")] +use crate::TransactionV1Hash; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + BlockHash, BlockHeaderV2, BlockValidationError, Digest, EraEndV2, EraId, ProtocolVersion, + PublicKey, Timestamp, TransactionHash, +}; + +#[cfg(feature = "json-schema")] +static BLOCK_V2: Lazy = Lazy::new(|| { + let parent_hash = BlockHash::new(Digest::from([7; Digest::LENGTH])); + let parent_seed = Digest::from([9; Digest::LENGTH]); + let state_root_hash = Digest::from([8; Digest::LENGTH]); + let random_bit = true; + let era_end = Some(EraEndV2::example().clone()); + let timestamp = *Timestamp::example(); + let era_id = EraId::from(1); + let height = 10; + let protocol_version = ProtocolVersion::V1_0_0; + let secret_key = crate::SecretKey::example(); + let proposer = PublicKey::from(secret_key); + let transfer_hashes = vec![TransactionHash::V1(TransactionV1Hash::new(Digest::from( + [20; Digest::LENGTH], + )))]; + let non_transfer_native_hashes = vec![TransactionHash::V1(TransactionV1Hash::new( + Digest::from([21; 
Digest::LENGTH]), + ))]; + let installer_upgrader_hashes = vec![TransactionHash::V1(TransactionV1Hash::new( + Digest::from([22; Digest::LENGTH]), + ))]; + let other_hashes = vec![TransactionHash::V1(TransactionV1Hash::new(Digest::from( + [23; Digest::LENGTH], + )))]; + let rewarded_signatures = RewardedSignatures::default(); + BlockV2::new( + parent_hash, + parent_seed, + state_root_hash, + random_bit, + era_end, + timestamp, + era_id, + height, + protocol_version, + proposer, + transfer_hashes, + non_transfer_native_hashes, + installer_upgrader_hashes, + other_hashes, + rewarded_signatures, + ) +}); + +/// A block after execution, with the resulting global state root hash. This is the core component +/// of the Casper linear blockchain. Version 2. +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(any(feature = "std", test), derive(Serialize, Deserialize))] +#[derive(Clone, Debug, PartialEq, Eq)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct BlockV2 { + /// The block hash identifying this block. + pub(super) hash: BlockHash, + /// The header portion of the block. + pub(super) header: BlockHeaderV2, + /// The body portion of the block. + pub(super) body: BlockBodyV2, +} + +impl BlockV2 { + // This method is not intended to be used by third party crates. 
+ #[doc(hidden)] + #[allow(clippy::too_many_arguments)] + pub fn new( + parent_hash: BlockHash, + parent_seed: Digest, + state_root_hash: Digest, + random_bit: bool, + era_end: Option, + timestamp: Timestamp, + era_id: EraId, + height: u64, + protocol_version: ProtocolVersion, + proposer: PublicKey, + transfer: Vec, + staking: Vec, + install_upgrade: Vec, + standard: Vec, + rewarded_signatures: RewardedSignatures, + ) -> Self { + let body = BlockBodyV2::new( + proposer, + transfer, + staking, + install_upgrade, + standard, + rewarded_signatures, + ); + let body_hash = body.hash(); + let accumulated_seed = Digest::hash_pair(parent_seed, [random_bit as u8]); + let header = BlockHeaderV2::new( + parent_hash, + state_root_hash, + body_hash, + random_bit, + accumulated_seed, + era_end, + timestamp, + era_id, + height, + protocol_version, + #[cfg(any(feature = "once_cell", test))] + OnceCell::new(), + ); + Self::new_from_header_and_body(header, body) + } + + // This method is not intended to be used by third party crates. + #[doc(hidden)] + pub fn new_from_header_and_body(header: BlockHeaderV2, body: BlockBodyV2) -> Self { + let hash = header.block_hash(); + BlockV2 { hash, header, body } + } + + /// Returns the `BlockHash` identifying this block. + pub fn hash(&self) -> &BlockHash { + &self.hash + } + + /// Returns the block's header. + pub fn header(&self) -> &BlockHeaderV2 { + &self.header + } + + /// Returns the block's header, consuming `self`. + pub fn take_header(self) -> BlockHeaderV2 { + self.header + } + + /// Returns the block's body. + pub fn body(&self) -> &BlockBodyV2 { + &self.body + } + + /// Returns the parent block's hash. + pub fn parent_hash(&self) -> &BlockHash { + self.header.parent_hash() + } + + /// Returns the root hash of global state after the deploys in this block have been executed. + pub fn state_root_hash(&self) -> &Digest { + self.header.state_root_hash() + } + + /// Returns the hash of the block's body. 
+ pub fn body_hash(&self) -> &Digest { + self.header.body_hash() + } + + /// Returns a random bit needed for initializing a future era. + pub fn random_bit(&self) -> bool { + self.header.random_bit() + } + + /// Returns a seed needed for initializing a future era. + pub fn accumulated_seed(&self) -> &Digest { + self.header.accumulated_seed() + } + + /// Returns the `EraEnd` of a block if it is a switch block. + pub fn era_end(&self) -> Option<&EraEndV2> { + self.header.era_end() + } + + /// Returns the timestamp from when the block was proposed. + pub fn timestamp(&self) -> Timestamp { + self.header.timestamp() + } + + /// Returns the era ID in which this block was created. + pub fn era_id(&self) -> EraId { + self.header.era_id() + } + + /// Returns the height of this block, i.e. the number of ancestors. + pub fn height(&self) -> u64 { + self.header.height() + } + + /// Returns the protocol version of the network from when this block was created. + pub fn protocol_version(&self) -> ProtocolVersion { + self.header.protocol_version() + } + + /// Returns `true` if this block is the last one in the current era. + pub fn is_switch_block(&self) -> bool { + self.header.is_switch_block() + } + + /// Returns `true` if this block is the Genesis block, i.e. has height 0 and era 0. + pub fn is_genesis(&self) -> bool { + self.header.is_genesis() + } + + /// Returns the public key of the validator which proposed the block. + pub fn proposer(&self) -> &PublicKey { + self.body.proposer() + } + + /// List of identifiers for finality signatures for a particular past block. + pub fn rewarded_signatures(&self) -> &RewardedSignatures { + self.body.rewarded_signatures() + } + + /// Returns the hashes of the transfer transactions within the block. + pub fn transfer(&self) -> impl Iterator { + self.body.transfer() + } + + /// Returns the hashes of the non-transfer, native transactions within the block. 
+ pub fn staking(&self) -> impl Iterator { + self.body.staking() + } + + /// Returns the hashes of the installer/upgrader transactions within the block. + pub fn install_upgrade(&self) -> impl Iterator { + self.body.install_upgrade() + } + + /// Returns the hashes of all other transactions within the block. + pub fn standard(&self) -> impl Iterator { + self.body.standard() + } + + /// Returns all of the transaction hashes in the order in which they were executed. + pub fn all_transactions(&self) -> impl Iterator { + self.body.all_transactions() + } + + /// Returns `Ok` if and only if the block's provided block hash and body hash are identical to + /// those generated by hashing the appropriate input data. + pub fn verify(&self) -> Result<(), BlockValidationError> { + let actual_block_header_hash = self.header().block_hash(); + if *self.hash() != actual_block_header_hash { + return Err(BlockValidationError::UnexpectedBlockHash { + block: Box::new(Block::V2(self.clone())), + actual_block_hash: actual_block_header_hash, + }); + } + + let actual_block_body_hash = self.body.hash(); + if *self.header.body_hash() != actual_block_body_hash { + return Err(BlockValidationError::UnexpectedBodyHash { + block: Box::new(Block::V2(self.clone())), + actual_block_body_hash, + }); + } + + Ok(()) + } + + // This method is not intended to be used by third party crates. + #[doc(hidden)] + #[cfg(feature = "json-schema")] + pub fn example() -> &'static Self { + &BLOCK_V2 + } + + /// Makes the block invalid, for testing purpose. 
+ #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn make_invalid(self, rng: &mut TestRng) -> Self { + let block = BlockV2 { + hash: BlockHash::random(rng), + ..self + }; + + assert!(block.verify().is_err()); + block + } +} + +impl Display for BlockV2 { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!( + formatter, + "executed block #{}, {}, timestamp {}, {}, parent {}, post-state hash {}, body hash \ + {}, random bit {}, protocol version: {}", + self.height(), + self.hash(), + self.timestamp(), + self.era_id(), + self.parent_hash().inner(), + self.state_root_hash(), + self.body_hash(), + self.random_bit(), + self.protocol_version() + )?; + if let Some(era_end) = self.era_end() { + write!(formatter, ", era_end: {}", era_end)?; + } + Ok(()) + } +} + +impl ToBytes for BlockV2 { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.hash.write_bytes(writer)?; + self.header.write_bytes(writer)?; + self.body.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.hash.serialized_length() + + self.header.serialized_length() + + self.body.serialized_length() + } +} + +impl FromBytes for BlockV2 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (hash, remainder) = BlockHash::from_bytes(bytes)?; + let (header, remainder) = BlockHeaderV2::from_bytes(remainder)?; + let (body, remainder) = BlockBodyV2::from_bytes(remainder)?; + let block = BlockV2 { hash, header, body }; + Ok((block, remainder)) + } +} + +impl TryFrom for BlockV2 { + type Error = BlockConversionError; + + fn try_from(value: Block) -> Result { + match value { + Block::V2(v2) => Ok(v2), + _ => Err(BlockConversionError::DifferentVersion { + expected_version: 2, + }), + } + } +} + +#[cfg(test)] +mod tests { + use crate::TestBlockBuilder; 
+ + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let block = TestBlockBuilder::new().build(rng); + bytesrepr::test_serialization_roundtrip(&block); + } + + #[test] + fn block_check_bad_body_hash_sad_path() { + let rng = &mut TestRng::new(); + + let mut block = TestBlockBuilder::new().build(rng); + let bogus_block_body_hash = Digest::hash([0xde, 0xad, 0xbe, 0xef]); + block.header.set_body_hash(bogus_block_body_hash); + block.hash = block.header.block_hash(); + + let expected_error = BlockValidationError::UnexpectedBodyHash { + block: Box::new(Block::V2(block.clone())), + actual_block_body_hash: block.body.hash(), + }; + assert_eq!(block.verify(), Err(expected_error)); + } + + #[test] + fn block_check_bad_block_hash_sad_path() { + let rng = &mut TestRng::new(); + + let mut block = TestBlockBuilder::new().build(rng); + let bogus_block_hash = BlockHash::from(Digest::hash([0xde, 0xad, 0xbe, 0xef])); + block.hash = bogus_block_hash; + + let expected_error = BlockValidationError::UnexpectedBlockHash { + block: Box::new(Block::V2(block.clone())), + actual_block_hash: block.header.block_hash(), + }; + assert_eq!(block.verify(), Err(expected_error)); + } +} diff --git a/casper_types_ver_2_0/src/block/era_end.rs b/casper_types_ver_2_0/src/block/era_end.rs new file mode 100644 index 00000000..0dcc8813 --- /dev/null +++ b/casper_types_ver_2_0/src/block/era_end.rs @@ -0,0 +1,133 @@ +mod era_end_v1; +mod era_end_v2; + +use alloc::{collections::BTreeMap, vec::Vec}; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + PublicKey, Rewards, U512, +}; +pub use era_end_v1::{EraEndV1, EraReport}; +pub use era_end_v2::EraEndV2; + +const TAG_LENGTH: usize = U8_SERIALIZED_LENGTH; + +/// Tag for block body v1. +pub const ERA_END_V1_TAG: u8 = 0; +/// Tag for block body v2. 
+pub const ERA_END_V2_TAG: u8 = 1; + +/// The versioned era end of a block, storing the data for a switch block. +/// It encapsulates different variants of the EraEnd struct. +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(any(feature = "testing", test), derive(PartialEq))] +#[derive(Clone, Hash, Serialize, Deserialize, Debug)] +pub enum EraEnd { + /// The legacy, initial version of the body portion of a block. + V1(EraEndV1), + /// The version 2 of the body portion of a block, which includes the + /// `past_finality_signatures`. + V2(EraEndV2), +} + +impl EraEnd { + /// Retrieves the deploy hashes within the block. + pub fn equivocators(&self) -> &[PublicKey] { + match self { + EraEnd::V1(v1) => v1.equivocators(), + EraEnd::V2(v2) => v2.equivocators(), + } + } + + /// Retrieves the transfer hashes within the block. + pub fn inactive_validators(&self) -> &[PublicKey] { + match self { + EraEnd::V1(v1) => v1.inactive_validators(), + EraEnd::V2(v2) => v2.inactive_validators(), + } + } + + /// Returns the deploy and transfer hashes in the order in which they were executed. + pub fn next_era_validator_weights(&self) -> &BTreeMap { + match self { + EraEnd::V1(v1) => v1.next_era_validator_weights(), + EraEnd::V2(v2) => v2.next_era_validator_weights(), + } + } + + /// Returns the deploy and transfer hashes in the order in which they were executed. 
+ pub fn rewards(&self) -> Rewards { + match self { + EraEnd::V1(v1) => Rewards::V1(v1.rewards()), + EraEnd::V2(v2) => Rewards::V2(v2.rewards()), + } + } +} + +impl Display for EraEnd { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + EraEnd::V1(v1) => Display::fmt(&v1, formatter), + EraEnd::V2(v2) => Display::fmt(&v2, formatter), + } + } +} + +impl From for EraEnd { + fn from(era_end: EraEndV1) -> Self { + EraEnd::V1(era_end) + } +} + +impl From for EraEnd { + fn from(era_end: EraEndV2) -> Self { + EraEnd::V2(era_end) + } +} + +impl ToBytes for EraEnd { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + match self { + EraEnd::V1(v1) => { + buffer.insert(0, ERA_END_V1_TAG); + buffer.extend(v1.to_bytes()?); + } + EraEnd::V2(v2) => { + buffer.insert(0, ERA_END_V2_TAG); + buffer.extend(v2.to_bytes()?); + } + } + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + TAG_LENGTH + + match self { + EraEnd::V1(v1) => v1.serialized_length(), + EraEnd::V2(v2) => v2.serialized_length(), + } + } +} + +impl FromBytes for EraEnd { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + ERA_END_V1_TAG => { + let (body, remainder): (EraEndV1, _) = FromBytes::from_bytes(remainder)?; + Ok((Self::V1(body), remainder)) + } + ERA_END_V2_TAG => { + let (body, remainder): (EraEndV2, _) = FromBytes::from_bytes(remainder)?; + Ok((Self::V2(body), remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} diff --git a/casper_types_ver_2_0/src/block/era_end/era_end_v1.rs b/casper_types_ver_2_0/src/block/era_end/era_end_v1.rs new file mode 100644 index 00000000..ac89e7f3 --- /dev/null +++ b/casper_types_ver_2_0/src/block/era_end/era_end_v1.rs @@ -0,0 +1,163 @@ +mod era_report; + +use alloc::{collections::BTreeMap, vec::Vec}; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use 
datasize::DataSize; +#[cfg(feature = "json-schema")] +use once_cell::sync::Lazy; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +#[cfg(feature = "json-schema")] +use serde_map_to_array::KeyValueJsonSchema; +use serde_map_to_array::{BTreeMapToArray, KeyValueLabels}; + +#[cfg(feature = "json-schema")] +use crate::SecretKey; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + PublicKey, U512, +}; +pub use era_report::EraReport; + +#[cfg(feature = "json-schema")] +static ERA_END_V1: Lazy = Lazy::new(|| { + let secret_key_1 = SecretKey::ed25519_from_bytes([0; 32]).unwrap(); + let public_key_1 = PublicKey::from(&secret_key_1); + let next_era_validator_weights = { + let mut next_era_validator_weights: BTreeMap = BTreeMap::new(); + next_era_validator_weights.insert(public_key_1, U512::from(123)); + next_era_validator_weights.insert( + PublicKey::from( + &SecretKey::ed25519_from_bytes([5u8; SecretKey::ED25519_LENGTH]).unwrap(), + ), + U512::from(456), + ); + next_era_validator_weights.insert( + PublicKey::from( + &SecretKey::ed25519_from_bytes([6u8; SecretKey::ED25519_LENGTH]).unwrap(), + ), + U512::from(789), + ); + next_era_validator_weights + }; + + let era_report = EraReport::example().clone(); + EraEndV1::new(era_report, next_era_validator_weights) +}); + +/// Information related to the end of an era, and validator weights for the following era. +#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct EraEndV1 { + /// Equivocation, reward and validator inactivity information. + pub(super) era_report: EraReport, + /// The validators for the upcoming era and their respective weights. + #[serde(with = "BTreeMapToArray::")] + pub(super) next_era_validator_weights: BTreeMap, +} + +impl EraEndV1 { + /// Returns equivocation, reward and validator inactivity information. 
+    pub fn era_report(&self) -> &EraReport<PublicKey> {
+        &self.era_report
+    }
+
+    /// Returns the set of equivocators.
+    pub fn equivocators(&self) -> &[PublicKey] {
+        self.era_report.equivocators()
+    }
+
+    /// Returns the validators that haven't produced any unit during the era.
+    pub fn inactive_validators(&self) -> &[PublicKey] {
+        self.era_report.inactive_validators()
+    }
+
+    /// Returns the rewards for finalization of earlier blocks.
+    pub fn rewards(&self) -> &BTreeMap<PublicKey, u64> {
+        self.era_report.rewards()
+    }
+
+    /// Returns the validators for the upcoming era and their respective weights.
+    pub fn next_era_validator_weights(&self) -> &BTreeMap<PublicKey, U512> {
+        &self.next_era_validator_weights
+    }
+
+    // This method is not intended to be used by third party crates.
+    #[doc(hidden)]
+    pub fn new(
+        era_report: EraReport<PublicKey>,
+        next_era_validator_weights: BTreeMap<PublicKey, U512>,
+    ) -> Self {
+        EraEndV1 {
+            era_report,
+            next_era_validator_weights,
+        }
+    }
+
+    // This method is not intended to be used by third party crates.
+    #[doc(hidden)]
+    #[cfg(feature = "json-schema")]
+    pub fn example() -> &'static Self {
+        &ERA_END_V1
+    }
+}
+
+impl ToBytes for EraEndV1 {
+    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {
+        self.era_report.write_bytes(writer)?;
+        self.next_era_validator_weights.write_bytes(writer)?;
+
+        Ok(())
+    }
+
+    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {
+        let mut buffer = bytesrepr::allocate_buffer(self)?;
+        self.write_bytes(&mut buffer)?;
+        Ok(buffer)
+    }
+
+    fn serialized_length(&self) -> usize {
+        self.era_report.serialized_length() + self.next_era_validator_weights.serialized_length()
+    }
+}
+
+impl FromBytes for EraEndV1 {
+    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {
+        let (era_report, remainder) = EraReport::<PublicKey>::from_bytes(bytes)?;
+        let (next_era_validator_weights, remainder) =
+            BTreeMap::<PublicKey, U512>::from_bytes(remainder)?;
+        let era_end = EraEndV1 {
+            era_report,
+            next_era_validator_weights,
+        };
+        Ok((era_end, remainder))
+    }
+}
+
+impl Display for
EraEndV1 { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!(formatter, "era end: {} ", self.era_report) + } +} + +struct NextEraValidatorLabels; + +impl KeyValueLabels for NextEraValidatorLabels { + const KEY: &'static str = "validator"; + const VALUE: &'static str = "weight"; +} + +#[cfg(feature = "json-schema")] +impl KeyValueJsonSchema for NextEraValidatorLabels { + const JSON_SCHEMA_KV_NAME: Option<&'static str> = Some("ValidatorWeight"); + const JSON_SCHEMA_KV_DESCRIPTION: Option<&'static str> = Some( + "A validator's public key paired with its weight, i.e. the total number of \ + motes staked by it and its delegators.", + ); + const JSON_SCHEMA_KEY_DESCRIPTION: Option<&'static str> = Some("The validator's public key."); + const JSON_SCHEMA_VALUE_DESCRIPTION: Option<&'static str> = Some("The validator's weight."); +} diff --git a/casper_types_ver_2_0/src/block/era_end/era_end_v1/era_report.rs b/casper_types_ver_2_0/src/block/era_end/era_end_v1/era_report.rs new file mode 100644 index 00000000..af63359e --- /dev/null +++ b/casper_types_ver_2_0/src/block/era_end/era_end_v1/era_report.rs @@ -0,0 +1,252 @@ +use alloc::{collections::BTreeMap, vec::Vec}; +use core::fmt::{self, Display, Formatter}; +#[cfg(any(feature = "testing", test))] +use core::iter; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use once_cell::sync::Lazy; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +#[cfg(feature = "json-schema")] +use serde_map_to_array::KeyValueJsonSchema; +use serde_map_to_array::{BTreeMapToArray, KeyValueLabels}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +#[cfg(feature = "json-schema")] +use crate::SecretKey; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + Digest, DisplayIter, PublicKey, +}; + +#[cfg(feature = "json-schema")] +static ERA_REPORT: Lazy> = Lazy::new(|| { + let secret_key_1 = 
SecretKey::ed25519_from_bytes([0; 32]).unwrap(); + let public_key_1 = PublicKey::from(&secret_key_1); + let equivocators = vec![public_key_1]; + + let secret_key_3 = SecretKey::ed25519_from_bytes([2; 32]).unwrap(); + let public_key_3 = PublicKey::from(&secret_key_3); + let inactive_validators = vec![public_key_3]; + + let rewards = BTreeMap::new(); + + EraReport { + equivocators, + rewards, + inactive_validators, + } +}); + +/// Equivocation, reward and validator inactivity information. +/// +/// `VID` represents validator ID type, generally [`PublicKey`]. +#[derive(Clone, Debug, PartialOrd, Ord, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(bound( + serialize = "VID: Ord + Serialize", + deserialize = "VID: Ord + Deserialize<'de>", +))] +#[cfg_attr( + feature = "json-schema", + schemars(description = "Equivocation, reward and validator inactivity information.") +)] +pub struct EraReport { + /// The set of equivocators. + pub(super) equivocators: Vec, + /// Rewards for finalization of earlier blocks. + #[serde(with = "BTreeMapToArray::")] + pub(super) rewards: BTreeMap, + /// Validators that haven't produced any unit during the era. + pub(super) inactive_validators: Vec, +} + +impl EraReport { + /// Constructs a new `EraReport`. + pub fn new( + equivocators: Vec, + rewards: BTreeMap, + inactive_validators: Vec, + ) -> Self { + EraReport { + equivocators, + rewards, + inactive_validators, + } + } + + /// Returns the set of equivocators. + pub fn equivocators(&self) -> &[VID] { + &self.equivocators + } + + /// Returns rewards for finalization of earlier blocks. + /// + /// This is a measure of the value of each validator's contribution to consensus, in + /// fractions of the configured maximum block reward. + pub fn rewards(&self) -> &BTreeMap { + &self.rewards + } + + /// Returns validators that haven't produced any unit during the era. 
+ pub fn inactive_validators(&self) -> &[VID] { + &self.inactive_validators + } + + /// Returns a cryptographic hash of the `EraReport`. + pub fn hash(&self) -> Digest + where + VID: ToBytes, + { + // Helper function to hash slice of validators + fn hash_slice_of_validators(slice_of_validators: &[VID]) -> Digest + where + VID: ToBytes, + { + Digest::hash_merkle_tree(slice_of_validators.iter().map(|validator| { + Digest::hash(validator.to_bytes().expect("Could not serialize validator")) + })) + } + + // Pattern match here leverages compiler to ensure every field is accounted for + let EraReport { + equivocators, + inactive_validators, + rewards, + } = self; + + let hashed_equivocators = hash_slice_of_validators(equivocators); + let hashed_inactive_validators = hash_slice_of_validators(inactive_validators); + let hashed_rewards = Digest::hash_btree_map(rewards).expect("Could not hash rewards"); + + Digest::hash_slice_rfold(&[ + hashed_equivocators, + hashed_rewards, + hashed_inactive_validators, + ]) + } +} + +impl Default for EraReport { + fn default() -> Self { + EraReport { + equivocators: vec![], + rewards: BTreeMap::new(), + inactive_validators: vec![], + } + } +} + +impl Display for EraReport { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + let slashings = DisplayIter::new(&self.equivocators); + let rewards = DisplayIter::new( + self.rewards + .iter() + .map(|(public_key, amount)| format!("{}: {}", public_key, amount)), + ); + write!(f, "era end: slash {}, reward {}", slashings, rewards) + } +} + +impl ToBytes for EraReport { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.equivocators.write_bytes(writer)?; + self.rewards.write_bytes(writer)?; + self.inactive_validators.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + 
self.equivocators.serialized_length() + + self.rewards.serialized_length() + + self.inactive_validators.serialized_length() + } +} + +impl FromBytes for EraReport { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (equivocators, remainder) = Vec::::from_bytes(bytes)?; + let (rewards, remainder) = BTreeMap::::from_bytes(remainder)?; + let (inactive_validators, remainder) = Vec::::from_bytes(remainder)?; + let era_report = EraReport { + equivocators, + rewards, + inactive_validators, + }; + Ok((era_report, remainder)) + } +} + +impl EraReport { + // This method is not intended to be used by third party crates. + #[doc(hidden)] + #[cfg(feature = "json-schema")] + pub fn example() -> &'static Self { + &ERA_REPORT + } + + /// Returns a random `EraReport`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + use rand::Rng; + + let equivocators_count = rng.gen_range(0..5); + let rewards_count = rng.gen_range(0..5); + let inactive_count = rng.gen_range(0..5); + let equivocators = iter::repeat_with(|| PublicKey::random(rng)) + .take(equivocators_count) + .collect(); + let rewards = iter::repeat_with(|| { + let pub_key = PublicKey::random(rng); + let reward = rng.gen_range(1..(1_000_000_000 + 1)); + (pub_key, reward) + }) + .take(rewards_count) + .collect(); + let inactive_validators = iter::repeat_with(|| PublicKey::random(rng)) + .take(inactive_count) + .collect(); + EraReport::new(equivocators, rewards, inactive_validators) + } +} + +struct EraRewardsLabels; + +impl KeyValueLabels for EraRewardsLabels { + const KEY: &'static str = "validator"; + const VALUE: &'static str = "amount"; +} + +#[cfg(feature = "json-schema")] +impl KeyValueJsonSchema for EraRewardsLabels { + const JSON_SCHEMA_KV_NAME: Option<&'static str> = Some("EraReward"); + const JSON_SCHEMA_KV_DESCRIPTION: Option<&'static str> = Some( + "A validator's public key paired with a measure of the value of its \ + contribution to consensus, as a 
fraction of the configured maximum block reward.", + ); + const JSON_SCHEMA_KEY_DESCRIPTION: Option<&'static str> = Some("The validator's public key."); + const JSON_SCHEMA_VALUE_DESCRIPTION: Option<&'static str> = Some("The reward amount."); +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let era_report = EraReport::random(rng); + bytesrepr::test_serialization_roundtrip(&era_report); + } +} diff --git a/casper_types_ver_2_0/src/block/era_end/era_end_v2.rs b/casper_types_ver_2_0/src/block/era_end/era_end_v2.rs new file mode 100644 index 00000000..2b7fe163 --- /dev/null +++ b/casper_types_ver_2_0/src/block/era_end/era_end_v2.rs @@ -0,0 +1,249 @@ +use alloc::{collections::BTreeMap, vec::Vec}; +use core::fmt; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use once_cell::sync::Lazy; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +#[cfg(feature = "json-schema")] +use serde_map_to_array::KeyValueJsonSchema; +use serde_map_to_array::{BTreeMapToArray, KeyValueLabels}; + +#[cfg(feature = "json-schema")] +use crate::SecretKey; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + DisplayIter, PublicKey, U512, +}; + +#[cfg(feature = "json-schema")] +static ERA_END_V2: Lazy = Lazy::new(|| { + let secret_key_1 = SecretKey::ed25519_from_bytes([0; 32]).unwrap(); + let public_key_1 = PublicKey::from(&secret_key_1); + let secret_key_3 = SecretKey::ed25519_from_bytes([2; 32]).unwrap(); + let public_key_3 = PublicKey::from(&secret_key_3); + + let equivocators = vec![public_key_1.clone()]; + let inactive_validators = vec![public_key_3]; + let next_era_validator_weights = { + let mut next_era_validator_weights: BTreeMap = BTreeMap::new(); + next_era_validator_weights.insert(public_key_1, U512::from(123)); + next_era_validator_weights.insert( + PublicKey::from( + &SecretKey::ed25519_from_bytes([5u8; 
SecretKey::ED25519_LENGTH]).unwrap(), + ), + U512::from(456), + ); + next_era_validator_weights.insert( + PublicKey::from( + &SecretKey::ed25519_from_bytes([6u8; SecretKey::ED25519_LENGTH]).unwrap(), + ), + U512::from(789), + ); + next_era_validator_weights + }; + let rewards = Default::default(); + + EraEndV2::new( + equivocators, + inactive_validators, + next_era_validator_weights, + rewards, + ) +}); + +/// Information related to the end of an era, and validator weights for the following era. +#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct EraEndV2 { + /// The set of equivocators. + pub(super) equivocators: Vec, + /// Validators that haven't produced any unit during the era. + pub(super) inactive_validators: Vec, + /// The validators for the upcoming era and their respective weights. + #[serde(with = "BTreeMapToArray::")] + pub(super) next_era_validator_weights: BTreeMap, + /// The rewards distributed to the validators. + pub(super) rewards: BTreeMap, +} + +impl EraEndV2 { + /// Returns the set of equivocators. + pub fn equivocators(&self) -> &[PublicKey] { + &self.equivocators + } + + /// Returns the validators that haven't produced any unit during the era. + pub fn inactive_validators(&self) -> &[PublicKey] { + &self.inactive_validators + } + + /// Returns the validators for the upcoming era and their respective weights. + pub fn next_era_validator_weights(&self) -> &BTreeMap { + &self.next_era_validator_weights + } + + /// Returns the rewards distributed to the validators. + pub fn rewards(&self) -> &BTreeMap { + &self.rewards + } + + // This method is not intended to be used by third party crates. 
+    #[doc(hidden)]
+    pub fn new(
+        equivocators: Vec<PublicKey>,
+        inactive_validators: Vec<PublicKey>,
+        next_era_validator_weights: BTreeMap<PublicKey, U512>,
+        rewards: BTreeMap<PublicKey, U512>,
+    ) -> Self {
+        EraEndV2 {
+            equivocators,
+            inactive_validators,
+            next_era_validator_weights,
+            rewards,
+        }
+    }
+
+    // This method is not intended to be used by third party crates.
+    #[doc(hidden)]
+    #[cfg(feature = "json-schema")]
+    pub fn example() -> &'static Self {
+        &ERA_END_V2
+    }
+
+    /// Returns a random `EraEndV2`.
+    #[cfg(any(feature = "testing", test))]
+    pub fn random(rng: &mut crate::testing::TestRng) -> Self {
+        use rand::Rng;
+
+        let equivocators_count = rng.gen_range(0..5);
+        let inactive_count = rng.gen_range(0..5);
+        let next_era_validator_weights_count = rng.gen_range(0..5);
+        let rewards_count = rng.gen_range(0..5);
+
+        let equivocators = core::iter::repeat_with(|| PublicKey::random(rng))
+            .take(equivocators_count)
+            .collect();
+
+        let inactive_validators = core::iter::repeat_with(|| PublicKey::random(rng))
+            .take(inactive_count)
+            .collect();
+
+        let next_era_validator_weights = core::iter::repeat_with(|| {
+            let pub_key = PublicKey::random(rng);
+            let reward = rng.gen_range(1..=1_000_000_000);
+            (pub_key, U512::from(reward))
+        })
+        .take(next_era_validator_weights_count)
+        .collect();
+
+        let rewards = core::iter::repeat_with(|| {
+            let pub_key = PublicKey::random(rng);
+            let reward = rng.gen_range(1..=1_000_000_000);
+            (pub_key, U512::from(reward))
+        })
+        .take(rewards_count)
+        .collect();
+
+        Self::new(
+            equivocators,
+            inactive_validators,
+            next_era_validator_weights,
+            rewards,
+        )
+    }
+}
+
+impl ToBytes for EraEndV2 {
+    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {
+        let EraEndV2 {
+            equivocators,
+            inactive_validators,
+            next_era_validator_weights,
+            rewards,
+        } = self;
+
+        equivocators.write_bytes(writer)?;
+        inactive_validators.write_bytes(writer)?;
+        next_era_validator_weights.write_bytes(writer)?;
+        rewards.write_bytes(writer)?;
+
+        Ok(())
+    }
+
+    fn
to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + let EraEndV2 { + equivocators, + inactive_validators, + next_era_validator_weights, + rewards, + } = self; + + equivocators.serialized_length() + + inactive_validators.serialized_length() + + next_era_validator_weights.serialized_length() + + rewards.serialized_length() + } +} + +impl FromBytes for EraEndV2 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (equivocators, bytes) = Vec::from_bytes(bytes)?; + let (inactive_validators, bytes) = Vec::from_bytes(bytes)?; + let (next_era_validator_weights, bytes) = BTreeMap::from_bytes(bytes)?; + let (rewards, bytes) = BTreeMap::from_bytes(bytes)?; + let era_end = EraEndV2 { + equivocators, + inactive_validators, + next_era_validator_weights, + rewards, + }; + + Ok((era_end, bytes)) + } +} + +impl fmt::Display for EraEndV2 { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let slashings = DisplayIter::new(&self.equivocators); + let rewards = DisplayIter::new( + self.rewards + .iter() + .map(|(public_key, amount)| format!("{}: {}", public_key, amount)), + ); + + write!( + formatter, + "era end: slash {}, reward {}", + slashings, rewards + ) + } +} + +struct NextEraValidatorLabels; + +impl KeyValueLabels for NextEraValidatorLabels { + const KEY: &'static str = "validator"; + const VALUE: &'static str = "weight"; +} + +#[cfg(feature = "json-schema")] +impl KeyValueJsonSchema for NextEraValidatorLabels { + const JSON_SCHEMA_KV_NAME: Option<&'static str> = Some("ValidatorWeight"); + const JSON_SCHEMA_KV_DESCRIPTION: Option<&'static str> = Some( + "A validator's public key paired with its weight, i.e. 
the total number of \ + motes staked by it and its delegators.", + ); + const JSON_SCHEMA_KEY_DESCRIPTION: Option<&'static str> = Some("The validator's public key."); + const JSON_SCHEMA_VALUE_DESCRIPTION: Option<&'static str> = Some("The validator's weight."); +} diff --git a/casper_types_ver_2_0/src/block/finality_signature.rs b/casper_types_ver_2_0/src/block/finality_signature.rs new file mode 100644 index 00000000..57b1c2a6 --- /dev/null +++ b/casper_types_ver_2_0/src/block/finality_signature.rs @@ -0,0 +1,266 @@ +use alloc::vec::Vec; +use core::{ + cmp::Ordering, + fmt::{self, Display, Formatter}, + hash::{Hash, Hasher}, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "once_cell", test))] +use once_cell::sync::OnceCell; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use super::BlockHash; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{crypto, EraId, PublicKey, SecretKey, Signature}; + +/// A validator's signature of a block, confirming it is finalized. +/// +/// Clients and joining nodes should wait until the signers' combined weight exceeds the fault +/// tolerance threshold before accepting the block as finalized. +#[derive(Clone, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "A validator's signature of a block, confirming it is finalized.") +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct FinalitySignature { + /// The block hash of the associated block. + pub(super) block_hash: BlockHash, + /// The era in which the associated block was created. + pub(super) era_id: EraId, + /// The signature over the block hash of the associated block. + pub(super) signature: Signature, + /// The public key of the signing validator. 
+ pub(super) public_key: PublicKey, + #[serde(skip)] + #[cfg_attr( + all(any(feature = "once_cell", test), feature = "datasize"), + data_size(skip) + )] + #[cfg(any(feature = "once_cell", test))] + pub(super) is_verified: OnceCell>, +} + +impl FinalitySignature { + /// Constructs a new `FinalitySignature`. + pub fn create(block_hash: BlockHash, era_id: EraId, secret_key: &SecretKey) -> Self { + let bytes = Self::bytes_to_sign(&block_hash, era_id); + let public_key = PublicKey::from(secret_key); + let signature = crypto::sign(bytes, secret_key, &public_key); + FinalitySignature { + block_hash, + era_id, + signature, + public_key, + #[cfg(any(feature = "once_cell", test))] + is_verified: OnceCell::with_value(Ok(())), + } + } + + /// Returns the block hash of the associated block. + pub fn block_hash(&self) -> &BlockHash { + &self.block_hash + } + + /// Returns the era in which the associated block was created. + pub fn era_id(&self) -> EraId { + self.era_id + } + + /// Returns the signature over the block hash of the associated block. + pub fn signature(&self) -> &Signature { + &self.signature + } + + /// Returns the public key of the signing validator. + pub fn public_key(&self) -> &PublicKey { + &self.public_key + } + + /// Returns `Ok` if the signature is cryptographically valid. + pub fn is_verified(&self) -> Result<(), crypto::Error> { + #[cfg(any(feature = "once_cell", test))] + return self.is_verified.get_or_init(|| self.verify()).clone(); + + #[cfg(not(any(feature = "once_cell", test)))] + self.verify() + } + + /// Constructs a new `FinalitySignature`. + #[cfg(any(feature = "testing", test))] + pub fn new( + block_hash: BlockHash, + era_id: EraId, + signature: Signature, + public_key: PublicKey, + ) -> Self { + FinalitySignature { + block_hash, + era_id, + signature, + public_key, + #[cfg(any(feature = "once_cell", test))] + is_verified: OnceCell::new(), + } + } + + /// Returns a random `FinalitySignature`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + FinalitySignature::random_for_block(BlockHash::random(rng), EraId::random(rng), rng) + } + + /// Returns a random `FinalitySignature` for the provided `block_hash` and `era_id`. + #[cfg(any(feature = "testing", test))] + pub fn random_for_block(block_hash: BlockHash, era_id: EraId, rng: &mut TestRng) -> Self { + let secret_key = SecretKey::random(rng); + FinalitySignature::create(block_hash, era_id, &secret_key) + } + + fn bytes_to_sign(block_hash: &BlockHash, era_id: EraId) -> Vec { + let mut bytes = block_hash.inner().into_vec(); + bytes.extend_from_slice(&era_id.to_le_bytes()); + bytes + } + + fn verify(&self) -> Result<(), crypto::Error> { + let bytes = Self::bytes_to_sign(&self.block_hash, self.era_id); + crypto::verify(bytes, &self.signature, &self.public_key) + } +} + +impl Hash for FinalitySignature { + fn hash(&self, state: &mut H) { + // Ensure we initialize self.is_verified field. + let is_verified = self.is_verified().is_ok(); + // Destructure to make sure we don't accidentally omit fields. + #[cfg(any(feature = "once_cell", test))] + let FinalitySignature { + block_hash, + era_id, + signature, + public_key, + is_verified: _, + } = self; + #[cfg(not(any(feature = "once_cell", test)))] + let FinalitySignature { + block_hash, + era_id, + signature, + public_key, + } = self; + block_hash.hash(state); + era_id.hash(state); + signature.hash(state); + public_key.hash(state); + is_verified.hash(state); + } +} + +impl PartialEq for FinalitySignature { + fn eq(&self, other: &FinalitySignature) -> bool { + // Ensure we initialize self.is_verified field. + let is_verified = self.is_verified().is_ok(); + // Destructure to make sure we don't accidentally omit fields. 
+ #[cfg(any(feature = "once_cell", test))] + let FinalitySignature { + block_hash, + era_id, + signature, + public_key, + is_verified: _, + } = self; + #[cfg(not(any(feature = "once_cell", test)))] + let FinalitySignature { + block_hash, + era_id, + signature, + public_key, + } = self; + *block_hash == other.block_hash + && *era_id == other.era_id + && *signature == other.signature + && *public_key == other.public_key + && is_verified == other.is_verified().is_ok() + } +} + +impl Ord for FinalitySignature { + fn cmp(&self, other: &FinalitySignature) -> Ordering { + // Ensure we initialize self.is_verified field. + let is_verified = self.is_verified().is_ok(); + // Destructure to make sure we don't accidentally omit fields. + #[cfg(any(feature = "once_cell", test))] + let FinalitySignature { + block_hash, + era_id, + signature, + public_key, + is_verified: _, + } = self; + #[cfg(not(any(feature = "once_cell", test)))] + let FinalitySignature { + block_hash, + era_id, + signature, + public_key, + } = self; + block_hash + .cmp(&other.block_hash) + .then_with(|| era_id.cmp(&other.era_id)) + .then_with(|| signature.cmp(&other.signature)) + .then_with(|| public_key.cmp(&other.public_key)) + .then_with(|| is_verified.cmp(&other.is_verified().is_ok())) + } +} + +impl PartialOrd for FinalitySignature { + fn partial_cmp(&self, other: &FinalitySignature) -> Option { + Some(self.cmp(other)) + } +} + +impl Display for FinalitySignature { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!( + formatter, + "finality signature for {}, from {}", + self.block_hash, self.public_key + ) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::TestBlockBuilder; + + #[test] + fn finality_signature() { + let rng = &mut TestRng::new(); + let block = TestBlockBuilder::new().build(rng); + // Signature should be over both block hash and era id. 
+ let secret_key = SecretKey::random(rng); + let public_key = PublicKey::from(&secret_key); + let era_id = EraId::from(1); + let finality_signature = FinalitySignature::create(*block.hash(), era_id, &secret_key); + finality_signature.is_verified().unwrap(); + let signature = finality_signature.signature; + // Verify that signature includes era id. + let invalid_finality_signature = FinalitySignature { + block_hash: *block.hash(), + era_id: EraId::from(2), + signature, + public_key, + is_verified: OnceCell::new(), + }; + // Test should fail b/c `signature` is over `era_id=1` and here we're using `era_id=2`. + assert!(invalid_finality_signature.is_verified().is_err()); + } +} diff --git a/casper_types_ver_2_0/src/block/finality_signature_id.rs b/casper_types_ver_2_0/src/block/finality_signature_id.rs new file mode 100644 index 00000000..211071e2 --- /dev/null +++ b/casper_types_ver_2_0/src/block/finality_signature_id.rs @@ -0,0 +1,55 @@ +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +use super::BlockHash; +#[cfg(doc)] +use super::FinalitySignature; +use crate::{EraId, PublicKey}; + +/// An identifier for a [`FinalitySignature`]. +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct FinalitySignatureId { + block_hash: BlockHash, + era_id: EraId, + public_key: PublicKey, +} + +impl FinalitySignatureId { + /// Returns a new `FinalitySignatureId`. + pub fn new(block_hash: BlockHash, era_id: EraId, public_key: PublicKey) -> Self { + FinalitySignatureId { + block_hash, + era_id, + public_key, + } + } + + /// Returns the block hash of the associated block. + pub fn block_hash(&self) -> &BlockHash { + &self.block_hash + } + + /// Returns the era in which the associated block was created. + pub fn era_id(&self) -> EraId { + self.era_id + } + + /// Returns the public key of the signing validator. 
+ pub fn public_key(&self) -> &PublicKey { + &self.public_key + } +} + +impl Display for FinalitySignatureId { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!( + formatter, + "finality signature id for {}, from {}", + self.block_hash, self.public_key + ) + } +} diff --git a/casper_types_ver_2_0/src/block/json_compatibility.rs b/casper_types_ver_2_0/src/block/json_compatibility.rs new file mode 100644 index 00000000..1c256376 --- /dev/null +++ b/casper_types_ver_2_0/src/block/json_compatibility.rs @@ -0,0 +1,8 @@ +//! This module provides types primarily to support converting instances of `BTreeMap` into +//! `Vec<(K, V)>` or similar, in order to allow these types to be able to be converted to and from +//! JSON, and to allow for the production of a static schema for them. + +#![cfg(all(feature = "std", feature = "json-schema"))] +mod json_block_with_signatures; + +pub use json_block_with_signatures::JsonBlockWithSignatures; diff --git a/casper_types_ver_2_0/src/block/json_compatibility/json_block_with_signatures.rs b/casper_types_ver_2_0/src/block/json_compatibility/json_block_with_signatures.rs new file mode 100644 index 00000000..71d472ea --- /dev/null +++ b/casper_types_ver_2_0/src/block/json_compatibility/json_block_with_signatures.rs @@ -0,0 +1,95 @@ +use alloc::collections::BTreeMap; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use once_cell::sync::Lazy; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +use serde_map_to_array::{BTreeMapToArray, KeyValueJsonSchema, KeyValueLabels}; + +use crate::{crypto, Block, BlockSignatures, BlockV2, PublicKey, SecretKey, Signature}; + +#[cfg(feature = "json-schema")] +static JSON_SIGNED_BLOCK: Lazy = Lazy::new(|| { + let block = BlockV2::example().clone(); + let secret_key = SecretKey::example(); + let public_key = PublicKey::from(secret_key); + let signature = crypto::sign(block.hash.inner(), secret_key, &public_key); + let mut proofs = 
BTreeMap::new(); + proofs.insert(public_key, signature); + + JsonBlockWithSignatures { + block: block.into(), + proofs, + } +}); + +/// A JSON-friendly representation of a block and the signatures for that block. +#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct JsonBlockWithSignatures { + /// The block. + pub block: Block, + /// The proofs of the block, i.e. a collection of validators' signatures of the block hash. + #[serde(with = "BTreeMapToArray::")] + pub proofs: BTreeMap, +} + +impl JsonBlockWithSignatures { + /// Constructs a new `JsonBlock`. + pub fn new(block: Block, maybe_signatures: Option) -> Self { + let proofs = maybe_signatures + .map(|signatures| signatures.proofs) + .unwrap_or_default(); + + JsonBlockWithSignatures { block, proofs } + } + + // This method is not intended to be used by third party crates. + #[doc(hidden)] + pub fn example() -> &'static Self { + &JSON_SIGNED_BLOCK + } +} +struct BlockProofLabels; + +impl KeyValueLabels for BlockProofLabels { + const KEY: &'static str = "public_key"; + const VALUE: &'static str = "signature"; +} + +impl KeyValueJsonSchema for BlockProofLabels { + const JSON_SCHEMA_KV_NAME: Option<&'static str> = Some("BlockProof"); + const JSON_SCHEMA_KV_DESCRIPTION: Option<&'static str> = Some( + "A validator's public key paired with a corresponding signature of a given block hash.", + ); + const JSON_SCHEMA_KEY_DESCRIPTION: Option<&'static str> = Some("The validator's public key."); + const JSON_SCHEMA_VALUE_DESCRIPTION: Option<&'static str> = Some("The validator's signature."); +} + +#[cfg(test)] +mod tests { + use crate::{testing::TestRng, TestBlockBuilder}; + + use super::*; + + #[test] + fn block_to_and_from_json_block_with_signatures() { + let rng = &mut TestRng::new(); + let block: Block = TestBlockBuilder::new().build(rng).into(); + let empty_signatures = 
BlockSignatures::new(*block.hash(), block.era_id()); + let json_block = JsonBlockWithSignatures::new(block.clone(), Some(empty_signatures)); + let recovered_block = Block::from(json_block); + assert_eq!(block, recovered_block); + } + + #[test] + fn json_block_roundtrip() { + let rng = &mut TestRng::new(); + let block: Block = TestBlockBuilder::new().build(rng).into(); + let json_string = serde_json::to_string_pretty(&block).unwrap(); + let decoded = serde_json::from_str(&json_string).unwrap(); + assert_eq!(block, decoded); + } +} diff --git a/casper_types_ver_2_0/src/block/rewarded_signatures.rs b/casper_types_ver_2_0/src/block/rewarded_signatures.rs new file mode 100644 index 00000000..082aae36 --- /dev/null +++ b/casper_types_ver_2_0/src/block/rewarded_signatures.rs @@ -0,0 +1,474 @@ +use alloc::{collections::BTreeSet, vec::Vec}; + +use crate::{ + bytesrepr::{self, Bytes, FromBytes, ToBytes}, + PublicKey, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; + +use serde::{Deserialize, Serialize}; +use tracing::error; + +/// Describes finality signatures that will be rewarded in a block. Consists of a vector of +/// `SingleBlockRewardedSignatures`, each of which describes signatures for a single ancestor +/// block. The first entry represents the signatures for the parent block, the second for the +/// parent of the parent, and so on. +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Default, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct RewardedSignatures(Vec); + +/// List of identifiers for finality signatures for a particular past block. +/// +/// That past block height is current_height - signature_rewards_max_delay, the latter being defined +/// in the chainspec. 
+/// +/// We need to wait for a few blocks to pass (`signature_rewards_max_delay`) to store the finality +/// signers because we need a bit of time to get the block finality. +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Default, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct SingleBlockRewardedSignatures(Vec); + +impl SingleBlockRewardedSignatures { + /// Creates a new set of recorded finality signaures from the era's validators + + /// the list of validators which signed. + pub fn from_validator_set<'a>( + public_keys: &BTreeSet, + all_validators: impl IntoIterator, + ) -> Self { + // Take the validators list + // Replace the ones who signed with 1 and the ones who didn't with 0 + // Pack everything into bytes + let result = Self::pack( + all_validators + .into_iter() + .map(|key| u8::from(public_keys.contains(key))), + ); + + let included_count: u32 = result.0.iter().map(|c| c.count_ones()).sum(); + if included_count as usize != public_keys.len() { + error!( + included_count, + expected_count = public_keys.len(), + "error creating past finality signatures from validator set" + ); + } + + result + } + + /// Gets the list of validators which signed from a set of recorded finality signaures (`self`) + /// + the era's validators. + pub fn to_validator_set( + &self, + all_validators: impl IntoIterator, + ) -> BTreeSet { + self.unpack() + .zip(all_validators) + .filter_map(|(active, validator)| (active != 0).then_some(validator)) + .collect() + } + + /// Packs the bits to bytes, to create a `PastFinalitySignature` + /// from an iterator of bits. + /// + /// If a value is neither 1 nor 0, it is interpreted as a 1. 
+ #[doc(hidden)] + pub fn pack(bits: impl Iterator) -> Self { + //use itertools::Itertools; + + fn set_bit_at(value: u8, position: usize) -> u8 { + // Sanitize the value (must be 0 or 1): + let value = u8::from(value != 0); + + value << (7 - position) + } + + let inner = chunks_8(bits) + .map(|bits_chunk| { + bits_chunk + .enumerate() + .fold(0, |acc, (pos, value)| acc | set_bit_at(value, pos)) + }) + .collect(); + + SingleBlockRewardedSignatures(inner) + } + + /// Unpacks the bytes to bits, + /// to get a human readable representation of `PastFinalitySignature`. + #[doc(hidden)] + pub fn unpack(&self) -> impl Iterator + '_ { + // Returns the bit at the given position (0 or 1): + fn bit_at(byte: u8, position: u8) -> u8 { + (byte & (0b1000_0000 >> position)) >> (7 - position) + } + + self.0 + .iter() + .flat_map(|&byte| (0..8).map(move |i| bit_at(byte, i))) + } + + /// Calculates the set difference of two instances of `SingleBlockRewardedSignatures`. + #[doc(hidden)] + pub fn difference(mut self, other: &SingleBlockRewardedSignatures) -> Self { + for (self_byte, other_byte) in self.0.iter_mut().zip(other.0.iter()) { + *self_byte &= !other_byte; + } + self + } + + /// Calculates the set intersection of two instances of `SingleBlockRewardedSignatures`. + pub(crate) fn intersection(mut self, other: &SingleBlockRewardedSignatures) -> Self { + self.0 = self + .0 + .iter() + .zip(other.0.iter()) + .map(|(a, b)| *a & *b) + .collect(); + self + } + + /// Returns `true` if the set contains at least one signature. 
+ pub(crate) fn has_some(&self) -> bool { + self.0.iter().any(|byte| *byte != 0) + } +} + +impl ToBytes for SingleBlockRewardedSignatures { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.extend(Bytes::from(self.0.as_ref()).to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for SingleBlockRewardedSignatures { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (inner, rest) = Bytes::from_bytes(bytes)?; + Ok((SingleBlockRewardedSignatures(inner.into()), rest)) + } +} + +impl RewardedSignatures { + /// Creates a new instance of `RewardedSignatures`. + pub fn new>( + single_block_signatures: I, + ) -> Self { + Self(single_block_signatures.into_iter().collect()) + } + + /// Creates an instance of `RewardedSignatures` based on its unpacked (one byte per validator) + /// representation. + pub fn pack(unpacked: Vec>) -> Self { + Self( + unpacked + .into_iter() + .map(|single_block_signatures| { + SingleBlockRewardedSignatures::pack(single_block_signatures.into_iter()) + }) + .collect(), + ) + } + + /// Creates an unpacked (one byte per validator) representation of the finality signatures to + /// be rewarded in this block. + pub fn unpack(&self) -> Vec> { + self.0 + .iter() + .map(|single_block_signatures| single_block_signatures.unpack().collect()) + .collect() + } + + /// Returns this instance of `RewardedSignatures` with `num_blocks` of empty signatures + /// prepended. + pub fn left_padded(self, num_blocks: usize) -> Self { + Self( + core::iter::repeat_with(SingleBlockRewardedSignatures::default) + .take(num_blocks) + .chain(self.0) + .collect(), + ) + } + + /// Calculates the set difference between two instances of `RewardedSignatures`. 
+ pub fn difference(self, other: &RewardedSignatures) -> Self { + Self( + self.0 + .into_iter() + .zip(other.0.iter()) + .map(|(single_block_signatures, other_block_signatures)| { + single_block_signatures.difference(other_block_signatures) + }) + .collect(), + ) + } + + /// Calculates the set intersection between two instances of `RewardedSignatures`. + pub fn intersection(&self, other: &RewardedSignatures) -> Self { + Self( + self.0 + .iter() + .zip(other.0.iter()) + .map(|(single_block_signatures, other_block_signatures)| { + single_block_signatures + .clone() + .intersection(other_block_signatures) + }) + .collect(), + ) + } + + /// Iterates over the `SingleBlockRewardedSignatures` for each rewarded block. + pub fn iter(&self) -> impl Iterator { + self.0.iter() + } + + /// Iterates over the `SingleBlockRewardedSignatures`, yielding the signatures together with + /// the block height for each entry. `block_height` is the height of the block that contains + /// this instance of `RewardedSignatures`. + pub fn iter_with_height( + &self, + block_height: u64, + ) -> impl Iterator { + self.0.iter().enumerate().map(move |(rel_height, sbrs)| { + ( + block_height + .saturating_sub(rel_height as u64) + .saturating_sub(1), + sbrs, + ) + }) + } + + /// Returns `true` if there is at least one cited signature. + pub fn has_some(&self) -> bool { + self.0.iter().any(|signatures| signatures.has_some()) + } +} + +pub(crate) static EMPTY: RewardedSignatures = RewardedSignatures(Vec::new()); + +impl ToBytes for RewardedSignatures { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for RewardedSignatures { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + Vec::::from_bytes(bytes) + .map(|(inner, rest)| (RewardedSignatures(inner), rest)) + } +} + +/// Chunks an iterator over `u8`s into pieces of maximum size of 8. 
+fn chunks_8(bits: impl Iterator) -> impl Iterator> { + struct Chunks(B); + + struct Chunk { + values: [u8; 8], + index: usize, + max: usize, + } + + impl Iterator for Chunks + where + B: Iterator, + { + type Item = Chunk; + + fn next(&mut self) -> Option { + let mut values = [0; 8]; + let max = core::iter::zip(&mut values, &mut self.0) + .map(|(array_slot, value)| *array_slot = value) + .count(); + + (max != 0).then_some(Chunk { + values, + max, + index: 0, + }) + } + } + + impl Iterator for Chunk { + type Item = u8; + + fn next(&mut self) -> Option { + if self.index < self.max { + let n = self.values.get(self.index).cloned(); + self.index += 1; + n + } else { + None + } + } + } + + Chunks(bits) +} + +#[cfg(any(feature = "testing", test))] +impl SingleBlockRewardedSignatures { + /// Returns random data. + pub fn random(rng: &mut crate::testing::TestRng, n_validators: usize) -> Self { + let mut bytes = vec![0; (n_validators + 7) / 8]; + + rand::RngCore::fill_bytes(rng, bytes.as_mut()); + + SingleBlockRewardedSignatures(bytes) + } +} + +#[cfg(test)] +mod tests { + use super::{chunks_8, SingleBlockRewardedSignatures}; + use crate::{ + bytesrepr::{FromBytes, ToBytes}, + testing::TestRng, + PublicKey, + }; + use rand::{seq::IteratorRandom, Rng}; + use std::collections::BTreeSet; + + #[test] + fn empty_signatures() { + let rng = &mut TestRng::new(); + let validators: Vec<_> = std::iter::repeat_with(|| PublicKey::random(rng)) + .take(7) + .collect(); + let original_signed = BTreeSet::new(); + + let past_finality_signatures = + SingleBlockRewardedSignatures::from_validator_set(&original_signed, validators.iter()); + + assert_eq!(past_finality_signatures.0, &[0]); + + let signed = past_finality_signatures.to_validator_set(validators); + + assert_eq!(original_signed, signed); + } + + #[test] + fn from_and_to_methods_match_in_a_simple_case() { + let rng = &mut TestRng::new(); + let validators: Vec<_> = std::iter::repeat_with(|| PublicKey::random(rng)) + .take(11) + 
.collect(); + let signed = { + let mut signed = BTreeSet::new(); + signed.insert(validators[2].clone()); + signed.insert(validators[5].clone()); + signed.insert(validators[6].clone()); + signed.insert(validators[8].clone()); + signed.insert(validators[10].clone()); + signed + }; + + let past_finality_signatures = + SingleBlockRewardedSignatures::from_validator_set(&signed, validators.iter()); + + assert_eq!(past_finality_signatures.0, &[0b0010_0110, 0b1010_0000]); + + let signed_ = past_finality_signatures.to_validator_set(validators); + + assert_eq!(signed, signed_); + } + + #[test] + fn simple_serialization_roundtrip() { + let data = SingleBlockRewardedSignatures(vec![1, 2, 3, 4, 5]); + + let serialized = data.to_bytes().unwrap(); + assert_eq!(serialized.len(), data.0.len() + 4); + assert_eq!(data.serialized_length(), data.0.len() + 4); + + let (deserialized, rest) = SingleBlockRewardedSignatures::from_bytes(&serialized).unwrap(); + + assert_eq!(data, deserialized); + assert_eq!(rest, &[0u8; 0]); + } + + #[test] + fn serialization_roundtrip_of_empty_data() { + let data = SingleBlockRewardedSignatures::default(); + + let serialized = data.to_bytes().unwrap(); + assert_eq!(serialized, &[0; 4]); + assert_eq!(data.serialized_length(), 4); + + let (deserialized, rest) = SingleBlockRewardedSignatures::from_bytes(&serialized).unwrap(); + + assert_eq!(data, deserialized); + assert_eq!(rest, &[0u8; 0]); + } + + #[test] + fn serialization_roundtrip_of_random_data() { + let rng = &mut TestRng::new(); + let n_validators = rng.gen_range(50..200); + let all_validators: BTreeSet<_> = std::iter::repeat_with(|| PublicKey::random(rng)) + .take(n_validators) + .collect(); + let n_to_sign = rng.gen_range(0..all_validators.len()); + let public_keys = all_validators + .iter() + .cloned() + .choose_multiple(rng, n_to_sign) + .into_iter() + .collect(); + + let past_finality_signatures = + SingleBlockRewardedSignatures::from_validator_set(&public_keys, all_validators.iter()); + + let 
serialized = past_finality_signatures.to_bytes().unwrap(); + let (deserialized, rest) = SingleBlockRewardedSignatures::from_bytes(&serialized).unwrap(); + + assert_eq!(public_keys, deserialized.to_validator_set(all_validators)); + assert_eq!(rest, &[0u8; 0]); + } + + #[test] + fn chunk_iterator() { + fn v(maybe_chunk: Option>) -> Option> { + maybe_chunk.map(itertools::Itertools::collect_vec) + } + + // Empty chunks: + + let mut chunks = chunks_8(IntoIterator::into_iter([])); + + assert_eq!(v(chunks.next()), None); + + // Exact size chunk: + + let mut chunks = chunks_8(IntoIterator::into_iter([10, 11, 12, 13, 14, 15, 16, 17])); + + assert_eq!(v(chunks.next()), Some(vec![10, 11, 12, 13, 14, 15, 16, 17])); + assert_eq!(v(chunks.next()), None); + + // Chunks with a remainder: + + let mut chunks = chunks_8(IntoIterator::into_iter([ + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, + ])); + + assert_eq!(v(chunks.next()), Some(vec![10, 11, 12, 13, 14, 15, 16, 17])); + assert_eq!(v(chunks.next()), Some(vec![18, 19, 20, 21, 22, 23, 24, 25])); + assert_eq!(v(chunks.next()), Some(vec![26])); + } +} diff --git a/casper_types_ver_2_0/src/block/rewards.rs b/casper_types_ver_2_0/src/block/rewards.rs new file mode 100644 index 00000000..66f5aff0 --- /dev/null +++ b/casper_types_ver_2_0/src/block/rewards.rs @@ -0,0 +1,11 @@ +use alloc::collections::BTreeMap; + +use crate::{PublicKey, U512}; + +/// Rewards distributed to validators. +pub enum Rewards<'a> { + /// Rewards for version 1, associate a ratio to each validator. + V1(&'a BTreeMap), + /// Rewards for version 1, associate a tokens amount to each validator. 
+ V2(&'a BTreeMap), +} diff --git a/casper_types_ver_2_0/src/block/signed_block.rs b/casper_types_ver_2_0/src/block/signed_block.rs new file mode 100644 index 00000000..a5d49d64 --- /dev/null +++ b/casper_types_ver_2_0/src/block/signed_block.rs @@ -0,0 +1,80 @@ +use alloc::vec::Vec; +use core::fmt::{self, Display, Formatter}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + Block, BlockSignatures, +}; +#[cfg(any(feature = "std", feature = "json-schema", test))] +use serde::{Deserialize, Serialize}; + +/// A block and signatures for that block. +#[derive(Clone, Debug, PartialEq, Eq)] +#[cfg_attr( + any(feature = "std", feature = "json-schema", test), + derive(Serialize, Deserialize) +)] +pub struct SignedBlock { + /// Block. + pub(crate) block: Block, + // The signatures of the block. + pub(crate) block_signatures: BlockSignatures, +} + +impl SignedBlock { + /// Creates a new `SignedBlock`. + pub fn new(block: Block, block_signatures: BlockSignatures) -> Self { + Self { + block, + block_signatures, + } + } + + /// Returns the inner block. + pub fn block(&self) -> &Block { + &self.block + } + + /// Converts `self` into the block and signatures. 
+ pub fn into_inner(self) -> (Block, BlockSignatures) { + (self.block, self.block_signatures) + } +} + +impl FromBytes for SignedBlock { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (block, bytes) = FromBytes::from_bytes(bytes)?; + let (block_signatures, bytes) = FromBytes::from_bytes(bytes)?; + Ok((SignedBlock::new(block, block_signatures), bytes)) + } +} + +impl ToBytes for SignedBlock { + fn to_bytes(&self) -> Result, crate::bytesrepr::Error> { + let mut buf = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buf)?; + Ok(buf) + } + + fn write_bytes(&self, bytes: &mut Vec) -> Result<(), crate::bytesrepr::Error> { + self.block.write_bytes(bytes)?; + self.block_signatures.write_bytes(bytes)?; + Ok(()) + } + + fn serialized_length(&self) -> usize { + self.block.serialized_length() + self.block_signatures.serialized_length() + } +} + +impl Display for SignedBlock { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!( + f, + "block #{}, {}, with {} block signatures", + self.block.height(), + self.block.hash(), + self.block_signatures.len() + ) + } +} diff --git a/casper_types_ver_2_0/src/block/signed_block_header.rs b/casper_types_ver_2_0/src/block/signed_block_header.rs new file mode 100644 index 00000000..a478314d --- /dev/null +++ b/casper_types_ver_2_0/src/block/signed_block_header.rs @@ -0,0 +1,143 @@ +use core::fmt::{self, Display, Formatter}; +#[cfg(feature = "std")] +use std::error::Error as StdError; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "std", test))] +use serde::{Deserialize, Serialize}; + +use super::{BlockHash, BlockHeader, BlockSignatures}; +use crate::EraId; +#[cfg(any(feature = "testing", test))] +use crate::Signature; + +/// An error which can result from validating a [`SignedBlockHeader`]. 
+#[derive(Copy, Clone, Eq, PartialEq, Debug)] +#[non_exhaustive] +pub enum SignedBlockHeaderValidationError { + /// Mismatch between block hash in [`BlockHeader`] and [`BlockSignatures`]. + BlockHashMismatch { + /// The block hash in the `BlockHeader`. + block_hash_in_header: BlockHash, + /// The block hash in the `BlockSignatures`. + block_hash_in_signatures: BlockHash, + }, + /// Mismatch between era ID in [`BlockHeader`] and [`BlockSignatures`]. + EraIdMismatch { + /// The era ID in the `BlockHeader`. + era_id_in_header: EraId, + /// The era ID in the `BlockSignatures`. + era_id_in_signatures: EraId, + }, +} + +impl Display for SignedBlockHeaderValidationError { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + SignedBlockHeaderValidationError::BlockHashMismatch { + block_hash_in_header: expected, + block_hash_in_signatures: actual, + } => { + write!( + formatter, + "block hash mismatch - header: {}, signatures: {}", + expected, actual + ) + } + SignedBlockHeaderValidationError::EraIdMismatch { + era_id_in_header: expected, + era_id_in_signatures: actual, + } => { + write!( + formatter, + "era id mismatch - header: {}, signatures: {}", + expected, actual + ) + } + } + } +} + +#[cfg(feature = "std")] +impl StdError for SignedBlockHeaderValidationError {} + +/// A block header and collection of signatures of a given block. +#[derive(Clone, Eq, PartialEq, Debug)] +#[cfg_attr(any(feature = "std", test), derive(Serialize, Deserialize))] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct SignedBlockHeader { + block_header: BlockHeader, + block_signatures: BlockSignatures, +} + +impl SignedBlockHeader { + /// Returns a new `SignedBlockHeader`. + pub fn new(block_header: BlockHeader, block_signatures: BlockSignatures) -> Self { + SignedBlockHeader { + block_header, + block_signatures, + } + } + + /// Returns the block header. 
+ pub fn block_header(&self) -> &BlockHeader { + &self.block_header + } + + /// Returns the block signatures. + pub fn block_signatures(&self) -> &BlockSignatures { + &self.block_signatures + } + + /// Returns `Ok` if and only if the block hash and era ID in the `BlockHeader` are identical to + /// those in the `BlockSignatures`. + /// + /// Note that no cryptographic verification of the contained signatures is performed. For this, + /// see [`BlockSignatures::is_verified`]. + pub fn is_valid(&self) -> Result<(), SignedBlockHeaderValidationError> { + if self.block_header.block_hash() != *self.block_signatures.block_hash() { + return Err(SignedBlockHeaderValidationError::BlockHashMismatch { + block_hash_in_header: self.block_header.block_hash(), + block_hash_in_signatures: *self.block_signatures.block_hash(), + }); + } + if self.block_header.era_id() != self.block_signatures.era_id() { + return Err(SignedBlockHeaderValidationError::EraIdMismatch { + era_id_in_header: self.block_header.era_id(), + era_id_in_signatures: self.block_signatures.era_id(), + }); + } + Ok(()) + } + + /// Sets the era ID contained in `block_signatures` to its max value, rendering it and hence + /// `self` invalid (assuming the relevant era ID for this `SignedBlockHeader` wasn't already + /// the max value). + #[cfg(any(feature = "testing", test))] + pub fn invalidate_era(&mut self) { + self.block_signatures.era_id = EraId::new(u64::MAX); + } + + /// Replaces the signature field of the last `block_signatures` entry with the `System` variant + /// of [`Signature`], rendering that entry invalid. + /// + /// Note that [`Self::is_valid`] will be unaffected by this as it only checks for equality in + /// the block hash and era ID of the header and signatures; no cryptographic verification is + /// performed. 
+ #[cfg(any(feature = "testing", test))] + pub fn invalidate_last_signature(&mut self) { + let last_proof = self + .block_signatures + .proofs + .last_entry() + .expect("should have at least one signature"); + *last_proof.into_mut() = Signature::System; + } +} + +impl Display for SignedBlockHeader { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!(f, "{}, and {}", self.block_header, self.block_signatures) + } +} diff --git a/casper_types_ver_2_0/src/block/test_block_builder/test_block_v1_builder.rs b/casper_types_ver_2_0/src/block/test_block_builder/test_block_v1_builder.rs new file mode 100644 index 00000000..1a6b68a7 --- /dev/null +++ b/casper_types_ver_2_0/src/block/test_block_builder/test_block_v1_builder.rs @@ -0,0 +1,183 @@ +use std::iter; + +use rand::Rng; + +use crate::{testing::TestRng, Block, EraEndV1}; + +use crate::{ + system::auction::ValidatorWeights, BlockHash, BlockV1, Deploy, Digest, EraId, EraReport, + ProtocolVersion, PublicKey, Timestamp, U512, +}; + +/// A helper to build the blocks with various properties required for tests. +pub struct TestBlockV1Builder { + parent_hash: Option, + state_root_hash: Option, + timestamp: Option, + era: Option, + height: Option, + protocol_version: ProtocolVersion, + deploys: Vec, + is_switch: Option, + validator_weights: Option, +} + +impl Default for TestBlockV1Builder { + fn default() -> Self { + Self { + parent_hash: None, + state_root_hash: None, + timestamp: None, + era: None, + height: None, + protocol_version: ProtocolVersion::V1_0_0, + deploys: Vec::new(), + is_switch: None, + validator_weights: None, + } + } +} + +impl TestBlockV1Builder { + /// Creates new `TestBlockBuilder`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the parent hash for the block. + pub fn parent_hash(self, parent_hash: BlockHash) -> Self { + Self { + parent_hash: Some(parent_hash), + ..self + } + } + + /// Sets the state root hash for the block. 
+ pub fn state_root_hash(self, state_root_hash: Digest) -> Self { + Self { + state_root_hash: Some(state_root_hash), + ..self + } + } + + /// Sets the timestamp for the block. + pub fn timestamp(self, timestamp: Timestamp) -> Self { + Self { + timestamp: Some(timestamp), + ..self + } + } + + /// Sets the era for the block + pub fn era(self, era: impl Into) -> Self { + Self { + era: Some(era.into()), + ..self + } + } + + /// Sets the height for the block. + pub fn height(self, height: u64) -> Self { + Self { + height: Some(height), + ..self + } + } + + /// Sets the protocol version for the block. + pub fn protocol_version(self, protocol_version: ProtocolVersion) -> Self { + Self { + protocol_version, + ..self + } + } + + /// Associates the given deploys with the created block. + pub fn deploys<'a, I: IntoIterator>(self, deploys_iter: I) -> Self { + Self { + deploys: deploys_iter.into_iter().cloned().collect(), + ..self + } + } + + /// Associates a number of random deploys with the created block. + pub fn random_deploys(mut self, count: usize, rng: &mut TestRng) -> Self { + self.deploys = iter::repeat(()) + .take(count) + .map(|_| Deploy::random(rng)) + .collect(); + self + } + + /// Allows setting the created block to be switch block or not. + pub fn switch_block(self, is_switch: bool) -> Self { + Self { + is_switch: Some(is_switch), + ..self + } + } + + /// Sets the validator weights for the block. + pub fn validator_weights(self, validator_weights: ValidatorWeights) -> Self { + Self { + validator_weights: Some(validator_weights), + ..self + } + } + + /// Builds the block. 
+ pub fn build(self, rng: &mut TestRng) -> BlockV1 { + let Self { + parent_hash, + state_root_hash, + timestamp, + era, + height, + protocol_version, + deploys, + is_switch, + validator_weights, + } = self; + + let parent_hash = parent_hash.unwrap_or_else(|| BlockHash::new(rng.gen())); + let parent_seed = Digest::random(rng); + let state_root_hash = state_root_hash.unwrap_or_else(|| rng.gen()); + let random_bit = rng.gen(); + let is_switch = is_switch.unwrap_or_else(|| rng.gen_bool(0.1)); + let era_end = is_switch.then(|| { + let next_era_validator_weights = validator_weights.unwrap_or_else(|| { + (1..6) + .map(|i| (PublicKey::random(rng), U512::from(i))) + .take(6) + .collect() + }); + EraEndV1::new(EraReport::random(rng), next_era_validator_weights) + }); + let timestamp = timestamp.unwrap_or_else(Timestamp::now); + let era_id = era.unwrap_or(EraId::random(rng)); + let height = height.unwrap_or_else(|| era_id.value() * 10 + rng.gen_range(0..10)); + let proposer = PublicKey::random(rng); + let deploy_hashes = deploys.iter().map(|deploy| *deploy.hash()).collect(); + let transfer_hashes = vec![]; + + BlockV1::new( + parent_hash, + parent_seed, + state_root_hash, + random_bit, + era_end, + timestamp, + era_id, + height, + protocol_version, + proposer, + deploy_hashes, + transfer_hashes, + ) + } + + /// Builds the block as a versioned block. 
+ pub fn build_versioned(self, rng: &mut TestRng) -> Block { + self.build(rng).into() + } +} diff --git a/casper_types_ver_2_0/src/block/test_block_builder/test_block_v2_builder.rs b/casper_types_ver_2_0/src/block/test_block_builder/test_block_v2_builder.rs new file mode 100644 index 00000000..b6a8324f --- /dev/null +++ b/casper_types_ver_2_0/src/block/test_block_builder/test_block_v2_builder.rs @@ -0,0 +1,275 @@ +use std::iter; + +use alloc::collections::BTreeMap; +use rand::Rng; + +use crate::{ + system::auction::ValidatorWeights, testing::TestRng, Block, BlockHash, BlockV2, Digest, + EraEndV2, EraId, ProtocolVersion, PublicKey, RewardedSignatures, Timestamp, Transaction, + TransactionEntryPoint, TransactionSessionKind, TransactionTarget, U512, +}; + +/// A helper to build the blocks with various properties required for tests. +pub struct TestBlockV2Builder { + parent_hash: Option, + state_root_hash: Option, + timestamp: Option, + era: Option, + height: Option, + proposer: Option, + protocol_version: ProtocolVersion, + txns: Vec, + is_switch: Option, + validator_weights: Option, + rewarded_signatures: Option, +} + +impl Default for TestBlockV2Builder { + fn default() -> Self { + Self { + parent_hash: None, + state_root_hash: None, + timestamp: None, + era: None, + height: None, + proposer: None, + protocol_version: ProtocolVersion::V1_0_0, + txns: Vec::new(), + is_switch: None, + validator_weights: None, + rewarded_signatures: None, + } + } +} + +impl TestBlockV2Builder { + /// Creates new `TestBlockBuilder`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the parent hash for the block. + pub fn parent_hash(self, parent_hash: BlockHash) -> Self { + Self { + parent_hash: Some(parent_hash), + ..self + } + } + + /// Sets the state root hash for the block. + pub fn state_root_hash(self, state_root_hash: Digest) -> Self { + Self { + state_root_hash: Some(state_root_hash), + ..self + } + } + + /// Sets the timestamp for the block. 
+ pub fn timestamp(self, timestamp: Timestamp) -> Self { + Self { + timestamp: Some(timestamp), + ..self + } + } + + /// Sets the era for the block + pub fn era(self, era: impl Into) -> Self { + Self { + era: Some(era.into()), + ..self + } + } + + /// Sets the height for the block. + pub fn height(self, height: u64) -> Self { + Self { + height: Some(height), + ..self + } + } + + /// Sets the block proposer. + pub fn proposer(self, proposer: PublicKey) -> Self { + Self { + proposer: Some(proposer), + ..self + } + } + + /// Sets the protocol version for the block. + pub fn protocol_version(self, protocol_version: ProtocolVersion) -> Self { + Self { + protocol_version, + ..self + } + } + + /// Associates the given transactions with the created block. + pub fn transactions<'a, I: IntoIterator>(self, txns_iter: I) -> Self { + Self { + txns: txns_iter.into_iter().cloned().collect(), + ..self + } + } + + /// Sets the height for the block. + pub fn rewarded_signatures(self, rewarded_signatures: RewardedSignatures) -> Self { + Self { + rewarded_signatures: Some(rewarded_signatures), + ..self + } + } + + /// Associates a number of random transactions with the created block. + pub fn random_transactions(mut self, count: usize, rng: &mut TestRng) -> Self { + self.txns = iter::repeat_with(|| Transaction::random(rng)) + .take(count) + .collect(); + self + } + + /// Allows setting the created block to be switch block or not. + pub fn switch_block(self, is_switch: bool) -> Self { + Self { + is_switch: Some(is_switch), + ..self + } + } + + /// Sets the validator weights for the block. + pub fn validator_weights(self, validator_weights: ValidatorWeights) -> Self { + Self { + validator_weights: Some(validator_weights), + ..self + } + } + + /// Builds the block. 
+ pub fn build(self, rng: &mut TestRng) -> BlockV2 { + let Self { + parent_hash, + state_root_hash, + timestamp, + era, + height, + proposer, + protocol_version, + txns, + is_switch, + validator_weights, + rewarded_signatures, + } = self; + + let parent_hash = parent_hash.unwrap_or_else(|| BlockHash::new(rng.gen())); + let parent_seed = Digest::random(rng); + let state_root_hash = state_root_hash.unwrap_or_else(|| rng.gen()); + let random_bit = rng.gen(); + let is_switch = is_switch.unwrap_or_else(|| rng.gen_bool(0.1)); + let era_end = is_switch.then(|| gen_era_end_v2(rng, validator_weights)); + let timestamp = timestamp.unwrap_or_else(Timestamp::now); + let era_id = era.unwrap_or(EraId::random(rng)); + let height = height.unwrap_or_else(|| era_id.value() * 10 + rng.gen_range(0..10)); + let proposer = proposer.unwrap_or_else(|| PublicKey::random(rng)); + + let mut transfer_hashes = vec![]; + let mut staking_hashes = vec![]; + let mut install_upgrade_hashes = vec![]; + let mut standard_hashes = vec![]; + for txn in txns { + let txn_hash = txn.hash(); + match txn { + Transaction::Deploy(deploy) => { + if deploy.session().is_transfer() { + transfer_hashes.push(txn_hash); + } else { + standard_hashes.push(txn_hash); + } + } + Transaction::V1(v1_txn) => match v1_txn.target() { + TransactionTarget::Native => match v1_txn.entry_point() { + TransactionEntryPoint::Transfer => transfer_hashes.push(txn_hash), + TransactionEntryPoint::Custom(_) + | TransactionEntryPoint::AddBid + | TransactionEntryPoint::WithdrawBid + | TransactionEntryPoint::Delegate + | TransactionEntryPoint::Undelegate + | TransactionEntryPoint::Redelegate => staking_hashes.push(txn_hash), + }, + TransactionTarget::Stored { .. } => standard_hashes.push(txn_hash), + TransactionTarget::Session { kind, .. 
} => match kind { + TransactionSessionKind::Standard | TransactionSessionKind::Isolated => { + standard_hashes.push(txn_hash) + } + TransactionSessionKind::Installer | TransactionSessionKind::Upgrader => { + install_upgrade_hashes.push(txn_hash) + } + }, + }, + } + } + let rewarded_signatures = rewarded_signatures.unwrap_or_default(); + + BlockV2::new( + parent_hash, + parent_seed, + state_root_hash, + random_bit, + era_end, + timestamp, + era_id, + height, + protocol_version, + proposer, + transfer_hashes, + staking_hashes, + install_upgrade_hashes, + standard_hashes, + rewarded_signatures, + ) + } + + /// Builds the block as a versioned block. + pub fn build_versioned(self, rng: &mut TestRng) -> Block { + self.build(rng).into() + } + + /// Builds a block that is invalid. + pub fn build_invalid(self, rng: &mut TestRng) -> BlockV2 { + self.build(rng).make_invalid(rng) + } +} + +fn gen_era_end_v2( + rng: &mut TestRng, + validator_weights: Option>, +) -> EraEndV2 { + let equivocators_count = rng.gen_range(0..5); + let rewards_count = rng.gen_range(0..5); + let inactive_count = rng.gen_range(0..5); + let next_era_validator_weights = validator_weights.unwrap_or_else(|| { + (1..6) + .map(|i| (PublicKey::random(rng), U512::from(i))) + .take(6) + .collect() + }); + let equivocators = iter::repeat_with(|| PublicKey::random(rng)) + .take(equivocators_count) + .collect(); + let rewards = iter::repeat_with(|| { + let pub_key = PublicKey::random(rng); + let reward = rng.gen_range(1..=1_000_000_000 + 1); + (pub_key, U512::from(reward)) + }) + .take(rewards_count) + .collect(); + let inactive_validators = iter::repeat_with(|| PublicKey::random(rng)) + .take(inactive_count) + .collect(); + + EraEndV2::new( + equivocators, + inactive_validators, + next_era_validator_weights, + rewards, + ) +} diff --git a/casper_types_ver_2_0/src/block_time.rs b/casper_types_ver_2_0/src/block_time.rs new file mode 100644 index 00000000..f278a36b --- /dev/null +++ 
b/casper_types_ver_2_0/src/block_time.rs @@ -0,0 +1,55 @@ +use alloc::vec::Vec; + +use crate::bytesrepr::{Error, FromBytes, ToBytes, U64_SERIALIZED_LENGTH}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +/// The number of bytes in a serialized [`BlockTime`]. +pub const BLOCKTIME_SERIALIZED_LENGTH: usize = U64_SERIALIZED_LENGTH; + +/// A newtype wrapping a [`u64`] which represents the block time. +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[derive(Clone, Copy, Default, Debug, PartialEq, Eq, PartialOrd, Serialize, Deserialize)] +pub struct BlockTime(u64); + +impl BlockTime { + /// Constructs a `BlockTime`. + pub fn new(value: u64) -> Self { + BlockTime(value) + } + + /// Saturating integer subtraction. Computes `self - other`, saturating at `0` instead of + /// overflowing. + #[must_use] + pub fn saturating_sub(self, other: BlockTime) -> Self { + BlockTime(self.0.saturating_sub(other.0)) + } +} + +impl From for u64 { + fn from(blocktime: BlockTime) -> Self { + blocktime.0 + } +} + +impl ToBytes for BlockTime { + fn to_bytes(&self) -> Result, Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + BLOCKTIME_SERIALIZED_LENGTH + } +} + +impl FromBytes for BlockTime { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (time, rem) = FromBytes::from_bytes(bytes)?; + Ok((BlockTime::new(time), rem)) + } +} diff --git a/casper_types_ver_2_0/src/byte_code.rs b/casper_types_ver_2_0/src/byte_code.rs new file mode 100644 index 00000000..1e7605d0 --- /dev/null +++ b/casper_types_ver_2_0/src/byte_code.rs @@ -0,0 +1,467 @@ +use alloc::{format, string::String, vec::Vec}; +use core::{ + array::TryFromSliceError, + convert::TryFrom, + fmt::{self, Debug, Display, Formatter}, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", 
test))] +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +#[cfg(feature = "json-schema")] +use schemars::{gen::SchemaGenerator, schema::Schema, JsonSchema}; +use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; + +use crate::{ + addressable_entity, bytesrepr, + bytesrepr::{Bytes, Error, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + checksummed_hex, + key::ByteCodeAddr, + uref, CLType, CLTyped, +}; + +const BYTE_CODE_MAX_DISPLAY_LEN: usize = 16; +const KEY_HASH_LENGTH: usize = 32; +const WASM_STRING_PREFIX: &str = "contract-wasm-"; + +/// Associated error type of `TryFrom<&[u8]>` for `ByteCodeHash`. +#[derive(Debug)] +pub struct TryFromSliceForContractHashError(()); + +#[derive(Debug)] +#[non_exhaustive] +pub enum FromStrError { + InvalidPrefix, + Hex(base16::DecodeError), + Hash(TryFromSliceError), + AccountHash(addressable_entity::FromAccountHashStrError), + URef(uref::FromStrError), +} + +impl From for FromStrError { + fn from(error: base16::DecodeError) -> Self { + FromStrError::Hex(error) + } +} + +impl From for FromStrError { + fn from(error: TryFromSliceError) -> Self { + FromStrError::Hash(error) + } +} + +impl From for FromStrError { + fn from(error: addressable_entity::FromAccountHashStrError) -> Self { + FromStrError::AccountHash(error) + } +} + +impl From for FromStrError { + fn from(error: uref::FromStrError) -> Self { + FromStrError::URef(error) + } +} + +impl Display for FromStrError { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match self { + FromStrError::InvalidPrefix => write!(f, "invalid prefix"), + FromStrError::Hex(error) => write!(f, "decode from hex: {}", error), + FromStrError::Hash(error) => write!(f, "hash from string error: {}", error), + FromStrError::AccountHash(error) => { + write!(f, "account hash from string error: {:?}", error) + } + FromStrError::URef(error) => write!(f, "uref from string error: {:?}", error), + } + } +} + +/// A newtype wrapping a `HashAddr` which is 
the raw bytes of +/// the ByteCodeHash +#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct ByteCodeHash(ByteCodeAddr); + +impl ByteCodeHash { + /// Constructs a new `ByteCodeHash` from the raw bytes of the contract wasm hash. + pub const fn new(value: ByteCodeAddr) -> ByteCodeHash { + ByteCodeHash(value) + } + + /// Returns the raw bytes of the contract hash as an array. + pub fn value(&self) -> ByteCodeAddr { + self.0 + } + + /// Returns the raw bytes of the contract hash as a `slice`. + pub fn as_bytes(&self) -> &[u8] { + &self.0 + } + + /// Formats the `ByteCodeHash` for users getting and putting. + pub fn to_formatted_string(self) -> String { + format!("{}{}", WASM_STRING_PREFIX, base16::encode_lower(&self.0),) + } + + /// Parses a string formatted as per `Self::to_formatted_string()` into a + /// `ByteCodeHash`. + pub fn from_formatted_str(input: &str) -> Result { + let remainder = input + .strip_prefix(WASM_STRING_PREFIX) + .ok_or(FromStrError::InvalidPrefix)?; + let bytes = ByteCodeAddr::try_from(checksummed_hex::decode(remainder)?.as_ref())?; + Ok(ByteCodeHash(bytes)) + } +} + +impl Display for ByteCodeHash { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + write!(f, "{}", base16::encode_lower(&self.0)) + } +} + +impl Debug for ByteCodeHash { + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + write!(f, "ByteCodeHash({})", base16::encode_lower(&self.0)) + } +} + +impl CLTyped for ByteCodeHash { + fn cl_type() -> CLType { + CLType::ByteArray(KEY_HASH_LENGTH as u32) + } +} + +impl ToBytes for ByteCodeHash { + #[inline(always)] + fn to_bytes(&self) -> Result, Error> { + self.0.to_bytes() + } + + #[inline(always)] + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + #[inline(always)] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + self.0.write_bytes(writer) + } +} + +impl FromBytes for ByteCodeHash { + fn 
from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (bytes, rem) = FromBytes::from_bytes(bytes)?; + Ok((ByteCodeHash::new(bytes), rem)) + } +} + +impl From<[u8; 32]> for ByteCodeHash { + fn from(bytes: [u8; 32]) -> Self { + ByteCodeHash(bytes) + } +} + +impl Serialize for ByteCodeHash { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + self.to_formatted_string().serialize(serializer) + } else { + self.0.serialize(serializer) + } + } +} + +impl<'de> Deserialize<'de> for ByteCodeHash { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let formatted_string = String::deserialize(deserializer)?; + ByteCodeHash::from_formatted_str(&formatted_string).map_err(SerdeError::custom) + } else { + let bytes = ByteCodeAddr::deserialize(deserializer)?; + Ok(ByteCodeHash(bytes)) + } + } +} + +impl AsRef<[u8]> for ByteCodeHash { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + +impl TryFrom<&[u8]> for ByteCodeHash { + type Error = TryFromSliceForContractHashError; + + fn try_from(bytes: &[u8]) -> Result { + ByteCodeAddr::try_from(bytes) + .map(ByteCodeHash::new) + .map_err(|_| TryFromSliceForContractHashError(())) + } +} + +impl TryFrom<&Vec> for ByteCodeHash { + type Error = TryFromSliceForContractHashError; + + fn try_from(bytes: &Vec) -> Result { + ByteCodeAddr::try_from(bytes as &[u8]) + .map(ByteCodeHash::new) + .map_err(|_| TryFromSliceForContractHashError(())) + } +} + +#[cfg(feature = "json-schema")] +impl JsonSchema for ByteCodeHash { + fn schema_name() -> String { + String::from("ByteCodeHash") + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let schema = gen.subschema_for::(); + let mut schema_object = schema.into_object(); + schema_object.metadata().description = + Some("The hash address of the contract wasm".to_string()); + schema_object.into() + } +} + +/// The type of Byte code. 
+#[repr(u8)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[derive(PartialEq, Eq, Clone, Copy, PartialOrd, Ord, Hash, Serialize, Deserialize)] +pub enum ByteCodeKind { + /// Empty byte code. + Empty = 0, + /// Byte code to be executed with the version 1 Casper execution engine. + V1CasperWasm = 1, +} + +impl ToBytes for ByteCodeKind { + fn to_bytes(&self) -> Result, Error> { + (*self as u8).to_bytes() + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + (*self as u8).write_bytes(writer) + } +} + +impl FromBytes for ByteCodeKind { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (byte_code_kind, remainder) = u8::from_bytes(bytes)?; + match byte_code_kind { + byte_code_kind if byte_code_kind == ByteCodeKind::Empty as u8 => { + Ok((ByteCodeKind::Empty, remainder)) + } + byte_code_kind if byte_code_kind == ByteCodeKind::V1CasperWasm as u8 => { + Ok((ByteCodeKind::V1CasperWasm, remainder)) + } + _ => Err(Error::Formatting), + } + } +} + +impl Display for ByteCodeKind { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + ByteCodeKind::Empty => { + write!(f, "empty") + } + ByteCodeKind::V1CasperWasm => { + write!(f, "v1-casper-wasm") + } + } + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> ByteCodeKind { + match rng.gen_range(0..=1) { + 0 => ByteCodeKind::Empty, + 1 => ByteCodeKind::V1CasperWasm, + _ => unreachable!(), + } + } +} + +/// A container for contract's Wasm bytes. 
+#[derive(PartialEq, Eq, Clone, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct ByteCode { + kind: ByteCodeKind, + bytes: Bytes, +} + +impl Debug for ByteCode { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + if self.bytes.len() > BYTE_CODE_MAX_DISPLAY_LEN { + write!( + f, + "ByteCode(0x{}...)", + base16::encode_lower(&self.bytes[..BYTE_CODE_MAX_DISPLAY_LEN]) + ) + } else { + write!(f, "ByteCode(0x{})", base16::encode_lower(&self.bytes)) + } + } +} + +impl ByteCode { + /// Creates new Wasm object from bytes. + pub fn new(kind: ByteCodeKind, bytes: Vec) -> Self { + ByteCode { + kind, + bytes: bytes.into(), + } + } + + /// Consumes instance of [`ByteCode`] and returns its bytes. + pub fn take_bytes(self) -> Vec { + self.bytes.into() + } + + /// Returns a slice of contained Wasm bytes. + pub fn bytes(&self) -> &[u8] { + self.bytes.as_ref() + } + + /// Return the type of byte code. + pub fn kind(&self) -> ByteCodeKind { + self.kind + } +} + +impl ToBytes for ByteCode { + fn to_bytes(&self) -> Result, Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.kind.serialized_length() + self.bytes.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + self.kind.write_bytes(writer)?; + self.bytes.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for ByteCode { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (kind, remainder) = ByteCodeKind::from_bytes(bytes)?; + let (bytes, remainder) = Bytes::from_bytes(remainder)?; + Ok((ByteCode { kind, bytes }, remainder)) + } +} + +#[cfg(test)] +mod tests { + use rand::RngCore; + + use super::*; + use crate::testing::TestRng; + + #[test] + fn debug_repr_of_short_wasm() { + const SIZE: usize = 8; + let wasm_bytes = vec![0; SIZE]; + let byte_code = 
ByteCode::new(ByteCodeKind::V1CasperWasm, wasm_bytes); + assert_eq!(format!("{:?}", byte_code), "ByteCode(0x0000000000000000)"); + } + + #[test] + fn debug_repr_of_long_wasm() { + const SIZE: usize = 65; + let wasm_bytes = vec![0; SIZE]; + let byte_code = ByteCode::new(ByteCodeKind::V1CasperWasm, wasm_bytes); + // String output is less than the bytes itself + assert_eq!( + format!("{:?}", byte_code), + "ByteCode(0x00000000000000000000000000000000...)" + ); + } + + #[test] + fn byte_code_bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let byte_code = ByteCode::new(rng.gen(), vec![]); + bytesrepr::test_serialization_roundtrip(&byte_code); + + let mut buffer = vec![0u8; rng.gen_range(1..100)]; + rng.fill_bytes(buffer.as_mut()); + let byte_code = ByteCode::new(rng.gen(), buffer); + bytesrepr::test_serialization_roundtrip(&byte_code); + } + + #[test] + fn contract_wasm_hash_from_slice() { + let bytes: Vec = (0..32).collect(); + let byte_code_hash = + ByteCodeAddr::try_from(&bytes[..]).expect("should create byte code hash"); + let contract_hash = ByteCodeHash::new(byte_code_hash); + assert_eq!(&bytes, &contract_hash.as_bytes()); + } + + #[test] + fn contract_wasm_hash_from_str() { + let byte_code_hash = ByteCodeHash([3; 32]); + let encoded = byte_code_hash.to_formatted_string(); + let decoded = ByteCodeHash::from_formatted_str(&encoded).unwrap(); + assert_eq!(byte_code_hash, decoded); + + let invalid_prefix = + "contractwasm-0000000000000000000000000000000000000000000000000000000000000000"; + assert!(ByteCodeHash::from_formatted_str(invalid_prefix).is_err()); + + let short_addr = + "contract-wasm-00000000000000000000000000000000000000000000000000000000000000"; + assert!(ByteCodeHash::from_formatted_str(short_addr).is_err()); + + let long_addr = + "contract-wasm-000000000000000000000000000000000000000000000000000000000000000000"; + assert!(ByteCodeHash::from_formatted_str(long_addr).is_err()); + + let invalid_hex = + 
"contract-wasm-000000000000000000000000000000000000000000000000000000000000000g"; + assert!(ByteCodeHash::from_formatted_str(invalid_hex).is_err()); + } + + #[test] + fn contract_wasm_hash_bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let byte_code_hash = ByteCodeHash(rng.gen()); + bytesrepr::test_serialization_roundtrip(&byte_code_hash); + } + + #[test] + fn contract_wasm_hash_bincode_roundtrip() { + let rng = &mut TestRng::new(); + let byte_code_hash = ByteCodeHash(rng.gen()); + let serialized = bincode::serialize(&byte_code_hash).unwrap(); + let deserialized = bincode::deserialize(&serialized).unwrap(); + assert_eq!(byte_code_hash, deserialized) + } + + #[test] + fn contract_wasm_hash_json_roundtrip() { + let rng = &mut TestRng::new(); + let byte_code_hash = ByteCodeHash(rng.gen()); + let json_string = serde_json::to_string_pretty(&byte_code_hash).unwrap(); + let decoded = serde_json::from_str(&json_string).unwrap(); + assert_eq!(byte_code_hash, decoded) + } +} diff --git a/casper_types_ver_2_0/src/bytesrepr.rs b/casper_types_ver_2_0/src/bytesrepr.rs new file mode 100644 index 00000000..e66087b5 --- /dev/null +++ b/casper_types_ver_2_0/src/bytesrepr.rs @@ -0,0 +1,1646 @@ +//! Contains serialization and deserialization code for types used throughout the system. +mod bytes; + +use alloc::{ + alloc::{alloc, Layout}, + collections::{BTreeMap, BTreeSet, VecDeque}, + str, + string::String, + vec, + vec::Vec, +}; +#[cfg(debug_assertions)] +use core::any; +use core::{ + convert::TryInto, + fmt::{self, Display, Formatter}, + mem, + ptr::NonNull, +}; +#[cfg(feature = "std")] +use std::error::Error as StdError; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use num_integer::Integer; +use num_rational::Ratio; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +pub use bytes::Bytes; + +/// The number of bytes in a serialized `()`. 
+pub const UNIT_SERIALIZED_LENGTH: usize = 0; +/// The number of bytes in a serialized `bool`. +pub const BOOL_SERIALIZED_LENGTH: usize = 1; +/// The number of bytes in a serialized `i32`. +pub const I32_SERIALIZED_LENGTH: usize = mem::size_of::(); +/// The number of bytes in a serialized `i64`. +pub const I64_SERIALIZED_LENGTH: usize = mem::size_of::(); +/// The number of bytes in a serialized `u8`. +pub const U8_SERIALIZED_LENGTH: usize = mem::size_of::(); +/// The number of bytes in a serialized `u16`. +pub const U16_SERIALIZED_LENGTH: usize = mem::size_of::(); +/// The number of bytes in a serialized `u32`. +pub const U32_SERIALIZED_LENGTH: usize = mem::size_of::(); +/// The number of bytes in a serialized `u64`. +pub const U64_SERIALIZED_LENGTH: usize = mem::size_of::(); +/// The number of bytes in a serialized [`U128`](crate::U128). +pub const U128_SERIALIZED_LENGTH: usize = mem::size_of::(); +/// The number of bytes in a serialized [`U256`](crate::U256). +pub const U256_SERIALIZED_LENGTH: usize = U128_SERIALIZED_LENGTH * 2; +/// The number of bytes in a serialized [`U512`](crate::U512). +pub const U512_SERIALIZED_LENGTH: usize = U256_SERIALIZED_LENGTH * 2; +/// The tag representing a `None` value. +pub const OPTION_NONE_TAG: u8 = 0; +/// The tag representing a `Some` value. +pub const OPTION_SOME_TAG: u8 = 1; +/// The tag representing an `Err` value. +pub const RESULT_ERR_TAG: u8 = 0; +/// The tag representing an `Ok` value. +pub const RESULT_OK_TAG: u8 = 1; + +/// A type which can be serialized to a `Vec`. +pub trait ToBytes { + /// Serializes `&self` to a `Vec`. + fn to_bytes(&self) -> Result, Error>; + /// Consumes `self` and serializes to a `Vec`. + fn into_bytes(self) -> Result, Error> + where + Self: Sized, + { + self.to_bytes() + } + /// Returns the length of the `Vec` which would be returned from a successful call to + /// `to_bytes()` or `into_bytes()`. The data is not actually serialized, so this call is + /// relatively cheap. 
+ fn serialized_length(&self) -> usize; + + /// Writes `&self` into a mutable `writer`. + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.extend(self.to_bytes()?); + Ok(()) + } +} + +/// A type which can be deserialized from a `Vec`. +pub trait FromBytes: Sized { + /// Deserializes the slice into `Self`. + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error>; + + /// Deserializes the `Vec` into `Self`. + fn from_vec(bytes: Vec) -> Result<(Self, Vec), Error> { + Self::from_bytes(bytes.as_slice()).map(|(x, remainder)| (x, Vec::from(remainder))) + } +} + +/// Returns a `Vec` initialized with sufficient capacity to hold `to_be_serialized` after +/// serialization. +pub fn unchecked_allocate_buffer(to_be_serialized: &T) -> Vec { + let serialized_length = to_be_serialized.serialized_length(); + Vec::with_capacity(serialized_length) +} + +/// Returns a `Vec` initialized with sufficient capacity to hold `to_be_serialized` after +/// serialization, or an error if the capacity would exceed `u32::max_value()`. +pub fn allocate_buffer(to_be_serialized: &T) -> Result, Error> { + let serialized_length = to_be_serialized.serialized_length(); + if serialized_length > u32::max_value() as usize { + return Err(Error::OutOfMemory); + } + Ok(Vec::with_capacity(serialized_length)) +} + +/// Serialization and deserialization errors. +#[derive(Copy, Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(rename = "BytesreprError") +)] +#[repr(u8)] +#[non_exhaustive] +pub enum Error { + /// Early end of stream while deserializing. + EarlyEndOfStream = 0, + /// Formatting error while deserializing. + Formatting, + /// Not all input bytes were consumed in [`deserialize`]. + LeftOverBytes, + /// Out of memory error. + OutOfMemory, + /// No serialized representation is available for a value. 
+ NotRepresentable, + /// Exceeded a recursion depth limit. + ExceededRecursionDepth, +} + +impl Display for Error { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + Error::EarlyEndOfStream => { + formatter.write_str("Deserialization error: early end of stream") + } + Error::Formatting => formatter.write_str("Deserialization error: formatting"), + Error::LeftOverBytes => formatter.write_str("Deserialization error: left-over bytes"), + Error::OutOfMemory => formatter.write_str("Serialization error: out of memory"), + Error::NotRepresentable => { + formatter.write_str("Serialization error: value is not representable.") + } + Error::ExceededRecursionDepth => formatter.write_str("exceeded recursion depth"), + } + } +} + +impl ToBytes for Error { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + (*self as u8).write_bytes(writer) + } + + fn to_bytes(&self) -> Result, Error> { + (*self as u8).to_bytes() + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + } +} + +impl FromBytes for Error { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (value, remainder) = u8::from_bytes(bytes)?; + match value { + value if value == Error::EarlyEndOfStream as u8 => { + Ok((Error::EarlyEndOfStream, remainder)) + } + value if value == Error::Formatting as u8 => Ok((Error::Formatting, remainder)), + value if value == Error::LeftOverBytes as u8 => Ok((Error::LeftOverBytes, remainder)), + value if value == Error::OutOfMemory as u8 => Ok((Error::OutOfMemory, remainder)), + value if value == Error::NotRepresentable as u8 => { + Ok((Error::NotRepresentable, remainder)) + } + value if value == Error::ExceededRecursionDepth as u8 => { + Ok((Error::ExceededRecursionDepth, remainder)) + } + _ => Err(Error::Formatting), + } + } +} + +#[cfg(feature = "std")] +impl StdError for Error {} + +/// Deserializes `bytes` into an instance of `T`. 
+/// +/// Returns an error if the bytes cannot be deserialized into `T` or if not all of the input bytes +/// are consumed in the operation. +pub fn deserialize(bytes: Vec) -> Result { + let (t, remainder) = T::from_bytes(&bytes)?; + if remainder.is_empty() { + Ok(t) + } else { + Err(Error::LeftOverBytes) + } +} + +/// Deserializes a slice of bytes into an instance of `T`. +/// +/// Returns an error if the bytes cannot be deserialized into `T` or if not all of the input bytes +/// are consumed in the operation. +pub fn deserialize_from_slice, O: FromBytes>(bytes: I) -> Result { + let (t, remainder) = O::from_bytes(bytes.as_ref())?; + if remainder.is_empty() { + Ok(t) + } else { + Err(Error::LeftOverBytes) + } +} + +/// Serializes `t` into a `Vec`. +pub fn serialize(t: impl ToBytes) -> Result, Error> { + t.into_bytes() +} + +/// Safely splits the slice at the given point. +pub(crate) fn safe_split_at(bytes: &[u8], n: usize) -> Result<(&[u8], &[u8]), Error> { + if n > bytes.len() { + Err(Error::EarlyEndOfStream) + } else { + Ok(bytes.split_at(n)) + } +} + +impl ToBytes for () { + fn to_bytes(&self) -> Result, Error> { + Ok(Vec::new()) + } + + fn serialized_length(&self) -> usize { + UNIT_SERIALIZED_LENGTH + } +} + +impl FromBytes for () { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + Ok(((), bytes)) + } +} + +impl ToBytes for bool { + fn to_bytes(&self) -> Result, Error> { + u8::from(*self).to_bytes() + } + + fn serialized_length(&self) -> usize { + BOOL_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.push(*self as u8); + Ok(()) + } +} + +impl FromBytes for bool { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + match bytes.split_first() { + None => Err(Error::EarlyEndOfStream), + Some((byte, rem)) => match byte { + 1 => Ok((true, rem)), + 0 => Ok((false, rem)), + _ => Err(Error::Formatting), + }, + } + } +} + +impl ToBytes for u8 { + fn to_bytes(&self) -> Result, Error> { + 
Ok(vec![*self]) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.push(*self); + Ok(()) + } +} + +impl FromBytes for u8 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + match bytes.split_first() { + None => Err(Error::EarlyEndOfStream), + Some((byte, rem)) => Ok((*byte, rem)), + } + } +} + +impl ToBytes for i32 { + fn to_bytes(&self) -> Result, Error> { + Ok(self.to_le_bytes().to_vec()) + } + + fn serialized_length(&self) -> usize { + I32_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.extend_from_slice(&self.to_le_bytes()); + Ok(()) + } +} + +impl FromBytes for i32 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let mut result = [0u8; I32_SERIALIZED_LENGTH]; + let (bytes, remainder) = safe_split_at(bytes, I32_SERIALIZED_LENGTH)?; + result.copy_from_slice(bytes); + Ok((::from_le_bytes(result), remainder)) + } +} + +impl ToBytes for i64 { + fn to_bytes(&self) -> Result, Error> { + Ok(self.to_le_bytes().to_vec()) + } + + fn serialized_length(&self) -> usize { + I64_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.extend_from_slice(&self.to_le_bytes()); + Ok(()) + } +} + +impl FromBytes for i64 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let mut result = [0u8; I64_SERIALIZED_LENGTH]; + let (bytes, remainder) = safe_split_at(bytes, I64_SERIALIZED_LENGTH)?; + result.copy_from_slice(bytes); + Ok((::from_le_bytes(result), remainder)) + } +} + +impl ToBytes for u16 { + fn to_bytes(&self) -> Result, Error> { + Ok(self.to_le_bytes().to_vec()) + } + + fn serialized_length(&self) -> usize { + U16_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.extend_from_slice(&self.to_le_bytes()); + Ok(()) + } +} + +impl FromBytes for u16 { + fn from_bytes(bytes: &[u8]) -> 
Result<(Self, &[u8]), Error> { + let mut result = [0u8; U16_SERIALIZED_LENGTH]; + let (bytes, remainder) = safe_split_at(bytes, U16_SERIALIZED_LENGTH)?; + result.copy_from_slice(bytes); + Ok((::from_le_bytes(result), remainder)) + } +} + +impl ToBytes for u32 { + fn to_bytes(&self) -> Result, Error> { + Ok(self.to_le_bytes().to_vec()) + } + + fn serialized_length(&self) -> usize { + U32_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.extend_from_slice(&self.to_le_bytes()); + Ok(()) + } +} + +impl FromBytes for u32 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let mut result = [0u8; U32_SERIALIZED_LENGTH]; + let (bytes, remainder) = safe_split_at(bytes, U32_SERIALIZED_LENGTH)?; + result.copy_from_slice(bytes); + Ok((::from_le_bytes(result), remainder)) + } +} + +impl ToBytes for u64 { + fn to_bytes(&self) -> Result, Error> { + Ok(self.to_le_bytes().to_vec()) + } + + fn serialized_length(&self) -> usize { + U64_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.extend_from_slice(&self.to_le_bytes()); + Ok(()) + } +} + +impl FromBytes for u64 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let mut result = [0u8; U64_SERIALIZED_LENGTH]; + let (bytes, remainder) = safe_split_at(bytes, U64_SERIALIZED_LENGTH)?; + result.copy_from_slice(bytes); + Ok((::from_le_bytes(result), remainder)) + } +} + +impl ToBytes for String { + fn to_bytes(&self) -> Result, Error> { + let bytes = self.as_bytes(); + u8_slice_to_bytes(bytes) + } + + fn serialized_length(&self) -> usize { + u8_slice_serialized_length(self.as_bytes()) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + write_u8_slice(self.as_bytes(), writer)?; + Ok(()) + } +} + +impl FromBytes for String { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (size, remainder) = u32::from_bytes(bytes)?; + let (str_bytes, remainder) = safe_split_at(remainder, 
size as usize)?; + let result = String::from_utf8(str_bytes.to_vec()).map_err(|_| Error::Formatting)?; + Ok((result, remainder)) + } +} + +fn ensure_efficient_serialization() { + #[cfg(debug_assertions)] + debug_assert_ne!( + any::type_name::(), + any::type_name::(), + "You should use `casper_types_ver_2_0::bytesrepr::Bytes` newtype wrapper instead of `Vec` for efficiency" + ); +} + +fn iterator_serialized_length<'a, T: 'a + ToBytes>(ts: impl Iterator) -> usize { + U32_SERIALIZED_LENGTH + ts.map(ToBytes::serialized_length).sum::() +} + +impl ToBytes for Vec { + fn to_bytes(&self) -> Result, Error> { + ensure_efficient_serialization::(); + + let mut result = try_vec_with_capacity(self.serialized_length())?; + let length_32: u32 = self.len().try_into().map_err(|_| Error::NotRepresentable)?; + result.append(&mut length_32.to_bytes()?); + + for item in self.iter() { + result.append(&mut item.to_bytes()?); + } + + Ok(result) + } + + fn into_bytes(self) -> Result, Error> { + ensure_efficient_serialization::(); + + let mut result = allocate_buffer(&self)?; + let length_32: u32 = self.len().try_into().map_err(|_| Error::NotRepresentable)?; + result.append(&mut length_32.to_bytes()?); + + for item in self { + result.append(&mut item.into_bytes()?); + } + + Ok(result) + } + + fn serialized_length(&self) -> usize { + iterator_serialized_length(self.iter()) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + let length_32: u32 = self.len().try_into().map_err(|_| Error::NotRepresentable)?; + writer.extend_from_slice(&length_32.to_le_bytes()); + for item in self.iter() { + item.write_bytes(writer)?; + } + Ok(()) + } +} + +// TODO Replace `try_vec_with_capacity` with `Vec::try_reserve_exact` once it's in stable. 
+fn try_vec_with_capacity(capacity: usize) -> Result, Error> { + // see https://doc.rust-lang.org/src/alloc/raw_vec.rs.html#75-98 + let elem_size = mem::size_of::(); + let alloc_size = capacity.checked_mul(elem_size).ok_or(Error::OutOfMemory)?; + + let ptr = if alloc_size == 0 { + NonNull::::dangling() + } else { + let align = mem::align_of::(); + let layout = Layout::from_size_align(alloc_size, align).map_err(|_| Error::OutOfMemory)?; + let raw_ptr = unsafe { alloc(layout) }; + let non_null_ptr = NonNull::::new(raw_ptr).ok_or(Error::OutOfMemory)?; + non_null_ptr.cast() + }; + unsafe { Ok(Vec::from_raw_parts(ptr.as_ptr(), 0, capacity)) } +} + +fn vec_from_vec(bytes: Vec) -> Result<(Vec, Vec), Error> { + ensure_efficient_serialization::(); + + Vec::::from_bytes(bytes.as_slice()).map(|(x, remainder)| (x, Vec::from(remainder))) +} + +impl FromBytes for Vec { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + ensure_efficient_serialization::(); + + let (count, mut stream) = u32::from_bytes(bytes)?; + + let mut result = try_vec_with_capacity(count as usize)?; + for _ in 0..count { + let (value, remainder) = T::from_bytes(stream)?; + result.push(value); + stream = remainder; + } + + Ok((result, stream)) + } + + fn from_vec(bytes: Vec) -> Result<(Self, Vec), Error> { + vec_from_vec(bytes) + } +} + +impl ToBytes for VecDeque { + fn to_bytes(&self) -> Result, Error> { + let (slice1, slice2) = self.as_slices(); + let mut result = allocate_buffer(self)?; + let length_32: u32 = self.len().try_into().map_err(|_| Error::NotRepresentable)?; + result.append(&mut length_32.to_bytes()?); + for item in slice1.iter().chain(slice2.iter()) { + result.append(&mut item.to_bytes()?); + } + Ok(result) + } + + fn into_bytes(self) -> Result, Error> { + let vec: Vec = self.into(); + vec.to_bytes() + } + + fn serialized_length(&self) -> usize { + let (slice1, slice2) = self.as_slices(); + iterator_serialized_length(slice1.iter().chain(slice2.iter())) + } +} + +impl FromBytes for 
VecDeque { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (vec, bytes) = Vec::from_bytes(bytes)?; + Ok((VecDeque::from(vec), bytes)) + } + + fn from_vec(bytes: Vec) -> Result<(Self, Vec), Error> { + let (vec, bytes) = vec_from_vec(bytes)?; + Ok((VecDeque::from(vec), bytes)) + } +} + +impl ToBytes for [u8; COUNT] { + #[inline(always)] + fn to_bytes(&self) -> Result, Error> { + Ok(self.to_vec()) + } + + #[inline(always)] + fn serialized_length(&self) -> usize { + COUNT + } + + #[inline(always)] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.extend_from_slice(self); + Ok(()) + } +} + +impl FromBytes for [u8; COUNT] { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (bytes, rem) = safe_split_at(bytes, COUNT)?; + // SAFETY: safe_split_at makes sure `bytes` is exactly `COUNT` bytes. + let ptr = bytes.as_ptr() as *const [u8; COUNT]; + let result = unsafe { *ptr }; + Ok((result, rem)) + } +} + +impl ToBytes for BTreeSet { + fn to_bytes(&self) -> Result, Error> { + let mut result = allocate_buffer(self)?; + + let num_keys: u32 = self.len().try_into().map_err(|_| Error::NotRepresentable)?; + result.append(&mut num_keys.to_bytes()?); + + for value in self.iter() { + result.append(&mut value.to_bytes()?); + } + + Ok(result) + } + + fn serialized_length(&self) -> usize { + U32_SERIALIZED_LENGTH + self.iter().map(|v| v.serialized_length()).sum::() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + let length_32: u32 = self.len().try_into().map_err(|_| Error::NotRepresentable)?; + writer.extend_from_slice(&length_32.to_le_bytes()); + for value in self.iter() { + value.write_bytes(writer)?; + } + Ok(()) + } +} + +impl FromBytes for BTreeSet { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (num_keys, mut stream) = u32::from_bytes(bytes)?; + let mut result = BTreeSet::new(); + for _ in 0..num_keys { + let (v, rem) = V::from_bytes(stream)?; + result.insert(v); + stream 
= rem; + } + Ok((result, stream)) + } +} + +impl ToBytes for BTreeMap +where + K: ToBytes, + V: ToBytes, +{ + fn to_bytes(&self) -> Result, Error> { + let mut result = allocate_buffer(self)?; + + let num_keys: u32 = self.len().try_into().map_err(|_| Error::NotRepresentable)?; + result.append(&mut num_keys.to_bytes()?); + + for (key, value) in self.iter() { + result.append(&mut key.to_bytes()?); + result.append(&mut value.to_bytes()?); + } + + Ok(result) + } + + fn serialized_length(&self) -> usize { + U32_SERIALIZED_LENGTH + + self + .iter() + .map(|(key, value)| key.serialized_length() + value.serialized_length()) + .sum::() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + let length_32: u32 = self.len().try_into().map_err(|_| Error::NotRepresentable)?; + writer.extend_from_slice(&length_32.to_le_bytes()); + for (key, value) in self.iter() { + key.write_bytes(writer)?; + value.write_bytes(writer)?; + } + Ok(()) + } +} + +impl FromBytes for BTreeMap +where + K: FromBytes + Ord, + V: FromBytes, +{ + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (num_keys, mut stream) = u32::from_bytes(bytes)?; + let mut result = BTreeMap::new(); + for _ in 0..num_keys { + let (k, rem) = K::from_bytes(stream)?; + let (v, rem) = V::from_bytes(rem)?; + result.insert(k, v); + stream = rem; + } + Ok((result, stream)) + } +} + +impl ToBytes for Option { + fn to_bytes(&self) -> Result, Error> { + match self { + None => Ok(vec![OPTION_NONE_TAG]), + Some(v) => { + let mut result = allocate_buffer(self)?; + result.push(OPTION_SOME_TAG); + + let mut value = v.to_bytes()?; + result.append(&mut value); + + Ok(result) + } + } + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + Some(v) => v.serialized_length(), + None => 0, + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + match self { + None => writer.push(OPTION_NONE_TAG), + Some(v) => { + writer.push(OPTION_SOME_TAG); + 
v.write_bytes(writer)?; + } + }; + Ok(()) + } +} + +impl FromBytes for Option { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (tag, rem) = u8::from_bytes(bytes)?; + match tag { + OPTION_NONE_TAG => Ok((None, rem)), + OPTION_SOME_TAG => { + let (t, rem) = T::from_bytes(rem)?; + Ok((Some(t), rem)) + } + _ => Err(Error::Formatting), + } + } +} + +impl ToBytes for Result { + fn to_bytes(&self) -> Result, Error> { + let mut result = allocate_buffer(self)?; + let (variant, mut value) = match self { + Err(error) => (RESULT_ERR_TAG, error.to_bytes()?), + Ok(result) => (RESULT_OK_TAG, result.to_bytes()?), + }; + result.push(variant); + result.append(&mut value); + Ok(result) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + Ok(ok) => ok.serialized_length(), + Err(error) => error.serialized_length(), + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + match self { + Err(error) => { + writer.push(RESULT_ERR_TAG); + error.write_bytes(writer)?; + } + Ok(result) => { + writer.push(RESULT_OK_TAG); + result.write_bytes(writer)?; + } + }; + Ok(()) + } +} + +impl FromBytes for Result { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (variant, rem) = u8::from_bytes(bytes)?; + match variant { + RESULT_ERR_TAG => { + let (value, rem) = E::from_bytes(rem)?; + Ok((Err(value), rem)) + } + RESULT_OK_TAG => { + let (value, rem) = T::from_bytes(rem)?; + Ok((Ok(value), rem)) + } + _ => Err(Error::Formatting), + } + } +} + +impl ToBytes for (T1,) { + fn to_bytes(&self) -> Result, Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for (T1,) { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (t1, remainder) = T1::from_bytes(bytes)?; + Ok(((t1,), remainder)) + } +} + +impl ToBytes for (T1, T2) { + fn to_bytes(&self) -> Result, Error> { + let mut result = allocate_buffer(self)?; + 
result.append(&mut self.0.to_bytes()?); + result.append(&mut self.1.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + self.1.serialized_length() + } +} + +impl FromBytes for (T1, T2) { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (t1, remainder) = T1::from_bytes(bytes)?; + let (t2, remainder) = T2::from_bytes(remainder)?; + Ok(((t1, t2), remainder)) + } +} + +impl ToBytes for (T1, T2, T3) { + fn to_bytes(&self) -> Result, Error> { + let mut result = allocate_buffer(self)?; + result.append(&mut self.0.to_bytes()?); + result.append(&mut self.1.to_bytes()?); + result.append(&mut self.2.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + self.1.serialized_length() + self.2.serialized_length() + } +} + +impl FromBytes for (T1, T2, T3) { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (t1, remainder) = T1::from_bytes(bytes)?; + let (t2, remainder) = T2::from_bytes(remainder)?; + let (t3, remainder) = T3::from_bytes(remainder)?; + Ok(((t1, t2, t3), remainder)) + } +} + +impl ToBytes for (T1, T2, T3, T4) { + fn to_bytes(&self) -> Result, Error> { + let mut result = allocate_buffer(self)?; + result.append(&mut self.0.to_bytes()?); + result.append(&mut self.1.to_bytes()?); + result.append(&mut self.2.to_bytes()?); + result.append(&mut self.3.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + + self.1.serialized_length() + + self.2.serialized_length() + + self.3.serialized_length() + } +} + +impl FromBytes for (T1, T2, T3, T4) { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (t1, remainder) = T1::from_bytes(bytes)?; + let (t2, remainder) = T2::from_bytes(remainder)?; + let (t3, remainder) = T3::from_bytes(remainder)?; + let (t4, remainder) = T4::from_bytes(remainder)?; + Ok(((t1, t2, t3, t4), remainder)) + } +} + +impl ToBytes + for (T1, T2, T3, 
T4, T5) +{ + fn to_bytes(&self) -> Result, Error> { + let mut result = allocate_buffer(self)?; + result.append(&mut self.0.to_bytes()?); + result.append(&mut self.1.to_bytes()?); + result.append(&mut self.2.to_bytes()?); + result.append(&mut self.3.to_bytes()?); + result.append(&mut self.4.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + + self.1.serialized_length() + + self.2.serialized_length() + + self.3.serialized_length() + + self.4.serialized_length() + } +} + +impl FromBytes + for (T1, T2, T3, T4, T5) +{ + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (t1, remainder) = T1::from_bytes(bytes)?; + let (t2, remainder) = T2::from_bytes(remainder)?; + let (t3, remainder) = T3::from_bytes(remainder)?; + let (t4, remainder) = T4::from_bytes(remainder)?; + let (t5, remainder) = T5::from_bytes(remainder)?; + Ok(((t1, t2, t3, t4, t5), remainder)) + } +} + +impl ToBytes + for (T1, T2, T3, T4, T5, T6) +{ + fn to_bytes(&self) -> Result, Error> { + let mut result = allocate_buffer(self)?; + result.append(&mut self.0.to_bytes()?); + result.append(&mut self.1.to_bytes()?); + result.append(&mut self.2.to_bytes()?); + result.append(&mut self.3.to_bytes()?); + result.append(&mut self.4.to_bytes()?); + result.append(&mut self.5.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + + self.1.serialized_length() + + self.2.serialized_length() + + self.3.serialized_length() + + self.4.serialized_length() + + self.5.serialized_length() + } +} + +impl + FromBytes for (T1, T2, T3, T4, T5, T6) +{ + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (t1, remainder) = T1::from_bytes(bytes)?; + let (t2, remainder) = T2::from_bytes(remainder)?; + let (t3, remainder) = T3::from_bytes(remainder)?; + let (t4, remainder) = T4::from_bytes(remainder)?; + let (t5, remainder) = T5::from_bytes(remainder)?; + let (t6, remainder) = 
T6::from_bytes(remainder)?; + Ok(((t1, t2, t3, t4, t5, t6), remainder)) + } +} + +impl + ToBytes for (T1, T2, T3, T4, T5, T6, T7) +{ + fn to_bytes(&self) -> Result, Error> { + let mut result = allocate_buffer(self)?; + result.append(&mut self.0.to_bytes()?); + result.append(&mut self.1.to_bytes()?); + result.append(&mut self.2.to_bytes()?); + result.append(&mut self.3.to_bytes()?); + result.append(&mut self.4.to_bytes()?); + result.append(&mut self.5.to_bytes()?); + result.append(&mut self.6.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + + self.1.serialized_length() + + self.2.serialized_length() + + self.3.serialized_length() + + self.4.serialized_length() + + self.5.serialized_length() + + self.6.serialized_length() + } +} + +impl< + T1: FromBytes, + T2: FromBytes, + T3: FromBytes, + T4: FromBytes, + T5: FromBytes, + T6: FromBytes, + T7: FromBytes, + > FromBytes for (T1, T2, T3, T4, T5, T6, T7) +{ + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (t1, remainder) = T1::from_bytes(bytes)?; + let (t2, remainder) = T2::from_bytes(remainder)?; + let (t3, remainder) = T3::from_bytes(remainder)?; + let (t4, remainder) = T4::from_bytes(remainder)?; + let (t5, remainder) = T5::from_bytes(remainder)?; + let (t6, remainder) = T6::from_bytes(remainder)?; + let (t7, remainder) = T7::from_bytes(remainder)?; + Ok(((t1, t2, t3, t4, t5, t6, t7), remainder)) + } +} + +impl< + T1: ToBytes, + T2: ToBytes, + T3: ToBytes, + T4: ToBytes, + T5: ToBytes, + T6: ToBytes, + T7: ToBytes, + T8: ToBytes, + > ToBytes for (T1, T2, T3, T4, T5, T6, T7, T8) +{ + fn to_bytes(&self) -> Result, Error> { + let mut result = allocate_buffer(self)?; + result.append(&mut self.0.to_bytes()?); + result.append(&mut self.1.to_bytes()?); + result.append(&mut self.2.to_bytes()?); + result.append(&mut self.3.to_bytes()?); + result.append(&mut self.4.to_bytes()?); + result.append(&mut self.5.to_bytes()?); + result.append(&mut 
self.6.to_bytes()?); + result.append(&mut self.7.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + + self.1.serialized_length() + + self.2.serialized_length() + + self.3.serialized_length() + + self.4.serialized_length() + + self.5.serialized_length() + + self.6.serialized_length() + + self.7.serialized_length() + } +} + +impl< + T1: FromBytes, + T2: FromBytes, + T3: FromBytes, + T4: FromBytes, + T5: FromBytes, + T6: FromBytes, + T7: FromBytes, + T8: FromBytes, + > FromBytes for (T1, T2, T3, T4, T5, T6, T7, T8) +{ + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (t1, remainder) = T1::from_bytes(bytes)?; + let (t2, remainder) = T2::from_bytes(remainder)?; + let (t3, remainder) = T3::from_bytes(remainder)?; + let (t4, remainder) = T4::from_bytes(remainder)?; + let (t5, remainder) = T5::from_bytes(remainder)?; + let (t6, remainder) = T6::from_bytes(remainder)?; + let (t7, remainder) = T7::from_bytes(remainder)?; + let (t8, remainder) = T8::from_bytes(remainder)?; + Ok(((t1, t2, t3, t4, t5, t6, t7, t8), remainder)) + } +} + +impl< + T1: ToBytes, + T2: ToBytes, + T3: ToBytes, + T4: ToBytes, + T5: ToBytes, + T6: ToBytes, + T7: ToBytes, + T8: ToBytes, + T9: ToBytes, + > ToBytes for (T1, T2, T3, T4, T5, T6, T7, T8, T9) +{ + fn to_bytes(&self) -> Result, Error> { + let mut result = allocate_buffer(self)?; + result.append(&mut self.0.to_bytes()?); + result.append(&mut self.1.to_bytes()?); + result.append(&mut self.2.to_bytes()?); + result.append(&mut self.3.to_bytes()?); + result.append(&mut self.4.to_bytes()?); + result.append(&mut self.5.to_bytes()?); + result.append(&mut self.6.to_bytes()?); + result.append(&mut self.7.to_bytes()?); + result.append(&mut self.8.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + + self.1.serialized_length() + + self.2.serialized_length() + + self.3.serialized_length() + + self.4.serialized_length() + + 
self.5.serialized_length() + + self.6.serialized_length() + + self.7.serialized_length() + + self.8.serialized_length() + } +} + +impl< + T1: FromBytes, + T2: FromBytes, + T3: FromBytes, + T4: FromBytes, + T5: FromBytes, + T6: FromBytes, + T7: FromBytes, + T8: FromBytes, + T9: FromBytes, + > FromBytes for (T1, T2, T3, T4, T5, T6, T7, T8, T9) +{ + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (t1, remainder) = T1::from_bytes(bytes)?; + let (t2, remainder) = T2::from_bytes(remainder)?; + let (t3, remainder) = T3::from_bytes(remainder)?; + let (t4, remainder) = T4::from_bytes(remainder)?; + let (t5, remainder) = T5::from_bytes(remainder)?; + let (t6, remainder) = T6::from_bytes(remainder)?; + let (t7, remainder) = T7::from_bytes(remainder)?; + let (t8, remainder) = T8::from_bytes(remainder)?; + let (t9, remainder) = T9::from_bytes(remainder)?; + Ok(((t1, t2, t3, t4, t5, t6, t7, t8, t9), remainder)) + } +} + +impl< + T1: ToBytes, + T2: ToBytes, + T3: ToBytes, + T4: ToBytes, + T5: ToBytes, + T6: ToBytes, + T7: ToBytes, + T8: ToBytes, + T9: ToBytes, + T10: ToBytes, + > ToBytes for (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10) +{ + fn to_bytes(&self) -> Result, Error> { + let mut result = allocate_buffer(self)?; + result.append(&mut self.0.to_bytes()?); + result.append(&mut self.1.to_bytes()?); + result.append(&mut self.2.to_bytes()?); + result.append(&mut self.3.to_bytes()?); + result.append(&mut self.4.to_bytes()?); + result.append(&mut self.5.to_bytes()?); + result.append(&mut self.6.to_bytes()?); + result.append(&mut self.7.to_bytes()?); + result.append(&mut self.8.to_bytes()?); + result.append(&mut self.9.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + + self.1.serialized_length() + + self.2.serialized_length() + + self.3.serialized_length() + + self.4.serialized_length() + + self.5.serialized_length() + + self.6.serialized_length() + + self.7.serialized_length() + + self.8.serialized_length() 
+ + self.9.serialized_length() + } +} + +impl< + T1: FromBytes, + T2: FromBytes, + T3: FromBytes, + T4: FromBytes, + T5: FromBytes, + T6: FromBytes, + T7: FromBytes, + T8: FromBytes, + T9: FromBytes, + T10: FromBytes, + > FromBytes for (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10) +{ + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (t1, remainder) = T1::from_bytes(bytes)?; + let (t2, remainder) = T2::from_bytes(remainder)?; + let (t3, remainder) = T3::from_bytes(remainder)?; + let (t4, remainder) = T4::from_bytes(remainder)?; + let (t5, remainder) = T5::from_bytes(remainder)?; + let (t6, remainder) = T6::from_bytes(remainder)?; + let (t7, remainder) = T7::from_bytes(remainder)?; + let (t8, remainder) = T8::from_bytes(remainder)?; + let (t9, remainder) = T9::from_bytes(remainder)?; + let (t10, remainder) = T10::from_bytes(remainder)?; + Ok(((t1, t2, t3, t4, t5, t6, t7, t8, t9, t10), remainder)) + } +} + +impl ToBytes for str { + #[inline] + fn to_bytes(&self) -> Result, Error> { + u8_slice_to_bytes(self.as_bytes()) + } + + #[inline] + fn serialized_length(&self) -> usize { + u8_slice_serialized_length(self.as_bytes()) + } + + #[inline] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + write_u8_slice(self.as_bytes(), writer)?; + Ok(()) + } +} + +impl ToBytes for &str { + #[inline(always)] + fn to_bytes(&self) -> Result, Error> { + (*self).to_bytes() + } + + #[inline(always)] + fn serialized_length(&self) -> usize { + (*self).serialized_length() + } + + #[inline] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + write_u8_slice(self.as_bytes(), writer)?; + Ok(()) + } +} + +impl ToBytes for &T +where + T: ToBytes, +{ + fn to_bytes(&self) -> Result, Error> { + (*self).to_bytes() + } + + fn serialized_length(&self) -> usize { + (*self).serialized_length() + } +} + +impl ToBytes for Ratio +where + T: Clone + Integer + ToBytes, +{ + fn to_bytes(&self) -> Result, Error> { + if self.denom().is_zero() { + return 
Err(Error::Formatting); + } + (self.numer().clone(), self.denom().clone()).into_bytes() + } + + fn serialized_length(&self) -> usize { + (self.numer().clone(), self.denom().clone()).serialized_length() + } +} + +impl FromBytes for Ratio +where + T: Clone + FromBytes + Integer, +{ + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let ((numer, denom), rem): ((T, T), &[u8]) = FromBytes::from_bytes(bytes)?; + if denom.is_zero() { + return Err(Error::Formatting); + } + Ok((Ratio::new(numer, denom), rem)) + } +} + +/// Serializes a slice of bytes with a length prefix. +/// +/// This function is serializing a slice of bytes with an addition of a 4 byte length prefix. +/// +/// For safety you should prefer to use [`vec_u8_to_bytes`]. For efficiency reasons you should also +/// avoid using serializing Vec. +fn u8_slice_to_bytes(bytes: &[u8]) -> Result, Error> { + let serialized_length = u8_slice_serialized_length(bytes); + let mut vec = try_vec_with_capacity(serialized_length)?; + let length_prefix: u32 = bytes + .len() + .try_into() + .map_err(|_| Error::NotRepresentable)?; + let length_prefix_bytes = length_prefix.to_le_bytes(); + vec.extend_from_slice(&length_prefix_bytes); + vec.extend_from_slice(bytes); + Ok(vec) +} + +fn write_u8_slice(bytes: &[u8], writer: &mut Vec) -> Result<(), Error> { + let length_32: u32 = bytes + .len() + .try_into() + .map_err(|_| Error::NotRepresentable)?; + writer.extend_from_slice(&length_32.to_le_bytes()); + writer.extend_from_slice(bytes); + Ok(()) +} + +/// Serializes a vector of bytes with a length prefix. +/// +/// For efficiency you should avoid serializing Vec. +#[allow(clippy::ptr_arg)] +#[inline] +pub(crate) fn vec_u8_to_bytes(vec: &Vec) -> Result, Error> { + u8_slice_to_bytes(vec.as_slice()) +} + +/// Returns serialized length of serialized slice of bytes. +/// +/// This function adds a length prefix in the beginning. 
+#[inline(always)] +fn u8_slice_serialized_length(bytes: &[u8]) -> usize { + U32_SERIALIZED_LENGTH + bytes.len() +} + +#[allow(clippy::ptr_arg)] +#[inline] +pub(crate) fn vec_u8_serialized_length(vec: &Vec) -> usize { + u8_slice_serialized_length(vec.as_slice()) +} + +/// Asserts that `t` can be serialized and when deserialized back into an instance `T` compares +/// equal to `t`. +/// +/// Also asserts that `t.serialized_length()` is the same as the actual number of bytes of the +/// serialized `t` instance. +#[cfg(any(feature = "testing", test))] +#[track_caller] +pub fn test_serialization_roundtrip(t: &T) +where + T: fmt::Debug + ToBytes + FromBytes + PartialEq, +{ + let serialized = ToBytes::to_bytes(t).expect("Unable to serialize data"); + assert_eq!( + serialized.len(), + t.serialized_length(), + "\nLength of serialized data: {},\nserialized_length() yielded: {},\n t is {:?}", + serialized.len(), + t.serialized_length(), + t + ); + let mut written_bytes = vec![]; + t.write_bytes(&mut written_bytes) + .expect("Unable to serialize data via write_bytes"); + assert_eq!(serialized, written_bytes); + + let deserialized_from_slice = + deserialize_from_slice(&serialized).expect("Unable to deserialize data"); + assert_eq!(*t, deserialized_from_slice); + + let deserialized = deserialize::(serialized).expect("Unable to deserialize data"); + assert_eq!(*t, deserialized); +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn should_not_serialize_zero_denominator() { + let malicious = Ratio::new_raw(1, 0); + assert_eq!(malicious.to_bytes().unwrap_err(), Error::Formatting); + } + + #[test] + fn should_not_deserialize_zero_denominator() { + let malicious_bytes = (1u64, 0u64).to_bytes().unwrap(); + let result: Result, Error> = deserialize(malicious_bytes); + assert_eq!(result.unwrap_err(), Error::Formatting); + } + + #[test] + fn should_have_generic_tobytes_impl_for_borrowed_types() { + struct NonCopyable; + + impl ToBytes for NonCopyable { + fn to_bytes(&self) -> 
Result, Error> { + Ok(vec![1, 2, 3]) + } + + fn serialized_length(&self) -> usize { + 3 + } + } + + let noncopyable: &NonCopyable = &NonCopyable; + + assert_eq!(noncopyable.to_bytes().unwrap(), vec![1, 2, 3]); + assert_eq!(noncopyable.serialized_length(), 3); + assert_eq!(noncopyable.into_bytes().unwrap(), vec![1, 2, 3]); + } + + #[cfg(debug_assertions)] + #[test] + #[should_panic( + expected = "You should use `casper_types_ver_2_0::bytesrepr::Bytes` newtype wrapper instead of `Vec` for efficiency" + )] + fn should_fail_to_serialize_slice_of_u8() { + let bytes = b"0123456789".to_vec(); + bytes.to_bytes().unwrap(); + } +} + +#[cfg(test)] +mod proptests { + use std::collections::VecDeque; + + use proptest::{collection::vec, prelude::*}; + + use crate::{ + bytesrepr::{self, bytes::gens::bytes_arb, ToBytes}, + gens::*, + }; + + proptest! { + #[test] + fn test_bool(u in any::()) { + bytesrepr::test_serialization_roundtrip(&u); + } + + #[test] + fn test_u8(u in any::()) { + bytesrepr::test_serialization_roundtrip(&u); + } + + #[test] + fn test_u16(u in any::()) { + bytesrepr::test_serialization_roundtrip(&u); + } + + #[test] + fn test_u32(u in any::()) { + bytesrepr::test_serialization_roundtrip(&u); + } + + #[test] + fn test_i32(u in any::()) { + bytesrepr::test_serialization_roundtrip(&u); + } + + #[test] + fn test_u64(u in any::()) { + bytesrepr::test_serialization_roundtrip(&u); + } + + #[test] + fn test_i64(u in any::()) { + bytesrepr::test_serialization_roundtrip(&u); + } + + #[test] + fn test_u8_slice_32(s in u8_slice_32()) { + bytesrepr::test_serialization_roundtrip(&s); + } + + #[test] + fn test_vec_u8(u in bytes_arb(1..100)) { + bytesrepr::test_serialization_roundtrip(&u); + } + + #[test] + fn test_vec_i32(u in vec(any::(), 1..100)) { + bytesrepr::test_serialization_roundtrip(&u); + } + + #[test] + fn test_vecdeque_i32((front, back) in (vec(any::(), 1..100), vec(any::(), 1..100))) { + let mut vec_deque = VecDeque::new(); + for f in front { + 
vec_deque.push_front(f); + } + for f in back { + vec_deque.push_back(f); + } + bytesrepr::test_serialization_roundtrip(&vec_deque); + } + + #[test] + fn test_vec_vec_u8(u in vec(bytes_arb(1..100), 10)) { + bytesrepr::test_serialization_roundtrip(&u); + } + + #[test] + fn test_uref_map(m in named_keys_arb(20)) { + bytesrepr::test_serialization_roundtrip(&m); + } + + #[test] + fn test_array_u8_32(arr in any::<[u8; 32]>()) { + bytesrepr::test_serialization_roundtrip(&arr); + } + + #[test] + fn test_string(s in "\\PC*") { + bytesrepr::test_serialization_roundtrip(&s); + } + + #[test] + fn test_str(s in "\\PC*") { + let not_a_string_object = s.as_str(); + not_a_string_object.to_bytes().expect("should serialize a str"); + } + + #[test] + fn test_option(o in proptest::option::of(key_arb())) { + bytesrepr::test_serialization_roundtrip(&o); + } + + #[test] + fn test_unit(unit in Just(())) { + bytesrepr::test_serialization_roundtrip(&unit); + } + + #[test] + fn test_u128_serialization(u in u128_arb()) { + bytesrepr::test_serialization_roundtrip(&u); + } + + #[test] + fn test_u256_serialization(u in u256_arb()) { + bytesrepr::test_serialization_roundtrip(&u); + } + + #[test] + fn test_u512_serialization(u in u512_arb()) { + bytesrepr::test_serialization_roundtrip(&u); + } + + #[test] + fn test_key_serialization(key in key_arb()) { + bytesrepr::test_serialization_roundtrip(&key); + } + + #[test] + fn test_cl_value_serialization(cl_value in cl_value_arb()) { + bytesrepr::test_serialization_roundtrip(&cl_value); + } + + #[test] + fn test_access_rights(access_right in access_rights_arb()) { + bytesrepr::test_serialization_roundtrip(&access_right); + } + + #[test] + fn test_uref(uref in uref_arb()) { + bytesrepr::test_serialization_roundtrip(&uref); + } + + #[test] + fn test_account_hash(pk in account_hash_arb()) { + bytesrepr::test_serialization_roundtrip(&pk); + } + + #[test] + fn test_result(result in result_arb()) { + bytesrepr::test_serialization_roundtrip(&result); + } + + 
#[test] + fn test_phase_serialization(phase in phase_arb()) { + bytesrepr::test_serialization_roundtrip(&phase); + } + + #[test] + fn test_protocol_version(protocol_version in protocol_version_arb()) { + bytesrepr::test_serialization_roundtrip(&protocol_version); + } + + #[test] + fn test_sem_ver(sem_ver in sem_ver_arb()) { + bytesrepr::test_serialization_roundtrip(&sem_ver); + } + + #[test] + fn test_tuple1(t in (any::(),)) { + bytesrepr::test_serialization_roundtrip(&t); + } + + #[test] + fn test_tuple2(t in (any::(),any::())) { + bytesrepr::test_serialization_roundtrip(&t); + } + + #[test] + fn test_tuple3(t in (any::(),any::(),any::())) { + bytesrepr::test_serialization_roundtrip(&t); + } + + #[test] + fn test_tuple4(t in (any::(),any::(),any::(), any::())) { + bytesrepr::test_serialization_roundtrip(&t); + } + #[test] + fn test_tuple5(t in (any::(),any::(),any::(), any::(), any::())) { + bytesrepr::test_serialization_roundtrip(&t); + } + #[test] + fn test_tuple6(t in (any::(),any::(),any::(), any::(), any::(), any::())) { + bytesrepr::test_serialization_roundtrip(&t); + } + #[test] + fn test_tuple7(t in (any::(),any::(),any::(), any::(), any::(), any::(), any::())) { + bytesrepr::test_serialization_roundtrip(&t); + } + #[test] + fn test_tuple8(t in (any::(),any::(),any::(), any::(), any::(), any::(), any::(), any::())) { + bytesrepr::test_serialization_roundtrip(&t); + } + #[test] + fn test_tuple9(t in (any::(),any::(),any::(), any::(), any::(), any::(), any::(), any::(), any::())) { + bytesrepr::test_serialization_roundtrip(&t); + } + #[test] + fn test_tuple10(t in (any::(),any::(),any::(), any::(), any::(), any::(), any::(), any::(), any::(), any::())) { + bytesrepr::test_serialization_roundtrip(&t); + } + #[test] + fn test_ratio_u64(t in (any::(), 1..u64::max_value())) { + bytesrepr::test_serialization_roundtrip(&t); + } + } +} diff --git a/casper_types_ver_2_0/src/bytesrepr/bytes.rs b/casper_types_ver_2_0/src/bytesrepr/bytes.rs new file mode 100644 index 
00000000..cf7196ce --- /dev/null +++ b/casper_types_ver_2_0/src/bytesrepr/bytes.rs @@ -0,0 +1,405 @@ +use alloc::{ + string::String, + vec::{IntoIter, Vec}, +}; +use core::{ + cmp, fmt, + iter::FromIterator, + ops::{Deref, Index, Range, RangeFrom, RangeFull, RangeTo}, + slice, +}; + +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{ + de::{Error as SerdeError, SeqAccess, Visitor}, + Deserialize, Deserializer, Serialize, Serializer, +}; + +use super::{Error, FromBytes, ToBytes}; +use crate::{checksummed_hex, CLType, CLTyped}; + +/// A newtype wrapper for bytes that has efficient serialization routines. +#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Debug, Default, Hash)] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "Hex-encoded bytes.") +)] +#[rustfmt::skip] +pub struct Bytes( + #[cfg_attr(feature = "json-schema", schemars(skip, with = "String"))] + Vec +); + +impl Bytes { + /// Constructs a new, empty vector of bytes. + pub fn new() -> Bytes { + Bytes::default() + } + + /// Returns reference to inner container. + #[inline] + pub fn inner_bytes(&self) -> &Vec { + &self.0 + } + + /// Extracts a slice containing the entire vector. + pub fn as_slice(&self) -> &[u8] { + self + } + + /// Consumes self and returns the inner bytes. 
+ pub fn take_inner(self) -> Vec { + self.0 + } +} + +impl Deref for Bytes { + type Target = [u8]; + + fn deref(&self) -> &Self::Target { + self.0.deref() + } +} + +impl From> for Bytes { + fn from(vec: Vec) -> Self { + Self(vec) + } +} + +impl From for Vec { + fn from(bytes: Bytes) -> Self { + bytes.0 + } +} + +impl From<&[u8]> for Bytes { + fn from(bytes: &[u8]) -> Self { + Self(bytes.to_vec()) + } +} + +impl CLTyped for Bytes { + fn cl_type() -> CLType { + >::cl_type() + } +} + +impl AsRef<[u8]> for Bytes { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + +impl ToBytes for Bytes { + #[inline(always)] + fn to_bytes(&self) -> Result, Error> { + super::vec_u8_to_bytes(&self.0) + } + + #[inline(always)] + fn into_bytes(self) -> Result, Error> { + super::vec_u8_to_bytes(&self.0) + } + + #[inline(always)] + fn serialized_length(&self) -> usize { + super::vec_u8_serialized_length(&self.0) + } + + #[inline(always)] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + super::write_u8_slice(self.as_slice(), writer) + } +} + +impl FromBytes for Bytes { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), super::Error> { + let (size, remainder) = u32::from_bytes(bytes)?; + let (result, remainder) = super::safe_split_at(remainder, size as usize)?; + Ok((Bytes(result.to_vec()), remainder)) + } + + fn from_vec(stream: Vec) -> Result<(Self, Vec), Error> { + let (size, mut stream) = u32::from_vec(stream)?; + + if size as usize > stream.len() { + Err(Error::EarlyEndOfStream) + } else { + let remainder = stream.split_off(size as usize); + Ok((Bytes(stream), remainder)) + } + } +} + +impl Index for Bytes { + type Output = u8; + + fn index(&self, index: usize) -> &u8 { + let Bytes(ref dat) = self; + &dat[index] + } +} + +impl Index> for Bytes { + type Output = [u8]; + + fn index(&self, index: Range) -> &[u8] { + let Bytes(dat) = self; + &dat[index] + } +} + +impl Index> for Bytes { + type Output = [u8]; + + fn index(&self, index: RangeTo) -> &[u8] { + let 
Bytes(dat) = self; + &dat[index] + } +} + +impl Index> for Bytes { + type Output = [u8]; + + fn index(&self, index: RangeFrom) -> &[u8] { + let Bytes(dat) = self; + &dat[index] + } +} + +impl Index for Bytes { + type Output = [u8]; + + fn index(&self, _: RangeFull) -> &[u8] { + let Bytes(dat) = self; + &dat[..] + } +} + +impl FromIterator for Bytes { + #[inline] + fn from_iter>(iter: I) -> Bytes { + let vec = Vec::from_iter(iter); + Bytes(vec) + } +} + +impl<'a> IntoIterator for &'a Bytes { + type Item = &'a u8; + + type IntoIter = slice::Iter<'a, u8>; + + fn into_iter(self) -> Self::IntoIter { + self.0.iter() + } +} + +impl IntoIterator for Bytes { + type Item = u8; + + type IntoIter = IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +#[cfg(feature = "datasize")] +impl datasize::DataSize for Bytes { + const IS_DYNAMIC: bool = true; + + const STATIC_HEAP_SIZE: usize = 0; + + fn estimate_heap_size(&self) -> usize { + self.0.capacity() * std::mem::size_of::() + } +} + +const RANDOM_BYTES_MAX_LENGTH: usize = 100; + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> Bytes { + let len = rng.gen_range(0..RANDOM_BYTES_MAX_LENGTH); + let mut result = Vec::with_capacity(len); + for _ in 0..len { + result.push(rng.gen()); + } + result.into() + } +} + +struct BytesVisitor; + +impl<'de> Visitor<'de> for BytesVisitor { + type Value = Bytes; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("byte array") + } + + fn visit_seq(self, mut visitor: V) -> Result + where + V: SeqAccess<'de>, + { + let len = cmp::min(visitor.size_hint().unwrap_or(0), 4096); + let mut bytes = Vec::with_capacity(len); + + while let Some(b) = visitor.next_element()? 
{ + bytes.push(b); + } + + Ok(Bytes::from(bytes)) + } + + fn visit_bytes(self, v: &[u8]) -> Result + where + E: SerdeError, + { + Ok(Bytes::from(v)) + } + + fn visit_byte_buf(self, v: Vec) -> Result + where + E: SerdeError, + { + Ok(Bytes::from(v)) + } + + fn visit_str(self, v: &str) -> Result + where + E: SerdeError, + { + Ok(Bytes::from(v.as_bytes())) + } + + fn visit_string(self, v: String) -> Result + where + E: SerdeError, + { + Ok(Bytes::from(v.into_bytes())) + } +} + +impl<'de> Deserialize<'de> for Bytes { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + if deserializer.is_human_readable() { + let hex_string = String::deserialize(deserializer)?; + checksummed_hex::decode(hex_string) + .map(Bytes) + .map_err(SerdeError::custom) + } else { + let bytes = deserializer.deserialize_byte_buf(BytesVisitor)?; + Ok(bytes) + } + } +} + +impl Serialize for Bytes { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + if serializer.is_human_readable() { + base16::encode_lower(&self.0).serialize(serializer) + } else { + serializer.serialize_bytes(&self.0) + } + } +} + +#[cfg(test)] +mod tests { + use crate::bytesrepr::{self, Error, FromBytes, ToBytes, U32_SERIALIZED_LENGTH}; + use alloc::vec::Vec; + + use serde_json::json; + use serde_test::{assert_tokens, Configure, Token}; + + use super::Bytes; + + const TRUTH: &[u8] = &[0xde, 0xad, 0xbe, 0xef]; + + #[test] + fn vec_u8_from_bytes() { + let data: Bytes = vec![1, 2, 3, 4, 5].into(); + let data_bytes = data.to_bytes().unwrap(); + assert!(Bytes::from_bytes(&data_bytes[..U32_SERIALIZED_LENGTH / 2]).is_err()); + assert!(Bytes::from_bytes(&data_bytes[..U32_SERIALIZED_LENGTH]).is_err()); + assert!(Bytes::from_bytes(&data_bytes[..U32_SERIALIZED_LENGTH + 2]).is_err()); + } + + #[test] + fn should_serialize_deserialize_bytes() { + let data: Bytes = vec![1, 2, 3, 4, 5].into(); + bytesrepr::test_serialization_roundtrip(&data); + } + + #[test] + fn 
should_fail_to_serialize_deserialize_malicious_bytes() { + let data: Bytes = vec![1, 2, 3, 4, 5].into(); + let mut serialized = data.to_bytes().expect("should serialize data"); + serialized = serialized[..serialized.len() - 1].to_vec(); + let res: Result<(_, &[u8]), Error> = Bytes::from_bytes(&serialized); + assert_eq!(res.unwrap_err(), Error::EarlyEndOfStream); + } + + #[test] + fn should_serialize_deserialize_bytes_and_keep_rem() { + let data: Bytes = vec![1, 2, 3, 4, 5].into(); + let expected_rem: Vec = vec![6, 7, 8, 9, 10]; + let mut serialized = data.to_bytes().expect("should serialize data"); + serialized.extend(&expected_rem); + let (deserialized, rem): (Bytes, &[u8]) = + FromBytes::from_bytes(&serialized).expect("should deserialize data"); + assert_eq!(data, deserialized); + assert_eq!(&rem, &expected_rem); + } + + #[test] + fn should_ser_de_human_readable() { + let truth = vec![0xde, 0xad, 0xbe, 0xef]; + + let bytes_ser: Bytes = truth.clone().into(); + + let json_object = serde_json::to_value(bytes_ser).unwrap(); + assert_eq!(json_object, json!("deadbeef")); + + let bytes_de: Bytes = serde_json::from_value(json_object).unwrap(); + assert_eq!(bytes_de, Bytes::from(truth)); + } + + #[test] + fn should_ser_de_readable() { + let truth: Bytes = TRUTH.into(); + assert_tokens(&truth.readable(), &[Token::Str("deadbeef")]); + } + + #[test] + fn should_ser_de_compact() { + let truth: Bytes = TRUTH.into(); + assert_tokens(&truth.compact(), &[Token::Bytes(TRUTH)]); + } +} + +#[cfg(test)] +pub mod gens { + use super::Bytes; + use proptest::{ + collection::{vec, SizeRange}, + prelude::*, + }; + + pub fn bytes_arb(size: impl Into) -> impl Strategy { + vec(any::(), size).prop_map(Bytes::from) + } +} diff --git a/casper_types_ver_2_0/src/chainspec.rs b/casper_types_ver_2_0/src/chainspec.rs new file mode 100644 index 00000000..cc0f0265 --- /dev/null +++ b/casper_types_ver_2_0/src/chainspec.rs @@ -0,0 +1,260 @@ +//! 
The chainspec is a set of configuration options for the network. All validators must apply the +//! same set of options in order to join and act as a peer in a given network. + +mod accounts_config; +mod activation_point; +mod chainspec_raw_bytes; +mod core_config; +mod fee_handling; +mod global_state_update; +mod highway_config; +mod network_config; +mod next_upgrade; +mod protocol_config; +mod refund_handling; +mod transaction_config; +mod vm_config; + +use std::{fmt::Debug, sync::Arc}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +use serde::Serialize; +use tracing::error; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + Digest, EraId, ProtocolVersion, +}; +pub use accounts_config::{ + AccountConfig, AccountsConfig, AdministratorAccount, DelegatorConfig, GenesisAccount, + GenesisValidator, ValidatorConfig, +}; +pub use activation_point::ActivationPoint; +pub use chainspec_raw_bytes::ChainspecRawBytes; +pub use core_config::{ConsensusProtocolName, CoreConfig, LegacyRequiredFinality}; +pub use fee_handling::FeeHandling; +pub use global_state_update::{GlobalStateUpdate, GlobalStateUpdateConfig, GlobalStateUpdateError}; +pub use highway_config::HighwayConfig; +pub use network_config::NetworkConfig; +pub use next_upgrade::NextUpgrade; +pub use protocol_config::ProtocolConfig; +pub use refund_handling::RefundHandling; +pub use transaction_config::{DeployConfig, TransactionConfig, TransactionV1Config}; +#[cfg(any(feature = "testing", test))] +pub use transaction_config::{DEFAULT_MAX_PAYMENT_MOTES, DEFAULT_MIN_TRANSFER_MOTES}; +pub use vm_config::{ + AuctionCosts, BrTableCost, ChainspecRegistry, ControlFlowCosts, HandlePaymentCosts, + HostFunction, HostFunctionCost, HostFunctionCosts, MessageLimits, MintCosts, OpcodeCosts, + StandardPaymentCosts, StorageCosts, SystemConfig, UpgradeConfig, WasmConfig, + 
DEFAULT_HOST_FUNCTION_NEW_DICTIONARY, +}; +#[cfg(any(feature = "testing", test))] +pub use vm_config::{ + DEFAULT_ADD_BID_COST, DEFAULT_ADD_COST, DEFAULT_BIT_COST, DEFAULT_CONST_COST, + DEFAULT_CONTROL_FLOW_BLOCK_OPCODE, DEFAULT_CONTROL_FLOW_BR_IF_OPCODE, + DEFAULT_CONTROL_FLOW_BR_OPCODE, DEFAULT_CONTROL_FLOW_BR_TABLE_MULTIPLIER, + DEFAULT_CONTROL_FLOW_BR_TABLE_OPCODE, DEFAULT_CONTROL_FLOW_CALL_INDIRECT_OPCODE, + DEFAULT_CONTROL_FLOW_CALL_OPCODE, DEFAULT_CONTROL_FLOW_DROP_OPCODE, + DEFAULT_CONTROL_FLOW_ELSE_OPCODE, DEFAULT_CONTROL_FLOW_END_OPCODE, + DEFAULT_CONTROL_FLOW_IF_OPCODE, DEFAULT_CONTROL_FLOW_LOOP_OPCODE, + DEFAULT_CONTROL_FLOW_RETURN_OPCODE, DEFAULT_CONTROL_FLOW_SELECT_OPCODE, + DEFAULT_CONVERSION_COST, DEFAULT_CURRENT_MEMORY_COST, DEFAULT_DELEGATE_COST, DEFAULT_DIV_COST, + DEFAULT_GLOBAL_COST, DEFAULT_GROW_MEMORY_COST, DEFAULT_INTEGER_COMPARISON_COST, + DEFAULT_LOAD_COST, DEFAULT_LOCAL_COST, DEFAULT_MAX_STACK_HEIGHT, DEFAULT_MUL_COST, + DEFAULT_NEW_DICTIONARY_COST, DEFAULT_NOP_COST, DEFAULT_STORE_COST, DEFAULT_TRANSFER_COST, + DEFAULT_UNREACHABLE_COST, DEFAULT_WASMLESS_TRANSFER_COST, DEFAULT_WASM_MAX_MEMORY, +}; + +/// A collection of configuration settings describing the state of the system at genesis and after +/// upgrades to basic system functionality occurring after genesis. +#[derive(PartialEq, Eq, Serialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct Chainspec { + /// Protocol config. + #[serde(rename = "protocol")] + pub protocol_config: ProtocolConfig, + + /// Network config. + #[serde(rename = "network")] + pub network_config: NetworkConfig, + + /// Core config. + #[serde(rename = "core")] + pub core_config: CoreConfig, + + /// Highway config. + #[serde(rename = "highway")] + pub highway_config: HighwayConfig, + + /// Transaction Config. + #[serde(rename = "transactions")] + pub transaction_config: TransactionConfig, + + /// Wasm config. 
+ #[serde(rename = "wasm")] + pub wasm_config: WasmConfig, + + /// System costs config. + #[serde(rename = "system_costs")] + pub system_costs_config: SystemConfig, +} + +impl Chainspec { + /// Serializes `self` and hashes the resulting bytes. + pub fn hash(&self) -> Digest { + let serialized_chainspec = self.to_bytes().unwrap_or_else(|error| { + error!(%error, "failed to serialize chainspec"); + vec![] + }); + Digest::hash(serialized_chainspec) + } + + /// Serializes `self` and hashes the resulting bytes, if able. + pub fn try_hash(&self) -> Result { + let arr = self + .to_bytes() + .map_err(|_| "failed to serialize chainspec".to_string())?; + Ok(Digest::hash(arr)) + } + + /// Returns the protocol version of the chainspec. + pub fn protocol_version(&self) -> ProtocolVersion { + self.protocol_config.version + } + + /// Returns the era ID of where we should reset back to. This means stored blocks in that and + /// subsequent eras are deleted from storage. + pub fn hard_reset_to_start_of_era(&self) -> Option { + self.protocol_config + .hard_reset + .then(|| self.protocol_config.activation_point.era_id()) + } + + /// Creates an upgrade config instance from parts. 
+ pub fn upgrade_config_from_parts( + &self, + pre_state_hash: Digest, + current_protocol_version: ProtocolVersion, + era_id: EraId, + chainspec_raw_bytes: Arc, + ) -> Result { + let chainspec_registry = ChainspecRegistry::new_with_optional_global_state( + chainspec_raw_bytes.chainspec_bytes(), + chainspec_raw_bytes.maybe_global_state_bytes(), + ); + let global_state_update = match self.protocol_config.get_update_mapping() { + Ok(global_state_update) => global_state_update, + Err(err) => { + return Err(format!("failed to generate global state update: {}", err)); + } + }; + + Ok(UpgradeConfig::new( + pre_state_hash, + current_protocol_version, + self.protocol_config.version, + Some(era_id), + Some(self.core_config.validator_slots), + Some(self.core_config.auction_delay), + Some(self.core_config.locked_funds_period.millis()), + Some(self.core_config.round_seigniorage_rate), + Some(self.core_config.unbonding_delay), + global_state_update, + chainspec_registry, + )) + } +} + +#[cfg(any(feature = "testing", test))] +impl Chainspec { + /// Generates a random instance using a `TestRng`. 
+ pub fn random(rng: &mut TestRng) -> Self { + let protocol_config = ProtocolConfig::random(rng); + let network_config = NetworkConfig::random(rng); + let core_config = CoreConfig::random(rng); + let highway_config = HighwayConfig::random(rng); + let transaction_config = TransactionConfig::random(rng); + let wasm_config = rng.gen(); + let system_costs_config = rng.gen(); + + Chainspec { + protocol_config, + network_config, + core_config, + highway_config, + transaction_config, + wasm_config, + system_costs_config, + } + } +} + +impl ToBytes for Chainspec { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.protocol_config.write_bytes(writer)?; + self.network_config.write_bytes(writer)?; + self.core_config.write_bytes(writer)?; + self.highway_config.write_bytes(writer)?; + self.transaction_config.write_bytes(writer)?; + self.wasm_config.write_bytes(writer)?; + self.system_costs_config.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.protocol_config.serialized_length() + + self.network_config.serialized_length() + + self.core_config.serialized_length() + + self.highway_config.serialized_length() + + self.transaction_config.serialized_length() + + self.wasm_config.serialized_length() + + self.system_costs_config.serialized_length() + } +} + +impl FromBytes for Chainspec { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (protocol_config, remainder) = ProtocolConfig::from_bytes(bytes)?; + let (network_config, remainder) = NetworkConfig::from_bytes(remainder)?; + let (core_config, remainder) = CoreConfig::from_bytes(remainder)?; + let (highway_config, remainder) = HighwayConfig::from_bytes(remainder)?; + let (transaction_config, remainder) = TransactionConfig::from_bytes(remainder)?; + let (wasm_config, remainder) = 
WasmConfig::from_bytes(remainder)?; + let (system_costs_config, remainder) = SystemConfig::from_bytes(remainder)?; + let chainspec = Chainspec { + protocol_config, + network_config, + core_config, + highway_config, + transaction_config, + wasm_config, + system_costs_config, + }; + Ok((chainspec, remainder)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use rand::SeedableRng; + + #[test] + fn bytesrepr_roundtrip() { + let mut rng = TestRng::from_entropy(); + let chainspec = Chainspec::random(&mut rng); + bytesrepr::test_serialization_roundtrip(&chainspec); + } +} diff --git a/casper_types_ver_2_0/src/chainspec/accounts_config.rs b/casper_types_ver_2_0/src/chainspec/accounts_config.rs new file mode 100644 index 00000000..cffc9e80 --- /dev/null +++ b/casper_types_ver_2_0/src/chainspec/accounts_config.rs @@ -0,0 +1,192 @@ +//! The accounts config is a set of configuration options that is used to create accounts at +//! genesis, and set up auction contract with validators and delegators. 
+mod account_config; +mod delegator_config; +mod genesis; +mod validator_config; +#[cfg(feature = "datasize")] +use datasize::DataSize; +use serde::{Deserialize, Deserializer, Serialize}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + PublicKey, +}; + +pub use account_config::AccountConfig; +pub use delegator_config::DelegatorConfig; +pub use genesis::{AdministratorAccount, GenesisAccount, GenesisValidator}; +pub use validator_config::ValidatorConfig; + +fn sorted_vec_deserializer<'de, T, D>(deserializer: D) -> Result, D::Error> +where + T: Deserialize<'de> + Ord, + D: Deserializer<'de>, +{ + let mut vec = Vec::::deserialize(deserializer)?; + vec.sort_unstable(); + Ok(vec) +} + +/// Configuration values associated with accounts.toml +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, Clone)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct AccountsConfig { + #[serde(deserialize_with = "sorted_vec_deserializer")] + accounts: Vec, + #[serde(default, deserialize_with = "sorted_vec_deserializer")] + delegators: Vec, + #[serde( + default, + deserialize_with = "sorted_vec_deserializer", + skip_serializing_if = "Vec::is_empty" + )] + administrators: Vec, +} + +impl AccountsConfig { + /// Create new accounts config instance. + pub fn new( + accounts: Vec, + delegators: Vec, + administrators: Vec, + ) -> Self { + Self { + accounts, + delegators, + administrators, + } + } + + /// Accounts. + pub fn accounts(&self) -> &[AccountConfig] { + &self.accounts + } + + /// Delegators. + pub fn delegators(&self) -> &[DelegatorConfig] { + &self.delegators + } + + /// Administrators. + pub fn administrators(&self) -> &[AdministratorAccount] { + &self.administrators + } + + /// Account. + pub fn account(&self, public_key: &PublicKey) -> Option<&AccountConfig> { + self.accounts + .iter() + .find(|account| &account.public_key == public_key) + } + + /// All of the validators. 
+ pub fn validators(&self) -> impl Iterator { + self.accounts + .iter() + .filter(|account| account.validator.is_some()) + } + + /// Is the provided public key in the set of genesis validator public keys. + pub fn is_genesis_validator(&self, public_key: &PublicKey) -> bool { + match self.account(public_key) { + None => false, + Some(account_config) => account_config.is_genesis_validator(), + } + } + + #[cfg(any(feature = "testing", test))] + /// Generates a random instance using a `TestRng`. + pub fn random(rng: &mut TestRng) -> Self { + use rand::Rng; + + use crate::{Motes, U512}; + + let alpha = AccountConfig::random(rng); + let accounts = vec![ + alpha.clone(), + AccountConfig::random(rng), + AccountConfig::random(rng), + AccountConfig::random(rng), + ]; + + let mut delegator = DelegatorConfig::random(rng); + delegator.validator_public_key = alpha.public_key; + + let delegators = vec![delegator]; + + let admin_balance: u32 = rng.gen(); + let administrators = vec![AdministratorAccount::new( + PublicKey::random(rng), + Motes::new(U512::from(admin_balance)), + )]; + + AccountsConfig { + accounts, + delegators, + administrators, + } + } +} + +impl ToBytes for AccountsConfig { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.extend(self.accounts.to_bytes()?); + buffer.extend(self.delegators.to_bytes()?); + buffer.extend(self.administrators.to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.accounts.serialized_length() + + self.delegators.serialized_length() + + self.administrators.serialized_length() + } +} + +impl FromBytes for AccountsConfig { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (accounts, remainder) = FromBytes::from_bytes(bytes)?; + let (delegators, remainder) = FromBytes::from_bytes(remainder)?; + let (administrators, remainder) = FromBytes::from_bytes(remainder)?; + let accounts_config = AccountsConfig::new(accounts, 
delegators, administrators); + Ok((accounts_config, remainder)) + } +} + +impl From for Vec { + fn from(accounts_config: AccountsConfig) -> Self { + let mut genesis_accounts = Vec::with_capacity(accounts_config.accounts.len()); + for account_config in accounts_config.accounts { + let genesis_account = account_config.into(); + genesis_accounts.push(genesis_account); + } + for delegator_config in accounts_config.delegators { + let genesis_account = delegator_config.into(); + genesis_accounts.push(genesis_account); + } + + for administrator_config in accounts_config.administrators { + let administrator_account = administrator_config.into(); + genesis_accounts.push(administrator_account); + } + + genesis_accounts + } +} + +#[cfg(any(feature = "testing", test))] +mod tests { + #[cfg(test)] + use crate::{bytesrepr, testing::TestRng, AccountsConfig}; + + #[test] + fn serialization_roundtrip() { + let mut rng = TestRng::new(); + let accounts_config = AccountsConfig::random(&mut rng); + bytesrepr::test_serialization_roundtrip(&accounts_config); + } +} diff --git a/casper_types_ver_2_0/src/chainspec/accounts_config/account_config.rs b/casper_types_ver_2_0/src/chainspec/accounts_config/account_config.rs new file mode 100644 index 00000000..7c998d35 --- /dev/null +++ b/casper_types_ver_2_0/src/chainspec/accounts_config/account_config.rs @@ -0,0 +1,138 @@ +#[cfg(feature = "datasize")] +use datasize::DataSize; +use num::Zero; + +#[cfg(any(feature = "testing", test))] +use rand::{distributions::Standard, prelude::*}; + +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + GenesisAccount, Motes, PublicKey, +}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +#[cfg(any(feature = "testing", test))] +use crate::{SecretKey, U512}; + +use super::ValidatorConfig; + +/// Configuration of an individial account in accounts.toml +#[derive(PartialEq, Ord, PartialOrd, Eq, Serialize, Deserialize, Debug, Clone)] 
+#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct AccountConfig { + /// Public Key. + pub public_key: PublicKey, + /// Balance. + pub balance: Motes, + /// Validator config. + pub validator: Option, +} + +impl AccountConfig { + /// Creates a new `AccountConfig`. + pub fn new(public_key: PublicKey, balance: Motes, validator: Option) -> Self { + Self { + public_key, + balance, + validator, + } + } + + /// Public key. + pub fn public_key(&self) -> PublicKey { + self.public_key.clone() + } + + /// Balance. + pub fn balance(&self) -> Motes { + self.balance + } + + /// Bonded amount. + pub fn bonded_amount(&self) -> Motes { + match self.validator { + Some(validator_config) => validator_config.bonded_amount(), + None => Motes::zero(), + } + } + + /// Is this a genesis validator? + pub fn is_genesis_validator(&self) -> bool { + self.validator.is_some() + } + + #[cfg(any(feature = "testing", test))] + /// Generates a random instance using a `TestRng`. + pub fn random(rng: &mut TestRng) -> Self { + let public_key = + PublicKey::from(&SecretKey::ed25519_from_bytes(rng.gen::<[u8; 32]>()).unwrap()); + let balance = Motes::new(rng.gen()); + let validator = rng.gen(); + + AccountConfig { + public_key, + balance, + validator, + } + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> AccountConfig { + let secret_key = SecretKey::ed25519_from_bytes(rng.gen::<[u8; 32]>()).unwrap(); + let public_key = PublicKey::from(&secret_key); + + let mut u512_array = [0u8; 64]; + rng.fill_bytes(u512_array.as_mut()); + let balance = Motes::new(U512::from(u512_array)); + + let validator = rng.gen(); + + AccountConfig::new(public_key, balance, validator) + } +} + +impl ToBytes for AccountConfig { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.extend(self.public_key.to_bytes()?); + buffer.extend(self.balance.to_bytes()?); + 
buffer.extend(self.validator.to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.public_key.serialized_length() + + self.balance.serialized_length() + + self.validator.serialized_length() + } +} + +impl FromBytes for AccountConfig { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (public_key, remainder) = FromBytes::from_bytes(bytes)?; + let (balance, remainder) = FromBytes::from_bytes(remainder)?; + let (validator, remainder) = FromBytes::from_bytes(remainder)?; + let account_config = AccountConfig { + public_key, + balance, + validator, + }; + Ok((account_config, remainder)) + } +} + +impl From for GenesisAccount { + fn from(account_config: AccountConfig) -> Self { + let genesis_validator = account_config.validator.map(Into::into); + GenesisAccount::account( + account_config.public_key, + account_config.balance, + genesis_validator, + ) + } +} diff --git a/casper_types_ver_2_0/src/chainspec/accounts_config/delegator_config.rs b/casper_types_ver_2_0/src/chainspec/accounts_config/delegator_config.rs new file mode 100644 index 00000000..b91422b5 --- /dev/null +++ b/casper_types_ver_2_0/src/chainspec/accounts_config/delegator_config.rs @@ -0,0 +1,133 @@ +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::{distributions::Standard, prelude::*}; +use serde::{Deserialize, Serialize}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + GenesisAccount, Motes, PublicKey, +}; +#[cfg(any(feature = "testing", test))] +use crate::{SecretKey, U512}; + +/// Configuration values related to a delegator. +#[derive(PartialEq, Ord, PartialOrd, Eq, Serialize, Deserialize, Debug, Clone)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct DelegatorConfig { + /// Validator public key. + pub validator_public_key: PublicKey, + /// Delegator public key. 
+ pub delegator_public_key: PublicKey, + /// Balance for this delegator in Motes. + pub balance: Motes, + /// Delegated amount in Motes. + pub delegated_amount: Motes, +} + +impl DelegatorConfig { + /// Creates a new DelegatorConfig. + pub fn new( + validator_public_key: PublicKey, + delegator_public_key: PublicKey, + balance: Motes, + delegated_amount: Motes, + ) -> Self { + Self { + validator_public_key, + delegator_public_key, + balance, + delegated_amount, + } + } + + #[cfg(any(feature = "testing", test))] + /// Generates a random instance using a `TestRng`. + pub fn random(rng: &mut TestRng) -> Self { + let validator_public_key = + PublicKey::from(&SecretKey::ed25519_from_bytes(rng.gen::<[u8; 32]>()).unwrap()); + let delegator_public_key = + PublicKey::from(&SecretKey::ed25519_from_bytes(rng.gen::<[u8; 32]>()).unwrap()); + let balance = Motes::new(U512::from(rng.gen::())); + let delegated_amount = Motes::new(U512::from(rng.gen::())); + + DelegatorConfig { + validator_public_key, + delegator_public_key, + balance, + delegated_amount, + } + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> DelegatorConfig { + let validator_secret_key = SecretKey::ed25519_from_bytes(rng.gen::<[u8; 32]>()).unwrap(); + let delegator_secret_key = SecretKey::ed25519_from_bytes(rng.gen::<[u8; 32]>()).unwrap(); + + let validator_public_key = PublicKey::from(&validator_secret_key); + let delegator_public_key = PublicKey::from(&delegator_secret_key); + + let mut u512_array = [0u8; 64]; + rng.fill_bytes(u512_array.as_mut()); + let balance = Motes::new(U512::from(u512_array)); + + rng.fill_bytes(u512_array.as_mut()); + let delegated_amount = Motes::new(U512::from(u512_array)); + + DelegatorConfig::new( + validator_public_key, + delegator_public_key, + balance, + delegated_amount, + ) + } +} + +impl ToBytes for DelegatorConfig { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = 
bytesrepr::allocate_buffer(self)?; + buffer.extend(self.validator_public_key.to_bytes()?); + buffer.extend(self.delegator_public_key.to_bytes()?); + buffer.extend(self.balance.to_bytes()?); + buffer.extend(self.delegated_amount.to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.validator_public_key.serialized_length() + + self.delegator_public_key.serialized_length() + + self.balance.serialized_length() + + self.delegated_amount.serialized_length() + } +} + +impl FromBytes for DelegatorConfig { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (validator_public_key, remainder) = FromBytes::from_bytes(bytes)?; + let (delegator_public_key, remainder) = FromBytes::from_bytes(remainder)?; + let (balance, remainder) = FromBytes::from_bytes(remainder)?; + let (delegated_amount, remainder) = FromBytes::from_bytes(remainder)?; + let delegator_config = DelegatorConfig { + validator_public_key, + delegator_public_key, + balance, + delegated_amount, + }; + Ok((delegator_config, remainder)) + } +} + +impl From for GenesisAccount { + fn from(delegator_config: DelegatorConfig) -> Self { + GenesisAccount::delegator( + delegator_config.validator_public_key, + delegator_config.delegator_public_key, + delegator_config.balance, + delegator_config.delegated_amount, + ) + } +} diff --git a/casper_types_ver_2_0/src/chainspec/accounts_config/genesis.rs b/casper_types_ver_2_0/src/chainspec/accounts_config/genesis.rs new file mode 100644 index 00000000..08d601ee --- /dev/null +++ b/casper_types_ver_2_0/src/chainspec/accounts_config/genesis.rs @@ -0,0 +1,497 @@ +#[cfg(feature = "datasize")] +use datasize::DataSize; +use num_traits::Zero; +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +use serde::{Deserialize, Serialize}; + +use crate::{ + account::AccountHash, + bytesrepr, + bytesrepr::{FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + system::auction::DelegationRate, + Motes, PublicKey, SecretKey, +}; + +const 
TAG_LENGTH: usize = U8_SERIALIZED_LENGTH; + +#[repr(u8)] +enum GenesisAccountTag { + System = 0, + Account = 1, + Delegator = 2, + Administrator = 3, +} + +/// Represents details about genesis account's validator status. +#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct GenesisValidator { + /// Stake of a genesis validator. + bonded_amount: Motes, + /// Delegation rate in the range of 0-100. + delegation_rate: DelegationRate, +} + +impl ToBytes for GenesisValidator { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.extend(self.bonded_amount.to_bytes()?); + buffer.extend(self.delegation_rate.to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.bonded_amount.serialized_length() + self.delegation_rate.serialized_length() + } +} + +impl FromBytes for GenesisValidator { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (bonded_amount, remainder) = FromBytes::from_bytes(bytes)?; + let (delegation_rate, remainder) = FromBytes::from_bytes(remainder)?; + let genesis_validator = GenesisValidator { + bonded_amount, + delegation_rate, + }; + Ok((genesis_validator, remainder)) + } +} + +impl GenesisValidator { + /// Creates new [`GenesisValidator`]. + pub fn new(bonded_amount: Motes, delegation_rate: DelegationRate) -> Self { + Self { + bonded_amount, + delegation_rate, + } + } + + /// Returns the bonded amount of a genesis validator. + pub fn bonded_amount(&self) -> Motes { + self.bonded_amount + } + + /// Returns the delegation rate of a genesis validator. 
+ pub fn delegation_rate(&self) -> DelegationRate { + self.delegation_rate + } +} + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> GenesisValidator { + let bonded_amount = Motes::new(rng.gen()); + let delegation_rate = rng.gen(); + + GenesisValidator::new(bonded_amount, delegation_rate) + } +} + +/// Special account in the system that is useful only for some private chains. +#[derive(Debug, Clone, PartialEq, Eq, Ord, PartialOrd, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct AdministratorAccount { + public_key: PublicKey, + balance: Motes, +} + +impl AdministratorAccount { + /// Creates new special account. + pub fn new(public_key: PublicKey, balance: Motes) -> Self { + Self { + public_key, + balance, + } + } + + /// Gets a reference to the administrator account's public key. + pub fn public_key(&self) -> &PublicKey { + &self.public_key + } +} + +impl ToBytes for AdministratorAccount { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let AdministratorAccount { + public_key, + balance, + } = self; + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.extend(public_key.to_bytes()?); + buffer.extend(balance.to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + let AdministratorAccount { + public_key, + balance, + } = self; + public_key.serialized_length() + balance.serialized_length() + } +} + +impl FromBytes for AdministratorAccount { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (public_key, remainder) = FromBytes::from_bytes(bytes)?; + let (balance, remainder) = FromBytes::from_bytes(remainder)?; + let administrator_account = AdministratorAccount { + public_key, + balance, + }; + Ok((administrator_account, remainder)) + } +} + +/// This enum represents possible states of a genesis account. 
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub enum GenesisAccount { + /// This variant is for internal use only - genesis process will create a virtual system + /// account and use it to call system contracts. + System, + /// Genesis account that will be created. + Account { + /// Public key of a genesis account. + public_key: PublicKey, + /// Starting balance of a genesis account. + balance: Motes, + /// If set, it will make this account a genesis validator. + validator: Option, + }, + /// The genesis delegator is a special account that will be created as a delegator. + /// It does not have any stake of its own, but will create a real account in the system + /// which will delegate to a genesis validator. + Delegator { + /// Validator's public key that has to refer to other instance of + /// [`GenesisAccount::Account`] with a `validator` field set. + validator_public_key: PublicKey, + /// Public key of the genesis account that will be created as part of this entry. + delegator_public_key: PublicKey, + /// Starting balance of the account. + balance: Motes, + /// Delegated amount for given `validator_public_key`. + delegated_amount: Motes, + }, + /// An administrative account in the genesis process. + /// + /// This variant makes sense for some private chains. + Administrator(AdministratorAccount), +} + +impl From for GenesisAccount { + fn from(v: AdministratorAccount) -> Self { + Self::Administrator(v) + } +} + +impl GenesisAccount { + /// Create a system account variant. + pub fn system() -> Self { + Self::System + } + + /// Create a standard account variant. + pub fn account( + public_key: PublicKey, + balance: Motes, + validator: Option, + ) -> Self { + Self::Account { + public_key, + balance, + validator, + } + } + + /// Create a delegator account variant. 
+ pub fn delegator( + validator_public_key: PublicKey, + delegator_public_key: PublicKey, + balance: Motes, + delegated_amount: Motes, + ) -> Self { + Self::Delegator { + validator_public_key, + delegator_public_key, + balance, + delegated_amount, + } + } + + /// The public key (if any) associated with the account. + pub fn public_key(&self) -> PublicKey { + match self { + GenesisAccount::System => PublicKey::System, + GenesisAccount::Account { public_key, .. } => public_key.clone(), + GenesisAccount::Delegator { + delegator_public_key, + .. + } => delegator_public_key.clone(), + GenesisAccount::Administrator(AdministratorAccount { public_key, .. }) => { + public_key.clone() + } + } + } + + /// The account hash for the account. + pub fn account_hash(&self) -> AccountHash { + match self { + GenesisAccount::System => PublicKey::System.to_account_hash(), + GenesisAccount::Account { public_key, .. } => public_key.to_account_hash(), + GenesisAccount::Delegator { + delegator_public_key, + .. + } => delegator_public_key.to_account_hash(), + GenesisAccount::Administrator(AdministratorAccount { public_key, .. }) => { + public_key.to_account_hash() + } + } + } + + /// How many motes are to be deposited in the account's main purse. + pub fn balance(&self) -> Motes { + match self { + GenesisAccount::System => Motes::zero(), + GenesisAccount::Account { balance, .. } => *balance, + GenesisAccount::Delegator { balance, .. } => *balance, + GenesisAccount::Administrator(AdministratorAccount { balance, .. }) => *balance, + } + } + + /// How many motes are to be staked. + /// + /// Staked accounts are either validators with some amount of bonded stake or delgators with + /// some amount of delegated stake. + pub fn staked_amount(&self) -> Motes { + match self { + GenesisAccount::System { .. } + | GenesisAccount::Account { + validator: None, .. + } => Motes::zero(), + GenesisAccount::Account { + validator: Some(genesis_validator), + .. 
+ } => genesis_validator.bonded_amount(), + GenesisAccount::Delegator { + delegated_amount, .. + } => *delegated_amount, + GenesisAccount::Administrator(AdministratorAccount { + public_key: _, + balance: _, + }) => { + // This is defaulted to zero because administrator accounts are filtered out before + // validator set is created at the genesis. + Motes::zero() + } + } + } + + /// What is the delegation rate of a validator. + pub fn delegation_rate(&self) -> DelegationRate { + match self { + GenesisAccount::Account { + validator: Some(genesis_validator), + .. + } => genesis_validator.delegation_rate(), + GenesisAccount::System + | GenesisAccount::Account { + validator: None, .. + } + | GenesisAccount::Delegator { .. } => { + // This value represents a delegation rate in invalid state that system is supposed + // to reject if used. + DelegationRate::max_value() + } + GenesisAccount::Administrator(AdministratorAccount { .. }) => { + DelegationRate::max_value() + } + } + } + + /// Is this a virtual system account. + pub fn is_system_account(&self) -> bool { + matches!(self, GenesisAccount::System { .. }) + } + + /// Is this a validator account. + pub fn is_validator(&self) -> bool { + match self { + GenesisAccount::Account { + validator: Some(_), .. + } => true, + GenesisAccount::System { .. } + | GenesisAccount::Account { + validator: None, .. + } + | GenesisAccount::Delegator { .. } + | GenesisAccount::Administrator(AdministratorAccount { .. }) => false, + } + } + + /// Details about the genesis validator. + pub fn validator(&self) -> Option<&GenesisValidator> { + match self { + GenesisAccount::Account { + validator: Some(genesis_validator), + .. + } => Some(genesis_validator), + _ => None, + } + } + + /// Is this a delegator account. + pub fn is_delegator(&self) -> bool { + matches!(self, GenesisAccount::Delegator { .. }) + } + + /// Details about the genesis delegator. 
+ pub fn as_delegator(&self) -> Option<(&PublicKey, &PublicKey, &Motes, &Motes)> { + match self { + GenesisAccount::Delegator { + validator_public_key, + delegator_public_key, + balance, + delegated_amount, + } => Some(( + validator_public_key, + delegator_public_key, + balance, + delegated_amount, + )), + _ => None, + } + } + + /// Gets the administrator account variant. + pub fn as_administrator_account(&self) -> Option<&AdministratorAccount> { + if let Self::Administrator(v) = self { + Some(v) + } else { + None + } + } +} + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> GenesisAccount { + let mut bytes = [0u8; 32]; + rng.fill_bytes(&mut bytes[..]); + let secret_key = SecretKey::ed25519_from_bytes(bytes).unwrap(); + let public_key = PublicKey::from(&secret_key); + let balance = Motes::new(rng.gen()); + let validator = rng.gen(); + + GenesisAccount::account(public_key, balance, validator) + } +} + +impl ToBytes for GenesisAccount { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + match self { + GenesisAccount::System => { + buffer.push(GenesisAccountTag::System as u8); + } + GenesisAccount::Account { + public_key, + balance, + validator, + } => { + buffer.push(GenesisAccountTag::Account as u8); + buffer.extend(public_key.to_bytes()?); + buffer.extend(balance.value().to_bytes()?); + buffer.extend(validator.to_bytes()?); + } + GenesisAccount::Delegator { + validator_public_key, + delegator_public_key, + balance, + delegated_amount, + } => { + buffer.push(GenesisAccountTag::Delegator as u8); + buffer.extend(validator_public_key.to_bytes()?); + buffer.extend(delegator_public_key.to_bytes()?); + buffer.extend(balance.value().to_bytes()?); + buffer.extend(delegated_amount.value().to_bytes()?); + } + GenesisAccount::Administrator(administrator_account) => { + buffer.push(GenesisAccountTag::Administrator as u8); + buffer.extend(administrator_account.to_bytes()?); + } + } + Ok(buffer) + } + + 
fn serialized_length(&self) -> usize { + match self { + GenesisAccount::System => TAG_LENGTH, + GenesisAccount::Account { + public_key, + balance, + validator, + } => { + public_key.serialized_length() + + balance.value().serialized_length() + + validator.serialized_length() + + TAG_LENGTH + } + GenesisAccount::Delegator { + validator_public_key, + delegator_public_key, + balance, + delegated_amount, + } => { + validator_public_key.serialized_length() + + delegator_public_key.serialized_length() + + balance.value().serialized_length() + + delegated_amount.value().serialized_length() + + TAG_LENGTH + } + GenesisAccount::Administrator(administrator_account) => { + administrator_account.serialized_length() + TAG_LENGTH + } + } + } +} + +impl FromBytes for GenesisAccount { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + tag if tag == GenesisAccountTag::System as u8 => { + let genesis_account = GenesisAccount::system(); + Ok((genesis_account, remainder)) + } + tag if tag == GenesisAccountTag::Account as u8 => { + let (public_key, remainder) = FromBytes::from_bytes(remainder)?; + let (balance, remainder) = FromBytes::from_bytes(remainder)?; + let (validator, remainder) = FromBytes::from_bytes(remainder)?; + let genesis_account = GenesisAccount::account(public_key, balance, validator); + Ok((genesis_account, remainder)) + } + tag if tag == GenesisAccountTag::Delegator as u8 => { + let (validator_public_key, remainder) = FromBytes::from_bytes(remainder)?; + let (delegator_public_key, remainder) = FromBytes::from_bytes(remainder)?; + let (balance, remainder) = FromBytes::from_bytes(remainder)?; + let (delegated_amount_value, remainder) = FromBytes::from_bytes(remainder)?; + let genesis_account = GenesisAccount::delegator( + validator_public_key, + delegator_public_key, + balance, + Motes::new(delegated_amount_value), + ); + Ok((genesis_account, remainder)) + } + tag if tag == 
GenesisAccountTag::Administrator as u8 => { + let (administrator_account, remainder) = + AdministratorAccount::from_bytes(remainder)?; + let genesis_account = GenesisAccount::Administrator(administrator_account); + Ok((genesis_account, remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} diff --git a/casper_types_ver_2_0/src/chainspec/accounts_config/validator_config.rs b/casper_types_ver_2_0/src/chainspec/accounts_config/validator_config.rs new file mode 100644 index 00000000..588faa49 --- /dev/null +++ b/casper_types_ver_2_0/src/chainspec/accounts_config/validator_config.rs @@ -0,0 +1,102 @@ +#[cfg(feature = "datasize")] +use datasize::DataSize; +use num::Zero; +#[cfg(any(feature = "testing", test))] +use rand::{distributions::Standard, prelude::*}; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + system::auction::DelegationRate, + GenesisValidator, Motes, +}; +#[cfg(any(feature = "testing", test))] +use crate::{testing::TestRng, U512}; + +/// Validator account configuration. +#[derive(PartialEq, Eq, Ord, PartialOrd, Serialize, Deserialize, Debug, Copy, Clone)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct ValidatorConfig { + bonded_amount: Motes, + #[serde(default = "DelegationRate::zero")] + delegation_rate: DelegationRate, +} + +impl ValidatorConfig { + /// Creates a new `ValidatorConfig`. + pub fn new(bonded_amount: Motes, delegation_rate: DelegationRate) -> Self { + Self { + bonded_amount, + delegation_rate, + } + } + + /// Delegation rate. + pub fn delegation_rate(&self) -> DelegationRate { + self.delegation_rate + } + + /// Bonded amount. + pub fn bonded_amount(&self) -> Motes { + self.bonded_amount + } + + /// Returns a random `ValidatorConfig`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + let bonded_amount = Motes::new(U512::from(rng.gen::())); + let delegation_rate = rng.gen(); + + ValidatorConfig { + bonded_amount, + delegation_rate, + } + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> ValidatorConfig { + let mut u512_array = [0; 64]; + rng.fill_bytes(u512_array.as_mut()); + let bonded_amount = Motes::new(U512::from(u512_array)); + + let delegation_rate = rng.gen(); + + ValidatorConfig::new(bonded_amount, delegation_rate) + } +} + +impl ToBytes for ValidatorConfig { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.extend(self.bonded_amount.to_bytes()?); + buffer.extend(self.delegation_rate.to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.bonded_amount.serialized_length() + self.delegation_rate.serialized_length() + } +} + +impl FromBytes for ValidatorConfig { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (bonded_amount, remainder) = FromBytes::from_bytes(bytes)?; + let (delegation_rate, remainder) = FromBytes::from_bytes(remainder)?; + let account_config = ValidatorConfig { + bonded_amount, + delegation_rate, + }; + Ok((account_config, remainder)) + } +} + +impl From for GenesisValidator { + fn from(account_config: ValidatorConfig) -> Self { + GenesisValidator::new( + account_config.bonded_amount(), + account_config.delegation_rate, + ) + } +} diff --git a/casper_types_ver_2_0/src/chainspec/activation_point.rs b/casper_types_ver_2_0/src/chainspec/activation_point.rs new file mode 100644 index 00000000..1410adea --- /dev/null +++ b/casper_types_ver_2_0/src/chainspec/activation_point.rs @@ -0,0 +1,121 @@ +use std::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; 
+#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + EraId, Timestamp, +}; + +const ERA_ID_TAG: u8 = 0; +const GENESIS_TAG: u8 = 1; + +/// The first era to which the associated protocol version applies. +#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(untagged)] +pub enum ActivationPoint { + /// Era id. + EraId(EraId), + /// Genesis timestamp. + Genesis(Timestamp), +} + +impl ActivationPoint { + /// Returns whether we should upgrade the node due to the next era being the upgrade activation + /// point. + pub fn should_upgrade(&self, era_being_deactivated: &EraId) -> bool { + match self { + ActivationPoint::EraId(era_id) => era_being_deactivated.successor() >= *era_id, + ActivationPoint::Genesis(_) => false, + } + } + + /// Returns the Era ID if `self` is of `EraId` variant, or else 0 if `Genesis`. + pub fn era_id(&self) -> EraId { + match self { + ActivationPoint::EraId(era_id) => *era_id, + ActivationPoint::Genesis(_) => EraId::from(0), + } + } + + /// Returns the timestamp if `self` is of `Genesis` variant, or else `None`. + pub fn genesis_timestamp(&self) -> Option { + match self { + ActivationPoint::EraId(_) => None, + ActivationPoint::Genesis(timestamp) => Some(*timestamp), + } + } + + /// Returns a random `ActivationPoint`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + if rng.gen() { + ActivationPoint::EraId(EraId::random(rng)) + } else { + ActivationPoint::Genesis(Timestamp::random(rng)) + } + } +} + +impl Display for ActivationPoint { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + match self { + ActivationPoint::EraId(era_id) => write!(formatter, "activation point {}", era_id), + ActivationPoint::Genesis(timestamp) => { + write!(formatter, "activation point {}", timestamp) + } + } + } +} + +impl ToBytes for ActivationPoint { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + match self { + ActivationPoint::EraId(era_id) => { + let mut buffer = vec![ERA_ID_TAG]; + buffer.extend(era_id.to_bytes()?); + Ok(buffer) + } + ActivationPoint::Genesis(timestamp) => { + let mut buffer = vec![GENESIS_TAG]; + buffer.extend(timestamp.to_bytes()?); + Ok(buffer) + } + } + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + ActivationPoint::EraId(era_id) => era_id.serialized_length(), + ActivationPoint::Genesis(timestamp) => timestamp.serialized_length(), + } + } +} + +impl FromBytes for ActivationPoint { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + ERA_ID_TAG => { + let (era_id, remainder) = EraId::from_bytes(remainder)?; + Ok((ActivationPoint::EraId(era_id), remainder)) + } + GENESIS_TAG => { + let (timestamp, remainder) = Timestamp::from_bytes(remainder)?; + Ok((ActivationPoint::Genesis(timestamp), remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} diff --git a/casper_types_ver_2_0/src/chainspec/chainspec_raw_bytes.rs b/casper_types_ver_2_0/src/chainspec/chainspec_raw_bytes.rs new file mode 100644 index 00000000..37c8347d --- /dev/null +++ b/casper_types_ver_2_0/src/chainspec/chainspec_raw_bytes.rs @@ -0,0 +1,196 @@ +use core::fmt::{self, Debug, Display, Formatter}; + +use 
crate::bytesrepr::{self, Bytes, FromBytes, ToBytes}; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +/// The raw bytes of the chainspec.toml, genesis accounts.toml, and global_state.toml files. +#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct ChainspecRawBytes { + /// Raw bytes of the current chainspec.toml file. + chainspec_bytes: Bytes, + /// Raw bytes of the current genesis accounts.toml file. + maybe_genesis_accounts_bytes: Option, + /// Raw bytes of the current global_state.toml file. + maybe_global_state_bytes: Option, +} + +impl ChainspecRawBytes { + /// Create an instance from parts. + pub fn new( + chainspec_bytes: Bytes, + maybe_genesis_accounts_bytes: Option, + maybe_global_state_bytes: Option, + ) -> Self { + ChainspecRawBytes { + chainspec_bytes, + maybe_genesis_accounts_bytes, + maybe_global_state_bytes, + } + } + + /// The bytes of the chainspec file. + pub fn chainspec_bytes(&self) -> &[u8] { + self.chainspec_bytes.as_slice() + } + + /// The bytes of global state account entries, when present for a protocol version. + pub fn maybe_genesis_accounts_bytes(&self) -> Option<&[u8]> { + match self.maybe_genesis_accounts_bytes.as_ref() { + Some(bytes) => Some(bytes.as_slice()), + None => None, + } + } + + /// The bytes of global state update entries, when present for a protocol version. + pub fn maybe_global_state_bytes(&self) -> Option<&[u8]> { + match self.maybe_global_state_bytes.as_ref() { + Some(bytes) => Some(bytes.as_slice()), + None => None, + } + } + + /// Returns a random `ChainspecRawBytes`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + use rand::Rng; + + let chainspec_bytes = Bytes::from(rng.random_vec(0..1024)); + let maybe_genesis_accounts_bytes = rng + .gen::() + .then(|| Bytes::from(rng.random_vec(0..1024))); + let maybe_global_state_bytes = rng + .gen::() + .then(|| Bytes::from(rng.random_vec(0..1024))); + ChainspecRawBytes { + chainspec_bytes, + maybe_genesis_accounts_bytes, + maybe_global_state_bytes, + } + } +} + +impl Debug for ChainspecRawBytes { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + let genesis_accounts_bytes_owned: Bytes; + let global_state_bytes_owned: Bytes; + f.debug_struct("ChainspecRawBytes") + .field( + "chainspec_bytes", + &self.chainspec_bytes[0..16].to_ascii_uppercase(), + ) + .field( + "maybe_genesis_accounts_bytes", + match self.maybe_genesis_accounts_bytes.as_ref() { + Some(genesis_accounts_bytes) => { + genesis_accounts_bytes_owned = + genesis_accounts_bytes[0..16].to_ascii_uppercase().into(); + &genesis_accounts_bytes_owned + } + None => &self.maybe_genesis_accounts_bytes, + }, + ) + .field( + "maybe_global_state_bytes", + match self.maybe_global_state_bytes.as_ref() { + Some(global_state_bytes) => { + global_state_bytes_owned = + global_state_bytes[0..16].to_ascii_uppercase().into(); + &global_state_bytes_owned + } + None => &self.maybe_global_state_bytes, + }, + ) + .finish() + } +} + +impl Display for ChainspecRawBytes { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "{}", + String::from_utf8_lossy(&self.chainspec_bytes) + )?; + if let Some(genesis_accounts_bytes) = &self.maybe_genesis_accounts_bytes { + write!( + formatter, + "{}", + String::from_utf8_lossy(genesis_accounts_bytes) + )?; + } + if let Some(global_state_bytes) = &self.maybe_global_state_bytes { + write!(formatter, "{}", String::from_utf8_lossy(global_state_bytes))?; + } + Ok(()) + } +} + +impl ToBytes for ChainspecRawBytes { + fn to_bytes(&self) -> Result, 
bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + let ChainspecRawBytes { + chainspec_bytes, + maybe_genesis_accounts_bytes, + maybe_global_state_bytes, + } = self; + + chainspec_bytes.write_bytes(writer)?; + maybe_genesis_accounts_bytes.write_bytes(writer)?; + maybe_global_state_bytes.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + let ChainspecRawBytes { + chainspec_bytes, + maybe_genesis_accounts_bytes, + maybe_global_state_bytes, + } = self; + chainspec_bytes.serialized_length() + + maybe_genesis_accounts_bytes.serialized_length() + + maybe_global_state_bytes.serialized_length() + } +} + +impl FromBytes for ChainspecRawBytes { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (chainspec_bytes, remainder) = FromBytes::from_bytes(bytes)?; + let (maybe_genesis_accounts_bytes, remainder) = FromBytes::from_bytes(remainder)?; + let (maybe_global_state_bytes, remainder) = FromBytes::from_bytes(remainder)?; + + Ok(( + ChainspecRawBytes { + chainspec_bytes, + maybe_genesis_accounts_bytes, + maybe_global_state_bytes, + }, + remainder, + )) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = ChainspecRawBytes::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/casper_types_ver_2_0/src/chainspec/core_config.rs b/casper_types_ver_2_0/src/chainspec/core_config.rs new file mode 100644 index 00000000..8f5b5821 --- /dev/null +++ b/casper_types_ver_2_0/src/chainspec/core_config.rs @@ -0,0 +1,538 @@ +use alloc::collections::BTreeSet; +#[cfg(feature = "datasize")] +use datasize::DataSize; +use num::rational::Ratio; +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; + +use 
serde::{ + de::{Deserializer, Error as DeError}, + Deserialize, Serialize, Serializer, +}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + ProtocolVersion, PublicKey, TimeDiff, +}; + +use super::{fee_handling::FeeHandling, refund_handling::RefundHandling}; + +/// Configuration values associated with the core protocol. +#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. +#[serde(deny_unknown_fields)] +pub struct CoreConfig { + /// Duration of an era. + pub era_duration: TimeDiff, + + /// Minimum era height. + pub minimum_era_height: u64, + + /// Minimum block time. + pub minimum_block_time: TimeDiff, + + /// Validator slots. + pub validator_slots: u32, + + /// Finality threshold fraction. + #[cfg_attr(feature = "datasize", data_size(skip))] + pub finality_threshold_fraction: Ratio, + + /// Protocol version from which nodes are required to hold strict finality signatures. + pub start_protocol_version_with_strict_finality_signatures_required: ProtocolVersion, + + /// Which finality is required for legacy blocks. + /// Used to determine finality sufficiency for new joiners syncing blocks created + /// in a protocol version before + /// `start_protocol_version_with_strict_finality_signatures_required`. + pub legacy_required_finality: LegacyRequiredFinality, + + /// Number of eras before an auction actually defines the set of validators. + /// If you bond with a sufficient bid in era N, you will be a validator in era N + + /// auction_delay + 1 + pub auction_delay: u64, + + /// The period after genesis during which a genesis validator's bid is locked. + pub locked_funds_period: TimeDiff, + + /// The period in which genesis validator's bid is released over time after it's unlocked. 
+ pub vesting_schedule_period: TimeDiff, + + /// The delay in number of eras for paying out the unbonding amount. + pub unbonding_delay: u64, + + /// Round seigniorage rate represented as a fractional number. + #[cfg_attr(feature = "datasize", data_size(skip))] + pub round_seigniorage_rate: Ratio, + + /// Maximum number of associated keys for a single account. + pub max_associated_keys: u32, + + /// Maximum height of contract runtime call stack. + pub max_runtime_call_stack_height: u32, + + /// The minimum bound of motes that can be delegated to a validator. + pub minimum_delegation_amount: u64, + + /// Global state prune batch size (0 means the feature is off in the current protocol version). + pub prune_batch_size: u64, + + /// Enables strict arguments checking when calling a contract. + pub strict_argument_checking: bool, + + /// How many peers to simultaneously ask when sync leaping. + pub simultaneous_peer_requests: u8, + + /// Which consensus protocol to use. + pub consensus_protocol: ConsensusProtocolName, + + /// The maximum amount of delegators per validator. + /// if the value is 0, there is no maximum capacity. + pub max_delegators_per_validator: u32, + + /// The split in finality signature rewards between block producer and participating signers. + #[cfg_attr(feature = "datasize", data_size(skip))] + pub finders_fee: Ratio, + + /// The proportion of baseline rewards going to reward finality signatures specifically. + #[cfg_attr(feature = "datasize", data_size(skip))] + pub finality_signature_proportion: Ratio, + + /// Lookback interval indicating which past block we are looking at to reward. + pub signature_rewards_max_delay: u64, + /// Auction entrypoints such as "add_bid" or "delegate" are disabled if this flag is set to + /// `false`. Setting up this option makes sense only for private chains where validator set + /// rotation is unnecessary. + pub allow_auction_bids: bool, + /// Allows unrestricted transfers between users. 
+ pub allow_unrestricted_transfers: bool, + /// If set to false then consensus doesn't compute rewards and always uses 0. + pub compute_rewards: bool, + /// Administrative accounts are a valid option for a private chain only. + #[serde(default, skip_serializing_if = "BTreeSet::is_empty")] + pub administrators: BTreeSet, + /// Refund handling. + #[cfg_attr(feature = "datasize", data_size(skip))] + pub refund_handling: RefundHandling, + /// Fee handling. + pub fee_handling: FeeHandling, +} + +impl CoreConfig { + /// The number of eras that have already started and whose validators are still bonded. + pub fn recent_era_count(&self) -> u64 { + // Safe to use naked `-` operation assuming `CoreConfig::is_valid()` has been checked. + self.unbonding_delay - self.auction_delay + } + + /// The proportion of the total rewards going to block production. + pub fn production_rewards_proportion(&self) -> Ratio { + Ratio::new(1, 1) - self.finality_signature_proportion + } + + /// The finder's fee, *i.e.* the proportion of the total rewards going to the validator + /// collecting the finality signatures which is the validator producing the block. + pub fn collection_rewards_proportion(&self) -> Ratio { + self.finders_fee * self.finality_signature_proportion + } + + /// The proportion of the total rewards going to finality signatures collection. + pub fn contribution_rewards_proportion(&self) -> Ratio { + (Ratio::new(1, 1) - self.finders_fee) * self.finality_signature_proportion + } +} + +#[cfg(any(feature = "testing", test))] +impl CoreConfig { + /// Generates a random instance using a `TestRng`. 
+ pub fn random(rng: &mut TestRng) -> Self { + let era_duration = TimeDiff::from_seconds(rng.gen_range(600..604_800)); + let minimum_era_height = rng.gen_range(5..100); + let minimum_block_time = TimeDiff::from_seconds(rng.gen_range(1..60)); + let validator_slots = rng.gen_range(1..10_000); + let finality_threshold_fraction = Ratio::new(rng.gen_range(1..100), 100); + let start_protocol_version_with_strict_finality_signatures_required = + ProtocolVersion::from_parts(1, rng.gen_range(5..10), rng.gen_range(0..100)); + let legacy_required_finality = rng.gen(); + let auction_delay = rng.gen_range(1..5); + let locked_funds_period = TimeDiff::from_seconds(rng.gen_range(600..604_800)); + let vesting_schedule_period = TimeDiff::from_seconds(rng.gen_range(600..604_800)); + let unbonding_delay = rng.gen_range((auction_delay + 1)..1_000_000_000); + let round_seigniorage_rate = Ratio::new( + rng.gen_range(1..1_000_000_000), + rng.gen_range(1..1_000_000_000), + ); + let max_associated_keys = rng.gen(); + let max_runtime_call_stack_height = rng.gen(); + let minimum_delegation_amount = rng.gen::() as u64; + let prune_batch_size = rng.gen_range(0..100); + let strict_argument_checking = rng.gen(); + let simultaneous_peer_requests = rng.gen_range(3..100); + let consensus_protocol = rng.gen(); + let finders_fee = Ratio::new(rng.gen_range(1..100), 100); + let finality_signature_proportion = Ratio::new(rng.gen_range(1..100), 100); + let signature_rewards_max_delay = rng.gen_range(1..10); + let allow_auction_bids = rng.gen(); + let allow_unrestricted_transfers = rng.gen(); + let compute_rewards = rng.gen(); + let administrators = (0..rng.gen_range(0..=10u32)) + .map(|_| PublicKey::random(rng)) + .collect(); + let refund_handling = { + let numer = rng.gen_range(0..=100); + let refund_ratio = Ratio::new(numer, 100); + RefundHandling::Refund { refund_ratio } + }; + + let fee_handling = if rng.gen() { + FeeHandling::PayToProposer + } else { + FeeHandling::Accumulate + }; + + CoreConfig { + 
era_duration, + minimum_era_height, + minimum_block_time, + validator_slots, + finality_threshold_fraction, + start_protocol_version_with_strict_finality_signatures_required, + legacy_required_finality, + auction_delay, + locked_funds_period, + vesting_schedule_period, + unbonding_delay, + round_seigniorage_rate, + max_associated_keys, + max_runtime_call_stack_height, + minimum_delegation_amount, + prune_batch_size, + strict_argument_checking, + simultaneous_peer_requests, + consensus_protocol, + max_delegators_per_validator: 0, + finders_fee, + finality_signature_proportion, + signature_rewards_max_delay, + allow_auction_bids, + administrators, + allow_unrestricted_transfers, + compute_rewards, + refund_handling, + fee_handling, + } + } +} + +impl ToBytes for CoreConfig { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.extend(self.era_duration.to_bytes()?); + buffer.extend(self.minimum_era_height.to_bytes()?); + buffer.extend(self.minimum_block_time.to_bytes()?); + buffer.extend(self.validator_slots.to_bytes()?); + buffer.extend(self.finality_threshold_fraction.to_bytes()?); + buffer.extend( + self.start_protocol_version_with_strict_finality_signatures_required + .to_bytes()?, + ); + buffer.extend(self.legacy_required_finality.to_bytes()?); + buffer.extend(self.auction_delay.to_bytes()?); + buffer.extend(self.locked_funds_period.to_bytes()?); + buffer.extend(self.vesting_schedule_period.to_bytes()?); + buffer.extend(self.unbonding_delay.to_bytes()?); + buffer.extend(self.round_seigniorage_rate.to_bytes()?); + buffer.extend(self.max_associated_keys.to_bytes()?); + buffer.extend(self.max_runtime_call_stack_height.to_bytes()?); + buffer.extend(self.minimum_delegation_amount.to_bytes()?); + buffer.extend(self.prune_batch_size.to_bytes()?); + buffer.extend(self.strict_argument_checking.to_bytes()?); + buffer.extend(self.simultaneous_peer_requests.to_bytes()?); + 
buffer.extend(self.consensus_protocol.to_bytes()?); + buffer.extend(self.max_delegators_per_validator.to_bytes()?); + buffer.extend(self.finders_fee.to_bytes()?); + buffer.extend(self.finality_signature_proportion.to_bytes()?); + buffer.extend(self.signature_rewards_max_delay.to_bytes()?); + buffer.extend(self.allow_auction_bids.to_bytes()?); + buffer.extend(self.allow_unrestricted_transfers.to_bytes()?); + buffer.extend(self.compute_rewards.to_bytes()?); + buffer.extend(self.administrators.to_bytes()?); + buffer.extend(self.refund_handling.to_bytes()?); + buffer.extend(self.fee_handling.to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.era_duration.serialized_length() + + self.minimum_era_height.serialized_length() + + self.minimum_block_time.serialized_length() + + self.validator_slots.serialized_length() + + self.finality_threshold_fraction.serialized_length() + + self + .start_protocol_version_with_strict_finality_signatures_required + .serialized_length() + + self.legacy_required_finality.serialized_length() + + self.auction_delay.serialized_length() + + self.locked_funds_period.serialized_length() + + self.vesting_schedule_period.serialized_length() + + self.unbonding_delay.serialized_length() + + self.round_seigniorage_rate.serialized_length() + + self.max_associated_keys.serialized_length() + + self.max_runtime_call_stack_height.serialized_length() + + self.minimum_delegation_amount.serialized_length() + + self.prune_batch_size.serialized_length() + + self.strict_argument_checking.serialized_length() + + self.simultaneous_peer_requests.serialized_length() + + self.consensus_protocol.serialized_length() + + self.max_delegators_per_validator.serialized_length() + + self.finders_fee.serialized_length() + + self.finality_signature_proportion.serialized_length() + + self.signature_rewards_max_delay.serialized_length() + + self.allow_auction_bids.serialized_length() + + self.allow_unrestricted_transfers.serialized_length() + + 
self.compute_rewards.serialized_length() + + self.administrators.serialized_length() + + self.refund_handling.serialized_length() + + self.fee_handling.serialized_length() + } +} + +impl FromBytes for CoreConfig { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (era_duration, remainder) = TimeDiff::from_bytes(bytes)?; + let (minimum_era_height, remainder) = u64::from_bytes(remainder)?; + let (minimum_block_time, remainder) = TimeDiff::from_bytes(remainder)?; + let (validator_slots, remainder) = u32::from_bytes(remainder)?; + let (finality_threshold_fraction, remainder) = Ratio::::from_bytes(remainder)?; + let (start_protocol_version_with_strict_finality_signatures_required, remainder) = + ProtocolVersion::from_bytes(remainder)?; + let (legacy_required_finality, remainder) = LegacyRequiredFinality::from_bytes(remainder)?; + let (auction_delay, remainder) = u64::from_bytes(remainder)?; + let (locked_funds_period, remainder) = TimeDiff::from_bytes(remainder)?; + let (vesting_schedule_period, remainder) = TimeDiff::from_bytes(remainder)?; + let (unbonding_delay, remainder) = u64::from_bytes(remainder)?; + let (round_seigniorage_rate, remainder) = Ratio::::from_bytes(remainder)?; + let (max_associated_keys, remainder) = u32::from_bytes(remainder)?; + let (max_runtime_call_stack_height, remainder) = u32::from_bytes(remainder)?; + let (minimum_delegation_amount, remainder) = u64::from_bytes(remainder)?; + let (prune_batch_size, remainder) = u64::from_bytes(remainder)?; + let (strict_argument_checking, remainder) = bool::from_bytes(remainder)?; + let (simultaneous_peer_requests, remainder) = u8::from_bytes(remainder)?; + let (consensus_protocol, remainder) = ConsensusProtocolName::from_bytes(remainder)?; + let (max_delegators_per_validator, remainder) = FromBytes::from_bytes(remainder)?; + let (finders_fee, remainder) = Ratio::from_bytes(remainder)?; + let (finality_signature_proportion, remainder) = Ratio::from_bytes(remainder)?; + let 
(signature_rewards_max_delay, remainder) = u64::from_bytes(remainder)?; + let (allow_auction_bids, remainder) = FromBytes::from_bytes(remainder)?; + let (allow_unrestricted_transfers, remainder) = FromBytes::from_bytes(remainder)?; + let (compute_rewards, remainder) = bool::from_bytes(remainder)?; + let (administrative_accounts, remainder) = FromBytes::from_bytes(remainder)?; + let (refund_handling, remainder) = FromBytes::from_bytes(remainder)?; + let (fee_handling, remainder) = FromBytes::from_bytes(remainder)?; + let config = CoreConfig { + era_duration, + minimum_era_height, + minimum_block_time, + validator_slots, + finality_threshold_fraction, + start_protocol_version_with_strict_finality_signatures_required, + legacy_required_finality, + auction_delay, + locked_funds_period, + vesting_schedule_period, + unbonding_delay, + round_seigniorage_rate, + max_associated_keys, + max_runtime_call_stack_height, + minimum_delegation_amount, + prune_batch_size, + strict_argument_checking, + simultaneous_peer_requests, + consensus_protocol, + max_delegators_per_validator, + finders_fee, + finality_signature_proportion, + signature_rewards_max_delay, + allow_auction_bids, + allow_unrestricted_transfers, + compute_rewards, + administrators: administrative_accounts, + refund_handling, + fee_handling, + }; + Ok((config, remainder)) + } +} + +/// Consensus protocol name. +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub enum ConsensusProtocolName { + /// Highway. + Highway, + /// Zug. 
+ Zug, +} + +impl Serialize for ConsensusProtocolName { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + match self { + ConsensusProtocolName::Highway => "Highway", + ConsensusProtocolName::Zug => "Zug", + } + .serialize(serializer) + } +} + +impl<'de> Deserialize<'de> for ConsensusProtocolName { + fn deserialize>(deserializer: D) -> Result { + match String::deserialize(deserializer)?.to_lowercase().as_str() { + "highway" => Ok(ConsensusProtocolName::Highway), + "zug" => Ok(ConsensusProtocolName::Zug), + _ => Err(DeError::custom("unknown consensus protocol name")), + } + } +} + +const CONSENSUS_HIGHWAY_TAG: u8 = 0; +const CONSENSUS_ZUG_TAG: u8 = 1; + +impl ToBytes for ConsensusProtocolName { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let tag = match self { + ConsensusProtocolName::Highway => CONSENSUS_HIGHWAY_TAG, + ConsensusProtocolName::Zug => CONSENSUS_ZUG_TAG, + }; + Ok(vec![tag]) + } + + fn serialized_length(&self) -> usize { + 1 + } +} + +impl FromBytes for ConsensusProtocolName { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + let name = match tag { + CONSENSUS_HIGHWAY_TAG => ConsensusProtocolName::Highway, + CONSENSUS_ZUG_TAG => ConsensusProtocolName::Zug, + _ => return Err(bytesrepr::Error::Formatting), + }; + Ok((name, remainder)) + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> ConsensusProtocolName { + if rng.gen() { + ConsensusProtocolName::Highway + } else { + ConsensusProtocolName::Zug + } + } +} + +/// Which finality a legacy block needs during a fast sync. +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub enum LegacyRequiredFinality { + /// Strict finality: more than 2/3rd of validators. + Strict, + /// Weak finality: more than 1/3rd of validators. + Weak, + /// Finality always valid. 
+ Any, +} + +impl Serialize for LegacyRequiredFinality { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + match self { + LegacyRequiredFinality::Strict => "Strict", + LegacyRequiredFinality::Weak => "Weak", + LegacyRequiredFinality::Any => "Any", + } + .serialize(serializer) + } +} + +impl<'de> Deserialize<'de> for LegacyRequiredFinality { + fn deserialize>(deserializer: D) -> Result { + match String::deserialize(deserializer)?.to_lowercase().as_str() { + "strict" => Ok(LegacyRequiredFinality::Strict), + "weak" => Ok(LegacyRequiredFinality::Weak), + "any" => Ok(LegacyRequiredFinality::Any), + _ => Err(DeError::custom("unknown legacy required finality")), + } + } +} + +const LEGACY_REQUIRED_FINALITY_STRICT_TAG: u8 = 0; +const LEGACY_REQUIRED_FINALITY_WEAK_TAG: u8 = 1; +const LEGACY_REQUIRED_FINALITY_ANY_TAG: u8 = 2; + +impl ToBytes for LegacyRequiredFinality { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let tag = match self { + LegacyRequiredFinality::Strict => LEGACY_REQUIRED_FINALITY_STRICT_TAG, + LegacyRequiredFinality::Weak => LEGACY_REQUIRED_FINALITY_WEAK_TAG, + LegacyRequiredFinality::Any => LEGACY_REQUIRED_FINALITY_ANY_TAG, + }; + Ok(vec![tag]) + } + + fn serialized_length(&self) -> usize { + 1 + } +} + +impl FromBytes for LegacyRequiredFinality { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + LEGACY_REQUIRED_FINALITY_STRICT_TAG => Ok((LegacyRequiredFinality::Strict, remainder)), + LEGACY_REQUIRED_FINALITY_WEAK_TAG => Ok((LegacyRequiredFinality::Weak, remainder)), + LEGACY_REQUIRED_FINALITY_ANY_TAG => Ok((LegacyRequiredFinality::Any, remainder)), + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> LegacyRequiredFinality { + match rng.gen_range(0..3) { + 0 => LegacyRequiredFinality::Strict, + 1 => 
LegacyRequiredFinality::Weak,
            2 => LegacyRequiredFinality::Any,
            _not_in_range => unreachable!(),
        }
    }
}

#[cfg(test)]
mod tests {
    use rand::SeedableRng;

    use super::*;

    #[test]
    fn bytesrepr_roundtrip() {
        let mut rng = TestRng::from_entropy();
        let config = CoreConfig::random(&mut rng);
        bytesrepr::test_serialization_roundtrip(&config);
    }
}
diff --git a/casper_types_ver_2_0/src/chainspec/fee_handling.rs b/casper_types_ver_2_0/src/chainspec/fee_handling.rs
new file mode 100644
index 00000000..abd17017
--- /dev/null
+++ b/casper_types_ver_2_0/src/chainspec/fee_handling.rs
@@ -0,0 +1,76 @@
#[cfg(feature = "datasize")]
use datasize::DataSize;
use serde::{Deserialize, Serialize};

use crate::bytesrepr::{self, FromBytes, ToBytes};

const FEE_HANDLING_PROPOSER_TAG: u8 = 0;
const FEE_HANDLING_ACCUMULATE_TAG: u8 = 1;
const FEE_HANDLING_BURN_TAG: u8 = 2;

/// Defines how fees are handled in the system.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
#[cfg_attr(feature = "datasize", derive(DataSize))]
pub enum FeeHandling {
    /// Transaction fees are paid to the block proposer.
    ///
    /// This is the default option for public chains.
    PayToProposer,
    /// Transaction fees are accumulated in a special purse and then distributed during end of era
    /// processing evenly among all administrator accounts.
    ///
    /// This setting is applicable for some private chains (but not all).
    Accumulate,
    /// Burn the fees.
    Burn,
}

impl ToBytes for FeeHandling {
    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {
        match self {
            FeeHandling::PayToProposer => Ok(vec![FEE_HANDLING_PROPOSER_TAG]),
            FeeHandling::Accumulate => Ok(vec![FEE_HANDLING_ACCUMULATE_TAG]),
            FeeHandling::Burn => Ok(vec![FEE_HANDLING_BURN_TAG]),
        }
    }

    fn serialized_length(&self) -> usize {
        1
    }
}

impl FromBytes for FeeHandling {
    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {
        let (tag, rem) = u8::from_bytes(bytes)?;
        match tag {
            FEE_HANDLING_PROPOSER_TAG => Ok((FeeHandling::PayToProposer, rem)),
            FEE_HANDLING_ACCUMULATE_TAG => Ok((FeeHandling::Accumulate, rem)),
            FEE_HANDLING_BURN_TAG => Ok((FeeHandling::Burn, rem)),
            _ => Err(bytesrepr::Error::Formatting),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn bytesrepr_roundtrip_for_refund() {
        let fee_config = FeeHandling::PayToProposer;
        bytesrepr::test_serialization_roundtrip(&fee_config);
    }

    #[test]
    fn bytesrepr_roundtrip_for_accumulate() {
        let fee_config = FeeHandling::Accumulate;
        bytesrepr::test_serialization_roundtrip(&fee_config);
    }

    #[test]
    fn bytesrepr_roundtrip_for_burn() {
        let fee_config = FeeHandling::Burn;
        bytesrepr::test_serialization_roundtrip(&fee_config);
    }
}
diff --git a/casper_types_ver_2_0/src/chainspec/global_state_update.rs b/casper_types_ver_2_0/src/chainspec/global_state_update.rs
new file mode 100644
index 00000000..68de870c
--- /dev/null
+++ b/casper_types_ver_2_0/src/chainspec/global_state_update.rs
@@ -0,0 +1,181 @@
#[cfg(feature = "datasize")]
use datasize::DataSize;
#[cfg(any(feature = "testing", test))]
use rand::Rng;
use serde::{Deserialize, Serialize};
use std::{collections::BTreeMap, convert::TryFrom};
use thiserror::Error;

#[cfg(any(feature = "testing", test))]
use crate::testing::TestRng;
use crate::{
    bytesrepr::{self, Bytes, FromBytes, ToBytes},
    AsymmetricType, Key, PublicKey, U512,
};

#[derive(PartialEq,
Eq, Serialize, Deserialize, Debug, Clone)]
#[cfg_attr(feature = "datasize", derive(DataSize))]
#[serde(deny_unknown_fields)]
pub struct GlobalStateUpdateEntry {
    // Formatted-string representation of a `Key`.
    key: String,
    // Base64-encoded serialized `StoredValue`.
    value: String,
}

#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, Clone)]
#[cfg_attr(feature = "datasize", derive(DataSize))]
#[serde(deny_unknown_fields)]
pub struct GlobalStateUpdateValidatorInfo {
    // Hex-encoded public key.
    public_key: String,
    // Decimal-string weight.
    weight: String,
}

/// Type storing global state update entries.
#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, Clone)]
#[cfg_attr(feature = "datasize", derive(DataSize))]
#[serde(deny_unknown_fields)]
pub struct GlobalStateUpdateConfig {
    validators: Option<Vec<GlobalStateUpdateValidatorInfo>>,
    entries: Vec<GlobalStateUpdateEntry>,
}

/// Type storing the information about modifications to be applied to the global state.
///
/// It stores the serialized `StoredValue`s corresponding to keys to be modified, and for the case
/// where the validator set is being modified in any way, the full set of post-upgrade validators.
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug)]
#[cfg_attr(feature = "datasize", derive(DataSize))]
pub struct GlobalStateUpdate {
    /// Some with all validators (including pre-existent), if any change to the set is made.
    pub validators: Option<BTreeMap<PublicKey, U512>>,
    /// Global state key value pairs, which will be directly upserted into global state against
    /// the root hash of the final block of the era before the upgrade.
    pub entries: BTreeMap<Key, Bytes>,
}

impl GlobalStateUpdate {
    /// Returns a random `GlobalStateUpdate`.
    #[cfg(any(feature = "testing", test))]
    pub fn random(rng: &mut TestRng) -> Self {
        let mut validators = BTreeMap::new();
        if rng.gen() {
            let count = rng.gen_range(5..10);
            for _ in 0..count {
                // NOTE(review): the turbofish argument was stripped by mangling; `U512` matches
                // the map's value type — confirm against the upstream source.
                validators.insert(PublicKey::random(rng), rng.gen::<U512>());
            }
        }

        let count = rng.gen_range(0..10);
        let mut entries = BTreeMap::new();
        for _ in 0..count {
            entries.insert(rng.gen(), rng.gen());
        }

        Self {
            validators: Some(validators),
            entries,
        }
    }
}

impl ToBytes for GlobalStateUpdate {
    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {
        self.validators.write_bytes(writer)?;
        self.entries.write_bytes(writer)
    }

    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {
        let mut buffer = bytesrepr::allocate_buffer(self)?;
        self.write_bytes(&mut buffer)?;
        Ok(buffer)
    }

    fn serialized_length(&self) -> usize {
        self.validators.serialized_length() + self.entries.serialized_length()
    }
}

impl FromBytes for GlobalStateUpdate {
    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {
        let (validators, remainder) = Option::<BTreeMap<PublicKey, U512>>::from_bytes(bytes)?;
        let (entries, remainder) = BTreeMap::<Key, Bytes>::from_bytes(remainder)?;
        let global_state_update = GlobalStateUpdate {
            entries,
            validators,
        };
        Ok((global_state_update, remainder))
    }
}

/// Error loading global state update file.
#[derive(Debug, Error)]
pub enum GlobalStateUpdateError {
    /// Error while decoding a key from a prefix formatted string.
    #[error("decoding key from formatted string error: {0}")]
    DecodingKeyFromStr(String),
    /// Error while decoding a key from a hex formatted string.
    #[error("decoding key from hex string error: {0}")]
    DecodingKeyFromHex(String),
    /// Error while decoding a public key weight from formatted string.
    #[error("decoding weight from decimal string error: {0}")]
    DecodingWeightFromStr(String),
    /// Error while decoding a serialized value from a base64 encoded string.
    #[error("decoding from base64 error: {0}")]
    DecodingFromBase64(#[from] base64::DecodeError),
}

impl TryFrom<GlobalStateUpdateConfig> for GlobalStateUpdate {
    type Error = GlobalStateUpdateError;

    /// Decodes the string-based config representation into typed validators and entries.
    fn try_from(config: GlobalStateUpdateConfig) -> Result<Self, Self::Error> {
        let mut validators: Option<BTreeMap<PublicKey, U512>> = None;
        if let Some(config_validators) = config.validators {
            let mut new_validators = BTreeMap::new();
            for (index, validator) in config_validators.into_iter().enumerate() {
                let public_key = PublicKey::from_hex(&validator.public_key).map_err(|error| {
                    GlobalStateUpdateError::DecodingKeyFromHex(format!(
                        "failed to decode validator public key {}: {:?}",
                        index, error
                    ))
                })?;
                let weight = U512::from_dec_str(&validator.weight).map_err(|error| {
                    GlobalStateUpdateError::DecodingWeightFromStr(format!(
                        "failed to decode validator weight {}: {}",
                        index, error
                    ))
                })?;
                let _ = new_validators.insert(public_key, weight);
            }
            validators = Some(new_validators);
        }

        let mut entries = BTreeMap::new();
        for (index, entry) in config.entries.into_iter().enumerate() {
            let key = Key::from_formatted_str(&entry.key).map_err(|error| {
                GlobalStateUpdateError::DecodingKeyFromStr(format!(
                    "failed to decode entry key {}: {}",
                    index, error
                ))
            })?;
            let value = base64::decode(&entry.value)?.into();
            let _ = entries.insert(key, value);
        }

        Ok(GlobalStateUpdate {
            validators,
            entries,
        })
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use rand::SeedableRng;

    #[test]
    fn global_state_update_bytesrepr_roundtrip() {
        let mut rng = TestRng::from_entropy();
        let update = GlobalStateUpdate::random(&mut rng);
        bytesrepr::test_serialization_roundtrip(&update);
    }
}
diff --git a/casper_types_ver_2_0/src/chainspec/highway_config.rs b/casper_types_ver_2_0/src/chainspec/highway_config.rs
new file mode 100644
index 00000000..def377c2
--- /dev/null
+++ b/casper_types_ver_2_0/src/chainspec/highway_config.rs
@@ -0,0 +1,111 @@
#[cfg(feature = "datasize")]
use
datasize::DataSize; +use num::rational::Ratio; + +#[cfg(any(feature = "testing", test))] +use rand::Rng; +use serde::{Deserialize, Serialize}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + TimeDiff, +}; + +/// Configuration values relevant to Highway consensus. +#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. +#[serde(deny_unknown_fields)] +pub struct HighwayConfig { + /// The upper limit for Highway round lengths. + pub maximum_round_length: TimeDiff, + /// The factor by which rewards for a round are multiplied if the greatest summit has ≤50% + /// quorum, i.e. no finality. + #[cfg_attr(feature = "datasize", data_size(skip))] + pub reduced_reward_multiplier: Ratio, +} + +impl HighwayConfig { + /// Checks whether the values set in the config make sense and returns `false` if they don't. + pub fn is_valid(&self) -> Result<(), String> { + if self.reduced_reward_multiplier > Ratio::new(1, 1) { + Err("reduced reward multiplier is not in the range [0, 1]".to_string()) + } else { + Ok(()) + } + } + + /// Returns a random `HighwayConfig`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + let maximum_round_length = TimeDiff::from_seconds(rng.gen_range(60..600)); + let reduced_reward_multiplier = Ratio::new(rng.gen_range(0..10), 10); + + HighwayConfig { + maximum_round_length, + reduced_reward_multiplier, + } + } +} + +impl ToBytes for HighwayConfig { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.extend(self.maximum_round_length.to_bytes()?); + buffer.extend(self.reduced_reward_multiplier.to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.maximum_round_length.serialized_length() + + self.reduced_reward_multiplier.serialized_length() + } +} + +impl FromBytes for HighwayConfig { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (maximum_round_length, remainder) = TimeDiff::from_bytes(bytes)?; + let (reduced_reward_multiplier, remainder) = Ratio::::from_bytes(remainder)?; + let config = HighwayConfig { + maximum_round_length, + reduced_reward_multiplier, + }; + Ok((config, remainder)) + } +} + +#[cfg(test)] +mod tests { + use rand::SeedableRng; + + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let mut rng = TestRng::from_entropy(); + let config = HighwayConfig::random(&mut rng); + bytesrepr::test_serialization_roundtrip(&config); + } + + #[test] + fn should_validate_for_reduced_reward_multiplier() { + let mut rng = TestRng::from_entropy(); + let mut highway_config = HighwayConfig::random(&mut rng); + + // Should be valid for 0 <= RRM <= 1. 
+ highway_config.reduced_reward_multiplier = Ratio::new(0, 1); + assert!(highway_config.is_valid().is_ok()); + highway_config.reduced_reward_multiplier = Ratio::new(1, 1); + assert!(highway_config.is_valid().is_ok()); + highway_config.reduced_reward_multiplier = Ratio::new(u64::MAX, u64::MAX); + assert!(highway_config.is_valid().is_ok()); + + highway_config.reduced_reward_multiplier = Ratio::new(u64::MAX, u64::MAX - 1); + assert!( + highway_config.is_valid().is_err(), + "Should be invalid for RRM > 1." + ); + } +} diff --git a/casper_types_ver_2_0/src/chainspec/network_config.rs b/casper_types_ver_2_0/src/chainspec/network_config.rs new file mode 100644 index 00000000..42090c22 --- /dev/null +++ b/casper_types_ver_2_0/src/chainspec/network_config.rs @@ -0,0 +1,86 @@ +#[cfg(feature = "datasize")] +use datasize::DataSize; + +#[cfg(any(feature = "testing", test))] +use rand::Rng; +use serde::Serialize; + +use crate::bytesrepr::{self, FromBytes, ToBytes}; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; + +use super::AccountsConfig; + +/// Configuration values associated with the network. +#[derive(Clone, PartialEq, Eq, Serialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct NetworkConfig { + /// The network name. + pub name: String, + /// The maximum size of an accepted network message, in bytes. + pub maximum_net_message_size: u32, + /// Validator accounts specified in the chainspec. + // Note: `accounts_config` must be the last field on this struct due to issues in the TOML + // crate - see . + pub accounts_config: AccountsConfig, +} + +impl NetworkConfig { + /// Returns a random `NetworkConfig`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + let name = rng.gen::().to_string(); + let maximum_net_message_size = 4 + rng.gen_range(0..4); + let accounts_config = AccountsConfig::random(rng); + + NetworkConfig { + name, + maximum_net_message_size, + accounts_config, + } + } +} + +impl ToBytes for NetworkConfig { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.extend(self.name.to_bytes()?); + buffer.extend(self.accounts_config.to_bytes()?); + buffer.extend(self.maximum_net_message_size.to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.name.serialized_length() + + self.accounts_config.serialized_length() + + self.maximum_net_message_size.serialized_length() + } +} + +impl FromBytes for NetworkConfig { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (name, remainder) = String::from_bytes(bytes)?; + let (accounts_config, remainder) = FromBytes::from_bytes(remainder)?; + let (maximum_net_message_size, remainder) = FromBytes::from_bytes(remainder)?; + let config = NetworkConfig { + name, + maximum_net_message_size, + accounts_config, + }; + Ok((config, remainder)) + } +} + +#[cfg(test)] +mod tests { + use rand::SeedableRng; + + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let mut rng = TestRng::from_entropy(); + let config = NetworkConfig::random(&mut rng); + bytesrepr::test_serialization_roundtrip(&config); + } +} diff --git a/casper_types_ver_2_0/src/chainspec/next_upgrade.rs b/casper_types_ver_2_0/src/chainspec/next_upgrade.rs new file mode 100644 index 00000000..897755f9 --- /dev/null +++ b/casper_types_ver_2_0/src/chainspec/next_upgrade.rs @@ -0,0 +1,115 @@ +use std::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + 
bytesrepr::{self, FromBytes, ToBytes}, + ActivationPoint, ProtocolConfig, ProtocolVersion, +}; + +#[cfg(test)] +use rand::Rng; + +#[cfg(test)] +use crate::testing::TestRng; + +/// Information about the next protocol upgrade. +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[derive(PartialEq, Eq, Debug, Serialize, Deserialize, Clone)] +pub struct NextUpgrade { + activation_point: ActivationPoint, + protocol_version: ProtocolVersion, +} + +impl NextUpgrade { + /// Creates a new `NextUpgrade`. + pub fn new(activation_point: ActivationPoint, protocol_version: ProtocolVersion) -> Self { + NextUpgrade { + activation_point, + protocol_version, + } + } + + /// Returns the activation point of the next upgrade. + pub fn activation_point(&self) -> ActivationPoint { + self.activation_point + } + + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + Self { + activation_point: ActivationPoint::random(rng), + protocol_version: ProtocolVersion::from_parts(rng.gen(), rng.gen(), rng.gen()), + } + } +} + +impl From for NextUpgrade { + fn from(protocol_config: ProtocolConfig) -> Self { + NextUpgrade { + activation_point: protocol_config.activation_point, + protocol_version: protocol_config.version, + } + } +} + +impl Display for NextUpgrade { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!( + formatter, + "next upgrade to {} at start of era {}", + self.protocol_version, + self.activation_point.era_id() + ) + } +} + +impl ToBytes for NextUpgrade { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.activation_point.write_bytes(writer)?; + self.protocol_version.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + self.activation_point.serialized_length() + 
self.protocol_version.serialized_length() + } +} + +impl FromBytes for NextUpgrade { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (activation_point, remainder) = ActivationPoint::from_bytes(bytes)?; + let (protocol_version, remainder) = ProtocolVersion::from_bytes(remainder)?; + Ok(( + NextUpgrade { + activation_point, + protocol_version, + }, + remainder, + )) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = NextUpgrade::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/casper_types_ver_2_0/src/chainspec/protocol_config.rs b/casper_types_ver_2_0/src/chainspec/protocol_config.rs new file mode 100644 index 00000000..f693578f --- /dev/null +++ b/casper_types_ver_2_0/src/chainspec/protocol_config.rs @@ -0,0 +1,125 @@ +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +use serde::{Deserialize, Serialize}; +use std::{collections::BTreeMap, str::FromStr}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + Key, ProtocolVersion, StoredValue, +}; + +use crate::{ActivationPoint, GlobalStateUpdate}; + +/// Configuration values associated with the protocol. +#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct ProtocolConfig { + /// Protocol version. + #[cfg_attr(feature = "datasize", data_size(skip))] + pub version: ProtocolVersion, + /// Whether we need to clear latest blocks back to the switch block just before the activation + /// point or not. + pub hard_reset: bool, + /// This protocol config applies starting at the era specified in the activation point. 
+ pub activation_point: ActivationPoint, + /// Any arbitrary updates we might want to make to the global state at the start of the era + /// specified in the activation point. + pub global_state_update: Option, +} + +impl ProtocolConfig { + /// The mapping of [`Key`]s to [`StoredValue`]s we will use to update global storage in the + /// event of an emergency update. + pub(crate) fn get_update_mapping( + &self, + ) -> Result, bytesrepr::Error> { + let state_update = match &self.global_state_update { + Some(GlobalStateUpdate { entries, .. }) => entries, + None => return Ok(BTreeMap::default()), + }; + let mut update_mapping = BTreeMap::new(); + for (key, stored_value_bytes) in state_update { + let stored_value = bytesrepr::deserialize(stored_value_bytes.clone().into())?; + update_mapping.insert(*key, stored_value); + } + Ok(update_mapping) + } + + /// Returns a random `ProtocolConfig`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + let protocol_version = ProtocolVersion::from_parts( + rng.gen_range(0..10), + rng.gen::() as u32, + rng.gen::() as u32, + ); + let activation_point = ActivationPoint::random(rng); + + ProtocolConfig { + version: protocol_version, + hard_reset: rng.gen(), + activation_point, + global_state_update: None, + } + } +} + +impl ToBytes for ProtocolConfig { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.extend(self.version.to_string().to_bytes()?); + buffer.extend(self.hard_reset.to_bytes()?); + buffer.extend(self.activation_point.to_bytes()?); + buffer.extend(self.global_state_update.to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.version.to_string().serialized_length() + + self.hard_reset.serialized_length() + + self.activation_point.serialized_length() + + self.global_state_update.serialized_length() + } +} + +impl FromBytes for ProtocolConfig { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), 
bytesrepr::Error> { + let (protocol_version_string, remainder) = String::from_bytes(bytes)?; + let version = ProtocolVersion::from_str(&protocol_version_string) + .map_err(|_| bytesrepr::Error::Formatting)?; + let (hard_reset, remainder) = bool::from_bytes(remainder)?; + let (activation_point, remainder) = ActivationPoint::from_bytes(remainder)?; + let (global_state_update, remainder) = Option::::from_bytes(remainder)?; + let protocol_config = ProtocolConfig { + version, + hard_reset, + activation_point, + global_state_update, + }; + Ok((protocol_config, remainder)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use rand::SeedableRng; + + #[test] + fn activation_point_bytesrepr_roundtrip() { + let mut rng = TestRng::from_entropy(); + let activation_point = ActivationPoint::random(&mut rng); + bytesrepr::test_serialization_roundtrip(&activation_point); + } + + #[test] + fn protocol_config_bytesrepr_roundtrip() { + let mut rng = TestRng::from_entropy(); + let config = ProtocolConfig::random(&mut rng); + bytesrepr::test_serialization_roundtrip(&config); + } +} diff --git a/casper_types_ver_2_0/src/chainspec/refund_handling.rs b/casper_types_ver_2_0/src/chainspec/refund_handling.rs new file mode 100644 index 00000000..0da6bb60 --- /dev/null +++ b/casper_types_ver_2_0/src/chainspec/refund_handling.rs @@ -0,0 +1,97 @@ +/// Configuration options of refund handling that are executed as part of handle payment +/// finalization. +use num_rational::Ratio; +use serde::{Deserialize, Serialize}; + +use crate::bytesrepr::{self, FromBytes, ToBytes}; + +const REFUND_HANDLING_REFUND_TAG: u8 = 0; +const REFUND_HANDLING_BURN_TAG: u8 = 1; + +/// Defines how refunds are calculated. 
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum RefundHandling { + /// Refund of excess payment amount goes to either a pre-defined purse, or back to the sender + /// and the rest of the payment amount goes to the block proposer. + Refund { + /// Computes how much refund goes back to the user after deducting gas spent from the paid + /// amount. + /// + /// user_part = (payment_amount - gas_spent_amount) * refund_ratio + /// validator_part = payment_amount - user_part + /// + /// Any dust amount that was a result of multiplying by refund_ratio goes back to user. + refund_ratio: Ratio, + }, + /// Burns the refund amount. + Burn { + /// Computes how much of the refund amount is burned after deducting gas spent from the + /// paid amount. + refund_ratio: Ratio, + }, +} + +impl ToBytes for RefundHandling { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + + match self { + RefundHandling::Refund { refund_ratio } => { + buffer.push(REFUND_HANDLING_REFUND_TAG); + buffer.extend(refund_ratio.to_bytes()?); + } + RefundHandling::Burn { refund_ratio } => { + buffer.push(REFUND_HANDLING_BURN_TAG); + buffer.extend(refund_ratio.to_bytes()?); + } + } + + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + 1 + match self { + RefundHandling::Refund { refund_ratio } => refund_ratio.serialized_length(), + RefundHandling::Burn { refund_ratio } => refund_ratio.serialized_length(), + } + } +} + +impl FromBytes for RefundHandling { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, rem) = u8::from_bytes(bytes)?; + match tag { + REFUND_HANDLING_REFUND_TAG => { + let (refund_ratio, rem) = FromBytes::from_bytes(rem)?; + Ok((RefundHandling::Refund { refund_ratio }, rem)) + } + REFUND_HANDLING_BURN_TAG => { + let (refund_ratio, rem) = FromBytes::from_bytes(rem)?; + Ok((RefundHandling::Burn { refund_ratio }, 
rem)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip_for_refund() { + let refund_config = RefundHandling::Refund { + refund_ratio: Ratio::new(49, 313), + }; + bytesrepr::test_serialization_roundtrip(&refund_config); + } + + #[test] + fn bytesrepr_roundtrip_for_burn() { + let refund_config = RefundHandling::Burn { + refund_ratio: Ratio::new(49, 313), + }; + bytesrepr::test_serialization_roundtrip(&refund_config); + } +} diff --git a/casper_types_ver_2_0/src/chainspec/transaction_config.rs b/casper_types_ver_2_0/src/chainspec/transaction_config.rs new file mode 100644 index 00000000..ea905582 --- /dev/null +++ b/casper_types_ver_2_0/src/chainspec/transaction_config.rs @@ -0,0 +1,211 @@ +mod deploy_config; +mod transaction_v1_config; + +#[cfg(any(feature = "testing", test))] +use alloc::str::FromStr; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +use serde::{Deserialize, Serialize}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + TimeDiff, +}; + +pub use deploy_config::DeployConfig; +#[cfg(any(feature = "testing", test))] +pub use deploy_config::DEFAULT_MAX_PAYMENT_MOTES; +pub use transaction_v1_config::TransactionV1Config; + +/// The default minimum number of motes that can be transferred. +#[cfg(any(feature = "testing", test))] +pub const DEFAULT_MIN_TRANSFER_MOTES: u64 = 2_500_000_000; + +/// Configuration values associated with Transactions. +#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. +#[serde(deny_unknown_fields)] +pub struct TransactionConfig { + /// Maximum time to live any transaction can specify. 
+ pub max_ttl: TimeDiff, + /// Maximum size in bytes of a single transaction, when bytesrepr encoded. + pub max_transaction_size: u32, + /// Maximum number of transfer transactions allowed in a block. + pub block_max_transfer_count: u32, + /// Maximum number of staking transactions allowed in a block. + pub block_max_staking_count: u32, + /// Maximum number of installer/upgrader transactions allowed in a block. + pub block_max_install_upgrade_count: u32, + /// Maximum number of other transactions (non-transfer, non-staking, non-installer/upgrader) + /// allowed in a block. + pub block_max_standard_count: u32, + /// Maximum number of approvals (signatures) allowed in a block across all transactions. + pub block_max_approval_count: u32, + /// Maximum possible size in bytes of a block. + pub max_block_size: u32, + /// Maximum sum of payment across all transactions included in a block. + pub block_gas_limit: u64, + /// Minimum token amount for a native transfer deploy or transaction (a transfer deploy or + /// transaction received with an transfer amount less than this will be rejected upon receipt). + pub native_transfer_minimum_motes: u64, + /// Maximum value to which `transaction_acceptor.timestamp_leeway` can be set in the + /// config.toml file. + pub max_timestamp_leeway: TimeDiff, + /// Configuration values specific to Deploy transactions. + #[serde(rename = "deploy")] + pub deploy_config: DeployConfig, + /// Configuration values specific to V1 transactions. + #[serde(rename = "v1")] + pub transaction_v1_config: TransactionV1Config, +} + +#[cfg(any(feature = "testing", test))] +impl TransactionConfig { + /// Generates a random instance using a `TestRng`. 
+ pub fn random(rng: &mut TestRng) -> Self { + let max_ttl = TimeDiff::from_seconds(rng.gen_range(60..3_600)); + let max_transaction_size = rng.gen_range(100_000..1_000_000); + let block_max_transfer_count = rng.gen(); + let block_max_staking_count = rng.gen(); + let block_max_install_upgrade_count = rng.gen(); + let block_max_standard_count = rng.gen(); + let block_max_approval_count = rng.gen(); + let max_block_size = rng.gen_range(1_000_000..1_000_000_000); + let block_gas_limit = rng.gen_range(100_000_000_000..1_000_000_000_000_000); + let native_transfer_minimum_motes = + rng.gen_range(DEFAULT_MIN_TRANSFER_MOTES..1_000_000_000_000_000); + let max_timestamp_leeway = TimeDiff::from_seconds(rng.gen_range(0..6)); + let deploy_config = DeployConfig::random(rng); + let transaction_v1_config = TransactionV1Config::random(rng); + + TransactionConfig { + max_ttl, + max_transaction_size, + block_max_transfer_count, + block_max_staking_count, + block_max_install_upgrade_count, + block_max_standard_count, + block_max_approval_count, + max_block_size, + block_gas_limit, + native_transfer_minimum_motes, + max_timestamp_leeway, + deploy_config, + transaction_v1_config, + } + } +} + +#[cfg(any(feature = "testing", test))] +impl Default for TransactionConfig { + fn default() -> Self { + let eighteeen_hours = TimeDiff::from_seconds(18 * 60 * 60); + TransactionConfig { + max_ttl: eighteeen_hours, + max_transaction_size: 1_048_576, + block_max_transfer_count: 1000, + block_max_staking_count: 200, + block_max_install_upgrade_count: 2, + block_max_standard_count: 100, + block_max_approval_count: 2600, + max_block_size: 10_485_760, + block_gas_limit: 10_000_000_000_000, + native_transfer_minimum_motes: DEFAULT_MIN_TRANSFER_MOTES, + max_timestamp_leeway: TimeDiff::from_str("5sec").unwrap(), + deploy_config: DeployConfig::default(), + transaction_v1_config: TransactionV1Config::default(), + } + } +} + +impl ToBytes for TransactionConfig { + fn write_bytes(&self, writer: &mut Vec) -> 
Result<(), bytesrepr::Error> { + self.max_ttl.write_bytes(writer)?; + self.max_transaction_size.write_bytes(writer)?; + self.block_max_transfer_count.write_bytes(writer)?; + self.block_max_staking_count.write_bytes(writer)?; + self.block_max_install_upgrade_count.write_bytes(writer)?; + self.block_max_standard_count.write_bytes(writer)?; + self.block_max_approval_count.write_bytes(writer)?; + self.max_block_size.write_bytes(writer)?; + self.block_gas_limit.write_bytes(writer)?; + self.native_transfer_minimum_motes.write_bytes(writer)?; + self.max_timestamp_leeway.write_bytes(writer)?; + self.deploy_config.write_bytes(writer)?; + self.transaction_v1_config.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.max_ttl.serialized_length() + + self.max_transaction_size.serialized_length() + + self.block_max_transfer_count.serialized_length() + + self.block_max_staking_count.serialized_length() + + self.block_max_install_upgrade_count.serialized_length() + + self.block_max_standard_count.serialized_length() + + self.block_max_approval_count.serialized_length() + + self.max_block_size.serialized_length() + + self.block_gas_limit.serialized_length() + + self.native_transfer_minimum_motes.serialized_length() + + self.max_timestamp_leeway.serialized_length() + + self.deploy_config.serialized_length() + + self.transaction_v1_config.serialized_length() + } +} + +impl FromBytes for TransactionConfig { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (max_ttl, remainder) = TimeDiff::from_bytes(bytes)?; + let (max_transaction_size, remainder) = u32::from_bytes(remainder)?; + let (block_max_transfer_count, remainder) = u32::from_bytes(remainder)?; + let (block_max_staking_count, remainder) = u32::from_bytes(remainder)?; + let (block_max_install_upgrade_count, remainder) = 
u32::from_bytes(remainder)?; + let (block_max_standard_count, remainder) = u32::from_bytes(remainder)?; + let (block_max_approval_count, remainder) = u32::from_bytes(remainder)?; + let (max_block_size, remainder) = u32::from_bytes(remainder)?; + let (block_gas_limit, remainder) = u64::from_bytes(remainder)?; + let (native_transfer_minimum_motes, remainder) = u64::from_bytes(remainder)?; + let (max_timestamp_leeway, remainder) = TimeDiff::from_bytes(remainder)?; + let (deploy_config, remainder) = DeployConfig::from_bytes(remainder)?; + let (transaction_v1_config, remainder) = TransactionV1Config::from_bytes(remainder)?; + let config = TransactionConfig { + max_ttl, + max_transaction_size, + block_max_transfer_count, + block_max_staking_count, + block_max_install_upgrade_count, + block_max_standard_count, + block_max_approval_count, + max_block_size, + block_gas_limit, + native_transfer_minimum_motes, + max_timestamp_leeway, + deploy_config, + transaction_v1_config, + }; + Ok((config, remainder)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let mut rng = TestRng::new(); + let config = TransactionConfig::random(&mut rng); + bytesrepr::test_serialization_roundtrip(&config); + } +} diff --git a/casper_types_ver_2_0/src/chainspec/transaction_config/deploy_config.rs b/casper_types_ver_2_0/src/chainspec/transaction_config/deploy_config.rs new file mode 100644 index 00000000..06926266 --- /dev/null +++ b/casper_types_ver_2_0/src/chainspec/transaction_config/deploy_config.rs @@ -0,0 +1,112 @@ +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + Motes, +}; +#[cfg(any(feature = "testing", test))] +use crate::{testing::TestRng, U512}; + +/// The default maximum number of motes that payment code execution can cost. 
+#[cfg(any(feature = "testing", test))] +pub const DEFAULT_MAX_PAYMENT_MOTES: u64 = 2_500_000_000; + +/// Configuration values associated with deploys. +#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. +#[serde(deny_unknown_fields)] +pub struct DeployConfig { + /// Maximum amount any deploy can pay. + pub max_payment_cost: Motes, + /// Maximum time to live any deploy can specify. + pub max_dependencies: u8, + /// Maximum length in bytes of payment args per deploy. + pub payment_args_max_length: u32, + /// Maximum length in bytes of session args per deploy. + pub session_args_max_length: u32, +} + +#[cfg(any(feature = "testing", test))] +impl DeployConfig { + /// Generates a random instance using a `TestRng`. + pub fn random(rng: &mut TestRng) -> Self { + let max_payment_cost = Motes::new(U512::from(rng.gen_range(1_000_000..1_000_000_000))); + let max_dependencies = rng.gen(); + let payment_args_max_length = rng.gen(); + let session_args_max_length = rng.gen(); + + DeployConfig { + max_payment_cost, + max_dependencies, + payment_args_max_length, + session_args_max_length, + } + } +} + +#[cfg(any(feature = "testing", test))] +impl Default for DeployConfig { + fn default() -> Self { + DeployConfig { + max_payment_cost: Motes::new(U512::from(DEFAULT_MAX_PAYMENT_MOTES)), + max_dependencies: 10, + payment_args_max_length: 1024, + session_args_max_length: 1024, + } + } +} + +impl ToBytes for DeployConfig { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.max_payment_cost.write_bytes(writer)?; + self.max_dependencies.write_bytes(writer)?; + self.payment_args_max_length.write_bytes(writer)?; + self.session_args_max_length.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + 
self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.max_payment_cost.value().serialized_length() + + self.max_dependencies.serialized_length() + + self.payment_args_max_length.serialized_length() + + self.session_args_max_length.serialized_length() + } +} + +impl FromBytes for DeployConfig { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (max_payment_cost, remainder) = Motes::from_bytes(bytes)?; + let (max_dependencies, remainder) = u8::from_bytes(remainder)?; + let (payment_args_max_length, remainder) = u32::from_bytes(remainder)?; + let (session_args_max_length, remainder) = u32::from_bytes(remainder)?; + let config = DeployConfig { + max_payment_cost, + max_dependencies, + payment_args_max_length, + session_args_max_length, + }; + Ok((config, remainder)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let mut rng = TestRng::new(); + let config = DeployConfig::random(&mut rng); + bytesrepr::test_serialization_roundtrip(&config); + } +} diff --git a/casper_types_ver_2_0/src/chainspec/transaction_config/transaction_v1_config.rs b/casper_types_ver_2_0/src/chainspec/transaction_config/transaction_v1_config.rs new file mode 100644 index 00000000..2e9220c3 --- /dev/null +++ b/casper_types_ver_2_0/src/chainspec/transaction_config/transaction_v1_config.rs @@ -0,0 +1,74 @@ +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +use serde::{Deserialize, Serialize}; + +use crate::bytesrepr::{self, FromBytes, ToBytes}; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; + +/// Configuration values associated with V1 Transactions. +#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. 
+#[serde(deny_unknown_fields)] +pub struct TransactionV1Config { + /// Maximum length in bytes of runtime args per Transaction. + pub max_args_length: u32, +} + +#[cfg(any(feature = "testing", test))] +impl TransactionV1Config { + /// Generates a random instance using a `TestRng`. + pub fn random(rng: &mut TestRng) -> Self { + let max_args_length = rng.gen(); + + TransactionV1Config { max_args_length } + } +} + +#[cfg(any(feature = "testing", test))] +impl Default for TransactionV1Config { + fn default() -> Self { + TransactionV1Config { + max_args_length: 1024, + } + } +} + +impl ToBytes for TransactionV1Config { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.max_args_length.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.max_args_length.serialized_length() + } +} + +impl FromBytes for TransactionV1Config { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (max_args_length, remainder) = u32::from_bytes(bytes)?; + let config = TransactionV1Config { max_args_length }; + Ok((config, remainder)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let mut rng = TestRng::new(); + let config = TransactionV1Config::random(&mut rng); + bytesrepr::test_serialization_roundtrip(&config); + } +} diff --git a/casper_types_ver_2_0/src/chainspec/vm_config.rs b/casper_types_ver_2_0/src/chainspec/vm_config.rs new file mode 100644 index 00000000..34bb856e --- /dev/null +++ b/casper_types_ver_2_0/src/chainspec/vm_config.rs @@ -0,0 +1,42 @@ +mod auction_costs; +mod chainspec_registry; +mod handle_payment_costs; +mod host_function_costs; +mod message_limits; +mod mint_costs; +mod opcode_costs; +mod standard_payment_costs; +mod storage_costs; +mod system_config; +mod upgrade_config; +mod 
wasm_config; + +pub use auction_costs::{AuctionCosts, DEFAULT_ADD_BID_COST, DEFAULT_DELEGATE_COST}; +pub use chainspec_registry::ChainspecRegistry; +pub use handle_payment_costs::HandlePaymentCosts; +pub use host_function_costs::{ + Cost as HostFunctionCost, HostFunction, HostFunctionCosts, + DEFAULT_HOST_FUNCTION_NEW_DICTIONARY, DEFAULT_NEW_DICTIONARY_COST, +}; +pub use message_limits::MessageLimits; +pub use mint_costs::{MintCosts, DEFAULT_TRANSFER_COST}; +pub use opcode_costs::{BrTableCost, ControlFlowCosts, OpcodeCosts}; +#[cfg(any(feature = "testing", test))] +pub use opcode_costs::{ + DEFAULT_ADD_COST, DEFAULT_BIT_COST, DEFAULT_CONST_COST, DEFAULT_CONTROL_FLOW_BLOCK_OPCODE, + DEFAULT_CONTROL_FLOW_BR_IF_OPCODE, DEFAULT_CONTROL_FLOW_BR_OPCODE, + DEFAULT_CONTROL_FLOW_BR_TABLE_MULTIPLIER, DEFAULT_CONTROL_FLOW_BR_TABLE_OPCODE, + DEFAULT_CONTROL_FLOW_CALL_INDIRECT_OPCODE, DEFAULT_CONTROL_FLOW_CALL_OPCODE, + DEFAULT_CONTROL_FLOW_DROP_OPCODE, DEFAULT_CONTROL_FLOW_ELSE_OPCODE, + DEFAULT_CONTROL_FLOW_END_OPCODE, DEFAULT_CONTROL_FLOW_IF_OPCODE, + DEFAULT_CONTROL_FLOW_LOOP_OPCODE, DEFAULT_CONTROL_FLOW_RETURN_OPCODE, + DEFAULT_CONTROL_FLOW_SELECT_OPCODE, DEFAULT_CONVERSION_COST, DEFAULT_CURRENT_MEMORY_COST, + DEFAULT_DIV_COST, DEFAULT_GLOBAL_COST, DEFAULT_GROW_MEMORY_COST, + DEFAULT_INTEGER_COMPARISON_COST, DEFAULT_LOAD_COST, DEFAULT_LOCAL_COST, DEFAULT_MUL_COST, + DEFAULT_NOP_COST, DEFAULT_STORE_COST, DEFAULT_UNREACHABLE_COST, +}; +pub use standard_payment_costs::StandardPaymentCosts; +pub use storage_costs::StorageCosts; +pub use system_config::{SystemConfig, DEFAULT_WASMLESS_TRANSFER_COST}; +pub use upgrade_config::UpgradeConfig; +pub use wasm_config::{WasmConfig, DEFAULT_MAX_STACK_HEIGHT, DEFAULT_WASM_MAX_MEMORY}; diff --git a/casper_types_ver_2_0/src/chainspec/vm_config/auction_costs.rs b/casper_types_ver_2_0/src/chainspec/vm_config/auction_costs.rs new file mode 100644 index 00000000..2a673515 --- /dev/null +++ 
b/casper_types_ver_2_0/src/chainspec/vm_config/auction_costs.rs @@ -0,0 +1,269 @@ +//! Costs of the auction system contract. +#[cfg(feature = "datasize")] +use datasize::DataSize; +use rand::{distributions::Standard, prelude::*, Rng}; +use serde::{Deserialize, Serialize}; + +use crate::bytesrepr::{self, FromBytes, ToBytes}; + +/// Default cost of the `get_era_validators` auction entry point. +pub const DEFAULT_GET_ERA_VALIDATORS_COST: u32 = 10_000; +/// Default cost of the `read_seigniorage_recipients` auction entry point. +pub const DEFAULT_READ_SEIGNIORAGE_RECIPIENTS_COST: u32 = 10_000; +/// Default cost of the `add_bid` auction entry point. +pub const DEFAULT_ADD_BID_COST: u32 = 2_500_000_000; +/// Default cost of the `withdraw_bid` auction entry point. +pub const DEFAULT_WITHDRAW_BID_COST: u32 = 2_500_000_000; +/// Default cost of the `delegate` auction entry point. +pub const DEFAULT_DELEGATE_COST: u32 = 2_500_000_000; +/// Default cost of the `redelegate` auction entry point. +pub const DEFAULT_REDELEGATE_COST: u32 = 2_500_000_000; +/// Default cost of the `undelegate` auction entry point. +pub const DEFAULT_UNDELEGATE_COST: u32 = 2_500_000_000; +/// Default cost of the `run_auction` auction entry point. +pub const DEFAULT_RUN_AUCTION_COST: u32 = 10_000; +/// Default cost of the `slash` auction entry point. +pub const DEFAULT_SLASH_COST: u32 = 10_000; +/// Default cost of the `distribute` auction entry point. +pub const DEFAULT_DISTRIBUTE_COST: u32 = 10_000; +/// Default cost of the `withdraw_delegator_reward` auction entry point. +pub const DEFAULT_WITHDRAW_DELEGATOR_REWARD_COST: u32 = 10_000; +/// Default cost of the `withdraw_validator_reward` auction entry point. +pub const DEFAULT_WITHDRAW_VALIDATOR_REWARD_COST: u32 = 10_000; +/// Default cost of the `read_era_id` auction entry point. +pub const DEFAULT_READ_ERA_ID_COST: u32 = 10_000; +/// Default cost of the `activate_bid` auction entry point. 
+pub const DEFAULT_ACTIVATE_BID_COST: u32 = 10_000; + +/// Description of the costs of calling auction entrypoints. +#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct AuctionCosts { + /// Cost of calling the `get_era_validators` entry point. + pub get_era_validators: u32, + /// Cost of calling the `read_seigniorage_recipients` entry point. + pub read_seigniorage_recipients: u32, + /// Cost of calling the `add_bid` entry point. + pub add_bid: u32, + /// Cost of calling the `withdraw_bid` entry point. + pub withdraw_bid: u32, + /// Cost of calling the `delegate` entry point. + pub delegate: u32, + /// Cost of calling the `undelegate` entry point. + pub undelegate: u32, + /// Cost of calling the `run_auction` entry point. + pub run_auction: u32, + /// Cost of calling the `slash` entry point. + pub slash: u32, + /// Cost of calling the `distribute` entry point. + pub distribute: u32, + /// Cost of calling the `withdraw_delegator_reward` entry point. + pub withdraw_delegator_reward: u32, + /// Cost of calling the `withdraw_validator_reward` entry point. + pub withdraw_validator_reward: u32, + /// Cost of calling the `read_era_id` entry point. + pub read_era_id: u32, + /// Cost of calling the `activate_bid` entry point. + pub activate_bid: u32, + /// Cost of calling the `redelegate` entry point. 
+ pub redelegate: u32, +} + +impl Default for AuctionCosts { + fn default() -> Self { + Self { + get_era_validators: DEFAULT_GET_ERA_VALIDATORS_COST, + read_seigniorage_recipients: DEFAULT_READ_SEIGNIORAGE_RECIPIENTS_COST, + add_bid: DEFAULT_ADD_BID_COST, + withdraw_bid: DEFAULT_WITHDRAW_BID_COST, + delegate: DEFAULT_DELEGATE_COST, + undelegate: DEFAULT_UNDELEGATE_COST, + run_auction: DEFAULT_RUN_AUCTION_COST, + slash: DEFAULT_SLASH_COST, + distribute: DEFAULT_DISTRIBUTE_COST, + withdraw_delegator_reward: DEFAULT_WITHDRAW_DELEGATOR_REWARD_COST, + withdraw_validator_reward: DEFAULT_WITHDRAW_VALIDATOR_REWARD_COST, + read_era_id: DEFAULT_READ_ERA_ID_COST, + activate_bid: DEFAULT_ACTIVATE_BID_COST, + redelegate: DEFAULT_REDELEGATE_COST, + } + } +} + +impl ToBytes for AuctionCosts { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret = bytesrepr::unchecked_allocate_buffer(self); + + let Self { + get_era_validators, + read_seigniorage_recipients, + add_bid, + withdraw_bid, + delegate, + undelegate, + run_auction, + slash, + distribute, + withdraw_delegator_reward, + withdraw_validator_reward, + read_era_id, + activate_bid, + redelegate, + } = self; + + ret.append(&mut get_era_validators.to_bytes()?); + ret.append(&mut read_seigniorage_recipients.to_bytes()?); + ret.append(&mut add_bid.to_bytes()?); + ret.append(&mut withdraw_bid.to_bytes()?); + ret.append(&mut delegate.to_bytes()?); + ret.append(&mut undelegate.to_bytes()?); + ret.append(&mut run_auction.to_bytes()?); + ret.append(&mut slash.to_bytes()?); + ret.append(&mut distribute.to_bytes()?); + ret.append(&mut withdraw_delegator_reward.to_bytes()?); + ret.append(&mut withdraw_validator_reward.to_bytes()?); + ret.append(&mut read_era_id.to_bytes()?); + ret.append(&mut activate_bid.to_bytes()?); + ret.append(&mut redelegate.to_bytes()?); + + Ok(ret) + } + + fn serialized_length(&self) -> usize { + let Self { + get_era_validators, + read_seigniorage_recipients, + add_bid, + withdraw_bid, + delegate, + 
undelegate, + run_auction, + slash, + distribute, + withdraw_delegator_reward, + withdraw_validator_reward, + read_era_id, + activate_bid, + redelegate, + } = self; + + get_era_validators.serialized_length() + + read_seigniorage_recipients.serialized_length() + + add_bid.serialized_length() + + withdraw_bid.serialized_length() + + delegate.serialized_length() + + undelegate.serialized_length() + + run_auction.serialized_length() + + slash.serialized_length() + + distribute.serialized_length() + + withdraw_delegator_reward.serialized_length() + + withdraw_validator_reward.serialized_length() + + read_era_id.serialized_length() + + activate_bid.serialized_length() + + redelegate.serialized_length() + } +} + +impl FromBytes for AuctionCosts { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (get_era_validators, rem) = FromBytes::from_bytes(bytes)?; + let (read_seigniorage_recipients, rem) = FromBytes::from_bytes(rem)?; + let (add_bid, rem) = FromBytes::from_bytes(rem)?; + let (withdraw_bid, rem) = FromBytes::from_bytes(rem)?; + let (delegate, rem) = FromBytes::from_bytes(rem)?; + let (undelegate, rem) = FromBytes::from_bytes(rem)?; + let (run_auction, rem) = FromBytes::from_bytes(rem)?; + let (slash, rem) = FromBytes::from_bytes(rem)?; + let (distribute, rem) = FromBytes::from_bytes(rem)?; + let (withdraw_delegator_reward, rem) = FromBytes::from_bytes(rem)?; + let (withdraw_validator_reward, rem) = FromBytes::from_bytes(rem)?; + let (read_era_id, rem) = FromBytes::from_bytes(rem)?; + let (activate_bid, rem) = FromBytes::from_bytes(rem)?; + let (redelegate, rem) = FromBytes::from_bytes(rem)?; + Ok(( + Self { + get_era_validators, + read_seigniorage_recipients, + add_bid, + withdraw_bid, + delegate, + undelegate, + run_auction, + slash, + distribute, + withdraw_delegator_reward, + withdraw_validator_reward, + read_era_id, + activate_bid, + redelegate, + }, + rem, + )) + } +} + +impl Distribution for Standard { + fn sample(&self, rng: &mut 
R) -> AuctionCosts { + AuctionCosts { + get_era_validators: rng.gen(), + read_seigniorage_recipients: rng.gen(), + add_bid: rng.gen(), + withdraw_bid: rng.gen(), + delegate: rng.gen(), + undelegate: rng.gen(), + run_auction: rng.gen(), + slash: rng.gen(), + distribute: rng.gen(), + withdraw_delegator_reward: rng.gen(), + withdraw_validator_reward: rng.gen(), + read_era_id: rng.gen(), + activate_bid: rng.gen(), + redelegate: rng.gen(), + } + } +} + +#[doc(hidden)] +#[cfg(any(feature = "gens", test))] +pub mod gens { + use proptest::{num, prop_compose}; + + use super::AuctionCosts; + + prop_compose! { + pub fn auction_costs_arb()( + get_era_validators in num::u32::ANY, + read_seigniorage_recipients in num::u32::ANY, + add_bid in num::u32::ANY, + withdraw_bid in num::u32::ANY, + delegate in num::u32::ANY, + undelegate in num::u32::ANY, + run_auction in num::u32::ANY, + slash in num::u32::ANY, + distribute in num::u32::ANY, + withdraw_delegator_reward in num::u32::ANY, + withdraw_validator_reward in num::u32::ANY, + read_era_id in num::u32::ANY, + activate_bid in num::u32::ANY, + redelegate in num::u32::ANY, + ) -> AuctionCosts { + AuctionCosts { + get_era_validators, + read_seigniorage_recipients, + add_bid, + withdraw_bid, + delegate, + undelegate, + run_auction, + slash, + distribute, + withdraw_delegator_reward, + withdraw_validator_reward, + read_era_id, + activate_bid, + redelegate, + } + } + } +} diff --git a/casper_types_ver_2_0/src/chainspec/vm_config/chainspec_registry.rs b/casper_types_ver_2_0/src/chainspec/vm_config/chainspec_registry.rs new file mode 100644 index 00000000..38e13b15 --- /dev/null +++ b/casper_types_ver_2_0/src/chainspec/vm_config/chainspec_registry.rs @@ -0,0 +1,157 @@ +//! The registry of chainspec hash digests. 
+ +use std::{collections::BTreeMap, convert::TryFrom}; + +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + CLType, CLTyped, Digest, +}; + +type BytesreprChainspecRegistry = BTreeMap; + +/// The chainspec registry. +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Debug)] +pub struct ChainspecRegistry { + chainspec_raw_hash: Digest, + genesis_accounts_raw_hash: Option, + global_state_raw_hash: Option, +} + +impl ChainspecRegistry { + const CHAINSPEC_RAW_MAP_KEY: &'static str = "chainspec_raw"; + const GENESIS_ACCOUNTS_RAW_MAP_KEY: &'static str = "genesis_accounts_raw"; + const GLOBAL_STATE_RAW_MAP_KEY: &'static str = "global_state_raw"; + + /// Returns a `ChainspecRegistry` constructed at genesis. + pub fn new_with_genesis( + chainspec_file_bytes: &[u8], + genesis_accounts_file_bytes: &[u8], + ) -> Self { + ChainspecRegistry { + chainspec_raw_hash: Digest::hash(chainspec_file_bytes), + genesis_accounts_raw_hash: Some(Digest::hash(genesis_accounts_file_bytes)), + global_state_raw_hash: None, + } + } + + /// Returns a `ChainspecRegistry` constructed at node upgrade. + pub fn new_with_optional_global_state( + chainspec_file_bytes: &[u8], + global_state_file_bytes: Option<&[u8]>, + ) -> Self { + ChainspecRegistry { + chainspec_raw_hash: Digest::hash(chainspec_file_bytes), + genesis_accounts_raw_hash: None, + global_state_raw_hash: global_state_file_bytes.map(Digest::hash), + } + } + + /// Returns the hash of the raw bytes of the chainspec.toml file. + pub fn chainspec_raw_hash(&self) -> &Digest { + &self.chainspec_raw_hash + } + + /// Returns the hash of the raw bytes of the genesis accounts.toml file if it exists. + pub fn genesis_accounts_raw_hash(&self) -> Option<&Digest> { + self.genesis_accounts_raw_hash.as_ref() + } + + /// Returns the hash of the raw bytes of the global_state.toml file if it exists. 
+ pub fn global_state_raw_hash(&self) -> Option<&Digest> { + self.global_state_raw_hash.as_ref() + } + + fn as_map(&self) -> BytesreprChainspecRegistry { + let mut map = BTreeMap::new(); + map.insert( + Self::CHAINSPEC_RAW_MAP_KEY.to_string(), + self.chainspec_raw_hash, + ); + if let Some(genesis_accounts_raw_hash) = self.genesis_accounts_raw_hash { + map.insert( + Self::GENESIS_ACCOUNTS_RAW_MAP_KEY.to_string(), + genesis_accounts_raw_hash, + ); + } + if let Some(global_state_raw_hash) = self.global_state_raw_hash { + map.insert( + Self::GLOBAL_STATE_RAW_MAP_KEY.to_string(), + global_state_raw_hash, + ); + } + map + } +} + +impl TryFrom for ChainspecRegistry { + type Error = bytesrepr::Error; + + fn try_from(map: BytesreprChainspecRegistry) -> Result { + let chainspec_raw_hash = *map + .get(Self::CHAINSPEC_RAW_MAP_KEY) + .ok_or(bytesrepr::Error::Formatting)?; + let genesis_accounts_raw_hash = map.get(Self::GENESIS_ACCOUNTS_RAW_MAP_KEY).copied(); + let global_state_raw_hash = map.get(Self::GLOBAL_STATE_RAW_MAP_KEY).copied(); + Ok(ChainspecRegistry { + chainspec_raw_hash, + genesis_accounts_raw_hash, + global_state_raw_hash, + }) + } +} + +impl ToBytes for ChainspecRegistry { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.as_map().to_bytes() + } + + fn serialized_length(&self) -> usize { + self.as_map().serialized_length() + } +} + +impl FromBytes for ChainspecRegistry { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (map, remainder) = BytesreprChainspecRegistry::from_bytes(bytes)?; + let chainspec_registry = ChainspecRegistry::try_from(map)?; + Ok((chainspec_registry, remainder)) + } +} + +impl CLTyped for ChainspecRegistry { + fn cl_type() -> CLType { + BytesreprChainspecRegistry::cl_type() + } +} + +#[cfg(test)] +mod tests { + use rand::Rng; + + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let mut rng = rand::thread_rng(); + + let chainspec_file_bytes: [u8; 10] = rng.gen(); + + let 
genesis_account_file_bytes: [u8; 10] = rng.gen(); + let chainspec_registry = + ChainspecRegistry::new_with_genesis(&chainspec_file_bytes, &genesis_account_file_bytes); + bytesrepr::test_serialization_roundtrip(&chainspec_registry); + + let global_state_file_bytes: [u8; 10] = rng.gen(); + let chainspec_registry = ChainspecRegistry::new_with_optional_global_state( + &chainspec_file_bytes, + Some(&global_state_file_bytes), + ); + bytesrepr::test_serialization_roundtrip(&chainspec_registry); + + let chainspec_registry = + ChainspecRegistry::new_with_optional_global_state(&chainspec_file_bytes, None); + bytesrepr::test_serialization_roundtrip(&chainspec_registry); + } +} diff --git a/casper_types_ver_2_0/src/chainspec/vm_config/handle_payment_costs.rs b/casper_types_ver_2_0/src/chainspec/vm_config/handle_payment_costs.rs new file mode 100644 index 00000000..49f53708 --- /dev/null +++ b/casper_types_ver_2_0/src/chainspec/vm_config/handle_payment_costs.rs @@ -0,0 +1,116 @@ +//! Costs of the `handle_payment` system contract. +#[cfg(feature = "datasize")] +use datasize::DataSize; +use rand::{distributions::Standard, prelude::*, Rng}; +use serde::{Deserialize, Serialize}; + +use crate::bytesrepr::{self, FromBytes, ToBytes}; + +/// Default cost of the `get_payment_purse` `handle_payment` entry point. +pub const DEFAULT_GET_PAYMENT_PURSE_COST: u32 = 10_000; +/// Default cost of the `set_refund_purse` `handle_payment` entry point. +pub const DEFAULT_SET_REFUND_PURSE_COST: u32 = 10_000; +/// Default cost of the `get_refund_purse` `handle_payment` entry point. +pub const DEFAULT_GET_REFUND_PURSE_COST: u32 = 10_000; +/// Default cost of the `finalize_payment` `handle_payment` entry point. +pub const DEFAULT_FINALIZE_PAYMENT_COST: u32 = 10_000; + +/// Description of the costs of calling `handle_payment` entrypoints. 
+#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct HandlePaymentCosts { + /// Cost of calling the `get_payment_purse` entry point. + pub get_payment_purse: u32, + /// Cost of calling the `set_refund_purse` entry point. + pub set_refund_purse: u32, + /// Cost of calling the `get_refund_purse` entry point. + pub get_refund_purse: u32, + /// Cost of calling the `finalize_payment` entry point. + pub finalize_payment: u32, +} + +impl Default for HandlePaymentCosts { + fn default() -> Self { + Self { + get_payment_purse: DEFAULT_GET_PAYMENT_PURSE_COST, + set_refund_purse: DEFAULT_SET_REFUND_PURSE_COST, + get_refund_purse: DEFAULT_GET_REFUND_PURSE_COST, + finalize_payment: DEFAULT_FINALIZE_PAYMENT_COST, + } + } +} + +impl ToBytes for HandlePaymentCosts { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret = bytesrepr::unchecked_allocate_buffer(self); + + ret.append(&mut self.get_payment_purse.to_bytes()?); + ret.append(&mut self.set_refund_purse.to_bytes()?); + ret.append(&mut self.get_refund_purse.to_bytes()?); + ret.append(&mut self.finalize_payment.to_bytes()?); + + Ok(ret) + } + + fn serialized_length(&self) -> usize { + self.get_payment_purse.serialized_length() + + self.set_refund_purse.serialized_length() + + self.get_refund_purse.serialized_length() + + self.finalize_payment.serialized_length() + } +} + +impl FromBytes for HandlePaymentCosts { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (get_payment_purse, rem) = FromBytes::from_bytes(bytes)?; + let (set_refund_purse, rem) = FromBytes::from_bytes(rem)?; + let (get_refund_purse, rem) = FromBytes::from_bytes(rem)?; + let (finalize_payment, rem) = FromBytes::from_bytes(rem)?; + + Ok(( + Self { + get_payment_purse, + set_refund_purse, + get_refund_purse, + finalize_payment, + }, + rem, + )) + } +} + +impl Distribution for Standard { + fn sample(&self, 
rng: &mut R) -> HandlePaymentCosts { + HandlePaymentCosts { + get_payment_purse: rng.gen(), + set_refund_purse: rng.gen(), + get_refund_purse: rng.gen(), + finalize_payment: rng.gen(), + } + } +} + +#[doc(hidden)] +#[cfg(any(feature = "gens", test))] +pub mod gens { + use proptest::{num, prop_compose}; + + use super::HandlePaymentCosts; + + prop_compose! { + pub fn handle_payment_costs_arb()( + get_payment_purse in num::u32::ANY, + set_refund_purse in num::u32::ANY, + get_refund_purse in num::u32::ANY, + finalize_payment in num::u32::ANY, + ) -> HandlePaymentCosts { + HandlePaymentCosts { + get_payment_purse, + set_refund_purse, + get_refund_purse, + finalize_payment, + } + } + } +} diff --git a/casper_types_ver_2_0/src/chainspec/vm_config/host_function_costs.rs b/casper_types_ver_2_0/src/chainspec/vm_config/host_function_costs.rs new file mode 100644 index 00000000..c536fa76 --- /dev/null +++ b/casper_types_ver_2_0/src/chainspec/vm_config/host_function_costs.rs @@ -0,0 +1,1080 @@ +//! Support for host function gas cost tables. +use core::ops::Add; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use derive_more::Add; +use num_traits::Zero; +use rand::{distributions::Standard, prelude::Distribution, Rng}; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U32_SERIALIZED_LENGTH}, + Gas, +}; + +/// Representation of argument's cost. +pub type Cost = u32; + +const COST_SERIALIZED_LENGTH: usize = U32_SERIALIZED_LENGTH; + +/// An identifier that represents an unused argument. +const NOT_USED: Cost = 0; + +/// An arbitrary default fixed cost for host functions that were not researched yet. 
+const DEFAULT_FIXED_COST: Cost = 200; + +const DEFAULT_ADD_COST: u32 = 5_800; +const DEFAULT_ADD_ASSOCIATED_KEY_COST: u32 = 9_000; + +const DEFAULT_CALL_CONTRACT_COST: u32 = 4_500; +const DEFAULT_CALL_CONTRACT_ARGS_SIZE_WEIGHT: u32 = 420; + +const DEFAULT_CREATE_PURSE_COST: u32 = 2_500_000_000; +const DEFAULT_GET_BALANCE_COST: u32 = 3_800; +const DEFAULT_GET_BLOCKTIME_COST: u32 = 330; +const DEFAULT_GET_CALLER_COST: u32 = 380; +const DEFAULT_GET_KEY_COST: u32 = 2_000; +const DEFAULT_GET_KEY_NAME_SIZE_WEIGHT: u32 = 440; +const DEFAULT_GET_MAIN_PURSE_COST: u32 = 1_300; +const DEFAULT_GET_PHASE_COST: u32 = 710; +const DEFAULT_GET_SYSTEM_CONTRACT_COST: u32 = 1_100; +const DEFAULT_HAS_KEY_COST: u32 = 1_500; +const DEFAULT_HAS_KEY_NAME_SIZE_WEIGHT: u32 = 840; +const DEFAULT_IS_VALID_UREF_COST: u32 = 760; +const DEFAULT_LOAD_NAMED_KEYS_COST: u32 = 42_000; +const DEFAULT_NEW_UREF_COST: u32 = 17_000; +const DEFAULT_NEW_UREF_VALUE_SIZE_WEIGHT: u32 = 590; + +const DEFAULT_PRINT_COST: u32 = 20_000; +const DEFAULT_PRINT_TEXT_SIZE_WEIGHT: u32 = 4_600; + +const DEFAULT_PUT_KEY_COST: u32 = 38_000; +const DEFAULT_PUT_KEY_NAME_SIZE_WEIGHT: u32 = 1_100; + +const DEFAULT_READ_HOST_BUFFER_COST: u32 = 3_500; +const DEFAULT_READ_HOST_BUFFER_DEST_SIZE_WEIGHT: u32 = 310; + +const DEFAULT_READ_VALUE_COST: u32 = 6_000; +const DEFAULT_DICTIONARY_GET_COST: u32 = 5_500; +const DEFAULT_DICTIONARY_GET_KEY_SIZE_WEIGHT: u32 = 590; + +const DEFAULT_REMOVE_ASSOCIATED_KEY_COST: u32 = 4_200; + +const DEFAULT_REMOVE_KEY_COST: u32 = 61_000; +const DEFAULT_REMOVE_KEY_NAME_SIZE_WEIGHT: u32 = 3_200; + +const DEFAULT_RET_COST: u32 = 23_000; +const DEFAULT_RET_VALUE_SIZE_WEIGHT: u32 = 420_000; + +const DEFAULT_REVERT_COST: u32 = 500; +const DEFAULT_SET_ACTION_THRESHOLD_COST: u32 = 74_000; +const DEFAULT_TRANSFER_FROM_PURSE_TO_ACCOUNT_COST: u32 = 2_500_000_000; +const DEFAULT_TRANSFER_FROM_PURSE_TO_PURSE_COST: u32 = 82_000; +const DEFAULT_TRANSFER_TO_ACCOUNT_COST: u32 = 2_500_000_000; +const 
DEFAULT_UPDATE_ASSOCIATED_KEY_COST: u32 = 4_200; + +const DEFAULT_WRITE_COST: u32 = 14_000; +const DEFAULT_WRITE_VALUE_SIZE_WEIGHT: u32 = 980; + +const DEFAULT_DICTIONARY_PUT_COST: u32 = 9_500; +const DEFAULT_DICTIONARY_PUT_KEY_BYTES_SIZE_WEIGHT: u32 = 1_800; +const DEFAULT_DICTIONARY_PUT_VALUE_SIZE_WEIGHT: u32 = 520; + +/// Default cost for a new dictionary. +pub const DEFAULT_NEW_DICTIONARY_COST: u32 = DEFAULT_NEW_UREF_COST; + +/// Host function cost unit for a new dictionary. +pub const DEFAULT_HOST_FUNCTION_NEW_DICTIONARY: HostFunction<[Cost; 1]> = + HostFunction::new(DEFAULT_NEW_DICTIONARY_COST, [NOT_USED]); + +/// Default value that the cost of calling `casper_emit_message` increases by for every new message +/// emitted within an execution. +pub const DEFAULT_COST_INCREASE_PER_MESSAGE_EMITTED: u32 = 50; + +/// Representation of a host function cost. +/// +/// The total gas cost is equal to `cost` + sum of each argument weight multiplied by the byte size +/// of the data. +#[derive(Copy, Clone, PartialEq, Eq, Deserialize, Serialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct HostFunction { + /// How much the user is charged for calling the host function. + cost: Cost, + /// Weights of the function arguments. + arguments: T, +} + +impl Default for HostFunction +where + T: Default, +{ + fn default() -> Self { + HostFunction::new(DEFAULT_FIXED_COST, Default::default()) + } +} + +impl HostFunction { + /// Creates a new instance of `HostFunction` with a fixed call cost and argument weights. + pub const fn new(cost: Cost, arguments: T) -> Self { + Self { cost, arguments } + } + + /// Returns the base gas fee for calling the host function. + pub fn cost(&self) -> Cost { + self.cost + } +} + +impl HostFunction +where + T: Default, +{ + /// Creates a new fixed host function cost with argument weights of zero. 
+ pub fn fixed(cost: Cost) -> Self { + Self { + cost, + ..Default::default() + } + } +} + +impl HostFunction +where + T: AsRef<[Cost]>, +{ + /// Returns a slice containing the argument weights. + pub fn arguments(&self) -> &[Cost] { + self.arguments.as_ref() + } + + /// Calculate gas cost for a host function + pub fn calculate_gas_cost(&self, weights: T) -> Gas { + let mut gas = Gas::new(self.cost.into()); + for (argument, weight) in self.arguments.as_ref().iter().zip(weights.as_ref()) { + let lhs = Gas::new((*argument).into()); + let rhs = Gas::new((*weight).into()); + gas += lhs * rhs; + } + gas + } +} + +impl Add for HostFunction<[Cost; COUNT]> { + type Output = HostFunction<[Cost; COUNT]>; + + fn add(self, rhs: Self) -> Self::Output { + let mut result = HostFunction::new(self.cost + rhs.cost, [0; COUNT]); + for i in 0..COUNT { + result.arguments[i] = self.arguments[i] + rhs.arguments[i]; + } + result + } +} + +impl Zero for HostFunction<[Cost; COUNT]> { + fn zero() -> Self { + HostFunction::new(0, [0; COUNT]) + } + + fn is_zero(&self) -> bool { + !self.arguments.iter().any(|cost| *cost != 0) && self.cost.is_zero() + } +} + +impl Distribution> for Standard +where + Standard: Distribution, + T: AsRef<[Cost]>, +{ + fn sample(&self, rng: &mut R) -> HostFunction { + let cost = rng.gen::(); + let arguments = rng.gen(); + HostFunction::new(cost, arguments) + } +} + +impl ToBytes for HostFunction +where + T: AsRef<[Cost]>, +{ + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret = bytesrepr::unchecked_allocate_buffer(self); + ret.append(&mut self.cost.to_bytes()?); + for value in self.arguments.as_ref().iter() { + ret.append(&mut value.to_bytes()?); + } + Ok(ret) + } + + fn serialized_length(&self) -> usize { + self.cost.serialized_length() + (COST_SERIALIZED_LENGTH * self.arguments.as_ref().len()) + } +} + +impl FromBytes for HostFunction +where + T: Default + AsMut<[Cost]>, +{ + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + 
let (cost, mut bytes) = FromBytes::from_bytes(bytes)?; + let mut arguments = T::default(); + let arguments_mut = arguments.as_mut(); + for ith_argument in arguments_mut { + let (cost, rem) = FromBytes::from_bytes(bytes)?; + *ith_argument = cost; + bytes = rem; + } + Ok((Self { cost, arguments }, bytes)) + } +} + +/// Definition of a host function cost table. +#[derive(Add, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct HostFunctionCosts { + /// Cost increase for successive calls to `casper_emit_message` within an execution. + pub cost_increase_per_message: u32, + /// Cost of calling the `read_value` host function. + pub read_value: HostFunction<[Cost; 3]>, + /// Cost of calling the `dictionary_get` host function. + #[serde(alias = "read_value_local")] + pub dictionary_get: HostFunction<[Cost; 3]>, + /// Cost of calling the `write` host function. + pub write: HostFunction<[Cost; 4]>, + /// Cost of calling the `dictionary_put` host function. + #[serde(alias = "write_local")] + pub dictionary_put: HostFunction<[Cost; 4]>, + /// Cost of calling the `add` host function. + pub add: HostFunction<[Cost; 4]>, + /// Cost of calling the `new_uref` host function. + pub new_uref: HostFunction<[Cost; 3]>, + /// Cost of calling the `load_named_keys` host function. + pub load_named_keys: HostFunction<[Cost; 2]>, + /// Cost of calling the `ret` host function. + pub ret: HostFunction<[Cost; 2]>, + /// Cost of calling the `get_key` host function. + pub get_key: HostFunction<[Cost; 5]>, + /// Cost of calling the `has_key` host function. + pub has_key: HostFunction<[Cost; 2]>, + /// Cost of calling the `put_key` host function. + pub put_key: HostFunction<[Cost; 4]>, + /// Cost of calling the `remove_key` host function. + pub remove_key: HostFunction<[Cost; 2]>, + /// Cost of calling the `revert` host function. 
+ pub revert: HostFunction<[Cost; 1]>, + /// Cost of calling the `is_valid_uref` host function. + pub is_valid_uref: HostFunction<[Cost; 2]>, + /// Cost of calling the `add_associated_key` host function. + pub add_associated_key: HostFunction<[Cost; 3]>, + /// Cost of calling the `remove_associated_key` host function. + pub remove_associated_key: HostFunction<[Cost; 2]>, + /// Cost of calling the `update_associated_key` host function. + pub update_associated_key: HostFunction<[Cost; 3]>, + /// Cost of calling the `set_action_threshold` host function. + pub set_action_threshold: HostFunction<[Cost; 2]>, + /// Cost of calling the `get_caller` host function. + pub get_caller: HostFunction<[Cost; 1]>, + /// Cost of calling the `get_blocktime` host function. + pub get_blocktime: HostFunction<[Cost; 1]>, + /// Cost of calling the `create_purse` host function. + pub create_purse: HostFunction<[Cost; 2]>, + /// Cost of calling the `transfer_to_account` host function. + pub transfer_to_account: HostFunction<[Cost; 7]>, + /// Cost of calling the `transfer_from_purse_to_account` host function. + pub transfer_from_purse_to_account: HostFunction<[Cost; 9]>, + /// Cost of calling the `transfer_from_purse_to_purse` host function. + pub transfer_from_purse_to_purse: HostFunction<[Cost; 8]>, + /// Cost of calling the `get_balance` host function. + pub get_balance: HostFunction<[Cost; 3]>, + /// Cost of calling the `get_phase` host function. + pub get_phase: HostFunction<[Cost; 1]>, + /// Cost of calling the `get_system_contract` host function. + pub get_system_contract: HostFunction<[Cost; 3]>, + /// Cost of calling the `get_main_purse` host function. + pub get_main_purse: HostFunction<[Cost; 1]>, + /// Cost of calling the `read_host_buffer` host function. + pub read_host_buffer: HostFunction<[Cost; 3]>, + /// Cost of calling the `create_contract_package_at_hash` host function. 
+ pub create_contract_package_at_hash: HostFunction<[Cost; 2]>, + /// Cost of calling the `create_contract_user_group` host function. + pub create_contract_user_group: HostFunction<[Cost; 8]>, + /// Cost of calling the `add_contract_version` host function. + pub add_contract_version: HostFunction<[Cost; 9]>, + /// Cost of calling the `disable_contract_version` host function. + pub disable_contract_version: HostFunction<[Cost; 4]>, + /// Cost of calling the `call_contract` host function. + pub call_contract: HostFunction<[Cost; 7]>, + /// Cost of calling the `call_versioned_contract` host function. + pub call_versioned_contract: HostFunction<[Cost; 9]>, + /// Cost of calling the `get_named_arg_size` host function. + pub get_named_arg_size: HostFunction<[Cost; 3]>, + /// Cost of calling the `get_named_arg` host function. + pub get_named_arg: HostFunction<[Cost; 4]>, + /// Cost of calling the `remove_contract_user_group` host function. + pub remove_contract_user_group: HostFunction<[Cost; 4]>, + /// Cost of calling the `provision_contract_user_group_uref` host function. + pub provision_contract_user_group_uref: HostFunction<[Cost; 5]>, + /// Cost of calling the `remove_contract_user_group_urefs` host function. + pub remove_contract_user_group_urefs: HostFunction<[Cost; 6]>, + /// Cost of calling the `print` host function. + pub print: HostFunction<[Cost; 2]>, + /// Cost of calling the `blake2b` host function. + pub blake2b: HostFunction<[Cost; 4]>, + /// Cost of calling the `next address` host function. + pub random_bytes: HostFunction<[Cost; 2]>, + /// Cost of calling the `enable_contract_version` host function. + pub enable_contract_version: HostFunction<[Cost; 4]>, + /// Cost of calling the `add_session_version` host function. + pub add_session_version: HostFunction<[Cost; 2]>, + /// Cost of calling the `casper_manage_message_topic` host function. + pub manage_message_topic: HostFunction<[Cost; 4]>, + /// Cost of calling the `casper_emit_message` host function. 
+ pub emit_message: HostFunction<[Cost; 4]>, +} + +impl Zero for HostFunctionCosts { + fn zero() -> Self { + Self { + read_value: HostFunction::zero(), + dictionary_get: HostFunction::zero(), + write: HostFunction::zero(), + dictionary_put: HostFunction::zero(), + add: HostFunction::zero(), + new_uref: HostFunction::zero(), + load_named_keys: HostFunction::zero(), + ret: HostFunction::zero(), + get_key: HostFunction::zero(), + has_key: HostFunction::zero(), + put_key: HostFunction::zero(), + remove_key: HostFunction::zero(), + revert: HostFunction::zero(), + is_valid_uref: HostFunction::zero(), + add_associated_key: HostFunction::zero(), + remove_associated_key: HostFunction::zero(), + update_associated_key: HostFunction::zero(), + set_action_threshold: HostFunction::zero(), + get_caller: HostFunction::zero(), + get_blocktime: HostFunction::zero(), + create_purse: HostFunction::zero(), + transfer_to_account: HostFunction::zero(), + transfer_from_purse_to_account: HostFunction::zero(), + transfer_from_purse_to_purse: HostFunction::zero(), + get_balance: HostFunction::zero(), + get_phase: HostFunction::zero(), + get_system_contract: HostFunction::zero(), + get_main_purse: HostFunction::zero(), + read_host_buffer: HostFunction::zero(), + create_contract_package_at_hash: HostFunction::zero(), + create_contract_user_group: HostFunction::zero(), + add_contract_version: HostFunction::zero(), + disable_contract_version: HostFunction::zero(), + call_contract: HostFunction::zero(), + call_versioned_contract: HostFunction::zero(), + get_named_arg_size: HostFunction::zero(), + get_named_arg: HostFunction::zero(), + remove_contract_user_group: HostFunction::zero(), + provision_contract_user_group_uref: HostFunction::zero(), + remove_contract_user_group_urefs: HostFunction::zero(), + print: HostFunction::zero(), + blake2b: HostFunction::zero(), + random_bytes: HostFunction::zero(), + enable_contract_version: HostFunction::zero(), + add_session_version: HostFunction::zero(), + 
manage_message_topic: HostFunction::zero(), + emit_message: HostFunction::zero(), + cost_increase_per_message: Zero::zero(), + } + } + + fn is_zero(&self) -> bool { + let HostFunctionCosts { + cost_increase_per_message, + read_value, + dictionary_get, + write, + dictionary_put, + add, + new_uref, + load_named_keys, + ret, + get_key, + has_key, + put_key, + remove_key, + revert, + is_valid_uref, + add_associated_key, + remove_associated_key, + update_associated_key, + set_action_threshold, + get_caller, + get_blocktime, + create_purse, + transfer_to_account, + transfer_from_purse_to_account, + transfer_from_purse_to_purse, + get_balance, + get_phase, + get_system_contract, + get_main_purse, + read_host_buffer, + create_contract_package_at_hash, + create_contract_user_group, + add_contract_version, + disable_contract_version, + call_contract, + call_versioned_contract, + get_named_arg_size, + get_named_arg, + remove_contract_user_group, + provision_contract_user_group_uref, + remove_contract_user_group_urefs, + print, + blake2b, + random_bytes, + enable_contract_version, + add_session_version, + manage_message_topic, + emit_message, + } = self; + read_value.is_zero() + && dictionary_get.is_zero() + && write.is_zero() + && dictionary_put.is_zero() + && add.is_zero() + && new_uref.is_zero() + && load_named_keys.is_zero() + && ret.is_zero() + && get_key.is_zero() + && has_key.is_zero() + && put_key.is_zero() + && remove_key.is_zero() + && revert.is_zero() + && is_valid_uref.is_zero() + && add_associated_key.is_zero() + && remove_associated_key.is_zero() + && update_associated_key.is_zero() + && set_action_threshold.is_zero() + && get_caller.is_zero() + && get_blocktime.is_zero() + && create_purse.is_zero() + && transfer_to_account.is_zero() + && transfer_from_purse_to_account.is_zero() + && transfer_from_purse_to_purse.is_zero() + && get_balance.is_zero() + && get_phase.is_zero() + && get_system_contract.is_zero() + && get_main_purse.is_zero() + && 
read_host_buffer.is_zero() + && create_contract_package_at_hash.is_zero() + && create_contract_user_group.is_zero() + && add_contract_version.is_zero() + && disable_contract_version.is_zero() + && call_contract.is_zero() + && call_versioned_contract.is_zero() + && get_named_arg_size.is_zero() + && get_named_arg.is_zero() + && remove_contract_user_group.is_zero() + && provision_contract_user_group_uref.is_zero() + && remove_contract_user_group_urefs.is_zero() + && print.is_zero() + && blake2b.is_zero() + && random_bytes.is_zero() + && enable_contract_version.is_zero() + && add_session_version.is_zero() + && manage_message_topic.is_zero() + && emit_message.is_zero() + && cost_increase_per_message.is_zero() + } +} + +impl Default for HostFunctionCosts { + fn default() -> Self { + Self { + read_value: HostFunction::fixed(DEFAULT_READ_VALUE_COST), + dictionary_get: HostFunction::new( + DEFAULT_DICTIONARY_GET_COST, + [NOT_USED, DEFAULT_DICTIONARY_GET_KEY_SIZE_WEIGHT, NOT_USED], + ), + write: HostFunction::new( + DEFAULT_WRITE_COST, + [ + NOT_USED, + NOT_USED, + NOT_USED, + DEFAULT_WRITE_VALUE_SIZE_WEIGHT, + ], + ), + dictionary_put: HostFunction::new( + DEFAULT_DICTIONARY_PUT_COST, + [ + NOT_USED, + DEFAULT_DICTIONARY_PUT_KEY_BYTES_SIZE_WEIGHT, + NOT_USED, + DEFAULT_DICTIONARY_PUT_VALUE_SIZE_WEIGHT, + ], + ), + add: HostFunction::fixed(DEFAULT_ADD_COST), + new_uref: HostFunction::new( + DEFAULT_NEW_UREF_COST, + [NOT_USED, NOT_USED, DEFAULT_NEW_UREF_VALUE_SIZE_WEIGHT], + ), + load_named_keys: HostFunction::fixed(DEFAULT_LOAD_NAMED_KEYS_COST), + ret: HostFunction::new(DEFAULT_RET_COST, [NOT_USED, DEFAULT_RET_VALUE_SIZE_WEIGHT]), + get_key: HostFunction::new( + DEFAULT_GET_KEY_COST, + [ + NOT_USED, + DEFAULT_GET_KEY_NAME_SIZE_WEIGHT, + NOT_USED, + NOT_USED, + NOT_USED, + ], + ), + has_key: HostFunction::new( + DEFAULT_HAS_KEY_COST, + [NOT_USED, DEFAULT_HAS_KEY_NAME_SIZE_WEIGHT], + ), + put_key: HostFunction::new( + DEFAULT_PUT_KEY_COST, + [ + NOT_USED, + 
DEFAULT_PUT_KEY_NAME_SIZE_WEIGHT, + NOT_USED, + NOT_USED, + ], + ), + remove_key: HostFunction::new( + DEFAULT_REMOVE_KEY_COST, + [NOT_USED, DEFAULT_REMOVE_KEY_NAME_SIZE_WEIGHT], + ), + revert: HostFunction::fixed(DEFAULT_REVERT_COST), + is_valid_uref: HostFunction::fixed(DEFAULT_IS_VALID_UREF_COST), + add_associated_key: HostFunction::fixed(DEFAULT_ADD_ASSOCIATED_KEY_COST), + remove_associated_key: HostFunction::fixed(DEFAULT_REMOVE_ASSOCIATED_KEY_COST), + update_associated_key: HostFunction::fixed(DEFAULT_UPDATE_ASSOCIATED_KEY_COST), + set_action_threshold: HostFunction::fixed(DEFAULT_SET_ACTION_THRESHOLD_COST), + get_caller: HostFunction::fixed(DEFAULT_GET_CALLER_COST), + get_blocktime: HostFunction::fixed(DEFAULT_GET_BLOCKTIME_COST), + create_purse: HostFunction::fixed(DEFAULT_CREATE_PURSE_COST), + transfer_to_account: HostFunction::fixed(DEFAULT_TRANSFER_TO_ACCOUNT_COST), + transfer_from_purse_to_account: HostFunction::fixed( + DEFAULT_TRANSFER_FROM_PURSE_TO_ACCOUNT_COST, + ), + transfer_from_purse_to_purse: HostFunction::fixed( + DEFAULT_TRANSFER_FROM_PURSE_TO_PURSE_COST, + ), + get_balance: HostFunction::fixed(DEFAULT_GET_BALANCE_COST), + get_phase: HostFunction::fixed(DEFAULT_GET_PHASE_COST), + get_system_contract: HostFunction::fixed(DEFAULT_GET_SYSTEM_CONTRACT_COST), + get_main_purse: HostFunction::fixed(DEFAULT_GET_MAIN_PURSE_COST), + read_host_buffer: HostFunction::new( + DEFAULT_READ_HOST_BUFFER_COST, + [ + NOT_USED, + DEFAULT_READ_HOST_BUFFER_DEST_SIZE_WEIGHT, + NOT_USED, + ], + ), + create_contract_package_at_hash: HostFunction::default(), + create_contract_user_group: HostFunction::default(), + add_contract_version: HostFunction::default(), + disable_contract_version: HostFunction::default(), + call_contract: HostFunction::new( + DEFAULT_CALL_CONTRACT_COST, + [ + NOT_USED, + NOT_USED, + NOT_USED, + NOT_USED, + NOT_USED, + DEFAULT_CALL_CONTRACT_ARGS_SIZE_WEIGHT, + NOT_USED, + ], + ), + call_versioned_contract: HostFunction::new( + 
DEFAULT_CALL_CONTRACT_COST, + [ + NOT_USED, + NOT_USED, + NOT_USED, + NOT_USED, + NOT_USED, + NOT_USED, + NOT_USED, + DEFAULT_CALL_CONTRACT_ARGS_SIZE_WEIGHT, + NOT_USED, + ], + ), + get_named_arg_size: HostFunction::default(), + get_named_arg: HostFunction::default(), + remove_contract_user_group: HostFunction::default(), + provision_contract_user_group_uref: HostFunction::default(), + remove_contract_user_group_urefs: HostFunction::default(), + print: HostFunction::new( + DEFAULT_PRINT_COST, + [NOT_USED, DEFAULT_PRINT_TEXT_SIZE_WEIGHT], + ), + blake2b: HostFunction::default(), + random_bytes: HostFunction::default(), + enable_contract_version: HostFunction::default(), + add_session_version: HostFunction::default(), + manage_message_topic: HostFunction::default(), + emit_message: HostFunction::default(), + cost_increase_per_message: DEFAULT_COST_INCREASE_PER_MESSAGE_EMITTED, + } + } +} + +impl ToBytes for HostFunctionCosts { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret = bytesrepr::unchecked_allocate_buffer(self); + ret.append(&mut self.read_value.to_bytes()?); + ret.append(&mut self.dictionary_get.to_bytes()?); + ret.append(&mut self.write.to_bytes()?); + ret.append(&mut self.dictionary_put.to_bytes()?); + ret.append(&mut self.add.to_bytes()?); + ret.append(&mut self.new_uref.to_bytes()?); + ret.append(&mut self.load_named_keys.to_bytes()?); + ret.append(&mut self.ret.to_bytes()?); + ret.append(&mut self.get_key.to_bytes()?); + ret.append(&mut self.has_key.to_bytes()?); + ret.append(&mut self.put_key.to_bytes()?); + ret.append(&mut self.remove_key.to_bytes()?); + ret.append(&mut self.revert.to_bytes()?); + ret.append(&mut self.is_valid_uref.to_bytes()?); + ret.append(&mut self.add_associated_key.to_bytes()?); + ret.append(&mut self.remove_associated_key.to_bytes()?); + ret.append(&mut self.update_associated_key.to_bytes()?); + ret.append(&mut self.set_action_threshold.to_bytes()?); + ret.append(&mut self.get_caller.to_bytes()?); + 
ret.append(&mut self.get_blocktime.to_bytes()?); + ret.append(&mut self.create_purse.to_bytes()?); + ret.append(&mut self.transfer_to_account.to_bytes()?); + ret.append(&mut self.transfer_from_purse_to_account.to_bytes()?); + ret.append(&mut self.transfer_from_purse_to_purse.to_bytes()?); + ret.append(&mut self.get_balance.to_bytes()?); + ret.append(&mut self.get_phase.to_bytes()?); + ret.append(&mut self.get_system_contract.to_bytes()?); + ret.append(&mut self.get_main_purse.to_bytes()?); + ret.append(&mut self.read_host_buffer.to_bytes()?); + ret.append(&mut self.create_contract_package_at_hash.to_bytes()?); + ret.append(&mut self.create_contract_user_group.to_bytes()?); + ret.append(&mut self.add_contract_version.to_bytes()?); + ret.append(&mut self.disable_contract_version.to_bytes()?); + ret.append(&mut self.call_contract.to_bytes()?); + ret.append(&mut self.call_versioned_contract.to_bytes()?); + ret.append(&mut self.get_named_arg_size.to_bytes()?); + ret.append(&mut self.get_named_arg.to_bytes()?); + ret.append(&mut self.remove_contract_user_group.to_bytes()?); + ret.append(&mut self.provision_contract_user_group_uref.to_bytes()?); + ret.append(&mut self.remove_contract_user_group_urefs.to_bytes()?); + ret.append(&mut self.print.to_bytes()?); + ret.append(&mut self.blake2b.to_bytes()?); + ret.append(&mut self.random_bytes.to_bytes()?); + ret.append(&mut self.enable_contract_version.to_bytes()?); + ret.append(&mut self.add_session_version.to_bytes()?); + ret.append(&mut self.manage_message_topic.to_bytes()?); + ret.append(&mut self.emit_message.to_bytes()?); + ret.append(&mut self.cost_increase_per_message.to_bytes()?); + Ok(ret) + } + + fn serialized_length(&self) -> usize { + self.read_value.serialized_length() + + self.dictionary_get.serialized_length() + + self.write.serialized_length() + + self.dictionary_put.serialized_length() + + self.add.serialized_length() + + self.new_uref.serialized_length() + + self.load_named_keys.serialized_length() + + 
self.ret.serialized_length() + + self.get_key.serialized_length() + + self.has_key.serialized_length() + + self.put_key.serialized_length() + + self.remove_key.serialized_length() + + self.revert.serialized_length() + + self.is_valid_uref.serialized_length() + + self.add_associated_key.serialized_length() + + self.remove_associated_key.serialized_length() + + self.update_associated_key.serialized_length() + + self.set_action_threshold.serialized_length() + + self.get_caller.serialized_length() + + self.get_blocktime.serialized_length() + + self.create_purse.serialized_length() + + self.transfer_to_account.serialized_length() + + self.transfer_from_purse_to_account.serialized_length() + + self.transfer_from_purse_to_purse.serialized_length() + + self.get_balance.serialized_length() + + self.get_phase.serialized_length() + + self.get_system_contract.serialized_length() + + self.get_main_purse.serialized_length() + + self.read_host_buffer.serialized_length() + + self.create_contract_package_at_hash.serialized_length() + + self.create_contract_user_group.serialized_length() + + self.add_contract_version.serialized_length() + + self.disable_contract_version.serialized_length() + + self.call_contract.serialized_length() + + self.call_versioned_contract.serialized_length() + + self.get_named_arg_size.serialized_length() + + self.get_named_arg.serialized_length() + + self.remove_contract_user_group.serialized_length() + + self.provision_contract_user_group_uref.serialized_length() + + self.remove_contract_user_group_urefs.serialized_length() + + self.print.serialized_length() + + self.blake2b.serialized_length() + + self.random_bytes.serialized_length() + + self.enable_contract_version.serialized_length() + + self.add_session_version.serialized_length() + + self.manage_message_topic.serialized_length() + + self.emit_message.serialized_length() + + self.cost_increase_per_message.serialized_length() + } +} + +impl FromBytes for HostFunctionCosts { + fn from_bytes(bytes: 
&[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (read_value, rem) = FromBytes::from_bytes(bytes)?; + let (dictionary_get, rem) = FromBytes::from_bytes(rem)?; + let (write, rem) = FromBytes::from_bytes(rem)?; + let (dictionary_put, rem) = FromBytes::from_bytes(rem)?; + let (add, rem) = FromBytes::from_bytes(rem)?; + let (new_uref, rem) = FromBytes::from_bytes(rem)?; + let (load_named_keys, rem) = FromBytes::from_bytes(rem)?; + let (ret, rem) = FromBytes::from_bytes(rem)?; + let (get_key, rem) = FromBytes::from_bytes(rem)?; + let (has_key, rem) = FromBytes::from_bytes(rem)?; + let (put_key, rem) = FromBytes::from_bytes(rem)?; + let (remove_key, rem) = FromBytes::from_bytes(rem)?; + let (revert, rem) = FromBytes::from_bytes(rem)?; + let (is_valid_uref, rem) = FromBytes::from_bytes(rem)?; + let (add_associated_key, rem) = FromBytes::from_bytes(rem)?; + let (remove_associated_key, rem) = FromBytes::from_bytes(rem)?; + let (update_associated_key, rem) = FromBytes::from_bytes(rem)?; + let (set_action_threshold, rem) = FromBytes::from_bytes(rem)?; + let (get_caller, rem) = FromBytes::from_bytes(rem)?; + let (get_blocktime, rem) = FromBytes::from_bytes(rem)?; + let (create_purse, rem) = FromBytes::from_bytes(rem)?; + let (transfer_to_account, rem) = FromBytes::from_bytes(rem)?; + let (transfer_from_purse_to_account, rem) = FromBytes::from_bytes(rem)?; + let (transfer_from_purse_to_purse, rem) = FromBytes::from_bytes(rem)?; + let (get_balance, rem) = FromBytes::from_bytes(rem)?; + let (get_phase, rem) = FromBytes::from_bytes(rem)?; + let (get_system_contract, rem) = FromBytes::from_bytes(rem)?; + let (get_main_purse, rem) = FromBytes::from_bytes(rem)?; + let (read_host_buffer, rem) = FromBytes::from_bytes(rem)?; + let (create_contract_package_at_hash, rem) = FromBytes::from_bytes(rem)?; + let (create_contract_user_group, rem) = FromBytes::from_bytes(rem)?; + let (add_contract_version, rem) = FromBytes::from_bytes(rem)?; + let (disable_contract_version, rem) = 
FromBytes::from_bytes(rem)?; + let (call_contract, rem) = FromBytes::from_bytes(rem)?; + let (call_versioned_contract, rem) = FromBytes::from_bytes(rem)?; + let (get_named_arg_size, rem) = FromBytes::from_bytes(rem)?; + let (get_named_arg, rem) = FromBytes::from_bytes(rem)?; + let (remove_contract_user_group, rem) = FromBytes::from_bytes(rem)?; + let (provision_contract_user_group_uref, rem) = FromBytes::from_bytes(rem)?; + let (remove_contract_user_group_urefs, rem) = FromBytes::from_bytes(rem)?; + let (print, rem) = FromBytes::from_bytes(rem)?; + let (blake2b, rem) = FromBytes::from_bytes(rem)?; + let (random_bytes, rem) = FromBytes::from_bytes(rem)?; + let (enable_contract_version, rem) = FromBytes::from_bytes(rem)?; + let (add_session_version, rem) = FromBytes::from_bytes(rem)?; + let (manage_message_topic, rem) = FromBytes::from_bytes(rem)?; + let (emit_message, rem) = FromBytes::from_bytes(rem)?; + let (cost_increase_per_message, rem) = FromBytes::from_bytes(rem)?; + Ok(( + HostFunctionCosts { + read_value, + dictionary_get, + write, + dictionary_put, + add, + new_uref, + load_named_keys, + ret, + get_key, + has_key, + put_key, + remove_key, + revert, + is_valid_uref, + add_associated_key, + remove_associated_key, + update_associated_key, + set_action_threshold, + get_caller, + get_blocktime, + create_purse, + transfer_to_account, + transfer_from_purse_to_account, + transfer_from_purse_to_purse, + get_balance, + get_phase, + get_system_contract, + get_main_purse, + read_host_buffer, + create_contract_package_at_hash, + create_contract_user_group, + add_contract_version, + disable_contract_version, + call_contract, + call_versioned_contract, + get_named_arg_size, + get_named_arg, + remove_contract_user_group, + provision_contract_user_group_uref, + remove_contract_user_group_urefs, + print, + blake2b, + random_bytes, + enable_contract_version, + add_session_version, + manage_message_topic, + emit_message, + cost_increase_per_message, + }, + rem, + )) + } +} + 
+impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> HostFunctionCosts { + HostFunctionCosts { + read_value: rng.gen(), + dictionary_get: rng.gen(), + write: rng.gen(), + dictionary_put: rng.gen(), + add: rng.gen(), + new_uref: rng.gen(), + load_named_keys: rng.gen(), + ret: rng.gen(), + get_key: rng.gen(), + has_key: rng.gen(), + put_key: rng.gen(), + remove_key: rng.gen(), + revert: rng.gen(), + is_valid_uref: rng.gen(), + add_associated_key: rng.gen(), + remove_associated_key: rng.gen(), + update_associated_key: rng.gen(), + set_action_threshold: rng.gen(), + get_caller: rng.gen(), + get_blocktime: rng.gen(), + create_purse: rng.gen(), + transfer_to_account: rng.gen(), + transfer_from_purse_to_account: rng.gen(), + transfer_from_purse_to_purse: rng.gen(), + get_balance: rng.gen(), + get_phase: rng.gen(), + get_system_contract: rng.gen(), + get_main_purse: rng.gen(), + read_host_buffer: rng.gen(), + create_contract_package_at_hash: rng.gen(), + create_contract_user_group: rng.gen(), + add_contract_version: rng.gen(), + disable_contract_version: rng.gen(), + call_contract: rng.gen(), + call_versioned_contract: rng.gen(), + get_named_arg_size: rng.gen(), + get_named_arg: rng.gen(), + remove_contract_user_group: rng.gen(), + provision_contract_user_group_uref: rng.gen(), + remove_contract_user_group_urefs: rng.gen(), + print: rng.gen(), + blake2b: rng.gen(), + random_bytes: rng.gen(), + enable_contract_version: rng.gen(), + add_session_version: rng.gen(), + manage_message_topic: rng.gen(), + emit_message: rng.gen(), + cost_increase_per_message: rng.gen(), + } + } +} + +#[doc(hidden)] +#[cfg(any(feature = "gens", test))] +pub mod gens { + use proptest::{num, prelude::*}; + + use crate::{HostFunction, HostFunctionCost, HostFunctionCosts}; + + #[allow(unused)] + pub fn host_function_cost_arb() -> impl Strategy> { + (any::(), any::()) + .prop_map(|(cost, arguments)| HostFunction::new(cost, arguments)) + } + + prop_compose! 
{ + pub fn host_function_costs_arb() ( + read_value in host_function_cost_arb(), + dictionary_get in host_function_cost_arb(), + write in host_function_cost_arb(), + dictionary_put in host_function_cost_arb(), + add in host_function_cost_arb(), + new_uref in host_function_cost_arb(), + load_named_keys in host_function_cost_arb(), + ret in host_function_cost_arb(), + get_key in host_function_cost_arb(), + has_key in host_function_cost_arb(), + put_key in host_function_cost_arb(), + remove_key in host_function_cost_arb(), + revert in host_function_cost_arb(), + is_valid_uref in host_function_cost_arb(), + add_associated_key in host_function_cost_arb(), + remove_associated_key in host_function_cost_arb(), + update_associated_key in host_function_cost_arb(), + set_action_threshold in host_function_cost_arb(), + get_caller in host_function_cost_arb(), + get_blocktime in host_function_cost_arb(), + create_purse in host_function_cost_arb(), + transfer_to_account in host_function_cost_arb(), + transfer_from_purse_to_account in host_function_cost_arb(), + transfer_from_purse_to_purse in host_function_cost_arb(), + get_balance in host_function_cost_arb(), + get_phase in host_function_cost_arb(), + get_system_contract in host_function_cost_arb(), + get_main_purse in host_function_cost_arb(), + read_host_buffer in host_function_cost_arb(), + create_contract_package_at_hash in host_function_cost_arb(), + create_contract_user_group in host_function_cost_arb(), + add_contract_version in host_function_cost_arb(), + disable_contract_version in host_function_cost_arb(), + call_contract in host_function_cost_arb(), + call_versioned_contract in host_function_cost_arb(), + get_named_arg_size in host_function_cost_arb(), + get_named_arg in host_function_cost_arb(), + remove_contract_user_group in host_function_cost_arb(), + provision_contract_user_group_uref in host_function_cost_arb(), + remove_contract_user_group_urefs in host_function_cost_arb(), + print in host_function_cost_arb(), 
+ blake2b in host_function_cost_arb(), + random_bytes in host_function_cost_arb(), + enable_contract_version in host_function_cost_arb(), + add_session_version in host_function_cost_arb(), + manage_message_topic in host_function_cost_arb(), + emit_message in host_function_cost_arb(), + cost_increase_per_message in num::u32::ANY, + ) -> HostFunctionCosts { + HostFunctionCosts { + read_value, + dictionary_get, + write, + dictionary_put, + add, + new_uref, + load_named_keys, + ret, + get_key, + has_key, + put_key, + remove_key, + revert, + is_valid_uref, + add_associated_key, + remove_associated_key, + update_associated_key, + set_action_threshold, + get_caller, + get_blocktime, + create_purse, + transfer_to_account, + transfer_from_purse_to_account, + transfer_from_purse_to_purse, + get_balance, + get_phase, + get_system_contract, + get_main_purse, + read_host_buffer, + create_contract_package_at_hash, + create_contract_user_group, + add_contract_version, + disable_contract_version, + call_contract, + call_versioned_contract, + get_named_arg_size, + get_named_arg, + remove_contract_user_group, + provision_contract_user_group_uref, + remove_contract_user_group_urefs, + print, + blake2b, + random_bytes, + enable_contract_version, + add_session_version, + manage_message_topic, + emit_message, + cost_increase_per_message, + } + } + } +} + +#[cfg(test)] +mod tests { + use crate::U512; + + use super::*; + + const COST: Cost = 42; + const ARGUMENT_COSTS: [Cost; 3] = [123, 456, 789]; + const WEIGHTS: [Cost; 3] = [1000, 1100, 1200]; + + #[test] + fn calculate_gas_cost_for_host_function() { + let host_function = HostFunction::new(COST, ARGUMENT_COSTS); + let expected_cost = COST + + (ARGUMENT_COSTS[0] * WEIGHTS[0]) + + (ARGUMENT_COSTS[1] * WEIGHTS[1]) + + (ARGUMENT_COSTS[2] * WEIGHTS[2]); + assert_eq!( + host_function.calculate_gas_cost(WEIGHTS), + Gas::new(expected_cost.into()) + ); + } + + #[test] + fn calculate_gas_cost_would_overflow() { + let large_value = 
Cost::max_value(); + + let host_function = HostFunction::new( + large_value, + [large_value, large_value, large_value, large_value], + ); + + let lhs = + host_function.calculate_gas_cost([large_value, large_value, large_value, large_value]); + + let large_value = U512::from(large_value); + let rhs = large_value + (U512::from(4) * large_value * large_value); + + assert_eq!(lhs, Gas::new(rhs)); + } +} + +#[cfg(test)] +mod proptests { + use proptest::prelude::*; + + use crate::bytesrepr; + + use super::*; + + type Signature = [Cost; 10]; + + proptest! { + #[test] + fn test_host_function(host_function in gens::host_function_cost_arb::()) { + bytesrepr::test_serialization_roundtrip(&host_function); + } + + #[test] + fn test_host_function_costs(host_function_costs in gens::host_function_costs_arb()) { + bytesrepr::test_serialization_roundtrip(&host_function_costs); + } + } +} diff --git a/casper_types_ver_2_0/src/chainspec/vm_config/message_limits.rs b/casper_types_ver_2_0/src/chainspec/vm_config/message_limits.rs new file mode 100644 index 00000000..93635153 --- /dev/null +++ b/casper_types_ver_2_0/src/chainspec/vm_config/message_limits.rs @@ -0,0 +1,131 @@ +#[cfg(feature = "datasize")] +use datasize::DataSize; +use rand::{distributions::Standard, prelude::*, Rng}; +use serde::{Deserialize, Serialize}; + +use crate::bytesrepr::{self, FromBytes, ToBytes}; + +/// Configuration for messages limits. +#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct MessageLimits { + /// Maximum size (in bytes) of a topic name string. + pub max_topic_name_size: u32, + /// Maximum message size in bytes. + pub max_message_size: u32, + /// Maximum number of topics that a contract can register. + pub max_topics_per_contract: u32, +} + +impl MessageLimits { + /// Returns the max number of topics a contract can register. 
+ pub fn max_topics_per_contract(&self) -> u32 { + self.max_topics_per_contract + } + + /// Returns the maximum allowed size for the topic name string. + pub fn max_topic_name_size(&self) -> u32 { + self.max_topic_name_size + } + + /// Returns the maximum allowed size (in bytes) of the serialized message payload. + pub fn max_message_size(&self) -> u32 { + self.max_message_size + } +} + +impl Default for MessageLimits { + fn default() -> Self { + Self { + max_topic_name_size: 256, + max_message_size: 1024, + max_topics_per_contract: 128, + } + } +} + +impl ToBytes for MessageLimits { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret = bytesrepr::unchecked_allocate_buffer(self); + + ret.append(&mut self.max_topic_name_size.to_bytes()?); + ret.append(&mut self.max_message_size.to_bytes()?); + ret.append(&mut self.max_topics_per_contract.to_bytes()?); + + Ok(ret) + } + + fn serialized_length(&self) -> usize { + self.max_topic_name_size.serialized_length() + + self.max_message_size.serialized_length() + + self.max_topics_per_contract.serialized_length() + } +} + +impl FromBytes for MessageLimits { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (max_topic_name_size, rem) = FromBytes::from_bytes(bytes)?; + let (max_message_size, rem) = FromBytes::from_bytes(rem)?; + let (max_topics_per_contract, rem) = FromBytes::from_bytes(rem)?; + + Ok(( + MessageLimits { + max_topic_name_size, + max_message_size, + max_topics_per_contract, + }, + rem, + )) + } +} + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> MessageLimits { + MessageLimits { + max_topic_name_size: rng.gen(), + max_message_size: rng.gen(), + max_topics_per_contract: rng.gen(), + } + } +} + +#[doc(hidden)] +#[cfg(any(feature = "gens", test))] +pub mod gens { + use proptest::{num, prop_compose}; + + use super::MessageLimits; + + prop_compose! 
{ + pub fn message_limits_arb()( + max_topic_name_size in num::u32::ANY, + max_message_size in num::u32::ANY, + max_topics_per_contract in num::u32::ANY, + ) -> MessageLimits { + MessageLimits { + max_topic_name_size, + max_message_size, + max_topics_per_contract, + } + } + } +} + +#[cfg(test)] +mod tests { + use proptest::proptest; + + use crate::bytesrepr; + + use super::gens; + + proptest! { + #[test] + fn should_serialize_and_deserialize_with_arbitrary_values( + message_limits in gens::message_limits_arb() + ) { + bytesrepr::test_serialization_roundtrip(&message_limits); + } + } +} diff --git a/casper_types_ver_2_0/src/chainspec/vm_config/mint_costs.rs b/casper_types_ver_2_0/src/chainspec/vm_config/mint_costs.rs new file mode 100644 index 00000000..90f0d750 --- /dev/null +++ b/casper_types_ver_2_0/src/chainspec/vm_config/mint_costs.rs @@ -0,0 +1,172 @@ +//! Costs of the mint system contract. +#[cfg(feature = "datasize")] +use datasize::DataSize; +use rand::{distributions::Standard, prelude::*, Rng}; +use serde::{Deserialize, Serialize}; + +use crate::bytesrepr::{self, FromBytes, ToBytes}; + +/// Default cost of the `mint` mint entry point. +pub const DEFAULT_MINT_COST: u32 = 2_500_000_000; +/// Default cost of the `reduce_total_supply` mint entry point. +pub const DEFAULT_REDUCE_TOTAL_SUPPLY_COST: u32 = 10_000; +/// Default cost of the `create` mint entry point. +pub const DEFAULT_CREATE_COST: u32 = 2_500_000_000; +/// Default cost of the `balance` mint entry point. +pub const DEFAULT_BALANCE_COST: u32 = 10_000; +/// Default cost of the `transfer` mint entry point. +pub const DEFAULT_TRANSFER_COST: u32 = 10_000; +/// Default cost of the `read_base_round_reward` mint entry point. +pub const DEFAULT_READ_BASE_ROUND_REWARD_COST: u32 = 10_000; +/// Default cost of the `mint_into_existing_purse` mint entry point. +pub const DEFAULT_MINT_INTO_EXISTING_PURSE_COST: u32 = 2_500_000_000; + +/// Description of the costs of calling mint entry points. 
+#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct MintCosts { + /// Cost of calling the `mint` entry point. + pub mint: u32, + /// Cost of calling the `reduce_total_supply` entry point. + pub reduce_total_supply: u32, + /// Cost of calling the `create` entry point. + pub create: u32, + /// Cost of calling the `balance` entry point. + pub balance: u32, + /// Cost of calling the `transfer` entry point. + pub transfer: u32, + /// Cost of calling the `read_base_round_reward` entry point. + pub read_base_round_reward: u32, + /// Cost of calling the `mint_into_existing_purse` entry point. + pub mint_into_existing_purse: u32, +} + +impl Default for MintCosts { + fn default() -> Self { + Self { + mint: DEFAULT_MINT_COST, + reduce_total_supply: DEFAULT_REDUCE_TOTAL_SUPPLY_COST, + create: DEFAULT_CREATE_COST, + balance: DEFAULT_BALANCE_COST, + transfer: DEFAULT_TRANSFER_COST, + read_base_round_reward: DEFAULT_READ_BASE_ROUND_REWARD_COST, + mint_into_existing_purse: DEFAULT_MINT_INTO_EXISTING_PURSE_COST, + } + } +} + +impl ToBytes for MintCosts { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret = bytesrepr::unchecked_allocate_buffer(self); + + let Self { + mint, + reduce_total_supply, + create, + balance, + transfer, + read_base_round_reward, + mint_into_existing_purse, + } = self; + + ret.append(&mut mint.to_bytes()?); + ret.append(&mut reduce_total_supply.to_bytes()?); + ret.append(&mut create.to_bytes()?); + ret.append(&mut balance.to_bytes()?); + ret.append(&mut transfer.to_bytes()?); + ret.append(&mut read_base_round_reward.to_bytes()?); + ret.append(&mut mint_into_existing_purse.to_bytes()?); + + Ok(ret) + } + + fn serialized_length(&self) -> usize { + let Self { + mint, + reduce_total_supply, + create, + balance, + transfer, + read_base_round_reward, + mint_into_existing_purse, + } = self; + + mint.serialized_length() + + 
reduce_total_supply.serialized_length() + + create.serialized_length() + + balance.serialized_length() + + transfer.serialized_length() + + read_base_round_reward.serialized_length() + + mint_into_existing_purse.serialized_length() + } +} + +impl FromBytes for MintCosts { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (mint, rem) = FromBytes::from_bytes(bytes)?; + let (reduce_total_supply, rem) = FromBytes::from_bytes(rem)?; + let (create, rem) = FromBytes::from_bytes(rem)?; + let (balance, rem) = FromBytes::from_bytes(rem)?; + let (transfer, rem) = FromBytes::from_bytes(rem)?; + let (read_base_round_reward, rem) = FromBytes::from_bytes(rem)?; + let (mint_into_existing_purse, rem) = FromBytes::from_bytes(rem)?; + + Ok(( + Self { + mint, + reduce_total_supply, + create, + balance, + transfer, + read_base_round_reward, + mint_into_existing_purse, + }, + rem, + )) + } +} + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> MintCosts { + MintCosts { + mint: rng.gen(), + reduce_total_supply: rng.gen(), + create: rng.gen(), + balance: rng.gen(), + transfer: rng.gen(), + read_base_round_reward: rng.gen(), + mint_into_existing_purse: rng.gen(), + } + } +} + +#[doc(hidden)] +#[cfg(any(feature = "gens", test))] +pub mod gens { + use proptest::{num, prop_compose}; + + use super::MintCosts; + + prop_compose! 
{ + pub fn mint_costs_arb()( + mint in num::u32::ANY, + reduce_total_supply in num::u32::ANY, + create in num::u32::ANY, + balance in num::u32::ANY, + transfer in num::u32::ANY, + read_base_round_reward in num::u32::ANY, + mint_into_existing_purse in num::u32::ANY, + ) -> MintCosts { + MintCosts { + mint, + reduce_total_supply, + create, + balance, + transfer, + read_base_round_reward, + mint_into_existing_purse, + } + } + } +} diff --git a/casper_types_ver_2_0/src/chainspec/vm_config/opcode_costs.rs b/casper_types_ver_2_0/src/chainspec/vm_config/opcode_costs.rs new file mode 100644 index 00000000..5ad8c49c --- /dev/null +++ b/casper_types_ver_2_0/src/chainspec/vm_config/opcode_costs.rs @@ -0,0 +1,773 @@ +//! Support for Wasm opcode costs. + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use derive_more::Add; +use num_traits::Zero; +use rand::{distributions::Standard, prelude::*, Rng}; +use serde::{Deserialize, Serialize}; + +use crate::bytesrepr::{self, FromBytes, ToBytes}; + +/// Default cost of the `bit` Wasm opcode. +pub const DEFAULT_BIT_COST: u32 = 300; +/// Default cost of the `add` Wasm opcode. +pub const DEFAULT_ADD_COST: u32 = 210; +/// Default cost of the `mul` Wasm opcode. +pub const DEFAULT_MUL_COST: u32 = 240; +/// Default cost of the `div` Wasm opcode. +pub const DEFAULT_DIV_COST: u32 = 320; +/// Default cost of the `load` Wasm opcode. +pub const DEFAULT_LOAD_COST: u32 = 2_500; +/// Default cost of the `store` Wasm opcode. +pub const DEFAULT_STORE_COST: u32 = 4_700; +/// Default cost of the `const` Wasm opcode. +pub const DEFAULT_CONST_COST: u32 = 110; +/// Default cost of the `local` Wasm opcode. +pub const DEFAULT_LOCAL_COST: u32 = 390; +/// Default cost of the `global` Wasm opcode. +pub const DEFAULT_GLOBAL_COST: u32 = 390; +/// Default cost of the `integer_comparison` Wasm opcode. +pub const DEFAULT_INTEGER_COMPARISON_COST: u32 = 250; +/// Default cost of the `conversion` Wasm opcode. 
+pub const DEFAULT_CONVERSION_COST: u32 = 420; +/// Default cost of the `unreachable` Wasm opcode. +pub const DEFAULT_UNREACHABLE_COST: u32 = 270; +/// Default cost of the `nop` Wasm opcode. +// TODO: This value is not researched. +pub const DEFAULT_NOP_COST: u32 = 200; +/// Default cost of the `current_memory` Wasm opcode. +pub const DEFAULT_CURRENT_MEMORY_COST: u32 = 290; +/// Default cost of the `grow_memory` Wasm opcode. +pub const DEFAULT_GROW_MEMORY_COST: u32 = 240_000; +/// Default cost of the `block` Wasm opcode. +pub const DEFAULT_CONTROL_FLOW_BLOCK_OPCODE: u32 = 440; +/// Default cost of the `loop` Wasm opcode. +pub const DEFAULT_CONTROL_FLOW_LOOP_OPCODE: u32 = 440; +/// Default cost of the `if` Wasm opcode. +pub const DEFAULT_CONTROL_FLOW_IF_OPCODE: u32 = 440; +/// Default cost of the `else` Wasm opcode. +pub const DEFAULT_CONTROL_FLOW_ELSE_OPCODE: u32 = 440; +/// Default cost of the `end` Wasm opcode. +pub const DEFAULT_CONTROL_FLOW_END_OPCODE: u32 = 440; +/// Default cost of the `br` Wasm opcode. +pub const DEFAULT_CONTROL_FLOW_BR_OPCODE: u32 = 35_000; +/// Default cost of the `br_if` Wasm opcode. +pub const DEFAULT_CONTROL_FLOW_BR_IF_OPCODE: u32 = 35_000; +/// Default cost of the `return` Wasm opcode. +pub const DEFAULT_CONTROL_FLOW_RETURN_OPCODE: u32 = 440; +/// Default cost of the `select` Wasm opcode. +pub const DEFAULT_CONTROL_FLOW_SELECT_OPCODE: u32 = 440; +/// Default cost of the `call` Wasm opcode. +pub const DEFAULT_CONTROL_FLOW_CALL_OPCODE: u32 = 68_000; +/// Default cost of the `call_indirect` Wasm opcode. +pub const DEFAULT_CONTROL_FLOW_CALL_INDIRECT_OPCODE: u32 = 68_000; +/// Default cost of the `drop` Wasm opcode. +pub const DEFAULT_CONTROL_FLOW_DROP_OPCODE: u32 = 440; +/// Default fixed cost of the `br_table` Wasm opcode. +pub const DEFAULT_CONTROL_FLOW_BR_TABLE_OPCODE: u32 = 35_000; +/// Default multiplier for the size of targets in `br_table` Wasm opcode. 
+pub const DEFAULT_CONTROL_FLOW_BR_TABLE_MULTIPLIER: u32 = 100; + +/// Definition of a cost table for a Wasm `br_table` opcode. +/// +/// Charge of a `br_table` opcode is calculated as follows: +/// +/// ```text +/// cost + (len(br_table.targets) * size_multiplier) +/// ``` +// This is done to encourage users to avoid writing code with very long `br_table`s. +#[derive(Add, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct BrTableCost { + /// Fixed cost charge for `br_table` opcode. + pub cost: u32, + /// Multiplier for size of target labels in the `br_table` opcode. + pub size_multiplier: u32, +} + +impl Default for BrTableCost { + fn default() -> Self { + Self { + cost: DEFAULT_CONTROL_FLOW_BR_TABLE_OPCODE, + size_multiplier: DEFAULT_CONTROL_FLOW_BR_TABLE_MULTIPLIER, + } + } +} + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> BrTableCost { + BrTableCost { + cost: rng.gen(), + size_multiplier: rng.gen(), + } + } +} + +impl ToBytes for BrTableCost { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let Self { + cost, + size_multiplier, + } = self; + + let mut ret = bytesrepr::unchecked_allocate_buffer(self); + + ret.append(&mut cost.to_bytes()?); + ret.append(&mut size_multiplier.to_bytes()?); + + Ok(ret) + } + + fn serialized_length(&self) -> usize { + let Self { + cost, + size_multiplier, + } = self; + + cost.serialized_length() + size_multiplier.serialized_length() + } +} + +impl FromBytes for BrTableCost { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (cost, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (size_multiplier, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + Ok(( + Self { + cost, + size_multiplier, + }, + bytes, + )) + } +} + +impl Zero for BrTableCost { + fn zero() -> Self { + BrTableCost { + cost: 0, + size_multiplier: 0, + } + } + + fn is_zero(&self) -> bool { + let 
BrTableCost { + cost, + size_multiplier, + } = self; + cost.is_zero() && size_multiplier.is_zero() + } +} + +/// Definition of a cost table for a Wasm control flow opcodes. +#[derive(Add, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct ControlFlowCosts { + /// Cost for `block` opcode. + pub block: u32, + /// Cost for `loop` opcode. + #[serde(rename = "loop")] + pub op_loop: u32, + /// Cost for `if` opcode. + #[serde(rename = "if")] + pub op_if: u32, + /// Cost for `else` opcode. + #[serde(rename = "else")] + pub op_else: u32, + /// Cost for `end` opcode. + pub end: u32, + /// Cost for `br` opcode. + pub br: u32, + /// Cost for `br_if` opcode. + pub br_if: u32, + /// Cost for `return` opcode. + #[serde(rename = "return")] + pub op_return: u32, + /// Cost for `call` opcode. + pub call: u32, + /// Cost for `call_indirect` opcode. + pub call_indirect: u32, + /// Cost for `drop` opcode. + pub drop: u32, + /// Cost for `select` opcode. + pub select: u32, + /// Cost for `br_table` opcode. 
+ pub br_table: BrTableCost, +} + +impl Default for ControlFlowCosts { + fn default() -> Self { + Self { + block: DEFAULT_CONTROL_FLOW_BLOCK_OPCODE, + op_loop: DEFAULT_CONTROL_FLOW_LOOP_OPCODE, + op_if: DEFAULT_CONTROL_FLOW_IF_OPCODE, + op_else: DEFAULT_CONTROL_FLOW_ELSE_OPCODE, + end: DEFAULT_CONTROL_FLOW_END_OPCODE, + br: DEFAULT_CONTROL_FLOW_BR_OPCODE, + br_if: DEFAULT_CONTROL_FLOW_BR_IF_OPCODE, + op_return: DEFAULT_CONTROL_FLOW_RETURN_OPCODE, + call: DEFAULT_CONTROL_FLOW_CALL_OPCODE, + call_indirect: DEFAULT_CONTROL_FLOW_CALL_INDIRECT_OPCODE, + drop: DEFAULT_CONTROL_FLOW_DROP_OPCODE, + select: DEFAULT_CONTROL_FLOW_SELECT_OPCODE, + br_table: Default::default(), + } + } +} + +impl ToBytes for ControlFlowCosts { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret = bytesrepr::unchecked_allocate_buffer(self); + + let Self { + block, + op_loop, + op_if, + op_else, + end, + br, + br_if, + op_return, + call, + call_indirect, + drop, + select, + br_table, + } = self; + ret.append(&mut block.to_bytes()?); + ret.append(&mut op_loop.to_bytes()?); + ret.append(&mut op_if.to_bytes()?); + ret.append(&mut op_else.to_bytes()?); + ret.append(&mut end.to_bytes()?); + ret.append(&mut br.to_bytes()?); + ret.append(&mut br_if.to_bytes()?); + ret.append(&mut op_return.to_bytes()?); + ret.append(&mut call.to_bytes()?); + ret.append(&mut call_indirect.to_bytes()?); + ret.append(&mut drop.to_bytes()?); + ret.append(&mut select.to_bytes()?); + ret.append(&mut br_table.to_bytes()?); + + Ok(ret) + } + + fn serialized_length(&self) -> usize { + let Self { + block, + op_loop, + op_if, + op_else, + end, + br, + br_if, + op_return, + call, + call_indirect, + drop, + select, + br_table, + } = self; + block.serialized_length() + + op_loop.serialized_length() + + op_if.serialized_length() + + op_else.serialized_length() + + end.serialized_length() + + br.serialized_length() + + br_if.serialized_length() + + op_return.serialized_length() + + call.serialized_length() + + 
call_indirect.serialized_length() + + drop.serialized_length() + + select.serialized_length() + + br_table.serialized_length() + } +} + +impl FromBytes for ControlFlowCosts { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (block, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (op_loop, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (op_if, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (op_else, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (end, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (br, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (br_if, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (op_return, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (call, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (call_indirect, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (drop, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (select, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (br_table, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + + let control_flow_cost = ControlFlowCosts { + block, + op_loop, + op_if, + op_else, + end, + br, + br_if, + op_return, + call, + call_indirect, + drop, + select, + br_table, + }; + Ok((control_flow_cost, bytes)) + } +} + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> ControlFlowCosts { + ControlFlowCosts { + block: rng.gen(), + op_loop: rng.gen(), + op_if: rng.gen(), + op_else: rng.gen(), + end: rng.gen(), + br: rng.gen(), + br_if: rng.gen(), + op_return: rng.gen(), + call: rng.gen(), + call_indirect: rng.gen(), + drop: rng.gen(), + select: rng.gen(), + br_table: rng.gen(), + } + } +} + +impl Zero for ControlFlowCosts { + fn zero() -> Self { + ControlFlowCosts { + block: 0, + op_loop: 0, + op_if: 0, + op_else: 0, + end: 0, + br: 0, + br_if: 0, + op_return: 0, + call: 0, + call_indirect: 0, + drop: 0, + select: 0, + br_table: 
BrTableCost::zero(), + } + } + + fn is_zero(&self) -> bool { + let ControlFlowCosts { + block, + op_loop, + op_if, + op_else, + end, + br, + br_if, + op_return, + call, + call_indirect, + drop, + select, + br_table, + } = self; + block.is_zero() + && op_loop.is_zero() + && op_if.is_zero() + && op_else.is_zero() + && end.is_zero() + && br.is_zero() + && br_if.is_zero() + && op_return.is_zero() + && call.is_zero() + && call_indirect.is_zero() + && drop.is_zero() + && select.is_zero() + && br_table.is_zero() + } +} + +/// Definition of a cost table for Wasm opcodes. +/// +/// This is taken (partially) from parity-ethereum. +#[derive(Add, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct OpcodeCosts { + /// Bit operations multiplier. + pub bit: u32, + /// Arithmetic add operations multiplier. + pub add: u32, + /// Mul operations multiplier. + pub mul: u32, + /// Div operations multiplier. + pub div: u32, + /// Memory load operation multiplier. + pub load: u32, + /// Memory store operation multiplier. + pub store: u32, + /// Const operation multiplier. + #[serde(rename = "const")] + pub op_const: u32, + /// Local operations multiplier. + pub local: u32, + /// Global operations multiplier. + pub global: u32, + /// Integer operations multiplier. + pub integer_comparison: u32, + /// Conversion operations multiplier. + pub conversion: u32, + /// Unreachable operation multiplier. + pub unreachable: u32, + /// Nop operation multiplier. + pub nop: u32, + /// Get current memory operation multiplier. + pub current_memory: u32, + /// Grow memory cost, per page (64kb) + pub grow_memory: u32, + /// Control flow operations multiplier. 
+ pub control_flow: ControlFlowCosts, +} + +impl Default for OpcodeCosts { + fn default() -> Self { + OpcodeCosts { + bit: DEFAULT_BIT_COST, + add: DEFAULT_ADD_COST, + mul: DEFAULT_MUL_COST, + div: DEFAULT_DIV_COST, + load: DEFAULT_LOAD_COST, + store: DEFAULT_STORE_COST, + op_const: DEFAULT_CONST_COST, + local: DEFAULT_LOCAL_COST, + global: DEFAULT_GLOBAL_COST, + integer_comparison: DEFAULT_INTEGER_COMPARISON_COST, + conversion: DEFAULT_CONVERSION_COST, + unreachable: DEFAULT_UNREACHABLE_COST, + nop: DEFAULT_NOP_COST, + current_memory: DEFAULT_CURRENT_MEMORY_COST, + grow_memory: DEFAULT_GROW_MEMORY_COST, + control_flow: ControlFlowCosts::default(), + } + } +} + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> OpcodeCosts { + OpcodeCosts { + bit: rng.gen(), + add: rng.gen(), + mul: rng.gen(), + div: rng.gen(), + load: rng.gen(), + store: rng.gen(), + op_const: rng.gen(), + local: rng.gen(), + global: rng.gen(), + integer_comparison: rng.gen(), + conversion: rng.gen(), + unreachable: rng.gen(), + nop: rng.gen(), + current_memory: rng.gen(), + grow_memory: rng.gen(), + control_flow: rng.gen(), + } + } +} + +impl ToBytes for OpcodeCosts { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret = bytesrepr::unchecked_allocate_buffer(self); + + let Self { + bit, + add, + mul, + div, + load, + store, + op_const, + local, + global, + integer_comparison, + conversion, + unreachable, + nop, + current_memory, + grow_memory, + control_flow, + } = self; + + ret.append(&mut bit.to_bytes()?); + ret.append(&mut add.to_bytes()?); + ret.append(&mut mul.to_bytes()?); + ret.append(&mut div.to_bytes()?); + ret.append(&mut load.to_bytes()?); + ret.append(&mut store.to_bytes()?); + ret.append(&mut op_const.to_bytes()?); + ret.append(&mut local.to_bytes()?); + ret.append(&mut global.to_bytes()?); + ret.append(&mut integer_comparison.to_bytes()?); + ret.append(&mut conversion.to_bytes()?); + ret.append(&mut unreachable.to_bytes()?); + ret.append(&mut 
nop.to_bytes()?); + ret.append(&mut current_memory.to_bytes()?); + ret.append(&mut grow_memory.to_bytes()?); + ret.append(&mut control_flow.to_bytes()?); + + Ok(ret) + } + + fn serialized_length(&self) -> usize { + let Self { + bit, + add, + mul, + div, + load, + store, + op_const, + local, + global, + integer_comparison, + conversion, + unreachable, + nop, + current_memory, + grow_memory, + control_flow, + } = self; + bit.serialized_length() + + add.serialized_length() + + mul.serialized_length() + + div.serialized_length() + + load.serialized_length() + + store.serialized_length() + + op_const.serialized_length() + + local.serialized_length() + + global.serialized_length() + + integer_comparison.serialized_length() + + conversion.serialized_length() + + unreachable.serialized_length() + + nop.serialized_length() + + current_memory.serialized_length() + + grow_memory.serialized_length() + + control_flow.serialized_length() + } +} + +impl FromBytes for OpcodeCosts { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (bit, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (add, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (mul, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (div, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (load, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (store, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (const_, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (local, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (global, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (integer_comparison, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (conversion, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (unreachable, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (nop, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (current_memory, bytes): (_, &[u8]) = 
FromBytes::from_bytes(bytes)?; + let (grow_memory, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (control_flow, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + + let opcode_costs = OpcodeCosts { + bit, + add, + mul, + div, + load, + store, + op_const: const_, + local, + global, + integer_comparison, + conversion, + unreachable, + nop, + current_memory, + grow_memory, + control_flow, + }; + Ok((opcode_costs, bytes)) + } +} + +impl Zero for OpcodeCosts { + fn zero() -> Self { + Self { + bit: 0, + add: 0, + mul: 0, + div: 0, + load: 0, + store: 0, + op_const: 0, + local: 0, + global: 0, + integer_comparison: 0, + conversion: 0, + unreachable: 0, + nop: 0, + current_memory: 0, + grow_memory: 0, + control_flow: ControlFlowCosts::zero(), + } + } + + fn is_zero(&self) -> bool { + let OpcodeCosts { + bit, + add, + mul, + div, + load, + store, + op_const, + local, + global, + integer_comparison, + conversion, + unreachable, + nop, + current_memory, + grow_memory, + control_flow, + } = self; + bit.is_zero() + && add.is_zero() + && mul.is_zero() + && div.is_zero() + && load.is_zero() + && store.is_zero() + && op_const.is_zero() + && local.is_zero() + && global.is_zero() + && integer_comparison.is_zero() + && conversion.is_zero() + && unreachable.is_zero() + && nop.is_zero() + && current_memory.is_zero() + && grow_memory.is_zero() + && control_flow.is_zero() + } +} + +#[doc(hidden)] +#[cfg(any(feature = "gens", test))] +pub mod gens { + use proptest::{num, prop_compose}; + + use crate::{BrTableCost, ControlFlowCosts, OpcodeCosts}; + + prop_compose! { + pub fn br_table_cost_arb()( + cost in num::u32::ANY, + size_multiplier in num::u32::ANY, + ) -> BrTableCost { + BrTableCost { cost, size_multiplier } + } + } + + prop_compose! 
{ + pub fn control_flow_cost_arb()( + block in num::u32::ANY, + op_loop in num::u32::ANY, + op_if in num::u32::ANY, + op_else in num::u32::ANY, + end in num::u32::ANY, + br in num::u32::ANY, + br_if in num::u32::ANY, + br_table in br_table_cost_arb(), + op_return in num::u32::ANY, + call in num::u32::ANY, + call_indirect in num::u32::ANY, + drop in num::u32::ANY, + select in num::u32::ANY, + ) -> ControlFlowCosts { + ControlFlowCosts { + block, + op_loop, + op_if, + op_else, + end, + br, + br_if, + br_table, + op_return, + call, + call_indirect, + drop, + select + } + } + + } + + prop_compose! { + pub fn opcode_costs_arb()( + bit in num::u32::ANY, + add in num::u32::ANY, + mul in num::u32::ANY, + div in num::u32::ANY, + load in num::u32::ANY, + store in num::u32::ANY, + op_const in num::u32::ANY, + local in num::u32::ANY, + global in num::u32::ANY, + integer_comparison in num::u32::ANY, + conversion in num::u32::ANY, + unreachable in num::u32::ANY, + nop in num::u32::ANY, + current_memory in num::u32::ANY, + grow_memory in num::u32::ANY, + control_flow in control_flow_cost_arb(), + ) -> OpcodeCosts { + OpcodeCosts { + bit, + add, + mul, + div, + load, + store, + op_const, + local, + global, + integer_comparison, + conversion, + unreachable, + nop, + current_memory, + grow_memory, + control_flow, + } + } + } +} + +#[cfg(test)] +mod tests { + use proptest::proptest; + + use crate::bytesrepr; + + use super::gens; + + proptest! { + #[test] + fn should_serialize_and_deserialize_with_arbitrary_values( + opcode_costs in gens::opcode_costs_arb() + ) { + bytesrepr::test_serialization_roundtrip(&opcode_costs); + } + } +} diff --git a/casper_types_ver_2_0/src/chainspec/vm_config/standard_payment_costs.rs b/casper_types_ver_2_0/src/chainspec/vm_config/standard_payment_costs.rs new file mode 100644 index 00000000..618f7d66 --- /dev/null +++ b/casper_types_ver_2_0/src/chainspec/vm_config/standard_payment_costs.rs @@ -0,0 +1,70 @@ +//! 
Costs of the standard payment system contract. +#[cfg(feature = "datasize")] +use datasize::DataSize; +use rand::{distributions::Standard, prelude::*, Rng}; +use serde::{Deserialize, Serialize}; + +use crate::bytesrepr::{self, FromBytes, ToBytes}; + +/// Default cost of the `pay` standard payment entry point. +const DEFAULT_PAY_COST: u32 = 10_000; + +/// Description of the costs of calling standard payment entry points. +#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct StandardPaymentCosts { + /// Cost of calling the `pay` entry point. + pub pay: u32, +} + +impl Default for StandardPaymentCosts { + fn default() -> Self { + Self { + pay: DEFAULT_PAY_COST, + } + } +} + +impl ToBytes for StandardPaymentCosts { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret = bytesrepr::unchecked_allocate_buffer(self); + ret.append(&mut self.pay.to_bytes()?); + Ok(ret) + } + + fn serialized_length(&self) -> usize { + self.pay.serialized_length() + } +} + +impl FromBytes for StandardPaymentCosts { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (pay, rem) = FromBytes::from_bytes(bytes)?; + Ok((Self { pay }, rem)) + } +} + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> StandardPaymentCosts { + StandardPaymentCosts { pay: rng.gen() } + } +} + +#[doc(hidden)] +#[cfg(any(feature = "gens", test))] +pub mod gens { + use proptest::{num, prop_compose}; + + use super::StandardPaymentCosts; + + prop_compose! 
{ + pub fn standard_payment_costs_arb()( + pay in num::u32::ANY, + ) -> StandardPaymentCosts { + StandardPaymentCosts { + pay, + } + } + } +} diff --git a/casper_types_ver_2_0/src/chainspec/vm_config/storage_costs.rs b/casper_types_ver_2_0/src/chainspec/vm_config/storage_costs.rs new file mode 100644 index 00000000..0ce4e9ce --- /dev/null +++ b/casper_types_ver_2_0/src/chainspec/vm_config/storage_costs.rs @@ -0,0 +1,138 @@ +//! Support for storage costs. +#[cfg(feature = "datasize")] +use datasize::DataSize; +use derive_more::Add; +use num_traits::Zero; +use rand::{distributions::Standard, prelude::*, Rng}; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + Gas, U512, +}; + +/// Default gas cost per byte stored. +pub const DEFAULT_GAS_PER_BYTE_COST: u32 = 630_000; + +/// Represents a cost table for storage costs. +#[derive(Add, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct StorageCosts { + /// Gas charged per byte stored in the global state. + gas_per_byte: u32, +} + +impl StorageCosts { + /// Creates new `StorageCosts`. + pub const fn new(gas_per_byte: u32) -> Self { + Self { gas_per_byte } + } + + /// Returns amount of gas per byte stored. + pub fn gas_per_byte(&self) -> u32 { + self.gas_per_byte + } + + /// Calculates gas cost for storing `bytes`. 
+ pub fn calculate_gas_cost(&self, bytes: usize) -> Gas { + let value = U512::from(self.gas_per_byte) * U512::from(bytes); + Gas::new(value) + } +} + +impl Default for StorageCosts { + fn default() -> Self { + Self { + gas_per_byte: DEFAULT_GAS_PER_BYTE_COST, + } + } +} + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> StorageCosts { + StorageCosts { + gas_per_byte: rng.gen(), + } + } +} + +impl ToBytes for StorageCosts { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret = bytesrepr::unchecked_allocate_buffer(self); + + ret.append(&mut self.gas_per_byte.to_bytes()?); + + Ok(ret) + } + + fn serialized_length(&self) -> usize { + self.gas_per_byte.serialized_length() + } +} + +impl FromBytes for StorageCosts { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (gas_per_byte, rem) = FromBytes::from_bytes(bytes)?; + + Ok((StorageCosts { gas_per_byte }, rem)) + } +} + +impl Zero for StorageCosts { + fn zero() -> Self { + StorageCosts { gas_per_byte: 0 } + } + + fn is_zero(&self) -> bool { + self.gas_per_byte.is_zero() + } +} + +#[cfg(test)] +pub mod tests { + use crate::U512; + + use super::*; + + const SMALL_WEIGHT: usize = 123456789; + const LARGE_WEIGHT: usize = usize::max_value(); + + #[test] + fn should_calculate_gas_cost() { + let storage_costs = StorageCosts::default(); + + let cost = storage_costs.calculate_gas_cost(SMALL_WEIGHT); + + let expected_cost = U512::from(DEFAULT_GAS_PER_BYTE_COST) * U512::from(SMALL_WEIGHT); + assert_eq!(cost, Gas::new(expected_cost)); + } + + #[test] + fn should_calculate_big_gas_cost() { + let storage_costs = StorageCosts::default(); + + let cost = storage_costs.calculate_gas_cost(LARGE_WEIGHT); + + let expected_cost = U512::from(DEFAULT_GAS_PER_BYTE_COST) * U512::from(LARGE_WEIGHT); + assert_eq!(cost, Gas::new(expected_cost)); + } +} + +#[doc(hidden)] +#[cfg(any(feature = "gens", test))] +pub mod gens { + use proptest::{num, prop_compose}; + + use 
super::StorageCosts; + + prop_compose! { + pub fn storage_costs_arb()( + gas_per_byte in num::u32::ANY, + ) -> StorageCosts { + StorageCosts { + gas_per_byte, + } + } + } +} diff --git a/casper_types_ver_2_0/src/chainspec/vm_config/system_config.rs b/casper_types_ver_2_0/src/chainspec/vm_config/system_config.rs new file mode 100644 index 00000000..d6f61677 --- /dev/null +++ b/casper_types_ver_2_0/src/chainspec/vm_config/system_config.rs @@ -0,0 +1,179 @@ +#[cfg(feature = "datasize")] +use datasize::DataSize; +use rand::{distributions::Standard, prelude::*, Rng}; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + chainspec::vm_config::{AuctionCosts, HandlePaymentCosts, MintCosts, StandardPaymentCosts}, +}; + +/// Default gas cost for a wasmless transfer. +pub const DEFAULT_WASMLESS_TRANSFER_COST: u32 = 100_000_000; + +/// Definition of costs in the system. +/// +/// This structure contains the costs of all the system contract's entry points and, additionally, +/// it defines a wasmless transfer cost. +#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct SystemConfig { + /// Wasmless transfer cost expressed in gas. + wasmless_transfer_cost: u32, + + /// Configuration of auction entrypoint costs. + auction_costs: AuctionCosts, + + /// Configuration of mint entrypoint costs. + mint_costs: MintCosts, + + /// Configuration of handle payment entrypoint costs. + handle_payment_costs: HandlePaymentCosts, + + /// Configuration of standard payment costs. + standard_payment_costs: StandardPaymentCosts, +} + +impl SystemConfig { + /// Creates new system config instance. 
+ pub fn new( + wasmless_transfer_cost: u32, + auction_costs: AuctionCosts, + mint_costs: MintCosts, + handle_payment_costs: HandlePaymentCosts, + standard_payment_costs: StandardPaymentCosts, + ) -> Self { + Self { + wasmless_transfer_cost, + auction_costs, + mint_costs, + handle_payment_costs, + standard_payment_costs, + } + } + + /// Returns wasmless transfer cost. + pub fn wasmless_transfer_cost(&self) -> u32 { + self.wasmless_transfer_cost + } + + /// Returns the costs of executing auction entry points. + pub fn auction_costs(&self) -> &AuctionCosts { + &self.auction_costs + } + + /// Returns the costs of executing mint entry points. + pub fn mint_costs(&self) -> &MintCosts { + &self.mint_costs + } + + /// Returns the costs of executing `handle_payment` entry points. + pub fn handle_payment_costs(&self) -> &HandlePaymentCosts { + &self.handle_payment_costs + } + + /// Returns the costs of executing `standard_payment` entry points. + pub fn standard_payment_costs(&self) -> &StandardPaymentCosts { + &self.standard_payment_costs + } +} + +impl Default for SystemConfig { + fn default() -> Self { + Self { + wasmless_transfer_cost: DEFAULT_WASMLESS_TRANSFER_COST, + auction_costs: AuctionCosts::default(), + mint_costs: MintCosts::default(), + handle_payment_costs: HandlePaymentCosts::default(), + standard_payment_costs: StandardPaymentCosts::default(), + } + } +} + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> SystemConfig { + SystemConfig { + wasmless_transfer_cost: rng.gen(), + auction_costs: rng.gen(), + mint_costs: rng.gen(), + handle_payment_costs: rng.gen(), + standard_payment_costs: rng.gen(), + } + } +} + +impl ToBytes for SystemConfig { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret = bytesrepr::unchecked_allocate_buffer(self); + + ret.append(&mut self.wasmless_transfer_cost.to_bytes()?); + ret.append(&mut self.auction_costs.to_bytes()?); + ret.append(&mut self.mint_costs.to_bytes()?); + ret.append(&mut 
self.handle_payment_costs.to_bytes()?); + ret.append(&mut self.standard_payment_costs.to_bytes()?); + + Ok(ret) + } + + fn serialized_length(&self) -> usize { + self.wasmless_transfer_cost.serialized_length() + + self.auction_costs.serialized_length() + + self.mint_costs.serialized_length() + + self.handle_payment_costs.serialized_length() + + self.standard_payment_costs.serialized_length() + } +} + +impl FromBytes for SystemConfig { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (wasmless_transfer_cost, rem) = FromBytes::from_bytes(bytes)?; + let (auction_costs, rem) = FromBytes::from_bytes(rem)?; + let (mint_costs, rem) = FromBytes::from_bytes(rem)?; + let (handle_payment_costs, rem) = FromBytes::from_bytes(rem)?; + let (standard_payment_costs, rem) = FromBytes::from_bytes(rem)?; + Ok(( + SystemConfig::new( + wasmless_transfer_cost, + auction_costs, + mint_costs, + handle_payment_costs, + standard_payment_costs, + ), + rem, + )) + } +} + +#[doc(hidden)] +#[cfg(any(feature = "gens", test))] +pub mod gens { + use proptest::{num, prop_compose}; + + use crate::{ + chainspec::vm_config::{ + auction_costs::gens::auction_costs_arb, + handle_payment_costs::gens::handle_payment_costs_arb, mint_costs::gens::mint_costs_arb, + standard_payment_costs::gens::standard_payment_costs_arb, + }, + SystemConfig, + }; + + prop_compose! 
{ + pub fn system_config_arb()( + wasmless_transfer_cost in num::u32::ANY, + auction_costs in auction_costs_arb(), + mint_costs in mint_costs_arb(), + handle_payment_costs in handle_payment_costs_arb(), + standard_payment_costs in standard_payment_costs_arb(), + ) -> SystemConfig { + SystemConfig { + wasmless_transfer_cost, + auction_costs, + mint_costs, + handle_payment_costs, + standard_payment_costs, + } + } + } +} diff --git a/casper_types_ver_2_0/src/chainspec/vm_config/upgrade_config.rs b/casper_types_ver_2_0/src/chainspec/vm_config/upgrade_config.rs new file mode 100644 index 00000000..21e2150a --- /dev/null +++ b/casper_types_ver_2_0/src/chainspec/vm_config/upgrade_config.rs @@ -0,0 +1,112 @@ +use num_rational::Ratio; +use std::collections::BTreeMap; + +use crate::{ChainspecRegistry, Digest, EraId, Key, ProtocolVersion, StoredValue}; + +/// Represents the configuration of a protocol upgrade. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct UpgradeConfig { + pre_state_hash: Digest, + current_protocol_version: ProtocolVersion, + new_protocol_version: ProtocolVersion, + activation_point: Option, + new_validator_slots: Option, + new_auction_delay: Option, + new_locked_funds_period_millis: Option, + new_round_seigniorage_rate: Option>, + new_unbonding_delay: Option, + global_state_update: BTreeMap, + chainspec_registry: ChainspecRegistry, +} + +impl UpgradeConfig { + /// Create new upgrade config. 
+ #[allow(clippy::too_many_arguments)] + pub fn new( + pre_state_hash: Digest, + current_protocol_version: ProtocolVersion, + new_protocol_version: ProtocolVersion, + activation_point: Option, + new_validator_slots: Option, + new_auction_delay: Option, + new_locked_funds_period_millis: Option, + new_round_seigniorage_rate: Option>, + new_unbonding_delay: Option, + global_state_update: BTreeMap, + chainspec_registry: ChainspecRegistry, + ) -> Self { + UpgradeConfig { + pre_state_hash, + current_protocol_version, + new_protocol_version, + activation_point, + new_validator_slots, + new_auction_delay, + new_locked_funds_period_millis, + new_round_seigniorage_rate, + new_unbonding_delay, + global_state_update, + chainspec_registry, + } + } + + /// Returns the current state root state hash + pub fn pre_state_hash(&self) -> Digest { + self.pre_state_hash + } + + /// Returns current protocol version of this upgrade. + pub fn current_protocol_version(&self) -> ProtocolVersion { + self.current_protocol_version + } + + /// Returns new protocol version of this upgrade. + pub fn new_protocol_version(&self) -> ProtocolVersion { + self.new_protocol_version + } + + /// Returns activation point in eras. + pub fn activation_point(&self) -> Option { + self.activation_point + } + + /// Returns new validator slots if specified. + pub fn new_validator_slots(&self) -> Option { + self.new_validator_slots + } + + /// Returns new auction delay if specified. + pub fn new_auction_delay(&self) -> Option { + self.new_auction_delay + } + + /// Returns new locked funds period if specified. + pub fn new_locked_funds_period_millis(&self) -> Option { + self.new_locked_funds_period_millis + } + + /// Returns new round seigniorage rate if specified. + pub fn new_round_seigniorage_rate(&self) -> Option> { + self.new_round_seigniorage_rate + } + + /// Returns new unbonding delay if specified. 
+ pub fn new_unbonding_delay(&self) -> Option { + self.new_unbonding_delay + } + + /// Returns new map of emergency global state updates. + pub fn global_state_update(&self) -> &BTreeMap { + &self.global_state_update + } + + /// Returns a reference to the chainspec registry. + pub fn chainspec_registry(&self) -> &ChainspecRegistry { + &self.chainspec_registry + } + + /// Sets new pre state hash. + pub fn with_pre_state_hash(&mut self, pre_state_hash: Digest) { + self.pre_state_hash = pre_state_hash; + } +} diff --git a/casper_types_ver_2_0/src/chainspec/vm_config/wasm_config.rs b/casper_types_ver_2_0/src/chainspec/vm_config/wasm_config.rs new file mode 100644 index 00000000..ab73b44b --- /dev/null +++ b/casper_types_ver_2_0/src/chainspec/vm_config/wasm_config.rs @@ -0,0 +1,186 @@ +//! Configuration of the Wasm execution engine. +#[cfg(feature = "datasize")] +use datasize::DataSize; +use rand::{distributions::Standard, prelude::*, Rng}; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + chainspec::vm_config::{HostFunctionCosts, MessageLimits, OpcodeCosts, StorageCosts}, +}; + +/// Default maximum number of pages of the Wasm memory. +pub const DEFAULT_WASM_MAX_MEMORY: u32 = 64; +/// Default maximum stack height. +pub const DEFAULT_MAX_STACK_HEIGHT: u32 = 500; + +/// Configuration of the Wasm execution environment. +/// +/// This structure contains various Wasm execution configuration options, such as memory limits, +/// stack limits and costs. +#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct WasmConfig { + /// Maximum amount of heap memory (represented in 64kB pages) each contract can use. + pub max_memory: u32, + /// Max stack height (native WebAssembly stack limiter). + pub max_stack_height: u32, + /// Wasm opcode costs table. + opcode_costs: OpcodeCosts, + /// Storage costs. 
+ storage_costs: StorageCosts, + /// Host function costs table. + host_function_costs: HostFunctionCosts, + /// Messages limits. + messages_limits: MessageLimits, +} + +impl WasmConfig { + /// Creates new Wasm config. + pub const fn new( + max_memory: u32, + max_stack_height: u32, + opcode_costs: OpcodeCosts, + storage_costs: StorageCosts, + host_function_costs: HostFunctionCosts, + messages_limits: MessageLimits, + ) -> Self { + Self { + max_memory, + max_stack_height, + opcode_costs, + storage_costs, + host_function_costs, + messages_limits, + } + } + + /// Returns opcode costs. + pub fn opcode_costs(&self) -> OpcodeCosts { + self.opcode_costs + } + + /// Returns storage costs. + pub fn storage_costs(&self) -> StorageCosts { + self.storage_costs + } + + /// Returns host function costs and consumes this object. + pub fn take_host_function_costs(self) -> HostFunctionCosts { + self.host_function_costs + } + + /// Returns the limits config for messages. + pub fn messages_limits(&self) -> MessageLimits { + self.messages_limits + } +} + +impl Default for WasmConfig { + fn default() -> Self { + Self { + max_memory: DEFAULT_WASM_MAX_MEMORY, + max_stack_height: DEFAULT_MAX_STACK_HEIGHT, + opcode_costs: OpcodeCosts::default(), + storage_costs: StorageCosts::default(), + host_function_costs: HostFunctionCosts::default(), + messages_limits: MessageLimits::default(), + } + } +} + +impl ToBytes for WasmConfig { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret = bytesrepr::unchecked_allocate_buffer(self); + + ret.append(&mut self.max_memory.to_bytes()?); + ret.append(&mut self.max_stack_height.to_bytes()?); + ret.append(&mut self.opcode_costs.to_bytes()?); + ret.append(&mut self.storage_costs.to_bytes()?); + ret.append(&mut self.host_function_costs.to_bytes()?); + ret.append(&mut self.messages_limits.to_bytes()?); + + Ok(ret) + } + + fn serialized_length(&self) -> usize { + self.max_memory.serialized_length() + + self.max_stack_height.serialized_length() + + 
self.opcode_costs.serialized_length() + + self.storage_costs.serialized_length() + + self.host_function_costs.serialized_length() + + self.messages_limits.serialized_length() + } +} + +impl FromBytes for WasmConfig { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (max_memory, rem) = FromBytes::from_bytes(bytes)?; + let (max_stack_height, rem) = FromBytes::from_bytes(rem)?; + let (opcode_costs, rem) = FromBytes::from_bytes(rem)?; + let (storage_costs, rem) = FromBytes::from_bytes(rem)?; + let (host_function_costs, rem) = FromBytes::from_bytes(rem)?; + let (messages_limits, rem) = FromBytes::from_bytes(rem)?; + + Ok(( + WasmConfig { + max_memory, + max_stack_height, + opcode_costs, + storage_costs, + host_function_costs, + messages_limits, + }, + rem, + )) + } +} + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> WasmConfig { + WasmConfig { + max_memory: rng.gen(), + max_stack_height: rng.gen(), + opcode_costs: rng.gen(), + storage_costs: rng.gen(), + host_function_costs: rng.gen(), + messages_limits: rng.gen(), + } + } +} + +#[doc(hidden)] +#[cfg(any(feature = "gens", test))] +pub mod gens { + use proptest::{num, prop_compose}; + + use crate::{ + chainspec::vm_config::{ + host_function_costs::gens::host_function_costs_arb, + message_limits::gens::message_limits_arb, opcode_costs::gens::opcode_costs_arb, + storage_costs::gens::storage_costs_arb, + }, + WasmConfig, + }; + + prop_compose! 
{ + pub fn wasm_config_arb() ( + max_memory in num::u32::ANY, + max_stack_height in num::u32::ANY, + opcode_costs in opcode_costs_arb(), + storage_costs in storage_costs_arb(), + host_function_costs in host_function_costs_arb(), + messages_limits in message_limits_arb(), + ) -> WasmConfig { + WasmConfig { + max_memory, + max_stack_height, + opcode_costs, + storage_costs, + host_function_costs, + messages_limits, + } + } + } +} diff --git a/casper_types_ver_2_0/src/checksummed_hex.rs b/casper_types_ver_2_0/src/checksummed_hex.rs new file mode 100644 index 00000000..2b7aa193 --- /dev/null +++ b/casper_types_ver_2_0/src/checksummed_hex.rs @@ -0,0 +1,241 @@ +//! Checksummed hex encoding following an [EIP-55][1]-like scheme. +//! +//! [1]: https://eips.ethereum.org/EIPS/eip-55 + +use alloc::vec::Vec; +use core::ops::RangeInclusive; + +use base16; + +use crate::crypto; + +/// The number of input bytes, at or below which [`decode`] will checksum-decode the output. +pub const SMALL_BYTES_COUNT: usize = 75; + +const HEX_CHARS: [char; 22] = [ + '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'A', 'B', 'C', + 'D', 'E', 'F', +]; + +/// Takes a slice of bytes and breaks it up into a vector of *nibbles* (ie, 4-bit values) +/// represented as `u8`s. +fn bytes_to_nibbles<'a, T: 'a + AsRef<[u8]>>(input: &'a T) -> impl Iterator + 'a { + input + .as_ref() + .iter() + .flat_map(move |byte| [4, 0].iter().map(move |offset| (byte >> offset) & 0x0f)) +} + +/// Takes a slice of bytes and outputs an infinite cyclic stream of bits for those bytes. +fn bytes_to_bits_cycle(bytes: Vec) -> impl Iterator { + bytes + .into_iter() + .cycle() + .flat_map(move |byte| (0..8usize).map(move |offset| ((byte >> offset) & 0x01) == 0x01)) +} + +/// Returns the bytes encoded as hexadecimal with mixed-case based checksums following a scheme +/// similar to [EIP-55](https://eips.ethereum.org/EIPS/eip-55). 
+/// +/// Key differences: +/// - Works on any length of data, not just 20-byte addresses +/// - Uses Blake2b hashes rather than Keccak +/// - Uses hash bits rather than nibbles +fn encode_iter<'a, T: 'a + AsRef<[u8]>>(input: &'a T) -> impl Iterator + 'a { + let nibbles = bytes_to_nibbles(input); + let mut hash_bits = bytes_to_bits_cycle(crypto::blake2b(input.as_ref()).to_vec()); + nibbles.map(move |mut nibble| { + // Base 16 numbers greater than 10 are represented by the ascii characters a through f. + if nibble >= 10 && hash_bits.next().unwrap_or(true) { + // We are using nibble to index HEX_CHARS, so adding 6 to nibble gives us the index + // of the uppercase character. HEX_CHARS[10] == 'a', HEX_CHARS[16] == 'A'. + nibble += 6; + } + HEX_CHARS[nibble as usize] + }) +} + +/// Returns true if all chars in a string are uppercase or lowercase. +/// Returns false if the string is mixed case or if there are no alphabetic chars. +fn string_is_same_case>(s: T) -> bool { + const LOWER_RANGE: RangeInclusive = b'a'..=b'f'; + const UPPER_RANGE: RangeInclusive = b'A'..=b'F'; + + let mut chars = s + .as_ref() + .iter() + .filter(|c| LOWER_RANGE.contains(c) || UPPER_RANGE.contains(c)); + + match chars.next() { + Some(first) => { + let is_upper = UPPER_RANGE.contains(first); + chars.all(|c| UPPER_RANGE.contains(c) == is_upper) + } + None => { + // String has no actual characters. + true + } + } +} + +/// Decodes a mixed-case hexadecimal string, verifying that it conforms to the checksum scheme +/// similar to scheme in [EIP-55][1]. +/// +/// Key differences: +/// - Works on any length of (decoded) data up to `SMALL_BYTES_COUNT`, not just 20-byte addresses +/// - Uses Blake2b hashes rather than Keccak +/// - Uses hash bits rather than nibbles +/// +/// For backward compatibility: if the hex string is all uppercase or all lowercase, the check is +/// skipped. 
+/// +/// [1]: https://eips.ethereum.org/EIPS/eip-55 +pub fn decode>(input: T) -> Result, base16::DecodeError> { + let bytes = base16::decode(input.as_ref())?; + + // If the string was not small or not mixed case, don't verify the checksum. + if bytes.len() > SMALL_BYTES_COUNT || string_is_same_case(input.as_ref()) { + return Ok(bytes); + } + + encode_iter(&bytes) + .zip(input.as_ref().iter()) + .enumerate() + .try_for_each(|(index, (expected_case_hex_char, &input_hex_char))| { + if expected_case_hex_char as u8 == input_hex_char { + Ok(()) + } else { + Err(base16::DecodeError::InvalidByte { + index, + byte: expected_case_hex_char as u8, + }) + } + })?; + Ok(bytes) +} + +#[cfg(test)] +mod tests { + use alloc::string::String; + + use proptest::{ + collection::vec, + prelude::{any, prop_assert, prop_assert_eq}, + }; + use proptest_attr_macro::proptest; + + use super::*; + + #[test] + fn should_decode_empty_input() { + let input = String::new(); + let actual = decode(input).unwrap(); + assert!(actual.is_empty()); + } + + #[test] + fn string_is_same_case_true_when_same_case() { + let input = "aaaaaaaaaaa"; + assert!(string_is_same_case(input)); + + let input = "AAAAAAAAAAA"; + assert!(string_is_same_case(input)); + } + + #[test] + fn string_is_same_case_false_when_mixed_case() { + let input = "aAaAaAaAaAa"; + assert!(!string_is_same_case(input)); + } + + #[test] + fn string_is_same_case_no_alphabetic_chars_in_string() { + let input = "424242424242"; + assert!(string_is_same_case(input)); + } + + #[test] + fn should_checksum_decode_only_if_small() { + let input = [255; SMALL_BYTES_COUNT]; + let small_encoded: String = encode_iter(&input).collect(); + assert_eq!(input.to_vec(), decode(&small_encoded).unwrap()); + + assert!(decode("A1a2").is_err()); + + let large_encoded = format!("A1{}", small_encoded); + assert!(decode(large_encoded).is_ok()); + } + + #[proptest] + fn hex_roundtrip(input: Vec) { + prop_assert_eq!( + input.clone(), + 
decode(encode_iter(&input).collect::()).expect("Failed to decode input.") + ); + } + + proptest::proptest! { + #[test] + fn should_fail_on_invalid_checksum(input in vec(any::(), 0..75)) { + let encoded: String = encode_iter(&input).collect(); + + // Swap the case of the first letter in the checksum hex-encoded value. + let mut expected_error = None; + let mutated: String = encoded + .char_indices() + .map(|(index, mut c)| { + if expected_error.is_some() || c.is_ascii_digit() { + return c; + } + expected_error = Some(base16::DecodeError::InvalidByte { + index, + byte: c as u8, + }); + if c.is_ascii_uppercase() { + c.make_ascii_lowercase(); + } else { + c.make_ascii_uppercase(); + } + c + }) + .collect(); + + // If the encoded form is now all the same case or digits, just return. + if string_is_same_case(&mutated) { + return Ok(()); + } + + // Assert we can still decode to original input using `base16::decode`. + prop_assert_eq!( + input, + base16::decode(&mutated).expect("Failed to decode input.") + ); + + // Assert decoding using `checksummed_hex::decode` returns the expected error. 
+ prop_assert_eq!(expected_error.unwrap(), decode(&mutated).unwrap_err()) + } + } + + #[proptest] + fn hex_roundtrip_sanity(input: Vec) { + prop_assert!(decode(encode_iter(&input).collect::()).is_ok()) + } + + #[proptest] + fn is_same_case_uppercase(input: String) { + let input = input.to_uppercase(); + prop_assert!(string_is_same_case(input)); + } + + #[proptest] + fn is_same_case_lowercase(input: String) { + let input = input.to_lowercase(); + prop_assert!(string_is_same_case(input)); + } + + #[proptest] + fn is_not_same_case(input: String) { + let input = format!("aA{}", input); + prop_assert!(!string_is_same_case(input)); + } +} diff --git a/casper_types_ver_2_0/src/cl_type.rs b/casper_types_ver_2_0/src/cl_type.rs new file mode 100644 index 00000000..945d6267 --- /dev/null +++ b/casper_types_ver_2_0/src/cl_type.rs @@ -0,0 +1,817 @@ +use alloc::{ + boxed::Box, + collections::{BTreeMap, BTreeSet, VecDeque}, + string::String, + vec::Vec, +}; +use core::{ + fmt::{self, Display, Formatter}, + mem, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use num_rational::Ratio; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + Key, URef, U128, U256, U512, +}; + +// This must be less than 300 in order to avoid a stack overflow when deserializing. 
+pub(crate) const CL_TYPE_RECURSION_DEPTH: u8 = 50; + +const CL_TYPE_TAG_BOOL: u8 = 0; +const CL_TYPE_TAG_I32: u8 = 1; +const CL_TYPE_TAG_I64: u8 = 2; +const CL_TYPE_TAG_U8: u8 = 3; +const CL_TYPE_TAG_U32: u8 = 4; +const CL_TYPE_TAG_U64: u8 = 5; +const CL_TYPE_TAG_U128: u8 = 6; +const CL_TYPE_TAG_U256: u8 = 7; +const CL_TYPE_TAG_U512: u8 = 8; +const CL_TYPE_TAG_UNIT: u8 = 9; +const CL_TYPE_TAG_STRING: u8 = 10; +const CL_TYPE_TAG_KEY: u8 = 11; +const CL_TYPE_TAG_UREF: u8 = 12; +const CL_TYPE_TAG_OPTION: u8 = 13; +const CL_TYPE_TAG_LIST: u8 = 14; +const CL_TYPE_TAG_BYTE_ARRAY: u8 = 15; +const CL_TYPE_TAG_RESULT: u8 = 16; +const CL_TYPE_TAG_MAP: u8 = 17; +const CL_TYPE_TAG_TUPLE1: u8 = 18; +const CL_TYPE_TAG_TUPLE2: u8 = 19; +const CL_TYPE_TAG_TUPLE3: u8 = 20; +const CL_TYPE_TAG_ANY: u8 = 21; +const CL_TYPE_TAG_PUBLIC_KEY: u8 = 22; + +/// Casper types, i.e. types which can be stored and manipulated by smart contracts. +/// +/// Provides a description of the underlying data type of a [`CLValue`](crate::CLValue). +#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub enum CLType { + /// `bool` primitive. + Bool, + /// `i32` primitive. + I32, + /// `i64` primitive. + I64, + /// `u8` primitive. + U8, + /// `u32` primitive. + U32, + /// `u64` primitive. + U64, + /// [`U128`] large unsigned integer type. + U128, + /// [`U256`] large unsigned integer type. + U256, + /// [`U512`] large unsigned integer type. + U512, + /// `()` primitive. + Unit, + /// `String` primitive. + String, + /// [`Key`] system type. + Key, + /// [`URef`] system type. + URef, + /// [`PublicKey`](crate::PublicKey) system type. + PublicKey, + /// `Option` of a `CLType`. + #[cfg_attr(feature = "datasize", data_size(skip))] + Option(Box), + /// Variable-length list of a single `CLType` (comparable to a `Vec`). 
+ #[cfg_attr(feature = "datasize", data_size(skip))] + List(Box), + /// Fixed-length list of a single `CLType` (comparable to a Rust array). + ByteArray(u32), + /// `Result` with `Ok` and `Err` variants of `CLType`s. + #[allow(missing_docs)] // generated docs are explicit enough. + #[cfg_attr(feature = "datasize", data_size(skip))] + Result { ok: Box, err: Box }, + /// Map with keys of a single `CLType` and values of a single `CLType`. + #[allow(missing_docs)] // generated docs are explicit enough. + #[cfg_attr(feature = "datasize", data_size(skip))] + Map { + key: Box, + value: Box, + }, + /// 1-ary tuple of a `CLType`. + #[cfg_attr(feature = "datasize", data_size(skip))] + Tuple1([Box; 1]), + /// 2-ary tuple of `CLType`s. + #[cfg_attr(feature = "datasize", data_size(skip))] + Tuple2([Box; 2]), + /// 3-ary tuple of `CLType`s. + #[cfg_attr(feature = "datasize", data_size(skip))] + Tuple3([Box; 3]), + /// Unspecified type. + Any, +} + +impl CLType { + /// The `len()` of the `Vec` resulting from `self.to_bytes()`. 
+ pub fn serialized_length(&self) -> usize { + mem::size_of::() + + match self { + CLType::Bool + | CLType::I32 + | CLType::I64 + | CLType::U8 + | CLType::U32 + | CLType::U64 + | CLType::U128 + | CLType::U256 + | CLType::U512 + | CLType::Unit + | CLType::String + | CLType::Key + | CLType::URef + | CLType::PublicKey + | CLType::Any => 0, + CLType::Option(cl_type) | CLType::List(cl_type) => cl_type.serialized_length(), + CLType::ByteArray(list_len) => list_len.serialized_length(), + CLType::Result { ok, err } => ok.serialized_length() + err.serialized_length(), + CLType::Map { key, value } => key.serialized_length() + value.serialized_length(), + CLType::Tuple1(cl_type_array) => serialized_length_of_cl_tuple_type(cl_type_array), + CLType::Tuple2(cl_type_array) => serialized_length_of_cl_tuple_type(cl_type_array), + CLType::Tuple3(cl_type_array) => serialized_length_of_cl_tuple_type(cl_type_array), + } + } + + /// Returns `true` if the [`CLType`] is [`Option`]. + pub fn is_option(&self) -> bool { + matches!(self, Self::Option(..)) + } + + /// Creates a `CLType::Map`. + pub fn map(key: CLType, value: CLType) -> Self { + CLType::Map { + key: Box::new(key), + value: Box::new(value), + } + } +} + +/// Returns the `CLType` describing a "named key" on the system, i.e. a `(String, Key)`. 
+pub fn named_key_type() -> CLType { + CLType::Tuple2([Box::new(CLType::String), Box::new(CLType::Key)]) +} + +impl CLType { + pub(crate) fn append_bytes(&self, stream: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + CLType::Bool => stream.push(CL_TYPE_TAG_BOOL), + CLType::I32 => stream.push(CL_TYPE_TAG_I32), + CLType::I64 => stream.push(CL_TYPE_TAG_I64), + CLType::U8 => stream.push(CL_TYPE_TAG_U8), + CLType::U32 => stream.push(CL_TYPE_TAG_U32), + CLType::U64 => stream.push(CL_TYPE_TAG_U64), + CLType::U128 => stream.push(CL_TYPE_TAG_U128), + CLType::U256 => stream.push(CL_TYPE_TAG_U256), + CLType::U512 => stream.push(CL_TYPE_TAG_U512), + CLType::Unit => stream.push(CL_TYPE_TAG_UNIT), + CLType::String => stream.push(CL_TYPE_TAG_STRING), + CLType::Key => stream.push(CL_TYPE_TAG_KEY), + CLType::URef => stream.push(CL_TYPE_TAG_UREF), + CLType::PublicKey => stream.push(CL_TYPE_TAG_PUBLIC_KEY), + CLType::Option(cl_type) => { + stream.push(CL_TYPE_TAG_OPTION); + cl_type.append_bytes(stream)?; + } + CLType::List(cl_type) => { + stream.push(CL_TYPE_TAG_LIST); + cl_type.append_bytes(stream)?; + } + CLType::ByteArray(len) => { + stream.push(CL_TYPE_TAG_BYTE_ARRAY); + stream.append(&mut len.to_bytes()?); + } + CLType::Result { ok, err } => { + stream.push(CL_TYPE_TAG_RESULT); + ok.append_bytes(stream)?; + err.append_bytes(stream)?; + } + CLType::Map { key, value } => { + stream.push(CL_TYPE_TAG_MAP); + key.append_bytes(stream)?; + value.append_bytes(stream)?; + } + CLType::Tuple1(cl_type_array) => { + serialize_cl_tuple_type(CL_TYPE_TAG_TUPLE1, cl_type_array, stream)? + } + CLType::Tuple2(cl_type_array) => { + serialize_cl_tuple_type(CL_TYPE_TAG_TUPLE2, cl_type_array, stream)? + } + CLType::Tuple3(cl_type_array) => { + serialize_cl_tuple_type(CL_TYPE_TAG_TUPLE3, cl_type_array, stream)? 
+ } + CLType::Any => stream.push(CL_TYPE_TAG_ANY), + } + Ok(()) + } +} + +impl Display for CLType { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + match self { + CLType::Bool => write!(formatter, "bool"), + CLType::I32 => write!(formatter, "i32"), + CLType::I64 => write!(formatter, "i64"), + CLType::U8 => write!(formatter, "u8"), + CLType::U32 => write!(formatter, "u32"), + CLType::U64 => write!(formatter, "u64"), + CLType::U128 => write!(formatter, "u128"), + CLType::U256 => write!(formatter, "u256"), + CLType::U512 => write!(formatter, "u512"), + CLType::Unit => write!(formatter, "unit"), + CLType::String => write!(formatter, "string"), + CLType::Key => write!(formatter, "key"), + CLType::URef => write!(formatter, "uref"), + CLType::PublicKey => write!(formatter, "public-key"), + CLType::Option(t) => write!(formatter, "option<{t}>"), + CLType::List(t) => write!(formatter, "list<{t}>"), + CLType::ByteArray(len) => write!(formatter, "byte-array[{len}]"), + CLType::Result { ok, err } => write!(formatter, "result<{ok}, {err}>"), + CLType::Map { key, value } => write!(formatter, "map<{key}, {value}>"), + CLType::Tuple1([t1]) => write!(formatter, "({t1},)"), + CLType::Tuple2([t1, t2]) => write!(formatter, "({t1}, {t2})"), + CLType::Tuple3([t1, t2, t3]) => write!(formatter, "({t1}, {t2}, {t3})"), + CLType::Any => write!(formatter, "any"), + } + } +} + +impl FromBytes for CLType { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + depth_limited_from_bytes(0, bytes) + } +} + +fn depth_limited_from_bytes(depth: u8, bytes: &[u8]) -> Result<(CLType, &[u8]), bytesrepr::Error> { + if depth >= CL_TYPE_RECURSION_DEPTH { + return Err(bytesrepr::Error::ExceededRecursionDepth); + } + let depth = depth + 1; + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + CL_TYPE_TAG_BOOL => Ok((CLType::Bool, remainder)), + CL_TYPE_TAG_I32 => Ok((CLType::I32, remainder)), + CL_TYPE_TAG_I64 => Ok((CLType::I64, remainder)), + CL_TYPE_TAG_U8 
=> Ok((CLType::U8, remainder)), + CL_TYPE_TAG_U32 => Ok((CLType::U32, remainder)), + CL_TYPE_TAG_U64 => Ok((CLType::U64, remainder)), + CL_TYPE_TAG_U128 => Ok((CLType::U128, remainder)), + CL_TYPE_TAG_U256 => Ok((CLType::U256, remainder)), + CL_TYPE_TAG_U512 => Ok((CLType::U512, remainder)), + CL_TYPE_TAG_UNIT => Ok((CLType::Unit, remainder)), + CL_TYPE_TAG_STRING => Ok((CLType::String, remainder)), + CL_TYPE_TAG_KEY => Ok((CLType::Key, remainder)), + CL_TYPE_TAG_UREF => Ok((CLType::URef, remainder)), + CL_TYPE_TAG_PUBLIC_KEY => Ok((CLType::PublicKey, remainder)), + CL_TYPE_TAG_OPTION => { + let (inner_type, remainder) = depth_limited_from_bytes(depth, remainder)?; + let cl_type = CLType::Option(Box::new(inner_type)); + Ok((cl_type, remainder)) + } + CL_TYPE_TAG_LIST => { + let (inner_type, remainder) = depth_limited_from_bytes(depth, remainder)?; + let cl_type = CLType::List(Box::new(inner_type)); + Ok((cl_type, remainder)) + } + CL_TYPE_TAG_BYTE_ARRAY => { + let (len, remainder) = u32::from_bytes(remainder)?; + let cl_type = CLType::ByteArray(len); + Ok((cl_type, remainder)) + } + CL_TYPE_TAG_RESULT => { + let (ok_type, remainder) = depth_limited_from_bytes(depth, remainder)?; + let (err_type, remainder) = depth_limited_from_bytes(depth, remainder)?; + let cl_type = CLType::Result { + ok: Box::new(ok_type), + err: Box::new(err_type), + }; + Ok((cl_type, remainder)) + } + CL_TYPE_TAG_MAP => { + let (key_type, remainder) = depth_limited_from_bytes(depth, remainder)?; + let (value_type, remainder) = depth_limited_from_bytes(depth, remainder)?; + let cl_type = CLType::Map { + key: Box::new(key_type), + value: Box::new(value_type), + }; + Ok((cl_type, remainder)) + } + CL_TYPE_TAG_TUPLE1 => { + let (mut inner_types, remainder) = parse_cl_tuple_types(depth, 1, remainder)?; + // NOTE: Assumed safe as `parse_cl_tuple_types` is expected to have exactly 1 + // element + let cl_type = CLType::Tuple1([inner_types.pop_front().unwrap()]); + Ok((cl_type, remainder)) + } + 
CL_TYPE_TAG_TUPLE2 => { + let (mut inner_types, remainder) = parse_cl_tuple_types(depth, 2, remainder)?; + // NOTE: Assumed safe as `parse_cl_tuple_types` is expected to have exactly 2 + // elements + let cl_type = CLType::Tuple2([ + inner_types.pop_front().unwrap(), + inner_types.pop_front().unwrap(), + ]); + Ok((cl_type, remainder)) + } + CL_TYPE_TAG_TUPLE3 => { + let (mut inner_types, remainder) = parse_cl_tuple_types(depth, 3, remainder)?; + // NOTE: Assumed safe as `parse_cl_tuple_types` is expected to have exactly 3 + // elements + let cl_type = CLType::Tuple3([ + inner_types.pop_front().unwrap(), + inner_types.pop_front().unwrap(), + inner_types.pop_front().unwrap(), + ]); + Ok((cl_type, remainder)) + } + CL_TYPE_TAG_ANY => Ok((CLType::Any, remainder)), + _ => Err(bytesrepr::Error::Formatting), + } +} + +fn serialize_cl_tuple_type<'a, T: IntoIterator>>( + tag: u8, + cl_type_array: T, + stream: &mut Vec, +) -> Result<(), bytesrepr::Error> { + stream.push(tag); + for cl_type in cl_type_array { + cl_type.append_bytes(stream)?; + } + Ok(()) +} + +fn parse_cl_tuple_types( + depth: u8, + count: usize, + mut bytes: &[u8], +) -> Result<(VecDeque>, &[u8]), bytesrepr::Error> { + let mut cl_types = VecDeque::with_capacity(count); + for _ in 0..count { + let (cl_type, remainder) = depth_limited_from_bytes(depth, bytes)?; + cl_types.push_back(Box::new(cl_type)); + bytes = remainder; + } + + Ok((cl_types, bytes)) +} + +fn serialized_length_of_cl_tuple_type<'a, T: IntoIterator>>( + cl_type_array: T, +) -> usize { + cl_type_array + .into_iter() + .map(|cl_type| cl_type.serialized_length()) + .sum() +} + +/// A type which can be described as a [`CLType`]. +pub trait CLTyped { + /// The `CLType` of `Self`. 
+ fn cl_type() -> CLType; +} + +impl CLTyped for bool { + fn cl_type() -> CLType { + CLType::Bool + } +} + +impl CLTyped for i32 { + fn cl_type() -> CLType { + CLType::I32 + } +} + +impl CLTyped for i64 { + fn cl_type() -> CLType { + CLType::I64 + } +} + +impl CLTyped for u8 { + fn cl_type() -> CLType { + CLType::U8 + } +} + +impl CLTyped for u32 { + fn cl_type() -> CLType { + CLType::U32 + } +} + +impl CLTyped for u64 { + fn cl_type() -> CLType { + CLType::U64 + } +} + +impl CLTyped for U128 { + fn cl_type() -> CLType { + CLType::U128 + } +} + +impl CLTyped for U256 { + fn cl_type() -> CLType { + CLType::U256 + } +} + +impl CLTyped for U512 { + fn cl_type() -> CLType { + CLType::U512 + } +} + +impl CLTyped for () { + fn cl_type() -> CLType { + CLType::Unit + } +} + +impl CLTyped for String { + fn cl_type() -> CLType { + CLType::String + } +} + +impl CLTyped for &str { + fn cl_type() -> CLType { + CLType::String + } +} + +impl CLTyped for Key { + fn cl_type() -> CLType { + CLType::Key + } +} + +impl CLTyped for URef { + fn cl_type() -> CLType { + CLType::URef + } +} + +impl CLTyped for Option { + fn cl_type() -> CLType { + CLType::Option(Box::new(T::cl_type())) + } +} + +impl CLTyped for Vec { + fn cl_type() -> CLType { + CLType::List(Box::new(T::cl_type())) + } +} + +impl CLTyped for BTreeSet { + fn cl_type() -> CLType { + CLType::List(Box::new(T::cl_type())) + } +} + +impl CLTyped for &T { + fn cl_type() -> CLType { + T::cl_type() + } +} + +impl CLTyped for [u8; COUNT] { + fn cl_type() -> CLType { + CLType::ByteArray(COUNT as u32) + } +} + +impl CLTyped for Result { + fn cl_type() -> CLType { + let ok = Box::new(T::cl_type()); + let err = Box::new(E::cl_type()); + CLType::Result { ok, err } + } +} + +impl CLTyped for BTreeMap { + fn cl_type() -> CLType { + let key = Box::new(K::cl_type()); + let value = Box::new(V::cl_type()); + CLType::Map { key, value } + } +} + +impl CLTyped for (T1,) { + fn cl_type() -> CLType { + CLType::Tuple1([Box::new(T1::cl_type())]) + } 
+} + +impl CLTyped for (T1, T2) { + fn cl_type() -> CLType { + CLType::Tuple2([Box::new(T1::cl_type()), Box::new(T2::cl_type())]) + } +} + +impl CLTyped for (T1, T2, T3) { + fn cl_type() -> CLType { + CLType::Tuple3([ + Box::new(T1::cl_type()), + Box::new(T2::cl_type()), + Box::new(T3::cl_type()), + ]) + } +} + +impl CLTyped for Ratio { + fn cl_type() -> CLType { + <(T, T)>::cl_type() + } +} + +#[cfg(test)] +mod tests { + use std::{fmt::Debug, iter, string::ToString}; + + use super::*; + use crate::{ + bytesrepr::{FromBytes, ToBytes}, + AccessRights, CLValue, + }; + + fn round_trip(value: &T) { + let cl_value = CLValue::from_t(value.clone()).unwrap(); + + let serialized_cl_value = cl_value.to_bytes().unwrap(); + assert_eq!(serialized_cl_value.len(), cl_value.serialized_length()); + let parsed_cl_value: CLValue = bytesrepr::deserialize(serialized_cl_value).unwrap(); + assert_eq!(cl_value, parsed_cl_value); + + let parsed_value = CLValue::into_t(cl_value).unwrap(); + assert_eq!(*value, parsed_value); + } + + #[test] + fn bool_should_work() { + round_trip(&true); + round_trip(&false); + } + + #[test] + fn u8_should_work() { + round_trip(&1u8); + } + + #[test] + fn u32_should_work() { + round_trip(&1u32); + } + + #[test] + fn i32_should_work() { + round_trip(&-1i32); + } + + #[test] + fn u64_should_work() { + round_trip(&1u64); + } + + #[test] + fn i64_should_work() { + round_trip(&-1i64); + } + + #[test] + fn u128_should_work() { + round_trip(&U128::one()); + } + + #[test] + fn u256_should_work() { + round_trip(&U256::one()); + } + + #[test] + fn u512_should_work() { + round_trip(&U512::one()); + } + + #[test] + fn unit_should_work() { + round_trip(&()); + } + + #[test] + fn string_should_work() { + round_trip(&String::from("abc")); + } + + #[test] + fn key_should_work() { + let key = Key::URef(URef::new([0u8; 32], AccessRights::READ_ADD_WRITE)); + round_trip(&key); + } + + #[test] + fn uref_should_work() { + let uref = URef::new([0u8; 32], 
AccessRights::READ_ADD_WRITE); + round_trip(&uref); + } + + #[test] + fn option_of_cl_type_should_work() { + let x: Option = Some(-1); + let y: Option = None; + + round_trip(&x); + round_trip(&y); + } + + #[test] + fn vec_of_cl_type_should_work() { + let vec = vec![String::from("a"), String::from("b")]; + round_trip(&vec); + } + + #[test] + #[allow(clippy::cognitive_complexity)] + fn small_array_of_u8_should_work() { + macro_rules! test_small_array { + ($($N:literal)+) => { + $( + let mut array: [u8; $N] = Default::default(); + for i in 0..$N { + array[i] = i as u8; + } + round_trip(&array); + )+ + } + } + + test_small_array! { + 1 2 3 4 5 6 7 8 9 + 10 11 12 13 14 15 16 17 18 19 + 20 21 22 23 24 25 26 27 28 29 + 30 31 32 + } + } + + #[test] + fn large_array_of_cl_type_should_work() { + macro_rules! test_large_array { + ($($N:literal)+) => { + $( + let array = { + let mut tmp = [0u8; $N]; + for i in 0..$N { + tmp[i] = i as u8; + } + tmp + }; + + let cl_value = CLValue::from_t(array.clone()).unwrap(); + + let serialized_cl_value = cl_value.to_bytes().unwrap(); + let parsed_cl_value: CLValue = bytesrepr::deserialize(serialized_cl_value).unwrap(); + assert_eq!(cl_value, parsed_cl_value); + + let parsed_value: [u8; $N] = CLValue::into_t(cl_value).unwrap(); + for i in 0..$N { + assert_eq!(array[i], parsed_value[i]); + } + )+ + } + } + + test_large_array! 
{ 64 128 256 512 } + } + + #[test] + fn result_of_cl_type_should_work() { + let x: Result<(), String> = Ok(()); + let y: Result<(), String> = Err(String::from("Hello, world!")); + + round_trip(&x); + round_trip(&y); + } + + #[test] + fn map_of_cl_type_should_work() { + let mut map: BTreeMap = BTreeMap::new(); + map.insert(String::from("abc"), 1); + map.insert(String::from("xyz"), 2); + + round_trip(&map); + } + + #[test] + fn tuple_1_should_work() { + let x = (-1i32,); + + round_trip(&x); + } + + #[test] + fn tuple_2_should_work() { + let x = (-1i32, String::from("a")); + + round_trip(&x); + } + + #[test] + fn tuple_3_should_work() { + let x = (-1i32, 1u32, String::from("a")); + + round_trip(&x); + } + + #[test] + fn parsing_nested_tuple_1_cltype_should_not_stack_overflow() { + // The bytesrepr representation of the CLType for a + // nested (((...((),),...),),) looks like: + // [18, 18, 18, ..., 9] + + for i in 1..1000 { + let bytes = iter::repeat(CL_TYPE_TAG_TUPLE1) + .take(i) + .chain(iter::once(CL_TYPE_TAG_UNIT)) + .collect(); + match bytesrepr::deserialize(bytes) { + Ok(parsed_cltype) => assert!(matches!(parsed_cltype, CLType::Tuple1(_))), + Err(error) => assert_eq!(error, bytesrepr::Error::ExceededRecursionDepth), + } + } + } + + #[test] + fn parsing_nested_tuple_1_value_should_not_stack_overflow() { + // The bytesrepr representation of the CLValue for a + // nested (((...((),),...),),) looks like: + // [0, 0, 0, 0, 18, 18, 18, ..., 18, 9] + + for i in 1..1000 { + let bytes = iter::repeat(0) + .take(4) + .chain(iter::repeat(CL_TYPE_TAG_TUPLE1).take(i)) + .chain(iter::once(CL_TYPE_TAG_UNIT)) + .collect(); + match bytesrepr::deserialize::(bytes) { + Ok(parsed_clvalue) => { + assert!(matches!(parsed_clvalue.cl_type(), CLType::Tuple1(_))) + } + Err(error) => assert_eq!(error, bytesrepr::Error::ExceededRecursionDepth), + } + } + } + + #[test] + fn any_should_work() { + #[derive(PartialEq, Debug, Clone)] + struct Any(String); + + impl CLTyped for Any { + fn 
cl_type() -> CLType { + CLType::Any + } + } + + impl ToBytes for Any { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + } + + impl FromBytes for Any { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (inner, remainder) = String::from_bytes(bytes)?; + Ok((Any(inner), remainder)) + } + } + + let any = Any("Any test".to_string()); + round_trip(&any); + } + + #[test] + fn should_have_cltype_of_ref_to_cltyped() { + assert_eq!(>::cl_type(), >::cl_type()) + } +} diff --git a/casper_types_ver_2_0/src/cl_value.rs b/casper_types_ver_2_0/src/cl_value.rs new file mode 100644 index 00000000..7e6732d1 --- /dev/null +++ b/casper_types_ver_2_0/src/cl_value.rs @@ -0,0 +1,1208 @@ +use alloc::{string::String, vec::Vec}; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::{gen::SchemaGenerator, schema::Schema, JsonSchema}; +use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; +use serde_json::Value; + +use crate::{ + bytesrepr::{self, Bytes, FromBytes, ToBytes, U32_SERIALIZED_LENGTH}, + checksummed_hex, CLType, CLTyped, +}; + +mod jsonrepr; + +/// Error while converting a [`CLValue`] into a given type. +#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct CLTypeMismatch { + /// The [`CLType`] into which the `CLValue` was being converted. + pub expected: CLType, + /// The actual underlying [`CLType`] of this `CLValue`, i.e. the type from which it was + /// constructed. + pub found: CLType, +} + +impl Display for CLTypeMismatch { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!( + f, + "Expected {:?} but found {:?}.", + self.expected, self.found + ) + } +} + +/// Error relating to [`CLValue`] operations. 
+#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub enum CLValueError { + /// An error while serializing or deserializing the underlying data. + Serialization(bytesrepr::Error), + /// A type mismatch while trying to convert a [`CLValue`] into a given type. + Type(CLTypeMismatch), +} + +impl From for CLValueError { + fn from(error: bytesrepr::Error) -> Self { + CLValueError::Serialization(error) + } +} + +impl Display for CLValueError { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + CLValueError::Serialization(error) => write!(formatter, "CLValue error: {}", error), + CLValueError::Type(error) => write!(formatter, "Type mismatch: {}", error), + } + } +} + +/// A Casper value, i.e. a value which can be stored and manipulated by smart contracts. +/// +/// It holds the underlying data as a type-erased, serialized `Vec` and also holds the +/// [`CLType`] of the underlying data as a separate member. +#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct CLValue { + cl_type: CLType, + bytes: Bytes, +} + +impl CLValue { + /// Constructs a `CLValue` from `t`. + pub fn from_t(t: T) -> Result { + let bytes = t.into_bytes()?; + + Ok(CLValue { + cl_type: T::cl_type(), + bytes: bytes.into(), + }) + } + + /// Converts `self` into its underlying type. + pub fn to_t(&self) -> Result { + let expected = T::cl_type(); + + if self.cl_type == expected { + Ok(bytesrepr::deserialize_from_slice(&self.bytes)?) + } else { + Err(CLValueError::Type(CLTypeMismatch { + expected, + found: self.cl_type.clone(), + })) + } + } + + /// Consumes and converts `self` back into its underlying type. + pub fn into_t(self) -> Result { + let expected = T::cl_type(); + + if self.cl_type == expected { + Ok(bytesrepr::deserialize_from_slice(&self.bytes)?) 
+ } else { + Err(CLValueError::Type(CLTypeMismatch { + expected, + found: self.cl_type, + })) + } + } + + /// A convenience method to create CLValue for a unit. + pub fn unit() -> Self { + CLValue::from_components(CLType::Unit, Vec::new()) + } + + // This is only required in order to implement `TryFrom for CLValue` (i.e. the + // conversion from the Protobuf `CLValue`) in a separate module to this one. + #[doc(hidden)] + pub fn from_components(cl_type: CLType, bytes: Vec) -> Self { + Self { + cl_type, + bytes: bytes.into(), + } + } + + // This is only required in order to implement `From for state::CLValue` (i.e. the + // conversion to the Protobuf `CLValue`) in a separate module to this one. + #[doc(hidden)] + pub fn destructure(self) -> (CLType, Bytes) { + (self.cl_type, self.bytes) + } + + /// The [`CLType`] of the underlying data. + pub fn cl_type(&self) -> &CLType { + &self.cl_type + } + + /// Returns a reference to the serialized form of the underlying value held in this `CLValue`. + pub fn inner_bytes(&self) -> &Vec { + self.bytes.inner_bytes() + } + + /// Returns the length of the `Vec` yielded after calling `self.to_bytes()`. + /// + /// Note, this method doesn't actually serialize `self`, and hence is relatively cheap. 
+ pub fn serialized_length(&self) -> usize { + self.cl_type.serialized_length() + U32_SERIALIZED_LENGTH + self.bytes.len() + } +} + +impl ToBytes for CLValue { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.clone().into_bytes() + } + + fn into_bytes(self) -> Result, bytesrepr::Error> { + let mut result = self.bytes.into_bytes()?; + self.cl_type.append_bytes(&mut result)?; + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.bytes.serialized_length() + self.cl_type.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.bytes.write_bytes(writer)?; + self.cl_type.append_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for CLValue { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (bytes, remainder) = FromBytes::from_bytes(bytes)?; + let (cl_type, remainder) = FromBytes::from_bytes(remainder)?; + let cl_value = CLValue { cl_type, bytes }; + Ok((cl_value, remainder)) + } +} + +/// We need to implement `JsonSchema` for `CLValue` as though it is a `CLValueJson`. +#[cfg(feature = "json-schema")] +impl JsonSchema for CLValue { + fn schema_name() -> String { + "CLValue".to_string() + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + ::json_schema(gen) + } +} + +/// A Casper value, i.e. a value which can be stored and manipulated by smart contracts. +/// +/// It holds the underlying data as a type-erased, serialized `Vec` and also holds the CLType of +/// the underlying data as a separate member. +/// +/// The `parsed` field, representing the original value, is a convenience only available when a +/// CLValue is encoded to JSON, and can always be set to null if preferred. 
+#[derive(Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[cfg_attr(feature = "json-schema", schemars(rename = "CLValue"))] +struct CLValueJson { + cl_type: CLType, + bytes: String, + parsed: Option, +} + +impl Serialize for CLValue { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + CLValueJson { + cl_type: self.cl_type.clone(), + bytes: base16::encode_lower(&self.bytes), + parsed: jsonrepr::cl_value_to_json(self), + } + .serialize(serializer) + } else { + (&self.cl_type, &self.bytes).serialize(serializer) + } + } +} + +impl<'de> Deserialize<'de> for CLValue { + fn deserialize>(deserializer: D) -> Result { + let (cl_type, bytes) = if deserializer.is_human_readable() { + let json = CLValueJson::deserialize(deserializer)?; + ( + json.cl_type.clone(), + checksummed_hex::decode(&json.bytes).map_err(D::Error::custom)?, + ) + } else { + <(CLType, Vec)>::deserialize(deserializer)? + }; + Ok(CLValue { + cl_type, + bytes: bytes.into(), + }) + } +} + +#[cfg(test)] +mod tests { + use alloc::string::ToString; + + #[cfg(feature = "json-schema")] + use schemars::schema_for; + + use super::*; + use crate::{ + account::{AccountHash, ACCOUNT_HASH_LENGTH}, + key::KEY_HASH_LENGTH, + AccessRights, DeployHash, Digest, Key, PublicKey, TransferAddr, URef, TRANSFER_ADDR_LENGTH, + U128, U256, U512, UREF_ADDR_LENGTH, + }; + + #[cfg(feature = "json-schema")] + #[test] + fn json_schema() { + let json_clvalue_schema = schema_for!(CLValueJson); + let clvalue_schema = schema_for!(CLValue); + assert_eq!(json_clvalue_schema, clvalue_schema); + } + + #[test] + fn serde_roundtrip() { + let cl_value = CLValue::from_t(true).unwrap(); + let serialized = bincode::serialize(&cl_value).unwrap(); + let decoded = bincode::deserialize(&serialized).unwrap(); + assert_eq!(cl_value, decoded); + } + + #[test] + fn json_roundtrip() { + let cl_value = CLValue::from_t(true).unwrap(); + let json_string = 
serde_json::to_string_pretty(&cl_value).unwrap(); + let decoded = serde_json::from_str(&json_string).unwrap(); + assert_eq!(cl_value, decoded); + } + + fn check_to_json(value: T, expected: &str) { + let cl_value = CLValue::from_t(value).unwrap(); + let cl_value_as_json = serde_json::to_string(&cl_value).unwrap(); + // Remove the `serialized_bytes` field: + // Split the string at `,"serialized_bytes":`. + let pattern = r#","bytes":""#; + let start_index = cl_value_as_json.find(pattern).unwrap(); + let (start, end) = cl_value_as_json.split_at(start_index); + // Find the end of the value of the `bytes` field, and split there. + let mut json_without_serialize_bytes = start.to_string(); + for (index, char) in end.char_indices().skip(pattern.len()) { + if char == '"' { + let (_to_remove, to_keep) = end.split_at(index + 1); + json_without_serialize_bytes.push_str(to_keep); + break; + } + } + assert_eq!(json_without_serialize_bytes, expected); + } + + mod simple_types { + use super::*; + use crate::crypto::SecretKey; + + #[test] + fn bool_cl_value_should_encode_to_json() { + check_to_json(true, r#"{"cl_type":"Bool","parsed":true}"#); + check_to_json(false, r#"{"cl_type":"Bool","parsed":false}"#); + } + + #[test] + fn i32_cl_value_should_encode_to_json() { + check_to_json( + i32::min_value(), + r#"{"cl_type":"I32","parsed":-2147483648}"#, + ); + check_to_json(0_i32, r#"{"cl_type":"I32","parsed":0}"#); + check_to_json(i32::max_value(), r#"{"cl_type":"I32","parsed":2147483647}"#); + } + + #[test] + fn i64_cl_value_should_encode_to_json() { + check_to_json( + i64::min_value(), + r#"{"cl_type":"I64","parsed":-9223372036854775808}"#, + ); + check_to_json(0_i64, r#"{"cl_type":"I64","parsed":0}"#); + check_to_json( + i64::max_value(), + r#"{"cl_type":"I64","parsed":9223372036854775807}"#, + ); + } + + #[test] + fn u8_cl_value_should_encode_to_json() { + check_to_json(0_u8, r#"{"cl_type":"U8","parsed":0}"#); + check_to_json(u8::max_value(), r#"{"cl_type":"U8","parsed":255}"#); + } 
+ + #[test] + fn u32_cl_value_should_encode_to_json() { + check_to_json(0_u32, r#"{"cl_type":"U32","parsed":0}"#); + check_to_json(u32::max_value(), r#"{"cl_type":"U32","parsed":4294967295}"#); + } + + #[test] + fn u64_cl_value_should_encode_to_json() { + check_to_json(0_u64, r#"{"cl_type":"U64","parsed":0}"#); + check_to_json( + u64::max_value(), + r#"{"cl_type":"U64","parsed":18446744073709551615}"#, + ); + } + + #[test] + fn u128_cl_value_should_encode_to_json() { + check_to_json(U128::zero(), r#"{"cl_type":"U128","parsed":"0"}"#); + check_to_json( + U128::max_value(), + r#"{"cl_type":"U128","parsed":"340282366920938463463374607431768211455"}"#, + ); + } + + #[test] + fn u256_cl_value_should_encode_to_json() { + check_to_json(U256::zero(), r#"{"cl_type":"U256","parsed":"0"}"#); + check_to_json( + U256::max_value(), + r#"{"cl_type":"U256","parsed":"115792089237316195423570985008687907853269984665640564039457584007913129639935"}"#, + ); + } + + #[test] + fn u512_cl_value_should_encode_to_json() { + check_to_json(U512::zero(), r#"{"cl_type":"U512","parsed":"0"}"#); + check_to_json( + U512::max_value(), + r#"{"cl_type":"U512","parsed":"13407807929942597099574024998205846127479365820592393377723561443721764030073546976801874298166903427690031858186486050853753882811946569946433649006084095"}"#, + ); + } + + #[test] + fn unit_cl_value_should_encode_to_json() { + check_to_json((), r#"{"cl_type":"Unit","parsed":null}"#); + } + + #[test] + fn string_cl_value_should_encode_to_json() { + check_to_json(String::new(), r#"{"cl_type":"String","parsed":""}"#); + check_to_json( + "test string".to_string(), + r#"{"cl_type":"String","parsed":"test string"}"#, + ); + } + + #[test] + fn key_cl_value_should_encode_to_json() { + let key_account = Key::Account(AccountHash::new([1; ACCOUNT_HASH_LENGTH])); + check_to_json( + key_account, + r#"{"cl_type":"Key","parsed":"account-hash-0101010101010101010101010101010101010101010101010101010101010101"}"#, + ); + + let key_hash = Key::Hash([2; 
KEY_HASH_LENGTH]); + check_to_json( + key_hash, + r#"{"cl_type":"Key","parsed":"hash-0202020202020202020202020202020202020202020202020202020202020202"}"#, + ); + + let key_uref = Key::URef(URef::new([3; UREF_ADDR_LENGTH], AccessRights::READ)); + check_to_json( + key_uref, + r#"{"cl_type":"Key","parsed":"uref-0303030303030303030303030303030303030303030303030303030303030303-001"}"#, + ); + + let key_transfer = Key::Transfer(TransferAddr::new([4; TRANSFER_ADDR_LENGTH])); + check_to_json( + key_transfer, + r#"{"cl_type":"Key","parsed":"transfer-0404040404040404040404040404040404040404040404040404040404040404"}"#, + ); + + let key_deploy_info = Key::DeployInfo(DeployHash::from_raw([5; Digest::LENGTH])); + check_to_json( + key_deploy_info, + r#"{"cl_type":"Key","parsed":"deploy-0505050505050505050505050505050505050505050505050505050505050505"}"#, + ); + } + + #[test] + fn uref_cl_value_should_encode_to_json() { + let uref = URef::new([6; UREF_ADDR_LENGTH], AccessRights::READ_ADD_WRITE); + check_to_json( + uref, + r#"{"cl_type":"URef","parsed":"uref-0606060606060606060606060606060606060606060606060606060606060606-007"}"#, + ); + } + + #[test] + fn public_key_cl_value_should_encode_to_json() { + check_to_json( + PublicKey::from( + &SecretKey::ed25519_from_bytes([7; SecretKey::ED25519_LENGTH]).unwrap(), + ), + r#"{"cl_type":"PublicKey","parsed":"01ea4a6c63e29c520abef5507b132ec5f9954776aebebe7b92421eea691446d22c"}"#, + ); + check_to_json( + PublicKey::from( + &SecretKey::secp256k1_from_bytes([8; SecretKey::SECP256K1_LENGTH]).unwrap(), + ), + r#"{"cl_type":"PublicKey","parsed":"0203f991f944d1e1954a7fc8b9bf62e0d78f015f4c07762d505e20e6c45260a3661b"}"#, + ); + } + } + + mod option { + use super::*; + use crate::crypto::SecretKey; + + #[test] + fn bool_cl_value_should_encode_to_json() { + check_to_json(Some(true), r#"{"cl_type":{"Option":"Bool"},"parsed":true}"#); + check_to_json( + Some(false), + r#"{"cl_type":{"Option":"Bool"},"parsed":false}"#, + ); + check_to_json( + 
Option::::None, + r#"{"cl_type":{"Option":"Bool"},"parsed":null}"#, + ); + } + + #[test] + fn i32_cl_value_should_encode_to_json() { + check_to_json( + Some(i32::min_value()), + r#"{"cl_type":{"Option":"I32"},"parsed":-2147483648}"#, + ); + check_to_json(Some(0_i32), r#"{"cl_type":{"Option":"I32"},"parsed":0}"#); + check_to_json( + Some(i32::max_value()), + r#"{"cl_type":{"Option":"I32"},"parsed":2147483647}"#, + ); + check_to_json( + Option::::None, + r#"{"cl_type":{"Option":"I32"},"parsed":null}"#, + ); + } + + #[test] + fn i64_cl_value_should_encode_to_json() { + check_to_json( + Some(i64::min_value()), + r#"{"cl_type":{"Option":"I64"},"parsed":-9223372036854775808}"#, + ); + check_to_json(Some(0_i64), r#"{"cl_type":{"Option":"I64"},"parsed":0}"#); + check_to_json( + Some(i64::max_value()), + r#"{"cl_type":{"Option":"I64"},"parsed":9223372036854775807}"#, + ); + check_to_json( + Option::::None, + r#"{"cl_type":{"Option":"I64"},"parsed":null}"#, + ); + } + + #[test] + fn u8_cl_value_should_encode_to_json() { + check_to_json(Some(0_u8), r#"{"cl_type":{"Option":"U8"},"parsed":0}"#); + check_to_json( + Some(u8::max_value()), + r#"{"cl_type":{"Option":"U8"},"parsed":255}"#, + ); + check_to_json( + Option::::None, + r#"{"cl_type":{"Option":"U8"},"parsed":null}"#, + ); + } + + #[test] + fn u32_cl_value_should_encode_to_json() { + check_to_json(Some(0_u32), r#"{"cl_type":{"Option":"U32"},"parsed":0}"#); + check_to_json( + Some(u32::max_value()), + r#"{"cl_type":{"Option":"U32"},"parsed":4294967295}"#, + ); + check_to_json( + Option::::None, + r#"{"cl_type":{"Option":"U32"},"parsed":null}"#, + ); + } + + #[test] + fn u64_cl_value_should_encode_to_json() { + check_to_json(Some(0_u64), r#"{"cl_type":{"Option":"U64"},"parsed":0}"#); + check_to_json( + Some(u64::max_value()), + r#"{"cl_type":{"Option":"U64"},"parsed":18446744073709551615}"#, + ); + check_to_json( + Option::::None, + r#"{"cl_type":{"Option":"U64"},"parsed":null}"#, + ); + } + + #[test] + fn 
u128_cl_value_should_encode_to_json() { + check_to_json( + Some(U128::zero()), + r#"{"cl_type":{"Option":"U128"},"parsed":"0"}"#, + ); + check_to_json( + Some(U128::max_value()), + r#"{"cl_type":{"Option":"U128"},"parsed":"340282366920938463463374607431768211455"}"#, + ); + check_to_json( + Option::::None, + r#"{"cl_type":{"Option":"U128"},"parsed":null}"#, + ); + } + + #[test] + fn u256_cl_value_should_encode_to_json() { + check_to_json( + Some(U256::zero()), + r#"{"cl_type":{"Option":"U256"},"parsed":"0"}"#, + ); + check_to_json( + Some(U256::max_value()), + r#"{"cl_type":{"Option":"U256"},"parsed":"115792089237316195423570985008687907853269984665640564039457584007913129639935"}"#, + ); + check_to_json( + Option::::None, + r#"{"cl_type":{"Option":"U256"},"parsed":null}"#, + ); + } + + #[test] + fn u512_cl_value_should_encode_to_json() { + check_to_json( + Some(U512::zero()), + r#"{"cl_type":{"Option":"U512"},"parsed":"0"}"#, + ); + check_to_json( + Some(U512::max_value()), + r#"{"cl_type":{"Option":"U512"},"parsed":"13407807929942597099574024998205846127479365820592393377723561443721764030073546976801874298166903427690031858186486050853753882811946569946433649006084095"}"#, + ); + check_to_json( + Option::::None, + r#"{"cl_type":{"Option":"U512"},"parsed":null}"#, + ); + } + + #[test] + fn unit_cl_value_should_encode_to_json() { + check_to_json(Some(()), r#"{"cl_type":{"Option":"Unit"},"parsed":null}"#); + check_to_json( + Option::<()>::None, + r#"{"cl_type":{"Option":"Unit"},"parsed":null}"#, + ); + } + + #[test] + fn string_cl_value_should_encode_to_json() { + check_to_json( + Some(String::new()), + r#"{"cl_type":{"Option":"String"},"parsed":""}"#, + ); + check_to_json( + Some("test string".to_string()), + r#"{"cl_type":{"Option":"String"},"parsed":"test string"}"#, + ); + check_to_json( + Option::::None, + r#"{"cl_type":{"Option":"String"},"parsed":null}"#, + ); + } + + #[test] + fn key_cl_value_should_encode_to_json() { + let key_account = 
Key::Account(AccountHash::new([1; ACCOUNT_HASH_LENGTH])); + check_to_json( + Some(key_account), + r#"{"cl_type":{"Option":"Key"},"parsed":"account-hash-0101010101010101010101010101010101010101010101010101010101010101"}"#, + ); + + let key_hash = Key::Hash([2; KEY_HASH_LENGTH]); + check_to_json( + Some(key_hash), + r#"{"cl_type":{"Option":"Key"},"parsed":"hash-0202020202020202020202020202020202020202020202020202020202020202"}"#, + ); + + let key_uref = Key::URef(URef::new([3; UREF_ADDR_LENGTH], AccessRights::READ)); + check_to_json( + Some(key_uref), + r#"{"cl_type":{"Option":"Key"},"parsed":"uref-0303030303030303030303030303030303030303030303030303030303030303-001"}"#, + ); + + let key_transfer = Key::Transfer(TransferAddr::new([4; TRANSFER_ADDR_LENGTH])); + check_to_json( + Some(key_transfer), + r#"{"cl_type":{"Option":"Key"},"parsed":"transfer-0404040404040404040404040404040404040404040404040404040404040404"}"#, + ); + + let key_deploy_info = Key::DeployInfo(DeployHash::from_raw([5; Digest::LENGTH])); + check_to_json( + Some(key_deploy_info), + r#"{"cl_type":{"Option":"Key"},"parsed":"deploy-0505050505050505050505050505050505050505050505050505050505050505"}"#, + ); + + check_to_json( + Option::::None, + r#"{"cl_type":{"Option":"Key"},"parsed":null}"#, + ) + } + + #[test] + fn uref_cl_value_should_encode_to_json() { + let uref = URef::new([6; UREF_ADDR_LENGTH], AccessRights::READ_ADD_WRITE); + check_to_json( + Some(uref), + r#"{"cl_type":{"Option":"URef"},"parsed":"uref-0606060606060606060606060606060606060606060606060606060606060606-007"}"#, + ); + check_to_json( + Option::::None, + r#"{"cl_type":{"Option":"URef"},"parsed":null}"#, + ) + } + + #[test] + fn public_key_cl_value_should_encode_to_json() { + check_to_json( + Some(PublicKey::from( + &SecretKey::ed25519_from_bytes([7; SecretKey::ED25519_LENGTH]).unwrap(), + )), + r#"{"cl_type":{"Option":"PublicKey"},"parsed":"01ea4a6c63e29c520abef5507b132ec5f9954776aebebe7b92421eea691446d22c"}"#, + ); + check_to_json( + 
Some(PublicKey::from( + &SecretKey::secp256k1_from_bytes([8; SecretKey::SECP256K1_LENGTH]).unwrap(), + )), + r#"{"cl_type":{"Option":"PublicKey"},"parsed":"0203f991f944d1e1954a7fc8b9bf62e0d78f015f4c07762d505e20e6c45260a3661b"}"#, + ); + check_to_json( + Option::::None, + r#"{"cl_type":{"Option":"PublicKey"},"parsed":null}"#, + ) + } + } + + mod result { + use super::*; + use crate::crypto::SecretKey; + + #[test] + fn bool_cl_value_should_encode_to_json() { + check_to_json( + Result::::Ok(true), + r#"{"cl_type":{"Result":{"ok":"Bool","err":"I32"}},"parsed":{"Ok":true}}"#, + ); + check_to_json( + Result::::Ok(true), + r#"{"cl_type":{"Result":{"ok":"Bool","err":"U32"}},"parsed":{"Ok":true}}"#, + ); + check_to_json( + Result::::Ok(true), + r#"{"cl_type":{"Result":{"ok":"Bool","err":"Unit"}},"parsed":{"Ok":true}}"#, + ); + check_to_json( + Result::::Ok(true), + r#"{"cl_type":{"Result":{"ok":"Bool","err":"String"}},"parsed":{"Ok":true}}"#, + ); + check_to_json( + Result::::Err(-1), + r#"{"cl_type":{"Result":{"ok":"Bool","err":"I32"}},"parsed":{"Err":-1}}"#, + ); + check_to_json( + Result::::Err(1), + r#"{"cl_type":{"Result":{"ok":"Bool","err":"U32"}},"parsed":{"Err":1}}"#, + ); + check_to_json( + Result::::Err(()), + r#"{"cl_type":{"Result":{"ok":"Bool","err":"Unit"}},"parsed":{"Err":null}}"#, + ); + check_to_json( + Result::::Err("e".to_string()), + r#"{"cl_type":{"Result":{"ok":"Bool","err":"String"}},"parsed":{"Err":"e"}}"#, + ); + } + + #[test] + fn i32_cl_value_should_encode_to_json() { + check_to_json( + Result::::Ok(-1), + r#"{"cl_type":{"Result":{"ok":"I32","err":"I32"}},"parsed":{"Ok":-1}}"#, + ); + check_to_json( + Result::::Ok(-1), + r#"{"cl_type":{"Result":{"ok":"I32","err":"U32"}},"parsed":{"Ok":-1}}"#, + ); + check_to_json( + Result::::Ok(-1), + r#"{"cl_type":{"Result":{"ok":"I32","err":"Unit"}},"parsed":{"Ok":-1}}"#, + ); + check_to_json( + Result::::Ok(-1), + r#"{"cl_type":{"Result":{"ok":"I32","err":"String"}},"parsed":{"Ok":-1}}"#, + ); + check_to_json( 
+ Result::::Err(-1), + r#"{"cl_type":{"Result":{"ok":"I32","err":"I32"}},"parsed":{"Err":-1}}"#, + ); + check_to_json( + Result::::Err(1), + r#"{"cl_type":{"Result":{"ok":"I32","err":"U32"}},"parsed":{"Err":1}}"#, + ); + check_to_json( + Result::::Err(()), + r#"{"cl_type":{"Result":{"ok":"I32","err":"Unit"}},"parsed":{"Err":null}}"#, + ); + check_to_json( + Result::::Err("e".to_string()), + r#"{"cl_type":{"Result":{"ok":"I32","err":"String"}},"parsed":{"Err":"e"}}"#, + ); + } + + #[test] + fn i64_cl_value_should_encode_to_json() { + check_to_json( + Result::::Ok(-1), + r#"{"cl_type":{"Result":{"ok":"I64","err":"I32"}},"parsed":{"Ok":-1}}"#, + ); + check_to_json( + Result::::Ok(-1), + r#"{"cl_type":{"Result":{"ok":"I64","err":"U32"}},"parsed":{"Ok":-1}}"#, + ); + check_to_json( + Result::::Ok(-1), + r#"{"cl_type":{"Result":{"ok":"I64","err":"Unit"}},"parsed":{"Ok":-1}}"#, + ); + check_to_json( + Result::::Ok(-1), + r#"{"cl_type":{"Result":{"ok":"I64","err":"String"}},"parsed":{"Ok":-1}}"#, + ); + check_to_json( + Result::::Err(-1), + r#"{"cl_type":{"Result":{"ok":"I64","err":"I32"}},"parsed":{"Err":-1}}"#, + ); + check_to_json( + Result::::Err(1), + r#"{"cl_type":{"Result":{"ok":"I64","err":"U32"}},"parsed":{"Err":1}}"#, + ); + check_to_json( + Result::::Err(()), + r#"{"cl_type":{"Result":{"ok":"I64","err":"Unit"}},"parsed":{"Err":null}}"#, + ); + check_to_json( + Result::::Err("e".to_string()), + r#"{"cl_type":{"Result":{"ok":"I64","err":"String"}},"parsed":{"Err":"e"}}"#, + ); + } + + #[test] + fn u8_cl_value_should_encode_to_json() { + check_to_json( + Result::::Ok(1), + r#"{"cl_type":{"Result":{"ok":"U8","err":"I32"}},"parsed":{"Ok":1}}"#, + ); + check_to_json( + Result::::Ok(1), + r#"{"cl_type":{"Result":{"ok":"U8","err":"U32"}},"parsed":{"Ok":1}}"#, + ); + check_to_json( + Result::::Ok(1), + r#"{"cl_type":{"Result":{"ok":"U8","err":"Unit"}},"parsed":{"Ok":1}}"#, + ); + check_to_json( + Result::::Ok(1), + 
r#"{"cl_type":{"Result":{"ok":"U8","err":"String"}},"parsed":{"Ok":1}}"#, + ); + check_to_json( + Result::::Err(-1), + r#"{"cl_type":{"Result":{"ok":"U8","err":"I32"}},"parsed":{"Err":-1}}"#, + ); + check_to_json( + Result::::Err(1), + r#"{"cl_type":{"Result":{"ok":"U8","err":"U32"}},"parsed":{"Err":1}}"#, + ); + check_to_json( + Result::::Err(()), + r#"{"cl_type":{"Result":{"ok":"U8","err":"Unit"}},"parsed":{"Err":null}}"#, + ); + check_to_json( + Result::::Err("e".to_string()), + r#"{"cl_type":{"Result":{"ok":"U8","err":"String"}},"parsed":{"Err":"e"}}"#, + ); + } + + #[test] + fn u32_cl_value_should_encode_to_json() { + check_to_json( + Result::::Ok(1), + r#"{"cl_type":{"Result":{"ok":"U32","err":"I32"}},"parsed":{"Ok":1}}"#, + ); + check_to_json( + Result::::Ok(1), + r#"{"cl_type":{"Result":{"ok":"U32","err":"U32"}},"parsed":{"Ok":1}}"#, + ); + check_to_json( + Result::::Ok(1), + r#"{"cl_type":{"Result":{"ok":"U32","err":"Unit"}},"parsed":{"Ok":1}}"#, + ); + check_to_json( + Result::::Ok(1), + r#"{"cl_type":{"Result":{"ok":"U32","err":"String"}},"parsed":{"Ok":1}}"#, + ); + check_to_json( + Result::::Err(-1), + r#"{"cl_type":{"Result":{"ok":"U32","err":"I32"}},"parsed":{"Err":-1}}"#, + ); + check_to_json( + Result::::Err(1), + r#"{"cl_type":{"Result":{"ok":"U32","err":"U32"}},"parsed":{"Err":1}}"#, + ); + check_to_json( + Result::::Err(()), + r#"{"cl_type":{"Result":{"ok":"U32","err":"Unit"}},"parsed":{"Err":null}}"#, + ); + check_to_json( + Result::::Err("e".to_string()), + r#"{"cl_type":{"Result":{"ok":"U32","err":"String"}},"parsed":{"Err":"e"}}"#, + ); + } + + #[test] + fn u64_cl_value_should_encode_to_json() { + check_to_json( + Result::::Ok(1), + r#"{"cl_type":{"Result":{"ok":"U64","err":"I32"}},"parsed":{"Ok":1}}"#, + ); + check_to_json( + Result::::Ok(1), + r#"{"cl_type":{"Result":{"ok":"U64","err":"U32"}},"parsed":{"Ok":1}}"#, + ); + check_to_json( + Result::::Ok(1), + r#"{"cl_type":{"Result":{"ok":"U64","err":"Unit"}},"parsed":{"Ok":1}}"#, + ); + 
check_to_json( + Result::::Ok(1), + r#"{"cl_type":{"Result":{"ok":"U64","err":"String"}},"parsed":{"Ok":1}}"#, + ); + check_to_json( + Result::::Err(-1), + r#"{"cl_type":{"Result":{"ok":"U64","err":"I32"}},"parsed":{"Err":-1}}"#, + ); + check_to_json( + Result::::Err(1), + r#"{"cl_type":{"Result":{"ok":"U64","err":"U32"}},"parsed":{"Err":1}}"#, + ); + check_to_json( + Result::::Err(()), + r#"{"cl_type":{"Result":{"ok":"U64","err":"Unit"}},"parsed":{"Err":null}}"#, + ); + check_to_json( + Result::::Err("e".to_string()), + r#"{"cl_type":{"Result":{"ok":"U64","err":"String"}},"parsed":{"Err":"e"}}"#, + ); + } + + #[test] + fn u128_cl_value_should_encode_to_json() { + check_to_json( + Result::::Ok(1.into()), + r#"{"cl_type":{"Result":{"ok":"U128","err":"I32"}},"parsed":{"Ok":"1"}}"#, + ); + check_to_json( + Result::::Ok(1.into()), + r#"{"cl_type":{"Result":{"ok":"U128","err":"U32"}},"parsed":{"Ok":"1"}}"#, + ); + check_to_json( + Result::::Ok(1.into()), + r#"{"cl_type":{"Result":{"ok":"U128","err":"Unit"}},"parsed":{"Ok":"1"}}"#, + ); + check_to_json( + Result::::Ok(1.into()), + r#"{"cl_type":{"Result":{"ok":"U128","err":"String"}},"parsed":{"Ok":"1"}}"#, + ); + check_to_json( + Result::::Err(-1), + r#"{"cl_type":{"Result":{"ok":"U128","err":"I32"}},"parsed":{"Err":-1}}"#, + ); + check_to_json( + Result::::Err(1), + r#"{"cl_type":{"Result":{"ok":"U128","err":"U32"}},"parsed":{"Err":1}}"#, + ); + check_to_json( + Result::::Err(()), + r#"{"cl_type":{"Result":{"ok":"U128","err":"Unit"}},"parsed":{"Err":null}}"#, + ); + check_to_json( + Result::::Err("e".to_string()), + r#"{"cl_type":{"Result":{"ok":"U128","err":"String"}},"parsed":{"Err":"e"}}"#, + ); + } + + #[test] + fn u256_cl_value_should_encode_to_json() { + check_to_json( + Result::::Ok(1.into()), + r#"{"cl_type":{"Result":{"ok":"U256","err":"I32"}},"parsed":{"Ok":"1"}}"#, + ); + check_to_json( + Result::::Ok(1.into()), + r#"{"cl_type":{"Result":{"ok":"U256","err":"U32"}},"parsed":{"Ok":"1"}}"#, + ); + 
check_to_json( + Result::::Ok(1.into()), + r#"{"cl_type":{"Result":{"ok":"U256","err":"Unit"}},"parsed":{"Ok":"1"}}"#, + ); + check_to_json( + Result::::Ok(1.into()), + r#"{"cl_type":{"Result":{"ok":"U256","err":"String"}},"parsed":{"Ok":"1"}}"#, + ); + check_to_json( + Result::::Err(-1), + r#"{"cl_type":{"Result":{"ok":"U256","err":"I32"}},"parsed":{"Err":-1}}"#, + ); + check_to_json( + Result::::Err(1), + r#"{"cl_type":{"Result":{"ok":"U256","err":"U32"}},"parsed":{"Err":1}}"#, + ); + check_to_json( + Result::::Err(()), + r#"{"cl_type":{"Result":{"ok":"U256","err":"Unit"}},"parsed":{"Err":null}}"#, + ); + check_to_json( + Result::::Err("e".to_string()), + r#"{"cl_type":{"Result":{"ok":"U256","err":"String"}},"parsed":{"Err":"e"}}"#, + ); + } + + #[test] + fn u512_cl_value_should_encode_to_json() { + check_to_json( + Result::::Ok(1.into()), + r#"{"cl_type":{"Result":{"ok":"U512","err":"I32"}},"parsed":{"Ok":"1"}}"#, + ); + check_to_json( + Result::::Ok(1.into()), + r#"{"cl_type":{"Result":{"ok":"U512","err":"U32"}},"parsed":{"Ok":"1"}}"#, + ); + check_to_json( + Result::::Ok(1.into()), + r#"{"cl_type":{"Result":{"ok":"U512","err":"Unit"}},"parsed":{"Ok":"1"}}"#, + ); + check_to_json( + Result::::Ok(1.into()), + r#"{"cl_type":{"Result":{"ok":"U512","err":"String"}},"parsed":{"Ok":"1"}}"#, + ); + check_to_json( + Result::::Err(-1), + r#"{"cl_type":{"Result":{"ok":"U512","err":"I32"}},"parsed":{"Err":-1}}"#, + ); + check_to_json( + Result::::Err(1), + r#"{"cl_type":{"Result":{"ok":"U512","err":"U32"}},"parsed":{"Err":1}}"#, + ); + check_to_json( + Result::::Err(()), + r#"{"cl_type":{"Result":{"ok":"U512","err":"Unit"}},"parsed":{"Err":null}}"#, + ); + check_to_json( + Result::::Err("e".to_string()), + r#"{"cl_type":{"Result":{"ok":"U512","err":"String"}},"parsed":{"Err":"e"}}"#, + ); + } + + #[test] + fn unit_cl_value_should_encode_to_json() { + check_to_json( + Result::<(), i32>::Ok(()), + r#"{"cl_type":{"Result":{"ok":"Unit","err":"I32"}},"parsed":{"Ok":null}}"#, + 
); + check_to_json( + Result::<(), u32>::Ok(()), + r#"{"cl_type":{"Result":{"ok":"Unit","err":"U32"}},"parsed":{"Ok":null}}"#, + ); + check_to_json( + Result::<(), ()>::Ok(()), + r#"{"cl_type":{"Result":{"ok":"Unit","err":"Unit"}},"parsed":{"Ok":null}}"#, + ); + check_to_json( + Result::<(), String>::Ok(()), + r#"{"cl_type":{"Result":{"ok":"Unit","err":"String"}},"parsed":{"Ok":null}}"#, + ); + check_to_json( + Result::<(), i32>::Err(-1), + r#"{"cl_type":{"Result":{"ok":"Unit","err":"I32"}},"parsed":{"Err":-1}}"#, + ); + check_to_json( + Result::<(), u32>::Err(1), + r#"{"cl_type":{"Result":{"ok":"Unit","err":"U32"}},"parsed":{"Err":1}}"#, + ); + check_to_json( + Result::<(), ()>::Err(()), + r#"{"cl_type":{"Result":{"ok":"Unit","err":"Unit"}},"parsed":{"Err":null}}"#, + ); + check_to_json( + Result::<(), String>::Err("e".to_string()), + r#"{"cl_type":{"Result":{"ok":"Unit","err":"String"}},"parsed":{"Err":"e"}}"#, + ); + } + + #[test] + fn string_cl_value_should_encode_to_json() { + check_to_json( + Result::::Ok("test string".to_string()), + r#"{"cl_type":{"Result":{"ok":"String","err":"I32"}},"parsed":{"Ok":"test string"}}"#, + ); + check_to_json( + Result::::Ok("test string".to_string()), + r#"{"cl_type":{"Result":{"ok":"String","err":"U32"}},"parsed":{"Ok":"test string"}}"#, + ); + check_to_json( + Result::::Ok("test string".to_string()), + r#"{"cl_type":{"Result":{"ok":"String","err":"Unit"}},"parsed":{"Ok":"test string"}}"#, + ); + check_to_json( + Result::::Ok("test string".to_string()), + r#"{"cl_type":{"Result":{"ok":"String","err":"String"}},"parsed":{"Ok":"test string"}}"#, + ); + check_to_json( + Result::::Err(-1), + r#"{"cl_type":{"Result":{"ok":"String","err":"I32"}},"parsed":{"Err":-1}}"#, + ); + check_to_json( + Result::::Err(1), + r#"{"cl_type":{"Result":{"ok":"String","err":"U32"}},"parsed":{"Err":1}}"#, + ); + check_to_json( + Result::::Err(()), + r#"{"cl_type":{"Result":{"ok":"String","err":"Unit"}},"parsed":{"Err":null}}"#, + ); + check_to_json( 
+ Result::::Err("e".to_string()), + r#"{"cl_type":{"Result":{"ok":"String","err":"String"}},"parsed":{"Err":"e"}}"#, + ); + } + + #[test] + fn key_cl_value_should_encode_to_json() { + let key = Key::Hash([2; KEY_HASH_LENGTH]); + check_to_json( + Result::::Ok(key), + r#"{"cl_type":{"Result":{"ok":"Key","err":"I32"}},"parsed":{"Ok":"hash-0202020202020202020202020202020202020202020202020202020202020202"}}"#, + ); + check_to_json( + Result::::Ok(key), + r#"{"cl_type":{"Result":{"ok":"Key","err":"U32"}},"parsed":{"Ok":"hash-0202020202020202020202020202020202020202020202020202020202020202"}}"#, + ); + check_to_json( + Result::::Ok(key), + r#"{"cl_type":{"Result":{"ok":"Key","err":"Unit"}},"parsed":{"Ok":"hash-0202020202020202020202020202020202020202020202020202020202020202"}}"#, + ); + check_to_json( + Result::::Ok(key), + r#"{"cl_type":{"Result":{"ok":"Key","err":"String"}},"parsed":{"Ok":"hash-0202020202020202020202020202020202020202020202020202020202020202"}}"#, + ); + check_to_json( + Result::::Err(-1), + r#"{"cl_type":{"Result":{"ok":"Key","err":"I32"}},"parsed":{"Err":-1}}"#, + ); + check_to_json( + Result::::Err(1), + r#"{"cl_type":{"Result":{"ok":"Key","err":"U32"}},"parsed":{"Err":1}}"#, + ); + check_to_json( + Result::::Err(()), + r#"{"cl_type":{"Result":{"ok":"Key","err":"Unit"}},"parsed":{"Err":null}}"#, + ); + check_to_json( + Result::::Err("e".to_string()), + r#"{"cl_type":{"Result":{"ok":"Key","err":"String"}},"parsed":{"Err":"e"}}"#, + ); + } + + #[test] + fn uref_cl_value_should_encode_to_json() { + let uref = URef::new([6; UREF_ADDR_LENGTH], AccessRights::READ_ADD_WRITE); + check_to_json( + Result::::Ok(uref), + r#"{"cl_type":{"Result":{"ok":"URef","err":"I32"}},"parsed":{"Ok":"uref-0606060606060606060606060606060606060606060606060606060606060606-007"}}"#, + ); + check_to_json( + Result::::Ok(uref), + r#"{"cl_type":{"Result":{"ok":"URef","err":"U32"}},"parsed":{"Ok":"uref-0606060606060606060606060606060606060606060606060606060606060606-007"}}"#, + ); + 
check_to_json( + Result::::Ok(uref), + r#"{"cl_type":{"Result":{"ok":"URef","err":"Unit"}},"parsed":{"Ok":"uref-0606060606060606060606060606060606060606060606060606060606060606-007"}}"#, + ); + check_to_json( + Result::::Ok(uref), + r#"{"cl_type":{"Result":{"ok":"URef","err":"String"}},"parsed":{"Ok":"uref-0606060606060606060606060606060606060606060606060606060606060606-007"}}"#, + ); + check_to_json( + Result::::Err(-1), + r#"{"cl_type":{"Result":{"ok":"URef","err":"I32"}},"parsed":{"Err":-1}}"#, + ); + check_to_json( + Result::::Err(1), + r#"{"cl_type":{"Result":{"ok":"URef","err":"U32"}},"parsed":{"Err":1}}"#, + ); + check_to_json( + Result::::Err(()), + r#"{"cl_type":{"Result":{"ok":"URef","err":"Unit"}},"parsed":{"Err":null}}"#, + ); + check_to_json( + Result::::Err("e".to_string()), + r#"{"cl_type":{"Result":{"ok":"URef","err":"String"}},"parsed":{"Err":"e"}}"#, + ); + } + + #[test] + fn public_key_cl_value_should_encode_to_json() { + let secret_key = + SecretKey::secp256k1_from_bytes([8; SecretKey::SECP256K1_LENGTH]).unwrap(); + let public_key = PublicKey::from(&secret_key); + check_to_json( + Result::::Ok(public_key.clone()), + r#"{"cl_type":{"Result":{"ok":"PublicKey","err":"I32"}},"parsed":{"Ok":"0203f991f944d1e1954a7fc8b9bf62e0d78f015f4c07762d505e20e6c45260a3661b"}}"#, + ); + check_to_json( + Result::::Ok(public_key.clone()), + r#"{"cl_type":{"Result":{"ok":"PublicKey","err":"U32"}},"parsed":{"Ok":"0203f991f944d1e1954a7fc8b9bf62e0d78f015f4c07762d505e20e6c45260a3661b"}}"#, + ); + check_to_json( + Result::::Ok(public_key.clone()), + r#"{"cl_type":{"Result":{"ok":"PublicKey","err":"Unit"}},"parsed":{"Ok":"0203f991f944d1e1954a7fc8b9bf62e0d78f015f4c07762d505e20e6c45260a3661b"}}"#, + ); + check_to_json( + Result::::Ok(public_key), + r#"{"cl_type":{"Result":{"ok":"PublicKey","err":"String"}},"parsed":{"Ok":"0203f991f944d1e1954a7fc8b9bf62e0d78f015f4c07762d505e20e6c45260a3661b"}}"#, + ); + check_to_json( + Result::::Err(-1), + 
r#"{"cl_type":{"Result":{"ok":"PublicKey","err":"I32"}},"parsed":{"Err":-1}}"#, + ); + check_to_json( + Result::::Err(1), + r#"{"cl_type":{"Result":{"ok":"PublicKey","err":"U32"}},"parsed":{"Err":1}}"#, + ); + check_to_json( + Result::::Err(()), + r#"{"cl_type":{"Result":{"ok":"PublicKey","err":"Unit"}},"parsed":{"Err":null}}"#, + ); + check_to_json( + Result::::Err("e".to_string()), + r#"{"cl_type":{"Result":{"ok":"PublicKey","err":"String"}},"parsed":{"Err":"e"}}"#, + ); + } + } +} diff --git a/casper_types_ver_2_0/src/cl_value/jsonrepr.rs b/casper_types_ver_2_0/src/cl_value/jsonrepr.rs new file mode 100644 index 00000000..1b3b3e28 --- /dev/null +++ b/casper_types_ver_2_0/src/cl_value/jsonrepr.rs @@ -0,0 +1,272 @@ +use alloc::{string::String, vec, vec::Vec}; + +use serde::Serialize; +use serde_json::{json, Value}; + +use crate::{ + bytesrepr::{self, FromBytes, OPTION_NONE_TAG, OPTION_SOME_TAG, RESULT_ERR_TAG, RESULT_OK_TAG}, + cl_type::CL_TYPE_RECURSION_DEPTH, + CLType, CLValue, Key, PublicKey, URef, U128, U256, U512, +}; + +/// Returns a best-effort attempt to convert the `CLValue` into a meaningful JSON value. 
+pub fn cl_value_to_json(cl_value: &CLValue) -> Option { + depth_limited_to_json(0, cl_value.cl_type(), cl_value.inner_bytes()).and_then( + |(json_value, remainder)| { + if remainder.is_empty() { + Some(json_value) + } else { + None + } + }, + ) +} + +fn depth_limited_to_json<'a>( + depth: u8, + cl_type: &CLType, + bytes: &'a [u8], +) -> Option<(Value, &'a [u8])> { + if depth >= CL_TYPE_RECURSION_DEPTH { + return None; + } + let depth = depth + 1; + + match cl_type { + CLType::Bool => simple_type_to_json::(bytes), + CLType::I32 => simple_type_to_json::(bytes), + CLType::I64 => simple_type_to_json::(bytes), + CLType::U8 => simple_type_to_json::(bytes), + CLType::U32 => simple_type_to_json::(bytes), + CLType::U64 => simple_type_to_json::(bytes), + CLType::U128 => simple_type_to_json::(bytes), + CLType::U256 => simple_type_to_json::(bytes), + CLType::U512 => simple_type_to_json::(bytes), + CLType::Unit => simple_type_to_json::<()>(bytes), + CLType::String => simple_type_to_json::(bytes), + CLType::Key => simple_type_to_json::(bytes), + CLType::URef => simple_type_to_json::(bytes), + CLType::PublicKey => simple_type_to_json::(bytes), + CLType::Option(inner_cl_type) => { + let (variant, remainder) = u8::from_bytes(bytes).ok()?; + match variant { + OPTION_NONE_TAG => Some((Value::Null, remainder)), + OPTION_SOME_TAG => Some(depth_limited_to_json(depth, inner_cl_type, remainder)?), + _ => None, + } + } + CLType::List(inner_cl_type) => { + let (count, mut stream) = u32::from_bytes(bytes).ok()?; + let mut result: Vec = Vec::new(); + for _ in 0..count { + let (value, remainder) = depth_limited_to_json(depth, inner_cl_type, stream)?; + result.push(value); + stream = remainder; + } + Some((json!(result), stream)) + } + CLType::ByteArray(length) => { + let (bytes, remainder) = bytesrepr::safe_split_at(bytes, *length as usize).ok()?; + let hex_encoded_bytes = base16::encode_lower(&bytes); + Some((json![hex_encoded_bytes], remainder)) + } + CLType::Result { ok, err } => { + let 
(variant, remainder) = u8::from_bytes(bytes).ok()?; + match variant { + RESULT_ERR_TAG => { + let (value, remainder) = depth_limited_to_json(depth, err, remainder)?; + Some((json!({ "Err": value }), remainder)) + } + RESULT_OK_TAG => { + let (value, remainder) = depth_limited_to_json(depth, ok, remainder)?; + Some((json!({ "Ok": value }), remainder)) + } + _ => None, + } + } + CLType::Map { key, value } => { + let (num_keys, mut stream) = u32::from_bytes(bytes).ok()?; + let mut result: Vec = Vec::new(); + for _ in 0..num_keys { + let (k, remainder) = depth_limited_to_json(depth, key, stream)?; + let (v, remainder) = depth_limited_to_json(depth, value, remainder)?; + result.push(json!({"key": k, "value": v})); + stream = remainder; + } + Some((json!(result), stream)) + } + CLType::Tuple1(arr) => { + let (t1, remainder) = depth_limited_to_json(depth, &arr[0], bytes)?; + Some((json!([t1]), remainder)) + } + CLType::Tuple2(arr) => { + let (t1, remainder) = depth_limited_to_json(depth, &arr[0], bytes)?; + let (t2, remainder) = depth_limited_to_json(depth, &arr[1], remainder)?; + Some((json!([t1, t2]), remainder)) + } + CLType::Tuple3(arr) => { + let (t1, remainder) = depth_limited_to_json(depth, &arr[0], bytes)?; + let (t2, remainder) = depth_limited_to_json(depth, &arr[1], remainder)?; + let (t3, remainder) = depth_limited_to_json(depth, &arr[2], remainder)?; + Some((json!([t1, t2, t3]), remainder)) + } + CLType::Any => None, + } +} + +fn simple_type_to_json(bytes: &[u8]) -> Option<(Value, &[u8])> { + let (value, remainder) = T::from_bytes(bytes).ok()?; + Some((json!(value), remainder)) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{bytesrepr::ToBytes, AsymmetricType, CLTyped, SecretKey}; + use alloc::collections::BTreeMap; + + fn test_value(value: T) { + let cl_value = CLValue::from_t(value.clone()).unwrap(); + let cl_value_as_json: Value = cl_value_to_json(&cl_value).unwrap(); + let expected = json!(value); + assert_eq!(cl_value_as_json, expected); + } 
+ + #[test] + fn list_of_ints_to_json_value() { + test_value::>(vec![]); + test_value(vec![10u32, 12u32]); + } + + #[test] + fn list_of_bools_to_json_value() { + test_value(vec![true, false]); + } + + #[test] + fn list_of_string_to_json_value() { + test_value(vec!["rust", "python"]); + } + + #[test] + fn list_of_public_keys_to_json_value() { + let a = PublicKey::from( + &SecretKey::secp256k1_from_bytes([3; SecretKey::SECP256K1_LENGTH]).unwrap(), + ); + let b = PublicKey::from( + &SecretKey::ed25519_from_bytes([3; SecretKey::ED25519_LENGTH]).unwrap(), + ); + let a_hex = a.to_hex(); + let b_hex = b.to_hex(); + let cl_value = CLValue::from_t(vec![a, b]).unwrap(); + let cl_value_as_json: Value = cl_value_to_json(&cl_value).unwrap(); + let expected = json!([a_hex, b_hex]); + assert_eq!(cl_value_as_json, expected); + } + + #[test] + fn list_of_list_of_public_keys_to_json_value() { + let a = PublicKey::from( + &SecretKey::secp256k1_from_bytes([3; SecretKey::SECP256K1_LENGTH]).unwrap(), + ); + let b = PublicKey::from( + &SecretKey::ed25519_from_bytes([3; PublicKey::ED25519_LENGTH]).unwrap(), + ); + let c = PublicKey::from( + &SecretKey::ed25519_from_bytes([6; PublicKey::ED25519_LENGTH]).unwrap(), + ); + let a_hex = a.to_hex(); + let b_hex = b.to_hex(); + let c_hex = c.to_hex(); + let cl_value = CLValue::from_t(vec![vec![a, b], vec![c]]).unwrap(); + let cl_value_as_json: Value = cl_value_to_json(&cl_value).unwrap(); + let expected = json!([[a_hex, b_hex], [c_hex]]); + assert_eq!(cl_value_as_json, expected); + } + + #[test] + fn map_of_string_to_list_of_ints_to_json_value() { + let key1 = String::from("first"); + let key2 = String::from("second"); + let value1 = vec![]; + let value2 = vec![1, 2, 3]; + let mut map: BTreeMap> = BTreeMap::new(); + map.insert(key1.clone(), value1.clone()); + map.insert(key2.clone(), value2.clone()); + let cl_value = CLValue::from_t(map).unwrap(); + let cl_value_as_json: Value = cl_value_to_json(&cl_value).unwrap(); + let expected = json!([ + { 
"key": key1, "value": value1 }, + { "key": key2, "value": value2 } + ]); + assert_eq!(cl_value_as_json, expected); + } + + #[test] + fn option_some_of_lists_to_json_value() { + test_value(Some(vec![1, 2, 3])); + } + + #[test] + fn option_none_to_json_value() { + test_value(Option::::None); + } + + #[test] + fn bytes_to_json_value() { + let bytes = [1_u8, 2]; + let cl_value = CLValue::from_t(bytes).unwrap(); + let cl_value_as_json = cl_value_to_json(&cl_value).unwrap(); + let expected = json!(base16::encode_lower(&bytes)); + assert_eq!(cl_value_as_json, expected); + } + + #[test] + fn result_ok_to_json_value() { + test_value(Result::, String>::Ok(vec![1, 2, 3])); + } + + #[test] + fn result_error_to_json_value() { + test_value(Result::, String>::Err(String::from("Upsss"))); + } + + #[test] + fn tuples_to_json_value() { + let v1 = String::from("Hello"); + let v2 = vec![1, 2, 3]; + let v3 = 1u8; + + test_value((v1.clone(),)); + test_value((v1.clone(), v2.clone())); + test_value((v1, v2, v3)); + } + + #[test] + fn json_encoding_nested_tuple_1_value_should_not_stack_overflow() { + // Returns a CLType corresponding to (((...(cl_type,),...),),) nested in tuples to + // `depth_limit`. 
+ fn wrap_in_tuple1(cl_type: CLType, current_depth: usize, depth_limit: usize) -> CLType { + if current_depth == depth_limit { + return cl_type; + } + wrap_in_tuple1( + CLType::Tuple1([Box::new(cl_type)]), + current_depth + 1, + depth_limit, + ) + } + + for depth_limit in &[1, CL_TYPE_RECURSION_DEPTH as usize] { + let cl_type = wrap_in_tuple1(CLType::Unit, 1, *depth_limit); + let cl_value = CLValue::from_components(cl_type, vec![]); + assert!(cl_value_to_json(&cl_value).is_some()); + } + + for depth_limit in &[CL_TYPE_RECURSION_DEPTH as usize + 1, 1000] { + let cl_type = wrap_in_tuple1(CLType::Unit, 1, *depth_limit); + let cl_value = CLValue::from_components(cl_type, vec![]); + assert!(cl_value_to_json(&cl_value).is_none()); + } + } +} diff --git a/casper_types_ver_2_0/src/contract_messages.rs b/casper_types_ver_2_0/src/contract_messages.rs new file mode 100644 index 00000000..7bf3ccc9 --- /dev/null +++ b/casper_types_ver_2_0/src/contract_messages.rs @@ -0,0 +1,228 @@ +//! Data types for interacting with contract level messages. 
+ +mod error; +mod messages; +mod topics; + +pub use error::FromStrError; +pub use messages::{Message, MessageChecksum, MessagePayload, Messages}; +pub use topics::{ + MessageTopicOperation, MessageTopicSummary, TopicNameHash, TOPIC_NAME_HASH_LENGTH, +}; + +use crate::{ + alloc::string::ToString, + bytesrepr::{self, FromBytes, ToBytes}, + checksummed_hex, AddressableEntityHash, KEY_HASH_LENGTH, +}; + +use core::convert::TryFrom; + +use alloc::{string::String, vec::Vec}; +use core::fmt::{Debug, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +const TOPIC_FORMATTED_STRING_PREFIX: &str = "topic-"; +const MESSAGE_ADDR_PREFIX: &str = "message-"; + +/// MessageTopicAddr +#[derive(PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct MessageAddr { + /// The entity addr. + entity_addr: AddressableEntityHash, + /// The hash of the name of the message topic. + topic_name_hash: TopicNameHash, + /// The message index. + message_index: Option, +} + +impl MessageAddr { + /// Constructs a new topic address based on the addressable entity addr and the hash of the + /// message topic name. + pub const fn new_topic_addr( + entity_addr: AddressableEntityHash, + topic_name_hash: TopicNameHash, + ) -> Self { + Self { + entity_addr, + topic_name_hash, + message_index: None, + } + } + + /// Constructs a new message address based on the addressable entity addr, the hash of the + /// message topic name and the message index in the topic. 
+ pub const fn new_message_addr( + entity_addr: AddressableEntityHash, + topic_name_hash: TopicNameHash, + message_index: u32, + ) -> Self { + Self { + entity_addr, + topic_name_hash, + message_index: Some(message_index), + } + } + + /// Formats the [`MessageAddr`] as a prefixed, hex-encoded string. + pub fn to_formatted_string(self) -> String { + match self.message_index { + Some(index) => { + format!( + "{}{}-{}-{:x}", + MESSAGE_ADDR_PREFIX, + base16::encode_lower(&self.entity_addr), + self.topic_name_hash.to_formatted_string(), + index, + ) + } + None => { + format!( + "{}{}{}-{}", + MESSAGE_ADDR_PREFIX, + TOPIC_FORMATTED_STRING_PREFIX, + base16::encode_lower(&self.entity_addr), + self.topic_name_hash.to_formatted_string(), + ) + } + } + } + + /// Parses a formatted string into a [`MessageAddr`]. + pub fn from_formatted_str(input: &str) -> Result { + let remainder = input + .strip_prefix(MESSAGE_ADDR_PREFIX) + .ok_or(FromStrError::InvalidPrefix)?; + + let (remainder, message_index) = match remainder.strip_prefix(TOPIC_FORMATTED_STRING_PREFIX) + { + Some(topic_string) => (topic_string, None), + None => { + let (remainder, message_index_str) = remainder + .rsplit_once('-') + .ok_or(FromStrError::MissingMessageIndex)?; + (remainder, Some(u32::from_str_radix(message_index_str, 16)?)) + } + }; + + let (entity_addr_str, topic_name_hash_str) = remainder + .split_once('-') + .ok_or(FromStrError::MissingMessageIndex)?; + + let bytes = checksummed_hex::decode(entity_addr_str)?; + let entity_addr = ::try_from(bytes[0..KEY_HASH_LENGTH].as_ref()) + .map_err(|err| FromStrError::EntityHashParseError(err.to_string()))?; + + let topic_name_hash = TopicNameHash::from_formatted_str(topic_name_hash_str)?; + Ok(MessageAddr { + entity_addr, + topic_name_hash, + message_index, + }) + } + + /// Returns the entity addr of this message topic. 
+ pub fn entity_addr(&self) -> AddressableEntityHash { + self.entity_addr + } +} + +impl Display for MessageAddr { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + match self.message_index { + Some(index) => { + write!( + f, + "{}-{}-{:x}", + base16::encode_lower(&self.entity_addr), + self.topic_name_hash, + index, + ) + } + None => { + write!( + f, + "{}-{}", + base16::encode_lower(&self.entity_addr), + self.topic_name_hash, + ) + } + } + } +} + +impl ToBytes for MessageAddr { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.append(&mut self.entity_addr.to_bytes()?); + buffer.append(&mut self.topic_name_hash.to_bytes()?); + buffer.append(&mut self.message_index.to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.entity_addr.serialized_length() + + self.topic_name_hash.serialized_length() + + self.message_index.serialized_length() + } +} + +impl FromBytes for MessageAddr { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (entity_addr, rem) = FromBytes::from_bytes(bytes)?; + let (topic_hash, rem) = FromBytes::from_bytes(rem)?; + let (message_index, rem) = FromBytes::from_bytes(rem)?; + Ok(( + MessageAddr { + entity_addr, + topic_name_hash: topic_hash, + message_index, + }, + rem, + )) + } +} + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> MessageAddr { + MessageAddr { + entity_addr: rng.gen(), + topic_name_hash: rng.gen(), + message_index: rng.gen(), + } + } +} + +#[cfg(test)] +mod tests { + use crate::{bytesrepr, KEY_HASH_LENGTH}; + + use super::{topics::TOPIC_NAME_HASH_LENGTH, *}; + + #[test] + fn serialization_roundtrip() { + let topic_addr = MessageAddr::new_topic_addr( + [1; KEY_HASH_LENGTH].into(), + [2; TOPIC_NAME_HASH_LENGTH].into(), + ); + bytesrepr::test_serialization_roundtrip(&topic_addr); + + let message_addr = MessageAddr::new_message_addr( + [1; KEY_HASH_LENGTH].into(), + [2; 
TOPIC_NAME_HASH_LENGTH].into(), + 3, + ); + bytesrepr::test_serialization_roundtrip(&message_addr); + } +} diff --git a/casper_types_ver_2_0/src/contract_messages/error.rs b/casper_types_ver_2_0/src/contract_messages/error.rs new file mode 100644 index 00000000..ba7f2cd3 --- /dev/null +++ b/casper_types_ver_2_0/src/contract_messages/error.rs @@ -0,0 +1,74 @@ +use core::array::TryFromSliceError; + +use alloc::string::String; +use core::{ + fmt::{self, Debug, Display, Formatter}, + num::ParseIntError, +}; + +/// Error while parsing message hashes from string. +#[derive(Debug)] +#[non_exhaustive] +pub enum FromStrError { + /// The prefix is invalid. + InvalidPrefix, + /// No message index at the end of the string. + MissingMessageIndex, + /// String not formatted correctly. + Formatting, + /// Cannot parse entity hash. + EntityHashParseError(String), + /// Cannot parse message topic hash. + MessageTopicParseError(String), + /// Failed to decode address portion of URef. + Hex(base16::DecodeError), + /// Failed to parse an int. + Int(ParseIntError), + /// The slice is the wrong length. 
+ Length(TryFromSliceError), +} + +impl From for FromStrError { + fn from(error: base16::DecodeError) -> Self { + FromStrError::Hex(error) + } +} + +impl From for FromStrError { + fn from(error: ParseIntError) -> Self { + FromStrError::Int(error) + } +} + +impl From for FromStrError { + fn from(error: TryFromSliceError) -> Self { + FromStrError::Length(error) + } +} + +impl Display for FromStrError { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match self { + FromStrError::InvalidPrefix => { + write!(f, "prefix is invalid") + } + FromStrError::MissingMessageIndex => { + write!(f, "no message index found at the end of the string") + } + FromStrError::Formatting => { + write!(f, "string not properly formatted") + } + FromStrError::EntityHashParseError(err) => { + write!(f, "could not parse entity hash: {}", err) + } + FromStrError::MessageTopicParseError(err) => { + write!(f, "could not parse topic hash: {}", err) + } + FromStrError::Hex(error) => { + write!(f, "failed to decode address portion from hex: {}", error) + } + FromStrError::Int(error) => write!(f, "failed to parse an int: {}", error), + FromStrError::Length(error) => write!(f, "address portion is wrong length: {}", error), + } + } +} diff --git a/casper_types_ver_2_0/src/contract_messages/messages.rs b/casper_types_ver_2_0/src/contract_messages/messages.rs new file mode 100644 index 00000000..0f229e6d --- /dev/null +++ b/casper_types_ver_2_0/src/contract_messages/messages.rs @@ -0,0 +1,323 @@ +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + checksummed_hex, AddressableEntityHash, Key, +}; + +use alloc::{string::String, vec::Vec}; +use core::{convert::TryFrom, fmt::Debug}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Alphanumeric, DistString, Distribution, Standard}, + Rng, +}; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{de::Error as SerdeError, Deserialize, 
+    Deserializer, Serialize, Serializer};
+
+use super::{FromStrError, TopicNameHash};
+
+/// Collection of multiple messages.
+pub type Messages = Vec<Message>;
+
+/// The length of a message digest
+pub const MESSAGE_CHECKSUM_LENGTH: usize = 32;
+
+const MESSAGE_CHECKSUM_STRING_PREFIX: &str = "message-checksum-";
+
+/// A newtype wrapping an array which contains the raw bytes of
+/// the hash of the message emitted.
+#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Clone, Copy, Debug)]
+#[cfg_attr(feature = "datasize", derive(DataSize))]
+#[cfg_attr(
+    feature = "json-schema",
+    derive(JsonSchema),
+    schemars(description = "Message checksum as a formatted string.")
+)]
+pub struct MessageChecksum(
+    #[cfg_attr(feature = "json-schema", schemars(skip, with = "String"))]
+    pub [u8; MESSAGE_CHECKSUM_LENGTH],
+);
+
+impl MessageChecksum {
+    /// Returns inner value of the message checksum.
+    pub fn value(&self) -> [u8; MESSAGE_CHECKSUM_LENGTH] {
+        self.0
+    }
+
+    /// Formats the `MessageChecksum` as a human readable string.
+    pub fn to_formatted_string(self) -> String {
+        format!(
+            "{}{}",
+            MESSAGE_CHECKSUM_STRING_PREFIX,
+            base16::encode_lower(&self.0),
+        )
+    }
+
+    /// Parses a string formatted as per `Self::to_formatted_string()` into a
+    /// `MessageChecksum`.
+    pub fn from_formatted_str(input: &str) -> Result<Self, FromStrError> {
+        let hex_addr = input
+            .strip_prefix(MESSAGE_CHECKSUM_STRING_PREFIX)
+            .ok_or(FromStrError::InvalidPrefix)?;
+
+        let bytes =
+            <[u8; MESSAGE_CHECKSUM_LENGTH]>::try_from(checksummed_hex::decode(hex_addr)?.as_ref())?;
+        Ok(MessageChecksum(bytes))
+    }
+}
+
+impl ToBytes for MessageChecksum {
+    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {
+        let mut buffer = bytesrepr::allocate_buffer(self)?;
+        buffer.append(&mut self.0.to_bytes()?);
+        Ok(buffer)
+    }
+
+    fn serialized_length(&self) -> usize {
+        self.0.serialized_length()
+    }
+}
+
+impl FromBytes for MessageChecksum {
+    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {
+        let (checksum, rem) = FromBytes::from_bytes(bytes)?;
+        Ok((MessageChecksum(checksum), rem))
+    }
+}
+
+impl Serialize for MessageChecksum {
+    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
+        if serializer.is_human_readable() {
+            self.to_formatted_string().serialize(serializer)
+        } else {
+            self.0.serialize(serializer)
+        }
+    }
+}
+
+impl<'de> Deserialize<'de> for MessageChecksum {
+    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
+        if deserializer.is_human_readable() {
+            let formatted_string = String::deserialize(deserializer)?;
+            MessageChecksum::from_formatted_str(&formatted_string).map_err(SerdeError::custom)
+        } else {
+            let bytes = <[u8; MESSAGE_CHECKSUM_LENGTH]>::deserialize(deserializer)?;
+            Ok(MessageChecksum(bytes))
+        }
+    }
+}
+
+const MESSAGE_PAYLOAD_TAG_LENGTH: usize = U8_SERIALIZED_LENGTH;
+
+/// Tag for a message payload that contains a human readable string.
+pub const MESSAGE_PAYLOAD_STRING_TAG: u8 = 0;
+
+/// The payload of the message emitted by an addressable entity during execution.
+#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)]
+#[cfg_attr(feature = "datasize", derive(DataSize))]
+#[cfg_attr(feature = "json-schema", derive(JsonSchema))]
+pub enum MessagePayload {
+    /// Human readable string message.
+    String(String),
+}
+
+impl<T> From<T> for MessagePayload
+where
+    T: Into<String>,
+{
+    fn from(value: T) -> Self {
+        Self::String(value.into())
+    }
+}
+
+impl ToBytes for MessagePayload {
+    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {
+        let mut buffer = bytesrepr::allocate_buffer(self)?;
+        match self {
+            MessagePayload::String(message_string) => {
+                buffer.insert(0, MESSAGE_PAYLOAD_STRING_TAG);
+                buffer.extend(message_string.to_bytes()?);
+            }
+        }
+        Ok(buffer)
+    }
+
+    fn serialized_length(&self) -> usize {
+        MESSAGE_PAYLOAD_TAG_LENGTH
+            + match self {
+                MessagePayload::String(message_string) => message_string.serialized_length(),
+            }
+    }
+}
+
+impl FromBytes for MessagePayload {
+    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {
+        let (tag, remainder) = u8::from_bytes(bytes)?;
+        match tag {
+            MESSAGE_PAYLOAD_STRING_TAG => {
+                let (message, remainder): (String, _) = FromBytes::from_bytes(remainder)?;
+                Ok((Self::String(message), remainder))
+            }
+            _ => Err(bytesrepr::Error::Formatting),
+        }
+    }
+}
+
+/// Message that was emitted by an addressable entity during execution.
+#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)]
+#[cfg_attr(feature = "datasize", derive(DataSize))]
+#[cfg_attr(feature = "json-schema", derive(JsonSchema))]
+pub struct Message {
+    /// The identity of the entity that produced the message.
+    entity_addr: AddressableEntityHash,
+    /// The payload of the message.
+    message: MessagePayload,
+    /// The name of the topic on which the message was emitted on.
+    topic_name: String,
+    /// The hash of the name of the topic.
+    topic_name_hash: TopicNameHash,
+    /// Message index in the topic.
+    index: u32,
+}
+
+impl Message {
+    /// Creates new instance of [`Message`] with the specified source and message payload.
+    pub fn new(
+        source: AddressableEntityHash,
+        message: MessagePayload,
+        topic_name: String,
+        topic_name_hash: TopicNameHash,
+        index: u32,
+    ) -> Self {
+        Self {
+            entity_addr: source,
+            message,
+            topic_name,
+            topic_name_hash,
+            index,
+        }
+    }
+
+    /// Returns a reference to the identity of the entity that produced the message.
+    pub fn entity_addr(&self) -> &AddressableEntityHash {
+        &self.entity_addr
+    }
+
+    /// Returns a reference to the payload of the message.
+    pub fn payload(&self) -> &MessagePayload {
+        &self.message
+    }
+
+    /// Returns a reference to the name of the topic on which the message was emitted on.
+    pub fn topic_name(&self) -> &String {
+        &self.topic_name
+    }
+
+    /// Returns a reference to the hash of the name of the topic.
+    pub fn topic_name_hash(&self) -> &TopicNameHash {
+        &self.topic_name_hash
+    }
+
+    /// Returns the index of the message in the topic.
+    pub fn index(&self) -> u32 {
+        self.index
+    }
+
+    /// Returns a new [`Key::Message`] based on the information in the message.
+    /// This key can be used to query the checksum record for the message in global state.
+    pub fn message_key(&self) -> Key {
+        Key::message(self.entity_addr, self.topic_name_hash, self.index)
+    }
+
+    /// Returns a new [`Key::Message`] based on the information in the message.
+    /// This key can be used to query the control record for the topic of this message in global
+    /// state.
+    pub fn topic_key(&self) -> Key {
+        Key::message_topic(self.entity_addr, self.topic_name_hash)
+    }
+}
+
+impl ToBytes for Message {
+    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {
+        let mut buffer = bytesrepr::allocate_buffer(self)?;
+        buffer.append(&mut self.entity_addr.to_bytes()?);
+        buffer.append(&mut self.message.to_bytes()?);
+        buffer.append(&mut self.topic_name.to_bytes()?);
+        buffer.append(&mut self.topic_name_hash.to_bytes()?);
+        buffer.append(&mut self.index.to_bytes()?);
+        Ok(buffer)
+    }
+
+    fn serialized_length(&self) -> usize {
+        self.entity_addr.serialized_length()
+            + self.message.serialized_length()
+            + self.topic_name.serialized_length()
+            + self.topic_name_hash.serialized_length()
+            + self.index.serialized_length()
+    }
+}
+
+impl FromBytes for Message {
+    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {
+        let (entity_addr, rem) = FromBytes::from_bytes(bytes)?;
+        let (message, rem) = FromBytes::from_bytes(rem)?;
+        let (topic_name, rem) = FromBytes::from_bytes(rem)?;
+        let (topic_name_hash, rem) = FromBytes::from_bytes(rem)?;
+        let (index, rem) = FromBytes::from_bytes(rem)?;
+        Ok((
+            Message {
+                entity_addr,
+                message,
+                topic_name,
+                topic_name_hash,
+                index,
+            },
+            rem,
+        ))
+    }
+}
+
+#[cfg(any(feature = "testing", test))]
+impl Distribution<Message> for Standard {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Message {
+        let topic_name = Alphanumeric.sample_string(rng, 32);
+        let topic_name_hash = crate::crypto::blake2b(&topic_name).into();
+        let message = Alphanumeric.sample_string(rng, 64).into();
+
+        Message {
+            entity_addr: rng.gen(),
+            message,
+            topic_name,
+            topic_name_hash,
+            index: rng.gen(),
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::{bytesrepr, contract_messages::topics::TOPIC_NAME_HASH_LENGTH, KEY_HASH_LENGTH};
+
+    use super::*;
+
+    #[test]
+    fn serialization_roundtrip() {
+        let message_checksum = MessageChecksum([1; MESSAGE_CHECKSUM_LENGTH]);
+        bytesrepr::test_serialization_roundtrip(&message_checksum);
+
+        let message_payload = "message payload".into();
+        bytesrepr::test_serialization_roundtrip(&message_payload);
+
+        let message = Message::new(
+            [1; KEY_HASH_LENGTH].into(),
+            message_payload,
+            "test_topic".to_string(),
+            TopicNameHash::new([0x4du8; TOPIC_NAME_HASH_LENGTH]),
+            10,
+        );
+        bytesrepr::test_serialization_roundtrip(&message);
+    }
+}
diff --git a/casper_types_ver_2_0/src/contract_messages/topics.rs b/casper_types_ver_2_0/src/contract_messages/topics.rs
new file mode 100644
index 00000000..9a41d3e3
--- /dev/null
+++ b/casper_types_ver_2_0/src/contract_messages/topics.rs
@@ -0,0 +1,254 @@
+use crate::{
+    bytesrepr::{self, FromBytes, ToBytes},
+    checksummed_hex, BlockTime,
+};
+
+use core::convert::TryFrom;
+
+use alloc::{string::String, vec::Vec};
+use core::fmt::{Debug, Display, Formatter};
+
+#[cfg(feature = "datasize")]
+use datasize::DataSize;
+use rand::{
+    distributions::{Distribution, Standard},
+    Rng,
+};
+#[cfg(feature = "json-schema")]
+use schemars::JsonSchema;
+use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer};
+
+use super::error::FromStrError;
+
+/// The length in bytes of a topic name hash.
+pub const TOPIC_NAME_HASH_LENGTH: usize = 32;
+const MESSAGE_TOPIC_NAME_HASH: &str = "topic-name-";
+
+/// The hash of the name of the message topic.
+#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Clone, Copy, Hash)]
+#[cfg_attr(feature = "datasize", derive(DataSize))]
+#[cfg_attr(
+    feature = "json-schema",
+    derive(JsonSchema),
+    schemars(description = "The hash of the name of the message topic.")
+)]
+pub struct TopicNameHash(
+    #[cfg_attr(feature = "json-schema", schemars(skip, with = "String"))]
+    pub [u8; TOPIC_NAME_HASH_LENGTH],
+);
+
+impl TopicNameHash {
+    /// Returns a new [`TopicNameHash`] based on the specified value.
+    pub const fn new(topic_name_hash: [u8; TOPIC_NAME_HASH_LENGTH]) -> TopicNameHash {
+        TopicNameHash(topic_name_hash)
+    }
+
+    /// Returns inner value of the topic hash.
+    pub fn value(&self) -> [u8; TOPIC_NAME_HASH_LENGTH] {
+        self.0
+    }
+
+    /// Formats the [`TopicNameHash`] as a prefixed, hex-encoded string.
+    pub fn to_formatted_string(self) -> String {
+        format!(
+            "{}{}",
+            MESSAGE_TOPIC_NAME_HASH,
+            base16::encode_lower(&self.0),
+        )
+    }
+
+    /// Parses a string formatted as per `Self::to_formatted_string()` into a [`TopicNameHash`].
+    pub fn from_formatted_str(input: &str) -> Result<Self, FromStrError> {
+        let remainder = input
+            .strip_prefix(MESSAGE_TOPIC_NAME_HASH)
+            .ok_or(FromStrError::InvalidPrefix)?;
+        let bytes =
+            <[u8; TOPIC_NAME_HASH_LENGTH]>::try_from(checksummed_hex::decode(remainder)?.as_ref())?;
+        Ok(TopicNameHash(bytes))
+    }
+}
+
+impl ToBytes for TopicNameHash {
+    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {
+        let mut buffer = bytesrepr::allocate_buffer(self)?;
+        buffer.append(&mut self.0.to_bytes()?);
+        Ok(buffer)
+    }
+
+    fn serialized_length(&self) -> usize {
+        self.0.serialized_length()
+    }
+}
+
+impl FromBytes for TopicNameHash {
+    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {
+        let (hash, rem) = FromBytes::from_bytes(bytes)?;
+        Ok((TopicNameHash(hash), rem))
+    }
+}
+
+impl Serialize for TopicNameHash {
+    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
+        if serializer.is_human_readable() {
+            self.to_formatted_string().serialize(serializer)
+        } else {
+            self.0.serialize(serializer)
+        }
+    }
+}
+
+impl<'de> Deserialize<'de> for TopicNameHash {
+    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
+        if deserializer.is_human_readable() {
+            let formatted_string = String::deserialize(deserializer)?;
+            TopicNameHash::from_formatted_str(&formatted_string).map_err(SerdeError::custom)
+        } else {
+            let bytes = <[u8; TOPIC_NAME_HASH_LENGTH]>::deserialize(deserializer)?;
+            Ok(TopicNameHash(bytes))
+        }
+    }
+}
+
+impl Display for TopicNameHash {
+    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
+        write!(f, "{}", base16::encode_lower(&self.0))
+    }
+}
+
+impl Debug for TopicNameHash {
+    fn fmt(&self, f: &mut Formatter) -> core::fmt::Result {
+        write!(f, "MessageTopicHash({})", base16::encode_lower(&self.0))
+    }
+}
+
+impl Distribution<TopicNameHash> for Standard {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> TopicNameHash {
+        TopicNameHash(rng.gen())
+    }
+}
+
+impl From<[u8; TOPIC_NAME_HASH_LENGTH]> for TopicNameHash {
+    fn from(value: [u8; TOPIC_NAME_HASH_LENGTH]) -> Self {
+        TopicNameHash(value)
+    }
+}
+
+/// Summary of a message topic that will be stored in global state.
+#[derive(Eq, PartialEq, Clone, Debug, Serialize, Deserialize)]
+#[cfg_attr(feature = "datasize", derive(DataSize))]
+#[cfg_attr(feature = "json-schema", derive(JsonSchema))]
+pub struct MessageTopicSummary {
+    /// Number of messages in this topic.
+    pub(crate) message_count: u32,
+    /// Block timestamp in which these messages were emitted.
+    pub(crate) blocktime: BlockTime,
+}
+
+impl MessageTopicSummary {
+    /// Creates a new topic summary.
+    pub fn new(message_count: u32, blocktime: BlockTime) -> Self {
+        Self {
+            message_count,
+            blocktime,
+        }
+    }
+
+    /// Returns the number of messages that were sent on this topic.
+    pub fn message_count(&self) -> u32 {
+        self.message_count
+    }
+
+    /// Returns the block time.
+    pub fn blocktime(&self) -> BlockTime {
+        self.blocktime
+    }
+}
+
+impl ToBytes for MessageTopicSummary {
+    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {
+        let mut buffer = bytesrepr::allocate_buffer(self)?;
+        buffer.append(&mut self.message_count.to_bytes()?);
+        buffer.append(&mut self.blocktime.to_bytes()?);
+        Ok(buffer)
+    }
+
+    fn serialized_length(&self) -> usize {
+        self.message_count.serialized_length() + self.blocktime.serialized_length()
+    }
+}
+
+impl FromBytes for MessageTopicSummary {
+    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {
+        let (message_count, rem) = FromBytes::from_bytes(bytes)?;
+        let (blocktime, rem) = FromBytes::from_bytes(rem)?;
+        Ok((
+            MessageTopicSummary {
+                message_count,
+                blocktime,
+            },
+            rem,
+        ))
+    }
+}
+
+const TOPIC_OPERATION_ADD_TAG: u8 = 0;
+const OPERATION_MAX_SERIALIZED_LEN: usize = 1;
+
+/// Operations that can be performed on message topics.
+#[derive(Debug, PartialEq)]
+pub enum MessageTopicOperation {
+    /// Add a new message topic.
+    Add,
+}
+
+impl MessageTopicOperation {
+    /// Maximum serialized length of a message topic operation.
+    pub const fn max_serialized_len() -> usize {
+        OPERATION_MAX_SERIALIZED_LEN
+    }
+}
+
+impl ToBytes for MessageTopicOperation {
+    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {
+        let mut buffer = bytesrepr::allocate_buffer(self)?;
+        match self {
+            MessageTopicOperation::Add => buffer.push(TOPIC_OPERATION_ADD_TAG),
+        }
+        Ok(buffer)
+    }
+
+    fn serialized_length(&self) -> usize {
+        match self {
+            MessageTopicOperation::Add => 1,
+        }
+    }
+}
+
+impl FromBytes for MessageTopicOperation {
+    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {
+        let (tag, remainder): (u8, &[u8]) = FromBytes::from_bytes(bytes)?;
+        match tag {
+            TOPIC_OPERATION_ADD_TAG => Ok((MessageTopicOperation::Add, remainder)),
+            _ => Err(bytesrepr::Error::Formatting),
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::bytesrepr;
+
+    use super::*;
+
+    #[test]
+    fn serialization_roundtrip() {
+        let topic_name_hash = TopicNameHash::new([0x4du8; TOPIC_NAME_HASH_LENGTH]);
+        bytesrepr::test_serialization_roundtrip(&topic_name_hash);
+
+        let topic_summary = MessageTopicSummary::new(10, BlockTime::new(100));
+        bytesrepr::test_serialization_roundtrip(&topic_summary);
+
+        let topic_operation = MessageTopicOperation::Add;
+        bytesrepr::test_serialization_roundtrip(&topic_operation);
+    }
+}
diff --git a/casper_types_ver_2_0/src/contract_wasm.rs b/casper_types_ver_2_0/src/contract_wasm.rs
new file mode 100644
index 00000000..57019cde
--- /dev/null
+++ b/casper_types_ver_2_0/src/contract_wasm.rs
@@ -0,0 +1,373 @@
+use alloc::{format, string::String, vec::Vec};
+use core::{
+    array::TryFromSliceError,
+    convert::TryFrom,
+    fmt::{self, Debug, Display, Formatter},
+};
+
+#[cfg(feature = "datasize")]
+use datasize::DataSize;
+#[cfg(feature = "json-schema")]
+use schemars::{gen::SchemaGenerator, schema::Schema, JsonSchema};
+use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer};
+
+use crate::{
+    account,
+    addressable_entity::TryFromSliceForAccountHashError,
+    bytesrepr::{Bytes, Error, FromBytes, ToBytes},
+    checksummed_hex, uref, ByteCode, ByteCodeKind, CLType, CLTyped, HashAddr,
+};
+
+const CONTRACT_WASM_MAX_DISPLAY_LEN: usize = 16;
+const KEY_HASH_LENGTH: usize = 32;
+const WASM_STRING_PREFIX: &str = "contract-wasm-";
+
+/// Associated error type of `TryFrom<&[u8]>` for `ContractWasmHash`.
+#[derive(Debug)]
+pub struct TryFromSliceForContractHashError(());
+
+#[derive(Debug)]
+#[non_exhaustive]
+pub enum FromStrError {
+    InvalidPrefix,
+    Hex(base16::DecodeError),
+    Account(TryFromSliceForAccountHashError),
+    Hash(TryFromSliceError),
+    AccountHash(account::FromStrError),
+    URef(uref::FromStrError),
+}
+
+impl From<base16::DecodeError> for FromStrError {
+    fn from(error: base16::DecodeError) -> Self {
+        FromStrError::Hex(error)
+    }
+}
+
+impl From<TryFromSliceForAccountHashError> for FromStrError {
+    fn from(error: TryFromSliceForAccountHashError) -> Self {
+        FromStrError::Account(error)
+    }
+}
+
+impl From<TryFromSliceError> for FromStrError {
+    fn from(error: TryFromSliceError) -> Self {
+        FromStrError::Hash(error)
+    }
+}
+
+impl From<account::FromStrError> for FromStrError {
+    fn from(error: account::FromStrError) -> Self {
+        FromStrError::AccountHash(error)
+    }
+}
+
+impl From<uref::FromStrError> for FromStrError {
+    fn from(error: uref::FromStrError) -> Self {
+        FromStrError::URef(error)
+    }
+}
+
+impl Display for FromStrError {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        match self {
+            FromStrError::InvalidPrefix => write!(f, "invalid prefix"),
+            FromStrError::Hex(error) => write!(f, "decode from hex: {}", error),
+            FromStrError::Account(error) => write!(f, "account from string error: {:?}", error),
+            FromStrError::Hash(error) => write!(f, "hash from string error: {}", error),
+            FromStrError::AccountHash(error) => {
+                write!(f, "account hash from string error: {:?}", error)
+            }
+            FromStrError::URef(error) => write!(f, "uref from string error: {:?}", error),
+        }
+    }
+}
+
+/// A newtype wrapping a `HashAddr` which is the raw bytes of
+/// the ContractWasmHash
+#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy)]
+#[cfg_attr(feature = "datasize", derive(DataSize))]
+pub struct ContractWasmHash(HashAddr);
+
+impl ContractWasmHash {
+    /// Constructs a new `ContractWasmHash` from the raw bytes of the contract wasm hash.
+    pub const fn new(value: HashAddr) -> ContractWasmHash {
+        ContractWasmHash(value)
+    }
+
+    /// Returns the raw bytes of the contract hash as an array.
+    pub fn value(&self) -> HashAddr {
+        self.0
+    }
+
+    /// Returns the raw bytes of the contract hash as a `slice`.
+    pub fn as_bytes(&self) -> &[u8] {
+        &self.0
+    }
+
+    /// Formats the `ContractWasmHash` for users getting and putting.
+    pub fn to_formatted_string(self) -> String {
+        format!("{}{}", WASM_STRING_PREFIX, base16::encode_lower(&self.0),)
+    }
+
+    /// Parses a string formatted as per `Self::to_formatted_string()` into a
+    /// `ContractWasmHash`.
+    pub fn from_formatted_str(input: &str) -> Result<Self, FromStrError> {
+        let remainder = input
+            .strip_prefix(WASM_STRING_PREFIX)
+            .ok_or(FromStrError::InvalidPrefix)?;
+        let bytes = HashAddr::try_from(checksummed_hex::decode(remainder)?.as_ref())?;
+        Ok(ContractWasmHash(bytes))
+    }
+}
+
+impl Display for ContractWasmHash {
+    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
+        write!(f, "{}", base16::encode_lower(&self.0))
+    }
+}
+
+impl Debug for ContractWasmHash {
+    fn fmt(&self, f: &mut Formatter) -> core::fmt::Result {
+        write!(f, "ContractWasmHash({})", base16::encode_lower(&self.0))
+    }
+}
+
+impl CLTyped for ContractWasmHash {
+    fn cl_type() -> CLType {
+        CLType::ByteArray(KEY_HASH_LENGTH as u32)
+    }
+}
+
+impl ToBytes for ContractWasmHash {
+    #[inline(always)]
+    fn to_bytes(&self) -> Result<Vec<u8>, Error> {
+        self.0.to_bytes()
+    }
+
+    #[inline(always)]
+    fn serialized_length(&self) -> usize {
+        self.0.serialized_length()
+    }
+
+    #[inline(always)]
+    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {
+        self.0.write_bytes(writer)?;
+        Ok(())
+    }
+}
+
+impl FromBytes for ContractWasmHash {
+    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {
+        let (bytes, rem) = FromBytes::from_bytes(bytes)?;
+        Ok((ContractWasmHash::new(bytes), rem))
+    }
+}
+
+impl From<[u8; 32]> for ContractWasmHash {
+    fn from(bytes: [u8; 32]) -> Self {
+        ContractWasmHash(bytes)
+    }
+}
+
+impl Serialize for ContractWasmHash {
+    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
+        if serializer.is_human_readable() {
+            self.to_formatted_string().serialize(serializer)
+        } else {
+            self.0.serialize(serializer)
+        }
+    }
+}
+
+impl<'de> Deserialize<'de> for ContractWasmHash {
+    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
+        if deserializer.is_human_readable() {
+            let formatted_string = String::deserialize(deserializer)?;
+            ContractWasmHash::from_formatted_str(&formatted_string).map_err(SerdeError::custom)
+        } else {
+            let bytes = HashAddr::deserialize(deserializer)?;
+            Ok(ContractWasmHash(bytes))
+        }
+    }
+}
+
+impl AsRef<[u8]> for ContractWasmHash {
+    fn as_ref(&self) -> &[u8] {
+        self.0.as_ref()
+    }
+}
+
+impl TryFrom<&[u8]> for ContractWasmHash {
+    type Error = TryFromSliceForContractHashError;
+
+    fn try_from(bytes: &[u8]) -> Result<Self, Self::Error> {
+        HashAddr::try_from(bytes)
+            .map(ContractWasmHash::new)
+            .map_err(|_| TryFromSliceForContractHashError(()))
+    }
+}
+
+impl TryFrom<&Vec<u8>> for ContractWasmHash {
+    type Error = TryFromSliceForContractHashError;
+
+    fn try_from(bytes: &Vec<u8>) -> Result<Self, Self::Error> {
+        HashAddr::try_from(bytes as &[u8])
+            .map(ContractWasmHash::new)
+            .map_err(|_| TryFromSliceForContractHashError(()))
+    }
+}
+
+#[cfg(feature = "json-schema")]
+impl JsonSchema for ContractWasmHash {
+    fn schema_name() -> String {
+        String::from("ContractWasmHash")
+    }
+
+    fn json_schema(gen: &mut SchemaGenerator) -> Schema {
+        let schema = gen.subschema_for::<String>();
+        let mut schema_object = schema.into_object();
+        schema_object.metadata().description =
+            Some("The hash address of the contract wasm".to_string());
+        schema_object.into()
+    }
+}
+
+/// A container for contract's WASM bytes.
+#[derive(PartialEq, Eq, Clone, Serialize, Deserialize)]
+#[cfg_attr(feature = "json-schema", derive(JsonSchema))]
+#[cfg_attr(feature = "datasize", derive(DataSize))]
+pub struct ContractWasm {
+    bytes: Bytes,
+}
+
+impl ContractWasm {
+    #[cfg(test)]
+    pub fn new(bytes: Vec<u8>) -> Self {
+        Self {
+            bytes: bytes.into(),
+        }
+    }
+
+    fn take_bytes(self) -> Vec<u8> {
+        self.bytes.into()
+    }
+}
+
+impl Debug for ContractWasm {
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        if self.bytes.len() > CONTRACT_WASM_MAX_DISPLAY_LEN {
+            write!(
+                f,
+                "ContractWasm(0x{}...)",
+                base16::encode_lower(&self.bytes[..CONTRACT_WASM_MAX_DISPLAY_LEN])
+            )
+        } else {
+            write!(f, "ContractWasm(0x{})", base16::encode_lower(&self.bytes))
+        }
+    }
+}
+
+impl ToBytes for ContractWasm {
+    fn to_bytes(&self) -> Result<Vec<u8>, Error> {
+        self.bytes.to_bytes()
+    }
+
+    fn serialized_length(&self) -> usize {
+        self.bytes.serialized_length()
+    }
+
+    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {
+        self.bytes.write_bytes(writer)?;
+        Ok(())
+    }
+}
+
+impl FromBytes for ContractWasm {
+    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {
+        let (bytes, rem1) = FromBytes::from_bytes(bytes)?;
+        Ok((ContractWasm { bytes }, rem1))
+    }
+}
+
+impl From<ContractWasm> for ByteCode {
+    fn from(value: ContractWasm) -> Self {
+        ByteCode::new(ByteCodeKind::V1CasperWasm, value.take_bytes())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    #[test]
+    fn test_debug_repr_of_short_wasm() {
+        const SIZE: usize = 8;
+        let wasm_bytes = vec![0; SIZE];
+        let contract_wasm = ContractWasm::new(wasm_bytes);
+        // String output is less than the bytes itself
+        assert_eq!(
+            format!("{:?}", contract_wasm),
+            "ContractWasm(0x0000000000000000)"
+        );
+    }
+
+    #[test]
+    fn test_debug_repr_of_long_wasm() {
+        const SIZE: usize = 65;
+        let wasm_bytes = vec![0; SIZE];
+        let contract_wasm = ContractWasm::new(wasm_bytes);
+        // String output is less than the bytes itself
+        assert_eq!(
+            format!("{:?}", contract_wasm),
+            "ContractWasm(0x00000000000000000000000000000000...)"
+        );
+    }
+
+    #[test]
+    fn contract_wasm_hash_from_slice() {
+        let bytes: Vec<u8> = (0..32).collect();
+        let contract_hash =
+            HashAddr::try_from(&bytes[..]).expect("should create contract wasm hash");
+        let contract_hash = ContractWasmHash::new(contract_hash);
+        assert_eq!(&bytes, &contract_hash.as_bytes());
+    }
+
+    #[test]
+    fn contract_wasm_hash_from_str() {
+        let contract_hash = ContractWasmHash([3; 32]);
+        let encoded = contract_hash.to_formatted_string();
+        let decoded = ContractWasmHash::from_formatted_str(&encoded).unwrap();
+        assert_eq!(contract_hash, decoded);
+
+        let invalid_prefix =
+            "contractwasm-0000000000000000000000000000000000000000000000000000000000000000";
+        assert!(ContractWasmHash::from_formatted_str(invalid_prefix).is_err());
+
+        let short_addr =
+            "contract-wasm-00000000000000000000000000000000000000000000000000000000000000";
+        assert!(ContractWasmHash::from_formatted_str(short_addr).is_err());
+
+        let long_addr =
+            "contract-wasm-000000000000000000000000000000000000000000000000000000000000000000";
+        assert!(ContractWasmHash::from_formatted_str(long_addr).is_err());
+
+        let invalid_hex =
+            "contract-wasm-000000000000000000000000000000000000000000000000000000000000000g";
+        assert!(ContractWasmHash::from_formatted_str(invalid_hex).is_err());
+    }
+
+    #[test]
+    fn contract_wasm_hash_serde_roundtrip() {
+        let contract_hash = ContractWasmHash([255; 32]);
+        let serialized = bincode::serialize(&contract_hash).unwrap();
+        let deserialized = bincode::deserialize(&serialized).unwrap();
+        assert_eq!(contract_hash, deserialized)
+    }
+
+    #[test]
+    fn contract_wasm_hash_json_roundtrip() {
+        let contract_hash = ContractWasmHash([255; 32]);
+        let json_string = serde_json::to_string_pretty(&contract_hash).unwrap();
+        let decoded = serde_json::from_str(&json_string).unwrap();
+        assert_eq!(contract_hash, decoded)
+    }
+}
diff --git a/casper_types_ver_2_0/src/contracts.rs
b/casper_types_ver_2_0/src/contracts.rs
new file mode 100644
index 00000000..02df4fc5
--- /dev/null
+++ b/casper_types_ver_2_0/src/contracts.rs
@@ -0,0 +1,1308 @@
+//! Data types for supporting contract headers feature.
+// TODO - remove once schemars stops causing warning.
+#![allow(clippy::field_reassign_with_default)]
+
+use alloc::{
+    collections::{BTreeMap, BTreeSet},
+    format,
+    string::String,
+    vec::Vec,
+};
+use core::{
+    array::TryFromSliceError,
+    convert::TryFrom,
+    fmt::{self, Debug, Display, Formatter},
+};
+
+#[cfg(feature = "datasize")]
+use datasize::DataSize;
+#[cfg(feature = "json-schema")]
+use schemars::{gen::SchemaGenerator, schema::Schema, JsonSchema};
+use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer};
+
+use crate::{
+    account,
+    addressable_entity::{NamedKeys, TryFromSliceForAccountHashError},
+    bytesrepr::{self, FromBytes, ToBytes, U32_SERIALIZED_LENGTH},
+    checksummed_hex,
+    contract_wasm::ContractWasmHash,
+    package::{PackageKind, PackageStatus},
+    uref,
+    uref::URef,
+    AddressableEntityHash, CLType, CLTyped, EntityVersionKey, EntryPoint, EntryPoints, Groups,
+    HashAddr, Key, Package, ProtocolVersion, KEY_HASH_LENGTH,
+};
+
+/// Maximum number of distinct user groups.
+pub const MAX_GROUPS: u8 = 10;
+/// Maximum number of URefs which can be assigned across all user groups.
+pub const MAX_TOTAL_UREFS: usize = 100;
+
+const CONTRACT_STRING_PREFIX: &str = "contract-";
+const PACKAGE_STRING_PREFIX: &str = "contract-package-";
+// We need to support the legacy prefix of "contract-package-wasm".
+const PACKAGE_STRING_LEGACY_EXTRA_PREFIX: &str = "wasm";
+
+/// Set of errors which may happen when working with contract headers.
+#[derive(Debug, PartialEq, Eq)]
+#[repr(u8)]
+#[non_exhaustive]
+pub enum Error {
+    /// Attempt to override an existing or previously existing version with a
+    /// new header (this is not allowed to ensure immutability of a given
+    /// version).
+    /// ```
+    /// # use casper_types_ver_2_0::contracts::Error;
+    /// assert_eq!(1, Error::PreviouslyUsedVersion as u8);
+    /// ```
+    PreviouslyUsedVersion = 1,
+    /// Attempted to disable a contract that does not exist.
+    /// ```
+    /// # use casper_types_ver_2_0::contracts::Error;
+    /// assert_eq!(2, Error::ContractNotFound as u8);
+    /// ```
+    ContractNotFound = 2,
+    /// Attempted to create a user group which already exists (use the update
+    /// function to change an existing user group).
+    /// ```
+    /// # use casper_types_ver_2_0::contracts::Error;
+    /// assert_eq!(3, Error::GroupAlreadyExists as u8);
+    /// ```
+    GroupAlreadyExists = 3,
+    /// Attempted to add a new user group which exceeds the allowed maximum
+    /// number of groups.
+    /// ```
+    /// # use casper_types_ver_2_0::contracts::Error;
+    /// assert_eq!(4, Error::MaxGroupsExceeded as u8);
+    /// ```
+    MaxGroupsExceeded = 4,
+    /// Attempted to add a new URef to a group, which resulted in the total
+    /// number of URefs across all user groups to exceed the allowed maximum.
+    /// ```
+    /// # use casper_types_ver_2_0::contracts::Error;
+    /// assert_eq!(5, Error::MaxTotalURefsExceeded as u8);
+    /// ```
+    MaxTotalURefsExceeded = 5,
+    /// Attempted to remove a URef from a group, which does not exist in the
+    /// group.
+    /// ```
+    /// # use casper_types_ver_2_0::contracts::Error;
+    /// assert_eq!(6, Error::GroupDoesNotExist as u8);
+    /// ```
+    GroupDoesNotExist = 6,
+    /// Attempted to remove unknown URef from the group.
+    /// ```
+    /// # use casper_types_ver_2_0::contracts::Error;
+    /// assert_eq!(7, Error::UnableToRemoveURef as u8);
+    /// ```
+    UnableToRemoveURef = 7,
+    /// Group is in use by at least one active contract.
+    /// ```
+    /// # use casper_types_ver_2_0::contracts::Error;
+    /// assert_eq!(8, Error::GroupInUse as u8);
+    /// ```
+    GroupInUse = 8,
+    /// URef already exists in given group.
+    /// ```
+    /// # use casper_types_ver_2_0::contracts::Error;
+    /// assert_eq!(9, Error::URefAlreadyExists as u8);
+    /// ```
+    URefAlreadyExists = 9,
+}
+
+impl TryFrom<u8> for Error {
+    type Error = ();
+
+    fn try_from(value: u8) -> Result<Self, Self::Error> {
+        let error = match value {
+            v if v == Self::PreviouslyUsedVersion as u8 => Self::PreviouslyUsedVersion,
+            v if v == Self::ContractNotFound as u8 => Self::ContractNotFound,
+            v if v == Self::GroupAlreadyExists as u8 => Self::GroupAlreadyExists,
+            v if v == Self::MaxGroupsExceeded as u8 => Self::MaxGroupsExceeded,
+            v if v == Self::MaxTotalURefsExceeded as u8 => Self::MaxTotalURefsExceeded,
+            v if v == Self::GroupDoesNotExist as u8 => Self::GroupDoesNotExist,
+            v if v == Self::UnableToRemoveURef as u8 => Self::UnableToRemoveURef,
+            v if v == Self::GroupInUse as u8 => Self::GroupInUse,
+            v if v == Self::URefAlreadyExists as u8 => Self::URefAlreadyExists,
+            _ => return Err(()),
+        };
+        Ok(error)
+    }
+}
+
+/// Associated error type of `TryFrom<&[u8]>` for `ContractHash`.
+#[derive(Debug)]
+pub struct TryFromSliceForContractHashError(());
+
+impl Display for TryFromSliceForContractHashError {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        write!(f, "failed to retrieve from slice")
+    }
+}
+
+/// An error from parsing a formatted contract string
+#[derive(Debug)]
+#[non_exhaustive]
+pub enum FromStrError {
+    /// Invalid formatted string prefix.
+    InvalidPrefix,
+    /// Error when decoding a hex string
+    Hex(base16::DecodeError),
+    /// Error when parsing an account
+    Account(TryFromSliceForAccountHashError),
+    /// Error when parsing the hash.
+    Hash(TryFromSliceError),
+    /// Error when parsing an account hash.
+    AccountHash(account::FromStrError),
+    /// Error when parsing an uref.
+    URef(uref::FromStrError),
+}
+
+impl From<base16::DecodeError> for FromStrError {
+    fn from(error: base16::DecodeError) -> Self {
+        FromStrError::Hex(error)
+    }
+}
+
+impl From<TryFromSliceForAccountHashError> for FromStrError {
+    fn from(error: TryFromSliceForAccountHashError) -> Self {
+        FromStrError::Account(error)
+    }
+}
+
+impl From<TryFromSliceError> for FromStrError {
+    fn from(error: TryFromSliceError) -> Self {
+        FromStrError::Hash(error)
+    }
+}
+
+impl From<account::FromStrError> for FromStrError {
+    fn from(error: account::FromStrError) -> Self {
+        FromStrError::AccountHash(error)
+    }
+}
+
+impl From<uref::FromStrError> for FromStrError {
+    fn from(error: uref::FromStrError) -> Self {
+        FromStrError::URef(error)
+    }
+}
+
+impl Display for FromStrError {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        match self {
+            FromStrError::InvalidPrefix => write!(f, "invalid prefix"),
+            FromStrError::Hex(error) => write!(f, "decode from hex: {}", error),
+            FromStrError::Account(error) => write!(f, "account from string error: {:?}", error),
+            FromStrError::Hash(error) => write!(f, "hash from string error: {}", error),
+            FromStrError::AccountHash(error) => {
+                write!(f, "account hash from string error: {:?}", error)
+            }
+            FromStrError::URef(error) => write!(f, "uref from string error: {:?}", error),
+        }
+    }
+}
+
+/// Automatically incremented value for a contract version within a major `ProtocolVersion`.
+pub type ContractVersion = u32;
+
+/// Within each discrete major `ProtocolVersion`, contract version resets to this value.
+pub const CONTRACT_INITIAL_VERSION: ContractVersion = 1;
+
+/// Major element of `ProtocolVersion` a `ContractVersion` is compatible with.
+pub type ProtocolVersionMajor = u32;
+
+/// Major element of `ProtocolVersion` combined with `ContractVersion`.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
+#[cfg_attr(feature = "json-schema", derive(JsonSchema))]
+#[cfg_attr(feature = "datasize", derive(DataSize))]
+pub struct ContractVersionKey(ProtocolVersionMajor, ContractVersion);
+
+impl ContractVersionKey {
+    /// Returns a new instance of ContractVersionKey with provided values.
+    pub fn new(
+        protocol_version_major: ProtocolVersionMajor,
+        contract_version: ContractVersion,
+    ) -> Self {
+        Self(protocol_version_major, contract_version)
+    }
+
+    /// Returns the major element of the protocol version this contract is compatible with.
+    pub fn protocol_version_major(self) -> ProtocolVersionMajor {
+        self.0
+    }
+
+    /// Returns the contract version within the protocol major version.
+    pub fn contract_version(self) -> ContractVersion {
+        self.1
+    }
+}
+
+impl From<ContractVersionKey> for (ProtocolVersionMajor, ContractVersion) {
+    fn from(contract_version_key: ContractVersionKey) -> Self {
+        (contract_version_key.0, contract_version_key.1)
+    }
+}
+
+/// Serialized length of `ContractVersionKey`.
+pub const CONTRACT_VERSION_KEY_SERIALIZED_LENGTH: usize = + U32_SERIALIZED_LENGTH + U32_SERIALIZED_LENGTH; + +impl ToBytes for ContractVersionKey { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret = bytesrepr::unchecked_allocate_buffer(self); + ret.append(&mut self.0.to_bytes()?); + ret.append(&mut self.1.to_bytes()?); + Ok(ret) + } + + fn serialized_length(&self) -> usize { + CONTRACT_VERSION_KEY_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer)?; + self.1.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for ContractVersionKey { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (major, rem): (u32, &[u8]) = FromBytes::from_bytes(bytes)?; + let (contract, rem): (ContractVersion, &[u8]) = FromBytes::from_bytes(rem)?; + Ok((ContractVersionKey::new(major, contract), rem)) + } +} + +impl fmt::Display for ContractVersionKey { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}.{}", self.0, self.1) + } +} + +/// Collection of contract versions. +pub type ContractVersions = BTreeMap; + +/// Collection of disabled contract versions. The runtime will not permit disabled +/// contract versions to be executed. +pub type DisabledVersions = BTreeSet; + +/// A newtype wrapping a `HashAddr` which references a [`Contract`] in the global state. +#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct ContractHash(HashAddr); + +impl ContractHash { + /// Constructs a new `ContractHash` from the raw bytes of the contract hash. + pub const fn new(value: HashAddr) -> ContractHash { + ContractHash(value) + } + + /// Returns the raw bytes of the contract hash as an array. + pub fn value(&self) -> HashAddr { + self.0 + } + + /// Returns the raw bytes of the contract hash as a `slice`. 
+ pub fn as_bytes(&self) -> &[u8] { + &self.0 + } + + /// Formats the `ContractHash` for users getting and putting. + pub fn to_formatted_string(self) -> String { + format!( + "{}{}", + CONTRACT_STRING_PREFIX, + base16::encode_lower(&self.0), + ) + } + + /// Parses a string formatted as per `Self::to_formatted_string()` into a + /// `ContractHash`. + pub fn from_formatted_str(input: &str) -> Result { + let remainder = input + .strip_prefix(CONTRACT_STRING_PREFIX) + .ok_or(FromStrError::InvalidPrefix)?; + let bytes = HashAddr::try_from(checksummed_hex::decode(remainder)?.as_ref())?; + Ok(ContractHash(bytes)) + } +} + +impl Display for ContractHash { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + write!(f, "{}", base16::encode_lower(&self.0)) + } +} + +impl Debug for ContractHash { + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + write!(f, "ContractHash({})", base16::encode_lower(&self.0)) + } +} + +impl CLTyped for ContractHash { + fn cl_type() -> CLType { + CLType::ByteArray(KEY_HASH_LENGTH as u32) + } +} + +impl ToBytes for ContractHash { + #[inline(always)] + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + #[inline(always)] + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + #[inline(always)] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + writer.extend_from_slice(&self.0); + Ok(()) + } +} + +impl FromBytes for ContractHash { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (bytes, rem) = FromBytes::from_bytes(bytes)?; + Ok((ContractHash::new(bytes), rem)) + } +} + +impl From<[u8; 32]> for ContractHash { + fn from(bytes: [u8; 32]) -> Self { + ContractHash(bytes) + } +} + +impl Serialize for ContractHash { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + self.to_formatted_string().serialize(serializer) + } else { + self.0.serialize(serializer) + } + } +} + +impl<'de> 
Deserialize<'de> for ContractHash { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let formatted_string = String::deserialize(deserializer)?; + ContractHash::from_formatted_str(&formatted_string).map_err(SerdeError::custom) + } else { + let bytes = HashAddr::deserialize(deserializer)?; + Ok(ContractHash(bytes)) + } + } +} + +impl AsRef<[u8]> for ContractHash { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + +impl TryFrom<&[u8]> for ContractHash { + type Error = TryFromSliceForContractHashError; + + fn try_from(bytes: &[u8]) -> Result { + HashAddr::try_from(bytes) + .map(ContractHash::new) + .map_err(|_| TryFromSliceForContractHashError(())) + } +} + +impl TryFrom<&Vec> for ContractHash { + type Error = TryFromSliceForContractHashError; + + fn try_from(bytes: &Vec) -> Result { + HashAddr::try_from(bytes as &[u8]) + .map(ContractHash::new) + .map_err(|_| TryFromSliceForContractHashError(())) + } +} + +#[cfg(feature = "json-schema")] +impl JsonSchema for ContractHash { + fn schema_name() -> String { + String::from("ContractHash") + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let schema = gen.subschema_for::(); + let mut schema_object = schema.into_object(); + schema_object.metadata().description = Some("The hash address of the contract".to_string()); + schema_object.into() + } +} + +/// A newtype wrapping a `HashAddr` which references a [`ContractPackage`] in the global state. +#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct ContractPackageHash(HashAddr); + +impl ContractPackageHash { + /// Constructs a new `ContractPackageHash` from the raw bytes of the contract package hash. + pub const fn new(value: HashAddr) -> ContractPackageHash { + ContractPackageHash(value) + } + + /// Returns the raw bytes of the contract hash as an array. 
+ pub fn value(&self) -> HashAddr { + self.0 + } + + /// Returns the raw bytes of the contract hash as a `slice`. + pub fn as_bytes(&self) -> &[u8] { + &self.0 + } + + /// Formats the `ContractPackageHash` for users getting and putting. + pub fn to_formatted_string(self) -> String { + format!("{}{}", PACKAGE_STRING_PREFIX, base16::encode_lower(&self.0),) + } + + /// Parses a string formatted as per `Self::to_formatted_string()` into a + /// `ContractPackageHash`. + pub fn from_formatted_str(input: &str) -> Result { + let remainder = input + .strip_prefix(PACKAGE_STRING_PREFIX) + .ok_or(FromStrError::InvalidPrefix)?; + + let hex_addr = remainder + .strip_prefix(PACKAGE_STRING_LEGACY_EXTRA_PREFIX) + .unwrap_or(remainder); + + let bytes = HashAddr::try_from(checksummed_hex::decode(hex_addr)?.as_ref())?; + Ok(ContractPackageHash(bytes)) + } +} + +impl Display for ContractPackageHash { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + write!(f, "{}", base16::encode_lower(&self.0)) + } +} + +impl Debug for ContractPackageHash { + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + write!(f, "ContractPackageHash({})", base16::encode_lower(&self.0)) + } +} + +impl CLTyped for ContractPackageHash { + fn cl_type() -> CLType { + CLType::ByteArray(KEY_HASH_LENGTH as u32) + } +} + +impl ToBytes for ContractPackageHash { + #[inline(always)] + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + #[inline(always)] + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + #[inline(always)] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + writer.extend_from_slice(&self.0); + Ok(()) + } +} + +impl FromBytes for ContractPackageHash { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (bytes, rem) = FromBytes::from_bytes(bytes)?; + Ok((ContractPackageHash::new(bytes), rem)) + } +} + +impl From<[u8; 32]> for ContractPackageHash { + fn from(bytes: [u8; 32]) -> Self 
{ + ContractPackageHash(bytes) + } +} + +impl Serialize for ContractPackageHash { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + self.to_formatted_string().serialize(serializer) + } else { + self.0.serialize(serializer) + } + } +} + +impl<'de> Deserialize<'de> for ContractPackageHash { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let formatted_string = String::deserialize(deserializer)?; + ContractPackageHash::from_formatted_str(&formatted_string).map_err(SerdeError::custom) + } else { + let bytes = HashAddr::deserialize(deserializer)?; + Ok(ContractPackageHash(bytes)) + } + } +} + +impl AsRef<[u8]> for ContractPackageHash { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + +impl TryFrom<&[u8]> for ContractPackageHash { + type Error = TryFromSliceForContractHashError; + + fn try_from(bytes: &[u8]) -> Result { + HashAddr::try_from(bytes) + .map(ContractPackageHash::new) + .map_err(|_| TryFromSliceForContractHashError(())) + } +} + +impl TryFrom<&Vec> for ContractPackageHash { + type Error = TryFromSliceForContractHashError; + + fn try_from(bytes: &Vec) -> Result { + HashAddr::try_from(bytes as &[u8]) + .map(ContractPackageHash::new) + .map_err(|_| TryFromSliceForContractHashError(())) + } +} + +#[cfg(feature = "json-schema")] +impl JsonSchema for ContractPackageHash { + fn schema_name() -> String { + String::from("ContractPackageHash") + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let schema = gen.subschema_for::(); + let mut schema_object = schema.into_object(); + schema_object.metadata().description = + Some("The hash address of the contract package".to_string()); + schema_object.into() + } +} + +/// A enum to determine the lock status of the contract package. 
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum ContractPackageStatus { + /// The package is locked and cannot be versioned. + Locked, + /// The package is unlocked and can be versioned. + Unlocked, +} + +impl ContractPackageStatus { + /// Create a new status flag based on a boolean value + pub fn new(is_locked: bool) -> Self { + if is_locked { + ContractPackageStatus::Locked + } else { + ContractPackageStatus::Unlocked + } + } +} + +impl Default for ContractPackageStatus { + fn default() -> Self { + Self::Unlocked + } +} + +impl ToBytes for ContractPackageStatus { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + match self { + ContractPackageStatus::Unlocked => result.append(&mut false.to_bytes()?), + ContractPackageStatus::Locked => result.append(&mut true.to_bytes()?), + } + Ok(result) + } + + fn serialized_length(&self) -> usize { + match self { + ContractPackageStatus::Unlocked => false.serialized_length(), + ContractPackageStatus::Locked => true.serialized_length(), + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + ContractPackageStatus::Locked => writer.push(u8::from(true)), + ContractPackageStatus::Unlocked => writer.push(u8::from(false)), + } + Ok(()) + } +} + +impl FromBytes for ContractPackageStatus { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (val, bytes) = bool::from_bytes(bytes)?; + let status = ContractPackageStatus::new(val); + Ok((status, bytes)) + } +} + +/// Contract definition, metadata, and security container. 
+#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct ContractPackage { + /// Key used to add or disable versions + access_key: URef, + /// All versions (enabled & disabled) + versions: ContractVersions, + /// Disabled versions + disabled_versions: DisabledVersions, + /// Mapping maintaining the set of URefs associated with each "user + /// group". This can be used to control access to methods in a particular + /// version of the contract. A method is callable by any context which + /// "knows" any of the URefs associated with the method's user group. + groups: Groups, + /// A flag that determines whether a contract is locked + lock_status: ContractPackageStatus, +} + +impl CLTyped for ContractPackage { + fn cl_type() -> CLType { + CLType::Any + } +} + +impl ContractPackage { + /// Create new `ContractPackage` (with no versions) from given access key. + pub fn new( + access_key: URef, + versions: ContractVersions, + disabled_versions: DisabledVersions, + groups: Groups, + lock_status: ContractPackageStatus, + ) -> Self { + ContractPackage { + access_key, + versions, + disabled_versions, + groups, + lock_status, + } + } + + /// Get the access key for this contract. + pub fn access_key(&self) -> URef { + self.access_key + } + + /// Get the group definitions for this contract. + pub fn groups(&self) -> &Groups { + &self.groups + } + + /// Returns reference to all of this contract's versions. + pub fn versions(&self) -> &ContractVersions { + &self.versions + } + + /// Returns mutable reference to all of this contract's versions (enabled and disabled). + pub fn versions_mut(&mut self) -> &mut ContractVersions { + &mut self.versions + } + + /// Consumes the object and returns all of this contract's versions (enabled and disabled). 
+ pub fn take_versions(self) -> ContractVersions { + self.versions + } + + /// Returns all of this contract's disabled versions. + pub fn disabled_versions(&self) -> &DisabledVersions { + &self.disabled_versions + } + + /// Returns mut reference to all of this contract's disabled versions. + pub fn disabled_versions_mut(&mut self) -> &mut DisabledVersions { + &mut self.disabled_versions + } + + #[cfg(test)] + fn next_contract_version_for(&self, protocol_version: ProtocolVersionMajor) -> ContractVersion { + let current_version = self + .versions + .keys() + .rev() + .find_map(|&contract_version_key| { + if contract_version_key.protocol_version_major() == protocol_version { + Some(contract_version_key.contract_version()) + } else { + None + } + }) + .unwrap_or(0); + + current_version + 1 + } + + #[cfg(test)] + fn insert_contract_version( + &mut self, + protocol_version_major: ProtocolVersionMajor, + contract_hash: ContractHash, + ) -> ContractVersionKey { + let contract_version = self.next_contract_version_for(protocol_version_major); + let key = ContractVersionKey::new(protocol_version_major, contract_version); + self.versions.insert(key, contract_hash); + key + } + + #[cfg(test)] + fn groups_mut(&mut self) -> &mut Groups { + &mut self.groups + } +} + +impl ToBytes for ContractPackage { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + self.access_key().write_bytes(&mut result)?; + self.versions().write_bytes(&mut result)?; + self.disabled_versions().write_bytes(&mut result)?; + self.groups().write_bytes(&mut result)?; + self.lock_status.write_bytes(&mut result)?; + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.access_key.serialized_length() + + self.versions.serialized_length() + + self.disabled_versions.serialized_length() + + self.groups.serialized_length() + + self.lock_status.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + 
self.access_key().write_bytes(writer)?; + self.versions().write_bytes(writer)?; + self.disabled_versions().write_bytes(writer)?; + self.groups().write_bytes(writer)?; + self.lock_status.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for ContractPackage { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (access_key, bytes) = URef::from_bytes(bytes)?; + let (versions, bytes) = ContractVersions::from_bytes(bytes)?; + let (disabled_versions, bytes) = DisabledVersions::from_bytes(bytes)?; + let (groups, bytes) = Groups::from_bytes(bytes)?; + let (lock_status, bytes) = ContractPackageStatus::from_bytes(bytes)?; + let result = ContractPackage { + access_key, + versions, + disabled_versions, + groups, + lock_status, + }; + + Ok((result, bytes)) + } +} + +impl From for Package { + fn from(value: ContractPackage) -> Self { + let versions: BTreeMap = value + .versions + .into_iter() + .map(|(version, contract_hash)| { + let entity_version = EntityVersionKey::new(2, version.contract_version()); + let entity_hash: AddressableEntityHash = + AddressableEntityHash::new(contract_hash.value()); + (entity_version, entity_hash) + }) + .collect(); + + let disabled_versions = value + .disabled_versions + .into_iter() + .map(|contract_versions| { + EntityVersionKey::new( + contract_versions.protocol_version_major(), + contract_versions.contract_version(), + ) + }) + .collect(); + + let lock_status = if value.lock_status == ContractPackageStatus::Locked { + PackageStatus::Locked + } else { + PackageStatus::Unlocked + }; + + Package::new( + value.access_key, + versions.into(), + disabled_versions, + value.groups, + lock_status, + PackageKind::SmartContract, + ) + } +} + +/// Methods and type signatures supported by a contract. 
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct Contract { + contract_package_hash: ContractPackageHash, + contract_wasm_hash: ContractWasmHash, + named_keys: NamedKeys, + entry_points: EntryPoints, + protocol_version: ProtocolVersion, +} + +impl From + for ( + ContractPackageHash, + ContractWasmHash, + NamedKeys, + EntryPoints, + ProtocolVersion, + ) +{ + fn from(contract: Contract) -> Self { + ( + contract.contract_package_hash, + contract.contract_wasm_hash, + contract.named_keys, + contract.entry_points, + contract.protocol_version, + ) + } +} + +impl Contract { + /// `Contract` constructor. + pub fn new( + contract_package_hash: ContractPackageHash, + contract_wasm_hash: ContractWasmHash, + named_keys: NamedKeys, + entry_points: EntryPoints, + protocol_version: ProtocolVersion, + ) -> Self { + Contract { + contract_package_hash, + contract_wasm_hash, + named_keys, + entry_points, + protocol_version, + } + } + + /// Hash for accessing contract package + pub fn contract_package_hash(&self) -> ContractPackageHash { + self.contract_package_hash + } + + /// Hash for accessing contract WASM + pub fn contract_wasm_hash(&self) -> ContractWasmHash { + self.contract_wasm_hash + } + + /// Checks whether there is a method with the given name + pub fn has_entry_point(&self, name: &str) -> bool { + self.entry_points.has_entry_point(name) + } + + /// Returns the type signature for the given `method`. + pub fn entry_point(&self, method: &str) -> Option<&EntryPoint> { + self.entry_points.get(method) + } + + /// Get the protocol version this header is targeting. 
+ pub fn protocol_version(&self) -> ProtocolVersion { + self.protocol_version + } + + /// Adds new entry point + pub fn add_entry_point>(&mut self, entry_point: EntryPoint) { + self.entry_points.add_entry_point(entry_point); + } + + /// Hash for accessing contract bytes + pub fn contract_wasm_key(&self) -> Key { + self.contract_wasm_hash.into() + } + + /// Returns immutable reference to methods + pub fn entry_points(&self) -> &EntryPoints { + &self.entry_points + } + + /// Takes `named_keys` + pub fn take_named_keys(self) -> NamedKeys { + self.named_keys + } + + /// Returns a reference to `named_keys` + pub fn named_keys(&self) -> &NamedKeys { + &self.named_keys + } + + /// Appends `keys` to `named_keys` + pub fn named_keys_append(&mut self, keys: NamedKeys) { + self.named_keys.append(keys); + } + + /// Removes given named key. + pub fn remove_named_key(&mut self, key: &str) -> Option { + self.named_keys.remove(key) + } + + /// Set protocol_version. + pub fn set_protocol_version(&mut self, protocol_version: ProtocolVersion) { + self.protocol_version = protocol_version; + } + + /// Determines if `Contract` is compatible with a given `ProtocolVersion`. 
+ pub fn is_compatible_protocol_version(&self, protocol_version: ProtocolVersion) -> bool { + self.protocol_version.value().major == protocol_version.value().major + } +} + +impl ToBytes for Contract { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + self.contract_package_hash().write_bytes(&mut result)?; + self.contract_wasm_hash().write_bytes(&mut result)?; + self.named_keys().write_bytes(&mut result)?; + self.entry_points().write_bytes(&mut result)?; + self.protocol_version().write_bytes(&mut result)?; + Ok(result) + } + + fn serialized_length(&self) -> usize { + ToBytes::serialized_length(&self.entry_points) + + ToBytes::serialized_length(&self.contract_package_hash) + + ToBytes::serialized_length(&self.contract_wasm_hash) + + ToBytes::serialized_length(&self.protocol_version) + + ToBytes::serialized_length(&self.named_keys) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.contract_package_hash().write_bytes(writer)?; + self.contract_wasm_hash().write_bytes(writer)?; + self.named_keys().write_bytes(writer)?; + self.entry_points().write_bytes(writer)?; + self.protocol_version().write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for Contract { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (contract_package_hash, bytes) = FromBytes::from_bytes(bytes)?; + let (contract_wasm_hash, bytes) = FromBytes::from_bytes(bytes)?; + let (named_keys, bytes) = NamedKeys::from_bytes(bytes)?; + let (entry_points, bytes) = EntryPoints::from_bytes(bytes)?; + let (protocol_version, bytes) = ProtocolVersion::from_bytes(bytes)?; + Ok(( + Contract { + contract_package_hash, + contract_wasm_hash, + named_keys, + entry_points, + protocol_version, + }, + bytes, + )) + } +} + +impl Default for Contract { + fn default() -> Self { + Contract { + named_keys: NamedKeys::default(), + entry_points: EntryPoints::default(), + contract_wasm_hash: [0; 
KEY_HASH_LENGTH].into(), + contract_package_hash: [0; KEY_HASH_LENGTH].into(), + protocol_version: ProtocolVersion::V1_0_0, + } + } +} + +/// Default name for an entry point +pub const DEFAULT_ENTRY_POINT_NAME: &str = "call"; + +/// Default name for an installer entry point +pub const ENTRY_POINT_NAME_INSTALL: &str = "install"; + +/// Default name for an upgrade entry point +pub const UPGRADE_ENTRY_POINT_NAME: &str = "upgrade"; + +#[cfg(test)] +mod tests { + + use super::*; + use crate::{AccessRights, EntryPointAccess, EntryPointType, Group, Parameter, URef}; + use alloc::borrow::ToOwned; + + const CONTRACT_HASH_V1: ContractHash = ContractHash::new([42; 32]); + const CONTRACT_HASH_V2: ContractHash = ContractHash::new([84; 32]); + + fn make_contract_package() -> ContractPackage { + let mut contract_package = ContractPackage::new( + URef::new([0; 32], AccessRights::NONE), + ContractVersions::default(), + DisabledVersions::default(), + Groups::default(), + ContractPackageStatus::default(), + ); + + // add groups + { + let group_urefs = { + let mut ret = BTreeSet::new(); + ret.insert(URef::new([1; 32], AccessRights::READ)); + ret + }; + + contract_package + .groups_mut() + .insert(Group::new("Group 1"), group_urefs.clone()); + + contract_package + .groups_mut() + .insert(Group::new("Group 2"), group_urefs); + } + + // add entry_points + let _entry_points = { + let mut ret = BTreeMap::new(); + let entrypoint = EntryPoint::new( + "method0".to_string(), + vec![], + CLType::U32, + EntryPointAccess::groups(&["Group 2"]), + EntryPointType::Session, + ); + ret.insert(entrypoint.name().to_owned(), entrypoint); + let entrypoint = EntryPoint::new( + "method1".to_string(), + vec![Parameter::new("Foo", CLType::U32)], + CLType::U32, + EntryPointAccess::groups(&["Group 1"]), + EntryPointType::Session, + ); + ret.insert(entrypoint.name().to_owned(), entrypoint); + ret + }; + + let _contract_package_hash = [41; 32]; + let _contract_wasm_hash = [43; 32]; + let _named_keys = 
NamedKeys::new(); + let protocol_version = ProtocolVersion::V1_0_0; + + let v1 = contract_package + .insert_contract_version(protocol_version.value().major, CONTRACT_HASH_V1); + let v2 = contract_package + .insert_contract_version(protocol_version.value().major, CONTRACT_HASH_V2); + + assert!(v2 > v1); + + contract_package + } + + #[test] + fn roundtrip_serialization() { + let contract_package = make_contract_package(); + let bytes = contract_package.to_bytes().expect("should serialize"); + let (decoded_package, rem) = + ContractPackage::from_bytes(&bytes).expect("should deserialize"); + assert_eq!(contract_package, decoded_package); + assert_eq!(rem.len(), 0); + } + + #[test] + fn contract_hash_from_slice() { + let bytes: Vec = (0..32).collect(); + let contract_hash = HashAddr::try_from(&bytes[..]).expect("should create contract hash"); + let contract_hash = ContractHash::new(contract_hash); + assert_eq!(&bytes, &contract_hash.as_bytes()); + } + + #[test] + fn contract_package_hash_from_slice() { + let bytes: Vec = (0..32).collect(); + let contract_hash = HashAddr::try_from(&bytes[..]).expect("should create contract hash"); + let contract_hash = ContractPackageHash::new(contract_hash); + assert_eq!(&bytes, &contract_hash.as_bytes()); + } + + #[test] + fn contract_hash_from_str() { + let contract_hash = ContractHash([3; 32]); + let encoded = contract_hash.to_formatted_string(); + let decoded = ContractHash::from_formatted_str(&encoded).unwrap(); + assert_eq!(contract_hash, decoded); + + let invalid_prefix = + "contract--0000000000000000000000000000000000000000000000000000000000000000"; + assert!(ContractHash::from_formatted_str(invalid_prefix).is_err()); + + let short_addr = "contract-00000000000000000000000000000000000000000000000000000000000000"; + assert!(ContractHash::from_formatted_str(short_addr).is_err()); + + let long_addr = + "contract-000000000000000000000000000000000000000000000000000000000000000000"; + 
assert!(ContractHash::from_formatted_str(long_addr).is_err()); + + let invalid_hex = + "contract-000000000000000000000000000000000000000000000000000000000000000g"; + assert!(ContractHash::from_formatted_str(invalid_hex).is_err()); + } + + #[test] + fn contract_package_hash_from_str() { + let contract_package_hash = ContractPackageHash([3; 32]); + let encoded = contract_package_hash.to_formatted_string(); + let decoded = ContractPackageHash::from_formatted_str(&encoded).unwrap(); + assert_eq!(contract_package_hash, decoded); + + let invalid_prefix = + "contract-package0000000000000000000000000000000000000000000000000000000000000000"; + assert!(matches!( + ContractPackageHash::from_formatted_str(invalid_prefix).unwrap_err(), + FromStrError::InvalidPrefix + )); + + let short_addr = + "contract-package-00000000000000000000000000000000000000000000000000000000000000"; + assert!(matches!( + ContractPackageHash::from_formatted_str(short_addr).unwrap_err(), + FromStrError::Hash(_) + )); + + let long_addr = + "contract-package-000000000000000000000000000000000000000000000000000000000000000000"; + assert!(matches!( + ContractPackageHash::from_formatted_str(long_addr).unwrap_err(), + FromStrError::Hash(_) + )); + + let invalid_hex = + "contract-package-000000000000000000000000000000000000000000000000000000000000000g"; + assert!(matches!( + ContractPackageHash::from_formatted_str(invalid_hex).unwrap_err(), + FromStrError::Hex(_) + )); + } + + #[test] + fn contract_package_hash_from_legacy_str() { + let contract_package_hash = ContractPackageHash([3; 32]); + let hex_addr = contract_package_hash.to_string(); + let legacy_encoded = format!("contract-package-wasm{}", hex_addr); + let decoded_from_legacy = ContractPackageHash::from_formatted_str(&legacy_encoded) + .expect("should accept legacy prefixed string"); + assert_eq!( + contract_package_hash, decoded_from_legacy, + "decoded_from_legacy should equal decoded" + ); + + let invalid_prefix = + 
"contract-packagewasm0000000000000000000000000000000000000000000000000000000000000000"; + assert!(matches!( + ContractPackageHash::from_formatted_str(invalid_prefix).unwrap_err(), + FromStrError::InvalidPrefix + )); + + let short_addr = + "contract-package-wasm00000000000000000000000000000000000000000000000000000000000000"; + assert!(matches!( + ContractPackageHash::from_formatted_str(short_addr).unwrap_err(), + FromStrError::Hash(_) + )); + + let long_addr = + "contract-package-wasm000000000000000000000000000000000000000000000000000000000000000000"; + assert!(matches!( + ContractPackageHash::from_formatted_str(long_addr).unwrap_err(), + FromStrError::Hash(_) + )); + + let invalid_hex = + "contract-package-wasm000000000000000000000000000000000000000000000000000000000000000g"; + assert!(matches!( + ContractPackageHash::from_formatted_str(invalid_hex).unwrap_err(), + FromStrError::Hex(_) + )); + } + + #[test] + fn contract_hash_serde_roundtrip() { + let contract_hash = ContractHash([255; 32]); + let serialized = bincode::serialize(&contract_hash).unwrap(); + let deserialized = bincode::deserialize(&serialized).unwrap(); + assert_eq!(contract_hash, deserialized) + } + + #[test] + fn contract_hash_json_roundtrip() { + let contract_hash = ContractHash([255; 32]); + let json_string = serde_json::to_string_pretty(&contract_hash).unwrap(); + let decoded = serde_json::from_str(&json_string).unwrap(); + assert_eq!(contract_hash, decoded) + } + + #[test] + fn contract_package_hash_serde_roundtrip() { + let contract_hash = ContractPackageHash([255; 32]); + let serialized = bincode::serialize(&contract_hash).unwrap(); + let deserialized = bincode::deserialize(&serialized).unwrap(); + assert_eq!(contract_hash, deserialized) + } + + #[test] + fn contract_package_hash_json_roundtrip() { + let contract_hash = ContractPackageHash([255; 32]); + let json_string = serde_json::to_string_pretty(&contract_hash).unwrap(); + let decoded = serde_json::from_str(&json_string).unwrap(); + 
assert_eq!(contract_hash, decoded) + } +} + +#[cfg(test)] +mod prop_tests { + use proptest::prelude::*; + + use crate::{bytesrepr, gens}; + + proptest! { + #![proptest_config(ProptestConfig { + cases: 1024, + .. ProptestConfig::default() + })] + + #[test] + fn test_value_contract(contract in gens::contract_arb()) { + bytesrepr::test_serialization_roundtrip(&contract); + } + + #[test] + fn test_value_contract_package(contract_pkg in gens::contract_package_arb()) { + bytesrepr::test_serialization_roundtrip(&contract_pkg); + } + } +} diff --git a/casper_types_ver_2_0/src/crypto.rs b/casper_types_ver_2_0/src/crypto.rs new file mode 100644 index 00000000..fbcd172c --- /dev/null +++ b/casper_types_ver_2_0/src/crypto.rs @@ -0,0 +1,35 @@ +//! Cryptographic types and operations on them + +mod asymmetric_key; +mod error; + +use blake2::{ + digest::{Update, VariableOutput}, + VarBlake2b, +}; + +use crate::key::BLAKE2B_DIGEST_LENGTH; +#[cfg(any(feature = "std", test))] +pub use asymmetric_key::generate_ed25519_keypair; +#[cfg(any(feature = "testing", feature = "gens", test))] +pub use asymmetric_key::gens; +pub use asymmetric_key::{ + sign, verify, AsymmetricType, PublicKey, SecretKey, Signature, ED25519_TAG, SECP256K1_TAG, + SYSTEM_ACCOUNT, SYSTEM_TAG, +}; +pub use error::Error; +#[cfg(any(feature = "std", test))] +pub use error::ErrorExt; + +#[doc(hidden)] +pub fn blake2b>(data: T) -> [u8; BLAKE2B_DIGEST_LENGTH] { + let mut result = [0; BLAKE2B_DIGEST_LENGTH]; + // NOTE: Assumed safe as `BLAKE2B_DIGEST_LENGTH` is a valid value for a hasher + let mut hasher = VarBlake2b::new(BLAKE2B_DIGEST_LENGTH).expect("should create hasher"); + + hasher.update(data); + hasher.finalize_variable(|slice| { + result.copy_from_slice(slice); + }); + result +} diff --git a/casper_types_ver_2_0/src/crypto/asymmetric_key.rs b/casper_types_ver_2_0/src/crypto/asymmetric_key.rs new file mode 100644 index 00000000..1f445b78 --- /dev/null +++ b/casper_types_ver_2_0/src/crypto/asymmetric_key.rs @@ -0,0 
+1,1304 @@ +//! Asymmetric key types and methods on them + +use alloc::{ + format, + string::{String, ToString}, + vec::Vec, +}; +use core::{ + cmp::Ordering, + convert::TryFrom, + fmt::{self, Debug, Display, Formatter}, + hash::{Hash, Hasher}, + iter, + marker::Copy, +}; +#[cfg(any(feature = "std", test))] +use std::path::Path; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "std", test))] +use derp::{Der, Tag}; +use ed25519_dalek::{ + Signature as Ed25519Signature, SigningKey as Ed25519SecretKey, + VerifyingKey as Ed25519PublicKey, PUBLIC_KEY_LENGTH as ED25519_PUBLIC_KEY_LENGTH, + SECRET_KEY_LENGTH as ED25519_SECRET_KEY_LENGTH, SIGNATURE_LENGTH as ED25519_SIGNATURE_LENGTH, +}; +use hex_fmt::HexFmt; +use k256::ecdsa::{ + signature::{Signer, Verifier}, + Signature as Secp256k1Signature, SigningKey as Secp256k1SecretKey, + VerifyingKey as Secp256k1PublicKey, +}; +#[cfg(feature = "json-schema")] +use once_cell::sync::Lazy; +#[cfg(any(feature = "std", test))] +use pem::Pem; +#[cfg(any(feature = "testing", test))] +use rand::{Rng, RngCore}; +#[cfg(feature = "json-schema")] +use schemars::{gen::SchemaGenerator, schema::Schema, JsonSchema}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; +#[cfg(feature = "json-schema")] +use serde_json::json; +#[cfg(any(feature = "std", test))] +use untrusted::Input; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + account::AccountHash, + bytesrepr, + bytesrepr::{FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + checksummed_hex, + crypto::Error, + CLType, CLTyped, Tagged, +}; +#[cfg(any(feature = "std", test))] +use crate::{ + crypto::ErrorExt, + file_utils::{read_file, write_file, write_private_file}, +}; + +#[cfg(any(feature = "testing", test))] +pub mod gens; +#[cfg(test)] +mod tests; + +const TAG_LENGTH: usize = U8_SERIALIZED_LENGTH; + +/// Tag for system variant. 
+pub const SYSTEM_TAG: u8 = 0; +const SYSTEM: &str = "System"; + +/// Tag for ed25519 variant. +pub const ED25519_TAG: u8 = 1; +const ED25519: &str = "Ed25519"; + +/// Tag for secp256k1 variant. +pub const SECP256K1_TAG: u8 = 2; +const SECP256K1: &str = "Secp256k1"; + +const SECP256K1_SECRET_KEY_LENGTH: usize = 32; +const SECP256K1_COMPRESSED_PUBLIC_KEY_LENGTH: usize = 33; +const SECP256K1_SIGNATURE_LENGTH: usize = 64; + +/// Public key for system account. +pub const SYSTEM_ACCOUNT: PublicKey = PublicKey::System; + +// See https://www.secg.org/sec1-v2.pdf#subsection.C.4 +#[cfg(any(feature = "std", test))] +const EC_PUBLIC_KEY_OBJECT_IDENTIFIER: [u8; 7] = [42, 134, 72, 206, 61, 2, 1]; + +// See https://tools.ietf.org/html/rfc8410#section-10.3 +#[cfg(any(feature = "std", test))] +const ED25519_OBJECT_IDENTIFIER: [u8; 3] = [43, 101, 112]; +#[cfg(any(feature = "std", test))] +const ED25519_PEM_SECRET_KEY_TAG: &str = "PRIVATE KEY"; +#[cfg(any(feature = "std", test))] +const ED25519_PEM_PUBLIC_KEY_TAG: &str = "PUBLIC KEY"; + +// Ref? +#[cfg(any(feature = "std", test))] +const SECP256K1_OBJECT_IDENTIFIER: [u8; 5] = [43, 129, 4, 0, 10]; +#[cfg(any(feature = "std", test))] +const SECP256K1_PEM_SECRET_KEY_TAG: &str = "EC PRIVATE KEY"; +#[cfg(any(feature = "std", test))] +const SECP256K1_PEM_PUBLIC_KEY_TAG: &str = "PUBLIC KEY"; + +#[cfg(feature = "json-schema")] +static ED25519_SECRET_KEY: Lazy = Lazy::new(|| { + let bytes = [15u8; SecretKey::ED25519_LENGTH]; + SecretKey::ed25519_from_bytes(bytes).unwrap() +}); + +#[cfg(feature = "json-schema")] +static ED25519_PUBLIC_KEY: Lazy = Lazy::new(|| { + let bytes = [15u8; SecretKey::ED25519_LENGTH]; + let secret_key = SecretKey::ed25519_from_bytes(bytes).unwrap(); + PublicKey::from(&secret_key) +}); + +/// Operations on asymmetric cryptographic type. +pub trait AsymmetricType<'a> +where + Self: 'a + Sized + Tagged, + Vec: From<&'a Self>, +{ + /// Converts `self` to hex, where the first byte represents the algorithm tag. 
+ fn to_hex(&'a self) -> String { + let bytes = iter::once(self.tag()) + .chain(Vec::::from(self)) + .collect::>(); + base16::encode_lower(&bytes) + } + + /// Tries to decode `Self` from its hex-representation. The hex format should be as produced + /// by `AsymmetricType::to_hex()`. + fn from_hex>(input: A) -> Result { + if input.as_ref().len() < 2 { + return Err(Error::AsymmetricKey( + "failed to decode from hex: too short".to_string(), + )); + } + + let (tag_hex, key_hex) = input.as_ref().split_at(2); + + let tag = checksummed_hex::decode(tag_hex)?; + let key_bytes = checksummed_hex::decode(key_hex)?; + + match tag[0] { + SYSTEM_TAG => { + if key_bytes.is_empty() { + Ok(Self::system()) + } else { + Err(Error::AsymmetricKey( + "failed to decode from hex: invalid system variant".to_string(), + )) + } + } + ED25519_TAG => Self::ed25519_from_bytes(&key_bytes), + SECP256K1_TAG => Self::secp256k1_from_bytes(&key_bytes), + _ => Err(Error::AsymmetricKey(format!( + "failed to decode from hex: invalid tag. Expected {}, {} or {}, got {}", + SYSTEM_TAG, ED25519_TAG, SECP256K1_TAG, tag[0] + ))), + } + } + + /// Constructs a new system variant. + fn system() -> Self; + + /// Constructs a new ed25519 variant from a byte slice. + fn ed25519_from_bytes>(bytes: T) -> Result; + + /// Constructs a new secp256k1 variant from a byte slice. + fn secp256k1_from_bytes>(bytes: T) -> Result; +} + +/// A secret or private asymmetric key. +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[non_exhaustive] +pub enum SecretKey { + /// System secret key. + System, + /// Ed25519 secret key. + #[cfg_attr(feature = "datasize", data_size(skip))] + // Manually verified to have no data on the heap. + Ed25519(Ed25519SecretKey), + /// secp256k1 secret key. + #[cfg_attr(feature = "datasize", data_size(skip))] + Secp256k1(Secp256k1SecretKey), +} + +impl SecretKey { + /// The length in bytes of a system secret key. 
+ pub const SYSTEM_LENGTH: usize = 0; + + /// The length in bytes of an Ed25519 secret key. + pub const ED25519_LENGTH: usize = ED25519_SECRET_KEY_LENGTH; + + /// The length in bytes of a secp256k1 secret key. + pub const SECP256K1_LENGTH: usize = SECP256K1_SECRET_KEY_LENGTH; + + /// Constructs a new system variant. + pub fn system() -> Self { + SecretKey::System + } + + /// Constructs a new ed25519 variant from a byte slice. + pub fn ed25519_from_bytes>(bytes: T) -> Result { + Ok(SecretKey::Ed25519(Ed25519SecretKey::try_from( + bytes.as_ref(), + )?)) + } + + /// Constructs a new secp256k1 variant from a byte slice. + pub fn secp256k1_from_bytes>(bytes: T) -> Result { + Ok(SecretKey::Secp256k1( + Secp256k1SecretKey::from_slice(bytes.as_ref()).map_err(|_| Error::SignatureError)?, + )) + } + + /// Generates a new ed25519 variant using the system's secure random number generator. + #[cfg(any(feature = "std", test))] + pub fn generate_ed25519() -> Result { + let mut bytes = [0u8; Self::ED25519_LENGTH]; + getrandom::getrandom(&mut bytes[..])?; + SecretKey::ed25519_from_bytes(bytes).map_err(Into::into) + } + + /// Generates a new secp256k1 variant using the system's secure random number generator. + #[cfg(any(feature = "std", test))] + pub fn generate_secp256k1() -> Result { + let mut bytes = [0u8; Self::SECP256K1_LENGTH]; + getrandom::getrandom(&mut bytes[..])?; + SecretKey::secp256k1_from_bytes(bytes).map_err(Into::into) + } + + /// Attempts to write the key bytes to the configured file path. + #[cfg(any(feature = "std", test))] + pub fn to_file>(&self, file: P) -> Result<(), ErrorExt> { + write_private_file(file, self.to_pem()?).map_err(ErrorExt::SecretKeySave) + } + + /// Attempts to read the key bytes from configured file path. + #[cfg(any(feature = "std", test))] + pub fn from_file>(file: P) -> Result { + let data = read_file(file).map_err(ErrorExt::SecretKeyLoad)?; + Self::from_pem(data) + } + + /// DER encodes a key. 
+ #[cfg(any(feature = "std", test))] + pub fn to_der(&self) -> Result, ErrorExt> { + match self { + SecretKey::System => Err(Error::System(String::from("to_der")).into()), + SecretKey::Ed25519(secret_key) => { + // See https://tools.ietf.org/html/rfc8410#section-10.3 + let mut key_bytes = vec![]; + let mut der = Der::new(&mut key_bytes); + der.octet_string(&secret_key.to_bytes())?; + + let mut encoded = vec![]; + der = Der::new(&mut encoded); + der.sequence(|der| { + der.integer(&[0])?; + der.sequence(|der| der.oid(&ED25519_OBJECT_IDENTIFIER))?; + der.octet_string(&key_bytes) + })?; + Ok(encoded) + } + SecretKey::Secp256k1(secret_key) => { + // See https://www.secg.org/sec1-v2.pdf#subsection.C.4 + let mut oid_bytes = vec![]; + let mut der = Der::new(&mut oid_bytes); + der.oid(&SECP256K1_OBJECT_IDENTIFIER)?; + + let mut encoded = vec![]; + der = Der::new(&mut encoded); + der.sequence(|der| { + der.integer(&[1])?; + der.octet_string(secret_key.to_bytes().as_slice())?; + der.element(Tag::ContextSpecificConstructed0, &oid_bytes) + })?; + Ok(encoded) + } + } + } + + /// Decodes a key from a DER-encoded slice. + #[cfg(any(feature = "std", test))] + pub fn from_der>(input: T) -> Result { + let input = Input::from(input.as_ref()); + + let (key_type_tag, raw_bytes) = input.read_all(derp::Error::Read, |input| { + derp::nested(input, Tag::Sequence, |input| { + // Safe to ignore the first value which should be an integer. + let version_slice = + derp::expect_tag_and_get_value(input, Tag::Integer)?.as_slice_less_safe(); + if version_slice.len() != 1 { + return Err(derp::Error::NonZeroUnusedBits); + } + let version = version_slice[0]; + + // Read the next value. + let (tag, value) = derp::read_tag_and_get_value(input)?; + if tag == Tag::Sequence as u8 { + // Expecting an Ed25519 key. + if version != 0 { + return Err(derp::Error::WrongValue); + } + + // The sequence should have one element: an object identifier defining Ed25519. 
+ let object_identifier = value.read_all(derp::Error::Read, |input| { + derp::expect_tag_and_get_value(input, Tag::Oid) + })?; + if object_identifier.as_slice_less_safe() != ED25519_OBJECT_IDENTIFIER { + return Err(derp::Error::WrongValue); + } + + // The third and final value should be the raw bytes of the secret key as an + // octet string in an octet string. + let raw_bytes = derp::nested(input, Tag::OctetString, |input| { + derp::expect_tag_and_get_value(input, Tag::OctetString) + })? + .as_slice_less_safe(); + + return Ok((ED25519_TAG, raw_bytes)); + } else if tag == Tag::OctetString as u8 { + // Expecting a secp256k1 key. + if version != 1 { + return Err(derp::Error::WrongValue); + } + + // The octet string is the secret key. + let raw_bytes = value.as_slice_less_safe(); + + // The object identifier is next. + let parameter0 = + derp::expect_tag_and_get_value(input, Tag::ContextSpecificConstructed0)?; + let object_identifier = parameter0.read_all(derp::Error::Read, |input| { + derp::expect_tag_and_get_value(input, Tag::Oid) + })?; + if object_identifier.as_slice_less_safe() != SECP256K1_OBJECT_IDENTIFIER { + return Err(derp::Error::WrongValue); + } + + // There might be an optional public key as the final value, but we're not + // interested in parsing that. Read it to ensure `input.read_all` doesn't fail + // with unused bytes error. + let _ = derp::read_tag_and_get_value(input); + + return Ok((SECP256K1_TAG, raw_bytes)); + } + + Err(derp::Error::WrongValue) + }) + })?; + + match key_type_tag { + SYSTEM_TAG => Err(Error::AsymmetricKey("cannot construct variant".to_string()).into()), + ED25519_TAG => SecretKey::ed25519_from_bytes(raw_bytes).map_err(Into::into), + SECP256K1_TAG => SecretKey::secp256k1_from_bytes(raw_bytes).map_err(Into::into), + _ => Err(Error::AsymmetricKey("unknown type tag".to_string()).into()), + } + } + + /// PEM encodes a key. 
+ #[cfg(any(feature = "std", test))] + pub fn to_pem(&self) -> Result { + let tag = match self { + SecretKey::System => return Err(Error::System(String::from("to_pem")).into()), + SecretKey::Ed25519(_) => ED25519_PEM_SECRET_KEY_TAG.to_string(), + SecretKey::Secp256k1(_) => SECP256K1_PEM_SECRET_KEY_TAG.to_string(), + }; + let contents = self.to_der()?; + let pem = Pem { tag, contents }; + Ok(pem::encode(&pem)) + } + + /// Decodes a key from a PEM-encoded slice. + #[cfg(any(feature = "std", test))] + pub fn from_pem>(input: T) -> Result { + let pem = pem::parse(input)?; + + let secret_key = Self::from_der(&pem.contents)?; + + let bad_tag = |expected_tag: &str| { + ErrorExt::FromPem(format!( + "invalid tag: expected {}, got {}", + expected_tag, pem.tag + )) + }; + + match secret_key { + SecretKey::System => return Err(Error::System(String::from("from_pem")).into()), + SecretKey::Ed25519(_) => { + if pem.tag != ED25519_PEM_SECRET_KEY_TAG { + return Err(bad_tag(ED25519_PEM_SECRET_KEY_TAG)); + } + } + SecretKey::Secp256k1(_) => { + if pem.tag != SECP256K1_PEM_SECRET_KEY_TAG { + return Err(bad_tag(SECP256K1_PEM_SECRET_KEY_TAG)); + } + } + } + + Ok(secret_key) + } + + /// Returns a random `SecretKey`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + if rng.gen() { + Self::random_ed25519(rng) + } else { + Self::random_secp256k1(rng) + } + } + + /// Returns a random Ed25519 variant of `SecretKey`. + #[cfg(any(feature = "testing", test))] + pub fn random_ed25519(rng: &mut TestRng) -> Self { + let mut bytes = [0u8; Self::ED25519_LENGTH]; + rng.fill_bytes(&mut bytes[..]); + SecretKey::ed25519_from_bytes(bytes).unwrap() + } + + /// Returns a random secp256k1 variant of `SecretKey`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random_secp256k1(rng: &mut TestRng) -> Self { + let mut bytes = [0u8; Self::SECP256K1_LENGTH]; + rng.fill_bytes(&mut bytes[..]); + SecretKey::secp256k1_from_bytes(bytes).unwrap() + } + + // This method is not intended to be used by third party crates. + #[doc(hidden)] + #[cfg(feature = "json-schema")] + pub fn example() -> &'static Self { + &ED25519_SECRET_KEY + } + + fn variant_name(&self) -> &str { + match self { + SecretKey::System => SYSTEM, + SecretKey::Ed25519(_) => ED25519, + SecretKey::Secp256k1(_) => SECP256K1, + } + } +} + +impl Debug for SecretKey { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!(formatter, "SecretKey::{}", self.variant_name()) + } +} + +impl Display for SecretKey { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + ::fmt(self, formatter) + } +} + +impl Tagged for SecretKey { + fn tag(&self) -> u8 { + match self { + SecretKey::System => SYSTEM_TAG, + SecretKey::Ed25519(_) => ED25519_TAG, + SecretKey::Secp256k1(_) => SECP256K1_TAG, + } + } +} + +/// A public asymmetric key. +#[derive(Clone, Eq, PartialEq)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[non_exhaustive] +pub enum PublicKey { + /// System public key. + System, + /// Ed25519 public key. + #[cfg_attr(feature = "datasize", data_size(skip))] + Ed25519(Ed25519PublicKey), + /// secp256k1 public key. + #[cfg_attr(feature = "datasize", data_size(skip))] + Secp256k1(Secp256k1PublicKey), +} + +impl PublicKey { + /// The length in bytes of a system public key. + pub const SYSTEM_LENGTH: usize = 0; + + /// The length in bytes of an Ed25519 public key. + pub const ED25519_LENGTH: usize = ED25519_PUBLIC_KEY_LENGTH; + + /// The length in bytes of a secp256k1 public key. + pub const SECP256K1_LENGTH: usize = SECP256K1_COMPRESSED_PUBLIC_KEY_LENGTH; + + /// Creates an `AccountHash` from a given `PublicKey` instance. 
+ pub fn to_account_hash(&self) -> AccountHash { + AccountHash::from(self) + } + + /// Returns `true` if this public key is of the `System` variant. + pub fn is_system(&self) -> bool { + matches!(self, PublicKey::System) + } + + /// Attempts to write the key bytes to the configured file path. + #[cfg(any(feature = "std", test))] + pub fn to_file>(&self, file: P) -> Result<(), ErrorExt> { + write_file(file, self.to_pem()?).map_err(ErrorExt::PublicKeySave) + } + + /// Attempts to read the key bytes from configured file path. + #[cfg(any(feature = "std", test))] + pub fn from_file>(file: P) -> Result { + let data = read_file(file).map_err(ErrorExt::PublicKeyLoad)?; + Self::from_pem(data) + } + + /// DER encodes a key. + #[cfg(any(feature = "std", test))] + pub fn to_der(&self) -> Result, ErrorExt> { + match self { + PublicKey::System => Err(Error::System(String::from("to_der")).into()), + PublicKey::Ed25519(public_key) => { + // See https://tools.ietf.org/html/rfc8410#section-10.1 + let mut encoded = vec![]; + let mut der = Der::new(&mut encoded); + der.sequence(|der| { + der.sequence(|der| der.oid(&ED25519_OBJECT_IDENTIFIER))?; + der.bit_string(0, public_key.as_ref()) + })?; + Ok(encoded) + } + PublicKey::Secp256k1(public_key) => { + // See https://www.secg.org/sec1-v2.pdf#subsection.C.3 + let mut encoded = vec![]; + let mut der = Der::new(&mut encoded); + der.sequence(|der| { + der.sequence(|der| { + der.oid(&EC_PUBLIC_KEY_OBJECT_IDENTIFIER)?; + der.oid(&SECP256K1_OBJECT_IDENTIFIER) + })?; + der.bit_string(0, public_key.to_encoded_point(true).as_ref()) + })?; + Ok(encoded) + } + } + } + + /// Decodes a key from a DER-encoded slice. 
+ #[cfg(any(feature = "std", test))] + pub fn from_der>(input: T) -> Result { + let input = Input::from(input.as_ref()); + + let mut key_type_tag = ED25519_TAG; + let raw_bytes = input.read_all(derp::Error::Read, |input| { + derp::nested(input, Tag::Sequence, |input| { + derp::nested(input, Tag::Sequence, |input| { + // Read the first value. + let object_identifier = + derp::expect_tag_and_get_value(input, Tag::Oid)?.as_slice_less_safe(); + if object_identifier == ED25519_OBJECT_IDENTIFIER { + key_type_tag = ED25519_TAG; + Ok(()) + } else if object_identifier == EC_PUBLIC_KEY_OBJECT_IDENTIFIER { + // Assert the next object identifier is the secp256k1 ID. + let next_object_identifier = + derp::expect_tag_and_get_value(input, Tag::Oid)?.as_slice_less_safe(); + if next_object_identifier != SECP256K1_OBJECT_IDENTIFIER { + return Err(derp::Error::WrongValue); + } + + key_type_tag = SECP256K1_TAG; + Ok(()) + } else { + Err(derp::Error::WrongValue) + } + })?; + Ok(derp::bit_string_with_no_unused_bits(input)?.as_slice_less_safe()) + }) + })?; + + match key_type_tag { + ED25519_TAG => PublicKey::ed25519_from_bytes(raw_bytes).map_err(Into::into), + SECP256K1_TAG => PublicKey::secp256k1_from_bytes(raw_bytes).map_err(Into::into), + _ => unreachable!(), + } + } + + /// PEM encodes a key. + #[cfg(any(feature = "std", test))] + pub fn to_pem(&self) -> Result { + let tag = match self { + PublicKey::System => return Err(Error::System(String::from("to_pem")).into()), + PublicKey::Ed25519(_) => ED25519_PEM_PUBLIC_KEY_TAG.to_string(), + PublicKey::Secp256k1(_) => SECP256K1_PEM_PUBLIC_KEY_TAG.to_string(), + }; + let contents = self.to_der()?; + let pem = Pem { tag, contents }; + Ok(pem::encode(&pem)) + } + + /// Decodes a key from a PEM-encoded slice. 
+ #[cfg(any(feature = "std", test))] + pub fn from_pem>(input: T) -> Result { + let pem = pem::parse(input)?; + let public_key = Self::from_der(&pem.contents)?; + let bad_tag = |expected_tag: &str| { + ErrorExt::FromPem(format!( + "invalid tag: expected {}, got {}", + expected_tag, pem.tag + )) + }; + match public_key { + PublicKey::System => return Err(Error::System(String::from("from_pem")).into()), + PublicKey::Ed25519(_) => { + if pem.tag != ED25519_PEM_PUBLIC_KEY_TAG { + return Err(bad_tag(ED25519_PEM_PUBLIC_KEY_TAG)); + } + } + PublicKey::Secp256k1(_) => { + if pem.tag != SECP256K1_PEM_PUBLIC_KEY_TAG { + return Err(bad_tag(SECP256K1_PEM_PUBLIC_KEY_TAG)); + } + } + } + Ok(public_key) + } + + /// Returns a random `PublicKey`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + let secret_key = SecretKey::random(rng); + PublicKey::from(&secret_key) + } + + /// Returns a random Ed25519 variant of `PublicKey`. + #[cfg(any(feature = "testing", test))] + pub fn random_ed25519(rng: &mut TestRng) -> Self { + let secret_key = SecretKey::random_ed25519(rng); + PublicKey::from(&secret_key) + } + + /// Returns a random secp256k1 variant of `PublicKey`. + #[cfg(any(feature = "testing", test))] + pub fn random_secp256k1(rng: &mut TestRng) -> Self { + let secret_key = SecretKey::random_secp256k1(rng); + PublicKey::from(&secret_key) + } + + // This method is not intended to be used by third party crates. 
+ #[doc(hidden)] + #[cfg(feature = "json-schema")] + pub fn example() -> &'static Self { + &ED25519_PUBLIC_KEY + } + + fn variant_name(&self) -> &str { + match self { + PublicKey::System => SYSTEM, + PublicKey::Ed25519(_) => ED25519, + PublicKey::Secp256k1(_) => SECP256K1, + } + } +} + +impl AsymmetricType<'_> for PublicKey { + fn system() -> Self { + PublicKey::System + } + + fn ed25519_from_bytes>(bytes: T) -> Result { + Ok(PublicKey::Ed25519(Ed25519PublicKey::try_from( + bytes.as_ref(), + )?)) + } + + fn secp256k1_from_bytes>(bytes: T) -> Result { + Ok(PublicKey::Secp256k1( + Secp256k1PublicKey::from_sec1_bytes(bytes.as_ref()) + .map_err(|_| Error::SignatureError)?, + )) + } +} + +impl From<&SecretKey> for PublicKey { + fn from(secret_key: &SecretKey) -> PublicKey { + match secret_key { + SecretKey::System => PublicKey::System, + SecretKey::Ed25519(secret_key) => PublicKey::Ed25519(secret_key.into()), + SecretKey::Secp256k1(secret_key) => PublicKey::Secp256k1(secret_key.into()), + } + } +} + +#[cfg(any(feature = "testing", test))] +impl PartialEq for SecretKey { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (Self::System, Self::System) => true, + (Self::Ed25519(k1), Self::Ed25519(k2)) => k1.to_bytes() == k2.to_bytes(), + (Self::Secp256k1(k1), Self::Secp256k1(k2)) => k1.to_bytes() == k2.to_bytes(), + _ => false, + } + } +} +#[cfg(any(feature = "testing", test))] +impl Eq for SecretKey {} + +#[cfg(any(feature = "testing", test))] +impl Ord for SecretKey { + fn cmp(&self, other: &Self) -> Ordering { + match (self, other) { + (Self::System, Self::System) => Ordering::Equal, + (Self::Ed25519(k1), Self::Ed25519(k2)) => k1.to_bytes().cmp(&k2.to_bytes()), + (Self::Secp256k1(k1), Self::Secp256k1(k2)) => k1.to_bytes().cmp(&k2.to_bytes()), + (k1, k2) => k1.variant_name().cmp(k2.variant_name()), + } + } +} +#[cfg(any(feature = "testing", test))] +impl PartialOrd for SecretKey { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } 
+} + +impl From<&PublicKey> for Vec { + fn from(public_key: &PublicKey) -> Self { + match public_key { + PublicKey::System => Vec::new(), + PublicKey::Ed25519(key) => key.to_bytes().into(), + PublicKey::Secp256k1(key) => key.to_encoded_point(true).as_ref().into(), + } + } +} + +impl From for Vec { + fn from(public_key: PublicKey) -> Self { + Vec::::from(&public_key) + } +} + +impl Debug for PublicKey { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!( + formatter, + "PublicKey::{}({})", + self.variant_name(), + base16::encode_lower(&Into::>::into(self)) + ) + } +} + +impl Display for PublicKey { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!( + formatter, + "PubKey::{}({:10})", + self.variant_name(), + HexFmt(Into::>::into(self)) + ) + } +} + +impl PartialOrd for PublicKey { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for PublicKey { + fn cmp(&self, other: &Self) -> Ordering { + let self_tag = self.tag(); + let other_tag = other.tag(); + if self_tag == other_tag { + Into::>::into(self).cmp(&Into::>::into(other)) + } else { + self_tag.cmp(&other_tag) + } + } +} + +// This implementation of `Hash` agrees with the derived `PartialEq`. It's required since +// `ed25519_dalek::PublicKey` doesn't implement `Hash`. 
+#[allow(clippy::derived_hash_with_manual_eq)] +impl Hash for PublicKey { + fn hash(&self, state: &mut H) { + self.tag().hash(state); + Into::>::into(self).hash(state); + } +} + +impl Tagged for PublicKey { + fn tag(&self) -> u8 { + match self { + PublicKey::System => SYSTEM_TAG, + PublicKey::Ed25519(_) => ED25519_TAG, + PublicKey::Secp256k1(_) => SECP256K1_TAG, + } + } +} + +impl ToBytes for PublicKey { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + TAG_LENGTH + + match self { + PublicKey::System => Self::SYSTEM_LENGTH, + PublicKey::Ed25519(_) => Self::ED25519_LENGTH, + PublicKey::Secp256k1(_) => Self::SECP256K1_LENGTH, + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + PublicKey::System => writer.push(SYSTEM_TAG), + PublicKey::Ed25519(public_key) => { + writer.push(ED25519_TAG); + writer.extend_from_slice(public_key.as_bytes()); + } + PublicKey::Secp256k1(public_key) => { + writer.push(SECP256K1_TAG); + writer.extend_from_slice(public_key.to_encoded_point(true).as_ref()); + } + } + Ok(()) + } +} + +impl FromBytes for PublicKey { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + SYSTEM_TAG => Ok((PublicKey::System, remainder)), + ED25519_TAG => { + let (raw_bytes, remainder): ([u8; Self::ED25519_LENGTH], _) = + FromBytes::from_bytes(remainder)?; + let public_key = Self::ed25519_from_bytes(raw_bytes) + .map_err(|_error| bytesrepr::Error::Formatting)?; + Ok((public_key, remainder)) + } + SECP256K1_TAG => { + let (raw_bytes, remainder): ([u8; Self::SECP256K1_LENGTH], _) = + FromBytes::from_bytes(remainder)?; + let public_key = Self::secp256k1_from_bytes(raw_bytes) + .map_err(|_error| bytesrepr::Error::Formatting)?; + Ok((public_key, remainder)) + } + _ => 
Err(bytesrepr::Error::Formatting), + } + } +} + +impl Serialize for PublicKey { + fn serialize(&self, serializer: S) -> Result { + detail::serialize(self, serializer) + } +} + +impl<'de> Deserialize<'de> for PublicKey { + fn deserialize>(deserializer: D) -> Result { + detail::deserialize(deserializer) + } +} + +#[cfg(feature = "json-schema")] +impl JsonSchema for PublicKey { + fn schema_name() -> String { + String::from("PublicKey") + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let schema = gen.subschema_for::(); + let mut schema_object = schema.into_object(); + schema_object.metadata().description = Some( + "Hex-encoded cryptographic public key, including the algorithm tag prefix.".to_string(), + ); + schema_object.metadata().examples = vec![ + json!({ + "name": "SystemPublicKey", + "description": "A pseudo public key, used for example when the system proposes an \ + immediate switch block after a network upgrade rather than a specific validator. \ + Its hex-encoded value is always '00', as is the corresponding pseudo signature's", + "value": "00" + }), + json!({ + "name": "Ed25519PublicKey", + "description": "An Ed25519 public key. Its hex-encoded value begins '01' and is \ + followed by 64 characters", + "value": "018a88e3dd7409f195fd52db2d3cba5d72ca6709bf1d94121bf3748801b40f6f5c" + }), + json!({ + "name": "Secp256k1PublicKey", + "description": "A secp256k1 public key. Its hex-encoded value begins '02' and is \ + followed by 66 characters", + "value": "0203408e9526316fd1f8def480dd45b2cc72ffd732771c9ceb5d92ffa4051e6ee084" + }), + ]; + schema_object.into() + } +} + +impl CLTyped for PublicKey { + fn cl_type() -> CLType { + CLType::PublicKey + } +} + +/// A signature of given data. +#[derive(Clone, Copy)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[non_exhaustive] +pub enum Signature { + /// System signature. Cannot be verified. + System, + /// Ed25519 signature. 
+ #[cfg_attr(feature = "datasize", data_size(skip))] + Ed25519(Ed25519Signature), + /// Secp256k1 signature. + #[cfg_attr(feature = "datasize", data_size(skip))] + Secp256k1(Secp256k1Signature), +} + +impl Signature { + /// The length in bytes of a system signature, + pub const SYSTEM_LENGTH: usize = 0; + + /// The length in bytes of an Ed25519 signature, + pub const ED25519_LENGTH: usize = ED25519_SIGNATURE_LENGTH; + + /// The length in bytes of a secp256k1 signature + pub const SECP256K1_LENGTH: usize = SECP256K1_SIGNATURE_LENGTH; + + /// Constructs a new Ed25519 variant from a byte array. + pub fn ed25519(bytes: [u8; Self::ED25519_LENGTH]) -> Result { + let signature = Ed25519Signature::from_bytes(&bytes); + Ok(Signature::Ed25519(signature)) + } + + /// Constructs a new secp256k1 variant from a byte array. + pub fn secp256k1(bytes: [u8; Self::SECP256K1_LENGTH]) -> Result { + let signature = Secp256k1Signature::try_from(&bytes[..]).map_err(|_| { + Error::AsymmetricKey(format!( + "failed to construct secp256k1 signature from {:?}", + &bytes[..] 
+ )) + })?; + + Ok(Signature::Secp256k1(signature)) + } + + fn variant_name(&self) -> &str { + match self { + Signature::System => SYSTEM, + Signature::Ed25519(_) => ED25519, + Signature::Secp256k1(_) => SECP256K1, + } + } +} + +impl AsymmetricType<'_> for Signature { + fn system() -> Self { + Signature::System + } + + fn ed25519_from_bytes>(bytes: T) -> Result { + let signature = Ed25519Signature::try_from(bytes.as_ref()).map_err(|_| { + Error::AsymmetricKey(format!( + "failed to construct Ed25519 signature from {:?}", + bytes.as_ref() + )) + })?; + Ok(Signature::Ed25519(signature)) + } + + fn secp256k1_from_bytes>(bytes: T) -> Result { + let signature = Secp256k1Signature::try_from(bytes.as_ref()).map_err(|_| { + Error::AsymmetricKey(format!( + "failed to construct secp256k1 signature from {:?}", + bytes.as_ref() + )) + })?; + Ok(Signature::Secp256k1(signature)) + } +} + +impl Debug for Signature { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!( + formatter, + "Signature::{}({})", + self.variant_name(), + base16::encode_lower(&Into::>::into(*self)) + ) + } +} + +impl Display for Signature { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!( + formatter, + "Sig::{}({:10})", + self.variant_name(), + HexFmt(Into::>::into(*self)) + ) + } +} + +impl PartialOrd for Signature { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for Signature { + fn cmp(&self, other: &Self) -> Ordering { + let self_tag = self.tag(); + let other_tag = other.tag(); + if self_tag == other_tag { + Into::>::into(*self).cmp(&Into::>::into(*other)) + } else { + self_tag.cmp(&other_tag) + } + } +} + +impl PartialEq for Signature { + fn eq(&self, other: &Self) -> bool { + self.tag() == other.tag() && Into::>::into(*self) == Into::>::into(*other) + } +} + +impl Eq for Signature {} + +impl Hash for Signature { + fn hash(&self, state: &mut H) { + self.tag().hash(state); + Into::>::into(*self).hash(state); + } 
+} + +impl Tagged for Signature { + fn tag(&self) -> u8 { + match self { + Signature::System => SYSTEM_TAG, + Signature::Ed25519(_) => ED25519_TAG, + Signature::Secp256k1(_) => SECP256K1_TAG, + } + } +} + +impl ToBytes for Signature { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + TAG_LENGTH + + match self { + Signature::System => Self::SYSTEM_LENGTH, + Signature::Ed25519(_) => Self::ED25519_LENGTH, + Signature::Secp256k1(_) => Self::SECP256K1_LENGTH, + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + Signature::System => { + writer.push(SYSTEM_TAG); + } + Signature::Ed25519(signature) => { + writer.push(ED25519_TAG); + writer.extend(signature.to_bytes()); + } + Signature::Secp256k1(signature) => { + writer.push(SECP256K1_TAG); + writer.extend_from_slice(&signature.to_bytes()); + } + } + Ok(()) + } +} + +impl FromBytes for Signature { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + SYSTEM_TAG => Ok((Signature::System, remainder)), + ED25519_TAG => { + let (raw_bytes, remainder): ([u8; Self::ED25519_LENGTH], _) = + FromBytes::from_bytes(remainder)?; + let public_key = + Self::ed25519(raw_bytes).map_err(|_error| bytesrepr::Error::Formatting)?; + Ok((public_key, remainder)) + } + SECP256K1_TAG => { + let (raw_bytes, remainder): ([u8; Self::SECP256K1_LENGTH], _) = + FromBytes::from_bytes(remainder)?; + let public_key = + Self::secp256k1(raw_bytes).map_err(|_error| bytesrepr::Error::Formatting)?; + Ok((public_key, remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +impl Serialize for Signature { + fn serialize(&self, serializer: S) -> Result { + detail::serialize(self, serializer) + } +} + +impl<'de> Deserialize<'de> for Signature { + fn 
deserialize>(deserializer: D) -> Result { + detail::deserialize(deserializer) + } +} + +impl From<&Signature> for Vec { + fn from(signature: &Signature) -> Self { + match signature { + Signature::System => Vec::new(), + Signature::Ed25519(signature) => signature.to_bytes().into(), + Signature::Secp256k1(signature) => (*signature.to_bytes()).into(), + } + } +} + +impl From for Vec { + fn from(signature: Signature) -> Self { + Vec::::from(&signature) + } +} + +#[cfg(feature = "json-schema")] +impl JsonSchema for Signature { + fn schema_name() -> String { + String::from("Signature") + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let schema = gen.subschema_for::(); + let mut schema_object = schema.into_object(); + schema_object.metadata().description = Some( + "Hex-encoded cryptographic signature, including the algorithm tag prefix.".to_string(), + ); + schema_object.into() + } +} + +/// Signs the given message using the given key pair. +pub fn sign>( + message: T, + secret_key: &SecretKey, + public_key: &PublicKey, +) -> Signature { + match (secret_key, public_key) { + (SecretKey::System, PublicKey::System) => { + panic!("cannot create signature with system keys",) + } + (SecretKey::Ed25519(secret_key), PublicKey::Ed25519(_public_key)) => { + let signature = secret_key.sign(message.as_ref()); + Signature::Ed25519(signature) + } + (SecretKey::Secp256k1(secret_key), PublicKey::Secp256k1(_public_key)) => { + let signer = secret_key; + let signature: Secp256k1Signature = signer + .try_sign(message.as_ref()) + .expect("should create signature"); + Signature::Secp256k1(signature) + } + _ => panic!("secret and public key types must match"), + } +} + +/// Verifies the signature of the given message against the given public key. 
+pub fn verify>( + message: T, + signature: &Signature, + public_key: &PublicKey, +) -> Result<(), Error> { + match (signature, public_key) { + (Signature::System, _) => Err(Error::AsymmetricKey(String::from( + "signatures based on the system key cannot be verified", + ))), + (Signature::Ed25519(signature), PublicKey::Ed25519(public_key)) => public_key + .verify_strict(message.as_ref(), signature) + .map_err(|_| Error::AsymmetricKey(String::from("failed to verify Ed25519 signature"))), + (Signature::Secp256k1(signature), PublicKey::Secp256k1(public_key)) => { + let verifier: &Secp256k1PublicKey = public_key; + verifier + .verify(message.as_ref(), signature) + .map_err(|error| { + Error::AsymmetricKey(format!("failed to verify secp256k1 signature: {}", error)) + }) + } + _ => Err(Error::AsymmetricKey(format!( + "type mismatch between {} and {}", + signature, public_key + ))), + } +} + +/// Generates an Ed25519 keypair using the operating system's cryptographically secure random number +/// generator. +#[cfg(any(feature = "std", test))] +pub fn generate_ed25519_keypair() -> (SecretKey, PublicKey) { + let secret_key = SecretKey::generate_ed25519().unwrap(); + let public_key = PublicKey::from(&secret_key); + (secret_key, public_key) +} + +mod detail { + use alloc::{string::String, vec::Vec}; + + use serde::{de::Error as _deError, Deserialize, Deserializer, Serialize, Serializer}; + + use super::{PublicKey, Signature}; + use crate::AsymmetricType; + + /// Used to serialize and deserialize asymmetric key types where the (de)serializer is not a + /// human-readable type. + /// + /// The wrapped contents are the result of calling `t_as_ref()` on the type. 
+ #[derive(Serialize, Deserialize)] + pub(super) enum AsymmetricTypeAsBytes { + System, + Ed25519(Vec), + Secp256k1(Vec), + } + + impl From<&PublicKey> for AsymmetricTypeAsBytes { + fn from(public_key: &PublicKey) -> Self { + match public_key { + PublicKey::System => AsymmetricTypeAsBytes::System, + key @ PublicKey::Ed25519(_) => AsymmetricTypeAsBytes::Ed25519(key.into()), + key @ PublicKey::Secp256k1(_) => AsymmetricTypeAsBytes::Secp256k1(key.into()), + } + } + } + + impl From<&Signature> for AsymmetricTypeAsBytes { + fn from(signature: &Signature) -> Self { + match signature { + Signature::System => AsymmetricTypeAsBytes::System, + key @ Signature::Ed25519(_) => AsymmetricTypeAsBytes::Ed25519(key.into()), + key @ Signature::Secp256k1(_) => AsymmetricTypeAsBytes::Secp256k1(key.into()), + } + } + } + + pub(super) fn serialize<'a, T, S>(value: &'a T, serializer: S) -> Result + where + T: AsymmetricType<'a>, + Vec: From<&'a T>, + S: Serializer, + AsymmetricTypeAsBytes: From<&'a T>, + { + if serializer.is_human_readable() { + return value.to_hex().serialize(serializer); + } + + AsymmetricTypeAsBytes::from(value).serialize(serializer) + } + + pub(super) fn deserialize<'a, 'de, T, D>(deserializer: D) -> Result + where + T: AsymmetricType<'a>, + Vec: From<&'a T>, + D: Deserializer<'de>, + { + if deserializer.is_human_readable() { + let hex_string = String::deserialize(deserializer)?; + let value = T::from_hex(hex_string.as_bytes()).map_err(D::Error::custom)?; + return Ok(value); + } + + let as_bytes = AsymmetricTypeAsBytes::deserialize(deserializer)?; + match as_bytes { + AsymmetricTypeAsBytes::System => Ok(T::system()), + AsymmetricTypeAsBytes::Ed25519(raw_bytes) => { + T::ed25519_from_bytes(raw_bytes).map_err(D::Error::custom) + } + AsymmetricTypeAsBytes::Secp256k1(raw_bytes) => { + T::secp256k1_from_bytes(raw_bytes).map_err(D::Error::custom) + } + } + } +} diff --git a/casper_types_ver_2_0/src/crypto/asymmetric_key/gens.rs 
b/casper_types_ver_2_0/src/crypto/asymmetric_key/gens.rs new file mode 100644 index 00000000..2316133a --- /dev/null +++ b/casper_types_ver_2_0/src/crypto/asymmetric_key/gens.rs @@ -0,0 +1,44 @@ +//! Generators for asymmetric key types + +use core::convert::TryInto; + +use proptest::{ + collection, + prelude::{Arbitrary, Just, Strategy}, + prop_oneof, +}; + +use crate::{crypto::SecretKey, PublicKey}; + +/// Creates an arbitrary [`PublicKey`] +pub fn public_key_arb() -> impl Strategy { + prop_oneof![ + Just(PublicKey::System), + collection::vec(::arbitrary(), SecretKey::ED25519_LENGTH).prop_map(|bytes| { + let byte_array: [u8; SecretKey::ED25519_LENGTH] = bytes.try_into().unwrap(); + let secret_key = SecretKey::ed25519_from_bytes(byte_array).unwrap(); + PublicKey::from(&secret_key) + }), + collection::vec(::arbitrary(), SecretKey::SECP256K1_LENGTH).prop_map(|bytes| { + let bytes_array: [u8; SecretKey::SECP256K1_LENGTH] = bytes.try_into().unwrap(); + let secret_key = SecretKey::secp256k1_from_bytes(bytes_array).unwrap(); + PublicKey::from(&secret_key) + }) + ] +} + +/// Returns a strategy for creating random [`PublicKey`] instances but NOT system variant. 
+pub fn public_key_arb_no_system() -> impl Strategy { + prop_oneof![ + collection::vec(::arbitrary(), SecretKey::ED25519_LENGTH).prop_map(|bytes| { + let byte_array: [u8; SecretKey::ED25519_LENGTH] = bytes.try_into().unwrap(); + let secret_key = SecretKey::ed25519_from_bytes(byte_array).unwrap(); + PublicKey::from(&secret_key) + }), + collection::vec(::arbitrary(), SecretKey::SECP256K1_LENGTH).prop_map(|bytes| { + let bytes_array: [u8; SecretKey::SECP256K1_LENGTH] = bytes.try_into().unwrap(); + let secret_key = SecretKey::secp256k1_from_bytes(bytes_array).unwrap(); + PublicKey::from(&secret_key) + }) + ] +} diff --git a/casper_types_ver_2_0/src/crypto/asymmetric_key/tests.rs b/casper_types_ver_2_0/src/crypto/asymmetric_key/tests.rs new file mode 100644 index 00000000..545b8dad --- /dev/null +++ b/casper_types_ver_2_0/src/crypto/asymmetric_key/tests.rs @@ -0,0 +1,861 @@ +use std::{ + cmp::Ordering, + collections::hash_map::DefaultHasher, + hash::{Hash, Hasher}, + iter, +}; + +use rand::RngCore; + +use k256::elliptic_curve::sec1::ToEncodedPoint; +use openssl::pkey::{PKey, Private, Public}; + +use super::*; +use crate::{ + bytesrepr, checksummed_hex, crypto::SecretKey, testing::TestRng, AsymmetricType, PublicKey, + Tagged, +}; + +#[test] +fn can_construct_ed25519_keypair_from_zeroes() { + let bytes = [0; SecretKey::ED25519_LENGTH]; + let secret_key = SecretKey::ed25519_from_bytes(bytes).unwrap(); + let _public_key: PublicKey = (&secret_key).into(); +} + +#[test] +#[should_panic] +fn cannot_construct_secp256k1_keypair_from_zeroes() { + let bytes = [0; SecretKey::SECP256K1_LENGTH]; + let secret_key = SecretKey::secp256k1_from_bytes(bytes).unwrap(); + let _public_key: PublicKey = (&secret_key).into(); +} + +#[test] +fn can_construct_ed25519_keypair_from_ones() { + let bytes = [1; SecretKey::ED25519_LENGTH]; + let secret_key = SecretKey::ed25519_from_bytes(bytes).unwrap(); + let _public_key: PublicKey = (&secret_key).into(); +} + +#[test] +fn 
can_construct_secp256k1_keypair_from_ones() { + let bytes = [1; SecretKey::SECP256K1_LENGTH]; + let secret_key = SecretKey::secp256k1_from_bytes(bytes).unwrap(); + let _public_key: PublicKey = (&secret_key).into(); +} + +type OpenSSLSecretKey = PKey; +type OpenSSLPublicKey = PKey; + +// `SecretKey` does not implement `PartialEq`, so just compare derived `PublicKey`s. +fn assert_secret_keys_equal(lhs: &SecretKey, rhs: &SecretKey) { + assert_eq!(PublicKey::from(lhs), PublicKey::from(rhs)); +} + +fn secret_key_der_roundtrip(secret_key: SecretKey) { + let der_encoded = secret_key.to_der().unwrap(); + let decoded = SecretKey::from_der(&der_encoded).unwrap(); + assert_secret_keys_equal(&secret_key, &decoded); + assert_eq!(secret_key.tag(), decoded.tag()); + + // Ensure malformed encoded version fails to decode. + SecretKey::from_der(&der_encoded[1..]).unwrap_err(); +} + +fn secret_key_pem_roundtrip(secret_key: SecretKey) { + let pem_encoded = secret_key.to_pem().unwrap(); + let decoded = SecretKey::from_pem(pem_encoded.as_bytes()).unwrap(); + assert_secret_keys_equal(&secret_key, &decoded); + assert_eq!(secret_key.tag(), decoded.tag()); + + // Check PEM-encoded can be decoded by openssl. + let _ = OpenSSLSecretKey::private_key_from_pem(pem_encoded.as_bytes()).unwrap(); + + // Ensure malformed encoded version fails to decode. 
+ SecretKey::from_pem(&pem_encoded[1..]).unwrap_err(); +} + +fn known_secret_key_to_pem(expected_key: &SecretKey, known_key_pem: &str, expected_tag: u8) { + let decoded = SecretKey::from_pem(known_key_pem.as_bytes()).unwrap(); + assert_secret_keys_equal(expected_key, &decoded); + assert_eq!(expected_tag, decoded.tag()); +} + +fn secret_key_file_roundtrip(secret_key: SecretKey) { + let tempdir = tempfile::tempdir().unwrap(); + let path = tempdir.path().join("test_secret_key.pem"); + + secret_key.to_file(&path).unwrap(); + let decoded = SecretKey::from_file(&path).unwrap(); + assert_secret_keys_equal(&secret_key, &decoded); + assert_eq!(secret_key.tag(), decoded.tag()); +} + +fn public_key_serialization_roundtrip(public_key: PublicKey) { + // Try to/from bincode. + let serialized = bincode::serialize(&public_key).unwrap(); + let deserialized = bincode::deserialize(&serialized).unwrap(); + assert_eq!(public_key, deserialized); + assert_eq!(public_key.tag(), deserialized.tag()); + + // Try to/from JSON. + let serialized = serde_json::to_vec_pretty(&public_key).unwrap(); + let deserialized = serde_json::from_slice(&serialized).unwrap(); + assert_eq!(public_key, deserialized); + assert_eq!(public_key.tag(), deserialized.tag()); + + // Using bytesrepr. + bytesrepr::test_serialization_roundtrip(&public_key); +} + +fn public_key_der_roundtrip(public_key: PublicKey) { + let der_encoded = public_key.to_der().unwrap(); + let decoded = PublicKey::from_der(&der_encoded).unwrap(); + assert_eq!(public_key, decoded); + + // Check DER-encoded can be decoded by openssl. + let _ = OpenSSLPublicKey::public_key_from_der(&der_encoded).unwrap(); + + // Ensure malformed encoded version fails to decode. 
+ PublicKey::from_der(&der_encoded[1..]).unwrap_err(); +} + +fn public_key_pem_roundtrip(public_key: PublicKey) { + let pem_encoded = public_key.to_pem().unwrap(); + let decoded = PublicKey::from_pem(pem_encoded.as_bytes()).unwrap(); + assert_eq!(public_key, decoded); + assert_eq!(public_key.tag(), decoded.tag()); + + // Check PEM-encoded can be decoded by openssl. + let _ = OpenSSLPublicKey::public_key_from_pem(pem_encoded.as_bytes()).unwrap(); + + // Ensure malformed encoded version fails to decode. + PublicKey::from_pem(&pem_encoded[1..]).unwrap_err(); +} + +fn known_public_key_to_pem(known_key_hex: &str, known_key_pem: &str) { + let key_bytes = checksummed_hex::decode(known_key_hex).unwrap(); + let decoded = PublicKey::from_pem(known_key_pem.as_bytes()).unwrap(); + assert_eq!(key_bytes, Into::>::into(decoded)); +} + +fn public_key_file_roundtrip(public_key: PublicKey) { + let tempdir = tempfile::tempdir().unwrap(); + let path = tempdir.path().join("test_public_key.pem"); + + public_key.to_file(&path).unwrap(); + let decoded = PublicKey::from_file(&path).unwrap(); + assert_eq!(public_key, decoded); +} + +fn public_key_hex_roundtrip(public_key: PublicKey) { + let hex_encoded = public_key.to_hex(); + let decoded = PublicKey::from_hex(&hex_encoded).unwrap(); + assert_eq!(public_key, decoded); + assert_eq!(public_key.tag(), decoded.tag()); + + // Ensure malformed encoded version fails to decode. + PublicKey::from_hex(&hex_encoded[..1]).unwrap_err(); + PublicKey::from_hex(&hex_encoded[1..]).unwrap_err(); +} + +fn signature_serialization_roundtrip(signature: Signature) { + // Try to/from bincode. + let serialized = bincode::serialize(&signature).unwrap(); + let deserialized: Signature = bincode::deserialize(&serialized).unwrap(); + assert_eq!(signature, deserialized); + assert_eq!(signature.tag(), deserialized.tag()); + + // Try to/from JSON. 
+ let serialized = serde_json::to_vec_pretty(&signature).unwrap(); + let deserialized = serde_json::from_slice(&serialized).unwrap(); + assert_eq!(signature, deserialized); + assert_eq!(signature.tag(), deserialized.tag()); + + // Try to/from using bytesrepr. + let serialized = bytesrepr::serialize(signature).unwrap(); + let deserialized = bytesrepr::deserialize(serialized).unwrap(); + assert_eq!(signature, deserialized); + assert_eq!(signature.tag(), deserialized.tag()) +} + +fn signature_hex_roundtrip(signature: Signature) { + let hex_encoded = signature.to_hex(); + let decoded = Signature::from_hex(hex_encoded.as_bytes()).unwrap(); + assert_eq!(signature, decoded); + assert_eq!(signature.tag(), decoded.tag()); + + // Ensure malformed encoded version fails to decode. + Signature::from_hex(&hex_encoded[..1]).unwrap_err(); + Signature::from_hex(&hex_encoded[1..]).unwrap_err(); +} + +fn hash(data: &T) -> u64 { + let mut hasher = DefaultHasher::new(); + data.hash(&mut hasher); + hasher.finish() +} + +fn check_ord_and_hash(low: T, high: T) { + let low_copy = low.clone(); + + assert_eq!(hash(&low), hash(&low_copy)); + assert_ne!(hash(&low), hash(&high)); + + assert_eq!(Ordering::Less, low.cmp(&high)); + assert_eq!(Some(Ordering::Less), low.partial_cmp(&high)); + + assert_eq!(Ordering::Greater, high.cmp(&low)); + assert_eq!(Some(Ordering::Greater), high.partial_cmp(&low)); + + assert_eq!(Ordering::Equal, low.cmp(&low_copy)); + assert_eq!(Some(Ordering::Equal), low.partial_cmp(&low_copy)); +} + +mod system { + use std::path::Path; + + use super::{sign, verify}; + use crate::crypto::{AsymmetricType, PublicKey, SecretKey, Signature}; + + #[test] + fn secret_key_to_der_should_error() { + assert!(SecretKey::system().to_der().is_err()); + } + + #[test] + fn secret_key_to_pem_should_error() { + assert!(SecretKey::system().to_pem().is_err()); + } + + #[test] + fn secret_key_to_file_should_error() { + assert!(SecretKey::system().to_file(Path::new("/dev/null")).is_err()); + } + + 
#[test] + fn public_key_serialization_roundtrip() { + super::public_key_serialization_roundtrip(PublicKey::system()); + } + + #[test] + fn public_key_to_der_should_error() { + assert!(PublicKey::system().to_der().is_err()); + } + + #[test] + fn public_key_to_pem_should_error() { + assert!(PublicKey::system().to_pem().is_err()); + } + + #[test] + fn public_key_to_file_should_error() { + assert!(PublicKey::system().to_file(Path::new("/dev/null")).is_err()); + } + + #[test] + fn public_key_to_and_from_hex() { + super::public_key_hex_roundtrip(PublicKey::system()); + } + + #[test] + #[should_panic] + fn sign_should_panic() { + sign([], &SecretKey::system(), &PublicKey::system()); + } + + #[test] + fn signature_to_and_from_hex() { + super::signature_hex_roundtrip(Signature::system()); + } + + #[test] + fn public_key_to_account_hash() { + assert_ne!( + PublicKey::system().to_account_hash().as_ref(), + Into::>::into(PublicKey::system()) + ); + } + + #[test] + fn verify_should_error() { + assert!(verify([], &Signature::system(), &PublicKey::system()).is_err()); + } + + #[test] + fn bytesrepr_roundtrip_signature() { + crate::bytesrepr::test_serialization_roundtrip(&Signature::system()); + } +} + +mod ed25519 { + use rand::Rng; + + use super::*; + use crate::ED25519_TAG; + + const SECRET_KEY_LENGTH: usize = SecretKey::ED25519_LENGTH; + const PUBLIC_KEY_LENGTH: usize = PublicKey::ED25519_LENGTH; + const SIGNATURE_LENGTH: usize = Signature::ED25519_LENGTH; + + #[test] + fn secret_key_from_bytes() { + // Secret key should be `SecretKey::ED25519_LENGTH` bytes. + let bytes = [0; SECRET_KEY_LENGTH + 1]; + assert!(SecretKey::ed25519_from_bytes(&bytes[..]).is_err()); + assert!(SecretKey::ed25519_from_bytes(&bytes[2..]).is_err()); + + // Check the same bytes but of the right length succeeds. 
+ assert!(SecretKey::ed25519_from_bytes(&bytes[1..]).is_ok()); + } + + #[test] + fn secret_key_to_and_from_der() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_ed25519(&mut rng); + let der_encoded = secret_key.to_der().unwrap(); + secret_key_der_roundtrip(secret_key); + + // Check DER-encoded can be decoded by openssl. + let _ = OpenSSLSecretKey::private_key_from_der(&der_encoded).unwrap(); + } + + #[test] + fn secret_key_to_and_from_pem() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_ed25519(&mut rng); + secret_key_pem_roundtrip(secret_key); + } + + #[test] + fn known_secret_key_to_pem() { + // Example values taken from https://tools.ietf.org/html/rfc8410#section-10.3 + const KNOWN_KEY_PEM: &str = r#"-----BEGIN PRIVATE KEY----- +MC4CAQAwBQYDK2VwBCIEINTuctv5E1hK1bbY8fdp+K06/nwoy/HU++CXqI9EdVhC +-----END PRIVATE KEY-----"#; + let key_bytes = + base16::decode("d4ee72dbf913584ad5b6d8f1f769f8ad3afe7c28cbf1d4fbe097a88f44755842") + .unwrap(); + let expected_key = SecretKey::ed25519_from_bytes(key_bytes).unwrap(); + super::known_secret_key_to_pem(&expected_key, KNOWN_KEY_PEM, ED25519_TAG); + } + + #[test] + fn secret_key_to_and_from_file() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_ed25519(&mut rng); + secret_key_file_roundtrip(secret_key); + } + + #[test] + fn public_key_serialization_roundtrip() { + let mut rng = TestRng::new(); + let public_key = PublicKey::random_ed25519(&mut rng); + super::public_key_serialization_roundtrip(public_key); + } + + #[test] + fn public_key_from_bytes() { + // Public key should be `PublicKey::ED25519_LENGTH` bytes. Create vec with an extra + // byte. 
+ let mut rng = TestRng::new(); + let public_key = PublicKey::random_ed25519(&mut rng); + let bytes: Vec = iter::once(rng.gen()) + .chain(Into::>::into(public_key)) + .collect::>(); + + assert!(PublicKey::ed25519_from_bytes(&bytes[..]).is_err()); + assert!(PublicKey::ed25519_from_bytes(&bytes[2..]).is_err()); + + // Check the same bytes but of the right length succeeds. + assert!(PublicKey::ed25519_from_bytes(&bytes[1..]).is_ok()); + } + + #[test] + fn public_key_to_and_from_der() { + let mut rng = TestRng::new(); + let public_key = PublicKey::random_ed25519(&mut rng); + public_key_der_roundtrip(public_key); + } + + #[test] + fn public_key_to_and_from_pem() { + let mut rng = TestRng::new(); + let public_key = PublicKey::random_ed25519(&mut rng); + public_key_pem_roundtrip(public_key); + } + + #[test] + fn known_public_key_to_pem() { + // Example values taken from https://tools.ietf.org/html/rfc8410#section-10.1 + const KNOWN_KEY_HEX: &str = + "19bf44096984cdfe8541bac167dc3b96c85086aa30b6b6cb0c5c38ad703166e1"; + const KNOWN_KEY_PEM: &str = r#"-----BEGIN PUBLIC KEY----- +MCowBQYDK2VwAyEAGb9ECWmEzf6FQbrBZ9w7lshQhqowtrbLDFw4rXAxZuE= +-----END PUBLIC KEY-----"#; + super::known_public_key_to_pem(KNOWN_KEY_HEX, KNOWN_KEY_PEM); + } + + #[test] + fn public_key_to_and_from_file() { + let mut rng = TestRng::new(); + let public_key = PublicKey::random_ed25519(&mut rng); + public_key_file_roundtrip(public_key); + } + + #[test] + fn public_key_to_and_from_hex() { + let mut rng = TestRng::new(); + let public_key = PublicKey::random_ed25519(&mut rng); + public_key_hex_roundtrip(public_key); + } + + #[test] + fn signature_serialization_roundtrip() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_ed25519(&mut rng); + let public_key = PublicKey::from(&secret_key); + let data = b"data"; + let signature = sign(data, &secret_key, &public_key); + super::signature_serialization_roundtrip(signature); + } + + #[test] + fn signature_from_bytes() { + // Signature should 
be `Signature::ED25519_LENGTH` bytes. + let bytes = [2; SIGNATURE_LENGTH + 1]; + assert!(Signature::ed25519_from_bytes(&bytes[..]).is_err()); + assert!(Signature::ed25519_from_bytes(&bytes[2..]).is_err()); + + // Check the same bytes but of the right length succeeds. + assert!(Signature::ed25519_from_bytes(&bytes[1..]).is_ok()); + } + + #[test] + fn signature_key_to_and_from_hex() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_ed25519(&mut rng); + let public_key = PublicKey::from(&secret_key); + let data = b"data"; + let signature = sign(data, &secret_key, &public_key); + signature_hex_roundtrip(signature); + } + + #[test] + fn public_key_traits() { + let public_key_low = PublicKey::ed25519_from_bytes([1; PUBLIC_KEY_LENGTH]).unwrap(); + let public_key_high = PublicKey::ed25519_from_bytes([3; PUBLIC_KEY_LENGTH]).unwrap(); + check_ord_and_hash(public_key_low, public_key_high) + } + + #[test] + fn public_key_to_account_hash() { + let public_key_high = PublicKey::ed25519_from_bytes([255; PUBLIC_KEY_LENGTH]).unwrap(); + assert_ne!( + public_key_high.to_account_hash().as_ref(), + Into::>::into(public_key_high) + ); + } + + #[test] + fn signature_traits() { + let signature_low = Signature::ed25519([1; SIGNATURE_LENGTH]).unwrap(); + let signature_high = Signature::ed25519([3; SIGNATURE_LENGTH]).unwrap(); + check_ord_and_hash(signature_low, signature_high) + } + + #[test] + fn sign_and_verify() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_ed25519(&mut rng); + + let public_key = PublicKey::from(&secret_key); + let other_public_key = PublicKey::random_ed25519(&mut rng); + let wrong_type_public_key = PublicKey::random_secp256k1(&mut rng); + + let message = b"message"; + let signature = sign(message, &secret_key, &public_key); + + assert!(verify(message, &signature, &public_key).is_ok()); + assert!(verify(message, &signature, &other_public_key).is_err()); + assert!(verify(message, &signature, &wrong_type_public_key).is_err()); + 
assert!(verify(&message[1..], &signature, &public_key).is_err()); + } + + #[test] + fn bytesrepr_roundtrip_signature() { + let mut rng = TestRng::new(); + let ed25519_secret_key = SecretKey::random_ed25519(&mut rng); + let public_key = PublicKey::from(&ed25519_secret_key); + let data = b"data"; + let signature = sign(data, &ed25519_secret_key, &public_key); + bytesrepr::test_serialization_roundtrip(&signature); + } + + #[test] + fn validate_known_signature() { + // In the event that this test fails, we need to consider pinning the version of the + // `ed25519-dalek` crate to maintain backwards compatibility with existing data on the + // Casper network. + + // Values taken from: + // https://github.com/dalek-cryptography/ed25519-dalek/blob/925eb9ea56192053c9eb93b9d30d1b9419eee128/TESTVECTORS#L62 + let secret_key_hex = "bf5ba5d6a49dd5ef7b4d5d7d3e4ecc505c01f6ccee4c54b5ef7b40af6a454140"; + let public_key_hex = "1be034f813017b900d8990af45fad5b5214b573bd303ef7a75ef4b8c5c5b9842"; + let message_hex = + "16152c2e037b1c0d3219ced8e0674aee6b57834b55106c5344625322da638ecea2fc9a424a05ee9512\ + d48fcf75dd8bd4691b3c10c28ec98ee1afa5b863d1c36795ed18105db3a9aabd9d2b4c1747adbaf1a56\ + ffcc0c533c1c0faef331cdb79d961fa39f880a1b8b1164741822efb15a7259a465bef212855751fab66\ + a897bfa211abe0ea2f2e1cd8a11d80e142cde1263eec267a3138ae1fcf4099db0ab53d64f336f4bcd7a\ + 363f6db112c0a2453051a0006f813aaf4ae948a2090619374fa58052409c28ef76225687df3cb2d1b0b\ + fb43b09f47f1232f790e6d8dea759e57942099f4c4bd3390f28afc2098244961465c643fc8b29766af2\ + bcbc5440b86e83608cfc937be98bb4827fd5e6b689adc2e26513db531076a6564396255a09975b7034d\ + ac06461b255642e3a7ed75fa9fc265011f5f6250382a84ac268d63ba64"; + let signature_hex = + "279cace6fdaf3945e3837df474b28646143747632bede93e7a66f5ca291d2c24978512ca0cb8827c8c\ + 322685bd605503a5ec94dbae61bbdcae1e49650602bc07"; + + let secret_key_bytes = base16::decode(secret_key_hex).unwrap(); + let public_key_bytes = base16::decode(public_key_hex).unwrap(); + let message_bytes = 
base16::decode(message_hex).unwrap(); + let signature_bytes = base16::decode(signature_hex).unwrap(); + + let secret_key = SecretKey::ed25519_from_bytes(secret_key_bytes).unwrap(); + let public_key = PublicKey::ed25519_from_bytes(public_key_bytes).unwrap(); + assert_eq!(public_key, PublicKey::from(&secret_key)); + + let signature = Signature::ed25519_from_bytes(signature_bytes).unwrap(); + assert_eq!(sign(&message_bytes, &secret_key, &public_key), signature); + assert!(verify(&message_bytes, &signature, &public_key).is_ok()); + } +} + +mod secp256k1 { + use rand::Rng; + + use super::*; + use crate::SECP256K1_TAG; + + const SECRET_KEY_LENGTH: usize = SecretKey::SECP256K1_LENGTH; + const SIGNATURE_LENGTH: usize = Signature::SECP256K1_LENGTH; + + #[test] + fn secret_key_from_bytes() { + // Secret key should be `SecretKey::SECP256K1_LENGTH` bytes. + // The k256 library will ensure that a byte stream of a length not equal to + // `SECP256K1_LENGTH` will fail due to an assertion internal to the library. + // We can check that invalid byte streams e.g [0;32] does not generate a valid key. + let bytes = [0; SECRET_KEY_LENGTH]; + assert!(SecretKey::secp256k1_from_bytes(&bytes[..]).is_err()); + + // Check that a valid byte stream produces a valid key + let bytes = [1; SECRET_KEY_LENGTH]; + assert!(SecretKey::secp256k1_from_bytes(&bytes[..]).is_ok()); + } + + #[test] + fn secret_key_to_and_from_der() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_secp256k1(&mut rng); + secret_key_der_roundtrip(secret_key); + } + + #[test] + fn secret_key_to_and_from_pem() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_secp256k1(&mut rng); + secret_key_pem_roundtrip(secret_key); + } + + #[test] + fn known_secret_key_to_pem() { + // Example values taken from Python client. 
+ const KNOWN_KEY_PEM: &str = r#"-----BEGIN EC PRIVATE KEY----- +MHQCAQEEIL3fqaMKAfXSK1D2PnVVbZlZ7jTv133nukq4+95s6kmcoAcGBSuBBAAK +oUQDQgAEQI6VJjFv0fje9IDdRbLMcv/XMnccnOtdkv+kBR5u4ISEAkuc2TFWQHX0 +Yj9oTB9fx9+vvQdxJOhMtu46kGo0Uw== +-----END EC PRIVATE KEY-----"#; + let key_bytes = + base16::decode("bddfa9a30a01f5d22b50f63e75556d9959ee34efd77de7ba4ab8fbde6cea499c") + .unwrap(); + let expected_key = SecretKey::secp256k1_from_bytes(key_bytes).unwrap(); + super::known_secret_key_to_pem(&expected_key, KNOWN_KEY_PEM, SECP256K1_TAG); + } + + #[test] + fn secret_key_to_and_from_file() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_secp256k1(&mut rng); + secret_key_file_roundtrip(secret_key); + } + + #[test] + fn public_key_serialization_roundtrip() { + let mut rng = TestRng::new(); + let public_key = PublicKey::random_secp256k1(&mut rng); + super::public_key_serialization_roundtrip(public_key); + } + + #[test] + fn public_key_from_bytes() { + // Public key should be `PublicKey::SECP256K1_LENGTH` bytes. Create vec with an extra + // byte. + let mut rng = TestRng::new(); + let public_key = PublicKey::random_secp256k1(&mut rng); + let bytes: Vec = iter::once(rng.gen()) + .chain(Into::>::into(public_key)) + .collect::>(); + + assert!(PublicKey::secp256k1_from_bytes(&bytes[..]).is_err()); + assert!(PublicKey::secp256k1_from_bytes(&bytes[2..]).is_err()); + + // Check the same bytes but of the right length succeeds. + assert!(PublicKey::secp256k1_from_bytes(&bytes[1..]).is_ok()); + } + + #[test] + fn public_key_to_and_from_der() { + let mut rng = TestRng::new(); + let public_key = PublicKey::random_secp256k1(&mut rng); + public_key_der_roundtrip(public_key); + } + + #[test] + fn public_key_to_and_from_pem() { + let mut rng = TestRng::new(); + let public_key = PublicKey::random_secp256k1(&mut rng); + public_key_pem_roundtrip(public_key); + } + + #[test] + fn known_public_key_to_pem() { + // Example values taken from Python client. 
+ const KNOWN_KEY_HEX: &str = + "03408e9526316fd1f8def480dd45b2cc72ffd732771c9ceb5d92ffa4051e6ee084"; + const KNOWN_KEY_PEM: &str = r#"-----BEGIN PUBLIC KEY----- +MFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEQI6VJjFv0fje9IDdRbLMcv/XMnccnOtd +kv+kBR5u4ISEAkuc2TFWQHX0Yj9oTB9fx9+vvQdxJOhMtu46kGo0Uw== +-----END PUBLIC KEY-----"#; + super::known_public_key_to_pem(KNOWN_KEY_HEX, KNOWN_KEY_PEM); + } + + #[test] + fn public_key_to_and_from_file() { + let mut rng = TestRng::new(); + let public_key = PublicKey::random_secp256k1(&mut rng); + public_key_file_roundtrip(public_key); + } + + #[test] + fn public_key_to_and_from_hex() { + let mut rng = TestRng::new(); + let public_key = PublicKey::random_secp256k1(&mut rng); + public_key_hex_roundtrip(public_key); + } + + #[test] + fn signature_serialization_roundtrip() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_secp256k1(&mut rng); + let public_key = PublicKey::from(&secret_key); + let data = b"data"; + let signature = sign(data, &secret_key, &public_key); + super::signature_serialization_roundtrip(signature); + } + + #[test] + fn bytesrepr_roundtrip_signature() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_secp256k1(&mut rng); + let public_key = PublicKey::from(&secret_key); + let data = b"data"; + let signature = sign(data, &secret_key, &public_key); + bytesrepr::test_serialization_roundtrip(&signature); + } + + #[test] + fn signature_from_bytes() { + // Signature should be `Signature::SECP256K1_LENGTH` bytes. + let bytes = [2; SIGNATURE_LENGTH + 1]; + assert!(Signature::secp256k1_from_bytes(&bytes[..]).is_err()); + assert!(Signature::secp256k1_from_bytes(&bytes[2..]).is_err()); + + // Check the same bytes but of the right length succeeds. 
+ assert!(Signature::secp256k1_from_bytes(&bytes[1..]).is_ok()); + } + + #[test] + fn signature_key_to_and_from_hex() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_secp256k1(&mut rng); + let public_key = PublicKey::from(&secret_key); + let data = b"data"; + let signature = sign(data, &secret_key, &public_key); + signature_hex_roundtrip(signature); + } + + #[test] + fn public_key_traits() { + let mut rng = TestRng::new(); + let public_key1 = PublicKey::random_secp256k1(&mut rng); + let public_key2 = PublicKey::random_secp256k1(&mut rng); + if Into::>::into(public_key1.clone()) < Into::>::into(public_key2.clone()) { + check_ord_and_hash(public_key1, public_key2) + } else { + check_ord_and_hash(public_key2, public_key1) + } + } + + #[test] + fn public_key_to_account_hash() { + let mut rng = TestRng::new(); + let public_key = PublicKey::random_secp256k1(&mut rng); + assert_ne!( + public_key.to_account_hash().as_ref(), + Into::>::into(public_key) + ); + } + + #[test] + fn signature_traits() { + let signature_low = Signature::secp256k1([1; SIGNATURE_LENGTH]).unwrap(); + let signature_high = Signature::secp256k1([3; SIGNATURE_LENGTH]).unwrap(); + check_ord_and_hash(signature_low, signature_high) + } + + #[test] + fn validate_known_signature() { + // In the event that this test fails, we need to consider pinning the version of the + // `k256` crate to maintain backwards compatibility with existing data on the Casper + // network. 
+ let secret_key_hex = "833fe62409237b9d62ec77587520911e9a759cec1d19755b7da901b96dca3d42"; + let public_key_hex = "028e24fd9654f12c793d3d376c15f7abe53e0fbd537884a3a98d10d2dc6d513b4e"; + let message_hex = "616263"; + let signature_hex = "8016162860f0795154643d15c5ab5bb840d8c695d6de027421755579ea7f2a4629b7e0c88fc3428669a6a89496f426181b73f10c6c8a05ac8f49d6cb5032eb89"; + + let secret_key_bytes = base16::decode(secret_key_hex).unwrap(); + let public_key_bytes = base16::decode(public_key_hex).unwrap(); + let message_bytes = base16::decode(message_hex).unwrap(); + let signature_bytes = base16::decode(signature_hex).unwrap(); + + let secret_key = SecretKey::secp256k1_from_bytes(secret_key_bytes).unwrap(); + let public_key = PublicKey::secp256k1_from_bytes(public_key_bytes).unwrap(); + assert_eq!(public_key, PublicKey::from(&secret_key)); + + let signature = Signature::secp256k1_from_bytes(signature_bytes).unwrap(); + assert_eq!(sign(&message_bytes, &secret_key, &public_key), signature); + assert!(verify(&message_bytes, &signature, &public_key).is_ok()); + } +} + +#[test] +fn public_key_traits() { + let system_key = PublicKey::system(); + let mut rng = TestRng::new(); + let ed25519_public_key = PublicKey::random_ed25519(&mut rng); + let secp256k1_public_key = PublicKey::random_secp256k1(&mut rng); + check_ord_and_hash(ed25519_public_key.clone(), secp256k1_public_key.clone()); + check_ord_and_hash(system_key.clone(), ed25519_public_key); + check_ord_and_hash(system_key, secp256k1_public_key); +} + +#[test] +fn signature_traits() { + let system_sig = Signature::system(); + let ed25519_sig = Signature::ed25519([3; Signature::ED25519_LENGTH]).unwrap(); + let secp256k1_sig = Signature::secp256k1([1; Signature::SECP256K1_LENGTH]).unwrap(); + check_ord_and_hash(ed25519_sig, secp256k1_sig); + check_ord_and_hash(system_sig, ed25519_sig); + check_ord_and_hash(system_sig, secp256k1_sig); +} + +#[test] +fn sign_and_verify() { + let mut rng = TestRng::new(); + let ed25519_secret_key = 
SecretKey::random_ed25519(&mut rng); + let secp256k1_secret_key = SecretKey::random_secp256k1(&mut rng); + + let ed25519_public_key = PublicKey::from(&ed25519_secret_key); + let secp256k1_public_key = PublicKey::from(&secp256k1_secret_key); + + let other_ed25519_public_key = PublicKey::random_ed25519(&mut rng); + let other_secp256k1_public_key = PublicKey::random_secp256k1(&mut rng); + + let message = b"message"; + let ed25519_signature = sign(message, &ed25519_secret_key, &ed25519_public_key); + let secp256k1_signature = sign(message, &secp256k1_secret_key, &secp256k1_public_key); + + assert!(verify(message, &ed25519_signature, &ed25519_public_key).is_ok()); + assert!(verify(message, &secp256k1_signature, &secp256k1_public_key).is_ok()); + + assert!(verify(message, &ed25519_signature, &other_ed25519_public_key).is_err()); + assert!(verify(message, &secp256k1_signature, &other_secp256k1_public_key).is_err()); + + assert!(verify(message, &ed25519_signature, &secp256k1_public_key).is_err()); + assert!(verify(message, &secp256k1_signature, &ed25519_public_key).is_err()); + + assert!(verify(&message[1..], &ed25519_signature, &ed25519_public_key).is_err()); + assert!(verify(&message[1..], &secp256k1_signature, &secp256k1_public_key).is_err()); +} + +#[test] +fn should_construct_secp256k1_from_uncompressed_bytes() { + let mut rng = TestRng::new(); + + let mut secret_key_bytes = [0u8; SecretKey::SECP256K1_LENGTH]; + rng.fill_bytes(&mut secret_key_bytes[..]); + + // Construct a secp256k1 secret key and use that to construct a public key. + let secp256k1_secret_key = k256::SecretKey::from_slice(&secret_key_bytes).unwrap(); + let secp256k1_public_key = secp256k1_secret_key.public_key(); + + // Construct a CL secret key and public key from that (which will be a compressed key). 
+ let secret_key = SecretKey::secp256k1_from_bytes(secret_key_bytes).unwrap(); + let public_key = PublicKey::from(&secret_key); + assert_eq!( + Into::>::into(public_key.clone()).len(), + PublicKey::SECP256K1_LENGTH + ); + assert_ne!( + secp256k1_public_key + .to_encoded_point(false) + .as_bytes() + .len(), + PublicKey::SECP256K1_LENGTH + ); + + // Construct a CL public key from uncompressed public key bytes and ensure it's compressed. + let from_uncompressed_bytes = + PublicKey::secp256k1_from_bytes(secp256k1_public_key.to_encoded_point(false).as_bytes()) + .unwrap(); + assert_eq!(public_key, from_uncompressed_bytes); + + // Construct a CL public key from the uncompressed one's hex representation and ensure it's + // compressed. + let uncompressed_hex = { + let tag_bytes = vec![0x02u8]; + base16::encode_lower(&tag_bytes) + + &base16::encode_lower(&secp256k1_public_key.to_encoded_point(false).as_bytes()) + }; + + format!( + "02{}", + base16::encode_lower(secp256k1_public_key.to_encoded_point(false).as_bytes()) + .to_lowercase() + ); + let from_uncompressed_hex = PublicKey::from_hex(uncompressed_hex).unwrap(); + assert_eq!(public_key, from_uncompressed_hex); +} + +#[test] +fn generate_ed25519_should_generate_an_ed25519_key() { + let secret_key = SecretKey::generate_ed25519().unwrap(); + assert!(matches!(secret_key, SecretKey::Ed25519(_))) +} + +#[test] +fn generate_secp256k1_should_generate_an_secp256k1_key() { + let secret_key = SecretKey::generate_secp256k1().unwrap(); + assert!(matches!(secret_key, SecretKey::Secp256k1(_))) +} diff --git a/casper_types_ver_2_0/src/crypto/error.rs b/casper_types_ver_2_0/src/crypto/error.rs new file mode 100644 index 00000000..a4d822aa --- /dev/null +++ b/casper_types_ver_2_0/src/crypto/error.rs @@ -0,0 +1,155 @@ +use alloc::string::String; +use core::fmt::{self, Display, Formatter}; +#[cfg(any(feature = "std", test))] +use std::error::Error as StdError; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use 
ed25519_dalek::ed25519::Error as SignatureError; +#[cfg(any(feature = "std", test))] +use pem::PemError; +use serde::Serialize; +#[cfg(any(feature = "std", test))] +use thiserror::Error; + +#[cfg(any(feature = "std", test))] +use crate::file_utils::{ReadFileError, WriteFileError}; + +/// Cryptographic errors. +#[derive(Clone, Eq, PartialEq, Debug, Serialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[non_exhaustive] +pub enum Error { + /// Error resulting from creating or using asymmetric key types. + AsymmetricKey(String), + + /// Error resulting when decoding a type from a hex-encoded representation. + #[serde(with = "serde_helpers::Base16DecodeError")] + #[cfg_attr(feature = "datasize", data_size(skip))] + FromHex(base16::DecodeError), + + /// Error resulting when decoding a type from a base64 representation. + #[serde(with = "serde_helpers::Base64DecodeError")] + #[cfg_attr(feature = "datasize", data_size(skip))] + FromBase64(base64::DecodeError), + + /// Signature error. + SignatureError, + + /// Error trying to manipulate the system key. 
+ System(String), +} + +impl Display for Error { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + match self { + Error::AsymmetricKey(error_msg) => { + write!(formatter, "asymmetric key error: {}", error_msg) + } + Error::FromHex(error) => { + write!(formatter, "decoding from hex: {}", error) + } + Error::FromBase64(error) => { + write!(formatter, "decoding from base 64: {}", error) + } + Error::SignatureError => { + write!(formatter, "error in signature") + } + Error::System(error_msg) => { + write!(formatter, "invalid operation on system key: {}", error_msg) + } + } + } +} + +impl From for Error { + fn from(error: base16::DecodeError) -> Self { + Error::FromHex(error) + } +} + +impl From for Error { + fn from(_error: SignatureError) -> Self { + Error::SignatureError + } +} + +#[cfg(any(feature = "std", test))] +impl StdError for Error { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + match self { + Error::FromHex(error) => Some(error), + Error::FromBase64(error) => Some(error), + Error::AsymmetricKey(_) | Error::SignatureError | Error::System(_) => None, + } + } +} + +/// Cryptographic errors extended with some additional variants. +#[cfg(any(feature = "std", test))] +#[derive(Debug, Error)] +#[non_exhaustive] +pub enum ErrorExt { + /// A basic crypto error. + #[error("crypto error: {0:?}")] + CryptoError(#[from] Error), + + /// Error trying to read a secret key. + #[error("secret key load failed: {0}")] + SecretKeyLoad(ReadFileError), + + /// Error trying to read a public key. + #[error("public key load failed: {0}")] + PublicKeyLoad(ReadFileError), + + /// Error trying to write a secret key. + #[error("secret key save failed: {0}")] + SecretKeySave(WriteFileError), + + /// Error trying to write a public key. + #[error("public key save failed: {0}")] + PublicKeySave(WriteFileError), + + /// Pem format error. + #[error("pem error: {0}")] + FromPem(String), + + /// DER format error. 
+ #[error("der error: {0}")] + FromDer(#[from] derp::Error), + + /// Error in getting random bytes from the system's preferred random number source. + #[error("failed to get random bytes: {0}")] + GetRandomBytes(#[from] getrandom::Error), +} + +#[cfg(any(feature = "std", test))] +impl From for ErrorExt { + fn from(error: PemError) -> Self { + ErrorExt::FromPem(error.to_string()) + } +} + +/// This module allows us to derive `Serialize` for the third party error types which don't +/// themselves derive it. +/// +/// See for more info. +#[allow(clippy::enum_variant_names)] +mod serde_helpers { + use serde::Serialize; + + #[derive(Serialize)] + #[serde(remote = "base16::DecodeError")] + pub(super) enum Base16DecodeError { + InvalidByte { index: usize, byte: u8 }, + InvalidLength { length: usize }, + } + + #[derive(Serialize)] + #[serde(remote = "base64::DecodeError")] + pub(super) enum Base64DecodeError { + InvalidByte(usize, u8), + InvalidLength, + InvalidLastSymbol(usize, u8), + } +} diff --git a/casper_types_ver_2_0/src/deploy_info.rs b/casper_types_ver_2_0/src/deploy_info.rs new file mode 100644 index 00000000..faa51e74 --- /dev/null +++ b/casper_types_ver_2_0/src/deploy_info.rs @@ -0,0 +1,174 @@ +use alloc::vec::Vec; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + account::AccountHash, + bytesrepr::{self, FromBytes, ToBytes}, + serde_helpers, DeployHash, TransferAddr, URef, U512, +}; + +/// Information relating to the given Deploy. +#[derive(Debug, Clone, Ord, PartialOrd, Eq, PartialEq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct DeployInfo { + /// The relevant Deploy. 
+ #[serde(with = "serde_helpers::deploy_hash_as_array")] + #[cfg_attr( + feature = "json-schema", + schemars(with = "DeployHash", description = "Hex-encoded Deploy hash.") + )] + pub deploy_hash: DeployHash, + /// Transfers performed by the Deploy. + pub transfers: Vec, + /// Account identifier of the creator of the Deploy. + pub from: AccountHash, + /// Source purse used for payment of the Deploy. + pub source: URef, + /// Gas cost of executing the Deploy. + pub gas: U512, +} + +impl DeployInfo { + /// Creates a [`DeployInfo`]. + pub fn new( + deploy_hash: DeployHash, + transfers: &[TransferAddr], + from: AccountHash, + source: URef, + gas: U512, + ) -> Self { + let transfers = transfers.to_vec(); + DeployInfo { + deploy_hash, + transfers, + from, + source, + gas, + } + } +} + +impl FromBytes for DeployInfo { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (deploy_hash, rem) = DeployHash::from_bytes(bytes)?; + let (transfers, rem) = Vec::::from_bytes(rem)?; + let (from, rem) = AccountHash::from_bytes(rem)?; + let (source, rem) = URef::from_bytes(rem)?; + let (gas, rem) = U512::from_bytes(rem)?; + Ok(( + DeployInfo { + deploy_hash, + transfers, + from, + source, + gas, + }, + rem, + )) + } +} + +impl ToBytes for DeployInfo { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + self.deploy_hash.write_bytes(&mut result)?; + self.transfers.write_bytes(&mut result)?; + self.from.write_bytes(&mut result)?; + self.source.write_bytes(&mut result)?; + self.gas.write_bytes(&mut result)?; + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.deploy_hash.serialized_length() + + self.transfers.serialized_length() + + self.from.serialized_length() + + self.source.serialized_length() + + self.gas.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.deploy_hash.write_bytes(writer)?; + self.transfers.write_bytes(writer)?; + 
self.from.write_bytes(writer)?; + self.source.write_bytes(writer)?; + self.gas.write_bytes(writer)?; + Ok(()) + } +} + +/// Generators for a `DeployInfo` +#[cfg(any(feature = "testing", feature = "gens", test))] +pub(crate) mod gens { + use alloc::vec::Vec; + + use proptest::{ + array, + collection::{self, SizeRange}, + prelude::{Arbitrary, Strategy}, + }; + + use crate::{ + account::AccountHash, + gens::{u512_arb, uref_arb}, + DeployHash, DeployInfo, TransferAddr, + }; + + pub fn deploy_hash_arb() -> impl Strategy { + array::uniform32(::arbitrary()).prop_map(DeployHash::from_raw) + } + + pub fn transfer_addr_arb() -> impl Strategy { + array::uniform32(::arbitrary()).prop_map(TransferAddr::new) + } + + pub fn transfers_arb(size: impl Into) -> impl Strategy> { + collection::vec(transfer_addr_arb(), size) + } + + pub fn account_hash_arb() -> impl Strategy { + array::uniform32(::arbitrary()).prop_map(AccountHash::new) + } + + /// Creates an arbitrary `DeployInfo` + pub fn deploy_info_arb() -> impl Strategy { + let transfers_length_range = 0..5; + ( + deploy_hash_arb(), + transfers_arb(transfers_length_range), + account_hash_arb(), + uref_arb(), + u512_arb(), + ) + .prop_map(|(deploy_hash, transfers, from, source, gas)| DeployInfo { + deploy_hash, + transfers, + from, + source, + gas, + }) + } +} + +#[cfg(test)] +mod tests { + use proptest::prelude::*; + + use crate::bytesrepr; + + use super::gens; + + proptest! { + #[test] + fn test_serialization_roundtrip(deploy_info in gens::deploy_info_arb()) { + bytesrepr::test_serialization_roundtrip(&deploy_info) + } + } +} diff --git a/casper_types_ver_2_0/src/digest.rs b/casper_types_ver_2_0/src/digest.rs new file mode 100644 index 00000000..31a5d77e --- /dev/null +++ b/casper_types_ver_2_0/src/digest.rs @@ -0,0 +1,730 @@ +//! Contains digest and merkle chunking used throughout the system. 
+ +mod chunk_with_proof; +mod error; +mod indexed_merkle_proof; + +use alloc::{collections::BTreeMap, string::String, vec::Vec}; +use core::{ + array::TryFromSliceError, + convert::{TryFrom, TryInto}, + fmt::{self, Debug, Display, Formatter, LowerHex, UpperHex}, +}; + +use blake2::{ + digest::{Update, VariableOutput}, + VarBlake2b, +}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +use hex_fmt::HexFmt; +use itertools::Itertools; +#[cfg(feature = "once_cell")] +use once_cell::sync::OnceCell; +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + checksummed_hex, CLType, CLTyped, +}; +pub use chunk_with_proof::ChunkWithProof; +pub use error::{ + ChunkWithProofVerificationError, Error as DigestError, MerkleConstructionError, + MerkleVerificationError, +}; +pub use indexed_merkle_proof::IndexedMerkleProof; + +/// The output of the hash function. +#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Default)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "Hex-encoded hash digest.") +)] +pub struct Digest( + #[cfg_attr(feature = "json-schema", schemars(skip, with = "String"))] + pub(super) [u8; Digest::LENGTH], +); + +const CHUNK_DATA_ZEROED: &[u8] = &[0u8; ChunkWithProof::CHUNK_SIZE_BYTES]; + +impl Digest { + /// The number of bytes in a `Digest`. + pub const LENGTH: usize = 32; + + /// Sentinel hash to be used for hashing options in the case of `None`. + pub const SENTINEL_NONE: Digest = Digest([0u8; Digest::LENGTH]); + /// Sentinel hash to be used by `hash_slice_rfold`. Terminates the fold. 
+ pub const SENTINEL_RFOLD: Digest = Digest([1u8; Digest::LENGTH]); + /// Sentinel hash to be used by `hash_merkle_tree` in the case of an empty list. + pub const SENTINEL_MERKLE_TREE: Digest = Digest([2u8; Digest::LENGTH]); + + /// Creates a 32-byte BLAKE2b hash digest from a given piece of data. + pub fn hash>(data: T) -> Digest { + Self::blake2b_hash(data) + } + + /// Creates a 32-byte BLAKE2b hash digest from a given piece of data + pub(crate) fn blake2b_hash>(data: T) -> Digest { + let mut ret = [0u8; Digest::LENGTH]; + // NOTE: Safe to unwrap here because our digest length is constant and valid + let mut hasher = VarBlake2b::new(Digest::LENGTH).unwrap(); + hasher.update(data); + hasher.finalize_variable(|hash| ret.clone_from_slice(hash)); + Digest(ret) + } + + /// Hashes a pair of byte slices. + pub fn hash_pair, U: AsRef<[u8]>>(data1: T, data2: U) -> Digest { + let mut result = [0; Digest::LENGTH]; + let mut hasher = VarBlake2b::new(Digest::LENGTH).unwrap(); + hasher.update(data1); + hasher.update(data2); + hasher.finalize_variable(|slice| { + result.copy_from_slice(slice); + }); + Digest(result) + } + + /// Hashes a raw Merkle root and leaf count to form the final Merkle hash. + /// + /// To avoid pre-image attacks, the final hash that is based upon the number of leaves in the + /// Merkle tree and the root hash is prepended with a padding to ensure it is longer than the + /// actual chunk size. + /// + /// Without this feature, an attacker could construct an item that is only a few bytes long but + /// hashes to the same value as a much longer, chunked item by hashing `(len || root hash of + /// longer item's Merkle tree root)`. + /// + /// This function computes the correct final hash by ensuring the hasher used has been + /// initialized with padding before. + /// + /// With `once_cell` feature enabled (generally done by enabling `std` feature), for efficiency + /// reasons it uses a memoized hasher state computed on first run and cloned afterwards. 
+ fn hash_merkle_root(leaf_count: u64, root: Digest) -> Digest { + #[cfg(feature = "once_cell")] + static PAIR_PREFIX_HASHER: OnceCell = OnceCell::new(); + + let mut result = [0; Digest::LENGTH]; + let get_hasher = || { + let mut hasher = VarBlake2b::new(Digest::LENGTH).unwrap(); + hasher.update(CHUNK_DATA_ZEROED); + hasher + }; + #[cfg(feature = "once_cell")] + let mut hasher = PAIR_PREFIX_HASHER.get_or_init(get_hasher).clone(); + #[cfg(not(feature = "once_cell"))] + let mut hasher = get_hasher(); + + hasher.update(leaf_count.to_le_bytes()); + hasher.update(root); + hasher.finalize_variable(|slice| { + result.copy_from_slice(slice); + }); + Digest(result) + } + + /// Returns the underlying BLAKE2b hash bytes + pub fn value(&self) -> [u8; Digest::LENGTH] { + self.0 + } + + /// Converts the underlying BLAKE2b hash digest array to a `Vec` + pub fn into_vec(self) -> Vec { + self.0.to_vec() + } + + /// Hashes an `impl IntoIterator` of [`Digest`]s into a single [`Digest`] by + /// constructing a [Merkle tree][1]. Reduces pairs of elements in the collection by repeatedly + /// calling [Digest::hash_pair]. + /// + /// The pattern of hashing is as follows. It is akin to [graph reduction][2]: + /// + /// ```text + /// 1 2 4 5 8 9 + /// │ │ │ │ │ │ + /// └─3 └─6 └─10 + /// │ │ │ + /// └───7 │ + /// │ │ + /// └───11 + /// ``` + /// + /// Finally hashes the number of elements with the resulting hash. In the example above the + /// final output would be `hash_pair(6_u64.to_le_bytes(), l)`. + /// + /// Returns [`Digest::SENTINEL_MERKLE_TREE`] when the input is empty. 
+ /// + /// [1]: https://en.wikipedia.org/wiki/Merkle_tree + /// [2]: https://en.wikipedia.org/wiki/Graph_reduction + pub fn hash_merkle_tree(leaves: I) -> Digest + where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, + { + let leaves = leaves.into_iter(); + let leaf_count = leaves.len() as u64; + + leaves.tree_fold1(Digest::hash_pair).map_or_else( + || Digest::SENTINEL_MERKLE_TREE, + |raw_root| Digest::hash_merkle_root(leaf_count, raw_root), + ) + } + + /// Hashes a `BTreeMap`. + pub fn hash_btree_map(btree_map: &BTreeMap) -> Result + where + K: ToBytes, + V: ToBytes, + { + let mut kv_hashes: Vec = Vec::with_capacity(btree_map.len()); + for (key, value) in btree_map.iter() { + kv_hashes.push(Digest::hash_pair( + Digest::hash(key.to_bytes()?), + Digest::hash(value.to_bytes()?), + )) + } + Ok(Self::hash_merkle_tree(kv_hashes)) + } + + /// Hashes a `&[Digest]` using a [right fold][1]. + /// + /// This pattern of hashing is as follows: + /// + /// ```text + /// hash_pair(a, &hash_pair(b, &hash_pair(c, &SENTINEL_RFOLD))) + /// ``` + /// + /// Unlike Merkle trees, this is suited to hashing heterogeneous lists we may wish to extend in + /// the future (ie, hashes of data structures that may undergo revision). + /// + /// Returns [`Digest::SENTINEL_RFOLD`] when given an empty slice as input. + /// + /// [1]: https://en.wikipedia.org/wiki/Fold_(higher-order_function)#Linear_folds + pub fn hash_slice_rfold(slice: &[Digest]) -> Digest { + Self::hash_slice_with_proof(slice, Self::SENTINEL_RFOLD) + } + + /// Hashes a `&[Digest]` using a [right fold][1]. Uses `proof` as a Merkle proof for the + /// missing tail of the slice. + /// + /// [1]: https://en.wikipedia.org/wiki/Fold_(higher-order_function)#Linear_folds + pub fn hash_slice_with_proof(slice: &[Digest], proof: Digest) -> Digest { + slice + .iter() + .rfold(proof, |prev, next| Digest::hash_pair(next, prev)) + } + + /// Returns a `Digest` parsed from a hex-encoded `Digest`. 
+ pub fn from_hex>(hex_input: T) -> Result { + let bytes = checksummed_hex::decode(&hex_input).map_err(DigestError::Base16DecodeError)?; + let slice: [u8; Self::LENGTH] = bytes + .try_into() + .map_err(|_| DigestError::IncorrectDigestLength(hex_input.as_ref().len()))?; + Ok(Digest(slice)) + } + + /// Hash data into chunks if necessary. + pub fn hash_into_chunks_if_necessary(bytes: &[u8]) -> Digest { + if bytes.len() <= ChunkWithProof::CHUNK_SIZE_BYTES { + Digest::blake2b_hash(bytes) + } else { + Digest::hash_merkle_tree( + bytes + .chunks(ChunkWithProof::CHUNK_SIZE_BYTES) + .map(Digest::blake2b_hash), + ) + } + } + + /// Returns a new `Digest` directly initialized with the provided bytes; no hashing is done. + /// + /// This is equivalent to `Deploy::from`, but is a const function. + #[cfg(any(feature = "testing", test))] + pub const fn from_raw(raw_digest: [u8; Self::LENGTH]) -> Self { + Digest(raw_digest) + } + + /// Returns a random `Digest`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + Digest(rng.gen()) + } +} + +impl CLTyped for Digest { + fn cl_type() -> CLType { + CLType::ByteArray(Digest::LENGTH as u32) + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> Digest { + Digest(rng.gen()) + } +} + +impl LowerHex for Digest { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + let hex_string = base16::encode_lower(&self.value()); + if f.alternate() { + write!(f, "0x{}", hex_string) + } else { + write!(f, "{}", hex_string) + } + } +} + +impl UpperHex for Digest { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + let hex_string = base16::encode_upper(&self.value()); + if f.alternate() { + write!(f, "0x{}", hex_string) + } else { + write!(f, "{}", hex_string) + } + } +} + +impl Display for Digest { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!(f, "{:10}", HexFmt(&self.0)) + } +} + +impl Debug for Digest { + fn fmt(&self, f: &mut Formatter) 
-> fmt::Result { + write!(f, "{}", base16::encode_lower(&self.0)) + } +} + +impl From<[u8; Digest::LENGTH]> for Digest { + fn from(arr: [u8; Digest::LENGTH]) -> Self { + Digest(arr) + } +} + +impl<'a> TryFrom<&'a [u8]> for Digest { + type Error = TryFromSliceError; + + fn try_from(slice: &[u8]) -> Result { + <[u8; Digest::LENGTH]>::try_from(slice).map(Digest) + } +} + +impl AsRef<[u8]> for Digest { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + +impl From for [u8; Digest::LENGTH] { + fn from(hash: Digest) -> Self { + hash.0 + } +} + +impl ToBytes for Digest { + #[inline(always)] + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + #[inline(always)] + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + #[inline(always)] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + writer.extend_from_slice(&self.0); + Ok(()) + } +} + +impl FromBytes for Digest { + #[inline(always)] + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + FromBytes::from_bytes(bytes).map(|(arr, rem)| (Digest(arr), rem)) + } +} + +impl Serialize for Digest { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + base16::encode_lower(&self.0).serialize(serializer) + } else { + // This is to keep backwards compatibility with how HexForm encodes + // byte arrays. HexForm treats this like a slice. 
+ self.0[..].serialize(serializer) + } + } +} + +impl<'de> Deserialize<'de> for Digest { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let hex_string = String::deserialize(deserializer)?; + let bytes = + checksummed_hex::decode(hex_string.as_bytes()).map_err(SerdeError::custom)?; + let data = + <[u8; Digest::LENGTH]>::try_from(bytes.as_ref()).map_err(SerdeError::custom)?; + Ok(Digest::from(data)) + } else { + let data = >::deserialize(deserializer)?; + Digest::try_from(data.as_slice()).map_err(D::Error::custom) + } + } +} + +#[cfg(test)] +mod tests { + use std::{collections::BTreeMap, iter}; + + use proptest_attr_macro::proptest; + + use super::Digest; + + use crate::{ + bytesrepr::{self, ToBytes}, + ChunkWithProof, + }; + + #[proptest] + fn bytesrepr_roundtrip(hash: [u8; Digest::LENGTH]) { + let digest = Digest(hash); + bytesrepr::test_serialization_roundtrip(&digest); + } + + #[proptest] + fn serde_roundtrip(hash: [u8; Digest::LENGTH]) { + let preser_digest = Digest(hash); + let serialized = serde_json::to_string(&preser_digest).unwrap(); + let deser_digest: Digest = serde_json::from_str(&serialized).unwrap(); + assert_eq!(preser_digest, deser_digest); + } + + #[test] + fn serde_custom_serialization() { + let serialized = serde_json::to_string(&Digest::SENTINEL_RFOLD).unwrap(); + let expected = format!("\"{:?}\"", Digest::SENTINEL_RFOLD); + assert_eq!(expected, serialized); + } + + #[test] + fn hash_known() { + // Data of length less than or equal to [ChunkWithProof::CHUNK_SIZE_BYTES] + // are hashed using Blake2B algorithm. + // Larger data are chunked and Merkle tree hash is calculated. + // + // Please note that [ChunkWithProof::CHUNK_SIZE_BYTES] in `test` configuration + // is smaller than in production, to allow testing with more chunks + // with still reasonable time and memory consumption. 
+ // + // See: [Digest::hash] + let inputs_and_digests = [ + ( + "", + "0e5751c026e543b2e8ab2eb06099daa1d1e5df47778f7787faab45cdf12fe3a8", + ), + ( + "abc", + "bddd813c634239723171ef3fee98579b94964e3bb1cb3e427262c8c068d52319", + ), + ( + "0123456789", + "7b6cb8d374484e221785288b035dc53fc9ddf000607f473fc2a3258d89a70398", + ), + ( + "01234567890", + "3d199478c18b7fe3ca1f4f2a9b3e07f708ff66ed52eb345db258abe8a812ed5c", + ), + ( + "The quick brown fox jumps over the lazy dog", + "01718cec35cd3d796dd00020e0bfecb473ad23457d063b75eff29c0ffa2e58a9", + ), + ]; + for (known_input, expected_digest) in &inputs_and_digests { + let known_input: &[u8] = known_input.as_ref(); + assert_eq!(*expected_digest, format!("{:?}", Digest::hash(known_input))); + } + } + + #[test] + fn from_valid_hex_should_succeed() { + for char in "abcdefABCDEF0123456789".chars() { + let input: String = iter::repeat(char).take(64).collect(); + assert!(Digest::from_hex(input).is_ok()); + } + } + + #[test] + fn from_hex_invalid_length_should_fail() { + for len in &[2_usize, 62, 63, 65, 66] { + let input: String = "f".repeat(*len); + assert!(Digest::from_hex(input).is_err()); + } + } + + #[test] + fn from_hex_invalid_char_should_fail() { + for char in "g %-".chars() { + let input: String = iter::repeat('f').take(63).chain(iter::once(char)).collect(); + assert!(Digest::from_hex(input).is_err()); + } + } + + #[test] + fn should_display_digest_in_hex() { + let hash = Digest([0u8; 32]); + let hash_hex = format!("{:?}", hash); + assert_eq!( + hash_hex, + "0000000000000000000000000000000000000000000000000000000000000000" + ); + } + + #[test] + fn should_print_digest_lower_hex() { + let hash = Digest([10u8; 32]); + let hash_lower_hex = format!("{:x}", hash); + assert_eq!( + hash_lower_hex, + "0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a" + ) + } + + #[test] + fn should_print_digest_upper_hex() { + let hash = Digest([10u8; 32]); + let hash_upper_hex = format!("{:X}", hash); + assert_eq!( + 
hash_upper_hex, + "0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A" + ) + } + + #[test] + fn alternate_should_prepend_0x() { + let hash = Digest([0u8; 32]); + let hash_hex_alt = format!("{:#x}", hash); + assert_eq!( + hash_hex_alt, + "0x0000000000000000000000000000000000000000000000000000000000000000" + ) + } + + #[test] + fn test_hash_pair() { + let hash1 = Digest([1u8; 32]); + let hash2 = Digest([2u8; 32]); + + let hash = Digest::hash_pair(hash1, hash2); + let hash_lower_hex = format!("{:x}", hash); + + assert_eq!( + hash_lower_hex, + "30b600fb1f0cc0b3f0fc28cdcb7389405a6659be81c7d5c5905725aa3a5119ce" + ); + } + + #[test] + fn test_hash_rfold() { + let hashes = [ + Digest([1u8; 32]), + Digest([2u8; 32]), + Digest([3u8; 32]), + Digest([4u8; 32]), + Digest([5u8; 32]), + ]; + + let hash = Digest::hash_slice_rfold(&hashes[..]); + let hash_lower_hex = format!("{:x}", hash); + + assert_eq!( + hash_lower_hex, + "e137f4eb94d2387065454eecfe2cdb5584e3dbd5f1ca07fc511fffd13d234e8e" + ); + + let proof = Digest::hash_slice_rfold(&hashes[2..]); + let hash_proof = Digest::hash_slice_with_proof(&hashes[..2], proof); + + assert_eq!(hash, hash_proof); + } + + #[test] + fn test_hash_merkle_odd() { + let hashes = [ + Digest([1u8; 32]), + Digest([2u8; 32]), + Digest([3u8; 32]), + Digest([4u8; 32]), + Digest([5u8; 32]), + ]; + + let hash = Digest::hash_merkle_tree(hashes); + let hash_lower_hex = format!("{:x}", hash); + + assert_eq!( + hash_lower_hex, + "775cec8133b97b0e8d4e97659025d5bac4ed7c8927d1bd99cf62114df57f3e74" + ); + } + + #[test] + fn test_hash_merkle_even() { + let hashes = [ + Digest([1u8; 32]), + Digest([2u8; 32]), + Digest([3u8; 32]), + Digest([4u8; 32]), + Digest([5u8; 32]), + Digest([6u8; 32]), + ]; + + let hash = Digest::hash_merkle_tree(hashes); + let hash_lower_hex = format!("{:x}", hash); + + assert_eq!( + hash_lower_hex, + "4bd50b08a8366b28c35bc831b95d147123bad01c29ffbf854b659c4b3ea4086c" + ); + } + + #[test] + fn test_hash_btreemap() { + let mut 
map = BTreeMap::new(); + let _ = map.insert(Digest([1u8; 32]), Digest([2u8; 32])); + let _ = map.insert(Digest([3u8; 32]), Digest([4u8; 32])); + let _ = map.insert(Digest([5u8; 32]), Digest([6u8; 32])); + let _ = map.insert(Digest([7u8; 32]), Digest([8u8; 32])); + let _ = map.insert(Digest([9u8; 32]), Digest([10u8; 32])); + + let hash = Digest::hash_btree_map(&map).unwrap(); + let hash_lower_hex = format!("{:x}", hash); + + assert_eq!( + hash_lower_hex, + "fd1214a627473ffc6d6cc97e7012e6344d74abbf987b48cde5d0642049a0db98" + ); + } + + #[test] + fn digest_deserialize_regression() { + let input = Digest([0; 32]); + let serialized = bincode::serialize(&input).expect("failed to serialize."); + + let expected = vec![ + 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + + assert_eq!(expected, serialized); + } + + #[test] + fn should_assert_simple_digest_serialization_format() { + let digest_bytes = [0; 32]; + + assert_eq!( + Digest(digest_bytes).to_bytes().unwrap(), + digest_bytes.to_vec() + ); + } + + #[test] + fn merkle_roots_are_preimage_resistent() { + // Input data is two chunks long. + // + // The resulting tree will look like this: + // + // 1..0 a..j + // │ │ + // └─────── R + // + // The Merkle root is thus: R = h( h(1..0) || h(a..j) ) + // + // h(1..0) = 807f1ba73147c3a96c2d63b38dd5a5f514f66290a1436bb9821e9f2a72eff263 + // h(a..j) = 499e1cdb476523fedafc9d9db31125e2744f271578ea95b16ab4bd1905f05fea + // R=h(h(1..0)||h(a..j)) = 1319394a98d0cb194f960e3748baeb2045a9ec28aa51e0d42011be43f4a91f5f + // h(2u64le || R) = c31f0bb6ef569354d1a26c3a51f1ad4b6d87cef7f73a290ab6be8db6a9c7d4ee + // + // The final step is to hash h(2u64le || R), which is the length as little endian + // concatenated with the root. + + // Constants used here assume a chunk size of 10 bytes. 
+ assert_eq!(ChunkWithProof::CHUNK_SIZE_BYTES, 10); + + let long_data = b"1234567890abcdefghij"; + assert_eq!(long_data.len(), ChunkWithProof::CHUNK_SIZE_BYTES * 2); + + // The `long_data_hash` is constructed manually here, as `Digest::hash` still had + // deactivated chunking code at the time this test was written. + let long_data_hash = Digest::hash_merkle_tree( + long_data + .as_ref() + .chunks(ChunkWithProof::CHUNK_SIZE_BYTES) + .map(Digest::blake2b_hash), + ); + + // The concatenation of `2u64` in little endian + the Merkle root hash `R`. Note that this + // is a valid hashable object on its own. + let maybe_colliding_short_data = [ + 2, 0, 0, 0, 0, 0, 0, 0, 19, 25, 57, 74, 152, 208, 203, 25, 79, 150, 14, 55, 72, 186, + 235, 32, 69, 169, 236, 40, 170, 81, 224, 212, 32, 17, 190, 67, 244, 169, 31, 95, + ]; + + // Use `blake2b_hash` to work around the issue of the chunk size being shorter than the + // digest length. + let short_data_hash = Digest::blake2b_hash(maybe_colliding_short_data); + + // Ensure there is no collision. You can verify this test is correct by temporarily changing + // the `Digest::hash_merkle_tree` function to use the unpadded `hash_pair` function, instead + // of `hash_merkle_root`. + assert_ne!(long_data_hash, short_data_hash); + + // The expected input for the root hash is the colliding data, but prefixed with a full + // chunk of zeros. + let expected_final_hash_input = [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 19, 25, 57, 74, 152, 208, 203, + 25, 79, 150, 14, 55, 72, 186, 235, 32, 69, 169, 236, 40, 170, 81, 224, 212, 32, 17, + 190, 67, 244, 169, 31, 95, + ]; + assert_eq!( + Digest::blake2b_hash(expected_final_hash_input), + long_data_hash + ); + + // Another way to specify this sanity check is to say that the short and long data should + // hash differently. + // + // Note: This condition is true at the time of writing this test, where chunk hashing is + // disabled. It should still hold true once enabled. 
+ assert_ne!( + Digest::hash(maybe_colliding_short_data), + Digest::hash(long_data) + ); + + // In a similar manner, the internal padded data should also not hash equal to either, as it + // should be hashed using the chunking function. + assert_ne!( + Digest::hash(maybe_colliding_short_data), + Digest::hash(expected_final_hash_input) + ); + assert_ne!( + Digest::hash(long_data), + Digest::hash(expected_final_hash_input) + ); + } +} diff --git a/casper_types_ver_2_0/src/digest/chunk_with_proof.rs b/casper_types_ver_2_0/src/digest/chunk_with_proof.rs new file mode 100644 index 00000000..404e74b3 --- /dev/null +++ b/casper_types_ver_2_0/src/digest/chunk_with_proof.rs @@ -0,0 +1,335 @@ +//! Chunks with Merkle proofs. + +use alloc::vec::Vec; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use super::{ChunkWithProofVerificationError, Digest, IndexedMerkleProof, MerkleConstructionError}; +use crate::bytesrepr::{self, Bytes, FromBytes, ToBytes}; + +/// Represents a chunk of data with attached proof. 
+#[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct ChunkWithProof { + proof: IndexedMerkleProof, + chunk: Bytes, +} + +impl ToBytes for ChunkWithProof { + fn write_bytes(&self, buf: &mut Vec) -> Result<(), bytesrepr::Error> { + buf.append(&mut self.proof.to_bytes()?); + buf.append(&mut self.chunk.to_bytes()?); + + Ok(()) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut result)?; + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.proof.serialized_length() + self.chunk.serialized_length() + } +} + +impl FromBytes for ChunkWithProof { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (proof, remainder) = FromBytes::from_bytes(bytes)?; + let (chunk, remainder) = FromBytes::from_bytes(remainder)?; + + Ok((ChunkWithProof { proof, chunk }, remainder)) + } +} + +impl ChunkWithProof { + #[cfg(test)] + /// 10 bytes for testing purposes. + pub const CHUNK_SIZE_BYTES: usize = 10; + + #[cfg(not(test))] + /// 8 MiB + pub const CHUNK_SIZE_BYTES: usize = 8 * 1024 * 1024; + + /// Constructs the [`ChunkWithProof`] that contains the chunk of data with the appropriate index + /// and the cryptographic proof. + /// + /// Empty data is always represented as single, empty chunk and not as zero chunks. 
+ pub fn new(data: &[u8], index: u64) -> Result { + Ok(if data.is_empty() { + ChunkWithProof { + proof: IndexedMerkleProof::new([Digest::blake2b_hash([])], index)?, + chunk: Bytes::new(), + } + } else { + ChunkWithProof { + proof: IndexedMerkleProof::new( + data.chunks(Self::CHUNK_SIZE_BYTES) + .map(Digest::blake2b_hash), + index, + )?, + chunk: Bytes::from( + data.chunks(Self::CHUNK_SIZE_BYTES) + .nth(index as usize) + .ok_or_else(|| MerkleConstructionError::IndexOutOfBounds { + count: data.chunks(Self::CHUNK_SIZE_BYTES).len() as u64, + index, + })?, + ), + } + }) + } + + /// Get a reference to the `ChunkWithProof`'s chunk. + pub fn chunk(&self) -> &[u8] { + self.chunk.as_slice() + } + + /// Convert a chunk with proof into the underlying chunk. + pub fn into_chunk(self) -> Bytes { + self.chunk + } + + /// Returns the `IndexedMerkleProof`. + pub fn proof(&self) -> &IndexedMerkleProof { + &self.proof + } + + /// Verify the integrity of this chunk with indexed Merkle proof. + pub fn verify(&self) -> Result<(), ChunkWithProofVerificationError> { + self.proof().verify()?; + let first_digest_in_indexed_merkle_proof = + self.proof().merkle_proof().first().ok_or_else(|| { + ChunkWithProofVerificationError::ChunkWithProofHasEmptyMerkleProof { + chunk_with_proof: self.clone(), + } + })?; + let hash_of_chunk = Digest::hash(self.chunk()); + if *first_digest_in_indexed_merkle_proof != hash_of_chunk { + return Err( + ChunkWithProofVerificationError::FirstDigestInMerkleProofDidNotMatchHashOfChunk { + first_digest_in_indexed_merkle_proof: *first_digest_in_indexed_merkle_proof, + hash_of_chunk, + }, + ); + } + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use std::convert::TryInto; + + use proptest::{ + arbitrary::Arbitrary, + strategy::{BoxedStrategy, Strategy}, + }; + use proptest_attr_macro::proptest; + use rand::Rng; + + use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + ChunkWithProof, Digest, MerkleConstructionError, + }; + + fn prepare_bytes(length: usize) -> Vec { + 
let mut rng = rand::thread_rng(); + + (0..length).map(|_| rng.gen()).collect() + } + + fn random_chunk_with_proof() -> ChunkWithProof { + let mut rng = rand::thread_rng(); + let data: Vec = prepare_bytes(rng.gen_range(1..1024)); + let index = rng.gen_range(0..data.chunks(ChunkWithProof::CHUNK_SIZE_BYTES).len() as u64); + + ChunkWithProof::new(&data, index).unwrap() + } + + impl ChunkWithProof { + fn replace_first_proof(self) -> Self { + let mut rng = rand::thread_rng(); + let ChunkWithProof { mut proof, chunk } = self; + + // Keep the same number of proofs, but replace the first one with some random hash + let mut merkle_proof: Vec<_> = proof.merkle_proof().to_vec(); + merkle_proof.pop(); + merkle_proof.insert(0, Digest::hash(rng.gen::().to_string())); + proof.inject_merkle_proof(merkle_proof); + + ChunkWithProof { proof, chunk } + } + } + + #[derive(Debug)] + pub struct TestDataSize(usize); + impl Arbitrary for TestDataSize { + type Parameters = (); + type Strategy = BoxedStrategy; + + fn arbitrary_with(_: Self::Parameters) -> Self::Strategy { + (0usize..32usize) + .prop_map(|chunk_count| { + TestDataSize(chunk_count * ChunkWithProof::CHUNK_SIZE_BYTES) + }) + .boxed() + } + } + + #[derive(Debug)] + pub struct TestDataSizeAtLeastTwoChunks(usize); + impl Arbitrary for TestDataSizeAtLeastTwoChunks { + type Parameters = (); + type Strategy = BoxedStrategy; + + fn arbitrary_with(_: Self::Parameters) -> Self::Strategy { + (2usize..32usize) + .prop_map(|chunk_count| { + TestDataSizeAtLeastTwoChunks(chunk_count * ChunkWithProof::CHUNK_SIZE_BYTES) + }) + .boxed() + } + } + + #[proptest] + fn generates_valid_proof(test_data: TestDataSize) { + for data in [prepare_bytes(test_data.0), vec![0u8; test_data.0]] { + let number_of_chunks: u64 = data + .chunks(ChunkWithProof::CHUNK_SIZE_BYTES) + .len() + .try_into() + .unwrap(); + + assert!((0..number_of_chunks) + .map(|chunk_index| { ChunkWithProof::new(data.as_slice(), chunk_index).unwrap() }) + .all(|chunk_with_proof| 
chunk_with_proof.verify().is_ok())); + } + } + + #[proptest] + fn validate_chunks_against_hash_merkle_tree(test_data: TestDataSizeAtLeastTwoChunks) { + // This test requires at least two chunks + assert!(test_data.0 >= ChunkWithProof::CHUNK_SIZE_BYTES * 2); + + for data in [prepare_bytes(test_data.0), vec![0u8; test_data.0]] { + let expected_root = Digest::hash_merkle_tree( + data.chunks(ChunkWithProof::CHUNK_SIZE_BYTES) + .map(Digest::hash), + ); + + // Calculate proof with `ChunkWithProof` + let ChunkWithProof { + proof: proof_0, + chunk: _, + } = ChunkWithProof::new(data.as_slice(), 0).unwrap(); + let ChunkWithProof { + proof: proof_1, + chunk: _, + } = ChunkWithProof::new(data.as_slice(), 1).unwrap(); + + assert_eq!(proof_0.root_hash(), expected_root); + assert_eq!(proof_1.root_hash(), expected_root); + } + } + + #[proptest] + fn verifies_chunk_with_proofs(test_data: TestDataSize) { + for data in [prepare_bytes(test_data.0), vec![0u8; test_data.0]] { + let chunk_with_proof = ChunkWithProof::new(data.as_slice(), 0).unwrap(); + assert!(chunk_with_proof.verify().is_ok()); + + let chunk_with_incorrect_proof = chunk_with_proof.replace_first_proof(); + assert!(chunk_with_incorrect_proof.verify().is_err()); + } + } + + #[proptest] + fn serde_deserialization_of_malformed_chunk_should_work(test_data: TestDataSize) { + for data in [prepare_bytes(test_data.0), vec![0u8; test_data.0]] { + let chunk_with_proof = ChunkWithProof::new(data.as_slice(), 0).unwrap(); + + let json = serde_json::to_string(&chunk_with_proof).unwrap(); + assert_eq!( + chunk_with_proof, + serde_json::from_str::(&json) + .expect("should deserialize correctly") + ); + + let chunk_with_incorrect_proof = chunk_with_proof.replace_first_proof(); + let json = serde_json::to_string(&chunk_with_incorrect_proof).unwrap(); + serde_json::from_str::(&json).expect("should deserialize correctly"); + } + } + + #[proptest] + fn bytesrepr_deserialization_of_malformed_chunk_should_work(test_data: TestDataSize) { + for 
data in [prepare_bytes(test_data.0), vec![0u8; test_data.0]] { + let chunk_with_proof = ChunkWithProof::new(data.as_slice(), 0).unwrap(); + + let bytes = chunk_with_proof + .to_bytes() + .expect("should serialize correctly"); + + let (deserialized_chunk_with_proof, _) = + ChunkWithProof::from_bytes(&bytes).expect("should deserialize correctly"); + + assert_eq!(chunk_with_proof, deserialized_chunk_with_proof); + + let chunk_with_incorrect_proof = chunk_with_proof.replace_first_proof(); + let bytes = chunk_with_incorrect_proof + .to_bytes() + .expect("should serialize correctly"); + + ChunkWithProof::from_bytes(&bytes).expect("should deserialize correctly"); + } + } + + #[test] + fn returns_error_on_incorrect_index() { + // This test needs specific data sizes, hence it doesn't use the proptest + + let chunk_with_proof = ChunkWithProof::new(&[], 0).expect("should create with empty data"); + assert!(chunk_with_proof.verify().is_ok()); + + let chunk_with_proof = + ChunkWithProof::new(&[], 1).expect_err("should error with empty data and index > 0"); + if let MerkleConstructionError::IndexOutOfBounds { count, index } = chunk_with_proof { + assert_eq!(count, 1); + assert_eq!(index, 1); + } else { + panic!("expected MerkleConstructionError::IndexOutOfBounds"); + } + + let data_larger_than_single_chunk = vec![0u8; ChunkWithProof::CHUNK_SIZE_BYTES * 10]; + ChunkWithProof::new(data_larger_than_single_chunk.as_slice(), 9).unwrap(); + + let chunk_with_proof = + ChunkWithProof::new(data_larger_than_single_chunk.as_slice(), 10).unwrap_err(); + if let MerkleConstructionError::IndexOutOfBounds { count, index } = chunk_with_proof { + assert_eq!(count, 10); + assert_eq!(index, 10); + } else { + panic!("expected MerkleConstructionError::IndexOutOfBounds"); + } + } + + #[test] + fn bytesrepr_serialization() { + let chunk_with_proof = random_chunk_with_proof(); + bytesrepr::test_serialization_roundtrip(&chunk_with_proof); + } + + #[test] + fn 
chunk_with_empty_data_contains_a_single_proof() { + let chunk_with_proof = ChunkWithProof::new(&[], 0).unwrap(); + assert_eq!(chunk_with_proof.proof.merkle_proof().len(), 1) + } +} diff --git a/casper_types_ver_2_0/src/digest/error.rs b/casper_types_ver_2_0/src/digest/error.rs new file mode 100644 index 00000000..539e7267 --- /dev/null +++ b/casper_types_ver_2_0/src/digest/error.rs @@ -0,0 +1,233 @@ +//! Errors in constructing and validating indexed Merkle proofs, chunks with indexed Merkle proofs. + +use alloc::string::String; +use core::fmt::{self, Display, Formatter}; +#[cfg(feature = "std")] +use std::error::Error as StdError; + +use super::{ChunkWithProof, Digest}; +use crate::bytesrepr; + +/// Possible hashing errors. +#[derive(Debug)] +#[non_exhaustive] +pub enum Error { + /// The digest length was an incorrect size. + IncorrectDigestLength(usize), + /// There was a decoding error. + Base16DecodeError(base16::DecodeError), +} + +impl Display for Error { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + Error::IncorrectDigestLength(length) => { + write!( + formatter, + "incorrect digest length {}, expected length {}.", + length, + Digest::LENGTH + ) + } + Error::Base16DecodeError(error) => { + write!(formatter, "base16 decode error: {}", error) + } + } + } +} + +#[cfg(feature = "std")] +impl StdError for Error { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + match self { + Error::IncorrectDigestLength(_) => None, + Error::Base16DecodeError(error) => Some(error), + } + } +} + +/// Error validating a Merkle proof of a chunk. +#[derive(Debug, PartialEq, Eq)] +#[non_exhaustive] +pub enum MerkleVerificationError { + /// Index out of bounds. + IndexOutOfBounds { + /// Count. + count: u64, + /// Index. + index: u64, + }, + + /// Unexpected proof length. + UnexpectedProofLength { + /// Count. + count: u64, + /// Index. + index: u64, + /// Expected proof length. + expected_proof_length: u8, + /// Actual proof length. 
+ actual_proof_length: usize, + }, +} + +impl Display for MerkleVerificationError { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + MerkleVerificationError::IndexOutOfBounds { count, index } => { + write!( + formatter, + "index out of bounds - count: {}, index: {}", + count, index + ) + } + MerkleVerificationError::UnexpectedProofLength { + count, + index, + expected_proof_length, + actual_proof_length, + } => { + write!( + formatter, + "unexpected proof length - count: {}, index: {}, expected length: {}, actual \ + length: {}", + count, index, expected_proof_length, actual_proof_length + ) + } + } + } +} + +#[cfg(feature = "std")] +impl StdError for MerkleVerificationError {} + +/// Error validating a chunk with proof. +#[derive(Debug)] +#[non_exhaustive] +pub enum ChunkWithProofVerificationError { + /// Indexed Merkle proof verification error. + MerkleVerificationError(MerkleVerificationError), + + /// Empty Merkle proof for trie with chunk. + ChunkWithProofHasEmptyMerkleProof { + /// Chunk with empty Merkle proof. + chunk_with_proof: ChunkWithProof, + }, + /// Unexpected Merkle root hash. + UnexpectedRootHash, + /// Bytesrepr error. + Bytesrepr(bytesrepr::Error), + + /// First digest in indexed Merkle proof did not match hash of chunk. + FirstDigestInMerkleProofDidNotMatchHashOfChunk { + /// First digest in indexed Merkle proof. + first_digest_in_indexed_merkle_proof: Digest, + /// Hash of chunk. 
+ hash_of_chunk: Digest, + }, +} + +impl Display for ChunkWithProofVerificationError { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + ChunkWithProofVerificationError::MerkleVerificationError(error) => { + write!(formatter, "{}", error) + } + ChunkWithProofVerificationError::ChunkWithProofHasEmptyMerkleProof { + chunk_with_proof, + } => { + write!( + formatter, + "chunk with proof has empty merkle proof: {:?}", + chunk_with_proof + ) + } + ChunkWithProofVerificationError::UnexpectedRootHash => { + write!(formatter, "merkle proof has an unexpected root hash") + } + ChunkWithProofVerificationError::Bytesrepr(error) => { + write!( + formatter, + "bytesrepr error computing chunkable hash: {}", + error + ) + } + ChunkWithProofVerificationError::FirstDigestInMerkleProofDidNotMatchHashOfChunk { + first_digest_in_indexed_merkle_proof, + hash_of_chunk, + } => { + write!( + formatter, + "first digest in merkle proof did not match hash of chunk - first digest: \ + {:?}, hash of chunk: {:?}", + first_digest_in_indexed_merkle_proof, hash_of_chunk + ) + } + } + } +} + +impl From for ChunkWithProofVerificationError { + fn from(error: MerkleVerificationError) -> Self { + ChunkWithProofVerificationError::MerkleVerificationError(error) + } +} + +#[cfg(feature = "std")] +impl StdError for ChunkWithProofVerificationError { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + match self { + ChunkWithProofVerificationError::MerkleVerificationError(error) => Some(error), + ChunkWithProofVerificationError::Bytesrepr(error) => Some(error), + ChunkWithProofVerificationError::ChunkWithProofHasEmptyMerkleProof { .. } + | ChunkWithProofVerificationError::UnexpectedRootHash + | ChunkWithProofVerificationError::FirstDigestInMerkleProofDidNotMatchHashOfChunk { + .. + } => None, + } + } +} + +/// Error during the construction of a Merkle proof. 
+#[derive(Debug, Eq, PartialEq, Clone)] +#[non_exhaustive] +pub enum MerkleConstructionError { + /// Chunk index was out of bounds. + IndexOutOfBounds { + /// Total chunks count. + count: u64, + /// Requested index. + index: u64, + }, + /// Too many Merkle tree leaves. + TooManyLeaves { + /// Total chunks count. + count: String, + }, +} + +impl Display for MerkleConstructionError { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + MerkleConstructionError::IndexOutOfBounds { count, index } => { + write!( + formatter, + "could not construct merkle proof - index out of bounds - count: {}, index: {}", + count, index + ) + } + MerkleConstructionError::TooManyLeaves { count } => { + write!( + formatter, + "could not construct merkle proof - too many leaves - count: {}, max: {} \ + (u64::MAX)", + count, + u64::MAX + ) + } + } + } +} + +#[cfg(feature = "std")] +impl StdError for MerkleConstructionError {} diff --git a/casper_types_ver_2_0/src/digest/indexed_merkle_proof.rs b/casper_types_ver_2_0/src/digest/indexed_merkle_proof.rs new file mode 100644 index 00000000..7e8a7f7c --- /dev/null +++ b/casper_types_ver_2_0/src/digest/indexed_merkle_proof.rs @@ -0,0 +1,514 @@ +//! Constructing and validating indexed Merkle proofs. +use alloc::{string::ToString, vec::Vec}; +use core::convert::TryInto; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use itertools::Itertools; +#[cfg(any(feature = "once_cell", test))] +use once_cell::sync::OnceCell; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use super::{Digest, MerkleConstructionError, MerkleVerificationError}; +use crate::bytesrepr::{self, FromBytes, ToBytes}; + +/// A Merkle proof of the given chunk. 
+#[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct IndexedMerkleProof { + index: u64, + count: u64, + merkle_proof: Vec, + #[cfg_attr(any(feature = "once_cell", test), serde(skip))] + #[cfg_attr( + all(any(feature = "once_cell", test), feature = "datasize"), + data_size(skip) + )] + #[cfg(any(feature = "once_cell", test))] + root_hash: OnceCell, +} + +impl ToBytes for IndexedMerkleProof { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + result.append(&mut self.index.to_bytes()?); + result.append(&mut self.count.to_bytes()?); + result.append(&mut self.merkle_proof.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.index.serialized_length() + + self.count.serialized_length() + + self.merkle_proof.serialized_length() + } +} + +impl FromBytes for IndexedMerkleProof { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (index, remainder) = FromBytes::from_bytes(bytes)?; + let (count, remainder) = FromBytes::from_bytes(remainder)?; + let (merkle_proof, remainder) = FromBytes::from_bytes(remainder)?; + + Ok(( + IndexedMerkleProof { + index, + count, + merkle_proof, + #[cfg(any(feature = "once_cell", test))] + root_hash: OnceCell::new(), + }, + remainder, + )) + } +} + +impl IndexedMerkleProof { + /// Attempts to construct a new instance. 
+ pub fn new(leaves: I, index: u64) -> Result + where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, + { + use HashOrProof::{Hash as H, Proof as P}; + + enum HashOrProof { + Hash(Digest), + Proof(Vec), + } + + let leaves = leaves.into_iter(); + let count: u64 = + leaves + .len() + .try_into() + .map_err(|_| MerkleConstructionError::TooManyLeaves { + count: leaves.len().to_string(), + })?; + + let maybe_proof = leaves + .enumerate() + .map(|(i, hash)| { + if i as u64 == index { + P(vec![hash]) + } else { + H(hash) + } + }) + .tree_fold1(|x, y| match (x, y) { + (H(hash_x), H(hash_y)) => H(Digest::hash_pair(hash_x, hash_y)), + (H(hash), P(mut proof)) | (P(mut proof), H(hash)) => { + proof.push(hash); + P(proof) + } + (P(_), P(_)) => unreachable!(), + }); + + match maybe_proof { + None | Some(H(_)) => Err(MerkleConstructionError::IndexOutOfBounds { count, index }), + Some(P(merkle_proof)) => Ok(IndexedMerkleProof { + index, + count, + merkle_proof, + #[cfg(any(feature = "once_cell", test))] + root_hash: OnceCell::new(), + }), + } + } + + /// Returns the index. + pub fn index(&self) -> u64 { + self.index + } + + /// Returns the total count of chunks. + pub fn count(&self) -> u64 { + self.count + } + + /// Returns the root hash of this proof (i.e. the index hashed with the Merkle root hash). + /// + /// Note that with the `once_cell` feature enabled (generally done by enabling the `std` + /// feature), the root hash is memoized, and hence calling this method is cheap after the first + /// call. Without `once_cell` enabled, every call to this method calculates the root hash. + pub fn root_hash(&self) -> Digest { + #[cfg(any(feature = "once_cell", test))] + return *self.root_hash.get_or_init(|| self.compute_root_hash()); + + #[cfg(not(any(feature = "once_cell", test)))] + self.compute_root_hash() + } + + /// Returns the full collection of hash digests of the proof. + pub fn merkle_proof(&self) -> &[Digest] { + &self.merkle_proof + } + + /// Attempts to verify self. 
+ pub fn verify(&self) -> Result<(), MerkleVerificationError> { + if self.index >= self.count { + return Err(MerkleVerificationError::IndexOutOfBounds { + count: self.count, + index: self.index, + }); + } + let expected_proof_length = self.compute_expected_proof_length(); + if self.merkle_proof.len() != expected_proof_length as usize { + return Err(MerkleVerificationError::UnexpectedProofLength { + count: self.count, + index: self.index, + expected_proof_length, + actual_proof_length: self.merkle_proof.len(), + }); + } + Ok(()) + } + + fn compute_root_hash(&self) -> Digest { + let IndexedMerkleProof { + count, + merkle_proof, + .. + } = self; + + let mut hashes = merkle_proof.iter(); + let raw_root = if let Some(leaf_hash) = hashes.next().cloned() { + // Compute whether to hash left or right for the elements of the Merkle proof. + // This gives a path to the value with the specified index. + // We represent this path as a sequence of 64 bits. 1 here means "hash right". + let mut path: u64 = 0; + let mut n = self.count; + let mut i = self.index; + while n > 1 { + path <<= 1; + let pivot = 1u64 << (63 - (n - 1).leading_zeros()); + if i < pivot { + n = pivot; + } else { + path |= 1; + n -= pivot; + i -= pivot; + } + } + + // Compute the raw Merkle root by hashing the proof from leaf hash up. + hashes.fold(leaf_hash, |acc, hash| { + let digest = if (path & 1) == 1 { + Digest::hash_pair(hash, acc) + } else { + Digest::hash_pair(acc, hash) + }; + path >>= 1; + digest + }) + } else { + Digest::SENTINEL_MERKLE_TREE + }; + + // The Merkle root is the hash of the count with the raw root. 
+ Digest::hash_merkle_root(*count, raw_root) + } + + // Proof lengths are never bigger than 65 is because we are using 64 bit counts + fn compute_expected_proof_length(&self) -> u8 { + if self.count == 0 { + return 0; + } + let mut l = 1; + let mut n = self.count; + let mut i = self.index; + while n > 1 { + let pivot = 1u64 << (63 - (n - 1).leading_zeros()); + if i < pivot { + n = pivot; + } else { + n -= pivot; + i -= pivot; + } + l += 1; + } + l + } + + #[cfg(test)] + pub fn inject_merkle_proof(&mut self, merkle_proof: Vec) { + self.merkle_proof = merkle_proof; + } +} + +#[cfg(test)] +mod tests { + use once_cell::sync::OnceCell; + use proptest::prelude::{prop_assert, prop_assert_eq}; + use proptest_attr_macro::proptest; + use rand::{distributions::Standard, Rng}; + + use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + Digest, IndexedMerkleProof, MerkleVerificationError, + }; + + fn random_indexed_merkle_proof() -> IndexedMerkleProof { + let mut rng = rand::thread_rng(); + let leaf_count: u64 = rng.gen_range(1..100); + let index = rng.gen_range(0..leaf_count); + let leaves: Vec = (0..leaf_count) + .map(|i| Digest::hash(i.to_le_bytes())) + .collect(); + IndexedMerkleProof::new(leaves.iter().cloned(), index) + .expect("should create indexed Merkle proof") + } + + #[test] + fn test_merkle_proofs() { + let mut rng = rand::thread_rng(); + for _ in 0..20 { + let leaf_count: u64 = rng.gen_range(1..100); + let index = rng.gen_range(0..leaf_count); + let leaves: Vec = (0..leaf_count) + .map(|i| Digest::hash(i.to_le_bytes())) + .collect(); + let root = Digest::hash_merkle_tree(leaves.clone()); + let indexed_merkle_proof = IndexedMerkleProof::new(leaves.clone(), index).unwrap(); + assert_eq!( + indexed_merkle_proof.compute_expected_proof_length(), + indexed_merkle_proof.merkle_proof().len() as u8 + ); + assert_eq!(indexed_merkle_proof.verify(), Ok(())); + assert_eq!(leaf_count, indexed_merkle_proof.count); + assert_eq!(leaves[index as usize], 
indexed_merkle_proof.merkle_proof[0]); + assert_eq!(root, indexed_merkle_proof.root_hash()); + } + } + + #[test] + fn out_of_bounds_index() { + let out_of_bounds_indexed_merkle_proof = IndexedMerkleProof { + index: 23, + count: 4, + merkle_proof: vec![Digest([0u8; 32]); 3], + root_hash: OnceCell::new(), + }; + assert_eq!( + out_of_bounds_indexed_merkle_proof.verify(), + Err(MerkleVerificationError::IndexOutOfBounds { + count: 4, + index: 23 + }) + ) + } + + #[test] + fn unexpected_proof_length() { + let out_of_bounds_indexed_merkle_proof = IndexedMerkleProof { + index: 1235, + count: 5647, + merkle_proof: vec![Digest([0u8; 32]); 13], + root_hash: OnceCell::new(), + }; + assert_eq!( + out_of_bounds_indexed_merkle_proof.verify(), + Err(MerkleVerificationError::UnexpectedProofLength { + count: 5647, + index: 1235, + expected_proof_length: 14, + actual_proof_length: 13 + }) + ) + } + + #[test] + fn empty_unexpected_proof_length() { + let out_of_bounds_indexed_merkle_proof = IndexedMerkleProof { + index: 0, + count: 0, + merkle_proof: vec![Digest([0u8; 32]); 3], + root_hash: OnceCell::new(), + }; + assert_eq!( + out_of_bounds_indexed_merkle_proof.verify(), + Err(MerkleVerificationError::IndexOutOfBounds { count: 0, index: 0 }) + ) + } + + #[test] + fn empty_out_of_bounds_index() { + let out_of_bounds_indexed_merkle_proof = IndexedMerkleProof { + index: 23, + count: 0, + merkle_proof: vec![], + root_hash: OnceCell::new(), + }; + assert_eq!( + out_of_bounds_indexed_merkle_proof.verify(), + Err(MerkleVerificationError::IndexOutOfBounds { + count: 0, + index: 23 + }) + ) + } + + #[test] + fn deep_proof_doesnt_kill_stack() { + const PROOF_LENGTH: usize = 63; + let indexed_merkle_proof = IndexedMerkleProof { + index: 42, + count: 1 << (PROOF_LENGTH - 1), + merkle_proof: vec![Digest([0u8; Digest::LENGTH]); PROOF_LENGTH], + root_hash: OnceCell::new(), + }; + let _hash = indexed_merkle_proof.root_hash(); + } + + #[test] + fn empty_proof() { + let empty_merkle_root = 
Digest::hash_merkle_tree(vec![]); + assert_eq!(empty_merkle_root, Digest::SENTINEL_MERKLE_TREE); + let indexed_merkle_proof = IndexedMerkleProof { + index: 0, + count: 0, + merkle_proof: vec![], + root_hash: OnceCell::new(), + }; + assert!(indexed_merkle_proof.verify().is_err()); + } + + #[proptest] + fn expected_proof_length_le_65(index: u64, count: u64) { + let indexed_merkle_proof = IndexedMerkleProof { + index, + count, + merkle_proof: vec![], + root_hash: OnceCell::new(), + }; + prop_assert!(indexed_merkle_proof.compute_expected_proof_length() <= 65); + } + + fn reference_root_from_proof(index: u64, count: u64, proof: &[Digest]) -> Digest { + fn compute_raw_root_from_proof(index: u64, leaf_count: u64, proof: &[Digest]) -> Digest { + if leaf_count == 0 { + return Digest::SENTINEL_MERKLE_TREE; + } + if leaf_count == 1 { + return proof[0]; + } + let half = 1u64 << (63 - (leaf_count - 1).leading_zeros()); + let last = proof.len() - 1; + if index < half { + let left = compute_raw_root_from_proof(index, half, &proof[..last]); + Digest::hash_pair(left, proof[last]) + } else { + let right = + compute_raw_root_from_proof(index - half, leaf_count - half, &proof[..last]); + Digest::hash_pair(proof[last], right) + } + } + + let raw_root = compute_raw_root_from_proof(index, count, proof); + Digest::hash_merkle_root(count, raw_root) + } + + /// Construct an `IndexedMerkleProof` with a proof of zero digests. 
+ fn test_indexed_merkle_proof(index: u64, count: u64) -> IndexedMerkleProof { + let mut indexed_merkle_proof = IndexedMerkleProof { + index, + count, + merkle_proof: vec![], + root_hash: OnceCell::new(), + }; + let expected_proof_length = indexed_merkle_proof.compute_expected_proof_length(); + indexed_merkle_proof.merkle_proof = rand::thread_rng() + .sample_iter(Standard) + .take(expected_proof_length as usize) + .collect(); + indexed_merkle_proof + } + + #[proptest] + fn root_from_proof_agrees_with_recursion(index: u64, count: u64) { + let indexed_merkle_proof = test_indexed_merkle_proof(index, count); + prop_assert_eq!( + indexed_merkle_proof.root_hash(), + reference_root_from_proof( + indexed_merkle_proof.index, + indexed_merkle_proof.count, + indexed_merkle_proof.merkle_proof(), + ), + "Result did not agree with reference implementation.", + ); + } + + #[test] + fn root_from_proof_agrees_with_recursion_2147483648_4294967297() { + let indexed_merkle_proof = test_indexed_merkle_proof(2147483648, 4294967297); + assert_eq!( + indexed_merkle_proof.root_hash(), + reference_root_from_proof( + indexed_merkle_proof.index, + indexed_merkle_proof.count, + indexed_merkle_proof.merkle_proof(), + ), + "Result did not agree with reference implementation.", + ); + } + + #[test] + fn serde_deserialization_of_malformed_proof_should_work() { + let indexed_merkle_proof = test_indexed_merkle_proof(10, 10); + + let json = serde_json::to_string(&indexed_merkle_proof).unwrap(); + assert_eq!( + indexed_merkle_proof, + serde_json::from_str::(&json) + .expect("should deserialize correctly") + ); + + // Check that proof with index greater than count deserializes correctly + let mut indexed_merkle_proof = test_indexed_merkle_proof(10, 10); + indexed_merkle_proof.index += 1; + let json = serde_json::to_string(&indexed_merkle_proof).unwrap(); + serde_json::from_str::(&json).expect("should deserialize correctly"); + + // Check that proof with incorrect length deserializes correctly + let mut 
indexed_merkle_proof = test_indexed_merkle_proof(10, 10); + indexed_merkle_proof.merkle_proof.push(Digest::hash("XXX")); + let json = serde_json::to_string(&indexed_merkle_proof).unwrap(); + serde_json::from_str::(&json).expect("should deserialize correctly"); + } + + #[test] + fn bytesrepr_deserialization_of_malformed_proof_should_work() { + let indexed_merkle_proof = test_indexed_merkle_proof(10, 10); + + let bytes = indexed_merkle_proof + .to_bytes() + .expect("should serialize correctly"); + IndexedMerkleProof::from_bytes(&bytes).expect("should deserialize correctly"); + + // Check that proof with index greater than count deserializes correctly + let mut indexed_merkle_proof = test_indexed_merkle_proof(10, 10); + indexed_merkle_proof.index += 1; + let bytes = indexed_merkle_proof + .to_bytes() + .expect("should serialize correctly"); + IndexedMerkleProof::from_bytes(&bytes).expect("should deserialize correctly"); + + // Check that proof with incorrect length deserializes correctly + let mut indexed_merkle_proof = test_indexed_merkle_proof(10, 10); + indexed_merkle_proof.merkle_proof.push(Digest::hash("XXX")); + let bytes = indexed_merkle_proof + .to_bytes() + .expect("should serialize correctly"); + IndexedMerkleProof::from_bytes(&bytes).expect("should deserialize correctly"); + } + + #[test] + fn bytesrepr_serialization() { + let indexed_merkle_proof = random_indexed_merkle_proof(); + bytesrepr::test_serialization_roundtrip(&indexed_merkle_proof); + } +} diff --git a/casper_types_ver_2_0/src/display_iter.rs b/casper_types_ver_2_0/src/display_iter.rs new file mode 100644 index 00000000..00b23e84 --- /dev/null +++ b/casper_types_ver_2_0/src/display_iter.rs @@ -0,0 +1,40 @@ +use core::{ + cell::RefCell, + fmt::{self, Display, Formatter}, +}; + +/// A helper to allow `Display` printing the items of an iterator with a comma and space between +/// each. +#[derive(Debug)] +pub struct DisplayIter(RefCell>); + +impl DisplayIter { + /// Returns a new `DisplayIter`. 
+ pub fn new(item: T) -> Self { + DisplayIter(RefCell::new(Some(item))) + } +} + +impl Display for DisplayIter +where + I: IntoIterator, + T: Display, +{ + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + if let Some(src) = self.0.borrow_mut().take() { + let mut first = true; + for item in src.into_iter().take(f.width().unwrap_or(usize::MAX)) { + if first { + first = false; + write!(f, "{}", item)?; + } else { + write!(f, ", {}", item)?; + } + } + + Ok(()) + } else { + write!(f, "DisplayIter:GONE") + } + } +} diff --git a/casper_types_ver_2_0/src/era_id.rs b/casper_types_ver_2_0/src/era_id.rs new file mode 100644 index 00000000..5179d59e --- /dev/null +++ b/casper_types_ver_2_0/src/era_id.rs @@ -0,0 +1,254 @@ +use alloc::vec::Vec; +use core::{ + fmt::{self, Debug, Display, Formatter}, + num::ParseIntError, + ops::{Add, AddAssign, Sub}, + str::FromStr, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + CLType, CLTyped, +}; + +/// Era ID newtype. +#[derive( + Debug, Default, Clone, Copy, Hash, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[cfg_attr(feature = "testing", derive(proptest_derive::Arbitrary))] +#[serde(deny_unknown_fields)] +pub struct EraId(u64); + +impl EraId { + /// Maximum possible value an [`EraId`] can hold. + pub const MAX: EraId = EraId(u64::max_value()); + + /// Creates new [`EraId`] instance. + pub const fn new(value: u64) -> EraId { + EraId(value) + } + + /// Returns an iterator over era IDs of `num_eras` future eras starting from current. 
+ pub fn iter(&self, num_eras: u64) -> impl Iterator { + let current_era_id = self.0; + (current_era_id..current_era_id + num_eras).map(EraId) + } + + /// Returns an iterator over era IDs of `num_eras` future eras starting from current, plus the + /// provided one. + pub fn iter_inclusive(&self, num_eras: u64) -> impl Iterator { + let current_era_id = self.0; + (current_era_id..=current_era_id + num_eras).map(EraId) + } + + /// Increments the era. + /// + /// For `u64::MAX`, this returns `u64::MAX` again: We want to make sure this doesn't panic, and + /// that era number will never be reached in practice. + pub fn increment(&mut self) { + self.0 = self.0.saturating_add(1); + } + + /// Returns a successor to current era. + /// + /// For `u64::MAX`, this returns `u64::MAX` again: We want to make sure this doesn't panic, and + /// that era number will never be reached in practice. + #[must_use] + pub fn successor(self) -> EraId { + EraId::from(self.0.saturating_add(1)) + } + + /// Returns the predecessor to current era, or `None` if genesis. + #[must_use] + pub fn predecessor(self) -> Option { + self.0.checked_sub(1).map(EraId) + } + + /// Returns the current era plus `x`, or `None` if that would overflow + pub fn checked_add(&self, x: u64) -> Option { + self.0.checked_add(x).map(EraId) + } + + /// Returns the current era minus `x`, or `None` if that would be less than `0`. + pub fn checked_sub(&self, x: u64) -> Option { + self.0.checked_sub(x).map(EraId) + } + + /// Returns the current era minus `x`, or `0` if that would be less than `0`. + #[must_use] + pub fn saturating_sub(&self, x: u64) -> EraId { + EraId::from(self.0.saturating_sub(x)) + } + + /// Returns the current era plus `x`, or [`EraId::MAX`] if overflow would occur. + #[must_use] + pub fn saturating_add(self, rhs: u64) -> EraId { + EraId(self.0.saturating_add(rhs)) + } + + /// Returns the current era times `x`, or [`EraId::MAX`] if overflow would occur. 
+ #[must_use] + pub fn saturating_mul(&self, x: u64) -> EraId { + EraId::from(self.0.saturating_mul(x)) + } + + /// Returns whether this is era 0. + pub fn is_genesis(&self) -> bool { + self.0 == 0 + } + + /// Returns little endian bytes. + pub fn to_le_bytes(self) -> [u8; 8] { + self.0.to_le_bytes() + } + + /// Returns a raw value held by this [`EraId`] instance. + /// + /// You should prefer [`From`] trait implementations over this method where possible. + pub fn value(self) -> u64 { + self.0 + } + + /// Returns a random `EraId`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + EraId(rng.gen_range(0..1_000_000)) + } +} + +impl FromStr for EraId { + type Err = ParseIntError; + + fn from_str(s: &str) -> Result { + u64::from_str(s).map(EraId) + } +} + +impl Add for EraId { + type Output = EraId; + + #[allow(clippy::arithmetic_side_effects)] // The caller must make sure this doesn't overflow. + fn add(self, x: u64) -> EraId { + EraId::from(self.0 + x) + } +} + +impl AddAssign for EraId { + fn add_assign(&mut self, x: u64) { + self.0 += x; + } +} + +impl Sub for EraId { + type Output = EraId; + + #[allow(clippy::arithmetic_side_effects)] // The caller must make sure this doesn't overflow. 
+ fn sub(self, x: u64) -> EraId { + EraId::from(self.0 - x) + } +} + +impl Display for EraId { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "era {}", self.0) + } +} + +impl From for u64 { + fn from(era_id: EraId) -> Self { + era_id.value() + } +} + +impl From for EraId { + fn from(era_id: u64) -> Self { + EraId(era_id) + } +} + +impl ToBytes for EraId { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + #[inline(always)] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for EraId { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (id_value, remainder) = u64::from_bytes(bytes)?; + let era_id = EraId::from(id_value); + Ok((era_id, remainder)) + } +} + +impl CLTyped for EraId { + fn cl_type() -> CLType { + CLType::U64 + } +} + +#[cfg(test)] +mod tests { + use proptest::prelude::*; + + use super::*; + use crate::gens::era_id_arb; + + #[test] + fn should_calculate_correct_inclusive_future_eras() { + let auction_delay = 3; + + let current_era = EraId::from(42); + + let window: Vec = current_era.iter_inclusive(auction_delay).collect(); + assert_eq!(window.len(), auction_delay as usize + 1); + assert_eq!(window.first(), Some(¤t_era)); + assert_eq!( + window.iter().next_back(), + Some(&(current_era + auction_delay)) + ); + } + + #[test] + fn should_have_valid_genesis_era_id() { + let expected_initial_era_id = EraId::from(0); + assert!(expected_initial_era_id.is_genesis()); + assert!(!expected_initial_era_id.successor().is_genesis()) + } + + #[test] + fn should_increment_era_id() { + let mut era = EraId::from(0); + assert!(era.is_genesis()); + era.increment(); + assert_eq!(era.value(), 1, "should have incremented to 1"); + } + + proptest! 
{ + #[test] + fn bytesrepr_roundtrip(era_id in era_id_arb()) { + bytesrepr::test_serialization_roundtrip(&era_id); + } + } +} diff --git a/casper_types_ver_2_0/src/execution.rs b/casper_types_ver_2_0/src/execution.rs new file mode 100644 index 00000000..887966df --- /dev/null +++ b/casper_types_ver_2_0/src/execution.rs @@ -0,0 +1,17 @@ +//! Types related to execution of deploys. + +mod effects; +mod execution_result; +pub mod execution_result_v1; +mod execution_result_v2; +mod transform; +mod transform_error; +mod transform_kind; + +pub use effects::Effects; +pub use execution_result::ExecutionResult; +pub use execution_result_v1::ExecutionResultV1; +pub use execution_result_v2::ExecutionResultV2; +pub use transform::Transform; +pub use transform_error::TransformError; +pub use transform_kind::{TransformInstruction, TransformKind}; diff --git a/casper_types_ver_2_0/src/execution/effects.rs b/casper_types_ver_2_0/src/execution/effects.rs new file mode 100644 index 00000000..e1031196 --- /dev/null +++ b/casper_types_ver_2_0/src/execution/effects.rs @@ -0,0 +1,105 @@ +use alloc::vec::Vec; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use super::Transform; +#[cfg(any(feature = "testing", test))] +use super::TransformKind; +use crate::bytesrepr::{self, FromBytes, ToBytes}; + +/// A log of all transforms produced during execution. +#[derive(Debug, Clone, Eq, Default, PartialEq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct Effects(Vec); + +impl Effects { + /// Constructs a new, empty `Effects`. + pub const fn new() -> Self { + Effects(vec![]) + } + + /// Returns a reference to the transforms. + pub fn transforms(&self) -> &[Transform] { + &self.0 + } + + /// Appends a transform. 
+ pub fn push(&mut self, transform: Transform) { + self.0.push(transform) + } + + /// Moves all elements from `other` into `self`. + pub fn append(&mut self, mut other: Self) { + self.0.append(&mut other.0); + } + + /// Returns `true` if there are no transforms recorded. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Returns the number of transforms recorded. + pub fn len(&self) -> usize { + self.0.len() + } + + /// Consumes `self`, returning the wrapped vec. + pub fn value(self) -> Vec { + self.0 + } + + /// Returns a random `Effects`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut R) -> Self { + let mut effects = Effects::new(); + let transform_count = rng.gen_range(0..6); + for _ in 0..transform_count { + effects.push(Transform::new(rng.gen(), TransformKind::random(rng))); + } + effects + } +} + +impl ToBytes for Effects { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for Effects { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (transforms, remainder) = Vec::::from_bytes(bytes)?; + Ok((Effects(transforms), remainder)) + } +} + +#[cfg(test)] +mod tests { + use crate::testing::TestRng; + + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let effects = Effects::random(rng); + bytesrepr::test_serialization_roundtrip(&effects); + } +} diff --git a/casper_types_ver_2_0/src/execution/execution_result.rs b/casper_types_ver_2_0/src/execution/execution_result.rs new file mode 100644 index 00000000..c24dfb1d --- /dev/null +++ b/casper_types_ver_2_0/src/execution/execution_result.rs @@ -0,0 +1,148 @@ +use alloc::vec::Vec; + +#[cfg(feature = 
"datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::distributions::Distribution; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use super::{ExecutionResultV1, ExecutionResultV2}; +use crate::bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; + +const V1_TAG: u8 = 0; +const V2_TAG: u8 = 1; + +/// The versioned result of executing a single deploy. +#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub enum ExecutionResult { + /// Version 1 of execution result type. + #[serde(rename = "Version1")] + V1(ExecutionResultV1), + /// Version 2 of execution result type. + #[serde(rename = "Version2")] + V2(ExecutionResultV2), +} + +impl ExecutionResult { + /// Returns a random ExecutionResult. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + if rng.gen_bool(0.5) { + Self::V1(rand::distributions::Standard.sample(rng)) + } else { + Self::V2(ExecutionResultV2::random(rng)) + } + } +} + +impl From for ExecutionResult { + fn from(value: ExecutionResultV1) -> Self { + ExecutionResult::V1(value) + } +} + +impl From for ExecutionResult { + fn from(value: ExecutionResultV2) -> Self { + ExecutionResult::V2(value) + } +} + +impl ToBytes for ExecutionResult { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + ExecutionResult::V1(result) => { + V1_TAG.write_bytes(writer)?; + result.write_bytes(writer) + } + ExecutionResult::V2(result) => { + V2_TAG.write_bytes(writer)?; + result.write_bytes(writer) + } + } + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + ExecutionResult::V1(result) => result.serialized_length(), + ExecutionResult::V2(result) => result.serialized_length(), + } + } +} + +impl FromBytes for ExecutionResult { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + V1_TAG => { + let (result, remainder) = ExecutionResultV1::from_bytes(remainder)?; + Ok((ExecutionResult::V1(result), remainder)) + } + V2_TAG => { + let (result, remainder) = ExecutionResultV2::from_bytes(remainder)?; + Ok((ExecutionResult::V2(result), remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use rand::Rng; + + use super::*; + use crate::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let execution_result = ExecutionResult::V1(rng.gen()); + bytesrepr::test_serialization_roundtrip(&execution_result); + let execution_result = 
ExecutionResult::from(ExecutionResultV2::random(rng)); + bytesrepr::test_serialization_roundtrip(&execution_result); + } + + #[test] + fn bincode_roundtrip() { + let rng = &mut TestRng::new(); + let execution_result = ExecutionResult::V1(rng.gen()); + let serialized = bincode::serialize(&execution_result).unwrap(); + let deserialized = bincode::deserialize(&serialized).unwrap(); + assert_eq!(execution_result, deserialized); + + let execution_result = ExecutionResult::from(ExecutionResultV2::random(rng)); + let serialized = bincode::serialize(&execution_result).unwrap(); + let deserialized = bincode::deserialize(&serialized).unwrap(); + assert_eq!(execution_result, deserialized); + } + + #[test] + fn json_roundtrip() { + let rng = &mut TestRng::new(); + let execution_result = ExecutionResult::V1(rng.gen()); + let serialized = serde_json::to_string(&execution_result).unwrap(); + let deserialized = serde_json::from_str(&serialized).unwrap(); + assert_eq!(execution_result, deserialized); + + let execution_result = ExecutionResult::from(ExecutionResultV2::random(rng)); + let serialized = serde_json::to_string(&execution_result).unwrap(); + let deserialized = serde_json::from_str(&serialized).unwrap(); + assert_eq!(execution_result, deserialized); + } +} diff --git a/casper_types_ver_2_0/src/execution/execution_result_v1.rs b/casper_types_ver_2_0/src/execution/execution_result_v1.rs new file mode 100644 index 00000000..bf8f908a --- /dev/null +++ b/casper_types_ver_2_0/src/execution/execution_result_v1.rs @@ -0,0 +1,794 @@ +//! Types for reporting results of execution pre `casper-node` v2.0.0. 
+ +use core::convert::TryFrom; + +use alloc::{boxed::Box, string::String, vec::Vec}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use num::{FromPrimitive, ToPrimitive}; +use num_derive::{FromPrimitive, ToPrimitive}; +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Distribution, Standard}, + seq::SliceRandom, + Rng, +}; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + account::AccountHash, + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + system::auction::{Bid, BidKind, EraInfo, UnbondingPurse, WithdrawPurse}, + CLValue, DeployInfo, Key, Transfer, TransferAddr, U128, U256, U512, +}; + +#[derive(FromPrimitive, ToPrimitive, Debug)] +#[repr(u8)] +enum ExecutionResultTag { + Failure = 0, + Success = 1, +} + +impl TryFrom for ExecutionResultTag { + type Error = bytesrepr::Error; + + fn try_from(value: u8) -> Result { + FromPrimitive::from_u8(value).ok_or(bytesrepr::Error::Formatting) + } +} + +#[derive(FromPrimitive, ToPrimitive, Debug)] +#[repr(u8)] +enum OpTag { + Read = 0, + Write = 1, + Add = 2, + NoOp = 3, + Prune = 4, +} + +impl TryFrom for OpTag { + type Error = bytesrepr::Error; + + fn try_from(value: u8) -> Result { + FromPrimitive::from_u8(value).ok_or(bytesrepr::Error::Formatting) + } +} + +#[derive(FromPrimitive, ToPrimitive, Debug)] +#[repr(u8)] +enum TransformTag { + Identity = 0, + WriteCLValue = 1, + WriteAccount = 2, + WriteByteCode = 3, + WriteContract = 4, + WritePackage = 5, + WriteDeployInfo = 6, + WriteTransfer = 7, + WriteEraInfo = 8, + WriteBid = 9, + WriteWithdraw = 10, + AddInt32 = 11, + AddUInt64 = 12, + AddUInt128 = 13, + AddUInt256 = 14, + AddUInt512 = 15, + AddKeys = 16, + Failure = 17, + WriteUnbonding = 18, + WriteAddressableEntity = 19, + Prune = 20, + WriteBidKind = 21, +} + +impl TryFrom for TransformTag { + type Error = bytesrepr::Error; + + fn try_from(value: u8) -> Result { + 
FromPrimitive::from_u8(value).ok_or(bytesrepr::Error::Formatting) + } +} + +/// The result of executing a single deploy. +#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub enum ExecutionResultV1 { + /// The result of a failed execution. + Failure { + /// The effect of executing the deploy. + effect: ExecutionEffect, + /// A record of Transfers performed while executing the deploy. + transfers: Vec, + /// The cost of executing the deploy. + cost: U512, + /// The error message associated with executing the deploy. + error_message: String, + }, + /// The result of a successful execution. + Success { + /// The effect of executing the deploy. + effect: ExecutionEffect, + /// A record of Transfers performed while executing the deploy. + transfers: Vec, + /// The cost of executing the deploy. + cost: U512, + }, +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> ExecutionResultV1 { + let op_count = rng.gen_range(0..6); + let mut operations = Vec::new(); + for _ in 0..op_count { + let op = [OpKind::Read, OpKind::Add, OpKind::NoOp, OpKind::Write] + .choose(rng) + .unwrap(); + operations.push(Operation { + key: rng.gen::().to_string(), + kind: *op, + }); + } + + let transform_count = rng.gen_range(0..6); + let mut transforms = Vec::new(); + for _ in 0..transform_count { + transforms.push(TransformEntry { + key: rng.gen::().to_string(), + transform: rng.gen(), + }); + } + + let execution_effect = ExecutionEffect { + operations, + transforms, + }; + + let transfer_count = rng.gen_range(0..6); + let mut transfers = Vec::new(); + for _ in 0..transfer_count { + transfers.push(TransferAddr::new(rng.gen())) + } + + if rng.gen() { + ExecutionResultV1::Failure { + effect: execution_effect, + transfers, + cost: rng.gen::().into(), + error_message: format!("Error 
message {}", rng.gen::()), + } + } else { + ExecutionResultV1::Success { + effect: execution_effect, + transfers, + cost: rng.gen::().into(), + } + } + } +} + +impl ToBytes for ExecutionResultV1 { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + ExecutionResultV1::Failure { + effect, + transfers, + cost, + error_message, + } => { + (ExecutionResultTag::Failure as u8).write_bytes(writer)?; + effect.write_bytes(writer)?; + transfers.write_bytes(writer)?; + cost.write_bytes(writer)?; + error_message.write_bytes(writer) + } + ExecutionResultV1::Success { + effect, + transfers, + cost, + } => { + (ExecutionResultTag::Success as u8).write_bytes(writer)?; + effect.write_bytes(writer)?; + transfers.write_bytes(writer)?; + cost.write_bytes(writer) + } + } + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + ExecutionResultV1::Failure { + effect, + transfers, + cost, + error_message, + } => { + effect.serialized_length() + + transfers.serialized_length() + + cost.serialized_length() + + error_message.serialized_length() + } + ExecutionResultV1::Success { + effect, + transfers, + cost, + } => { + effect.serialized_length() + + transfers.serialized_length() + + cost.serialized_length() + } + } + } +} + +impl FromBytes for ExecutionResultV1 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match TryFrom::try_from(tag)? 
{ + ExecutionResultTag::Failure => { + let (effect, remainder) = ExecutionEffect::from_bytes(remainder)?; + let (transfers, remainder) = Vec::::from_bytes(remainder)?; + let (cost, remainder) = U512::from_bytes(remainder)?; + let (error_message, remainder) = String::from_bytes(remainder)?; + let execution_result = ExecutionResultV1::Failure { + effect, + transfers, + cost, + error_message, + }; + Ok((execution_result, remainder)) + } + ExecutionResultTag::Success => { + let (execution_effect, remainder) = ExecutionEffect::from_bytes(remainder)?; + let (transfers, remainder) = Vec::::from_bytes(remainder)?; + let (cost, remainder) = U512::from_bytes(remainder)?; + let execution_result = ExecutionResultV1::Success { + effect: execution_effect, + transfers, + cost, + }; + Ok((execution_result, remainder)) + } + } + } +} + +/// The sequence of execution transforms from a single deploy. +#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Default, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct ExecutionEffect { + /// The resulting operations. + pub operations: Vec, + /// The sequence of execution transforms. 
+ pub transforms: Vec, +} + +impl ToBytes for ExecutionEffect { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.operations.write_bytes(writer)?; + self.transforms.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.operations.serialized_length() + self.transforms.serialized_length() + } +} + +impl FromBytes for ExecutionEffect { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (operations, remainder) = Vec::::from_bytes(bytes)?; + let (transforms, remainder) = Vec::::from_bytes(remainder)?; + let json_effects = ExecutionEffect { + operations, + transforms, + }; + Ok((json_effects, remainder)) + } +} + +/// An operation performed while executing a deploy. +#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct Operation { + /// The formatted string of the `Key`. + pub key: String, + /// The type of operation. 
+ pub kind: OpKind, +} + +impl ToBytes for Operation { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.key.write_bytes(writer)?; + self.kind.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.key.serialized_length() + self.kind.serialized_length() + } +} + +impl FromBytes for Operation { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (key, remainder) = String::from_bytes(bytes)?; + let (kind, remainder) = OpKind::from_bytes(remainder)?; + let operation = Operation { key, kind }; + Ok((operation, remainder)) + } +} + +/// The type of operation performed while executing a deploy. +#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub enum OpKind { + /// A read operation. + Read, + /// A write operation. + Write, + /// An addition. + Add, + /// An operation which has no effect. + NoOp, + /// A prune operation. 
+ Prune, +} + +impl OpKind { + fn tag(&self) -> OpTag { + match self { + OpKind::Read => OpTag::Read, + OpKind::Write => OpTag::Write, + OpKind::Add => OpTag::Add, + OpKind::NoOp => OpTag::NoOp, + OpKind::Prune => OpTag::Prune, + } + } +} + +impl ToBytes for OpKind { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + let tag_byte = self.tag().to_u8().ok_or(bytesrepr::Error::Formatting)?; + tag_byte.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + } +} + +impl FromBytes for OpKind { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match TryFrom::try_from(tag)? { + OpTag::Read => Ok((OpKind::Read, remainder)), + OpTag::Write => Ok((OpKind::Write, remainder)), + OpTag::Add => Ok((OpKind::Add, remainder)), + OpTag::NoOp => Ok((OpKind::NoOp, remainder)), + OpTag::Prune => Ok((OpKind::Prune, remainder)), + } + } +} + +/// A transformation performed while executing a deploy. +#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct TransformEntry { + /// The formatted string of the `Key`. + pub key: String, + /// The transformation. 
+ pub transform: Transform, +} + +impl ToBytes for TransformEntry { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.key.write_bytes(writer)?; + self.transform.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.key.serialized_length() + self.transform.serialized_length() + } +} + +impl FromBytes for TransformEntry { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (key, remainder) = String::from_bytes(bytes)?; + let (transform, remainder) = Transform::from_bytes(remainder)?; + let transform_entry = TransformEntry { key, transform }; + Ok((transform_entry, remainder)) + } +} + +/// The actual transformation performed while executing a deploy. +#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[cfg_attr(feature = "json-schema", schemars(rename = "TransformV1"))] +#[serde(deny_unknown_fields)] +pub enum Transform { + /// A transform having no effect. + Identity, + /// Writes the given CLValue to global state. + WriteCLValue(CLValue), + /// Writes the given Account to global state. + WriteAccount(AccountHash), + /// Writes a smart contract as Wasm to global state. + WriteContractWasm, + /// Writes a smart contract to global state. + WriteContract, + /// Writes a smart contract package to global state. + WriteContractPackage, + /// Writes the given DeployInfo to global state. + WriteDeployInfo(DeployInfo), + /// Writes the given EraInfo to global state. + WriteEraInfo(EraInfo), + /// Writes the given Transfer to global state. + WriteTransfer(Transfer), + /// Writes the given Bid to global state. + WriteBid(Box), + /// Writes the given Withdraw to global state. 
+ WriteWithdraw(Vec), + /// Adds the given `i32`. + AddInt32(i32), + /// Adds the given `u64`. + AddUInt64(u64), + /// Adds the given `U128`. + AddUInt128(U128), + /// Adds the given `U256`. + AddUInt256(U256), + /// Adds the given `U512`. + AddUInt512(U512), + /// Adds the given collection of named keys. + AddKeys(Vec), + /// A failed transformation, containing an error message. + Failure(String), + /// Writes the given Unbonding to global state. + WriteUnbonding(Vec), + /// Writes the addressable entity to global state. + WriteAddressableEntity, + /// Removes pathing to keyed value within global state. This is a form of soft delete; the + /// underlying value remains in global state and is reachable from older global state root + /// hashes where it was included in the hash up. + Prune(Key), + /// Writes the given BidKind to global state. + WriteBidKind(BidKind), +} + +impl ToBytes for Transform { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + Transform::Identity => (TransformTag::Identity as u8).write_bytes(writer), + Transform::WriteCLValue(value) => { + (TransformTag::WriteCLValue as u8).write_bytes(writer)?; + value.write_bytes(writer) + } + Transform::WriteAccount(account_hash) => { + (TransformTag::WriteAccount as u8).write_bytes(writer)?; + account_hash.write_bytes(writer) + } + Transform::WriteContractWasm => (TransformTag::WriteByteCode as u8).write_bytes(writer), + Transform::WriteContract => (TransformTag::WriteContract as u8).write_bytes(writer), + Transform::WriteContractPackage => { + (TransformTag::WritePackage as u8).write_bytes(writer) + } + Transform::WriteDeployInfo(deploy_info) => { + (TransformTag::WriteDeployInfo as u8).write_bytes(writer)?; + deploy_info.write_bytes(writer) + } + Transform::WriteEraInfo(era_info) => { + (TransformTag::WriteEraInfo as u8).write_bytes(writer)?; + era_info.write_bytes(writer) + } + Transform::WriteTransfer(transfer) => { + (TransformTag::WriteTransfer as 
u8).write_bytes(writer)?; + transfer.write_bytes(writer) + } + Transform::WriteBid(bid) => { + (TransformTag::WriteBid as u8).write_bytes(writer)?; + bid.write_bytes(writer) + } + Transform::WriteWithdraw(unbonding_purses) => { + (TransformTag::WriteWithdraw as u8).write_bytes(writer)?; + unbonding_purses.write_bytes(writer) + } + Transform::AddInt32(value) => { + (TransformTag::AddInt32 as u8).write_bytes(writer)?; + value.write_bytes(writer) + } + Transform::AddUInt64(value) => { + (TransformTag::AddUInt64 as u8).write_bytes(writer)?; + value.write_bytes(writer) + } + Transform::AddUInt128(value) => { + (TransformTag::AddUInt128 as u8).write_bytes(writer)?; + value.write_bytes(writer) + } + Transform::AddUInt256(value) => { + (TransformTag::AddUInt256 as u8).write_bytes(writer)?; + value.write_bytes(writer) + } + Transform::AddUInt512(value) => { + (TransformTag::AddUInt512 as u8).write_bytes(writer)?; + value.write_bytes(writer) + } + Transform::AddKeys(value) => { + (TransformTag::AddKeys as u8).write_bytes(writer)?; + value.write_bytes(writer) + } + Transform::Failure(value) => { + (TransformTag::Failure as u8).write_bytes(writer)?; + value.write_bytes(writer) + } + Transform::WriteUnbonding(value) => { + (TransformTag::WriteUnbonding as u8).write_bytes(writer)?; + value.write_bytes(writer) + } + Transform::WriteAddressableEntity => { + (TransformTag::WriteAddressableEntity as u8).write_bytes(writer) + } + Transform::Prune(value) => { + (TransformTag::Prune as u8).write_bytes(writer)?; + value.write_bytes(writer) + } + Transform::WriteBidKind(value) => { + (TransformTag::WriteBidKind as u8).write_bytes(writer)?; + value.write_bytes(writer) + } + } + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + let body_len = match self { + Transform::Prune(key) => key.serialized_length(), + 
Transform::WriteCLValue(value) => value.serialized_length(), + Transform::WriteAccount(value) => value.serialized_length(), + Transform::WriteDeployInfo(value) => value.serialized_length(), + Transform::WriteEraInfo(value) => value.serialized_length(), + Transform::WriteTransfer(value) => value.serialized_length(), + Transform::AddInt32(value) => value.serialized_length(), + Transform::AddUInt64(value) => value.serialized_length(), + Transform::AddUInt128(value) => value.serialized_length(), + Transform::AddUInt256(value) => value.serialized_length(), + Transform::AddUInt512(value) => value.serialized_length(), + Transform::AddKeys(value) => value.serialized_length(), + Transform::Failure(value) => value.serialized_length(), + Transform::Identity + | Transform::WriteContractWasm + | Transform::WriteContract + | Transform::WriteContractPackage + | Transform::WriteAddressableEntity => 0, + Transform::WriteBid(value) => value.serialized_length(), + Transform::WriteBidKind(value) => value.serialized_length(), + Transform::WriteWithdraw(value) => value.serialized_length(), + Transform::WriteUnbonding(value) => value.serialized_length(), + }; + U8_SERIALIZED_LENGTH + body_len + } +} + +impl FromBytes for Transform { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match TryFrom::try_from(tag)? 
{ + TransformTag::Identity => Ok((Transform::Identity, remainder)), + TransformTag::WriteCLValue => { + let (cl_value, remainder) = CLValue::from_bytes(remainder)?; + Ok((Transform::WriteCLValue(cl_value), remainder)) + } + TransformTag::WriteAccount => { + let (account_hash, remainder) = AccountHash::from_bytes(remainder)?; + Ok((Transform::WriteAccount(account_hash), remainder)) + } + TransformTag::WriteByteCode => Ok((Transform::WriteContractWasm, remainder)), + TransformTag::WriteContract => Ok((Transform::WriteContract, remainder)), + TransformTag::WritePackage => Ok((Transform::WriteContractPackage, remainder)), + TransformTag::WriteDeployInfo => { + let (deploy_info, remainder) = DeployInfo::from_bytes(remainder)?; + Ok((Transform::WriteDeployInfo(deploy_info), remainder)) + } + TransformTag::WriteEraInfo => { + let (era_info, remainder) = EraInfo::from_bytes(remainder)?; + Ok((Transform::WriteEraInfo(era_info), remainder)) + } + TransformTag::WriteTransfer => { + let (transfer, remainder) = Transfer::from_bytes(remainder)?; + Ok((Transform::WriteTransfer(transfer), remainder)) + } + TransformTag::AddInt32 => { + let (value_i32, remainder) = i32::from_bytes(remainder)?; + Ok((Transform::AddInt32(value_i32), remainder)) + } + TransformTag::AddUInt64 => { + let (value_u64, remainder) = u64::from_bytes(remainder)?; + Ok((Transform::AddUInt64(value_u64), remainder)) + } + TransformTag::AddUInt128 => { + let (value_u128, remainder) = U128::from_bytes(remainder)?; + Ok((Transform::AddUInt128(value_u128), remainder)) + } + TransformTag::AddUInt256 => { + let (value_u256, remainder) = U256::from_bytes(remainder)?; + Ok((Transform::AddUInt256(value_u256), remainder)) + } + TransformTag::AddUInt512 => { + let (value_u512, remainder) = U512::from_bytes(remainder)?; + Ok((Transform::AddUInt512(value_u512), remainder)) + } + TransformTag::AddKeys => { + let (value, remainder) = Vec::::from_bytes(remainder)?; + Ok((Transform::AddKeys(value), remainder)) + } + 
TransformTag::Failure => { + let (value, remainder) = String::from_bytes(remainder)?; + Ok((Transform::Failure(value), remainder)) + } + TransformTag::WriteBid => { + let (bid, remainder) = Bid::from_bytes(remainder)?; + Ok((Transform::WriteBid(Box::new(bid)), remainder)) + } + TransformTag::WriteWithdraw => { + let (withdraw_purses, remainder) = + as FromBytes>::from_bytes(remainder)?; + Ok((Transform::WriteWithdraw(withdraw_purses), remainder)) + } + TransformTag::WriteUnbonding => { + let (unbonding_purses, remainder) = + as FromBytes>::from_bytes(remainder)?; + Ok((Transform::WriteUnbonding(unbonding_purses), remainder)) + } + TransformTag::WriteAddressableEntity => { + Ok((Transform::WriteAddressableEntity, remainder)) + } + TransformTag::Prune => { + let (key, remainder) = Key::from_bytes(remainder)?; + Ok((Transform::Prune(key), remainder)) + } + TransformTag::WriteBidKind => { + let (value, remainder) = BidKind::from_bytes(remainder)?; + Ok((Transform::WriteBidKind(value), remainder)) + } + } + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> Transform { + // TODO - include WriteDeployInfo and WriteTransfer as options + match rng.gen_range(0..13) { + 0 => Transform::Identity, + 1 => Transform::WriteCLValue(CLValue::from_t(true).unwrap()), + 2 => Transform::WriteAccount(AccountHash::new(rng.gen())), + 3 => Transform::WriteContractWasm, + 4 => Transform::WriteContract, + 5 => Transform::WriteContractPackage, + 6 => Transform::AddInt32(rng.gen()), + 7 => Transform::AddUInt64(rng.gen()), + 8 => Transform::AddUInt128(rng.gen::().into()), + 9 => Transform::AddUInt256(rng.gen::().into()), + 10 => Transform::AddUInt512(rng.gen::().into()), + 11 => { + let mut named_keys = Vec::new(); + for _ in 0..rng.gen_range(1..6) { + named_keys.push(NamedKey { + name: rng.gen::().to_string(), + key: rng.gen::().to_string(), + }); + } + Transform::AddKeys(named_keys) + } + 12 => 
Transform::Failure(rng.gen::().to_string()), + 13 => Transform::WriteAddressableEntity, + _ => unreachable!(), + } + } +} + +/// A key with a name. +#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Default, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct NamedKey { + /// The name of the entry. + pub name: String, + /// The value of the entry: a casper `Key` type. + #[cfg_attr(feature = "json-schema", schemars(with = "Key"))] + pub key: String, +} + +impl ToBytes for NamedKey { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.name.write_bytes(writer)?; + self.key.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.name.serialized_length() + self.key.serialized_length() + } +} + +impl FromBytes for NamedKey { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (name, remainder) = String::from_bytes(bytes)?; + let (key, remainder) = String::from_bytes(remainder)?; + let named_key = NamedKey { name, key }; + Ok((named_key, remainder)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn bytesrepr_test_transform() { + let mut rng = TestRng::new(); + let transform: Transform = rng.gen(); + bytesrepr::test_serialization_roundtrip(&transform); + } + + #[test] + fn bytesrepr_test_execution_result() { + let mut rng = TestRng::new(); + let execution_result: ExecutionResultV1 = rng.gen(); + bytesrepr::test_serialization_roundtrip(&execution_result); + } +} diff --git a/casper_types_ver_2_0/src/execution/execution_result_v2.rs b/casper_types_ver_2_0/src/execution/execution_result_v2.rs new file mode 100644 index 00000000..9470c133 --- /dev/null +++ 
b/casper_types_ver_2_0/src/execution/execution_result_v2.rs @@ -0,0 +1,259 @@ +//! This file provides types to allow conversion from an EE `ExecutionResult` into a similar type +//! which can be serialized to a valid binary or JSON representation. +//! +//! It is stored as metadata related to a given deploy, and made available to clients via the +//! JSON-RPC API. + +#[cfg(any(feature = "testing", test))] +use alloc::format; +use alloc::{string::String, vec::Vec}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use once_cell::sync::Lazy; +#[cfg(any(feature = "testing", test))] +use rand::{distributions::Standard, prelude::Distribution, Rng}; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use super::Effects; +#[cfg(feature = "json-schema")] +use super::{Transform, TransformKind}; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, RESULT_ERR_TAG, RESULT_OK_TAG, U8_SERIALIZED_LENGTH}, + TransferAddr, U512, +}; +#[cfg(feature = "json-schema")] +use crate::{Key, KEY_HASH_LENGTH}; + +#[cfg(feature = "json-schema")] +static EXECUTION_RESULT: Lazy = Lazy::new(|| { + let key1 = Key::from_formatted_str( + "account-hash-2c4a11c062a8a337bfc97e27fd66291caeb2c65865dcb5d3ef3759c4c97efecb", + ) + .unwrap(); + let key2 = Key::from_formatted_str( + "deploy-af684263911154d26fa05be9963171802801a0b6aff8f199b7391eacb8edc9e1", + ) + .unwrap(); + let mut effects = Effects::new(); + effects.push(Transform::new(key1, TransformKind::AddUInt64(8u64))); + effects.push(Transform::new(key2, TransformKind::Identity)); + + let transfers = vec![ + TransferAddr::new([89; KEY_HASH_LENGTH]), + TransferAddr::new([130; KEY_HASH_LENGTH]), + ]; + + ExecutionResultV2::Success { + effects, + transfers, + cost: U512::from(123_456), + } +}); + +/// The result of executing a single deploy. 
+#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub enum ExecutionResultV2 { + /// The result of a failed execution. + Failure { + /// The effects of executing the deploy. + effects: Effects, + /// A record of transfers performed while executing the deploy. + transfers: Vec, + /// The cost in Motes of executing the deploy. + cost: U512, + /// The error message associated with executing the deploy. + error_message: String, + }, + /// The result of a successful execution. + Success { + /// The effects of executing the deploy. + effects: Effects, + /// A record of transfers performed while executing the deploy. + transfers: Vec, + /// The cost in Motes of executing the deploy. + cost: U512, + }, +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> ExecutionResultV2 { + let transfer_count = rng.gen_range(0..6); + let mut transfers = Vec::new(); + for _ in 0..transfer_count { + transfers.push(TransferAddr::new(rng.gen())) + } + + let effects = Effects::random(rng); + + if rng.gen() { + ExecutionResultV2::Failure { + effects, + transfers, + cost: rng.gen::().into(), + error_message: format!("Error message {}", rng.gen::()), + } + } else { + ExecutionResultV2::Success { + effects, + transfers, + cost: rng.gen::().into(), + } + } + } +} + +impl ExecutionResultV2 { + // This method is not intended to be used by third party crates. + #[doc(hidden)] + #[cfg(feature = "json-schema")] + pub fn example() -> &'static Self { + &EXECUTION_RESULT + } + + /// Returns a random `ExecutionResultV2`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + let effects = Effects::random(rng); + + let transfer_count = rng.gen_range(0..6); + let mut transfers = vec![]; + for _ in 0..transfer_count { + transfers.push(TransferAddr::new(rng.gen())) + } + + let cost = U512::from(rng.gen::()); + + if rng.gen() { + ExecutionResultV2::Failure { + effects, + transfers, + cost, + error_message: format!("Error message {}", rng.gen::()), + } + } else { + ExecutionResultV2::Success { + effects, + transfers, + cost, + } + } + } +} + +impl ToBytes for ExecutionResultV2 { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + ExecutionResultV2::Failure { + effects, + transfers, + cost, + error_message, + } => { + RESULT_ERR_TAG.write_bytes(writer)?; + effects.write_bytes(writer)?; + transfers.write_bytes(writer)?; + cost.write_bytes(writer)?; + error_message.write_bytes(writer) + } + ExecutionResultV2::Success { + effects, + transfers, + cost, + } => { + RESULT_OK_TAG.write_bytes(writer)?; + effects.write_bytes(writer)?; + transfers.write_bytes(writer)?; + cost.write_bytes(writer) + } + } + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + ExecutionResultV2::Failure { + effects, + transfers, + cost, + error_message, + } => { + effects.serialized_length() + + transfers.serialized_length() + + cost.serialized_length() + + error_message.serialized_length() + } + ExecutionResultV2::Success { + effects, + transfers, + cost, + } => { + effects.serialized_length() + + transfers.serialized_length() + + cost.serialized_length() + } + } + } +} + +impl FromBytes for ExecutionResultV2 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + 
RESULT_ERR_TAG => { + let (effects, remainder) = Effects::from_bytes(remainder)?; + let (transfers, remainder) = Vec::::from_bytes(remainder)?; + let (cost, remainder) = U512::from_bytes(remainder)?; + let (error_message, remainder) = String::from_bytes(remainder)?; + let execution_result = ExecutionResultV2::Failure { + effects, + transfers, + cost, + error_message, + }; + Ok((execution_result, remainder)) + } + RESULT_OK_TAG => { + let (effects, remainder) = Effects::from_bytes(remainder)?; + let (transfers, remainder) = Vec::::from_bytes(remainder)?; + let (cost, remainder) = U512::from_bytes(remainder)?; + let execution_result = ExecutionResultV2::Success { + effects, + transfers, + cost, + }; + Ok((execution_result, remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + for _ in 0..10 { + let execution_result = ExecutionResultV2::random(rng); + bytesrepr::test_serialization_roundtrip(&execution_result); + } + } +} diff --git a/casper_types_ver_2_0/src/execution/transform.rs b/casper_types_ver_2_0/src/execution/transform.rs new file mode 100644 index 00000000..c0fd9f98 --- /dev/null +++ b/casper_types_ver_2_0/src/execution/transform.rs @@ -0,0 +1,75 @@ +use alloc::vec::Vec; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use super::TransformKind; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + Key, +}; + +/// A transformation performed while executing a deploy. 
+#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[cfg_attr(feature = "json-schema", schemars(rename = "TransformV2"))] +#[serde(deny_unknown_fields)] +pub struct Transform { + key: Key, + kind: TransformKind, +} + +impl Transform { + /// Constructs a new `Transform`. + pub fn new(key: Key, kind: TransformKind) -> Self { + Transform { key, kind } + } + + /// Returns the key whose value was transformed. + pub fn key(&self) -> &Key { + &self.key + } + + /// Returns the transformation kind. + pub fn kind(&self) -> &TransformKind { + &self.kind + } + + /// Consumes `self`, returning its constituent parts. + pub fn destructure(self) -> (Key, TransformKind) { + (self.key, self.kind) + } +} + +impl ToBytes for Transform { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.key.write_bytes(writer)?; + self.kind.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.key.serialized_length() + self.kind.serialized_length() + } +} + +impl FromBytes for Transform { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (key, remainder) = Key::from_bytes(bytes)?; + let (transform, remainder) = TransformKind::from_bytes(remainder)?; + let transform_entry = Transform { + key, + kind: transform, + }; + Ok((transform_entry, remainder)) + } +} diff --git a/casper_types_ver_2_0/src/execution/transform_error.rs b/casper_types_ver_2_0/src/execution/transform_error.rs new file mode 100644 index 00000000..7936b8fa --- /dev/null +++ b/casper_types_ver_2_0/src/execution/transform_error.rs @@ -0,0 +1,136 @@ +use alloc::vec::Vec; +use core::fmt::{self, Display, Formatter}; +#[cfg(feature = "std")] +use std::error::Error as StdError; + 
+#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + CLValueError, StoredValueTypeMismatch, +}; + +/// Error type for applying and combining transforms. +/// +/// A `TypeMismatch` occurs when a transform cannot be applied because the types are not compatible +/// (e.g. trying to add a number to a string). +#[derive(PartialEq, Eq, Clone, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[non_exhaustive] +pub enum TransformError { + /// Error while (de)serializing data. + Serialization(bytesrepr::Error), + /// Type mismatch error. + TypeMismatch(StoredValueTypeMismatch), + /// Type no longer supported. + Deprecated, +} + +impl Display for TransformError { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + TransformError::Serialization(error) => { + write!(formatter, "{}", error) + } + TransformError::TypeMismatch(error) => { + write!(formatter, "{}", error) + } + TransformError::Deprecated => { + write!(formatter, "type no longer supported") + } + } + } +} + +impl From for TransformError { + fn from(error: StoredValueTypeMismatch) -> Self { + TransformError::TypeMismatch(error) + } +} + +impl From for TransformError { + fn from(cl_value_error: CLValueError) -> TransformError { + match cl_value_error { + CLValueError::Serialization(error) => TransformError::Serialization(error), + CLValueError::Type(cl_type_mismatch) => { + let expected = format!("{:?}", cl_type_mismatch.expected); + let found = format!("{:?}", cl_type_mismatch.found); + let type_mismatch = StoredValueTypeMismatch::new(expected, found); + TransformError::TypeMismatch(type_mismatch) + } + } + } +} + +impl ToBytes for TransformError { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> 
{ + match self { + TransformError::Serialization(error) => { + (TransformErrorTag::Serialization as u8).write_bytes(writer)?; + error.write_bytes(writer) + } + TransformError::TypeMismatch(error) => { + (TransformErrorTag::TypeMismatch as u8).write_bytes(writer)?; + error.write_bytes(writer) + } + TransformError::Deprecated => (TransformErrorTag::Deprecated as u8).write_bytes(writer), + } + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + TransformError::Serialization(error) => error.serialized_length(), + TransformError::TypeMismatch(error) => error.serialized_length(), + TransformError::Deprecated => 0, + } + } +} + +impl FromBytes for TransformError { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + tag if tag == TransformErrorTag::Serialization as u8 => { + let (error, remainder) = bytesrepr::Error::from_bytes(remainder)?; + Ok((TransformError::Serialization(error), remainder)) + } + tag if tag == TransformErrorTag::TypeMismatch as u8 => { + let (error, remainder) = StoredValueTypeMismatch::from_bytes(remainder)?; + Ok((TransformError::TypeMismatch(error), remainder)) + } + tag if tag == TransformErrorTag::Deprecated as u8 => { + Ok((TransformError::Deprecated, remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(feature = "std")] +impl StdError for TransformError { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + match self { + TransformError::Serialization(error) => Some(error), + TransformError::TypeMismatch(_) | TransformError::Deprecated => None, + } + } +} + +#[repr(u8)] +enum TransformErrorTag { + Serialization = 0, + TypeMismatch = 1, + Deprecated = 2, +} diff --git a/casper_types_ver_2_0/src/execution/transform_kind.rs 
b/casper_types_ver_2_0/src/execution/transform_kind.rs new file mode 100644 index 00000000..0c0f6ee4 --- /dev/null +++ b/casper_types_ver_2_0/src/execution/transform_kind.rs @@ -0,0 +1,847 @@ +use alloc::{string::ToString, vec::Vec}; +use core::{any, convert::TryFrom}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use num::traits::{AsPrimitive, WrappingAdd}; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use super::TransformError; +use crate::{ + addressable_entity::NamedKeys, + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + CLType, CLTyped, CLValue, Key, StoredValue, StoredValueTypeMismatch, U128, U256, U512, +}; + +/// Taxonomy of Transform. +#[derive(PartialEq, Eq, Debug, Clone)] +pub enum TransformInstruction { + /// Store a StoredValue. + Store(StoredValue), + /// Prune a StoredValue by Key. + Prune(Key), +} + +impl TransformInstruction { + /// Store instruction. + pub fn store(stored_value: StoredValue) -> Self { + Self::Store(stored_value) + } + + /// Prune instruction. + pub fn prune(key: Key) -> Self { + Self::Prune(key) + } +} + +impl From for TransformInstruction { + fn from(value: StoredValue) -> Self { + TransformInstruction::Store(value) + } +} + +/// Representation of a single transformation occurring during execution. +/// +/// Note that all arithmetic variants of [`TransformKind`] are commutative which means that a given +/// collection of them can be executed in any order to produce the same end result. +#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub enum TransformKind { + /// An identity transformation that does not modify a value in the global state. + /// + /// Created as a result of reading from the global state. 
+ Identity, + /// Writes a new value in the global state. + Write(StoredValue), + /// A wrapping addition of an `i32` to an existing numeric value (not necessarily an `i32`) in + /// the global state. + AddInt32(i32), + /// A wrapping addition of a `u64` to an existing numeric value (not necessarily an `u64`) in + /// the global state. + AddUInt64(u64), + /// A wrapping addition of a `U128` to an existing numeric value (not necessarily an `U128`) in + /// the global state. + AddUInt128(U128), + /// A wrapping addition of a `U256` to an existing numeric value (not necessarily an `U256`) in + /// the global state. + AddUInt256(U256), + /// A wrapping addition of a `U512` to an existing numeric value (not necessarily an `U512`) in + /// the global state. + AddUInt512(U512), + /// Adds new named keys to an existing entry in the global state. + /// + /// This transform assumes that the existing stored value is either an Account or a Contract. + AddKeys(NamedKeys), + /// Removes the pathing to the global state entry of the specified key. The pruned element + /// remains reachable from previously generated global state root hashes, but will not be + /// included in the next generated global state root hash and subsequent state accumulated + /// from it. + Prune(Key), + /// Represents the case where applying a transform would cause an error. + Failure(TransformError), +} + +impl TransformKind { + /// Applies the transformation on a specified stored value instance. + /// + /// This method produces a new `StoredValue` instance based on the `TransformKind` variant. 
+ pub fn apply(self, stored_value: StoredValue) -> Result { + fn store(sv: StoredValue) -> TransformInstruction { + TransformInstruction::Store(sv) + } + match self { + TransformKind::Identity => Ok(store(stored_value)), + TransformKind::Write(new_value) => Ok(store(new_value)), + TransformKind::Prune(key) => Ok(TransformInstruction::prune(key)), + TransformKind::AddInt32(to_add) => wrapping_addition(stored_value, to_add), + TransformKind::AddUInt64(to_add) => wrapping_addition(stored_value, to_add), + TransformKind::AddUInt128(to_add) => wrapping_addition(stored_value, to_add), + TransformKind::AddUInt256(to_add) => wrapping_addition(stored_value, to_add), + TransformKind::AddUInt512(to_add) => wrapping_addition(stored_value, to_add), + TransformKind::AddKeys(keys) => match stored_value { + StoredValue::AddressableEntity(mut entity) => { + entity.named_keys_append(keys); + Ok(store(StoredValue::AddressableEntity(entity))) + } + StoredValue::Account(_) | StoredValue::Contract(_) => { + Err(TransformError::Deprecated) + } + StoredValue::CLValue(cl_value) => { + let expected = "Contract or Account".to_string(); + let found = format!("{:?}", cl_value.cl_type()); + Err(StoredValueTypeMismatch::new(expected, found).into()) + } + StoredValue::Package(_) => { + let expected = "Contract or Account".to_string(); + let found = "ContractPackage".to_string(); + Err(StoredValueTypeMismatch::new(expected, found).into()) + } + StoredValue::ByteCode(_) => { + let expected = "Contract or Account".to_string(); + let found = "ByteCode".to_string(); + Err(StoredValueTypeMismatch::new(expected, found).into()) + } + StoredValue::Transfer(_) => { + let expected = "Contract or Account".to_string(); + let found = "Transfer".to_string(); + Err(StoredValueTypeMismatch::new(expected, found).into()) + } + StoredValue::DeployInfo(_) => { + let expected = "Contract or Account".to_string(); + let found = "DeployInfo".to_string(); + Err(StoredValueTypeMismatch::new(expected, found).into()) + } + 
StoredValue::EraInfo(_) => { + let expected = "Contract or Account".to_string(); + let found = "EraInfo".to_string(); + Err(StoredValueTypeMismatch::new(expected, found).into()) + } + StoredValue::Bid(_) => { + let expected = "Contract or Account".to_string(); + let found = "Bid".to_string(); + Err(StoredValueTypeMismatch::new(expected, found).into()) + } + StoredValue::BidKind(_) => { + let expected = "Contract or Account".to_string(); + let found = "BidKind".to_string(); + Err(StoredValueTypeMismatch::new(expected, found).into()) + } + StoredValue::Withdraw(_) => { + let expected = "Contract or Account".to_string(); + let found = "Withdraw".to_string(); + Err(StoredValueTypeMismatch::new(expected, found).into()) + } + StoredValue::Unbonding(_) => { + let expected = "Contract or Account".to_string(); + let found = "Unbonding".to_string(); + Err(StoredValueTypeMismatch::new(expected, found).into()) + } + StoredValue::ContractWasm(_) => { + let expected = "Contract or Account".to_string(); + let found = "ContractWasm".to_string(); + Err(StoredValueTypeMismatch::new(expected, found).into()) + } + StoredValue::ContractPackage(_) => { + let expected = "Contract or Account".to_string(); + let found = "ContractPackage".to_string(); + Err(StoredValueTypeMismatch::new(expected, found).into()) + } + StoredValue::MessageTopic(_) => { + let expected = "Contract or Account".to_string(); + let found = "MessageTopic".to_string(); + Err(StoredValueTypeMismatch::new(expected, found).into()) + } + StoredValue::Message(_) => { + let expected = "Contract or Account".to_string(); + let found = "Message".to_string(); + Err(StoredValueTypeMismatch::new(expected, found).into()) + } + }, + TransformKind::Failure(error) => Err(error), + } + } + + /// Returns a random `TransformKind`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut R) -> Self { + match rng.gen_range(0..10) { + 0 => TransformKind::Identity, + 1 => TransformKind::Write(StoredValue::CLValue(CLValue::from_t(true).unwrap())), + 2 => TransformKind::AddInt32(rng.gen()), + 3 => TransformKind::AddUInt64(rng.gen()), + 4 => TransformKind::AddUInt128(rng.gen::().into()), + 5 => TransformKind::AddUInt256(rng.gen::().into()), + 6 => TransformKind::AddUInt512(rng.gen::().into()), + 7 => { + let mut named_keys = NamedKeys::new(); + for _ in 0..rng.gen_range(1..6) { + named_keys.insert(rng.gen::().to_string(), rng.gen()); + } + TransformKind::AddKeys(named_keys) + } + 8 => TransformKind::Failure(TransformError::Serialization( + bytesrepr::Error::EarlyEndOfStream, + )), + 9 => TransformKind::Prune(rng.gen::()), + _ => unreachable!(), + } + } +} + +impl ToBytes for TransformKind { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + TransformKind::Identity => (TransformTag::Identity as u8).write_bytes(writer), + TransformKind::Write(stored_value) => { + (TransformTag::Write as u8).write_bytes(writer)?; + stored_value.write_bytes(writer) + } + TransformKind::AddInt32(value) => { + (TransformTag::AddInt32 as u8).write_bytes(writer)?; + value.write_bytes(writer) + } + TransformKind::AddUInt64(value) => { + (TransformTag::AddUInt64 as u8).write_bytes(writer)?; + value.write_bytes(writer) + } + TransformKind::AddUInt128(value) => { + (TransformTag::AddUInt128 as u8).write_bytes(writer)?; + value.write_bytes(writer) + } + TransformKind::AddUInt256(value) => { + (TransformTag::AddUInt256 as u8).write_bytes(writer)?; + value.write_bytes(writer) + } + TransformKind::AddUInt512(value) => { + (TransformTag::AddUInt512 as u8).write_bytes(writer)?; + value.write_bytes(writer) + } + TransformKind::AddKeys(named_keys) => { + (TransformTag::AddKeys as u8).write_bytes(writer)?; + named_keys.write_bytes(writer) + } + TransformKind::Failure(error) => { + 
(TransformTag::Failure as u8).write_bytes(writer)?; + error.write_bytes(writer) + } + TransformKind::Prune(value) => { + (TransformTag::Prune as u8).write_bytes(writer)?; + value.write_bytes(writer) + } + } + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + TransformKind::Identity => 0, + TransformKind::Write(stored_value) => stored_value.serialized_length(), + TransformKind::AddInt32(value) => value.serialized_length(), + TransformKind::AddUInt64(value) => value.serialized_length(), + TransformKind::AddUInt128(value) => value.serialized_length(), + TransformKind::AddUInt256(value) => value.serialized_length(), + TransformKind::AddUInt512(value) => value.serialized_length(), + TransformKind::AddKeys(named_keys) => named_keys.serialized_length(), + TransformKind::Failure(error) => error.serialized_length(), + TransformKind::Prune(value) => value.serialized_length(), + } + } +} + +impl FromBytes for TransformKind { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + tag if tag == TransformTag::Identity as u8 => Ok((TransformKind::Identity, remainder)), + tag if tag == TransformTag::Write as u8 => { + let (stored_value, remainder) = StoredValue::from_bytes(remainder)?; + Ok((TransformKind::Write(stored_value), remainder)) + } + tag if tag == TransformTag::AddInt32 as u8 => { + let (value, remainder) = i32::from_bytes(remainder)?; + Ok((TransformKind::AddInt32(value), remainder)) + } + tag if tag == TransformTag::AddUInt64 as u8 => { + let (value, remainder) = u64::from_bytes(remainder)?; + Ok((TransformKind::AddUInt64(value), remainder)) + } + tag if tag == TransformTag::AddUInt128 as u8 => { + let (value, remainder) = U128::from_bytes(remainder)?; + Ok((TransformKind::AddUInt128(value), 
remainder)) + } + tag if tag == TransformTag::AddUInt256 as u8 => { + let (value, remainder) = U256::from_bytes(remainder)?; + Ok((TransformKind::AddUInt256(value), remainder)) + } + tag if tag == TransformTag::AddUInt512 as u8 => { + let (value, remainder) = U512::from_bytes(remainder)?; + Ok((TransformKind::AddUInt512(value), remainder)) + } + tag if tag == TransformTag::AddKeys as u8 => { + let (named_keys, remainder) = NamedKeys::from_bytes(remainder)?; + Ok((TransformKind::AddKeys(named_keys), remainder)) + } + tag if tag == TransformTag::Failure as u8 => { + let (error, remainder) = TransformError::from_bytes(remainder)?; + Ok((TransformKind::Failure(error), remainder)) + } + tag if tag == TransformTag::Prune as u8 => { + let (key, remainder) = Key::from_bytes(remainder)?; + Ok((TransformKind::Prune(key), remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +/// Attempts a wrapping addition of `to_add` to `stored_value`, assuming `stored_value` is +/// compatible with type `Y`. 
+fn wrapping_addition( + stored_value: StoredValue, + to_add: Y, +) -> Result +where + Y: AsPrimitive + + AsPrimitive + + AsPrimitive + + AsPrimitive + + AsPrimitive + + AsPrimitive + + AsPrimitive + + AsPrimitive, +{ + let cl_value = CLValue::try_from(stored_value)?; + + match cl_value.cl_type() { + CLType::I32 => do_wrapping_addition::(cl_value, to_add), + CLType::I64 => do_wrapping_addition::(cl_value, to_add), + CLType::U8 => do_wrapping_addition::(cl_value, to_add), + CLType::U32 => do_wrapping_addition::(cl_value, to_add), + CLType::U64 => do_wrapping_addition::(cl_value, to_add), + CLType::U128 => do_wrapping_addition::(cl_value, to_add), + CLType::U256 => do_wrapping_addition::(cl_value, to_add), + CLType::U512 => do_wrapping_addition::(cl_value, to_add), + other => { + let expected = format!("integral type compatible with {}", any::type_name::()); + let found = format!("{:?}", other); + Err(StoredValueTypeMismatch::new(expected, found).into()) + } + } +} + +/// Attempts a wrapping addition of `to_add` to the value represented by `cl_value`. 
+fn do_wrapping_addition( + cl_value: CLValue, + to_add: Y, +) -> Result +where + X: WrappingAdd + CLTyped + ToBytes + FromBytes + Copy + 'static, + Y: AsPrimitive, +{ + let x: X = cl_value.into_t()?; + let result = x.wrapping_add(&(to_add.as_())); + let stored_value = StoredValue::CLValue(CLValue::from_t(result)?); + Ok(TransformInstruction::store(stored_value)) +} + +#[derive(Debug)] +#[repr(u8)] +enum TransformTag { + Identity = 0, + Write = 1, + AddInt32 = 2, + AddUInt64 = 3, + AddUInt128 = 4, + AddUInt256 = 5, + AddUInt512 = 6, + AddKeys = 7, + Failure = 8, + Prune = 9, +} + +#[cfg(test)] +mod tests { + use std::{collections::BTreeMap, fmt}; + + use num::{Bounded, Num}; + + use crate::{ + byte_code::ByteCodeKind, bytesrepr::Bytes, testing::TestRng, AccessRights, ByteCode, Key, + URef, U128, U256, U512, + }; + + use super::*; + + const ZERO_ARRAY: [u8; 32] = [0; 32]; + const TEST_STR: &str = "a"; + const TEST_BOOL: bool = true; + + const ZERO_I32: i32 = 0; + const ONE_I32: i32 = 1; + const NEG_ONE_I32: i32 = -1; + const NEG_TWO_I32: i32 = -2; + const MIN_I32: i32 = i32::min_value(); + const MAX_I32: i32 = i32::max_value(); + + const ZERO_I64: i64 = 0; + const ONE_I64: i64 = 1; + const NEG_ONE_I64: i64 = -1; + const NEG_TWO_I64: i64 = -2; + const MIN_I64: i64 = i64::min_value(); + const MAX_I64: i64 = i64::max_value(); + + const ZERO_U8: u8 = 0; + const ONE_U8: u8 = 1; + const MAX_U8: u8 = u8::max_value(); + + const ZERO_U32: u32 = 0; + const ONE_U32: u32 = 1; + const MAX_U32: u32 = u32::max_value(); + + const ZERO_U64: u64 = 0; + const ONE_U64: u64 = 1; + const MAX_U64: u64 = u64::max_value(); + + const ZERO_U128: U128 = U128([0; 2]); + const ONE_U128: U128 = U128([1, 0]); + const MAX_U128: U128 = U128([MAX_U64; 2]); + + const ZERO_U256: U256 = U256([0; 4]); + const ONE_U256: U256 = U256([1, 0, 0, 0]); + const MAX_U256: U256 = U256([MAX_U64; 4]); + + const ZERO_U512: U512 = U512([0; 8]); + const ONE_U512: U512 = U512([1, 0, 0, 0, 0, 0, 0, 0]); + const MAX_U512: 
U512 = U512([MAX_U64; 8]); + + #[test] + fn i32_overflow() { + let max = std::i32::MAX; + let min = std::i32::MIN; + + let max_value = StoredValue::CLValue(CLValue::from_t(max).unwrap()); + let min_value = StoredValue::CLValue(CLValue::from_t(min).unwrap()); + + let apply_overflow = TransformKind::AddInt32(1).apply(max_value.clone()); + let apply_underflow = TransformKind::AddInt32(-1).apply(min_value.clone()); + + assert_eq!( + apply_overflow.expect("Unexpected overflow"), + TransformInstruction::store(min_value) + ); + assert_eq!( + apply_underflow.expect("Unexpected underflow"), + TransformInstruction::store(max_value) + ); + } + + fn uint_overflow_test() + where + T: Num + Bounded + CLTyped + ToBytes + Into + Copy, + { + let max = T::max_value(); + let min = T::min_value(); + let one = T::one(); + let zero = T::zero(); + + let max_value = StoredValue::CLValue(CLValue::from_t(max).unwrap()); + let min_value = StoredValue::CLValue(CLValue::from_t(min).unwrap()); + let zero_value = StoredValue::CLValue(CLValue::from_t(zero).unwrap()); + + let one_transform: TransformKind = one.into(); + + let apply_overflow = TransformKind::AddInt32(1).apply(max_value.clone()); + + let apply_overflow_uint = one_transform.apply(max_value.clone()); + let apply_underflow = TransformKind::AddInt32(-1).apply(min_value); + + assert_eq!(apply_overflow, Ok(zero_value.clone().into())); + assert_eq!(apply_overflow_uint, Ok(zero_value.into())); + assert_eq!(apply_underflow, Ok(max_value.into())); + } + + #[test] + fn u128_overflow() { + impl From for TransformKind { + fn from(x: U128) -> Self { + TransformKind::AddUInt128(x) + } + } + uint_overflow_test::(); + } + + #[test] + fn u256_overflow() { + impl From for TransformKind { + fn from(x: U256) -> Self { + TransformKind::AddUInt256(x) + } + } + uint_overflow_test::(); + } + + #[test] + fn u512_overflow() { + impl From for TransformKind { + fn from(x: U512) -> Self { + TransformKind::AddUInt512(x) + } + } + uint_overflow_test::(); + } + + 
#[test] + fn addition_between_mismatched_types_should_fail() { + fn assert_yields_type_mismatch_error(stored_value: StoredValue) { + match wrapping_addition(stored_value, ZERO_I32) { + Err(TransformError::TypeMismatch(_)) => (), + _ => panic!("wrapping addition should yield TypeMismatch error"), + }; + } + + let byte_code = StoredValue::ByteCode(ByteCode::new(ByteCodeKind::V1CasperWasm, vec![])); + assert_yields_type_mismatch_error(byte_code); + + let uref = URef::new(ZERO_ARRAY, AccessRights::READ); + + let cl_bool = + StoredValue::CLValue(CLValue::from_t(TEST_BOOL).expect("should create CLValue")); + assert_yields_type_mismatch_error(cl_bool); + + let cl_unit = StoredValue::CLValue(CLValue::from_t(()).expect("should create CLValue")); + assert_yields_type_mismatch_error(cl_unit); + + let cl_string = + StoredValue::CLValue(CLValue::from_t(TEST_STR).expect("should create CLValue")); + assert_yields_type_mismatch_error(cl_string); + + let cl_key = StoredValue::CLValue( + CLValue::from_t(Key::Hash(ZERO_ARRAY)).expect("should create CLValue"), + ); + assert_yields_type_mismatch_error(cl_key); + + let cl_uref = StoredValue::CLValue(CLValue::from_t(uref).expect("should create CLValue")); + assert_yields_type_mismatch_error(cl_uref); + + let cl_option = + StoredValue::CLValue(CLValue::from_t(Some(ZERO_U8)).expect("should create CLValue")); + assert_yields_type_mismatch_error(cl_option); + + let cl_list = StoredValue::CLValue( + CLValue::from_t(Bytes::from(vec![ZERO_U8])).expect("should create CLValue"), + ); + assert_yields_type_mismatch_error(cl_list); + + let cl_fixed_list = + StoredValue::CLValue(CLValue::from_t([ZERO_U8]).expect("should create CLValue")); + assert_yields_type_mismatch_error(cl_fixed_list); + + let cl_result: Result<(), u8> = Err(ZERO_U8); + let cl_result = + StoredValue::CLValue(CLValue::from_t(cl_result).expect("should create CLValue")); + assert_yields_type_mismatch_error(cl_result); + + let cl_map = StoredValue::CLValue( + 
CLValue::from_t(BTreeMap::::new()).expect("should create CLValue"), + ); + assert_yields_type_mismatch_error(cl_map); + + let cl_tuple1 = + StoredValue::CLValue(CLValue::from_t((ZERO_U8,)).expect("should create CLValue")); + assert_yields_type_mismatch_error(cl_tuple1); + + let cl_tuple2 = StoredValue::CLValue( + CLValue::from_t((ZERO_U8, ZERO_U8)).expect("should create CLValue"), + ); + assert_yields_type_mismatch_error(cl_tuple2); + + let cl_tuple3 = StoredValue::CLValue( + CLValue::from_t((ZERO_U8, ZERO_U8, ZERO_U8)).expect("should create CLValue"), + ); + assert_yields_type_mismatch_error(cl_tuple3); + } + + #[test] + #[allow(clippy::cognitive_complexity)] + fn wrapping_addition_should_succeed() { + fn add(current_value: X, to_add: Y) -> X + where + X: CLTyped + ToBytes + FromBytes + PartialEq + fmt::Debug, + Y: AsPrimitive + + AsPrimitive + + AsPrimitive + + AsPrimitive + + AsPrimitive + + AsPrimitive + + AsPrimitive + + AsPrimitive, + { + let current = StoredValue::CLValue( + CLValue::from_t(current_value).expect("should create CLValue"), + ); + if let TransformInstruction::Store(result) = + wrapping_addition(current, to_add).expect("wrapping addition should succeed") + { + CLValue::try_from(result) + .expect("should be CLValue") + .into_t() + .expect("should parse to X") + } else { + panic!("expected TransformInstruction::Store"); + } + } + + // Adding to i32 + assert_eq!(ONE_I32, add(ZERO_I32, ONE_I32)); + assert_eq!(MIN_I32, add(MAX_I32, ONE_I32)); + assert_eq!(NEG_TWO_I32, add(MAX_I32, MAX_I32)); + assert_eq!(ZERO_I32, add(ONE_I32, NEG_ONE_I32)); + assert_eq!(NEG_ONE_I32, add(ZERO_I32, NEG_ONE_I32)); + assert_eq!(MAX_I32, add(NEG_ONE_I32, MIN_I32)); + + assert_eq!(ONE_I32, add(ZERO_I32, ONE_U64)); + assert_eq!(MIN_I32, add(MAX_I32, ONE_U64)); + assert_eq!(NEG_TWO_I32, add(MAX_I32, MAX_I32 as u64)); + + assert_eq!(ONE_I32, add(ZERO_I32, ONE_U128)); + assert_eq!(MIN_I32, add(MAX_I32, ONE_U128)); + assert_eq!(NEG_TWO_I32, add(MAX_I32, U128::from(MAX_I32))); 
+ + assert_eq!(ONE_I32, add(ZERO_I32, ONE_U256)); + assert_eq!(MIN_I32, add(MAX_I32, ONE_U256)); + assert_eq!(NEG_TWO_I32, add(MAX_I32, U256::from(MAX_I32))); + + assert_eq!(ONE_I32, add(ZERO_I32, ONE_U512)); + assert_eq!(MIN_I32, add(MAX_I32, ONE_U512)); + assert_eq!(NEG_TWO_I32, add(MAX_I32, U512::from(MAX_I32))); + + // Adding to i64 + assert_eq!(ONE_I64, add(ZERO_I64, ONE_I32)); + assert_eq!(MIN_I64, add(MAX_I64, ONE_I32)); + assert_eq!(ZERO_I64, add(ONE_I64, NEG_ONE_I32)); + assert_eq!(NEG_ONE_I64, add(ZERO_I64, NEG_ONE_I32)); + assert_eq!(MAX_I64, add(MIN_I64, NEG_ONE_I32)); + + assert_eq!(ONE_I64, add(ZERO_I64, ONE_U64)); + assert_eq!(MIN_I64, add(MAX_I64, ONE_U64)); + assert_eq!(NEG_TWO_I64, add(MAX_I64, MAX_I64 as u64)); + + assert_eq!(ONE_I64, add(ZERO_I64, ONE_U128)); + assert_eq!(MIN_I64, add(MAX_I64, ONE_U128)); + assert_eq!(NEG_TWO_I64, add(MAX_I64, U128::from(MAX_I64))); + + assert_eq!(ONE_I64, add(ZERO_I64, ONE_U256)); + assert_eq!(MIN_I64, add(MAX_I64, ONE_U256)); + assert_eq!(NEG_TWO_I64, add(MAX_I64, U256::from(MAX_I64))); + + assert_eq!(ONE_I64, add(ZERO_I64, ONE_U512)); + assert_eq!(MIN_I64, add(MAX_I64, ONE_U512)); + assert_eq!(NEG_TWO_I64, add(MAX_I64, U512::from(MAX_I64))); + + // Adding to u8 + assert_eq!(ONE_U8, add(ZERO_U8, ONE_I32)); + assert_eq!(ZERO_U8, add(MAX_U8, ONE_I32)); + assert_eq!(MAX_U8, add(MAX_U8, 256_i32)); + assert_eq!(ZERO_U8, add(MAX_U8, 257_i32)); + assert_eq!(ZERO_U8, add(ONE_U8, NEG_ONE_I32)); + assert_eq!(MAX_U8, add(ZERO_U8, NEG_ONE_I32)); + assert_eq!(ZERO_U8, add(ZERO_U8, -256_i32)); + assert_eq!(MAX_U8, add(ZERO_U8, -257_i32)); + assert_eq!(MAX_U8, add(ZERO_U8, MAX_I32)); + assert_eq!(ZERO_U8, add(ZERO_U8, MIN_I32)); + + assert_eq!(ONE_U8, add(ZERO_U8, ONE_U64)); + assert_eq!(ZERO_U8, add(MAX_U8, ONE_U64)); + assert_eq!(ONE_U8, add(ZERO_U8, u64::from(MAX_U8) + 2)); + assert_eq!(MAX_U8, add(ZERO_U8, MAX_U64)); + + assert_eq!(ONE_U8, add(ZERO_U8, ONE_U128)); + assert_eq!(ZERO_U8, add(MAX_U8, ONE_U128)); + 
assert_eq!(ONE_U8, add(ZERO_U8, U128::from(MAX_U8) + 2)); + assert_eq!(MAX_U8, add(ZERO_U8, MAX_U128)); + + assert_eq!(ONE_U8, add(ZERO_U8, ONE_U256)); + assert_eq!(ZERO_U8, add(MAX_U8, ONE_U256)); + assert_eq!(ONE_U8, add(ZERO_U8, U256::from(MAX_U8) + 2)); + assert_eq!(MAX_U8, add(ZERO_U8, MAX_U256)); + + assert_eq!(ONE_U8, add(ZERO_U8, ONE_U512)); + assert_eq!(ZERO_U8, add(MAX_U8, ONE_U512)); + assert_eq!(ONE_U8, add(ZERO_U8, U512::from(MAX_U8) + 2)); + assert_eq!(MAX_U8, add(ZERO_U8, MAX_U512)); + + // Adding to u32 + assert_eq!(ONE_U32, add(ZERO_U32, ONE_I32)); + assert_eq!(ZERO_U32, add(MAX_U32, ONE_I32)); + assert_eq!(ZERO_U32, add(ONE_U32, NEG_ONE_I32)); + assert_eq!(MAX_U32, add(ZERO_U32, NEG_ONE_I32)); + assert_eq!(MAX_I32 as u32 + 1, add(ZERO_U32, MIN_I32)); + + assert_eq!(ONE_U32, add(ZERO_U32, ONE_U64)); + assert_eq!(ZERO_U32, add(MAX_U32, ONE_U64)); + assert_eq!(ONE_U32, add(ZERO_U32, u64::from(MAX_U32) + 2)); + assert_eq!(MAX_U32, add(ZERO_U32, MAX_U64)); + + assert_eq!(ONE_U32, add(ZERO_U32, ONE_U128)); + assert_eq!(ZERO_U32, add(MAX_U32, ONE_U128)); + assert_eq!(ONE_U32, add(ZERO_U32, U128::from(MAX_U32) + 2)); + assert_eq!(MAX_U32, add(ZERO_U32, MAX_U128)); + + assert_eq!(ONE_U32, add(ZERO_U32, ONE_U256)); + assert_eq!(ZERO_U32, add(MAX_U32, ONE_U256)); + assert_eq!(ONE_U32, add(ZERO_U32, U256::from(MAX_U32) + 2)); + assert_eq!(MAX_U32, add(ZERO_U32, MAX_U256)); + + assert_eq!(ONE_U32, add(ZERO_U32, ONE_U512)); + assert_eq!(ZERO_U32, add(MAX_U32, ONE_U512)); + assert_eq!(ONE_U32, add(ZERO_U32, U512::from(MAX_U32) + 2)); + assert_eq!(MAX_U32, add(ZERO_U32, MAX_U512)); + + // Adding to u64 + assert_eq!(ONE_U64, add(ZERO_U64, ONE_I32)); + assert_eq!(ZERO_U64, add(MAX_U64, ONE_I32)); + assert_eq!(ZERO_U64, add(ONE_U64, NEG_ONE_I32)); + assert_eq!(MAX_U64, add(ZERO_U64, NEG_ONE_I32)); + + assert_eq!(ONE_U64, add(ZERO_U64, ONE_U64)); + assert_eq!(ZERO_U64, add(MAX_U64, ONE_U64)); + assert_eq!(MAX_U64 - 1, add(MAX_U64, MAX_U64)); + + assert_eq!(ONE_U64, 
add(ZERO_U64, ONE_U128)); + assert_eq!(ZERO_U64, add(MAX_U64, ONE_U128)); + assert_eq!(ONE_U64, add(ZERO_U64, U128::from(MAX_U64) + 2)); + assert_eq!(MAX_U64, add(ZERO_U64, MAX_U128)); + + assert_eq!(ONE_U64, add(ZERO_U64, ONE_U256)); + assert_eq!(ZERO_U64, add(MAX_U64, ONE_U256)); + assert_eq!(ONE_U64, add(ZERO_U64, U256::from(MAX_U64) + 2)); + assert_eq!(MAX_U64, add(ZERO_U64, MAX_U256)); + + assert_eq!(ONE_U64, add(ZERO_U64, ONE_U512)); + assert_eq!(ZERO_U64, add(MAX_U64, ONE_U512)); + assert_eq!(ONE_U64, add(ZERO_U64, U512::from(MAX_U64) + 2)); + assert_eq!(MAX_U64, add(ZERO_U64, MAX_U512)); + + // Adding to U128 + assert_eq!(ONE_U128, add(ZERO_U128, ONE_I32)); + assert_eq!(ZERO_U128, add(MAX_U128, ONE_I32)); + assert_eq!(ZERO_U128, add(ONE_U128, NEG_ONE_I32)); + assert_eq!(MAX_U128, add(ZERO_U128, NEG_ONE_I32)); + + assert_eq!(ONE_U128, add(ZERO_U128, ONE_U64)); + assert_eq!(ZERO_U128, add(MAX_U128, ONE_U64)); + + assert_eq!(ONE_U128, add(ZERO_U128, ONE_U128)); + assert_eq!(ZERO_U128, add(MAX_U128, ONE_U128)); + assert_eq!(MAX_U128 - 1, add(MAX_U128, MAX_U128)); + + assert_eq!(ONE_U128, add(ZERO_U128, ONE_U256)); + assert_eq!(ZERO_U128, add(MAX_U128, ONE_U256)); + assert_eq!( + ONE_U128, + add( + ZERO_U128, + U256::from_dec_str(&MAX_U128.to_string()).unwrap() + 2, + ) + ); + assert_eq!(MAX_U128, add(ZERO_U128, MAX_U256)); + + assert_eq!(ONE_U128, add(ZERO_U128, ONE_U512)); + assert_eq!(ZERO_U128, add(MAX_U128, ONE_U512)); + assert_eq!( + ONE_U128, + add( + ZERO_U128, + U512::from_dec_str(&MAX_U128.to_string()).unwrap() + 2, + ) + ); + assert_eq!(MAX_U128, add(ZERO_U128, MAX_U512)); + + // Adding to U256 + assert_eq!(ONE_U256, add(ZERO_U256, ONE_I32)); + assert_eq!(ZERO_U256, add(MAX_U256, ONE_I32)); + assert_eq!(ZERO_U256, add(ONE_U256, NEG_ONE_I32)); + assert_eq!(MAX_U256, add(ZERO_U256, NEG_ONE_I32)); + + assert_eq!(ONE_U256, add(ZERO_U256, ONE_U64)); + assert_eq!(ZERO_U256, add(MAX_U256, ONE_U64)); + + assert_eq!(ONE_U256, add(ZERO_U256, ONE_U128)); + 
assert_eq!(ZERO_U256, add(MAX_U256, ONE_U128)); + + assert_eq!(ONE_U256, add(ZERO_U256, ONE_U256)); + assert_eq!(ZERO_U256, add(MAX_U256, ONE_U256)); + assert_eq!(MAX_U256 - 1, add(MAX_U256, MAX_U256)); + + assert_eq!(ONE_U256, add(ZERO_U256, ONE_U512)); + assert_eq!(ZERO_U256, add(MAX_U256, ONE_U512)); + assert_eq!( + ONE_U256, + add( + ZERO_U256, + U512::from_dec_str(&MAX_U256.to_string()).unwrap() + 2, + ) + ); + assert_eq!(MAX_U256, add(ZERO_U256, MAX_U512)); + + // Adding to U512 + assert_eq!(ONE_U512, add(ZERO_U512, ONE_I32)); + assert_eq!(ZERO_U512, add(MAX_U512, ONE_I32)); + assert_eq!(ZERO_U512, add(ONE_U512, NEG_ONE_I32)); + assert_eq!(MAX_U512, add(ZERO_U512, NEG_ONE_I32)); + + assert_eq!(ONE_U512, add(ZERO_U512, ONE_U64)); + assert_eq!(ZERO_U512, add(MAX_U512, ONE_U64)); + + assert_eq!(ONE_U512, add(ZERO_U512, ONE_U128)); + assert_eq!(ZERO_U512, add(MAX_U512, ONE_U128)); + + assert_eq!(ONE_U512, add(ZERO_U512, ONE_U256)); + assert_eq!(ZERO_U512, add(MAX_U512, ONE_U256)); + + assert_eq!(ONE_U512, add(ZERO_U512, ONE_U512)); + assert_eq!(ZERO_U512, add(MAX_U512, ONE_U512)); + assert_eq!(MAX_U512 - 1, add(MAX_U512, MAX_U512)); + } + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + for _ in 0..11 { + let execution_result = TransformKind::random(rng); + bytesrepr::test_serialization_roundtrip(&execution_result); + } + } +} diff --git a/casper_types_ver_2_0/src/file_utils.rs b/casper_types_ver_2_0/src/file_utils.rs new file mode 100644 index 00000000..775a7315 --- /dev/null +++ b/casper_types_ver_2_0/src/file_utils.rs @@ -0,0 +1,77 @@ +//! Utilities for handling reading from and writing to files. + +use std::{ + fs, + io::{self, Write}, + os::unix::fs::OpenOptionsExt, + path::{Path, PathBuf}, +}; + +use thiserror::Error; + +/// Error reading a file. +#[derive(Debug, Error)] +#[error("could not read '{0}': {error}", .path.display())] +pub struct ReadFileError { + /// Path that failed to be read. 
+ path: PathBuf, + /// The underlying OS error. + #[source] + error: io::Error, +} + +/// Error writing a file +#[derive(Debug, Error)] +#[error("could not write to '{0}': {error}", .path.display())] +pub struct WriteFileError { + /// Path that failed to be written to. + path: PathBuf, + /// The underlying OS error. + #[source] + error: io::Error, +} + +/// Read complete at `path` into memory. +/// +/// Wraps `fs::read`, but preserves the filename for better error printing. +pub fn read_file>(filename: P) -> Result, ReadFileError> { + let path = filename.as_ref(); + fs::read(path).map_err(|error| ReadFileError { + path: path.to_owned(), + error, + }) +} + +/// Write data to `path`. +/// +/// Wraps `fs::write`, but preserves the filename for better error printing. +pub(crate) fn write_file, B: AsRef<[u8]>>( + filename: P, + data: B, +) -> Result<(), WriteFileError> { + let path = filename.as_ref(); + fs::write(path, data.as_ref()).map_err(|error| WriteFileError { + path: path.to_owned(), + error, + }) +} + +/// Writes data to `path`, ensuring only the owner can read or write it. +/// +/// Otherwise functions like [`write_file`]. +pub(crate) fn write_private_file, B: AsRef<[u8]>>( + filename: P, + data: B, +) -> Result<(), WriteFileError> { + let path = filename.as_ref(); + fs::OpenOptions::new() + .write(true) + .create(true) + .mode(0o600) + .open(path) + .and_then(|mut file| file.write_all(data.as_ref())) + .map_err(|error| WriteFileError { + path: path.to_owned(), + error, + }) +} diff --git a/casper_types_ver_2_0/src/gas.rs b/casper_types_ver_2_0/src/gas.rs new file mode 100644 index 00000000..7689849e --- /dev/null +++ b/casper_types_ver_2_0/src/gas.rs @@ -0,0 +1,240 @@ +//! The `gas` module is used for working with Gas including converting to and from Motes. 
+ +use core::{ + fmt, + iter::Sum, + ops::{Add, AddAssign, Div, Mul, Sub}, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use num::Zero; +use serde::{Deserialize, Serialize}; + +use crate::{Motes, U512}; + +/// The `Gas` struct represents a `U512` amount of gas. +#[derive(Debug, Default, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct Gas(U512); + +impl Gas { + /// Constructs a new `Gas`. + pub fn new(value: U512) -> Self { + Gas(value) + } + + /// Returns the inner `U512` value. + pub fn value(&self) -> U512 { + self.0 + } + + /// Returns the cost to be charged. + pub fn cost(&self, is_system: bool) -> Self { + if is_system { + return Gas::new(U512::zero()); + } + *self + } + + /// Converts the given `motes` to `Gas` by dividing them by `conv_rate`. + /// + /// Returns `None` if `conv_rate == 0`. + pub fn from_motes(motes: Motes, conv_rate: u64) -> Option { + motes + .value() + .checked_div(U512::from(conv_rate)) + .map(Self::new) + } + + /// Checked integer addition. Computes `self + rhs`, returning `None` if overflow occurred. + pub fn checked_add(&self, rhs: Self) -> Option { + self.0.checked_add(rhs.value()).map(Self::new) + } + + /// Checked integer subtraction. Computes `self - rhs`, returning `None` if overflow occurred. 
+ pub fn checked_sub(&self, rhs: Self) -> Option { + self.0.checked_sub(rhs.value()).map(Self::new) + } +} + +impl fmt::Display for Gas { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:?}", self.0) + } +} + +impl Add for Gas { + type Output = Gas; + + fn add(self, rhs: Self) -> Self::Output { + let val = self.value() + rhs.value(); + Gas::new(val) + } +} + +impl Sub for Gas { + type Output = Gas; + + fn sub(self, rhs: Self) -> Self::Output { + let val = self.value() - rhs.value(); + Gas::new(val) + } +} + +impl Div for Gas { + type Output = Gas; + + fn div(self, rhs: Self) -> Self::Output { + let val = self.value() / rhs.value(); + Gas::new(val) + } +} + +impl Mul for Gas { + type Output = Gas; + + fn mul(self, rhs: Self) -> Self::Output { + let val = self.value() * rhs.value(); + Gas::new(val) + } +} + +impl AddAssign for Gas { + fn add_assign(&mut self, rhs: Self) { + self.0 += rhs.0 + } +} + +impl Zero for Gas { + fn zero() -> Self { + Gas::new(U512::zero()) + } + + fn is_zero(&self) -> bool { + self.0.is_zero() + } +} + +impl Sum for Gas { + fn sum>(iter: I) -> Self { + iter.fold(Gas::zero(), Add::add) + } +} + +impl From for Gas { + fn from(gas: u32) -> Self { + let gas_u512: U512 = gas.into(); + Gas::new(gas_u512) + } +} + +impl From for Gas { + fn from(gas: u64) -> Self { + let gas_u512: U512 = gas.into(); + Gas::new(gas_u512) + } +} + +#[cfg(test)] +mod tests { + use crate::U512; + + use crate::{Gas, Motes}; + + #[test] + fn should_be_able_to_get_instance_of_gas() { + let initial_value = 1; + let gas = Gas::new(U512::from(initial_value)); + assert_eq!( + initial_value, + gas.value().as_u64(), + "should have equal value" + ) + } + + #[test] + fn should_be_able_to_compare_two_instances_of_gas() { + let left_gas = Gas::new(U512::from(1)); + let right_gas = Gas::new(U512::from(1)); + assert_eq!(left_gas, right_gas, "should be equal"); + let right_gas = Gas::new(U512::from(2)); + assert_ne!(left_gas, right_gas, "should not be equal") + } 
+ + #[test] + fn should_be_able_to_add_two_instances_of_gas() { + let left_gas = Gas::new(U512::from(1)); + let right_gas = Gas::new(U512::from(1)); + let expected_gas = Gas::new(U512::from(2)); + assert_eq!((left_gas + right_gas), expected_gas, "should be equal") + } + + #[test] + fn should_be_able_to_subtract_two_instances_of_gas() { + let left_gas = Gas::new(U512::from(1)); + let right_gas = Gas::new(U512::from(1)); + let expected_gas = Gas::new(U512::from(0)); + assert_eq!((left_gas - right_gas), expected_gas, "should be equal") + } + + #[test] + fn should_be_able_to_multiply_two_instances_of_gas() { + let left_gas = Gas::new(U512::from(100)); + let right_gas = Gas::new(U512::from(10)); + let expected_gas = Gas::new(U512::from(1000)); + assert_eq!((left_gas * right_gas), expected_gas, "should be equal") + } + + #[test] + fn should_be_able_to_divide_two_instances_of_gas() { + let left_gas = Gas::new(U512::from(1000)); + let right_gas = Gas::new(U512::from(100)); + let expected_gas = Gas::new(U512::from(10)); + assert_eq!((left_gas / right_gas), expected_gas, "should be equal") + } + + #[test] + fn should_be_able_to_convert_from_mote() { + let mote = Motes::new(U512::from(100)); + let gas = Gas::from_motes(mote, 10).expect("should have gas"); + let expected_gas = Gas::new(U512::from(10)); + assert_eq!(gas, expected_gas, "should be equal") + } + + #[test] + fn should_be_able_to_default() { + let gas = Gas::default(); + let expected_gas = Gas::new(U512::from(0)); + assert_eq!(gas, expected_gas, "should be equal") + } + + #[test] + fn should_be_able_to_compare_relative_value() { + let left_gas = Gas::new(U512::from(100)); + let right_gas = Gas::new(U512::from(10)); + assert!(left_gas > right_gas, "should be gt"); + let right_gas = Gas::new(U512::from(100)); + assert!(left_gas >= right_gas, "should be gte"); + assert!(left_gas <= right_gas, "should be lte"); + let left_gas = Gas::new(U512::from(10)); + assert!(left_gas < right_gas, "should be lt"); + } + + #[test] + 
fn should_default() { + let left_gas = Gas::new(U512::from(0)); + let right_gas = Gas::default(); + assert_eq!(left_gas, right_gas, "should be equal"); + let u512 = U512::zero(); + assert_eq!(left_gas.value(), u512, "should be equal"); + } + + #[test] + fn should_support_checked_div_from_motes() { + let motes = Motes::new(U512::zero()); + let conv_rate = 0; + let maybe = Gas::from_motes(motes, conv_rate); + assert!(maybe.is_none(), "should be none due to divide by zero"); + } +} diff --git a/casper_types_ver_2_0/src/gens.rs b/casper_types_ver_2_0/src/gens.rs new file mode 100644 index 00000000..ac09ad12 --- /dev/null +++ b/casper_types_ver_2_0/src/gens.rs @@ -0,0 +1,738 @@ +//! Contains functions for generating arbitrary values for use by +//! [`Proptest`](https://crates.io/crates/proptest). +#![allow(missing_docs)] + +use alloc::{ + boxed::Box, + collections::{BTreeMap, BTreeSet}, + string::String, + vec, +}; + +use proptest::{ + array, bits, bool, + collection::{self, SizeRange}, + option, + prelude::*, + result, +}; + +use crate::{ + account::{self, action_thresholds::gens::account_action_thresholds_arb, AccountHash}, + addressable_entity::{MessageTopics, NamedKeys, Parameters, Weight}, + contract_messages::{MessageChecksum, MessageTopicSummary, TopicNameHash}, + crypto::{self, gens::public_key_arb_no_system}, + package::{EntityVersionKey, EntityVersions, Groups, PackageStatus}, + system::auction::{ + gens::era_info_arb, DelegationRate, Delegator, UnbondingPurse, WithdrawPurse, + DELEGATION_RATE_DENOMINATOR, + }, + transfer::TransferAddr, + AccessRights, AddressableEntity, AddressableEntityHash, BlockTime, ByteCode, CLType, CLValue, + EntryPoint, EntryPointAccess, EntryPointType, EntryPoints, EraId, Group, Key, NamedArg, + Package, Parameter, Phase, ProtocolVersion, SemVer, StoredValue, URef, U128, U256, U512, +}; + +use crate::{ + account::{associated_keys::gens::account_associated_keys_arb, Account}, + addressable_entity::{ + 
action_thresholds::gens::action_thresholds_arb, associated_keys::gens::associated_keys_arb, + }, + byte_code::ByteCodeKind, + contracts::{ + Contract, ContractHash, ContractPackage, ContractPackageStatus, ContractVersionKey, + ContractVersions, + }, + deploy_info::gens::{deploy_hash_arb, transfer_addr_arb}, + package::PackageKind, + system::auction::{Bid, BidAddr, BidKind, ValidatorBid}, +}; +pub use crate::{deploy_info::gens::deploy_info_arb, transfer::gens::transfer_arb}; + +pub fn u8_slice_32() -> impl Strategy { + collection::vec(any::(), 32).prop_map(|b| { + let mut res = [0u8; 32]; + res.clone_from_slice(b.as_slice()); + res + }) +} + +pub fn u2_slice_32() -> impl Strategy { + array::uniform32(any::()).prop_map(|mut arr| { + for byte in arr.iter_mut() { + *byte &= 0b11; + } + arr + }) +} + +pub(crate) fn named_keys_arb(depth: usize) -> impl Strategy { + collection::btree_map("\\PC*", key_arb(), depth).prop_map(NamedKeys::from) +} + +pub fn access_rights_arb() -> impl Strategy { + prop_oneof![ + Just(AccessRights::NONE), + Just(AccessRights::READ), + Just(AccessRights::ADD), + Just(AccessRights::WRITE), + Just(AccessRights::READ_ADD), + Just(AccessRights::READ_WRITE), + Just(AccessRights::ADD_WRITE), + Just(AccessRights::READ_ADD_WRITE), + ] +} + +pub fn phase_arb() -> impl Strategy { + prop_oneof![ + Just(Phase::Payment), + Just(Phase::Session), + Just(Phase::FinalizePayment), + ] +} + +pub fn uref_arb() -> impl Strategy { + (array::uniform32(bits::u8::ANY), access_rights_arb()) + .prop_map(|(id, access_rights)| URef::new(id, access_rights)) +} + +pub fn era_id_arb() -> impl Strategy { + any::().prop_map(EraId::from) +} + +pub fn key_arb() -> impl Strategy { + prop_oneof![ + account_hash_arb().prop_map(Key::Account), + u8_slice_32().prop_map(Key::Hash), + uref_arb().prop_map(Key::URef), + transfer_addr_arb().prop_map(Key::Transfer), + deploy_hash_arb().prop_map(Key::DeployInfo), + era_id_arb().prop_map(Key::EraInfo), + uref_arb().prop_map(|uref| 
Key::Balance(uref.addr())), + bid_addr_validator_arb().prop_map(Key::BidAddr), + bid_addr_delegator_arb().prop_map(Key::BidAddr), + account_hash_arb().prop_map(Key::Withdraw), + u8_slice_32().prop_map(Key::Dictionary), + Just(Key::EraSummary), + ] +} + +pub fn colliding_key_arb() -> impl Strategy { + prop_oneof![ + u2_slice_32().prop_map(|bytes| Key::Account(AccountHash::new(bytes))), + u2_slice_32().prop_map(Key::Hash), + u2_slice_32().prop_map(|bytes| Key::URef(URef::new(bytes, AccessRights::NONE))), + u2_slice_32().prop_map(|bytes| Key::Transfer(TransferAddr::new(bytes))), + u2_slice_32().prop_map(Key::Dictionary), + ] +} + +pub fn account_hash_arb() -> impl Strategy { + u8_slice_32().prop_map(AccountHash::new) +} + +pub fn bid_addr_validator_arb() -> impl Strategy { + u8_slice_32().prop_map(BidAddr::new_validator_addr) +} + +pub fn bid_addr_delegator_arb() -> impl Strategy { + let x = u8_slice_32(); + let y = u8_slice_32(); + (x, y).prop_map(BidAddr::new_delegator_addr) +} + +pub fn weight_arb() -> impl Strategy { + any::().prop_map(Weight::new) +} + +pub fn account_weight_arb() -> impl Strategy { + any::().prop_map(account::Weight::new) +} + +pub fn sem_ver_arb() -> impl Strategy { + (any::(), any::(), any::()) + .prop_map(|(major, minor, patch)| SemVer::new(major, minor, patch)) +} + +pub fn protocol_version_arb() -> impl Strategy { + sem_ver_arb().prop_map(ProtocolVersion::new) +} + +pub fn u128_arb() -> impl Strategy { + collection::vec(any::(), 0..16).prop_map(|b| U128::from_little_endian(b.as_slice())) +} + +pub fn u256_arb() -> impl Strategy { + collection::vec(any::(), 0..32).prop_map(|b| U256::from_little_endian(b.as_slice())) +} + +pub fn u512_arb() -> impl Strategy { + prop_oneof![ + 1 => Just(U512::zero()), + 8 => collection::vec(any::(), 0..64).prop_map(|b| U512::from_little_endian(b.as_slice())), + 1 => Just(U512::MAX), + ] +} + +pub fn cl_simple_type_arb() -> impl Strategy { + prop_oneof![ + Just(CLType::Bool), + Just(CLType::I32), + 
Just(CLType::I64), + Just(CLType::U8), + Just(CLType::U32), + Just(CLType::U64), + Just(CLType::U128), + Just(CLType::U256), + Just(CLType::U512), + Just(CLType::Unit), + Just(CLType::String), + Just(CLType::Key), + Just(CLType::URef), + ] +} + +pub fn cl_type_arb() -> impl Strategy { + cl_simple_type_arb().prop_recursive(4, 16, 8, |element| { + prop_oneof![ + // We want to produce basic types too + element.clone(), + // For complex type + element + .clone() + .prop_map(|val| CLType::Option(Box::new(val))), + element.clone().prop_map(|val| CLType::List(Box::new(val))), + // Realistic Result type generator: ok is anything recursive, err is simple type + (element.clone(), cl_simple_type_arb()).prop_map(|(ok, err)| CLType::Result { + ok: Box::new(ok), + err: Box::new(err) + }), + // Realistic Map type generator: key is simple type, value is complex recursive type + (cl_simple_type_arb(), element.clone()).prop_map(|(key, value)| CLType::Map { + key: Box::new(key), + value: Box::new(value) + }), + // Various tuples + element + .clone() + .prop_map(|cl_type| CLType::Tuple1([Box::new(cl_type)])), + (element.clone(), element.clone()).prop_map(|(cl_type1, cl_type2)| CLType::Tuple2([ + Box::new(cl_type1), + Box::new(cl_type2) + ])), + (element.clone(), element.clone(), element).prop_map( + |(cl_type1, cl_type2, cl_type3)| CLType::Tuple3([ + Box::new(cl_type1), + Box::new(cl_type2), + Box::new(cl_type3) + ]) + ), + ] + }) +} + +pub fn cl_value_arb() -> impl Strategy { + // If compiler brings you here it most probably means you've added a variant to `CLType` enum + // but forgot to add generator for it. + let stub: Option = None; + if let Some(cl_type) = stub { + match cl_type { + CLType::Bool + | CLType::I32 + | CLType::I64 + | CLType::U8 + | CLType::U32 + | CLType::U64 + | CLType::U128 + | CLType::U256 + | CLType::U512 + | CLType::Unit + | CLType::String + | CLType::Key + | CLType::URef + | CLType::PublicKey + | CLType::Option(_) + | CLType::List(_) + | CLType::ByteArray(..) 
+ | CLType::Result { .. } + | CLType::Map { .. } + | CLType::Tuple1(_) + | CLType::Tuple2(_) + | CLType::Tuple3(_) + | CLType::Any => (), + } + }; + + prop_oneof![ + Just(CLValue::from_t(()).expect("should create CLValue")), + any::().prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + any::().prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + any::().prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + any::().prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + any::().prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + any::().prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + u128_arb().prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + u256_arb().prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + u512_arb().prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + key_arb().prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + uref_arb().prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + ".*".prop_map(|x: String| CLValue::from_t(x).expect("should create CLValue")), + option::of(any::()).prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + collection::vec(uref_arb(), 0..100) + .prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + result::maybe_err(key_arb(), ".*") + .prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + collection::btree_map(".*", u512_arb(), 0..100) + .prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + (any::()).prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + (any::(), any::()) + .prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + (any::(), any::(), any::()) + .prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + // Fixed lists of any size + any::().prop_map(|len| CLValue::from_t([len; 32]).expect("should create CLValue")), + ] +} + +pub fn result_arb() -> impl 
Strategy> { + result::maybe_ok(any::(), any::()) +} + +pub fn named_args_arb() -> impl Strategy { + (".*", cl_value_arb()).prop_map(|(name, value)| NamedArg::new(name, value)) +} + +pub fn group_arb() -> impl Strategy { + ".*".prop_map(Group::new) +} + +pub fn entry_point_access_arb() -> impl Strategy { + prop_oneof![ + Just(EntryPointAccess::Public), + collection::vec(group_arb(), 0..32).prop_map(EntryPointAccess::Groups), + Just(EntryPointAccess::Template), + ] +} + +pub fn entry_point_type_arb() -> impl Strategy { + prop_oneof![ + Just(EntryPointType::Session), + Just(EntryPointType::AddressableEntity), + Just(EntryPointType::Factory), + ] +} + +pub fn parameter_arb() -> impl Strategy { + (".*", cl_type_arb()).prop_map(|(name, cl_type)| Parameter::new(name, cl_type)) +} + +pub fn parameters_arb() -> impl Strategy { + collection::vec(parameter_arb(), 0..10) +} + +pub fn entry_point_arb() -> impl Strategy { + ( + ".*", + parameters_arb(), + entry_point_type_arb(), + entry_point_access_arb(), + cl_type_arb(), + ) + .prop_map( + |(name, parameters, entry_point_type, entry_point_access, ret)| { + EntryPoint::new(name, parameters, ret, entry_point_access, entry_point_type) + }, + ) +} + +pub fn entry_points_arb() -> impl Strategy { + collection::vec(entry_point_arb(), 1..10).prop_map(EntryPoints::from) +} + +pub fn message_topics_arb() -> impl Strategy { + collection::vec(any::(), 1..100).prop_map(|topic_names| { + MessageTopics::from( + topic_names + .into_iter() + .map(|name| { + let name_hash = crypto::blake2b(&name).into(); + (name, name_hash) + }) + .collect::>(), + ) + }) +} + +pub fn account_arb() -> impl Strategy { + ( + account_hash_arb(), + named_keys_arb(20), + uref_arb(), + account_associated_keys_arb(), + account_action_thresholds_arb(), + ) + .prop_map( + |(account_hash, named_keys, main_purse, associated_keys, action_thresholds)| { + Account::new( + account_hash, + named_keys, + main_purse, + associated_keys, + action_thresholds, + ) + }, + ) +} + +pub 
fn contract_package_arb() -> impl Strategy { + ( + uref_arb(), + contract_versions_arb(), + disabled_contract_versions_arb(), + groups_arb(), + ) + .prop_map(|(access_key, versions, disabled_versions, groups)| { + ContractPackage::new( + access_key, + versions, + disabled_versions, + groups, + ContractPackageStatus::default(), + ) + }) +} + +pub fn contract_arb() -> impl Strategy { + ( + protocol_version_arb(), + entry_points_arb(), + u8_slice_32(), + u8_slice_32(), + named_keys_arb(20), + ) + .prop_map( + |( + protocol_version, + entry_points, + contract_package_hash_arb, + contract_wasm_hash, + named_keys, + )| { + Contract::new( + contract_package_hash_arb.into(), + contract_wasm_hash.into(), + named_keys, + entry_points, + protocol_version, + ) + }, + ) +} + +pub fn addressable_entity_arb() -> impl Strategy { + ( + protocol_version_arb(), + entry_points_arb(), + u8_slice_32(), + u8_slice_32(), + named_keys_arb(20), + uref_arb(), + associated_keys_arb(), + action_thresholds_arb(), + message_topics_arb(), + ) + .prop_map( + |( + protocol_version, + entry_points, + contract_package_hash_arb, + contract_wasm_hash, + named_keys, + main_purse, + associated_keys, + action_thresholds, + message_topics, + )| { + AddressableEntity::new( + contract_package_hash_arb.into(), + contract_wasm_hash.into(), + named_keys, + entry_points, + protocol_version, + main_purse, + associated_keys, + action_thresholds, + message_topics, + ) + }, + ) +} + +pub fn byte_code_arb() -> impl Strategy { + collection::vec(any::(), 1..1000) + .prop_map(|byte_code| ByteCode::new(ByteCodeKind::V1CasperWasm, byte_code)) +} + +pub fn contract_version_key_arb() -> impl Strategy { + (1..32u32, 1..1000u32) + .prop_map(|(major, contract_ver)| ContractVersionKey::new(major, contract_ver)) +} + +pub fn entity_version_key_arb() -> impl Strategy { + (1..32u32, 1..1000u32) + .prop_map(|(major, contract_ver)| EntityVersionKey::new(major, contract_ver)) +} + +pub fn contract_versions_arb() -> impl Strategy { + 
collection::btree_map( + contract_version_key_arb(), + u8_slice_32().prop_map(ContractHash::new), + 1..5, + ) +} + +pub fn entity_versions_arb() -> impl Strategy { + collection::btree_map( + entity_version_key_arb(), + u8_slice_32().prop_map(AddressableEntityHash::new), + 1..5, + ) + .prop_map(EntityVersions::from) +} + +pub fn disabled_versions_arb() -> impl Strategy> { + collection::btree_set(entity_version_key_arb(), 0..5) +} + +pub fn disabled_contract_versions_arb() -> impl Strategy> { + collection::btree_set(contract_version_key_arb(), 0..5) +} + +pub fn groups_arb() -> impl Strategy { + collection::btree_map(group_arb(), collection::btree_set(uref_arb(), 1..10), 0..5) + .prop_map(Groups::from) +} + +pub fn package_arb() -> impl Strategy { + ( + uref_arb(), + entity_versions_arb(), + disabled_versions_arb(), + groups_arb(), + ) + .prop_map(|(access_key, versions, disabled_versions, groups)| { + Package::new( + access_key, + versions, + disabled_versions, + groups, + PackageStatus::default(), + PackageKind::SmartContract, + ) + }) +} + +pub(crate) fn delegator_arb() -> impl Strategy { + ( + public_key_arb_no_system(), + u512_arb(), + uref_arb(), + public_key_arb_no_system(), + ) + .prop_map( + |(delegator_pk, staked_amount, bonding_purse, validator_pk)| { + Delegator::unlocked(delegator_pk, staked_amount, bonding_purse, validator_pk) + }, + ) +} + +fn delegation_rate_arb() -> impl Strategy { + 0..=DELEGATION_RATE_DENOMINATOR // Maximum, allowed value for delegation rate. 
+} + +pub(crate) fn unified_bid_arb( + delegations_len: impl Into, +) -> impl Strategy { + ( + public_key_arb_no_system(), + uref_arb(), + u512_arb(), + delegation_rate_arb(), + bool::ANY, + collection::vec(delegator_arb(), delegations_len), + ) + .prop_map( + |( + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + is_locked, + new_delegators, + )| { + let mut bid = if is_locked { + Bid::locked( + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + 1u64, + ) + } else { + Bid::unlocked( + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + ) + }; + let delegators = bid.delegators_mut(); + new_delegators.into_iter().for_each(|delegator| { + assert!(delegators + .insert(delegator.delegator_public_key().clone(), delegator) + .is_none()); + }); + BidKind::Unified(Box::new(bid)) + }, + ) +} + +pub(crate) fn delegator_bid_arb() -> impl Strategy { + (delegator_arb()).prop_map(|delegator| BidKind::Delegator(Box::new(delegator))) +} + +pub(crate) fn validator_bid_arb() -> impl Strategy { + ( + public_key_arb_no_system(), + uref_arb(), + u512_arb(), + delegation_rate_arb(), + bool::ANY, + ) + .prop_map( + |(validator_public_key, bonding_purse, staked_amount, delegation_rate, is_locked)| { + let validator_bid = if is_locked { + ValidatorBid::locked( + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + 1u64, + ) + } else { + ValidatorBid::unlocked( + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + ) + }; + BidKind::Validator(Box::new(validator_bid)) + }, + ) +} + +fn withdraw_arb() -> impl Strategy { + ( + uref_arb(), + public_key_arb_no_system(), + public_key_arb_no_system(), + era_id_arb(), + u512_arb(), + ) + .prop_map(|(bonding_purse, validator_pk, unbonder_pk, era, amount)| { + WithdrawPurse::new(bonding_purse, validator_pk, unbonder_pk, era, amount) + }) +} + +fn withdraws_arb(size: impl Into) -> impl Strategy> { + 
collection::vec(withdraw_arb(), size) +} + +fn unbonding_arb() -> impl Strategy { + ( + uref_arb(), + public_key_arb_no_system(), + public_key_arb_no_system(), + era_id_arb(), + u512_arb(), + option::of(public_key_arb_no_system()), + ) + .prop_map( + |( + bonding_purse, + validator_public_key, + unbonder_public_key, + era, + amount, + new_validator, + )| { + UnbondingPurse::new( + bonding_purse, + validator_public_key, + unbonder_public_key, + era, + amount, + new_validator, + ) + }, + ) +} + +fn unbondings_arb(size: impl Into) -> impl Strategy> { + collection::vec(unbonding_arb(), size) +} + +fn message_topic_summary_arb() -> impl Strategy { + (any::(), any::()).prop_map(|(message_count, blocktime)| MessageTopicSummary { + message_count, + blocktime: BlockTime::new(blocktime), + }) +} + +fn message_summary_arb() -> impl Strategy { + u8_slice_32().prop_map(MessageChecksum) +} + +pub fn stored_value_arb() -> impl Strategy { + prop_oneof![ + cl_value_arb().prop_map(StoredValue::CLValue), + account_arb().prop_map(StoredValue::Account), + byte_code_arb().prop_map(StoredValue::ByteCode), + contract_arb().prop_map(StoredValue::Contract), + addressable_entity_arb().prop_map(StoredValue::AddressableEntity), + package_arb().prop_map(StoredValue::Package), + transfer_arb().prop_map(StoredValue::Transfer), + deploy_info_arb().prop_map(StoredValue::DeployInfo), + era_info_arb(1..10).prop_map(StoredValue::EraInfo), + unified_bid_arb(0..3).prop_map(StoredValue::BidKind), + validator_bid_arb().prop_map(StoredValue::BidKind), + delegator_bid_arb().prop_map(StoredValue::BidKind), + withdraws_arb(1..50).prop_map(StoredValue::Withdraw), + unbondings_arb(1..50).prop_map(StoredValue::Unbonding), + message_topic_summary_arb().prop_map(StoredValue::MessageTopic), + message_summary_arb().prop_map(StoredValue::Message), + ] + .prop_map(|stored_value| + // The following match statement is here only to make sure + // we don't forget to update the generator when a new variant is added. 
+ match stored_value { + StoredValue::CLValue(_) => stored_value, + StoredValue::Account(_) => stored_value, + StoredValue::ContractWasm(_) => stored_value, + StoredValue::Contract(_) => stored_value, + StoredValue::ContractPackage(_) => stored_value, + StoredValue::Transfer(_) => stored_value, + StoredValue::DeployInfo(_) => stored_value, + StoredValue::EraInfo(_) => stored_value, + StoredValue::Bid(_) => stored_value, + StoredValue::Withdraw(_) => stored_value, + StoredValue::Unbonding(_) => stored_value, + StoredValue::AddressableEntity(_) => stored_value, + StoredValue::BidKind(_) => stored_value, + StoredValue::Package(_) => stored_value, + StoredValue::ByteCode(_) => stored_value, + StoredValue::MessageTopic(_) => stored_value, + StoredValue::Message(_) => stored_value, + }) +} diff --git a/casper_types_ver_2_0/src/json_pretty_printer.rs b/casper_types_ver_2_0/src/json_pretty_printer.rs new file mode 100644 index 00000000..3648d38c --- /dev/null +++ b/casper_types_ver_2_0/src/json_pretty_printer.rs @@ -0,0 +1,291 @@ +extern crate alloc; + +use alloc::{format, string::String, vec::Vec}; + +use serde::Serialize; +use serde_json::{json, Value}; + +const MAX_STRING_LEN: usize = 150; + +/// Represents the information about a substring found in a string. +#[derive(Debug)] +struct SubstringSpec { + /// Index of the first character. + start_index: usize, + /// Length of the substring. + length: usize, +} + +impl SubstringSpec { + /// Constructs a new StringSpec with the given start index and length. + fn new(start_index: usize, length: usize) -> Self { + Self { + start_index, + length, + } + } +} + +/// Serializes the given data structure as a pretty-printed `String` of JSON using +/// `serde_json::to_string_pretty()`, but after first reducing any large hex-string values. +/// +/// A large hex-string is one containing only hex characters and which is over `MAX_STRING_LEN`. 
+/// Such hex-strings will be replaced by an indication of the number of chars redacted, for example +/// `[130 hex chars]`. +pub fn json_pretty_print(value: &T) -> serde_json::Result +where + T: ?Sized + Serialize, +{ + let mut json_value = json!(value); + shorten_string_field(&mut json_value); + + serde_json::to_string_pretty(&json_value) +} + +/// Searches the given string for all occurrences of hex substrings +/// that are longer than the specified `max_len`. +fn find_hex_strings_longer_than(string: &str, max_len: usize) -> Vec { + let mut ranges_to_remove = Vec::new(); + let mut start_index = 0; + let mut contiguous_hex_count = 0; + + // Record all large hex-strings' start positions and lengths. + for (index, char) in string.char_indices() { + if char.is_ascii_hexdigit() { + if contiguous_hex_count == 0 { + // This is the start of a new hex-string. + start_index = index; + } + contiguous_hex_count += 1; + } else if contiguous_hex_count != 0 { + // This is the end of a hex-string: if it's too long, record it. + if contiguous_hex_count > max_len { + ranges_to_remove.push(SubstringSpec::new(start_index, contiguous_hex_count)); + } + contiguous_hex_count = 0; + } + } + // If the string contains a large hex-string at the end, record it now. + if contiguous_hex_count > max_len { + ranges_to_remove.push(SubstringSpec::new(start_index, contiguous_hex_count)); + } + ranges_to_remove +} + +fn shorten_string_field(value: &mut Value) { + match value { + Value::String(string) => { + // Iterate over the ranges to remove from last to first so each + // replacement start index remains valid. 
+ find_hex_strings_longer_than(string, MAX_STRING_LEN) + .into_iter() + .rev() + .for_each( + |SubstringSpec { + start_index, + length, + }| { + let range = start_index..(start_index + length); + string.replace_range(range, &format!("[{} hex chars]", length)); + }, + ) + } + Value::Array(values) => { + for value in values { + shorten_string_field(value); + } + } + Value::Object(map) => { + for map_value in map.values_mut() { + shorten_string_field(map_value); + } + } + Value::Null | Value::Bool(_) | Value::Number(_) => {} + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn hex_string(length: usize) -> String { + "0123456789abcdef".chars().cycle().take(length).collect() + } + + impl PartialEq<(usize, usize)> for SubstringSpec { + fn eq(&self, other: &(usize, usize)) -> bool { + self.start_index == other.0 && self.length == other.1 + } + } + + #[test] + fn finds_hex_strings_longer_than() { + const TESTING_LEN: usize = 3; + + let input = "01234"; + let expected = vec![(0, 5)]; + let actual = find_hex_strings_longer_than(input, TESTING_LEN); + assert_eq!(actual, expected); + + let input = "01234-0123"; + let expected = vec![(0, 5), (6, 4)]; + let actual = find_hex_strings_longer_than(input, TESTING_LEN); + assert_eq!(actual, expected); + + let input = "012-34-0123"; + let expected = vec![(7, 4)]; + let actual = find_hex_strings_longer_than(input, TESTING_LEN); + assert_eq!(actual, expected); + + let input = "012-34-01-23"; + let expected: Vec<(usize, usize)> = vec![]; + let actual = find_hex_strings_longer_than(input, TESTING_LEN); + assert_eq!(actual, expected); + + let input = "0"; + let expected: Vec<(usize, usize)> = vec![]; + let actual = find_hex_strings_longer_than(input, TESTING_LEN); + assert_eq!(actual, expected); + + let input = ""; + let expected: Vec<(usize, usize)> = vec![]; + let actual = find_hex_strings_longer_than(input, TESTING_LEN); + assert_eq!(actual, expected); + } + + #[test] + fn respects_length() { + let input = "I like beef"; + let 
expected = vec![(7, 4)]; + let actual = find_hex_strings_longer_than(input, 3); + assert_eq!(actual, expected); + + let input = "I like beef"; + let expected: Vec<(usize, usize)> = vec![]; + let actual = find_hex_strings_longer_than(input, 1000); + assert_eq!(actual, expected); + } + + #[test] + fn should_shorten_long_strings() { + let max_unshortened_hex_string = hex_string(MAX_STRING_LEN); + let long_hex_string = hex_string(MAX_STRING_LEN + 1); + let long_non_hex_string: String = "g".repeat(MAX_STRING_LEN + 1); + let long_hex_substring = format!("a-{}-b", hex_string(MAX_STRING_LEN + 1)); + let multiple_long_hex_substrings = + format!("a: {0}, b: {0}, c: {0}", hex_string(MAX_STRING_LEN + 1)); + + let mut long_strings: Vec = vec![]; + for i in 1..=5 { + long_strings.push("a".repeat(MAX_STRING_LEN + i)); + } + let value = json!({ + "field_1": Option::::None, + "field_2": true, + "field_3": 123, + "field_4": max_unshortened_hex_string, + "field_5": ["short string value", long_hex_string], + "field_6": { + "f1": Option::::None, + "f2": false, + "f3": -123, + "f4": long_non_hex_string, + "f5": ["short string value", long_hex_substring], + "f6": { + "final long string": multiple_long_hex_substrings + } + } + }); + + let expected = r#"{ + "field_1": null, + "field_2": true, + "field_3": 123, + "field_4": "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef012345", + "field_5": [ + "short string value", + "[151 hex chars]" + ], + "field_6": { + "f1": null, + "f2": false, + "f3": -123, + "f4": "ggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg", + "f5": [ + "short string value", + "a-[151 hex chars]-b" + ], + "f6": { + "final long string": "a: [151 hex chars], b: [151 hex chars], c: [151 hex chars]" + } + } +}"#; + + let output = json_pretty_print(&value).unwrap(); + assert_eq!( + 
output, expected, + "Actual:\n{}\nExpected:\n{}\n", + output, expected + ); + } + + #[test] + fn should_not_modify_short_strings() { + let max_string: String = "a".repeat(MAX_STRING_LEN); + let value = json!({ + "field_1": Option::::None, + "field_2": true, + "field_3": 123, + "field_4": max_string, + "field_5": [ + "short string value", + "another short string" + ], + "field_6": { + "f1": Option::::None, + "f2": false, + "f3": -123, + "f4": "short", + "f5": [ + "short string value", + "another short string" + ], + "f6": { + "final string": "the last short string" + } + } + }); + + let expected = serde_json::to_string_pretty(&value).unwrap(); + let output = json_pretty_print(&value).unwrap(); + assert_eq!( + output, expected, + "Actual:\n{}\nExpected:\n{}\n", + output, expected + ); + } + + #[test] + /// Ref: https://github.com/casper-network/casper-node/issues/1456 + fn regression_1456() { + let long_string = r#"state query failed: ValueNotFound("Failed to find base key at path: Key::Account(72698d4dc715a28347b15920b09b4f0f1d633be5a33f4686d06992415b0825e2)")"#; + assert_eq!(long_string.len(), 148); + + let value = json!({ + "code": -32003, + "message": long_string, + }); + + let expected = r#"{ + "code": -32003, + "message": "state query failed: ValueNotFound(\"Failed to find base key at path: Key::Account(72698d4dc715a28347b15920b09b4f0f1d633be5a33f4686d06992415b0825e2)\")" +}"#; + + let output = json_pretty_print(&value).unwrap(); + assert_eq!( + output, expected, + "Actual:\n{}\nExpected:\n{}\n", + output, expected + ); + } +} diff --git a/casper_types_ver_2_0/src/key.rs b/casper_types_ver_2_0/src/key.rs new file mode 100644 index 00000000..eebc0f85 --- /dev/null +++ b/casper_types_ver_2_0/src/key.rs @@ -0,0 +1,2172 @@ +//! Key types. 
+ +use alloc::{ + format, + string::{String, ToString}, + vec::Vec, +}; + +use core::{ + convert::TryFrom, + fmt::{self, Debug, Display, Formatter}, + str::FromStr, +}; + +#[cfg(test)] +use crate::testing::TestRng; + +#[cfg(doc)] +use crate::CLValue; +use blake2::{ + digest::{Update, VariableOutput}, + VarBlake2b, +}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; + +use crate::{ + account::{AccountHash, ACCOUNT_HASH_LENGTH}, + addressable_entity, + addressable_entity::AddressableEntityHash, + byte_code::ByteCodeKind, + bytesrepr::{ + self, Error, FromBytes, ToBytes, U32_SERIALIZED_LENGTH, U64_SERIALIZED_LENGTH, + U8_SERIALIZED_LENGTH, + }, + checksummed_hex, + contract_messages::{self, MessageAddr, TopicNameHash, TOPIC_NAME_HASH_LENGTH}, + contract_wasm::ContractWasmHash, + contracts::{ContractHash, ContractPackageHash}, + package::{PackageHash, PackageKindTag}, + system::auction::{BidAddr, BidAddrTag}, + uref::{self, URef, URefAddr, UREF_SERIALIZED_LENGTH}, + DeployHash, Digest, EraId, Tagged, TransferAddr, TransferFromStrError, TRANSFER_ADDR_LENGTH, + UREF_ADDR_LENGTH, +}; + +const HASH_PREFIX: &str = "hash-"; +const DEPLOY_INFO_PREFIX: &str = "deploy-"; +const ERA_INFO_PREFIX: &str = "era-"; +const BALANCE_PREFIX: &str = "balance-"; +const BID_PREFIX: &str = "bid-"; +const WITHDRAW_PREFIX: &str = "withdraw-"; +const DICTIONARY_PREFIX: &str = "dictionary-"; +const UNBOND_PREFIX: &str = "unbond-"; +const SYSTEM_CONTRACT_REGISTRY_PREFIX: &str = "system-contract-registry-"; +const ERA_SUMMARY_PREFIX: &str = "era-summary-"; +const CHAINSPEC_REGISTRY_PREFIX: &str = "chainspec-registry-"; +const CHECKSUM_REGISTRY_PREFIX: &str = "checksum-registry-"; +const BID_ADDR_PREFIX: &str = "bid-addr-"; +const 
PACKAGE_PREFIX: &str = "package-"; +const ENTITY_PREFIX: &str = "addressable-entity-"; +const ACCOUNT_ENTITY_PREFIX: &str = "account-"; +const CONTRACT_ENTITY_PREFIX: &str = "contract-"; +const SYSTEM_ENTITY_PREFIX: &str = "system-"; +const BYTE_CODE_PREFIX: &str = "byte-code-"; +const V1_WASM_PREFIX: &str = "v1-wasm-"; +const EMPTY_PREFIX: &str = "empty-"; + +/// The number of bytes in a Blake2b hash +pub const BLAKE2B_DIGEST_LENGTH: usize = 32; +/// The number of bytes in a [`Key::Hash`]. +pub const KEY_HASH_LENGTH: usize = 32; +/// The number of bytes in a [`Key::Transfer`]. +pub const KEY_TRANSFER_LENGTH: usize = TRANSFER_ADDR_LENGTH; +/// The number of bytes in a [`Key::DeployInfo`]. +pub const KEY_DEPLOY_INFO_LENGTH: usize = DeployHash::LENGTH; +/// The number of bytes in a [`Key::Dictionary`]. +pub const KEY_DICTIONARY_LENGTH: usize = 32; +/// The maximum length for a `dictionary_item_key`. +pub const DICTIONARY_ITEM_KEY_MAX_LENGTH: usize = 128; +/// The maximum length for an `Addr`. 
+pub const ADDR_LENGTH: usize = 32; +const PADDING_BYTES: [u8; 32] = [0u8; 32]; +const KEY_ID_SERIALIZED_LENGTH: usize = 1; +// u8 used to determine the ID +const KEY_HASH_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + KEY_HASH_LENGTH; +const KEY_UREF_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + UREF_SERIALIZED_LENGTH; +const KEY_TRANSFER_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + KEY_TRANSFER_LENGTH; +const KEY_DEPLOY_INFO_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + KEY_DEPLOY_INFO_LENGTH; +const KEY_ERA_INFO_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + U64_SERIALIZED_LENGTH; +const KEY_BALANCE_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + UREF_ADDR_LENGTH; +const KEY_BID_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + KEY_HASH_LENGTH; +const KEY_WITHDRAW_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + KEY_HASH_LENGTH; +const KEY_UNBOND_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + KEY_HASH_LENGTH; +const KEY_DICTIONARY_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + KEY_DICTIONARY_LENGTH; +const KEY_SYSTEM_CONTRACT_REGISTRY_SERIALIZED_LENGTH: usize = + KEY_ID_SERIALIZED_LENGTH + PADDING_BYTES.len(); +const KEY_ERA_SUMMARY_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + PADDING_BYTES.len(); +const KEY_CHAINSPEC_REGISTRY_SERIALIZED_LENGTH: usize = + KEY_ID_SERIALIZED_LENGTH + PADDING_BYTES.len(); +const KEY_CHECKSUM_REGISTRY_SERIALIZED_LENGTH: usize = + KEY_ID_SERIALIZED_LENGTH + PADDING_BYTES.len(); +const KEY_PACKAGE_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + 32; +const KEY_MESSAGE_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + + KEY_HASH_LENGTH + + TOPIC_NAME_HASH_LENGTH + + U8_SERIALIZED_LENGTH + + U32_SERIALIZED_LENGTH; + +const MAX_SERIALIZED_LENGTH: usize = KEY_MESSAGE_SERIALIZED_LENGTH; + +/// An alias for [`Key`]s hash variant. +pub type HashAddr = [u8; KEY_HASH_LENGTH]; + +/// An alias for [`Key`]s package variant. 
+pub type PackageAddr = [u8; ADDR_LENGTH]; + +/// An alias for [`Key`]s entity variant. +pub type EntityAddr = [u8; ADDR_LENGTH]; + +/// An alias for [`Key`]s byte code variant. +pub type ByteCodeAddr = [u8; ADDR_LENGTH]; + +/// An alias for [`Key`]s dictionary variant. +pub type DictionaryAddr = [u8; KEY_DICTIONARY_LENGTH]; + +#[allow(missing_docs)] +#[derive(Debug, Copy, Clone, PartialOrd, Ord, PartialEq, Eq, Hash)] +#[repr(u8)] +pub enum KeyTag { + Account = 0, + Hash = 1, + URef = 2, + Transfer = 3, + DeployInfo = 4, + EraInfo = 5, + Balance = 6, + Bid = 7, + Withdraw = 8, + Dictionary = 9, + SystemContractRegistry = 10, + EraSummary = 11, + Unbond = 12, + ChainspecRegistry = 13, + ChecksumRegistry = 14, + BidAddr = 15, + Package = 16, + AddressableEntity = 17, + ByteCode = 18, + Message = 19, +} + +impl KeyTag { + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..20) { + 0 => KeyTag::Account, + 1 => KeyTag::Hash, + 2 => KeyTag::URef, + 3 => KeyTag::Transfer, + 4 => KeyTag::DeployInfo, + 5 => KeyTag::EraInfo, + 6 => KeyTag::Balance, + 7 => KeyTag::Bid, + 8 => KeyTag::Withdraw, + 9 => KeyTag::Dictionary, + 10 => KeyTag::SystemContractRegistry, + 11 => KeyTag::EraSummary, + 12 => KeyTag::Unbond, + 13 => KeyTag::ChainspecRegistry, + 14 => KeyTag::ChecksumRegistry, + 15 => KeyTag::BidAddr, + 16 => KeyTag::Package, + 17 => KeyTag::AddressableEntity, + 18 => KeyTag::ByteCode, + 19 => KeyTag::Message, + _ => panic!(), + } + } +} + +impl Display for KeyTag { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + KeyTag::Account => write!(f, "Account"), + KeyTag::Hash => write!(f, "Hash"), + KeyTag::URef => write!(f, "URef"), + KeyTag::Transfer => write!(f, "Transfer"), + KeyTag::DeployInfo => write!(f, "DeployInfo"), + KeyTag::EraInfo => write!(f, "EraInfo"), + KeyTag::Balance => write!(f, "Balance"), + KeyTag::Bid => write!(f, "Bid"), + KeyTag::Withdraw => write!(f, "Withdraw"), + KeyTag::Dictionary => 
write!(f, "Dictionary"), + KeyTag::SystemContractRegistry => write!(f, "SystemContractRegistry"), + KeyTag::EraSummary => write!(f, "EraSummary"), + KeyTag::Unbond => write!(f, "Unbond"), + KeyTag::ChainspecRegistry => write!(f, "ChainspecRegistry"), + KeyTag::ChecksumRegistry => write!(f, "ChecksumRegistry"), + KeyTag::BidAddr => write!(f, "BidAddr"), + KeyTag::Package => write!(f, "Package"), + KeyTag::AddressableEntity => write!(f, "AddressableEntity"), + KeyTag::ByteCode => write!(f, "ByteCode"), + KeyTag::Message => write!(f, "Message"), + } + } +} + +impl ToBytes for KeyTag { + fn to_bytes(&self) -> Result, Error> { + let mut result = bytesrepr::unchecked_allocate_buffer(self); + self.write_bytes(&mut result)?; + Ok(result) + } + + fn serialized_length(&self) -> usize { + KEY_ID_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.push(*self as u8); + Ok(()) + } +} + +impl FromBytes for KeyTag { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (id, rem) = u8::from_bytes(bytes)?; + let tag = match id { + tag if tag == KeyTag::Account as u8 => KeyTag::Account, + tag if tag == KeyTag::Hash as u8 => KeyTag::Hash, + tag if tag == KeyTag::URef as u8 => KeyTag::URef, + tag if tag == KeyTag::Transfer as u8 => KeyTag::Transfer, + tag if tag == KeyTag::DeployInfo as u8 => KeyTag::DeployInfo, + tag if tag == KeyTag::EraInfo as u8 => KeyTag::EraInfo, + tag if tag == KeyTag::Balance as u8 => KeyTag::Balance, + tag if tag == KeyTag::Bid as u8 => KeyTag::Bid, + tag if tag == KeyTag::Withdraw as u8 => KeyTag::Withdraw, + tag if tag == KeyTag::Dictionary as u8 => KeyTag::Dictionary, + tag if tag == KeyTag::SystemContractRegistry as u8 => KeyTag::SystemContractRegistry, + tag if tag == KeyTag::EraSummary as u8 => KeyTag::EraSummary, + tag if tag == KeyTag::Unbond as u8 => KeyTag::Unbond, + tag if tag == KeyTag::ChainspecRegistry as u8 => KeyTag::ChainspecRegistry, + tag if tag == KeyTag::ChecksumRegistry as u8 
=> KeyTag::ChecksumRegistry, + tag if tag == KeyTag::BidAddr as u8 => KeyTag::BidAddr, + tag if tag == KeyTag::Package as u8 => KeyTag::Package, + tag if tag == KeyTag::AddressableEntity as u8 => KeyTag::AddressableEntity, + tag if tag == KeyTag::ByteCode as u8 => KeyTag::ByteCode, + tag if tag == KeyTag::Message as u8 => KeyTag::Message, + _ => return Err(Error::Formatting), + }; + Ok((tag, rem)) + } +} + +/// The key under which data (e.g. [`CLValue`]s, smart contracts, user accounts) are stored in +/// global state. +#[repr(C)] +#[derive(PartialEq, Eq, Clone, Copy, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub enum Key { + /// A `Key` under which a user account is stored. + Account(AccountHash), + /// A `Key` under which a smart contract is stored and which is the pseudo-hash of the + /// contract. + Hash(HashAddr), + /// A `Key` which is a [`URef`], under which most types of data can be stored. + URef(URef), + /// A `Key` under which a transfer is stored. + Transfer(TransferAddr), + /// A `Key` under which a deploy info is stored. + DeployInfo(DeployHash), + /// A `Key` under which an era info is stored. + EraInfo(EraId), + /// A `Key` under which a purse balance is stored. + Balance(URefAddr), + /// A `Key` under which bid information is stored. + Bid(AccountHash), + /// A `Key` under which withdraw information is stored. + Withdraw(AccountHash), + /// A `Key` whose value is derived by hashing a [`URef`] address and arbitrary data, under + /// which a dictionary is stored. + Dictionary(DictionaryAddr), + /// A `Key` under which system contract hashes are stored. + SystemContractRegistry, + /// A `Key` under which current era info is stored. + EraSummary, + /// A `Key` under which unbond information is stored. + Unbond(AccountHash), + /// A `Key` under which chainspec and other hashes are stored. + ChainspecRegistry, + /// A `Key` under which a registry of checksums is stored. 
+ ChecksumRegistry, + /// A `Key` under which bid information is stored. + BidAddr(BidAddr), + /// A `Key` under which package information is stored. + Package(PackageAddr), + /// A `Key` under which an addressable entity is stored. + AddressableEntity(PackageKindTag, EntityAddr), + /// A `Key` under which a byte code record is stored. + ByteCode(ByteCodeKind, ByteCodeAddr), + /// A `Key` under which a message is stored. + Message(MessageAddr), +} + +#[cfg(feature = "json-schema")] +impl JsonSchema for Key { + fn schema_name() -> String { + String::from("Key") + } + + fn json_schema(gen: &mut schemars::gen::SchemaGenerator) -> schemars::schema::Schema { + let schema = gen.subschema_for::(); + let mut schema_object = schema.into_object(); + schema_object.metadata().description = Some( + "The key as a formatted string, under which data (e.g. `CLValue`s, smart contracts, \ + user accounts) are stored in global state." + .to_string(), + ); + schema_object.into() + } +} + +/// Errors produced when converting a `String` into a `Key`. +#[derive(Debug)] +#[non_exhaustive] +pub enum FromStrError { + /// Account parse error. + Account(addressable_entity::FromStrError), + /// Hash parse error. + Hash(String), + /// URef parse error. + URef(uref::FromStrError), + /// Transfer parse error. + Transfer(TransferFromStrError), + /// DeployInfo parse error. + DeployInfo(String), + /// EraInfo parse error. + EraInfo(String), + /// Balance parse error. + Balance(String), + /// Bid parse error. + Bid(String), + /// Withdraw parse error. + Withdraw(String), + /// Dictionary parse error. + Dictionary(String), + /// System contract registry parse error. + SystemContractRegistry(String), + /// Era summary parse error. + EraSummary(String), + /// Unbond parse error. + Unbond(String), + /// Chainspec registry error. + ChainspecRegistry(String), + /// Checksum registry error. + ChecksumRegistry(String), + /// Bid parse error. + BidAddr(String), + /// Package parse error. 
+ Package(String), + /// Entity parse error. + AddressableEntity(String), + /// Byte code parse error. + ByteCode(String), + /// Message parse error. + Message(contract_messages::FromStrError), + /// Unknown prefix. + UnknownPrefix, +} + +impl From for FromStrError { + fn from(error: addressable_entity::FromStrError) -> Self { + FromStrError::Account(error) + } +} + +impl From for FromStrError { + fn from(error: TransferFromStrError) -> Self { + FromStrError::Transfer(error) + } +} + +impl From for FromStrError { + fn from(error: uref::FromStrError) -> Self { + FromStrError::URef(error) + } +} + +impl From for FromStrError { + fn from(error: contract_messages::FromStrError) -> Self { + FromStrError::Message(error) + } +} + +impl Display for FromStrError { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match self { + FromStrError::Account(error) => write!(f, "account-key from string error: {}", error), + FromStrError::Hash(error) => write!(f, "hash-key from string error: {}", error), + FromStrError::URef(error) => write!(f, "uref-key from string error: {}", error), + FromStrError::Transfer(error) => write!(f, "transfer-key from string error: {}", error), + FromStrError::DeployInfo(error) => { + write!(f, "deploy-info-key from string error: {}", error) + } + FromStrError::EraInfo(error) => write!(f, "era-info-key from string error: {}", error), + FromStrError::Balance(error) => write!(f, "balance-key from string error: {}", error), + FromStrError::Bid(error) => write!(f, "bid-key from string error: {}", error), + FromStrError::Withdraw(error) => write!(f, "withdraw-key from string error: {}", error), + FromStrError::Dictionary(error) => { + write!(f, "dictionary-key from string error: {}", error) + } + FromStrError::SystemContractRegistry(error) => { + write!( + f, + "system-contract-registry-key from string error: {}", + error + ) + } + FromStrError::EraSummary(error) => { + write!(f, "era-summary-key from string error: {}", error) + } + 
FromStrError::Unbond(error) => { + write!(f, "unbond-key from string error: {}", error) + } + FromStrError::ChainspecRegistry(error) => { + write!(f, "chainspec-registry-key from string error: {}", error) + } + FromStrError::ChecksumRegistry(error) => { + write!(f, "checksum-registry-key from string error: {}", error) + } + FromStrError::BidAddr(error) => write!(f, "bid-addr-key from string error: {}", error), + FromStrError::Package(error) => write!(f, "package-key from string error: {}", error), + FromStrError::AddressableEntity(error) => { + write!(f, "addressable-entity-key from string error: {}", error) + } + FromStrError::ByteCode(error) => { + write!(f, "byte-code-key from string error: {}", error) + } + FromStrError::Message(error) => { + write!(f, "message-key from string error: {}", error) + } + FromStrError::UnknownPrefix => write!(f, "unknown prefix for key"), + } + } +} + +impl Key { + // This method is not intended to be used by third party crates. + #[doc(hidden)] + pub fn type_string(&self) -> String { + match self { + Key::Account(_) => String::from("Key::Account"), + Key::Hash(_) => String::from("Key::Hash"), + Key::URef(_) => String::from("Key::URef"), + Key::Transfer(_) => String::from("Key::Transfer"), + Key::DeployInfo(_) => String::from("Key::DeployInfo"), + Key::EraInfo(_) => String::from("Key::EraInfo"), + Key::Balance(_) => String::from("Key::Balance"), + Key::Bid(_) => String::from("Key::Bid"), + Key::Withdraw(_) => String::from("Key::Unbond"), + Key::Dictionary(_) => String::from("Key::Dictionary"), + Key::SystemContractRegistry => String::from("Key::SystemContractRegistry"), + Key::EraSummary => String::from("Key::EraSummary"), + Key::Unbond(_) => String::from("Key::Unbond"), + Key::ChainspecRegistry => String::from("Key::ChainspecRegistry"), + Key::ChecksumRegistry => String::from("Key::ChecksumRegistry"), + Key::BidAddr(_) => String::from("Key::BidAddr"), + Key::Package(_) => String::from("Key::Package"), + Key::AddressableEntity(..) 
=> String::from("Key::AddressableEntity"), + Key::ByteCode(..) => String::from("Key::ByteCode"), + Key::Message(_) => String::from("Key::Message"), + } + } + + /// Returns the maximum size a [`Key`] can be serialized into. + pub const fn max_serialized_length() -> usize { + MAX_SERIALIZED_LENGTH + } + + /// If `self` is of type [`Key::URef`], returns `self` with the + /// [`AccessRights`](crate::AccessRights) stripped from the wrapped [`URef`], otherwise + /// returns `self` unmodified. + #[must_use] + pub fn normalize(self) -> Key { + match self { + Key::URef(uref) => Key::URef(uref.remove_access_rights()), + other => other, + } + } + + /// Returns a human-readable version of `self`, with the inner bytes encoded to Base16. + pub fn to_formatted_string(self) -> String { + match self { + Key::Account(account_hash) => account_hash.to_formatted_string(), + Key::Hash(addr) => format!("{}{}", HASH_PREFIX, base16::encode_lower(&addr)), + Key::URef(uref) => uref.to_formatted_string(), + Key::Transfer(transfer_addr) => transfer_addr.to_formatted_string(), + Key::DeployInfo(addr) => { + format!( + "{}{}", + DEPLOY_INFO_PREFIX, + base16::encode_lower(addr.as_ref()) + ) + } + Key::EraInfo(era_id) => { + format!("{}{}", ERA_INFO_PREFIX, era_id.value()) + } + Key::Balance(uref_addr) => { + format!("{}{}", BALANCE_PREFIX, base16::encode_lower(&uref_addr)) + } + Key::Bid(account_hash) => { + format!("{}{}", BID_PREFIX, base16::encode_lower(&account_hash)) + } + Key::Withdraw(account_hash) => { + format!("{}{}", WITHDRAW_PREFIX, base16::encode_lower(&account_hash)) + } + Key::Dictionary(dictionary_addr) => { + format!( + "{}{}", + DICTIONARY_PREFIX, + base16::encode_lower(&dictionary_addr) + ) + } + Key::SystemContractRegistry => { + format!( + "{}{}", + SYSTEM_CONTRACT_REGISTRY_PREFIX, + base16::encode_lower(&PADDING_BYTES) + ) + } + Key::EraSummary => { + format!( + "{}{}", + ERA_SUMMARY_PREFIX, + base16::encode_lower(&PADDING_BYTES) + ) + } + Key::Unbond(account_hash) => { + 
format!("{}{}", UNBOND_PREFIX, base16::encode_lower(&account_hash)) + } + Key::ChainspecRegistry => { + format!( + "{}{}", + CHAINSPEC_REGISTRY_PREFIX, + base16::encode_lower(&PADDING_BYTES) + ) + } + Key::ChecksumRegistry => { + format!( + "{}{}", + CHECKSUM_REGISTRY_PREFIX, + base16::encode_lower(&PADDING_BYTES) + ) + } + Key::BidAddr(bid_addr) => { + format!("{}{}", BID_ADDR_PREFIX, bid_addr) + } + Key::Message(message_addr) => message_addr.to_formatted_string(), + Key::Package(package_addr) => { + format!("{}{}", PACKAGE_PREFIX, base16::encode_lower(&package_addr)) + } + Key::AddressableEntity(package_tag, entity_addr) => match package_tag { + PackageKindTag::System => { + format!( + "{}{}{}", + ENTITY_PREFIX, + SYSTEM_ENTITY_PREFIX, + base16::encode_lower(&entity_addr) + ) + } + PackageKindTag::Account => { + format!( + "{}{}{}", + ENTITY_PREFIX, + ACCOUNT_ENTITY_PREFIX, + base16::encode_lower(&entity_addr) + ) + } + PackageKindTag::SmartContract => { + format!( + "{}{}{}", + ENTITY_PREFIX, + CONTRACT_ENTITY_PREFIX, + base16::encode_lower(&entity_addr) + ) + } + }, + Key::ByteCode(byte_code_kind, byte_code_addr) => match byte_code_kind { + ByteCodeKind::Empty => { + format!( + "{}{}{}", + BYTE_CODE_PREFIX, + EMPTY_PREFIX, + base16::encode_lower(&byte_code_addr) + ) + } + ByteCodeKind::V1CasperWasm => { + format!( + "{}{}{}", + BYTE_CODE_PREFIX, + V1_WASM_PREFIX, + base16::encode_lower(&byte_code_addr) + ) + } + }, + } + } + + /// Parses a string formatted as per `Self::to_formatted_string()` into a `Key`. 
+ pub fn from_formatted_str(input: &str) -> Result { + match AccountHash::from_formatted_str(input) { + Ok(account_hash) => return Ok(Key::Account(account_hash)), + Err(addressable_entity::FromStrError::InvalidPrefix) => {} + Err(error) => return Err(error.into()), + } + + if let Some(hex) = input.strip_prefix(HASH_PREFIX) { + let addr = checksummed_hex::decode(hex) + .map_err(|error| FromStrError::Hash(error.to_string()))?; + let hash_addr = HashAddr::try_from(addr.as_ref()) + .map_err(|error| FromStrError::Hash(error.to_string()))?; + return Ok(Key::Hash(hash_addr)); + } + + if let Some(hex) = input.strip_prefix(DEPLOY_INFO_PREFIX) { + let hash = checksummed_hex::decode(hex) + .map_err(|error| FromStrError::DeployInfo(error.to_string()))?; + let hash_array = <[u8; DeployHash::LENGTH]>::try_from(hash.as_ref()) + .map_err(|error| FromStrError::DeployInfo(error.to_string()))?; + return Ok(Key::DeployInfo(DeployHash::new(Digest::from(hash_array)))); + } + + match TransferAddr::from_formatted_str(input) { + Ok(transfer_addr) => return Ok(Key::Transfer(transfer_addr)), + Err(TransferFromStrError::InvalidPrefix) => {} + Err(error) => return Err(error.into()), + } + + match URef::from_formatted_str(input) { + Ok(uref) => return Ok(Key::URef(uref)), + Err(uref::FromStrError::InvalidPrefix) => {} + Err(error) => return Err(error.into()), + } + + if let Some(era_summary_padding) = input.strip_prefix(ERA_SUMMARY_PREFIX) { + let padded_bytes = checksummed_hex::decode(era_summary_padding) + .map_err(|error| FromStrError::EraSummary(error.to_string()))?; + let _padding: [u8; 32] = TryFrom::try_from(padded_bytes.as_ref()).map_err(|_| { + FromStrError::EraSummary("Failed to deserialize era summary key".to_string()) + })?; + return Ok(Key::EraSummary); + } + + if let Some(era_id_str) = input.strip_prefix(ERA_INFO_PREFIX) { + let era_id = EraId::from_str(era_id_str) + .map_err(|error| FromStrError::EraInfo(error.to_string()))?; + return Ok(Key::EraInfo(era_id)); + } + + if let 
Some(hex) = input.strip_prefix(BALANCE_PREFIX) { + let addr = checksummed_hex::decode(hex) + .map_err(|error| FromStrError::Balance(error.to_string()))?; + let uref_addr = URefAddr::try_from(addr.as_ref()) + .map_err(|error| FromStrError::Balance(error.to_string()))?; + return Ok(Key::Balance(uref_addr)); + } + + // note: BID_ADDR must come before BID as their heads overlap (bid- / bid-addr-) + if let Some(hex) = input.strip_prefix(BID_ADDR_PREFIX) { + let bytes = checksummed_hex::decode(hex) + .map_err(|error| FromStrError::BidAddr(error.to_string()))?; + if bytes.is_empty() { + return Err(FromStrError::BidAddr( + "bytes should not be 0 len".to_string(), + )); + } + let tag_bytes = <[u8; BidAddrTag::BID_ADDR_TAG_LENGTH]>::try_from(bytes[0..1].as_ref()) + .map_err(|err| FromStrError::BidAddr(err.to_string()))?; + let tag = BidAddrTag::try_from_u8(tag_bytes[0]) + .ok_or_else(|| FromStrError::BidAddr("failed to parse bid addr tag".to_string()))?; + let validator_bytes = <[u8; ACCOUNT_HASH_LENGTH]>::try_from( + bytes[1..BidAddr::VALIDATOR_BID_ADDR_LENGTH].as_ref(), + ) + .map_err(|err| FromStrError::BidAddr(err.to_string()))?; + + let bid_addr = { + if tag == BidAddrTag::Unified { + BidAddr::legacy(validator_bytes) + } else if tag == BidAddrTag::Validator { + BidAddr::new_validator_addr(validator_bytes) + } else if tag == BidAddrTag::Delegator { + let delegator_bytes = <[u8; ACCOUNT_HASH_LENGTH]>::try_from( + bytes[BidAddr::VALIDATOR_BID_ADDR_LENGTH..].as_ref(), + ) + .map_err(|err| FromStrError::BidAddr(err.to_string()))?; + BidAddr::new_delegator_addr((validator_bytes, delegator_bytes)) + } else { + return Err(FromStrError::BidAddr("invalid tag".to_string())); + } + }; + return Ok(Key::BidAddr(bid_addr)); + } + + if let Some(hex) = input.strip_prefix(BID_PREFIX) { + let hash = checksummed_hex::decode(hex) + .map_err(|error| FromStrError::Bid(error.to_string()))?; + let account_hash = <[u8; ACCOUNT_HASH_LENGTH]>::try_from(hash.as_ref()) + .map_err(|error| 
FromStrError::Bid(error.to_string()))?; + return Ok(Key::Bid(AccountHash::new(account_hash))); + } + + if let Some(hex) = input.strip_prefix(WITHDRAW_PREFIX) { + let hash = checksummed_hex::decode(hex) + .map_err(|error| FromStrError::Withdraw(error.to_string()))?; + let account_hash = <[u8; ACCOUNT_HASH_LENGTH]>::try_from(hash.as_ref()) + .map_err(|error| FromStrError::Withdraw(error.to_string()))?; + return Ok(Key::Withdraw(AccountHash::new(account_hash))); + } + + if let Some(hex) = input.strip_prefix(UNBOND_PREFIX) { + let hash = checksummed_hex::decode(hex) + .map_err(|error| FromStrError::Unbond(error.to_string()))?; + let account_hash = <[u8; ACCOUNT_HASH_LENGTH]>::try_from(hash.as_ref()) + .map_err(|error| FromStrError::Unbond(error.to_string()))?; + return Ok(Key::Unbond(AccountHash::new(account_hash))); + } + + if let Some(dictionary_addr) = input.strip_prefix(DICTIONARY_PREFIX) { + let dictionary_addr_bytes = checksummed_hex::decode(dictionary_addr) + .map_err(|error| FromStrError::Dictionary(error.to_string()))?; + let addr = DictionaryAddr::try_from(dictionary_addr_bytes.as_ref()) + .map_err(|error| FromStrError::Dictionary(error.to_string()))?; + return Ok(Key::Dictionary(addr)); + } + + if let Some(registry_address) = input.strip_prefix(SYSTEM_CONTRACT_REGISTRY_PREFIX) { + let padded_bytes = checksummed_hex::decode(registry_address) + .map_err(|error| FromStrError::SystemContractRegistry(error.to_string()))?; + let _padding: [u8; 32] = TryFrom::try_from(padded_bytes.as_ref()).map_err(|_| { + FromStrError::SystemContractRegistry( + "Failed to deserialize system registry key".to_string(), + ) + })?; + return Ok(Key::SystemContractRegistry); + } + + if let Some(registry_address) = input.strip_prefix(CHAINSPEC_REGISTRY_PREFIX) { + let padded_bytes = checksummed_hex::decode(registry_address) + .map_err(|error| FromStrError::ChainspecRegistry(error.to_string()))?; + let _padding: [u8; 32] = TryFrom::try_from(padded_bytes.as_ref()).map_err(|_| { + 
FromStrError::ChainspecRegistry( + "Failed to deserialize chainspec registry key".to_string(), + ) + })?; + return Ok(Key::ChainspecRegistry); + } + + if let Some(registry_address) = input.strip_prefix(CHECKSUM_REGISTRY_PREFIX) { + let padded_bytes = checksummed_hex::decode(registry_address) + .map_err(|error| FromStrError::ChecksumRegistry(error.to_string()))?; + let _padding: [u8; 32] = TryFrom::try_from(padded_bytes.as_ref()).map_err(|_| { + FromStrError::ChecksumRegistry( + "Failed to deserialize checksum registry key".to_string(), + ) + })?; + return Ok(Key::ChecksumRegistry); + } + + if let Some(package_addr) = input.strip_prefix(PACKAGE_PREFIX) { + let package_addr_bytes = checksummed_hex::decode(package_addr) + .map_err(|error| FromStrError::Dictionary(error.to_string()))?; + let addr = PackageAddr::try_from(package_addr_bytes.as_ref()) + .map_err(|error| FromStrError::Package(error.to_string()))?; + return Ok(Key::Package(addr)); + } + + if let Some(entity) = input.strip_prefix(ENTITY_PREFIX) { + let (addr_str, tag) = if let Some(str) = entity.strip_prefix(ACCOUNT_ENTITY_PREFIX) { + (str, PackageKindTag::Account) + } else if let Some(str) = entity.strip_prefix(SYSTEM_ENTITY_PREFIX) { + (str, PackageKindTag::System) + } else if let Some(str) = entity.strip_prefix(CONTRACT_ENTITY_PREFIX) { + (str, PackageKindTag::SmartContract) + } else { + return Err(FromStrError::UnknownPrefix); + }; + let addr = checksummed_hex::decode(addr_str) + .map_err(|error| FromStrError::AddressableEntity(error.to_string()))?; + let entity_addr = EntityAddr::try_from(addr.as_ref()) + .map_err(|error| FromStrError::AddressableEntity(error.to_string()))?; + return Ok(Key::AddressableEntity(tag, entity_addr)); + } + + if let Some(byte_code) = input.strip_prefix(BYTE_CODE_PREFIX) { + let (addr_str, tag) = if let Some(str) = byte_code.strip_prefix(EMPTY_PREFIX) { + (str, ByteCodeKind::Empty) + } else if let Some(str) = byte_code.strip_prefix(V1_WASM_PREFIX) { + (str, 
ByteCodeKind::V1CasperWasm) + } else { + return Err(FromStrError::UnknownPrefix); + }; + let addr = checksummed_hex::decode(addr_str) + .map_err(|error| FromStrError::ByteCode(error.to_string()))?; + let byte_code_addr = ByteCodeAddr::try_from(addr.as_ref()) + .map_err(|error| FromStrError::ByteCode(error.to_string()))?; + return Ok(Key::ByteCode(tag, byte_code_addr)); + } + + match MessageAddr::from_formatted_str(input) { + Ok(message_addr) => return Ok(Key::Message(message_addr)), + Err(contract_messages::FromStrError::InvalidPrefix) => {} + Err(error) => return Err(error.into()), + } + + Err(FromStrError::UnknownPrefix) + } + + /// Returns the inner bytes of `self` if `self` is of type [`Key::Account`], otherwise returns + /// `None`. + pub fn into_account(self) -> Option { + match self { + Key::Account(bytes) => Some(bytes), + _ => None, + } + } + + /// Returns the inner bytes of `self` if `self` is of type [`Key::Hash`], otherwise returns + /// `None`. + pub fn into_hash_addr(self) -> Option { + match self { + Key::Hash(hash) => Some(hash), + _ => None, + } + } + + /// Returns the inner bytes of `self` if `self` is of type [`Key::AddressableEntity`], otherwise + /// returns `None`. + pub fn into_entity_addr(self) -> Option { + match self { + Key::AddressableEntity(_, hash) => Some(hash), + _ => None, + } + } + + /// Returns the inner bytes of `self` if `self` is of type [`Key::Package`], otherwise returns + /// `None`. + pub fn into_package_addr(self) -> Option { + match self { + Key::Package(package_addr) => Some(package_addr), + _ => None, + } + } + + /// Returns [`AddressableEntityHash`] of `self` if `self` is of type [`Key::AddressableEntity`], + /// otherwise returns `None`. + pub fn into_entity_hash(self) -> Option { + let entity_addr = self.into_entity_addr()?; + Some(AddressableEntityHash::new(entity_addr)) + } + + /// Returns [`PackageHash`] of `self` if `self` is of type [`Key::Package`], otherwise + /// returns `None`. 
+ pub fn into_package_hash(self) -> Option { + let package_addr = self.into_package_addr()?; + Some(PackageHash::new(package_addr)) + } + + /// Returns a reference to the inner [`URef`] if `self` is of type [`Key::URef`], otherwise + /// returns `None`. + pub fn as_uref(&self) -> Option<&URef> { + match self { + Key::URef(uref) => Some(uref), + _ => None, + } + } + + /// Returns a reference to the inner [`URef`] if `self` is of type [`Key::URef`], otherwise + /// returns `None`. + pub fn as_uref_mut(&mut self) -> Option<&mut URef> { + match self { + Key::URef(uref) => Some(uref), + _ => None, + } + } + + /// Returns a reference to the inner `URefAddr` if `self` is of type [`Key::Balance`], + /// otherwise returns `None`. + pub fn as_balance(&self) -> Option<&URefAddr> { + if let Self::Balance(v) = self { + Some(v) + } else { + None + } + } + + /// Returns the inner [`URef`] if `self` is of type [`Key::URef`], otherwise returns `None`. + pub fn into_uref(self) -> Option { + match self { + Key::URef(uref) => Some(uref), + _ => None, + } + } + + /// Returns a reference to the inner [`DictionaryAddr`] if `self` is of type + /// [`Key::Dictionary`], otherwise returns `None`. + pub fn as_dictionary(&self) -> Option<&DictionaryAddr> { + match self { + Key::Dictionary(v) => Some(v), + _ => None, + } + } + + /// Casts a [`Key::URef`] to a [`Key::Hash`] + pub fn uref_to_hash(&self) -> Option { + let uref = self.as_uref()?; + let addr = uref.addr(); + Some(Key::Hash(addr)) + } + + /// Casts a [`Key::Withdraw`] to a [`Key::Unbond`] + pub fn withdraw_to_unbond(&self) -> Option { + if let Key::Withdraw(account_hash) = self { + return Some(Key::Unbond(*account_hash)); + } + None + } + + /// Creates a new [`Key::Dictionary`] variant based on a `seed_uref` and a `dictionary_item_key` + /// bytes. + pub fn dictionary(seed_uref: URef, dictionary_item_key: &[u8]) -> Key { + // NOTE: Expect below is safe because the length passed is supported. 
+ let mut hasher = VarBlake2b::new(BLAKE2B_DIGEST_LENGTH).expect("should create hasher"); + hasher.update(seed_uref.addr().as_ref()); + hasher.update(dictionary_item_key); + // NOTE: Assumed safe as size of `HashAddr` equals to the output provided by hasher. + let mut addr = HashAddr::default(); + hasher.finalize_variable(|hash| addr.clone_from_slice(hash)); + Key::Dictionary(addr) + } + + /// Creates a new [`Key::AddressableEntity`] variant from a package kind and an entity + /// hash. + pub fn addressable_entity_key( + package_kind_tag: PackageKindTag, + entity_hash: AddressableEntityHash, + ) -> Self { + Key::AddressableEntity(package_kind_tag, entity_hash.value()) + } + + /// Creates a new [`Key::AddressableEntity`] for a Smart contract. + pub fn contract_entity_key(entity_hash: AddressableEntityHash) -> Key { + Self::addressable_entity_key(PackageKindTag::SmartContract, entity_hash) + } + + /// Creates a new [`Key::ByteCode`] variant from a byte code kind and an byte code addr. + pub fn byte_code_key(byte_code_kind: ByteCodeKind, byte_code_addr: ByteCodeAddr) -> Self { + Key::ByteCode(byte_code_kind, byte_code_addr) + } + + /// Creates a new [`Key::Message`] variant that identifies an indexed message based on an + /// `entity_addr`, `topic_name_hash` and message `index`. + pub fn message( + entity_addr: AddressableEntityHash, + topic_name_hash: TopicNameHash, + index: u32, + ) -> Key { + Key::Message(MessageAddr::new_message_addr( + entity_addr, + topic_name_hash, + index, + )) + } + + /// Creates a new [`Key::Message`] variant that identifies a message topic based on an + /// `entity_addr` and a hash of the topic name. + pub fn message_topic( + entity_addr: AddressableEntityHash, + topic_name_hash: TopicNameHash, + ) -> Key { + Key::Message(MessageAddr::new_topic_addr(entity_addr, topic_name_hash)) + } + + /// Returns true if the key is of type [`Key::Dictionary`]. 
+ pub fn is_dictionary_key(&self) -> bool { + if let Key::Dictionary(_) = self { + return true; + } + false + } + + /// Returns true if the key is of type [`Key::Bid`]. + pub fn is_balance_key(&self) -> bool { + if let Key::Balance(_) = self { + return true; + } + false + } + + /// Returns true if the key is of type [`Key::BidAddr`]. + pub fn is_bid_addr_key(&self) -> bool { + if let Key::BidAddr(_) = self { + return true; + } + false + } + + /// Returns a reference to the inner `BidAddr` if `self` is of type [`Key::Bid`], + /// otherwise returns `None`. + pub fn as_bid_addr(&self) -> Option<&BidAddr> { + if let Self::BidAddr(addr) = self { + Some(addr) + } else { + None + } + } + + /// Returns if they inner Key is for a system contract entity. + pub fn is_system_key(&self) -> bool { + if let Self::AddressableEntity(PackageKindTag::System, _) = self { + return true; + } + + false + } + + /// Return true if the inner Key is of the smart contract type. + pub fn is_smart_contract_key(&self) -> bool { + if let Self::AddressableEntity(PackageKindTag::SmartContract, _) = self { + return true; + } + + false + } +} + +impl Display for Key { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match self { + Key::Account(account_hash) => write!(f, "Key::Account({})", account_hash), + Key::Hash(addr) => write!(f, "Key::Hash({})", base16::encode_lower(&addr)), + Key::URef(uref) => write!(f, "Key::{}", uref), /* Display impl for URef will append */ + Key::Transfer(transfer_addr) => write!(f, "Key::Transfer({})", transfer_addr), + Key::DeployInfo(addr) => write!( + f, + "Key::DeployInfo({})", + base16::encode_lower(addr.as_ref()) + ), + Key::EraInfo(era_id) => write!(f, "Key::EraInfo({})", era_id), + Key::Balance(uref_addr) => { + write!(f, "Key::Balance({})", base16::encode_lower(uref_addr)) + } + Key::Bid(account_hash) => write!(f, "Key::Bid({})", account_hash), + Key::Withdraw(account_hash) => write!(f, "Key::Withdraw({})", account_hash), + Key::Dictionary(addr) => { + 
write!(f, "Key::Dictionary({})", base16::encode_lower(addr)) + } + Key::SystemContractRegistry => write!( + f, + "Key::SystemContractRegistry({})", + base16::encode_lower(&PADDING_BYTES) + ), + Key::EraSummary => write!( + f, + "Key::EraSummary({})", + base16::encode_lower(&PADDING_BYTES), + ), + Key::Unbond(account_hash) => write!(f, "Key::Unbond({})", account_hash), + Key::ChainspecRegistry => write!( + f, + "Key::ChainspecRegistry({})", + base16::encode_lower(&PADDING_BYTES) + ), + Key::ChecksumRegistry => { + write!( + f, + "Key::ChecksumRegistry({})", + base16::encode_lower(&PADDING_BYTES) + ) + } + Key::BidAddr(bid_addr) => write!(f, "Key::BidAddr({})", bid_addr), + Key::Message(message_addr) => { + write!(f, "Key::Message({})", message_addr) + } + Key::Package(package_addr) => { + write!(f, "Key::Package({})", base16::encode_lower(package_addr)) + } + Key::AddressableEntity(kind_tag, entity_addr) => write!( + f, + "Key::AddressableEntity({}-{})", + kind_tag, + base16::encode_lower(entity_addr) + ), + Key::ByteCode(kind, byte_code_addr) => { + write!( + f, + "Key::ByteCode({}-{})", + kind, + base16::encode_lower(byte_code_addr) + ) + } + } + } +} + +impl Debug for Key { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!(f, "{}", self) + } +} + +impl Tagged for Key { + fn tag(&self) -> KeyTag { + match self { + Key::Account(_) => KeyTag::Account, + Key::Hash(_) => KeyTag::Hash, + Key::URef(_) => KeyTag::URef, + Key::Transfer(_) => KeyTag::Transfer, + Key::DeployInfo(_) => KeyTag::DeployInfo, + Key::EraInfo(_) => KeyTag::EraInfo, + Key::Balance(_) => KeyTag::Balance, + Key::Bid(_) => KeyTag::Bid, + Key::Withdraw(_) => KeyTag::Withdraw, + Key::Dictionary(_) => KeyTag::Dictionary, + Key::SystemContractRegistry => KeyTag::SystemContractRegistry, + Key::EraSummary => KeyTag::EraSummary, + Key::Unbond(_) => KeyTag::Unbond, + Key::ChainspecRegistry => KeyTag::ChainspecRegistry, + Key::ChecksumRegistry => KeyTag::ChecksumRegistry, + Key::BidAddr(_) => 
KeyTag::BidAddr, + Key::Package(_) => KeyTag::Package, + Key::AddressableEntity(..) => KeyTag::AddressableEntity, + Key::ByteCode(..) => KeyTag::ByteCode, + Key::Message(_) => KeyTag::Message, + } + } +} + +impl Tagged for Key { + fn tag(&self) -> u8 { + let key_tag: KeyTag = self.tag(); + key_tag as u8 + } +} + +impl From for Key { + fn from(uref: URef) -> Key { + Key::URef(uref) + } +} + +impl From for Key { + fn from(account_hash: AccountHash) -> Key { + Key::Account(account_hash) + } +} + +impl From for Key { + fn from(transfer_addr: TransferAddr) -> Key { + Key::Transfer(transfer_addr) + } +} + +impl From for Key { + fn from(package_hash: PackageHash) -> Key { + Key::Package(package_hash.value()) + } +} + +impl From for Key { + fn from(wasm_hash: ContractWasmHash) -> Self { + Key::Hash(wasm_hash.value()) + } +} + +impl From for Key { + fn from(contract_package_hash: ContractPackageHash) -> Self { + Key::Hash(contract_package_hash.value()) + } +} + +impl From for Key { + fn from(contract_hash: ContractHash) -> Self { + Key::Hash(contract_hash.value()) + } +} + +impl ToBytes for Key { + fn to_bytes(&self) -> Result, Error> { + let mut result = bytesrepr::unchecked_allocate_buffer(self); + self.write_bytes(&mut result)?; + Ok(result) + } + + fn serialized_length(&self) -> usize { + match self { + Key::Account(account_hash) => { + KEY_ID_SERIALIZED_LENGTH + account_hash.serialized_length() + } + Key::Hash(_) => KEY_HASH_SERIALIZED_LENGTH, + Key::URef(_) => KEY_UREF_SERIALIZED_LENGTH, + Key::Transfer(_) => KEY_TRANSFER_SERIALIZED_LENGTH, + Key::DeployInfo(_) => KEY_DEPLOY_INFO_SERIALIZED_LENGTH, + Key::EraInfo(_) => KEY_ERA_INFO_SERIALIZED_LENGTH, + Key::Balance(_) => KEY_BALANCE_SERIALIZED_LENGTH, + Key::Bid(_) => KEY_BID_SERIALIZED_LENGTH, + Key::Withdraw(_) => KEY_WITHDRAW_SERIALIZED_LENGTH, + Key::Dictionary(_) => KEY_DICTIONARY_SERIALIZED_LENGTH, + Key::SystemContractRegistry => KEY_SYSTEM_CONTRACT_REGISTRY_SERIALIZED_LENGTH, + Key::EraSummary => 
KEY_ERA_SUMMARY_SERIALIZED_LENGTH, + Key::Unbond(_) => KEY_UNBOND_SERIALIZED_LENGTH, + Key::ChainspecRegistry => KEY_CHAINSPEC_REGISTRY_SERIALIZED_LENGTH, + Key::ChecksumRegistry => KEY_CHECKSUM_REGISTRY_SERIALIZED_LENGTH, + Key::BidAddr(bid_addr) => match bid_addr.tag() { + BidAddrTag::Unified => KEY_ID_SERIALIZED_LENGTH + bid_addr.serialized_length() - 1, + BidAddrTag::Validator | BidAddrTag::Delegator => { + KEY_ID_SERIALIZED_LENGTH + bid_addr.serialized_length() + } + }, + Key::Package(_) => KEY_PACKAGE_SERIALIZED_LENGTH, + Key::AddressableEntity(..) => { + U8_SERIALIZED_LENGTH + KEY_ID_SERIALIZED_LENGTH + ADDR_LENGTH + } + Key::ByteCode(..) => U8_SERIALIZED_LENGTH + KEY_ID_SERIALIZED_LENGTH + ADDR_LENGTH, + Key::Message(message_addr) => { + KEY_ID_SERIALIZED_LENGTH + message_addr.serialized_length() + } + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.push(self.tag()); + match self { + Key::Account(account_hash) => account_hash.write_bytes(writer), + Key::Hash(hash) => hash.write_bytes(writer), + Key::URef(uref) => uref.write_bytes(writer), + Key::Transfer(addr) => addr.write_bytes(writer), + Key::DeployInfo(deploy_hash) => deploy_hash.write_bytes(writer), + Key::EraInfo(era_id) => era_id.write_bytes(writer), + Key::Balance(uref_addr) => uref_addr.write_bytes(writer), + Key::Bid(account_hash) => account_hash.write_bytes(writer), + Key::Withdraw(account_hash) => account_hash.write_bytes(writer), + Key::Dictionary(addr) => addr.write_bytes(writer), + Key::Unbond(account_hash) => account_hash.write_bytes(writer), + Key::SystemContractRegistry + | Key::EraSummary + | Key::ChainspecRegistry + | Key::ChecksumRegistry => PADDING_BYTES.write_bytes(writer), + Key::BidAddr(bid_addr) => match bid_addr.tag() { + BidAddrTag::Unified => { + let bytes = bid_addr.to_bytes()?; + writer.extend(&bytes[1..]); + Ok(()) + } + BidAddrTag::Validator | BidAddrTag::Delegator => bid_addr.write_bytes(writer), + }, + Key::Package(package_addr) => 
package_addr.write_bytes(writer), + Key::AddressableEntity(package_kind_tag, entity_addr) => { + package_kind_tag.write_bytes(writer)?; + entity_addr.write_bytes(writer) + } + Key::ByteCode(byte_code_kind, byte_code_addr) => { + byte_code_kind.write_bytes(writer)?; + byte_code_addr.write_bytes(writer) + } + Key::Message(message_addr) => message_addr.write_bytes(writer), + } + } +} + +impl FromBytes for Key { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (tag, remainder) = KeyTag::from_bytes(bytes)?; + match tag { + KeyTag::Account => { + let (account_hash, rem) = AccountHash::from_bytes(remainder)?; + Ok((Key::Account(account_hash), rem)) + } + KeyTag::Hash => { + let (hash, rem) = HashAddr::from_bytes(remainder)?; + Ok((Key::Hash(hash), rem)) + } + KeyTag::URef => { + let (uref, rem) = URef::from_bytes(remainder)?; + Ok((Key::URef(uref), rem)) + } + KeyTag::Transfer => { + let (transfer_addr, rem) = TransferAddr::from_bytes(remainder)?; + Ok((Key::Transfer(transfer_addr), rem)) + } + KeyTag::DeployInfo => { + let (deploy_hash, rem) = DeployHash::from_bytes(remainder)?; + Ok((Key::DeployInfo(deploy_hash), rem)) + } + KeyTag::EraInfo => { + let (era_id, rem) = EraId::from_bytes(remainder)?; + Ok((Key::EraInfo(era_id), rem)) + } + KeyTag::Balance => { + let (uref_addr, rem) = URefAddr::from_bytes(remainder)?; + Ok((Key::Balance(uref_addr), rem)) + } + KeyTag::Bid => { + let (account_hash, rem) = AccountHash::from_bytes(remainder)?; + Ok((Key::Bid(account_hash), rem)) + } + KeyTag::Withdraw => { + let (account_hash, rem) = AccountHash::from_bytes(remainder)?; + Ok((Key::Withdraw(account_hash), rem)) + } + KeyTag::Dictionary => { + let (addr, rem) = DictionaryAddr::from_bytes(remainder)?; + Ok((Key::Dictionary(addr), rem)) + } + KeyTag::SystemContractRegistry => { + let (_, rem) = <[u8; 32]>::from_bytes(remainder)?; + Ok((Key::SystemContractRegistry, rem)) + } + KeyTag::EraSummary => { + let (_, rem) = <[u8; 32]>::from_bytes(remainder)?; + 
Ok((Key::EraSummary, rem)) + } + KeyTag::Unbond => { + let (account_hash, rem) = AccountHash::from_bytes(remainder)?; + Ok((Key::Unbond(account_hash), rem)) + } + KeyTag::ChainspecRegistry => { + let (_, rem) = <[u8; 32]>::from_bytes(remainder)?; + Ok((Key::ChainspecRegistry, rem)) + } + KeyTag::ChecksumRegistry => { + let (_, rem) = <[u8; 32]>::from_bytes(remainder)?; + Ok((Key::ChecksumRegistry, rem)) + } + KeyTag::BidAddr => { + let (bid_addr, rem) = BidAddr::from_bytes(remainder)?; + Ok((Key::BidAddr(bid_addr), rem)) + } + KeyTag::Package => { + let (package_addr, rem) = PackageAddr::from_bytes(remainder)?; + Ok((Key::Package(package_addr), rem)) + } + KeyTag::AddressableEntity => { + let (package_kind_tag, rem) = PackageKindTag::from_bytes(remainder)?; + let (entity_addr, rem) = EntityAddr::from_bytes(rem)?; + Ok((Key::AddressableEntity(package_kind_tag, entity_addr), rem)) + } + KeyTag::ByteCode => { + let (byte_code_kind, rem) = ByteCodeKind::from_bytes(remainder)?; + let (byte_code_addr, rem) = ByteCodeAddr::from_bytes(rem)?; + Ok((Key::ByteCode(byte_code_kind, byte_code_addr), rem)) + } + KeyTag::Message => { + let (message_addr, rem) = MessageAddr::from_bytes(remainder)?; + Ok((Key::Message(message_addr), rem)) + } + } + } +} + +#[allow(dead_code)] +fn please_add_to_distribution_impl(key: Key) { + // If you've been forced to come here, you likely need to add your variant to the + // `Distribution` impl for `Key`. 
+ match key { + Key::Account(_) => unimplemented!(), + Key::Hash(_) => unimplemented!(), + Key::URef(_) => unimplemented!(), + Key::Transfer(_) => unimplemented!(), + Key::DeployInfo(_) => unimplemented!(), + Key::EraInfo(_) => unimplemented!(), + Key::Balance(_) => unimplemented!(), + Key::Bid(_) => unimplemented!(), + Key::Withdraw(_) => unimplemented!(), + Key::Dictionary(_) => unimplemented!(), + Key::SystemContractRegistry => unimplemented!(), + Key::EraSummary => unimplemented!(), + Key::Unbond(_) => unimplemented!(), + Key::ChainspecRegistry => unimplemented!(), + Key::ChecksumRegistry => unimplemented!(), + Key::BidAddr(_) => unimplemented!(), + Key::Package(_) => unimplemented!(), + Key::AddressableEntity(..) => unimplemented!(), + Key::ByteCode(..) => unimplemented!(), + Key::Message(_) => unimplemented!(), + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> Key { + match rng.gen_range(0..=18) { + 0 => Key::Account(rng.gen()), + 1 => Key::Hash(rng.gen()), + 2 => Key::URef(rng.gen()), + 3 => Key::Transfer(rng.gen()), + 4 => Key::DeployInfo(DeployHash::from_raw(rng.gen())), + 5 => Key::EraInfo(EraId::new(rng.gen())), + 6 => Key::Balance(rng.gen()), + 7 => Key::Bid(rng.gen()), + 8 => Key::Withdraw(rng.gen()), + 9 => Key::Dictionary(rng.gen()), + 10 => Key::SystemContractRegistry, + 11 => Key::EraSummary, + 12 => Key::Unbond(rng.gen()), + 13 => Key::ChainspecRegistry, + 14 => Key::ChecksumRegistry, + 15 => Key::BidAddr(rng.gen()), + 16 => Key::Package(rng.gen()), + 17 => Key::AddressableEntity(rng.gen(), rng.gen()), + 18 => Key::ByteCode(rng.gen(), rng.gen()), + 19 => Key::Message(rng.gen()), + _ => unreachable!(), + } + } +} + +mod serde_helpers { + use super::*; + + #[derive(Serialize)] + pub(super) enum BinarySerHelper<'a> { + Account(&'a AccountHash), + Hash(&'a HashAddr), + URef(&'a URef), + Transfer(&'a TransferAddr), + #[serde(with = "crate::serde_helpers::deploy_hash_as_array")] + 
DeployInfo(&'a DeployHash), + EraInfo(&'a EraId), + Balance(&'a URefAddr), + Bid(&'a AccountHash), + Withdraw(&'a AccountHash), + Dictionary(&'a HashAddr), + SystemContractRegistry, + EraSummary, + Unbond(&'a AccountHash), + ChainspecRegistry, + ChecksumRegistry, + BidAddr(&'a BidAddr), + Package(&'a PackageAddr), + AddressableEntity(&'a PackageKindTag, &'a EntityAddr), + ByteCode(&'a ByteCodeKind, &'a ByteCodeAddr), + Message(&'a MessageAddr), + } + + #[derive(Deserialize)] + pub(super) enum BinaryDeserHelper { + Account(AccountHash), + Hash(HashAddr), + URef(URef), + Transfer(TransferAddr), + #[serde(with = "crate::serde_helpers::deploy_hash_as_array")] + DeployInfo(DeployHash), + EraInfo(EraId), + Balance(URefAddr), + Bid(AccountHash), + Withdraw(AccountHash), + Dictionary(DictionaryAddr), + SystemContractRegistry, + EraSummary, + Unbond(AccountHash), + ChainspecRegistry, + ChecksumRegistry, + BidAddr(BidAddr), + Package(PackageAddr), + AddressableEntity(PackageKindTag, EntityAddr), + ByteCode(ByteCodeKind, ByteCodeAddr), + Message(MessageAddr), + } + + impl<'a> From<&'a Key> for BinarySerHelper<'a> { + fn from(key: &'a Key) -> Self { + match key { + Key::Account(account_hash) => BinarySerHelper::Account(account_hash), + Key::Hash(hash_addr) => BinarySerHelper::Hash(hash_addr), + Key::URef(uref) => BinarySerHelper::URef(uref), + Key::Transfer(transfer_addr) => BinarySerHelper::Transfer(transfer_addr), + Key::DeployInfo(deploy_hash) => BinarySerHelper::DeployInfo(deploy_hash), + Key::EraInfo(era_id) => BinarySerHelper::EraInfo(era_id), + Key::Balance(uref_addr) => BinarySerHelper::Balance(uref_addr), + Key::Bid(account_hash) => BinarySerHelper::Bid(account_hash), + Key::Withdraw(account_hash) => BinarySerHelper::Withdraw(account_hash), + Key::Dictionary(addr) => BinarySerHelper::Dictionary(addr), + Key::SystemContractRegistry => BinarySerHelper::SystemContractRegistry, + Key::EraSummary => BinarySerHelper::EraSummary, + Key::Unbond(account_hash) => 
BinarySerHelper::Unbond(account_hash), + Key::ChainspecRegistry => BinarySerHelper::ChainspecRegistry, + Key::ChecksumRegistry => BinarySerHelper::ChecksumRegistry, + Key::BidAddr(bid_addr) => BinarySerHelper::BidAddr(bid_addr), + Key::Message(message_addr) => BinarySerHelper::Message(message_addr), + Key::Package(package_addr) => BinarySerHelper::Package(package_addr), + Key::AddressableEntity(package_kind, entity_addr) => { + BinarySerHelper::AddressableEntity(package_kind, entity_addr) + } + Key::ByteCode(byte_code_kind, byte_code_addr) => { + BinarySerHelper::ByteCode(byte_code_kind, byte_code_addr) + } + } + } + } + + impl From for Key { + fn from(helper: BinaryDeserHelper) -> Self { + match helper { + BinaryDeserHelper::Account(account_hash) => Key::Account(account_hash), + BinaryDeserHelper::Hash(hash_addr) => Key::Hash(hash_addr), + BinaryDeserHelper::URef(uref) => Key::URef(uref), + BinaryDeserHelper::Transfer(transfer_addr) => Key::Transfer(transfer_addr), + BinaryDeserHelper::DeployInfo(deploy_hash) => Key::DeployInfo(deploy_hash), + BinaryDeserHelper::EraInfo(era_id) => Key::EraInfo(era_id), + BinaryDeserHelper::Balance(uref_addr) => Key::Balance(uref_addr), + BinaryDeserHelper::Bid(account_hash) => Key::Bid(account_hash), + BinaryDeserHelper::Withdraw(account_hash) => Key::Withdraw(account_hash), + BinaryDeserHelper::Dictionary(addr) => Key::Dictionary(addr), + BinaryDeserHelper::SystemContractRegistry => Key::SystemContractRegistry, + BinaryDeserHelper::EraSummary => Key::EraSummary, + BinaryDeserHelper::Unbond(account_hash) => Key::Unbond(account_hash), + BinaryDeserHelper::ChainspecRegistry => Key::ChainspecRegistry, + BinaryDeserHelper::ChecksumRegistry => Key::ChecksumRegistry, + BinaryDeserHelper::BidAddr(bid_addr) => Key::BidAddr(bid_addr), + BinaryDeserHelper::Message(message_addr) => Key::Message(message_addr), + BinaryDeserHelper::Package(package_addr) => Key::Package(package_addr), + BinaryDeserHelper::AddressableEntity(package_kind, 
entity_addr) => { + Key::AddressableEntity(package_kind, entity_addr) + } + BinaryDeserHelper::ByteCode(byte_kind, byte_code_addr) => { + Key::ByteCode(byte_kind, byte_code_addr) + } + } + } + } +} + +impl Serialize for Key { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + self.to_formatted_string().serialize(serializer) + } else { + serde_helpers::BinarySerHelper::from(self).serialize(serializer) + } + } +} + +impl<'de> Deserialize<'de> for Key { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let formatted_key = String::deserialize(deserializer)?; + Key::from_formatted_str(&formatted_key).map_err(SerdeError::custom) + } else { + let binary_helper = serde_helpers::BinaryDeserHelper::deserialize(deserializer)?; + Ok(Key::from(binary_helper)) + } + } +} + +#[cfg(test)] +mod tests { + use std::string::ToString; + + use super::*; + use crate::{ + account::ACCOUNT_HASH_FORMATTED_STRING_PREFIX, + bytesrepr::{Error, FromBytes}, + transfer::TRANSFER_ADDR_FORMATTED_STRING_PREFIX, + uref::UREF_FORMATTED_STRING_PREFIX, + AccessRights, URef, + }; + + const ACCOUNT_KEY: Key = Key::Account(AccountHash::new([42; 32])); + const HASH_KEY: Key = Key::Hash([42; 32]); + const UREF_KEY: Key = Key::URef(URef::new([42; 32], AccessRights::READ)); + const TRANSFER_KEY: Key = Key::Transfer(TransferAddr::new([42; 32])); + const DEPLOY_INFO_KEY: Key = Key::DeployInfo(DeployHash::from_raw([42; 32])); + const ERA_INFO_KEY: Key = Key::EraInfo(EraId::new(42)); + const BALANCE_KEY: Key = Key::Balance([42; 32]); + const BID_KEY: Key = Key::Bid(AccountHash::new([42; 32])); + const UNIFIED_BID_KEY: Key = Key::BidAddr(BidAddr::legacy([42; 32])); + const VALIDATOR_BID_KEY: Key = Key::BidAddr(BidAddr::new_validator_addr([2; 32])); + const DELEGATOR_BID_KEY: Key = Key::BidAddr(BidAddr::new_delegator_addr(([2; 32], [9; 32]))); + const WITHDRAW_KEY: Key = Key::Withdraw(AccountHash::new([42; 32])); + const DICTIONARY_KEY: Key 
= Key::Dictionary([42; 32]); + const SYSTEM_CONTRACT_REGISTRY_KEY: Key = Key::SystemContractRegistry; + const ERA_SUMMARY_KEY: Key = Key::EraSummary; + const UNBOND_KEY: Key = Key::Unbond(AccountHash::new([42; 32])); + const CHAINSPEC_REGISTRY_KEY: Key = Key::ChainspecRegistry; + const CHECKSUM_REGISTRY_KEY: Key = Key::ChecksumRegistry; + const PACKAGE_KEY: Key = Key::Package([42; 32]); + const ADDRESSABLE_ENTITY_SYSTEM_KEY: Key = + Key::AddressableEntity(PackageKindTag::System, [42; 32]); + const ADDRESSABLE_ENTITY_ACCOUNT_KEY: Key = + Key::AddressableEntity(PackageKindTag::Account, [42; 32]); + const ADDRESSABLE_ENTITY_SMART_CONTRACT_KEY: Key = + Key::AddressableEntity(PackageKindTag::SmartContract, [42; 32]); + const BYTE_CODE_EMPTY_KEY: Key = Key::ByteCode(ByteCodeKind::Empty, [42; 32]); + const BYTE_CODE_V1_WASM_KEY: Key = Key::ByteCode(ByteCodeKind::V1CasperWasm, [42; 32]); + const MESSAGE_TOPIC_KEY: Key = Key::Message(MessageAddr::new_topic_addr( + AddressableEntityHash::new([42u8; 32]), + TopicNameHash::new([42; 32]), + )); + const MESSAGE_KEY: Key = Key::Message(MessageAddr::new_message_addr( + AddressableEntityHash::new([42u8; 32]), + TopicNameHash::new([2; 32]), + 15, + )); + const KEYS: &[Key] = &[ + ACCOUNT_KEY, + HASH_KEY, + UREF_KEY, + TRANSFER_KEY, + DEPLOY_INFO_KEY, + ERA_INFO_KEY, + BALANCE_KEY, + BID_KEY, + WITHDRAW_KEY, + DICTIONARY_KEY, + SYSTEM_CONTRACT_REGISTRY_KEY, + ERA_SUMMARY_KEY, + UNBOND_KEY, + CHAINSPEC_REGISTRY_KEY, + CHECKSUM_REGISTRY_KEY, + UNIFIED_BID_KEY, + VALIDATOR_BID_KEY, + DELEGATOR_BID_KEY, + PACKAGE_KEY, + ADDRESSABLE_ENTITY_SYSTEM_KEY, + ADDRESSABLE_ENTITY_ACCOUNT_KEY, + ADDRESSABLE_ENTITY_SMART_CONTRACT_KEY, + BYTE_CODE_EMPTY_KEY, + BYTE_CODE_V1_WASM_KEY, + MESSAGE_TOPIC_KEY, + MESSAGE_KEY, + ]; + const HEX_STRING: &str = "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a"; + const TOPIC_NAME_HEX_STRING: &str = + "0202020202020202020202020202020202020202020202020202020202020202"; + const 
MESSAGE_INDEX_HEX_STRING: &str = "f"; + const UNIFIED_HEX_STRING: &str = + "002a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a"; + const VALIDATOR_HEX_STRING: &str = + "010202020202020202020202020202020202020202020202020202020202020202"; + const DELEGATOR_HEX_STRING: &str = + "0202020202020202020202020202020202020202020202020202020202020202020909090909090909090909090909090909090909090909090909090909090909"; + + fn test_readable(right: AccessRights, is_true: bool) { + assert_eq!(right.is_readable(), is_true) + } + + #[test] + fn test_is_readable() { + test_readable(AccessRights::READ, true); + test_readable(AccessRights::READ_ADD, true); + test_readable(AccessRights::READ_WRITE, true); + test_readable(AccessRights::READ_ADD_WRITE, true); + test_readable(AccessRights::ADD, false); + test_readable(AccessRights::ADD_WRITE, false); + test_readable(AccessRights::WRITE, false); + } + + fn test_writable(right: AccessRights, is_true: bool) { + assert_eq!(right.is_writeable(), is_true) + } + + #[test] + fn test_is_writable() { + test_writable(AccessRights::WRITE, true); + test_writable(AccessRights::READ_WRITE, true); + test_writable(AccessRights::ADD_WRITE, true); + test_writable(AccessRights::READ, false); + test_writable(AccessRights::ADD, false); + test_writable(AccessRights::READ_ADD, false); + test_writable(AccessRights::READ_ADD_WRITE, true); + } + + fn test_addable(right: AccessRights, is_true: bool) { + assert_eq!(right.is_addable(), is_true) + } + + #[test] + fn test_is_addable() { + test_addable(AccessRights::ADD, true); + test_addable(AccessRights::READ_ADD, true); + test_addable(AccessRights::READ_WRITE, false); + test_addable(AccessRights::ADD_WRITE, true); + test_addable(AccessRights::READ, false); + test_addable(AccessRights::WRITE, false); + test_addable(AccessRights::READ_ADD_WRITE, true); + } + + #[test] + fn should_display_key() { + assert_eq!( + format!("{}", ACCOUNT_KEY), + format!("Key::Account({})", HEX_STRING) + ); + assert_eq!( + 
format!("{}", HASH_KEY), + format!("Key::Hash({})", HEX_STRING) + ); + assert_eq!( + format!("{}", UREF_KEY), + format!("Key::URef({}, READ)", HEX_STRING) + ); + assert_eq!( + format!("{}", TRANSFER_KEY), + format!("Key::Transfer({})", HEX_STRING) + ); + assert_eq!( + format!("{}", DEPLOY_INFO_KEY), + format!("Key::DeployInfo({})", HEX_STRING) + ); + assert_eq!( + format!("{}", ERA_INFO_KEY), + "Key::EraInfo(era 42)".to_string() + ); + assert_eq!( + format!("{}", BALANCE_KEY), + format!("Key::Balance({})", HEX_STRING) + ); + assert_eq!(format!("{}", BID_KEY), format!("Key::Bid({})", HEX_STRING)); + assert_eq!( + format!("{}", UNIFIED_BID_KEY), + format!("Key::BidAddr({})", UNIFIED_HEX_STRING) + ); + assert_eq!( + format!("{}", VALIDATOR_BID_KEY), + format!("Key::BidAddr({})", VALIDATOR_HEX_STRING) + ); + assert_eq!( + format!("{}", DELEGATOR_BID_KEY), + format!("Key::BidAddr({})", DELEGATOR_HEX_STRING) + ); + assert_eq!( + format!("{}", WITHDRAW_KEY), + format!("Key::Withdraw({})", HEX_STRING) + ); + assert_eq!( + format!("{}", DICTIONARY_KEY), + format!("Key::Dictionary({})", HEX_STRING) + ); + assert_eq!( + format!("{}", SYSTEM_CONTRACT_REGISTRY_KEY), + format!( + "Key::SystemContractRegistry({})", + base16::encode_lower(&PADDING_BYTES) + ) + ); + assert_eq!( + format!("{}", ERA_SUMMARY_KEY), + format!("Key::EraSummary({})", base16::encode_lower(&PADDING_BYTES)) + ); + assert_eq!( + format!("{}", UNBOND_KEY), + format!("Key::Unbond({})", HEX_STRING) + ); + assert_eq!( + format!("{}", CHAINSPEC_REGISTRY_KEY), + format!( + "Key::ChainspecRegistry({})", + base16::encode_lower(&PADDING_BYTES) + ) + ); + assert_eq!( + format!("{}", CHECKSUM_REGISTRY_KEY), + format!( + "Key::ChecksumRegistry({})", + base16::encode_lower(&PADDING_BYTES), + ) + ); + assert_eq!( + format!("{}", PACKAGE_KEY), + format!("Key::Package({})", HEX_STRING) + ); + assert_eq!( + format!("{}", ADDRESSABLE_ENTITY_SYSTEM_KEY), + format!("Key::AddressableEntity(system-{})", HEX_STRING) + ); + 
assert_eq!( + format!("{}", ADDRESSABLE_ENTITY_ACCOUNT_KEY), + format!("Key::AddressableEntity(account-{})", HEX_STRING) + ); + assert_eq!( + format!("{}", ADDRESSABLE_ENTITY_SMART_CONTRACT_KEY), + format!("Key::AddressableEntity(smart-contract-{})", HEX_STRING) + ); + assert_eq!( + format!("{}", BYTE_CODE_EMPTY_KEY), + format!("Key::ByteCode(empty-{})", HEX_STRING) + ); + assert_eq!( + format!("{}", BYTE_CODE_V1_WASM_KEY), + format!("Key::ByteCode(v1-casper-wasm-{})", HEX_STRING) + ); + assert_eq!( + format!("{}", MESSAGE_TOPIC_KEY), + format!("Key::Message({}-{})", HEX_STRING, HEX_STRING) + ); + + assert_eq!( + format!("{}", MESSAGE_KEY), + format!( + "Key::Message({}-{}-{})", + HEX_STRING, TOPIC_NAME_HEX_STRING, MESSAGE_INDEX_HEX_STRING + ) + ) + } + + #[test] + fn abuse_vec_key() { + // Prefix is 2^32-1 = shouldn't allocate that much + let bytes: Vec = vec![255, 255, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; + let res: Result<(Vec, &[u8]), _> = FromBytes::from_bytes(&bytes); + #[cfg(target_os = "linux")] + assert_eq!(res.expect_err("should fail"), Error::OutOfMemory); + #[cfg(target_os = "macos")] + assert_eq!(res.expect_err("should fail"), Error::EarlyEndOfStream); + } + + #[test] + fn check_key_account_getters() { + let account = [42; 32]; + let account_hash = AccountHash::new(account); + let key1 = Key::Account(account_hash); + assert_eq!(key1.into_account(), Some(account_hash)); + assert!(key1.into_entity_addr().is_none()); + assert!(key1.as_uref().is_none()); + } + + #[test] + fn check_key_hash_getters() { + let hash = [42; KEY_HASH_LENGTH]; + let key1 = Key::Hash(hash); + assert!(key1.into_account().is_none()); + assert_eq!(key1.into_hash_addr(), Some(hash)); + assert!(key1.as_uref().is_none()); + } + + #[test] + fn check_entity_key_getters() { + let hash = [42; KEY_HASH_LENGTH]; + let key1 = Key::contract_entity_key(AddressableEntityHash::new(hash)); + assert!(key1.into_account().is_none()); + assert_eq!(key1.into_entity_addr(), Some(hash)); + 
assert!(key1.as_uref().is_none()); + } + + #[test] + fn check_package_key_getters() { + let hash = [42; KEY_HASH_LENGTH]; + let key1 = Key::Package(hash); + assert!(key1.into_account().is_none()); + assert_eq!(key1.into_package_addr(), Some(hash)); + assert!(key1.as_uref().is_none()); + } + + #[test] + fn check_key_uref_getters() { + let uref = URef::new([42; 32], AccessRights::READ_ADD_WRITE); + let key1 = Key::URef(uref); + assert!(key1.into_account().is_none()); + assert!(key1.into_entity_addr().is_none()); + assert_eq!(key1.as_uref(), Some(&uref)); + } + + #[test] + fn key_max_serialized_length() { + let mut got_max = false; + for key in KEYS { + let expected = Key::max_serialized_length(); + let actual = key.serialized_length(); + assert!( + actual <= expected, + "key too long {} expected {} actual {}", + key, + expected, + actual + ); + if actual == Key::max_serialized_length() { + got_max = true; + } + } + assert!( + got_max, + "None of the Key variants has a serialized_length equal to \ + Key::max_serialized_length(), so Key::max_serialized_length() should be reduced" + ); + } + + #[test] + fn should_parse_legacy_bid_key_from_string() { + let account_hash = AccountHash([1; 32]); + let legacy_bid_key = Key::Bid(account_hash); + let original_string = legacy_bid_key.to_formatted_string(); + + let parsed_bid_key = + Key::from_formatted_str(&original_string).expect("{string} (key = {key:?})"); + if let Key::Bid(parsed_account_hash) = parsed_bid_key { + assert_eq!(parsed_account_hash, account_hash,); + assert_eq!(legacy_bid_key, parsed_bid_key); + + let translated_string = parsed_bid_key.to_formatted_string(); + assert_eq!(original_string, translated_string); + } else { + panic!("should have account hash"); + } + } + + #[test] + fn should_parse_legacy_unified_bid_key_from_string() { + let legacy_bid_addr = BidAddr::legacy([1; 32]); + let legacy_bid_key = Key::BidAddr(legacy_bid_addr); + assert_eq!(legacy_bid_addr.tag(), BidAddrTag::Unified,); + + let 
original_string = legacy_bid_key.to_formatted_string(); + let parsed_key = + Key::from_formatted_str(&original_string).expect("{string} (key = {key:?})"); + let parsed_bid_addr = parsed_key.as_bid_addr().expect("must have bid addr"); + assert!(parsed_key.is_bid_addr_key()); + assert_eq!(parsed_bid_addr.tag(), legacy_bid_addr.tag(),); + assert_eq!(*parsed_bid_addr, legacy_bid_addr); + + let translated_string = parsed_key.to_formatted_string(); + assert_eq!(original_string, translated_string); + assert_eq!(parsed_key.as_bid_addr(), legacy_bid_key.as_bid_addr(),); + } + + #[test] + fn should_parse_validator_bid_key_from_string() { + let validator_bid_addr = BidAddr::new_validator_addr([1; 32]); + let validator_bid_key = Key::BidAddr(validator_bid_addr); + assert_eq!(validator_bid_addr.tag(), BidAddrTag::Validator,); + + let original_string = validator_bid_key.to_formatted_string(); + let parsed_key = + Key::from_formatted_str(&original_string).expect("{string} (key = {key:?})"); + let parsed_bid_addr = parsed_key.as_bid_addr().expect("must have bid addr"); + assert!(parsed_key.is_bid_addr_key()); + assert_eq!(parsed_bid_addr.tag(), validator_bid_addr.tag(),); + assert_eq!(*parsed_bid_addr, validator_bid_addr,); + + let translated_string = parsed_key.to_formatted_string(); + assert_eq!(original_string, translated_string); + assert_eq!(parsed_key.as_bid_addr(), validator_bid_key.as_bid_addr(),); + } + + #[test] + fn should_parse_delegator_bid_key_from_string() { + let delegator_bid_addr = BidAddr::new_delegator_addr(([1; 32], [9; 32])); + let delegator_bid_key = Key::BidAddr(delegator_bid_addr); + assert_eq!(delegator_bid_addr.tag(), BidAddrTag::Delegator,); + + let original_string = delegator_bid_key.to_formatted_string(); + + let parsed_key = + Key::from_formatted_str(&original_string).expect("{string} (key = {key:?})"); + let parsed_bid_addr = parsed_key.as_bid_addr().expect("must have bid addr"); + assert!(parsed_key.is_bid_addr_key()); + 
assert_eq!(parsed_bid_addr.tag(), delegator_bid_addr.tag(),); + assert_eq!(*parsed_bid_addr, delegator_bid_addr,); + + let translated_string = parsed_key.to_formatted_string(); + assert_eq!(original_string, translated_string); + assert_eq!(parsed_key.as_bid_addr(), delegator_bid_key.as_bid_addr(),); + } + + #[test] + fn should_parse_key_from_str() { + for key in KEYS { + let string = key.to_formatted_string(); + let parsed_key = Key::from_formatted_str(&string).expect("{string} (key = {key:?})"); + assert_eq!(parsed_key, *key, "{string} (key = {key:?})"); + } + } + + #[test] + fn should_fail_to_parse_key_from_str() { + assert!( + Key::from_formatted_str(ACCOUNT_HASH_FORMATTED_STRING_PREFIX) + .unwrap_err() + .to_string() + .starts_with("account-key from string error: ") + ); + assert!(Key::from_formatted_str(HASH_PREFIX) + .unwrap_err() + .to_string() + .starts_with("hash-key from string error: ")); + assert!(Key::from_formatted_str(UREF_FORMATTED_STRING_PREFIX) + .unwrap_err() + .to_string() + .starts_with("uref-key from string error: ")); + assert!( + Key::from_formatted_str(TRANSFER_ADDR_FORMATTED_STRING_PREFIX) + .unwrap_err() + .to_string() + .starts_with("transfer-key from string error: ") + ); + assert!(Key::from_formatted_str(DEPLOY_INFO_PREFIX) + .unwrap_err() + .to_string() + .starts_with("deploy-info-key from string error: ")); + assert!(Key::from_formatted_str(ERA_INFO_PREFIX) + .unwrap_err() + .to_string() + .starts_with("era-info-key from string error: ")); + assert!(Key::from_formatted_str(BALANCE_PREFIX) + .unwrap_err() + .to_string() + .starts_with("balance-key from string error: ")); + assert!(Key::from_formatted_str(BID_PREFIX) + .unwrap_err() + .to_string() + .starts_with("bid-key from string error: ")); + assert!(Key::from_formatted_str(WITHDRAW_PREFIX) + .unwrap_err() + .to_string() + .starts_with("withdraw-key from string error: ")); + assert!(Key::from_formatted_str(DICTIONARY_PREFIX) + .unwrap_err() + .to_string() + 
.starts_with("dictionary-key from string error: ")); + assert!(Key::from_formatted_str(SYSTEM_CONTRACT_REGISTRY_PREFIX) + .unwrap_err() + .to_string() + .starts_with("system-contract-registry-key from string error: ")); + assert!(Key::from_formatted_str(ERA_SUMMARY_PREFIX) + .unwrap_err() + .to_string() + .starts_with("era-summary-key from string error")); + assert!(Key::from_formatted_str(UNBOND_PREFIX) + .unwrap_err() + .to_string() + .starts_with("unbond-key from string error: ")); + assert!(Key::from_formatted_str(CHAINSPEC_REGISTRY_PREFIX) + .unwrap_err() + .to_string() + .starts_with("chainspec-registry-key from string error: ")); + assert!(Key::from_formatted_str(CHECKSUM_REGISTRY_PREFIX) + .unwrap_err() + .to_string() + .starts_with("checksum-registry-key from string error: ")); + let bid_addr_err = Key::from_formatted_str(BID_ADDR_PREFIX) + .unwrap_err() + .to_string(); + assert!( + bid_addr_err.starts_with("bid-addr-key from string error: "), + "{}", + bid_addr_err + ); + assert!(Key::from_formatted_str(PACKAGE_PREFIX) + .unwrap_err() + .to_string() + .starts_with("package-key from string error: ")); + assert!( + Key::from_formatted_str(&format!("{}{}", ENTITY_PREFIX, ACCOUNT_ENTITY_PREFIX)) + .unwrap_err() + .to_string() + .starts_with("addressable-entity-key from string error: ") + ); + assert!( + Key::from_formatted_str(&format!("{}{}", BYTE_CODE_PREFIX, EMPTY_PREFIX)) + .unwrap_err() + .to_string() + .starts_with("byte-code-key from string error: ") + ); + let invalid_prefix = "a-0000000000000000000000000000000000000000000000000000000000000000"; + assert_eq!( + Key::from_formatted_str(invalid_prefix) + .unwrap_err() + .to_string(), + "unknown prefix for key" + ); + + let missing_hyphen_prefix = + "hash0000000000000000000000000000000000000000000000000000000000000000"; + assert_eq!( + Key::from_formatted_str(missing_hyphen_prefix) + .unwrap_err() + .to_string(), + "unknown prefix for key" + ); + + let no_prefix = 
"0000000000000000000000000000000000000000000000000000000000000000"; + assert_eq!( + Key::from_formatted_str(no_prefix).unwrap_err().to_string(), + "unknown prefix for key" + ); + } + + #[test] + fn key_to_json() { + for key in KEYS.iter() { + assert_eq!( + serde_json::to_string(key).unwrap(), + format!("\"{}\"", key.to_formatted_string()) + ); + } + } + + #[test] + fn serialization_roundtrip_bincode() { + for key in KEYS { + let encoded = bincode::serialize(key).unwrap(); + let decoded = bincode::deserialize(&encoded).unwrap(); + assert_eq!(key, &decoded); + } + } + + #[test] + fn key_tag_bytes_roundtrip() { + for key in KEYS { + let tag: KeyTag = key.tag(); + bytesrepr::test_serialization_roundtrip(&tag); + } + } + + #[test] + fn serialization_roundtrip_json() { + let round_trip = |key: &Key| { + let encoded = serde_json::to_value(key).unwrap(); + let decoded = serde_json::from_value(encoded.clone()) + .unwrap_or_else(|_| panic!("{} {}", key, encoded)); + assert_eq!(key, &decoded); + }; + + for key in KEYS { + round_trip(key); + } + + let zeros = [0; BLAKE2B_DIGEST_LENGTH]; + let nines = [9; BLAKE2B_DIGEST_LENGTH]; + + round_trip(&Key::Account(AccountHash::new(zeros))); + round_trip(&Key::Hash(zeros)); + round_trip(&Key::URef(URef::new(zeros, AccessRights::READ))); + round_trip(&Key::Transfer(TransferAddr::new(zeros))); + round_trip(&Key::DeployInfo(DeployHash::from_raw(zeros))); + round_trip(&Key::EraInfo(EraId::from(0))); + round_trip(&Key::Balance(URef::new(zeros, AccessRights::READ).addr())); + round_trip(&Key::Bid(AccountHash::new(zeros))); + round_trip(&Key::BidAddr(BidAddr::legacy(zeros))); + round_trip(&Key::BidAddr(BidAddr::new_validator_addr(zeros))); + round_trip(&Key::BidAddr(BidAddr::new_delegator_addr((zeros, nines)))); + round_trip(&Key::Withdraw(AccountHash::new(zeros))); + round_trip(&Key::Dictionary(zeros)); + round_trip(&Key::Unbond(AccountHash::new(zeros))); + round_trip(&Key::Package(zeros)); + 
round_trip(&Key::AddressableEntity(PackageKindTag::System, zeros)); + round_trip(&Key::AddressableEntity(PackageKindTag::Account, zeros)); + round_trip(&Key::AddressableEntity( + PackageKindTag::SmartContract, + zeros, + )); + round_trip(&Key::ByteCode(ByteCodeKind::Empty, zeros)); + round_trip(&Key::ByteCode(ByteCodeKind::V1CasperWasm, zeros)); + round_trip(&Key::Message(MessageAddr::new_topic_addr( + zeros.into(), + nines.into(), + ))); + round_trip(&Key::Message(MessageAddr::new_message_addr( + zeros.into(), + nines.into(), + 1, + ))); + } +} diff --git a/casper_types_ver_2_0/src/lib.rs b/casper_types_ver_2_0/src/lib.rs new file mode 100644 index 00000000..20427aa3 --- /dev/null +++ b/casper_types_ver_2_0/src/lib.rs @@ -0,0 +1,215 @@ +//! Types used to allow creation of Wasm contracts and tests for use on the Casper Platform. + +#![cfg_attr( + not(any( + feature = "json-schema", + feature = "datasize", + feature = "std", + feature = "testing", + test, + )), + no_std +)] +#![doc(html_root_url = "https://docs.rs/casper-types/3.0.0")] +#![doc( + html_favicon_url = "https://raw.githubusercontent.com/casper-network/casper-node/blob/dev/images/Casper_Logo_Favicon_48.png", + html_logo_url = "https://raw.githubusercontent.com/casper-network/casper-node/blob/dev/images/Casper_Logo_Favicon.png" +)] +#![warn(missing_docs)] +#![cfg_attr(docsrs, feature(doc_auto_cfg))] + +#[cfg_attr(not(test), macro_use)] +extern crate alloc; +extern crate core; + +mod access_rights; +pub mod account; +pub mod addressable_entity; +pub mod api_error; +mod auction_state; +pub mod binary_port; +mod block; +mod block_time; +mod byte_code; +pub mod bytesrepr; +#[cfg(any(feature = "std", test))] +mod chainspec; +pub mod checksummed_hex; +mod cl_type; +mod cl_value; +pub mod contract_messages; +mod contract_wasm; +pub mod contracts; +pub mod crypto; +mod deploy_info; +mod digest; +mod display_iter; +mod era_id; +pub mod execution; +#[cfg(any(feature = "std", test))] +pub mod file_utils; +mod gas; 
+#[cfg(any(feature = "testing", feature = "gens", test))] +pub mod gens; +mod json_pretty_printer; +mod key; +mod motes; +pub mod package; +mod peers_map; +mod phase; +mod protocol_version; +mod reactor_state; +mod semver; +pub(crate) mod serde_helpers; +mod stored_value; +pub mod system; +mod tagged; +#[cfg(any(feature = "testing", test))] +pub mod testing; +mod timestamp; +mod transaction; +mod transfer; +mod transfer_result; +mod uint; +mod uref; +mod validator_change; + +#[cfg(feature = "std")] +use libc::{c_long, sysconf, _SC_PAGESIZE}; +#[cfg(feature = "std")] +use once_cell::sync::Lazy; + +pub use crate::uint::{UIntParseError, U128, U256, U512}; + +pub use access_rights::{ + AccessRights, ContextAccessRights, GrantedAccess, ACCESS_RIGHTS_SERIALIZED_LENGTH, +}; +#[doc(inline)] +pub use addressable_entity::{ + AddressableEntity, AddressableEntityHash, EntryPoint, EntryPointAccess, EntryPointType, + EntryPoints, Parameter, +}; +#[doc(inline)] +pub use api_error::ApiError; +pub use auction_state::{AuctionState, JsonEraValidators, JsonValidatorWeights}; +#[cfg(all(feature = "std", feature = "json-schema"))] +pub use block::JsonBlockWithSignatures; +pub use block::{ + AvailableBlockRange, Block, BlockBody, BlockBodyV1, BlockBodyV2, BlockHash, BlockHashAndHeight, + BlockHeader, BlockHeaderV1, BlockHeaderV2, BlockIdentifier, BlockSignatures, + BlockSignaturesMergeError, BlockSyncStatus, BlockSynchronizerStatus, BlockV1, BlockV2, + BlockValidationError, EraEnd, EraEndV1, EraEndV2, EraReport, FinalitySignature, + FinalitySignatureId, RewardedSignatures, Rewards, SignedBlock, SignedBlockHeader, + SignedBlockHeaderValidationError, SingleBlockRewardedSignatures, +}; +#[cfg(any(feature = "testing", test))] +pub use block::{TestBlockBuilder, TestBlockV1Builder}; +pub use block_time::{BlockTime, BLOCKTIME_SERIALIZED_LENGTH}; +pub use byte_code::{ByteCode, ByteCodeHash, ByteCodeKind}; +#[cfg(any(feature = "std", test))] +pub use chainspec::{ + AccountConfig, AccountsConfig, 
ActivationPoint, AdministratorAccount, AuctionCosts, + BrTableCost, Chainspec, ChainspecRawBytes, ChainspecRegistry, ConsensusProtocolName, + ControlFlowCosts, CoreConfig, DelegatorConfig, DeployConfig, FeeHandling, GenesisAccount, + GenesisValidator, GlobalStateUpdate, GlobalStateUpdateConfig, GlobalStateUpdateError, + HandlePaymentCosts, HighwayConfig, HostFunction, HostFunctionCost, HostFunctionCosts, + LegacyRequiredFinality, MessageLimits, MintCosts, NetworkConfig, NextUpgrade, OpcodeCosts, + ProtocolConfig, RefundHandling, StandardPaymentCosts, StorageCosts, SystemConfig, + TransactionConfig, TransactionV1Config, UpgradeConfig, ValidatorConfig, WasmConfig, + DEFAULT_HOST_FUNCTION_NEW_DICTIONARY, +}; +#[cfg(any(all(feature = "std", feature = "testing"), test))] +pub use chainspec::{ + DEFAULT_ADD_BID_COST, DEFAULT_ADD_COST, DEFAULT_BIT_COST, DEFAULT_CONST_COST, + DEFAULT_CONTROL_FLOW_BLOCK_OPCODE, DEFAULT_CONTROL_FLOW_BR_IF_OPCODE, + DEFAULT_CONTROL_FLOW_BR_OPCODE, DEFAULT_CONTROL_FLOW_BR_TABLE_MULTIPLIER, + DEFAULT_CONTROL_FLOW_BR_TABLE_OPCODE, DEFAULT_CONTROL_FLOW_CALL_INDIRECT_OPCODE, + DEFAULT_CONTROL_FLOW_CALL_OPCODE, DEFAULT_CONTROL_FLOW_DROP_OPCODE, + DEFAULT_CONTROL_FLOW_ELSE_OPCODE, DEFAULT_CONTROL_FLOW_END_OPCODE, + DEFAULT_CONTROL_FLOW_IF_OPCODE, DEFAULT_CONTROL_FLOW_LOOP_OPCODE, + DEFAULT_CONTROL_FLOW_RETURN_OPCODE, DEFAULT_CONTROL_FLOW_SELECT_OPCODE, + DEFAULT_CONVERSION_COST, DEFAULT_CURRENT_MEMORY_COST, DEFAULT_DELEGATE_COST, DEFAULT_DIV_COST, + DEFAULT_GLOBAL_COST, DEFAULT_GROW_MEMORY_COST, DEFAULT_INTEGER_COMPARISON_COST, + DEFAULT_LOAD_COST, DEFAULT_LOCAL_COST, DEFAULT_MAX_PAYMENT_MOTES, DEFAULT_MAX_STACK_HEIGHT, + DEFAULT_MIN_TRANSFER_MOTES, DEFAULT_MUL_COST, DEFAULT_NEW_DICTIONARY_COST, DEFAULT_NOP_COST, + DEFAULT_STORE_COST, DEFAULT_TRANSFER_COST, DEFAULT_UNREACHABLE_COST, + DEFAULT_WASMLESS_TRANSFER_COST, DEFAULT_WASM_MAX_MEMORY, +}; +pub use cl_type::{named_key_type, CLType, CLTyped}; +pub use cl_value::{CLTypeMismatch, CLValue, 
CLValueError}; +pub use contract_wasm::ContractWasm; +#[doc(inline)] +pub use contracts::Contract; +pub use crypto::*; +pub use deploy_info::DeployInfo; +pub use digest::{ + ChunkWithProof, ChunkWithProofVerificationError, Digest, DigestError, IndexedMerkleProof, + MerkleConstructionError, MerkleVerificationError, +}; +pub use display_iter::DisplayIter; +pub use era_id::EraId; +pub use gas::Gas; +pub use json_pretty_printer::json_pretty_print; +#[doc(inline)] +pub use key::{ + ByteCodeAddr, DictionaryAddr, EntityAddr, FromStrError as KeyFromStrError, HashAddr, Key, + KeyTag, PackageAddr, BLAKE2B_DIGEST_LENGTH, DICTIONARY_ITEM_KEY_MAX_LENGTH, + KEY_DICTIONARY_LENGTH, KEY_HASH_LENGTH, +}; +pub use motes::Motes; +#[doc(inline)] +pub use package::{ + EntityVersion, EntityVersionKey, EntityVersions, Group, Groups, Package, PackageHash, +}; +pub use peers_map::{PeerEntry, Peers}; +pub use phase::{Phase, PHASE_SERIALIZED_LENGTH}; +pub use protocol_version::{ProtocolVersion, VersionCheckResult}; +pub use reactor_state::ReactorState; +pub use semver::{ParseSemVerError, SemVer, SEM_VER_SERIALIZED_LENGTH}; +pub use stored_value::{ + GlobalStateIdentifier, StoredValue, TypeMismatch as StoredValueTypeMismatch, +}; +pub use tagged::Tagged; +#[cfg(any(feature = "std", test))] +pub use timestamp::serde_option_time_diff; +pub use timestamp::{TimeDiff, Timestamp}; +pub use transaction::{ + AddressableEntityIdentifier, Deploy, DeployApproval, DeployApprovalsHash, DeployConfigFailure, + DeployDecodeFromJsonError, DeployError, DeployExcessiveSizeError, DeployFootprint, DeployHash, + DeployHeader, DeployId, ExecutableDeployItem, ExecutableDeployItemIdentifier, ExecutionInfo, + FinalizedApprovals, FinalizedDeployApprovals, FinalizedTransactionV1Approvals, InitiatorAddr, + NamedArg, PackageIdentifier, PricingMode, RuntimeArgs, Transaction, TransactionApprovalsHash, + TransactionEntryPoint, TransactionHash, TransactionHeader, TransactionId, + TransactionInvocationTarget, 
TransactionRuntime, TransactionScheduling, TransactionSessionKind, + TransactionTarget, TransactionV1, TransactionV1Approval, TransactionV1ApprovalsHash, + TransactionV1Body, TransactionV1ConfigFailure, TransactionV1DecodeFromJsonError, + TransactionV1Error, TransactionV1ExcessiveSizeError, TransactionV1Hash, TransactionV1Header, + TransferTarget, +}; +#[cfg(any(feature = "std", test))] +pub use transaction::{ + DeployBuilder, DeployBuilderError, TransactionV1Builder, TransactionV1BuilderError, +}; +pub use transfer::{ + FromStrError as TransferFromStrError, Transfer, TransferAddr, TRANSFER_ADDR_LENGTH, +}; +pub use transfer_result::{TransferResult, TransferredTo}; +pub use uref::{ + FromStrError as URefFromStrError, URef, URefAddr, UREF_ADDR_LENGTH, UREF_SERIALIZED_LENGTH, +}; +pub use validator_change::ValidatorChange; + +/// OS page size. +#[cfg(feature = "std")] +pub static OS_PAGE_SIZE: Lazy = Lazy::new(|| { + /// Sensible default for many if not all systems. + const DEFAULT_PAGE_SIZE: usize = 4096; + + // https://www.gnu.org/software/libc/manual/html_node/Sysconf.html + let value: c_long = unsafe { sysconf(_SC_PAGESIZE) }; + if value <= 0 { + DEFAULT_PAGE_SIZE + } else { + value as usize + } +}); diff --git a/casper_types_ver_2_0/src/motes.rs b/casper_types_ver_2_0/src/motes.rs new file mode 100644 index 00000000..8008a81c --- /dev/null +++ b/casper_types_ver_2_0/src/motes.rs @@ -0,0 +1,248 @@ +//! The `motes` module is used for working with Motes. + +use alloc::vec::Vec; +use core::{ + fmt, + iter::Sum, + ops::{Add, Div, Mul, Sub}, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use num::Zero; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + Gas, U512, +}; + +/// A struct representing a number of `Motes`. 
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize)]
#[cfg_attr(feature = "datasize", derive(DataSize))]
pub struct Motes(U512);

impl Motes {
    /// Constructs a new `Motes` wrapping the given `U512` value.
    pub fn new(value: U512) -> Motes {
        Motes(value)
    }

    /// Checked integer addition. Computes `self + rhs`, returning `None` if overflow occurred.
    pub fn checked_add(&self, rhs: Self) -> Option<Self> {
        self.0.checked_add(rhs.value()).map(Self::new)
    }

    /// Checked integer subtraction. Computes `self - rhs`, returning `None` if underflow occurred.
    pub fn checked_sub(&self, rhs: Self) -> Option<Self> {
        self.0.checked_sub(rhs.value()).map(Self::new)
    }

    /// Returns the inner `U512` value.
    pub fn value(&self) -> U512 {
        self.0
    }

    /// Converts the given `gas` to `Motes` by multiplying it by `conv_rate`.
    ///
    /// Returns `None` if an arithmetic overflow occurred.
    pub fn from_gas(gas: Gas, conv_rate: u64) -> Option<Self> {
        gas.value()
            .checked_mul(U512::from(conv_rate))
            .map(Self::new)
    }
}

impl fmt::Display for Motes {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:?}", self.0)
    }
}

// NOTE: the operator impls below are unchecked and will panic on
// overflow/underflow (and `Div` on a zero divisor); use `checked_add` /
// `checked_sub` where failure must be handled gracefully.
impl Add for Motes {
    type Output = Motes;

    fn add(self, rhs: Self) -> Self::Output {
        let val = self.value() + rhs.value();
        Motes::new(val)
    }
}

impl Sub for Motes {
    type Output = Motes;

    fn sub(self, rhs: Self) -> Self::Output {
        let val = self.value() - rhs.value();
        Motes::new(val)
    }
}

impl Div for Motes {
    type Output = Motes;

    fn div(self, rhs: Self) -> Self::Output {
        let val = self.value() / rhs.value();
        Motes::new(val)
    }
}

impl Mul for Motes {
    type Output = Motes;

    fn mul(self, rhs: Self) -> Self::Output {
        let val = self.value() * rhs.value();
        Motes::new(val)
    }
}

impl Zero for Motes {
    fn zero() -> Self {
        Motes::new(U512::zero())
    }

    fn is_zero(&self) -> bool {
        self.0.is_zero()
    }
}

impl Sum for Motes {
    fn sum<I: Iterator<Item = Motes>>(iter: I) -> Self {
iter.fold(Motes::zero(), Add::add) + } +} + +impl ToBytes for Motes { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for Motes { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (value, remainder) = FromBytes::from_bytes(bytes)?; + Ok((Motes::new(value), remainder)) + } +} + +#[cfg(test)] +mod tests { + use crate::U512; + + use crate::{Gas, Motes}; + + #[test] + fn should_be_able_to_get_instance_of_motes() { + let initial_value = 1; + let motes = Motes::new(U512::from(initial_value)); + assert_eq!( + initial_value, + motes.value().as_u64(), + "should have equal value" + ) + } + + #[test] + fn should_be_able_to_compare_two_instances_of_motes() { + let left_motes = Motes::new(U512::from(1)); + let right_motes = Motes::new(U512::from(1)); + assert_eq!(left_motes, right_motes, "should be equal"); + let right_motes = Motes::new(U512::from(2)); + assert_ne!(left_motes, right_motes, "should not be equal") + } + + #[test] + fn should_be_able_to_add_two_instances_of_motes() { + let left_motes = Motes::new(U512::from(1)); + let right_motes = Motes::new(U512::from(1)); + let expected_motes = Motes::new(U512::from(2)); + assert_eq!( + (left_motes + right_motes), + expected_motes, + "should be equal" + ) + } + + #[test] + fn should_be_able_to_subtract_two_instances_of_motes() { + let left_motes = Motes::new(U512::from(1)); + let right_motes = Motes::new(U512::from(1)); + let expected_motes = Motes::new(U512::from(0)); + assert_eq!( + (left_motes - right_motes), + expected_motes, + "should be equal" + ) + } + + #[test] + fn should_be_able_to_multiply_two_instances_of_motes() { + let left_motes = Motes::new(U512::from(100)); + let right_motes = Motes::new(U512::from(10)); + let expected_motes = Motes::new(U512::from(1000)); + assert_eq!( + (left_motes * right_motes), + expected_motes, + "should be equal" + ) + } + + #[test] 
+ fn should_be_able_to_divide_two_instances_of_motes() { + let left_motes = Motes::new(U512::from(1000)); + let right_motes = Motes::new(U512::from(100)); + let expected_motes = Motes::new(U512::from(10)); + assert_eq!( + (left_motes / right_motes), + expected_motes, + "should be equal" + ) + } + + #[test] + fn should_be_able_to_convert_from_motes() { + let gas = Gas::new(U512::from(100)); + let motes = Motes::from_gas(gas, 10).expect("should have value"); + let expected_motes = Motes::new(U512::from(1000)); + assert_eq!(motes, expected_motes, "should be equal") + } + + #[test] + fn should_be_able_to_default() { + let motes = Motes::default(); + let expected_motes = Motes::new(U512::from(0)); + assert_eq!(motes, expected_motes, "should be equal") + } + + #[test] + fn should_be_able_to_compare_relative_value() { + let left_motes = Motes::new(U512::from(100)); + let right_motes = Motes::new(U512::from(10)); + assert!(left_motes > right_motes, "should be gt"); + let right_motes = Motes::new(U512::from(100)); + assert!(left_motes >= right_motes, "should be gte"); + assert!(left_motes <= right_motes, "should be lte"); + let left_motes = Motes::new(U512::from(10)); + assert!(left_motes < right_motes, "should be lt"); + } + + #[test] + fn should_default() { + let left_motes = Motes::new(U512::from(0)); + let right_motes = Motes::default(); + assert_eq!(left_motes, right_motes, "should be equal"); + let u512 = U512::zero(); + assert_eq!(left_motes.value(), u512, "should be equal"); + } + + #[test] + fn should_support_checked_mul_from_gas() { + let gas = Gas::new(U512::MAX); + let conv_rate = 10; + let maybe = Motes::from_gas(gas, conv_rate); + assert!(maybe.is_none(), "should be none due to overflow"); + } +} diff --git a/casper_types_ver_2_0/src/package.rs b/casper_types_ver_2_0/src/package.rs new file mode 100644 index 00000000..72ac1ce4 --- /dev/null +++ b/casper_types_ver_2_0/src/package.rs @@ -0,0 +1,1567 @@ +//! 
Module containing the Package and associated types for addressable entities. + +use alloc::{ + collections::{BTreeMap, BTreeSet}, + format, + string::String, + vec::Vec, +}; +use core::{ + convert::TryFrom, + fmt::{self, Debug, Display, Formatter}, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; +#[cfg(feature = "json-schema")] +use serde_map_to_array::KeyValueJsonSchema; +use serde_map_to_array::{BTreeMapToArray, KeyValueLabels}; + +use crate::{ + account::AccountHash, + addressable_entity::{AssociatedKeys, Error, FromStrError, Weight}, + bytesrepr::{self, FromBytes, ToBytes, U32_SERIALIZED_LENGTH, U8_SERIALIZED_LENGTH}, + checksummed_hex, + crypto::{self, PublicKey}, + system::SystemEntityType, + uref::URef, + AddressableEntityHash, CLType, CLTyped, HashAddr, Key, Tagged, BLAKE2B_DIGEST_LENGTH, + KEY_HASH_LENGTH, +}; + +/// Maximum number of distinct user groups. +pub const MAX_GROUPS: u8 = 10; +/// Maximum number of URefs which can be assigned across all user groups. +pub const MAX_TOTAL_UREFS: usize = 100; + +/// The tag for Contract Packages associated with Wasm stored on chain. +pub const PACKAGE_KIND_WASM_TAG: u8 = 0; +/// The tag for Contract Package associated with a native contract implementation. +pub const PACKAGE_KIND_SYSTEM_CONTRACT_TAG: u8 = 1; +/// The tag for Contract Package associated with an Account hash. +pub const PACKAGE_KIND_ACCOUNT_TAG: u8 = 2; +/// The tag for Contract Packages associated with legacy packages. +pub const PACKAGE_KIND_LEGACY_TAG: u8 = 3; + +const PACKAGE_STRING_PREFIX: &str = "contract-package-"; +// We need to support the legacy prefix of "contract-package-wasm". 
+const PACKAGE_STRING_LEGACY_EXTRA_PREFIX: &str = "wasm"; + +/// Associated error type of `TryFrom<&[u8]>` for `ContractHash`. +#[derive(Debug)] +pub struct TryFromSliceForPackageHashError(()); + +impl Display for TryFromSliceForPackageHashError { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!(f, "failed to retrieve from slice") + } +} + +/// A (labelled) "user group". Each method of a versioned contract may be +/// associated with one or more user groups which are allowed to call it. +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct Group(String); + +impl Group { + /// Basic constructor + pub fn new>(s: T) -> Self { + Group(s.into()) + } + + /// Retrieves underlying name. + pub fn value(&self) -> &str { + &self.0 + } +} + +impl From for String { + fn from(group: Group) -> Self { + group.0 + } +} + +impl ToBytes for Group { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.value().write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for Group { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + String::from_bytes(bytes).map(|(label, bytes)| (Group(label), bytes)) + } +} + +/// Automatically incremented value for a contract version within a major `ProtocolVersion`. +pub type EntityVersion = u32; + +/// Within each discrete major `ProtocolVersion`, entity version resets to this value. +pub const ENTITY_INITIAL_VERSION: EntityVersion = 1; + +/// Major element of `ProtocolVersion` a `EntityVersion` is compatible with. +pub type ProtocolVersionMajor = u32; + +/// Major element of `ProtocolVersion` combined with `EntityVersion`. 
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
#[cfg_attr(feature = "datasize", derive(DataSize))]
#[cfg_attr(feature = "json-schema", derive(JsonSchema))]
pub struct EntityVersionKey {
    /// Major element of the `ProtocolVersion` this entity version is compatible with.
    protocol_version_major: ProtocolVersionMajor,
    /// Automatically incremented value for a contract version within a major `ProtocolVersion`.
    entity_version: EntityVersion,
}

impl EntityVersionKey {
    /// Returns a new instance of `EntityVersionKey` with the provided values.
    pub fn new(
        protocol_version_major: ProtocolVersionMajor,
        entity_version: EntityVersion,
    ) -> Self {
        Self {
            protocol_version_major,
            entity_version,
        }
    }

    /// Returns the major element of the protocol version this contract is compatible with.
    pub fn protocol_version_major(self) -> ProtocolVersionMajor {
        self.protocol_version_major
    }

    /// Returns the contract version within the protocol major version.
+ pub fn entity_version(self) -> EntityVersion { + self.entity_version + } +} + +impl From for (ProtocolVersionMajor, EntityVersion) { + fn from(entity_version_key: EntityVersionKey) -> Self { + ( + entity_version_key.protocol_version_major, + entity_version_key.entity_version, + ) + } +} + +impl ToBytes for EntityVersionKey { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + ENTITY_VERSION_KEY_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.protocol_version_major.write_bytes(writer)?; + self.entity_version.write_bytes(writer) + } +} + +impl FromBytes for EntityVersionKey { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (protocol_version_major, remainder) = ProtocolVersionMajor::from_bytes(bytes)?; + let (entity_version, remainder) = EntityVersion::from_bytes(remainder)?; + Ok(( + EntityVersionKey { + protocol_version_major, + entity_version, + }, + remainder, + )) + } +} + +impl Display for EntityVersionKey { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!(f, "{}.{}", self.protocol_version_major, self.entity_version) + } +} + +/// Serialized length of `EntityVersionKey`. +pub const ENTITY_VERSION_KEY_SERIALIZED_LENGTH: usize = + U32_SERIALIZED_LENGTH + U32_SERIALIZED_LENGTH; + +/// Collection of entity versions. +#[derive(Clone, PartialEq, Eq, Default, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(transparent, deny_unknown_fields)] +pub struct EntityVersions( + #[serde( + with = "BTreeMapToArray::" + )] + BTreeMap, +); + +impl EntityVersions { + /// Constructs a new, empty `EntityVersions`. 
+ pub const fn new() -> Self { + EntityVersions(BTreeMap::new()) + } + + /// Returns an iterator over the `AddressableEntityHash`s (i.e. the map's values). + pub fn contract_hashes(&self) -> impl Iterator { + self.0.values() + } + + /// Returns the `AddressableEntityHash` under the key + pub fn get(&self, key: &EntityVersionKey) -> Option<&AddressableEntityHash> { + self.0.get(key) + } + + /// Retrieve the first entity version key if it exists + pub fn maybe_first(&mut self) -> Option<(EntityVersionKey, AddressableEntityHash)> { + if let Some((entity_version_key, entity_hash)) = self.0.iter().next() { + Some((*entity_version_key, *entity_hash)) + } else { + None + } + } +} + +impl ToBytes for EntityVersions { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } +} + +impl FromBytes for EntityVersions { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (versions, remainder) = + BTreeMap::::from_bytes(bytes)?; + Ok((EntityVersions(versions), remainder)) + } +} + +impl From> for EntityVersions { + fn from(value: BTreeMap) -> Self { + EntityVersions(value) + } +} + +struct EntityVersionLabels; + +impl KeyValueLabels for EntityVersionLabels { + const KEY: &'static str = "entity_version_key"; + const VALUE: &'static str = "addressable_entity_hash"; +} + +#[cfg(feature = "json-schema")] +impl KeyValueJsonSchema for EntityVersionLabels { + const JSON_SCHEMA_KV_NAME: Option<&'static str> = Some("EntityVersionAndHash"); +} +/// Collection of named groups. 
#[derive(Clone, PartialEq, Eq, Default, Serialize, Deserialize, Debug)]
#[cfg_attr(feature = "datasize", derive(DataSize))]
#[cfg_attr(feature = "json-schema", derive(JsonSchema))]
#[serde(transparent, deny_unknown_fields)]
pub struct Groups(
    #[serde(with = "BTreeMapToArray::<Group, BTreeSet<URef>, GroupLabels>")]
    BTreeMap<Group, BTreeSet<URef>>,
);

impl Groups {
    /// Constructs a new, empty `Groups`.
    pub const fn new() -> Self {
        Groups(BTreeMap::new())
    }

    /// Inserts a named group.
    ///
    /// If the map did not have this name present, `None` is returned. If the map did have this
    /// name present, its collection of `URef`s is overwritten, and the collection is returned.
    pub fn insert(&mut self, name: Group, urefs: BTreeSet<URef>) -> Option<BTreeSet<URef>> {
        self.0.insert(name, urefs)
    }

    /// Returns `true` if the named group exists in the collection.
    pub fn contains(&self, name: &Group) -> bool {
        self.0.contains_key(name)
    }

    /// Returns a reference to the collection of `URef`s under the given `name` if any.
    pub fn get(&self, name: &Group) -> Option<&BTreeSet<URef>> {
        self.0.get(name)
    }

    /// Returns a mutable reference to the collection of `URef`s under the given `name` if any.
    pub fn get_mut(&mut self, name: &Group) -> Option<&mut BTreeSet<URef>> {
        self.0.get_mut(name)
    }

    /// Returns the number of named groups.
    pub fn len(&self) -> usize {
        self.0.len()
    }

    /// Returns `true` if there are no named groups.
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }

    /// Returns an iterator over the `URef` sets (i.e. the map's values).
    pub fn keys(&self) -> impl Iterator<Item = &BTreeSet<URef>> {
        self.0.values()
    }

    /// Returns the total number of `URef`s contained in all the groups.
+ pub fn total_urefs(&self) -> usize { + self.0.values().map(|urefs| urefs.len()).sum() + } +} + +impl ToBytes for Groups { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } +} + +impl FromBytes for Groups { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (groups, remainder) = BTreeMap::>::from_bytes(bytes)?; + Ok((Groups(groups), remainder)) + } +} + +struct GroupLabels; + +impl KeyValueLabels for GroupLabels { + const KEY: &'static str = "group_name"; + const VALUE: &'static str = "group_users"; +} + +#[cfg(feature = "json-schema")] +impl KeyValueJsonSchema for GroupLabels { + const JSON_SCHEMA_KV_NAME: Option<&'static str> = Some("NamedUserGroup"); +} + +#[cfg(any(feature = "testing", feature = "gens", test))] +impl From>> for Groups { + fn from(value: BTreeMap>) -> Self { + Groups(value) + } +} + +/// A newtype wrapping a `HashAddr` which references a [`Package`] in the global state. +#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "The hex-encoded address of the Package.") +)] +pub struct PackageHash( + #[cfg_attr(feature = "json-schema", schemars(skip, with = "String"))] HashAddr, +); + +impl PackageHash { + /// Constructs a new `PackageHash` from the raw bytes of the package hash. + pub const fn new(value: HashAddr) -> PackageHash { + PackageHash(value) + } + + /// Returns the raw bytes of the entity hash as an array. + pub fn value(&self) -> HashAddr { + self.0 + } + + /// Returns the raw bytes of the entity hash as a `slice`. + pub fn as_bytes(&self) -> &[u8] { + &self.0 + } + + /// Formats the `PackageHash` for users getting and putting. 
+ pub fn to_formatted_string(self) -> String { + format!("{}{}", PACKAGE_STRING_PREFIX, base16::encode_lower(&self.0),) + } + + /// Parses a string formatted as per `Self::to_formatted_string()` into a + /// `PackageHash`. + pub fn from_formatted_str(input: &str) -> Result { + let remainder = input + .strip_prefix(PACKAGE_STRING_PREFIX) + .ok_or(FromStrError::InvalidPrefix)?; + + let hex_addr = remainder + .strip_prefix(PACKAGE_STRING_LEGACY_EXTRA_PREFIX) + .unwrap_or(remainder); + + let bytes = HashAddr::try_from(checksummed_hex::decode(hex_addr)?.as_ref())?; + Ok(PackageHash(bytes)) + } + + /// Parses a `PublicKey` and outputs the corresponding account hash. + pub fn from_public_key( + public_key: &PublicKey, + blake2b_hash_fn: impl Fn(Vec) -> [u8; BLAKE2B_DIGEST_LENGTH], + ) -> Self { + const SYSTEM_LOWERCASE: &str = "system"; + const ED25519_LOWERCASE: &str = "ed25519"; + const SECP256K1_LOWERCASE: &str = "secp256k1"; + + let algorithm_name = match public_key { + PublicKey::System => SYSTEM_LOWERCASE, + PublicKey::Ed25519(_) => ED25519_LOWERCASE, + PublicKey::Secp256k1(_) => SECP256K1_LOWERCASE, + }; + let public_key_bytes: Vec = public_key.into(); + + // Prepare preimage based on the public key parameters. + let preimage = { + let mut data = Vec::with_capacity(algorithm_name.len() + public_key_bytes.len() + 1); + data.extend(algorithm_name.as_bytes()); + data.push(0); + data.extend(public_key_bytes); + data + }; + // Hash the preimage data using blake2b256 and return it. 
+ let digest = blake2b_hash_fn(preimage); + Self::new(digest) + } +} + +impl Display for PackageHash { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + write!(f, "{}", base16::encode_lower(&self.0)) + } +} + +impl Debug for PackageHash { + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + write!(f, "PackageHash({})", base16::encode_lower(&self.0)) + } +} + +impl CLTyped for PackageHash { + fn cl_type() -> CLType { + CLType::ByteArray(KEY_HASH_LENGTH as u32) + } +} + +impl ToBytes for PackageHash { + #[inline(always)] + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + #[inline(always)] + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + #[inline(always)] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + writer.extend_from_slice(&self.0); + Ok(()) + } +} + +impl FromBytes for PackageHash { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (bytes, rem) = FromBytes::from_bytes(bytes)?; + Ok((PackageHash::new(bytes), rem)) + } +} + +impl From<[u8; 32]> for PackageHash { + fn from(bytes: [u8; 32]) -> Self { + PackageHash(bytes) + } +} + +impl Serialize for PackageHash { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + self.to_formatted_string().serialize(serializer) + } else { + self.0.serialize(serializer) + } + } +} + +impl<'de> Deserialize<'de> for PackageHash { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let formatted_string = String::deserialize(deserializer)?; + PackageHash::from_formatted_str(&formatted_string).map_err(SerdeError::custom) + } else { + let bytes = HashAddr::deserialize(deserializer)?; + Ok(PackageHash(bytes)) + } + } +} + +impl AsRef<[u8]> for PackageHash { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + +impl TryFrom<&[u8]> for PackageHash { + type Error = TryFromSliceForPackageHashError; + + fn try_from(bytes: &[u8]) -> 
Result { + HashAddr::try_from(bytes) + .map(PackageHash::new) + .map_err(|_| TryFromSliceForPackageHashError(())) + } +} + +impl TryFrom<&Vec> for PackageHash { + type Error = TryFromSliceForPackageHashError; + + fn try_from(bytes: &Vec) -> Result { + HashAddr::try_from(bytes as &[u8]) + .map(PackageHash::new) + .map_err(|_| TryFromSliceForPackageHashError(())) + } +} + +impl From<&PublicKey> for PackageHash { + fn from(public_key: &PublicKey) -> Self { + PackageHash::from_public_key(public_key, crypto::blake2b) + } +} + +/// A enum to determine the lock status of the package. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum PackageStatus { + /// The package is locked and cannot be versioned. + Locked, + /// The package is unlocked and can be versioned. + Unlocked, +} + +impl PackageStatus { + /// Create a new status flag based on a boolean value + pub fn new(is_locked: bool) -> Self { + if is_locked { + PackageStatus::Locked + } else { + PackageStatus::Unlocked + } + } +} + +impl Default for PackageStatus { + fn default() -> Self { + Self::Unlocked + } +} + +impl ToBytes for PackageStatus { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + match self { + PackageStatus::Unlocked => result.append(&mut false.to_bytes()?), + PackageStatus::Locked => result.append(&mut true.to_bytes()?), + } + Ok(result) + } + + fn serialized_length(&self) -> usize { + match self { + PackageStatus::Unlocked => false.serialized_length(), + PackageStatus::Locked => true.serialized_length(), + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + PackageStatus::Locked => writer.push(u8::from(true)), + PackageStatus::Unlocked => writer.push(u8::from(false)), + } + Ok(()) + } +} + +impl FromBytes for PackageStatus { + fn from_bytes(bytes: &[u8]) -> 
Result<(Self, &[u8]), bytesrepr::Error> { + let (val, bytes) = bool::from_bytes(bytes)?; + let status = PackageStatus::new(val); + Ok((status, bytes)) + } +} + +#[allow(missing_docs)] +#[derive(Debug, Copy, Clone, PartialOrd, Ord, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[repr(u8)] +pub enum PackageKindTag { + System = 0, + Account = 1, + SmartContract = 2, +} + +impl ToBytes for PackageKindTag { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + (*self as u8).to_bytes() + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + (*self as u8).write_bytes(writer) + } +} + +impl FromBytes for PackageKindTag { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (package_kind_tag, remainder) = u8::from_bytes(bytes)?; + match package_kind_tag { + package_kind_tag if package_kind_tag == PackageKindTag::System as u8 => { + Ok((PackageKindTag::System, remainder)) + } + package_kind_tag if package_kind_tag == PackageKindTag::Account as u8 => { + Ok((PackageKindTag::Account, remainder)) + } + package_kind_tag if package_kind_tag == PackageKindTag::SmartContract as u8 => { + Ok((PackageKindTag::SmartContract, remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +impl Display for PackageKindTag { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + PackageKindTag::System => { + write!(f, "system") + } + PackageKindTag::Account => { + write!(f, "account") + } + PackageKindTag::SmartContract => { + write!(f, "smart-contract") + } + } + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> PackageKindTag { + match rng.gen_range(0..=1) { + 0 => PackageKindTag::System, + 1 => PackageKindTag::Account, + 2 => PackageKindTag::SmartContract, + _ => unreachable!(), + } + } +} + +#[derive( + Debug, Clone, 
Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default, Serialize, Deserialize, +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +/// The type of Package. +pub enum PackageKind { + /// Package associated with a native contract implementation. + System(SystemEntityType), + /// Package associated with an Account hash. + Account(AccountHash), + /// Packages associated with Wasm stored on chain. + #[default] + SmartContract, +} + +impl PackageKind { + /// Returns the Account hash associated with a Package based on the package kind. + pub fn maybe_account_hash(&self) -> Option { + match self { + Self::Account(account_hash) => Some(*account_hash), + Self::SmartContract | Self::System(_) => None, + } + } + + /// Returns the associated key set based on the Account hash set in the package kind. + pub fn associated_keys(&self) -> AssociatedKeys { + match self { + Self::Account(account_hash) => AssociatedKeys::new(*account_hash, Weight::new(1)), + Self::SmartContract | Self::System(_) => AssociatedKeys::default(), + } + } + + /// Returns if the current package is either a system contract or the system entity. + pub fn is_system(&self) -> bool { + matches!(self, Self::System(_)) + } + + /// Returns if the current package is the system mint. + pub fn is_system_mint(&self) -> bool { + matches!(self, Self::System(SystemEntityType::Mint)) + } + + /// Returns if the current package is the system auction. + pub fn is_system_auction(&self) -> bool { + matches!(self, Self::System(SystemEntityType::Auction)) + } + + /// Returns if the current package is associated with the system addressable entity. 
+    pub fn is_system_account(&self) -> bool {
+        // True only for the account package owned by the system public key;
+        // expressed as a direct boolean instead of `if cond { return true; } false`.
+        match self {
+            Self::Account(account_hash) => *account_hash == PublicKey::System.to_account_hash(),
+            _ => false,
+        }
+    }
+}
+
+impl Tagged<PackageKindTag> for PackageKind {
+    fn tag(&self) -> PackageKindTag {
+        match self {
+            PackageKind::System(_) => PackageKindTag::System,
+            PackageKind::Account(_) => PackageKindTag::Account,
+            PackageKind::SmartContract => PackageKindTag::SmartContract,
+        }
+    }
+}
+
+impl Tagged<u8> for PackageKind {
+    fn tag(&self) -> u8 {
+        // The type annotation disambiguates between the two `Tagged` impls.
+        let package_kind_tag: PackageKindTag = self.tag();
+        package_kind_tag as u8
+    }
+}
+
+impl ToBytes for PackageKind {
+    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {
+        let mut buffer = bytesrepr::allocate_buffer(self)?;
+        self.write_bytes(&mut buffer)?;
+        Ok(buffer)
+    }
+
+    fn serialized_length(&self) -> usize {
+        // One byte for the kind tag, plus the variant payload (if any).
+        U8_SERIALIZED_LENGTH
+            + match self {
+                PackageKind::SmartContract => 0,
+                PackageKind::System(system_entity_type) => system_entity_type.serialized_length(),
+                PackageKind::Account(account_hash) => account_hash.serialized_length(),
+            }
+    }
+
+    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {
+        match self {
+            PackageKind::SmartContract => {
+                writer.push(self.tag());
+                Ok(())
+            }
+            PackageKind::System(system_entity_type) => {
+                writer.push(self.tag());
+                system_entity_type.write_bytes(writer)
+            }
+            PackageKind::Account(account_hash) => {
+                writer.push(self.tag());
+                account_hash.write_bytes(writer)
+            }
+        }
+    }
+}
+
+impl FromBytes for PackageKind {
+    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {
+        let (tag, remainder) = u8::from_bytes(bytes)?;
+        match tag {
+            tag if tag == PackageKindTag::System as u8 => {
+                let (entity_type, remainder) = SystemEntityType::from_bytes(remainder)?;
+                Ok((PackageKind::System(entity_type), remainder))
+            }
+            tag if tag == PackageKindTag::Account as u8 => {
+                let (account_hash, remainder) = AccountHash::from_bytes(remainder)?;
+
Ok((PackageKind::Account(account_hash), remainder)) + } + tag if tag == PackageKindTag::SmartContract as u8 => { + Ok((PackageKind::SmartContract, remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +impl Display for PackageKind { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + PackageKind::System(system_entity) => { + write!(f, "PackageKind::System({})", system_entity) + } + PackageKind::Account(account_hash) => { + write!(f, "PackageKind::Account({})", account_hash) + } + PackageKind::SmartContract => { + write!(f, "PackageKind::SmartContract") + } + } + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> PackageKind { + match rng.gen_range(0..=2) { + 0 => PackageKind::System(rng.gen()), + 1 => PackageKind::Account(rng.gen()), + 2 => PackageKind::SmartContract, + _ => unreachable!(), + } + } +} + +/// Entity definition, metadata, and security container. +#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct Package { + /// Key used to add or disable versions. + access_key: URef, + /// All versions (enabled & disabled). + versions: EntityVersions, + /// Collection of disabled entity versions. The runtime will not permit disabled entity + /// versions to be executed. + disabled_versions: BTreeSet, + /// Mapping maintaining the set of URefs associated with each "user group". This can be used to + /// control access to methods in a particular version of the entity. A method is callable by + /// any context which "knows" any of the URefs associated with the method's user group. + groups: Groups, + /// A flag that determines whether a entity is locked + lock_status: PackageStatus, + /// The kind of package. 
+ package_kind: PackageKind, +} + +impl CLTyped for Package { + fn cl_type() -> CLType { + CLType::Any + } +} + +impl Package { + /// Create new `Package` (with no versions) from given access key. + pub fn new( + access_key: URef, + versions: EntityVersions, + disabled_versions: BTreeSet, + groups: Groups, + lock_status: PackageStatus, + package_kind: PackageKind, + ) -> Self { + Package { + access_key, + versions, + disabled_versions, + groups, + lock_status, + package_kind, + } + } + + /// Enable the entity version corresponding to the given hash (if it exists). + pub fn enable_version(&mut self, entity_hash: AddressableEntityHash) -> Result<(), Error> { + let entity_version_key = self + .find_entity_version_key_by_hash(&entity_hash) + .copied() + .ok_or(Error::EntityNotFound)?; + + self.disabled_versions.remove(&entity_version_key); + + Ok(()) + } + + /// Get the access key for this entity. + pub fn access_key(&self) -> URef { + self.access_key + } + + /// Get the mutable group definitions for this entity. + pub fn groups_mut(&mut self) -> &mut Groups { + &mut self.groups + } + + /// Get the group definitions for this entity. + pub fn groups(&self) -> &Groups { + &self.groups + } + + /// Adds new group to this entity. + pub fn add_group(&mut self, group: Group, urefs: BTreeSet) { + let v = self.groups.0.entry(group).or_default(); + v.extend(urefs) + } + + /// Lookup the entity hash for a given entity version (if present) + pub fn lookup_entity_hash( + &self, + entity_version_key: EntityVersionKey, + ) -> Option<&AddressableEntityHash> { + if !self.is_version_enabled(entity_version_key) { + return None; + } + self.versions.0.get(&entity_version_key) + } + + /// Checks if the given entity version exists and is available for use. 
+    pub fn is_version_enabled(&self, entity_version_key: EntityVersionKey) -> bool {
+        // A version is usable iff it is registered and not explicitly disabled.
+        !self.disabled_versions.contains(&entity_version_key)
+            && self.versions.0.contains_key(&entity_version_key)
+    }
+
+    /// Returns `true` if the given entity hash exists and is enabled.
+    pub fn is_entity_enabled(&self, entity_hash: &AddressableEntityHash) -> bool {
+        self.find_entity_version_key_by_hash(entity_hash)
+            .map_or(false, |version_key| {
+                !self.disabled_versions.contains(version_key)
+            })
+    }
+
+    /// Insert a new entity version; the next sequential version number will be issued.
+    pub fn insert_entity_version(
+        &mut self,
+        protocol_version_major: ProtocolVersionMajor,
+        entity_hash: AddressableEntityHash,
+    ) -> EntityVersionKey {
+        let contract_version = self.next_entity_version_for(protocol_version_major);
+        let key = EntityVersionKey::new(protocol_version_major, contract_version);
+        self.versions.0.insert(key, entity_hash);
+        key
+    }
+
+    /// Disable the entity version corresponding to the given hash (if it exists).
+    ///
+    /// Returns `Err(Error::EntityNotFound)` when no version is registered for
+    /// `entity_hash`. Disabling an already-disabled version is a no-op.
+    pub fn disable_entity_version(
+        &mut self,
+        entity_hash: AddressableEntityHash,
+    ) -> Result<(), Error> {
+        // Reuse the shared lookup (also used by `enable_version`) rather than
+        // duplicating the linear search over `self.versions`.
+        let entity_version_key = self
+            .find_entity_version_key_by_hash(&entity_hash)
+            .copied()
+            .ok_or(Error::EntityNotFound)?;
+
+        // `BTreeSet::insert` is already a no-op on duplicates, so no
+        // `contains` pre-check is needed.
+        self.disabled_versions.insert(entity_version_key);
+
+        Ok(())
+    }
+
+    /// Looks up the version key under which the given entity hash is registered.
+    fn find_entity_version_key_by_hash(
+        &self,
+        entity_hash: &AddressableEntityHash,
+    ) -> Option<&EntityVersionKey> {
+        self.versions
+            .0
+            .iter()
+            .find_map(|(k, v)| if v == entity_hash { Some(k) } else { None })
+    }
+
+    /// Returns reference to all of this entity's versions.
+    pub fn versions(&self) -> &EntityVersions {
+        &self.versions
+    }
+
+    /// Returns all of this entity's enabled entity versions.
+ pub fn enabled_versions(&self) -> EntityVersions { + let mut ret = EntityVersions::new(); + for version in &self.versions.0 { + if !self.is_version_enabled(*version.0) { + continue; + } + ret.0.insert(*version.0, *version.1); + } + ret + } + + /// Returns mutable reference to all of this entity's versions (enabled and disabled). + pub fn versions_mut(&mut self) -> &mut EntityVersions { + &mut self.versions + } + + /// Consumes the object and returns all of this entity's versions (enabled and disabled). + pub fn take_versions(self) -> EntityVersions { + self.versions + } + + /// Returns all of this entity's disabled versions. + pub fn disabled_versions(&self) -> &BTreeSet { + &self.disabled_versions + } + + /// Returns mut reference to all of this entity's disabled versions. + pub fn disabled_versions_mut(&mut self) -> &mut BTreeSet { + &mut self.disabled_versions + } + + /// Removes a group from this entity (if it exists). + pub fn remove_group(&mut self, group: &Group) -> bool { + self.groups.0.remove(group).is_some() + } + + /// Gets the next available entity version for the given protocol version + fn next_entity_version_for(&self, protocol_version: ProtocolVersionMajor) -> EntityVersion { + let current_version = self + .versions + .0 + .keys() + .rev() + .find_map(|&entity_version_key| { + if entity_version_key.protocol_version_major() == protocol_version { + Some(entity_version_key.entity_version()) + } else { + None + } + }) + .unwrap_or(0); + + current_version + 1 + } + + /// Return the entity version key for the newest enabled entity version. + pub fn current_entity_version(&self) -> Option { + self.enabled_versions().0.keys().next_back().copied() + } + + /// Return the entity hash for the newest enabled entity version. + pub fn current_entity_hash(&self) -> Option { + self.enabled_versions().0.values().next_back().copied() + } + + /// Return the Key representation for the previous entity. 
+ pub fn previous_entity_key(&self) -> Option { + if let Some(previous_entity_hash) = self.current_entity_hash() { + return Some(Key::addressable_entity_key( + self.get_package_kind().tag(), + previous_entity_hash, + )); + } + None + } + + /// Return the lock status of the entity package. + pub fn is_locked(&self) -> bool { + if self.versions.0.is_empty() { + return false; + } + + match self.lock_status { + PackageStatus::Unlocked => false, + PackageStatus::Locked => true, + } + } + + // TODO: Check the history of this. + /// Return the package status itself + pub fn get_lock_status(&self) -> PackageStatus { + self.lock_status.clone() + } + + /// Returns the kind of Package. + pub fn get_package_kind(&self) -> PackageKind { + self.package_kind + } + + /// Is the given Package associated to an Account. + pub fn is_account_kind(&self) -> bool { + matches!(self.package_kind, PackageKind::Account(_)) + } + + /// Update the entity package kind. + pub fn update_package_kind(&mut self, new_package_kind: PackageKind) { + self.package_kind = new_package_kind + } +} + +impl ToBytes for Package { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.access_key.serialized_length() + + self.versions.serialized_length() + + self.disabled_versions.serialized_length() + + self.groups.serialized_length() + + self.lock_status.serialized_length() + + self.package_kind.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.access_key().write_bytes(writer)?; + self.versions().write_bytes(writer)?; + self.disabled_versions().write_bytes(writer)?; + self.groups().write_bytes(writer)?; + self.lock_status.write_bytes(writer)?; + self.package_kind.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for Package { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> 
{ + let (access_key, bytes) = URef::from_bytes(bytes)?; + let (versions, bytes) = EntityVersions::from_bytes(bytes)?; + let (disabled_versions, bytes) = BTreeSet::::from_bytes(bytes)?; + let (groups, bytes) = Groups::from_bytes(bytes)?; + let (lock_status, bytes) = PackageStatus::from_bytes(bytes)?; + let (package_kind, bytes) = PackageKind::from_bytes(bytes)?; + let result = Package { + access_key, + versions, + disabled_versions, + groups, + lock_status, + package_kind, + }; + + Ok((result, bytes)) + } +} + +#[cfg(test)] +mod tests { + use core::iter::FromIterator; + + use super::*; + use crate::{ + AccessRights, EntityVersionKey, EntryPoint, EntryPointAccess, EntryPointType, Parameter, + ProtocolVersion, URef, + }; + use alloc::borrow::ToOwned; + + const ENTITY_HASH_V1: AddressableEntityHash = AddressableEntityHash::new([42; 32]); + const ENTITY_HASH_V2: AddressableEntityHash = AddressableEntityHash::new([84; 32]); + + fn make_package_with_two_versions() -> Package { + let mut package = Package::new( + URef::new([0; 32], AccessRights::NONE), + EntityVersions::default(), + BTreeSet::new(), + Groups::default(), + PackageStatus::default(), + PackageKind::SmartContract, + ); + + // add groups + { + let group_urefs = { + let mut ret = BTreeSet::new(); + ret.insert(URef::new([1; 32], AccessRights::READ)); + ret + }; + + package + .groups_mut() + .insert(Group::new("Group 1"), group_urefs.clone()); + + package + .groups_mut() + .insert(Group::new("Group 2"), group_urefs); + } + + // add entry_points + let _entry_points = { + let mut ret = BTreeMap::new(); + let entrypoint = EntryPoint::new( + "method0".to_string(), + vec![], + CLType::U32, + EntryPointAccess::groups(&["Group 2"]), + EntryPointType::Session, + ); + ret.insert(entrypoint.name().to_owned(), entrypoint); + let entrypoint = EntryPoint::new( + "method1".to_string(), + vec![Parameter::new("Foo", CLType::U32)], + CLType::U32, + EntryPointAccess::groups(&["Group 1"]), + EntryPointType::Session, + ); + 
ret.insert(entrypoint.name().to_owned(), entrypoint); + ret + }; + + let protocol_version = ProtocolVersion::V1_0_0; + + let v1 = package.insert_entity_version(protocol_version.value().major, ENTITY_HASH_V1); + let v2 = package.insert_entity_version(protocol_version.value().major, ENTITY_HASH_V2); + assert!(v2 > v1); + + package + } + + #[test] + fn next_entity_version() { + let major = 1; + let mut package = Package::new( + URef::new([0; 32], AccessRights::NONE), + EntityVersions::default(), + BTreeSet::default(), + Groups::default(), + PackageStatus::default(), + PackageKind::SmartContract, + ); + assert_eq!(package.next_entity_version_for(major), 1); + + let next_version = package.insert_entity_version(major, [123; 32].into()); + assert_eq!(next_version, EntityVersionKey::new(major, 1)); + assert_eq!(package.next_entity_version_for(major), 2); + let next_version_2 = package.insert_entity_version(major, [124; 32].into()); + assert_eq!(next_version_2, EntityVersionKey::new(major, 2)); + + let major = 2; + assert_eq!(package.next_entity_version_for(major), 1); + let next_version_3 = package.insert_entity_version(major, [42; 32].into()); + assert_eq!(next_version_3, EntityVersionKey::new(major, 1)); + } + + #[test] + fn roundtrip_serialization() { + let package = make_package_with_two_versions(); + let bytes = package.to_bytes().expect("should serialize"); + let (decoded_package, rem) = Package::from_bytes(&bytes).expect("should deserialize"); + assert_eq!(package, decoded_package); + assert_eq!(rem.len(), 0); + } + + #[test] + fn should_remove_group() { + let mut package = make_package_with_two_versions(); + + assert!(!package.remove_group(&Group::new("Non-existent group"))); + assert!(package.remove_group(&Group::new("Group 1"))); + assert!(!package.remove_group(&Group::new("Group 1"))); // Group no longer exists + } + + #[test] + fn should_disable_and_enable_entity_version() { + const ENTITY_HASH: AddressableEntityHash = AddressableEntityHash::new([123; 32]); + + 
let mut package = make_package_with_two_versions(); + + assert!( + !package.is_entity_enabled(&ENTITY_HASH), + "nonexisting entity should return false" + ); + + assert_eq!( + package.current_entity_version(), + Some(EntityVersionKey::new(1, 2)) + ); + assert_eq!(package.current_entity_hash(), Some(ENTITY_HASH_V2)); + + assert_eq!( + package.versions(), + &EntityVersions::from(BTreeMap::from_iter([ + (EntityVersionKey::new(1, 1), ENTITY_HASH_V1), + (EntityVersionKey::new(1, 2), ENTITY_HASH_V2) + ])), + ); + assert_eq!( + package.enabled_versions(), + EntityVersions::from(BTreeMap::from_iter([ + (EntityVersionKey::new(1, 1), ENTITY_HASH_V1), + (EntityVersionKey::new(1, 2), ENTITY_HASH_V2) + ])), + ); + + assert!(!package.is_entity_enabled(&ENTITY_HASH)); + + assert_eq!( + package.disable_entity_version(ENTITY_HASH), + Err(Error::EntityNotFound), + "should return entity not found error" + ); + + assert!( + !package.is_entity_enabled(&ENTITY_HASH), + "disabling missing entity shouldnt change outcome" + ); + + let next_version = package.insert_entity_version(1, ENTITY_HASH); + assert!( + package.is_version_enabled(next_version), + "version should exist and be enabled" + ); + assert!(package.is_entity_enabled(&ENTITY_HASH)); + + assert!( + package.is_entity_enabled(&ENTITY_HASH), + "entity should be enabled" + ); + + assert_eq!( + package.disable_entity_version(ENTITY_HASH), + Ok(()), + "should be able to disable version" + ); + assert!(!package.is_entity_enabled(&ENTITY_HASH)); + + assert!( + !package.is_entity_enabled(&ENTITY_HASH), + "entity should be disabled" + ); + assert_eq!( + package.lookup_entity_hash(next_version), + None, + "should not return disabled entity version" + ); + assert!( + !package.is_version_enabled(next_version), + "version should not be enabled" + ); + + assert_eq!( + package.current_entity_version(), + Some(EntityVersionKey::new(1, 2)) + ); + assert_eq!(package.current_entity_hash(), Some(ENTITY_HASH_V2)); + assert_eq!( + package.versions(), + 
&EntityVersions::from(BTreeMap::from_iter([ + (EntityVersionKey::new(1, 1), ENTITY_HASH_V1), + (EntityVersionKey::new(1, 2), ENTITY_HASH_V2), + (next_version, ENTITY_HASH), + ])), + ); + assert_eq!( + package.enabled_versions(), + EntityVersions::from(BTreeMap::from_iter([ + (EntityVersionKey::new(1, 1), ENTITY_HASH_V1), + (EntityVersionKey::new(1, 2), ENTITY_HASH_V2), + ])), + ); + assert_eq!( + package.disabled_versions(), + &BTreeSet::from_iter([next_version]), + ); + + assert_eq!( + package.current_entity_version(), + Some(EntityVersionKey::new(1, 2)) + ); + assert_eq!(package.current_entity_hash(), Some(ENTITY_HASH_V2)); + + assert_eq!( + package.disable_entity_version(ENTITY_HASH_V2), + Ok(()), + "should be able to disable version 2" + ); + + assert_eq!( + package.enabled_versions(), + EntityVersions::from(BTreeMap::from_iter([( + EntityVersionKey::new(1, 1), + ENTITY_HASH_V1 + ),])), + ); + + assert_eq!( + package.current_entity_version(), + Some(EntityVersionKey::new(1, 1)) + ); + assert_eq!(package.current_entity_hash(), Some(ENTITY_HASH_V1)); + + assert_eq!( + package.disabled_versions(), + &BTreeSet::from_iter([next_version, EntityVersionKey::new(1, 2)]), + ); + + assert_eq!(package.enable_version(ENTITY_HASH_V2), Ok(()),); + + assert_eq!( + package.enabled_versions(), + EntityVersions::from(BTreeMap::from_iter([ + (EntityVersionKey::new(1, 1), ENTITY_HASH_V1), + (EntityVersionKey::new(1, 2), ENTITY_HASH_V2), + ])), + ); + + assert_eq!( + package.disabled_versions(), + &BTreeSet::from_iter([next_version]) + ); + + assert_eq!(package.current_entity_hash(), Some(ENTITY_HASH_V2)); + + assert_eq!(package.enable_version(ENTITY_HASH), Ok(()),); + + assert_eq!( + package.enable_version(ENTITY_HASH), + Ok(()), + "enabling a entity twice should be a noop" + ); + + assert_eq!( + package.enabled_versions(), + EntityVersions::from(BTreeMap::from_iter([ + (EntityVersionKey::new(1, 1), ENTITY_HASH_V1), + (EntityVersionKey::new(1, 2), ENTITY_HASH_V2), + (next_version, 
ENTITY_HASH), + ])), + ); + + assert_eq!(package.disabled_versions(), &BTreeSet::new(),); + + assert_eq!(package.current_entity_hash(), Some(ENTITY_HASH)); + } + + #[test] + fn should_not_allow_to_enable_non_existing_version() { + let mut package = make_package_with_two_versions(); + + assert_eq!( + package.enable_version(AddressableEntityHash::default()), + Err(Error::EntityNotFound), + ); + } + + #[test] + fn package_hash_from_slice() { + let bytes: Vec = (0..32).collect(); + let package_hash = HashAddr::try_from(&bytes[..]).expect("should create package hash"); + let package_hash = PackageHash::new(package_hash); + assert_eq!(&bytes, &package_hash.as_bytes()); + } + + #[test] + fn package_hash_from_str() { + let package_hash = PackageHash::new([3; 32]); + let encoded = package_hash.to_formatted_string(); + let decoded = PackageHash::from_formatted_str(&encoded).unwrap(); + assert_eq!(package_hash, decoded); + + let invalid_prefix = + "contract-package0000000000000000000000000000000000000000000000000000000000000000"; + assert!(matches!( + PackageHash::from_formatted_str(invalid_prefix).unwrap_err(), + FromStrError::InvalidPrefix + )); + + let short_addr = + "contract-package-00000000000000000000000000000000000000000000000000000000000000"; + assert!(matches!( + PackageHash::from_formatted_str(short_addr).unwrap_err(), + FromStrError::Hash(_) + )); + + let long_addr = + "contract-package-000000000000000000000000000000000000000000000000000000000000000000"; + assert!(matches!( + PackageHash::from_formatted_str(long_addr).unwrap_err(), + FromStrError::Hash(_) + )); + + let invalid_hex = + "contract-package-000000000000000000000000000000000000000000000000000000000000000g"; + assert!(matches!( + PackageHash::from_formatted_str(invalid_hex).unwrap_err(), + FromStrError::Hex(_) + )); + } + + #[test] + fn package_hash_from_legacy_str() { + let package_hash = PackageHash([3; 32]); + let hex_addr = package_hash.to_string(); + let legacy_encoded = 
format!("contract-package-wasm{}", hex_addr); + let decoded_from_legacy = PackageHash::from_formatted_str(&legacy_encoded) + .expect("should accept legacy prefixed string"); + assert_eq!( + package_hash, decoded_from_legacy, + "decoded_from_legacy should equal decoded" + ); + + let invalid_prefix = + "contract-packagewasm0000000000000000000000000000000000000000000000000000000000000000"; + assert!(matches!( + PackageHash::from_formatted_str(invalid_prefix).unwrap_err(), + FromStrError::InvalidPrefix + )); + + let short_addr = + "contract-package-wasm00000000000000000000000000000000000000000000000000000000000000"; + assert!(matches!( + PackageHash::from_formatted_str(short_addr).unwrap_err(), + FromStrError::Hash(_) + )); + + let long_addr = + "contract-package-wasm000000000000000000000000000000000000000000000000000000000000000000"; + assert!(matches!( + PackageHash::from_formatted_str(long_addr).unwrap_err(), + FromStrError::Hash(_) + )); + + let invalid_hex = + "contract-package-wasm000000000000000000000000000000000000000000000000000000000000000g"; + assert!(matches!( + PackageHash::from_formatted_str(invalid_hex).unwrap_err(), + FromStrError::Hex(_) + )); + } +} + +#[cfg(test)] +mod prop_tests { + use proptest::prelude::*; + + use crate::{bytesrepr, gens}; + + proptest! 
{ + #[test] + fn test_value_contract_package(contract_pkg in gens::package_arb()) { + bytesrepr::test_serialization_roundtrip(&contract_pkg); + } + } +} diff --git a/casper_types_ver_2_0/src/peers_map.rs b/casper_types_ver_2_0/src/peers_map.rs new file mode 100644 index 00000000..c7a28334 --- /dev/null +++ b/casper_types_ver_2_0/src/peers_map.rs @@ -0,0 +1,138 @@ +use alloc::collections::BTreeMap; + +use crate::bytesrepr::{self, FromBytes, ToBytes}; +use alloc::{ + string::{String, ToString}, + vec::Vec, +}; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(test)] +use core::iter; + +#[cfg(test)] +use rand::Rng; + +#[cfg(test)] +use crate::testing::TestRng; + +/// Node peer entry. +#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct PeerEntry { + /// Node id. + pub node_id: String, + /// Node address. + pub address: String, +} + +impl PeerEntry { + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + Self { + node_id: rng.random_string(10..20), + address: rng.random_string(10..20), + } + } +} + +impl ToBytes for PeerEntry { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.node_id.write_bytes(writer)?; + self.address.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + self.node_id.serialized_length() + self.address.serialized_length() + } +} + +impl FromBytes for PeerEntry { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (node_id, remainder) = String::from_bytes(bytes)?; + let (address, remainder) = String::from_bytes(remainder)?; + Ok((PeerEntry { node_id, address }, remainder)) + } +} + +/// Map of peer IDs to network addresses. 
+#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct Peers(Vec); + +impl Peers { + /// Retrieve collection of `PeerEntry` records. + pub fn into_inner(self) -> Vec { + self.0 + } + + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + let count = rng.gen_range(0..10); + let peers = iter::repeat(()) + .map(|_| PeerEntry::random(rng)) + .take(count) + .collect(); + Self(peers) + } +} + +impl From> for Peers { + fn from(input: BTreeMap) -> Self { + let ret = input + .into_iter() + .map(|(node_id, address)| PeerEntry { + node_id: node_id.to_string(), + address, + }) + .collect(); + Peers(ret) + } +} + +impl ToBytes for Peers { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for Peers { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (inner, remainder) = Vec::::from_bytes(bytes)?; + Ok((Peers(inner), remainder)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = Peers::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/casper_types_ver_2_0/src/phase.rs b/casper_types_ver_2_0/src/phase.rs new file mode 100644 index 00000000..35586889 --- /dev/null +++ b/casper_types_ver_2_0/src/phase.rs @@ -0,0 +1,56 @@ +// Can be removed once https://github.com/rust-lang/rustfmt/issues/3362 is resolved. 
+#[rustfmt::skip] +use alloc::vec; +use alloc::vec::Vec; + +use num_derive::{FromPrimitive, ToPrimitive}; +use num_traits::{FromPrimitive, ToPrimitive}; + +use crate::{ + bytesrepr::{Error, FromBytes, ToBytes}, + CLType, CLTyped, +}; + +/// The number of bytes in a serialized [`Phase`]. +pub const PHASE_SERIALIZED_LENGTH: usize = 1; + +/// The phase in which a given contract is executing. +#[derive(Debug, PartialEq, Eq, Clone, Copy, FromPrimitive, ToPrimitive)] +#[repr(u8)] +pub enum Phase { + /// Set while committing the genesis or upgrade configurations. + System = 0, + /// Set while executing the payment code of a deploy. + Payment = 1, + /// Set while executing the session code of a deploy. + Session = 2, + /// Set while finalizing payment at the end of a deploy. + FinalizePayment = 3, +} + +impl ToBytes for Phase { + fn to_bytes(&self) -> Result, Error> { + // NOTE: Assumed safe as [`Phase`] is represented as u8. + let id = self.to_u8().expect("Phase is represented as a u8"); + + Ok(vec![id]) + } + + fn serialized_length(&self) -> usize { + PHASE_SERIALIZED_LENGTH + } +} + +impl FromBytes for Phase { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (id, rest) = u8::from_bytes(bytes)?; + let phase = FromPrimitive::from_u8(id).ok_or(Error::Formatting)?; + Ok((phase, rest)) + } +} + +impl CLTyped for Phase { + fn cl_type() -> CLType { + CLType::U8 + } +} diff --git a/casper_types_ver_2_0/src/protocol_version.rs b/casper_types_ver_2_0/src/protocol_version.rs new file mode 100644 index 00000000..fe889f1c --- /dev/null +++ b/casper_types_ver_2_0/src/protocol_version.rs @@ -0,0 +1,550 @@ +use alloc::{format, string::String, vec::Vec}; +use core::{convert::TryFrom, fmt, str::FromStr}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; + +use crate::{ + bytesrepr::{Error, FromBytes, ToBytes}, + 
ParseSemVerError, SemVer, +}; + +/// A newtype wrapping a [`SemVer`] which represents a Casper Platform protocol version. +#[derive(Copy, Clone, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct ProtocolVersion(SemVer); + +/// The result of [`ProtocolVersion::check_next_version`]. +#[derive(Debug, PartialEq, Eq)] +pub enum VersionCheckResult { + /// Upgrade possible. + Valid { + /// Is this a major protocol version upgrade? + is_major_version: bool, + }, + /// Upgrade is invalid. + Invalid, +} + +impl VersionCheckResult { + /// Checks if given version result is invalid. + /// + /// Invalid means that a given version can not be followed. + pub fn is_invalid(&self) -> bool { + matches!(self, VersionCheckResult::Invalid) + } + + /// Checks if given version is a major protocol version upgrade. + pub fn is_major_version(&self) -> bool { + match self { + VersionCheckResult::Valid { is_major_version } => *is_major_version, + VersionCheckResult::Invalid => false, + } + } +} + +impl ProtocolVersion { + /// Version 1.0.0. + pub const V1_0_0: ProtocolVersion = ProtocolVersion(SemVer { + major: 1, + minor: 0, + patch: 0, + }); + + /// Constructs a new `ProtocolVersion` from `version`. + pub const fn new(version: SemVer) -> ProtocolVersion { + ProtocolVersion(version) + } + + /// Constructs a new `ProtocolVersion` from the given semver parts. + pub const fn from_parts(major: u32, minor: u32, patch: u32) -> ProtocolVersion { + let sem_ver = SemVer::new(major, minor, patch); + Self::new(sem_ver) + } + + /// Returns the inner [`SemVer`]. + pub fn value(&self) -> SemVer { + self.0 + } + + /// Checks if next version can be followed. + pub fn check_next_version(&self, next: &ProtocolVersion) -> VersionCheckResult { + // Protocol major versions should increase monotonically by 1. 
+ let major_bumped = self.0.major.saturating_add(1); + if next.0.major < self.0.major || next.0.major > major_bumped { + return VersionCheckResult::Invalid; + } + + if next.0.major == major_bumped { + return VersionCheckResult::Valid { + is_major_version: true, + }; + } + + // Covers the equal major versions + debug_assert_eq!(next.0.major, self.0.major); + + if next.0.minor < self.0.minor { + // Protocol minor versions within the same major version should not go backwards. + return VersionCheckResult::Invalid; + } + + if next.0.minor > self.0.minor { + return VersionCheckResult::Valid { + is_major_version: false, + }; + } + + // Code belows covers equal minor versions + debug_assert_eq!(next.0.minor, self.0.minor); + + // Protocol patch versions should increase monotonically but can be skipped. + if next.0.patch <= self.0.patch { + return VersionCheckResult::Invalid; + } + + VersionCheckResult::Valid { + is_major_version: false, + } + } + + /// Checks if given protocol version is compatible with current one. + /// + /// Two protocol versions with different major version are considered to be incompatible. 
+ pub fn is_compatible_with(&self, version: &ProtocolVersion) -> bool { + self.0.major == version.0.major + } +} + +impl ToBytes for ProtocolVersion { + fn to_bytes(&self) -> Result, Error> { + self.value().to_bytes() + } + + fn serialized_length(&self) -> usize { + self.value().serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.extend(self.0.major.to_le_bytes()); + writer.extend(self.0.minor.to_le_bytes()); + writer.extend(self.0.patch.to_le_bytes()); + Ok(()) + } +} + +impl FromBytes for ProtocolVersion { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (version, rem) = SemVer::from_bytes(bytes)?; + let protocol_version = ProtocolVersion::new(version); + Ok((protocol_version, rem)) + } +} + +impl FromStr for ProtocolVersion { + type Err = ParseSemVerError; + + fn from_str(s: &str) -> Result { + let version = SemVer::try_from(s)?; + Ok(ProtocolVersion::new(version)) + } +} + +impl Serialize for ProtocolVersion { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + let str = format!("{}.{}.{}", self.0.major, self.0.minor, self.0.patch); + String::serialize(&str, serializer) + } else { + self.0.serialize(serializer) + } + } +} + +impl<'de> Deserialize<'de> for ProtocolVersion { + fn deserialize>(deserializer: D) -> Result { + let semver = if deserializer.is_human_readable() { + let value_as_string = String::deserialize(deserializer)?; + SemVer::try_from(value_as_string.as_str()).map_err(SerdeError::custom)? + } else { + SemVer::deserialize(deserializer)? 
+ }; + Ok(ProtocolVersion(semver)) + } +} + +#[cfg(feature = "json-schema")] +impl JsonSchema for ProtocolVersion { + fn schema_name() -> String { + String::from("ProtocolVersion") + } + + fn json_schema(gen: &mut schemars::gen::SchemaGenerator) -> schemars::schema::Schema { + let schema = gen.subschema_for::(); + let mut schema_object = schema.into_object(); + schema_object.metadata().description = Some("Casper Platform protocol version".to_string()); + schema_object.into() + } +} + +impl fmt::Display for ProtocolVersion { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0.fmt(f) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::SemVer; + + #[test] + fn should_follow_version_with_optional_code() { + let value = VersionCheckResult::Valid { + is_major_version: false, + }; + assert!(!value.is_invalid()); + assert!(!value.is_major_version()); + } + + #[test] + fn should_follow_version_with_required_code() { + let value = VersionCheckResult::Valid { + is_major_version: true, + }; + assert!(!value.is_invalid()); + assert!(value.is_major_version()); + } + + #[test] + fn should_not_follow_version_with_invalid_code() { + let value = VersionCheckResult::Invalid; + assert!(value.is_invalid()); + assert!(!value.is_major_version()); + } + + #[test] + fn should_be_able_to_get_instance() { + let initial_value = SemVer::new(1, 0, 0); + let item = ProtocolVersion::new(initial_value); + assert_eq!(initial_value, item.value(), "should have equal value") + } + + #[test] + fn should_be_able_to_compare_two_instances() { + let lhs = ProtocolVersion::new(SemVer::new(1, 0, 0)); + let rhs = ProtocolVersion::new(SemVer::new(1, 0, 0)); + assert_eq!(lhs, rhs, "should be equal"); + let rhs = ProtocolVersion::new(SemVer::new(2, 0, 0)); + assert_ne!(lhs, rhs, "should not be equal") + } + + #[test] + fn should_be_able_to_default() { + let defaulted = ProtocolVersion::default(); + let expected = ProtocolVersion::new(SemVer::new(0, 0, 0)); + assert_eq!(defaulted, 
expected, "should be equal") + } + + #[test] + fn should_be_able_to_compare_relative_value() { + let lhs = ProtocolVersion::new(SemVer::new(2, 0, 0)); + let rhs = ProtocolVersion::new(SemVer::new(1, 0, 0)); + assert!(lhs > rhs, "should be gt"); + let rhs = ProtocolVersion::new(SemVer::new(2, 0, 0)); + assert!(lhs >= rhs, "should be gte"); + assert!(lhs <= rhs, "should be lte"); + let lhs = ProtocolVersion::new(SemVer::new(1, 0, 0)); + assert!(lhs < rhs, "should be lt"); + } + + #[test] + fn should_follow_major_version_upgrade() { + // If the upgrade protocol version is lower than or the same as EE's current in-use protocol + // version the upgrade is rejected and an error is returned; this includes the special case + // of a defaulted protocol version ( 0.0.0 ). + let prev = ProtocolVersion::new(SemVer::new(1, 0, 0)); + let next = ProtocolVersion::new(SemVer::new(2, 0, 0)); + assert!( + prev.check_next_version(&next).is_major_version(), + "should be major version" + ); + } + + #[test] + fn should_reject_if_major_version_decreases() { + let prev = ProtocolVersion::new(SemVer::new(10, 0, 0)); + let next = ProtocolVersion::new(SemVer::new(9, 0, 0)); + // Major version must not decrease ... + assert_eq!(prev.check_next_version(&next), VersionCheckResult::Invalid); + } + + #[test] + fn should_check_follows_minor_version_upgrade() { + // [major version] may remain the same in the case of a minor or patch version increase. 
+ + // Minor version must not decrease within the same major version + let prev = ProtocolVersion::new(SemVer::new(1, 1, 0)); + let next = ProtocolVersion::new(SemVer::new(1, 2, 0)); + + let value = prev.check_next_version(&next); + assert!(!value.is_invalid(), "should be valid"); + assert!(!value.is_major_version(), "should not be a major version"); + } + + #[test] + fn should_not_care_if_minor_bump_resets_patch() { + let prev = ProtocolVersion::new(SemVer::new(1, 2, 0)); + let next = ProtocolVersion::new(SemVer::new(1, 3, 1)); + assert_eq!( + prev.check_next_version(&next), + VersionCheckResult::Valid { + is_major_version: false + } + ); + + let prev = ProtocolVersion::new(SemVer::new(1, 20, 42)); + let next = ProtocolVersion::new(SemVer::new(1, 30, 43)); + assert_eq!( + prev.check_next_version(&next), + VersionCheckResult::Valid { + is_major_version: false + } + ); + } + + #[test] + fn should_not_care_if_major_bump_resets_minor_or_patch() { + // A major version increase resets both the minor and patch versions to ( 0.0 ). + let prev = ProtocolVersion::new(SemVer::new(1, 0, 0)); + let next = ProtocolVersion::new(SemVer::new(2, 1, 0)); + assert_eq!( + prev.check_next_version(&next), + VersionCheckResult::Valid { + is_major_version: true + } + ); + + let next = ProtocolVersion::new(SemVer::new(2, 0, 1)); + assert_eq!( + prev.check_next_version(&next), + VersionCheckResult::Valid { + is_major_version: true + } + ); + + let next = ProtocolVersion::new(SemVer::new(2, 1, 1)); + assert_eq!( + prev.check_next_version(&next), + VersionCheckResult::Valid { + is_major_version: true + } + ); + } + + #[test] + fn should_reject_patch_version_rollback() { + // Patch version must not decrease or remain the same within the same major and minor + // version pair, but may skip. 
+ let prev = ProtocolVersion::new(SemVer::new(1, 0, 42)); + let next = ProtocolVersion::new(SemVer::new(1, 0, 41)); + assert_eq!(prev.check_next_version(&next), VersionCheckResult::Invalid); + let next = ProtocolVersion::new(SemVer::new(1, 0, 13)); + assert_eq!(prev.check_next_version(&next), VersionCheckResult::Invalid); + } + + #[test] + fn should_accept_patch_version_update_with_optional_code() { + let prev = ProtocolVersion::new(SemVer::new(1, 0, 0)); + let next = ProtocolVersion::new(SemVer::new(1, 0, 1)); + let value = prev.check_next_version(&next); + assert!(!value.is_invalid(), "should be valid"); + assert!(!value.is_major_version(), "should not be a major version"); + + let prev = ProtocolVersion::new(SemVer::new(1, 0, 8)); + let next = ProtocolVersion::new(SemVer::new(1, 0, 42)); + let value = prev.check_next_version(&next); + assert!(!value.is_invalid(), "should be valid"); + assert!(!value.is_major_version(), "should not be a major version"); + } + + #[test] + fn should_accept_minor_version_update_with_optional_code() { + // installer is optional for minor bump + let prev = ProtocolVersion::new(SemVer::new(1, 0, 0)); + let next = ProtocolVersion::new(SemVer::new(1, 1, 0)); + let value = prev.check_next_version(&next); + assert!(!value.is_invalid(), "should be valid"); + assert!(!value.is_major_version(), "should not be a major version"); + + let prev = ProtocolVersion::new(SemVer::new(3, 98, 0)); + let next = ProtocolVersion::new(SemVer::new(3, 99, 0)); + let value = prev.check_next_version(&next); + assert!(!value.is_invalid(), "should be valid"); + assert!(!value.is_major_version(), "should not be a major version"); + } + + #[test] + fn should_allow_skip_minor_version_within_major_version() { + let prev = ProtocolVersion::new(SemVer::new(1, 1, 0)); + + let next = ProtocolVersion::new(SemVer::new(1, 3, 0)); + assert_eq!( + prev.check_next_version(&next), + VersionCheckResult::Valid { + is_major_version: false + } + ); + + let next = 
ProtocolVersion::new(SemVer::new(1, 7, 0)); + assert_eq!( + prev.check_next_version(&next), + VersionCheckResult::Valid { + is_major_version: false + } + ); + } + + #[test] + fn should_allow_skip_patch_version_within_minor_version() { + let prev = ProtocolVersion::new(SemVer::new(1, 1, 0)); + + let next = ProtocolVersion::new(SemVer::new(1, 1, 2)); + assert_eq!( + prev.check_next_version(&next), + VersionCheckResult::Valid { + is_major_version: false + } + ); + } + + #[test] + fn should_allow_skipped_minor_and_patch_on_major_bump() { + // skip minor + let prev = ProtocolVersion::new(SemVer::new(1, 0, 0)); + let next = ProtocolVersion::new(SemVer::new(2, 1, 0)); + assert_eq!( + prev.check_next_version(&next), + VersionCheckResult::Valid { + is_major_version: true + } + ); + + // skip patch + let prev = ProtocolVersion::new(SemVer::new(1, 0, 0)); + let next = ProtocolVersion::new(SemVer::new(2, 0, 1)); + assert_eq!( + prev.check_next_version(&next), + VersionCheckResult::Valid { + is_major_version: true + } + ); + + // skip many minors and patches + let prev = ProtocolVersion::new(SemVer::new(1, 0, 0)); + let next = ProtocolVersion::new(SemVer::new(2, 3, 10)); + assert_eq!( + prev.check_next_version(&next), + VersionCheckResult::Valid { + is_major_version: true + } + ); + } + + #[test] + fn should_allow_code_on_major_update() { + // major upgrade requires installer to be present + let prev = ProtocolVersion::new(SemVer::new(1, 0, 0)); + let next = ProtocolVersion::new(SemVer::new(2, 0, 0)); + assert!( + prev.check_next_version(&next).is_major_version(), + "should be major version" + ); + + let prev = ProtocolVersion::new(SemVer::new(2, 99, 99)); + let next = ProtocolVersion::new(SemVer::new(3, 0, 0)); + assert!( + prev.check_next_version(&next).is_major_version(), + "should be major version" + ); + } + + #[test] + fn should_not_skip_major_version() { + // can bump only by 1 + let prev = ProtocolVersion::new(SemVer::new(1, 0, 0)); + let next = 
ProtocolVersion::new(SemVer::new(3, 0, 0)); + assert_eq!(prev.check_next_version(&next), VersionCheckResult::Invalid); + } + + #[test] + fn should_reject_major_version_rollback() { + // can bump forward + let prev = ProtocolVersion::new(SemVer::new(2, 0, 0)); + let next = ProtocolVersion::new(SemVer::new(0, 0, 0)); + assert_eq!(prev.check_next_version(&next), VersionCheckResult::Invalid); + } + + #[test] + fn should_check_same_version_is_invalid() { + for ver in &[ + ProtocolVersion::from_parts(1, 0, 0), + ProtocolVersion::from_parts(1, 2, 0), + ProtocolVersion::from_parts(1, 2, 3), + ] { + assert_eq!(ver.check_next_version(ver), VersionCheckResult::Invalid); + } + } + + #[test] + fn should_not_be_compatible_with_different_major_version() { + let current = ProtocolVersion::from_parts(1, 2, 3); + let other = ProtocolVersion::from_parts(2, 5, 6); + assert!(!current.is_compatible_with(&other)); + + let current = ProtocolVersion::from_parts(1, 0, 0); + let other = ProtocolVersion::from_parts(2, 0, 0); + assert!(!current.is_compatible_with(&other)); + } + + #[test] + fn should_be_compatible_with_equal_major_version_backwards() { + let current = ProtocolVersion::from_parts(1, 99, 99); + let other = ProtocolVersion::from_parts(1, 0, 0); + assert!(current.is_compatible_with(&other)); + } + + #[test] + fn should_be_compatible_with_equal_major_version_forwards() { + let current = ProtocolVersion::from_parts(1, 0, 0); + let other = ProtocolVersion::from_parts(1, 99, 99); + assert!(current.is_compatible_with(&other)); + } + + #[test] + fn should_serialize_to_json_properly() { + let protocol_version = ProtocolVersion::from_parts(1, 1, 1); + let json = serde_json::to_string(&protocol_version).unwrap(); + let expected = "\"1.1.1\""; + assert_eq!(json, expected); + } + + #[test] + fn serialize_roundtrip() { + let protocol_version = ProtocolVersion::from_parts(1, 1, 1); + let serialized_json = serde_json::to_string(&protocol_version).unwrap(); + assert_eq!( + protocol_version, + 
serde_json::from_str(&serialized_json).unwrap() + ); + + let serialized_bincode = bincode::serialize(&protocol_version).unwrap(); + assert_eq!( + protocol_version, + bincode::deserialize(&serialized_bincode).unwrap() + ); + } +} diff --git a/casper_types_ver_2_0/src/reactor_state.rs b/casper_types_ver_2_0/src/reactor_state.rs new file mode 100644 index 00000000..19de98d8 --- /dev/null +++ b/casper_types_ver_2_0/src/reactor_state.rs @@ -0,0 +1,109 @@ +use crate::bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}; +use alloc::vec::Vec; +#[cfg(feature = "datasize")] +use datasize::DataSize; +use derive_more::Display; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(test)] +use rand::Rng; + +#[cfg(test)] +use crate::testing::TestRng; + +/// The state of the reactor. +#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug, Display)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum ReactorState { + /// Get all components and reactor state set up on start. + Initialize, + /// Orient to the network and attempt to catch up to tip. + CatchUp, + /// Running commit upgrade and creating immediate switch block. + Upgrading, + /// Stay caught up with tip. + KeepUp, + /// Node is currently caught up and is an active validator. + Validate, + /// Node should be shut down for upgrade. 
+ ShutdownForUpgrade, +} + +impl ReactorState { + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..6) { + 0 => Self::Initialize, + 1 => Self::CatchUp, + 2 => Self::Upgrading, + 3 => Self::KeepUp, + 4 => Self::Validate, + 5 => Self::ShutdownForUpgrade, + _ => panic!(), + } + } +} + +const INITIALIZE_TAG: u8 = 0; +const CATCHUP_TAG: u8 = 1; +const UPGRADING_TAG: u8 = 2; +const KEEPUP_TAG: u8 = 3; +const VALIDATE_TAG: u8 = 4; +const SHUTDOWN_FOR_UPGRADE_TAG: u8 = 5; + +impl ToBytes for ReactorState { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + ReactorState::Initialize => INITIALIZE_TAG, + ReactorState::CatchUp => CATCHUP_TAG, + ReactorState::Upgrading => UPGRADING_TAG, + ReactorState::KeepUp => KEEPUP_TAG, + ReactorState::Validate => VALIDATE_TAG, + ReactorState::ShutdownForUpgrade => SHUTDOWN_FOR_UPGRADE_TAG, + } + .write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + } +} + +impl FromBytes for ReactorState { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + let reactor_state = match tag { + INITIALIZE_TAG => ReactorState::Initialize, + CATCHUP_TAG => ReactorState::CatchUp, + UPGRADING_TAG => ReactorState::Upgrading, + KEEPUP_TAG => ReactorState::KeepUp, + VALIDATE_TAG => ReactorState::Validate, + SHUTDOWN_FOR_UPGRADE_TAG => ReactorState::ShutdownForUpgrade, + _ => return Err(bytesrepr::Error::NotRepresentable), + }; + Ok((reactor_state, remainder)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = ReactorState::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git 
a/casper_types_ver_2_0/src/semver.rs b/casper_types_ver_2_0/src/semver.rs new file mode 100644 index 00000000..5feafe53 --- /dev/null +++ b/casper_types_ver_2_0/src/semver.rs @@ -0,0 +1,152 @@ +use alloc::vec::Vec; +use core::{ + convert::TryFrom, + fmt::{self, Display, Formatter}, + num::ParseIntError, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +use crate::bytesrepr::{self, Error, FromBytes, ToBytes, U32_SERIALIZED_LENGTH}; + +/// Length of SemVer when serialized +pub const SEM_VER_SERIALIZED_LENGTH: usize = 3 * U32_SERIALIZED_LENGTH; + +/// A struct for semantic versioning. +#[derive( + Copy, Clone, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct SemVer { + /// Major version. + pub major: u32, + /// Minor version. + pub minor: u32, + /// Patch version. + pub patch: u32, +} + +impl SemVer { + /// Version 1.0.0. + pub const V1_0_0: SemVer = SemVer { + major: 1, + minor: 0, + patch: 0, + }; + + /// Constructs a new `SemVer` from the given semver parts. 
+ pub const fn new(major: u32, minor: u32, patch: u32) -> SemVer { + SemVer { + major, + minor, + patch, + } + } +} + +impl ToBytes for SemVer { + fn to_bytes(&self) -> Result, Error> { + let mut ret = bytesrepr::unchecked_allocate_buffer(self); + ret.append(&mut self.major.to_bytes()?); + ret.append(&mut self.minor.to_bytes()?); + ret.append(&mut self.patch.to_bytes()?); + Ok(ret) + } + + fn serialized_length(&self) -> usize { + SEM_VER_SERIALIZED_LENGTH + } +} + +impl FromBytes for SemVer { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (major, rem): (u32, &[u8]) = FromBytes::from_bytes(bytes)?; + let (minor, rem): (u32, &[u8]) = FromBytes::from_bytes(rem)?; + let (patch, rem): (u32, &[u8]) = FromBytes::from_bytes(rem)?; + Ok((SemVer::new(major, minor, patch), rem)) + } +} + +impl Display for SemVer { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!(f, "{}.{}.{}", self.major, self.minor, self.patch) + } +} + +/// Parsing error when creating a SemVer. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum ParseSemVerError { + /// Invalid version format. + InvalidVersionFormat, + /// Error parsing an integer. 
+ ParseIntError(ParseIntError), +} + +impl Display for ParseSemVerError { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + ParseSemVerError::InvalidVersionFormat => formatter.write_str("invalid version format"), + ParseSemVerError::ParseIntError(error) => error.fmt(formatter), + } + } +} + +impl From for ParseSemVerError { + fn from(error: ParseIntError) -> ParseSemVerError { + ParseSemVerError::ParseIntError(error) + } +} + +impl TryFrom<&str> for SemVer { + type Error = ParseSemVerError; + fn try_from(value: &str) -> Result { + let tokens: Vec<&str> = value.split('.').collect(); + if tokens.len() != 3 { + return Err(ParseSemVerError::InvalidVersionFormat); + } + + Ok(SemVer { + major: tokens[0].parse()?, + minor: tokens[1].parse()?, + patch: tokens[2].parse()?, + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use core::convert::TryInto; + + #[test] + fn should_compare_semver_versions() { + assert!(SemVer::new(0, 0, 0) < SemVer::new(1, 2, 3)); + assert!(SemVer::new(1, 1, 0) < SemVer::new(1, 2, 0)); + assert!(SemVer::new(1, 0, 0) < SemVer::new(1, 2, 0)); + assert!(SemVer::new(1, 0, 0) < SemVer::new(1, 2, 3)); + assert!(SemVer::new(1, 2, 0) < SemVer::new(1, 2, 3)); + assert!(SemVer::new(1, 2, 3) == SemVer::new(1, 2, 3)); + assert!(SemVer::new(1, 2, 3) >= SemVer::new(1, 2, 3)); + assert!(SemVer::new(1, 2, 3) <= SemVer::new(1, 2, 3)); + assert!(SemVer::new(2, 0, 0) >= SemVer::new(1, 99, 99)); + assert!(SemVer::new(2, 0, 0) > SemVer::new(1, 99, 99)); + } + + #[test] + fn parse_from_string() { + let ver1: SemVer = "100.20.3".try_into().expect("should parse"); + assert_eq!(ver1, SemVer::new(100, 20, 3)); + let ver2: SemVer = "0.0.1".try_into().expect("should parse"); + assert_eq!(ver2, SemVer::new(0, 0, 1)); + + assert!(SemVer::try_from("1.a.2.3").is_err()); + assert!(SemVer::try_from("1. 
2.3").is_err()); + assert!(SemVer::try_from("12345124361461.0.1").is_err()); + assert!(SemVer::try_from("1.2.3.4").is_err()); + assert!(SemVer::try_from("1.2").is_err()); + assert!(SemVer::try_from("1").is_err()); + assert!(SemVer::try_from("0").is_err()); + } +} diff --git a/casper_types_ver_2_0/src/serde_helpers.rs b/casper_types_ver_2_0/src/serde_helpers.rs new file mode 100644 index 00000000..b1e94baf --- /dev/null +++ b/casper_types_ver_2_0/src/serde_helpers.rs @@ -0,0 +1,109 @@ +use alloc::string::String; +use core::convert::TryFrom; + +use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; + +use crate::Digest; + +pub(crate) mod raw_32_byte_array { + use super::*; + + pub(crate) fn serialize( + array: &[u8; 32], + serializer: S, + ) -> Result { + if serializer.is_human_readable() { + base16::encode_lower(array).serialize(serializer) + } else { + array.serialize(serializer) + } + } + + pub(crate) fn deserialize<'de, D: Deserializer<'de>>( + deserializer: D, + ) -> Result<[u8; 32], D::Error> { + if deserializer.is_human_readable() { + let hex_string = String::deserialize(deserializer)?; + let bytes = base16::decode(hex_string.as_bytes()).map_err(SerdeError::custom)?; + <[u8; 32]>::try_from(bytes.as_ref()).map_err(SerdeError::custom) + } else { + <[u8; 32]>::deserialize(deserializer) + } + } +} + +pub(crate) mod contract_hash_as_digest { + use super::*; + use crate::AddressableEntityHash; + + pub(crate) fn serialize( + contract_hash: &AddressableEntityHash, + serializer: S, + ) -> Result { + Digest::from(contract_hash.value()).serialize(serializer) + } + + pub(crate) fn deserialize<'de, D: Deserializer<'de>>( + deserializer: D, + ) -> Result { + let digest = Digest::deserialize(deserializer)?; + Ok(AddressableEntityHash::new(digest.value())) + } +} + +pub(crate) mod contract_package_hash_as_digest { + use super::*; + use crate::PackageHash; + + pub(crate) fn serialize( + contract_package_hash: &PackageHash, + serializer: S, + ) 
-> Result { + Digest::from(contract_package_hash.value()).serialize(serializer) + } + + pub(crate) fn deserialize<'de, D: Deserializer<'de>>( + deserializer: D, + ) -> Result { + let digest = Digest::deserialize(deserializer)?; + Ok(PackageHash::new(digest.value())) + } +} + +/// This module allows `DeployHash`es to be serialized and deserialized using the underlying +/// `[u8; 32]` rather than delegating to the wrapped `Digest`, which in turn delegates to a +/// `Vec` for legacy reasons. +/// +/// This is required as the `DeployHash` defined in `casper-types` up until v4.0.0 used the array +/// form, while the `DeployHash` defined in `casper-node` during this period delegated to `Digest`. +/// +/// We use this module in places where the old `casper_types_ver_2_0::DeployHash` was held as a member of a +/// type which implements `Serialize` and/or `Deserialize`. +pub(crate) mod deploy_hash_as_array { + use super::*; + use crate::DeployHash; + + pub(crate) fn serialize( + deploy_hash: &DeployHash, + serializer: S, + ) -> Result { + if serializer.is_human_readable() { + base16::encode_lower(&deploy_hash.inner().value()).serialize(serializer) + } else { + deploy_hash.inner().value().serialize(serializer) + } + } + + pub(crate) fn deserialize<'de, D: Deserializer<'de>>( + deserializer: D, + ) -> Result { + let bytes = if deserializer.is_human_readable() { + let hex_string = String::deserialize(deserializer)?; + let vec_bytes = base16::decode(hex_string.as_bytes()).map_err(SerdeError::custom)?; + <[u8; DeployHash::LENGTH]>::try_from(vec_bytes.as_ref()).map_err(SerdeError::custom)? + } else { + <[u8; DeployHash::LENGTH]>::deserialize(deserializer)? 
+ }; + Ok(DeployHash::new(Digest::from(bytes))) + } +} diff --git a/casper_types_ver_2_0/src/stored_value.rs b/casper_types_ver_2_0/src/stored_value.rs new file mode 100644 index 00000000..7725fb32 --- /dev/null +++ b/casper_types_ver_2_0/src/stored_value.rs @@ -0,0 +1,899 @@ +mod global_state_identifier; +mod type_mismatch; + +use alloc::{ + boxed::Box, + string::{String, ToString}, + vec::Vec, +}; +use core::{convert::TryFrom, fmt::Debug}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{de, ser, Deserialize, Deserializer, Serialize, Serializer}; +use serde_bytes::ByteBuf; + +use crate::{ + account::Account, + bytesrepr::{self, Error, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + contract_messages::{MessageChecksum, MessageTopicSummary}, + contract_wasm::ContractWasm, + contracts::{Contract, ContractPackage}, + package::Package, + system::auction::{Bid, BidKind, EraInfo, UnbondingPurse, WithdrawPurse}, + AddressableEntity, ByteCode, CLValue, DeployInfo, Transfer, +}; +pub use global_state_identifier::GlobalStateIdentifier; +pub use type_mismatch::TypeMismatch; + +#[allow(clippy::large_enum_variant)] +#[repr(u8)] +enum Tag { + CLValue = 0, + Account = 1, + ContractWasm = 2, + Contract = 3, + ContractPackage = 4, + Transfer = 5, + DeployInfo = 6, + EraInfo = 7, + Bid = 8, + Withdraw = 9, + Unbonding = 10, + AddressableEntity = 11, + BidKind = 12, + Package = 13, + ByteCode = 14, + MessageTopic = 15, + Message = 16, +} + +/// A value stored in Global State. +#[allow(clippy::large_enum_variant)] +#[derive(Eq, PartialEq, Clone, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(with = "serde_helpers::BinarySerHelper") +)] +pub enum StoredValue { + /// A CLValue. + CLValue(CLValue), + /// An account. + Account(Account), + /// Contract wasm. + ContractWasm(ContractWasm), + /// A contract. 
+ Contract(Contract), + /// A contract package. + ContractPackage(ContractPackage), + /// A `Transfer`. + Transfer(Transfer), + /// Info about a deploy. + DeployInfo(DeployInfo), + /// Info about an era. + EraInfo(EraInfo), + /// Variant that stores [`Bid`]. + Bid(Box), + /// Variant that stores withdraw information. + Withdraw(Vec), + /// Unbonding information. + Unbonding(Vec), + /// An `AddressableEntity`. + AddressableEntity(AddressableEntity), + /// Variant that stores [`BidKind`]. + BidKind(BidKind), + /// A `Package`. + Package(Package), + /// A record of byte code. + ByteCode(ByteCode), + /// Variant that stores a message topic. + MessageTopic(MessageTopicSummary), + /// Variant that stores a message digest. + Message(MessageChecksum), +} + +impl StoredValue { + /// Returns a reference to the wrapped `CLValue` if this is a `CLValue` variant. + pub fn as_cl_value(&self) -> Option<&CLValue> { + match self { + StoredValue::CLValue(cl_value) => Some(cl_value), + _ => None, + } + } + + /// Returns a reference to the wrapped `Account` if this is an `Account` variant. + pub fn as_account(&self) -> Option<&Account> { + match self { + StoredValue::Account(account) => Some(account), + _ => None, + } + } + + /// Returns a reference to the wrapped `ByteCode` if this is a `ByteCode` variant. + pub fn as_byte_code(&self) -> Option<&ByteCode> { + match self { + StoredValue::ByteCode(byte_code) => Some(byte_code), + _ => None, + } + } + + /// Returns a reference to the wrapped `Contract` if this is a `Contract` variant. + pub fn as_contract(&self) -> Option<&Contract> { + match self { + StoredValue::Contract(contract) => Some(contract), + _ => None, + } + } + + /// Returns a reference to the wrapped `Package` if this is a `Package` variant. + pub fn as_package(&self) -> Option<&Package> { + match self { + StoredValue::Package(package) => Some(package), + _ => None, + } + } + + /// Returns a reference to the wrapped `Transfer` if this is a `Transfer` variant. 
+ pub fn as_transfer(&self) -> Option<&Transfer> { + match self { + StoredValue::Transfer(transfer) => Some(transfer), + _ => None, + } + } + + /// Returns a reference to the wrapped `DeployInfo` if this is a `DeployInfo` variant. + pub fn as_deploy_info(&self) -> Option<&DeployInfo> { + match self { + StoredValue::DeployInfo(deploy_info) => Some(deploy_info), + _ => None, + } + } + + /// Returns a reference to the wrapped `EraInfo` if this is an `EraInfo` variant. + pub fn as_era_info(&self) -> Option<&EraInfo> { + match self { + StoredValue::EraInfo(era_info) => Some(era_info), + _ => None, + } + } + + /// Returns a reference to the wrapped `Bid` if this is a `Bid` variant. + pub fn as_bid(&self) -> Option<&Bid> { + match self { + StoredValue::Bid(bid) => Some(bid), + _ => None, + } + } + + /// Returns a reference to the wrapped list of `WithdrawPurse`s if this is a `Withdraw` variant. + pub fn as_withdraw(&self) -> Option<&Vec> { + match self { + StoredValue::Withdraw(withdraw_purses) => Some(withdraw_purses), + _ => None, + } + } + + /// Returns a reference to the wrapped list of `UnbondingPurse`s if this is an `Unbonding` + /// variant. + pub fn as_unbonding(&self) -> Option<&Vec> { + match self { + StoredValue::Unbonding(unbonding_purses) => Some(unbonding_purses), + _ => None, + } + } + + /// Returns a reference to the wrapped `AddressableEntity` if this is an `AddressableEntity` + /// variant. + pub fn as_addressable_entity(&self) -> Option<&AddressableEntity> { + match self { + StoredValue::AddressableEntity(entity) => Some(entity), + _ => None, + } + } + + /// Returns a reference to the wrapped `MessageTopicSummary` if this is a `MessageTopic` + /// variant. + pub fn as_message_topic_summary(&self) -> Option<&MessageTopicSummary> { + match self { + StoredValue::MessageTopic(summary) => Some(summary), + _ => None, + } + } + + /// Returns a reference to the wrapped `MessageChecksum` if this is a `Message` + /// variant. 
+ pub fn as_message_checksum(&self) -> Option<&MessageChecksum> { + match self { + StoredValue::Message(checksum) => Some(checksum), + _ => None, + } + } + + /// Returns a reference to the wrapped `BidKind` if this is a `BidKind` variant. + pub fn as_bid_kind(&self) -> Option<&BidKind> { + match self { + StoredValue::BidKind(bid_kind) => Some(bid_kind), + _ => None, + } + } + + /// Returns the `CLValue` if this is a `CLValue` variant. + pub fn into_cl_value(self) -> Option { + match self { + StoredValue::CLValue(cl_value) => Some(cl_value), + _ => None, + } + } + + /// Returns the `Account` if this is an `Account` variant. + pub fn into_account(self) -> Option { + match self { + StoredValue::Account(account) => Some(account), + _ => None, + } + } + + /// Returns the `ContractWasm` if this is a `ContractWasm` variant. + pub fn into_contract_wasm(self) -> Option { + match self { + StoredValue::ContractWasm(contract_wasm) => Some(contract_wasm), + _ => None, + } + } + + /// Returns the `Contract` if this is a `Contract` variant. + pub fn into_contract(self) -> Option { + match self { + StoredValue::Contract(contract) => Some(contract), + _ => None, + } + } + + /// Returns the `Package` if this is a `Package` variant. + pub fn into_contract_package(self) -> Option { + match self { + StoredValue::ContractPackage(contract_package) => Some(contract_package), + _ => None, + } + } + + /// Returns the `Transfer` if this is a `Transfer` variant. + pub fn into_transfer(self) -> Option { + match self { + StoredValue::Transfer(transfer) => Some(transfer), + _ => None, + } + } + + /// Returns the `DeployInfo` if this is a `DeployInfo` variant. + pub fn into_deploy_info(self) -> Option { + match self { + StoredValue::DeployInfo(deploy_info) => Some(deploy_info), + _ => None, + } + } + + /// Returns the `EraInfo` if this is an `EraInfo` variant. 
+ pub fn into_era_info(self) -> Option { + match self { + StoredValue::EraInfo(era_info) => Some(era_info), + _ => None, + } + } + + /// Returns the `Bid` if this is a `Bid` variant. + pub fn into_bid(self) -> Option { + match self { + StoredValue::Bid(bid) => Some(*bid), + _ => None, + } + } + + /// Returns the list of `WithdrawPurse`s if this is a `Withdraw` variant. + pub fn into_withdraw(self) -> Option> { + match self { + StoredValue::Withdraw(withdraw_purses) => Some(withdraw_purses), + _ => None, + } + } + + /// Returns the list of `UnbondingPurse`s if this is an `Unbonding` variant. + pub fn into_unbonding(self) -> Option> { + match self { + StoredValue::Unbonding(unbonding_purses) => Some(unbonding_purses), + _ => None, + } + } + + /// Returns the `AddressableEntity` if this is an `AddressableEntity` variant. + pub fn into_addressable_entity(self) -> Option { + match self { + StoredValue::AddressableEntity(entity) => Some(entity), + _ => None, + } + } + + /// Returns the `BidKind` if this is a `BidKind` variant. + pub fn into_bid_kind(self) -> Option { + match self { + StoredValue::BidKind(bid_kind) => Some(bid_kind), + _ => None, + } + } + + /// Returns the type name of the [`StoredValue`] enum variant. 
+ /// + /// For [`CLValue`] variants it will return the name of the [`CLType`](crate::cl_type::CLType) + pub fn type_name(&self) -> String { + match self { + StoredValue::CLValue(cl_value) => format!("{:?}", cl_value.cl_type()), + StoredValue::Account(_) => "Account".to_string(), + StoredValue::ContractWasm(_) => "ContractWasm".to_string(), + StoredValue::Contract(_) => "Contract".to_string(), + StoredValue::ContractPackage(_) => "ContractPackage".to_string(), + StoredValue::Transfer(_) => "Transfer".to_string(), + StoredValue::DeployInfo(_) => "DeployInfo".to_string(), + StoredValue::EraInfo(_) => "EraInfo".to_string(), + StoredValue::Bid(_) => "Bid".to_string(), + StoredValue::Withdraw(_) => "Withdraw".to_string(), + StoredValue::Unbonding(_) => "Unbonding".to_string(), + StoredValue::AddressableEntity(_) => "AddressableEntity".to_string(), + StoredValue::BidKind(_) => "BidKind".to_string(), + StoredValue::ByteCode(_) => "ByteCode".to_string(), + StoredValue::Package(_) => "Package".to_string(), + StoredValue::MessageTopic(_) => "MessageTopic".to_string(), + StoredValue::Message(_) => "Message".to_string(), + } + } + + fn tag(&self) -> Tag { + match self { + StoredValue::CLValue(_) => Tag::CLValue, + StoredValue::Account(_) => Tag::Account, + StoredValue::ContractWasm(_) => Tag::ContractWasm, + StoredValue::ContractPackage(_) => Tag::ContractPackage, + StoredValue::Contract(_) => Tag::Contract, + StoredValue::Transfer(_) => Tag::Transfer, + StoredValue::DeployInfo(_) => Tag::DeployInfo, + StoredValue::EraInfo(_) => Tag::EraInfo, + StoredValue::Bid(_) => Tag::Bid, + StoredValue::Withdraw(_) => Tag::Withdraw, + StoredValue::Unbonding(_) => Tag::Unbonding, + StoredValue::AddressableEntity(_) => Tag::AddressableEntity, + StoredValue::BidKind(_) => Tag::BidKind, + StoredValue::Package(_) => Tag::Package, + StoredValue::ByteCode(_) => Tag::ByteCode, + StoredValue::MessageTopic(_) => Tag::MessageTopic, + StoredValue::Message(_) => Tag::Message, + } + } +} + +impl From 
for StoredValue { + fn from(value: CLValue) -> StoredValue { + StoredValue::CLValue(value) + } +} +impl From for StoredValue { + fn from(value: Account) -> StoredValue { + StoredValue::Account(value) + } +} + +impl From for StoredValue { + fn from(value: ContractWasm) -> Self { + StoredValue::ContractWasm(value) + } +} + +impl From for StoredValue { + fn from(value: ContractPackage) -> Self { + StoredValue::ContractPackage(value) + } +} + +impl From for StoredValue { + fn from(value: Contract) -> Self { + StoredValue::Contract(value) + } +} + +impl From for StoredValue { + fn from(value: AddressableEntity) -> StoredValue { + StoredValue::AddressableEntity(value) + } +} +impl From for StoredValue { + fn from(value: Package) -> StoredValue { + StoredValue::Package(value) + } +} + +impl From for StoredValue { + fn from(bid: Bid) -> StoredValue { + StoredValue::Bid(Box::new(bid)) + } +} + +impl From for StoredValue { + fn from(bid_kind: BidKind) -> StoredValue { + StoredValue::BidKind(bid_kind) + } +} + +impl From for StoredValue { + fn from(value: ByteCode) -> StoredValue { + StoredValue::ByteCode(value) + } +} + +impl TryFrom for CLValue { + type Error = TypeMismatch; + + fn try_from(stored_value: StoredValue) -> Result { + let type_name = stored_value.type_name(); + match stored_value { + StoredValue::CLValue(cl_value) => Ok(cl_value), + StoredValue::Package(contract_package) => Ok(CLValue::from_t(contract_package) + .map_err(|_error| TypeMismatch::new("ContractPackage".to_string(), type_name))?), + _ => Err(TypeMismatch::new("CLValue".to_string(), type_name)), + } + } +} + +impl TryFrom for Account { + type Error = TypeMismatch; + + fn try_from(stored_value: StoredValue) -> Result { + match stored_value { + StoredValue::Account(account) => Ok(account), + _ => Err(TypeMismatch::new( + "Account".to_string(), + stored_value.type_name(), + )), + } + } +} + +impl TryFrom for ContractWasm { + type Error = TypeMismatch; + + fn try_from(stored_value: StoredValue) -> Result 
{ + match stored_value { + StoredValue::ContractWasm(contract_wasm) => Ok(contract_wasm), + _ => Err(TypeMismatch::new( + "ContractWasm".to_string(), + stored_value.type_name(), + )), + } + } +} + +impl TryFrom for ByteCode { + type Error = TypeMismatch; + + fn try_from(stored_value: StoredValue) -> Result { + match stored_value { + StoredValue::ByteCode(byte_code) => Ok(byte_code), + _ => Err(TypeMismatch::new( + "ByteCode".to_string(), + stored_value.type_name(), + )), + } + } +} + +impl TryFrom for ContractPackage { + type Error = TypeMismatch; + + fn try_from(value: StoredValue) -> Result { + match value { + StoredValue::ContractPackage(contract_package) => Ok(contract_package), + _ => Err(TypeMismatch::new( + "ContractPackage".to_string(), + value.type_name(), + )), + } + } +} + +impl TryFrom for Contract { + type Error = TypeMismatch; + + fn try_from(stored_value: StoredValue) -> Result { + match stored_value { + StoredValue::Contract(contract) => Ok(contract), + _ => Err(TypeMismatch::new( + "Contract".to_string(), + stored_value.type_name(), + )), + } + } +} + +impl TryFrom for Package { + type Error = TypeMismatch; + + fn try_from(stored_value: StoredValue) -> Result { + match stored_value { + StoredValue::Package(contract_package) => Ok(contract_package), + _ => Err(TypeMismatch::new( + "ContractPackage".to_string(), + stored_value.type_name(), + )), + } + } +} + +impl TryFrom for AddressableEntity { + type Error = TypeMismatch; + + fn try_from(stored_value: StoredValue) -> Result { + match stored_value { + StoredValue::AddressableEntity(contract) => Ok(contract), + _ => Err(TypeMismatch::new( + "AddressableEntity".to_string(), + stored_value.type_name(), + )), + } + } +} + +impl TryFrom for Transfer { + type Error = TypeMismatch; + + fn try_from(value: StoredValue) -> Result { + match value { + StoredValue::Transfer(transfer) => Ok(transfer), + _ => Err(TypeMismatch::new("Transfer".to_string(), value.type_name())), + } + } +} + +impl TryFrom for 
DeployInfo { + type Error = TypeMismatch; + + fn try_from(value: StoredValue) -> Result { + match value { + StoredValue::DeployInfo(deploy_info) => Ok(deploy_info), + _ => Err(TypeMismatch::new( + "DeployInfo".to_string(), + value.type_name(), + )), + } + } +} + +impl TryFrom for EraInfo { + type Error = TypeMismatch; + + fn try_from(value: StoredValue) -> Result { + match value { + StoredValue::EraInfo(era_info) => Ok(era_info), + _ => Err(TypeMismatch::new("EraInfo".to_string(), value.type_name())), + } + } +} + +impl TryFrom for Bid { + type Error = TypeMismatch; + + fn try_from(value: StoredValue) -> Result { + match value { + StoredValue::Bid(bid) => Ok(*bid), + _ => Err(TypeMismatch::new("Bid".to_string(), value.type_name())), + } + } +} + +impl TryFrom for BidKind { + type Error = TypeMismatch; + + fn try_from(value: StoredValue) -> Result { + match value { + StoredValue::BidKind(bid_kind) => Ok(bid_kind), + _ => Err(TypeMismatch::new("BidKind".to_string(), value.type_name())), + } + } +} + +impl ToBytes for StoredValue { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + StoredValue::CLValue(cl_value) => cl_value.serialized_length(), + StoredValue::Account(account) => account.serialized_length(), + StoredValue::ContractWasm(contract_wasm) => contract_wasm.serialized_length(), + StoredValue::Contract(contract_header) => contract_header.serialized_length(), + StoredValue::ContractPackage(contract_package) => { + contract_package.serialized_length() + } + StoredValue::Transfer(transfer) => transfer.serialized_length(), + StoredValue::DeployInfo(deploy_info) => deploy_info.serialized_length(), + StoredValue::EraInfo(era_info) => era_info.serialized_length(), + StoredValue::Bid(bid) => bid.serialized_length(), + StoredValue::Withdraw(withdraw_purses) => 
withdraw_purses.serialized_length(), + StoredValue::Unbonding(unbonding_purses) => unbonding_purses.serialized_length(), + StoredValue::AddressableEntity(entity) => entity.serialized_length(), + StoredValue::BidKind(bid_kind) => bid_kind.serialized_length(), + StoredValue::Package(package) => package.serialized_length(), + StoredValue::ByteCode(byte_code) => byte_code.serialized_length(), + StoredValue::MessageTopic(message_topic_summary) => { + message_topic_summary.serialized_length() + } + StoredValue::Message(message_digest) => message_digest.serialized_length(), + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + writer.push(self.tag() as u8); + match self { + StoredValue::CLValue(cl_value) => cl_value.write_bytes(writer)?, + StoredValue::Account(account) => account.write_bytes(writer)?, + StoredValue::ContractWasm(contract_wasm) => contract_wasm.write_bytes(writer)?, + StoredValue::Contract(contract_header) => contract_header.write_bytes(writer)?, + StoredValue::ContractPackage(contract_package) => { + contract_package.write_bytes(writer)? + } + StoredValue::Transfer(transfer) => transfer.write_bytes(writer)?, + StoredValue::DeployInfo(deploy_info) => deploy_info.write_bytes(writer)?, + StoredValue::EraInfo(era_info) => era_info.write_bytes(writer)?, + StoredValue::Bid(bid) => bid.write_bytes(writer)?, + StoredValue::Withdraw(unbonding_purses) => unbonding_purses.write_bytes(writer)?, + StoredValue::Unbonding(unbonding_purses) => unbonding_purses.write_bytes(writer)?, + StoredValue::AddressableEntity(entity) => entity.write_bytes(writer)?, + StoredValue::BidKind(bid_kind) => bid_kind.write_bytes(writer)?, + StoredValue::Package(package) => package.write_bytes(writer)?, + StoredValue::ByteCode(byte_code) => byte_code.write_bytes(writer)?, + StoredValue::MessageTopic(message_topic_summary) => { + message_topic_summary.write_bytes(writer)? 
+ } + StoredValue::Message(message_digest) => message_digest.write_bytes(writer)?, + }; + Ok(()) + } +} + +impl FromBytes for StoredValue { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + tag if tag == Tag::CLValue as u8 => CLValue::from_bytes(remainder) + .map(|(cl_value, remainder)| (StoredValue::CLValue(cl_value), remainder)), + tag if tag == Tag::Account as u8 => Account::from_bytes(remainder) + .map(|(account, remainder)| (StoredValue::Account(account), remainder)), + tag if tag == Tag::ContractWasm as u8 => { + ContractWasm::from_bytes(remainder).map(|(contract_wasm, remainder)| { + (StoredValue::ContractWasm(contract_wasm), remainder) + }) + } + tag if tag == Tag::ContractPackage as u8 => { + ContractPackage::from_bytes(remainder).map(|(contract_package, remainder)| { + (StoredValue::ContractPackage(contract_package), remainder) + }) + } + tag if tag == Tag::Contract as u8 => Contract::from_bytes(remainder) + .map(|(contract, remainder)| (StoredValue::Contract(contract), remainder)), + tag if tag == Tag::Transfer as u8 => Transfer::from_bytes(remainder) + .map(|(transfer, remainder)| (StoredValue::Transfer(transfer), remainder)), + tag if tag == Tag::DeployInfo as u8 => DeployInfo::from_bytes(remainder) + .map(|(deploy_info, remainder)| (StoredValue::DeployInfo(deploy_info), remainder)), + tag if tag == Tag::EraInfo as u8 => EraInfo::from_bytes(remainder) + .map(|(deploy_info, remainder)| (StoredValue::EraInfo(deploy_info), remainder)), + tag if tag == Tag::Bid as u8 => Bid::from_bytes(remainder) + .map(|(bid, remainder)| (StoredValue::Bid(Box::new(bid)), remainder)), + tag if tag == Tag::BidKind as u8 => BidKind::from_bytes(remainder) + .map(|(bid_kind, remainder)| (StoredValue::BidKind(bid_kind), remainder)), + tag if tag == Tag::Withdraw as u8 => { + Vec::::from_bytes(remainder).map(|(withdraw_purses, remainder)| { + (StoredValue::Withdraw(withdraw_purses), 
remainder) + }) + } + tag if tag == Tag::Unbonding as u8 => { + Vec::::from_bytes(remainder).map(|(unbonding_purses, remainder)| { + (StoredValue::Unbonding(unbonding_purses), remainder) + }) + } + tag if tag == Tag::AddressableEntity as u8 => AddressableEntity::from_bytes(remainder) + .map(|(entity, remainder)| (StoredValue::AddressableEntity(entity), remainder)), + tag if tag == Tag::Package as u8 => Package::from_bytes(remainder) + .map(|(package, remainder)| (StoredValue::Package(package), remainder)), + tag if tag == Tag::ByteCode as u8 => ByteCode::from_bytes(remainder) + .map(|(byte_code, remainder)| (StoredValue::ByteCode(byte_code), remainder)), + tag if tag == Tag::MessageTopic as u8 => MessageTopicSummary::from_bytes(remainder) + .map(|(message_summary, remainder)| { + (StoredValue::MessageTopic(message_summary), remainder) + }), + tag if tag == Tag::Message as u8 => MessageChecksum::from_bytes(remainder) + .map(|(checksum, remainder)| (StoredValue::Message(checksum), remainder)), + _ => Err(Error::Formatting), + } + } +} + +mod serde_helpers { + use super::*; + + #[derive(Serialize)] + pub(super) enum BinarySerHelper<'a> { + /// A CLValue. + CLValue(&'a CLValue), + /// An account. + Account(&'a Account), + ContractWasm(&'a ContractWasm), + /// A contract. + Contract(&'a Contract), + /// A `Package`. + ContractPackage(&'a ContractPackage), + /// A `Transfer`. + Transfer(&'a Transfer), + /// Info about a deploy. + DeployInfo(&'a DeployInfo), + /// Info about an era. + EraInfo(&'a EraInfo), + /// Variant that stores [`Bid`]. + Bid(&'a Bid), + /// Variant that stores withdraw information. + Withdraw(&'a Vec), + /// Unbonding information. + Unbonding(&'a Vec), + /// An `AddressableEntity`. + AddressableEntity(&'a AddressableEntity), + /// Variant that stores [`BidKind`]. + BidKind(&'a BidKind), + /// Package. + Package(&'a Package), + /// A record of byte code. + ByteCode(&'a ByteCode), + /// Variant that stores [`MessageTopicSummary`]. 
+ MessageTopic(&'a MessageTopicSummary), + /// Variant that stores a [`MessageChecksum`]. + Message(&'a MessageChecksum), + } + + #[derive(Deserialize)] + pub(super) enum BinaryDeserHelper { + /// A CLValue. + CLValue(CLValue), + /// An account. + Account(Account), + /// A contract wasm. + ContractWasm(ContractWasm), + /// A contract. + Contract(Contract), + /// A `Package`. + ContractPackage(ContractPackage), + /// A `Transfer`. + Transfer(Transfer), + /// Info about a deploy. + DeployInfo(DeployInfo), + /// Info about an era. + EraInfo(EraInfo), + /// Variant that stores [`Bid`]. + Bid(Box), + /// Variant that stores withdraw information. + Withdraw(Vec), + /// Unbonding information. + Unbonding(Vec), + /// An `AddressableEntity`. + AddressableEntity(AddressableEntity), + /// Variant that stores [`BidKind`]. + BidKind(BidKind), + /// A record of a Package. + Package(Package), + /// A record of byte code. + ByteCode(ByteCode), + /// Variant that stores [`MessageTopicSummary`]. + MessageTopic(MessageTopicSummary), + /// Variant that stores [`MessageChecksum`]. 
+ Message(MessageChecksum), + } + + impl<'a> From<&'a StoredValue> for BinarySerHelper<'a> { + fn from(stored_value: &'a StoredValue) -> Self { + match stored_value { + StoredValue::CLValue(payload) => BinarySerHelper::CLValue(payload), + StoredValue::Account(payload) => BinarySerHelper::Account(payload), + StoredValue::ContractWasm(payload) => BinarySerHelper::ContractWasm(payload), + StoredValue::Contract(payload) => BinarySerHelper::Contract(payload), + StoredValue::ContractPackage(payload) => BinarySerHelper::ContractPackage(payload), + StoredValue::Transfer(payload) => BinarySerHelper::Transfer(payload), + StoredValue::DeployInfo(payload) => BinarySerHelper::DeployInfo(payload), + StoredValue::EraInfo(payload) => BinarySerHelper::EraInfo(payload), + StoredValue::Bid(payload) => BinarySerHelper::Bid(payload), + StoredValue::Withdraw(payload) => BinarySerHelper::Withdraw(payload), + StoredValue::Unbonding(payload) => BinarySerHelper::Unbonding(payload), + StoredValue::AddressableEntity(payload) => { + BinarySerHelper::AddressableEntity(payload) + } + StoredValue::BidKind(payload) => BinarySerHelper::BidKind(payload), + StoredValue::Package(payload) => BinarySerHelper::Package(payload), + StoredValue::ByteCode(payload) => BinarySerHelper::ByteCode(payload), + StoredValue::MessageTopic(message_topic_summary) => { + BinarySerHelper::MessageTopic(message_topic_summary) + } + StoredValue::Message(message_digest) => BinarySerHelper::Message(message_digest), + } + } + } + + impl From for StoredValue { + fn from(helper: BinaryDeserHelper) -> Self { + match helper { + BinaryDeserHelper::CLValue(payload) => StoredValue::CLValue(payload), + BinaryDeserHelper::Account(payload) => StoredValue::Account(payload), + BinaryDeserHelper::ContractWasm(payload) => StoredValue::ContractWasm(payload), + BinaryDeserHelper::Contract(payload) => StoredValue::Contract(payload), + BinaryDeserHelper::ContractPackage(payload) => { + StoredValue::ContractPackage(payload) + } + 
BinaryDeserHelper::Transfer(payload) => StoredValue::Transfer(payload), + BinaryDeserHelper::DeployInfo(payload) => StoredValue::DeployInfo(payload), + BinaryDeserHelper::EraInfo(payload) => StoredValue::EraInfo(payload), + BinaryDeserHelper::Bid(bid) => StoredValue::Bid(bid), + BinaryDeserHelper::Withdraw(payload) => StoredValue::Withdraw(payload), + BinaryDeserHelper::Unbonding(payload) => StoredValue::Unbonding(payload), + BinaryDeserHelper::AddressableEntity(payload) => { + StoredValue::AddressableEntity(payload) + } + BinaryDeserHelper::BidKind(payload) => StoredValue::BidKind(payload), + BinaryDeserHelper::ByteCode(payload) => StoredValue::ByteCode(payload), + BinaryDeserHelper::Package(payload) => StoredValue::Package(payload), + BinaryDeserHelper::MessageTopic(message_topic_summary) => { + StoredValue::MessageTopic(message_topic_summary) + } + BinaryDeserHelper::Message(message_digest) => StoredValue::Message(message_digest), + } + } + } +} + +impl Serialize for StoredValue { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + serde_helpers::BinarySerHelper::from(self).serialize(serializer) + } else { + let bytes = self + .to_bytes() + .map_err(|error| ser::Error::custom(format!("{:?}", error)))?; + ByteBuf::from(bytes).serialize(serializer) + } + } +} + +impl<'de> Deserialize<'de> for StoredValue { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let json_helper = serde_helpers::BinaryDeserHelper::deserialize(deserializer)?; + Ok(StoredValue::from(json_helper)) + } else { + let bytes = ByteBuf::deserialize(deserializer)?.into_vec(); + bytesrepr::deserialize::(bytes) + .map_err(|error| de::Error::custom(format!("{:?}", error))) + } + } +} + +#[cfg(test)] +mod tests { + use proptest::proptest; + + use crate::{bytesrepr, gens}; + + proptest! 
{ + #[test] + fn serialization_roundtrip(v in gens::stored_value_arb()) { + bytesrepr::test_serialization_roundtrip(&v); + } + } +} diff --git a/casper_types_ver_2_0/src/stored_value/global_state_identifier.rs b/casper_types_ver_2_0/src/stored_value/global_state_identifier.rs new file mode 100644 index 00000000..e99cf27a --- /dev/null +++ b/casper_types_ver_2_0/src/stored_value/global_state_identifier.rs @@ -0,0 +1,127 @@ +use alloc::vec::Vec; + +#[cfg(test)] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(test)] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + BlockHash, BlockIdentifier, Digest, +}; + +const BLOCK_HASH_TAG: u8 = 0; +const BLOCK_HEIGHT_TAG: u8 = 1; +const STATE_ROOT_HASH_TAG: u8 = 2; + +/// Identifier for possible ways to query Global State +#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub enum GlobalStateIdentifier { + /// Query using a block hash. + BlockHash(BlockHash), + /// Query using a block height. + BlockHeight(u64), + /// Query using the state root hash. 
+ StateRootHash(Digest), +} + +impl GlobalStateIdentifier { + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..3) { + 0 => Self::BlockHash(BlockHash::random(rng)), + 1 => Self::BlockHeight(rng.gen()), + 2 => Self::StateRootHash(Digest::random(rng)), + _ => panic!(), + } + } +} + +impl From for GlobalStateIdentifier { + fn from(block_identifier: BlockIdentifier) -> Self { + match block_identifier { + BlockIdentifier::Hash(block_hash) => GlobalStateIdentifier::BlockHash(block_hash), + BlockIdentifier::Height(block_height) => { + GlobalStateIdentifier::BlockHeight(block_height) + } + } + } +} + +impl FromBytes for GlobalStateIdentifier { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + match bytes.split_first() { + Some((&BLOCK_HASH_TAG, rem)) => { + let (block_hash, rem) = FromBytes::from_bytes(rem)?; + Ok((GlobalStateIdentifier::BlockHash(block_hash), rem)) + } + Some((&BLOCK_HEIGHT_TAG, rem)) => { + let (block_height, rem) = FromBytes::from_bytes(rem)?; + Ok((GlobalStateIdentifier::BlockHeight(block_height), rem)) + } + Some((&STATE_ROOT_HASH_TAG, rem)) => { + let (state_root_hash, rem) = FromBytes::from_bytes(rem)?; + Ok((GlobalStateIdentifier::StateRootHash(state_root_hash), rem)) + } + Some(_) | None => Err(bytesrepr::Error::Formatting), + } + } +} + +impl ToBytes for GlobalStateIdentifier { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + GlobalStateIdentifier::BlockHash(block_hash) => { + writer.push(BLOCK_HASH_TAG); + block_hash.write_bytes(writer)?; + } + GlobalStateIdentifier::BlockHeight(block_height) => { + writer.push(BLOCK_HEIGHT_TAG); + block_height.write_bytes(writer)?; + } + GlobalStateIdentifier::StateRootHash(state_root_hash) => { + writer.push(STATE_ROOT_HASH_TAG); + 
state_root_hash.write_bytes(writer)?; + } + } + Ok(()) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + GlobalStateIdentifier::BlockHash(block_hash) => block_hash.serialized_length(), + GlobalStateIdentifier::BlockHeight(block_height) => { + block_height.serialized_length() + } + GlobalStateIdentifier::StateRootHash(state_root_hash) => { + state_root_hash.serialized_length() + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = GlobalStateIdentifier::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/casper_types_ver_2_0/src/stored_value/type_mismatch.rs b/casper_types_ver_2_0/src/stored_value/type_mismatch.rs new file mode 100644 index 00000000..d866f976 --- /dev/null +++ b/casper_types_ver_2_0/src/stored_value/type_mismatch.rs @@ -0,0 +1,68 @@ +use alloc::{string::String, vec::Vec}; +use core::fmt::{self, Display, Formatter}; +#[cfg(feature = "std")] +use std::error::Error as StdError; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::bytesrepr::{self, FromBytes, ToBytes}; + +/// An error struct representing a type mismatch in [`StoredValue`](crate::StoredValue) operations. +#[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct TypeMismatch { + /// The name of the expected type. + expected: String, + /// The actual type found. + found: String, +} + +impl TypeMismatch { + /// Creates a new `TypeMismatch`. 
+ pub fn new(expected: String, found: String) -> TypeMismatch { + TypeMismatch { expected, found } + } +} + +impl Display for TypeMismatch { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "Type mismatch. Expected {} but found {}.", + self.expected, self.found + ) + } +} + +impl ToBytes for TypeMismatch { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.expected.write_bytes(writer)?; + self.found.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.expected.serialized_length() + self.found.serialized_length() + } +} + +impl FromBytes for TypeMismatch { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (expected, remainder) = String::from_bytes(bytes)?; + let (found, remainder) = String::from_bytes(remainder)?; + Ok((TypeMismatch { expected, found }, remainder)) + } +} + +#[cfg(feature = "std")] +impl StdError for TypeMismatch {} diff --git a/casper_types_ver_2_0/src/system.rs b/casper_types_ver_2_0/src/system.rs new file mode 100644 index 00000000..e742b4d3 --- /dev/null +++ b/casper_types_ver_2_0/src/system.rs @@ -0,0 +1,12 @@ +//! System modules, formerly known as "system contracts" +pub mod auction; +mod call_stack_element; +mod error; +pub mod handle_payment; +pub mod mint; +pub mod standard_payment; +mod system_contract_type; + +pub use call_stack_element::{CallStackElement, CallStackElementTag}; +pub use error::Error; +pub use system_contract_type::{SystemEntityType, AUCTION, HANDLE_PAYMENT, MINT, STANDARD_PAYMENT}; diff --git a/casper_types_ver_2_0/src/system/auction.rs b/casper_types_ver_2_0/src/system/auction.rs new file mode 100644 index 00000000..85bf7b4f --- /dev/null +++ b/casper_types_ver_2_0/src/system/auction.rs @@ -0,0 +1,279 @@ +//! 
Contains implementation of a Auction contract functionality. +mod bid; +mod bid_addr; +mod bid_kind; +mod constants; +mod delegator; +mod entry_points; +mod era_info; +mod error; +mod seigniorage_recipient; +mod unbonding_purse; +mod validator_bid; +mod withdraw_purse; + +#[cfg(any(all(feature = "std", feature = "testing"), test))] +use alloc::collections::btree_map::Entry; +#[cfg(any(all(feature = "std", feature = "testing"), test))] +use itertools::Itertools; + +use alloc::{boxed::Box, collections::BTreeMap, vec::Vec}; + +pub use bid::{Bid, VESTING_SCHEDULE_LENGTH_MILLIS}; +pub use bid_addr::{BidAddr, BidAddrTag}; +pub use bid_kind::{BidKind, BidKindTag}; +pub use constants::*; +pub use delegator::Delegator; +pub use entry_points::auction_entry_points; +pub use era_info::{EraInfo, SeigniorageAllocation}; +pub use error::Error; +pub use seigniorage_recipient::SeigniorageRecipient; +pub use unbonding_purse::UnbondingPurse; +pub use validator_bid::ValidatorBid; +pub use withdraw_purse::WithdrawPurse; + +#[cfg(any(feature = "testing", test))] +pub(crate) mod gens { + pub use super::era_info::gens::*; +} + +use crate::{account::AccountHash, EraId, PublicKey, U512}; + +/// Representation of delegation rate of tokens. Range from 0..=100. +pub type DelegationRate = u8; + +/// Validators mapped to their bids. +pub type ValidatorBids = BTreeMap>; + +/// Weights of validators. "Weight" in this context means a sum of their stakes. +pub type ValidatorWeights = BTreeMap; + +/// List of era validators +pub type EraValidators = BTreeMap; + +/// Collection of seigniorage recipients. +pub type SeigniorageRecipients = BTreeMap; + +/// Snapshot of `SeigniorageRecipients` for a given era. +pub type SeigniorageRecipientsSnapshot = BTreeMap; + +/// Validators and delegators mapped to their unbonding purses. +pub type UnbondingPurses = BTreeMap>; + +/// Validators and delegators mapped to their withdraw purses. 
+pub type WithdrawPurses = BTreeMap>; + +/// Aggregated representation of validator and associated delegator bids. +pub type Staking = BTreeMap)>; + +/// Utils for working with a vector of BidKind. +#[cfg(any(all(feature = "std", feature = "testing"), test))] +pub trait BidsExt { + /// Returns Bid matching public_key, if present. + fn unified_bid(&self, public_key: &PublicKey) -> Option; + + /// Returns ValidatorBid matching public_key, if present. + fn validator_bid(&self, public_key: &PublicKey) -> Option; + + /// Returns total validator stake, if present. + fn validator_total_stake(&self, public_key: &PublicKey) -> Option; + + /// Returns Delegator entries matching validator public key, if present. + fn delegators_by_validator_public_key(&self, public_key: &PublicKey) -> Option>; + + /// Returns Delegator entry by public keys, if present. + fn delegator_by_public_keys( + &self, + validator_public_key: &PublicKey, + delegator_public_key: &PublicKey, + ) -> Option; + + /// Returns true if containing any elements matching the provided validator public key. + fn contains_validator_public_key(&self, public_key: &PublicKey) -> bool; + + /// Removes any items with a public key matching the provided validator public key. + fn remove_by_validator_public_key(&mut self, public_key: &PublicKey); + + /// Creates a map of Validator public keys to associated Delegator public keys. + fn public_key_map(&self) -> BTreeMap>; + + /// Inserts if bid_kind does not exist, otherwise replaces. + fn upsert(&mut self, bid_kind: BidKind); +} + +#[cfg(any(all(feature = "std", feature = "testing"), test))] +impl BidsExt for Vec { + fn unified_bid(&self, public_key: &PublicKey) -> Option { + if let BidKind::Unified(bid) = self + .iter() + .find(|x| x.is_validator() && &x.validator_public_key() == public_key)? 
+ { + Some(*bid.clone()) + } else { + None + } + } + + fn validator_bid(&self, public_key: &PublicKey) -> Option { + if let BidKind::Validator(validator_bid) = self + .iter() + .find(|x| x.is_validator() && &x.validator_public_key() == public_key)? + { + Some(*validator_bid.clone()) + } else { + None + } + } + + fn validator_total_stake(&self, public_key: &PublicKey) -> Option { + if let Some(validator_bid) = self.validator_bid(public_key) { + let delegator_stake = { + match self.delegators_by_validator_public_key(validator_bid.validator_public_key()) + { + None => U512::zero(), + Some(delegators) => delegators.iter().map(|x| x.staked_amount()).sum(), + } + }; + return Some(validator_bid.staked_amount() + delegator_stake); + } + + if let BidKind::Unified(bid) = self + .iter() + .find(|x| x.is_validator() && &x.validator_public_key() == public_key)? + { + return Some(*bid.staked_amount()); + } + + None + } + + fn delegators_by_validator_public_key(&self, public_key: &PublicKey) -> Option> { + let mut ret = vec![]; + for delegator in self + .iter() + .filter(|x| x.is_delegator() && &x.validator_public_key() == public_key) + { + if let BidKind::Delegator(delegator) = delegator { + ret.push(*delegator.clone()); + } + } + + if ret.is_empty() { + None + } else { + Some(ret) + } + } + + fn delegator_by_public_keys( + &self, + validator_public_key: &PublicKey, + delegator_public_key: &PublicKey, + ) -> Option { + if let BidKind::Delegator(delegator) = self.iter().find(|x| { + &x.validator_public_key() == validator_public_key + && x.delegator_public_key() == Some(delegator_public_key.clone()) + })? 
{ + Some(*delegator.clone()) + } else { + None + } + } + + fn contains_validator_public_key(&self, public_key: &PublicKey) -> bool { + self.iter().any(|x| &x.validator_public_key() == public_key) + } + + fn remove_by_validator_public_key(&mut self, public_key: &PublicKey) { + self.retain(|x| &x.validator_public_key() != public_key) + } + + fn public_key_map(&self) -> BTreeMap> { + let mut ret = BTreeMap::new(); + let validators = self + .iter() + .filter(|x| x.is_validator()) + .cloned() + .collect_vec(); + for bid_kind in validators { + ret.insert(bid_kind.validator_public_key().clone(), vec![]); + } + let delegators = self + .iter() + .filter(|x| x.is_delegator()) + .cloned() + .collect_vec(); + for bid_kind in delegators { + if let BidKind::Delegator(delegator) = bid_kind { + match ret.entry(delegator.validator_public_key().clone()) { + Entry::Vacant(ve) => { + ve.insert(vec![delegator.delegator_public_key().clone()]); + } + Entry::Occupied(mut oe) => { + let delegators = oe.get_mut(); + delegators.push(delegator.delegator_public_key().clone()) + } + } + } + } + let unified = self + .iter() + .filter(|x| x.is_unified()) + .cloned() + .collect_vec(); + for bid_kind in unified { + if let BidKind::Unified(unified) = bid_kind { + let delegators = unified + .delegators() + .iter() + .map(|(_, y)| y.delegator_public_key().clone()) + .collect(); + ret.insert(unified.validator_public_key().clone(), delegators); + } + } + ret + } + + fn upsert(&mut self, bid_kind: BidKind) { + let maybe_index = match bid_kind { + BidKind::Unified(_) | BidKind::Validator(_) => self + .iter() + .find_position(|x| { + x.validator_public_key() == bid_kind.validator_public_key() + && x.tag() == bid_kind.tag() + }) + .map(|(idx, _)| idx), + BidKind::Delegator(_) => self + .iter() + .find_position(|x| { + x.is_delegator() + && x.validator_public_key() == bid_kind.validator_public_key() + && x.delegator_public_key() == bid_kind.delegator_public_key() + }) + .map(|(idx, _)| idx), + }; + + match 
maybe_index { + Some(index) => { + self.insert(index, bid_kind); + } + None => { + self.push(bid_kind); + } + } + } +} + +#[cfg(test)] +mod prop_tests { + use proptest::prelude::*; + + use crate::{bytesrepr, gens}; + + proptest! { + #[test] + fn test_value_bid(bid in gens::delegator_arb()) { + bytesrepr::test_serialization_roundtrip(&bid); + } + } +} diff --git a/casper_types_ver_2_0/src/system/auction/bid.rs b/casper_types_ver_2_0/src/system/auction/bid.rs new file mode 100644 index 00000000..622d8a21 --- /dev/null +++ b/casper_types_ver_2_0/src/system/auction/bid.rs @@ -0,0 +1,609 @@ +mod vesting; + +use alloc::{collections::BTreeMap, vec::Vec}; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +#[cfg(feature = "json-schema")] +use serde_map_to_array::KeyValueJsonSchema; +use serde_map_to_array::{BTreeMapToArray, KeyValueLabels}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + system::auction::{DelegationRate, Delegator, Error, ValidatorBid}, + CLType, CLTyped, PublicKey, URef, U512, +}; + +pub use vesting::{VestingSchedule, VESTING_SCHEDULE_LENGTH_MILLIS}; + +/// An entry in the validator map. +#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Clone)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct Bid { + /// Validator public key. + validator_public_key: PublicKey, + /// The purse that was used for bonding. + bonding_purse: URef, + /// The amount of tokens staked by a validator (not including delegators). + staked_amount: U512, + /// Delegation rate. + delegation_rate: DelegationRate, + /// Vesting schedule for a genesis validator. `None` if non-genesis validator. + vesting_schedule: Option, + /// This validator's delegators, indexed by their public keys. 
+ #[serde(with = "BTreeMapToArray::")] + delegators: BTreeMap, + /// `true` if validator has been "evicted". + inactive: bool, +} + +impl Bid { + #[allow(missing_docs)] + pub fn from_non_unified( + validator_bid: ValidatorBid, + delegators: BTreeMap, + ) -> Self { + Self { + validator_public_key: validator_bid.validator_public_key().clone(), + bonding_purse: *validator_bid.bonding_purse(), + staked_amount: validator_bid.staked_amount(), + delegation_rate: *validator_bid.delegation_rate(), + vesting_schedule: validator_bid.vesting_schedule().cloned(), + delegators, + inactive: validator_bid.inactive(), + } + } + + /// Creates new instance of a bid with locked funds. + pub fn locked( + validator_public_key: PublicKey, + bonding_purse: URef, + staked_amount: U512, + delegation_rate: DelegationRate, + release_timestamp_millis: u64, + ) -> Self { + let vesting_schedule = Some(VestingSchedule::new(release_timestamp_millis)); + let delegators = BTreeMap::new(); + let inactive = false; + Self { + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + vesting_schedule, + delegators, + inactive, + } + } + + /// Creates new instance of a bid with unlocked funds. + pub fn unlocked( + validator_public_key: PublicKey, + bonding_purse: URef, + staked_amount: U512, + delegation_rate: DelegationRate, + ) -> Self { + let vesting_schedule = None; + let delegators = BTreeMap::new(); + let inactive = false; + Self { + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + vesting_schedule, + delegators, + inactive, + } + } + + /// Creates a new inactive instance of a bid with 0 staked amount. 
+ pub fn empty(validator_public_key: PublicKey, bonding_purse: URef) -> Self { + let vesting_schedule = None; + let delegators = BTreeMap::new(); + let inactive = true; + let staked_amount = 0.into(); + let delegation_rate = Default::default(); + Self { + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + vesting_schedule, + delegators, + inactive, + } + } + + /// Gets the validator public key of the provided bid + pub fn validator_public_key(&self) -> &PublicKey { + &self.validator_public_key + } + + /// Gets the bonding purse of the provided bid + pub fn bonding_purse(&self) -> &URef { + &self.bonding_purse + } + + /// Checks if a bid is still locked under a vesting schedule. + /// + /// Returns true if a timestamp falls below the initial lockup period + 91 days release + /// schedule, otherwise false. + pub fn is_locked(&self, timestamp_millis: u64) -> bool { + self.is_locked_with_vesting_schedule(timestamp_millis, VESTING_SCHEDULE_LENGTH_MILLIS) + } + + /// Checks if a bid is still locked under a vesting schedule. + /// + /// Returns true if a timestamp falls below the initial lockup period + 91 days release + /// schedule, otherwise false. + pub fn is_locked_with_vesting_schedule( + &self, + timestamp_millis: u64, + vesting_schedule_period_millis: u64, + ) -> bool { + match &self.vesting_schedule { + Some(vesting_schedule) => { + vesting_schedule.is_vesting(timestamp_millis, vesting_schedule_period_millis) + } + None => false, + } + } + + /// Gets the staked amount of the provided bid + pub fn staked_amount(&self) -> &U512 { + &self.staked_amount + } + + /// Gets the staked amount of the provided bid + pub fn staked_amount_mut(&mut self) -> &mut U512 { + &mut self.staked_amount + } + + /// Gets the delegation rate of the provided bid + pub fn delegation_rate(&self) -> &DelegationRate { + &self.delegation_rate + } + + /// Returns a reference to the vesting schedule of the provided bid. `None` if a non-genesis + /// validator. 
+ pub fn vesting_schedule(&self) -> Option<&VestingSchedule> { + self.vesting_schedule.as_ref() + } + + /// Returns a mutable reference to the vesting schedule of the provided bid. `None` if a + /// non-genesis validator. + pub fn vesting_schedule_mut(&mut self) -> Option<&mut VestingSchedule> { + self.vesting_schedule.as_mut() + } + + /// Returns a reference to the delegators of the provided bid + pub fn delegators(&self) -> &BTreeMap { + &self.delegators + } + + /// Returns a mutable reference to the delegators of the provided bid + pub fn delegators_mut(&mut self) -> &mut BTreeMap { + &mut self.delegators + } + + /// Returns `true` if validator is inactive + pub fn inactive(&self) -> bool { + self.inactive + } + + /// Decreases the stake of the provided bid + pub fn decrease_stake( + &mut self, + amount: U512, + era_end_timestamp_millis: u64, + ) -> Result { + let updated_staked_amount = self + .staked_amount + .checked_sub(amount) + .ok_or(Error::UnbondTooLarge)?; + + let vesting_schedule = match self.vesting_schedule.as_ref() { + Some(vesting_schedule) => vesting_schedule, + None => { + self.staked_amount = updated_staked_amount; + return Ok(updated_staked_amount); + } + }; + + match vesting_schedule.locked_amount(era_end_timestamp_millis) { + Some(locked_amount) if updated_staked_amount < locked_amount => { + Err(Error::ValidatorFundsLocked) + } + None => { + // If `None`, then the locked amounts table has yet to be initialized (likely + // pre-90 day mark) + Err(Error::ValidatorFundsLocked) + } + Some(_) => { + self.staked_amount = updated_staked_amount; + Ok(updated_staked_amount) + } + } + } + + /// Increases the stake of the provided bid + pub fn increase_stake(&mut self, amount: U512) -> Result { + let updated_staked_amount = self + .staked_amount + .checked_add(amount) + .ok_or(Error::InvalidAmount)?; + + self.staked_amount = updated_staked_amount; + + Ok(updated_staked_amount) + } + + /// Updates the delegation rate of the provided bid + pub fn 
with_delegation_rate(&mut self, delegation_rate: DelegationRate) -> &mut Self { + self.delegation_rate = delegation_rate; + self + } + + /// Initializes the vesting schedule of provided bid if the provided timestamp is greater than + /// or equal to the bid's initial release timestamp and the bid is owned by a genesis + /// validator. This method initializes with default 14 week vesting schedule. + /// + /// Returns `true` if the provided bid's vesting schedule was initialized. + pub fn process(&mut self, timestamp_millis: u64) -> bool { + self.process_with_vesting_schedule(timestamp_millis, VESTING_SCHEDULE_LENGTH_MILLIS) + } + + /// Initializes the vesting schedule of provided bid if the provided timestamp is greater than + /// or equal to the bid's initial release timestamp and the bid is owned by a genesis + /// validator. + /// + /// Returns `true` if the provided bid's vesting schedule was initialized. + pub fn process_with_vesting_schedule( + &mut self, + timestamp_millis: u64, + vesting_schedule_period_millis: u64, + ) -> bool { + // Put timestamp-sensitive processing logic in here + let staked_amount = self.staked_amount; + let vesting_schedule = match self.vesting_schedule_mut() { + Some(vesting_schedule) => vesting_schedule, + None => return false, + }; + if timestamp_millis < vesting_schedule.initial_release_timestamp_millis() { + return false; + } + + let mut initialized = false; + + if vesting_schedule.initialize_with_schedule(staked_amount, vesting_schedule_period_millis) + { + initialized = true; + } + + for delegator in self.delegators_mut().values_mut() { + let staked_amount = delegator.staked_amount(); + if let Some(vesting_schedule) = delegator.vesting_schedule_mut() { + if timestamp_millis >= vesting_schedule.initial_release_timestamp_millis() + && vesting_schedule + .initialize_with_schedule(staked_amount, vesting_schedule_period_millis) + { + initialized = true; + } + } + } + + initialized + } + + /// Sets given bid's `inactive` field to 
`false` + pub fn activate(&mut self) -> bool { + self.inactive = false; + false + } + + /// Sets given bid's `inactive` field to `true` + pub fn deactivate(&mut self) -> bool { + self.inactive = true; + true + } + + /// Returns the total staked amount of validator + all delegators + pub fn total_staked_amount(&self) -> Result { + self.delegators + .iter() + .try_fold(U512::zero(), |a, (_, b)| a.checked_add(b.staked_amount())) + .and_then(|delegators_sum| delegators_sum.checked_add(*self.staked_amount())) + .ok_or(Error::InvalidAmount) + } +} + +impl CLTyped for Bid { + fn cl_type() -> CLType { + CLType::Any + } +} + +impl ToBytes for Bid { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.validator_public_key.serialized_length() + + self.bonding_purse.serialized_length() + + self.staked_amount.serialized_length() + + self.delegation_rate.serialized_length() + + self.vesting_schedule.serialized_length() + + self.delegators.serialized_length() + + self.inactive.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.validator_public_key.write_bytes(writer)?; + self.bonding_purse.write_bytes(writer)?; + self.staked_amount.write_bytes(writer)?; + self.delegation_rate.write_bytes(writer)?; + self.vesting_schedule.write_bytes(writer)?; + self.delegators.write_bytes(writer)?; + self.inactive.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for Bid { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (validator_public_key, bytes) = FromBytes::from_bytes(bytes)?; + let (bonding_purse, bytes) = FromBytes::from_bytes(bytes)?; + let (staked_amount, bytes) = FromBytes::from_bytes(bytes)?; + let (delegation_rate, bytes) = FromBytes::from_bytes(bytes)?; + let (vesting_schedule, bytes) = FromBytes::from_bytes(bytes)?; + let (delegators, 
bytes) = FromBytes::from_bytes(bytes)?; + let (inactive, bytes) = FromBytes::from_bytes(bytes)?; + Ok(( + Bid { + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + vesting_schedule, + delegators, + inactive, + }, + bytes, + )) + } +} + +impl Display for Bid { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "bid {{ bonding purse {}, staked {}, delegation rate {}, delegators {{", + self.bonding_purse, self.staked_amount, self.delegation_rate + )?; + + let count = self.delegators.len(); + for (index, delegator) in self.delegators.values().enumerate() { + write!( + formatter, + "{}{}", + delegator, + if index + 1 == count { "" } else { ", " } + )?; + } + + write!( + formatter, + "}}, is {}inactive }}", + if self.inactive { "" } else { "not " } + ) + } +} + +struct DelegatorLabels; + +impl KeyValueLabels for DelegatorLabels { + const KEY: &'static str = "delegator_public_key"; + const VALUE: &'static str = "delegator"; +} + +#[cfg(feature = "json-schema")] +impl KeyValueJsonSchema for DelegatorLabels { + const JSON_SCHEMA_KV_NAME: Option<&'static str> = Some("PublicKeyAndDelegator"); + const JSON_SCHEMA_KV_DESCRIPTION: Option<&'static str> = + Some("A delegator associated with the given validator."); + const JSON_SCHEMA_KEY_DESCRIPTION: Option<&'static str> = + Some("The public key of the delegator."); + const JSON_SCHEMA_VALUE_DESCRIPTION: Option<&'static str> = Some("The delegator details."); +} + +#[cfg(test)] +mod tests { + use alloc::collections::BTreeMap; + + use crate::{ + bytesrepr, + system::auction::{bid::VestingSchedule, Bid, DelegationRate, Delegator}, + AccessRights, PublicKey, SecretKey, URef, U512, + }; + + const WEEK_MILLIS: u64 = 7 * 24 * 60 * 60 * 1000; + const TEST_VESTING_SCHEDULE_LENGTH_MILLIS: u64 = 7 * WEEK_MILLIS; + + #[test] + fn serialization_roundtrip() { + let founding_validator = Bid { + validator_public_key: PublicKey::from( + &SecretKey::ed25519_from_bytes([0u8; 
SecretKey::ED25519_LENGTH]).unwrap(), + ), + bonding_purse: URef::new([42; 32], AccessRights::READ_ADD_WRITE), + staked_amount: U512::one(), + delegation_rate: DelegationRate::max_value(), + vesting_schedule: Some(VestingSchedule::default()), + delegators: BTreeMap::default(), + inactive: true, + }; + bytesrepr::test_serialization_roundtrip(&founding_validator); + } + + #[test] + fn should_immediately_initialize_unlock_amounts() { + const TIMESTAMP_MILLIS: u64 = 0; + + let validator_pk: PublicKey = (&SecretKey::ed25519_from_bytes([42; 32]).unwrap()).into(); + + let validator_release_timestamp = TIMESTAMP_MILLIS; + let vesting_schedule_period_millis = TIMESTAMP_MILLIS; + let validator_bonding_purse = URef::new([42; 32], AccessRights::ADD); + let validator_staked_amount = U512::from(1000); + let validator_delegation_rate = 0; + + let mut bid = Bid::locked( + validator_pk, + validator_bonding_purse, + validator_staked_amount, + validator_delegation_rate, + validator_release_timestamp, + ); + + assert!(bid.process_with_vesting_schedule( + validator_release_timestamp, + vesting_schedule_period_millis, + )); + assert!(!bid.is_locked_with_vesting_schedule( + validator_release_timestamp, + vesting_schedule_period_millis + )); + } + + #[test] + fn should_initialize_delegators_different_timestamps() { + const TIMESTAMP_MILLIS: u64 = WEEK_MILLIS; + + let validator_pk: PublicKey = (&SecretKey::ed25519_from_bytes([42; 32]).unwrap()).into(); + + let delegator_1_pk: PublicKey = (&SecretKey::ed25519_from_bytes([43; 32]).unwrap()).into(); + let delegator_2_pk: PublicKey = (&SecretKey::ed25519_from_bytes([44; 32]).unwrap()).into(); + + let validator_release_timestamp = TIMESTAMP_MILLIS; + let validator_bonding_purse = URef::new([42; 32], AccessRights::ADD); + let validator_staked_amount = U512::from(1000); + let validator_delegation_rate = 0; + + let delegator_1_release_timestamp = TIMESTAMP_MILLIS + 1; + let delegator_1_bonding_purse = URef::new([52; 32], AccessRights::ADD); + let 
delegator_1_staked_amount = U512::from(2000); + + let delegator_2_release_timestamp = TIMESTAMP_MILLIS + 2; + let delegator_2_bonding_purse = URef::new([62; 32], AccessRights::ADD); + let delegator_2_staked_amount = U512::from(3000); + + let delegator_1 = Delegator::locked( + delegator_1_pk.clone(), + delegator_1_staked_amount, + delegator_1_bonding_purse, + validator_pk.clone(), + delegator_1_release_timestamp, + ); + + let delegator_2 = Delegator::locked( + delegator_2_pk.clone(), + delegator_2_staked_amount, + delegator_2_bonding_purse, + validator_pk.clone(), + delegator_2_release_timestamp, + ); + + let mut bid = Bid::locked( + validator_pk, + validator_bonding_purse, + validator_staked_amount, + validator_delegation_rate, + validator_release_timestamp, + ); + + assert!(!bid.process_with_vesting_schedule( + validator_release_timestamp - 1, + TEST_VESTING_SCHEDULE_LENGTH_MILLIS + )); + + { + let delegators = bid.delegators_mut(); + + delegators.insert(delegator_1_pk.clone(), delegator_1); + delegators.insert(delegator_2_pk.clone(), delegator_2); + } + + assert!(bid.process_with_vesting_schedule( + delegator_1_release_timestamp, + TEST_VESTING_SCHEDULE_LENGTH_MILLIS + )); + + let delegator_1_updated_1 = bid.delegators().get(&delegator_1_pk).cloned().unwrap(); + assert!(delegator_1_updated_1 + .vesting_schedule() + .unwrap() + .locked_amounts() + .is_some()); + + let delegator_2_updated_1 = bid.delegators().get(&delegator_2_pk).cloned().unwrap(); + assert!(delegator_2_updated_1 + .vesting_schedule() + .unwrap() + .locked_amounts() + .is_none()); + + assert!(bid.process_with_vesting_schedule( + delegator_2_release_timestamp, + TEST_VESTING_SCHEDULE_LENGTH_MILLIS + )); + + let delegator_1_updated_2 = bid.delegators().get(&delegator_1_pk).cloned().unwrap(); + assert!(delegator_1_updated_2 + .vesting_schedule() + .unwrap() + .locked_amounts() + .is_some()); + // Delegator 1 is already initialized and did not change after 2nd Bid::process + 
assert_eq!(delegator_1_updated_1, delegator_1_updated_2); + + let delegator_2_updated_2 = bid.delegators().get(&delegator_2_pk).cloned().unwrap(); + assert!(delegator_2_updated_2 + .vesting_schedule() + .unwrap() + .locked_amounts() + .is_some()); + + // Delegator 2 is different compared to first Bid::process + assert_ne!(delegator_2_updated_1, delegator_2_updated_2); + + // Validator initialized, and all delegators initialized + assert!(!bid.process_with_vesting_schedule( + delegator_2_release_timestamp + 1, + TEST_VESTING_SCHEDULE_LENGTH_MILLIS + )); + } +} + +#[cfg(test)] +mod prop_tests { + use proptest::prelude::*; + + use crate::{bytesrepr, gens}; + + proptest! { + #[test] + fn test_unified_bid(bid in gens::unified_bid_arb(0..3)) { + bytesrepr::test_serialization_roundtrip(&bid); + } + } +} diff --git a/casper_types_ver_2_0/src/system/auction/bid/vesting.rs b/casper_types_ver_2_0/src/system/auction/bid/vesting.rs new file mode 100644 index 00000000..ae496a4b --- /dev/null +++ b/casper_types_ver_2_0/src/system/auction/bid/vesting.rs @@ -0,0 +1,520 @@ +use alloc::vec::Vec; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, Error, FromBytes, ToBytes}, + U512, +}; + +const DAY_MILLIS: usize = 24 * 60 * 60 * 1000; +const DAYS_IN_WEEK: usize = 7; +const WEEK_MILLIS: usize = DAYS_IN_WEEK * DAY_MILLIS; + +/// Length of total vesting schedule in days. +const VESTING_SCHEDULE_LENGTH_DAYS: usize = 91; +/// Length of total vesting schedule expressed in days. 
+pub const VESTING_SCHEDULE_LENGTH_MILLIS: u64 = + VESTING_SCHEDULE_LENGTH_DAYS as u64 * DAY_MILLIS as u64; +/// 91 days / 7 days in a week = 13 weeks +const LOCKED_AMOUNTS_MAX_LENGTH: usize = (VESTING_SCHEDULE_LENGTH_DAYS / DAYS_IN_WEEK) + 1; + +#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct VestingSchedule { + initial_release_timestamp_millis: u64, + locked_amounts: Option<[U512; LOCKED_AMOUNTS_MAX_LENGTH]>, +} + +fn vesting_schedule_period_to_weeks(vesting_schedule_period_millis: u64) -> usize { + debug_assert_ne!(DAY_MILLIS, 0); + debug_assert_ne!(DAYS_IN_WEEK, 0); + vesting_schedule_period_millis as usize / DAY_MILLIS / DAYS_IN_WEEK +} + +impl VestingSchedule { + pub fn new(initial_release_timestamp_millis: u64) -> Self { + let locked_amounts = None; + VestingSchedule { + initial_release_timestamp_millis, + locked_amounts, + } + } + + /// Initializes vesting schedule with a configured amount of weekly releases. + /// + /// Returns `false` if already initialized. + /// + /// # Panics + /// + /// Panics if `vesting_schedule_period_millis` represents more than 13 weeks. + pub fn initialize_with_schedule( + &mut self, + staked_amount: U512, + vesting_schedule_period_millis: u64, + ) -> bool { + if self.locked_amounts.is_some() { + return false; + } + + let locked_amounts_length = + vesting_schedule_period_to_weeks(vesting_schedule_period_millis); + + assert!( + locked_amounts_length < LOCKED_AMOUNTS_MAX_LENGTH, + "vesting schedule period must be less than {} weeks", + LOCKED_AMOUNTS_MAX_LENGTH, + ); + + if locked_amounts_length == 0 || vesting_schedule_period_millis == 0 { + // Zero weeks means instant unlock of staked amount. 
+ self.locked_amounts = Some(Default::default()); + return true; + } + + let release_period: U512 = U512::from(locked_amounts_length + 1); + let weekly_release = staked_amount / release_period; + + let mut locked_amounts = [U512::zero(); LOCKED_AMOUNTS_MAX_LENGTH]; + let mut remaining_locked = staked_amount; + + for locked_amount in locked_amounts.iter_mut().take(locked_amounts_length) { + remaining_locked -= weekly_release; + *locked_amount = remaining_locked; + } + + assert_eq!( + locked_amounts.get(locked_amounts_length), + Some(&U512::zero()), + "first element after the schedule should be zero" + ); + + self.locked_amounts = Some(locked_amounts); + true + } + + /// Initializes weekly release for a fixed amount of 14 weeks period. + /// + /// Returns `false` if already initialized. + pub fn initialize(&mut self, staked_amount: U512) -> bool { + self.initialize_with_schedule(staked_amount, VESTING_SCHEDULE_LENGTH_MILLIS) + } + + pub fn initial_release_timestamp_millis(&self) -> u64 { + self.initial_release_timestamp_millis + } + + pub fn locked_amounts(&self) -> Option<&[U512]> { + let locked_amounts = self.locked_amounts.as_ref()?; + Some(locked_amounts.as_slice()) + } + + pub fn locked_amount(&self, timestamp_millis: u64) -> Option { + let locked_amounts = self.locked_amounts()?; + + let index = { + let index_timestamp = + timestamp_millis.checked_sub(self.initial_release_timestamp_millis)?; + (index_timestamp as usize).checked_div(WEEK_MILLIS)? 
+ }; + + let locked_amount = locked_amounts.get(index).cloned().unwrap_or_default(); + + Some(locked_amount) + } + + /// Checks if this vesting schedule is still under the vesting + pub(crate) fn is_vesting( + &self, + timestamp_millis: u64, + vesting_schedule_period_millis: u64, + ) -> bool { + let vested_period = match self.locked_amounts() { + Some(locked_amounts) => { + let vesting_weeks = locked_amounts + .iter() + .position(|amount| amount.is_zero()) + .expect("vesting schedule should always have zero at the end"); // SAFETY: at least one zero is guaranteed by `initialize_with_schedule` method + + let vesting_weeks_millis = + (vesting_weeks as u64).saturating_mul(WEEK_MILLIS as u64); + + self.initial_release_timestamp_millis() + .saturating_add(vesting_weeks_millis) + } + None => { + // Uninitialized yet but we know this will be the configured period of time. + self.initial_release_timestamp_millis() + .saturating_add(vesting_schedule_period_millis) + } + }; + + timestamp_millis < vested_period + } +} + +impl ToBytes for [U512; LOCKED_AMOUNTS_MAX_LENGTH] { + fn to_bytes(&self) -> Result, Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut result)?; + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.iter().map(ToBytes::serialized_length).sum::() + } + + #[inline] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + for amount in self { + amount.write_bytes(writer)?; + } + Ok(()) + } +} + +impl FromBytes for [U512; LOCKED_AMOUNTS_MAX_LENGTH] { + fn from_bytes(mut bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let mut result = [U512::zero(); LOCKED_AMOUNTS_MAX_LENGTH]; + for value in &mut result { + let (amount, rem) = FromBytes::from_bytes(bytes)?; + *value = amount; + bytes = rem; + } + Ok((result, bytes)) + } +} + +impl ToBytes for VestingSchedule { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + result.append(&mut 
self.initial_release_timestamp_millis.to_bytes()?); + result.append(&mut self.locked_amounts.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.initial_release_timestamp_millis.serialized_length() + + self.locked_amounts.serialized_length() + } +} + +impl FromBytes for VestingSchedule { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (initial_release_timestamp_millis, bytes) = FromBytes::from_bytes(bytes)?; + let (locked_amounts, bytes) = FromBytes::from_bytes(bytes)?; + Ok(( + VestingSchedule { + initial_release_timestamp_millis, + locked_amounts, + }, + bytes, + )) + } +} + +/// Generators for [`VestingSchedule`] +#[cfg(test)] +mod gens { + use proptest::{ + array, option, + prelude::{Arbitrary, Strategy}, + }; + + use super::VestingSchedule; + use crate::gens::u512_arb; + + pub fn vesting_schedule_arb() -> impl Strategy { + (::arbitrary(), option::of(array::uniform14(u512_arb()))).prop_map( + |(initial_release_timestamp_millis, locked_amounts)| VestingSchedule { + initial_release_timestamp_millis, + locked_amounts, + }, + ) + } +} + +#[cfg(test)] +mod tests { + use proptest::{prop_assert, proptest}; + + use crate::{ + bytesrepr, + gens::u512_arb, + system::auction::bid::{ + vesting::{gens::vesting_schedule_arb, vesting_schedule_period_to_weeks, WEEK_MILLIS}, + VestingSchedule, + }, + U512, + }; + + use super::*; + + /// Default lock-in period of 90 days + const DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS: u64 = 90 * DAY_MILLIS as u64; + const RELEASE_TIMESTAMP: u64 = DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; + const STAKE: u64 = 140; + + const DEFAULT_VESTING_SCHEDULE_PERIOD_MILLIS: u64 = 91 * DAY_MILLIS as u64; + const LOCKED_AMOUNTS_LENGTH: usize = + (DEFAULT_VESTING_SCHEDULE_PERIOD_MILLIS as usize / WEEK_MILLIS) + 1; + + #[test] + #[should_panic = "vesting schedule period must be less than"] + fn test_vesting_schedule_exceeding_the_maximum_should_not_panic() { + let future_date = 98 * DAY_MILLIS as u64; + let 
mut vesting_schedule = VestingSchedule::new(RELEASE_TIMESTAMP); + vesting_schedule.initialize_with_schedule(U512::from(STAKE), future_date); + + assert_eq!(vesting_schedule.locked_amount(0), None); + assert_eq!(vesting_schedule.locked_amount(RELEASE_TIMESTAMP - 1), None); + } + + #[test] + fn test_locked_amount_check_should_not_panic() { + let mut vesting_schedule = VestingSchedule::new(RELEASE_TIMESTAMP); + vesting_schedule.initialize(U512::from(STAKE)); + + assert_eq!(vesting_schedule.locked_amount(0), None); + assert_eq!(vesting_schedule.locked_amount(RELEASE_TIMESTAMP - 1), None); + } + + #[test] + fn test_locked_with_zero_length_schedule_should_not_panic() { + let mut vesting_schedule = VestingSchedule::new(RELEASE_TIMESTAMP); + vesting_schedule.initialize_with_schedule(U512::from(STAKE), 0); + + assert_eq!(vesting_schedule.locked_amount(0), None); + assert_eq!(vesting_schedule.locked_amount(RELEASE_TIMESTAMP - 1), None); + } + + #[test] + fn test_locked_amount() { + let mut vesting_schedule = VestingSchedule::new(RELEASE_TIMESTAMP); + vesting_schedule.initialize(U512::from(STAKE)); + + let mut timestamp = RELEASE_TIMESTAMP; + + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(130)) + ); + + timestamp = RELEASE_TIMESTAMP + WEEK_MILLIS as u64 - 1; + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(130)) + ); + + timestamp = RELEASE_TIMESTAMP + WEEK_MILLIS as u64; + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(120)) + ); + + timestamp = RELEASE_TIMESTAMP + WEEK_MILLIS as u64 + 1; + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(120)) + ); + + timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 2) - 1; + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(120)) + ); + + timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 2); + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(110)) + ); + + timestamp = 
RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 2) + 1; + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(110)) + ); + + timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 3) - 1; + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(110)) + ); + + timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 3); + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(100)) + ); + + timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 3) + 1; + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(100)) + ); + + timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 12) - 1; + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(20)) + ); + + timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 12); + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(10)) + ); + + timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 12) + 1; + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(10)) + ); + + timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 13) - 1; + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(10)) + ); + + timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 13); + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(0)) + ); + + timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 13) + 1; + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(0)) + ); + + timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 14) - 1; + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(0)) + ); + + timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 14); + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(0)) + ); + + timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 14) + 1; + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(0)) + ); + } + + fn 
vested_amounts_match_initial_stake( + initial_stake: U512, + release_timestamp: u64, + vesting_schedule_length: u64, + ) -> bool { + let mut vesting_schedule = VestingSchedule::new(release_timestamp); + vesting_schedule.initialize_with_schedule(initial_stake, vesting_schedule_length); + + let mut total_vested_amounts = U512::zero(); + + for i in 0..LOCKED_AMOUNTS_LENGTH { + let timestamp = release_timestamp + (WEEK_MILLIS * i) as u64; + if let Some(locked_amount) = vesting_schedule.locked_amount(timestamp) { + let current_vested_amount = initial_stake - locked_amount - total_vested_amounts; + total_vested_amounts += current_vested_amount + } + } + + total_vested_amounts == initial_stake + } + + #[test] + fn vested_amounts_conserve_stake() { + let stake = U512::from(1000); + assert!(vested_amounts_match_initial_stake( + stake, + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, + DEFAULT_VESTING_SCHEDULE_PERIOD_MILLIS, + )) + } + + #[test] + fn is_vesting_with_default_schedule() { + let initial_stake = U512::from(1000u64); + let release_timestamp = DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; + let mut vesting_schedule = VestingSchedule::new(release_timestamp); + + let is_vesting_before: Vec = (0..LOCKED_AMOUNTS_LENGTH + 1) + .map(|i| { + vesting_schedule.is_vesting( + release_timestamp + (WEEK_MILLIS * i) as u64, + DEFAULT_VESTING_SCHEDULE_PERIOD_MILLIS, + ) + }) + .collect(); + + assert_eq!( + is_vesting_before, + vec![ + true, true, true, true, true, true, true, true, true, true, true, true, true, + false, // week after is always set to zero + false + ] + ); + vesting_schedule.initialize(initial_stake); + + let is_vesting_after: Vec = (0..LOCKED_AMOUNTS_LENGTH + 1) + .map(|i| { + vesting_schedule.is_vesting( + release_timestamp + (WEEK_MILLIS * i) as u64, + DEFAULT_VESTING_SCHEDULE_PERIOD_MILLIS, + ) + }) + .collect(); + + assert_eq!( + is_vesting_after, + vec![ + true, true, true, true, true, true, true, true, true, true, true, true, true, + false, // week after is always set to zero 
+ false, + ] + ); + } + + #[test] + fn should_calculate_vesting_schedule_period_to_weeks() { + let thirteen_weeks_millis = 13 * 7 * DAY_MILLIS as u64; + assert_eq!(vesting_schedule_period_to_weeks(thirteen_weeks_millis), 13,); + + assert_eq!(vesting_schedule_period_to_weeks(0), 0); + assert_eq!( + vesting_schedule_period_to_weeks(u64::MAX), + 30_500_568_904usize + ); + } + + proptest! { + #[test] + fn prop_total_vested_amounts_conserve_stake(stake in u512_arb()) { + prop_assert!(vested_amounts_match_initial_stake( + stake, + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, + DEFAULT_VESTING_SCHEDULE_PERIOD_MILLIS, + )) + } + + #[test] + fn prop_serialization_roundtrip(vesting_schedule in vesting_schedule_arb()) { + bytesrepr::test_serialization_roundtrip(&vesting_schedule) + } + } +} diff --git a/casper_types_ver_2_0/src/system/auction/bid_addr.rs b/casper_types_ver_2_0/src/system/auction/bid_addr.rs new file mode 100644 index 00000000..618b4994 --- /dev/null +++ b/casper_types_ver_2_0/src/system/auction/bid_addr.rs @@ -0,0 +1,335 @@ +use crate::{ + account::{AccountHash, ACCOUNT_HASH_LENGTH}, + bytesrepr, + bytesrepr::{FromBytes, ToBytes}, + system::auction::error::Error, + Key, KeyTag, PublicKey, +}; +use alloc::vec::Vec; +use core::fmt::{Debug, Display, Formatter}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +const UNIFIED_TAG: u8 = 0; +const VALIDATOR_TAG: u8 = 1; +const DELEGATOR_TAG: u8 = 2; + +/// Serialization tag for BidAddr variants. +#[derive( + Debug, Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy, Serialize, Deserialize, +)] +#[repr(u8)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum BidAddrTag { + /// BidAddr for legacy unified bid. + Unified = UNIFIED_TAG, + /// BidAddr for validator bid. 
+ #[default] + Validator = VALIDATOR_TAG, + /// BidAddr for delegator bid. + Delegator = DELEGATOR_TAG, +} + +impl Display for BidAddrTag { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + let tag = match self { + BidAddrTag::Unified => UNIFIED_TAG, + BidAddrTag::Validator => VALIDATOR_TAG, + BidAddrTag::Delegator => DELEGATOR_TAG, + }; + write!(f, "{}", base16::encode_lower(&[tag])) + } +} + +impl BidAddrTag { + /// The length in bytes of a [`BidAddrTag`]. + pub const BID_ADDR_TAG_LENGTH: usize = 1; + + /// Attempts to map `BidAddrTag` from a u8. + pub fn try_from_u8(value: u8) -> Option { + // TryFrom requires std, so doing this instead. + if value == UNIFIED_TAG { + return Some(BidAddrTag::Unified); + } + if value == VALIDATOR_TAG { + return Some(BidAddrTag::Validator); + } + if value == DELEGATOR_TAG { + return Some(BidAddrTag::Delegator); + } + + None + } +} + +/// Bid Address +#[derive(PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum BidAddr { + /// Unified BidAddr. + Unified(AccountHash), + /// Validator BidAddr. + Validator(AccountHash), + /// Delegator BidAddr. + Delegator { + /// The validator addr. + validator: AccountHash, + /// The delegator addr. + delegator: AccountHash, + }, +} + +impl BidAddr { + /// The length in bytes of a [`BidAddr`] for a validator bid. + pub const VALIDATOR_BID_ADDR_LENGTH: usize = + ACCOUNT_HASH_LENGTH + BidAddrTag::BID_ADDR_TAG_LENGTH; + + /// The length in bytes of a [`BidAddr`] for a delegator bid. + pub const DELEGATOR_BID_ADDR_LENGTH: usize = + (ACCOUNT_HASH_LENGTH * 2) + BidAddrTag::BID_ADDR_TAG_LENGTH; + + /// Constructs a new [`BidAddr`] instance from a validator's [`AccountHash`]. 
+ pub const fn new_validator_addr(validator: [u8; ACCOUNT_HASH_LENGTH]) -> Self { + BidAddr::Validator(AccountHash::new(validator)) + } + + /// Constructs a new [`BidAddr`] instance from the [`AccountHash`] pair of a validator + /// and a delegator. + pub const fn new_delegator_addr( + pair: ([u8; ACCOUNT_HASH_LENGTH], [u8; ACCOUNT_HASH_LENGTH]), + ) -> Self { + BidAddr::Delegator { + validator: AccountHash::new(pair.0), + delegator: AccountHash::new(pair.1), + } + } + + #[allow(missing_docs)] + pub const fn legacy(validator: [u8; ACCOUNT_HASH_LENGTH]) -> Self { + BidAddr::Unified(AccountHash::new(validator)) + } + + /// Create a new instance of a [`BidAddr`]. + pub fn new_from_public_keys( + validator: &PublicKey, + maybe_delegator: Option<&PublicKey>, + ) -> Self { + if let Some(delegator) = maybe_delegator { + BidAddr::Delegator { + validator: AccountHash::from(validator), + delegator: AccountHash::from(delegator), + } + } else { + BidAddr::Validator(AccountHash::from(validator)) + } + } + + /// Returns the common prefix of all delegators to the cited validator. + pub fn delegators_prefix(&self) -> Result, Error> { + let validator = self.validator_account_hash(); + let mut ret = Vec::with_capacity(validator.serialized_length() + 2); + ret.push(KeyTag::BidAddr as u8); + ret.push(BidAddrTag::Delegator as u8); + validator.write_bytes(&mut ret)?; + Ok(ret) + } + + /// Validator account hash. + pub fn validator_account_hash(&self) -> AccountHash { + match self { + BidAddr::Unified(account_hash) | BidAddr::Validator(account_hash) => *account_hash, + BidAddr::Delegator { validator, .. } => *validator, + } + } + + /// Delegator account hash or none. + pub fn maybe_delegator_account_hash(&self) -> Option { + match self { + BidAddr::Unified(_) | BidAddr::Validator(_) => None, + BidAddr::Delegator { delegator, .. } => Some(*delegator), + } + } + + /// If true, this instance is the key for a delegator bid record. + /// Else, it is the key for a validator bid record. 
+ pub fn is_delegator_bid_addr(&self) -> bool { + match self { + BidAddr::Unified(_) | BidAddr::Validator(_) => false, + BidAddr::Delegator { .. } => true, + } + } + + /// How long will be the serialized value for this instance. + pub fn serialized_length(&self) -> usize { + match self { + BidAddr::Unified(account_hash) | BidAddr::Validator(account_hash) => { + ToBytes::serialized_length(account_hash) + 1 + } + BidAddr::Delegator { + validator, + delegator, + } => ToBytes::serialized_length(validator) + ToBytes::serialized_length(delegator) + 1, + } + } + + /// Returns the BiddAddrTag of this instance. + pub fn tag(&self) -> BidAddrTag { + match self { + BidAddr::Unified(_) => BidAddrTag::Unified, + BidAddr::Validator(_) => BidAddrTag::Validator, + BidAddr::Delegator { .. } => BidAddrTag::Delegator, + } + } +} + +impl ToBytes for BidAddr { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.push(self.tag() as u8); + buffer.append(&mut self.validator_account_hash().to_bytes()?); + if let Some(delegator) = self.maybe_delegator_account_hash() { + buffer.append(&mut delegator.to_bytes()?); + } + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.serialized_length() + } +} + +impl FromBytes for BidAddr { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder): (u8, &[u8]) = FromBytes::from_bytes(bytes)?; + match tag { + tag if tag == BidAddrTag::Unified as u8 => AccountHash::from_bytes(remainder) + .map(|(account_hash, remainder)| (BidAddr::Unified(account_hash), remainder)), + tag if tag == BidAddrTag::Validator as u8 => AccountHash::from_bytes(remainder) + .map(|(account_hash, remainder)| (BidAddr::Validator(account_hash), remainder)), + tag if tag == BidAddrTag::Delegator as u8 => { + let (validator, remainder) = AccountHash::from_bytes(remainder)?; + let (delegator, remainder) = AccountHash::from_bytes(remainder)?; + Ok(( + BidAddr::Delegator { + 
validator, + delegator, + }, + remainder, + )) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +impl Default for BidAddr { + fn default() -> Self { + BidAddr::Validator(AccountHash::default()) + } +} + +impl From for Key { + fn from(bid_addr: BidAddr) -> Self { + Key::BidAddr(bid_addr) + } +} + +impl From for BidAddr { + fn from(account_hash: AccountHash) -> Self { + BidAddr::Validator(account_hash) + } +} + +impl From for BidAddr { + fn from(public_key: PublicKey) -> Self { + BidAddr::Validator(public_key.to_account_hash()) + } +} + +impl Display for BidAddr { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + let tag = self.tag(); + match self { + BidAddr::Unified(account_hash) | BidAddr::Validator(account_hash) => { + write!(f, "{}{}", tag, account_hash) + } + BidAddr::Delegator { + validator, + delegator, + } => write!(f, "{}{}{}", tag, validator, delegator), + } + } +} + +impl Debug for BidAddr { + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + match self { + BidAddr::Unified(validator) => write!(f, "BidAddr::Unified({:?})", validator), + BidAddr::Validator(validator) => write!(f, "BidAddr::Validator({:?})", validator), + BidAddr::Delegator { + validator, + delegator, + } => { + write!(f, "BidAddr::Delegator({:?}{:?})", validator, delegator) + } + } + } +} + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> BidAddr { + BidAddr::Validator(AccountHash::new(rng.gen())) + } +} + +#[cfg(test)] +mod tests { + use crate::{bytesrepr, system::auction::BidAddr}; + + #[test] + fn serialization_roundtrip() { + let bid_addr = BidAddr::legacy([1; 32]); + bytesrepr::test_serialization_roundtrip(&bid_addr); + let bid_addr = BidAddr::new_validator_addr([1; 32]); + bytesrepr::test_serialization_roundtrip(&bid_addr); + let bid_addr = BidAddr::new_delegator_addr(([1; 32], [2; 32])); + bytesrepr::test_serialization_roundtrip(&bid_addr); + } +} + +#[cfg(test)] +mod prop_test_validator_addr { + use proptest::prelude::*; + + use 
crate::{bytesrepr, gens}; + + proptest! { + #[test] + fn test_value_bid_addr_validator(validator_bid_addr in gens::bid_addr_validator_arb()) { + bytesrepr::test_serialization_roundtrip(&validator_bid_addr); + } + } +} + +#[cfg(test)] +mod prop_test_delegator_addr { + use proptest::prelude::*; + + use crate::{bytesrepr, gens}; + + proptest! { + #[test] + fn test_value_bid_addr_delegator(delegator_bid_addr in gens::bid_addr_delegator_arb()) { + bytesrepr::test_serialization_roundtrip(&delegator_bid_addr); + } + } +} diff --git a/casper_types_ver_2_0/src/system/auction/bid_kind.rs b/casper_types_ver_2_0/src/system/auction/bid_kind.rs new file mode 100644 index 00000000..865f3ba9 --- /dev/null +++ b/casper_types_ver_2_0/src/system/auction/bid_kind.rs @@ -0,0 +1,323 @@ +use crate::{ + bytesrepr, + bytesrepr::{FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + system::auction::{bid::VestingSchedule, Bid, Delegator, ValidatorBid}, + PublicKey, URef, U512, +}; + +use crate::system::auction::BidAddr; +use alloc::{boxed::Box, vec::Vec}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +/// BidKindTag variants. +#[allow(clippy::large_enum_variant)] +#[repr(u8)] +#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Clone)] +pub enum BidKindTag { + /// Unified bid. + Unified = 0, + /// Validator bid. + Validator = 1, + /// Delegator bid. + Delegator = 2, +} + +/// Auction bid variants. +#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Clone)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum BidKind { + /// A unified record indexed on validator data, with an embedded collection of all delegator + /// bids assigned to that validator. The Unified variant is for legacy retrograde support, new + /// instances will not be created going forward. 
+ Unified(Box), + /// A bid record containing only validator data. + Validator(Box), + /// A bid record containing only delegator data. + Delegator(Box), +} + +impl BidKind { + /// Returns validator public key. + pub fn validator_public_key(&self) -> PublicKey { + match self { + BidKind::Unified(bid) => bid.validator_public_key().clone(), + BidKind::Validator(validator_bid) => validator_bid.validator_public_key().clone(), + BidKind::Delegator(delegator_bid) => delegator_bid.validator_public_key().clone(), + } + } + + /// Returns delegator public key, if any. + pub fn maybe_delegator_public_key(&self) -> Option { + match self { + BidKind::Unified(_) | BidKind::Validator(_) => None, + BidKind::Delegator(delegator_bid) => Some(delegator_bid.delegator_public_key().clone()), + } + } + + /// Returns BidAddr. + pub fn bid_addr(&self) -> BidAddr { + match self { + BidKind::Unified(bid) => BidAddr::Unified(bid.validator_public_key().to_account_hash()), + BidKind::Validator(validator_bid) => { + BidAddr::Validator(validator_bid.validator_public_key().to_account_hash()) + } + BidKind::Delegator(delegator_bid) => { + let validator = delegator_bid.validator_public_key().to_account_hash(); + let delegator = delegator_bid.delegator_public_key().to_account_hash(); + BidAddr::Delegator { + validator, + delegator, + } + } + } + } + + /// Is this instance a unified bid?. + pub fn is_unified(&self) -> bool { + match self { + BidKind::Unified(_) => true, + BidKind::Validator(_) | BidKind::Delegator(_) => false, + } + } + + /// Is this instance a validator bid?. + pub fn is_validator(&self) -> bool { + match self { + BidKind::Validator(_) => true, + BidKind::Unified(_) | BidKind::Delegator(_) => false, + } + } + + /// Is this instance a delegator bid?. + pub fn is_delegator(&self) -> bool { + match self { + BidKind::Delegator(_) => true, + BidKind::Unified(_) | BidKind::Validator(_) => false, + } + } + + /// The staked amount. 
+ pub fn staked_amount(&self) -> U512 { + match self { + BidKind::Unified(bid) => *bid.staked_amount(), + BidKind::Validator(validator_bid) => validator_bid.staked_amount(), + BidKind::Delegator(delegator) => delegator.staked_amount(), + } + } + + /// The bonding purse. + pub fn bonding_purse(&self) -> URef { + match self { + BidKind::Unified(bid) => *bid.bonding_purse(), + BidKind::Validator(validator_bid) => *validator_bid.bonding_purse(), + BidKind::Delegator(delegator) => *delegator.bonding_purse(), + } + } + + /// The delegator public key, if relevant. + pub fn delegator_public_key(&self) -> Option { + match self { + BidKind::Unified(_) | BidKind::Validator(_) => None, + BidKind::Delegator(delegator) => Some(delegator.delegator_public_key().clone()), + } + } + + /// Is this bid inactive? + pub fn inactive(&self) -> bool { + match self { + BidKind::Unified(bid) => bid.inactive(), + BidKind::Validator(validator_bid) => validator_bid.inactive(), + BidKind::Delegator(delegator) => delegator.staked_amount().is_zero(), + } + } + + /// Checks if a bid is still locked under a vesting schedule. + /// + /// Returns true if a timestamp falls below the initial lockup period + 91 days release + /// schedule, otherwise false. + pub fn is_locked(&self, timestamp_millis: u64) -> bool { + match self { + BidKind::Unified(bid) => bid.is_locked(timestamp_millis), + BidKind::Validator(validator_bid) => validator_bid.is_locked(timestamp_millis), + BidKind::Delegator(delegator) => delegator.is_locked(timestamp_millis), + } + } + + /// Checks if a bid is still locked under a vesting schedule. + /// + /// Returns true if a timestamp falls below the initial lockup period + 91 days release + /// schedule, otherwise false. 
+ pub fn is_locked_with_vesting_schedule( + &self, + timestamp_millis: u64, + vesting_schedule_period_millis: u64, + ) -> bool { + match self { + BidKind::Unified(bid) => bid + .is_locked_with_vesting_schedule(timestamp_millis, vesting_schedule_period_millis), + BidKind::Validator(validator_bid) => validator_bid + .is_locked_with_vesting_schedule(timestamp_millis, vesting_schedule_period_millis), + BidKind::Delegator(delegator) => delegator + .is_locked_with_vesting_schedule(timestamp_millis, vesting_schedule_period_millis), + } + } + + /// Returns a reference to the vesting schedule of the provided bid. `None` if a non-genesis + /// validator. + pub fn vesting_schedule(&self) -> Option<&VestingSchedule> { + match self { + BidKind::Unified(bid) => bid.vesting_schedule(), + BidKind::Validator(validator_bid) => validator_bid.vesting_schedule(), + BidKind::Delegator(delegator) => delegator.vesting_schedule(), + } + } + + /// BidKindTag. + pub fn tag(&self) -> BidKindTag { + match self { + BidKind::Unified(_) => BidKindTag::Unified, + BidKind::Validator(_) => BidKindTag::Validator, + BidKind::Delegator(_) => BidKindTag::Delegator, + } + } +} + +impl ToBytes for BidKind { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + let (tag, mut serialized_data) = match self { + BidKind::Unified(bid) => (BidKindTag::Unified, bid.to_bytes()?), + BidKind::Validator(validator_bid) => (BidKindTag::Validator, validator_bid.to_bytes()?), + BidKind::Delegator(delegator_bid) => (BidKindTag::Delegator, delegator_bid.to_bytes()?), + }; + result.push(tag as u8); + result.append(&mut serialized_data); + Ok(result) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + BidKind::Unified(bid) => bid.serialized_length(), + BidKind::Validator(validator_bid) => validator_bid.serialized_length(), + BidKind::Delegator(delegator_bid) => delegator_bid.serialized_length(), + } + } + + fn write_bytes(&self, 
writer: &mut Vec) -> Result<(), bytesrepr::Error> { + writer.push(self.tag() as u8); + match self { + //StoredValue::CLValue(cl_value) => cl_value.write_bytes(writer)?, + BidKind::Unified(bid) => bid.write_bytes(writer)?, + BidKind::Validator(validator_bid) => validator_bid.write_bytes(writer)?, + BidKind::Delegator(delegator_bid) => delegator_bid.write_bytes(writer)?, + }; + Ok(()) + } +} + +impl FromBytes for BidKind { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder): (u8, &[u8]) = FromBytes::from_bytes(bytes)?; + match tag { + tag if tag == BidKindTag::Unified as u8 => Bid::from_bytes(remainder) + .map(|(bid, remainder)| (BidKind::Unified(Box::new(bid)), remainder)), + tag if tag == BidKindTag::Validator as u8 => { + ValidatorBid::from_bytes(remainder).map(|(validator_bid, remainder)| { + (BidKind::Validator(Box::new(validator_bid)), remainder) + }) + } + tag if tag == BidKindTag::Delegator as u8 => { + Delegator::from_bytes(remainder).map(|(delegator_bid, remainder)| { + (BidKind::Delegator(Box::new(delegator_bid)), remainder) + }) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use super::{BidKind, *}; + use crate::{bytesrepr, system::auction::DelegationRate, AccessRights, SecretKey}; + + #[test] + fn serialization_roundtrip() { + let validator_public_key = PublicKey::from( + &SecretKey::ed25519_from_bytes([0u8; SecretKey::ED25519_LENGTH]).unwrap(), + ); + let bonding_purse = URef::new([42; 32], AccessRights::READ_ADD_WRITE); + let bid = Bid::unlocked( + validator_public_key.clone(), + bonding_purse, + U512::one(), + DelegationRate::max_value(), + ); + let unified_bid = BidKind::Unified(Box::new(bid.clone())); + let validator_bid = ValidatorBid::from(bid.clone()); + + let delegator_public_key = PublicKey::from( + &SecretKey::ed25519_from_bytes([1u8; SecretKey::ED25519_LENGTH]).unwrap(), + ); + let delegator = Delegator::unlocked( + delegator_public_key, + U512::one(), + 
bonding_purse, + validator_public_key, + ); + let delegator_bid = BidKind::Delegator(Box::new(delegator)); + + bytesrepr::test_serialization_roundtrip(&bid); + bytesrepr::test_serialization_roundtrip(&unified_bid); + bytesrepr::test_serialization_roundtrip(&validator_bid); + bytesrepr::test_serialization_roundtrip(&delegator_bid); + } +} + +#[cfg(test)] +mod prop_test_bid_kind_unified { + use proptest::prelude::*; + + use crate::{bytesrepr, gens}; + + proptest! { + #[test] + fn test_value_bid_kind_unified(bid_kind in gens::unified_bid_arb(0..3)) { + bytesrepr::test_serialization_roundtrip(&bid_kind); + } + } +} + +#[cfg(test)] +mod prop_test_bid_kind_validator { + use proptest::prelude::*; + + use crate::{bytesrepr, gens}; + + proptest! { + #[test] + fn test_value_bid_kind_validator(bid_kind in gens::validator_bid_arb()) { + bytesrepr::test_serialization_roundtrip(&bid_kind); + } + } +} + +#[cfg(test)] +mod prop_test_bid_kind_delegator { + use proptest::prelude::*; + + use crate::{bytesrepr, gens}; + + proptest! { + #[test] + fn test_value_bid_kind_delegator(bid_kind in gens::delegator_bid_arb()) { + bytesrepr::test_serialization_roundtrip(&bid_kind); + } + } +} diff --git a/casper_types_ver_2_0/src/system/auction/constants.rs b/casper_types_ver_2_0/src/system/auction/constants.rs new file mode 100644 index 00000000..f3038f8e --- /dev/null +++ b/casper_types_ver_2_0/src/system/auction/constants.rs @@ -0,0 +1,98 @@ +use crate::EraId; + +use super::DelegationRate; + +/// Initial value of era id we start at genesis. +pub const INITIAL_ERA_ID: EraId = EraId::new(0); + +/// Initial value of era end timestamp. +pub const INITIAL_ERA_END_TIMESTAMP_MILLIS: u64 = 0; + +/// Delegation rate is a fraction between 0-1. Validator sets the delegation rate +/// in integer terms, which is then divided by the denominator to obtain the fraction. 
+pub const DELEGATION_RATE_DENOMINATOR: DelegationRate = 100; + +/// We use one trillion as a block reward unit because it's large enough to allow precise +/// fractions, and small enough for many block rewards to fit into a u64. +pub const BLOCK_REWARD: u64 = 1_000_000_000_000; + +/// Named constant for `amount`. +pub const ARG_AMOUNT: &str = "amount"; +/// Named constant for `delegation_rate`. +pub const ARG_DELEGATION_RATE: &str = "delegation_rate"; +/// Named constant for `account_hash`. +pub const ARG_PUBLIC_KEY: &str = "public_key"; +/// Named constant for `validator`. +pub const ARG_VALIDATOR: &str = "validator"; +/// Named constant for `delegator`. +pub const ARG_DELEGATOR: &str = "delegator"; +/// Named constant for `validator_purse`. +pub const ARG_VALIDATOR_PURSE: &str = "validator_purse"; +/// Named constant for `validator_keys`. +pub const ARG_VALIDATOR_KEYS: &str = "validator_keys"; +/// Named constant for `validator_public_keys`. +pub const ARG_VALIDATOR_PUBLIC_KEYS: &str = "validator_public_keys"; +/// Named constant for `new_validator`. +pub const ARG_NEW_VALIDATOR: &str = "new_validator"; +/// Named constant for `era_id`. +pub const ARG_ERA_ID: &str = "era_id"; +/// Named constant for `validator_public_key`. +pub const ARG_VALIDATOR_PUBLIC_KEY: &str = "validator_public_key"; +/// Named constant for `delegator_public_key`. +pub const ARG_DELEGATOR_PUBLIC_KEY: &str = "delegator_public_key"; +/// Named constant for `validator_slots` argument. 
+pub const ARG_VALIDATOR_SLOTS: &str = VALIDATOR_SLOTS_KEY; +/// Named constant for `mint_contract_package_hash` +pub const ARG_MINT_CONTRACT_PACKAGE_HASH: &str = "mint_contract_package_hash"; +/// Named constant for `genesis_validators` +pub const ARG_GENESIS_VALIDATORS: &str = "genesis_validators"; +/// Named constant of `auction_delay` +pub const ARG_AUCTION_DELAY: &str = "auction_delay"; +/// Named constant for `locked_funds_period` +pub const ARG_LOCKED_FUNDS_PERIOD: &str = "locked_funds_period"; +/// Named constant for `unbonding_delay` +pub const ARG_UNBONDING_DELAY: &str = "unbonding_delay"; +/// Named constant for `era_end_timestamp_millis`; +pub const ARG_ERA_END_TIMESTAMP_MILLIS: &str = "era_end_timestamp_millis"; +/// Named constant for `evicted_validators`; +pub const ARG_EVICTED_VALIDATORS: &str = "evicted_validators"; +/// Named constant for `rewards_map`; +pub const ARG_REWARDS_MAP: &str = "rewards_map"; + +/// Named constant for method `get_era_validators`. +pub const METHOD_GET_ERA_VALIDATORS: &str = "get_era_validators"; +/// Named constant for method `add_bid`. +pub const METHOD_ADD_BID: &str = "add_bid"; +/// Named constant for method `withdraw_bid`. +pub const METHOD_WITHDRAW_BID: &str = "withdraw_bid"; +/// Named constant for method `delegate`. +pub const METHOD_DELEGATE: &str = "delegate"; +/// Named constant for method `undelegate`. +pub const METHOD_UNDELEGATE: &str = "undelegate"; +/// Named constant for method `redelegate`. +pub const METHOD_REDELEGATE: &str = "redelegate"; +/// Named constant for method `run_auction`. +pub const METHOD_RUN_AUCTION: &str = "run_auction"; +/// Named constant for method `slash`. +pub const METHOD_SLASH: &str = "slash"; +/// Named constant for method `distribute`. +pub const METHOD_DISTRIBUTE: &str = "distribute"; +/// Named constant for method `read_era_id`. +pub const METHOD_READ_ERA_ID: &str = "read_era_id"; +/// Named constant for method `activate_bid`. 
+pub const METHOD_ACTIVATE_BID: &str = "activate_bid"; + +/// Storage for `EraId`. +pub const ERA_ID_KEY: &str = "era_id"; +/// Storage for era-end timestamp. +pub const ERA_END_TIMESTAMP_MILLIS_KEY: &str = "era_end_timestamp_millis"; +/// Storage for `SeigniorageRecipientsSnapshot`. +pub const SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY: &str = "seigniorage_recipients_snapshot"; +/// Total validator slots allowed. +pub const VALIDATOR_SLOTS_KEY: &str = "validator_slots"; +/// Amount of auction delay. +pub const AUCTION_DELAY_KEY: &str = "auction_delay"; +/// Default lock period for new bid entries represented in eras. +pub const LOCKED_FUNDS_PERIOD_KEY: &str = "locked_funds_period"; +/// Unbonding delay expressed in eras. +pub const UNBONDING_DELAY_KEY: &str = "unbonding_delay"; diff --git a/casper_types_ver_2_0/src/system/auction/delegator.rs b/casper_types_ver_2_0/src/system/auction/delegator.rs new file mode 100644 index 00000000..ff672353 --- /dev/null +++ b/casper_types_ver_2_0/src/system/auction/delegator.rs @@ -0,0 +1,309 @@ +use alloc::vec::Vec; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + system::auction::{bid::VestingSchedule, Error, VESTING_SCHEDULE_LENGTH_MILLIS}, + CLType, CLTyped, PublicKey, URef, U512, +}; + +/// Represents a party delegating their stake to a validator (or "delegatee") +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct Delegator { + delegator_public_key: PublicKey, + staked_amount: U512, + bonding_purse: URef, + validator_public_key: PublicKey, + vesting_schedule: Option, +} + +impl Delegator { + /// Creates a new [`Delegator`] + pub fn unlocked( + 
delegator_public_key: PublicKey, + staked_amount: U512, + bonding_purse: URef, + validator_public_key: PublicKey, + ) -> Self { + let vesting_schedule = None; + Delegator { + delegator_public_key, + staked_amount, + bonding_purse, + validator_public_key, + vesting_schedule, + } + } + + /// Creates new instance of a [`Delegator`] with locked funds. + pub fn locked( + delegator_public_key: PublicKey, + staked_amount: U512, + bonding_purse: URef, + validator_public_key: PublicKey, + release_timestamp_millis: u64, + ) -> Self { + let vesting_schedule = Some(VestingSchedule::new(release_timestamp_millis)); + Delegator { + delegator_public_key, + staked_amount, + bonding_purse, + validator_public_key, + vesting_schedule, + } + } + + /// Returns public key of the delegator. + pub fn delegator_public_key(&self) -> &PublicKey { + &self.delegator_public_key + } + + /// Checks if a bid is still locked under a vesting schedule. + /// + /// Returns true if a timestamp falls below the initial lockup period + 91 days release + /// schedule, otherwise false. + pub fn is_locked(&self, timestamp_millis: u64) -> bool { + self.is_locked_with_vesting_schedule(timestamp_millis, VESTING_SCHEDULE_LENGTH_MILLIS) + } + + /// Checks if a bid is still locked under a vesting schedule. + /// + /// Returns true if a timestamp falls below the initial lockup period + 91 days release + /// schedule, otherwise false. 
+ pub fn is_locked_with_vesting_schedule( + &self, + timestamp_millis: u64, + vesting_schedule_period_millis: u64, + ) -> bool { + match &self.vesting_schedule { + Some(vesting_schedule) => { + vesting_schedule.is_vesting(timestamp_millis, vesting_schedule_period_millis) + } + None => false, + } + } + + /// Returns the staked amount + pub fn staked_amount(&self) -> U512 { + self.staked_amount + } + + /// Returns the mutable staked amount + pub fn staked_amount_mut(&mut self) -> &mut U512 { + &mut self.staked_amount + } + + /// Returns the bonding purse + pub fn bonding_purse(&self) -> &URef { + &self.bonding_purse + } + + /// Returns delegatee + pub fn validator_public_key(&self) -> &PublicKey { + &self.validator_public_key + } + + /// Decreases the stake of the provided bid + pub fn decrease_stake( + &mut self, + amount: U512, + era_end_timestamp_millis: u64, + ) -> Result { + let updated_staked_amount = self + .staked_amount + .checked_sub(amount) + .ok_or(Error::InvalidAmount)?; + + let vesting_schedule = match self.vesting_schedule.as_ref() { + Some(vesting_schedule) => vesting_schedule, + None => { + self.staked_amount = updated_staked_amount; + return Ok(updated_staked_amount); + } + }; + + match vesting_schedule.locked_amount(era_end_timestamp_millis) { + Some(locked_amount) if updated_staked_amount < locked_amount => { + Err(Error::DelegatorFundsLocked) + } + None => { + // If `None`, then the locked amounts table has yet to be initialized (likely + // pre-90 day mark) + Err(Error::DelegatorFundsLocked) + } + Some(_) => { + self.staked_amount = updated_staked_amount; + Ok(updated_staked_amount) + } + } + } + + /// Increases the stake of the provided bid + pub fn increase_stake(&mut self, amount: U512) -> Result { + let updated_staked_amount = self + .staked_amount + .checked_add(amount) + .ok_or(Error::InvalidAmount)?; + + self.staked_amount = updated_staked_amount; + + Ok(updated_staked_amount) + } + + /// Returns a reference to the vesting schedule of the 
provided + /// delegator bid. `None` if a non-genesis validator. + pub fn vesting_schedule(&self) -> Option<&VestingSchedule> { + self.vesting_schedule.as_ref() + } + + /// Returns a mutable reference to the vesting schedule of the provided + /// delegator bid. `None` if a non-genesis validator. + pub fn vesting_schedule_mut(&mut self) -> Option<&mut VestingSchedule> { + self.vesting_schedule.as_mut() + } + + /// Creates a new inactive instance of a bid with 0 staked amount. + pub fn empty( + validator_public_key: PublicKey, + delegator_public_key: PublicKey, + bonding_purse: URef, + ) -> Self { + let vesting_schedule = None; + let staked_amount = 0.into(); + Self { + validator_public_key, + delegator_public_key, + bonding_purse, + staked_amount, + vesting_schedule, + } + } +} + +impl CLTyped for Delegator { + fn cl_type() -> CLType { + CLType::Any + } +} + +impl ToBytes for Delegator { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.extend(self.delegator_public_key.to_bytes()?); + buffer.extend(self.staked_amount.to_bytes()?); + buffer.extend(self.bonding_purse.to_bytes()?); + buffer.extend(self.validator_public_key.to_bytes()?); + buffer.extend(self.vesting_schedule.to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.delegator_public_key.serialized_length() + + self.staked_amount.serialized_length() + + self.bonding_purse.serialized_length() + + self.validator_public_key.serialized_length() + + self.vesting_schedule.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.delegator_public_key.write_bytes(writer)?; + self.staked_amount.write_bytes(writer)?; + self.bonding_purse.write_bytes(writer)?; + self.validator_public_key.write_bytes(writer)?; + self.vesting_schedule.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for Delegator { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + 
let (delegator_public_key, bytes) = PublicKey::from_bytes(bytes)?; + let (staked_amount, bytes) = U512::from_bytes(bytes)?; + let (bonding_purse, bytes) = URef::from_bytes(bytes)?; + let (validator_public_key, bytes) = PublicKey::from_bytes(bytes)?; + let (vesting_schedule, bytes) = FromBytes::from_bytes(bytes)?; + Ok(( + Delegator { + delegator_public_key, + staked_amount, + bonding_purse, + validator_public_key, + vesting_schedule, + }, + bytes, + )) + } +} + +impl Display for Delegator { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "delegator {{ {} {} motes, bonding purse {}, validator {} }}", + self.delegator_public_key, + self.staked_amount, + self.bonding_purse, + self.validator_public_key + ) + } +} + +#[cfg(test)] +mod tests { + use crate::{ + bytesrepr, system::auction::Delegator, AccessRights, PublicKey, SecretKey, URef, U512, + }; + + #[test] + fn serialization_roundtrip() { + let staked_amount = U512::one(); + let bonding_purse = URef::new([42; 32], AccessRights::READ_ADD_WRITE); + let delegator_public_key: PublicKey = PublicKey::from( + &SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(), + ); + + let validator_public_key: PublicKey = PublicKey::from( + &SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]).unwrap(), + ); + let unlocked_delegator = Delegator::unlocked( + delegator_public_key.clone(), + staked_amount, + bonding_purse, + validator_public_key.clone(), + ); + bytesrepr::test_serialization_roundtrip(&unlocked_delegator); + + let release_timestamp_millis = 42; + let locked_delegator = Delegator::locked( + delegator_public_key, + staked_amount, + bonding_purse, + validator_public_key, + release_timestamp_millis, + ); + bytesrepr::test_serialization_roundtrip(&locked_delegator); + } +} + +#[cfg(test)] +mod prop_tests { + use proptest::prelude::*; + + use crate::{bytesrepr, gens}; + + proptest! 
{ + #[test] + fn test_value_bid(bid in gens::delegator_arb()) { + bytesrepr::test_serialization_roundtrip(&bid); + } + } +} diff --git a/casper_types_ver_2_0/src/system/auction/entry_points.rs b/casper_types_ver_2_0/src/system/auction/entry_points.rs new file mode 100644 index 00000000..252550e5 --- /dev/null +++ b/casper_types_ver_2_0/src/system/auction/entry_points.rs @@ -0,0 +1,142 @@ +use crate::{ + system::auction::{ + DelegationRate, ValidatorWeights, ARG_AMOUNT, ARG_DELEGATION_RATE, ARG_DELEGATOR, + ARG_ERA_END_TIMESTAMP_MILLIS, ARG_NEW_VALIDATOR, ARG_PUBLIC_KEY, ARG_VALIDATOR, + ARG_VALIDATOR_PUBLIC_KEY, METHOD_ACTIVATE_BID, METHOD_ADD_BID, METHOD_DELEGATE, + METHOD_DISTRIBUTE, METHOD_GET_ERA_VALIDATORS, METHOD_READ_ERA_ID, METHOD_REDELEGATE, + METHOD_RUN_AUCTION, METHOD_SLASH, METHOD_UNDELEGATE, METHOD_WITHDRAW_BID, + }, + CLType, CLTyped, EntryPoint, EntryPointAccess, EntryPointType, EntryPoints, Parameter, + PublicKey, U512, +}; + +use super::ARG_REWARDS_MAP; + +/// Creates auction contract entry points. 
+pub fn auction_entry_points() -> EntryPoints { + let mut entry_points = EntryPoints::new(); + + let entry_point = EntryPoint::new( + METHOD_GET_ERA_VALIDATORS, + vec![], + Option::::cl_type(), + EntryPointAccess::Public, + EntryPointType::AddressableEntity, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntryPoint::new( + METHOD_ADD_BID, + vec![ + Parameter::new(ARG_PUBLIC_KEY, PublicKey::cl_type()), + Parameter::new(ARG_DELEGATION_RATE, DelegationRate::cl_type()), + Parameter::new(ARG_AMOUNT, U512::cl_type()), + ], + U512::cl_type(), + EntryPointAccess::Public, + EntryPointType::AddressableEntity, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntryPoint::new( + METHOD_WITHDRAW_BID, + vec![ + Parameter::new(ARG_PUBLIC_KEY, PublicKey::cl_type()), + Parameter::new(ARG_AMOUNT, U512::cl_type()), + ], + U512::cl_type(), + EntryPointAccess::Public, + EntryPointType::AddressableEntity, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntryPoint::new( + METHOD_DELEGATE, + vec![ + Parameter::new(ARG_DELEGATOR, PublicKey::cl_type()), + Parameter::new(ARG_VALIDATOR, PublicKey::cl_type()), + Parameter::new(ARG_AMOUNT, U512::cl_type()), + ], + U512::cl_type(), + EntryPointAccess::Public, + EntryPointType::AddressableEntity, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntryPoint::new( + METHOD_UNDELEGATE, + vec![ + Parameter::new(ARG_DELEGATOR, PublicKey::cl_type()), + Parameter::new(ARG_VALIDATOR, PublicKey::cl_type()), + Parameter::new(ARG_AMOUNT, U512::cl_type()), + ], + U512::cl_type(), + EntryPointAccess::Public, + EntryPointType::AddressableEntity, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntryPoint::new( + METHOD_REDELEGATE, + vec![ + Parameter::new(ARG_DELEGATOR, PublicKey::cl_type()), + Parameter::new(ARG_VALIDATOR, PublicKey::cl_type()), + Parameter::new(ARG_AMOUNT, U512::cl_type()), + Parameter::new(ARG_NEW_VALIDATOR, 
PublicKey::cl_type()), + ], + U512::cl_type(), + EntryPointAccess::Public, + EntryPointType::AddressableEntity, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntryPoint::new( + METHOD_RUN_AUCTION, + vec![Parameter::new(ARG_ERA_END_TIMESTAMP_MILLIS, u64::cl_type())], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::AddressableEntity, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntryPoint::new( + METHOD_SLASH, + vec![], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::AddressableEntity, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntryPoint::new( + METHOD_DISTRIBUTE, + vec![Parameter::new( + ARG_REWARDS_MAP, + CLType::map(CLType::PublicKey, CLType::U512), + )], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::AddressableEntity, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntryPoint::new( + METHOD_READ_ERA_ID, + vec![], + CLType::U64, + EntryPointAccess::Public, + EntryPointType::AddressableEntity, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntryPoint::new( + METHOD_ACTIVATE_BID, + vec![Parameter::new(ARG_VALIDATOR_PUBLIC_KEY, CLType::PublicKey)], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::AddressableEntity, + ); + entry_points.add_entry_point(entry_point); + + entry_points +} diff --git a/casper_types_ver_2_0/src/system/auction/era_info.rs b/casper_types_ver_2_0/src/system/auction/era_info.rs new file mode 100644 index 00000000..d9cb9e4b --- /dev/null +++ b/casper_types_ver_2_0/src/system/auction/era_info.rs @@ -0,0 +1,311 @@ +use alloc::{boxed::Box, vec::Vec}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + CLType, CLTyped, PublicKey, U512, +}; + +const SEIGNIORAGE_ALLOCATION_VALIDATOR_TAG: u8 = 0; +const 
SEIGNIORAGE_ALLOCATION_DELEGATOR_TAG: u8 = 1; + +/// Information about a seigniorage allocation +#[derive(Debug, Clone, Ord, PartialOrd, Eq, PartialEq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub enum SeigniorageAllocation { + /// Info about a seigniorage allocation for a validator + Validator { + /// Validator's public key + validator_public_key: PublicKey, + /// Allocated amount + amount: U512, + }, + /// Info about a seigniorage allocation for a delegator + Delegator { + /// Delegator's public key + delegator_public_key: PublicKey, + /// Validator's public key + validator_public_key: PublicKey, + /// Allocated amount + amount: U512, + }, +} + +impl SeigniorageAllocation { + /// Constructs a [`SeigniorageAllocation::Validator`] + pub const fn validator(validator_public_key: PublicKey, amount: U512) -> Self { + SeigniorageAllocation::Validator { + validator_public_key, + amount, + } + } + + /// Constructs a [`SeigniorageAllocation::Delegator`] + pub const fn delegator( + delegator_public_key: PublicKey, + validator_public_key: PublicKey, + amount: U512, + ) -> Self { + SeigniorageAllocation::Delegator { + delegator_public_key, + validator_public_key, + amount, + } + } + + /// Returns the amount for a given seigniorage allocation + pub fn amount(&self) -> &U512 { + match self { + SeigniorageAllocation::Validator { amount, .. } => amount, + SeigniorageAllocation::Delegator { amount, .. } => amount, + } + } + + fn tag(&self) -> u8 { + match self { + SeigniorageAllocation::Validator { .. } => SEIGNIORAGE_ALLOCATION_VALIDATOR_TAG, + SeigniorageAllocation::Delegator { .. 
} => SEIGNIORAGE_ALLOCATION_DELEGATOR_TAG, + } + } +} + +impl ToBytes for SeigniorageAllocation { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.tag().serialized_length() + + match self { + SeigniorageAllocation::Validator { + validator_public_key, + amount, + } => validator_public_key.serialized_length() + amount.serialized_length(), + SeigniorageAllocation::Delegator { + delegator_public_key, + validator_public_key, + amount, + } => { + delegator_public_key.serialized_length() + + validator_public_key.serialized_length() + + amount.serialized_length() + } + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + writer.push(self.tag()); + match self { + SeigniorageAllocation::Validator { + validator_public_key, + amount, + } => { + validator_public_key.write_bytes(writer)?; + amount.write_bytes(writer)?; + } + SeigniorageAllocation::Delegator { + delegator_public_key, + validator_public_key, + amount, + } => { + delegator_public_key.write_bytes(writer)?; + validator_public_key.write_bytes(writer)?; + amount.write_bytes(writer)?; + } + } + Ok(()) + } +} + +impl FromBytes for SeigniorageAllocation { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, rem) = ::from_bytes(bytes)?; + match tag { + SEIGNIORAGE_ALLOCATION_VALIDATOR_TAG => { + let (validator_public_key, rem) = PublicKey::from_bytes(rem)?; + let (amount, rem) = U512::from_bytes(rem)?; + Ok(( + SeigniorageAllocation::validator(validator_public_key, amount), + rem, + )) + } + SEIGNIORAGE_ALLOCATION_DELEGATOR_TAG => { + let (delegator_public_key, rem) = PublicKey::from_bytes(rem)?; + let (validator_public_key, rem) = PublicKey::from_bytes(rem)?; + let (amount, rem) = U512::from_bytes(rem)?; + Ok(( + SeigniorageAllocation::delegator( + delegator_public_key, + validator_public_key, + 
amount, + ), + rem, + )) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +impl CLTyped for SeigniorageAllocation { + fn cl_type() -> CLType { + CLType::Any + } +} + +/// Auction metadata. Intended to be recorded at each era. +#[derive(Debug, Default, Clone, Ord, PartialOrd, Eq, PartialEq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct EraInfo { + seigniorage_allocations: Vec, +} + +impl EraInfo { + /// Constructs a [`EraInfo`]. + pub fn new() -> Self { + let seigniorage_allocations = Vec::new(); + EraInfo { + seigniorage_allocations, + } + } + + /// Returns a reference to the seigniorage allocations collection + pub fn seigniorage_allocations(&self) -> &Vec { + &self.seigniorage_allocations + } + + /// Returns a mutable reference to the seigniorage allocations collection + pub fn seigniorage_allocations_mut(&mut self) -> &mut Vec { + &mut self.seigniorage_allocations + } + + /// Returns all seigniorage allocations that match the provided public key + /// using the following criteria: + /// * If the match candidate is a validator allocation, the provided public key is matched + /// against the validator public key. + /// * If the match candidate is a delegator allocation, the provided public key is matched + /// against the delegator public key. + pub fn select(&self, public_key: PublicKey) -> impl Iterator { + self.seigniorage_allocations + .iter() + .filter(move |allocation| match allocation { + SeigniorageAllocation::Validator { + validator_public_key, + .. + } => public_key == *validator_public_key, + SeigniorageAllocation::Delegator { + delegator_public_key, + .. 
+ } => public_key == *delegator_public_key, + }) + } +} + +impl ToBytes for EraInfo { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + self.seigniorage_allocations().write_bytes(&mut result)?; + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.seigniorage_allocations.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.seigniorage_allocations().write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for EraInfo { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (seigniorage_allocations, rem) = Vec::::from_bytes(bytes)?; + Ok(( + EraInfo { + seigniorage_allocations, + }, + rem, + )) + } +} + +impl CLTyped for EraInfo { + fn cl_type() -> CLType { + CLType::List(Box::new(SeigniorageAllocation::cl_type())) + } +} + +/// Generators for [`SeigniorageAllocation`] and [`EraInfo`] +#[cfg(any(feature = "testing", feature = "gens", test))] +pub mod gens { + use proptest::{ + collection::{self, SizeRange}, + prelude::Strategy, + prop_oneof, + }; + + use crate::{ + crypto::gens::public_key_arb, + gens::u512_arb, + system::auction::{EraInfo, SeigniorageAllocation}, + }; + + fn seigniorage_allocation_validator_arb() -> impl Strategy { + (public_key_arb(), u512_arb()).prop_map(|(validator_public_key, amount)| { + SeigniorageAllocation::validator(validator_public_key, amount) + }) + } + + fn seigniorage_allocation_delegator_arb() -> impl Strategy { + (public_key_arb(), public_key_arb(), u512_arb()).prop_map( + |(delegator_public_key, validator_public_key, amount)| { + SeigniorageAllocation::delegator(delegator_public_key, validator_public_key, amount) + }, + ) + } + + /// Creates an arbitrary [`SeignorageAllocation`](crate::system::auction::SeigniorageAllocation) + pub fn seigniorage_allocation_arb() -> impl Strategy { + prop_oneof![ + seigniorage_allocation_validator_arb(), + seigniorage_allocation_delegator_arb() + ] + 
} + + /// Creates an arbitrary [`EraInfo`] + pub fn era_info_arb(size: impl Into) -> impl Strategy { + collection::vec(seigniorage_allocation_arb(), size).prop_map(|allocations| { + let mut era_info = EraInfo::new(); + *era_info.seigniorage_allocations_mut() = allocations; + era_info + }) + } +} + +#[cfg(test)] +mod tests { + use proptest::prelude::*; + + use crate::bytesrepr; + + use super::gens; + + proptest! { + #[test] + fn test_serialization_roundtrip(era_info in gens::era_info_arb(0..32)) { + bytesrepr::test_serialization_roundtrip(&era_info) + } + } +} diff --git a/casper_types_ver_2_0/src/system/auction/error.rs b/casper_types_ver_2_0/src/system/auction/error.rs new file mode 100644 index 00000000..0ddbb2f8 --- /dev/null +++ b/casper_types_ver_2_0/src/system/auction/error.rs @@ -0,0 +1,545 @@ +//! Home of the Auction contract's [`enum@Error`] type. +use alloc::vec::Vec; +use core::{ + convert::{TryFrom, TryInto}, + fmt::{self, Display, Formatter}, + result, +}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + CLType, CLTyped, +}; + +/// Errors which can occur while executing the Auction contract. +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] +#[cfg_attr(test, derive(strum::EnumIter))] +#[repr(u8)] +#[non_exhaustive] +pub enum Error { + /// Unable to find named key in the contract's named keys. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(0, Error::MissingKey as u8); + /// ``` + MissingKey = 0, + /// Given named key contains invalid variant. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(1, Error::InvalidKeyVariant as u8); + /// ``` + InvalidKeyVariant = 1, + /// Value under an uref does not exist. This means the installer contract didn't work properly. 
+ /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(2, Error::MissingValue as u8); + /// ``` + MissingValue = 2, + /// ABI serialization issue while reading or writing. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(3, Error::Serialization as u8); + /// ``` + Serialization = 3, + /// Triggered when contract was unable to transfer desired amount of tokens into a bid purse. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(4, Error::TransferToBidPurse as u8); + /// ``` + TransferToBidPurse = 4, + /// User passed invalid amount of tokens which might result in wrong values after calculation. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(5, Error::InvalidAmount as u8); + /// ``` + InvalidAmount = 5, + /// Unable to find a bid by account hash in `active_bids` map. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(6, Error::BidNotFound as u8); + /// ``` + BidNotFound = 6, + /// Validator's account hash was not found in the map. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(7, Error::ValidatorNotFound as u8); + /// ``` + ValidatorNotFound = 7, + /// Delegator's account hash was not found in the map. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(8, Error::DelegatorNotFound as u8); + /// ``` + DelegatorNotFound = 8, + /// Storage problem. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(9, Error::Storage as u8); + /// ``` + Storage = 9, + /// Raised when system is unable to bond. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(10, Error::Bonding as u8); + /// ``` + Bonding = 10, + /// Raised when system is unable to unbond. 
+ /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(11, Error::Unbonding as u8); + /// ``` + Unbonding = 11, + /// Raised when Mint contract is unable to release founder stake. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(12, Error::ReleaseFounderStake as u8); + /// ``` + ReleaseFounderStake = 12, + /// Raised when the system is unable to determine purse balance. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(13, Error::GetBalance as u8); + /// ``` + GetBalance = 13, + /// Raised when an entry point is called from invalid account context. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(14, Error::InvalidContext as u8); + /// ``` + InvalidContext = 14, + /// Raised whenever a validator's funds are still locked in but an attempt to withdraw was + /// made. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(15, Error::ValidatorFundsLocked as u8); + /// ``` + ValidatorFundsLocked = 15, + /// Raised when caller is not the system account. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(16, Error::InvalidCaller as u8); + /// ``` + InvalidCaller = 16, + /// Raised when function is supplied a public key that does match the caller's or does not have + /// an associated account. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(17, Error::InvalidPublicKey as u8); + /// ``` + InvalidPublicKey = 17, + /// Validator is not not bonded. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(18, Error::BondNotFound as u8); + /// ``` + BondNotFound = 18, + /// Unable to create purse. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(19, Error::CreatePurseFailed as u8); + /// ``` + CreatePurseFailed = 19, + /// Attempted to unbond an amount which was too large. 
+ /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(20, Error::UnbondTooLarge as u8); + /// ``` + UnbondTooLarge = 20, + /// Attempted to bond with a stake which was too small. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(21, Error::BondTooSmall as u8); + /// ``` + BondTooSmall = 21, + /// Raised when rewards are to be distributed to delegators, but the validator has no + /// delegations. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(22, Error::MissingDelegations as u8); + /// ``` + MissingDelegations = 22, + /// The validators returned by the consensus component should match + /// current era validators when distributing rewards. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(23, Error::MismatchedEraValidators as u8); + /// ``` + MismatchedEraValidators = 23, + /// Failed to mint reward tokens. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(24, Error::MintReward as u8); + /// ``` + MintReward = 24, + /// Invalid number of validator slots. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(25, Error::InvalidValidatorSlotsValue as u8); + /// ``` + InvalidValidatorSlotsValue = 25, + /// Failed to reduce total supply. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(26, Error::MintReduceTotalSupply as u8); + /// ``` + MintReduceTotalSupply = 26, + /// Triggered when contract was unable to transfer desired amount of tokens into a delegators + /// purse. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(27, Error::TransferToDelegatorPurse as u8); + /// ``` + TransferToDelegatorPurse = 27, + /// Triggered when contract was unable to perform a transfer to distribute validators reward. 
+ /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(28, Error::ValidatorRewardTransfer as u8); + /// ``` + ValidatorRewardTransfer = 28, + /// Triggered when contract was unable to perform a transfer to distribute delegators rewards. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(29, Error::DelegatorRewardTransfer as u8); + /// ``` + DelegatorRewardTransfer = 29, + /// Failed to transfer desired amount while withdrawing delegators reward. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(30, Error::WithdrawDelegatorReward as u8); + /// ``` + WithdrawDelegatorReward = 30, + /// Failed to transfer desired amount while withdrawing validators reward. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(31, Error::WithdrawValidatorReward as u8); + /// ``` + WithdrawValidatorReward = 31, + /// Failed to transfer desired amount into unbonding purse. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(32, Error::TransferToUnbondingPurse as u8); + /// ``` + TransferToUnbondingPurse = 32, + /// Failed to record era info. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(33, Error::RecordEraInfo as u8); + /// ``` + RecordEraInfo = 33, + /// Failed to create a [`crate::CLValue`]. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(34, Error::CLValue as u8); + /// ``` + CLValue = 34, + /// Missing seigniorage recipients for given era. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(35, Error::MissingSeigniorageRecipients as u8); + /// ``` + MissingSeigniorageRecipients = 35, + /// Failed to transfer funds. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(36, Error::Transfer as u8); + /// ``` + Transfer = 36, + /// Delegation rate exceeds rate. 
+ /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(37, Error::DelegationRateTooLarge as u8); + /// ``` + DelegationRateTooLarge = 37, + /// Raised whenever a delegator's funds are still locked in but an attempt to undelegate was + /// made. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(38, Error::DelegatorFundsLocked as u8); + /// ``` + DelegatorFundsLocked = 38, + /// An arithmetic overflow has occurred. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(39, Error::ArithmeticOverflow as u8); + /// ``` + ArithmeticOverflow = 39, + /// Execution exceeded the gas limit. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(40, Error::GasLimit as u8); + /// ``` + GasLimit = 40, + /// Too many frames on the runtime stack. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(41, Error::RuntimeStackOverflow as u8); + /// ``` + RuntimeStackOverflow = 41, + /// An error that is raised when there is an error in the mint contract that cannot + /// be mapped to a specific auction error. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(42, Error::MintError as u8); + /// ``` + MintError = 42, + /// The validator has exceeded the maximum amount of delegators allowed. + /// NOTE: This variant is no longer in use. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(43, Error::ExceededDelegatorSizeLimit as u8); + /// ``` + ExceededDelegatorSizeLimit = 43, + /// The global delegator capacity for the auction has been reached. + /// NOTE: This variant is no longer in use. + /// ``` + /// # use casper_types_ver_2_0::system::auction::Error; + /// assert_eq!(44, Error::GlobalDelegatorCapacityReached as u8); + /// ``` + GlobalDelegatorCapacityReached = 44, + /// The delegated amount is below the minimum allowed. 
    /// ```
    /// # use casper_types_ver_2_0::system::auction::Error;
    /// assert_eq!(45, Error::DelegationAmountTooSmall as u8);
    /// ```
    DelegationAmountTooSmall = 45,
    /// Runtime stack error.
    /// ```
    /// # use casper_types_ver_2_0::system::auction::Error;
    /// assert_eq!(46, Error::RuntimeStack as u8);
    /// ```
    RuntimeStack = 46,
    /// An error that is raised on private chain only when a `disable_auction_bids` flag is set to
    /// `true`.
    /// ```
    /// # use casper_types_ver_2_0::system::auction::Error;
    /// assert_eq!(47, Error::AuctionBidsDisabled as u8);
    /// ```
    AuctionBidsDisabled = 47,
    /// Error getting accumulation purse.
    /// ```
    /// # use casper_types_ver_2_0::system::auction::Error;
    /// assert_eq!(48, Error::GetAccumulationPurse as u8);
    /// ```
    GetAccumulationPurse = 48,
    /// Failed to transfer desired amount into administrators account.
    /// ```
    /// # use casper_types_ver_2_0::system::auction::Error;
    /// assert_eq!(49, Error::TransferToAdministrator as u8);
    /// ```
    TransferToAdministrator = 49,
}

// Human-readable description for each auction error variant.  Kept as a single
// exhaustive match so adding a variant without a message is a compile error.
impl Display for Error {
    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {
        match self {
            Error::MissingKey => formatter.write_str("Missing key"),
            Error::InvalidKeyVariant => formatter.write_str("Invalid key variant"),
            Error::MissingValue => formatter.write_str("Missing value"),
            Error::Serialization => formatter.write_str("Serialization error"),
            Error::TransferToBidPurse => formatter.write_str("Transfer to bid purse error"),
            Error::InvalidAmount => formatter.write_str("Invalid amount"),
            Error::BidNotFound => formatter.write_str("Bid not found"),
            Error::ValidatorNotFound => formatter.write_str("Validator not found"),
            Error::DelegatorNotFound => formatter.write_str("Delegator not found"),
            Error::Storage => formatter.write_str("Storage error"),
            Error::Bonding => formatter.write_str("Bonding error"),
            Error::Unbonding => formatter.write_str("Unbonding error"),
            Error::ReleaseFounderStake => formatter.write_str("Unable to release founder stake"),
            Error::GetBalance => formatter.write_str("Unable to get purse balance"),
            Error::InvalidContext => formatter.write_str("Invalid context"),
            Error::ValidatorFundsLocked => formatter.write_str("Validator's funds are locked"),
            Error::InvalidCaller => formatter.write_str("Function must be called by system account"),
            Error::InvalidPublicKey => formatter.write_str("Supplied public key does not match caller's public key or has no associated account"),
            Error::BondNotFound => formatter.write_str("Validator's bond not found"),
            Error::CreatePurseFailed => formatter.write_str("Unable to create purse"),
            Error::UnbondTooLarge => formatter.write_str("Unbond is too large"),
            Error::BondTooSmall => formatter.write_str("Bond is too small"),
            Error::MissingDelegations => formatter.write_str("Validators has not received any delegations"),
            Error::MismatchedEraValidators => formatter.write_str("Mismatched era validator sets to distribute rewards"),
            Error::MintReward => formatter.write_str("Failed to mint rewards"),
            Error::InvalidValidatorSlotsValue => formatter.write_str("Invalid number of validator slots"),
            Error::MintReduceTotalSupply => formatter.write_str("Failed to reduce total supply"),
            Error::TransferToDelegatorPurse => formatter.write_str("Transfer to delegators purse error"),
            Error::ValidatorRewardTransfer => formatter.write_str("Reward transfer to validator error"),
            Error::DelegatorRewardTransfer => formatter.write_str("Rewards transfer to delegator error"),
            Error::WithdrawDelegatorReward => formatter.write_str("Withdraw delegator reward error"),
            Error::WithdrawValidatorReward => formatter.write_str("Withdraw validator reward error"),
            Error::TransferToUnbondingPurse => formatter.write_str("Transfer to unbonding purse error"),
            Error::RecordEraInfo => formatter.write_str("Record era info error"),
            Error::CLValue => formatter.write_str("CLValue error"),
            Error::MissingSeigniorageRecipients => formatter.write_str("Missing seigniorage recipients for given era"),
            Error::Transfer => formatter.write_str("Transfer error"),
            Error::DelegationRateTooLarge => formatter.write_str("Delegation rate too large"),
            Error::DelegatorFundsLocked => formatter.write_str("Delegator's funds are locked"),
            Error::ArithmeticOverflow => formatter.write_str("Arithmetic overflow"),
            Error::GasLimit => formatter.write_str("Execution exceeded the gas limit"),
            Error::RuntimeStackOverflow => formatter.write_str("Runtime stack overflow"),
            Error::MintError => formatter.write_str("An error in the mint contract execution"),
            Error::ExceededDelegatorSizeLimit => formatter.write_str("The amount of delegators per validator has been exceeded"),
            Error::GlobalDelegatorCapacityReached => formatter.write_str("The global delegator capacity has been reached"),
            Error::DelegationAmountTooSmall => formatter.write_str("The delegated amount is below the minimum allowed"),
            Error::RuntimeStack => formatter.write_str("Runtime stack error"),
            Error::AuctionBidsDisabled => formatter.write_str("Auction bids are disabled"),
            Error::GetAccumulationPurse => formatter.write_str("Get accumulation purse error"),
            Error::TransferToAdministrator => formatter.write_str("Transfer to administrator error"),
        }
    }
}

// An auction `Error` is represented on-chain as its `u8` discriminant.
impl CLTyped for Error {
    fn cl_type() -> CLType {
        CLType::U8
    }
}

// This error type is not intended to be used by third party crates.
#[doc(hidden)]
#[derive(Debug, PartialEq, Eq)]
pub struct TryFromU8ForError(());

// This conversion is not intended to be used by third party crates.
+#[doc(hidden)] +impl TryFrom for Error { + type Error = TryFromU8ForError; + + fn try_from(value: u8) -> result::Result { + match value { + d if d == Error::MissingKey as u8 => Ok(Error::MissingKey), + d if d == Error::InvalidKeyVariant as u8 => Ok(Error::InvalidKeyVariant), + d if d == Error::MissingValue as u8 => Ok(Error::MissingValue), + d if d == Error::Serialization as u8 => Ok(Error::Serialization), + d if d == Error::TransferToBidPurse as u8 => Ok(Error::TransferToBidPurse), + d if d == Error::InvalidAmount as u8 => Ok(Error::InvalidAmount), + d if d == Error::BidNotFound as u8 => Ok(Error::BidNotFound), + d if d == Error::ValidatorNotFound as u8 => Ok(Error::ValidatorNotFound), + d if d == Error::DelegatorNotFound as u8 => Ok(Error::DelegatorNotFound), + d if d == Error::Storage as u8 => Ok(Error::Storage), + d if d == Error::Bonding as u8 => Ok(Error::Bonding), + d if d == Error::Unbonding as u8 => Ok(Error::Unbonding), + d if d == Error::ReleaseFounderStake as u8 => Ok(Error::ReleaseFounderStake), + d if d == Error::GetBalance as u8 => Ok(Error::GetBalance), + d if d == Error::InvalidContext as u8 => Ok(Error::InvalidContext), + d if d == Error::ValidatorFundsLocked as u8 => Ok(Error::ValidatorFundsLocked), + d if d == Error::InvalidCaller as u8 => Ok(Error::InvalidCaller), + d if d == Error::InvalidPublicKey as u8 => Ok(Error::InvalidPublicKey), + d if d == Error::BondNotFound as u8 => Ok(Error::BondNotFound), + d if d == Error::CreatePurseFailed as u8 => Ok(Error::CreatePurseFailed), + d if d == Error::UnbondTooLarge as u8 => Ok(Error::UnbondTooLarge), + d if d == Error::BondTooSmall as u8 => Ok(Error::BondTooSmall), + d if d == Error::MissingDelegations as u8 => Ok(Error::MissingDelegations), + d if d == Error::MismatchedEraValidators as u8 => Ok(Error::MismatchedEraValidators), + d if d == Error::MintReward as u8 => Ok(Error::MintReward), + d if d == Error::InvalidValidatorSlotsValue as u8 => { + Ok(Error::InvalidValidatorSlotsValue) + } + d if d == 
Error::MintReduceTotalSupply as u8 => Ok(Error::MintReduceTotalSupply), + d if d == Error::TransferToDelegatorPurse as u8 => Ok(Error::TransferToDelegatorPurse), + d if d == Error::ValidatorRewardTransfer as u8 => Ok(Error::ValidatorRewardTransfer), + d if d == Error::DelegatorRewardTransfer as u8 => Ok(Error::DelegatorRewardTransfer), + d if d == Error::WithdrawDelegatorReward as u8 => Ok(Error::WithdrawDelegatorReward), + d if d == Error::WithdrawValidatorReward as u8 => Ok(Error::WithdrawValidatorReward), + d if d == Error::TransferToUnbondingPurse as u8 => Ok(Error::TransferToUnbondingPurse), + + d if d == Error::RecordEraInfo as u8 => Ok(Error::RecordEraInfo), + d if d == Error::CLValue as u8 => Ok(Error::CLValue), + d if d == Error::MissingSeigniorageRecipients as u8 => { + Ok(Error::MissingSeigniorageRecipients) + } + d if d == Error::Transfer as u8 => Ok(Error::Transfer), + d if d == Error::DelegationRateTooLarge as u8 => Ok(Error::DelegationRateTooLarge), + d if d == Error::DelegatorFundsLocked as u8 => Ok(Error::DelegatorFundsLocked), + d if d == Error::ArithmeticOverflow as u8 => Ok(Error::ArithmeticOverflow), + d if d == Error::GasLimit as u8 => Ok(Error::GasLimit), + d if d == Error::RuntimeStackOverflow as u8 => Ok(Error::RuntimeStackOverflow), + d if d == Error::MintError as u8 => Ok(Error::MintError), + d if d == Error::ExceededDelegatorSizeLimit as u8 => { + Ok(Error::ExceededDelegatorSizeLimit) + } + d if d == Error::GlobalDelegatorCapacityReached as u8 => { + Ok(Error::GlobalDelegatorCapacityReached) + } + d if d == Error::DelegationAmountTooSmall as u8 => Ok(Error::DelegationAmountTooSmall), + d if d == Error::RuntimeStack as u8 => Ok(Error::RuntimeStack), + d if d == Error::AuctionBidsDisabled as u8 => Ok(Error::AuctionBidsDisabled), + d if d == Error::GetAccumulationPurse as u8 => Ok(Error::GetAccumulationPurse), + d if d == Error::TransferToAdministrator as u8 => Ok(Error::TransferToAdministrator), + _ => Err(TryFromU8ForError(())), + } + } 
+} + +impl ToBytes for Error { + fn to_bytes(&self) -> result::Result, bytesrepr::Error> { + let value = *self as u8; + value.to_bytes() + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + } +} + +impl FromBytes for Error { + fn from_bytes(bytes: &[u8]) -> result::Result<(Self, &[u8]), bytesrepr::Error> { + let (value, rem): (u8, _) = FromBytes::from_bytes(bytes)?; + let error: Error = value + .try_into() + // In case an Error variant is unable to be determined it would return an + // Error::Formatting as if its unable to be correctly deserialized. + .map_err(|_| bytesrepr::Error::Formatting)?; + Ok((error, rem)) + } +} + +impl From for Error { + fn from(_: bytesrepr::Error) -> Self { + Error::Serialization + } +} + +// This error type is not intended to be used by third party crates. +#[doc(hidden)] +pub enum PurseLookupError { + KeyNotFound, + KeyUnexpectedType, +} + +impl From for Error { + fn from(error: PurseLookupError) -> Self { + match error { + PurseLookupError::KeyNotFound => Error::MissingKey, + PurseLookupError::KeyUnexpectedType => Error::InvalidKeyVariant, + } + } +} + +#[cfg(test)] +mod tests { + use std::convert::TryFrom; + + use strum::IntoEnumIterator; + + use super::Error; + + #[test] + fn error_forward_trips() { + for expected_error_variant in Error::iter() { + assert_eq!( + Error::try_from(expected_error_variant as u8), + Ok(expected_error_variant) + ) + } + } + + #[test] + fn error_backward_trips() { + for u8 in 0..=u8::max_value() { + match Error::try_from(u8) { + Ok(error_variant) => { + assert_eq!(u8, error_variant as u8, "Error code mismatch") + } + Err(_) => continue, + }; + } + } +} diff --git a/casper_types_ver_2_0/src/system/auction/seigniorage_recipient.rs b/casper_types_ver_2_0/src/system/auction/seigniorage_recipient.rs new file mode 100644 index 00000000..a82450f6 --- /dev/null +++ b/casper_types_ver_2_0/src/system/auction/seigniorage_recipient.rs @@ -0,0 +1,196 @@ +use alloc::{collections::BTreeMap, vec::Vec}; 
+ +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + system::auction::{Bid, DelegationRate}, + CLType, CLTyped, PublicKey, U512, +}; + +/// The seigniorage recipient details. +#[derive(Default, PartialEq, Eq, Clone, Debug)] +pub struct SeigniorageRecipient { + /// Validator stake (not including delegators) + stake: U512, + /// Delegation rate of a seigniorage recipient. + delegation_rate: DelegationRate, + /// Delegators and their bids. + delegator_stake: BTreeMap, +} + +impl SeigniorageRecipient { + /// Creates a new SeigniorageRecipient + pub fn new( + stake: U512, + delegation_rate: DelegationRate, + delegator_stake: BTreeMap, + ) -> Self { + Self { + stake, + delegation_rate, + delegator_stake, + } + } + + /// Returns stake of the provided recipient + pub fn stake(&self) -> &U512 { + &self.stake + } + + /// Returns delegation rate of the provided recipient + pub fn delegation_rate(&self) -> &DelegationRate { + &self.delegation_rate + } + + /// Returns delegators of the provided recipient and their stake + pub fn delegator_stake(&self) -> &BTreeMap { + &self.delegator_stake + } + + /// Calculates total stake, including delegators' total stake + pub fn total_stake(&self) -> Option { + self.delegator_total_stake()?.checked_add(self.stake) + } + + /// Calculates total stake for all delegators + pub fn delegator_total_stake(&self) -> Option { + let mut total_stake: U512 = U512::zero(); + for stake in self.delegator_stake.values() { + total_stake = total_stake.checked_add(*stake)?; + } + Some(total_stake) + } +} + +impl CLTyped for SeigniorageRecipient { + fn cl_type() -> CLType { + CLType::Any + } +} + +impl ToBytes for SeigniorageRecipient { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + result.extend(self.stake.to_bytes()?); + result.extend(self.delegation_rate.to_bytes()?); + result.extend(self.delegator_stake.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + 
self.stake.serialized_length() + + self.delegation_rate.serialized_length() + + self.delegator_stake.serialized_length() + } +} + +impl FromBytes for SeigniorageRecipient { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (stake, bytes) = FromBytes::from_bytes(bytes)?; + let (delegation_rate, bytes) = FromBytes::from_bytes(bytes)?; + let (delegator_stake, bytes) = FromBytes::from_bytes(bytes)?; + Ok(( + SeigniorageRecipient { + stake, + delegation_rate, + delegator_stake, + }, + bytes, + )) + } +} + +impl From<&Bid> for SeigniorageRecipient { + fn from(bid: &Bid) -> Self { + let delegator_stake = bid + .delegators() + .iter() + .map(|(public_key, delegator)| (public_key.clone(), delegator.staked_amount())) + .collect(); + Self { + stake: *bid.staked_amount(), + delegation_rate: *bid.delegation_rate(), + delegator_stake, + } + } +} + +#[cfg(test)] +mod tests { + use alloc::collections::BTreeMap; + use core::iter::FromIterator; + + use crate::{ + bytesrepr, + system::auction::{DelegationRate, SeigniorageRecipient}, + PublicKey, SecretKey, U512, + }; + + #[test] + fn serialization_roundtrip() { + let delegator_1_key = PublicKey::from( + &SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(), + ); + let delegator_2_key = PublicKey::from( + &SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]).unwrap(), + ); + let delegator_3_key = PublicKey::from( + &SecretKey::ed25519_from_bytes([44; SecretKey::ED25519_LENGTH]).unwrap(), + ); + let seigniorage_recipient = SeigniorageRecipient { + stake: U512::max_value(), + delegation_rate: DelegationRate::max_value(), + delegator_stake: BTreeMap::from_iter(vec![ + (delegator_1_key, U512::max_value()), + (delegator_2_key, U512::max_value()), + (delegator_3_key, U512::zero()), + ]), + }; + bytesrepr::test_serialization_roundtrip(&seigniorage_recipient); + } + + #[test] + fn test_overflow_in_delegation_rate() { + let delegator_1_key = PublicKey::from( + 
&SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(), + ); + let delegator_2_key = PublicKey::from( + &SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]).unwrap(), + ); + let delegator_3_key = PublicKey::from( + &SecretKey::ed25519_from_bytes([44; SecretKey::ED25519_LENGTH]).unwrap(), + ); + let seigniorage_recipient = SeigniorageRecipient { + stake: U512::max_value(), + delegation_rate: DelegationRate::max_value(), + delegator_stake: BTreeMap::from_iter(vec![ + (delegator_1_key, U512::max_value()), + (delegator_2_key, U512::max_value()), + (delegator_3_key, U512::zero()), + ]), + }; + assert_eq!(seigniorage_recipient.total_stake(), None) + } + + #[test] + fn test_overflow_in_delegation_total_stake() { + let delegator_1_key = PublicKey::from( + &SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(), + ); + let delegator_2_key = PublicKey::from( + &SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]).unwrap(), + ); + let delegator_3_key = PublicKey::from( + &SecretKey::ed25519_from_bytes([44; SecretKey::ED25519_LENGTH]).unwrap(), + ); + let seigniorage_recipient = SeigniorageRecipient { + stake: U512::max_value(), + delegation_rate: DelegationRate::max_value(), + delegator_stake: BTreeMap::from_iter(vec![ + (delegator_1_key, U512::max_value()), + (delegator_2_key, U512::max_value()), + (delegator_3_key, U512::max_value()), + ]), + }; + assert_eq!(seigniorage_recipient.delegator_total_stake(), None) + } +} diff --git a/casper_types_ver_2_0/src/system/auction/unbonding_purse.rs b/casper_types_ver_2_0/src/system/auction/unbonding_purse.rs new file mode 100644 index 00000000..965376d2 --- /dev/null +++ b/casper_types_ver_2_0/src/system/auction/unbonding_purse.rs @@ -0,0 +1,238 @@ +use alloc::vec::Vec; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + 
CLType, CLTyped, EraId, PublicKey, URef, U512, +}; + +use super::WithdrawPurse; + +/// Unbonding purse. +#[derive(PartialEq, Eq, Debug, Serialize, Deserialize, Clone)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct UnbondingPurse { + /// Bonding Purse + bonding_purse: URef, + /// Validators public key. + validator_public_key: PublicKey, + /// Unbonders public key. + unbonder_public_key: PublicKey, + /// Era in which this unbonding request was created. + era_of_creation: EraId, + /// Unbonding Amount. + amount: U512, + /// The validator public key to re-delegate to. + new_validator: Option, +} + +impl UnbondingPurse { + /// Creates [`UnbondingPurse`] instance for an unbonding request. + pub const fn new( + bonding_purse: URef, + validator_public_key: PublicKey, + unbonder_public_key: PublicKey, + era_of_creation: EraId, + amount: U512, + new_validator: Option, + ) -> Self { + Self { + bonding_purse, + validator_public_key, + unbonder_public_key, + era_of_creation, + amount, + new_validator, + } + } + + /// Checks if given request is made by a validator by checking if public key of unbonder is same + /// as a key owned by validator. + pub fn is_validator(&self) -> bool { + self.validator_public_key == self.unbonder_public_key + } + + /// Returns bonding purse used to make this unbonding request. + pub fn bonding_purse(&self) -> &URef { + &self.bonding_purse + } + + /// Returns public key of validator. + pub fn validator_public_key(&self) -> &PublicKey { + &self.validator_public_key + } + + /// Returns public key of unbonder. + /// + /// For withdrawal requests that originated from validator's public key through `withdraw_bid` + /// entrypoint this is equal to [`UnbondingPurse::validator_public_key`] and + /// [`UnbondingPurse::is_validator`] is `true`. 
+ pub fn unbonder_public_key(&self) -> &PublicKey { + &self.unbonder_public_key + } + + /// Returns era which was used to create this unbonding request. + pub fn era_of_creation(&self) -> EraId { + self.era_of_creation + } + + /// Returns unbonding amount. + pub fn amount(&self) -> &U512 { + &self.amount + } + + /// Returns the public key for the new validator. + pub fn new_validator(&self) -> &Option { + &self.new_validator + } + + /// Sets amount to provided value. + pub fn with_amount(&mut self, amount: U512) { + self.amount = amount; + } +} + +impl ToBytes for UnbondingPurse { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + result.extend(&self.bonding_purse.to_bytes()?); + result.extend(&self.validator_public_key.to_bytes()?); + result.extend(&self.unbonder_public_key.to_bytes()?); + result.extend(&self.era_of_creation.to_bytes()?); + result.extend(&self.amount.to_bytes()?); + result.extend(&self.new_validator.to_bytes()?); + Ok(result) + } + fn serialized_length(&self) -> usize { + self.bonding_purse.serialized_length() + + self.validator_public_key.serialized_length() + + self.unbonder_public_key.serialized_length() + + self.era_of_creation.serialized_length() + + self.amount.serialized_length() + + self.new_validator.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.bonding_purse.write_bytes(writer)?; + self.validator_public_key.write_bytes(writer)?; + self.unbonder_public_key.write_bytes(writer)?; + self.era_of_creation.write_bytes(writer)?; + self.amount.write_bytes(writer)?; + self.new_validator.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for UnbondingPurse { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (bonding_purse, remainder) = FromBytes::from_bytes(bytes)?; + let (validator_public_key, remainder) = FromBytes::from_bytes(remainder)?; + let (unbonder_public_key, remainder) = 
FromBytes::from_bytes(remainder)?; + let (era_of_creation, remainder) = FromBytes::from_bytes(remainder)?; + let (amount, remainder) = FromBytes::from_bytes(remainder)?; + let (new_validator, remainder) = Option::::from_bytes(remainder)?; + + Ok(( + UnbondingPurse { + bonding_purse, + validator_public_key, + unbonder_public_key, + era_of_creation, + amount, + new_validator, + }, + remainder, + )) + } +} + +impl CLTyped for UnbondingPurse { + fn cl_type() -> CLType { + CLType::Any + } +} + +impl From for UnbondingPurse { + fn from(withdraw_purse: WithdrawPurse) -> Self { + UnbondingPurse::new( + withdraw_purse.bonding_purse, + withdraw_purse.validator_public_key, + withdraw_purse.unbonder_public_key, + withdraw_purse.era_of_creation, + withdraw_purse.amount, + None, + ) + } +} + +#[cfg(test)] +mod tests { + use crate::{ + bytesrepr, system::auction::UnbondingPurse, AccessRights, EraId, PublicKey, SecretKey, + URef, U512, + }; + + const BONDING_PURSE: URef = URef::new([14; 32], AccessRights::READ_ADD_WRITE); + const ERA_OF_WITHDRAWAL: EraId = EraId::MAX; + + fn validator_public_key() -> PublicKey { + let secret_key = SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) + } + + fn unbonder_public_key() -> PublicKey { + let secret_key = SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) + } + + fn amount() -> U512 { + U512::max_value() - 1 + } + + #[test] + fn serialization_roundtrip_for_unbonding_purse() { + let unbonding_purse = UnbondingPurse { + bonding_purse: BONDING_PURSE, + validator_public_key: validator_public_key(), + unbonder_public_key: unbonder_public_key(), + era_of_creation: ERA_OF_WITHDRAWAL, + amount: amount(), + new_validator: None, + }; + + bytesrepr::test_serialization_roundtrip(&unbonding_purse); + } + + #[test] + fn should_be_validator_condition_for_unbonding_purse() { + let validator_unbonding_purse = UnbondingPurse::new( + BONDING_PURSE, + 
validator_public_key(), + validator_public_key(), + ERA_OF_WITHDRAWAL, + amount(), + None, + ); + assert!(validator_unbonding_purse.is_validator()); + } + + #[test] + fn should_be_delegator_condition_for_unbonding_purse() { + let delegator_unbonding_purse = UnbondingPurse::new( + BONDING_PURSE, + validator_public_key(), + unbonder_public_key(), + ERA_OF_WITHDRAWAL, + amount(), + None, + ); + assert!(!delegator_unbonding_purse.is_validator()); + } +} diff --git a/casper_types_ver_2_0/src/system/auction/validator_bid.rs b/casper_types_ver_2_0/src/system/auction/validator_bid.rs new file mode 100644 index 00000000..a90b725b --- /dev/null +++ b/casper_types_ver_2_0/src/system/auction/validator_bid.rs @@ -0,0 +1,380 @@ +// TODO - remove once schemars stops causing warning. +#![allow(clippy::field_reassign_with_default)] + +use alloc::vec::Vec; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + system::auction::{ + bid::VestingSchedule, DelegationRate, Error, VESTING_SCHEDULE_LENGTH_MILLIS, + }, + CLType, CLTyped, PublicKey, URef, U512, +}; + +use crate::system::auction::Bid; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +/// An entry in the validator map. +#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Clone)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct ValidatorBid { + /// Validator public key + validator_public_key: PublicKey, + /// The purse that was used for bonding. + bonding_purse: URef, + /// The amount of tokens staked by a validator (not including delegators). + staked_amount: U512, + /// Delegation rate + delegation_rate: DelegationRate, + /// Vesting schedule for a genesis validator. `None` if non-genesis validator. 
+ vesting_schedule: Option, + /// `true` if validator has been "evicted" + inactive: bool, +} + +impl ValidatorBid { + /// Creates new instance of a bid with locked funds. + pub fn locked( + validator_public_key: PublicKey, + bonding_purse: URef, + staked_amount: U512, + delegation_rate: DelegationRate, + release_timestamp_millis: u64, + ) -> Self { + let vesting_schedule = Some(VestingSchedule::new(release_timestamp_millis)); + let inactive = false; + Self { + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + vesting_schedule, + inactive, + } + } + + /// Creates new instance of a bid with unlocked funds. + pub fn unlocked( + validator_public_key: PublicKey, + bonding_purse: URef, + staked_amount: U512, + delegation_rate: DelegationRate, + ) -> Self { + let vesting_schedule = None; + let inactive = false; + Self { + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + vesting_schedule, + inactive, + } + } + + /// Creates a new inactive instance of a bid with 0 staked amount. + pub fn empty(validator_public_key: PublicKey, bonding_purse: URef) -> Self { + let vesting_schedule = None; + let inactive = true; + let staked_amount = 0.into(); + let delegation_rate = Default::default(); + Self { + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + vesting_schedule, + inactive, + } + } + + /// Gets the validator public key of the provided bid + pub fn validator_public_key(&self) -> &PublicKey { + &self.validator_public_key + } + + /// Gets the bonding purse of the provided bid + pub fn bonding_purse(&self) -> &URef { + &self.bonding_purse + } + + /// Checks if a bid is still locked under a vesting schedule. + /// + /// Returns true if a timestamp falls below the initial lockup period + 91 days release + /// schedule, otherwise false. 
+ pub fn is_locked(&self, timestamp_millis: u64) -> bool { + self.is_locked_with_vesting_schedule(timestamp_millis, VESTING_SCHEDULE_LENGTH_MILLIS) + } + + /// Checks if a bid is still locked under a vesting schedule. + /// + /// Returns true if a timestamp falls below the initial lockup period + 91 days release + /// schedule, otherwise false. + pub fn is_locked_with_vesting_schedule( + &self, + timestamp_millis: u64, + vesting_schedule_period_millis: u64, + ) -> bool { + match &self.vesting_schedule { + Some(vesting_schedule) => { + vesting_schedule.is_vesting(timestamp_millis, vesting_schedule_period_millis) + } + None => false, + } + } + + /// Gets the staked amount of the provided bid + pub fn staked_amount(&self) -> U512 { + self.staked_amount + } + + /// Gets the staked amount of the provided bid + pub fn staked_amount_mut(&mut self) -> &mut U512 { + &mut self.staked_amount + } + + /// Gets the delegation rate of the provided bid + pub fn delegation_rate(&self) -> &DelegationRate { + &self.delegation_rate + } + + /// Returns a reference to the vesting schedule of the provided bid. `None` if a non-genesis + /// validator. + pub fn vesting_schedule(&self) -> Option<&VestingSchedule> { + self.vesting_schedule.as_ref() + } + + /// Returns a mutable reference to the vesting schedule of the provided bid. `None` if a + /// non-genesis validator. 
+ pub fn vesting_schedule_mut(&mut self) -> Option<&mut VestingSchedule> { + self.vesting_schedule.as_mut() + } + + /// Returns `true` if validator is inactive + pub fn inactive(&self) -> bool { + self.inactive + } + + /// Decreases the stake of the provided bid + pub fn decrease_stake( + &mut self, + amount: U512, + era_end_timestamp_millis: u64, + ) -> Result { + let updated_staked_amount = self + .staked_amount + .checked_sub(amount) + .ok_or(Error::UnbondTooLarge)?; + + let vesting_schedule = match self.vesting_schedule.as_ref() { + Some(vesting_schedule) => vesting_schedule, + None => { + self.staked_amount = updated_staked_amount; + return Ok(updated_staked_amount); + } + }; + + match vesting_schedule.locked_amount(era_end_timestamp_millis) { + Some(locked_amount) if updated_staked_amount < locked_amount => { + Err(Error::ValidatorFundsLocked) + } + None => { + // If `None`, then the locked amounts table has yet to be initialized (likely + // pre-90 day mark) + Err(Error::ValidatorFundsLocked) + } + Some(_) => { + self.staked_amount = updated_staked_amount; + Ok(updated_staked_amount) + } + } + } + + /// Increases the stake of the provided bid + pub fn increase_stake(&mut self, amount: U512) -> Result { + let updated_staked_amount = self + .staked_amount + .checked_add(amount) + .ok_or(Error::InvalidAmount)?; + + self.staked_amount = updated_staked_amount; + + Ok(updated_staked_amount) + } + + /// Updates the delegation rate of the provided bid + pub fn with_delegation_rate(&mut self, delegation_rate: DelegationRate) -> &mut Self { + self.delegation_rate = delegation_rate; + self + } + + /// Sets given bid's `inactive` field to `false` + pub fn activate(&mut self) -> bool { + self.inactive = false; + false + } + + /// Sets given bid's `inactive` field to `true` + pub fn deactivate(&mut self) -> bool { + self.inactive = true; + true + } +} + +impl CLTyped for ValidatorBid { + fn cl_type() -> CLType { + CLType::Any + } +} + +impl ToBytes for ValidatorBid { + fn 
to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + self.validator_public_key.write_bytes(&mut result)?; + self.bonding_purse.write_bytes(&mut result)?; + self.staked_amount.write_bytes(&mut result)?; + self.delegation_rate.write_bytes(&mut result)?; + self.vesting_schedule.write_bytes(&mut result)?; + self.inactive.write_bytes(&mut result)?; + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.validator_public_key.serialized_length() + + self.bonding_purse.serialized_length() + + self.staked_amount.serialized_length() + + self.delegation_rate.serialized_length() + + self.vesting_schedule.serialized_length() + + self.inactive.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.validator_public_key.write_bytes(writer)?; + self.bonding_purse.write_bytes(writer)?; + self.staked_amount.write_bytes(writer)?; + self.delegation_rate.write_bytes(writer)?; + self.vesting_schedule.write_bytes(writer)?; + self.inactive.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for ValidatorBid { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (validator_public_key, bytes) = FromBytes::from_bytes(bytes)?; + let (bonding_purse, bytes) = FromBytes::from_bytes(bytes)?; + let (staked_amount, bytes) = FromBytes::from_bytes(bytes)?; + let (delegation_rate, bytes) = FromBytes::from_bytes(bytes)?; + let (vesting_schedule, bytes) = FromBytes::from_bytes(bytes)?; + let (inactive, bytes) = FromBytes::from_bytes(bytes)?; + Ok(( + ValidatorBid { + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + vesting_schedule, + inactive, + }, + bytes, + )) + } +} + +impl From for ValidatorBid { + fn from(bid: Bid) -> Self { + ValidatorBid { + validator_public_key: bid.validator_public_key().clone(), + bonding_purse: *bid.bonding_purse(), + staked_amount: *bid.staked_amount(), + delegation_rate: *bid.delegation_rate(), + 
vesting_schedule: bid.vesting_schedule().cloned(), + inactive: bid.inactive(), + } + } +} + +#[cfg(test)] +mod tests { + use crate::{ + bytesrepr, + system::auction::{bid::VestingSchedule, DelegationRate, ValidatorBid}, + AccessRights, PublicKey, SecretKey, URef, U512, + }; + + #[test] + fn serialization_roundtrip_active() { + let founding_validator = ValidatorBid { + validator_public_key: PublicKey::from( + &SecretKey::ed25519_from_bytes([0u8; SecretKey::ED25519_LENGTH]).unwrap(), + ), + bonding_purse: URef::new([42; 32], AccessRights::READ_ADD_WRITE), + staked_amount: U512::one(), + delegation_rate: DelegationRate::MAX, + vesting_schedule: Some(VestingSchedule::default()), + inactive: false, + }; + bytesrepr::test_serialization_roundtrip(&founding_validator); + } + + #[test] + fn serialization_roundtrip_inactive() { + let founding_validator = ValidatorBid { + validator_public_key: PublicKey::from( + &SecretKey::ed25519_from_bytes([0u8; SecretKey::ED25519_LENGTH]).unwrap(), + ), + bonding_purse: URef::new([42; 32], AccessRights::READ_ADD_WRITE), + staked_amount: U512::one(), + delegation_rate: DelegationRate::max_value(), + vesting_schedule: Some(VestingSchedule::default()), + inactive: true, + }; + bytesrepr::test_serialization_roundtrip(&founding_validator); + } + + #[test] + fn should_immediately_initialize_unlock_amounts() { + const TIMESTAMP_MILLIS: u64 = 0; + + let validator_pk: PublicKey = (&SecretKey::ed25519_from_bytes([42; 32]).unwrap()).into(); + + let validator_release_timestamp = TIMESTAMP_MILLIS; + let vesting_schedule_period_millis = TIMESTAMP_MILLIS; + let validator_bonding_purse = URef::new([42; 32], AccessRights::ADD); + let validator_staked_amount = U512::from(1000); + let validator_delegation_rate = 0; + + let bid = ValidatorBid::locked( + validator_pk, + validator_bonding_purse, + validator_staked_amount, + validator_delegation_rate, + validator_release_timestamp, + ); + + assert!(!bid.is_locked_with_vesting_schedule( + 
validator_release_timestamp, + vesting_schedule_period_millis + )); + } +} + +#[cfg(test)] +mod prop_tests { + use proptest::prelude::*; + + use crate::{bytesrepr, gens}; + + proptest! { + #[test] + fn test_value_bid(bid in gens::validator_bid_arb()) { + bytesrepr::test_serialization_roundtrip(&bid); + } + } +} diff --git a/casper_types_ver_2_0/src/system/auction/withdraw_purse.rs b/casper_types_ver_2_0/src/system/auction/withdraw_purse.rs new file mode 100644 index 00000000..9dc3806b --- /dev/null +++ b/casper_types_ver_2_0/src/system/auction/withdraw_purse.rs @@ -0,0 +1,192 @@ +use alloc::vec::Vec; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + CLType, CLTyped, EraId, PublicKey, URef, U512, +}; + +/// A withdraw purse, a legacy structure. +#[derive(PartialEq, Eq, Debug, Serialize, Deserialize, Clone)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct WithdrawPurse { + /// Bonding Purse + pub(crate) bonding_purse: URef, + /// Validators public key. + pub(crate) validator_public_key: PublicKey, + /// Unbonders public key. + pub(crate) unbonder_public_key: PublicKey, + /// Era in which this unbonding request was created. + pub(crate) era_of_creation: EraId, + /// Unbonding Amount. + pub(crate) amount: U512, +} + +impl WithdrawPurse { + /// Creates [`WithdrawPurse`] instance for an unbonding request. 
+ pub const fn new( + bonding_purse: URef, + validator_public_key: PublicKey, + unbonder_public_key: PublicKey, + era_of_creation: EraId, + amount: U512, + ) -> Self { + Self { + bonding_purse, + validator_public_key, + unbonder_public_key, + era_of_creation, + amount, + } + } + + /// Checks if given request is made by a validator by checking if public key of unbonder is same + /// as a key owned by validator. + pub fn is_validator(&self) -> bool { + self.validator_public_key == self.unbonder_public_key + } + + /// Returns bonding purse used to make this unbonding request. + pub fn bonding_purse(&self) -> &URef { + &self.bonding_purse + } + + /// Returns public key of validator. + pub fn validator_public_key(&self) -> &PublicKey { + &self.validator_public_key + } + + /// Returns public key of unbonder. + /// + /// For withdrawal requests that originated from validator's public key through `withdraw_bid` + /// entrypoint this is equal to [`WithdrawPurse::validator_public_key`] and + /// [`WithdrawPurse::is_validator`] is `true`. + pub fn unbonder_public_key(&self) -> &PublicKey { + &self.unbonder_public_key + } + + /// Returns era which was used to create this unbonding request. + pub fn era_of_creation(&self) -> EraId { + self.era_of_creation + } + + /// Returns unbonding amount. 
+ pub fn amount(&self) -> &U512 { + &self.amount + } +} + +impl ToBytes for WithdrawPurse { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + result.extend(&self.bonding_purse.to_bytes()?); + result.extend(&self.validator_public_key.to_bytes()?); + result.extend(&self.unbonder_public_key.to_bytes()?); + result.extend(&self.era_of_creation.to_bytes()?); + result.extend(&self.amount.to_bytes()?); + + Ok(result) + } + fn serialized_length(&self) -> usize { + self.bonding_purse.serialized_length() + + self.validator_public_key.serialized_length() + + self.unbonder_public_key.serialized_length() + + self.era_of_creation.serialized_length() + + self.amount.serialized_length() + } +} + +impl FromBytes for WithdrawPurse { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (bonding_purse, remainder) = FromBytes::from_bytes(bytes)?; + let (validator_public_key, remainder) = FromBytes::from_bytes(remainder)?; + let (unbonder_public_key, remainder) = FromBytes::from_bytes(remainder)?; + let (era_of_creation, remainder) = FromBytes::from_bytes(remainder)?; + let (amount, remainder) = FromBytes::from_bytes(remainder)?; + + Ok(( + WithdrawPurse { + bonding_purse, + validator_public_key, + unbonder_public_key, + era_of_creation, + amount, + }, + remainder, + )) + } +} + +impl CLTyped for WithdrawPurse { + fn cl_type() -> CLType { + CLType::Any + } +} + +#[cfg(test)] +mod tests { + use crate::{bytesrepr, AccessRights, EraId, PublicKey, SecretKey, URef, U512}; + + use super::WithdrawPurse; + + const BONDING_PURSE: URef = URef::new([41; 32], AccessRights::READ_ADD_WRITE); + const ERA_OF_WITHDRAWAL: EraId = EraId::MAX; + + fn validator_public_key() -> PublicKey { + let secret_key = SecretKey::ed25519_from_bytes([44; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) + } + + fn unbonder_public_key() -> PublicKey { + let secret_key = SecretKey::ed25519_from_bytes([45; 
SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) + } + + fn amount() -> U512 { + U512::max_value() - 1 + } + + #[test] + fn serialization_roundtrip_for_withdraw_purse() { + let withdraw_purse = WithdrawPurse { + bonding_purse: BONDING_PURSE, + validator_public_key: validator_public_key(), + unbonder_public_key: unbonder_public_key(), + era_of_creation: ERA_OF_WITHDRAWAL, + amount: amount(), + }; + + bytesrepr::test_serialization_roundtrip(&withdraw_purse); + } + + #[test] + fn should_be_validator_condition_for_withdraw_purse() { + let validator_withdraw_purse = WithdrawPurse::new( + BONDING_PURSE, + validator_public_key(), + validator_public_key(), + ERA_OF_WITHDRAWAL, + amount(), + ); + assert!(validator_withdraw_purse.is_validator()); + } + + #[test] + fn should_be_delegator_condition_for_withdraw_purse() { + let delegator_withdraw_purse = WithdrawPurse::new( + BONDING_PURSE, + validator_public_key(), + unbonder_public_key(), + ERA_OF_WITHDRAWAL, + amount(), + ); + assert!(!delegator_withdraw_purse.is_validator()); + } +} diff --git a/casper_types_ver_2_0/src/system/call_stack_element.rs b/casper_types_ver_2_0/src/system/call_stack_element.rs new file mode 100644 index 00000000..df09eac3 --- /dev/null +++ b/casper_types_ver_2_0/src/system/call_stack_element.rs @@ -0,0 +1,164 @@ +use alloc::vec::Vec; + +use num_derive::{FromPrimitive, ToPrimitive}; +use num_traits::FromPrimitive; + +use crate::{ + account::AccountHash, + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + package::PackageHash, + AddressableEntityHash, CLType, CLTyped, +}; + +/// Tag representing variants of CallStackElement for purposes of serialization. +#[derive(FromPrimitive, ToPrimitive)] +#[repr(u8)] +pub enum CallStackElementTag { + /// Session tag. + Session = 0, + /// StoredContract tag. + StoredContract, +} + +/// Represents the origin of a sub-call. 
+#[derive(Clone, Debug, PartialEq, Eq)] +pub enum CallStackElement { + /// Session + Session { + /// The account hash of the caller + account_hash: AccountHash, + }, + // /// Effectively an EntryPointType::Session - stored access to a session. + // StoredSession { + // /// The account hash of the caller + // account_hash: AccountHash, + // /// The package hash + // package_hash: PackageHash, + // /// The contract hash + // contract_hash: AddressableEntityHash, + // }, + /// AddressableEntity + AddressableEntity { + /// The package hash + package_hash: PackageHash, + /// The entity hash + entity_hash: AddressableEntityHash, + }, +} + +impl CallStackElement { + /// Creates a [`CallStackElement::Session`]. This represents a call into session code, and + /// should only ever happen once in a call stack. + pub fn session(account_hash: AccountHash) -> Self { + CallStackElement::Session { account_hash } + } + + /// Creates a [`CallStackElement::StoredContract`]. This represents a call into a contract with + /// `EntryPointType::Contract`. + pub fn stored_contract( + package_hash: PackageHash, + contract_hash: AddressableEntityHash, + ) -> Self { + CallStackElement::AddressableEntity { + package_hash, + entity_hash: contract_hash, + } + } + + // /// Creates a [`CallStackElement::StoredSession`]. This represents a call into a contract + // with /// `EntryPointType::Session`. + // pub fn stored_session( + // account_hash: AccountHash, + // package_hash: PackageHash, + // contract_hash: AddressableEntityHash, + // ) -> Self { + // CallStackElement::StoredSession { + // account_hash, + // package_hash, + // contract_hash, + // } + // } + + /// Gets the tag from self. + pub fn tag(&self) -> CallStackElementTag { + match self { + CallStackElement::Session { .. } => CallStackElementTag::Session, + + CallStackElement::AddressableEntity { .. 
} => CallStackElementTag::StoredContract, + } + } + + /// Gets the [`AddressableEntityHash`] for both stored session and stored contract variants. + pub fn contract_hash(&self) -> Option<&AddressableEntityHash> { + match self { + CallStackElement::Session { .. } => None, + + CallStackElement::AddressableEntity { + entity_hash: contract_hash, + .. + } => Some(contract_hash), + } + } +} + +impl ToBytes for CallStackElement { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + result.push(self.tag() as u8); + match self { + CallStackElement::Session { account_hash } => { + result.append(&mut account_hash.to_bytes()?) + } + + CallStackElement::AddressableEntity { + package_hash, + entity_hash: contract_hash, + } => { + result.append(&mut package_hash.to_bytes()?); + result.append(&mut contract_hash.to_bytes()?); + } + }; + Ok(result) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + CallStackElement::Session { account_hash } => account_hash.serialized_length(), + CallStackElement::AddressableEntity { + package_hash, + entity_hash: contract_hash, + } => package_hash.serialized_length() + contract_hash.serialized_length(), + } + } +} + +impl FromBytes for CallStackElement { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder): (u8, &[u8]) = FromBytes::from_bytes(bytes)?; + let tag = CallStackElementTag::from_u8(tag).ok_or(bytesrepr::Error::Formatting)?; + match tag { + CallStackElementTag::Session => { + let (account_hash, remainder) = AccountHash::from_bytes(remainder)?; + Ok((CallStackElement::Session { account_hash }, remainder)) + } + CallStackElementTag::StoredContract => { + let (package_hash, remainder) = PackageHash::from_bytes(remainder)?; + let (contract_hash, remainder) = AddressableEntityHash::from_bytes(remainder)?; + Ok(( + CallStackElement::AddressableEntity { + package_hash, + entity_hash: contract_hash, + }, + 
remainder, + )) + } + } + } +} + +impl CLTyped for CallStackElement { + fn cl_type() -> CLType { + CLType::Any + } +} diff --git a/casper_types_ver_2_0/src/system/error.rs b/casper_types_ver_2_0/src/system/error.rs new file mode 100644 index 00000000..c63e3f58 --- /dev/null +++ b/casper_types_ver_2_0/src/system/error.rs @@ -0,0 +1,43 @@ +use core::fmt::{self, Display, Formatter}; + +use crate::system::{auction, handle_payment, mint}; + +/// An aggregate enum error with variants for each system contract's error. +#[derive(Debug, Copy, Clone)] +#[non_exhaustive] +pub enum Error { + /// Contains a [`mint::Error`]. + Mint(mint::Error), + /// Contains a [`handle_payment::Error`]. + HandlePayment(handle_payment::Error), + /// Contains a [`auction::Error`]. + Auction(auction::Error), +} + +impl From for Error { + fn from(error: mint::Error) -> Error { + Error::Mint(error) + } +} + +impl From for Error { + fn from(error: handle_payment::Error) -> Error { + Error::HandlePayment(error) + } +} + +impl From for Error { + fn from(error: auction::Error) -> Error { + Error::Auction(error) + } +} + +impl Display for Error { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + Error::Mint(error) => write!(formatter, "Mint error: {}", error), + Error::HandlePayment(error) => write!(formatter, "HandlePayment error: {}", error), + Error::Auction(error) => write!(formatter, "Auction error: {}", error), + } + } +} diff --git a/casper_types_ver_2_0/src/system/handle_payment.rs b/casper_types_ver_2_0/src/system/handle_payment.rs new file mode 100644 index 00000000..1b12f3ec --- /dev/null +++ b/casper_types_ver_2_0/src/system/handle_payment.rs @@ -0,0 +1,8 @@ +//! Contains implementation of a Handle Payment contract functionality. 
+mod constants; +mod entry_points; +mod error; + +pub use constants::*; +pub use entry_points::handle_payment_entry_points; +pub use error::Error; diff --git a/casper_types_ver_2_0/src/system/handle_payment/constants.rs b/casper_types_ver_2_0/src/system/handle_payment/constants.rs new file mode 100644 index 00000000..ef0feedd --- /dev/null +++ b/casper_types_ver_2_0/src/system/handle_payment/constants.rs @@ -0,0 +1,37 @@ +/// Named constant for `purse`. +pub const ARG_PURSE: &str = "purse"; +/// Named constant for `amount`. +pub const ARG_AMOUNT: &str = "amount"; +/// Named constant for `account`. +pub const ARG_ACCOUNT: &str = "account"; +/// Named constant for `target`. +pub const ARG_TARGET: &str = "target"; + +/// Named constant for method `get_payment_purse`. +pub const METHOD_GET_PAYMENT_PURSE: &str = "get_payment_purse"; +/// Named constant for method `set_refund_purse`. +pub const METHOD_SET_REFUND_PURSE: &str = "set_refund_purse"; +/// Named constant for method `get_refund_purse`. +pub const METHOD_GET_REFUND_PURSE: &str = "get_refund_purse"; +/// Named constant for method `finalize_payment`. +pub const METHOD_FINALIZE_PAYMENT: &str = "finalize_payment"; +/// Named constant for method `distribute_accumulated_fees`. +pub const METHOD_DISTRIBUTE_ACCUMULATED_FEES: &str = "distribute_accumulated_fees"; + +/// Storage for handle payment contract hash. +pub const CONTRACT_HASH_KEY: &str = "contract_hash"; + +/// Storage for handle payment access key. +pub const CONTRACT_ACCESS_KEY: &str = "access_key"; + +/// The uref name where the Handle Payment accepts payment for computation on behalf of validators. +pub const PAYMENT_PURSE_KEY: &str = "payment_purse"; + +/// The uref name where the Handle Payment will refund unused payment back to the user. The uref +/// this name corresponds to is set by the user. +pub const REFUND_PURSE_KEY: &str = "refund_purse"; +/// Storage for handle payment accumulation purse key. 
+/// +/// This purse is used when `fee_elimination` config is set to `Accumulate` which makes sense for +/// some private chains. +pub const ACCUMULATION_PURSE_KEY: &str = "accumulation_purse"; diff --git a/casper_types_ver_2_0/src/system/handle_payment/entry_points.rs b/casper_types_ver_2_0/src/system/handle_payment/entry_points.rs new file mode 100644 index 00000000..f07b09f5 --- /dev/null +++ b/casper_types_ver_2_0/src/system/handle_payment/entry_points.rs @@ -0,0 +1,66 @@ +use alloc::boxed::Box; + +use crate::{ + system::handle_payment::{ + ARG_ACCOUNT, ARG_AMOUNT, ARG_PURSE, METHOD_FINALIZE_PAYMENT, METHOD_GET_PAYMENT_PURSE, + METHOD_GET_REFUND_PURSE, METHOD_SET_REFUND_PURSE, + }, + CLType, EntryPoint, EntryPointAccess, EntryPointType, EntryPoints, Parameter, +}; + +use super::METHOD_DISTRIBUTE_ACCUMULATED_FEES; + +/// Creates handle payment contract entry points. +pub fn handle_payment_entry_points() -> EntryPoints { + let mut entry_points = EntryPoints::new(); + + let get_payment_purse = EntryPoint::new( + METHOD_GET_PAYMENT_PURSE, + vec![], + CLType::URef, + EntryPointAccess::Public, + EntryPointType::AddressableEntity, + ); + entry_points.add_entry_point(get_payment_purse); + + let set_refund_purse = EntryPoint::new( + METHOD_SET_REFUND_PURSE, + vec![Parameter::new(ARG_PURSE, CLType::URef)], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::AddressableEntity, + ); + entry_points.add_entry_point(set_refund_purse); + + let get_refund_purse = EntryPoint::new( + METHOD_GET_REFUND_PURSE, + vec![], + CLType::Option(Box::new(CLType::URef)), + EntryPointAccess::Public, + EntryPointType::AddressableEntity, + ); + entry_points.add_entry_point(get_refund_purse); + + let finalize_payment = EntryPoint::new( + METHOD_FINALIZE_PAYMENT, + vec![ + Parameter::new(ARG_AMOUNT, CLType::U512), + Parameter::new(ARG_ACCOUNT, CLType::ByteArray(32)), + ], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::AddressableEntity, + ); + 
entry_points.add_entry_point(finalize_payment); + + let distribute_accumulated_fees = EntryPoint::new( + METHOD_DISTRIBUTE_ACCUMULATED_FEES, + vec![], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::AddressableEntity, + ); + entry_points.add_entry_point(distribute_accumulated_fees); + + entry_points +} diff --git a/casper_types_ver_2_0/src/system/handle_payment/error.rs b/casper_types_ver_2_0/src/system/handle_payment/error.rs new file mode 100644 index 00000000..0c158c93 --- /dev/null +++ b/casper_types_ver_2_0/src/system/handle_payment/error.rs @@ -0,0 +1,424 @@ +//! Home of the Handle Payment contract's [`enum@Error`] type. +use alloc::vec::Vec; +use core::{ + convert::TryFrom, + fmt::{self, Display, Formatter}, + result, +}; + +use crate::{ + bytesrepr::{self, ToBytes, U8_SERIALIZED_LENGTH}, + CLType, CLTyped, +}; + +/// Errors which can occur while executing the Handle Payment contract. +// TODO: Split this up into user errors vs. system errors. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +#[repr(u8)] +#[non_exhaustive] +pub enum Error { + // ===== User errors ===== + /// The given validator is not bonded. + /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(0, Error::NotBonded as u8); + /// ``` + NotBonded = 0, + /// There are too many bonding or unbonding attempts already enqueued to allow more. + /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(1, Error::TooManyEventsInQueue as u8); + /// ``` + TooManyEventsInQueue = 1, + /// At least one validator must remain bonded. + /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(2, Error::CannotUnbondLastValidator as u8); + /// ``` + CannotUnbondLastValidator = 2, + /// Failed to bond or unbond as this would have resulted in exceeding the maximum allowed + /// difference between the largest and smallest stakes. 
+ /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(3, Error::SpreadTooHigh as u8); + /// ``` + SpreadTooHigh = 3, + /// The given validator already has a bond or unbond attempt enqueued. + /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(4, Error::MultipleRequests as u8); + /// ``` + MultipleRequests = 4, + /// Attempted to bond with a stake which was too small. + /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(5, Error::BondTooSmall as u8); + /// ``` + BondTooSmall = 5, + /// Attempted to bond with a stake which was too large. + /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(6, Error::BondTooLarge as u8); + /// ``` + BondTooLarge = 6, + /// Attempted to unbond an amount which was too large. + /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(7, Error::UnbondTooLarge as u8); + /// ``` + UnbondTooLarge = 7, + /// While bonding, the transfer from source purse to the Handle Payment internal purse failed. + /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(8, Error::BondTransferFailed as u8); + /// ``` + BondTransferFailed = 8, + /// While unbonding, the transfer from the Handle Payment internal purse to the destination + /// purse failed. + /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(9, Error::UnbondTransferFailed as u8); + /// ``` + UnbondTransferFailed = 9, + // ===== System errors ===== + /// Internal error: a [`BlockTime`](crate::BlockTime) was unexpectedly out of sequence. + /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(10, Error::TimeWentBackwards as u8); + /// ``` + TimeWentBackwards = 10, + /// Internal error: stakes were unexpectedly empty. 
+ /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(11, Error::StakesNotFound as u8); + /// ``` + StakesNotFound = 11, + /// Internal error: the Handle Payment contract's payment purse wasn't found. + /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(12, Error::PaymentPurseNotFound as u8); + /// ``` + PaymentPurseNotFound = 12, + /// Internal error: the Handle Payment contract's payment purse key was the wrong type. + /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(13, Error::PaymentPurseKeyUnexpectedType as u8); + /// ``` + PaymentPurseKeyUnexpectedType = 13, + /// Internal error: couldn't retrieve the balance for the Handle Payment contract's payment + /// purse. + /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(14, Error::PaymentPurseBalanceNotFound as u8); + /// ``` + PaymentPurseBalanceNotFound = 14, + /// Internal error: the Handle Payment contract's bonding purse wasn't found. + /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(15, Error::BondingPurseNotFound as u8); + /// ``` + BondingPurseNotFound = 15, + /// Internal error: the Handle Payment contract's bonding purse key was the wrong type. + /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(16, Error::BondingPurseKeyUnexpectedType as u8); + /// ``` + BondingPurseKeyUnexpectedType = 16, + /// Internal error: the Handle Payment contract's refund purse key was the wrong type. + /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(17, Error::RefundPurseKeyUnexpectedType as u8); + /// ``` + RefundPurseKeyUnexpectedType = 17, + /// Internal error: the Handle Payment contract's rewards purse wasn't found. 
+ /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(18, Error::RewardsPurseNotFound as u8); + /// ``` + RewardsPurseNotFound = 18, + /// Internal error: the Handle Payment contract's rewards purse key was the wrong type. + /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(19, Error::RewardsPurseKeyUnexpectedType as u8); + /// ``` + RewardsPurseKeyUnexpectedType = 19, + // TODO: Put these in their own enum, and wrap them separately in `BondingError` and + // `UnbondingError`. + /// Internal error: failed to deserialize the stake's key. + /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(20, Error::StakesKeyDeserializationFailed as u8); + /// ``` + StakesKeyDeserializationFailed = 20, + /// Internal error: failed to deserialize the stake's balance. + /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(21, Error::StakesDeserializationFailed as u8); + /// ``` + StakesDeserializationFailed = 21, + /// The invoked Handle Payment function can only be called by system contracts, but was called + /// by a user contract. + /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(22, Error::SystemFunctionCalledByUserAccount as u8); + /// ``` + SystemFunctionCalledByUserAccount = 22, + /// Internal error: while finalizing payment, the amount spent exceeded the amount available. + /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(23, Error::InsufficientPaymentForAmountSpent as u8); + /// ``` + InsufficientPaymentForAmountSpent = 23, + /// Internal error: while finalizing payment, failed to pay the validators (the transfer from + /// the Handle Payment contract's payment purse to rewards purse failed). 
+ /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(24, Error::FailedTransferToRewardsPurse as u8); + /// ``` + FailedTransferToRewardsPurse = 24, + /// Internal error: while finalizing payment, failed to refund the caller's purse (the transfer + /// from the Handle Payment contract's payment purse to refund purse or account's main purse + /// failed). + /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(25, Error::FailedTransferToAccountPurse as u8); + /// ``` + FailedTransferToAccountPurse = 25, + /// Handle Payment contract's "set_refund_purse" method can only be called by the payment code + /// of a deploy, but was called by the session code. + /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(26, Error::SetRefundPurseCalledOutsidePayment as u8); + /// ``` + SetRefundPurseCalledOutsidePayment = 26, + /// Raised when the system is unable to determine purse balance. + /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(27, Error::GetBalance as u8); + /// ``` + GetBalance = 27, + /// Raised when the system is unable to put named key. + /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(28, Error::PutKey as u8); + /// ``` + PutKey = 28, + /// Raised when the system is unable to remove given named key. + /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(29, Error::RemoveKey as u8); + /// ``` + RemoveKey = 29, + /// Failed to transfer funds. 
+ /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(30, Error::Transfer as u8); + /// ``` + Transfer = 30, + /// An arithmetic overflow occurred + /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(31, Error::ArithmeticOverflow as u8); + /// ``` + ArithmeticOverflow = 31, + // NOTE: These variants below will be removed once support for WASM system contracts will be + // dropped. + #[doc(hidden)] + GasLimit = 32, + /// Refund purse is a payment purse. + /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(33, Error::RefundPurseIsPaymentPurse as u8); + /// ``` + RefundPurseIsPaymentPurse = 33, + /// Error raised while reducing total supply on the mint system contract. + /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(34, Error::ReduceTotalSupply as u8); + /// ``` + ReduceTotalSupply = 34, + /// Error writing to a storage. + /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(35, Error::Storage as u8); + /// ``` + Storage = 35, + /// Internal error: the Handle Payment contract's accumulation purse wasn't found. + /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(36, Error::AccumulationPurseNotFound as u8); + /// ``` + AccumulationPurseNotFound = 36, + /// Internal error: the Handle Payment contract's accumulation purse key was the wrong type. 
+ /// ``` + /// # use casper_types_ver_2_0::system::handle_payment::Error; + /// assert_eq!(37, Error::AccumulationPurseKeyUnexpectedType as u8); + /// ``` + AccumulationPurseKeyUnexpectedType = 37, +} + +impl Display for Error { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + Error::NotBonded => formatter.write_str("Not bonded"), + Error::TooManyEventsInQueue => formatter.write_str("Too many events in queue"), + Error::CannotUnbondLastValidator => formatter.write_str("Cannot unbond last validator"), + Error::SpreadTooHigh => formatter.write_str("Spread is too high"), + Error::MultipleRequests => formatter.write_str("Multiple requests"), + Error::BondTooSmall => formatter.write_str("Bond is too small"), + Error::BondTooLarge => formatter.write_str("Bond is too large"), + Error::UnbondTooLarge => formatter.write_str("Unbond is too large"), + Error::BondTransferFailed => formatter.write_str("Bond transfer failed"), + Error::UnbondTransferFailed => formatter.write_str("Unbond transfer failed"), + Error::TimeWentBackwards => formatter.write_str("Time went backwards"), + Error::StakesNotFound => formatter.write_str("Stakes not found"), + Error::PaymentPurseNotFound => formatter.write_str("Payment purse not found"), + Error::PaymentPurseKeyUnexpectedType => { + formatter.write_str("Payment purse has unexpected type") + } + Error::PaymentPurseBalanceNotFound => { + formatter.write_str("Payment purse balance not found") + } + Error::BondingPurseNotFound => formatter.write_str("Bonding purse not found"), + Error::BondingPurseKeyUnexpectedType => { + formatter.write_str("Bonding purse key has unexpected type") + } + Error::RefundPurseKeyUnexpectedType => { + formatter.write_str("Refund purse key has unexpected type") + } + Error::RewardsPurseNotFound => formatter.write_str("Rewards purse not found"), + Error::RewardsPurseKeyUnexpectedType => { + formatter.write_str("Rewards purse has unexpected type") + } + Error::StakesKeyDeserializationFailed 
=> { + formatter.write_str("Failed to deserialize stake's key") + } + Error::StakesDeserializationFailed => { + formatter.write_str("Failed to deserialize stake's balance") + } + Error::SystemFunctionCalledByUserAccount => { + formatter.write_str("System function was called by user account") + } + Error::InsufficientPaymentForAmountSpent => { + formatter.write_str("Insufficient payment for amount spent") + } + Error::FailedTransferToRewardsPurse => { + formatter.write_str("Transfer to rewards purse has failed") + } + Error::FailedTransferToAccountPurse => { + formatter.write_str("Transfer to account's purse failed") + } + Error::SetRefundPurseCalledOutsidePayment => { + formatter.write_str("Set refund purse was called outside payment") + } + Error::GetBalance => formatter.write_str("Unable to get purse balance"), + Error::PutKey => formatter.write_str("Unable to put named key"), + Error::RemoveKey => formatter.write_str("Unable to remove named key"), + Error::Transfer => formatter.write_str("Failed to transfer funds"), + Error::ArithmeticOverflow => formatter.write_str("Arithmetic overflow"), + Error::GasLimit => formatter.write_str("GasLimit"), + Error::RefundPurseIsPaymentPurse => { + formatter.write_str("Refund purse is a payment purse.") + } + Error::ReduceTotalSupply => formatter.write_str("Failed to reduce total supply."), + Error::Storage => formatter.write_str("Failed to write to storage."), + Error::AccumulationPurseNotFound => formatter.write_str("Accumulation purse not found"), + Error::AccumulationPurseKeyUnexpectedType => { + formatter.write_str("Accumulation purse has unexpected type") + } + } + } +} + +impl TryFrom for Error { + type Error = (); + + fn try_from(value: u8) -> Result { + let error = match value { + v if v == Error::NotBonded as u8 => Error::NotBonded, + v if v == Error::TooManyEventsInQueue as u8 => Error::TooManyEventsInQueue, + v if v == Error::CannotUnbondLastValidator as u8 => Error::CannotUnbondLastValidator, + v if v == 
Error::SpreadTooHigh as u8 => Error::SpreadTooHigh, + v if v == Error::MultipleRequests as u8 => Error::MultipleRequests, + v if v == Error::BondTooSmall as u8 => Error::BondTooSmall, + v if v == Error::BondTooLarge as u8 => Error::BondTooLarge, + v if v == Error::UnbondTooLarge as u8 => Error::UnbondTooLarge, + v if v == Error::BondTransferFailed as u8 => Error::BondTransferFailed, + v if v == Error::UnbondTransferFailed as u8 => Error::UnbondTransferFailed, + v if v == Error::TimeWentBackwards as u8 => Error::TimeWentBackwards, + v if v == Error::StakesNotFound as u8 => Error::StakesNotFound, + v if v == Error::PaymentPurseNotFound as u8 => Error::PaymentPurseNotFound, + v if v == Error::PaymentPurseKeyUnexpectedType as u8 => { + Error::PaymentPurseKeyUnexpectedType + } + v if v == Error::PaymentPurseBalanceNotFound as u8 => { + Error::PaymentPurseBalanceNotFound + } + v if v == Error::BondingPurseNotFound as u8 => Error::BondingPurseNotFound, + v if v == Error::BondingPurseKeyUnexpectedType as u8 => { + Error::BondingPurseKeyUnexpectedType + } + v if v == Error::RefundPurseKeyUnexpectedType as u8 => { + Error::RefundPurseKeyUnexpectedType + } + v if v == Error::RewardsPurseNotFound as u8 => Error::RewardsPurseNotFound, + v if v == Error::RewardsPurseKeyUnexpectedType as u8 => { + Error::RewardsPurseKeyUnexpectedType + } + v if v == Error::StakesKeyDeserializationFailed as u8 => { + Error::StakesKeyDeserializationFailed + } + v if v == Error::StakesDeserializationFailed as u8 => { + Error::StakesDeserializationFailed + } + v if v == Error::SystemFunctionCalledByUserAccount as u8 => { + Error::SystemFunctionCalledByUserAccount + } + v if v == Error::InsufficientPaymentForAmountSpent as u8 => { + Error::InsufficientPaymentForAmountSpent + } + v if v == Error::FailedTransferToRewardsPurse as u8 => { + Error::FailedTransferToRewardsPurse + } + v if v == Error::FailedTransferToAccountPurse as u8 => { + Error::FailedTransferToAccountPurse + } + v if v == 
Error::SetRefundPurseCalledOutsidePayment as u8 => { + Error::SetRefundPurseCalledOutsidePayment + } + + v if v == Error::GetBalance as u8 => Error::GetBalance, + v if v == Error::PutKey as u8 => Error::PutKey, + v if v == Error::RemoveKey as u8 => Error::RemoveKey, + v if v == Error::Transfer as u8 => Error::Transfer, + v if v == Error::ArithmeticOverflow as u8 => Error::ArithmeticOverflow, + v if v == Error::GasLimit as u8 => Error::GasLimit, + v if v == Error::RefundPurseIsPaymentPurse as u8 => Error::RefundPurseIsPaymentPurse, + v if v == Error::ReduceTotalSupply as u8 => Error::ReduceTotalSupply, + v if v == Error::Storage as u8 => Error::Storage, + v if v == Error::AccumulationPurseNotFound as u8 => Error::AccumulationPurseNotFound, + v if v == Error::AccumulationPurseKeyUnexpectedType as u8 => { + Error::AccumulationPurseKeyUnexpectedType + } + _ => return Err(()), + }; + Ok(error) + } +} + +impl CLTyped for Error { + fn cl_type() -> CLType { + CLType::U8 + } +} + +impl ToBytes for Error { + fn to_bytes(&self) -> result::Result, bytesrepr::Error> { + let value = *self as u8; + value.to_bytes() + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + } +} diff --git a/casper_types_ver_2_0/src/system/mint.rs b/casper_types_ver_2_0/src/system/mint.rs new file mode 100644 index 00000000..4a7e58a1 --- /dev/null +++ b/casper_types_ver_2_0/src/system/mint.rs @@ -0,0 +1,8 @@ +//! Contains implementation of a Mint contract functionality. +mod constants; +mod entry_points; +mod error; + +pub use constants::*; +pub use entry_points::mint_entry_points; +pub use error::Error; diff --git a/casper_types_ver_2_0/src/system/mint/constants.rs b/casper_types_ver_2_0/src/system/mint/constants.rs new file mode 100644 index 00000000..cffada44 --- /dev/null +++ b/casper_types_ver_2_0/src/system/mint/constants.rs @@ -0,0 +1,40 @@ +/// Named constant for `purse`. +pub const ARG_PURSE: &str = "purse"; +/// Named constant for `amount`. 
+pub const ARG_AMOUNT: &str = "amount"; +/// Named constant for `id`. +pub const ARG_ID: &str = "id"; +/// Named constant for `to`. +pub const ARG_TO: &str = "to"; +/// Named constant for `source`. +pub const ARG_SOURCE: &str = "source"; +/// Named constant for `target`. +pub const ARG_TARGET: &str = "target"; +/// Named constant for `round_seigniorage_rate` used in installer. +pub const ARG_ROUND_SEIGNIORAGE_RATE: &str = "round_seigniorage_rate"; + +/// Named constant for method `mint`. +pub const METHOD_MINT: &str = "mint"; +/// Named constant for method `reduce_total_supply`. +pub const METHOD_REDUCE_TOTAL_SUPPLY: &str = "reduce_total_supply"; +/// Named constant for (synthetic) method `create` +pub const METHOD_CREATE: &str = "create"; +/// Named constant for method `balance`. +pub const METHOD_BALANCE: &str = "balance"; +/// Named constant for method `transfer`. +pub const METHOD_TRANSFER: &str = "transfer"; +/// Named constant for method `read_base_round_reward`. +pub const METHOD_READ_BASE_ROUND_REWARD: &str = "read_base_round_reward"; +/// Named constant for method `mint_into_existing_purse`. +pub const METHOD_MINT_INTO_EXISTING_PURSE: &str = "mint_into_existing_purse"; + +/// Storage for mint contract hash. +pub const HASH_KEY: &str = "mint_hash"; +/// Storage for mint access key. +pub const ACCESS_KEY: &str = "mint_access"; +/// Storage for base round reward key. +pub const BASE_ROUND_REWARD_KEY: &str = "mint_base_round_reward"; +/// Storage for mint total supply key. +pub const TOTAL_SUPPLY_KEY: &str = "total_supply"; +/// Storage for mint round seigniorage rate. 
+pub const ROUND_SEIGNIORAGE_RATE_KEY: &str = "round_seigniorage_rate"; diff --git a/casper_types_ver_2_0/src/system/mint/entry_points.rs b/casper_types_ver_2_0/src/system/mint/entry_points.rs new file mode 100644 index 00000000..6002b338 --- /dev/null +++ b/casper_types_ver_2_0/src/system/mint/entry_points.rs @@ -0,0 +1,102 @@ +use alloc::boxed::Box; + +use crate::{ + addressable_entity::Parameters, + system::mint::{ + ARG_AMOUNT, ARG_ID, ARG_PURSE, ARG_SOURCE, ARG_TARGET, ARG_TO, METHOD_BALANCE, + METHOD_CREATE, METHOD_MINT, METHOD_MINT_INTO_EXISTING_PURSE, METHOD_READ_BASE_ROUND_REWARD, + METHOD_REDUCE_TOTAL_SUPPLY, METHOD_TRANSFER, + }, + CLType, EntryPoint, EntryPointAccess, EntryPointType, EntryPoints, Parameter, +}; + +/// Returns entry points for a mint system contract. +pub fn mint_entry_points() -> EntryPoints { + let mut entry_points = EntryPoints::new(); + + let entry_point = EntryPoint::new( + METHOD_MINT, + vec![Parameter::new(ARG_AMOUNT, CLType::U512)], + CLType::Result { + ok: Box::new(CLType::URef), + err: Box::new(CLType::U8), + }, + EntryPointAccess::Public, + EntryPointType::AddressableEntity, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntryPoint::new( + METHOD_REDUCE_TOTAL_SUPPLY, + vec![Parameter::new(ARG_AMOUNT, CLType::U512)], + CLType::Result { + ok: Box::new(CLType::Unit), + err: Box::new(CLType::U8), + }, + EntryPointAccess::Public, + EntryPointType::AddressableEntity, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntryPoint::new( + METHOD_CREATE, + Parameters::new(), + CLType::URef, + EntryPointAccess::Public, + EntryPointType::AddressableEntity, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntryPoint::new( + METHOD_BALANCE, + vec![Parameter::new(ARG_PURSE, CLType::URef)], + CLType::Option(Box::new(CLType::U512)), + EntryPointAccess::Public, + EntryPointType::AddressableEntity, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = 
EntryPoint::new( + METHOD_TRANSFER, + vec![ + Parameter::new(ARG_TO, CLType::Option(Box::new(CLType::ByteArray(32)))), + Parameter::new(ARG_SOURCE, CLType::URef), + Parameter::new(ARG_TARGET, CLType::URef), + Parameter::new(ARG_AMOUNT, CLType::U512), + Parameter::new(ARG_ID, CLType::Option(Box::new(CLType::U64))), + ], + CLType::Result { + ok: Box::new(CLType::Unit), + err: Box::new(CLType::U8), + }, + EntryPointAccess::Public, + EntryPointType::AddressableEntity, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntryPoint::new( + METHOD_READ_BASE_ROUND_REWARD, + Parameters::new(), + CLType::U512, + EntryPointAccess::Public, + EntryPointType::AddressableEntity, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntryPoint::new( + METHOD_MINT_INTO_EXISTING_PURSE, + vec![ + Parameter::new(ARG_AMOUNT, CLType::U512), + Parameter::new(ARG_PURSE, CLType::URef), + ], + CLType::Result { + ok: Box::new(CLType::Unit), + err: Box::new(CLType::U8), + }, + EntryPointAccess::Public, + EntryPointType::AddressableEntity, + ); + entry_points.add_entry_point(entry_point); + + entry_points +} diff --git a/casper_types_ver_2_0/src/system/mint/error.rs b/casper_types_ver_2_0/src/system/mint/error.rs new file mode 100644 index 00000000..f7d4f3fb --- /dev/null +++ b/casper_types_ver_2_0/src/system/mint/error.rs @@ -0,0 +1,300 @@ +//! Home of the Mint contract's [`enum@Error`] type. + +use alloc::vec::Vec; +use core::{ + convert::{TryFrom, TryInto}, + fmt::{self, Display, Formatter}, +}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + CLType, CLTyped, +}; + +/// Errors which can occur while executing the Mint contract. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +#[repr(u8)] +#[non_exhaustive] +pub enum Error { + /// Insufficient funds to complete the transfer. 
+ /// ``` + /// # use casper_types_ver_2_0::system::mint::Error; + /// assert_eq!(0, Error::InsufficientFunds as u8); + /// ``` + InsufficientFunds = 0, + /// Source purse not found. + /// ``` + /// # use casper_types_ver_2_0::system::mint::Error; + /// assert_eq!(1, Error::SourceNotFound as u8); + /// ``` + SourceNotFound = 1, + /// Destination purse not found. + /// ``` + /// # use casper_types_ver_2_0::system::mint::Error; + /// assert_eq!(2, Error::DestNotFound as u8); + /// ``` + DestNotFound = 2, + /// The given [`URef`](crate::URef) does not reference the account holder's purse, or such a + /// `URef` does not have the required [`AccessRights`](crate::AccessRights). + /// ``` + /// # use casper_types_ver_2_0::system::mint::Error; + /// assert_eq!(3, Error::InvalidURef as u8); + /// ``` + InvalidURef = 3, + /// The source purse is not writeable (see [`URef::is_writeable`](crate::URef::is_writeable)), + /// or the destination purse is not addable (see + /// [`URef::is_addable`](crate::URef::is_addable)). + /// ``` + /// # use casper_types_ver_2_0::system::mint::Error; + /// assert_eq!(4, Error::InvalidAccessRights as u8); + /// ``` + InvalidAccessRights = 4, + /// Tried to create a new purse with a non-zero initial balance. + /// ``` + /// # use casper_types_ver_2_0::system::mint::Error; + /// assert_eq!(5, Error::InvalidNonEmptyPurseCreation as u8); + /// ``` + InvalidNonEmptyPurseCreation = 5, + /// Failed to read from local or global storage. + /// ``` + /// # use casper_types_ver_2_0::system::mint::Error; + /// assert_eq!(6, Error::Storage as u8); + /// ``` + Storage = 6, + /// Purse not found while trying to get balance. + /// ``` + /// # use casper_types_ver_2_0::system::mint::Error; + /// assert_eq!(7, Error::PurseNotFound as u8); + /// ``` + PurseNotFound = 7, + /// Unable to obtain a key by its name. 
+ /// ``` + /// # use casper_types_ver_2_0::system::mint::Error; + /// assert_eq!(8, Error::MissingKey as u8); + /// ``` + MissingKey = 8, + /// Total supply not found. + /// ``` + /// # use casper_types_ver_2_0::system::mint::Error; + /// assert_eq!(9, Error::TotalSupplyNotFound as u8); + /// ``` + TotalSupplyNotFound = 9, + /// Failed to record transfer. + /// ``` + /// # use casper_types_ver_2_0::system::mint::Error; + /// assert_eq!(10, Error::RecordTransferFailure as u8); + /// ``` + RecordTransferFailure = 10, + /// Invalid attempt to reduce total supply. + /// ``` + /// # use casper_types_ver_2_0::system::mint::Error; + /// assert_eq!(11, Error::InvalidTotalSupplyReductionAttempt as u8); + /// ``` + InvalidTotalSupplyReductionAttempt = 11, + /// Failed to create new uref. + /// ``` + /// # use casper_types_ver_2_0::system::mint::Error; + /// assert_eq!(12, Error::NewURef as u8); + /// ``` + NewURef = 12, + /// Failed to put key. + /// ``` + /// # use casper_types_ver_2_0::system::mint::Error; + /// assert_eq!(13, Error::PutKey as u8); + /// ``` + PutKey = 13, + /// Failed to write to dictionary. + /// ``` + /// # use casper_types_ver_2_0::system::mint::Error; + /// assert_eq!(14, Error::WriteDictionary as u8); + /// ``` + WriteDictionary = 14, + /// Failed to create a [`crate::CLValue`]. + /// ``` + /// # use casper_types_ver_2_0::system::mint::Error; + /// assert_eq!(15, Error::CLValue as u8); + /// ``` + CLValue = 15, + /// Failed to serialize data. + /// ``` + /// # use casper_types_ver_2_0::system::mint::Error; + /// assert_eq!(16, Error::Serialize as u8); + /// ``` + Serialize = 16, + /// Source and target purse [`crate::URef`]s are equal. + /// ``` + /// # use casper_types_ver_2_0::system::mint::Error; + /// assert_eq!(17, Error::EqualSourceAndTarget as u8); + /// ``` + EqualSourceAndTarget = 17, + /// An arithmetic overflow has occurred. 
+ /// ``` + /// # use casper_types_ver_2_0::system::mint::Error; + /// assert_eq!(18, Error::ArithmeticOverflow as u8); + /// ``` + ArithmeticOverflow = 18, + + // NOTE: These variants below will be removed once support for WASM system contracts will be + // dropped. + #[doc(hidden)] + GasLimit = 19, + + /// Raised when an entry point is called from invalid account context. + InvalidContext = 20, + + /// Session code tried to transfer more CSPR than user approved. + /// ``` + /// # use casper_types_ver_2_0::system::mint::Error; + /// assert_eq!(21, Error::UnapprovedSpendingAmount as u8); + UnapprovedSpendingAmount = 21, + + /// Failed to transfer tokens on a private chain. + /// ``` + /// # use casper_types_ver_2_0::system::mint::Error; + /// assert_eq!(22, Error::DisabledUnrestrictedTransfers as u8); + DisabledUnrestrictedTransfers = 22, + + #[cfg(test)] + #[doc(hidden)] + Sentinel, +} + +/// Used for testing; this should be guaranteed to be the maximum valid value of [`Error`] enum. +#[cfg(test)] +const MAX_ERROR_VALUE: u8 = Error::Sentinel as u8; + +impl CLTyped for Error { + fn cl_type() -> CLType { + CLType::U8 + } +} + +// This error type is not intended to be used by third party crates. +#[doc(hidden)] +pub struct TryFromU8ForError(()); + +// This conversion is not intended to be used by third party crates. 
+#[doc(hidden)] +impl TryFrom for Error { + type Error = TryFromU8ForError; + + fn try_from(value: u8) -> Result { + match value { + d if d == Error::InsufficientFunds as u8 => Ok(Error::InsufficientFunds), + d if d == Error::SourceNotFound as u8 => Ok(Error::SourceNotFound), + d if d == Error::DestNotFound as u8 => Ok(Error::DestNotFound), + d if d == Error::InvalidURef as u8 => Ok(Error::InvalidURef), + d if d == Error::InvalidAccessRights as u8 => Ok(Error::InvalidAccessRights), + d if d == Error::InvalidNonEmptyPurseCreation as u8 => { + Ok(Error::InvalidNonEmptyPurseCreation) + } + d if d == Error::Storage as u8 => Ok(Error::Storage), + d if d == Error::PurseNotFound as u8 => Ok(Error::PurseNotFound), + d if d == Error::MissingKey as u8 => Ok(Error::MissingKey), + d if d == Error::TotalSupplyNotFound as u8 => Ok(Error::TotalSupplyNotFound), + d if d == Error::RecordTransferFailure as u8 => Ok(Error::RecordTransferFailure), + d if d == Error::InvalidTotalSupplyReductionAttempt as u8 => { + Ok(Error::InvalidTotalSupplyReductionAttempt) + } + d if d == Error::NewURef as u8 => Ok(Error::NewURef), + d if d == Error::PutKey as u8 => Ok(Error::PutKey), + d if d == Error::WriteDictionary as u8 => Ok(Error::WriteDictionary), + d if d == Error::CLValue as u8 => Ok(Error::CLValue), + d if d == Error::Serialize as u8 => Ok(Error::Serialize), + d if d == Error::EqualSourceAndTarget as u8 => Ok(Error::EqualSourceAndTarget), + d if d == Error::ArithmeticOverflow as u8 => Ok(Error::ArithmeticOverflow), + d if d == Error::GasLimit as u8 => Ok(Error::GasLimit), + d if d == Error::InvalidContext as u8 => Ok(Error::InvalidContext), + d if d == Error::UnapprovedSpendingAmount as u8 => Ok(Error::UnapprovedSpendingAmount), + d if d == Error::DisabledUnrestrictedTransfers as u8 => { + Ok(Error::DisabledUnrestrictedTransfers) + } + _ => Err(TryFromU8ForError(())), + } + } +} + +impl ToBytes for Error { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let value = *self as u8; + 
value.to_bytes() + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + } +} + +impl FromBytes for Error { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (value, rem): (u8, _) = FromBytes::from_bytes(bytes)?; + let error: Error = value + .try_into() + // In case an Error variant is unable to be determined it would return an + // Error::Formatting as if its unable to be correctly deserialized. + .map_err(|_| bytesrepr::Error::Formatting)?; + Ok((error, rem)) + } +} + +impl Display for Error { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + Error::InsufficientFunds => formatter.write_str("Insufficient funds"), + Error::SourceNotFound => formatter.write_str("Source not found"), + Error::DestNotFound => formatter.write_str("Destination not found"), + Error::InvalidURef => formatter.write_str("Invalid URef"), + Error::InvalidAccessRights => formatter.write_str("Invalid AccessRights"), + Error::InvalidNonEmptyPurseCreation => { + formatter.write_str("Invalid non-empty purse creation") + } + Error::Storage => formatter.write_str("Storage error"), + Error::PurseNotFound => formatter.write_str("Purse not found"), + Error::MissingKey => formatter.write_str("Missing key"), + Error::TotalSupplyNotFound => formatter.write_str("Total supply not found"), + Error::RecordTransferFailure => formatter.write_str("Failed to record transfer"), + Error::InvalidTotalSupplyReductionAttempt => { + formatter.write_str("Invalid attempt to reduce total supply") + } + Error::NewURef => formatter.write_str("Failed to create new uref"), + Error::PutKey => formatter.write_str("Failed to put key"), + Error::WriteDictionary => formatter.write_str("Failed to write dictionary"), + Error::CLValue => formatter.write_str("Failed to create a CLValue"), + Error::Serialize => formatter.write_str("Failed to serialize data"), + Error::EqualSourceAndTarget => formatter.write_str("Invalid target purse"), + Error::ArithmeticOverflow 
=> formatter.write_str("Arithmetic overflow has occurred"), + Error::GasLimit => formatter.write_str("GasLimit"), + Error::InvalidContext => formatter.write_str("Invalid context"), + Error::UnapprovedSpendingAmount => formatter.write_str("Unapproved spending amount"), + Error::DisabledUnrestrictedTransfers => { + formatter.write_str("Disabled unrestricted transfers") + } + #[cfg(test)] + Error::Sentinel => formatter.write_str("Sentinel error"), + } + } +} + +#[cfg(test)] +mod tests { + use std::convert::TryFrom; + + use super::{Error, TryFromU8ForError, MAX_ERROR_VALUE}; + + #[test] + fn error_round_trips() { + for i in 0..=u8::max_value() { + match Error::try_from(i) { + Ok(error) if i < MAX_ERROR_VALUE => assert_eq!(error as u8, i), + Ok(error) => panic!( + "value of variant {:?} ({}) exceeds MAX_ERROR_VALUE ({})", + error, i, MAX_ERROR_VALUE + ), + Err(TryFromU8ForError(())) if i >= MAX_ERROR_VALUE => (), + Err(TryFromU8ForError(())) => { + panic!("missing conversion from u8 to error value: {}", i) + } + } + } + } +} diff --git a/casper_types_ver_2_0/src/system/standard_payment.rs b/casper_types_ver_2_0/src/system/standard_payment.rs new file mode 100644 index 00000000..92c3fab3 --- /dev/null +++ b/casper_types_ver_2_0/src/system/standard_payment.rs @@ -0,0 +1,6 @@ +//! Contains implementation of a standard payment contract implementation. +mod constants; +mod entry_points; + +pub use constants::*; +pub use entry_points::standard_payment_entry_points; diff --git a/casper_types_ver_2_0/src/system/standard_payment/constants.rs b/casper_types_ver_2_0/src/system/standard_payment/constants.rs new file mode 100644 index 00000000..9bd88784 --- /dev/null +++ b/casper_types_ver_2_0/src/system/standard_payment/constants.rs @@ -0,0 +1,10 @@ +/// Named constant for `amount`. +pub const ARG_AMOUNT: &str = "amount"; + +/// Named constant for method `pay`. +pub const METHOD_PAY: &str = "pay"; + +/// Storage for standard payment contract hash. 
+pub const HASH_KEY: &str = "standard_payment_hash"; +/// Storage for standard payment access key. +pub const ACCESS_KEY: &str = "standard_payment_access"; diff --git a/casper_types_ver_2_0/src/system/standard_payment/entry_points.rs b/casper_types_ver_2_0/src/system/standard_payment/entry_points.rs new file mode 100644 index 00000000..3eeaed52 --- /dev/null +++ b/casper_types_ver_2_0/src/system/standard_payment/entry_points.rs @@ -0,0 +1,25 @@ +use alloc::{boxed::Box, string::ToString}; + +use crate::{ + system::standard_payment::{ARG_AMOUNT, METHOD_PAY}, + CLType, EntryPoint, EntryPointAccess, EntryPointType, EntryPoints, Parameter, +}; + +/// Creates standard payment contract entry points. +pub fn standard_payment_entry_points() -> EntryPoints { + let mut entry_points = EntryPoints::new(); + + let entry_point = EntryPoint::new( + METHOD_PAY.to_string(), + vec![Parameter::new(ARG_AMOUNT, CLType::U512)], + CLType::Result { + ok: Box::new(CLType::Unit), + err: Box::new(CLType::U32), + }, + EntryPointAccess::Public, + EntryPointType::Session, + ); + entry_points.add_entry_point(entry_point); + + entry_points +} diff --git a/casper_types_ver_2_0/src/system/system_contract_type.rs b/casper_types_ver_2_0/src/system/system_contract_type.rs new file mode 100644 index 00000000..0ad6551a --- /dev/null +++ b/casper_types_ver_2_0/src/system/system_contract_type.rs @@ -0,0 +1,249 @@ +//! Home of system contract type enum. 
+ +use alloc::{ + string::{String, ToString}, + vec::Vec, +}; +use core::{ + convert::TryFrom, + fmt::{self, Display, Formatter}, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, Error, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + ApiError, EntryPoints, +}; + +const MINT_TAG: u8 = 0; +const HANDLE_PAYMENT_TAG: u8 = 1; +const STANDARD_PAYMENT_TAG: u8 = 2; +const AUCTION_TAG: u8 = 3; + +use super::{ + auction::auction_entry_points, handle_payment::handle_payment_entry_points, + mint::mint_entry_points, standard_payment::standard_payment_entry_points, +}; + +/// System contract types. +/// +/// Used by converting to a `u32` and passing as the `system_contract_index` argument of +/// `ext_ffi::casper_get_system_contract()`. +#[derive( + Debug, Clone, PartialEq, Eq, Default, PartialOrd, Ord, Hash, Serialize, Deserialize, Copy, +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum SystemEntityType { + /// Mint contract. + #[default] + Mint, + /// Handle Payment contract. + HandlePayment, + /// Standard Payment contract. + StandardPayment, + /// Auction contract. 
+ Auction, +} + +impl ToBytes for SystemEntityType { + fn to_bytes(&self) -> Result, Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + match self { + SystemEntityType::Mint => { + writer.push(MINT_TAG); + } + SystemEntityType::HandlePayment => { + writer.push(HANDLE_PAYMENT_TAG); + } + SystemEntityType::StandardPayment => { + writer.push(STANDARD_PAYMENT_TAG); + } + SystemEntityType::Auction => writer.push(AUCTION_TAG), + } + Ok(()) + } +} + +impl FromBytes for SystemEntityType { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + MINT_TAG => Ok((SystemEntityType::Mint, remainder)), + HANDLE_PAYMENT_TAG => Ok((SystemEntityType::HandlePayment, remainder)), + STANDARD_PAYMENT_TAG => Ok((SystemEntityType::StandardPayment, remainder)), + AUCTION_TAG => Ok((SystemEntityType::Auction, remainder)), + _ => Err(Error::Formatting), + } + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> SystemEntityType { + match rng.gen_range(0..=3) { + 0 => SystemEntityType::Mint, + 1 => SystemEntityType::Auction, + 2 => SystemEntityType::StandardPayment, + 3 => SystemEntityType::HandlePayment, + _ => unreachable!(), + } + } +} + +/// Name of mint system contract +pub const MINT: &str = "mint"; +/// Name of handle payment system contract +pub const HANDLE_PAYMENT: &str = "handle payment"; +/// Name of standard payment system contract +pub const STANDARD_PAYMENT: &str = "standard payment"; +/// Name of auction system contract +pub const AUCTION: &str = "auction"; + +impl SystemEntityType { + /// Returns the name of the system contract. 
+ pub fn contract_name(&self) -> String { + match self { + SystemEntityType::Mint => MINT.to_string(), + SystemEntityType::HandlePayment => HANDLE_PAYMENT.to_string(), + SystemEntityType::StandardPayment => STANDARD_PAYMENT.to_string(), + SystemEntityType::Auction => AUCTION.to_string(), + } + } + + /// Returns the entrypoint of the system contract. + pub fn contract_entry_points(&self) -> EntryPoints { + match self { + SystemEntityType::Mint => mint_entry_points(), + SystemEntityType::HandlePayment => handle_payment_entry_points(), + SystemEntityType::StandardPayment => standard_payment_entry_points(), + SystemEntityType::Auction => auction_entry_points(), + } + } +} + +impl From for u32 { + fn from(system_contract_type: SystemEntityType) -> u32 { + match system_contract_type { + SystemEntityType::Mint => 0, + SystemEntityType::HandlePayment => 1, + SystemEntityType::StandardPayment => 2, + SystemEntityType::Auction => 3, + } + } +} + +// This conversion is not intended to be used by third party crates. 
+#[doc(hidden)] +impl TryFrom for SystemEntityType { + type Error = ApiError; + fn try_from(value: u32) -> Result { + match value { + 0 => Ok(SystemEntityType::Mint), + 1 => Ok(SystemEntityType::HandlePayment), + 2 => Ok(SystemEntityType::StandardPayment), + 3 => Ok(SystemEntityType::Auction), + _ => Err(ApiError::InvalidSystemContract), + } + } +} + +impl Display for SystemEntityType { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match *self { + SystemEntityType::Mint => write!(f, "{}", MINT), + SystemEntityType::HandlePayment => write!(f, "{}", HANDLE_PAYMENT), + SystemEntityType::StandardPayment => write!(f, "{}", STANDARD_PAYMENT), + SystemEntityType::Auction => write!(f, "{}", AUCTION), + } + } +} + +#[cfg(test)] +mod tests { + use std::string::ToString; + + use super::*; + + #[test] + fn get_index_of_mint_contract() { + let index: u32 = SystemEntityType::Mint.into(); + assert_eq!(index, 0u32); + assert_eq!(SystemEntityType::Mint.to_string(), MINT); + } + + #[test] + fn get_index_of_handle_payment_contract() { + let index: u32 = SystemEntityType::HandlePayment.into(); + assert_eq!(index, 1u32); + assert_eq!(SystemEntityType::HandlePayment.to_string(), HANDLE_PAYMENT); + } + + #[test] + fn get_index_of_standard_payment_contract() { + let index: u32 = SystemEntityType::StandardPayment.into(); + assert_eq!(index, 2u32); + assert_eq!( + SystemEntityType::StandardPayment.to_string(), + STANDARD_PAYMENT + ); + } + + #[test] + fn get_index_of_auction_contract() { + let index: u32 = SystemEntityType::Auction.into(); + assert_eq!(index, 3u32); + assert_eq!(SystemEntityType::Auction.to_string(), AUCTION); + } + + #[test] + fn create_mint_variant_from_int() { + let mint = SystemEntityType::try_from(0).ok().unwrap(); + assert_eq!(mint, SystemEntityType::Mint); + } + + #[test] + fn create_handle_payment_variant_from_int() { + let handle_payment = SystemEntityType::try_from(1).ok().unwrap(); + assert_eq!(handle_payment, SystemEntityType::HandlePayment); + } + + 
#[test] + fn create_standard_payment_variant_from_int() { + let handle_payment = SystemEntityType::try_from(2).ok().unwrap(); + assert_eq!(handle_payment, SystemEntityType::StandardPayment); + } + + #[test] + fn create_auction_variant_from_int() { + let auction = SystemEntityType::try_from(3).ok().unwrap(); + assert_eq!(auction, SystemEntityType::Auction); + } + + #[test] + fn create_unknown_system_contract_variant() { + assert!(SystemEntityType::try_from(4).is_err()); + assert!(SystemEntityType::try_from(5).is_err()); + assert!(SystemEntityType::try_from(10).is_err()); + assert!(SystemEntityType::try_from(u32::max_value()).is_err()); + } +} diff --git a/casper_types_ver_2_0/src/tagged.rs b/casper_types_ver_2_0/src/tagged.rs new file mode 100644 index 00000000..deddfe83 --- /dev/null +++ b/casper_types_ver_2_0/src/tagged.rs @@ -0,0 +1,5 @@ +/// The quality of having a tag +pub trait Tagged { + /// Returns the tag of a given object + fn tag(&self) -> T; +} diff --git a/casper_types_ver_2_0/src/testing.rs b/casper_types_ver_2_0/src/testing.rs new file mode 100644 index 00000000..24b7efd3 --- /dev/null +++ b/casper_types_ver_2_0/src/testing.rs @@ -0,0 +1,195 @@ +//! An RNG for testing purposes. +use std::{ + cell::RefCell, + cmp, env, + fmt::{self, Debug, Display, Formatter}, + iter, thread, +}; + +use rand::{ + self, + distributions::{uniform::SampleRange, Distribution, Standard}, + CryptoRng, Error, Rng, RngCore, SeedableRng, +}; +use rand_pcg::Pcg64Mcg; + +thread_local! { + static THIS_THREAD_HAS_RNG: RefCell = RefCell::new(false); +} + +const CL_TEST_SEED: &str = "CL_TEST_SEED"; + +type Seed = ::Seed; // [u8; 16] + +/// A fast, seedable pseudorandom number generator for use in tests which prints the seed if the +/// thread in which it is created panics. +/// +/// Only one `TestRng` is permitted per thread. 
+pub struct TestRng { + seed: Seed, + rng: Pcg64Mcg, +} + +impl TestRng { + /// Constructs a new `TestRng` using a seed generated from the env var `CL_TEST_SEED` if set or + /// from cryptographically secure random data if not. + /// + /// Note that `new()` or `default()` should only be called once per test. If a test needs to + /// spawn multiple threads each with their own `TestRng`, then use `new()` to create a single, + /// master `TestRng`, then use it to create a seed per child thread. The child `TestRng`s can + /// then be constructed in their own threads via `from_seed()`. + /// + /// # Panics + /// + /// Panics if a `TestRng` has already been created on this thread. + pub fn new() -> Self { + Self::set_flag_or_panic(); + + let mut seed = Seed::default(); + match env::var(CL_TEST_SEED) { + Ok(seed_as_hex) => { + base16::decode_slice(&seed_as_hex, &mut seed).unwrap_or_else(|error| { + THIS_THREAD_HAS_RNG.with(|flag| { + *flag.borrow_mut() = false; + }); + panic!("can't parse '{}' as a TestRng seed: {}", seed_as_hex, error) + }); + } + Err(_) => { + rand::thread_rng().fill(&mut seed); + } + }; + + let rng = Pcg64Mcg::from_seed(seed); + + TestRng { seed, rng } + } + + /// Constructs a new `TestRng` using `seed`. This should be used in cases where a test needs to + /// spawn multiple threads each with their own `TestRng`. A single, master `TestRng` should be + /// constructed before any child threads are spawned, and that one should be used to create + /// seeds for the child threads' `TestRng`s. + /// + /// # Panics + /// + /// Panics if a `TestRng` has already been created on this thread. + pub fn from_seed(seed: Seed) -> Self { + Self::set_flag_or_panic(); + let rng = Pcg64Mcg::from_seed(seed); + TestRng { seed, rng } + } + + /// Returns a random `String` of length within the range specified by `length_range`. 
+ pub fn random_string>(&mut self, length_range: R) -> String { + let count = self.gen_range(length_range); + iter::repeat_with(|| self.gen::()) + .take(count) + .collect() + } + + /// Returns a random `Vec` of length within the range specified by `length_range`. + pub fn random_vec, T>(&mut self, length_range: R) -> Vec + where + Standard: Distribution, + { + let count = self.gen_range(length_range); + iter::repeat_with(|| self.gen::()).take(count).collect() + } + + fn set_flag_or_panic() { + THIS_THREAD_HAS_RNG.with(|flag| { + if *flag.borrow() { + panic!("cannot create multiple TestRngs on the same thread"); + } + *flag.borrow_mut() = true; + }); + } + + /// Creates a child RNG. + /// + /// The resulting RNG is seeded from `self` deterministically. + pub fn create_child(&mut self) -> Self { + let seed = self.gen(); + let rng = Pcg64Mcg::from_seed(seed); + TestRng { seed, rng } + } +} + +impl Default for TestRng { + fn default() -> Self { + TestRng::new() + } +} + +impl Display for TestRng { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "TestRng seed: {}", + base16::encode_lower(&self.seed) + ) + } +} + +impl Debug for TestRng { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + Display::fmt(self, formatter) + } +} + +impl Drop for TestRng { + fn drop(&mut self) { + if thread::panicking() { + let line_1 = format!("Thread: {}", thread::current().name().unwrap_or("unnamed")); + let line_2 = "To reproduce failure, try running with env var:"; + let line_3 = format!("{}={}", CL_TEST_SEED, base16::encode_lower(&self.seed)); + let max_length = cmp::max(line_1.len(), line_2.len()); + let border = "=".repeat(max_length); + println!( + "\n{}\n{}\n{}\n{}\n{}\n", + border, line_1, line_2, line_3, border + ); + } + } +} + +impl SeedableRng for TestRng { + type Seed = ::Seed; + + fn from_seed(seed: Self::Seed) -> Self { + Self::from_seed(seed) + } +} + +impl RngCore for TestRng { + fn next_u32(&mut self) -> u32 { + 
self.rng.next_u32() + } + + fn next_u64(&mut self) -> u64 { + self.rng.next_u64() + } + + fn fill_bytes(&mut self, dest: &mut [u8]) { + self.rng.fill_bytes(dest) + } + + fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { + self.rng.try_fill_bytes(dest) + } +} + +impl CryptoRng for TestRng {} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + #[should_panic(expected = "cannot create multiple TestRngs on the same thread")] + fn second_test_rng_in_thread_should_panic() { + let _test_rng1 = TestRng::new(); + let seed = [1; 16]; + let _test_rng2 = TestRng::from_seed(seed); + } +} diff --git a/casper_types_ver_2_0/src/timestamp.rs b/casper_types_ver_2_0/src/timestamp.rs new file mode 100644 index 00000000..524d0b14 --- /dev/null +++ b/casper_types_ver_2_0/src/timestamp.rs @@ -0,0 +1,470 @@ +use alloc::vec::Vec; +use core::{ + fmt::{self, Display, Formatter}, + ops::{Add, AddAssign, Div, Mul, Rem, Shl, Shr, Sub, SubAssign}, + time::Duration, +}; +#[cfg(any(feature = "std", test))] +use std::{str::FromStr, time::SystemTime}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "std", test))] +use humantime::{DurationError, TimestampError}; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +#[cfg(any(feature = "std", test))] +use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; + +use crate::bytesrepr::{self, FromBytes, ToBytes}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; + +/// Example timestamp equal to 2020-11-17T00:39:24.072Z. +#[cfg(feature = "json-schema")] +const TIMESTAMP: Timestamp = Timestamp(1_605_573_564_072); + +/// A timestamp type, representing a concrete moment in time. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "Timestamp formatted as per RFC 3339") +)] +pub struct Timestamp(#[cfg_attr(feature = "json-schema", schemars(with = "String"))] u64); + +impl Timestamp { + /// The maximum value a timestamp can have. + pub const MAX: Timestamp = Timestamp(u64::MAX); + + #[cfg(any(feature = "std", test))] + /// Returns the timestamp of the current moment. + pub fn now() -> Self { + let millis = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_millis() as u64; + Timestamp(millis) + } + + #[cfg(any(feature = "std", test))] + /// Returns the time that has elapsed since this timestamp. + pub fn elapsed(&self) -> TimeDiff { + TimeDiff(Timestamp::now().0.saturating_sub(self.0)) + } + + /// Returns a zero timestamp. + pub fn zero() -> Self { + Timestamp(0) + } + + /// Returns the timestamp as the number of milliseconds since the Unix epoch + pub fn millis(&self) -> u64 { + self.0 + } + + /// Returns the difference between `self` and `other`, or `0` if `self` is earlier than `other`. + pub fn saturating_diff(self, other: Timestamp) -> TimeDiff { + TimeDiff(self.0.saturating_sub(other.0)) + } + + /// Returns the difference between `self` and `other`, or `0` if that would be before the epoch. + #[must_use] + pub fn saturating_sub(self, other: TimeDiff) -> Timestamp { + Timestamp(self.0.saturating_sub(other.0)) + } + + /// Returns the sum of `self` and `other`, or the maximum possible value if that would be + /// exceeded. + #[must_use] + pub fn saturating_add(self, other: TimeDiff) -> Timestamp { + Timestamp(self.0.saturating_add(other.0)) + } + + /// Returns the number of trailing zeros in the number of milliseconds since the epoch. + pub fn trailing_zeros(&self) -> u8 { + self.0.trailing_zeros() as u8 + } + + // This method is not intended to be used by third party crates. 
+ #[doc(hidden)] + #[cfg(feature = "json-schema")] + pub fn example() -> &'static Self { + &TIMESTAMP + } + + /// Returns a random `Timestamp`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + Timestamp(1_596_763_000_000 + rng.gen_range(200_000..1_000_000)) + } + + /// Checked subtraction for timestamps + #[cfg(any(feature = "testing", test))] + pub fn checked_sub(self, other: TimeDiff) -> Option { + self.0.checked_sub(other.0).map(Timestamp) + } +} + +impl Display for Timestamp { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + #[cfg(any(feature = "std", test))] + return match SystemTime::UNIX_EPOCH.checked_add(Duration::from_millis(self.0)) { + Some(system_time) => write!(f, "{}", humantime::format_rfc3339_millis(system_time)) + .or_else(|e| write!(f, "Invalid timestamp: {}: {}", e, self.0)), + None => write!(f, "invalid Timestamp: {} ms after the Unix epoch", self.0), + }; + + #[cfg(not(any(feature = "std", test)))] + write!(f, "timestamp({}ms)", self.0) + } +} + +#[cfg(any(feature = "std", test))] +impl FromStr for Timestamp { + type Err = TimestampError; + + fn from_str(value: &str) -> Result { + let system_time = humantime::parse_rfc3339_weak(value)?; + let inner = system_time + .duration_since(SystemTime::UNIX_EPOCH) + .map_err(|_| TimestampError::OutOfRange)? 
+ .as_millis() as u64; + Ok(Timestamp(inner)) + } +} + +impl Add for Timestamp { + type Output = Timestamp; + + fn add(self, diff: TimeDiff) -> Timestamp { + Timestamp(self.0 + diff.0) + } +} + +impl AddAssign for Timestamp { + fn add_assign(&mut self, rhs: TimeDiff) { + self.0 += rhs.0; + } +} + +#[cfg(any(feature = "testing", test))] +impl Sub for Timestamp { + type Output = Timestamp; + + fn sub(self, diff: TimeDiff) -> Timestamp { + Timestamp(self.0 - diff.0) + } +} + +impl Rem for Timestamp { + type Output = TimeDiff; + + fn rem(self, diff: TimeDiff) -> TimeDiff { + TimeDiff(self.0 % diff.0) + } +} + +impl Shl for Timestamp +where + u64: Shl, +{ + type Output = Timestamp; + + fn shl(self, rhs: T) -> Timestamp { + Timestamp(self.0 << rhs) + } +} + +impl Shr for Timestamp +where + u64: Shr, +{ + type Output = Timestamp; + + fn shr(self, rhs: T) -> Timestamp { + Timestamp(self.0 >> rhs) + } +} + +#[cfg(any(feature = "std", test))] +impl Serialize for Timestamp { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + self.to_string().serialize(serializer) + } else { + self.0.serialize(serializer) + } + } +} + +#[cfg(any(feature = "std", test))] +impl<'de> Deserialize<'de> for Timestamp { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let value_as_string = String::deserialize(deserializer)?; + Timestamp::from_str(&value_as_string).map_err(SerdeError::custom) + } else { + let inner = u64::deserialize(deserializer)?; + Ok(Timestamp(inner)) + } + } +} + +impl ToBytes for Timestamp { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for Timestamp { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + u64::from_bytes(bytes).map(|(inner, remainder)| (Timestamp(inner), remainder)) + } +} + +impl From for Timestamp { + fn from(milliseconds_since_epoch: 
u64) -> Timestamp { + Timestamp(milliseconds_since_epoch) + } +} + +/// A time difference between two timestamps. +#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "Human-readable duration.") +)] +pub struct TimeDiff(#[cfg_attr(feature = "json-schema", schemars(with = "String"))] u64); + +impl Display for TimeDiff { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + #[cfg(any(feature = "std", test))] + return write!(f, "{}", humantime::format_duration(Duration::from(*self))); + + #[cfg(not(any(feature = "std", test)))] + write!(f, "time diff({}ms)", self.0) + } +} + +#[cfg(any(feature = "std", test))] +impl FromStr for TimeDiff { + type Err = DurationError; + + fn from_str(value: &str) -> Result { + let inner = humantime::parse_duration(value)?.as_millis() as u64; + Ok(TimeDiff(inner)) + } +} + +impl TimeDiff { + /// Returns the time difference as the number of milliseconds since the Unix epoch + pub fn millis(&self) -> u64 { + self.0 + } + + /// Creates a new time difference from seconds. + pub const fn from_seconds(seconds: u32) -> Self { + TimeDiff(seconds as u64 * 1_000) + } + + /// Creates a new time difference from milliseconds. + pub const fn from_millis(millis: u64) -> Self { + TimeDiff(millis) + } + + /// Returns the product, or `TimeDiff(u64::MAX)` if it would overflow. 
+ #[must_use] + pub fn saturating_mul(self, rhs: u64) -> Self { + TimeDiff(self.0.saturating_mul(rhs)) + } +} + +impl Add for TimeDiff { + type Output = TimeDiff; + + fn add(self, rhs: TimeDiff) -> TimeDiff { + TimeDiff(self.0 + rhs.0) + } +} + +impl AddAssign for TimeDiff { + fn add_assign(&mut self, rhs: TimeDiff) { + self.0 += rhs.0; + } +} + +impl Sub for TimeDiff { + type Output = TimeDiff; + + fn sub(self, rhs: TimeDiff) -> TimeDiff { + TimeDiff(self.0 - rhs.0) + } +} + +impl SubAssign for TimeDiff { + fn sub_assign(&mut self, rhs: TimeDiff) { + self.0 -= rhs.0; + } +} + +impl Mul for TimeDiff { + type Output = TimeDiff; + + fn mul(self, rhs: u64) -> TimeDiff { + TimeDiff(self.0 * rhs) + } +} + +impl Div for TimeDiff { + type Output = TimeDiff; + + fn div(self, rhs: u64) -> TimeDiff { + TimeDiff(self.0 / rhs) + } +} + +impl Div for TimeDiff { + type Output = u64; + + fn div(self, rhs: TimeDiff) -> u64 { + self.0 / rhs.0 + } +} + +impl From for Duration { + fn from(diff: TimeDiff) -> Duration { + Duration::from_millis(diff.0) + } +} + +#[cfg(any(feature = "std", test))] +impl Serialize for TimeDiff { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + self.to_string().serialize(serializer) + } else { + self.0.serialize(serializer) + } + } +} + +#[cfg(any(feature = "std", test))] +impl<'de> Deserialize<'de> for TimeDiff { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let value_as_string = String::deserialize(deserializer)?; + TimeDiff::from_str(&value_as_string).map_err(SerdeError::custom) + } else { + let inner = u64::deserialize(deserializer)?; + Ok(TimeDiff(inner)) + } + } +} + +impl ToBytes for TimeDiff { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for TimeDiff { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + 
u64::from_bytes(bytes).map(|(inner, remainder)| (TimeDiff(inner), remainder)) + } +} + +impl From for TimeDiff { + fn from(duration: Duration) -> TimeDiff { + TimeDiff(duration.as_millis() as u64) + } +} + +/// A module for the `[serde(with = serde_option_time_diff)]` attribute, to serialize and +/// deserialize `Option` treating `None` as 0. +#[cfg(any(feature = "std", test))] +pub mod serde_option_time_diff { + use super::*; + + /// Serializes an `Option`, using `0` if the value is `None`. + pub fn serialize( + maybe_td: &Option, + serializer: S, + ) -> Result { + maybe_td + .unwrap_or_else(|| TimeDiff::from_millis(0)) + .serialize(serializer) + } + + /// Deserializes an `Option`, returning `None` if the value is `0`. + pub fn deserialize<'de, D: Deserializer<'de>>( + deserializer: D, + ) -> Result, D::Error> { + let td = TimeDiff::deserialize(deserializer)?; + if td.0 == 0 { + Ok(None) + } else { + Ok(Some(td)) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn timestamp_serialization_roundtrip() { + let timestamp = Timestamp::now(); + + let timestamp_as_string = timestamp.to_string(); + assert_eq!( + timestamp, + Timestamp::from_str(×tamp_as_string).unwrap() + ); + + let serialized_json = serde_json::to_string(×tamp).unwrap(); + assert_eq!(timestamp, serde_json::from_str(&serialized_json).unwrap()); + + let serialized_bincode = bincode::serialize(×tamp).unwrap(); + assert_eq!( + timestamp, + bincode::deserialize(&serialized_bincode).unwrap() + ); + + bytesrepr::test_serialization_roundtrip(×tamp); + } + + #[test] + fn timediff_serialization_roundtrip() { + let mut rng = TestRng::new(); + let timediff = TimeDiff(rng.gen()); + + let timediff_as_string = timediff.to_string(); + assert_eq!(timediff, TimeDiff::from_str(&timediff_as_string).unwrap()); + + let serialized_json = serde_json::to_string(&timediff).unwrap(); + assert_eq!(timediff, serde_json::from_str(&serialized_json).unwrap()); + + let serialized_bincode = 
bincode::serialize(&timediff).unwrap(); + assert_eq!(timediff, bincode::deserialize(&serialized_bincode).unwrap()); + + bytesrepr::test_serialization_roundtrip(&timediff); + } + + #[test] + fn does_not_crash_for_big_timestamp_value() { + assert!(Timestamp::MAX.to_string().starts_with("Invalid timestamp:")); + } +} diff --git a/casper_types_ver_2_0/src/transaction.rs b/casper_types_ver_2_0/src/transaction.rs new file mode 100644 index 00000000..3583e142 --- /dev/null +++ b/casper_types_ver_2_0/src/transaction.rs @@ -0,0 +1,340 @@ +mod addressable_entity_identifier; +mod deploy; +mod execution_info; +mod finalized_approvals; +mod initiator_addr; +#[cfg(any(feature = "std", test))] +mod initiator_addr_and_secret_key; +mod package_identifier; +mod pricing_mode; +mod runtime_args; +mod transaction_approvals_hash; +mod transaction_entry_point; +mod transaction_hash; +mod transaction_header; +mod transaction_id; +mod transaction_invocation_target; +mod transaction_runtime; +mod transaction_scheduling; +mod transaction_session_kind; +mod transaction_target; +mod transaction_v1; + +use alloc::{collections::BTreeSet, vec::Vec}; +use core::fmt::{self, Debug, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use once_cell::sync::Lazy; +#[cfg(any(all(feature = "std", feature = "testing"), test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +#[cfg(any(feature = "std", test))] +use serde::{Deserialize, Serialize}; +use tracing::error; + +#[cfg(any(all(feature = "std", feature = "testing"), test))] +use crate::testing::TestRng; +use crate::{ + account::AccountHash, + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + Digest, Timestamp, +}; +#[cfg(feature = "json-schema")] +use crate::{account::ACCOUNT_HASH_LENGTH, SecretKey, TimeDiff, URef}; +pub use addressable_entity_identifier::AddressableEntityIdentifier; +pub use deploy::{ + Deploy, DeployApproval, DeployApprovalsHash, 
DeployConfigFailure, DeployDecodeFromJsonError, + DeployError, DeployExcessiveSizeError, DeployFootprint, DeployHash, DeployHeader, DeployId, + ExecutableDeployItem, ExecutableDeployItemIdentifier, FinalizedDeployApprovals, TransferTarget, +}; +#[cfg(any(feature = "std", test))] +pub use deploy::{DeployBuilder, DeployBuilderError}; +pub use execution_info::ExecutionInfo; +pub use finalized_approvals::FinalizedApprovals; +pub use initiator_addr::InitiatorAddr; +#[cfg(any(feature = "std", test))] +use initiator_addr_and_secret_key::InitiatorAddrAndSecretKey; +pub use package_identifier::PackageIdentifier; +pub use pricing_mode::PricingMode; +pub use runtime_args::{NamedArg, RuntimeArgs}; +pub use transaction_approvals_hash::TransactionApprovalsHash; +pub use transaction_entry_point::TransactionEntryPoint; +pub use transaction_hash::TransactionHash; +pub use transaction_header::TransactionHeader; +pub use transaction_id::TransactionId; +pub use transaction_invocation_target::TransactionInvocationTarget; +pub use transaction_runtime::TransactionRuntime; +pub use transaction_scheduling::TransactionScheduling; +pub use transaction_session_kind::TransactionSessionKind; +pub use transaction_target::TransactionTarget; +pub use transaction_v1::{ + FinalizedTransactionV1Approvals, TransactionV1, TransactionV1Approval, + TransactionV1ApprovalsHash, TransactionV1Body, TransactionV1ConfigFailure, + TransactionV1DecodeFromJsonError, TransactionV1Error, TransactionV1ExcessiveSizeError, + TransactionV1Hash, TransactionV1Header, +}; +#[cfg(any(feature = "std", test))] +pub use transaction_v1::{TransactionV1Builder, TransactionV1BuilderError}; + +const DEPLOY_TAG: u8 = 0; +const V1_TAG: u8 = 1; + +#[cfg(feature = "json-schema")] +pub(super) static TRANSACTION: Lazy = Lazy::new(|| { + let secret_key = SecretKey::example(); + let source = URef::from_formatted_str( + "uref-0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a-007", + ) + .unwrap(); + let target = 
URef::from_formatted_str( + "uref-1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b-000", + ) + .unwrap(); + let to = Some(AccountHash::new([40; ACCOUNT_HASH_LENGTH])); + let id = Some(999); + + let v1_txn = TransactionV1Builder::new_transfer(source, target, 30_000_000_000_u64, to, id) + .unwrap() + .with_chain_name("casper-example") + .with_timestamp(*Timestamp::example()) + .with_ttl(TimeDiff::from_seconds(3_600)) + .with_secret_key(secret_key) + .build() + .unwrap(); + Transaction::V1(v1_txn) +}); + +/// A versioned wrapper for a transaction or deploy. +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +#[cfg_attr( + any(feature = "std", test), + derive(Serialize, Deserialize), + serde(deny_unknown_fields) +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum Transaction { + /// A deploy. + Deploy(Deploy), + /// A version 1 transaction. + #[cfg_attr(any(feature = "std", test), serde(rename = "Version1"))] + V1(TransactionV1), +} + +impl Transaction { + /// Returns the `TransactionHash` identifying this transaction. + pub fn hash(&self) -> TransactionHash { + match self { + Transaction::Deploy(deploy) => TransactionHash::from(*deploy.hash()), + Transaction::V1(txn) => TransactionHash::from(*txn.hash()), + } + } + + /// Returns the computed approvals hash identifying this transaction's approvals. + pub fn compute_approvals_hash(&self) -> Result { + let approvals_hash = match self { + Transaction::Deploy(deploy) => { + TransactionApprovalsHash::Deploy(deploy.compute_approvals_hash()?) + } + Transaction::V1(txn) => TransactionApprovalsHash::V1(txn.compute_approvals_hash()?), + }; + Ok(approvals_hash) + } + + /// Returns the computed `TransactionId` uniquely identifying this transaction and its + /// approvals. 
+ pub fn compute_id(&self) -> TransactionId { + match self { + Transaction::Deploy(deploy) => { + let deploy_hash = *deploy.hash(); + let approvals_hash = deploy.compute_approvals_hash().unwrap_or_else(|error| { + error!(%error, "failed to serialize deploy approvals"); + DeployApprovalsHash::from(Digest::default()) + }); + TransactionId::new_deploy(deploy_hash, approvals_hash) + } + Transaction::V1(txn) => { + let txn_hash = *txn.hash(); + let approvals_hash = txn.compute_approvals_hash().unwrap_or_else(|error| { + error!(%error, "failed to serialize transaction approvals"); + TransactionV1ApprovalsHash::from(Digest::default()) + }); + TransactionId::new_v1(txn_hash, approvals_hash) + } + } + } + + /// Returns the address of the initiator of the transaction. + pub fn initiator_addr(&self) -> InitiatorAddr { + match self { + Transaction::Deploy(deploy) => InitiatorAddr::PublicKey(deploy.account().clone()), + Transaction::V1(txn) => txn.initiator_addr().clone(), + } + } + + /// Returns `true` if the transaction has expired. + pub fn expired(&self, current_instant: Timestamp) -> bool { + match self { + Transaction::Deploy(deploy) => deploy.expired(current_instant), + Transaction::V1(txn) => txn.expired(current_instant), + } + } + + /// Returns the timestamp of when the transaction expires, i.e. `self.timestamp + self.ttl`. + pub fn expires(&self) -> Timestamp { + match self { + Transaction::Deploy(deploy) => deploy.header().expires(), + Transaction::V1(txn) => txn.header().expires(), + } + } + + /// Returns the set of account hashes corresponding to the public keys of the approvals. + pub fn signers(&self) -> BTreeSet { + match self { + Transaction::Deploy(deploy) => deploy + .approvals() + .iter() + .map(|approval| approval.signer().to_account_hash()) + .collect(), + Transaction::V1(txn) => txn + .approvals() + .iter() + .map(|approval| approval.signer().to_account_hash()) + .collect(), + } + } + + // This method is not intended to be used by third party crates. 
+ #[doc(hidden)] + #[cfg(feature = "json-schema")] + pub fn example() -> &'static Self { + &TRANSACTION + } + + /// Returns a random, valid but possibly expired transaction. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random(rng: &mut TestRng) -> Self { + if rng.gen() { + Transaction::Deploy(Deploy::random_valid_native_transfer(rng)) + } else { + Transaction::V1(TransactionV1::random(rng)) + } + } +} + +impl From for Transaction { + fn from(deploy: Deploy) -> Self { + Self::Deploy(deploy) + } +} + +impl From for Transaction { + fn from(txn: TransactionV1) -> Self { + Self::V1(txn) + } +} + +impl ToBytes for Transaction { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + Transaction::Deploy(deploy) => { + DEPLOY_TAG.write_bytes(writer)?; + deploy.write_bytes(writer) + } + Transaction::V1(txn) => { + V1_TAG.write_bytes(writer)?; + txn.write_bytes(writer) + } + } + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + Transaction::Deploy(deploy) => deploy.serialized_length(), + Transaction::V1(txn) => txn.serialized_length(), + } + } +} + +impl FromBytes for Transaction { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + DEPLOY_TAG => { + let (deploy, remainder) = Deploy::from_bytes(remainder)?; + Ok((Transaction::Deploy(deploy), remainder)) + } + V1_TAG => { + let (txn, remainder) = TransactionV1::from_bytes(remainder)?; + Ok((Transaction::V1(txn), remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +impl Display for Transaction { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + Transaction::Deploy(deploy) => Display::fmt(deploy, formatter), + Transaction::V1(txn) => 
Display::fmt(txn, formatter), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn json_roundtrip() { + let rng = &mut TestRng::new(); + + let transaction = Transaction::from(Deploy::random(rng)); + let json_string = serde_json::to_string_pretty(&transaction).unwrap(); + let decoded = serde_json::from_str(&json_string).unwrap(); + assert_eq!(transaction, decoded); + + let transaction = Transaction::from(TransactionV1::random(rng)); + let json_string = serde_json::to_string_pretty(&transaction).unwrap(); + let decoded = serde_json::from_str(&json_string).unwrap(); + assert_eq!(transaction, decoded); + } + + #[test] + fn bincode_roundtrip() { + let rng = &mut TestRng::new(); + + let transaction = Transaction::from(Deploy::random(rng)); + let serialized = bincode::serialize(&transaction).unwrap(); + let deserialized = bincode::deserialize(&serialized).unwrap(); + assert_eq!(transaction, deserialized); + + let transaction = Transaction::from(TransactionV1::random(rng)); + let serialized = bincode::serialize(&transaction).unwrap(); + let deserialized = bincode::deserialize(&serialized).unwrap(); + assert_eq!(transaction, deserialized); + } + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let transaction = Transaction::from(Deploy::random(rng)); + bytesrepr::test_serialization_roundtrip(&transaction); + + let transaction = Transaction::from(TransactionV1::random(rng)); + bytesrepr::test_serialization_roundtrip(&transaction); + } +} diff --git a/casper_types_ver_2_0/src/transaction/addressable_entity_identifier.rs b/casper_types_ver_2_0/src/transaction/addressable_entity_identifier.rs new file mode 100644 index 00000000..bf588473 --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/addressable_entity_identifier.rs @@ -0,0 +1,122 @@ +use alloc::{string::String, vec::Vec}; +use core::fmt::{self, Debug, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; 
+#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(doc)] +use super::{ExecutableDeployItem, TransactionTarget}; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + AddressableEntityHash, +}; + +const HASH_TAG: u8 = 0; +const NAME_TAG: u8 = 1; + +/// Identifier for the contract object within a [`TransactionTarget::Stored`] or an +/// [`ExecutableDeployItem`]. +#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars( + description = "Identifier for the contract object within a `Stored` transaction target \ + or an `ExecutableDeployItem`." + ) +)] +#[serde(deny_unknown_fields)] +pub enum AddressableEntityIdentifier { + /// The hash identifying the addressable entity. + Hash(AddressableEntityHash), + /// The name identifying the addressable entity. + Name(String), +} + +impl AddressableEntityIdentifier { + /// Returns a random `AddressableEntityIdentifier`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + if rng.gen() { + AddressableEntityIdentifier::Hash(AddressableEntityHash::new(rng.gen())) + } else { + AddressableEntityIdentifier::Name(rng.random_string(1..21)) + } + } +} + +impl Display for AddressableEntityIdentifier { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + AddressableEntityIdentifier::Hash(hash) => write!(formatter, "entity-hash({})", hash), + AddressableEntityIdentifier::Name(name) => write!(formatter, "entity-name({})", name), + } + } +} + +impl ToBytes for AddressableEntityIdentifier { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + AddressableEntityIdentifier::Hash(hash) => { + HASH_TAG.write_bytes(writer)?; + hash.write_bytes(writer) + } + AddressableEntityIdentifier::Name(name) => { + NAME_TAG.write_bytes(writer)?; + name.write_bytes(writer) + } + } + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + AddressableEntityIdentifier::Hash(hash) => hash.serialized_length(), + AddressableEntityIdentifier::Name(name) => name.serialized_length(), + } + } +} + +impl FromBytes for AddressableEntityIdentifier { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + HASH_TAG => { + let (hash, remainder) = AddressableEntityHash::from_bytes(remainder)?; + Ok((AddressableEntityIdentifier::Hash(hash), remainder)) + } + NAME_TAG => { + let (name, remainder) = String::from_bytes(remainder)?; + Ok((AddressableEntityIdentifier::Name(name), remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + for 
_ in 0..10 { + bytesrepr::test_serialization_roundtrip(&AddressableEntityIdentifier::random(rng)); + } + } +} diff --git a/casper_types_ver_2_0/src/transaction/deploy.rs b/casper_types_ver_2_0/src/transaction/deploy.rs new file mode 100644 index 00000000..d93bd489 --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/deploy.rs @@ -0,0 +1,2007 @@ +mod deploy_approval; +mod deploy_approvals_hash; +#[cfg(any(feature = "std", test))] +mod deploy_builder; +mod deploy_footprint; +mod deploy_hash; +mod deploy_header; +mod deploy_id; +mod error; +mod executable_deploy_item; +mod finalized_deploy_approvals; + +use alloc::{collections::BTreeSet, vec::Vec}; +use core::{ + cmp, + fmt::{self, Debug, Display, Formatter}, + hash, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +pub use finalized_deploy_approvals::FinalizedDeployApprovals; +#[cfg(any(feature = "once_cell", test))] +use once_cell::sync::OnceCell; +#[cfg(any(feature = "std", test))] +use { + super::{InitiatorAddr, InitiatorAddrAndSecretKey}, + itertools::Itertools, + serde::{Deserialize, Serialize}, +}; +#[cfg(any(all(feature = "std", feature = "testing"), test))] +use { + crate::{ + bytesrepr::Bytes, + system::auction::{ + ARG_AMOUNT as ARG_AUCTION_AMOUNT, ARG_DELEGATOR, ARG_NEW_VALIDATOR, + ARG_PUBLIC_KEY as ARG_AUCTION_PUBLIC_KEY, ARG_VALIDATOR, METHOD_DELEGATE, + METHOD_REDELEGATE, METHOD_UNDELEGATE, METHOD_WITHDRAW_BID, + }, + AddressableEntityHash, + {system::mint::ARG_AMOUNT, TransactionConfig, U512}, + {testing::TestRng, DEFAULT_MAX_PAYMENT_MOTES, DEFAULT_MIN_TRANSFER_MOTES}, + }, + rand::{Rng, RngCore}, + tracing::{debug, warn}, +}; +#[cfg(feature = "json-schema")] +use {once_cell::sync::Lazy, schemars::JsonSchema}; + +#[cfg(any( + all(feature = "std", feature = "testing"), + feature = "json-schema", + test +))] +use crate::runtime_args; +#[cfg(any(all(feature = "std", feature = "testing"), test))] +use crate::RuntimeArgs; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + crypto, 
Digest, DisplayIter, PublicKey, SecretKey, TimeDiff, Timestamp, +}; + +pub use deploy_approval::DeployApproval; +pub use deploy_approvals_hash::DeployApprovalsHash; +#[cfg(any(feature = "std", test))] +pub use deploy_builder::{DeployBuilder, DeployBuilderError}; +pub use deploy_footprint::DeployFootprint; +pub use deploy_hash::DeployHash; +pub use deploy_header::DeployHeader; +pub use deploy_id::DeployId; +pub use error::{ + DecodeFromJsonError as DeployDecodeFromJsonError, DeployConfigFailure, Error as DeployError, + ExcessiveSizeError as DeployExcessiveSizeError, +}; +pub use executable_deploy_item::{ + ExecutableDeployItem, ExecutableDeployItemIdentifier, TransferTarget, +}; + +#[cfg(feature = "json-schema")] +static DEPLOY: Lazy = Lazy::new(|| { + let payment_args = runtime_args! { + "amount" => 1000 + }; + let payment = ExecutableDeployItem::StoredContractByName { + name: String::from("casper-example"), + entry_point: String::from("example-entry-point"), + args: payment_args, + }; + let session_args = runtime_args! { + "amount" => 1000 + }; + let session = ExecutableDeployItem::Transfer { args: session_args }; + let serialized_body = serialize_body(&payment, &session); + let body_hash = Digest::hash(serialized_body); + + let secret_key = SecretKey::example(); + let timestamp = *Timestamp::example(); + let header = DeployHeader::new( + PublicKey::from(secret_key), + timestamp, + TimeDiff::from_seconds(3_600), + 1, + body_hash, + vec![DeployHash::new(Digest::from([1u8; Digest::LENGTH]))], + String::from("casper-example"), + ); + let serialized_header = serialize_header(&header); + let hash = DeployHash::new(Digest::hash(serialized_header)); + + let mut approvals = BTreeSet::new(); + let approval = DeployApproval::create(&hash, secret_key); + approvals.insert(approval); + + Deploy { + hash, + header, + payment, + session, + approvals, + is_valid: OnceCell::new(), + } +}); + +/// A signed smart contract. 
+/// +/// To construct a new `Deploy`, use a [`DeployBuilder`]. +#[derive(Clone, Eq, Debug)] +#[cfg_attr( + any(feature = "std", test), + derive(Serialize, Deserialize), + serde(deny_unknown_fields) +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "A signed smart contract.") +)] +pub struct Deploy { + hash: DeployHash, + header: DeployHeader, + payment: ExecutableDeployItem, + session: ExecutableDeployItem, + approvals: BTreeSet, + #[cfg_attr(any(all(feature = "std", feature = "once_cell"), test), serde(skip))] + #[cfg_attr( + all(any(feature = "once_cell", test), feature = "datasize"), + data_size(skip) + )] + #[cfg(any(feature = "once_cell", test))] + is_valid: OnceCell>, +} + +impl Deploy { + /// Called by the `DeployBuilder` to construct a new `Deploy`. + #[cfg(any(feature = "std", test))] + #[allow(clippy::too_many_arguments)] + fn build( + timestamp: Timestamp, + ttl: TimeDiff, + gas_price: u64, + dependencies: Vec, + chain_name: String, + payment: ExecutableDeployItem, + session: ExecutableDeployItem, + initiator_addr_and_secret_key: InitiatorAddrAndSecretKey, + ) -> Deploy { + let serialized_body = serialize_body(&payment, &session); + let body_hash = Digest::hash(serialized_body); + + let account = match initiator_addr_and_secret_key.initiator_addr() { + InitiatorAddr::PublicKey(public_key) => public_key, + InitiatorAddr::AccountHash(_) | InitiatorAddr::EntityAddr(_) => unreachable!(), + }; + + let dependencies = dependencies.into_iter().unique().collect(); + let header = DeployHeader::new( + account, + timestamp, + ttl, + gas_price, + body_hash, + dependencies, + chain_name, + ); + let serialized_header = serialize_header(&header); + let hash = DeployHash::new(Digest::hash(serialized_header)); + + let mut deploy = Deploy { + hash, + header, + payment, + session, + approvals: BTreeSet::new(), + #[cfg(any(feature = "once_cell", test))] + is_valid: OnceCell::new(), 
+ }; + + if let Some(secret_key) = initiator_addr_and_secret_key.secret_key() { + deploy.sign(secret_key); + } + deploy + } + + /// Returns the `DeployHash` identifying this `Deploy`. + pub fn hash(&self) -> &DeployHash { + &self.hash + } + + /// Returns the public key of the account providing the context in which to run the `Deploy`. + pub fn account(&self) -> &PublicKey { + self.header.account() + } + + /// Returns the creation timestamp of the `Deploy`. + pub fn timestamp(&self) -> Timestamp { + self.header.timestamp() + } + + /// Returns the duration after the creation timestamp for which the `Deploy` will stay valid. + /// + /// After this duration has ended, the `Deploy` will be considered expired. + pub fn ttl(&self) -> TimeDiff { + self.header.ttl() + } + + /// Returns `true` if the `Deploy` has expired. + pub fn expired(&self, current_instant: Timestamp) -> bool { + self.header.expired(current_instant) + } + + /// Returns the price per gas unit for the `Deploy`. + pub fn gas_price(&self) -> u64 { + self.header.gas_price() + } + + /// Returns the hash of the body (i.e. the Wasm code) of the `Deploy`. + pub fn body_hash(&self) -> &Digest { + self.header.body_hash() + } + + /// Returns the name of the chain the `Deploy` should be executed on. + pub fn chain_name(&self) -> &str { + self.header.chain_name() + } + + /// Returns a reference to the `DeployHeader` of this `Deploy`. + pub fn header(&self) -> &DeployHeader { + &self.header + } + + /// Consumes `self`, returning the `DeployHeader` of this `Deploy`. + pub fn take_header(self) -> DeployHeader { + self.header + } + + /// Returns the `ExecutableDeployItem` for payment code. + pub fn payment(&self) -> &ExecutableDeployItem { + &self.payment + } + + /// Returns the `ExecutableDeployItem` for session code. + pub fn session(&self) -> &ExecutableDeployItem { + &self.session + } + + /// Returns the `Approval`s for this deploy. 
+ pub fn approvals(&self) -> &BTreeSet { + &self.approvals + } + + /// Adds a signature of this `Deploy`'s hash to its approvals. + pub fn sign(&mut self, secret_key: &SecretKey) { + let approval = DeployApproval::create(&self.hash, secret_key); + self.approvals.insert(approval); + } + + /// Returns the `ApprovalsHash` of this `Deploy`'s approvals. + pub fn compute_approvals_hash(&self) -> Result { + DeployApprovalsHash::compute(&self.approvals) + } + + /// Returns `true` if the serialized size of the deploy is not greater than + /// `max_transaction_size`. + #[cfg(any(feature = "std", test))] + pub fn is_valid_size(&self, max_transaction_size: u32) -> Result<(), DeployExcessiveSizeError> { + let deploy_size = self.serialized_length(); + if deploy_size > max_transaction_size as usize { + return Err(DeployExcessiveSizeError { + max_transaction_size, + actual_deploy_size: deploy_size, + }); + } + Ok(()) + } + + /// Returns `Ok` if and only if this `Deploy`'s body hashes to the value of `body_hash()`, and + /// if this `Deploy`'s header hashes to the value claimed as the deploy hash. 
    pub fn has_valid_hash(&self) -> Result<(), DeployConfigFailure> {
        // The body hash commits to the payment and session items; recompute it and compare with
        // the value recorded in the header.
        let serialized_body = serialize_body(&self.payment, &self.session);
        let body_hash = Digest::hash(serialized_body);
        if body_hash != *self.header.body_hash() {
            #[cfg(any(all(feature = "std", feature = "testing"), test))]
            warn!(?self, ?body_hash, "invalid deploy body hash");
            return Err(DeployConfigFailure::InvalidBodyHash);
        }

        // The deploy hash commits to the header (which itself carries the body hash), so checking
        // the body hash first means a valid deploy hash also covers the body.
        let serialized_header = serialize_header(&self.header);
        let hash = DeployHash::new(Digest::hash(serialized_header));
        if hash != self.hash {
            #[cfg(any(all(feature = "std", feature = "testing"), test))]
            warn!(?self, ?hash, "invalid deploy hash");
            return Err(DeployConfigFailure::InvalidDeployHash);
        }
        Ok(())
    }

    /// Returns `Ok` if and only if:
    /// * the deploy hash is correct (should be the hash of the header), and
    /// * the body hash is correct (should be the hash of the body), and
    /// * approvals are non-empty, and
    /// * all approvals are valid signatures of the deploy hash
    pub fn is_valid(&self) -> Result<(), DeployConfigFailure> {
        // With the `once_cell` feature (or in tests) the verdict is computed once via the
        // `is_valid` cell and memoized; otherwise it is recomputed on every call.
        #[cfg(any(feature = "once_cell", test))]
        return self.is_valid.get_or_init(|| validate_deploy(self)).clone();

        #[cfg(not(any(feature = "once_cell", test)))]
        validate_deploy(self)
    }

    /// Returns the `DeployFootprint`.
+ pub fn footprint(&self) -> Result { + let header = self.header().clone(); + let gas_estimate = match self.payment().payment_amount(header.gas_price()) { + Some(gas) => gas, + None => { + return Err(DeployError::InvalidPayment); + } + }; + let size_estimate = self.serialized_length(); + let is_transfer = self.session.is_transfer(); + Ok(DeployFootprint { + header, + gas_estimate, + size_estimate, + is_transfer, + }) + } + + /// Returns `Ok` if and only if: + /// * the chain_name is correct, + /// * the configured parameters are complied with at the given timestamp + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn is_config_compliant( + &self, + chain_name: &str, + config: &TransactionConfig, + max_associated_keys: u32, + timestamp_leeway: TimeDiff, + at: Timestamp, + ) -> Result<(), DeployConfigFailure> { + self.is_valid_size(config.max_transaction_size)?; + + let header = self.header(); + if header.chain_name() != chain_name { + debug!( + deploy_hash = %self.hash(), + deploy_header = %header, + chain_name = %header.chain_name(), + "invalid chain identifier" + ); + return Err(DeployConfigFailure::InvalidChainName { + expected: chain_name.to_string(), + got: header.chain_name().to_string(), + }); + } + + header.is_valid(config, timestamp_leeway, at, &self.hash)?; + + if self.approvals.len() > max_associated_keys as usize { + debug!( + deploy_hash = %self.hash(), + number_of_associated_keys = %self.approvals.len(), + max_associated_keys = %max_associated_keys, + "number of associated keys exceeds the maximum limit" + ); + return Err(DeployConfigFailure::ExcessiveApprovals { + got: self.approvals.len() as u32, + max_associated_keys, + }); + } + + // Transfers have a fixed cost and won't blow the block gas limit. + // Other deploys can, therefore, statically check the payment amount + // associated with the deploy. 
+ if !self.session().is_transfer() { + let value = self + .payment() + .args() + .get(ARG_AMOUNT) + .ok_or(DeployConfigFailure::MissingPaymentAmount)?; + let payment_amount = value + .clone() + .into_t::() + .map_err(|_| DeployConfigFailure::FailedToParsePaymentAmount)?; + if payment_amount > U512::from(config.block_gas_limit) { + debug!( + amount = %payment_amount, + block_gas_limit = %config.block_gas_limit, + "payment amount exceeds block gas limit" + ); + return Err(DeployConfigFailure::ExceededBlockGasLimit { + block_gas_limit: config.block_gas_limit, + got: Box::new(payment_amount), + }); + } + } + + let payment_args_length = self.payment().args().serialized_length(); + if payment_args_length > config.deploy_config.payment_args_max_length as usize { + debug!( + payment_args_length, + payment_args_max_length = config.deploy_config.payment_args_max_length, + "payment args excessive" + ); + return Err(DeployConfigFailure::ExcessivePaymentArgsLength { + max_length: config.deploy_config.payment_args_max_length as usize, + got: payment_args_length, + }); + } + + let session_args_length = self.session().args().serialized_length(); + if session_args_length > config.deploy_config.session_args_max_length as usize { + debug!( + session_args_length, + session_args_max_length = config.deploy_config.session_args_max_length, + "session args excessive" + ); + return Err(DeployConfigFailure::ExcessiveSessionArgsLength { + max_length: config.deploy_config.session_args_max_length as usize, + got: session_args_length, + }); + } + + if self.session().is_transfer() { + let item = self.session().clone(); + let attempted = item + .args() + .get(ARG_AMOUNT) + .ok_or_else(|| { + debug!("missing transfer 'amount' runtime argument"); + DeployConfigFailure::MissingTransferAmount + })? 
+ .clone() + .into_t::() + .map_err(|_| { + debug!("failed to parse transfer 'amount' runtime argument as a U512"); + DeployConfigFailure::FailedToParseTransferAmount + })?; + let minimum = U512::from(config.native_transfer_minimum_motes); + if attempted < minimum { + debug!( + minimum = %config.native_transfer_minimum_motes, + amount = %attempted, + "insufficient transfer amount" + ); + return Err(DeployConfigFailure::InsufficientTransferAmount { + minimum: Box::new(minimum), + attempted: Box::new(attempted), + }); + } + } + + Ok(()) + } + + // This method is not intended to be used by third party crates. + // + // It is required to allow finalized approvals to be injected after reading a `Deploy` from + // storage. + #[doc(hidden)] + pub fn with_approvals(mut self, approvals: BTreeSet) -> Self { + self.approvals = approvals; + self + } + + // This method is not intended to be used by third party crates. + #[doc(hidden)] + #[cfg(feature = "json-schema")] + pub fn example() -> &'static Self { + &DEPLOY + } + + /// Constructs a new signed `Deploy`. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + #[allow(clippy::too_many_arguments)] + pub fn new( + timestamp: Timestamp, + ttl: TimeDiff, + gas_price: u64, + dependencies: Vec, + chain_name: String, + payment: ExecutableDeployItem, + session: ExecutableDeployItem, + secret_key: &SecretKey, + account: Option, + ) -> Deploy { + let account_and_secret_key = match account { + Some(account) => InitiatorAddrAndSecretKey::Both { + initiator_addr: InitiatorAddr::PublicKey(account), + secret_key, + }, + None => InitiatorAddrAndSecretKey::SecretKey(secret_key), + }; + + Deploy::build( + timestamp, + ttl, + gas_price, + dependencies, + chain_name, + payment, + session, + account_and_secret_key, + ) + } + + /// Returns a random `Deploy`. 
+ #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random(rng: &mut TestRng) -> Self { + let timestamp = Timestamp::random(rng); + let ttl = TimeDiff::from_seconds(rng.gen_range(60..300)); + Deploy::random_with_timestamp_and_ttl(rng, timestamp, ttl) + } + + /// Returns a random `Deploy` but using the specified `timestamp` and `ttl`. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_with_timestamp_and_ttl( + rng: &mut TestRng, + timestamp: Timestamp, + ttl: TimeDiff, + ) -> Self { + let gas_price = rng.gen_range(1..100); + + let dependencies = vec![ + DeployHash::new(Digest::hash(rng.next_u64().to_le_bytes())), + DeployHash::new(Digest::hash(rng.next_u64().to_le_bytes())), + DeployHash::new(Digest::hash(rng.next_u64().to_le_bytes())), + ]; + let chain_name = String::from("casper-example"); + + // We need "amount" in order to be able to get correct info via `deploy_info()`. + let payment_args = runtime_args! { + "amount" => U512::from(DEFAULT_MAX_PAYMENT_MOTES), + }; + let payment = ExecutableDeployItem::StoredContractByName { + name: String::from("casper-example"), + entry_point: String::from("example-entry-point"), + args: payment_args, + }; + + let session = rng.gen(); + + let secret_key = SecretKey::random(rng); + + Deploy::new( + timestamp, + ttl, + gas_price, + dependencies, + chain_name, + payment, + session, + &secret_key, + None, + ) + } + + /// Turns `self` into an invalid `Deploy` by clearing the `chain_name`, invalidating the deploy + /// hash. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn invalidate(&mut self) { + self.header.invalidate(); + } + + /// Returns a random `Deploy` for a native transfer. 
    #[cfg(any(all(feature = "std", feature = "testing"), test))]
    pub fn random_valid_native_transfer(rng: &mut TestRng) -> Self {
        let timestamp = Timestamp::now();
        let ttl = TimeDiff::from_seconds(rng.gen_range(60..300));
        Self::random_valid_native_transfer_with_timestamp_and_ttl(rng, timestamp, ttl)
    }

    /// Returns a random `Deploy` for a native transfer with timestamp and ttl.
    #[cfg(any(all(feature = "std", feature = "testing"), test))]
    pub fn random_valid_native_transfer_with_timestamp_and_ttl(
        rng: &mut TestRng,
        timestamp: Timestamp,
        ttl: TimeDiff,
    ) -> Self {
        // The base random deploy only supplies gas price, dependencies and chain name below;
        // payment and session are replaced, and the result is re-signed with a fresh key.
        let deploy = Self::random_with_timestamp_and_ttl(rng, timestamp, ttl);
        let transfer_args = runtime_args! {
            "amount" => U512::from(DEFAULT_MIN_TRANSFER_MOTES),
            "source" => PublicKey::random(rng).to_account_hash(),
            "target" => PublicKey::random(rng).to_account_hash(),
        };
        let payment_args = runtime_args! {
            "amount" => U512::from(10),
        };
        let session = ExecutableDeployItem::Transfer {
            args: transfer_args,
        };
        let payment = ExecutableDeployItem::ModuleBytes {
            module_bytes: Bytes::new(),
            args: payment_args,
        };
        let secret_key = SecretKey::random(rng);
        Deploy::new(
            timestamp,
            ttl,
            deploy.header.gas_price(),
            deploy.header.dependencies().clone(),
            deploy.header.chain_name().to_string(),
            payment,
            session,
            &secret_key,
            None,
        )
    }

    /// Returns a random `Deploy` for a native transfer with no dependencies.
    #[cfg(any(all(feature = "std", feature = "testing"), test))]
    pub fn random_valid_native_transfer_without_deps(rng: &mut TestRng) -> Self {
        let deploy = Self::random(rng);
        let transfer_args = runtime_args! {
            "amount" => U512::from(DEFAULT_MIN_TRANSFER_MOTES),
            "source" => PublicKey::random(rng).to_account_hash(),
            "target" => PublicKey::random(rng).to_account_hash(),
        };
        let payment_args = runtime_args! {
            "amount" => U512::from(10),
        };
        let session = ExecutableDeployItem::Transfer {
            args: transfer_args,
        };
        let payment = ExecutableDeployItem::ModuleBytes {
            module_bytes: Bytes::new(),
            args: payment_args,
        };
        let secret_key = SecretKey::random(rng);
        Deploy::new(
            // Fresh timestamp, empty dependency list; other header fields come from `deploy`.
            Timestamp::now(),
            deploy.header.ttl(),
            deploy.header.gas_price(),
            vec![],
            deploy.header.chain_name().to_string(),
            payment,
            session,
            &secret_key,
            None,
        )
    }

    /// Returns a random invalid `Deploy` without a payment amount specified.
    #[cfg(any(all(feature = "std", feature = "testing"), test))]
    pub fn random_without_payment_amount(rng: &mut TestRng) -> Self {
        // Empty module bytes with no runtime args, so no "amount" is present at all.
        let payment = ExecutableDeployItem::ModuleBytes {
            module_bytes: Bytes::new(),
            args: RuntimeArgs::default(),
        };
        Self::random_transfer_with_payment(rng, payment)
    }

    /// Returns a random invalid `Deploy` with an invalid value for the payment amount.
    #[cfg(any(all(feature = "std", feature = "testing"), test))]
    pub fn random_with_mangled_payment_amount(rng: &mut TestRng) -> Self {
        // "amount" is present but holds a string rather than a `U512`.
        let payment_args = runtime_args! {
            "amount" => "invalid-argument"
        };
        let payment = ExecutableDeployItem::ModuleBytes {
            module_bytes: Bytes::new(),
            args: payment_args,
        };
        Self::random_transfer_with_payment(rng, payment)
    }

    /// Returns a random `Deploy` with custom payment specified as a stored contract by name.
    #[cfg(any(all(feature = "std", feature = "testing"), test))]
    pub fn random_with_valid_custom_payment_contract_by_name(rng: &mut TestRng) -> Self {
        let payment = ExecutableDeployItem::StoredContractByName {
            name: "Test".to_string(),
            entry_point: "call".to_string(),
            args: Default::default(),
        };
        Self::random_transfer_with_payment(rng, payment)
    }

    /// Returns a random invalid `Deploy` with custom payment specified as a stored contract by
    /// hash, but missing the runtime args.
+ #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_with_missing_payment_contract_by_hash(rng: &mut TestRng) -> Self { + let payment = ExecutableDeployItem::StoredContractByHash { + hash: [19; 32].into(), + entry_point: "call".to_string(), + args: Default::default(), + }; + Self::random_transfer_with_payment(rng, payment) + } + + /// Returns a random invalid `Deploy` with custom payment specified as a stored contract by + /// hash, but calling an invalid entry point. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_with_missing_entry_point_in_payment_contract(rng: &mut TestRng) -> Self { + let payment = ExecutableDeployItem::StoredContractByHash { + hash: [19; 32].into(), + entry_point: "non-existent-entry-point".to_string(), + args: Default::default(), + }; + Self::random_transfer_with_payment(rng, payment) + } + + /// Returns a random `Deploy` with custom payment specified as a stored versioned contract by + /// name. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_with_valid_custom_payment_package_by_name(rng: &mut TestRng) -> Self { + let payment = ExecutableDeployItem::StoredVersionedContractByName { + name: "Test".to_string(), + version: None, + entry_point: "call".to_string(), + args: Default::default(), + }; + Self::random_transfer_with_payment(rng, payment) + } + + /// Returns a random invalid `Deploy` with custom payment specified as a stored versioned + /// contract by hash, but missing the runtime args. 
+ #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_with_missing_payment_package_by_hash(rng: &mut TestRng) -> Self { + let payment = ExecutableDeployItem::StoredVersionedContractByHash { + hash: Default::default(), + version: None, + entry_point: "call".to_string(), + args: Default::default(), + }; + Self::random_transfer_with_payment(rng, payment) + } + + /// Returns a random invalid `Deploy` with custom payment specified as a stored versioned + /// contract by hash, but calling an invalid entry point. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_with_nonexistent_contract_version_in_payment_package(rng: &mut TestRng) -> Self { + let payment = ExecutableDeployItem::StoredVersionedContractByHash { + hash: [19; 32].into(), + version: Some(6u32), + entry_point: "non-existent-entry-point".to_string(), + args: Default::default(), + }; + Self::random_transfer_with_payment(rng, payment) + } + + /// Returns a random `Deploy` with custom session specified as a stored contract by name. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_with_valid_session_contract_by_name(rng: &mut TestRng) -> Self { + let session = ExecutableDeployItem::StoredContractByName { + name: "Test".to_string(), + entry_point: "call".to_string(), + args: Default::default(), + }; + Self::random_transfer_with_session(rng, session) + } + + /// Returns a random invalid `Deploy` with custom session specified as a stored contract by + /// hash, but missing the runtime args. 
+ #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_with_missing_session_contract_by_hash(rng: &mut TestRng) -> Self { + let session = ExecutableDeployItem::StoredContractByHash { + hash: Default::default(), + entry_point: "call".to_string(), + args: Default::default(), + }; + Self::random_transfer_with_session(rng, session) + } + + /// Returns a random invalid `Deploy` with custom session specified as a stored contract by + /// hash, but calling an invalid entry point. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_with_missing_entry_point_in_session_contract(rng: &mut TestRng) -> Self { + let session = ExecutableDeployItem::StoredContractByHash { + hash: [19; 32].into(), + entry_point: "non-existent-entry-point".to_string(), + args: Default::default(), + }; + Self::random_transfer_with_session(rng, session) + } + + /// Returns a random `Deploy` with custom session specified as a stored versioned contract by + /// name. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_with_valid_session_package_by_name(rng: &mut TestRng) -> Self { + let session = ExecutableDeployItem::StoredVersionedContractByName { + name: "Test".to_string(), + version: None, + entry_point: "call".to_string(), + args: Default::default(), + }; + Self::random_transfer_with_session(rng, session) + } + + /// Returns a random invalid `Deploy` with custom session specified as a stored versioned + /// contract by hash, but missing the runtime args. 
+ #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_with_missing_session_package_by_hash(rng: &mut TestRng) -> Self { + let session = ExecutableDeployItem::StoredVersionedContractByHash { + hash: Default::default(), + version: None, + entry_point: "call".to_string(), + args: Default::default(), + }; + Self::random_transfer_with_session(rng, session) + } + + /// Returns a random invalid `Deploy` with custom session specified as a stored versioned + /// contract by hash, but calling an invalid entry point. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_with_nonexistent_contract_version_in_session_package(rng: &mut TestRng) -> Self { + let session = ExecutableDeployItem::StoredVersionedContractByHash { + hash: [19; 32].into(), + version: Some(6u32), + entry_point: "non-existent-entry-point".to_string(), + args: Default::default(), + }; + Self::random_transfer_with_session(rng, session) + } + + /// Returns a random invalid transfer `Deploy` with the "target" runtime arg missing. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_without_transfer_target(rng: &mut TestRng) -> Self { + let transfer_args = runtime_args! { + "amount" => U512::from(DEFAULT_MIN_TRANSFER_MOTES), + "source" => PublicKey::random(rng).to_account_hash(), + }; + let session = ExecutableDeployItem::Transfer { + args: transfer_args, + }; + Self::random_transfer_with_session(rng, session) + } + + /// Returns a random invalid transfer `Deploy` with the "amount" runtime arg missing. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_without_transfer_amount(rng: &mut TestRng) -> Self { + let transfer_args = runtime_args! 
{ + "source" => PublicKey::random(rng).to_account_hash(), + "target" => PublicKey::random(rng).to_account_hash(), + }; + let session = ExecutableDeployItem::Transfer { + args: transfer_args, + }; + Self::random_transfer_with_session(rng, session) + } + + /// Returns a random invalid transfer `Deploy` with an invalid "amount" runtime arg. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_with_mangled_transfer_amount(rng: &mut TestRng) -> Self { + let transfer_args = runtime_args! { + "amount" => "mangled-transfer-amount", + "source" => PublicKey::random(rng).to_account_hash(), + "target" => PublicKey::random(rng).to_account_hash(), + }; + let session = ExecutableDeployItem::Transfer { + args: transfer_args, + }; + Self::random_transfer_with_session(rng, session) + } + + /// Returns a random invalid `Deploy` with empty session bytes. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_with_empty_session_module_bytes(rng: &mut TestRng) -> Self { + let session = ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: Default::default(), + }; + Self::random_transfer_with_session(rng, session) + } + + /// Returns a random invalid `Deploy` with an expired TTL. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_expired_deploy(rng: &mut TestRng) -> Self { + let deploy = Self::random_valid_native_transfer(rng); + let secret_key = SecretKey::random(rng); + + Deploy::new( + Timestamp::zero(), + TimeDiff::from_seconds(1u32), + deploy.header.gas_price(), + deploy.header.dependencies().clone(), + deploy.header.chain_name().to_string(), + deploy.payment, + deploy.session, + &secret_key, + None, + ) + } + + /// Returns a random `Deploy` with native transfer as payment code. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_with_native_transfer_in_payment_logic(rng: &mut TestRng) -> Self { + let transfer_args = runtime_args! 
{ + "amount" => U512::from(DEFAULT_MIN_TRANSFER_MOTES), + "source" => PublicKey::random(rng).to_account_hash(), + "target" => PublicKey::random(rng).to_account_hash(), + }; + let payment = ExecutableDeployItem::Transfer { + args: transfer_args, + }; + Self::random_transfer_with_payment(rng, payment) + } + + #[cfg(any(all(feature = "std", feature = "testing"), test))] + fn random_transfer_with_payment(rng: &mut TestRng, payment: ExecutableDeployItem) -> Self { + let deploy = Self::random_valid_native_transfer(rng); + let secret_key = SecretKey::random(rng); + + Deploy::new( + deploy.header.timestamp(), + deploy.header.ttl(), + deploy.header.gas_price(), + deploy.header.dependencies().clone(), + deploy.header.chain_name().to_string(), + payment, + deploy.session, + &secret_key, + None, + ) + } + + #[cfg(any(all(feature = "std", feature = "testing"), test))] + fn random_transfer_with_session(rng: &mut TestRng, session: ExecutableDeployItem) -> Self { + let deploy = Self::random_valid_native_transfer(rng); + let secret_key = SecretKey::random(rng); + + Deploy::new( + deploy.header.timestamp(), + deploy.header.ttl(), + deploy.header.gas_price(), + deploy.header.dependencies().clone(), + deploy.header.chain_name().to_string(), + deploy.payment, + session, + &secret_key, + None, + ) + } + + /// Creates a withdraw bid deploy, for testing. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn withdraw_bid( + chain_name: String, + auction_contract_hash: AddressableEntityHash, + public_key: PublicKey, + amount: U512, + timestamp: Timestamp, + ttl: TimeDiff, + ) -> Self { + let payment = ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: runtime_args! { ARG_AMOUNT => U512::from(3_000_000_000_u64) }, + }; + let args = runtime_args! 
{ + ARG_AUCTION_AMOUNT => amount, + ARG_AUCTION_PUBLIC_KEY => public_key.clone(), + }; + let session = ExecutableDeployItem::StoredContractByHash { + hash: auction_contract_hash, + entry_point: METHOD_WITHDRAW_BID.to_string(), + args, + }; + + Deploy::build( + timestamp, + ttl, + 1, + vec![], + chain_name, + payment, + session, + InitiatorAddrAndSecretKey::InitiatorAddr(InitiatorAddr::PublicKey(public_key)), + ) + } + + /// Creates a delegate deploy, for testing. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn delegate( + chain_name: String, + auction_contract_hash: AddressableEntityHash, + validator_public_key: PublicKey, + delegator_public_key: PublicKey, + amount: U512, + timestamp: Timestamp, + ttl: TimeDiff, + ) -> Self { + let payment = ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: runtime_args! { ARG_AMOUNT => U512::from(3_000_000_000_u64) }, + }; + let args = runtime_args! { + ARG_DELEGATOR => delegator_public_key.clone(), + ARG_VALIDATOR => validator_public_key, + ARG_AUCTION_AMOUNT => amount, + }; + let session = ExecutableDeployItem::StoredContractByHash { + hash: auction_contract_hash, + entry_point: METHOD_DELEGATE.to_string(), + args, + }; + + Deploy::build( + timestamp, + ttl, + 1, + vec![], + chain_name, + payment, + session, + InitiatorAddrAndSecretKey::InitiatorAddr(InitiatorAddr::PublicKey( + delegator_public_key, + )), + ) + } + + /// Creates an undelegate deploy, for testing. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn undelegate( + chain_name: String, + auction_contract_hash: AddressableEntityHash, + validator_public_key: PublicKey, + delegator_public_key: PublicKey, + amount: U512, + timestamp: Timestamp, + ttl: TimeDiff, + ) -> Self { + let payment = ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: runtime_args! { ARG_AMOUNT => U512::from(3_000_000_000_u64) }, + }; + let args = runtime_args! 
{ + ARG_DELEGATOR => delegator_public_key.clone(), + ARG_VALIDATOR => validator_public_key, + ARG_AUCTION_AMOUNT => amount, + }; + let session = ExecutableDeployItem::StoredContractByHash { + hash: auction_contract_hash, + entry_point: METHOD_UNDELEGATE.to_string(), + args, + }; + + Deploy::build( + timestamp, + ttl, + 1, + vec![], + chain_name, + payment, + session, + InitiatorAddrAndSecretKey::InitiatorAddr(InitiatorAddr::PublicKey( + delegator_public_key, + )), + ) + } + + /// Creates an redelegate deploy, for testing. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + #[allow(clippy::too_many_arguments)] + pub fn redelegate( + chain_name: String, + auction_contract_hash: AddressableEntityHash, + validator_public_key: PublicKey, + delegator_public_key: PublicKey, + redelegate_validator_public_key: PublicKey, + amount: U512, + timestamp: Timestamp, + ttl: TimeDiff, + ) -> Self { + let payment = ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: runtime_args! { ARG_AMOUNT => U512::from(3_000_000_000_u64) }, + }; + let args = runtime_args! { + ARG_DELEGATOR => delegator_public_key.clone(), + ARG_VALIDATOR => validator_public_key, + ARG_NEW_VALIDATOR => redelegate_validator_public_key, + ARG_AUCTION_AMOUNT => amount, + }; + let session = ExecutableDeployItem::StoredContractByHash { + hash: auction_contract_hash, + entry_point: METHOD_REDELEGATE.to_string(), + args, + }; + + Deploy::build( + timestamp, + ttl, + 1, + vec![], + chain_name, + payment, + session, + InitiatorAddrAndSecretKey::InitiatorAddr(InitiatorAddr::PublicKey( + delegator_public_key, + )), + ) + } +} + +impl hash::Hash for Deploy { + fn hash(&self, state: &mut H) { + // Destructure to make sure we don't accidentally omit fields. 
+ #[cfg(any(feature = "once_cell", test))] + let Deploy { + hash, + header, + payment, + session, + approvals, + is_valid: _, + } = self; + #[cfg(not(any(feature = "once_cell", test)))] + let Deploy { + hash, + header, + payment, + session, + approvals, + } = self; + hash.hash(state); + header.hash(state); + payment.hash(state); + session.hash(state); + approvals.hash(state); + } +} + +impl PartialEq for Deploy { + fn eq(&self, other: &Deploy) -> bool { + // Destructure to make sure we don't accidentally omit fields. + #[cfg(any(feature = "once_cell", test))] + let Deploy { + hash, + header, + payment, + session, + approvals, + is_valid: _, + } = self; + #[cfg(not(any(feature = "once_cell", test)))] + let Deploy { + hash, + header, + payment, + session, + approvals, + } = self; + *hash == other.hash + && *header == other.header + && *payment == other.payment + && *session == other.session + && *approvals == other.approvals + } +} + +impl Ord for Deploy { + fn cmp(&self, other: &Deploy) -> cmp::Ordering { + // Destructure to make sure we don't accidentally omit fields. 
+ #[cfg(any(feature = "once_cell", test))] + let Deploy { + hash, + header, + payment, + session, + approvals, + is_valid: _, + } = self; + #[cfg(not(any(feature = "once_cell", test)))] + let Deploy { + hash, + header, + payment, + session, + approvals, + } = self; + hash.cmp(&other.hash) + .then_with(|| header.cmp(&other.header)) + .then_with(|| payment.cmp(&other.payment)) + .then_with(|| session.cmp(&other.session)) + .then_with(|| approvals.cmp(&other.approvals)) + } +} + +impl PartialOrd for Deploy { + fn partial_cmp(&self, other: &Deploy) -> Option { + Some(self.cmp(other)) + } +} + +impl ToBytes for Deploy { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.header.write_bytes(writer)?; + self.hash.write_bytes(writer)?; + self.payment.write_bytes(writer)?; + self.session.write_bytes(writer)?; + self.approvals.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.header.serialized_length() + + self.hash.serialized_length() + + self.payment.serialized_length() + + self.session.serialized_length() + + self.approvals.serialized_length() + } +} + +impl FromBytes for Deploy { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (header, remainder) = DeployHeader::from_bytes(bytes)?; + let (hash, remainder) = DeployHash::from_bytes(remainder)?; + let (payment, remainder) = ExecutableDeployItem::from_bytes(remainder)?; + let (session, remainder) = ExecutableDeployItem::from_bytes(remainder)?; + let (approvals, remainder) = BTreeSet::::from_bytes(remainder)?; + let maybe_valid_deploy = Deploy { + header, + hash, + payment, + session, + approvals, + #[cfg(any(feature = "once_cell", test))] + is_valid: OnceCell::new(), + }; + Ok((maybe_valid_deploy, remainder)) + } +} + +impl Display for Deploy { + fn fmt(&self, formatter: &mut 
Formatter) -> fmt::Result { + write!( + formatter, + "deploy[{}, {}, payment_code: {}, session_code: {}, approvals: {}]", + self.hash, + self.header, + self.payment, + self.session, + DisplayIter::new(self.approvals.iter()) + ) + } +} + +fn serialize_header(header: &DeployHeader) -> Vec { + header + .to_bytes() + .unwrap_or_else(|error| panic!("should serialize deploy header: {}", error)) +} + +fn serialize_body(payment: &ExecutableDeployItem, session: &ExecutableDeployItem) -> Vec { + let mut buffer = Vec::with_capacity(payment.serialized_length() + session.serialized_length()); + payment + .write_bytes(&mut buffer) + .unwrap_or_else(|error| panic!("should serialize payment code: {}", error)); + session + .write_bytes(&mut buffer) + .unwrap_or_else(|error| panic!("should serialize session code: {}", error)); + buffer +} + +/// Computationally expensive validity check for a given deploy instance, including asymmetric_key +/// signing verification. +fn validate_deploy(deploy: &Deploy) -> Result<(), DeployConfigFailure> { + if deploy.approvals.is_empty() { + #[cfg(any(all(feature = "std", feature = "testing"), test))] + warn!(?deploy, "deploy has no approvals"); + return Err(DeployConfigFailure::EmptyApprovals); + } + + deploy.has_valid_hash()?; + + for (index, approval) in deploy.approvals.iter().enumerate() { + if let Err(error) = crypto::verify(deploy.hash, approval.signature(), approval.signer()) { + #[cfg(any(all(feature = "std", feature = "testing"), test))] + warn!(?deploy, "failed to verify approval {}: {}", index, error); + return Err(DeployConfigFailure::InvalidApproval { index, error }); + } + } + + Ok(()) +} + +#[cfg(test)] +mod tests { + use std::{iter, time::Duration}; + + use super::*; + use crate::CLValue; + + const DEFAULT_MAX_ASSOCIATED_KEYS: u32 = 100; + + #[test] + fn json_roundtrip() { + let mut rng = TestRng::new(); + let deploy = Deploy::random(&mut rng); + let json_string = serde_json::to_string_pretty(&deploy).unwrap(); + let decoded = 
serde_json::from_str(&json_string).unwrap(); + assert_eq!(deploy, decoded); + } + + #[test] + fn bincode_roundtrip() { + let mut rng = TestRng::new(); + let deploy = Deploy::random(&mut rng); + let serialized = bincode::serialize(&deploy).unwrap(); + let deserialized = bincode::deserialize(&serialized).unwrap(); + assert_eq!(deploy, deserialized); + } + + #[test] + fn bytesrepr_roundtrip() { + let mut rng = TestRng::new(); + let deploy = Deploy::random(&mut rng); + bytesrepr::test_serialization_roundtrip(deploy.header()); + bytesrepr::test_serialization_roundtrip(&deploy); + } + + fn create_deploy( + rng: &mut TestRng, + ttl: TimeDiff, + dependency_count: usize, + chain_name: &str, + ) -> Deploy { + let secret_key = SecretKey::random(rng); + let dependencies = iter::repeat_with(|| DeployHash::random(rng)) + .take(dependency_count) + .collect(); + let transfer_args = { + let mut transfer_args = RuntimeArgs::new(); + let value = CLValue::from_t(U512::from(DEFAULT_MIN_TRANSFER_MOTES)) + .expect("should create CLValue"); + transfer_args.insert_cl_value("amount", value); + transfer_args + }; + Deploy::new( + Timestamp::now(), + ttl, + 1, + dependencies, + chain_name.to_string(), + ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: RuntimeArgs::new(), + }, + ExecutableDeployItem::Transfer { + args: transfer_args, + }, + &secret_key, + None, + ) + } + + #[test] + fn is_valid() { + let mut rng = TestRng::new(); + let deploy = create_deploy(&mut rng, TransactionConfig::default().max_ttl, 0, "net-1"); + assert_eq!( + deploy.is_valid.get(), + None, + "is valid should initially be None" + ); + deploy.is_valid().expect("should be valid"); + assert_eq!( + deploy.is_valid.get(), + Some(&Ok(())), + "is valid should be true" + ); + } + + fn check_is_not_valid(invalid_deploy: Deploy, expected_error: DeployConfigFailure) { + assert!( + invalid_deploy.is_valid.get().is_none(), + "is valid should initially be None" + ); + let actual_error = 
invalid_deploy.is_valid().unwrap_err(); + + // Ignore the `error_msg` field of `InvalidApproval` when comparing to expected error, as + // this makes the test too fragile. Otherwise expect the actual error should exactly match + // the expected error. + match expected_error { + DeployConfigFailure::InvalidApproval { + index: expected_index, + .. + } => match actual_error { + DeployConfigFailure::InvalidApproval { + index: actual_index, + .. + } => { + assert_eq!(actual_index, expected_index); + } + _ => panic!("expected {}, got: {}", expected_error, actual_error), + }, + _ => { + assert_eq!(actual_error, expected_error,); + } + } + + // The actual error should have been lazily initialized correctly. + assert_eq!( + invalid_deploy.is_valid.get(), + Some(&Err(actual_error)), + "is valid should now be Some" + ); + } + + #[test] + fn not_valid_due_to_invalid_body_hash() { + let mut rng = TestRng::new(); + let mut deploy = create_deploy(&mut rng, TransactionConfig::default().max_ttl, 0, "net-1"); + + deploy.session = ExecutableDeployItem::Transfer { + args: runtime_args! 
{ + "amount" => 1 + }, + }; + check_is_not_valid(deploy, DeployConfigFailure::InvalidBodyHash); + } + + #[test] + fn not_valid_due_to_invalid_deploy_hash() { + let mut rng = TestRng::new(); + let mut deploy = create_deploy(&mut rng, TransactionConfig::default().max_ttl, 0, "net-1"); + + // deploy.header.gas_price = 2; + deploy.invalidate(); + check_is_not_valid(deploy, DeployConfigFailure::InvalidDeployHash); + } + + #[test] + fn not_valid_due_to_empty_approvals() { + let mut rng = TestRng::new(); + let mut deploy = create_deploy(&mut rng, TransactionConfig::default().max_ttl, 0, "net-1"); + deploy.approvals = BTreeSet::new(); + assert!(deploy.approvals.is_empty()); + check_is_not_valid(deploy, DeployConfigFailure::EmptyApprovals) + } + + #[test] + fn not_valid_due_to_invalid_approval() { + let mut rng = TestRng::new(); + let mut deploy = create_deploy(&mut rng, TransactionConfig::default().max_ttl, 0, "net-1"); + + let deploy2 = Deploy::random(&mut rng); + + deploy.approvals.extend(deploy2.approvals.clone()); + // the expected index for the invalid approval will be the first index at which there is an + // approval coming from deploy2 + let expected_index = deploy + .approvals + .iter() + .enumerate() + .find(|(_, approval)| deploy2.approvals.contains(approval)) + .map(|(index, _)| index) + .unwrap(); + check_is_not_valid( + deploy, + DeployConfigFailure::InvalidApproval { + index: expected_index, + error: crypto::Error::SignatureError, // This field is ignored in the check. 
+ }, + ); + } + + #[test] + fn is_acceptable() { + let mut rng = TestRng::new(); + let chain_name = "net-1"; + let config = TransactionConfig::default(); + + let deploy = create_deploy( + &mut rng, + config.max_ttl, + config.deploy_config.max_dependencies.into(), + chain_name, + ); + let current_timestamp = deploy.header().timestamp(); + deploy + .is_config_compliant( + chain_name, + &config, + DEFAULT_MAX_ASSOCIATED_KEYS, + TimeDiff::default(), + current_timestamp, + ) + .expect("should be acceptable"); + } + + #[test] + fn not_acceptable_due_to_invalid_chain_name() { + let mut rng = TestRng::new(); + let expected_chain_name = "net-1"; + let wrong_chain_name = "net-2".to_string(); + let config = TransactionConfig::default(); + + let deploy = create_deploy( + &mut rng, + config.max_ttl, + config.deploy_config.max_dependencies.into(), + &wrong_chain_name, + ); + + let expected_error = DeployConfigFailure::InvalidChainName { + expected: expected_chain_name.to_string(), + got: wrong_chain_name, + }; + + let current_timestamp = deploy.header().timestamp(); + assert_eq!( + deploy.is_config_compliant( + expected_chain_name, + &config, + DEFAULT_MAX_ASSOCIATED_KEYS, + TimeDiff::default(), + current_timestamp + ), + Err(expected_error) + ); + assert!( + deploy.is_valid.get().is_none(), + "deploy should not have run expensive `is_valid` call" + ); + } + + #[test] + fn not_acceptable_due_to_excessive_dependencies() { + let mut rng = TestRng::new(); + let chain_name = "net-1"; + let config = TransactionConfig::default(); + + let dependency_count = usize::from(config.deploy_config.max_dependencies + 1); + + let deploy = create_deploy(&mut rng, config.max_ttl, dependency_count, chain_name); + + let expected_error = DeployConfigFailure::ExcessiveDependencies { + max_dependencies: config.deploy_config.max_dependencies, + got: dependency_count, + }; + + let current_timestamp = deploy.header().timestamp(); + assert_eq!( + deploy.is_config_compliant( + chain_name, + &config, + 
DEFAULT_MAX_ASSOCIATED_KEYS, + TimeDiff::default(), + current_timestamp + ), + Err(expected_error) + ); + assert!( + deploy.is_valid.get().is_none(), + "deploy should not have run expensive `is_valid` call" + ); + } + + #[test] + fn not_acceptable_due_to_excessive_ttl() { + let mut rng = TestRng::new(); + let chain_name = "net-1"; + let config = TransactionConfig::default(); + + let ttl = config.max_ttl + TimeDiff::from(Duration::from_secs(1)); + + let deploy = create_deploy( + &mut rng, + ttl, + config.deploy_config.max_dependencies.into(), + chain_name, + ); + + let expected_error = DeployConfigFailure::ExcessiveTimeToLive { + max_ttl: config.max_ttl, + got: ttl, + }; + + let current_timestamp = deploy.header().timestamp(); + assert_eq!( + deploy.is_config_compliant( + chain_name, + &config, + DEFAULT_MAX_ASSOCIATED_KEYS, + TimeDiff::default(), + current_timestamp + ), + Err(expected_error) + ); + assert!( + deploy.is_valid.get().is_none(), + "deploy should not have run expensive `is_valid` call" + ); + } + + #[test] + fn not_acceptable_due_to_timestamp_in_future() { + let mut rng = TestRng::new(); + let chain_name = "net-1"; + let config = TransactionConfig::default(); + let leeway = TimeDiff::from_seconds(2); + + let deploy = create_deploy( + &mut rng, + config.max_ttl, + config.deploy_config.max_dependencies.into(), + chain_name, + ); + let current_timestamp = deploy.header.timestamp() - leeway - TimeDiff::from_seconds(1); + + let expected_error = DeployConfigFailure::TimestampInFuture { + validation_timestamp: current_timestamp, + timestamp_leeway: leeway, + got: deploy.header.timestamp(), + }; + + assert_eq!( + deploy.is_config_compliant( + chain_name, + &config, + DEFAULT_MAX_ASSOCIATED_KEYS, + leeway, + current_timestamp + ), + Err(expected_error) + ); + assert!( + deploy.is_valid.get().is_none(), + "deploy should not have run expensive `is_valid` call" + ); + } + + #[test] + fn acceptable_if_timestamp_slightly_in_future() { + let mut rng = TestRng::new(); 
+ let chain_name = "net-1"; + let config = TransactionConfig::default(); + let leeway = TimeDiff::from_seconds(2); + + let deploy = create_deploy( + &mut rng, + config.max_ttl, + config.deploy_config.max_dependencies.into(), + chain_name, + ); + let current_timestamp = deploy.header.timestamp() - (leeway / 2); + deploy + .is_config_compliant( + chain_name, + &config, + DEFAULT_MAX_ASSOCIATED_KEYS, + leeway, + current_timestamp, + ) + .expect("should be acceptable"); + } + + #[test] + fn not_acceptable_due_to_missing_payment_amount() { + let mut rng = TestRng::new(); + let chain_name = "net-1"; + let config = TransactionConfig::default(); + + let payment = ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: RuntimeArgs::default(), + }; + + // Create an empty session object that is not transfer to ensure + // that the payment amount is checked. + let session = ExecutableDeployItem::StoredContractByName { + name: "".to_string(), + entry_point: "".to_string(), + args: Default::default(), + }; + + let mut deploy = create_deploy( + &mut rng, + config.max_ttl, + config.deploy_config.max_dependencies.into(), + chain_name, + ); + + deploy.payment = payment; + deploy.session = session; + + let current_timestamp = deploy.header().timestamp(); + assert_eq!( + deploy.is_config_compliant( + chain_name, + &config, + DEFAULT_MAX_ASSOCIATED_KEYS, + TimeDiff::default(), + current_timestamp + ), + Err(DeployConfigFailure::MissingPaymentAmount) + ); + assert!( + deploy.is_valid.get().is_none(), + "deploy should not have run expensive `is_valid` call" + ); + } + + #[test] + fn not_acceptable_due_to_mangled_payment_amount() { + let mut rng = TestRng::new(); + let chain_name = "net-1"; + let config = TransactionConfig::default(); + + let payment = ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: runtime_args! 
{ + "amount" => "mangled-amount" + }, + }; + + // Create an empty session object that is not transfer to ensure + // that the payment amount is checked. + let session = ExecutableDeployItem::StoredContractByName { + name: "".to_string(), + entry_point: "".to_string(), + args: Default::default(), + }; + + let mut deploy = create_deploy( + &mut rng, + config.max_ttl, + config.deploy_config.max_dependencies.into(), + chain_name, + ); + + deploy.payment = payment; + deploy.session = session; + + let current_timestamp = deploy.header().timestamp(); + assert_eq!( + deploy.is_config_compliant( + chain_name, + &config, + DEFAULT_MAX_ASSOCIATED_KEYS, + TimeDiff::default(), + current_timestamp + ), + Err(DeployConfigFailure::FailedToParsePaymentAmount) + ); + assert!( + deploy.is_valid.get().is_none(), + "deploy should not have run expensive `is_valid` call" + ); + } + + #[test] + fn not_acceptable_due_to_excessive_payment_amount() { + let mut rng = TestRng::new(); + let chain_name = "net-1"; + let config = TransactionConfig::default(); + let amount = U512::from(config.block_gas_limit + 1); + + let payment = ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: runtime_args! { + "amount" => amount + }, + }; + + // Create an empty session object that is not transfer to ensure + // that the payment amount is checked. 
+ let session = ExecutableDeployItem::StoredContractByName { + name: "".to_string(), + entry_point: "".to_string(), + args: Default::default(), + }; + + let mut deploy = create_deploy( + &mut rng, + config.max_ttl, + config.deploy_config.max_dependencies.into(), + chain_name, + ); + + deploy.payment = payment; + deploy.session = session; + + let expected_error = DeployConfigFailure::ExceededBlockGasLimit { + block_gas_limit: config.block_gas_limit, + got: Box::new(amount), + }; + + let current_timestamp = deploy.header().timestamp(); + assert_eq!( + deploy.is_config_compliant( + chain_name, + &config, + DEFAULT_MAX_ASSOCIATED_KEYS, + TimeDiff::default(), + current_timestamp + ), + Err(expected_error) + ); + assert!( + deploy.is_valid.get().is_none(), + "deploy should not have run expensive `is_valid` call" + ); + } + + #[test] + fn transfer_acceptable_regardless_of_excessive_payment_amount() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random(&mut rng); + let chain_name = "net-1"; + let config = TransactionConfig::default(); + let amount = U512::from(config.block_gas_limit + 1); + + let payment = ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: runtime_args! 
{ + "amount" => amount + }, + }; + + let transfer_args = { + let mut transfer_args = RuntimeArgs::new(); + let value = CLValue::from_t(U512::from(DEFAULT_MIN_TRANSFER_MOTES)) + .expect("should create CLValue"); + transfer_args.insert_cl_value("amount", value); + transfer_args + }; + + let deploy = Deploy::new( + Timestamp::now(), + config.max_ttl, + 1, + vec![], + chain_name.to_string(), + payment, + ExecutableDeployItem::Transfer { + args: transfer_args, + }, + &secret_key, + None, + ); + + let current_timestamp = deploy.header().timestamp(); + assert_eq!( + Ok(()), + deploy.is_config_compliant( + chain_name, + &config, + DEFAULT_MAX_ASSOCIATED_KEYS, + TimeDiff::default(), + current_timestamp + ) + ) + } + + #[test] + fn not_acceptable_due_to_excessive_approvals() { + let mut rng = TestRng::new(); + let chain_name = "net-1"; + let config = TransactionConfig::default(); + let deploy = create_deploy( + &mut rng, + config.max_ttl, + config.deploy_config.max_dependencies as usize, + chain_name, + ); + // This test is to ensure a given limit is being checked. + // Therefore, set the limit to one less than the approvals in the deploy. 
+ let max_associated_keys = (deploy.approvals.len() - 1) as u32; + let current_timestamp = deploy.header().timestamp(); + assert_eq!( + Err(DeployConfigFailure::ExcessiveApprovals { + got: deploy.approvals.len() as u32, + max_associated_keys: (deploy.approvals.len() - 1) as u32 + }), + deploy.is_config_compliant( + chain_name, + &config, + max_associated_keys, + TimeDiff::default(), + current_timestamp + ) + ) + } + + #[test] + fn not_acceptable_due_to_missing_transfer_amount() { + let mut rng = TestRng::new(); + let chain_name = "net-1"; + let config = TransactionConfig::default(); + let mut deploy = create_deploy( + &mut rng, + config.max_ttl, + config.deploy_config.max_dependencies as usize, + chain_name, + ); + + let transfer_args = RuntimeArgs::default(); + let session = ExecutableDeployItem::Transfer { + args: transfer_args, + }; + deploy.session = session; + + let current_timestamp = deploy.header().timestamp(); + assert_eq!( + Err(DeployConfigFailure::MissingTransferAmount), + deploy.is_config_compliant( + chain_name, + &config, + DEFAULT_MAX_ASSOCIATED_KEYS, + TimeDiff::default(), + current_timestamp + ) + ) + } + + #[test] + fn not_acceptable_due_to_mangled_transfer_amount() { + let mut rng = TestRng::new(); + let chain_name = "net-1"; + let config = TransactionConfig::default(); + let mut deploy = create_deploy( + &mut rng, + config.max_ttl, + config.deploy_config.max_dependencies as usize, + chain_name, + ); + + let transfer_args = runtime_args! 
{ + "amount" => "mangled-amount", + "source" => PublicKey::random(&mut rng).to_account_hash(), + "target" => PublicKey::random(&mut rng).to_account_hash(), + }; + let session = ExecutableDeployItem::Transfer { + args: transfer_args, + }; + deploy.session = session; + + let current_timestamp = deploy.header().timestamp(); + assert_eq!( + Err(DeployConfigFailure::FailedToParseTransferAmount), + deploy.is_config_compliant( + chain_name, + &config, + DEFAULT_MAX_ASSOCIATED_KEYS, + TimeDiff::default(), + current_timestamp + ) + ) + } + + #[test] + fn not_acceptable_due_to_insufficient_transfer_amount() { + let mut rng = TestRng::new(); + let chain_name = "net-1"; + let config = TransactionConfig::default(); + let mut deploy = create_deploy( + &mut rng, + config.max_ttl, + config.deploy_config.max_dependencies as usize, + chain_name, + ); + + let amount = config.native_transfer_minimum_motes - 1; + let insufficient_amount = U512::from(amount); + + let transfer_args = runtime_args! { + "amount" => insufficient_amount, + "source" => PublicKey::random(&mut rng).to_account_hash(), + "target" => PublicKey::random(&mut rng).to_account_hash(), + }; + let session = ExecutableDeployItem::Transfer { + args: transfer_args, + }; + deploy.session = session; + + let current_timestamp = deploy.header().timestamp(); + assert_eq!( + Err(DeployConfigFailure::InsufficientTransferAmount { + minimum: Box::new(U512::from(config.native_transfer_minimum_motes)), + attempted: Box::new(insufficient_amount), + }), + deploy.is_config_compliant( + chain_name, + &config, + DEFAULT_MAX_ASSOCIATED_KEYS, + TimeDiff::default(), + current_timestamp + ) + ) + } +} diff --git a/casper_types_ver_2_0/src/transaction/deploy/deploy_approval.rs b/casper_types_ver_2_0/src/transaction/deploy/deploy_approval.rs new file mode 100644 index 00000000..f01a74f7 --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/deploy/deploy_approval.rs @@ -0,0 +1,103 @@ +use alloc::vec::Vec; +use core::fmt::{self, Display, 
Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use super::DeployHash; +#[cfg(any(all(feature = "std", feature = "testing"), test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + crypto, PublicKey, SecretKey, Signature, +}; + +/// A struct containing a signature of a deploy hash and the public key of the signer. +#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct DeployApproval { + signer: PublicKey, + signature: Signature, +} + +impl DeployApproval { + /// Creates an approval by signing the given deploy hash using the given secret key. + pub fn create(hash: &DeployHash, secret_key: &SecretKey) -> Self { + let signer = PublicKey::from(secret_key); + let signature = crypto::sign(hash, secret_key, &signer); + Self { signer, signature } + } + + /// Returns a new approval. + pub fn new(signer: PublicKey, signature: Signature) -> Self { + Self { signer, signature } + } + + /// Returns the public key of the approval's signer. + pub fn signer(&self) -> &PublicKey { + &self.signer + } + + /// Returns the approval signature. + pub fn signature(&self) -> &Signature { + &self.signature + } + + /// Returns a random `Approval`. 
+ #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random(rng: &mut TestRng) -> Self { + Self { + signer: PublicKey::random(rng), + signature: Signature::ed25519([0; Signature::ED25519_LENGTH]).unwrap(), + } + } +} + +impl Display for DeployApproval { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!(formatter, "approval({})", self.signer) + } +} + +impl ToBytes for DeployApproval { + fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> { + self.signer.write_bytes(writer)?; + self.signature.write_bytes(writer) + } + + fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.signer.serialized_length() + self.signature.serialized_length() + } +} + +impl FromBytes for DeployApproval { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (signer, remainder) = PublicKey::from_bytes(bytes)?; + let (signature, remainder) = Signature::from_bytes(remainder)?; + let approval = DeployApproval { signer, signature }; + Ok((approval, remainder)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let approval = DeployApproval::random(rng); + bytesrepr::test_serialization_roundtrip(&approval); + } +} diff --git a/casper_types_ver_2_0/src/transaction/deploy/deploy_approvals_hash.rs b/casper_types_ver_2_0/src/transaction/deploy/deploy_approvals_hash.rs new file mode 100644 index 00000000..6c098805 --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/deploy/deploy_approvals_hash.rs @@ -0,0 +1,111 @@ +use alloc::{collections::BTreeSet, vec::Vec}; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +use serde::{Deserialize, Serialize}; + +use super::DeployApproval;
+#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + Digest, +}; + +/// The cryptographic hash of the bytesrepr-encoded set of approvals for a single deploy. +#[derive( + Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug, Default, +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct DeployApprovalsHash(Digest); + +impl DeployApprovalsHash { + /// The number of bytes in a `DeployApprovalsHash` digest. + pub const LENGTH: usize = Digest::LENGTH; + + /// Constructs a new `DeployApprovalsHash` by bytesrepr-encoding `approvals` and creating a + /// [`Digest`] of this. + pub fn compute(approvals: &BTreeSet<DeployApproval>) -> Result<Self, bytesrepr::Error> { + let digest = Digest::hash(approvals.to_bytes()?); + Ok(DeployApprovalsHash(digest)) + } + + /// Returns the wrapped inner digest. + pub fn inner(&self) -> &Digest { + &self.0 + } + + /// Returns a new `DeployApprovalsHash` directly initialized with the provided bytes; no + /// hashing is done. + #[cfg(any(feature = "testing", test))] + pub const fn from_raw(raw_digest: [u8; Self::LENGTH]) -> Self { + DeployApprovalsHash(Digest::from_raw(raw_digest)) + } + + /// Returns a random `DeployApprovalsHash`.
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + let hash = rng.gen::<[u8; Digest::LENGTH]>().into(); + DeployApprovalsHash(hash) + } +} + +impl From<DeployApprovalsHash> for Digest { + fn from(deploy_hash: DeployApprovalsHash) -> Self { + deploy_hash.0 + } +} + +impl From<Digest> for DeployApprovalsHash { + fn from(digest: Digest) -> Self { + Self(digest) + } +} + +impl Display for DeployApprovalsHash { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!(formatter, "approvals-hash({})", self.0,) + } +} + +impl AsRef<[u8]> for DeployApprovalsHash { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + +impl ToBytes for DeployApprovalsHash { + fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } + + fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for DeployApprovalsHash { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + Digest::from_bytes(bytes).map(|(inner, remainder)| (DeployApprovalsHash(inner), remainder)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let hash = DeployApprovalsHash::random(rng); + bytesrepr::test_serialization_roundtrip(&hash); + } +} diff --git a/casper_types_ver_2_0/src/transaction/deploy/deploy_builder.rs b/casper_types_ver_2_0/src/transaction/deploy/deploy_builder.rs new file mode 100644 index 00000000..7c79e0de --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/deploy/deploy_builder.rs @@ -0,0 +1,155 @@ +mod error; + +use super::{ + super::{InitiatorAddr, InitiatorAddrAndSecretKey}, + Deploy, DeployHash, ExecutableDeployItem, TransferTarget, +}; +use crate::{PublicKey, SecretKey, TimeDiff, Timestamp, URef, U512}; +pub use error::DeployBuilderError; + +/// A builder for constructing a [`Deploy`].
+pub struct DeployBuilder<'a> { + account: Option<PublicKey>, + secret_key: Option<&'a SecretKey>, + timestamp: Timestamp, + ttl: TimeDiff, + gas_price: u64, + dependencies: Vec<DeployHash>, + chain_name: String, + payment: Option<ExecutableDeployItem>, + session: ExecutableDeployItem, +} + +impl<'a> DeployBuilder<'a> { + /// The default time-to-live for `Deploy`s, i.e. 30 minutes. + pub const DEFAULT_TTL: TimeDiff = TimeDiff::from_millis(30 * 60 * 1_000); + /// The default gas price for `Deploy`s, i.e. `1`. + pub const DEFAULT_GAS_PRICE: u64 = 1; + + /// Returns a new `DeployBuilder`. + /// + /// # Note + /// + /// Before calling [`build`](Self::build), you must ensure + /// * that an account is provided by either calling [`with_account`](Self::with_account) or + /// [`with_secret_key`](Self::with_secret_key) + /// * that payment code is provided by either calling + /// [`with_standard_payment`](Self::with_standard_payment) or + /// [`with_payment`](Self::with_payment) + pub fn new<C: Into<String>>(chain_name: C, session: ExecutableDeployItem) -> Self { + DeployBuilder { + account: None, + secret_key: None, + timestamp: Timestamp::now(), + ttl: Self::DEFAULT_TTL, + gas_price: Self::DEFAULT_GAS_PRICE, + dependencies: vec![], + chain_name: chain_name.into(), + payment: None, + session, + } + } + + /// Returns a new `DeployBuilder` with session code suitable for a transfer. + /// + /// If `maybe_source` is None, the account's main purse is used as the source of the transfer.
+ /// + /// # Note + /// + /// Before calling [`build`](Self::build), you must ensure + /// * that an account is provided by either calling [`with_account`](Self::with_account) or + /// [`with_secret_key`](Self::with_secret_key) + /// * that payment code is provided by either calling + /// [`with_standard_payment`](Self::with_standard_payment) or + /// [`with_payment`](Self::with_payment) + pub fn new_transfer<C: Into<String>, A: Into<U512>>( + chain_name: C, + amount: A, + maybe_source: Option<URef>, + target: TransferTarget, + maybe_transfer_id: Option<u64>, + ) -> Self { + let session = + ExecutableDeployItem::new_transfer(amount, maybe_source, target, maybe_transfer_id); + DeployBuilder::new(chain_name, session) + } + + /// Sets the `account` in the `Deploy`. + /// + /// If not provided, the public key derived from the secret key used in the `DeployBuilder` will + /// be used as the `account` in the `Deploy`. + pub fn with_account(mut self, account: PublicKey) -> Self { + self.account = Some(account); + self + } + + /// Sets the secret key used to sign the `Deploy` on calling [`build`](Self::build). + /// + /// If not provided, the `Deploy` can still be built, but will be unsigned and will be invalid + /// until subsequently signed. + pub fn with_secret_key(mut self, secret_key: &'a SecretKey) -> Self { + self.secret_key = Some(secret_key); + self + } + + /// Sets the `payment` in the `Deploy` to a standard payment with the given amount. + pub fn with_standard_payment<A: Into<U512>>(mut self, amount: A) -> Self { + self.payment = Some(ExecutableDeployItem::new_standard_payment(amount)); + self + } + + /// Sets the `payment` in the `Deploy`. + pub fn with_payment(mut self, payment: ExecutableDeployItem) -> Self { + self.payment = Some(payment); + self + } + + /// Sets the `timestamp` in the `Deploy`. + /// + /// If not provided, the timestamp will be set to the time when the `DeployBuilder` was + /// constructed.
+ pub fn with_timestamp(mut self, timestamp: Timestamp) -> Self { + self.timestamp = timestamp; + self + } + + /// Sets the `ttl` (time-to-live) in the `Deploy`. + /// + /// If not provided, the ttl will be set to [`Self::DEFAULT_TTL`]. + pub fn with_ttl(mut self, ttl: TimeDiff) -> Self { + self.ttl = ttl; + self + } + + /// Returns the new `Deploy`, or an error if neither + /// [`with_standard_payment`](Self::with_standard_payment) nor + /// [`with_payment`](Self::with_payment) were previously called. + pub fn build(self) -> Result<Deploy, DeployBuilderError> { + let initiator_addr_and_secret_key = match (self.account, self.secret_key) { + (Some(account), Some(secret_key)) => InitiatorAddrAndSecretKey::Both { + initiator_addr: InitiatorAddr::PublicKey(account), + secret_key, + }, + (Some(account), None) => { + InitiatorAddrAndSecretKey::InitiatorAddr(InitiatorAddr::PublicKey(account)) + } + (None, Some(secret_key)) => InitiatorAddrAndSecretKey::SecretKey(secret_key), + (None, None) => return Err(DeployBuilderError::DeployMissingSessionAccount), + }; + + let payment = self + .payment + .ok_or(DeployBuilderError::DeployMissingPaymentCode)?; + let deploy = Deploy::build( + self.timestamp, + self.ttl, + self.gas_price, + self.dependencies, + self.chain_name, + payment, + self.session, + initiator_addr_and_secret_key, + ); + Ok(deploy) + } +} diff --git a/casper_types_ver_2_0/src/transaction/deploy/deploy_builder/error.rs b/casper_types_ver_2_0/src/transaction/deploy/deploy_builder/error.rs new file mode 100644 index 00000000..30ac6fa6 --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/deploy/deploy_builder/error.rs @@ -0,0 +1,44 @@ +use core::fmt::{self, Display, Formatter}; +#[cfg(feature = "std")] +use std::error::Error as StdError; + +#[cfg(doc)] +use super::{Deploy, DeployBuilder}; + +/// Errors returned while building a [`Deploy`] using a [`DeployBuilder`].
+#[derive(Clone, Eq, PartialEq, Debug)] +#[non_exhaustive] +pub enum DeployBuilderError { + /// Failed to build `Deploy` due to missing session account. + /// + /// Call [`DeployBuilder::with_account`] or [`DeployBuilder::with_secret_key`] before + /// calling [`DeployBuilder::build`]. + DeployMissingSessionAccount, + /// Failed to build `Deploy` due to missing payment code. + /// + /// Call [`DeployBuilder::with_standard_payment`] or [`DeployBuilder::with_payment`] before + /// calling [`DeployBuilder::build`]. + DeployMissingPaymentCode, +} + +impl Display for DeployBuilderError { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + DeployBuilderError::DeployMissingSessionAccount => { + write!( + formatter, + "deploy requires session account - use `with_account` or `with_secret_key`" + ) + } + DeployBuilderError::DeployMissingPaymentCode => { + write!( + formatter, + "deploy requires payment code - use `with_payment` or `with_standard_payment`" + ) + } + } + } +} + +#[cfg(feature = "std")] +impl StdError for DeployBuilderError {} diff --git a/casper_types_ver_2_0/src/transaction/deploy/deploy_footprint.rs b/casper_types_ver_2_0/src/transaction/deploy/deploy_footprint.rs new file mode 100644 index 00000000..c45d23b8 --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/deploy/deploy_footprint.rs @@ -0,0 +1,28 @@ +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "std", test))] +use serde::{Deserialize, Serialize}; + +#[cfg(doc)] +use super::Deploy; +use super::DeployHeader; +use crate::Gas; + +/// Information about how much block limit a [`Deploy`] will consume. +#[derive(Clone, Debug)] +#[cfg_attr( + any(feature = "std", test), + derive(Serialize, Deserialize), + serde(deny_unknown_fields) +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct DeployFootprint { + /// The header of the `Deploy`. + pub header: DeployHeader, + /// The estimated gas consumption of the `Deploy`. 
+ pub gas_estimate: Gas, + /// The bytesrepr serialized length of the `Deploy`. + pub size_estimate: usize, + /// Whether the `Deploy` is a transfer or not. + pub is_transfer: bool, +} diff --git a/casper_types_ver_2_0/src/transaction/deploy/deploy_hash.rs b/casper_types_ver_2_0/src/transaction/deploy/deploy_hash.rs new file mode 100644 index 00000000..0b38d6de --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/deploy/deploy_hash.rs @@ -0,0 +1,116 @@ +use alloc::vec::Vec; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(doc)] +use super::Deploy; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + Digest, +}; + +/// The cryptographic hash of a [`Deploy`]. +#[derive( + Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug, Default, +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "Hex-encoded deploy hash.") +)] +#[serde(deny_unknown_fields)] +pub struct DeployHash(Digest); + +impl DeployHash { + /// The number of bytes in a `DeployHash` digest. + pub const LENGTH: usize = Digest::LENGTH; + + /// Constructs a new `DeployHash`. + pub const fn new(hash: Digest) -> Self { + DeployHash(hash) + } + + /// Returns the wrapped inner digest. + pub fn inner(&self) -> &Digest { + &self.0 + } + + /// Returns a new `DeployHash` directly initialized with the provided bytes; no hashing is done. + #[cfg(any(feature = "testing", test))] + pub const fn from_raw(raw_digest: [u8; Self::LENGTH]) -> Self { + DeployHash(Digest::from_raw(raw_digest)) + } + + /// Returns a random `DeployHash`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + let hash = rng.gen::<[u8; Digest::LENGTH]>().into(); + DeployHash(hash) + } +} + +impl From for DeployHash { + fn from(digest: Digest) -> Self { + DeployHash(digest) + } +} + +impl From for Digest { + fn from(deploy_hash: DeployHash) -> Self { + deploy_hash.0 + } +} + +impl Display for DeployHash { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!(formatter, "deploy-hash({})", self.0,) + } +} + +impl AsRef<[u8]> for DeployHash { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + +impl ToBytes for DeployHash { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for DeployHash { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + Digest::from_bytes(bytes).map(|(inner, remainder)| (DeployHash(inner), remainder)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let hash = DeployHash::random(rng); + bytesrepr::test_serialization_roundtrip(&hash); + } +} diff --git a/casper_types_ver_2_0/src/transaction/deploy/deploy_header.rs b/casper_types_ver_2_0/src/transaction/deploy/deploy_header.rs new file mode 100644 index 00000000..37bc7ea1 --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/deploy/deploy_header.rs @@ -0,0 +1,230 @@ +use alloc::{string::String, vec::Vec}; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +#[cfg(any(feature = "std", test))] +use serde::{Deserialize, Serialize}; +#[cfg(any(feature = "std", test))] +use tracing::debug; + +#[cfg(doc)] +use super::Deploy; +use super::DeployHash; +use crate::{ + 
bytesrepr::{self, FromBytes, ToBytes}, + Digest, DisplayIter, PublicKey, TimeDiff, Timestamp, +}; +#[cfg(any(feature = "std", test))] +use crate::{DeployConfigFailure, TransactionConfig}; + +/// The header portion of a [`Deploy`]. +#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Debug)] +#[cfg_attr( + any(feature = "std", test), + derive(Serialize, Deserialize), + serde(deny_unknown_fields) +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct DeployHeader { + account: PublicKey, + timestamp: Timestamp, + ttl: TimeDiff, + gas_price: u64, + body_hash: Digest, + dependencies: Vec, + chain_name: String, +} + +impl DeployHeader { + #[cfg(any(feature = "std", feature = "json-schema", test))] + pub(super) fn new( + account: PublicKey, + timestamp: Timestamp, + ttl: TimeDiff, + gas_price: u64, + body_hash: Digest, + dependencies: Vec, + chain_name: String, + ) -> Self { + DeployHeader { + account, + timestamp, + ttl, + gas_price, + body_hash, + dependencies, + chain_name, + } + } + + /// Returns the public key of the account providing the context in which to run the `Deploy`. + pub fn account(&self) -> &PublicKey { + &self.account + } + + /// Returns the creation timestamp of the `Deploy`. + pub fn timestamp(&self) -> Timestamp { + self.timestamp + } + + /// Returns the duration after the creation timestamp for which the `Deploy` will stay valid. + /// + /// After this duration has ended, the `Deploy` will be considered expired. + pub fn ttl(&self) -> TimeDiff { + self.ttl + } + + /// Returns `true` if the `Deploy` has expired. + pub fn expired(&self, current_instant: Timestamp) -> bool { + self.expires() < current_instant + } + + /// Returns the price per gas unit for the `Deploy`. + pub fn gas_price(&self) -> u64 { + self.gas_price + } + + /// Returns the hash of the body (i.e. the Wasm code) of the `Deploy`. 
+ pub fn body_hash(&self) -> &Digest { + &self.body_hash + } + + /// Returns the list of other `Deploy`s that have to be executed before this one. + pub fn dependencies(&self) -> &Vec { + &self.dependencies + } + + /// Returns the name of the chain the `Deploy` should be executed on. + pub fn chain_name(&self) -> &str { + &self.chain_name + } + + /// Returns `Ok` if and only if the dependencies count and TTL are within limits, and the + /// timestamp is not later than `at + timestamp_leeway`. Does NOT check for expiry. + #[cfg(any(feature = "std", test))] + pub fn is_valid( + &self, + config: &TransactionConfig, + timestamp_leeway: TimeDiff, + at: Timestamp, + deploy_hash: &DeployHash, + ) -> Result<(), DeployConfigFailure> { + if self.dependencies.len() > config.deploy_config.max_dependencies as usize { + debug!( + %deploy_hash, + deploy_header = %self, + max_dependencies = %config.deploy_config.max_dependencies, + "deploy dependency ceiling exceeded" + ); + return Err(DeployConfigFailure::ExcessiveDependencies { + max_dependencies: config.deploy_config.max_dependencies, + got: self.dependencies().len(), + }); + } + + if self.ttl() > config.max_ttl { + debug!( + %deploy_hash, + deploy_header = %self, + max_ttl = %config.max_ttl, + "deploy ttl excessive" + ); + return Err(DeployConfigFailure::ExcessiveTimeToLive { + max_ttl: config.max_ttl, + got: self.ttl(), + }); + } + + if self.timestamp() > at + timestamp_leeway { + debug!(%deploy_hash, deploy_header = %self, %at, "deploy timestamp in the future"); + return Err(DeployConfigFailure::TimestampInFuture { + validation_timestamp: at, + timestamp_leeway, + got: self.timestamp(), + }); + } + + Ok(()) + } + + /// Returns the timestamp of when the `Deploy` expires, i.e. `self.timestamp + self.ttl`. 
+ pub fn expires(&self) -> Timestamp { + self.timestamp.saturating_add(self.ttl) + } + + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub(super) fn invalidate(&mut self) { + self.chain_name.clear(); + } +} + +impl ToBytes for DeployHeader { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.account.write_bytes(writer)?; + self.timestamp.write_bytes(writer)?; + self.ttl.write_bytes(writer)?; + self.gas_price.write_bytes(writer)?; + self.body_hash.write_bytes(writer)?; + self.dependencies.write_bytes(writer)?; + self.chain_name.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.account.serialized_length() + + self.timestamp.serialized_length() + + self.ttl.serialized_length() + + self.gas_price.serialized_length() + + self.body_hash.serialized_length() + + self.dependencies.serialized_length() + + self.chain_name.serialized_length() + } +} + +impl FromBytes for DeployHeader { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (account, remainder) = PublicKey::from_bytes(bytes)?; + let (timestamp, remainder) = Timestamp::from_bytes(remainder)?; + let (ttl, remainder) = TimeDiff::from_bytes(remainder)?; + let (gas_price, remainder) = u64::from_bytes(remainder)?; + let (body_hash, remainder) = Digest::from_bytes(remainder)?; + let (dependencies, remainder) = Vec::::from_bytes(remainder)?; + let (chain_name, remainder) = String::from_bytes(remainder)?; + let deploy_header = DeployHeader { + account, + timestamp, + ttl, + gas_price, + body_hash, + dependencies, + chain_name, + }; + Ok((deploy_header, remainder)) + } +} + +impl Display for DeployHeader { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "deploy-header[account: {}, timestamp: {}, ttl: {}, gas_price: {}, body_hash: 
{}, \ + dependencies: [{}], chain_name: {}]", + self.account, + self.timestamp, + self.ttl, + self.gas_price, + self.body_hash, + DisplayIter::new(self.dependencies.iter()), + self.chain_name, + ) + } +} diff --git a/casper_types_ver_2_0/src/transaction/deploy/deploy_id.rs b/casper_types_ver_2_0/src/transaction/deploy/deploy_id.rs new file mode 100644 index 00000000..82bf91a2 --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/deploy/deploy_id.rs @@ -0,0 +1,116 @@ +use alloc::vec::Vec; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +#[cfg(doc)] +use super::Deploy; +use super::{DeployApprovalsHash, DeployHash}; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + TransactionId, +}; + +/// The unique identifier of a [`Deploy`], comprising its [`DeployHash`] and +/// [`DeployApprovalsHash`]. +#[derive( + Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug, Default, +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct DeployId { + deploy_hash: DeployHash, + approvals_hash: DeployApprovalsHash, +} + +impl DeployId { + /// Returns a new `DeployId`. + pub fn new(deploy_hash: DeployHash, approvals_hash: DeployApprovalsHash) -> Self { + DeployId { + deploy_hash, + approvals_hash, + } + } + + /// Returns the deploy hash. + pub fn deploy_hash(&self) -> &DeployHash { + &self.deploy_hash + } + + /// Returns the approvals hash. + pub fn approvals_hash(&self) -> &DeployApprovalsHash { + &self.approvals_hash + } + + /// Consumes `self`, returning a tuple of the constituent parts. + pub fn destructure(self) -> (DeployHash, DeployApprovalsHash) { + (self.deploy_hash, self.approvals_hash) + } + + /// Returns a random `DeployId`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + DeployId::new(DeployHash::random(rng), DeployApprovalsHash::random(rng)) + } +} + +impl Display for DeployId { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "deploy-id({}, {})", + self.deploy_hash, self.approvals_hash + ) + } +} + +impl ToBytes for DeployId { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.deploy_hash.write_bytes(writer)?; + self.approvals_hash.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.deploy_hash.serialized_length() + self.approvals_hash.serialized_length() + } +} + +impl FromBytes for DeployId { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (deploy_hash, remainder) = DeployHash::from_bytes(bytes)?; + let (approvals_hash, remainder) = DeployApprovalsHash::from_bytes(remainder)?; + let id = DeployId::new(deploy_hash, approvals_hash); + Ok((id, remainder)) + } +} + +impl From for TransactionId { + fn from(id: DeployId) -> Self { + Self::Deploy { + deploy_hash: id.deploy_hash, + approvals_hash: id.approvals_hash, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let id = DeployId::random(rng); + bytesrepr::test_serialization_roundtrip(&id); + } +} diff --git a/casper_types_ver_2_0/src/transaction/deploy/error.rs b/casper_types_ver_2_0/src/transaction/deploy/error.rs new file mode 100644 index 00000000..c3388cdb --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/deploy/error.rs @@ -0,0 +1,400 @@ +use alloc::{boxed::Box, string::String}; +use core::{ + array::TryFromSliceError, + fmt::{self, Display, Formatter}, +}; +#[cfg(feature = "std")] +use std::error::Error as StdError; + 
+#[cfg(feature = "datasize")] +use datasize::DataSize; +use serde::Serialize; + +use crate::{crypto, TimeDiff, Timestamp, U512}; + +/// A representation of the way in which a deploy failed validation checks. +#[derive(Clone, Eq, PartialEq, Debug)] +#[cfg_attr(feature = "std", derive(Serialize))] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[non_exhaustive] +pub enum DeployConfigFailure { + /// Invalid chain name. + InvalidChainName { + /// The expected chain name. + expected: String, + /// The received chain name. + got: String, + }, + + /// Too many dependencies. + ExcessiveDependencies { + /// The dependencies limit. + max_dependencies: u8, + /// The actual number of dependencies provided. + got: usize, + }, + + /// Deploy is too large. + ExcessiveSize(ExcessiveSizeError), + + /// Excessive time-to-live. + ExcessiveTimeToLive { + /// The time-to-live limit. + max_ttl: TimeDiff, + /// The received time-to-live. + got: TimeDiff, + }, + + /// Deploy's timestamp is in the future. + TimestampInFuture { + /// The node's timestamp when validating the deploy. + validation_timestamp: Timestamp, + /// Any configured leeway added to `validation_timestamp`. + timestamp_leeway: TimeDiff, + /// The deploy's timestamp. + got: Timestamp, + }, + + /// The provided body hash does not match the actual hash of the body. + InvalidBodyHash, + + /// The provided deploy hash does not match the actual hash of the deploy. + InvalidDeployHash, + + /// The deploy has no approvals. + EmptyApprovals, + + /// Invalid approval. + InvalidApproval { + /// The index of the approval at fault. + index: usize, + /// The approval verification error. + error: crypto::Error, + }, + + /// Excessive length of deploy's session args. + ExcessiveSessionArgsLength { + /// The byte size limit of session arguments. + max_length: usize, + /// The received length of session arguments. + got: usize, + }, + + /// Excessive length of deploy's payment args. 
+ ExcessivePaymentArgsLength { + /// The byte size limit of payment arguments. + max_length: usize, + /// The received length of payment arguments. + got: usize, + }, + + /// Missing payment "amount" runtime argument. + MissingPaymentAmount, + + /// Failed to parse payment "amount" runtime argument. + FailedToParsePaymentAmount, + + /// The payment amount associated with the deploy exceeds the block gas limit. + ExceededBlockGasLimit { + /// Configured block gas limit. + block_gas_limit: u64, + /// The payment amount received. + got: Box, + }, + + /// Missing payment "amount" runtime argument + MissingTransferAmount, + + /// Failed to parse transfer "amount" runtime argument. + FailedToParseTransferAmount, + + /// Insufficient transfer amount. + InsufficientTransferAmount { + /// The minimum transfer amount. + minimum: Box, + /// The attempted transfer amount. + attempted: Box, + }, + + /// The amount of approvals on the deploy exceeds the max_associated_keys limit. + ExcessiveApprovals { + /// Number of approvals on the deploy. + got: u32, + /// The chainspec limit for max_associated_keys. 
+ max_associated_keys: u32, + }, +} + +impl Display for DeployConfigFailure { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + DeployConfigFailure::InvalidChainName { expected, got } => { + write!( + formatter, + "invalid chain name: expected {}, got {}", + expected, got + ) + } + DeployConfigFailure::ExcessiveDependencies { + max_dependencies, + got, + } => { + write!( + formatter, + "{} dependencies exceeds limit of {}", + got, max_dependencies + ) + } + DeployConfigFailure::ExcessiveSize(error) => { + write!(formatter, "deploy size too large: {}", error) + } + DeployConfigFailure::ExcessiveTimeToLive { max_ttl, got } => { + write!( + formatter, + "time-to-live of {} exceeds limit of {}", + got, max_ttl + ) + } + DeployConfigFailure::TimestampInFuture { + validation_timestamp, + timestamp_leeway, + got, + } => { + write!( + formatter, + "timestamp of {} is later than node's timestamp of {} plus leeway of {}", + got, validation_timestamp, timestamp_leeway + ) + } + DeployConfigFailure::InvalidBodyHash => { + write!( + formatter, + "the provided body hash does not match the actual hash of the body" + ) + } + DeployConfigFailure::InvalidDeployHash => { + write!( + formatter, + "the provided hash does not match the actual hash of the deploy" + ) + } + DeployConfigFailure::EmptyApprovals => { + write!(formatter, "the deploy has no approvals") + } + DeployConfigFailure::InvalidApproval { index, error } => { + write!( + formatter, + "the approval at index {} is invalid: {}", + index, error + ) + } + DeployConfigFailure::ExcessiveSessionArgsLength { max_length, got } => { + write!( + formatter, + "serialized session code runtime args of {} exceeds limit of {}", + got, max_length + ) + } + DeployConfigFailure::ExcessivePaymentArgsLength { max_length, got } => { + write!( + formatter, + "serialized payment code runtime args of {} exceeds limit of {}", + got, max_length + ) + } + DeployConfigFailure::MissingPaymentAmount => { + write!(formatter, 
"missing payment 'amount' runtime argument") + } + DeployConfigFailure::FailedToParsePaymentAmount => { + write!(formatter, "failed to parse payment 'amount' as U512") + } + DeployConfigFailure::ExceededBlockGasLimit { + block_gas_limit, + got, + } => { + write!( + formatter, + "payment amount of {} exceeds the block gas limit of {}", + got, block_gas_limit + ) + } + DeployConfigFailure::MissingTransferAmount => { + write!(formatter, "missing transfer 'amount' runtime argument") + } + DeployConfigFailure::FailedToParseTransferAmount => { + write!(formatter, "failed to parse transfer 'amount' as U512") + } + DeployConfigFailure::InsufficientTransferAmount { minimum, attempted } => { + write!( + formatter, + "insufficient transfer amount; minimum: {} attempted: {}", + minimum, attempted + ) + } + DeployConfigFailure::ExcessiveApprovals { + got, + max_associated_keys, + } => { + write!( + formatter, + "number of approvals {} exceeds the maximum number of associated keys {}", + got, max_associated_keys + ) + } + } + } +} + +impl From for DeployConfigFailure { + fn from(error: ExcessiveSizeError) -> Self { + DeployConfigFailure::ExcessiveSize(error) + } +} + +#[cfg(feature = "std")] +impl StdError for DeployConfigFailure { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + match self { + DeployConfigFailure::InvalidApproval { error, .. } => Some(error), + DeployConfigFailure::InvalidChainName { .. } + | DeployConfigFailure::ExcessiveDependencies { .. } + | DeployConfigFailure::ExcessiveSize(_) + | DeployConfigFailure::ExcessiveTimeToLive { .. } + | DeployConfigFailure::TimestampInFuture { .. } + | DeployConfigFailure::InvalidBodyHash + | DeployConfigFailure::InvalidDeployHash + | DeployConfigFailure::EmptyApprovals + | DeployConfigFailure::ExcessiveSessionArgsLength { .. } + | DeployConfigFailure::ExcessivePaymentArgsLength { .. 
} + | DeployConfigFailure::MissingPaymentAmount + | DeployConfigFailure::FailedToParsePaymentAmount + | DeployConfigFailure::ExceededBlockGasLimit { .. } + | DeployConfigFailure::MissingTransferAmount + | DeployConfigFailure::FailedToParseTransferAmount + | DeployConfigFailure::InsufficientTransferAmount { .. } + | DeployConfigFailure::ExcessiveApprovals { .. } => None, + } + } +} + +/// Error returned when a Deploy is too large. +#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Debug, Serialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct ExcessiveSizeError { + /// The maximum permitted serialized deploy size, in bytes. + pub max_transaction_size: u32, + /// The serialized size of the deploy provided, in bytes. + pub actual_deploy_size: usize, +} + +impl Display for ExcessiveSizeError { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "deploy size of {} bytes exceeds limit of {}", + self.actual_deploy_size, self.max_transaction_size + ) + } +} + +#[cfg(feature = "std")] +impl StdError for ExcessiveSizeError {} + +/// Errors other than validation failures relating to `Deploy`s. +#[derive(Debug)] +#[non_exhaustive] +pub enum Error { + /// Error while encoding to JSON. + EncodeToJson(serde_json::Error), + + /// Error while decoding from JSON. + DecodeFromJson(DecodeFromJsonError), + + /// Failed to get "amount" from `payment()`'s runtime args. 
+ InvalidPayment, +} + +impl From for Error { + fn from(error: serde_json::Error) -> Self { + Error::EncodeToJson(error) + } +} + +impl From for Error { + fn from(error: DecodeFromJsonError) -> Self { + Error::DecodeFromJson(error) + } +} + +impl Display for Error { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + Error::EncodeToJson(error) => { + write!(formatter, "encoding to json: {}", error) + } + Error::DecodeFromJson(error) => { + write!(formatter, "decoding from json: {}", error) + } + Error::InvalidPayment => { + write!(formatter, "invalid payment: missing 'amount' arg") + } + } + } +} + +#[cfg(feature = "std")] +impl StdError for Error { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + match self { + Error::EncodeToJson(error) => Some(error), + Error::DecodeFromJson(error) => Some(error), + Error::InvalidPayment => None, + } + } +} + +/// Error while decoding a `Deploy` from JSON. +#[derive(Debug)] +#[non_exhaustive] +pub enum DecodeFromJsonError { + /// Failed to decode from base 16. + FromHex(base16::DecodeError), + + /// Failed to convert slice to array. 
+ TryFromSlice(TryFromSliceError), +} + +impl From for DecodeFromJsonError { + fn from(error: base16::DecodeError) -> Self { + DecodeFromJsonError::FromHex(error) + } +} + +impl From for DecodeFromJsonError { + fn from(error: TryFromSliceError) -> Self { + DecodeFromJsonError::TryFromSlice(error) + } +} + +impl Display for DecodeFromJsonError { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + DecodeFromJsonError::FromHex(error) => { + write!(formatter, "{}", error) + } + DecodeFromJsonError::TryFromSlice(error) => { + write!(formatter, "{}", error) + } + } + } +} + +#[cfg(feature = "std")] +impl StdError for DecodeFromJsonError { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + match self { + DecodeFromJsonError::FromHex(error) => Some(error), + DecodeFromJsonError::TryFromSlice(error) => Some(error), + } + } +} diff --git a/casper_types_ver_2_0/src/transaction/deploy/executable_deploy_item.rs b/casper_types_ver_2_0/src/transaction/deploy/executable_deploy_item.rs new file mode 100644 index 00000000..e553a87c --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/deploy/executable_deploy_item.rs @@ -0,0 +1,827 @@ +use alloc::{string::String, vec::Vec}; +use core::fmt::{self, Debug, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use hex_fmt::HexFmt; +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Alphanumeric, Distribution, Standard}, + Rng, +}; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(doc)] +use super::Deploy; +use crate::{ + account::AccountHash, + addressable_entity::DEFAULT_ENTRY_POINT_NAME, + bytesrepr::{self, Bytes, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + package::{EntityVersion, PackageHash}, + runtime_args, serde_helpers, + system::mint::ARG_AMOUNT, + AddressableEntityHash, AddressableEntityIdentifier, Gas, Motes, PackageIdentifier, Phase, + PublicKey, RuntimeArgs, URef, U512, +}; 
+#[cfg(any(feature = "testing", test))] +use crate::{testing::TestRng, CLValue}; + +const TAG_LENGTH: usize = U8_SERIALIZED_LENGTH; +const MODULE_BYTES_TAG: u8 = 0; +const STORED_CONTRACT_BY_HASH_TAG: u8 = 1; +const STORED_CONTRACT_BY_NAME_TAG: u8 = 2; +const STORED_VERSIONED_CONTRACT_BY_HASH_TAG: u8 = 3; +const STORED_VERSIONED_CONTRACT_BY_NAME_TAG: u8 = 4; +const TRANSFER_TAG: u8 = 5; +const TRANSFER_ARG_AMOUNT: &str = "amount"; +const TRANSFER_ARG_SOURCE: &str = "source"; +const TRANSFER_ARG_TARGET: &str = "target"; +const TRANSFER_ARG_ID: &str = "id"; + +/// Identifier for an [`ExecutableDeployItem`]. +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +pub enum ExecutableDeployItemIdentifier { + /// The deploy item is of the type [`ExecutableDeployItem::ModuleBytes`] + Module, + /// The deploy item is a variation of a stored contract. + AddressableEntity(AddressableEntityIdentifier), + /// The deploy item is a variation of a stored contract package. + Package(PackageIdentifier), + /// The deploy item is a native transfer. + Transfer, +} + +/// The executable component of a [`Deploy`]. +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub enum ExecutableDeployItem { + /// Executable specified as raw bytes that represent Wasm code and an instance of + /// [`RuntimeArgs`]. + ModuleBytes { + /// Raw Wasm module bytes with 'call' exported as an entrypoint. + #[cfg_attr( + feature = "json-schema", + schemars(description = "Hex-encoded raw Wasm bytes.") + )] + module_bytes: Bytes, + /// Runtime arguments. + args: RuntimeArgs, + }, + /// Stored contract referenced by its [`AddressableEntityHash`], entry point and an instance of + /// [`RuntimeArgs`]. + StoredContractByHash { + /// Contract hash. 
+ #[serde(with = "serde_helpers::contract_hash_as_digest")] + #[cfg_attr( + feature = "json-schema", + schemars( + // this attribute is necessary due to a bug: https://github.com/GREsau/schemars/issues/89 + with = "AddressableEntityHash", + description = "Hex-encoded contract hash." + ) + )] + hash: AddressableEntityHash, + /// Name of an entry point. + entry_point: String, + /// Runtime arguments. + args: RuntimeArgs, + }, + /// Stored contract referenced by a named key existing in the signer's account context, entry + /// point and an instance of [`RuntimeArgs`]. + StoredContractByName { + /// Named key. + name: String, + /// Name of an entry point. + entry_point: String, + /// Runtime arguments. + args: RuntimeArgs, + }, + /// Stored versioned contract referenced by its [`PackageHash`], entry point and an + /// instance of [`RuntimeArgs`]. + StoredVersionedContractByHash { + /// Contract package hash + #[serde(with = "serde_helpers::contract_package_hash_as_digest")] + #[cfg_attr( + feature = "json-schema", + schemars( + // this attribute is necessary due to a bug: https://github.com/GREsau/schemars/issues/89 + with = "PackageHash", + description = "Hex-encoded contract package hash." + ) + )] + hash: PackageHash, + /// An optional version of the contract to call. It will default to the highest enabled + /// version if no value is specified. + version: Option, + /// Entry point name. + entry_point: String, + /// Runtime arguments. + args: RuntimeArgs, + }, + /// Stored versioned contract referenced by a named key existing in the signer's account + /// context, entry point and an instance of [`RuntimeArgs`]. + StoredVersionedContractByName { + /// Named key. + name: String, + /// An optional version of the contract to call. It will default to the highest enabled + /// version if no value is specified. + version: Option, + /// Entry point name. + entry_point: String, + /// Runtime arguments. 
+ args: RuntimeArgs, + }, + /// A native transfer which does not contain or reference a Wasm code. + Transfer { + /// Runtime arguments. + args: RuntimeArgs, + }, +} + +impl ExecutableDeployItem { + /// Returns a new `ExecutableDeployItem::ModuleBytes`. + pub fn new_module_bytes(module_bytes: Bytes, args: RuntimeArgs) -> Self { + ExecutableDeployItem::ModuleBytes { module_bytes, args } + } + + /// Returns a new `ExecutableDeployItem::ModuleBytes` suitable for use as standard payment code + /// of a `Deploy`. + pub fn new_standard_payment>(amount: A) -> Self { + ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: runtime_args! { + ARG_AMOUNT => amount.into(), + }, + } + } + + /// Returns a new `ExecutableDeployItem::StoredContractByHash`. + pub fn new_stored_contract_by_hash( + hash: AddressableEntityHash, + entry_point: String, + args: RuntimeArgs, + ) -> Self { + ExecutableDeployItem::StoredContractByHash { + hash, + entry_point, + args, + } + } + + /// Returns a new `ExecutableDeployItem::StoredContractByName`. + pub fn new_stored_contract_by_name( + name: String, + entry_point: String, + args: RuntimeArgs, + ) -> Self { + ExecutableDeployItem::StoredContractByName { + name, + entry_point, + args, + } + } + + /// Returns a new `ExecutableDeployItem::StoredVersionedContractByHash`. + pub fn new_stored_versioned_contract_by_hash( + hash: PackageHash, + version: Option, + entry_point: String, + args: RuntimeArgs, + ) -> Self { + ExecutableDeployItem::StoredVersionedContractByHash { + hash, + version, + entry_point, + args, + } + } + + /// Returns a new `ExecutableDeployItem::StoredVersionedContractByName`. + pub fn new_stored_versioned_contract_by_name( + name: String, + version: Option, + entry_point: String, + args: RuntimeArgs, + ) -> Self { + ExecutableDeployItem::StoredVersionedContractByName { + name, + version, + entry_point, + args, + } + } + + /// Returns a new `ExecutableDeployItem` suitable for use as session code for a transfer. 
+ /// + /// If `maybe_source` is None, the account's main purse is used as the source. + pub fn new_transfer>( + amount: A, + maybe_source: Option, + target: TransferTarget, + maybe_transfer_id: Option, + ) -> Self { + let mut args = RuntimeArgs::new(); + args.insert(TRANSFER_ARG_AMOUNT, amount.into()) + .expect("should serialize amount arg"); + + if let Some(source) = maybe_source { + args.insert(TRANSFER_ARG_SOURCE, source) + .expect("should serialize source arg"); + } + + match target { + TransferTarget::PublicKey(public_key) => args + .insert(TRANSFER_ARG_TARGET, public_key) + .expect("should serialize public key target arg"), + TransferTarget::AccountHash(account_hash) => args + .insert(TRANSFER_ARG_TARGET, account_hash) + .expect("should serialize account hash target arg"), + TransferTarget::URef(uref) => args + .insert(TRANSFER_ARG_TARGET, uref) + .expect("should serialize uref target arg"), + } + + args.insert(TRANSFER_ARG_ID, maybe_transfer_id) + .expect("should serialize transfer id arg"); + + ExecutableDeployItem::Transfer { args } + } + + /// Returns the entry point name. + pub fn entry_point_name(&self) -> &str { + match self { + ExecutableDeployItem::ModuleBytes { .. } | ExecutableDeployItem::Transfer { .. } => { + DEFAULT_ENTRY_POINT_NAME + } + ExecutableDeployItem::StoredVersionedContractByName { entry_point, .. } + | ExecutableDeployItem::StoredVersionedContractByHash { entry_point, .. } + | ExecutableDeployItem::StoredContractByHash { entry_point, .. } + | ExecutableDeployItem::StoredContractByName { entry_point, .. } => entry_point, + } + } + + /// Returns the identifier of the `ExecutableDeployItem`. + pub fn identifier(&self) -> ExecutableDeployItemIdentifier { + match self { + ExecutableDeployItem::ModuleBytes { .. } => ExecutableDeployItemIdentifier::Module, + ExecutableDeployItem::StoredContractByHash { hash, .. 
} => { + ExecutableDeployItemIdentifier::AddressableEntity( + AddressableEntityIdentifier::Hash(*hash), + ) + } + ExecutableDeployItem::StoredContractByName { name, .. } => { + ExecutableDeployItemIdentifier::AddressableEntity( + AddressableEntityIdentifier::Name(name.clone()), + ) + } + ExecutableDeployItem::StoredVersionedContractByHash { hash, version, .. } => { + ExecutableDeployItemIdentifier::Package(PackageIdentifier::Hash { + package_hash: *hash, + version: *version, + }) + } + ExecutableDeployItem::StoredVersionedContractByName { name, version, .. } => { + ExecutableDeployItemIdentifier::Package(PackageIdentifier::Name { + name: name.clone(), + version: *version, + }) + } + ExecutableDeployItem::Transfer { .. } => ExecutableDeployItemIdentifier::Transfer, + } + } + + /// Returns the identifier of the contract in the deploy item, if present. + pub fn contract_identifier(&self) -> Option { + match self { + ExecutableDeployItem::ModuleBytes { .. } + | ExecutableDeployItem::StoredVersionedContractByHash { .. } + | ExecutableDeployItem::StoredVersionedContractByName { .. } + | ExecutableDeployItem::Transfer { .. } => None, + ExecutableDeployItem::StoredContractByHash { hash, .. } => { + Some(AddressableEntityIdentifier::Hash(*hash)) + } + ExecutableDeployItem::StoredContractByName { name, .. } => { + Some(AddressableEntityIdentifier::Name(name.clone())) + } + } + } + + /// Returns the identifier of the contract package in the deploy item, if present. + pub fn contract_package_identifier(&self) -> Option { + match self { + ExecutableDeployItem::ModuleBytes { .. } + | ExecutableDeployItem::StoredContractByHash { .. } + | ExecutableDeployItem::StoredContractByName { .. } + | ExecutableDeployItem::Transfer { .. } => None, + + ExecutableDeployItem::StoredVersionedContractByHash { hash, version, .. } => { + Some(PackageIdentifier::Hash { + package_hash: *hash, + version: *version, + }) + } + ExecutableDeployItem::StoredVersionedContractByName { name, version, .. 
} => { + Some(PackageIdentifier::Name { + name: name.clone(), + version: *version, + }) + } + } + } + + /// Returns the runtime arguments. + pub fn args(&self) -> &RuntimeArgs { + match self { + ExecutableDeployItem::ModuleBytes { args, .. } + | ExecutableDeployItem::StoredContractByHash { args, .. } + | ExecutableDeployItem::StoredContractByName { args, .. } + | ExecutableDeployItem::StoredVersionedContractByHash { args, .. } + | ExecutableDeployItem::StoredVersionedContractByName { args, .. } + | ExecutableDeployItem::Transfer { args } => args, + } + } + + /// Returns the payment amount from args (if any) as Gas. + pub fn payment_amount(&self, conv_rate: u64) -> Option { + let cl_value = self.args().get(ARG_AMOUNT)?; + let motes = cl_value.clone().into_t::().ok()?; + Gas::from_motes(Motes::new(motes), conv_rate) + } + + /// Returns `true` if this deploy item is a native transfer. + pub fn is_transfer(&self) -> bool { + matches!(self, ExecutableDeployItem::Transfer { .. }) + } + + /// Returns `true` if this deploy item is a standard payment. + pub fn is_standard_payment(&self, phase: Phase) -> bool { + if phase != Phase::Payment { + return false; + } + + if let ExecutableDeployItem::ModuleBytes { module_bytes, .. } = self { + return module_bytes.is_empty(); + } + + false + } + + /// Returns `true` if the deploy item is a contract identified by its name. + pub fn is_by_name(&self) -> bool { + matches!( + self, + ExecutableDeployItem::StoredVersionedContractByName { .. } + ) || matches!(self, ExecutableDeployItem::StoredContractByName { .. }) + } + + /// Returns the name of the contract or contract package, if the deploy item is identified by + /// name. + pub fn by_name(&self) -> Option { + match self { + ExecutableDeployItem::StoredContractByName { name, .. } + | ExecutableDeployItem::StoredVersionedContractByName { name, .. } => { + Some(name.clone()) + } + ExecutableDeployItem::ModuleBytes { .. } + | ExecutableDeployItem::StoredContractByHash { .. 
} + | ExecutableDeployItem::StoredVersionedContractByHash { .. } + | ExecutableDeployItem::Transfer { .. } => None, + } + } + + /// Returns `true` if the deploy item is a stored contract. + pub fn is_stored_contract(&self) -> bool { + matches!(self, ExecutableDeployItem::StoredContractByHash { .. }) + || matches!(self, ExecutableDeployItem::StoredContractByName { .. }) + } + + /// Returns `true` if the deploy item is a stored contract package. + pub fn is_stored_contract_package(&self) -> bool { + matches!( + self, + ExecutableDeployItem::StoredVersionedContractByHash { .. } + ) || matches!( + self, + ExecutableDeployItem::StoredVersionedContractByName { .. } + ) + } + + /// Returns `true` if the deploy item is [`ModuleBytes`]. + /// + /// [`ModuleBytes`]: ExecutableDeployItem::ModuleBytes + pub fn is_module_bytes(&self) -> bool { + matches!(self, Self::ModuleBytes { .. }) + } + + /// Returns a random `ExecutableDeployItem`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + rng.gen() + } +} + +impl ToBytes for ExecutableDeployItem { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + ExecutableDeployItem::ModuleBytes { module_bytes, args } => { + writer.push(MODULE_BYTES_TAG); + module_bytes.write_bytes(writer)?; + args.write_bytes(writer) + } + ExecutableDeployItem::StoredContractByHash { + hash, + entry_point, + args, + } => { + writer.push(STORED_CONTRACT_BY_HASH_TAG); + hash.write_bytes(writer)?; + entry_point.write_bytes(writer)?; + args.write_bytes(writer) + } + ExecutableDeployItem::StoredContractByName { + name, + entry_point, + args, + } => { + writer.push(STORED_CONTRACT_BY_NAME_TAG); + name.write_bytes(writer)?; + entry_point.write_bytes(writer)?; + args.write_bytes(writer) + } + ExecutableDeployItem::StoredVersionedContractByHash { + hash, + version, + entry_point, + args, + } => { + writer.push(STORED_VERSIONED_CONTRACT_BY_HASH_TAG); + hash.write_bytes(writer)?; + 
version.write_bytes(writer)?; + entry_point.write_bytes(writer)?; + args.write_bytes(writer) + } + ExecutableDeployItem::StoredVersionedContractByName { + name, + version, + entry_point, + args, + } => { + writer.push(STORED_VERSIONED_CONTRACT_BY_NAME_TAG); + name.write_bytes(writer)?; + version.write_bytes(writer)?; + entry_point.write_bytes(writer)?; + args.write_bytes(writer) + } + ExecutableDeployItem::Transfer { args } => { + writer.push(TRANSFER_TAG); + args.write_bytes(writer) + } + } + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + TAG_LENGTH + + match self { + ExecutableDeployItem::ModuleBytes { module_bytes, args } => { + module_bytes.serialized_length() + args.serialized_length() + } + ExecutableDeployItem::StoredContractByHash { + hash, + entry_point, + args, + } => { + hash.serialized_length() + + entry_point.serialized_length() + + args.serialized_length() + } + ExecutableDeployItem::StoredContractByName { + name, + entry_point, + args, + } => { + name.serialized_length() + + entry_point.serialized_length() + + args.serialized_length() + } + ExecutableDeployItem::StoredVersionedContractByHash { + hash, + version, + entry_point, + args, + } => { + hash.serialized_length() + + version.serialized_length() + + entry_point.serialized_length() + + args.serialized_length() + } + ExecutableDeployItem::StoredVersionedContractByName { + name, + version, + entry_point, + args, + } => { + name.serialized_length() + + version.serialized_length() + + entry_point.serialized_length() + + args.serialized_length() + } + ExecutableDeployItem::Transfer { args } => args.serialized_length(), + } + } +} + +impl FromBytes for ExecutableDeployItem { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + MODULE_BYTES_TAG => { + let 
(module_bytes, remainder) = Bytes::from_bytes(remainder)?; + let (args, remainder) = RuntimeArgs::from_bytes(remainder)?; + Ok(( + ExecutableDeployItem::ModuleBytes { module_bytes, args }, + remainder, + )) + } + STORED_CONTRACT_BY_HASH_TAG => { + let (hash, remainder) = AddressableEntityHash::from_bytes(remainder)?; + let (entry_point, remainder) = String::from_bytes(remainder)?; + let (args, remainder) = RuntimeArgs::from_bytes(remainder)?; + Ok(( + ExecutableDeployItem::StoredContractByHash { + hash, + entry_point, + args, + }, + remainder, + )) + } + STORED_CONTRACT_BY_NAME_TAG => { + let (name, remainder) = String::from_bytes(remainder)?; + let (entry_point, remainder) = String::from_bytes(remainder)?; + let (args, remainder) = RuntimeArgs::from_bytes(remainder)?; + Ok(( + ExecutableDeployItem::StoredContractByName { + name, + entry_point, + args, + }, + remainder, + )) + } + STORED_VERSIONED_CONTRACT_BY_HASH_TAG => { + let (hash, remainder) = PackageHash::from_bytes(remainder)?; + let (version, remainder) = Option::::from_bytes(remainder)?; + let (entry_point, remainder) = String::from_bytes(remainder)?; + let (args, remainder) = RuntimeArgs::from_bytes(remainder)?; + Ok(( + ExecutableDeployItem::StoredVersionedContractByHash { + hash, + version, + entry_point, + args, + }, + remainder, + )) + } + STORED_VERSIONED_CONTRACT_BY_NAME_TAG => { + let (name, remainder) = String::from_bytes(remainder)?; + let (version, remainder) = Option::::from_bytes(remainder)?; + let (entry_point, remainder) = String::from_bytes(remainder)?; + let (args, remainder) = RuntimeArgs::from_bytes(remainder)?; + Ok(( + ExecutableDeployItem::StoredVersionedContractByName { + name, + version, + entry_point, + args, + }, + remainder, + )) + } + TRANSFER_TAG => { + let (args, remainder) = RuntimeArgs::from_bytes(remainder)?; + Ok((ExecutableDeployItem::Transfer { args }, remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +impl Display for ExecutableDeployItem { + fn 
fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + ExecutableDeployItem::ModuleBytes { module_bytes, .. } => { + write!(f, "module-bytes [{} bytes]", module_bytes.len()) + } + ExecutableDeployItem::StoredContractByHash { + hash, entry_point, .. + } => write!( + f, + "stored-contract-by-hash: {:10}, entry-point: {}", + HexFmt(hash), + entry_point, + ), + ExecutableDeployItem::StoredContractByName { + name, entry_point, .. + } => write!( + f, + "stored-contract-by-name: {}, entry-point: {}", + name, entry_point, + ), + ExecutableDeployItem::StoredVersionedContractByHash { + hash, + version: Some(ver), + entry_point, + .. + } => write!( + f, + "stored-versioned-contract-by-hash: {:10}, version: {}, entry-point: {}", + HexFmt(hash), + ver, + entry_point, + ), + ExecutableDeployItem::StoredVersionedContractByHash { + hash, entry_point, .. + } => write!( + f, + "stored-versioned-contract-by-hash: {:10}, version: latest, entry-point: {}", + HexFmt(hash), + entry_point, + ), + ExecutableDeployItem::StoredVersionedContractByName { + name, + version: Some(ver), + entry_point, + .. + } => write!( + f, + "stored-versioned-contract: {}, version: {}, entry-point: {}", + name, ver, entry_point, + ), + ExecutableDeployItem::StoredVersionedContractByName { + name, entry_point, .. + } => write!( + f, + "stored-versioned-contract: {}, version: latest, entry-point: {}", + name, entry_point, + ), + ExecutableDeployItem::Transfer { .. 
} => write!(f, "transfer"), + } + } +} + +impl Debug for ExecutableDeployItem { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + ExecutableDeployItem::ModuleBytes { module_bytes, args } => f + .debug_struct("ModuleBytes") + .field("module_bytes", &format!("[{} bytes]", module_bytes.len())) + .field("args", args) + .finish(), + ExecutableDeployItem::StoredContractByHash { + hash, + entry_point, + args, + } => f + .debug_struct("StoredContractByHash") + .field("hash", &base16::encode_lower(hash)) + .field("entry_point", &entry_point) + .field("args", args) + .finish(), + ExecutableDeployItem::StoredContractByName { + name, + entry_point, + args, + } => f + .debug_struct("StoredContractByName") + .field("name", &name) + .field("entry_point", &entry_point) + .field("args", args) + .finish(), + ExecutableDeployItem::StoredVersionedContractByHash { + hash, + version, + entry_point, + args, + } => f + .debug_struct("StoredVersionedContractByHash") + .field("hash", &base16::encode_lower(hash)) + .field("version", version) + .field("entry_point", &entry_point) + .field("args", args) + .finish(), + ExecutableDeployItem::StoredVersionedContractByName { + name, + version, + entry_point, + args, + } => f + .debug_struct("StoredVersionedContractByName") + .field("name", &name) + .field("version", version) + .field("entry_point", &entry_point) + .field("args", args) + .finish(), + ExecutableDeployItem::Transfer { args } => { + f.debug_struct("Transfer").field("args", args).finish() + } + } + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> ExecutableDeployItem { + fn random_bytes(rng: &mut R) -> Vec { + let mut bytes = vec![0u8; rng.gen_range(0..100)]; + rng.fill_bytes(bytes.as_mut()); + bytes + } + + fn random_string(rng: &mut R) -> String { + rng.sample_iter(&Alphanumeric) + .take(20) + .map(char::from) + .collect() + } + + let mut args = RuntimeArgs::new(); + let _ = 
args.insert(random_string(rng), Bytes::from(random_bytes(rng))); + + match rng.gen_range(0..5) { + 0 => ExecutableDeployItem::ModuleBytes { + module_bytes: random_bytes(rng).into(), + args, + }, + 1 => ExecutableDeployItem::StoredContractByHash { + hash: AddressableEntityHash::new(rng.gen()), + entry_point: random_string(rng), + args, + }, + 2 => ExecutableDeployItem::StoredContractByName { + name: random_string(rng), + entry_point: random_string(rng), + args, + }, + 3 => ExecutableDeployItem::StoredVersionedContractByHash { + hash: PackageHash::new(rng.gen()), + version: rng.gen(), + entry_point: random_string(rng), + args, + }, + 4 => ExecutableDeployItem::StoredVersionedContractByName { + name: random_string(rng), + version: rng.gen(), + entry_point: random_string(rng), + args, + }, + 5 => { + let amount = rng.gen_range(2_500_000_000_u64..1_000_000_000_000_000); + let mut transfer_args = RuntimeArgs::new(); + transfer_args.insert_cl_value( + ARG_AMOUNT, + CLValue::from_t(U512::from(amount)).expect("should get CLValue from U512"), + ); + ExecutableDeployItem::Transfer { + args: transfer_args, + } + } + _ => unreachable!(), + } + } +} + +/// The various types which can be used as the `target` runtime argument of a native transfer. +#[derive(Clone, Ord, PartialOrd, Eq, PartialEq)] +pub enum TransferTarget { + /// A public key. + PublicKey(PublicKey), + /// An account hash. + AccountHash(AccountHash), + /// A URef. 
+ URef(URef), +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn serialization_roundtrip() { + let rng = &mut TestRng::new(); + for _ in 0..10 { + let executable_deploy_item = ExecutableDeployItem::random(rng); + bytesrepr::test_serialization_roundtrip(&executable_deploy_item); + } + } +} diff --git a/casper_types_ver_2_0/src/transaction/deploy/finalized_deploy_approvals.rs b/casper_types_ver_2_0/src/transaction/deploy/finalized_deploy_approvals.rs new file mode 100644 index 00000000..37fb66ad --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/deploy/finalized_deploy_approvals.rs @@ -0,0 +1,76 @@ +use alloc::{collections::BTreeSet, vec::Vec}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +use serde::{Deserialize, Serialize}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + DeployApproval, +}; + +/// A set of approvals that has been agreed upon by consensus to approve of a specific deploy. +#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct FinalizedDeployApprovals(BTreeSet); + +impl FinalizedDeployApprovals { + /// Creates a new set of finalized deploy approvals. + pub fn new(approvals: BTreeSet) -> Self { + Self(approvals) + } + + /// Returns the inner `BTreeSet` of approvals. + pub fn inner(&self) -> &BTreeSet { + &self.0 + } + + /// Converts this set of deploy approvals into the inner `BTreeSet`. + pub fn into_inner(self) -> BTreeSet { + self.0 + } + + /// Returns a random FinalizedDeployApprovals. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + let count = rng.gen_range(1..10); + let approvals = (0..count).map(|_| DeployApproval::random(rng)).collect(); + FinalizedDeployApprovals(approvals) + } +} + +impl ToBytes for FinalizedDeployApprovals { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for FinalizedDeployApprovals { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (approvals, remainder) = BTreeSet::::from_bytes(bytes)?; + Ok((FinalizedDeployApprovals(approvals), remainder)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let approvals = FinalizedDeployApprovals::random(rng); + bytesrepr::test_serialization_roundtrip(&approvals); + } +} diff --git a/casper_types_ver_2_0/src/transaction/execution_info.rs b/casper_types_ver_2_0/src/transaction/execution_info.rs new file mode 100644 index 00000000..26303f5c --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/execution_info.rs @@ -0,0 +1,62 @@ +use alloc::vec::Vec; + +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + execution::ExecutionResult, + BlockHash, +}; + +/// The block hash and height in which a given deploy was executed, along with the execution result +/// if known. +#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct ExecutionInfo { + /// The hash of the block in which the deploy was executed. + pub block_hash: BlockHash, + /// The height of the block in which the deploy was executed. 
+ pub block_height: u64, + /// The execution result if known. + pub execution_result: Option, +} + +impl FromBytes for ExecutionInfo { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (block_hash, bytes) = FromBytes::from_bytes(bytes)?; + let (block_height, bytes) = FromBytes::from_bytes(bytes)?; + let (execution_result, bytes) = FromBytes::from_bytes(bytes)?; + Ok(( + ExecutionInfo { + block_hash, + block_height, + execution_result, + }, + bytes, + )) + } +} + +impl ToBytes for ExecutionInfo { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut result)?; + Ok(result) + } + + fn write_bytes(&self, bytes: &mut Vec) -> Result<(), bytesrepr::Error> { + self.block_hash.write_bytes(bytes)?; + self.block_height.write_bytes(bytes)?; + self.execution_result.write_bytes(bytes)?; + Ok(()) + } + + fn serialized_length(&self) -> usize { + self.block_hash.serialized_length() + + self.block_height.serialized_length() + + self.execution_result.serialized_length() + } +} diff --git a/casper_types_ver_2_0/src/transaction/finalized_approvals.rs b/casper_types_ver_2_0/src/transaction/finalized_approvals.rs new file mode 100644 index 00000000..708873d2 --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/finalized_approvals.rs @@ -0,0 +1,128 @@ +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use alloc::vec::Vec; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + Transaction, +}; + +use super::{deploy::FinalizedDeployApprovals, transaction_v1::FinalizedTransactionV1Approvals}; + +const DEPLOY_TAG: u8 = 0; +const V1_TAG: u8 = 1; + +/// A set of approvals that has been agreed upon by consensus to approve of a specific transaction. 
+#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub enum FinalizedApprovals { + /// Approvals for a Deploy. + Deploy(FinalizedDeployApprovals), + /// Approvals for a TransactionV1. + V1(FinalizedTransactionV1Approvals), +} + +impl FinalizedApprovals { + /// Creates a new set of finalized approvals from a transaction. + pub fn new(transaction: &Transaction) -> Self { + match transaction { + Transaction::Deploy(deploy) => { + Self::Deploy(FinalizedDeployApprovals::new(deploy.approvals().clone())) + } + Transaction::V1(txn) => Self::V1(FinalizedTransactionV1Approvals::new( + txn.approvals().clone(), + )), + } + } + + /// Returns a random FinalizedApprovals. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + if rng.gen_bool(0.5) { + Self::Deploy(FinalizedDeployApprovals::random(rng)) + } else { + Self::V1(FinalizedTransactionV1Approvals::random(rng)) + } + } +} + +impl From for FinalizedApprovals { + fn from(approvals: FinalizedDeployApprovals) -> Self { + Self::Deploy(approvals) + } +} + +impl From for FinalizedApprovals { + fn from(approvals: FinalizedTransactionV1Approvals) -> Self { + Self::V1(approvals) + } +} + +impl ToBytes for FinalizedApprovals { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + FinalizedApprovals::Deploy(approvals) => { + DEPLOY_TAG.write_bytes(writer)?; + approvals.write_bytes(writer) + } + FinalizedApprovals::V1(approvals) => { + V1_TAG.write_bytes(writer)?; + approvals.write_bytes(writer) + } + } + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + FinalizedApprovals::Deploy(approvals) => approvals.serialized_length(), + FinalizedApprovals::V1(approvals) => approvals.serialized_length(), + } + } +} + 
+impl FromBytes for FinalizedApprovals { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + DEPLOY_TAG => { + let (approvals, remainder) = FinalizedDeployApprovals::from_bytes(remainder)?; + Ok((FinalizedApprovals::Deploy(approvals), remainder)) + } + V1_TAG => { + let (approvals, remainder) = + FinalizedTransactionV1Approvals::from_bytes(remainder)?; + Ok((FinalizedApprovals::V1(approvals), remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let approvals = FinalizedApprovals::from(FinalizedDeployApprovals::random(rng)); + bytesrepr::test_serialization_roundtrip(&approvals); + + let approvals = FinalizedApprovals::from(FinalizedTransactionV1Approvals::random(rng)); + bytesrepr::test_serialization_roundtrip(&approvals); + } +} diff --git a/casper_types_ver_2_0/src/transaction/initiator_addr.rs b/casper_types_ver_2_0/src/transaction/initiator_addr.rs new file mode 100644 index 00000000..0f09d6f9 --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/initiator_addr.rs @@ -0,0 +1,165 @@ +use alloc::vec::Vec; +use core::fmt::{self, Debug, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use hex_fmt::HexFmt; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(doc)] +use super::TransactionV1; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + account::AccountHash, + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + serde_helpers, EntityAddr, PublicKey, +}; + +const PUBLIC_KEY_TAG: u8 = 0; +const ACCOUNT_HASH_TAG: u8 = 1; +const ENTITY_ADDR_TAG: u8 = 2; + +/// The address of the initiator of a [`TransactionV1`]. 
+#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "The address of the initiator of a TransactionV1.") +)] +#[serde(deny_unknown_fields)] +pub enum InitiatorAddr { + /// The public key of the initiator. + PublicKey(PublicKey), + /// The account hash derived from the public key of the initiator. + AccountHash(AccountHash), + /// The entity address of the initiator. + #[serde(with = "serde_helpers::raw_32_byte_array")] + #[cfg_attr( + feature = "json-schema", + schemars( + with = "String", + description = "Hex-encoded entity address of the initiator." + ) + )] + EntityAddr(EntityAddr), +} + +impl InitiatorAddr { + /// Returns a random `InitiatorAddr`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..3) { + PUBLIC_KEY_TAG => InitiatorAddr::PublicKey(PublicKey::random(rng)), + ACCOUNT_HASH_TAG => InitiatorAddr::AccountHash(rng.gen()), + ENTITY_ADDR_TAG => InitiatorAddr::EntityAddr(rng.gen()), + _ => unreachable!(), + } + } +} + +impl Display for InitiatorAddr { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + InitiatorAddr::PublicKey(public_key) => write!(formatter, "{}", public_key), + InitiatorAddr::AccountHash(account_hash) => { + write!(formatter, "account-hash({})", account_hash) + } + InitiatorAddr::EntityAddr(entity_addr) => { + write!(formatter, "entity-addr({:10})", HexFmt(entity_addr)) + } + } + } +} + +impl Debug for InitiatorAddr { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + InitiatorAddr::PublicKey(public_key) => formatter + .debug_tuple("PublicKey") + .field(public_key) + .finish(), + InitiatorAddr::AccountHash(account_hash) => formatter + .debug_tuple("AccountHash") + .field(account_hash) + .finish(), + InitiatorAddr::EntityAddr(entity_addr) => formatter + 
.debug_tuple("EntityAddr") + .field(&HexFmt(entity_addr)) + .finish(), + } + } +} + +impl ToBytes for InitiatorAddr { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + InitiatorAddr::PublicKey(public_key) => { + PUBLIC_KEY_TAG.write_bytes(writer)?; + public_key.write_bytes(writer) + } + InitiatorAddr::AccountHash(account_hash) => { + ACCOUNT_HASH_TAG.write_bytes(writer)?; + account_hash.write_bytes(writer) + } + InitiatorAddr::EntityAddr(entity_addr) => { + ENTITY_ADDR_TAG.write_bytes(writer)?; + entity_addr.write_bytes(writer) + } + } + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + InitiatorAddr::PublicKey(public_key) => public_key.serialized_length(), + InitiatorAddr::AccountHash(account_hash) => account_hash.serialized_length(), + InitiatorAddr::EntityAddr(entity_addr) => entity_addr.serialized_length(), + } + } +} + +impl FromBytes for InitiatorAddr { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + PUBLIC_KEY_TAG => { + let (public_key, remainder) = PublicKey::from_bytes(remainder)?; + Ok((InitiatorAddr::PublicKey(public_key), remainder)) + } + ACCOUNT_HASH_TAG => { + let (account_hash, remainder) = AccountHash::from_bytes(remainder)?; + Ok((InitiatorAddr::AccountHash(account_hash), remainder)) + } + ENTITY_ADDR_TAG => { + let (entity_addr, remainder) = EntityAddr::from_bytes(remainder)?; + Ok((InitiatorAddr::EntityAddr(entity_addr), remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + for _ in 0..10 { + bytesrepr::test_serialization_roundtrip(&InitiatorAddr::random(rng)); + } + } +} diff --git 
a/casper_types_ver_2_0/src/transaction/initiator_addr_and_secret_key.rs b/casper_types_ver_2_0/src/transaction/initiator_addr_and_secret_key.rs new file mode 100644 index 00000000..d503e0a8 --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/initiator_addr_and_secret_key.rs @@ -0,0 +1,40 @@ +use crate::{InitiatorAddr, PublicKey, SecretKey}; + +/// Used when constructing a deploy or transaction. +#[derive(Debug)] +pub(super) enum InitiatorAddrAndSecretKey<'a> { + /// Provides both the initiator address and the secret key (not necessarily for the same + /// initiator address) used to sign the deploy or transaction. + Both { + /// The initiator address of the account. + initiator_addr: InitiatorAddr, + /// The secret key used to sign the deploy or transaction. + secret_key: &'a SecretKey, + }, + /// The initiator address only (no secret key). The deploy or transaction will be created + /// unsigned. + InitiatorAddr(InitiatorAddr), + /// The initiator address will be derived from the provided secret key, and the deploy or + /// transaction will be signed by the same secret key. + SecretKey(&'a SecretKey), +} + +impl<'a> InitiatorAddrAndSecretKey<'a> { + pub fn initiator_addr(&self) -> InitiatorAddr { + match self { + InitiatorAddrAndSecretKey::Both { initiator_addr, .. } + | InitiatorAddrAndSecretKey::InitiatorAddr(initiator_addr) => initiator_addr.clone(), + InitiatorAddrAndSecretKey::SecretKey(secret_key) => { + InitiatorAddr::PublicKey(PublicKey::from(*secret_key)) + } + } + } + + pub fn secret_key(&self) -> Option<&SecretKey> { + match self { + InitiatorAddrAndSecretKey::Both { secret_key, .. 
} + | InitiatorAddrAndSecretKey::SecretKey(secret_key) => Some(secret_key), + InitiatorAddrAndSecretKey::InitiatorAddr(_) => None, + } + } +} diff --git a/casper_types_ver_2_0/src/transaction/package_identifier.rs b/casper_types_ver_2_0/src/transaction/package_identifier.rs new file mode 100644 index 00000000..29cdb623 --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/package_identifier.rs @@ -0,0 +1,191 @@ +use alloc::{string::String, vec::Vec}; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use hex_fmt::HexFmt; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + EntityVersion, PackageHash, +}; +#[cfg(doc)] +use crate::{ExecutableDeployItem, TransactionTarget}; + +const HASH_TAG: u8 = 0; +const NAME_TAG: u8 = 1; + +/// Identifier for the package object within a [`TransactionTarget::Stored`] or an +/// [`ExecutableDeployItem`]. +#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars( + description = "Identifier for the package object within a `Stored` transaction target or \ + an `ExecutableDeployItem`." + ) +)] +pub enum PackageIdentifier { + /// The hash and optional version identifying the contract package. + Hash { + /// The hash of the contract package. + package_hash: PackageHash, + /// The version of the contract package. + /// + /// `None` implies latest version. + version: Option, + }, + /// The name and optional version identifying the contract package. + Name { + /// The name of the contract package. + name: String, + /// The version of the contract package. 
+ /// + /// `None` implies latest version. + version: Option, + }, +} + +impl PackageIdentifier { + /// Returns the optional version of the contract package. + /// + /// `None` implies latest version. + pub fn version(&self) -> Option { + match self { + PackageIdentifier::Hash { version, .. } | PackageIdentifier::Name { version, .. } => { + *version + } + } + } + + /// Returns a random `PackageIdentifier`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + let version = rng.gen::().then(|| rng.gen::()); + if rng.gen() { + PackageIdentifier::Hash { + package_hash: PackageHash::new(rng.gen()), + version, + } + } else { + PackageIdentifier::Name { + name: rng.random_string(1..21), + version, + } + } + } +} + +impl Display for PackageIdentifier { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + PackageIdentifier::Hash { + package_hash: contract_package_hash, + version: Some(ver), + } => write!( + formatter, + "package-id({}, version {})", + HexFmt(contract_package_hash), + ver + ), + PackageIdentifier::Hash { + package_hash: contract_package_hash, + .. + } => write!( + formatter, + "package-id({}, latest)", + HexFmt(contract_package_hash), + ), + PackageIdentifier::Name { + name, + version: Some(ver), + } => write!(formatter, "package-id({}, version {})", name, ver), + PackageIdentifier::Name { name, .. 
} => { + write!(formatter, "package-id({}, latest)", name) + } + } + } +} + +impl ToBytes for PackageIdentifier { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + PackageIdentifier::Hash { + package_hash, + version, + } => { + HASH_TAG.write_bytes(writer)?; + package_hash.write_bytes(writer)?; + version.write_bytes(writer) + } + PackageIdentifier::Name { name, version } => { + NAME_TAG.write_bytes(writer)?; + name.write_bytes(writer)?; + version.write_bytes(writer) + } + } + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + PackageIdentifier::Hash { + package_hash, + version, + } => package_hash.serialized_length() + version.serialized_length(), + PackageIdentifier::Name { name, version } => { + name.serialized_length() + version.serialized_length() + } + } + } +} + +impl FromBytes for PackageIdentifier { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + HASH_TAG => { + let (package_hash, remainder) = PackageHash::from_bytes(remainder)?; + let (version, remainder) = Option::::from_bytes(remainder)?; + let id = PackageIdentifier::Hash { + package_hash, + version, + }; + Ok((id, remainder)) + } + NAME_TAG => { + let (name, remainder) = String::from_bytes(remainder)?; + let (version, remainder) = Option::::from_bytes(remainder)?; + let id = PackageIdentifier::Name { name, version }; + Ok((id, remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + bytesrepr::test_serialization_roundtrip(&PackageIdentifier::random(rng)); + } +} diff --git a/casper_types_ver_2_0/src/transaction/pricing_mode.rs 
b/casper_types_ver_2_0/src/transaction/pricing_mode.rs new file mode 100644 index 00000000..97304f03 --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/pricing_mode.rs @@ -0,0 +1,121 @@ +use alloc::vec::Vec; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(doc)] +use super::Transaction; +use crate::bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; + +const GAS_PRICE_MULTIPLIER_TAG: u8 = 0; +const FIXED_TAG: u8 = 1; +const RESERVED_TAG: u8 = 2; + +/// The pricing mode of a [`Transaction`]. +#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "Pricing mode of a Transaction.") +)] +#[serde(deny_unknown_fields)] +pub enum PricingMode { + /// Multiplies the gas used by the given amount. + /// + /// This is the same behaviour as for the `Deploy::gas_price`. + GasPriceMultiplier(u64), + /// First-in-first-out handling of transactions, i.e. pricing mode is irrelevant to ordering. + Fixed, + /// The payment for this transaction was previously reserved. + Reserved, +} + +impl PricingMode { + /// Returns a random `PricingMode. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..3) { + 0 => PricingMode::GasPriceMultiplier(rng.gen()), + 1 => PricingMode::Fixed, + 2 => PricingMode::Reserved, + _ => unreachable!(), + } + } +} + +impl Display for PricingMode { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + PricingMode::GasPriceMultiplier(multiplier) => { + write!(formatter, "gas price multiplier {}", multiplier) + } + PricingMode::Fixed => write!(formatter, "fixed pricing"), + PricingMode::Reserved => write!(formatter, "reserved"), + } + } +} + +impl ToBytes for PricingMode { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + PricingMode::GasPriceMultiplier(multiplier) => { + GAS_PRICE_MULTIPLIER_TAG.write_bytes(writer)?; + multiplier.write_bytes(writer) + } + PricingMode::Fixed => FIXED_TAG.write_bytes(writer), + PricingMode::Reserved => RESERVED_TAG.write_bytes(writer), + } + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + PricingMode::GasPriceMultiplier(multiplier) => multiplier.serialized_length(), + PricingMode::Fixed | PricingMode::Reserved => 0, + } + } +} + +impl FromBytes for PricingMode { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + GAS_PRICE_MULTIPLIER_TAG => { + let (multiplier, remainder) = u64::from_bytes(remainder)?; + Ok((PricingMode::GasPriceMultiplier(multiplier), remainder)) + } + FIXED_TAG => Ok((PricingMode::Fixed, remainder)), + RESERVED_TAG => Ok((PricingMode::Reserved, remainder)), + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + 
let rng = &mut TestRng::new(); + for _ in 0..10 { + bytesrepr::test_serialization_roundtrip(&PricingMode::random(rng)); + } + } +} diff --git a/casper_types_ver_2_0/src/transaction/runtime_args.rs b/casper_types_ver_2_0/src/transaction/runtime_args.rs new file mode 100644 index 00000000..fd8d4dd8 --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/runtime_args.rs @@ -0,0 +1,388 @@ +//! Home of RuntimeArgs for calling contracts + +use alloc::{collections::BTreeMap, string::String, vec::Vec}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::{Rng, RngCore}; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(any(feature = "testing", test))] +use crate::{bytesrepr::Bytes, testing::TestRng}; +use crate::{ + bytesrepr::{self, Error, FromBytes, ToBytes}, + CLType, CLTyped, CLValue, CLValueError, U512, +}; +/// Named arguments to a contract. +#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct NamedArg(String, CLValue); + +impl NamedArg { + /// Returns a new `NamedArg`. + pub fn new(name: String, value: CLValue) -> Self { + NamedArg(name, value) + } + + /// Returns the name of the named arg. + pub fn name(&self) -> &str { + &self.0 + } + + /// Returns the value of the named arg. + pub fn cl_value(&self) -> &CLValue { + &self.1 + } + + /// Returns a mutable reference to the value of the named arg. 
+ pub fn cl_value_mut(&mut self) -> &mut CLValue { + &mut self.1 + } +} + +impl From<(String, CLValue)> for NamedArg { + fn from((name, value): (String, CLValue)) -> NamedArg { + NamedArg(name, value) + } +} + +impl ToBytes for NamedArg { + fn to_bytes(&self) -> Result, Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + result.append(&mut self.0.to_bytes()?); + result.append(&mut self.1.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + self.1.serialized_length() + } +} + +impl FromBytes for NamedArg { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (name, remainder) = String::from_bytes(bytes)?; + let (cl_value, remainder) = CLValue::from_bytes(remainder)?; + Ok((NamedArg(name, cl_value), remainder)) + } +} + +/// Represents a collection of arguments passed to a smart contract. +#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize, Debug, Default)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct RuntimeArgs(Vec); + +impl RuntimeArgs { + /// Create an empty [`RuntimeArgs`] instance. + pub fn new() -> RuntimeArgs { + RuntimeArgs::default() + } + + /// A wrapper that lets you easily and safely create runtime arguments. + /// + /// This method is useful when you have to construct a [`RuntimeArgs`] with multiple entries, + /// but error handling at given call site would require to have a match statement for each + /// [`RuntimeArgs::insert`] call. With this method you can use ? operator inside the closure and + /// then handle single result. When `try_block` will be stabilized this method could be + /// deprecated in favor of using those blocks. + pub fn try_new(func: F) -> Result + where + F: FnOnce(&mut RuntimeArgs) -> Result<(), CLValueError>, + { + let mut runtime_args = RuntimeArgs::new(); + func(&mut runtime_args)?; + Ok(runtime_args) + } + + /// Gets an argument by its name. 
+ pub fn get(&self, name: &str) -> Option<&CLValue> { + self.0.iter().find_map(|NamedArg(named_name, named_value)| { + if named_name == name { + Some(named_value) + } else { + None + } + }) + } + + /// Gets the length of the collection. + pub fn len(&self) -> usize { + self.0.len() + } + + /// Returns `true` if the collection of arguments is empty. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Inserts a new named argument into the collection. + pub fn insert(&mut self, key: K, value: V) -> Result<(), CLValueError> + where + K: Into, + V: CLTyped + ToBytes, + { + let cl_value = CLValue::from_t(value)?; + self.0.push(NamedArg(key.into(), cl_value)); + Ok(()) + } + + /// Inserts a new named argument into the collection. + pub fn insert_cl_value(&mut self, key: K, cl_value: CLValue) + where + K: Into, + { + self.0.push(NamedArg(key.into(), cl_value)); + } + + /// Returns all the values of the named args. + pub fn to_values(&self) -> Vec<&CLValue> { + self.0.iter().map(|NamedArg(_name, value)| value).collect() + } + + /// Returns an iterator of references over all arguments in insertion order. + pub fn named_args(&self) -> impl Iterator { + self.0.iter() + } + + /// Returns an iterator of mutable references over all arguments in insertion order. + pub fn named_args_mut(&mut self) -> impl Iterator { + self.0.iter_mut() + } + + /// Returns the numeric value of `name` arg from the runtime arguments or defaults to + /// 0 if that arg doesn't exist or is not an integer type. + /// + /// Supported [`CLType`]s for numeric conversions are U64, and U512. + /// + /// Returns an error if parsing the arg fails. 
+ pub fn try_get_number(&self, name: &str) -> Result { + let amount_arg = match self.get(name) { + None => return Ok(U512::zero()), + Some(arg) => arg, + }; + match amount_arg.cl_type() { + CLType::U512 => amount_arg.clone().into_t::(), + CLType::U64 => amount_arg.clone().into_t::().map(U512::from), + _ => Ok(U512::zero()), + } + } + + /// Returns a random `RuntimeArgs`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + fn random_bytes(rng: &mut TestRng) -> Bytes { + let mut buffer = vec![0u8; rng.gen_range(0..100)]; + rng.fill_bytes(buffer.as_mut()); + Bytes::from(buffer) + } + + let count = rng.gen_range(0..6); + let mut args = RuntimeArgs::new(); + for _ in 0..count { + let key = rng.random_string(1..21); + let value = random_bytes(rng); + let _ = args.insert(key, value); + } + args + } +} + +impl From> for RuntimeArgs { + fn from(values: Vec) -> Self { + RuntimeArgs(values) + } +} + +impl From> for RuntimeArgs { + fn from(cl_values: BTreeMap) -> RuntimeArgs { + RuntimeArgs(cl_values.into_iter().map(NamedArg::from).collect()) + } +} + +impl From for BTreeMap { + fn from(args: RuntimeArgs) -> BTreeMap { + let mut map = BTreeMap::new(); + for named in args.0 { + map.insert(named.0, named.1); + } + map + } +} + +impl ToBytes for RuntimeArgs { + fn to_bytes(&self) -> Result, Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for RuntimeArgs { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (args, remainder) = Vec::::from_bytes(bytes)?; + Ok((RuntimeArgs(args), remainder)) + } +} + +/// Macro that makes it easier to construct named arguments. +/// +/// NOTE: This macro does not propagate possible errors that could occur while creating a +/// [`CLValue`]. For such cases creating [`RuntimeArgs`] manually is recommended. 
+/// +/// # Example usage +/// ``` +/// use casper_types_ver_2_0::runtime_args; +/// let _named_args = runtime_args! { +/// "foo" => 42, +/// "bar" => "Hello, world!" +/// }; +/// ``` +#[macro_export] +macro_rules! runtime_args { + () => ($crate::RuntimeArgs::new()); + ( $($key:expr => $value:expr,)+ ) => (runtime_args!($($key => $value),+)); + ( $($key:expr => $value:expr),* ) => { + { + let mut named_args = $crate::RuntimeArgs::new(); + $( + named_args.insert($key, $value).unwrap(); + )* + named_args + } + }; +} + +#[cfg(test)] +mod tests { + use super::*; + + const ARG_AMOUNT: &str = "amount"; + + #[test] + fn test_runtime_args() { + let arg1 = CLValue::from_t(1).unwrap(); + let arg2 = CLValue::from_t("Foo").unwrap(); + let arg3 = CLValue::from_t(Some(1)).unwrap(); + let args = { + let mut map = BTreeMap::new(); + map.insert("bar".into(), arg2.clone()); + map.insert("foo".into(), arg1.clone()); + map.insert("qwer".into(), arg3.clone()); + map + }; + let runtime_args = RuntimeArgs::from(args); + assert_eq!(runtime_args.get("qwer"), Some(&arg3)); + assert_eq!(runtime_args.get("foo"), Some(&arg1)); + assert_eq!(runtime_args.get("bar"), Some(&arg2)); + assert_eq!(runtime_args.get("aaa"), None); + + // Ensure macro works + + let runtime_args_2 = runtime_args! { + "bar" => "Foo", + "foo" => 1i32, + "qwer" => Some(1i32), + }; + assert_eq!(runtime_args, runtime_args_2); + } + + #[test] + fn empty_macro() { + assert_eq!(runtime_args! {}, RuntimeArgs::new()); + } + + #[test] + fn btreemap_compat() { + // This test assumes same serialization format as BTreeMap + let runtime_args_1 = runtime_args! 
{ + "bar" => "Foo", + "foo" => 1i32, + "qwer" => Some(1i32), + }; + let tagless = runtime_args_1.to_bytes().unwrap().to_vec(); + + let mut runtime_args_2 = BTreeMap::new(); + runtime_args_2.insert(String::from("bar"), CLValue::from_t("Foo").unwrap()); + runtime_args_2.insert(String::from("foo"), CLValue::from_t(1i32).unwrap()); + runtime_args_2.insert(String::from("qwer"), CLValue::from_t(Some(1i32)).unwrap()); + + assert_eq!(tagless, runtime_args_2.to_bytes().unwrap()); + } + + #[test] + fn named_serialization_roundtrip() { + let args = runtime_args! { + "foo" => 1i32, + }; + bytesrepr::test_serialization_roundtrip(&args); + } + + #[test] + fn should_create_args_with() { + let res = RuntimeArgs::try_new(|runtime_args| { + runtime_args.insert(String::from("foo"), 123)?; + runtime_args.insert(String::from("bar"), 456)?; + Ok(()) + }); + + let expected = runtime_args! { + "foo" => 123, + "bar" => 456, + }; + assert!(matches!(res, Ok(args) if expected == args)); + } + + #[test] + fn try_get_number_should_work() { + let mut args = RuntimeArgs::new(); + args.insert(ARG_AMOUNT, 0u64).expect("is ok"); + assert_eq!(args.try_get_number(ARG_AMOUNT).unwrap(), U512::zero()); + + let mut args = RuntimeArgs::new(); + args.insert(ARG_AMOUNT, U512::zero()).expect("is ok"); + assert_eq!(args.try_get_number(ARG_AMOUNT).unwrap(), U512::zero()); + + let args = RuntimeArgs::new(); + assert_eq!(args.try_get_number(ARG_AMOUNT).unwrap(), U512::zero()); + + let hundred = 100u64; + + let mut args = RuntimeArgs::new(); + let input = U512::from(hundred); + args.insert(ARG_AMOUNT, input).expect("is ok"); + assert_eq!(args.try_get_number(ARG_AMOUNT).unwrap(), input); + + let mut args = RuntimeArgs::new(); + args.insert(ARG_AMOUNT, hundred).expect("is ok"); + assert_eq!( + args.try_get_number(ARG_AMOUNT).unwrap(), + U512::from(hundred) + ); + } + + #[test] + fn try_get_number_should_return_zero_for_non_numeric_type() { + let mut args = RuntimeArgs::new(); + args.insert(ARG_AMOUNT, 
"Non-numeric-string").unwrap(); + assert_eq!( + args.try_get_number(ARG_AMOUNT).expect("should get amount"), + U512::zero() + ); + } + + #[test] + fn try_get_number_should_return_zero_if_amount_is_missing() { + let args = RuntimeArgs::new(); + assert_eq!( + args.try_get_number(ARG_AMOUNT).expect("should get amount"), + U512::zero() + ); + } +} diff --git a/casper_types_ver_2_0/src/transaction/transaction_approvals_hash.rs b/casper_types_ver_2_0/src/transaction/transaction_approvals_hash.rs new file mode 100644 index 00000000..ed11ee42 --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/transaction_approvals_hash.rs @@ -0,0 +1,110 @@ +use alloc::vec::Vec; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +#[cfg(doc)] +use super::TransactionV1; +use super::{DeployApprovalsHash, TransactionV1ApprovalsHash}; +use crate::bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}; + +const DEPLOY_TAG: u8 = 0; +const V1_TAG: u8 = 1; + +/// A versioned wrapper for a transaction approvals hash or deploy approvals hash. +#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub enum TransactionApprovalsHash { + /// A deploy approvals hash. + Deploy(DeployApprovalsHash), + /// A version 1 transaction approvals hash. 
+ #[serde(rename = "Version1")] + V1(TransactionV1ApprovalsHash), +} + +impl From for TransactionApprovalsHash { + fn from(hash: DeployApprovalsHash) -> Self { + Self::Deploy(hash) + } +} + +impl From for TransactionApprovalsHash { + fn from(hash: TransactionV1ApprovalsHash) -> Self { + Self::V1(hash) + } +} + +impl Display for TransactionApprovalsHash { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + TransactionApprovalsHash::Deploy(hash) => Display::fmt(hash, formatter), + TransactionApprovalsHash::V1(hash) => Display::fmt(hash, formatter), + } + } +} + +impl ToBytes for TransactionApprovalsHash { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + TransactionApprovalsHash::Deploy(hash) => { + DEPLOY_TAG.write_bytes(writer)?; + hash.write_bytes(writer) + } + TransactionApprovalsHash::V1(hash) => { + V1_TAG.write_bytes(writer)?; + hash.write_bytes(writer) + } + } + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + TransactionApprovalsHash::Deploy(hash) => hash.serialized_length(), + TransactionApprovalsHash::V1(hash) => hash.serialized_length(), + } + } +} + +impl FromBytes for TransactionApprovalsHash { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + DEPLOY_TAG => { + let (hash, remainder) = DeployApprovalsHash::from_bytes(remainder)?; + Ok((TransactionApprovalsHash::Deploy(hash), remainder)) + } + V1_TAG => { + let (hash, remainder) = TransactionV1ApprovalsHash::from_bytes(remainder)?; + Ok((TransactionApprovalsHash::V1(hash), remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + 
let rng = &mut TestRng::new(); + + let hash = TransactionApprovalsHash::from(DeployApprovalsHash::random(rng)); + bytesrepr::test_serialization_roundtrip(&hash); + + let hash = TransactionApprovalsHash::from(TransactionV1ApprovalsHash::random(rng)); + bytesrepr::test_serialization_roundtrip(&hash); + } +} diff --git a/casper_types_ver_2_0/src/transaction/transaction_entry_point.rs b/casper_types_ver_2_0/src/transaction/transaction_entry_point.rs new file mode 100644 index 00000000..45e3afb1 --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/transaction_entry_point.rs @@ -0,0 +1,232 @@ +use alloc::{string::String, vec::Vec}; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(doc)] +use super::Transaction; +use crate::bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; + +const CUSTOM_TAG: u8 = 0; +const TRANSFER_TAG: u8 = 1; +const ADD_BID_TAG: u8 = 2; +const WITHDRAW_BID_TAG: u8 = 3; +const DELEGATE_TAG: u8 = 4; +const UNDELEGATE_TAG: u8 = 5; +const REDELEGATE_TAG: u8 = 6; + +/// The entry point of a [`Transaction`]. +#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "Entry point of a Transaction.") +)] +#[serde(deny_unknown_fields)] +pub enum TransactionEntryPoint { + /// A non-native, arbitrary entry point. + Custom(String), + /// The `transfer` native entry point, used to transfer `Motes` from a source purse to a target + /// purse. 
+ /// + /// Requires the following runtime args: + /// * "source": `URef` + /// * "target": `URef` + /// * "amount": `U512` + /// + /// The following optional runtime args can also be provided: + /// * "to": `Option` + /// * "id": `Option` + #[cfg_attr( + feature = "json-schema", + schemars( + description = "The `transfer` native entry point, used to transfer `Motes` from a \ + source purse to a target purse." + ) + )] + Transfer, + /// The `add_bid` native entry point, used to create or top off a bid purse. + /// + /// Requires the following runtime args: + /// * "public_key": `PublicKey` + /// * "delegation_rate": `u8` + /// * "amount": `U512` + #[cfg_attr( + feature = "json-schema", + schemars( + description = "The `add_bid` native entry point, used to create or top off a bid purse." + ) + )] + AddBid, + /// The `withdraw_bid` native entry point, used to decrease a stake. + /// + /// Requires the following runtime args: + /// * "public_key": `PublicKey` + /// * "amount": `U512` + #[cfg_attr( + feature = "json-schema", + schemars(description = "The `withdraw_bid` native entry point, used to decrease a stake.") + )] + WithdrawBid, + + /// The `delegate` native entry point, used to add a new delegator or increase an existing + /// delegator's stake. + /// + /// Requires the following runtime args: + /// * "delegator": `PublicKey` + /// * "validator": `PublicKey` + /// * "amount": `U512` + #[cfg_attr( + feature = "json-schema", + schemars( + description = "The `delegate` native entry point, used to add a new delegator or \ + increase an existing delegator's stake." + ) + )] + Delegate, + + /// The `undelegate` native entry point, used to reduce a delegator's stake or remove the + /// delegator if the remaining stake is 0. 
+ /// + /// Requires the following runtime args: + /// * "delegator": `PublicKey` + /// * "validator": `PublicKey` + /// * "amount": `U512` + #[cfg_attr( + feature = "json-schema", + schemars( + description = "The `undelegate` native entry point, used to reduce a delegator's \ + stake or remove the delegator if the remaining stake is 0." + ) + )] + Undelegate, + + /// The `redelegate` native entry point, used to reduce a delegator's stake or remove the + /// delegator if the remaining stake is 0, and after the unbonding delay, automatically + /// delegate to a new validator. + /// + /// Requires the following runtime args: + /// * "delegator": `PublicKey` + /// * "validator": `PublicKey` + /// * "amount": `U512` + /// * "new_validator": `PublicKey` + #[cfg_attr( + feature = "json-schema", + schemars( + description = "The `redelegate` native entry point, used to reduce a delegator's stake \ + or remove the delegator if the remaining stake is 0, and after the unbonding delay, \ + automatically delegate to a new validator." + ) + )] + Redelegate, +} + +impl TransactionEntryPoint { + /// Returns a random `TransactionEntryPoint`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..7) { + CUSTOM_TAG => TransactionEntryPoint::Custom(rng.random_string(1..21)), + TRANSFER_TAG => TransactionEntryPoint::Transfer, + ADD_BID_TAG => TransactionEntryPoint::AddBid, + WITHDRAW_BID_TAG => TransactionEntryPoint::WithdrawBid, + DELEGATE_TAG => TransactionEntryPoint::Delegate, + UNDELEGATE_TAG => TransactionEntryPoint::Undelegate, + REDELEGATE_TAG => TransactionEntryPoint::Redelegate, + _ => unreachable!(), + } + } +} + +impl Display for TransactionEntryPoint { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + TransactionEntryPoint::Custom(entry_point) => { + write!(formatter, "custom({entry_point})") + } + TransactionEntryPoint::Transfer => write!(formatter, "transfer"), + TransactionEntryPoint::AddBid => write!(formatter, "add_bid"), + TransactionEntryPoint::WithdrawBid => write!(formatter, "withdraw_bid"), + TransactionEntryPoint::Delegate => write!(formatter, "delegate"), + TransactionEntryPoint::Undelegate => write!(formatter, "undelegate"), + TransactionEntryPoint::Redelegate => write!(formatter, "redelegate"), + } + } +} + +impl ToBytes for TransactionEntryPoint { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + TransactionEntryPoint::Custom(entry_point) => { + CUSTOM_TAG.write_bytes(writer)?; + entry_point.write_bytes(writer) + } + TransactionEntryPoint::Transfer => TRANSFER_TAG.write_bytes(writer), + TransactionEntryPoint::AddBid => ADD_BID_TAG.write_bytes(writer), + TransactionEntryPoint::WithdrawBid => WITHDRAW_BID_TAG.write_bytes(writer), + TransactionEntryPoint::Delegate => DELEGATE_TAG.write_bytes(writer), + TransactionEntryPoint::Undelegate => UNDELEGATE_TAG.write_bytes(writer), + TransactionEntryPoint::Redelegate => REDELEGATE_TAG.write_bytes(writer), + } + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = 
bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + TransactionEntryPoint::Custom(entry_point) => entry_point.serialized_length(), + TransactionEntryPoint::Transfer + | TransactionEntryPoint::AddBid + | TransactionEntryPoint::WithdrawBid + | TransactionEntryPoint::Delegate + | TransactionEntryPoint::Undelegate + | TransactionEntryPoint::Redelegate => 0, + } + } +} + +impl FromBytes for TransactionEntryPoint { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + CUSTOM_TAG => { + let (entry_point, remainder) = String::from_bytes(remainder)?; + Ok((TransactionEntryPoint::Custom(entry_point), remainder)) + } + TRANSFER_TAG => Ok((TransactionEntryPoint::Transfer, remainder)), + ADD_BID_TAG => Ok((TransactionEntryPoint::AddBid, remainder)), + WITHDRAW_BID_TAG => Ok((TransactionEntryPoint::WithdrawBid, remainder)), + DELEGATE_TAG => Ok((TransactionEntryPoint::Delegate, remainder)), + UNDELEGATE_TAG => Ok((TransactionEntryPoint::Undelegate, remainder)), + REDELEGATE_TAG => Ok((TransactionEntryPoint::Redelegate, remainder)), + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + for _ in 0..10 { + bytesrepr::test_serialization_roundtrip(&TransactionEntryPoint::random(rng)); + } + } +} diff --git a/casper_types_ver_2_0/src/transaction/transaction_hash.rs b/casper_types_ver_2_0/src/transaction/transaction_hash.rs new file mode 100644 index 00000000..7f7d31f9 --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/transaction_hash.rs @@ -0,0 +1,143 @@ +use alloc::vec::Vec; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, 
Serialize}; + +#[cfg(doc)] +use super::TransactionV1; +use super::{DeployHash, TransactionV1Hash}; +use crate::bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}; + +#[cfg(any(feature = "testing", test))] +use rand::Rng; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; + +const DEPLOY_TAG: u8 = 0; +const V1_TAG: u8 = 1; + +/// A versioned wrapper for a transaction hash or deploy hash. +#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub enum TransactionHash { + /// A deploy hash. + Deploy(DeployHash), + /// A version 1 transaction hash. + #[serde(rename = "Version1")] + V1(TransactionV1Hash), +} + +impl TransactionHash { + /// Returns a random `TransactionHash`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..2) { + 0 => TransactionHash::from(DeployHash::random(rng)), + 1 => TransactionHash::from(TransactionV1Hash::random(rng)), + _ => panic!(), + } + } +} + +impl From for TransactionHash { + fn from(hash: DeployHash) -> Self { + Self::Deploy(hash) + } +} + +impl From<&DeployHash> for TransactionHash { + fn from(hash: &DeployHash) -> Self { + Self::from(*hash) + } +} + +impl From for TransactionHash { + fn from(hash: TransactionV1Hash) -> Self { + Self::V1(hash) + } +} + +impl From<&TransactionV1Hash> for TransactionHash { + fn from(hash: &TransactionV1Hash) -> Self { + Self::from(*hash) + } +} + +impl Display for TransactionHash { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + TransactionHash::Deploy(hash) => Display::fmt(hash, formatter), + TransactionHash::V1(hash) => Display::fmt(hash, formatter), + } + } +} + +impl ToBytes for TransactionHash { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + 
TransactionHash::Deploy(hash) => { + DEPLOY_TAG.write_bytes(writer)?; + hash.write_bytes(writer) + } + TransactionHash::V1(hash) => { + V1_TAG.write_bytes(writer)?; + hash.write_bytes(writer) + } + } + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + TransactionHash::Deploy(hash) => hash.serialized_length(), + TransactionHash::V1(hash) => hash.serialized_length(), + } + } +} + +impl FromBytes for TransactionHash { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + DEPLOY_TAG => { + let (hash, remainder) = DeployHash::from_bytes(remainder)?; + Ok((TransactionHash::Deploy(hash), remainder)) + } + V1_TAG => { + let (hash, remainder) = TransactionV1Hash::from_bytes(remainder)?; + Ok((TransactionHash::V1(hash), remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let hash = TransactionHash::from(DeployHash::random(rng)); + bytesrepr::test_serialization_roundtrip(&hash); + + let hash = TransactionHash::from(TransactionV1Hash::random(rng)); + bytesrepr::test_serialization_roundtrip(&hash); + } +} diff --git a/casper_types_ver_2_0/src/transaction/transaction_header.rs b/casper_types_ver_2_0/src/transaction/transaction_header.rs new file mode 100644 index 00000000..d1a864bb --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/transaction_header.rs @@ -0,0 +1,116 @@ +use alloc::vec::Vec; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +#[cfg(any(feature = "std", test))] +use serde::{Deserialize, Serialize}; + 
+use super::{DeployHeader, TransactionV1Header}; +use crate::bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}; + +const DEPLOY_TAG: u8 = 0; +const V1_TAG: u8 = 1; + +/// A versioned wrapper for a transaction header or deploy header. +#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Debug)] +#[cfg_attr( + any(feature = "std", test), + derive(Serialize, Deserialize), + serde(deny_unknown_fields) +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum TransactionHeader { + /// A deploy header. + Deploy(DeployHeader), + /// A version 1 transaction header. + #[cfg_attr(any(feature = "std", test), serde(rename = "Version1"))] + V1(TransactionV1Header), +} + +impl From for TransactionHeader { + fn from(hash: DeployHeader) -> Self { + Self::Deploy(hash) + } +} + +impl From for TransactionHeader { + fn from(hash: TransactionV1Header) -> Self { + Self::V1(hash) + } +} + +impl Display for TransactionHeader { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + TransactionHeader::Deploy(hash) => Display::fmt(hash, formatter), + TransactionHeader::V1(hash) => Display::fmt(hash, formatter), + } + } +} + +impl ToBytes for TransactionHeader { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + TransactionHeader::Deploy(header) => { + DEPLOY_TAG.write_bytes(writer)?; + header.write_bytes(writer) + } + TransactionHeader::V1(header) => { + V1_TAG.write_bytes(writer)?; + header.write_bytes(writer) + } + } + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + TransactionHeader::Deploy(header) => header.serialized_length(), + TransactionHeader::V1(header) => header.serialized_length(), + } + } +} + +impl FromBytes for TransactionHeader { + fn 
from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + DEPLOY_TAG => { + let (header, remainder) = DeployHeader::from_bytes(remainder)?; + Ok((TransactionHeader::Deploy(header), remainder)) + } + V1_TAG => { + let (header, remainder) = TransactionV1Header::from_bytes(remainder)?; + Ok((TransactionHeader::V1(header), remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{testing::TestRng, Deploy, TransactionV1}; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let header = TransactionHeader::from(Deploy::random(rng).take_header()); + bytesrepr::test_serialization_roundtrip(&header); + + let header = TransactionHeader::from(TransactionV1::random(rng).take_header()); + bytesrepr::test_serialization_roundtrip(&header); + } +} diff --git a/casper_types_ver_2_0/src/transaction/transaction_id.rs b/casper_types_ver_2_0/src/transaction/transaction_id.rs new file mode 100644 index 00000000..8f9569b9 --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/transaction_id.rs @@ -0,0 +1,197 @@ +use alloc::vec::Vec; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +use serde::{Deserialize, Serialize}; + +#[cfg(doc)] +use super::Transaction; +use super::{ + DeployApprovalsHash, DeployHash, TransactionApprovalsHash, TransactionHash, + TransactionV1ApprovalsHash, TransactionV1Hash, +}; +use crate::bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; + +const DEPLOY_TAG: u8 = 0; +const V1_TAG: u8 = 1; + +/// The unique identifier of a [`Transaction`], comprising its [`TransactionHash`] and +/// [`TransactionApprovalsHash`]. 
+#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub enum TransactionId { + /// A deploy identifier. + Deploy { + /// The deploy hash. + deploy_hash: DeployHash, + /// The deploy's approvals hash. + approvals_hash: DeployApprovalsHash, + }, + /// A version 1 transaction identifier. + #[serde(rename = "Version1")] + V1 { + /// The transaction hash. + transaction_v1_hash: TransactionV1Hash, + /// The transaction's approvals hash. + approvals_hash: TransactionV1ApprovalsHash, + }, +} + +impl TransactionId { + /// Returns a new `TransactionId::Deploy`. + pub fn new_deploy(deploy_hash: DeployHash, approvals_hash: DeployApprovalsHash) -> Self { + TransactionId::Deploy { + deploy_hash, + approvals_hash, + } + } + + /// Returns a new `TransactionId::V1`. + pub fn new_v1( + transaction_v1_hash: TransactionV1Hash, + approvals_hash: TransactionV1ApprovalsHash, + ) -> Self { + TransactionId::V1 { + transaction_v1_hash, + approvals_hash, + } + } + + /// Returns the transaction hash. + pub fn transaction_hash(&self) -> TransactionHash { + match self { + TransactionId::Deploy { deploy_hash, .. } => TransactionHash::from(*deploy_hash), + TransactionId::V1 { + transaction_v1_hash, + .. + } => TransactionHash::from(*transaction_v1_hash), + } + } + + /// Returns the approvals hash. + pub fn approvals_hash(&self) -> TransactionApprovalsHash { + match self { + TransactionId::Deploy { approvals_hash, .. } => { + TransactionApprovalsHash::from(*approvals_hash) + } + TransactionId::V1 { approvals_hash, .. } => { + TransactionApprovalsHash::from(*approvals_hash) + } + } + } + + /// Returns a random `TransactionId`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + if rng.gen() { + return TransactionId::new_deploy( + DeployHash::random(rng), + DeployApprovalsHash::random(rng), + ); + } + TransactionId::new_v1( + TransactionV1Hash::random(rng), + TransactionV1ApprovalsHash::random(rng), + ) + } +} + +impl Display for TransactionId { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "transaction-id({}, {})", + self.transaction_hash(), + self.approvals_hash() + ) + } +} + +impl ToBytes for TransactionId { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + TransactionId::Deploy { + deploy_hash, + approvals_hash, + } => { + DEPLOY_TAG.write_bytes(writer)?; + deploy_hash.write_bytes(writer)?; + approvals_hash.write_bytes(writer) + } + TransactionId::V1 { + transaction_v1_hash, + approvals_hash, + } => { + V1_TAG.write_bytes(writer)?; + transaction_v1_hash.write_bytes(writer)?; + approvals_hash.write_bytes(writer) + } + } + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + TransactionId::Deploy { + deploy_hash, + approvals_hash, + } => deploy_hash.serialized_length() + approvals_hash.serialized_length(), + TransactionId::V1 { + transaction_v1_hash, + approvals_hash, + } => transaction_v1_hash.serialized_length() + approvals_hash.serialized_length(), + } + } +} + +impl FromBytes for TransactionId { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + DEPLOY_TAG => { + let (deploy_hash, remainder) = DeployHash::from_bytes(remainder)?; + let (approvals_hash, remainder) = DeployApprovalsHash::from_bytes(remainder)?; + let id = TransactionId::Deploy { + deploy_hash, + approvals_hash, + }; + Ok((id, 
remainder)) + } + V1_TAG => { + let (transaction_v1_hash, remainder) = TransactionV1Hash::from_bytes(remainder)?; + let (approvals_hash, remainder) = + TransactionV1ApprovalsHash::from_bytes(remainder)?; + let id = TransactionId::V1 { + transaction_v1_hash, + approvals_hash, + }; + Ok((id, remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let id = TransactionId::random(rng); + bytesrepr::test_serialization_roundtrip(&id); + } +} diff --git a/casper_types_ver_2_0/src/transaction/transaction_invocation_target.rs b/casper_types_ver_2_0/src/transaction/transaction_invocation_target.rs new file mode 100644 index 00000000..c9a322f3 --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/transaction_invocation_target.rs @@ -0,0 +1,303 @@ +use alloc::{string::String, vec::Vec}; +use core::fmt::{self, Debug, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use hex_fmt::HexFmt; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use super::AddressableEntityIdentifier; +#[cfg(doc)] +use super::TransactionTarget; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + serde_helpers, AddressableEntityHash, EntityAddr, EntityVersion, PackageAddr, PackageHash, + PackageIdentifier, +}; + +const INVOCABLE_ENTITY_TAG: u8 = 0; +const INVOCABLE_ENTITY_ALIAS_TAG: u8 = 1; +const PACKAGE_TAG: u8 = 2; +const PACKAGE_ALIAS_TAG: u8 = 3; + +/// The identifier of a [`TransactionTarget::Stored`]. 
+#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "Identifier of a `Stored` transaction target.") +)] +#[serde(deny_unknown_fields)] +pub enum TransactionInvocationTarget { + /// The address identifying the invocable entity. + #[serde(with = "serde_helpers::raw_32_byte_array")] + #[cfg_attr( + feature = "json-schema", + schemars( + with = "String", + description = "Hex-encoded entity address identifying the invocable entity." + ) + )] + InvocableEntity(EntityAddr), // currently needs to be of contract tag variant + /// The alias identifying the invocable entity. + InvocableEntityAlias(String), + /// The address and optional version identifying the package. + Package { + /// The package address. + #[serde(with = "serde_helpers::raw_32_byte_array")] + #[cfg_attr( + feature = "json-schema", + schemars(with = "String", description = "Hex-encoded address of the package.") + )] + addr: PackageAddr, + /// The package version. + /// + /// If `None`, the latest enabled version is implied. + version: Option, + }, + /// The alias and optional version identifying the package. + PackageAlias { + /// The package alias. + alias: String, + /// The package version. + /// + /// If `None`, the latest enabled version is implied. + version: Option, + }, +} + +impl TransactionInvocationTarget { + /// Returns a new `TransactionInvocationTarget::InvocableEntity`. + pub fn new_invocable_entity(addr: EntityAddr) -> Self { + TransactionInvocationTarget::InvocableEntity(addr) + } + + /// Returns a new `TransactionInvocationTarget::InvocableEntityAlias`. + pub fn new_invocable_entity_alias(alias: String) -> Self { + TransactionInvocationTarget::InvocableEntityAlias(alias) + } + + /// Returns a new `TransactionInvocationTarget::Package`. 
+ pub fn new_package(addr: PackageAddr, version: Option) -> Self { + TransactionInvocationTarget::Package { addr, version } + } + + /// Returns a new `TransactionInvocationTarget::PackageAlias`. + pub fn new_package_alias(alias: String, version: Option) -> Self { + TransactionInvocationTarget::PackageAlias { alias, version } + } + + /// Returns the identifier of the addressable entity, if present. + pub fn addressable_entity_identifier(&self) -> Option { + match self { + TransactionInvocationTarget::InvocableEntity(addr) => Some( + AddressableEntityIdentifier::Hash(AddressableEntityHash::new(*addr)), + ), + TransactionInvocationTarget::InvocableEntityAlias(alias) => { + Some(AddressableEntityIdentifier::Name(alias.clone())) + } + TransactionInvocationTarget::Package { .. } + | TransactionInvocationTarget::PackageAlias { .. } => None, + } + } + + /// Returns the identifier of the contract package, if present. + pub fn package_identifier(&self) -> Option { + match self { + TransactionInvocationTarget::InvocableEntity(_) + | TransactionInvocationTarget::InvocableEntityAlias(_) => None, + TransactionInvocationTarget::Package { addr, version } => { + Some(PackageIdentifier::Hash { + package_hash: PackageHash::new(*addr), + version: *version, + }) + } + TransactionInvocationTarget::PackageAlias { alias, version } => { + Some(PackageIdentifier::Name { + name: alias.clone(), + version: *version, + }) + } + } + } + + /// Returns a random `TransactionInvocationTarget`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..4) { + INVOCABLE_ENTITY_TAG => TransactionInvocationTarget::InvocableEntity(rng.gen()), + INVOCABLE_ENTITY_ALIAS_TAG => { + TransactionInvocationTarget::InvocableEntityAlias(rng.random_string(1..21)) + } + PACKAGE_TAG => TransactionInvocationTarget::Package { + addr: rng.gen(), + version: rng.gen::().then(|| rng.gen::()), + }, + PACKAGE_ALIAS_TAG => TransactionInvocationTarget::PackageAlias { + alias: rng.random_string(1..21), + version: rng.gen::().then(|| rng.gen::()), + }, + _ => unreachable!(), + } + } +} + +impl Display for TransactionInvocationTarget { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + TransactionInvocationTarget::InvocableEntity(addr) => { + write!(formatter, "invocable-entity({:10})", HexFmt(addr)) + } + TransactionInvocationTarget::InvocableEntityAlias(alias) => { + write!(formatter, "invocable-entity({})", alias) + } + TransactionInvocationTarget::Package { + addr, + version: Some(ver), + } => { + write!(formatter, "package({:10}, version {})", HexFmt(addr), ver) + } + TransactionInvocationTarget::Package { + addr, + version: None, + } => { + write!(formatter, "package({:10}, latest)", HexFmt(addr)) + } + TransactionInvocationTarget::PackageAlias { + alias, + version: Some(ver), + } => { + write!(formatter, "package({}, version {})", alias, ver) + } + TransactionInvocationTarget::PackageAlias { + alias, + version: None, + } => { + write!(formatter, "package({}, latest)", alias) + } + } + } +} + +impl Debug for TransactionInvocationTarget { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + TransactionInvocationTarget::InvocableEntity(addr) => formatter + .debug_tuple("InvocableEntity") + .field(&HexFmt(addr)) + .finish(), + TransactionInvocationTarget::InvocableEntityAlias(alias) => formatter + .debug_tuple("InvocableEntityAlias") + .field(alias) + .finish(), + 
TransactionInvocationTarget::Package { addr, version } => formatter + .debug_struct("Package") + .field("addr", &HexFmt(addr)) + .field("version", version) + .finish(), + TransactionInvocationTarget::PackageAlias { alias, version } => formatter + .debug_struct("PackageAlias") + .field("alias", alias) + .field("version", version) + .finish(), + } + } +} + +impl ToBytes for TransactionInvocationTarget { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + TransactionInvocationTarget::InvocableEntity(addr) => { + INVOCABLE_ENTITY_TAG.write_bytes(writer)?; + addr.write_bytes(writer) + } + TransactionInvocationTarget::InvocableEntityAlias(alias) => { + INVOCABLE_ENTITY_ALIAS_TAG.write_bytes(writer)?; + alias.write_bytes(writer) + } + TransactionInvocationTarget::Package { addr, version } => { + PACKAGE_TAG.write_bytes(writer)?; + addr.write_bytes(writer)?; + version.write_bytes(writer) + } + TransactionInvocationTarget::PackageAlias { alias, version } => { + PACKAGE_ALIAS_TAG.write_bytes(writer)?; + alias.write_bytes(writer)?; + version.write_bytes(writer) + } + } + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + TransactionInvocationTarget::InvocableEntity(addr) => addr.serialized_length(), + TransactionInvocationTarget::InvocableEntityAlias(alias) => { + alias.serialized_length() + } + TransactionInvocationTarget::Package { addr, version } => { + addr.serialized_length() + version.serialized_length() + } + TransactionInvocationTarget::PackageAlias { alias, version } => { + alias.serialized_length() + version.serialized_length() + } + } + } +} + +impl FromBytes for TransactionInvocationTarget { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + 
INVOCABLE_ENTITY_TAG => { + let (addr, remainder) = EntityAddr::from_bytes(remainder)?; + let target = TransactionInvocationTarget::InvocableEntity(addr); + Ok((target, remainder)) + } + INVOCABLE_ENTITY_ALIAS_TAG => { + let (alias, remainder) = String::from_bytes(remainder)?; + let target = TransactionInvocationTarget::InvocableEntityAlias(alias); + Ok((target, remainder)) + } + PACKAGE_TAG => { + let (addr, remainder) = PackageAddr::from_bytes(remainder)?; + let (version, remainder) = Option::::from_bytes(remainder)?; + let target = TransactionInvocationTarget::Package { addr, version }; + Ok((target, remainder)) + } + PACKAGE_ALIAS_TAG => { + let (alias, remainder) = String::from_bytes(remainder)?; + let (version, remainder) = Option::::from_bytes(remainder)?; + let target = TransactionInvocationTarget::PackageAlias { alias, version }; + Ok((target, remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + for _ in 0..10 { + bytesrepr::test_serialization_roundtrip(&TransactionInvocationTarget::random(rng)); + } + } +} diff --git a/casper_types_ver_2_0/src/transaction/transaction_runtime.rs b/casper_types_ver_2_0/src/transaction/transaction_runtime.rs new file mode 100644 index 00000000..c1fac1ed --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/transaction_runtime.rs @@ -0,0 +1,73 @@ +use alloc::vec::Vec; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(doc)] +use super::Transaction; +use crate::bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}; + +/// The runtime used to execute a [`Transaction`]. 
+#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "Runtime used to execute a Transaction.") +)] +#[serde(deny_unknown_fields)] +#[repr(u8)] +pub enum TransactionRuntime { + /// The Casper Version 1 Virtual Machine. + VmCasperV1, +} + +impl Display for TransactionRuntime { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + TransactionRuntime::VmCasperV1 => write!(formatter, "vm-casper-v1"), + } + } +} + +impl ToBytes for TransactionRuntime { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + (*self as u8).write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + } +} + +impl FromBytes for TransactionRuntime { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + v if v == TransactionRuntime::VmCasperV1 as u8 => { + Ok((TransactionRuntime::VmCasperV1, remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + bytesrepr::test_serialization_roundtrip(&TransactionRuntime::VmCasperV1); + } +} diff --git a/casper_types_ver_2_0/src/transaction/transaction_scheduling.rs b/casper_types_ver_2_0/src/transaction/transaction_scheduling.rs new file mode 100644 index 00000000..381d358e --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/transaction_scheduling.rs @@ -0,0 +1,133 @@ +use alloc::vec::Vec; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = 
"json-schema")] +use schemars::JsonSchema; +#[cfg(any(feature = "std", test))] +use serde::{Deserialize, Serialize}; + +#[cfg(doc)] +use super::Transaction; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + EraId, Timestamp, +}; + +const STANDARD_TAG: u8 = 0; +const FUTURE_ERA_TAG: u8 = 1; +const FUTURE_TIMESTAMP_TAG: u8 = 2; + +/// The scheduling mode of a [`Transaction`]. +#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Debug)] +#[cfg_attr( + any(feature = "std", test), + derive(Serialize, Deserialize), + serde(deny_unknown_fields) +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "Scheduling mode of a Transaction.") +)] +pub enum TransactionScheduling { + /// No special scheduling applied. + Standard, + /// Execution should be scheduled for the specified era. + FutureEra(EraId), + /// Execution should be scheduled for the specified timestamp or later. + FutureTimestamp(Timestamp), +} + +impl TransactionScheduling { + /// Returns a random `TransactionScheduling`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..3) { + STANDARD_TAG => TransactionScheduling::Standard, + FUTURE_ERA_TAG => TransactionScheduling::FutureEra(EraId::random(rng)), + FUTURE_TIMESTAMP_TAG => TransactionScheduling::FutureTimestamp(Timestamp::random(rng)), + _ => unreachable!(), + } + } +} + +impl Display for TransactionScheduling { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + TransactionScheduling::Standard => write!(formatter, "schedule(standard)"), + TransactionScheduling::FutureEra(era_id) => write!(formatter, "schedule({})", era_id), + TransactionScheduling::FutureTimestamp(timestamp) => { + write!(formatter, "schedule({})", timestamp) + } + } + } +} + +impl ToBytes for TransactionScheduling { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + TransactionScheduling::Standard => STANDARD_TAG.write_bytes(writer), + TransactionScheduling::FutureEra(era_id) => { + FUTURE_ERA_TAG.write_bytes(writer)?; + era_id.write_bytes(writer) + } + TransactionScheduling::FutureTimestamp(timestamp) => { + FUTURE_TIMESTAMP_TAG.write_bytes(writer)?; + timestamp.write_bytes(writer) + } + } + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + TransactionScheduling::Standard => 0, + TransactionScheduling::FutureEra(era_id) => era_id.serialized_length(), + TransactionScheduling::FutureTimestamp(timestamp) => timestamp.serialized_length(), + } + } +} + +impl FromBytes for TransactionScheduling { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + STANDARD_TAG => Ok((TransactionScheduling::Standard, remainder)), + FUTURE_ERA_TAG => { + let (era_id, remainder) = 
EraId::from_bytes(remainder)?; + Ok((TransactionScheduling::FutureEra(era_id), remainder)) + } + FUTURE_TIMESTAMP_TAG => { + let (timestamp, remainder) = Timestamp::from_bytes(remainder)?; + Ok((TransactionScheduling::FutureTimestamp(timestamp), remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + for _ in 0..10 { + bytesrepr::test_serialization_roundtrip(&TransactionScheduling::random(rng)); + } + } +} diff --git a/casper_types_ver_2_0/src/transaction/transaction_session_kind.rs b/casper_types_ver_2_0/src/transaction/transaction_session_kind.rs new file mode 100644 index 00000000..eabe065a --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/transaction_session_kind.rs @@ -0,0 +1,118 @@ +use alloc::vec::Vec; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(doc)] +use super::Transaction; +use crate::bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; + +/// The session kind of a [`Transaction`]. +#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "Session kind of a Transaction.") +)] +#[serde(deny_unknown_fields)] +#[repr(u8)] +pub enum TransactionSessionKind { + /// A standard (non-special-case) session. + /// + /// This kind of session is not allowed to install or upgrade a stored contract, but can call + /// stored contracts. + Standard = 0, + /// A session which installs a stored contract. 
+ Installer = 1, + /// A session which upgrades a previously-installed stored contract. Such a session must have + /// "package_id: PackageIdentifier" runtime arg present. + Upgrader = 2, + /// A session which doesn't call any stored contracts. + /// + /// This kind of session is not allowed to install or upgrade a stored contract. + Isolated = 3, +} + +impl TransactionSessionKind { + /// Returns a random `TransactionSessionKind`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..4) { + v if v == TransactionSessionKind::Standard as u8 => TransactionSessionKind::Standard, + v if v == TransactionSessionKind::Installer as u8 => TransactionSessionKind::Installer, + v if v == TransactionSessionKind::Upgrader as u8 => TransactionSessionKind::Upgrader, + v if v == TransactionSessionKind::Isolated as u8 => TransactionSessionKind::Isolated, + _ => unreachable!(), + } + } +} + +impl Display for TransactionSessionKind { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + TransactionSessionKind::Standard => write!(formatter, "standard"), + TransactionSessionKind::Installer => write!(formatter, "installer"), + TransactionSessionKind::Upgrader => write!(formatter, "upgrader"), + TransactionSessionKind::Isolated => write!(formatter, "isolated"), + } + } +} + +impl ToBytes for TransactionSessionKind { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + (*self as u8).write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + } +} + +impl FromBytes for TransactionSessionKind { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + v if v == TransactionSessionKind::Standard as u8 => { + 
Ok((TransactionSessionKind::Standard, remainder)) + } + v if v == TransactionSessionKind::Installer as u8 => { + Ok((TransactionSessionKind::Installer, remainder)) + } + v if v == TransactionSessionKind::Upgrader as u8 => { + Ok((TransactionSessionKind::Upgrader, remainder)) + } + v if v == TransactionSessionKind::Isolated as u8 => { + Ok((TransactionSessionKind::Isolated, remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + for _ in 0..10 { + bytesrepr::test_serialization_roundtrip(&TransactionSessionKind::random(rng)); + } + } +} diff --git a/casper_types_ver_2_0/src/transaction/transaction_target.rs b/casper_types_ver_2_0/src/transaction/transaction_target.rs new file mode 100644 index 00000000..76516f6e --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/transaction_target.rs @@ -0,0 +1,236 @@ +use alloc::vec::Vec; +use core::fmt::{self, Debug, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::{Rng, RngCore}; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(doc)] +use super::Transaction; +use super::{TransactionInvocationTarget, TransactionRuntime, TransactionSessionKind}; +use crate::bytesrepr::{self, Bytes, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; + +const NATIVE_TAG: u8 = 0; +const STORED_TAG: u8 = 1; +const SESSION_TAG: u8 = 2; + +/// The execution target of a [`Transaction`]. 
+#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "Execution target of a Transaction.") +)] +#[serde(deny_unknown_fields)] +pub enum TransactionTarget { + /// The execution target is a native operation (e.g. a transfer). + Native, + /// The execution target is a stored entity or package. + Stored { + /// The identifier of the stored execution target. + id: TransactionInvocationTarget, + /// The execution runtime to use. + runtime: TransactionRuntime, + }, + /// The execution target is the included module bytes, i.e. compiled Wasm. + Session { + /// The kind of session. + kind: TransactionSessionKind, + /// The compiled Wasm. + module_bytes: Bytes, + /// The execution runtime to use. + runtime: TransactionRuntime, + }, +} + +impl TransactionTarget { + /// Returns a new `TransactionTarget::Native`. + pub fn new_native() -> Self { + TransactionTarget::Native + } + + /// Returns a new `TransactionTarget::Stored`. + pub fn new_stored(id: TransactionInvocationTarget, runtime: TransactionRuntime) -> Self { + TransactionTarget::Stored { id, runtime } + } + + /// Returns a new `TransactionTarget::Session`. + pub fn new_session( + kind: TransactionSessionKind, + module_bytes: Bytes, + runtime: TransactionRuntime, + ) -> Self { + TransactionTarget::Session { + kind, + module_bytes, + runtime, + } + } + + /// Returns a random `TransactionTarget`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..3) { + NATIVE_TAG => TransactionTarget::Native, + STORED_TAG => TransactionTarget::new_stored( + TransactionInvocationTarget::random(rng), + TransactionRuntime::VmCasperV1, + ), + SESSION_TAG => { + let mut buffer = vec![0u8; rng.gen_range(0..100)]; + rng.fill_bytes(buffer.as_mut()); + TransactionTarget::new_session( + TransactionSessionKind::random(rng), + Bytes::from(buffer), + TransactionRuntime::VmCasperV1, + ) + } + _ => unreachable!(), + } + } +} + +impl Display for TransactionTarget { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + TransactionTarget::Native => write!(formatter, "native"), + TransactionTarget::Stored { id, runtime } => { + write!(formatter, "stored({}, {})", id, runtime) + } + TransactionTarget::Session { + kind, + module_bytes, + runtime, + } => write!( + formatter, + "session({}, {} module bytes, {})", + kind, + module_bytes.len(), + runtime + ), + } + } +} + +impl Debug for TransactionTarget { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + match self { + TransactionTarget::Native => formatter.debug_struct("Native").finish(), + TransactionTarget::Stored { id, runtime } => formatter + .debug_struct("Stored") + .field("id", id) + .field("runtime", runtime) + .finish(), + TransactionTarget::Session { + kind, + module_bytes, + runtime, + } => { + struct BytesLen(usize); + impl Debug for BytesLen { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!(formatter, "{} bytes", self.0) + } + } + + formatter + .debug_struct("Session") + .field("kind", kind) + .field("module_bytes", &BytesLen(module_bytes.len())) + .field("runtime", runtime) + .finish() + } + } + } +} + +impl ToBytes for TransactionTarget { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + TransactionTarget::Native => NATIVE_TAG.write_bytes(writer), + 
TransactionTarget::Stored { id, runtime } => { + STORED_TAG.write_bytes(writer)?; + id.write_bytes(writer)?; + runtime.write_bytes(writer) + } + TransactionTarget::Session { + kind, + module_bytes, + runtime, + } => { + SESSION_TAG.write_bytes(writer)?; + kind.write_bytes(writer)?; + module_bytes.write_bytes(writer)?; + runtime.write_bytes(writer) + } + } + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + TransactionTarget::Native => 0, + TransactionTarget::Stored { id, runtime } => { + id.serialized_length() + runtime.serialized_length() + } + TransactionTarget::Session { + kind, + module_bytes, + runtime, + } => { + kind.serialized_length() + + module_bytes.serialized_length() + + runtime.serialized_length() + } + } + } +} + +impl FromBytes for TransactionTarget { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + NATIVE_TAG => Ok((TransactionTarget::Native, remainder)), + STORED_TAG => { + let (id, remainder) = TransactionInvocationTarget::from_bytes(remainder)?; + let (runtime, remainder) = TransactionRuntime::from_bytes(remainder)?; + let target = TransactionTarget::new_stored(id, runtime); + Ok((target, remainder)) + } + SESSION_TAG => { + let (kind, remainder) = TransactionSessionKind::from_bytes(remainder)?; + let (module_bytes, remainder) = Bytes::from_bytes(remainder)?; + let (runtime, remainder) = TransactionRuntime::from_bytes(remainder)?; + let target = TransactionTarget::new_session(kind, module_bytes, runtime); + Ok((target, remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + for _ in 0..10 { + 
bytesrepr::test_serialization_roundtrip(&TransactionTarget::random(rng)); + } + } +} diff --git a/casper_types_ver_2_0/src/transaction/transaction_v1.rs b/casper_types_ver_2_0/src/transaction/transaction_v1.rs new file mode 100644 index 00000000..b8bb9f7f --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/transaction_v1.rs @@ -0,0 +1,809 @@ +mod errors_v1; +mod finalized_transaction_v1_approvals; +mod transaction_v1_approval; +mod transaction_v1_approvals_hash; +mod transaction_v1_body; +#[cfg(any(feature = "std", test))] +mod transaction_v1_builder; +mod transaction_v1_hash; +mod transaction_v1_header; + +#[cfg(any(feature = "std", test))] +use alloc::string::ToString; +use alloc::{collections::BTreeSet, vec::Vec}; +use core::{ + cmp, + fmt::{self, Debug, Display, Formatter}, + hash, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "once_cell", test))] +use once_cell::sync::OnceCell; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +#[cfg(any(feature = "std", test))] +use serde::{Deserialize, Serialize}; +use tracing::debug; + +#[cfg(any(feature = "std", test))] +use super::InitiatorAddrAndSecretKey; +use super::{ + InitiatorAddr, PricingMode, TransactionEntryPoint, TransactionScheduling, TransactionTarget, +}; +#[cfg(any(all(feature = "std", feature = "testing"), test))] +use crate::testing::TestRng; +#[cfg(any(feature = "std", test))] +use crate::TransactionConfig; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + crypto, Digest, DisplayIter, RuntimeArgs, SecretKey, TimeDiff, Timestamp, +}; +pub use errors_v1::{ + DecodeFromJsonErrorV1 as TransactionV1DecodeFromJsonError, ErrorV1 as TransactionV1Error, + ExcessiveSizeErrorV1 as TransactionV1ExcessiveSizeError, TransactionV1ConfigFailure, +}; +pub use finalized_transaction_v1_approvals::FinalizedTransactionV1Approvals; +pub use transaction_v1_approval::TransactionV1Approval; +pub use transaction_v1_approvals_hash::TransactionV1ApprovalsHash; +pub use 
transaction_v1_body::TransactionV1Body; +#[cfg(any(feature = "std", test))] +pub use transaction_v1_builder::{TransactionV1Builder, TransactionV1BuilderError}; +pub use transaction_v1_hash::TransactionV1Hash; +pub use transaction_v1_header::TransactionV1Header; + +/// A unit of work sent by a client to the network, which when executed can cause global state to +/// be altered. +/// +/// To construct a new `TransactionV1`, use a [`TransactionV1Builder`]. +#[derive(Clone, Eq, Debug)] +#[cfg_attr( + any(feature = "std", test), + derive(Serialize, Deserialize), + serde(deny_unknown_fields) +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars( + description = "A unit of work sent by a client to the network, which when executed can \ + cause global state to be altered." + ) +)] +pub struct TransactionV1 { + hash: TransactionV1Hash, + header: TransactionV1Header, + body: TransactionV1Body, + approvals: BTreeSet, + #[cfg_attr(any(all(feature = "std", feature = "once_cell"), test), serde(skip))] + #[cfg_attr( + all(any(feature = "once_cell", test), feature = "datasize"), + data_size(skip) + )] + #[cfg(any(feature = "once_cell", test))] + is_verified: OnceCell>, +} + +impl TransactionV1 { + /// Called by the `TransactionBuilder` to construct a new `TransactionV1`. 
+ #[cfg(any(feature = "std", test))] + pub(super) fn build( + chain_name: String, + timestamp: Timestamp, + ttl: TimeDiff, + body: TransactionV1Body, + pricing_mode: PricingMode, + payment_amount: Option, + initiator_addr_and_secret_key: InitiatorAddrAndSecretKey, + ) -> TransactionV1 { + let initiator_addr = initiator_addr_and_secret_key.initiator_addr(); + let body_hash = Digest::hash( + body.to_bytes() + .unwrap_or_else(|error| panic!("should serialize body: {}", error)), + ); + let header = TransactionV1Header::new( + chain_name, + timestamp, + ttl, + body_hash, + pricing_mode, + payment_amount, + initiator_addr, + ); + + let hash = header.compute_hash(); + let mut transaction = TransactionV1 { + hash, + header, + body, + approvals: BTreeSet::new(), + #[cfg(any(feature = "once_cell", test))] + is_verified: OnceCell::new(), + }; + + if let Some(secret_key) = initiator_addr_and_secret_key.secret_key() { + transaction.sign(secret_key); + } + transaction + } + + /// Returns the hash identifying this transaction. + pub fn hash(&self) -> &TransactionV1Hash { + &self.hash + } + + /// Returns the name of the chain the transaction should be executed on. + pub fn chain_name(&self) -> &str { + self.header.chain_name() + } + + /// Returns the creation timestamp of the transaction. + pub fn timestamp(&self) -> Timestamp { + self.header.timestamp() + } + + /// Returns the duration after the creation timestamp for which the transaction will stay valid. + /// + /// After this duration has ended, the transaction will be considered expired. + pub fn ttl(&self) -> TimeDiff { + self.header.ttl() + } + + /// Returns `true` if the transaction has expired. + pub fn expired(&self, current_instant: Timestamp) -> bool { + self.header.expired(current_instant) + } + + /// Returns the pricing mode for the transaction. + pub fn pricing_mode(&self) -> &PricingMode { + self.header.pricing_mode() + } + + /// Returns the payment amount for the transaction. 
+ pub fn payment_amount(&self) -> Option { + self.header.payment_amount() + } + + /// Returns the address of the initiator of the transaction. + pub fn initiator_addr(&self) -> &InitiatorAddr { + self.header.initiator_addr() + } + + /// Returns a reference to the header of this transaction. + pub fn header(&self) -> &TransactionV1Header { + &self.header + } + + /// Consumes `self`, returning the header of this transaction. + pub fn take_header(self) -> TransactionV1Header { + self.header + } + + /// Returns the runtime args of the transaction. + pub fn args(&self) -> &RuntimeArgs { + self.body.args() + } + + /// Returns the target of the transaction. + pub fn target(&self) -> &TransactionTarget { + self.body.target() + } + + /// Returns the entry point of the transaction. + pub fn entry_point(&self) -> &TransactionEntryPoint { + self.body.entry_point() + } + + /// Returns the scheduling kind of the transaction. + pub fn scheduling(&self) -> &TransactionScheduling { + self.body.scheduling() + } + + /// Returns the body of this transaction. + pub fn body(&self) -> &TransactionV1Body { + &self.body + } + + /// Returns the approvals for this transaction. + pub fn approvals(&self) -> &BTreeSet { + &self.approvals + } + + /// Adds a signature of this transaction's hash to its approvals. + pub fn sign(&mut self, secret_key: &SecretKey) { + let approval = TransactionV1Approval::create(&self.hash, secret_key); + self.approvals.insert(approval); + } + + /// Returns the `TransactionV1ApprovalsHash` of this transaction's approvals. + pub fn compute_approvals_hash(&self) -> Result { + TransactionV1ApprovalsHash::compute(&self.approvals) + } + + /// Returns `true` if the serialized size of the transaction is not greater than + /// `max_transaction_size`. 
+ #[cfg(any(feature = "std", test))] + fn is_valid_size( + &self, + max_transaction_size: u32, + ) -> Result<(), TransactionV1ExcessiveSizeError> { + let actual_transaction_size = self.serialized_length(); + if actual_transaction_size > max_transaction_size as usize { + return Err(TransactionV1ExcessiveSizeError { + max_transaction_size, + actual_transaction_size, + }); + } + Ok(()) + } + + /// Returns `Ok` if and only if this transaction's body hashes to the value of `body_hash()`, + /// and if this transaction's header hashes to the value claimed as the transaction hash. + pub fn has_valid_hash(&self) -> Result<(), TransactionV1ConfigFailure> { + let body_hash = Digest::hash( + self.body + .to_bytes() + .unwrap_or_else(|error| panic!("should serialize body: {}", error)), + ); + if body_hash != *self.header.body_hash() { + debug!(?self, ?body_hash, "invalid transaction body hash"); + return Err(TransactionV1ConfigFailure::InvalidBodyHash); + } + + let hash = TransactionV1Hash::new(Digest::hash( + self.header + .to_bytes() + .unwrap_or_else(|error| panic!("should serialize header: {}", error)), + )); + if hash != self.hash { + debug!(?self, ?hash, "invalid transaction hash"); + return Err(TransactionV1ConfigFailure::InvalidTransactionHash); + } + Ok(()) + } + + /// Returns `Ok` if and only if: + /// * the transaction hash is correct (see [`TransactionV1::has_valid_hash`] for details) + /// * approvals are non empty, and + /// * all approvals are valid signatures of the signed hash + pub fn verify(&self) -> Result<(), TransactionV1ConfigFailure> { + #[cfg(any(feature = "once_cell", test))] + return self.is_verified.get_or_init(|| self.do_verify()).clone(); + + #[cfg(not(any(feature = "once_cell", test)))] + self.do_verify() + } + + fn do_verify(&self) -> Result<(), TransactionV1ConfigFailure> { + if self.approvals.is_empty() { + debug!(?self, "transaction has no approvals"); + return Err(TransactionV1ConfigFailure::EmptyApprovals); + } + + self.has_valid_hash()?; + 
+ for (index, approval) in self.approvals.iter().enumerate() { + if let Err(error) = crypto::verify(self.hash, approval.signature(), approval.signer()) { + debug!( + ?self, + "failed to verify transaction approval {}: {}", index, error + ); + return Err(TransactionV1ConfigFailure::InvalidApproval { index, error }); + } + } + + Ok(()) + } + + /// Returns `Ok` if and only if: + /// * the chain_name is correct, + /// * the configured parameters are complied with at the given timestamp + #[cfg(any(feature = "std", test))] + pub fn is_config_compliant( + &self, + chain_name: &str, + config: &TransactionConfig, + max_associated_keys: u32, + timestamp_leeway: TimeDiff, + at: Timestamp, + ) -> Result<(), TransactionV1ConfigFailure> { + self.is_valid_size(config.max_transaction_size)?; + + let header = self.header(); + if header.chain_name() != chain_name { + debug!( + transaction_hash = %self.hash(), + transaction_header = %header, + chain_name = %header.chain_name(), + "invalid chain identifier" + ); + return Err(TransactionV1ConfigFailure::InvalidChainName { + expected: chain_name.to_string(), + got: header.chain_name().to_string(), + }); + } + + header.is_valid(config, timestamp_leeway, at, &self.hash)?; + + if self.approvals.len() > max_associated_keys as usize { + debug!( + transaction_hash = %self.hash(), + number_of_approvals = %self.approvals.len(), + max_associated_keys = %max_associated_keys, + "number of transaction approvals exceeds the limit" + ); + return Err(TransactionV1ConfigFailure::ExcessiveApprovals { + got: self.approvals.len() as u32, + max_associated_keys, + }); + } + + if let Some(payment) = self.payment_amount() { + if payment > config.block_gas_limit { + debug!( + amount = %payment, + block_gas_limit = %config.block_gas_limit, + "payment amount exceeds block gas limit" + ); + return Err(TransactionV1ConfigFailure::ExceedsBlockGasLimit { + block_gas_limit: config.block_gas_limit, + got: payment, + }); + } + } + + self.body.is_valid(config) + } + + 
// This method is not intended to be used by third party crates. + // + // It is required to allow finalized approvals to be injected after reading a transaction from + // storage. + #[doc(hidden)] + pub fn with_approvals(mut self, approvals: BTreeSet) -> Self { + self.approvals = approvals; + self + } + + /// Returns a random, valid but possibly expired transaction. + /// + /// Note that the [`TransactionV1Builder`] can be used to create a random transaction with + /// more specific values. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random(rng: &mut TestRng) -> Self { + TransactionV1Builder::new_random(rng).build().unwrap() + } + + /// Turns `self` into an invalid transaction by clearing the `chain_name`, invalidating the + /// transaction header hash. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn invalidate(&mut self) { + self.header.invalidate(); + } + + /// Used by the `TestTransactionV1Builder` to inject invalid approvals for testing purposes. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub(super) fn apply_approvals(&mut self, approvals: Vec) { + self.approvals.extend(approvals); + } +} + +impl hash::Hash for TransactionV1 { + fn hash(&self, state: &mut H) { + // Destructure to make sure we don't accidentally omit fields. + let TransactionV1 { + hash, + header, + body, + approvals, + #[cfg(any(feature = "once_cell", test))] + is_verified: _, + } = self; + hash.hash(state); + header.hash(state); + body.hash(state); + approvals.hash(state); + } +} + +impl PartialEq for TransactionV1 { + fn eq(&self, other: &TransactionV1) -> bool { + // Destructure to make sure we don't accidentally omit fields. 
+ let TransactionV1 { + hash, + header, + body, + approvals, + #[cfg(any(feature = "once_cell", test))] + is_verified: _, + } = self; + *hash == other.hash + && *header == other.header + && *body == other.body + && *approvals == other.approvals + } +} + +impl Ord for TransactionV1 { + fn cmp(&self, other: &TransactionV1) -> cmp::Ordering { + // Destructure to make sure we don't accidentally omit fields. + let TransactionV1 { + hash, + header, + body, + approvals, + #[cfg(any(feature = "once_cell", test))] + is_verified: _, + } = self; + hash.cmp(&other.hash) + .then_with(|| header.cmp(&other.header)) + .then_with(|| body.cmp(&other.body)) + .then_with(|| approvals.cmp(&other.approvals)) + } +} + +impl PartialOrd for TransactionV1 { + fn partial_cmp(&self, other: &TransactionV1) -> Option { + Some(self.cmp(other)) + } +} + +impl ToBytes for TransactionV1 { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.hash.write_bytes(writer)?; + self.header.write_bytes(writer)?; + self.body.write_bytes(writer)?; + self.approvals.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.hash.serialized_length() + + self.header.serialized_length() + + self.body.serialized_length() + + self.approvals.serialized_length() + } +} + +impl FromBytes for TransactionV1 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (hash, remainder) = TransactionV1Hash::from_bytes(bytes)?; + let (header, remainder) = TransactionV1Header::from_bytes(remainder)?; + let (body, remainder) = TransactionV1Body::from_bytes(remainder)?; + let (approvals, remainder) = BTreeSet::::from_bytes(remainder)?; + let transaction = TransactionV1 { + hash, + header, + body, + approvals, + #[cfg(any(feature = "once_cell", test))] + is_verified: OnceCell::new(), + }; + Ok((transaction, 
remainder)) + } +} + +impl Display for TransactionV1 { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "transaction-v1[{}, {}, approvals: {}]", + self.header, + self.body, + DisplayIter::new(self.approvals.iter()) + ) + } +} + +#[cfg(test)] +mod tests { + use std::time::Duration; + + use super::*; + + const MAX_ASSOCIATED_KEYS: u32 = 5; + + #[test] + fn json_roundtrip() { + let rng = &mut TestRng::new(); + let transaction = TransactionV1::random(rng); + let json_string = serde_json::to_string_pretty(&transaction).unwrap(); + let decoded = serde_json::from_str(&json_string).unwrap(); + assert_eq!(transaction, decoded); + } + + #[test] + fn bincode_roundtrip() { + let rng = &mut TestRng::new(); + let transaction = TransactionV1::random(rng); + let serialized = bincode::serialize(&transaction).unwrap(); + let deserialized = bincode::deserialize(&serialized).unwrap(); + assert_eq!(transaction, deserialized); + } + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let transaction = TransactionV1::random(rng); + bytesrepr::test_serialization_roundtrip(transaction.header()); + bytesrepr::test_serialization_roundtrip(&transaction); + } + + #[test] + fn is_valid() { + let rng = &mut TestRng::new(); + let transaction = TransactionV1::random(rng); + assert_eq!( + transaction.is_verified.get(), + None, + "is_verified should initially be None" + ); + transaction.verify().expect("should verify"); + assert_eq!( + transaction.is_verified.get(), + Some(&Ok(())), + "is_verified should be true" + ); + } + + fn check_is_not_valid( + invalid_transaction: TransactionV1, + expected_error: TransactionV1ConfigFailure, + ) { + assert!( + invalid_transaction.is_verified.get().is_none(), + "is_verified should initially be None" + ); + let actual_error = invalid_transaction.verify().unwrap_err(); + + // Ignore the `error_msg` field of `InvalidApproval` when comparing to expected error, as + // this makes the test too fragile. 
Otherwise expect the actual error should exactly match + // the expected error. + match expected_error { + TransactionV1ConfigFailure::InvalidApproval { + index: expected_index, + .. + } => match actual_error { + TransactionV1ConfigFailure::InvalidApproval { + index: actual_index, + .. + } => { + assert_eq!(actual_index, expected_index); + } + _ => panic!("expected {}, got: {}", expected_error, actual_error), + }, + _ => { + assert_eq!(actual_error, expected_error,); + } + } + + // The actual error should have been lazily initialized correctly. + assert_eq!( + invalid_transaction.is_verified.get(), + Some(&Err(actual_error)), + "is_verified should now be Some" + ); + } + + #[test] + fn not_valid_due_to_invalid_transaction_hash() { + let rng = &mut TestRng::new(); + let mut transaction = TransactionV1::random(rng); + + transaction.invalidate(); + check_is_not_valid( + transaction, + TransactionV1ConfigFailure::InvalidTransactionHash, + ); + } + + #[test] + fn not_valid_due_to_empty_approvals() { + let rng = &mut TestRng::new(); + let transaction = TransactionV1Builder::new_random(rng) + .with_no_secret_key() + .build() + .unwrap(); + assert!(transaction.approvals.is_empty()); + check_is_not_valid(transaction, TransactionV1ConfigFailure::EmptyApprovals) + } + + #[test] + fn not_valid_due_to_invalid_approval() { + let rng = &mut TestRng::new(); + let transaction = TransactionV1Builder::new_random(rng) + .with_invalid_approval(rng) + .build() + .unwrap(); + + // The expected index for the invalid approval will be the first index at which there is an + // approval where the signer is not the account holder. 
+ let account_holder = match transaction.initiator_addr() { + InitiatorAddr::PublicKey(public_key) => public_key.clone(), + InitiatorAddr::AccountHash(_) | InitiatorAddr::EntityAddr(_) => unreachable!(), + }; + let expected_index = transaction + .approvals + .iter() + .enumerate() + .find(|(_, approval)| approval.signer() != &account_holder) + .map(|(index, _)| index) + .unwrap(); + check_is_not_valid( + transaction, + TransactionV1ConfigFailure::InvalidApproval { + index: expected_index, + error: crypto::Error::SignatureError, // This field is ignored in the check. + }, + ); + } + + #[test] + fn is_config_compliant() { + let rng = &mut TestRng::new(); + let chain_name = "net-1"; + let transaction = TransactionV1Builder::new_random(rng) + .with_chain_name(chain_name) + .build() + .unwrap(); + + let transaction_config = TransactionConfig::default(); + let current_timestamp = transaction.timestamp(); + transaction + .is_config_compliant( + chain_name, + &transaction_config, + MAX_ASSOCIATED_KEYS, + TimeDiff::default(), + current_timestamp, + ) + .expect("should be acceptable"); + } + + #[test] + fn not_acceptable_due_to_invalid_chain_name() { + let rng = &mut TestRng::new(); + let expected_chain_name = "net-1"; + let wrong_chain_name = "net-2"; + let transaction_config = TransactionConfig::default(); + + let transaction = TransactionV1Builder::new_random(rng) + .with_chain_name(wrong_chain_name) + .build() + .unwrap(); + + let expected_error = TransactionV1ConfigFailure::InvalidChainName { + expected: expected_chain_name.to_string(), + got: wrong_chain_name.to_string(), + }; + + let current_timestamp = transaction.timestamp(); + assert_eq!( + transaction.is_config_compliant( + expected_chain_name, + &transaction_config, + MAX_ASSOCIATED_KEYS, + TimeDiff::default(), + current_timestamp + ), + Err(expected_error) + ); + assert!( + transaction.is_verified.get().is_none(), + "transaction should not have run expensive `is_verified` call" + ); + } + + #[test] + fn 
not_acceptable_due_to_excessive_ttl() { + let rng = &mut TestRng::new(); + let chain_name = "net-1"; + let transaction_config = TransactionConfig::default(); + let ttl = transaction_config.max_ttl + TimeDiff::from(Duration::from_secs(1)); + let transaction = TransactionV1Builder::new_random(rng) + .with_ttl(ttl) + .with_chain_name(chain_name) + .build() + .unwrap(); + + let expected_error = TransactionV1ConfigFailure::ExcessiveTimeToLive { + max_ttl: transaction_config.max_ttl, + got: ttl, + }; + + let current_timestamp = transaction.timestamp(); + assert_eq!( + transaction.is_config_compliant( + chain_name, + &transaction_config, + MAX_ASSOCIATED_KEYS, + TimeDiff::default(), + current_timestamp + ), + Err(expected_error) + ); + assert!( + transaction.is_verified.get().is_none(), + "transaction should not have run expensive `is_verified` call" + ); + } + + #[test] + fn not_acceptable_due_to_timestamp_in_future() { + let rng = &mut TestRng::new(); + let chain_name = "net-1"; + let transaction_config = TransactionConfig::default(); + let leeway = TimeDiff::from_seconds(2); + + let transaction = TransactionV1Builder::new_random(rng) + .with_chain_name(chain_name) + .build() + .unwrap(); + let current_timestamp = transaction.timestamp() - leeway - TimeDiff::from_seconds(1); + + let expected_error = TransactionV1ConfigFailure::TimestampInFuture { + validation_timestamp: current_timestamp, + timestamp_leeway: leeway, + got: transaction.timestamp(), + }; + + assert_eq!( + transaction.is_config_compliant( + chain_name, + &transaction_config, + MAX_ASSOCIATED_KEYS, + leeway, + current_timestamp + ), + Err(expected_error) + ); + assert!( + transaction.is_verified.get().is_none(), + "transaction should not have run expensive `is_verified` call" + ); + } + + #[test] + fn not_acceptable_due_to_excessive_approvals() { + let rng = &mut TestRng::new(); + let chain_name = "net-1"; + let transaction_config = TransactionConfig::default(); + let mut transaction = 
TransactionV1Builder::new_random(rng) + .with_chain_name(chain_name) + .build() + .unwrap(); + + for _ in 0..MAX_ASSOCIATED_KEYS { + transaction.sign(&SecretKey::random(rng)); + } + + let current_timestamp = transaction.timestamp(); + + let expected_error = TransactionV1ConfigFailure::ExcessiveApprovals { + got: MAX_ASSOCIATED_KEYS + 1, + max_associated_keys: MAX_ASSOCIATED_KEYS, + }; + + assert_eq!( + transaction.is_config_compliant( + chain_name, + &transaction_config, + MAX_ASSOCIATED_KEYS, + TimeDiff::default(), + current_timestamp + ), + Err(expected_error) + ); + assert!( + transaction.is_verified.get().is_none(), + "transaction should not have run expensive `is_verified` call" + ); + } +} diff --git a/casper_types_ver_2_0/src/transaction/transaction_v1/errors_v1.rs b/casper_types_ver_2_0/src/transaction/transaction_v1/errors_v1.rs new file mode 100644 index 00000000..d41cedc0 --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/transaction_v1/errors_v1.rs @@ -0,0 +1,386 @@ +use alloc::string::String; +use core::{ + array::TryFromSliceError, + fmt::{self, Display, Formatter}, +}; +#[cfg(feature = "std")] +use std::error::Error as StdError; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use serde::Serialize; + +use super::super::TransactionEntryPoint; +#[cfg(doc)] +use super::TransactionV1; +use crate::{crypto, CLType, TimeDiff, Timestamp, U512}; + +/// Returned when a [`TransactionV1`] fails validation. +#[derive(Clone, Eq, PartialEq, Debug)] +#[cfg_attr(feature = "std", derive(Serialize))] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[non_exhaustive] +pub enum TransactionV1ConfigFailure { + /// Invalid chain name. + InvalidChainName { + /// The expected chain name. + expected: String, + /// The transaction's chain name. + got: String, + }, + + /// Transaction is too large. + ExcessiveSize(ExcessiveSizeErrorV1), + + /// Excessive time-to-live. + ExcessiveTimeToLive { + /// The time-to-live limit. 
+ max_ttl: TimeDiff, + /// The transaction's time-to-live. + got: TimeDiff, + }, + + /// Transaction's timestamp is in the future. + TimestampInFuture { + /// The node's timestamp when validating the transaction. + validation_timestamp: Timestamp, + /// Any configured leeway added to `validation_timestamp`. + timestamp_leeway: TimeDiff, + /// The transaction's timestamp. + got: Timestamp, + }, + + /// The provided body hash does not match the actual hash of the body. + InvalidBodyHash, + + /// The provided transaction hash does not match the actual hash of the transaction. + InvalidTransactionHash, + + /// The transaction has no approvals. + EmptyApprovals, + + /// Invalid approval. + InvalidApproval { + /// The index of the approval at fault. + index: usize, + /// The approval verification error. + error: crypto::Error, + }, + + /// Excessive length of transaction's runtime args. + ExcessiveArgsLength { + /// The byte size limit of runtime arguments. + max_length: usize, + /// The length of the transaction's runtime arguments. + got: usize, + }, + + /// The amount of approvals on the transaction exceeds the configured limit. + ExcessiveApprovals { + /// The chainspec limit for max_associated_keys. + max_associated_keys: u32, + /// Number of approvals on the transaction. + got: u32, + }, + + /// The payment amount associated with the transaction exceeds the block gas limit. + ExceedsBlockGasLimit { + /// Configured block gas limit. + block_gas_limit: u64, + /// The payment amount received. + got: u64, + }, + + /// Missing a required runtime arg. + MissingArg { + /// The name of the missing arg. + arg_name: String, + }, + + /// Given runtime arg is not expected type. + UnexpectedArgType { + /// The name of the invalid arg. + arg_name: String, + /// The expected type for the given runtime arg. + expected: CLType, + /// The provided type of the given runtime arg. + got: CLType, + }, + + /// Insufficient transfer amount. 
+ InsufficientTransferAmount { + /// The minimum transfer amount. + minimum: u64, + /// The attempted transfer amount. + attempted: U512, + }, + + /// The entry point for this transaction target cannot be `TransactionEntryPoint::Custom`. + EntryPointCannotBeCustom { + /// The invalid entry point. + entry_point: TransactionEntryPoint, + }, + + /// The entry point for this transaction target must be `TransactionEntryPoint::Custom`. + EntryPointMustBeCustom { + /// The invalid entry point. + entry_point: TransactionEntryPoint, + }, + + /// The transaction has empty module bytes. + EmptyModuleBytes, +} + +impl Display for TransactionV1ConfigFailure { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + TransactionV1ConfigFailure::InvalidChainName { expected, got } => { + write!( + formatter, + "invalid chain name: expected {expected}, got {got}" + ) + } + TransactionV1ConfigFailure::ExcessiveSize(error) => { + write!(formatter, "transaction size too large: {error}") + } + TransactionV1ConfigFailure::ExcessiveTimeToLive { max_ttl, got } => { + write!( + formatter, + "time-to-live of {got} exceeds limit of {max_ttl}" + ) + } + TransactionV1ConfigFailure::TimestampInFuture { + validation_timestamp, + timestamp_leeway, + got, + } => { + write!( + formatter, + "timestamp of {got} is later than node's validation timestamp of \ + {validation_timestamp} plus leeway of {timestamp_leeway}" + ) + } + TransactionV1ConfigFailure::InvalidBodyHash => { + write!( + formatter, + "the provided hash does not match the actual hash of the transaction body" + ) + } + TransactionV1ConfigFailure::InvalidTransactionHash => { + write!( + formatter, + "the provided hash does not match the actual hash of the transaction" + ) + } + TransactionV1ConfigFailure::EmptyApprovals => { + write!(formatter, "the transaction has no approvals") + } + TransactionV1ConfigFailure::InvalidApproval { index, error } => { + write!( + formatter, + "the transaction approval at index 
{index} is invalid: {error}" + ) + } + TransactionV1ConfigFailure::ExcessiveArgsLength { max_length, got } => { + write!( + formatter, + "serialized transaction runtime args of {got} bytes exceeds limit of \ + {max_length} bytes" + ) + } + TransactionV1ConfigFailure::ExcessiveApprovals { + max_associated_keys, + got, + } => { + write!( + formatter, + "number of transaction approvals {got} exceeds the maximum number of \ + associated keys {max_associated_keys}", + ) + } + TransactionV1ConfigFailure::ExceedsBlockGasLimit { + block_gas_limit, + got, + } => { + write!( + formatter, + "payment amount of {got} exceeds the block gas limit of {block_gas_limit}" + ) + } + TransactionV1ConfigFailure::MissingArg { arg_name } => { + write!(formatter, "missing required runtime argument '{arg_name}'") + } + TransactionV1ConfigFailure::UnexpectedArgType { + arg_name, + expected, + got, + } => { + write!( + formatter, + "expected type of '{arg_name}' runtime argument to be {expected}, but got {got}" + ) + } + TransactionV1ConfigFailure::InsufficientTransferAmount { minimum, attempted } => { + write!( + formatter, + "insufficient transfer amount; minimum: {minimum} attempted: {attempted}" + ) + } + TransactionV1ConfigFailure::EntryPointCannotBeCustom { entry_point } => { + write!(formatter, "entry point cannot be custom: {entry_point}") + } + TransactionV1ConfigFailure::EntryPointMustBeCustom { entry_point } => { + write!(formatter, "entry point must be custom: {entry_point}") + } + TransactionV1ConfigFailure::EmptyModuleBytes => { + write!(formatter, "the transaction has empty module bytes") + } + } + } +} + +impl From for TransactionV1ConfigFailure { + fn from(error: ExcessiveSizeErrorV1) -> Self { + TransactionV1ConfigFailure::ExcessiveSize(error) + } +} + +#[cfg(feature = "std")] +impl StdError for TransactionV1ConfigFailure { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + match self { + TransactionV1ConfigFailure::InvalidApproval { error, .. 
} => Some(error), + TransactionV1ConfigFailure::InvalidChainName { .. } + | TransactionV1ConfigFailure::ExcessiveSize(_) + | TransactionV1ConfigFailure::ExcessiveTimeToLive { .. } + | TransactionV1ConfigFailure::TimestampInFuture { .. } + | TransactionV1ConfigFailure::InvalidBodyHash + | TransactionV1ConfigFailure::InvalidTransactionHash + | TransactionV1ConfigFailure::EmptyApprovals + | TransactionV1ConfigFailure::ExcessiveArgsLength { .. } + | TransactionV1ConfigFailure::ExcessiveApprovals { .. } + | TransactionV1ConfigFailure::ExceedsBlockGasLimit { .. } + | TransactionV1ConfigFailure::MissingArg { .. } + | TransactionV1ConfigFailure::UnexpectedArgType { .. } + | TransactionV1ConfigFailure::InsufficientTransferAmount { .. } + | TransactionV1ConfigFailure::EntryPointCannotBeCustom { .. } + | TransactionV1ConfigFailure::EntryPointMustBeCustom { .. } + | TransactionV1ConfigFailure::EmptyModuleBytes => None, + } + } +} + +/// Error returned when a transaction is too large. +#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Debug, Serialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct ExcessiveSizeErrorV1 { + /// The maximum permitted serialized transaction size, in bytes. + pub max_transaction_size: u32, + /// The serialized size of the transaction provided, in bytes. + pub actual_transaction_size: usize, +} + +impl Display for ExcessiveSizeErrorV1 { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "transaction size of {} bytes exceeds limit of {}", + self.actual_transaction_size, self.max_transaction_size + ) + } +} + +#[cfg(feature = "std")] +impl StdError for ExcessiveSizeErrorV1 {} + +/// Errors other than validation failures relating to Transactions. +#[derive(Debug)] +#[non_exhaustive] +pub enum ErrorV1 { + /// Error while encoding to JSON. + EncodeToJson(serde_json::Error), + + /// Error while decoding from JSON. 
+ DecodeFromJson(DecodeFromJsonErrorV1), +} + +impl From for ErrorV1 { + fn from(error: serde_json::Error) -> Self { + ErrorV1::EncodeToJson(error) + } +} + +impl From for ErrorV1 { + fn from(error: DecodeFromJsonErrorV1) -> Self { + ErrorV1::DecodeFromJson(error) + } +} + +impl Display for ErrorV1 { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + ErrorV1::EncodeToJson(error) => { + write!(formatter, "encoding to json: {}", error) + } + ErrorV1::DecodeFromJson(error) => { + write!(formatter, "decoding from json: {}", error) + } + } + } +} + +#[cfg(feature = "std")] +impl StdError for ErrorV1 { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + match self { + ErrorV1::EncodeToJson(error) => Some(error), + ErrorV1::DecodeFromJson(error) => Some(error), + } + } +} + +/// Error while decoding a `TransactionV1` from JSON. +#[derive(Debug)] +#[non_exhaustive] +pub enum DecodeFromJsonErrorV1 { + /// Failed to decode from base 16. + FromHex(base16::DecodeError), + + /// Failed to convert slice to array. 
+ TryFromSlice(TryFromSliceError), +} + +impl From for DecodeFromJsonErrorV1 { + fn from(error: base16::DecodeError) -> Self { + DecodeFromJsonErrorV1::FromHex(error) + } +} + +impl From for DecodeFromJsonErrorV1 { + fn from(error: TryFromSliceError) -> Self { + DecodeFromJsonErrorV1::TryFromSlice(error) + } +} + +impl Display for DecodeFromJsonErrorV1 { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + DecodeFromJsonErrorV1::FromHex(error) => { + write!(formatter, "{}", error) + } + DecodeFromJsonErrorV1::TryFromSlice(error) => { + write!(formatter, "{}", error) + } + } + } +} + +#[cfg(feature = "std")] +impl StdError for DecodeFromJsonErrorV1 { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + match self { + DecodeFromJsonErrorV1::FromHex(error) => Some(error), + DecodeFromJsonErrorV1::TryFromSlice(error) => Some(error), + } + } +} diff --git a/casper_types_ver_2_0/src/transaction/transaction_v1/finalized_transaction_v1_approvals.rs b/casper_types_ver_2_0/src/transaction/transaction_v1/finalized_transaction_v1_approvals.rs new file mode 100644 index 00000000..a10c4ed2 --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/transaction_v1/finalized_transaction_v1_approvals.rs @@ -0,0 +1,78 @@ +use alloc::{collections::BTreeSet, vec::Vec}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +use serde::{Deserialize, Serialize}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + TransactionV1Approval, +}; + +/// A set of approvals that has been agreed upon by consensus to approve of a specific +/// `TransactionV1`. +#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct FinalizedTransactionV1Approvals(BTreeSet); + +impl FinalizedTransactionV1Approvals { + /// Creates a new set of finalized transaction approvals. 
+ pub fn new(approvals: BTreeSet) -> Self { + Self(approvals) + } + + /// Returns the inner `BTreeSet` of approvals. + pub fn inner(&self) -> &BTreeSet { + &self.0 + } + + /// Converts this set of finalized approvals into the inner `BTreeSet`. + pub fn into_inner(self) -> BTreeSet { + self.0 + } + + /// Returns a random FinalizedTransactionV1Approvals. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + let count = rng.gen_range(1..10); + let approvals = (0..count) + .map(|_| TransactionV1Approval::random(rng)) + .collect(); + FinalizedTransactionV1Approvals(approvals) + } +} +impl ToBytes for FinalizedTransactionV1Approvals { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for FinalizedTransactionV1Approvals { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (approvals, remainder) = BTreeSet::::from_bytes(bytes)?; + Ok((FinalizedTransactionV1Approvals(approvals), remainder)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let approvals = FinalizedTransactionV1Approvals::random(rng); + bytesrepr::test_serialization_roundtrip(&approvals); + } +} diff --git a/casper_types_ver_2_0/src/transaction/transaction_v1/transaction_v1_approval.rs b/casper_types_ver_2_0/src/transaction/transaction_v1/transaction_v1_approval.rs new file mode 100644 index 00000000..0d6cb087 --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/transaction_v1/transaction_v1_approval.rs @@ -0,0 +1,102 @@ +use alloc::vec::Vec; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, 
Serialize}; + +use super::TransactionV1Hash; +#[cfg(any(all(feature = "std", feature = "testing"), test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + crypto, PublicKey, SecretKey, Signature, +}; + +/// A struct containing a signature of a transaction hash and the public key of the signer. +#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct TransactionV1Approval { + signer: PublicKey, + signature: Signature, +} + +impl TransactionV1Approval { + /// Creates an approval by signing the given transaction hash using the given secret key. + pub fn create(hash: &TransactionV1Hash, secret_key: &SecretKey) -> Self { + let signer = PublicKey::from(secret_key); + let signature = crypto::sign(hash, secret_key, &signer); + Self { signer, signature } + } + + /// Returns a new approval. + pub fn new(signer: PublicKey, signature: Signature) -> Self { + Self { signer, signature } + } + + /// Returns the public key of the approval's signer. + pub fn signer(&self) -> &PublicKey { + &self.signer + } + + /// Returns the approval signature. + pub fn signature(&self) -> &Signature { + &self.signature + } + + /// Returns a random `TransactionV1Approval`. 
+ #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random(rng: &mut TestRng) -> Self { + let hash = TransactionV1Hash::random(rng); + let secret_key = SecretKey::random(rng); + TransactionV1Approval::create(&hash, &secret_key) + } +} + +impl Display for TransactionV1Approval { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!(formatter, "approval({})", self.signer) + } +} + +impl ToBytes for TransactionV1Approval { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.signer.write_bytes(writer)?; + self.signature.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.signer.serialized_length() + self.signature.serialized_length() + } +} + +impl FromBytes for TransactionV1Approval { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (signer, remainder) = PublicKey::from_bytes(bytes)?; + let (signature, remainder) = Signature::from_bytes(remainder)?; + let approval = TransactionV1Approval { signer, signature }; + Ok((approval, remainder)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let approval = TransactionV1Approval::random(rng); + bytesrepr::test_serialization_roundtrip(&approval); + } +} diff --git a/casper_types_ver_2_0/src/transaction/transaction_v1/transaction_v1_approvals_hash.rs b/casper_types_ver_2_0/src/transaction/transaction_v1/transaction_v1_approvals_hash.rs new file mode 100644 index 00000000..cf148819 --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/transaction_v1/transaction_v1_approvals_hash.rs @@ -0,0 +1,114 @@ +use alloc::{collections::BTreeSet, vec::Vec}; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = 
"testing", test))] +use rand::Rng; +use serde::{Deserialize, Serialize}; + +#[cfg(doc)] +use super::TransactionV1; +use super::TransactionV1Approval; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + Digest, +}; + +/// The cryptographic hash of the bytesrepr-encoded set of approvals for a single [`TransactionV1`]. +#[derive( + Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug, Default, +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct TransactionV1ApprovalsHash(Digest); + +impl TransactionV1ApprovalsHash { + /// The number of bytes in a `TransactionV1ApprovalsHash` digest. + pub const LENGTH: usize = Digest::LENGTH; + + /// Constructs a new `TransactionV1ApprovalsHash` by bytesrepr-encoding `approvals` and creating + /// a [`Digest`] of this. + pub fn compute(approvals: &BTreeSet) -> Result { + let digest = Digest::hash(approvals.to_bytes()?); + Ok(TransactionV1ApprovalsHash(digest)) + } + + /// Returns the wrapped inner digest. + pub fn inner(&self) -> &Digest { + &self.0 + } + + /// Returns a new `TransactionV1ApprovalsHash` directly initialized with the provided bytes; no + /// hashing is done. + #[cfg(any(feature = "testing", test))] + pub const fn from_raw(raw_digest: [u8; Self::LENGTH]) -> Self { + TransactionV1ApprovalsHash(Digest::from_raw(raw_digest)) + } + + /// Returns a random `TransactionV1ApprovalsHash`. 
+    #[cfg(any(feature = "testing", test))]
+    pub fn random(rng: &mut TestRng) -> Self {
+        // Raw random bytes lifted directly into a `Digest`; no hashing involved.
+        let hash = rng.gen::<[u8; Digest::LENGTH]>().into();
+        TransactionV1ApprovalsHash(hash)
+    }
+}
+
+impl From<TransactionV1ApprovalsHash> for Digest {
+    fn from(hash: TransactionV1ApprovalsHash) -> Self {
+        hash.0
+    }
+}
+
+impl From<Digest> for TransactionV1ApprovalsHash {
+    fn from(digest: Digest) -> Self {
+        Self(digest)
+    }
+}
+
+impl Display for TransactionV1ApprovalsHash {
+    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {
+        write!(formatter, "transaction-v1-approvals-hash({})", self.0,)
+    }
+}
+
+impl AsRef<[u8]> for TransactionV1ApprovalsHash {
+    fn as_ref(&self) -> &[u8] {
+        self.0.as_ref()
+    }
+}
+
+impl ToBytes for TransactionV1ApprovalsHash {
+    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {
+        // Delegates wholesale to the wrapped `Digest`'s bytesrepr encoding.
+        self.0.write_bytes(writer)
+    }
+
+    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {
+        self.0.to_bytes()
+    }
+
+    fn serialized_length(&self) -> usize {
+        self.0.serialized_length()
+    }
+}
+
+impl FromBytes for TransactionV1ApprovalsHash {
+    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {
+        Digest::from_bytes(bytes)
+            .map(|(inner, remainder)| (TransactionV1ApprovalsHash(inner), remainder))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn bytesrepr_roundtrip() {
+        let rng = &mut TestRng::new();
+        let hash = TransactionV1ApprovalsHash::random(rng);
+        bytesrepr::test_serialization_roundtrip(&hash);
+    }
+}
diff --git a/casper_types_ver_2_0/src/transaction/transaction_v1/transaction_v1_body.rs b/casper_types_ver_2_0/src/transaction/transaction_v1/transaction_v1_body.rs
new file mode 100644
index 00000000..edc515df
--- /dev/null
+++ b/casper_types_ver_2_0/src/transaction/transaction_v1/transaction_v1_body.rs
@@ -0,0 +1,426 @@
+#[cfg(any(feature = "std", test))]
+pub(super) mod arg_handling;
+
+use alloc::vec::Vec;
+use core::fmt::{self, Display, Formatter};
+
+#[cfg(feature = "datasize")]
+use datasize::DataSize;
+#[cfg(any(feature =
"testing", test))] +use rand::{Rng, RngCore}; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +#[cfg(any(feature = "std", test))] +use serde::{Deserialize, Serialize}; +#[cfg(any(feature = "std", test))] +use tracing::debug; + +use super::super::{RuntimeArgs, TransactionEntryPoint, TransactionScheduling, TransactionTarget}; +#[cfg(doc)] +use super::TransactionV1; +#[cfg(any(feature = "std", test))] +use super::{TransactionConfig, TransactionV1ConfigFailure}; +use crate::bytesrepr::{self, FromBytes, ToBytes}; +#[cfg(any(feature = "testing", test))] +use crate::{ + bytesrepr::Bytes, testing::TestRng, PublicKey, TransactionInvocationTarget, TransactionRuntime, + TransactionSessionKind, +}; + +/// The body of a [`TransactionV1`]. +#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Debug)] +#[cfg_attr( + any(feature = "std", test), + derive(Serialize, Deserialize), + serde(deny_unknown_fields) +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "Body of a `TransactionV1`.") +)] +pub struct TransactionV1Body { + pub(super) args: RuntimeArgs, + pub(super) target: TransactionTarget, + pub(super) entry_point: TransactionEntryPoint, + pub(super) scheduling: TransactionScheduling, +} + +impl TransactionV1Body { + /// Returns a new `TransactionV1Body`. + pub fn new( + args: RuntimeArgs, + target: TransactionTarget, + entry_point: TransactionEntryPoint, + scheduling: TransactionScheduling, + ) -> Self { + TransactionV1Body { + args, + target, + entry_point, + scheduling, + } + } + + /// Returns the runtime args of the transaction. + pub fn args(&self) -> &RuntimeArgs { + &self.args + } + + /// Returns the target of the transaction. + pub fn target(&self) -> &TransactionTarget { + &self.target + } + + /// Returns the entry point of the transaction. 
+ pub fn entry_point(&self) -> &TransactionEntryPoint { + &self.entry_point + } + + /// Returns the scheduling kind of the transaction. + pub fn scheduling(&self) -> &TransactionScheduling { + &self.scheduling + } + + #[cfg(any(feature = "std", test))] + pub(super) fn is_valid( + &self, + config: &TransactionConfig, + ) -> Result<(), TransactionV1ConfigFailure> { + let args_length = self.args.serialized_length(); + if args_length > config.transaction_v1_config.max_args_length as usize { + debug!( + args_length, + max_args_length = config.transaction_v1_config.max_args_length, + "transaction runtime args excessive size" + ); + return Err(TransactionV1ConfigFailure::ExcessiveArgsLength { + max_length: config.transaction_v1_config.max_args_length as usize, + got: args_length, + }); + } + + match &self.target { + TransactionTarget::Native => match self.entry_point { + TransactionEntryPoint::Custom(_) => { + debug!( + entry_point = %self.entry_point, + "native transaction cannot have custom entry point" + ); + Err(TransactionV1ConfigFailure::EntryPointCannotBeCustom { + entry_point: self.entry_point.clone(), + }) + } + TransactionEntryPoint::Transfer => arg_handling::has_valid_transfer_args( + &self.args, + config.native_transfer_minimum_motes, + ), + TransactionEntryPoint::AddBid => arg_handling::has_valid_add_bid_args(&self.args), + TransactionEntryPoint::WithdrawBid => { + arg_handling::has_valid_withdraw_bid_args(&self.args) + } + TransactionEntryPoint::Delegate => { + arg_handling::has_valid_delegate_args(&self.args) + } + TransactionEntryPoint::Undelegate => { + arg_handling::has_valid_undelegate_args(&self.args) + } + TransactionEntryPoint::Redelegate => { + arg_handling::has_valid_redelegate_args(&self.args) + } + }, + TransactionTarget::Stored { .. 
} => match &self.entry_point { + TransactionEntryPoint::Custom(_) => Ok(()), + TransactionEntryPoint::Transfer + | TransactionEntryPoint::AddBid + | TransactionEntryPoint::WithdrawBid + | TransactionEntryPoint::Delegate + | TransactionEntryPoint::Undelegate + | TransactionEntryPoint::Redelegate => { + debug!( + entry_point = %self.entry_point, + "transaction targeting stored entity/package must have custom entry point" + ); + Err(TransactionV1ConfigFailure::EntryPointMustBeCustom { + entry_point: self.entry_point.clone(), + }) + } + }, + TransactionTarget::Session { module_bytes, .. } => match &self.entry_point { + TransactionEntryPoint::Custom(_) => { + if module_bytes.is_empty() { + debug!("transaction with session code must not have empty module bytes"); + return Err(TransactionV1ConfigFailure::EmptyModuleBytes); + } + Ok(()) + } + TransactionEntryPoint::Transfer + | TransactionEntryPoint::AddBid + | TransactionEntryPoint::WithdrawBid + | TransactionEntryPoint::Delegate + | TransactionEntryPoint::Undelegate + | TransactionEntryPoint::Redelegate => { + debug!( + entry_point = %self.entry_point, + "transaction with session code must have custom entry point" + ); + Err(TransactionV1ConfigFailure::EntryPointMustBeCustom { + entry_point: self.entry_point.clone(), + }) + } + }, + } + } + + /// Returns a random `TransactionV1Body`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..8) { + 0 => { + let source = rng.gen(); + let target = rng.gen(); + let amount = rng.gen_range( + TransactionConfig::default().native_transfer_minimum_motes..=u64::MAX, + ); + let maybe_to = rng.gen::().then(|| rng.gen()); + let maybe_id = rng.gen::().then(|| rng.gen()); + let args = + arg_handling::new_transfer_args(source, target, amount, maybe_to, maybe_id) + .unwrap(); + TransactionV1Body::new( + args, + TransactionTarget::Native, + TransactionEntryPoint::Transfer, + TransactionScheduling::random(rng), + ) + } + 1 => { + let public_key = PublicKey::random(rng); + let delegation_rate = rng.gen(); + let amount = rng.gen::(); + let args = + arg_handling::new_add_bid_args(public_key, delegation_rate, amount).unwrap(); + TransactionV1Body::new( + args, + TransactionTarget::Native, + TransactionEntryPoint::AddBid, + TransactionScheduling::random(rng), + ) + } + 2 => { + let public_key = PublicKey::random(rng); + let amount = rng.gen::(); + let args = arg_handling::new_withdraw_bid_args(public_key, amount).unwrap(); + TransactionV1Body::new( + args, + TransactionTarget::Native, + TransactionEntryPoint::WithdrawBid, + TransactionScheduling::random(rng), + ) + } + 3 => { + let delegator = PublicKey::random(rng); + let validator = PublicKey::random(rng); + let amount = rng.gen::(); + let args = arg_handling::new_delegate_args(delegator, validator, amount).unwrap(); + TransactionV1Body::new( + args, + TransactionTarget::Native, + TransactionEntryPoint::Delegate, + TransactionScheduling::random(rng), + ) + } + 4 => { + let delegator = PublicKey::random(rng); + let validator = PublicKey::random(rng); + let amount = rng.gen::(); + let args = arg_handling::new_undelegate_args(delegator, validator, amount).unwrap(); + TransactionV1Body::new( + args, + TransactionTarget::Native, + TransactionEntryPoint::Undelegate, + TransactionScheduling::random(rng), + ) + } + 5 => { 
+ let delegator = PublicKey::random(rng); + let validator = PublicKey::random(rng); + let amount = rng.gen::(); + let new_validator = PublicKey::random(rng); + let args = + arg_handling::new_redelegate_args(delegator, validator, amount, new_validator) + .unwrap(); + TransactionV1Body::new( + args, + TransactionTarget::Native, + TransactionEntryPoint::Redelegate, + TransactionScheduling::random(rng), + ) + } + 6 => { + let target = TransactionTarget::Stored { + id: TransactionInvocationTarget::random(rng), + runtime: TransactionRuntime::VmCasperV1, + }; + TransactionV1Body::new( + RuntimeArgs::random(rng), + target, + TransactionEntryPoint::Custom(rng.random_string(1..11)), + TransactionScheduling::random(rng), + ) + } + 7 => { + let mut buffer = vec![0u8; rng.gen_range(0..100)]; + rng.fill_bytes(buffer.as_mut()); + let target = TransactionTarget::Session { + kind: TransactionSessionKind::random(rng), + module_bytes: Bytes::from(buffer), + runtime: TransactionRuntime::VmCasperV1, + }; + TransactionV1Body::new( + RuntimeArgs::random(rng), + target, + TransactionEntryPoint::Custom(rng.random_string(1..11)), + TransactionScheduling::random(rng), + ) + } + _ => unreachable!(), + } + } +} + +impl Display for TransactionV1Body { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "v1-body({} {} {})", + self.target, self.entry_point, self.scheduling + ) + } +} + +impl ToBytes for TransactionV1Body { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.args.write_bytes(writer)?; + self.target.write_bytes(writer)?; + self.entry_point.write_bytes(writer)?; + self.scheduling.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.args.serialized_length() + + self.target.serialized_length() + + self.entry_point.serialized_length() + + 
self.scheduling.serialized_length() + } +} + +impl FromBytes for TransactionV1Body { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (args, remainder) = RuntimeArgs::from_bytes(bytes)?; + let (target, remainder) = TransactionTarget::from_bytes(remainder)?; + let (entry_point, remainder) = TransactionEntryPoint::from_bytes(remainder)?; + let (scheduling, remainder) = TransactionScheduling::from_bytes(remainder)?; + let body = TransactionV1Body::new(args, target, entry_point, scheduling); + Ok((body, remainder)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::runtime_args; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let body = TransactionV1Body::random(rng); + bytesrepr::test_serialization_roundtrip(&body); + } + + #[test] + fn not_acceptable_due_to_excessive_args_length() { + let rng = &mut TestRng::new(); + let mut config = TransactionConfig::default(); + config.transaction_v1_config.max_args_length = 10; + let mut body = TransactionV1Body::random(rng); + body.args = runtime_args! 
{"a" => 1_u8}; + + let expected_error = TransactionV1ConfigFailure::ExcessiveArgsLength { + max_length: 10, + got: 15, + }; + + assert_eq!(body.is_valid(&config,), Err(expected_error)); + } + + #[test] + fn not_acceptable_due_to_custom_entry_point_in_native() { + let rng = &mut TestRng::new(); + let public_key = PublicKey::random(rng); + let amount = rng.gen::(); + let args = arg_handling::new_withdraw_bid_args(public_key, amount).unwrap(); + let entry_point = TransactionEntryPoint::Custom("call".to_string()); + let body = TransactionV1Body::new( + args, + TransactionTarget::Native, + entry_point.clone(), + TransactionScheduling::random(rng), + ); + + let expected_error = TransactionV1ConfigFailure::EntryPointCannotBeCustom { entry_point }; + + let config = TransactionConfig::default(); + assert_eq!(body.is_valid(&config,), Err(expected_error)); + } + + #[test] + fn not_acceptable_due_to_non_custom_entry_point_in_stored_or_session() { + let rng = &mut TestRng::new(); + let config = TransactionConfig::default(); + + let mut check = |entry_point: TransactionEntryPoint| { + let stored_target = TransactionTarget::new_stored( + TransactionInvocationTarget::InvocableEntity([0; 32]), + TransactionRuntime::VmCasperV1, + ); + let session_target = TransactionTarget::new_session( + TransactionSessionKind::Standard, + Bytes::from(vec![1]), + TransactionRuntime::VmCasperV1, + ); + + let stored_body = TransactionV1Body::new( + RuntimeArgs::new(), + stored_target, + entry_point.clone(), + TransactionScheduling::random(rng), + ); + let session_body = TransactionV1Body::new( + RuntimeArgs::new(), + session_target, + entry_point.clone(), + TransactionScheduling::random(rng), + ); + + let expected_error = TransactionV1ConfigFailure::EntryPointMustBeCustom { entry_point }; + + assert_eq!(stored_body.is_valid(&config,), Err(expected_error.clone())); + assert_eq!(session_body.is_valid(&config,), Err(expected_error)); + }; + + check(TransactionEntryPoint::Transfer); + 
check(TransactionEntryPoint::AddBid); + check(TransactionEntryPoint::WithdrawBid); + check(TransactionEntryPoint::Delegate); + check(TransactionEntryPoint::Undelegate); + check(TransactionEntryPoint::Redelegate); + } +} diff --git a/casper_types_ver_2_0/src/transaction/transaction_v1/transaction_v1_body/arg_handling.rs b/casper_types_ver_2_0/src/transaction/transaction_v1/transaction_v1_body/arg_handling.rs new file mode 100644 index 00000000..bc0ac80a --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/transaction_v1/transaction_v1_body/arg_handling.rs @@ -0,0 +1,783 @@ +use core::marker::PhantomData; + +use tracing::debug; + +use super::super::TransactionV1ConfigFailure; +use crate::{ + account::AccountHash, + bytesrepr::{FromBytes, ToBytes}, + CLTyped, CLValue, CLValueError, PublicKey, RuntimeArgs, URef, U512, +}; + +const TRANSFER_ARG_SOURCE: RequiredArg = RequiredArg::new("source"); +const TRANSFER_ARG_TARGET: RequiredArg = RequiredArg::new("target"); +const TRANSFER_ARG_AMOUNT: RequiredArg = RequiredArg::new("amount"); +const TRANSFER_ARG_TO: OptionalArg = OptionalArg::new("to"); +const TRANSFER_ARG_ID: OptionalArg = OptionalArg::new("id"); + +const ADD_BID_ARG_PUBLIC_KEY: RequiredArg = RequiredArg::new("public_key"); +const ADD_BID_ARG_DELEGATION_RATE: RequiredArg = RequiredArg::new("delegation_rate"); +const ADD_BID_ARG_AMOUNT: RequiredArg = RequiredArg::new("amount"); + +const WITHDRAW_BID_ARG_PUBLIC_KEY: RequiredArg = RequiredArg::new("public_key"); +const WITHDRAW_BID_ARG_AMOUNT: RequiredArg = RequiredArg::new("amount"); + +const DELEGATE_ARG_DELEGATOR: RequiredArg = RequiredArg::new("delegator"); +const DELEGATE_ARG_VALIDATOR: RequiredArg = RequiredArg::new("validator"); +const DELEGATE_ARG_AMOUNT: RequiredArg = RequiredArg::new("amount"); + +const UNDELEGATE_ARG_DELEGATOR: RequiredArg = RequiredArg::new("delegator"); +const UNDELEGATE_ARG_VALIDATOR: RequiredArg = RequiredArg::new("validator"); +const UNDELEGATE_ARG_AMOUNT: RequiredArg = 
RequiredArg::new("amount"); + +const REDELEGATE_ARG_DELEGATOR: RequiredArg = RequiredArg::new("delegator"); +const REDELEGATE_ARG_VALIDATOR: RequiredArg = RequiredArg::new("validator"); +const REDELEGATE_ARG_AMOUNT: RequiredArg = RequiredArg::new("amount"); +const REDELEGATE_ARG_NEW_VALIDATOR: RequiredArg = RequiredArg::new("new_validator"); + +struct RequiredArg { + name: &'static str, + _phantom: PhantomData, +} + +impl RequiredArg { + const fn new(name: &'static str) -> Self { + Self { + name, + _phantom: PhantomData, + } + } + + fn get(&self, args: &RuntimeArgs) -> Result + where + T: CLTyped + FromBytes, + { + let cl_value = args.get(self.name).ok_or_else(|| { + debug!("missing required runtime argument '{}'", self.name); + TransactionV1ConfigFailure::MissingArg { + arg_name: self.name.to_string(), + } + })?; + parse_cl_value(cl_value, self.name) + } + + fn insert(&self, args: &mut RuntimeArgs, value: T) -> Result<(), CLValueError> + where + T: CLTyped + ToBytes, + { + args.insert(self.name, value) + } +} + +struct OptionalArg { + name: &'static str, + _phantom: PhantomData, +} + +impl OptionalArg { + const fn new(name: &'static str) -> Self { + Self { + name, + _phantom: PhantomData, + } + } + + fn get(&self, args: &RuntimeArgs) -> Result, TransactionV1ConfigFailure> + where + T: CLTyped + FromBytes, + { + let cl_value = match args.get(self.name) { + Some(value) => value, + None => return Ok(None), + }; + let value = parse_cl_value(cl_value, self.name)?; + Ok(value) + } + + fn insert(&self, args: &mut RuntimeArgs, value: T) -> Result<(), CLValueError> + where + T: CLTyped + ToBytes, + { + args.insert(self.name, Some(value)) + } +} + +fn parse_cl_value( + cl_value: &CLValue, + arg_name: &str, +) -> Result { + cl_value.to_t::().map_err(|_| { + debug!( + "expected runtime argument '{arg_name}' to be of type {}, but is {}", + T::cl_type(), + cl_value.cl_type() + ); + TransactionV1ConfigFailure::UnexpectedArgType { + arg_name: arg_name.to_string(), + expected: 
T::cl_type(), + got: cl_value.cl_type().clone(), + } + }) +} + +/// Creates a `RuntimeArgs` suitable for use in a transfer transaction. +pub(in crate::transaction::transaction_v1) fn new_transfer_args>( + source: URef, + target: URef, + amount: A, + maybe_to: Option, + maybe_id: Option, +) -> Result { + let mut args = RuntimeArgs::new(); + TRANSFER_ARG_SOURCE.insert(&mut args, source)?; + TRANSFER_ARG_TARGET.insert(&mut args, target)?; + TRANSFER_ARG_AMOUNT.insert(&mut args, amount.into())?; + if let Some(to) = maybe_to { + TRANSFER_ARG_TO.insert(&mut args, to)?; + } + if let Some(id) = maybe_id { + TRANSFER_ARG_ID.insert(&mut args, id)?; + } + Ok(args) +} + +/// Checks the given `RuntimeArgs` are suitable for use in a transfer transaction. +pub(in crate::transaction::transaction_v1) fn has_valid_transfer_args( + args: &RuntimeArgs, + native_transfer_minimum_motes: u64, +) -> Result<(), TransactionV1ConfigFailure> { + let _source = TRANSFER_ARG_SOURCE.get(args)?; + let _target = TRANSFER_ARG_TARGET.get(args)?; + let amount = TRANSFER_ARG_AMOUNT.get(args)?; + if amount < U512::from(native_transfer_minimum_motes) { + debug!( + minimum = %native_transfer_minimum_motes, + %amount, + "insufficient transfer amount" + ); + return Err(TransactionV1ConfigFailure::InsufficientTransferAmount { + minimum: native_transfer_minimum_motes, + attempted: amount, + }); + } + let _maybe_to = TRANSFER_ARG_TO.get(args)?; + let _maybe_id = TRANSFER_ARG_ID.get(args)?; + Ok(()) +} + +/// Creates a `RuntimeArgs` suitable for use in an add_bid transaction. 
+pub(in crate::transaction::transaction_v1) fn new_add_bid_args<A: Into<U512>>(
+    public_key: PublicKey,
+    delegation_rate: u8,
+    amount: A,
+) -> Result<RuntimeArgs, CLValueError> {
+    let mut args = RuntimeArgs::new();
+    ADD_BID_ARG_PUBLIC_KEY.insert(&mut args, public_key)?;
+    ADD_BID_ARG_DELEGATION_RATE.insert(&mut args, delegation_rate)?;
+    ADD_BID_ARG_AMOUNT.insert(&mut args, amount.into())?;
+    Ok(args)
+}
+
+/// Checks the given `RuntimeArgs` are suitable for use in an add_bid transaction.
+pub(in crate::transaction::transaction_v1) fn has_valid_add_bid_args(
+    args: &RuntimeArgs,
+) -> Result<(), TransactionV1ConfigFailure> {
+    // Only presence and type are validated; values are discarded.
+    let _public_key = ADD_BID_ARG_PUBLIC_KEY.get(args)?;
+    let _delegation_rate = ADD_BID_ARG_DELEGATION_RATE.get(args)?;
+    let _amount = ADD_BID_ARG_AMOUNT.get(args)?;
+    Ok(())
+}
+
+/// Creates a `RuntimeArgs` suitable for use in a withdraw_bid transaction.
+pub(in crate::transaction::transaction_v1) fn new_withdraw_bid_args<A: Into<U512>>(
+    public_key: PublicKey,
+    amount: A,
+) -> Result<RuntimeArgs, CLValueError> {
+    let mut args = RuntimeArgs::new();
+    WITHDRAW_BID_ARG_PUBLIC_KEY.insert(&mut args, public_key)?;
+    WITHDRAW_BID_ARG_AMOUNT.insert(&mut args, amount.into())?;
+    Ok(args)
+}
+
+/// Checks the given `RuntimeArgs` are suitable for use in a withdraw_bid transaction.
+pub(in crate::transaction::transaction_v1) fn has_valid_withdraw_bid_args(
+    args: &RuntimeArgs,
+) -> Result<(), TransactionV1ConfigFailure> {
+    let _public_key = WITHDRAW_BID_ARG_PUBLIC_KEY.get(args)?;
+    let _amount = WITHDRAW_BID_ARG_AMOUNT.get(args)?;
+    Ok(())
+}
+
+/// Creates a `RuntimeArgs` suitable for use in a delegate transaction.
+pub(in crate::transaction::transaction_v1) fn new_delegate_args<A: Into<U512>>(
+    delegator: PublicKey,
+    validator: PublicKey,
+    amount: A,
+) -> Result<RuntimeArgs, CLValueError> {
+    let mut args = RuntimeArgs::new();
+    DELEGATE_ARG_DELEGATOR.insert(&mut args, delegator)?;
+    DELEGATE_ARG_VALIDATOR.insert(&mut args, validator)?;
+    DELEGATE_ARG_AMOUNT.insert(&mut args, amount.into())?;
+    Ok(args)
+}
+
+/// Checks the given `RuntimeArgs` are suitable for use in a delegate transaction.
+pub(in crate::transaction::transaction_v1) fn has_valid_delegate_args(
+    args: &RuntimeArgs,
+) -> Result<(), TransactionV1ConfigFailure> {
+    // Only presence and type are validated; values are discarded.
+    let _delegator = DELEGATE_ARG_DELEGATOR.get(args)?;
+    let _validator = DELEGATE_ARG_VALIDATOR.get(args)?;
+    let _amount = DELEGATE_ARG_AMOUNT.get(args)?;
+    Ok(())
+}
+
+/// Creates a `RuntimeArgs` suitable for use in an undelegate transaction.
+pub(in crate::transaction::transaction_v1) fn new_undelegate_args<A: Into<U512>>(
+    delegator: PublicKey,
+    validator: PublicKey,
+    amount: A,
+) -> Result<RuntimeArgs, CLValueError> {
+    let mut args = RuntimeArgs::new();
+    UNDELEGATE_ARG_DELEGATOR.insert(&mut args, delegator)?;
+    UNDELEGATE_ARG_VALIDATOR.insert(&mut args, validator)?;
+    UNDELEGATE_ARG_AMOUNT.insert(&mut args, amount.into())?;
+    Ok(args)
+}
+
+/// Checks the given `RuntimeArgs` are suitable for use in an undelegate transaction.
+pub(in crate::transaction::transaction_v1) fn has_valid_undelegate_args(
+    args: &RuntimeArgs,
+) -> Result<(), TransactionV1ConfigFailure> {
+    let _delegator = UNDELEGATE_ARG_DELEGATOR.get(args)?;
+    let _validator = UNDELEGATE_ARG_VALIDATOR.get(args)?;
+    let _amount = UNDELEGATE_ARG_AMOUNT.get(args)?;
+    Ok(())
+}
+
+/// Creates a `RuntimeArgs` suitable for use in a redelegate transaction.
+pub(in crate::transaction::transaction_v1) fn new_redelegate_args>( + delegator: PublicKey, + validator: PublicKey, + amount: A, + new_validator: PublicKey, +) -> Result { + let mut args = RuntimeArgs::new(); + REDELEGATE_ARG_DELEGATOR.insert(&mut args, delegator)?; + REDELEGATE_ARG_VALIDATOR.insert(&mut args, validator)?; + REDELEGATE_ARG_AMOUNT.insert(&mut args, amount.into())?; + REDELEGATE_ARG_NEW_VALIDATOR.insert(&mut args, new_validator)?; + Ok(args) +} + +/// Checks the given `RuntimeArgs` are suitable for use in a redelegate transaction. +pub(in crate::transaction::transaction_v1) fn has_valid_redelegate_args( + args: &RuntimeArgs, +) -> Result<(), TransactionV1ConfigFailure> { + let _delegator = REDELEGATE_ARG_DELEGATOR.get(args)?; + let _validator = REDELEGATE_ARG_VALIDATOR.get(args)?; + let _amount = REDELEGATE_ARG_AMOUNT.get(args)?; + let _new_validator = REDELEGATE_ARG_NEW_VALIDATOR.get(args)?; + Ok(()) +} + +#[cfg(test)] +mod tests { + use rand::Rng; + + use super::*; + use crate::{runtime_args, testing::TestRng, CLType}; + + #[test] + fn should_validate_transfer_args() { + let rng = &mut TestRng::new(); + let min_motes = 10_u64; + // Check random args, within motes limit. + let args = new_transfer_args( + rng.gen(), + rng.gen(), + U512::from(rng.gen_range(min_motes..=u64::MAX)), + rng.gen::().then(|| rng.gen()), + rng.gen::().then(|| rng.gen()), + ) + .unwrap(); + has_valid_transfer_args(&args, min_motes).unwrap(); + + // Check at minimum motes limit. + let args = new_transfer_args( + rng.gen(), + rng.gen(), + U512::from(min_motes), + rng.gen::().then(|| rng.gen()), + rng.gen::().then(|| rng.gen()), + ) + .unwrap(); + has_valid_transfer_args(&args, min_motes).unwrap(); + + // Check with extra arg. 
+ let mut args = new_transfer_args( + rng.gen(), + rng.gen(), + U512::from(min_motes), + rng.gen::().then(|| rng.gen()), + rng.gen::().then(|| rng.gen()), + ) + .unwrap(); + args.insert("a", 1).unwrap(); + has_valid_transfer_args(&args, min_motes).unwrap(); + } + + #[test] + fn transfer_args_with_low_amount_should_be_invalid() { + let rng = &mut TestRng::new(); + let min_motes = 10_u64; + + let args = runtime_args! { + TRANSFER_ARG_SOURCE.name => rng.gen::(), + TRANSFER_ARG_TARGET.name => rng.gen::(), + TRANSFER_ARG_AMOUNT.name => U512::from(min_motes - 1) + }; + + let expected_error = TransactionV1ConfigFailure::InsufficientTransferAmount { + minimum: min_motes, + attempted: U512::from(min_motes - 1), + }; + + assert_eq!( + has_valid_transfer_args(&args, min_motes), + Err(expected_error) + ); + } + + #[test] + fn transfer_args_with_missing_required_should_be_invalid() { + let rng = &mut TestRng::new(); + let min_motes = 10_u64; + + // Missing "source". + let args = runtime_args! { + TRANSFER_ARG_TARGET.name => rng.gen::(), + TRANSFER_ARG_AMOUNT.name => U512::from(min_motes) + }; + let expected_error = TransactionV1ConfigFailure::MissingArg { + arg_name: TRANSFER_ARG_SOURCE.name.to_string(), + }; + assert_eq!( + has_valid_transfer_args(&args, min_motes), + Err(expected_error) + ); + + // Missing "target". + let args = runtime_args! { + TRANSFER_ARG_SOURCE.name => rng.gen::(), + TRANSFER_ARG_AMOUNT.name => U512::from(min_motes) + }; + let expected_error = TransactionV1ConfigFailure::MissingArg { + arg_name: TRANSFER_ARG_TARGET.name.to_string(), + }; + assert_eq!( + has_valid_transfer_args(&args, min_motes), + Err(expected_error) + ); + + // Missing "amount". + let args = runtime_args! 
{ + TRANSFER_ARG_SOURCE.name => rng.gen::(), + TRANSFER_ARG_TARGET.name => rng.gen::() + }; + let expected_error = TransactionV1ConfigFailure::MissingArg { + arg_name: TRANSFER_ARG_AMOUNT.name.to_string(), + }; + assert_eq!( + has_valid_transfer_args(&args, min_motes), + Err(expected_error) + ); + } + + #[test] + fn transfer_args_with_wrong_type_should_be_invalid() { + let rng = &mut TestRng::new(); + let min_motes = 10_u64; + + // Wrong "source" type (a required arg). + let args = runtime_args! { + TRANSFER_ARG_SOURCE.name => 1_u8, + TRANSFER_ARG_TARGET.name => rng.gen::(), + TRANSFER_ARG_AMOUNT.name => U512::from(min_motes) + }; + let expected_error = TransactionV1ConfigFailure::UnexpectedArgType { + arg_name: TRANSFER_ARG_SOURCE.name.to_string(), + expected: CLType::URef, + got: CLType::U8, + }; + assert_eq!( + has_valid_transfer_args(&args, min_motes), + Err(expected_error) + ); + + // Wrong "to" type (an optional arg). + let args = runtime_args! { + TRANSFER_ARG_SOURCE.name => rng.gen::(), + TRANSFER_ARG_TARGET.name => rng.gen::(), + TRANSFER_ARG_AMOUNT.name => U512::from(min_motes), + TRANSFER_ARG_TO.name => 1_u8 + }; + let expected_error = TransactionV1ConfigFailure::UnexpectedArgType { + arg_name: TRANSFER_ARG_TO.name.to_string(), + expected: Option::::cl_type(), + got: CLType::U8, + }; + assert_eq!( + has_valid_transfer_args(&args, min_motes), + Err(expected_error) + ); + } + + #[test] + fn should_validate_add_bid_args() { + let rng = &mut TestRng::new(); + + // Check random args. + let mut args = + new_add_bid_args(PublicKey::random(rng), rng.gen(), rng.gen::()).unwrap(); + has_valid_add_bid_args(&args).unwrap(); + + // Check with extra arg. + args.insert("a", 1).unwrap(); + has_valid_add_bid_args(&args).unwrap(); + } + + #[test] + fn add_bid_args_with_missing_required_should_be_invalid() { + let rng = &mut TestRng::new(); + + // Missing "public_key". + let args = runtime_args! 
{ + ADD_BID_ARG_DELEGATION_RATE.name => rng.gen::(), + ADD_BID_ARG_AMOUNT.name => U512::from(rng.gen::()) + }; + let expected_error = TransactionV1ConfigFailure::MissingArg { + arg_name: ADD_BID_ARG_PUBLIC_KEY.name.to_string(), + }; + assert_eq!(has_valid_add_bid_args(&args), Err(expected_error)); + + // Missing "delegation_rate". + let args = runtime_args! { + ADD_BID_ARG_PUBLIC_KEY.name => PublicKey::random(rng), + ADD_BID_ARG_AMOUNT.name => U512::from(rng.gen::()) + }; + let expected_error = TransactionV1ConfigFailure::MissingArg { + arg_name: ADD_BID_ARG_DELEGATION_RATE.name.to_string(), + }; + assert_eq!(has_valid_add_bid_args(&args), Err(expected_error)); + + // Missing "amount". + let args = runtime_args! { + ADD_BID_ARG_PUBLIC_KEY.name => PublicKey::random(rng), + ADD_BID_ARG_DELEGATION_RATE.name => rng.gen::() + }; + let expected_error = TransactionV1ConfigFailure::MissingArg { + arg_name: ADD_BID_ARG_AMOUNT.name.to_string(), + }; + assert_eq!(has_valid_add_bid_args(&args), Err(expected_error)); + } + + #[test] + fn add_bid_args_with_wrong_type_should_be_invalid() { + let rng = &mut TestRng::new(); + + // Wrong "amount" type. + let args = runtime_args! { + ADD_BID_ARG_PUBLIC_KEY.name => PublicKey::random(rng), + ADD_BID_ARG_DELEGATION_RATE.name => rng.gen::(), + ADD_BID_ARG_AMOUNT.name => rng.gen::() + }; + let expected_error = TransactionV1ConfigFailure::UnexpectedArgType { + arg_name: ADD_BID_ARG_AMOUNT.name.to_string(), + expected: CLType::U512, + got: CLType::U64, + }; + assert_eq!(has_valid_add_bid_args(&args), Err(expected_error)); + } + + #[test] + fn should_validate_withdraw_bid_args() { + let rng = &mut TestRng::new(); + + // Check random args. + let mut args = new_withdraw_bid_args(PublicKey::random(rng), rng.gen::()).unwrap(); + has_valid_withdraw_bid_args(&args).unwrap(); + + // Check with extra arg. 
+ args.insert("a", 1).unwrap(); + has_valid_withdraw_bid_args(&args).unwrap(); + } + + #[test] + fn withdraw_bid_args_with_missing_required_should_be_invalid() { + let rng = &mut TestRng::new(); + + // Missing "public_key". + let args = runtime_args! { + WITHDRAW_BID_ARG_AMOUNT.name => U512::from(rng.gen::()) + }; + let expected_error = TransactionV1ConfigFailure::MissingArg { + arg_name: WITHDRAW_BID_ARG_PUBLIC_KEY.name.to_string(), + }; + assert_eq!(has_valid_withdraw_bid_args(&args), Err(expected_error)); + + // Missing "amount". + let args = runtime_args! { + WITHDRAW_BID_ARG_PUBLIC_KEY.name => PublicKey::random(rng), + }; + let expected_error = TransactionV1ConfigFailure::MissingArg { + arg_name: WITHDRAW_BID_ARG_AMOUNT.name.to_string(), + }; + assert_eq!(has_valid_withdraw_bid_args(&args), Err(expected_error)); + } + + #[test] + fn withdraw_bid_args_with_wrong_type_should_be_invalid() { + let rng = &mut TestRng::new(); + + // Wrong "amount" type. + let args = runtime_args! { + WITHDRAW_BID_ARG_PUBLIC_KEY.name => PublicKey::random(rng), + WITHDRAW_BID_ARG_AMOUNT.name => rng.gen::() + }; + let expected_error = TransactionV1ConfigFailure::UnexpectedArgType { + arg_name: WITHDRAW_BID_ARG_AMOUNT.name.to_string(), + expected: CLType::U512, + got: CLType::U64, + }; + assert_eq!(has_valid_withdraw_bid_args(&args), Err(expected_error)); + } + + #[test] + fn should_validate_delegate_args() { + let rng = &mut TestRng::new(); + + // Check random args. + let mut args = new_delegate_args( + PublicKey::random(rng), + PublicKey::random(rng), + rng.gen::(), + ) + .unwrap(); + has_valid_delegate_args(&args).unwrap(); + + // Check with extra arg. + args.insert("a", 1).unwrap(); + has_valid_delegate_args(&args).unwrap(); + } + + #[test] + fn delegate_args_with_missing_required_should_be_invalid() { + let rng = &mut TestRng::new(); + + // Missing "delegator". + let args = runtime_args! 
{ + DELEGATE_ARG_VALIDATOR.name => PublicKey::random(rng), + DELEGATE_ARG_AMOUNT.name => U512::from(rng.gen::()) + }; + let expected_error = TransactionV1ConfigFailure::MissingArg { + arg_name: DELEGATE_ARG_DELEGATOR.name.to_string(), + }; + assert_eq!(has_valid_delegate_args(&args), Err(expected_error)); + + // Missing "validator". + let args = runtime_args! { + DELEGATE_ARG_DELEGATOR.name => PublicKey::random(rng), + DELEGATE_ARG_AMOUNT.name => U512::from(rng.gen::()) + }; + let expected_error = TransactionV1ConfigFailure::MissingArg { + arg_name: DELEGATE_ARG_VALIDATOR.name.to_string(), + }; + assert_eq!(has_valid_delegate_args(&args), Err(expected_error)); + + // Missing "amount". + let args = runtime_args! { + DELEGATE_ARG_DELEGATOR.name => PublicKey::random(rng), + DELEGATE_ARG_VALIDATOR.name => PublicKey::random(rng), + }; + let expected_error = TransactionV1ConfigFailure::MissingArg { + arg_name: DELEGATE_ARG_AMOUNT.name.to_string(), + }; + assert_eq!(has_valid_delegate_args(&args), Err(expected_error)); + } + + #[test] + fn delegate_args_with_wrong_type_should_be_invalid() { + let rng = &mut TestRng::new(); + + // Wrong "amount" type. + let args = runtime_args! { + DELEGATE_ARG_DELEGATOR.name => PublicKey::random(rng), + DELEGATE_ARG_VALIDATOR.name => PublicKey::random(rng), + DELEGATE_ARG_AMOUNT.name => rng.gen::() + }; + let expected_error = TransactionV1ConfigFailure::UnexpectedArgType { + arg_name: DELEGATE_ARG_AMOUNT.name.to_string(), + expected: CLType::U512, + got: CLType::U64, + }; + assert_eq!(has_valid_delegate_args(&args), Err(expected_error)); + } + + #[test] + fn should_validate_undelegate_args() { + let rng = &mut TestRng::new(); + + // Check random args. + let mut args = new_undelegate_args( + PublicKey::random(rng), + PublicKey::random(rng), + rng.gen::(), + ) + .unwrap(); + has_valid_undelegate_args(&args).unwrap(); + + // Check with extra arg. 
+ args.insert("a", 1).unwrap(); + has_valid_undelegate_args(&args).unwrap(); + } + + #[test] + fn undelegate_args_with_missing_required_should_be_invalid() { + let rng = &mut TestRng::new(); + + // Missing "delegator". + let args = runtime_args! { + UNDELEGATE_ARG_VALIDATOR.name => PublicKey::random(rng), + UNDELEGATE_ARG_AMOUNT.name => U512::from(rng.gen::()) + }; + let expected_error = TransactionV1ConfigFailure::MissingArg { + arg_name: UNDELEGATE_ARG_DELEGATOR.name.to_string(), + }; + assert_eq!(has_valid_undelegate_args(&args), Err(expected_error)); + + // Missing "validator". + let args = runtime_args! { + UNDELEGATE_ARG_DELEGATOR.name => PublicKey::random(rng), + UNDELEGATE_ARG_AMOUNT.name => U512::from(rng.gen::()) + }; + let expected_error = TransactionV1ConfigFailure::MissingArg { + arg_name: UNDELEGATE_ARG_VALIDATOR.name.to_string(), + }; + assert_eq!(has_valid_undelegate_args(&args), Err(expected_error)); + + // Missing "amount". + let args = runtime_args! { + UNDELEGATE_ARG_DELEGATOR.name => PublicKey::random(rng), + UNDELEGATE_ARG_VALIDATOR.name => PublicKey::random(rng), + }; + let expected_error = TransactionV1ConfigFailure::MissingArg { + arg_name: UNDELEGATE_ARG_AMOUNT.name.to_string(), + }; + assert_eq!(has_valid_undelegate_args(&args), Err(expected_error)); + } + + #[test] + fn undelegate_args_with_wrong_type_should_be_invalid() { + let rng = &mut TestRng::new(); + + // Wrong "amount" type. + let args = runtime_args! { + UNDELEGATE_ARG_DELEGATOR.name => PublicKey::random(rng), + UNDELEGATE_ARG_VALIDATOR.name => PublicKey::random(rng), + UNDELEGATE_ARG_AMOUNT.name => rng.gen::() + }; + let expected_error = TransactionV1ConfigFailure::UnexpectedArgType { + arg_name: UNDELEGATE_ARG_AMOUNT.name.to_string(), + expected: CLType::U512, + got: CLType::U64, + }; + assert_eq!(has_valid_undelegate_args(&args), Err(expected_error)); + } + + #[test] + fn should_validate_redelegate_args() { + let rng = &mut TestRng::new(); + + // Check random args. 
+ let mut args = new_redelegate_args( + PublicKey::random(rng), + PublicKey::random(rng), + rng.gen::(), + PublicKey::random(rng), + ) + .unwrap(); + has_valid_redelegate_args(&args).unwrap(); + + // Check with extra arg. + args.insert("a", 1).unwrap(); + has_valid_redelegate_args(&args).unwrap(); + } + + #[test] + fn redelegate_args_with_missing_required_should_be_invalid() { + let rng = &mut TestRng::new(); + + // Missing "delegator". + let args = runtime_args! { + REDELEGATE_ARG_VALIDATOR.name => PublicKey::random(rng), + REDELEGATE_ARG_AMOUNT.name => U512::from(rng.gen::()), + REDELEGATE_ARG_NEW_VALIDATOR.name => PublicKey::random(rng), + }; + let expected_error = TransactionV1ConfigFailure::MissingArg { + arg_name: REDELEGATE_ARG_DELEGATOR.name.to_string(), + }; + assert_eq!(has_valid_redelegate_args(&args), Err(expected_error)); + + // Missing "validator". + let args = runtime_args! { + REDELEGATE_ARG_DELEGATOR.name => PublicKey::random(rng), + REDELEGATE_ARG_AMOUNT.name => U512::from(rng.gen::()), + REDELEGATE_ARG_NEW_VALIDATOR.name => PublicKey::random(rng), + }; + let expected_error = TransactionV1ConfigFailure::MissingArg { + arg_name: REDELEGATE_ARG_VALIDATOR.name.to_string(), + }; + assert_eq!(has_valid_redelegate_args(&args), Err(expected_error)); + + // Missing "amount". + let args = runtime_args! { + REDELEGATE_ARG_DELEGATOR.name => PublicKey::random(rng), + REDELEGATE_ARG_VALIDATOR.name => PublicKey::random(rng), + REDELEGATE_ARG_NEW_VALIDATOR.name => PublicKey::random(rng), + }; + let expected_error = TransactionV1ConfigFailure::MissingArg { + arg_name: REDELEGATE_ARG_AMOUNT.name.to_string(), + }; + assert_eq!(has_valid_redelegate_args(&args), Err(expected_error)); + + // Missing "new_validator". + let args = runtime_args! 
{ + REDELEGATE_ARG_DELEGATOR.name => PublicKey::random(rng), + REDELEGATE_ARG_VALIDATOR.name => PublicKey::random(rng), + REDELEGATE_ARG_AMOUNT.name => U512::from(rng.gen::()), + }; + let expected_error = TransactionV1ConfigFailure::MissingArg { + arg_name: REDELEGATE_ARG_NEW_VALIDATOR.name.to_string(), + }; + assert_eq!(has_valid_redelegate_args(&args), Err(expected_error)); + } + + #[test] + fn redelegate_args_with_wrong_type_should_be_invalid() { + let rng = &mut TestRng::new(); + + // Wrong "amount" type. + let args = runtime_args! { + REDELEGATE_ARG_DELEGATOR.name => PublicKey::random(rng), + REDELEGATE_ARG_VALIDATOR.name => PublicKey::random(rng), + REDELEGATE_ARG_AMOUNT.name => rng.gen::(), + REDELEGATE_ARG_NEW_VALIDATOR.name => PublicKey::random(rng), + }; + let expected_error = TransactionV1ConfigFailure::UnexpectedArgType { + arg_name: REDELEGATE_ARG_AMOUNT.name.to_string(), + expected: CLType::U512, + got: CLType::U64, + }; + assert_eq!(has_valid_redelegate_args(&args), Err(expected_error)); + } +} diff --git a/casper_types_ver_2_0/src/transaction/transaction_v1/transaction_v1_builder.rs b/casper_types_ver_2_0/src/transaction/transaction_v1/transaction_v1_builder.rs new file mode 100644 index 00000000..f707cfe2 --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/transaction_v1/transaction_v1_builder.rs @@ -0,0 +1,490 @@ +mod error; + +use core::marker::PhantomData; + +#[cfg(any(feature = "testing", test))] +use rand::Rng; + +use super::{ + super::{ + InitiatorAddr, TransactionEntryPoint, TransactionInvocationTarget, TransactionRuntime, + TransactionScheduling, TransactionSessionKind, TransactionTarget, + }, + transaction_v1_body::arg_handling, + InitiatorAddrAndSecretKey, PricingMode, TransactionV1, TransactionV1Body, +}; +use crate::{ + account::AccountHash, bytesrepr::Bytes, CLValue, CLValueError, EntityAddr, EntityVersion, + PackageAddr, PublicKey, RuntimeArgs, SecretKey, TimeDiff, Timestamp, URef, U512, +}; +#[cfg(any(feature = "testing", 
test))] +use crate::{testing::TestRng, TransactionConfig, TransactionV1Approval, TransactionV1Hash}; +pub use error::TransactionV1BuilderError; + +/// A builder for constructing a [`TransactionV1`]. +/// +/// # Note +/// +/// Before calling [`build`](Self::build), you must ensure that: +/// * an initiator_addr is provided by either calling +/// [`with_initiator_addr`](Self::with_initiator_addr) or +/// [`with_secret_key`](Self::with_secret_key) +/// * the chain name is set by calling [`with_chain_name`](Self::with_chain_name) +/// +/// If no secret key is provided, the resulting transaction will be unsigned, and hence invalid. +/// It can be signed later (multiple times if desired) to make it valid before sending to the +/// network for execution. +pub struct TransactionV1Builder<'a> { + chain_name: Option, + timestamp: Timestamp, + ttl: TimeDiff, + body: TransactionV1Body, + pricing_mode: PricingMode, + payment_amount: Option, + initiator_addr: Option, + #[cfg(not(any(feature = "testing", test)))] + secret_key: Option<&'a SecretKey>, + #[cfg(any(feature = "testing", test))] + secret_key: Option, + #[cfg(any(feature = "testing", test))] + invalid_approvals: Vec, + _phantom_data: PhantomData<&'a ()>, +} + +impl<'a> TransactionV1Builder<'a> { + /// The default time-to-live for transactions, i.e. 30 minutes. + pub const DEFAULT_TTL: TimeDiff = TimeDiff::from_millis(30 * 60 * 1_000); + /// The default pricing mode for transactions, i.e. multiplier of 1. + pub const DEFAULT_PRICING_MODE: PricingMode = PricingMode::GasPriceMultiplier(1); + /// The default runtime for transactions, i.e. Casper Version 1 Virtual Machine. + pub const DEFAULT_RUNTIME: TransactionRuntime = TransactionRuntime::VmCasperV1; + /// The default scheduling for transactions, i.e. `Standard`. 
+ pub const DEFAULT_SCHEDULING: TransactionScheduling = TransactionScheduling::Standard; + + fn new(body: TransactionV1Body) -> Self { + TransactionV1Builder { + chain_name: None, + timestamp: Timestamp::now(), + ttl: Self::DEFAULT_TTL, + body, + pricing_mode: Self::DEFAULT_PRICING_MODE, + payment_amount: None, + initiator_addr: None, + secret_key: None, + _phantom_data: PhantomData, + #[cfg(any(feature = "testing", test))] + invalid_approvals: vec![], + } + } + + /// Returns a new `TransactionV1Builder` suitable for building a native transfer transaction. + pub fn new_transfer>( + source: URef, + target: URef, + amount: A, + maybe_to: Option, + maybe_id: Option, + ) -> Result { + let args = arg_handling::new_transfer_args(source, target, amount, maybe_to, maybe_id)?; + let body = TransactionV1Body::new( + args, + TransactionTarget::Native, + TransactionEntryPoint::Transfer, + Self::DEFAULT_SCHEDULING, + ); + Ok(TransactionV1Builder::new(body)) + } + + /// Returns a new `TransactionV1Builder` suitable for building a native add_bid transaction. + pub fn new_add_bid>( + public_key: PublicKey, + delegation_rate: u8, + amount: A, + ) -> Result { + let args = arg_handling::new_add_bid_args(public_key, delegation_rate, amount)?; + let body = TransactionV1Body::new( + args, + TransactionTarget::Native, + TransactionEntryPoint::AddBid, + Self::DEFAULT_SCHEDULING, + ); + Ok(TransactionV1Builder::new(body)) + } + + /// Returns a new `TransactionV1Builder` suitable for building a native withdraw_bid + /// transaction. + pub fn new_withdraw_bid>( + public_key: PublicKey, + amount: A, + ) -> Result { + let args = arg_handling::new_withdraw_bid_args(public_key, amount)?; + let body = TransactionV1Body::new( + args, + TransactionTarget::Native, + TransactionEntryPoint::WithdrawBid, + Self::DEFAULT_SCHEDULING, + ); + Ok(TransactionV1Builder::new(body)) + } + + /// Returns a new `TransactionV1Builder` suitable for building a native delegate transaction. 
+ pub fn new_delegate>( + delegator: PublicKey, + validator: PublicKey, + amount: A, + ) -> Result { + let args = arg_handling::new_delegate_args(delegator, validator, amount)?; + let body = TransactionV1Body::new( + args, + TransactionTarget::Native, + TransactionEntryPoint::Delegate, + Self::DEFAULT_SCHEDULING, + ); + Ok(TransactionV1Builder::new(body)) + } + + /// Returns a new `TransactionV1Builder` suitable for building a native undelegate transaction. + pub fn new_undelegate>( + delegator: PublicKey, + validator: PublicKey, + amount: A, + ) -> Result { + let args = arg_handling::new_undelegate_args(delegator, validator, amount)?; + let body = TransactionV1Body::new( + args, + TransactionTarget::Native, + TransactionEntryPoint::Undelegate, + Self::DEFAULT_SCHEDULING, + ); + Ok(TransactionV1Builder::new(body)) + } + + /// Returns a new `TransactionV1Builder` suitable for building a native redelegate transaction. + pub fn new_redelegate>( + delegator: PublicKey, + validator: PublicKey, + amount: A, + new_validator: PublicKey, + ) -> Result { + let args = arg_handling::new_redelegate_args(delegator, validator, amount, new_validator)?; + let body = TransactionV1Body::new( + args, + TransactionTarget::Native, + TransactionEntryPoint::Redelegate, + Self::DEFAULT_SCHEDULING, + ); + Ok(TransactionV1Builder::new(body)) + } + + fn new_targeting_stored>( + id: TransactionInvocationTarget, + entry_point: E, + ) -> Self { + let target = TransactionTarget::Stored { + id, + runtime: Self::DEFAULT_RUNTIME, + }; + let body = TransactionV1Body::new( + RuntimeArgs::new(), + target, + TransactionEntryPoint::Custom(entry_point.into()), + Self::DEFAULT_SCHEDULING, + ); + TransactionV1Builder::new(body) + } + + /// Returns a new `TransactionV1Builder` suitable for building a transaction targeting a stored + /// entity. 
+ pub fn new_targeting_invocable_entity>( + addr: EntityAddr, + entry_point: E, + ) -> Self { + let id = TransactionInvocationTarget::new_invocable_entity(addr); + Self::new_targeting_stored(id, entry_point) + } + + /// Returns a new `TransactionV1Builder` suitable for building a transaction targeting a stored + /// entity via its alias. + pub fn new_targeting_invocable_entity_via_alias, E: Into>( + alias: A, + entry_point: E, + ) -> Self { + let id = TransactionInvocationTarget::new_invocable_entity_alias(alias.into()); + Self::new_targeting_stored(id, entry_point) + } + + /// Returns a new `TransactionV1Builder` suitable for building a transaction targeting a + /// package. + pub fn new_targeting_package>( + addr: PackageAddr, + version: Option, + entry_point: E, + ) -> Self { + let id = TransactionInvocationTarget::new_package(addr, version); + Self::new_targeting_stored(id, entry_point) + } + + /// Returns a new `TransactionV1Builder` suitable for building a transaction targeting a + /// package via its alias. + pub fn new_targeting_package_via_alias, E: Into>( + alias: A, + version: Option, + entry_point: E, + ) -> Self { + let id = TransactionInvocationTarget::new_package_alias(alias.into(), version); + Self::new_targeting_stored(id, entry_point) + } + + /// Returns a new `TransactionV1Builder` suitable for building a transaction for running session + /// logic, i.e. compiled Wasm. + pub fn new_session>( + kind: TransactionSessionKind, + module_bytes: Bytes, + entry_point: E, + ) -> Self { + let target = TransactionTarget::Session { + kind, + module_bytes, + runtime: Self::DEFAULT_RUNTIME, + }; + let body = TransactionV1Body::new( + RuntimeArgs::new(), + target, + TransactionEntryPoint::Custom(entry_point.into()), + Self::DEFAULT_SCHEDULING, + ); + TransactionV1Builder::new(body) + } + + /// Returns a new `TransactionV1Builder` which will build a random, valid but possibly expired + /// transaction. 
+ /// + /// The transaction can be made invalid in the following ways: + /// * unsigned by calling `with_no_secret_key` + /// * given an invalid approval by calling `with_invalid_approval` + #[cfg(any(feature = "testing", test))] + pub fn new_random(rng: &mut TestRng) -> Self { + let secret_key = SecretKey::random(rng); + let ttl_millis = rng.gen_range(60_000..TransactionConfig::default().max_ttl.millis()); + let body = TransactionV1Body::random(rng); + TransactionV1Builder { + chain_name: Some(rng.random_string(5..10)), + timestamp: Timestamp::random(rng), + ttl: TimeDiff::from_millis(ttl_millis), + body, + pricing_mode: PricingMode::random(rng), + payment_amount: Some( + rng.gen_range(2_500_000_000..=TransactionConfig::default().block_gas_limit), + ), + initiator_addr: Some(InitiatorAddr::PublicKey(PublicKey::from(&secret_key))), + secret_key: Some(secret_key), + _phantom_data: PhantomData, + invalid_approvals: vec![], + } + } + + /// Sets the `chain_name` in the transaction. + /// + /// Must be provided or building will fail. + pub fn with_chain_name>(mut self, chain_name: C) -> Self { + self.chain_name = Some(chain_name.into()); + self + } + + /// Sets the `timestamp` in the transaction. + /// + /// If not provided, the timestamp will be set to the time when the builder was constructed. + pub fn with_timestamp(mut self, timestamp: Timestamp) -> Self { + self.timestamp = timestamp; + self + } + + /// Sets the `ttl` (time-to-live) in the transaction. + /// + /// If not provided, the ttl will be set to [`Self::DEFAULT_TTL`]. + pub fn with_ttl(mut self, ttl: TimeDiff) -> Self { + self.ttl = ttl; + self + } + + /// Sets the `pricing_mode` in the transaction. + /// + /// If not provided, the pricing mode will be set to [`Self::DEFAULT_PRICING_MODE`]. + pub fn with_pricing_mode(mut self, pricing_mode: PricingMode) -> Self { + self.pricing_mode = pricing_mode; + self + } + + /// Sets the `payment_amount` in the transaction. 
+ /// + /// If not provided, `payment_amount` will be set to `None`. + pub fn with_payment_amount(mut self, payment_amount: u64) -> Self { + self.payment_amount = Some(payment_amount); + self + } + + /// Sets the `initiator_addr` in the transaction. + /// + /// If not provided, the public key derived from the secret key used in the builder will be + /// used as the `InitiatorAddr::PublicKey` in the transaction. + pub fn with_initiator_addr(mut self, initiator_addr: InitiatorAddr) -> Self { + self.initiator_addr = Some(initiator_addr); + self + } + + /// Sets the secret key used to sign the transaction on calling [`build`](Self::build). + /// + /// If not provided, the transaction can still be built, but will be unsigned and will be + /// invalid until subsequently signed. + pub fn with_secret_key(mut self, secret_key: &'a SecretKey) -> Self { + #[cfg(not(any(feature = "testing", test)))] + { + self.secret_key = Some(secret_key); + } + #[cfg(any(feature = "testing", test))] + { + self.secret_key = Some( + SecretKey::from_der(secret_key.to_der().expect("should der-encode")) + .expect("should der-decode"), + ); + } + self + } + + /// Appends the given runtime arg into the body's `args`. + pub fn with_runtime_arg>(mut self, key: K, cl_value: CLValue) -> Self { + self.body.args.insert_cl_value(key, cl_value); + self + } + + /// Sets the runtime args in the transaction. + /// + /// NOTE: this overwrites any existing runtime args. To append to existing args, use + /// [`TransactionV1Builder::with_runtime_arg`]. + pub fn with_runtime_args(mut self, args: RuntimeArgs) -> Self { + self.body.args = args; + self + } + + /// Sets the runtime for the transaction. + /// + /// If not provided, the runtime will be set to [`Self::DEFAULT_RUNTIME`]. + /// + /// NOTE: This has no effect for native transactions, i.e. where the `body.target` is + /// `TransactionTarget::Native`. 
+ pub fn with_runtime(mut self, runtime: TransactionRuntime) -> Self { + match &mut self.body.target { + TransactionTarget::Native => {} + TransactionTarget::Stored { + runtime: existing_runtime, + .. + } => { + *existing_runtime = runtime; + } + TransactionTarget::Session { + runtime: existing_runtime, + .. + } => { + *existing_runtime = runtime; + } + } + self + } + + /// Sets the scheduling for the transaction. + /// + /// If not provided, the scheduling will be set to [`Self::DEFAULT_SCHEDULING`]. + pub fn with_scheduling(mut self, scheduling: TransactionScheduling) -> Self { + self.body.scheduling = scheduling; + self + } + + /// Sets the secret key to `None`, meaning the transaction can still be built but will be + /// unsigned and will be invalid until subsequently signed. + #[cfg(any(feature = "testing", test))] + pub fn with_no_secret_key(mut self) -> Self { + self.secret_key = None; + self + } + + /// Sets an invalid approval in the transaction. + #[cfg(any(feature = "testing", test))] + pub fn with_invalid_approval(mut self, rng: &mut TestRng) -> Self { + let secret_key = SecretKey::random(rng); + let hash = TransactionV1Hash::random(rng); + let approval = TransactionV1Approval::create(&hash, &secret_key); + self.invalid_approvals.push(approval); + self + } + + /// Returns the new transaction, or an error if non-defaulted fields were not set. + /// + /// For more info, see [the `TransactionBuilder` documentation](TransactionV1Builder). 
+ pub fn build(self) -> Result { + self.do_build() + } + + #[cfg(not(any(feature = "testing", test)))] + fn do_build(self) -> Result { + let initiator_addr_and_secret_key = match (self.initiator_addr, self.secret_key) { + (Some(initiator_addr), Some(secret_key)) => InitiatorAddrAndSecretKey::Both { + initiator_addr, + secret_key, + }, + (Some(initiator_addr), None) => { + InitiatorAddrAndSecretKey::InitiatorAddr(initiator_addr) + } + (None, Some(secret_key)) => InitiatorAddrAndSecretKey::SecretKey(secret_key), + (None, None) => return Err(TransactionV1BuilderError::MissingInitiatorAddr), + }; + + let chain_name = self + .chain_name + .ok_or(TransactionV1BuilderError::MissingChainName)?; + + let transaction = TransactionV1::build( + chain_name, + self.timestamp, + self.ttl, + self.body, + self.pricing_mode, + self.payment_amount, + initiator_addr_and_secret_key, + ); + + Ok(transaction) + } + + #[cfg(any(feature = "testing", test))] + fn do_build(self) -> Result { + let initiator_addr_and_secret_key = match (self.initiator_addr, &self.secret_key) { + (Some(initiator_addr), Some(secret_key)) => InitiatorAddrAndSecretKey::Both { + initiator_addr, + secret_key, + }, + (Some(initiator_addr), None) => { + InitiatorAddrAndSecretKey::InitiatorAddr(initiator_addr) + } + (None, Some(secret_key)) => InitiatorAddrAndSecretKey::SecretKey(secret_key), + (None, None) => return Err(TransactionV1BuilderError::MissingInitiatorAddr), + }; + + let chain_name = self + .chain_name + .ok_or(TransactionV1BuilderError::MissingChainName)?; + + let mut transaction = TransactionV1::build( + chain_name, + self.timestamp, + self.ttl, + self.body, + self.pricing_mode, + self.payment_amount, + initiator_addr_and_secret_key, + ); + + transaction.apply_approvals(self.invalid_approvals); + + Ok(transaction) + } +} diff --git a/casper_types_ver_2_0/src/transaction/transaction_v1/transaction_v1_builder/error.rs b/casper_types_ver_2_0/src/transaction/transaction_v1/transaction_v1_builder/error.rs new 
file mode 100644 index 00000000..f9212100 --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/transaction_v1/transaction_v1_builder/error.rs @@ -0,0 +1,44 @@ +use core::fmt::{self, Display, Formatter}; +#[cfg(feature = "std")] +use std::error::Error as StdError; + +#[cfg(doc)] +use super::{TransactionV1, TransactionV1Builder}; + +/// Errors returned while building a [`TransactionV1`] using a [`TransactionV1Builder`]. +#[derive(Clone, Eq, PartialEq, Debug)] +#[non_exhaustive] +pub enum TransactionV1BuilderError { + /// Failed to build transaction due to missing initiator_addr. + /// + /// Call [`TransactionV1Builder::with_initiator_addr`] or + /// [`TransactionV1Builder::with_secret_key`] before calling [`TransactionV1Builder::build`]. + MissingInitiatorAddr, + /// Failed to build transaction due to missing chain name. + /// + /// Call [`TransactionV1Builder::with_chain_name`] before calling + /// [`TransactionV1Builder::build`]. + MissingChainName, +} + +impl Display for TransactionV1BuilderError { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + TransactionV1BuilderError::MissingInitiatorAddr => { + write!( + formatter, + "transaction requires account - use `with_account` or `with_secret_key`" + ) + } + TransactionV1BuilderError::MissingChainName => { + write!( + formatter, + "transaction requires chain name - use `with_chain_name`" + ) + } + } + } +} + +#[cfg(feature = "std")] +impl StdError for TransactionV1BuilderError {} diff --git a/casper_types_ver_2_0/src/transaction/transaction_v1/transaction_v1_hash.rs b/casper_types_ver_2_0/src/transaction/transaction_v1/transaction_v1_hash.rs new file mode 100644 index 00000000..c7ba947d --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/transaction_v1/transaction_v1_hash.rs @@ -0,0 +1,117 @@ +use alloc::vec::Vec; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; 
+#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(doc)] +use super::TransactionV1; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + Digest, +}; + +/// The cryptographic hash of a [`TransactionV1`]. +#[derive( + Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug, Default, +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "Hex-encoded TransactionV1 hash.") +)] +#[serde(deny_unknown_fields)] +pub struct TransactionV1Hash(Digest); + +impl TransactionV1Hash { + /// The number of bytes in a `TransactionV1Hash` digest. + pub const LENGTH: usize = Digest::LENGTH; + + /// Constructs a new `TransactionV1Hash`. + pub const fn new(hash: Digest) -> Self { + TransactionV1Hash(hash) + } + + /// Returns the wrapped inner digest. + pub fn inner(&self) -> &Digest { + &self.0 + } + + /// Returns a new `TransactionV1Hash` directly initialized with the provided bytes; no hashing + /// is done. + #[cfg(any(feature = "testing", test))] + pub const fn from_raw(raw_digest: [u8; Self::LENGTH]) -> Self { + TransactionV1Hash(Digest::from_raw(raw_digest)) + } + + /// Returns a random `TransactionV1Hash`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + let hash = rng.gen::<[u8; Digest::LENGTH]>().into(); + TransactionV1Hash(hash) + } +} + +impl From for TransactionV1Hash { + fn from(digest: Digest) -> Self { + TransactionV1Hash(digest) + } +} + +impl From for Digest { + fn from(transaction_hash: TransactionV1Hash) -> Self { + transaction_hash.0 + } +} + +impl Display for TransactionV1Hash { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!(formatter, "transaction-v1-hash({})", self.0) + } +} + +impl AsRef<[u8]> for TransactionV1Hash { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + +impl ToBytes for TransactionV1Hash { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for TransactionV1Hash { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + Digest::from_bytes(bytes).map(|(inner, remainder)| (TransactionV1Hash(inner), remainder)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let hash = TransactionV1Hash::random(rng); + bytesrepr::test_serialization_roundtrip(&hash); + } +} diff --git a/casper_types_ver_2_0/src/transaction/transaction_v1/transaction_v1_header.rs b/casper_types_ver_2_0/src/transaction/transaction_v1/transaction_v1_header.rs new file mode 100644 index 00000000..65926bee --- /dev/null +++ b/casper_types_ver_2_0/src/transaction/transaction_v1/transaction_v1_header.rs @@ -0,0 +1,244 @@ +use alloc::{ + string::{String, ToString}, + vec::Vec, +}; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +#[cfg(any(feature = "std", test))] +use 
serde::{Deserialize, Serialize}; +#[cfg(any(feature = "std", test))] +use tracing::debug; + +#[cfg(doc)] +use super::TransactionV1; +use super::{InitiatorAddr, PricingMode}; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + Digest, TimeDiff, Timestamp, +}; +#[cfg(any(feature = "std", test))] +use crate::{TransactionConfig, TransactionV1ConfigFailure, TransactionV1Hash}; + +/// The header portion of a [`TransactionV1`]. +#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Debug)] +#[cfg_attr( + any(feature = "std", test), + derive(Serialize, Deserialize), + serde(deny_unknown_fields) +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "The header portion of a TransactionV1.") +)] +pub struct TransactionV1Header { + chain_name: String, + timestamp: Timestamp, + ttl: TimeDiff, + body_hash: Digest, + pricing_mode: PricingMode, + payment_amount: Option, + initiator_addr: InitiatorAddr, +} + +impl TransactionV1Header { + #[cfg(any(feature = "std", feature = "json-schema", test))] + pub(super) fn new( + chain_name: String, + timestamp: Timestamp, + ttl: TimeDiff, + body_hash: Digest, + pricing_mode: PricingMode, + payment_amount: Option, + initiator_addr: InitiatorAddr, + ) -> Self { + TransactionV1Header { + chain_name, + timestamp, + ttl, + body_hash, + pricing_mode, + payment_amount, + initiator_addr, + } + } + + /// Computes the hash identifying this transaction. + #[cfg(any(feature = "std", test))] + pub fn compute_hash(&self) -> TransactionV1Hash { + TransactionV1Hash::new(Digest::hash( + self.to_bytes() + .unwrap_or_else(|error| panic!("should serialize header: {}", error)), + )) + } + + /// Returns the name of the chain the transaction should be executed on. + pub fn chain_name(&self) -> &str { + &self.chain_name + } + + /// Returns the creation timestamp of the transaction. 
+ pub fn timestamp(&self) -> Timestamp { + self.timestamp + } + + /// Returns the duration after the creation timestamp for which the transaction will stay valid. + /// + /// After this duration has ended, the transaction will be considered expired. + pub fn ttl(&self) -> TimeDiff { + self.ttl + } + + /// Returns `true` if the transaction has expired. + pub fn expired(&self, current_instant: Timestamp) -> bool { + self.expires() < current_instant + } + + /// Returns the hash of the body of the transaction. + pub fn body_hash(&self) -> &Digest { + &self.body_hash + } + + /// Returns the pricing mode for the transaction. + pub fn pricing_mode(&self) -> &PricingMode { + &self.pricing_mode + } + + /// Returns the payment amount for the transaction. + pub fn payment_amount(&self) -> Option { + self.payment_amount + } + + /// Returns the address of the initiator of the transaction. + pub fn initiator_addr(&self) -> &InitiatorAddr { + &self.initiator_addr + } + + /// Returns `Ok` if and only if the TTL is within limits, and the timestamp is not later than + /// `at + timestamp_leeway`. Does NOT check for expiry. 
+ #[cfg(any(feature = "std", test))] + pub fn is_valid( + &self, + config: &TransactionConfig, + timestamp_leeway: TimeDiff, + at: Timestamp, + transaction_hash: &TransactionV1Hash, + ) -> Result<(), TransactionV1ConfigFailure> { + if self.ttl() > config.max_ttl { + debug!( + %transaction_hash, + transaction_header = %self, + max_ttl = %config.max_ttl, + "transaction ttl excessive" + ); + return Err(TransactionV1ConfigFailure::ExcessiveTimeToLive { + max_ttl: config.max_ttl, + got: self.ttl(), + }); + } + + if self.timestamp() > at + timestamp_leeway { + debug!( + %transaction_hash, transaction_header = %self, %at, + "transaction timestamp in the future" + ); + return Err(TransactionV1ConfigFailure::TimestampInFuture { + validation_timestamp: at, + timestamp_leeway, + got: self.timestamp(), + }); + } + + Ok(()) + } + + /// Returns the timestamp of when the transaction expires, i.e. `self.timestamp + self.ttl`. + pub fn expires(&self) -> Timestamp { + self.timestamp.saturating_add(self.ttl) + } + + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub(super) fn invalidate(&mut self) { + self.chain_name.clear(); + } +} + +impl ToBytes for TransactionV1Header { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.chain_name.write_bytes(writer)?; + self.timestamp.write_bytes(writer)?; + self.ttl.write_bytes(writer)?; + self.body_hash.write_bytes(writer)?; + self.pricing_mode.write_bytes(writer)?; + self.payment_amount.write_bytes(writer)?; + self.initiator_addr.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.chain_name.serialized_length() + + self.timestamp.serialized_length() + + self.ttl.serialized_length() + + self.body_hash.serialized_length() + + self.pricing_mode.serialized_length() + + self.payment_amount.serialized_length() + + 
self.initiator_addr.serialized_length() + } +} + +impl FromBytes for TransactionV1Header { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (chain_name, remainder) = String::from_bytes(bytes)?; + let (timestamp, remainder) = Timestamp::from_bytes(remainder)?; + let (ttl, remainder) = TimeDiff::from_bytes(remainder)?; + let (body_hash, remainder) = Digest::from_bytes(remainder)?; + let (pricing_mode, remainder) = PricingMode::from_bytes(remainder)?; + let (payment_amount, remainder) = Option::::from_bytes(remainder)?; + let (initiator_addr, remainder) = InitiatorAddr::from_bytes(remainder)?; + let transaction_header = TransactionV1Header { + chain_name, + timestamp, + ttl, + body_hash, + pricing_mode, + payment_amount, + initiator_addr, + }; + Ok((transaction_header, remainder)) + } +} + +impl Display for TransactionV1Header { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + #[cfg(any(feature = "std", test))] + let hash = self.compute_hash(); + #[cfg(not(any(feature = "std", test)))] + let hash = "unknown"; + write!( + formatter, + "transaction-v1-header[{}, chain_name: {}, timestamp: {}, ttl: {}, pricing mode: {}, \ + payment_amount: {}, initiator: {}]", + hash, + self.chain_name, + self.timestamp, + self.ttl, + self.pricing_mode, + if let Some(payment) = self.payment_amount { + payment.to_string() + } else { + "none".to_string() + }, + self.initiator_addr + ) + } +} diff --git a/casper_types_ver_2_0/src/transfer.rs b/casper_types_ver_2_0/src/transfer.rs new file mode 100644 index 00000000..38dfe8f0 --- /dev/null +++ b/casper_types_ver_2_0/src/transfer.rs @@ -0,0 +1,414 @@ +use alloc::{format, string::String, vec::Vec}; +use core::{ + array::TryFromSliceError, + convert::TryFrom, + fmt::{self, Debug, Display, Formatter}, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +#[cfg(feature = "json-schema")] +use schemars::{gen::SchemaGenerator, 
schema::Schema, JsonSchema};
use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer};

use crate::{
    account::AccountHash,
    bytesrepr::{self, FromBytes, ToBytes},
    checksummed_hex, serde_helpers, CLType, CLTyped, DeployHash, URef, U512,
};

/// The length of a transfer address.
pub const TRANSFER_ADDR_LENGTH: usize = 32;
pub(super) const TRANSFER_ADDR_FORMATTED_STRING_PREFIX: &str = "transfer-";

/// Represents a transfer from one purse to another
#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Serialize, Deserialize, Default)]
#[cfg_attr(feature = "datasize", derive(DataSize))]
#[cfg_attr(feature = "json-schema", derive(JsonSchema))]
#[serde(deny_unknown_fields)]
pub struct Transfer {
    /// Deploy that created the transfer
    #[serde(with = "serde_helpers::deploy_hash_as_array")]
    #[cfg_attr(
        feature = "json-schema",
        schemars(
            with = "DeployHash",
            description = "Hex-encoded Deploy hash of Deploy that created the transfer."
        )
    )]
    pub deploy_hash: DeployHash,
    /// Account from which transfer was executed
    pub from: AccountHash,
    /// Account to which funds are transferred
    pub to: Option<AccountHash>,
    /// Source purse
    pub source: URef,
    /// Target purse
    pub target: URef,
    /// Transfer amount
    pub amount: U512,
    /// Gas
    pub gas: U512,
    /// User-defined id
    pub id: Option<u64>,
}

impl Transfer {
    /// Creates a [`Transfer`].
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        deploy_hash: DeployHash,
        from: AccountHash,
        to: Option<AccountHash>,
        source: URef,
        target: URef,
        amount: U512,
        gas: U512,
        id: Option<u64>,
    ) -> Self {
        Transfer {
            deploy_hash,
            from,
            to,
            source,
            target,
            amount,
            gas,
            id,
        }
    }
}

impl FromBytes for Transfer {
    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {
        // Fields are decoded in the same order `ToBytes` writes them.
        let (deploy_hash, rem) = FromBytes::from_bytes(bytes)?;
        let (from, rem) = AccountHash::from_bytes(rem)?;
        let (to, rem) = <Option<AccountHash>>::from_bytes(rem)?;
        let (source, rem) = URef::from_bytes(rem)?;
        let (target, rem) = URef::from_bytes(rem)?;
        let (amount, rem) = U512::from_bytes(rem)?;
        let (gas, rem) = U512::from_bytes(rem)?;
        let (id, rem) = <Option<u64>>::from_bytes(rem)?;
        Ok((
            Transfer {
                deploy_hash,
                from,
                to,
                source,
                target,
                amount,
                gas,
                id,
            },
            rem,
        ))
    }
}

impl ToBytes for Transfer {
    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {
        let mut result = bytesrepr::allocate_buffer(self)?;
        self.deploy_hash.write_bytes(&mut result)?;
        self.from.write_bytes(&mut result)?;
        self.to.write_bytes(&mut result)?;
        self.source.write_bytes(&mut result)?;
        self.target.write_bytes(&mut result)?;
        self.amount.write_bytes(&mut result)?;
        self.gas.write_bytes(&mut result)?;
        self.id.write_bytes(&mut result)?;
        Ok(result)
    }

    fn serialized_length(&self) -> usize {
        self.deploy_hash.serialized_length()
            + self.from.serialized_length()
            + self.to.serialized_length()
            + self.source.serialized_length()
            + self.target.serialized_length()
            + self.amount.serialized_length()
            + self.gas.serialized_length()
            + self.id.serialized_length()
    }

    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {
        self.deploy_hash.write_bytes(writer)?;
        self.from.write_bytes(writer)?;
        self.to.write_bytes(writer)?;
        self.source.write_bytes(writer)?;
        self.target.write_bytes(writer)?;
        self.amount.write_bytes(writer)?;
        self.gas.write_bytes(writer)?;
        self.id.write_bytes(writer)?;
        Ok(())
    }
}

/// Error returned when decoding a `TransferAddr` from a formatted string.
#[derive(Debug)]
#[non_exhaustive]
pub enum FromStrError {
    /// The prefix is invalid.
    InvalidPrefix,
    /// The address is not valid hex.
    Hex(base16::DecodeError),
    /// The slice is the wrong length.
    Length(TryFromSliceError),
}

impl From<base16::DecodeError> for FromStrError {
    fn from(error: base16::DecodeError) -> Self {
        FromStrError::Hex(error)
    }
}

impl From<TryFromSliceError> for FromStrError {
    fn from(error: TryFromSliceError) -> Self {
        FromStrError::Length(error)
    }
}

impl Display for FromStrError {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        match self {
            FromStrError::InvalidPrefix => write!(f, "prefix is not 'transfer-'"),
            FromStrError::Hex(error) => {
                write!(f, "failed to decode address portion from hex: {}", error)
            }
            FromStrError::Length(error) => write!(f, "address portion is wrong length: {}", error),
        }
    }
}

/// A newtype wrapping a [u8; [TRANSFER_ADDR_LENGTH]] which is the raw bytes of the
/// transfer address.
#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy)]
#[cfg_attr(feature = "datasize", derive(DataSize))]
pub struct TransferAddr([u8; TRANSFER_ADDR_LENGTH]);

impl TransferAddr {
    /// Constructs a new `TransferAddr` instance from the raw bytes.
    pub const fn new(value: [u8; TRANSFER_ADDR_LENGTH]) -> TransferAddr {
        TransferAddr(value)
    }

    /// Returns the raw bytes of the transfer address as an array.
    pub fn value(&self) -> [u8; TRANSFER_ADDR_LENGTH] {
        self.0
    }

    /// Returns the raw bytes of the transfer address as a `slice`.
    pub fn as_bytes(&self) -> &[u8] {
        &self.0
    }

    /// Formats the `TransferAddr` as a prefixed, hex-encoded string.
    pub fn to_formatted_string(self) -> String {
        format!(
            "{}{}",
            TRANSFER_ADDR_FORMATTED_STRING_PREFIX,
            base16::encode_lower(&self.0),
        )
    }

    /// Parses a string formatted as per `Self::to_formatted_string()` into a `TransferAddr`.
    pub fn from_formatted_str(input: &str) -> Result<Self, FromStrError> {
        let remainder = input
            .strip_prefix(TRANSFER_ADDR_FORMATTED_STRING_PREFIX)
            .ok_or(FromStrError::InvalidPrefix)?;
        let bytes =
            <[u8; TRANSFER_ADDR_LENGTH]>::try_from(checksummed_hex::decode(remainder)?.as_ref())?;
        Ok(TransferAddr(bytes))
    }
}

#[cfg(feature = "json-schema")]
impl JsonSchema for TransferAddr {
    fn schema_name() -> String {
        String::from("TransferAddr")
    }

    fn json_schema(gen: &mut SchemaGenerator) -> Schema {
        let schema = gen.subschema_for::<String>();
        let mut schema_object = schema.into_object();
        schema_object.metadata().description = Some("Hex-encoded transfer address.".to_string());
        schema_object.into()
    }
}

impl Serialize for TransferAddr {
    // Human-readable formats (JSON) get the prefixed hex string; binary formats get raw bytes.
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        if serializer.is_human_readable() {
            self.to_formatted_string().serialize(serializer)
        } else {
            self.0.serialize(serializer)
        }
    }
}

impl<'de> Deserialize<'de> for TransferAddr {
    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        if deserializer.is_human_readable() {
            let formatted_string = String::deserialize(deserializer)?;
            TransferAddr::from_formatted_str(&formatted_string).map_err(SerdeError::custom)
        } else {
            let bytes = <[u8; TRANSFER_ADDR_LENGTH]>::deserialize(deserializer)?;
            Ok(TransferAddr(bytes))
        }
    }
}

impl Display for TransferAddr {
    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
        write!(f, "{}", base16::encode_lower(&self.0))
    }
}

impl Debug for TransferAddr {
    fn fmt(&self, f: &mut Formatter) -> core::fmt::Result {
        write!(f, "TransferAddr({})", base16::encode_lower(&self.0))
    }
}

impl CLTyped for TransferAddr {
    fn cl_type() -> CLType {
        CLType::ByteArray(TRANSFER_ADDR_LENGTH as u32)
    }
}

impl ToBytes for TransferAddr {
    #[inline(always)]
    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {
        self.0.to_bytes()
    }

    #[inline(always)]
    fn serialized_length(&self) -> usize {
        self.0.serialized_length()
    }

    #[inline(always)]
    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {
        self.0.write_bytes(writer)?;
        Ok(())
    }
}

impl FromBytes for TransferAddr {
    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {
        let (bytes, remainder) = FromBytes::from_bytes(bytes)?;
        Ok((TransferAddr::new(bytes), remainder))
    }
}

impl AsRef<[u8]> for TransferAddr {
    fn as_ref(&self) -> &[u8] {
        self.0.as_ref()
    }
}

impl Distribution<TransferAddr> for Standard {
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> TransferAddr {
        TransferAddr::new(rng.gen())
    }
}

/// Generators for [`Transfer`]
#[cfg(any(feature = "testing", feature = "gens", test))]
pub mod gens {
    use proptest::prelude::{prop::option, Arbitrary, Strategy};

    use crate::{
        deploy_info::gens::{account_hash_arb, deploy_hash_arb},
        gens::{u512_arb, uref_arb},
        Transfer,
    };

    /// Creates an arbitrary [`Transfer`]
    pub fn transfer_arb() -> impl Strategy<Value = Transfer> {
        (
            deploy_hash_arb(),
            account_hash_arb(),
            option::of(account_hash_arb()),
            uref_arb(),
            uref_arb(),
            u512_arb(),
            u512_arb(),
            option::of(<u64>::arbitrary()),
        )
            .prop_map(|(deploy_hash, from, to, source, target, amount, gas, id)| {
                Transfer {
                    deploy_hash,
                    from,
                    to,
                    source,
                    target,
                    amount,
                    gas,
                    id,
                }
            })
    }
}

#[cfg(test)]
mod tests {
    use proptest::prelude::*;

    use crate::bytesrepr;

    use super::*;

    proptest! {
        #[test]
        fn test_serialization_roundtrip(transfer in gens::transfer_arb()) {
            bytesrepr::test_serialization_roundtrip(&transfer)
        }
    }

    #[test]
    fn transfer_addr_from_str() {
        let transfer_address = TransferAddr([4; 32]);
        let encoded = transfer_address.to_formatted_string();
        let decoded = TransferAddr::from_formatted_str(&encoded).unwrap();
        assert_eq!(transfer_address, decoded);

        let invalid_prefix =
            "transfe-0000000000000000000000000000000000000000000000000000000000000000";
        assert!(TransferAddr::from_formatted_str(invalid_prefix).is_err());

        let invalid_prefix =
            "transfer0000000000000000000000000000000000000000000000000000000000000000";
        assert!(TransferAddr::from_formatted_str(invalid_prefix).is_err());

        let short_addr = "transfer-00000000000000000000000000000000000000000000000000000000000000";
        assert!(TransferAddr::from_formatted_str(short_addr).is_err());

        let long_addr =
            "transfer-000000000000000000000000000000000000000000000000000000000000000000";
        assert!(TransferAddr::from_formatted_str(long_addr).is_err());

        let invalid_hex =
            "transfer-000000000000000000000000000000000000000000000000000000000000000g";
        assert!(TransferAddr::from_formatted_str(invalid_hex).is_err());
    }

    #[test]
    fn transfer_addr_serde_roundtrip() {
        let transfer_address = TransferAddr([255; 32]);
        let serialized = bincode::serialize(&transfer_address).unwrap();
        let decoded = bincode::deserialize(&serialized).unwrap();
        assert_eq!(transfer_address, decoded);
    }

    #[test]
    fn transfer_addr_json_roundtrip() {
        let transfer_address = TransferAddr([255; 32]);
        let json_string = serde_json::to_string_pretty(&transfer_address).unwrap();
        let decoded = serde_json::from_str(&json_string).unwrap();
        assert_eq!(transfer_address, decoded);
    }
}
b/casper_types_ver_2_0/src/transfer_result.rs @@ -0,0 +1,39 @@ +use core::fmt::Debug; + +use crate::ApiError; + +/// The result of an attempt to transfer between purses. +pub type TransferResult = Result; + +/// The result of a successful transfer between purses. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +#[repr(i32)] +pub enum TransferredTo { + /// The destination account already existed. + ExistingAccount = 0, + /// The destination account was created. + NewAccount = 1, +} + +impl TransferredTo { + /// Converts an `i32` to a [`TransferResult`], where: + /// * `0` represents `Ok(TransferredTo::ExistingAccount)`, + /// * `1` represents `Ok(TransferredTo::NewAccount)`, + /// * all other inputs are mapped to `Err(ApiError::Transfer)`. + pub fn result_from(value: i32) -> TransferResult { + match value { + x if x == TransferredTo::ExistingAccount as i32 => Ok(TransferredTo::ExistingAccount), + x if x == TransferredTo::NewAccount as i32 => Ok(TransferredTo::NewAccount), + _ => Err(ApiError::Transfer), + } + } + + // This conversion is not intended to be used by third party crates. 
+ #[doc(hidden)] + pub fn i32_from(result: TransferResult) -> i32 { + match result { + Ok(transferred_to) => transferred_to as i32, + Err(_) => 2, + } + } +} diff --git a/casper_types_ver_2_0/src/uint.rs b/casper_types_ver_2_0/src/uint.rs new file mode 100644 index 00000000..bdb30a45 --- /dev/null +++ b/casper_types_ver_2_0/src/uint.rs @@ -0,0 +1,1001 @@ +use alloc::{ + format, + string::{String, ToString}, + vec::Vec, +}; +use core::{ + fmt::{self, Formatter}, + iter::Sum, + ops::Add, +}; + +use num_integer::Integer; +use num_traits::{ + AsPrimitive, Bounded, CheckedAdd, CheckedMul, CheckedSub, Num, One, Unsigned, WrappingAdd, + WrappingSub, Zero, +}; +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +use serde::{ + de::{self, Deserialize, Deserializer, MapAccess, SeqAccess, Visitor}, + ser::{Serialize, SerializeStruct, Serializer}, +}; + +use crate::bytesrepr::{self, Error, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}; + +#[allow( + clippy::assign_op_pattern, + clippy::ptr_offset_with_cast, + clippy::manual_range_contains, + clippy::range_plus_one, + clippy::transmute_ptr_to_ptr, + clippy::reversed_empty_ranges +)] +mod macro_code { + #[cfg(feature = "datasize")] + use datasize::DataSize; + use uint::construct_uint; + + construct_uint! { + #[cfg_attr(feature = "datasize", derive(DataSize))] + pub struct U512(8); + } + construct_uint! { + #[cfg_attr(feature = "datasize", derive(DataSize))] + pub struct U256(4); + } + construct_uint! { + #[cfg_attr(feature = "datasize", derive(DataSize))] + pub struct U128(2); + } +} + +pub use self::macro_code::{U128, U256, U512}; + +/// Error type for parsing [`U128`], [`U256`], [`U512`] from a string. +#[derive(Debug)] +#[non_exhaustive] +pub enum UIntParseError { + /// Contains the parsing error from the `uint` crate, which only supports base-10 parsing. + FromDecStr(uint::FromDecStrErr), + /// Parsing was attempted on a string representing the number in some base other than 10. 
+ /// + /// Note: a general radix may be supported in the future. + InvalidRadix, +} + +macro_rules! impl_traits_for_uint { + ($type:ident, $total_bytes:expr, $test_mod:ident) => { + impl Serialize for $type { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + return self.to_string().serialize(serializer); + } + + let mut buffer = [0u8; $total_bytes]; + self.to_little_endian(&mut buffer); + let non_zero_bytes: Vec = buffer + .iter() + .rev() + .skip_while(|b| **b == 0) + .cloned() + .collect(); + let num_bytes = non_zero_bytes.len(); + + let mut state = serializer.serialize_struct("bigint", num_bytes + 1)?; + state.serialize_field("", &(num_bytes as u8))?; + + for byte in non_zero_bytes.into_iter().rev() { + state.serialize_field("", &byte)?; + } + state.end() + } + } + + impl<'de> Deserialize<'de> for $type { + fn deserialize>(deserializer: D) -> Result { + struct BigNumVisitor; + + impl<'de> Visitor<'de> for BigNumVisitor { + type Value = $type; + + fn expecting(&self, formatter: &mut Formatter) -> fmt::Result { + formatter.write_str("bignum struct") + } + + fn visit_seq>( + self, + mut sequence: V, + ) -> Result<$type, V::Error> { + let length: u8 = sequence + .next_element()? + .ok_or_else(|| de::Error::invalid_length(0, &self))?; + let mut buffer = [0u8; $total_bytes]; + for index in 0..length as usize { + let value = sequence + .next_element()? + .ok_or_else(|| de::Error::invalid_length(index + 1, &self))?; + buffer[index as usize] = value; + } + let result = $type::from_little_endian(&buffer); + Ok(result) + } + + fn visit_map>(self, mut map: V) -> Result<$type, V::Error> { + let _length_key: u8 = map + .next_key()? + .ok_or_else(|| de::Error::missing_field("length"))?; + let length: u8 = map + .next_value() + .map_err(|_| de::Error::invalid_length(0, &self))?; + let mut buffer = [0u8; $total_bytes]; + for index in 0..length { + let _byte_key: u8 = map + .next_key()? 
+ .ok_or_else(|| de::Error::missing_field("byte"))?; + let value = map.next_value().map_err(|_| { + de::Error::invalid_length(index as usize + 1, &self) + })?; + buffer[index as usize] = value; + } + let result = $type::from_little_endian(&buffer); + Ok(result) + } + } + + const FIELDS: &'static [&'static str] = &[ + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", + "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27", + "28", "29", "30", "31", "32", "33", "34", "35", "36", "37", "38", "39", "40", + "41", "42", "43", "44", "45", "46", "47", "48", "49", "50", "51", "52", "53", + "54", "55", "56", "57", "58", "59", "60", "61", "62", "63", "64", + ]; + + if deserializer.is_human_readable() { + let decimal_string = String::deserialize(deserializer)?; + return Self::from_dec_str(&decimal_string) + .map_err(|error| de::Error::custom(format!("{:?}", error))); + } + + deserializer.deserialize_struct("bigint", FIELDS, BigNumVisitor) + } + } + + impl ToBytes for $type { + fn to_bytes(&self) -> Result, Error> { + let mut buf = [0u8; $total_bytes]; + self.to_little_endian(&mut buf); + let mut non_zero_bytes: Vec = + buf.iter().rev().skip_while(|b| **b == 0).cloned().collect(); + let num_bytes = non_zero_bytes.len() as u8; + non_zero_bytes.push(num_bytes); + non_zero_bytes.reverse(); + Ok(non_zero_bytes) + } + + fn serialized_length(&self) -> usize { + let mut buf = [0u8; $total_bytes]; + self.to_little_endian(&mut buf); + let non_zero_bytes = buf.iter().rev().skip_while(|b| **b == 0).count(); + U8_SERIALIZED_LENGTH + non_zero_bytes + } + } + + impl FromBytes for $type { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (num_bytes, rem): (u8, &[u8]) = FromBytes::from_bytes(bytes)?; + + if num_bytes > $total_bytes { + Err(Error::Formatting) + } else { + let (value, rem) = bytesrepr::safe_split_at(rem, num_bytes as usize)?; + let result = $type::from_little_endian(value); + Ok((result, rem)) + } + } + } + 
+ // Trait implementations for unifying U* as numeric types + impl Zero for $type { + fn zero() -> Self { + $type::zero() + } + + fn is_zero(&self) -> bool { + self.is_zero() + } + } + + impl One for $type { + fn one() -> Self { + $type::one() + } + } + + // Requires Zero and One to be implemented + impl Num for $type { + type FromStrRadixErr = UIntParseError; + fn from_str_radix(str: &str, radix: u32) -> Result { + if radix == 10 { + $type::from_dec_str(str).map_err(UIntParseError::FromDecStr) + } else { + // TODO: other radix parsing + Err(UIntParseError::InvalidRadix) + } + } + } + + // Requires Num to be implemented + impl Unsigned for $type {} + + // Additional numeric trait, which also holds for these types + impl Bounded for $type { + fn min_value() -> Self { + $type::zero() + } + + fn max_value() -> Self { + $type::MAX + } + } + + // Instead of implementing arbitrary methods we can use existing traits from num_trait + // crate. + impl WrappingAdd for $type { + fn wrapping_add(&self, other: &$type) -> $type { + self.overflowing_add(*other).0 + } + } + + impl WrappingSub for $type { + fn wrapping_sub(&self, other: &$type) -> $type { + self.overflowing_sub(*other).0 + } + } + + impl CheckedMul for $type { + fn checked_mul(&self, v: &$type) -> Option<$type> { + $type::checked_mul(*self, *v) + } + } + + impl CheckedSub for $type { + fn checked_sub(&self, v: &$type) -> Option<$type> { + $type::checked_sub(*self, *v) + } + } + + impl CheckedAdd for $type { + fn checked_add(&self, v: &$type) -> Option<$type> { + $type::checked_add(*self, *v) + } + } + + impl Integer for $type { + /// Unsigned integer division. Returns the same result as `div` (`/`). + #[inline] + fn div_floor(&self, other: &Self) -> Self { + *self / *other + } + + /// Unsigned integer modulo operation. Returns the same result as `rem` (`%`). 
+ #[inline] + fn mod_floor(&self, other: &Self) -> Self { + *self % *other + } + + /// Calculates the Greatest Common Divisor (GCD) of the number and `other` + #[inline] + fn gcd(&self, other: &Self) -> Self { + let zero = Self::zero(); + // Use Stein's algorithm + let mut m = *self; + let mut n = *other; + if m == zero || n == zero { + return m | n; + } + + // find common factors of 2 + let shift = (m | n).trailing_zeros(); + + // divide n and m by 2 until odd + m >>= m.trailing_zeros(); + n >>= n.trailing_zeros(); + + while m != n { + if m > n { + m -= n; + m >>= m.trailing_zeros(); + } else { + n -= m; + n >>= n.trailing_zeros(); + } + } + m << shift + } + + /// Calculates the Lowest Common Multiple (LCM) of the number and `other`. + #[inline] + fn lcm(&self, other: &Self) -> Self { + self.gcd_lcm(other).1 + } + + /// Calculates the Greatest Common Divisor (GCD) and + /// Lowest Common Multiple (LCM) of the number and `other`. + #[inline] + fn gcd_lcm(&self, other: &Self) -> (Self, Self) { + if self.is_zero() && other.is_zero() { + return (Self::zero(), Self::zero()); + } + let gcd = self.gcd(other); + let lcm = *self * (*other / gcd); + (gcd, lcm) + } + + /// Deprecated, use `is_multiple_of` instead. + #[inline] + fn divides(&self, other: &Self) -> bool { + self.is_multiple_of(other) + } + + /// Returns `true` if the number is a multiple of `other`. + #[inline] + fn is_multiple_of(&self, other: &Self) -> bool { + *self % *other == $type::zero() + } + + /// Returns `true` if the number is divisible by `2`. + #[inline] + fn is_even(&self) -> bool { + (self.0[0]) & 1 == 0 + } + + /// Returns `true` if the number is not divisible by `2`. + #[inline] + fn is_odd(&self) -> bool { + !self.is_even() + } + + /// Simultaneous truncated integer division and modulus. 
+ #[inline] + fn div_rem(&self, other: &Self) -> (Self, Self) { + (*self / *other, *self % *other) + } + } + + impl AsPrimitive<$type> for i32 { + fn as_(self) -> $type { + if self >= 0 { + $type::from(self as u32) + } else { + let abs = 0u32.wrapping_sub(self as u32); + $type::zero().wrapping_sub(&$type::from(abs)) + } + } + } + + impl AsPrimitive<$type> for i64 { + fn as_(self) -> $type { + if self >= 0 { + $type::from(self as u64) + } else { + let abs = 0u64.wrapping_sub(self as u64); + $type::zero().wrapping_sub(&$type::from(abs)) + } + } + } + + impl AsPrimitive<$type> for u8 { + fn as_(self) -> $type { + $type::from(self) + } + } + + impl AsPrimitive<$type> for u32 { + fn as_(self) -> $type { + $type::from(self) + } + } + + impl AsPrimitive<$type> for u64 { + fn as_(self) -> $type { + $type::from(self) + } + } + + impl AsPrimitive for $type { + fn as_(self) -> i32 { + self.0[0] as i32 + } + } + + impl AsPrimitive for $type { + fn as_(self) -> i64 { + self.0[0] as i64 + } + } + + impl AsPrimitive for $type { + fn as_(self) -> u8 { + self.0[0] as u8 + } + } + + impl AsPrimitive for $type { + fn as_(self) -> u32 { + self.0[0] as u32 + } + } + + impl AsPrimitive for $type { + fn as_(self) -> u64 { + self.0[0] + } + } + + impl Sum for $type { + fn sum>(iter: I) -> Self { + iter.fold($type::zero(), Add::add) + } + } + + impl Distribution<$type> for Standard { + fn sample(&self, rng: &mut R) -> $type { + let mut raw_bytes = [0u8; $total_bytes]; + rng.fill_bytes(raw_bytes.as_mut()); + $type::from(raw_bytes) + } + } + + #[cfg(feature = "json-schema")] + impl schemars::JsonSchema for $type { + fn schema_name() -> String { + format!("U{}", $total_bytes * 8) + } + + fn json_schema(gen: &mut schemars::gen::SchemaGenerator) -> schemars::schema::Schema { + let schema = gen.subschema_for::(); + let mut schema_object = schema.into_object(); + schema_object.metadata().description = Some(format!( + "Decimal representation of a {}-bit integer.", + $total_bytes * 8 + )); + 
schema_object.into() + } + } + + #[cfg(test)] + mod $test_mod { + use super::*; + + #[test] + fn test_div_mod_floor() { + assert_eq!($type::from(10).div_floor(&$type::from(3)), $type::from(3)); + assert_eq!($type::from(10).mod_floor(&$type::from(3)), $type::from(1)); + assert_eq!( + $type::from(10).div_mod_floor(&$type::from(3)), + ($type::from(3), $type::from(1)) + ); + assert_eq!($type::from(5).div_floor(&$type::from(5)), $type::from(1)); + assert_eq!($type::from(5).mod_floor(&$type::from(5)), $type::from(0)); + assert_eq!( + $type::from(5).div_mod_floor(&$type::from(5)), + ($type::from(1), $type::from(0)) + ); + assert_eq!($type::from(3).div_floor(&$type::from(7)), $type::from(0)); + assert_eq!($type::from(3).mod_floor(&$type::from(7)), $type::from(3)); + assert_eq!( + $type::from(3).div_mod_floor(&$type::from(7)), + ($type::from(0), $type::from(3)) + ); + } + + #[test] + fn test_gcd() { + assert_eq!($type::from(10).gcd(&$type::from(2)), $type::from(2)); + assert_eq!($type::from(10).gcd(&$type::from(3)), $type::from(1)); + assert_eq!($type::from(0).gcd(&$type::from(3)), $type::from(3)); + assert_eq!($type::from(3).gcd(&$type::from(3)), $type::from(3)); + assert_eq!($type::from(56).gcd(&$type::from(42)), $type::from(14)); + assert_eq!( + $type::MAX.gcd(&($type::MAX / $type::from(2))), + $type::from(1) + ); + assert_eq!($type::from(15).gcd(&$type::from(17)), $type::from(1)); + } + + #[test] + fn test_lcm() { + assert_eq!($type::from(1).lcm(&$type::from(0)), $type::from(0)); + assert_eq!($type::from(0).lcm(&$type::from(1)), $type::from(0)); + assert_eq!($type::from(1).lcm(&$type::from(1)), $type::from(1)); + assert_eq!($type::from(8).lcm(&$type::from(9)), $type::from(72)); + assert_eq!($type::from(11).lcm(&$type::from(5)), $type::from(55)); + assert_eq!($type::from(15).lcm(&$type::from(17)), $type::from(255)); + assert_eq!($type::from(4).lcm(&$type::from(8)), $type::from(8)); + } + + #[test] + fn test_is_multiple_of() { + 
assert!($type::from(6).is_multiple_of(&$type::from(6))); + assert!($type::from(6).is_multiple_of(&$type::from(3))); + assert!($type::from(6).is_multiple_of(&$type::from(1))); + assert!(!$type::from(3).is_multiple_of(&$type::from(5))) + } + + #[test] + fn is_even() { + assert_eq!($type::from(0).is_even(), true); + assert_eq!($type::from(1).is_even(), false); + assert_eq!($type::from(2).is_even(), true); + assert_eq!($type::from(3).is_even(), false); + assert_eq!($type::from(4).is_even(), true); + } + + #[test] + fn is_odd() { + assert_eq!($type::from(0).is_odd(), false); + assert_eq!($type::from(1).is_odd(), true); + assert_eq!($type::from(2).is_odd(), false); + assert_eq!($type::from(3).is_odd(), true); + assert_eq!($type::from(4).is_odd(), false); + } + + #[test] + #[should_panic] + fn overflow_mul_test() { + let _ = $type::MAX * $type::from(2); + } + + #[test] + #[should_panic] + fn overflow_add_test() { + let _ = $type::MAX + $type::from(1); + } + + #[test] + #[should_panic] + fn underflow_sub_test() { + let _ = $type::zero() - $type::from(1); + } + } + }; +} + +impl_traits_for_uint!(U128, 16, u128_test); +impl_traits_for_uint!(U256, 32, u256_test); +impl_traits_for_uint!(U512, 64, u512_test); + +impl AsPrimitive for U128 { + fn as_(self) -> U128 { + self + } +} + +impl AsPrimitive for U128 { + fn as_(self) -> U256 { + let mut result = U256::zero(); + result.0[..2].clone_from_slice(&self.0[..2]); + result + } +} + +impl AsPrimitive for U128 { + fn as_(self) -> U512 { + let mut result = U512::zero(); + result.0[..2].clone_from_slice(&self.0[..2]); + result + } +} + +impl AsPrimitive for U256 { + fn as_(self) -> U128 { + let mut result = U128::zero(); + result.0[..2].clone_from_slice(&self.0[..2]); + result + } +} + +impl AsPrimitive for U256 { + fn as_(self) -> U256 { + self + } +} + +impl AsPrimitive for U256 { + fn as_(self) -> U512 { + let mut result = U512::zero(); + result.0[..4].clone_from_slice(&self.0[..4]); + result + } +} + +impl AsPrimitive for U512 { 
+ fn as_(self) -> U128 { + let mut result = U128::zero(); + result.0[..2].clone_from_slice(&self.0[..2]); + result + } +} + +impl AsPrimitive for U512 { + fn as_(self) -> U256 { + let mut result = U256::zero(); + result.0[..4].clone_from_slice(&self.0[..4]); + result + } +} + +impl AsPrimitive for U512 { + fn as_(self) -> U512 { + self + } +} + +#[cfg(test)] +mod tests { + use std::fmt::Debug; + + use serde::de::DeserializeOwned; + + use super::*; + + fn check_as_i32>(expected: i32, input: T) { + assert_eq!(expected, input.as_()); + } + + fn check_as_i64>(expected: i64, input: T) { + assert_eq!(expected, input.as_()); + } + + fn check_as_u8>(expected: u8, input: T) { + assert_eq!(expected, input.as_()); + } + + fn check_as_u32>(expected: u32, input: T) { + assert_eq!(expected, input.as_()); + } + + fn check_as_u64>(expected: u64, input: T) { + assert_eq!(expected, input.as_()); + } + + fn check_as_u128>(expected: U128, input: T) { + assert_eq!(expected, input.as_()); + } + + fn check_as_u256>(expected: U256, input: T) { + assert_eq!(expected, input.as_()); + } + + fn check_as_u512>(expected: U512, input: T) { + assert_eq!(expected, input.as_()); + } + + #[test] + fn as_primitive_from_i32() { + let mut input = 0_i32; + check_as_i32(0, input); + check_as_i64(0, input); + check_as_u8(0, input); + check_as_u32(0, input); + check_as_u64(0, input); + check_as_u128(U128::zero(), input); + check_as_u256(U256::zero(), input); + check_as_u512(U512::zero(), input); + + input = i32::max_value() - 1; + check_as_i32(input, input); + check_as_i64(i64::from(input), input); + check_as_u8(input as u8, input); + check_as_u32(input as u32, input); + check_as_u64(input as u64, input); + check_as_u128(U128::from(input), input); + check_as_u256(U256::from(input), input); + check_as_u512(U512::from(input), input); + + input = i32::min_value() + 1; + check_as_i32(input, input); + check_as_i64(i64::from(input), input); + check_as_u8(input as u8, input); + check_as_u32(input as u32, input); 
+ check_as_u64(input as u64, input); + // i32::min_value() is -1 - i32::max_value() + check_as_u128( + U128::zero().wrapping_sub(&U128::from(i32::max_value())), + input, + ); + check_as_u256( + U256::zero().wrapping_sub(&U256::from(i32::max_value())), + input, + ); + check_as_u512( + U512::zero().wrapping_sub(&U512::from(i32::max_value())), + input, + ); + } + + #[test] + fn as_primitive_from_i64() { + let mut input = 0_i64; + check_as_i32(0, input); + check_as_i64(0, input); + check_as_u8(0, input); + check_as_u32(0, input); + check_as_u64(0, input); + check_as_u128(U128::zero(), input); + check_as_u256(U256::zero(), input); + check_as_u512(U512::zero(), input); + + input = i64::max_value() - 1; + check_as_i32(input as i32, input); + check_as_i64(input, input); + check_as_u8(input as u8, input); + check_as_u32(input as u32, input); + check_as_u64(input as u64, input); + check_as_u128(U128::from(input), input); + check_as_u256(U256::from(input), input); + check_as_u512(U512::from(input), input); + + input = i64::min_value() + 1; + check_as_i32(input as i32, input); + check_as_i64(input, input); + check_as_u8(input as u8, input); + check_as_u32(input as u32, input); + check_as_u64(input as u64, input); + // i64::min_value() is (-1 - i64::max_value()) + check_as_u128( + U128::zero().wrapping_sub(&U128::from(i64::max_value())), + input, + ); + check_as_u256( + U256::zero().wrapping_sub(&U256::from(i64::max_value())), + input, + ); + check_as_u512( + U512::zero().wrapping_sub(&U512::from(i64::max_value())), + input, + ); + } + + #[test] + fn as_primitive_from_u8() { + let mut input = 0_u8; + check_as_i32(0, input); + check_as_i64(0, input); + check_as_u8(0, input); + check_as_u32(0, input); + check_as_u64(0, input); + check_as_u128(U128::zero(), input); + check_as_u256(U256::zero(), input); + check_as_u512(U512::zero(), input); + + input = u8::max_value() - 1; + check_as_i32(i32::from(input), input); + check_as_i64(i64::from(input), input); + check_as_u8(input, input); 
+ check_as_u32(u32::from(input), input); + check_as_u64(u64::from(input), input); + check_as_u128(U128::from(input), input); + check_as_u256(U256::from(input), input); + check_as_u512(U512::from(input), input); + } + + #[test] + fn as_primitive_from_u32() { + let mut input = 0_u32; + check_as_i32(0, input); + check_as_i64(0, input); + check_as_u8(0, input); + check_as_u32(0, input); + check_as_u64(0, input); + check_as_u128(U128::zero(), input); + check_as_u256(U256::zero(), input); + check_as_u512(U512::zero(), input); + + input = u32::max_value() - 1; + check_as_i32(input as i32, input); + check_as_i64(i64::from(input), input); + check_as_u8(input as u8, input); + check_as_u32(input, input); + check_as_u64(u64::from(input), input); + check_as_u128(U128::from(input), input); + check_as_u256(U256::from(input), input); + check_as_u512(U512::from(input), input); + } + + #[test] + fn as_primitive_from_u64() { + let mut input = 0_u64; + check_as_i32(0, input); + check_as_i64(0, input); + check_as_u8(0, input); + check_as_u32(0, input); + check_as_u64(0, input); + check_as_u128(U128::zero(), input); + check_as_u256(U256::zero(), input); + check_as_u512(U512::zero(), input); + + input = u64::max_value() - 1; + check_as_i32(input as i32, input); + check_as_i64(input as i64, input); + check_as_u8(input as u8, input); + check_as_u32(input as u32, input); + check_as_u64(input, input); + check_as_u128(U128::from(input), input); + check_as_u256(U256::from(input), input); + check_as_u512(U512::from(input), input); + } + + fn make_little_endian_arrays(little_endian_bytes: &[u8]) -> ([u8; 4], [u8; 8]) { + let le_32 = { + let mut le_32 = [0; 4]; + le_32.copy_from_slice(&little_endian_bytes[..4]); + le_32 + }; + + let le_64 = { + let mut le_64 = [0; 8]; + le_64.copy_from_slice(&little_endian_bytes[..8]); + le_64 + }; + + (le_32, le_64) + } + + #[test] + fn as_primitive_from_u128() { + let mut input = U128::zero(); + check_as_i32(0, input); + check_as_i64(0, input); + check_as_u8(0, 
input); + check_as_u32(0, input); + check_as_u64(0, input); + check_as_u128(U128::zero(), input); + check_as_u256(U256::zero(), input); + check_as_u512(U512::zero(), input); + + input = U128::max_value() - 1; + + let mut little_endian_bytes = [0_u8; 64]; + input.to_little_endian(&mut little_endian_bytes[..16]); + let (le_32, le_64) = make_little_endian_arrays(&little_endian_bytes); + + check_as_i32(i32::from_le_bytes(le_32), input); + check_as_i64(i64::from_le_bytes(le_64), input); + check_as_u8(little_endian_bytes[0], input); + check_as_u32(u32::from_le_bytes(le_32), input); + check_as_u64(u64::from_le_bytes(le_64), input); + check_as_u128(U128::from_little_endian(&little_endian_bytes[..16]), input); + check_as_u256(U256::from_little_endian(&little_endian_bytes[..32]), input); + check_as_u512(U512::from_little_endian(&little_endian_bytes), input); + } + + #[test] + fn as_primitive_from_u256() { + let mut input = U256::zero(); + check_as_i32(0, input); + check_as_i64(0, input); + check_as_u8(0, input); + check_as_u32(0, input); + check_as_u64(0, input); + check_as_u128(U128::zero(), input); + check_as_u256(U256::zero(), input); + check_as_u512(U512::zero(), input); + + input = U256::max_value() - 1; + + let mut little_endian_bytes = [0_u8; 64]; + input.to_little_endian(&mut little_endian_bytes[..32]); + let (le_32, le_64) = make_little_endian_arrays(&little_endian_bytes); + + check_as_i32(i32::from_le_bytes(le_32), input); + check_as_i64(i64::from_le_bytes(le_64), input); + check_as_u8(little_endian_bytes[0], input); + check_as_u32(u32::from_le_bytes(le_32), input); + check_as_u64(u64::from_le_bytes(le_64), input); + check_as_u128(U128::from_little_endian(&little_endian_bytes[..16]), input); + check_as_u256(U256::from_little_endian(&little_endian_bytes[..32]), input); + check_as_u512(U512::from_little_endian(&little_endian_bytes), input); + } + + #[test] + fn as_primitive_from_u512() { + let mut input = U512::zero(); + check_as_i32(0, input); + check_as_i64(0, 
input); + check_as_u8(0, input); + check_as_u32(0, input); + check_as_u64(0, input); + check_as_u128(U128::zero(), input); + check_as_u256(U256::zero(), input); + check_as_u512(U512::zero(), input); + + input = U512::max_value() - 1; + + let mut little_endian_bytes = [0_u8; 64]; + input.to_little_endian(&mut little_endian_bytes); + let (le_32, le_64) = make_little_endian_arrays(&little_endian_bytes); + + check_as_i32(i32::from_le_bytes(le_32), input); + check_as_i64(i64::from_le_bytes(le_64), input); + check_as_u8(little_endian_bytes[0], input); + check_as_u32(u32::from_le_bytes(le_32), input); + check_as_u64(u64::from_le_bytes(le_64), input); + check_as_u128(U128::from_little_endian(&little_endian_bytes[..16]), input); + check_as_u256(U256::from_little_endian(&little_endian_bytes[..32]), input); + check_as_u512(U512::from_little_endian(&little_endian_bytes), input); + } + + #[test] + fn wrapping_test_u512() { + let max = U512::max_value(); + let value = max.wrapping_add(&1.into()); + assert_eq!(value, 0.into()); + + let min = U512::min_value(); + let value = min.wrapping_sub(&1.into()); + assert_eq!(value, U512::max_value()); + } + + #[test] + fn wrapping_test_u256() { + let max = U256::max_value(); + let value = max.wrapping_add(&1.into()); + assert_eq!(value, 0.into()); + + let min = U256::min_value(); + let value = min.wrapping_sub(&1.into()); + assert_eq!(value, U256::max_value()); + } + + #[test] + fn wrapping_test_u128() { + let max = U128::max_value(); + let value = max.wrapping_add(&1.into()); + assert_eq!(value, 0.into()); + + let min = U128::min_value(); + let value = min.wrapping_sub(&1.into()); + assert_eq!(value, U128::max_value()); + } + + fn serde_roundtrip(value: T) { + { + let serialized = bincode::serialize(&value).unwrap(); + let deserialized = bincode::deserialize(serialized.as_slice()).unwrap(); + assert_eq!(value, deserialized); + } + { + let serialized = serde_json::to_string_pretty(&value).unwrap(); + let deserialized = 
serde_json::from_str(&serialized).unwrap(); + assert_eq!(value, deserialized); + } + } + + #[test] + fn serde_roundtrip_u512() { + serde_roundtrip(U512::min_value()); + serde_roundtrip(U512::from(1)); + serde_roundtrip(U512::from(u64::max_value())); + serde_roundtrip(U512::max_value()); + } + + #[test] + fn serde_roundtrip_u256() { + serde_roundtrip(U256::min_value()); + serde_roundtrip(U256::from(1)); + serde_roundtrip(U256::from(u64::max_value())); + serde_roundtrip(U256::max_value()); + } + + #[test] + fn serde_roundtrip_u128() { + serde_roundtrip(U128::min_value()); + serde_roundtrip(U128::from(1)); + serde_roundtrip(U128::from(u64::max_value())); + serde_roundtrip(U128::max_value()); + } +} diff --git a/casper_types_ver_2_0/src/uref.rs b/casper_types_ver_2_0/src/uref.rs new file mode 100644 index 00000000..c24b2e85 --- /dev/null +++ b/casper_types_ver_2_0/src/uref.rs @@ -0,0 +1,424 @@ +use alloc::{format, string::String, vec::Vec}; +use core::{ + array::TryFromSliceError, + convert::TryFrom, + fmt::{self, Debug, Display, Formatter}, + num::ParseIntError, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +#[cfg(feature = "json-schema")] +use schemars::{gen::SchemaGenerator, schema::Schema, JsonSchema}; +use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; + +use crate::{ + bytesrepr, + bytesrepr::{Error, FromBytes}, + checksummed_hex, AccessRights, ApiError, Key, ACCESS_RIGHTS_SERIALIZED_LENGTH, +}; + +/// The number of bytes in a [`URef`] address. +pub const UREF_ADDR_LENGTH: usize = 32; + +/// The number of bytes in a serialized [`URef`] where the [`AccessRights`] are not `None`. +pub const UREF_SERIALIZED_LENGTH: usize = UREF_ADDR_LENGTH + ACCESS_RIGHTS_SERIALIZED_LENGTH; + +pub(super) const UREF_FORMATTED_STRING_PREFIX: &str = "uref-"; + +/// The address of a `URef` (unforgeable reference) on the network. 
+pub type URefAddr = [u8; UREF_ADDR_LENGTH]; + +/// Error while parsing a URef from a formatted string. +#[derive(Debug)] +#[non_exhaustive] +pub enum FromStrError { + /// Prefix is not "uref-". + InvalidPrefix, + /// No access rights as suffix. + MissingSuffix, + /// Access rights are invalid. + InvalidAccessRights, + /// Failed to decode address portion of URef. + Hex(base16::DecodeError), + /// Failed to parse an int. + Int(ParseIntError), + /// The address portion is the wrong length. + Address(TryFromSliceError), +} + +impl From for FromStrError { + fn from(error: base16::DecodeError) -> Self { + FromStrError::Hex(error) + } +} + +impl From for FromStrError { + fn from(error: ParseIntError) -> Self { + FromStrError::Int(error) + } +} + +impl From for FromStrError { + fn from(error: TryFromSliceError) -> Self { + FromStrError::Address(error) + } +} + +impl Display for FromStrError { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match self { + FromStrError::InvalidPrefix => write!(f, "prefix is not 'uref-'"), + FromStrError::MissingSuffix => write!(f, "no access rights as suffix"), + FromStrError::InvalidAccessRights => write!(f, "invalid access rights"), + FromStrError::Hex(error) => { + write!(f, "failed to decode address portion from hex: {}", error) + } + FromStrError::Int(error) => write!(f, "failed to parse an int: {}", error), + FromStrError::Address(error) => { + write!(f, "address portion is the wrong length: {}", error) + } + } + } +} + +/// Represents an unforgeable reference, containing an address in the network's global storage and +/// the [`AccessRights`] of the reference. +/// +/// A `URef` can be used to index entities such as [`CLValue`](crate::CLValue)s, or smart contracts. +#[derive(Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord, Default)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct URef(URefAddr, AccessRights); + +impl URef { + /// Constructs a [`URef`] from an address and access rights. 
+ pub const fn new(address: URefAddr, access_rights: AccessRights) -> Self { + URef(address, access_rights) + } + + /// Returns the address of this [`URef`]. + pub fn addr(&self) -> URefAddr { + self.0 + } + + /// Returns the access rights of this [`URef`]. + pub fn access_rights(&self) -> AccessRights { + self.1 + } + + /// Returns a new [`URef`] with the same address and updated access rights. + #[must_use] + pub fn with_access_rights(self, access_rights: AccessRights) -> Self { + URef(self.0, access_rights) + } + + /// Removes the access rights from this [`URef`]. + #[must_use] + pub fn remove_access_rights(self) -> Self { + URef(self.0, AccessRights::NONE) + } + + /// Returns `true` if the access rights are `Some` and + /// [`is_readable`](AccessRights::is_readable) is `true` for them. + #[must_use] + pub fn is_readable(self) -> bool { + self.1.is_readable() + } + + /// Returns a new [`URef`] with the same address and [`AccessRights::READ`] permission. + #[must_use] + pub fn into_read(self) -> URef { + URef(self.0, AccessRights::READ) + } + + /// Returns a new [`URef`] with the same address and [`AccessRights::WRITE`] permission. + #[must_use] + pub fn into_write(self) -> URef { + URef(self.0, AccessRights::WRITE) + } + + /// Returns a new [`URef`] with the same address and [`AccessRights::ADD`] permission. + #[must_use] + pub fn into_add(self) -> URef { + URef(self.0, AccessRights::ADD) + } + + /// Returns a new [`URef`] with the same address and [`AccessRights::READ_ADD_WRITE`] + /// permission. + #[must_use] + pub fn into_read_add_write(self) -> URef { + URef(self.0, AccessRights::READ_ADD_WRITE) + } + + /// Returns a new [`URef`] with the same address and [`AccessRights::READ_WRITE`] + /// permission. + #[must_use] + pub fn into_read_write(self) -> URef { + URef(self.0, AccessRights::READ_WRITE) + } + + /// Returns `true` if the access rights are `Some` and + /// [`is_writeable`](AccessRights::is_writeable) is `true` for them. 
+ pub fn is_writeable(self) -> bool { + self.1.is_writeable() + } + + /// Returns `true` if the access rights are `Some` and [`is_addable`](AccessRights::is_addable) + /// is `true` for them. + pub fn is_addable(self) -> bool { + self.1.is_addable() + } + + /// Formats the address and access rights of the [`URef`] in a unique way that could be used as + /// a name when storing the given `URef` in a global state. + pub fn to_formatted_string(self) -> String { + // Extract bits as numerical value, with no flags marked as 0. + let access_rights_bits = self.access_rights().bits(); + // Access rights is represented as octal, which means that max value of u8 can + // be represented as maximum of 3 octal digits. + format!( + "{}{}-{:03o}", + UREF_FORMATTED_STRING_PREFIX, + base16::encode_lower(&self.addr()), + access_rights_bits + ) + } + + /// Parses a string formatted as per `Self::to_formatted_string()` into a `URef`. + pub fn from_formatted_str(input: &str) -> Result { + let remainder = input + .strip_prefix(UREF_FORMATTED_STRING_PREFIX) + .ok_or(FromStrError::InvalidPrefix)?; + let parts = remainder.splitn(2, '-').collect::>(); + if parts.len() != 2 { + return Err(FromStrError::MissingSuffix); + } + let addr = URefAddr::try_from(checksummed_hex::decode(parts[0])?.as_ref())?; + let access_rights_value = u8::from_str_radix(parts[1], 8)?; + let access_rights = AccessRights::from_bits(access_rights_value) + .ok_or(FromStrError::InvalidAccessRights)?; + Ok(URef(addr, access_rights)) + } + + /// Removes specific access rights from this URef if present. 
+ pub fn disable_access_rights(&mut self, access_rights: AccessRights) { + self.1.remove(access_rights) + } +} + +#[cfg(feature = "json-schema")] +impl JsonSchema for URef { + fn schema_name() -> String { + String::from("URef") + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let schema = gen.subschema_for::(); + let mut schema_object = schema.into_object(); + schema_object.metadata().description = Some(String::from("Hex-encoded, formatted URef.")); + schema_object.into() + } +} + +impl Display for URef { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + let addr = self.addr(); + let access_rights = self.access_rights(); + write!( + f, + "URef({}, {})", + base16::encode_lower(&addr), + access_rights + ) + } +} + +impl Debug for URef { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!(f, "{}", self) + } +} + +impl bytesrepr::ToBytes for URef { + fn to_bytes(&self) -> Result, Error> { + let mut result = bytesrepr::unchecked_allocate_buffer(self); + result.append(&mut self.0.to_bytes()?); + result.append(&mut self.1.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + UREF_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), self::Error> { + writer.extend_from_slice(&self.0); + self.1.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for URef { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (id, rem) = FromBytes::from_bytes(bytes)?; + let (access_rights, rem) = FromBytes::from_bytes(rem)?; + Ok((URef(id, access_rights), rem)) + } +} + +impl Serialize for URef { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + self.to_formatted_string().serialize(serializer) + } else { + (self.0, self.1).serialize(serializer) + } + } +} + +impl<'de> Deserialize<'de> for URef { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let formatted_string = String::deserialize(deserializer)?; + 
URef::from_formatted_str(&formatted_string).map_err(D::Error::custom) + } else { + let (address, access_rights) = <(URefAddr, AccessRights)>::deserialize(deserializer)?; + Ok(URef(address, access_rights)) + } + } +} + +impl TryFrom for URef { + type Error = ApiError; + + fn try_from(key: Key) -> Result { + if let Key::URef(uref) = key { + Ok(uref) + } else { + Err(ApiError::UnexpectedKeyVariant) + } + } +} + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> URef { + URef::new(rng.gen(), rng.gen()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn uref_as_string() { + // Since we are putting URefs to named_keys map keyed by the label that + // `as_string()` returns, any changes to the string representation of + // that type cannot break the format. + let addr_array = [0u8; 32]; + let uref_a = URef::new(addr_array, AccessRights::READ); + assert_eq!( + uref_a.to_formatted_string(), + "uref-0000000000000000000000000000000000000000000000000000000000000000-001" + ); + let uref_b = URef::new(addr_array, AccessRights::WRITE); + assert_eq!( + uref_b.to_formatted_string(), + "uref-0000000000000000000000000000000000000000000000000000000000000000-002" + ); + + let uref_c = uref_b.remove_access_rights(); + assert_eq!( + uref_c.to_formatted_string(), + "uref-0000000000000000000000000000000000000000000000000000000000000000-000" + ); + } + + fn round_trip(uref: URef) { + let string = uref.to_formatted_string(); + let parsed_uref = URef::from_formatted_str(&string).unwrap(); + assert_eq!(uref, parsed_uref); + } + + #[test] + fn uref_from_str() { + round_trip(URef::new([0; 32], AccessRights::NONE)); + round_trip(URef::new([255; 32], AccessRights::READ_ADD_WRITE)); + + let invalid_prefix = + "ref-0000000000000000000000000000000000000000000000000000000000000000-000"; + assert!(URef::from_formatted_str(invalid_prefix).is_err()); + + let invalid_prefix = + "uref0000000000000000000000000000000000000000000000000000000000000000-000"; + 
assert!(URef::from_formatted_str(invalid_prefix).is_err()); + + let short_addr = "uref-00000000000000000000000000000000000000000000000000000000000000-000"; + assert!(URef::from_formatted_str(short_addr).is_err()); + + let long_addr = + "uref-000000000000000000000000000000000000000000000000000000000000000000-000"; + assert!(URef::from_formatted_str(long_addr).is_err()); + + let invalid_hex = + "uref-000000000000000000000000000000000000000000000000000000000000000g-000"; + assert!(URef::from_formatted_str(invalid_hex).is_err()); + + let invalid_suffix_separator = + "uref-0000000000000000000000000000000000000000000000000000000000000000:000"; + assert!(URef::from_formatted_str(invalid_suffix_separator).is_err()); + + let invalid_suffix = + "uref-0000000000000000000000000000000000000000000000000000000000000000-abc"; + assert!(URef::from_formatted_str(invalid_suffix).is_err()); + + let invalid_access_rights = + "uref-0000000000000000000000000000000000000000000000000000000000000000-200"; + assert!(URef::from_formatted_str(invalid_access_rights).is_err()); + } + + #[test] + fn serde_roundtrip() { + let uref = URef::new([255; 32], AccessRights::READ_ADD_WRITE); + let serialized = bincode::serialize(&uref).unwrap(); + let decoded = bincode::deserialize(&serialized).unwrap(); + assert_eq!(uref, decoded); + } + + #[test] + fn json_roundtrip() { + let uref = URef::new([255; 32], AccessRights::READ_ADD_WRITE); + let json_string = serde_json::to_string_pretty(&uref).unwrap(); + let decoded = serde_json::from_str(&json_string).unwrap(); + assert_eq!(uref, decoded); + } + + #[test] + fn should_disable_access_rights() { + let mut uref = URef::new([255; 32], AccessRights::READ_ADD_WRITE); + assert!(uref.is_writeable()); + uref.disable_access_rights(AccessRights::WRITE); + assert_eq!(uref.access_rights(), AccessRights::READ_ADD); + + uref.disable_access_rights(AccessRights::WRITE); + assert!( + !uref.is_writeable(), + "Disabling access bit twice should be a noop" + ); + + 
assert_eq!(uref.access_rights(), AccessRights::READ_ADD); + + uref.disable_access_rights(AccessRights::READ_ADD); + assert_eq!(uref.access_rights(), AccessRights::NONE); + + uref.disable_access_rights(AccessRights::READ_ADD); + assert_eq!(uref.access_rights(), AccessRights::NONE); + + uref.disable_access_rights(AccessRights::NONE); + assert_eq!(uref.access_rights(), AccessRights::NONE); + } +} diff --git a/casper_types_ver_2_0/src/validator_change.rs b/casper_types_ver_2_0/src/validator_change.rs new file mode 100644 index 00000000..92b66f8d --- /dev/null +++ b/casper_types_ver_2_0/src/validator_change.rs @@ -0,0 +1,101 @@ +use crate::bytesrepr::{self, FromBytes, ToBytes}; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use alloc::vec::Vec; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +/// A change to a validator's status between two eras. +#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Ord, PartialOrd)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub enum ValidatorChange { + /// The validator got newly added to the validator set. + Added, + /// The validator was removed from the validator set. + Removed, + /// The validator was banned from this era. + Banned, + /// The validator was excluded from proposing new blocks in this era. + CannotPropose, + /// We saw the validator misbehave in this era. + SeenAsFaulty, +} + +impl ValidatorChange { + /// Returns a random `ValidatorChange`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + use rand::Rng; + + match rng.gen_range(0..5) { + ADDED_TAG => ValidatorChange::Added, + REMOVED_TAG => ValidatorChange::Removed, + BANNED_TAG => ValidatorChange::Banned, + CANNOT_PROPOSE_TAG => ValidatorChange::CannotPropose, + SEEN_AS_FAULTY_TAG => ValidatorChange::SeenAsFaulty, + _ => unreachable!(), + } + } +} + +const ADDED_TAG: u8 = 0; +const REMOVED_TAG: u8 = 1; +const BANNED_TAG: u8 = 2; +const CANNOT_PROPOSE_TAG: u8 = 3; +const SEEN_AS_FAULTY_TAG: u8 = 4; + +impl ToBytes for ValidatorChange { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + ValidatorChange::Added => ADDED_TAG, + ValidatorChange::Removed => REMOVED_TAG, + ValidatorChange::Banned => BANNED_TAG, + ValidatorChange::CannotPropose => CANNOT_PROPOSE_TAG, + ValidatorChange::SeenAsFaulty => SEEN_AS_FAULTY_TAG, + } + .write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + bytesrepr::U8_SERIALIZED_LENGTH + } +} + +impl FromBytes for ValidatorChange { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + let id = match tag { + ADDED_TAG => ValidatorChange::Added, + REMOVED_TAG => ValidatorChange::Removed, + BANNED_TAG => ValidatorChange::Banned, + CANNOT_PROPOSE_TAG => ValidatorChange::CannotPropose, + SEEN_AS_FAULTY_TAG => ValidatorChange::SeenAsFaulty, + _ => return Err(bytesrepr::Error::NotRepresentable), + }; + Ok((id, remainder)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = ValidatorChange::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git 
a/casper_types_ver_2_0/tests/version_numbers.rs b/casper_types_ver_2_0/tests/version_numbers.rs new file mode 100644 index 00000000..5787cf50 --- /dev/null +++ b/casper_types_ver_2_0/tests/version_numbers.rs @@ -0,0 +1,5 @@ +#[cfg(feature = "version-sync")] +#[test] +fn test_html_root_url() { + version_sync::assert_html_root_url_updated!("src/lib.rs"); +} diff --git a/event_sidecar/Cargo.toml b/event_sidecar/Cargo.toml new file mode 100644 index 00000000..ee84a44c --- /dev/null +++ b/event_sidecar/Cargo.toml @@ -0,0 +1,90 @@ +[package] +name = "casper-event-sidecar" +authors = ["George Williamson ", "Jakub Zajkowski "] +version = "1.0.0" +edition = "2018" +readme = "README.md" +description = "App for storing and republishing sse events of a casper node" +license-file = "../LICENSE" +documentation = "README.md" +homepage = "https://github.com/CasperLabs/event-sidecar" +repository = "https://github.com/CasperLabs/event-sidecar" + +[features] +additional-metrics = ["casper-event-types/additional-metrics"] +testing = [] + +[dependencies] +anyhow = { workspace = true } +async-trait = "0.1.56" +bytes = "1.2.0" +casper-event-listener = { path = "../listener", version = "1.0.0" } +casper-event-types = { path = "../types", version = "1.0.0" } +casper-types = { workspace = true, features = ["std", "json-schema"] } +derive-new = "0.5.9" +eventsource-stream = "0.2.3" +futures = { workspace = true } +hex = "0.4.3" +hex_fmt = "0.3.0" +http = "0.2.1" +hyper = "0.14.4" +indexmap = "2.0.0" +itertools = "0.10.3" +jsonschema = "0.17.1" +once_cell = { workspace = true } +rand = "0.8.3" +regex = "1.6.0" +reqwest = "0.11.11" +schemars = "0.8.16" +sea-query = "0.30" +serde = { workspace = true, default-features = true, features = ["derive", "rc"] } +serde_json = "1.0" +sqlx = { version = "0.7", features = ["runtime-tokio-native-tls", "any", "sqlite", "postgres"] } +thiserror = { workspace = true } +tokio = { workspace = true, features = ["full"] } +tokio-stream = { version = "0.1.4", 
features = ["sync"] } +tower = { version = "0.4.13", features = ["buffer", "limit", "make", "timeout"] } +tracing = { workspace = true, default-features = true } +tracing-subscriber = { workspace = true } +utoipa = { version = "3.4.4", features = ["rc_schema"] } +utoipa-swagger-ui = { version = "3.1.5" } +warp = { version = "0.3.6", features = ["compression"] } +wheelbuf = "0.2.0" + +[dev-dependencies] +async-stream = { workspace = true } +casper-event-types = { path = "../types", version = "1.0.0", features = ["sse-data-testing"] } +casper-types = { workspace = true, features = ["std", "testing"] } +colored = "2.0.0" +futures-util = { workspace = true } +pg-embed = { git = "https://github.com/faokunega/pg-embed", tag = "v0.8.0" } +portpicker = "0.1.1" +pretty_assertions = "1.3.0" +reqwest = { version = "0.11.3", features = ["stream"] } +tabled = { version = "0.10.0", features = ["derive", "color"] } +tempfile = "3" +tokio-util = "0.7.8" + +[package.metadata.deb] +revision = "0" +assets = [ + ["../target/release/casper-event-sidecar", "/usr/bin/casper-event-sidecar", "755"], + ["../resources/ETC_README.md", "/etc/casper-event-sidecar/README.md", "644"], + ["../resources/example_configs/default_sse_only_config.toml", "/etc/casper-event-sidecar/config.toml", "644"] +] +maintainer-scripts = "../resources/maintainer_scripts/debian" +extended-description = """ +Package for Casper Event Sidecar +""" + +[package.metadata.deb.systemd-units] +unit-scripts = "../resources/maintainer_scripts/casper_event_sidecar" +restart-after-upgrade = true + +[package.metadata.deb.variants.bionic] +name = "casper-event-sidecar" +revision = "0+bionic" + +[package.metadata.deb.variants.focal] +name = "casper-event-sidecar" +revision = "0+focal" diff --git a/sidecar/src/admin_server.rs b/event_sidecar/src/admin_server.rs similarity index 90% rename from sidecar/src/admin_server.rs rename to event_sidecar/src/admin_server.rs index 2c206c27..bd64bb49 100644 --- a/sidecar/src/admin_server.rs +++ 
b/event_sidecar/src/admin_server.rs @@ -1,9 +1,10 @@ -use crate::types::config::AdminServerConfig; +use crate::types::config::AdminApiServerConfig; use crate::utils::{resolve_address, root_filter, Unexpected}; use anyhow::Error; use casper_event_types::metrics::metrics_summary; use hyper::Server; use std::net::TcpListener; +use std::process::ExitCode; use std::time::Duration; use tower::{buffer::Buffer, make::Shared, ServiceBuilder}; use warp::Filter; @@ -37,7 +38,7 @@ impl AdminServer { } } -pub async fn run_server(config: AdminServerConfig) -> Result<(), Error> { +pub async fn run_server(config: AdminApiServerConfig) -> Result { AdminServer { port: config.port, max_concurrent_requests: config.max_concurrent_requests, @@ -45,6 +46,7 @@ pub async fn run_server(config: AdminServerConfig) -> Result<(), Error> { } .start() .await + .map(|_| ExitCode::SUCCESS) } /// Return metrics data at a given time. @@ -65,7 +67,7 @@ async fn metrics_handler() -> Result { #[cfg(test)] mod tests { - use crate::{admin_server::run_server, types::config::AdminServerConfig}; + use crate::{admin_server::run_server, types::config::AdminApiServerConfig}; use portpicker::pick_unused_port; use reqwest::Response; @@ -73,7 +75,7 @@ mod tests { async fn given_config_should_start_admin_server() { let port = pick_unused_port().unwrap(); let request_url = format!("http://localhost:{}/metrics", port); - let admin_config = AdminServerConfig { + let admin_config = AdminApiServerConfig { port, max_concurrent_requests: 1, max_requests_per_second: 1, diff --git a/sidecar/src/api_version_manager.rs b/event_sidecar/src/api_version_manager.rs similarity index 100% rename from sidecar/src/api_version_manager.rs rename to event_sidecar/src/api_version_manager.rs diff --git a/sidecar/src/database/database_errors.rs b/event_sidecar/src/database/database_errors.rs similarity index 100% rename from sidecar/src/database/database_errors.rs rename to event_sidecar/src/database/database_errors.rs diff --git 
a/sidecar/src/database/env_vars.rs b/event_sidecar/src/database/env_vars.rs similarity index 100% rename from sidecar/src/database/env_vars.rs rename to event_sidecar/src/database/env_vars.rs diff --git a/sidecar/src/database/errors.rs b/event_sidecar/src/database/errors.rs similarity index 100% rename from sidecar/src/database/errors.rs rename to event_sidecar/src/database/errors.rs diff --git a/sidecar/src/database/migration_manager.rs b/event_sidecar/src/database/migration_manager.rs similarity index 100% rename from sidecar/src/database/migration_manager.rs rename to event_sidecar/src/database/migration_manager.rs diff --git a/sidecar/src/database/migration_manager/tests.rs b/event_sidecar/src/database/migration_manager/tests.rs similarity index 100% rename from sidecar/src/database/migration_manager/tests.rs rename to event_sidecar/src/database/migration_manager/tests.rs diff --git a/sidecar/src/database/mod.rs b/event_sidecar/src/database/mod.rs similarity index 82% rename from sidecar/src/database/mod.rs rename to event_sidecar/src/database/mod.rs index bbcbd297..c7796eed 100644 --- a/sidecar/src/database/mod.rs +++ b/event_sidecar/src/database/mod.rs @@ -11,3 +11,5 @@ pub mod sqlite_database; #[cfg(test)] pub mod tests; pub mod types; + +pub use self::database_errors::DatabaseConfigError; diff --git a/sidecar/src/database/postgresql_database.rs b/event_sidecar/src/database/postgresql_database.rs similarity index 100% rename from sidecar/src/database/postgresql_database.rs rename to event_sidecar/src/database/postgresql_database.rs diff --git a/sidecar/src/database/postgresql_database/reader.rs b/event_sidecar/src/database/postgresql_database/reader.rs similarity index 100% rename from sidecar/src/database/postgresql_database/reader.rs rename to event_sidecar/src/database/postgresql_database/reader.rs diff --git a/sidecar/src/database/postgresql_database/tests.rs b/event_sidecar/src/database/postgresql_database/tests.rs similarity index 100% rename from 
sidecar/src/database/postgresql_database/tests.rs rename to event_sidecar/src/database/postgresql_database/tests.rs diff --git a/sidecar/src/database/postgresql_database/writer.rs b/event_sidecar/src/database/postgresql_database/writer.rs similarity index 100% rename from sidecar/src/database/postgresql_database/writer.rs rename to event_sidecar/src/database/postgresql_database/writer.rs diff --git a/sidecar/src/database/reader_generator.rs b/event_sidecar/src/database/reader_generator.rs similarity index 100% rename from sidecar/src/database/reader_generator.rs rename to event_sidecar/src/database/reader_generator.rs diff --git a/sidecar/src/database/sqlite_database.rs b/event_sidecar/src/database/sqlite_database.rs similarity index 100% rename from sidecar/src/database/sqlite_database.rs rename to event_sidecar/src/database/sqlite_database.rs diff --git a/sidecar/src/database/sqlite_database/reader.rs b/event_sidecar/src/database/sqlite_database/reader.rs similarity index 100% rename from sidecar/src/database/sqlite_database/reader.rs rename to event_sidecar/src/database/sqlite_database/reader.rs diff --git a/sidecar/src/database/sqlite_database/tests.rs b/event_sidecar/src/database/sqlite_database/tests.rs similarity index 100% rename from sidecar/src/database/sqlite_database/tests.rs rename to event_sidecar/src/database/sqlite_database/tests.rs diff --git a/sidecar/src/database/sqlite_database/writer.rs b/event_sidecar/src/database/sqlite_database/writer.rs similarity index 100% rename from sidecar/src/database/sqlite_database/writer.rs rename to event_sidecar/src/database/sqlite_database/writer.rs diff --git a/sidecar/src/database/tests.rs b/event_sidecar/src/database/tests.rs similarity index 100% rename from sidecar/src/database/tests.rs rename to event_sidecar/src/database/tests.rs diff --git a/sidecar/src/database/types.rs b/event_sidecar/src/database/types.rs similarity index 100% rename from sidecar/src/database/types.rs rename to 
event_sidecar/src/database/types.rs diff --git a/sidecar/src/database/writer_generator.rs b/event_sidecar/src/database/writer_generator.rs similarity index 100% rename from sidecar/src/database/writer_generator.rs rename to event_sidecar/src/database/writer_generator.rs diff --git a/sidecar/src/event_stream_server.rs b/event_sidecar/src/event_stream_server.rs similarity index 100% rename from sidecar/src/event_stream_server.rs rename to event_sidecar/src/event_stream_server.rs diff --git a/sidecar/src/event_stream_server/config.rs b/event_sidecar/src/event_stream_server/config.rs similarity index 100% rename from sidecar/src/event_stream_server/config.rs rename to event_sidecar/src/event_stream_server/config.rs diff --git a/sidecar/src/event_stream_server/endpoint.rs b/event_sidecar/src/event_stream_server/endpoint.rs similarity index 100% rename from sidecar/src/event_stream_server/endpoint.rs rename to event_sidecar/src/event_stream_server/endpoint.rs diff --git a/sidecar/src/event_stream_server/event_indexer.rs b/event_sidecar/src/event_stream_server/event_indexer.rs similarity index 100% rename from sidecar/src/event_stream_server/event_indexer.rs rename to event_sidecar/src/event_stream_server/event_indexer.rs diff --git a/sidecar/src/event_stream_server/http_server.rs b/event_sidecar/src/event_stream_server/http_server.rs similarity index 100% rename from sidecar/src/event_stream_server/http_server.rs rename to event_sidecar/src/event_stream_server/http_server.rs diff --git a/sidecar/src/event_stream_server/sse_server.rs b/event_sidecar/src/event_stream_server/sse_server.rs similarity index 100% rename from sidecar/src/event_stream_server/sse_server.rs rename to event_sidecar/src/event_stream_server/sse_server.rs diff --git a/sidecar/src/event_stream_server/tests.rs b/event_sidecar/src/event_stream_server/tests.rs similarity index 100% rename from sidecar/src/event_stream_server/tests.rs rename to event_sidecar/src/event_stream_server/tests.rs diff --git 
a/event_sidecar/src/lib.rs b/event_sidecar/src/lib.rs new file mode 100644 index 00000000..7bca20d0 --- /dev/null +++ b/event_sidecar/src/lib.rs @@ -0,0 +1,790 @@ +#![deny(clippy::complexity)] +#![deny(clippy::cognitive_complexity)] +#![deny(clippy::too_many_lines)] + +extern crate core; +mod admin_server; +mod api_version_manager; +mod database; +mod event_stream_server; +pub mod rest_server; +mod sql; +#[cfg(test)] +pub(crate) mod testing; +#[cfg(test)] +pub(crate) mod tests; +mod types; +mod utils; + +use std::collections::HashMap; +use std::process::ExitCode; +use std::{net::IpAddr, path::PathBuf, str::FromStr, time::Duration}; + +use crate::{ + event_stream_server::{Config as SseConfig, EventStreamServer}, + rest_server::run_server as start_rest_server, + types::{ + database::{DatabaseWriteError, DatabaseWriter}, + sse_events::*, + }, +}; +use anyhow::{Context, Error}; +use api_version_manager::{ApiVersionManager, GuardedApiVersionManager}; +use casper_event_listener::{ + EventListener, EventListenerBuilder, NodeConnectionInterface, SseEvent, +}; +use casper_event_types::{metrics, sse_data::SseData, Filter}; +use futures::future::join_all; +use hex_fmt::HexFmt; +use tokio::{ + sync::mpsc::{channel as mpsc_channel, Receiver, Sender}, + task::JoinHandle, + time::sleep, +}; +use tracing::{debug, error, info, trace, warn}; +use types::database::DatabaseReader; +#[cfg(feature = "additional-metrics")] +use utils::start_metrics_thread; + +pub use admin_server::run_server as run_admin_server; +pub use database::DatabaseConfigError; +pub use types::config::{ + AdminApiServerConfig, Connection, RestApiServerConfig, SseEventServerConfig, StorageConfig, + StorageConfigSerdeTarget, +}; + +pub type Database = types::database::Database; + +const DEFAULT_CHANNEL_SIZE: usize = 1000; + +pub async fn run( + config: SseEventServerConfig, + database: Database, + storage_path: String, +) -> Result { + validate_config(&config)?; + let (event_listeners, sse_data_receivers) = 
build_event_listeners(&config)?; + // This channel allows SseData to be sent from multiple connected nodes to the single EventStreamServer. + let (outbound_sse_data_sender, outbound_sse_data_receiver) = + mpsc_channel(config.outbound_channel_size.unwrap_or(DEFAULT_CHANNEL_SIZE)); + let connection_configs = config.connections.clone(); + + // Task to manage incoming events from all three filters + let listening_task_handle = start_sse_processors( + connection_configs, + event_listeners, + sse_data_receivers, + database.clone(), + outbound_sse_data_sender.clone(), + ); + + let event_broadcasting_handle = + start_event_broadcasting(&config, storage_path, outbound_sse_data_receiver); + + tokio::try_join!( + flatten_handle(event_broadcasting_handle), + flatten_handle(listening_task_handle), + ) + .map(|_| Ok(ExitCode::SUCCESS))? +} + +fn start_event_broadcasting( + config: &SseEventServerConfig, + storage_path: String, + mut outbound_sse_data_receiver: Receiver<(SseData, Option, Option)>, +) -> JoinHandle> { + let event_stream_server_port = config.event_stream_server.port; + let buffer_length = config.event_stream_server.event_stream_buffer_length; + let max_concurrent_subscribers = config.event_stream_server.max_concurrent_subscribers; + tokio::spawn(async move { + // Create new instance for the Sidecar's Event Stream Server + let mut event_stream_server = EventStreamServer::new( + SseConfig::new( + event_stream_server_port, + Some(buffer_length), + Some(max_concurrent_subscribers), + ), + PathBuf::from(storage_path), + ) + .context("Error starting EventStreamServer")?; + while let Some((sse_data, inbound_filter, maybe_json_data)) = + outbound_sse_data_receiver.recv().await + { + event_stream_server.broadcast(sse_data, inbound_filter, maybe_json_data); + } + Err::<(), Error>(Error::msg("Event broadcasting finished")) + }) +} + +fn start_sse_processors( + connection_configs: Vec, + event_listeners: Vec, + sse_data_receivers: Vec>, + database: Database, + 
outbound_sse_data_sender: Sender<(SseData, Option, Option)>, +) -> JoinHandle> { + tokio::spawn(async move { + let mut join_handles = Vec::with_capacity(event_listeners.len()); + let api_version_manager = ApiVersionManager::new(); + + for ((mut event_listener, connection_config), sse_data_receiver) in event_listeners + .into_iter() + .zip(connection_configs) + .zip(sse_data_receivers) + { + tokio::spawn(async move { + let res = event_listener.stream_aggregated_events().await; + if let Err(e) = res { + let addr = event_listener.get_node_interface().ip_address.to_string(); + error!("Disconnected from {}. Reason: {}", addr, e.to_string()); + } + }); + let join_handle = spawn_sse_processor( + &database, + sse_data_receiver, + &outbound_sse_data_sender, + connection_config, + &api_version_manager, + ); + join_handles.push(join_handle); + } + + let _ = join_all(join_handles).await; + //Send Shutdown to the sidecar sse endpoint + let _ = outbound_sse_data_sender + .send((SseData::Shutdown, None, None)) + .await; + // Below sleep is a workaround to allow the above Shutdown to propagate. + // If we don't do this there is a race condition between handling of the message and dropping of the outbound server + // which happens when we leave this function and the `tokio::try_join!` exits due to this. This race condition causes 9 of 10 + // tries to not propagate the Shutdown (usually drop happens faster than message propagation to outbound). + // Fixing this race condition would require rewriting a lot of code. AFAICT the only drawback to this workaround is that the + // rest server and the sse server will exit 200ms later than they would without it.
+ sleep(Duration::from_millis(200)).await; + Err::<(), Error>(Error::msg("Connected node(s) are unavailable")) + }) +} + +fn spawn_sse_processor( + database: &Database, + sse_data_receiver: Receiver, + outbound_sse_data_sender: &Sender<(SseData, Option, Option)>, + connection_config: Connection, + api_version_manager: &std::sync::Arc>, +) -> JoinHandle> { + match database.clone() { + Database::SqliteDatabaseWrapper(db) => tokio::spawn(sse_processor( + sse_data_receiver, + outbound_sse_data_sender.clone(), + db.clone(), + false, + connection_config.enable_logging, + api_version_manager.clone(), + )), + Database::PostgreSqlDatabaseWrapper(db) => tokio::spawn(sse_processor( + sse_data_receiver, + outbound_sse_data_sender.clone(), + db.clone(), + true, + connection_config.enable_logging, + api_version_manager.clone(), + )), + } +} + +pub async fn run_rest_server( + rest_server_config: RestApiServerConfig, + database: Database, +) -> Result { + match database { + Database::SqliteDatabaseWrapper(db) => start_rest_server(rest_server_config, db).await, + Database::PostgreSqlDatabaseWrapper(db) => start_rest_server(rest_server_config, db).await, + } + .map(|_| ExitCode::SUCCESS) +} + +fn build_event_listeners( + config: &SseEventServerConfig, +) -> Result<(Vec, Vec>), Error> { + let mut event_listeners = Vec::with_capacity(config.connections.len()); + let mut sse_data_receivers = Vec::new(); + for connection in &config.connections { + let (inbound_sse_data_sender, inbound_sse_data_receiver) = + mpsc_channel(config.inbound_channel_size.unwrap_or(DEFAULT_CHANNEL_SIZE)); + sse_data_receivers.push(inbound_sse_data_receiver); + let event_listener = builder(connection, inbound_sse_data_sender)?.build(); + event_listeners.push(event_listener?); + } + Ok((event_listeners, sse_data_receivers)) +} + +fn builder( + connection: &Connection, + inbound_sse_data_sender: Sender, +) -> Result { + let node_interface = NodeConnectionInterface { + ip_address: 
IpAddr::from_str(&connection.ip_address)?, + sse_port: connection.sse_port, + rest_port: connection.rest_port, + }; + let event_listener_builder = EventListenerBuilder { + node: node_interface, + max_connection_attempts: connection.max_attempts, + delay_between_attempts: Duration::from_secs( + connection.delay_between_retries_in_seconds as u64, + ), + allow_partial_connection: connection.allow_partial_connection, + sse_event_sender: inbound_sse_data_sender, + connection_timeout: Duration::from_secs( + connection.connection_timeout_in_seconds.unwrap_or(5) as u64, + ), + sleep_between_keep_alive_checks: Duration::from_secs( + connection + .sleep_between_keep_alive_checks_in_seconds + .unwrap_or(60) as u64, + ), + no_message_timeout: Duration::from_secs( + connection.no_message_timeout_in_seconds.unwrap_or(120) as u64, + ), + }; + Ok(event_listener_builder) +} + +fn validate_config(config: &SseEventServerConfig) -> Result<(), Error> { + if config + .connections + .iter() + .any(|connection| connection.max_attempts < 1) + { + return Err(Error::msg( + "Unable to run: max_attempts setting must be above 0 for the sidecar to attempt connection" + )); + } + Ok(()) +} + +async fn flatten_handle(handle: JoinHandle>) -> Result { + match handle.await { + Ok(Ok(result)) => Ok(result), + Ok(Err(err)) => Err(err), + Err(join_err) => Err(Error::from(join_err)), + } +} + +async fn handle_database_save_result( + entity_name: &str, + entity_identifier: &str, + res: Result, + outbound_sse_data_sender: &Sender<(SseData, Option, Option)>, + inbound_filter: Filter, + json_data: Option, + build_sse_data: F, +) where + F: FnOnce() -> SseData, +{ + match res { + Ok(_) => { + count_internal_event("main_inbound_sse_data", "db_save_end"); + count_internal_event("main_inbound_sse_data", "outbound_sse_data_send_start"); + if let Err(error) = outbound_sse_data_sender + .send((build_sse_data(), Some(inbound_filter), json_data)) + .await + { + count_internal_event("main_inbound_sse_data", 
"outbound_sse_data_send_end"); + debug!( + "Error when sending to outbound_sse_data_sender. Error: {}", + error + ); + } else { + count_internal_event("main_inbound_sse_data", "outbound_sse_data_send_end"); + } + } + Err(DatabaseWriteError::UniqueConstraint(uc_err)) => { + count_internal_event("main_inbound_sse_data", "db_save_end"); + debug!( + "Already received {} ({}), logged in event_log", + entity_name, entity_identifier, + ); + trace!(?uc_err); + } + Err(other_err) => { + count_internal_event("main_inbound_sse_data", "db_save_end"); + count_error(format!("db_save_error_{}", entity_name).as_str()); + warn!(?other_err, "Unexpected error saving {}", entity_identifier); + } + } + count_internal_event("main_inbound_sse_data", "event_received_end"); +} + +/// Function to handle single event in the sse_processor. +/// Returns false if the handling indicated that no other messages should be processed. +/// Returns true otherwise. +#[allow(clippy::too_many_lines)] +async fn handle_single_event( + sse_event: SseEvent, + database: Db, + enable_event_logging: bool, + outbound_sse_data_sender: Sender<(SseData, Option, Option)>, + api_version_manager: GuardedApiVersionManager, +) { + match sse_event.data { + SseData::ApiVersion(_) | SseData::Shutdown => { + //don't do debug counting for ApiVersion since we don't store it + } + _ => { + count_internal_event("main_inbound_sse_data", "event_received_start"); + } + } + match sse_event.data { + SseData::SidecarVersion(_) => { + //Do nothing -> the inbound shouldn't produce this endpoint, it can be only produced by sidecar to the outbound + } + SseData::ApiVersion(version) => { + handle_api_version( + api_version_manager, + version, + &outbound_sse_data_sender, + sse_event.inbound_filter, + enable_event_logging, + ) + .await; + } + SseData::BlockAdded { block, block_hash } => { + if enable_event_logging { + let hex_block_hash = HexFmt(block_hash.inner()); + info!("Block Added: {:18}", hex_block_hash); + debug!("Block Added: {}", 
hex_block_hash); + } + count_internal_event("main_inbound_sse_data", "db_save_start"); + let res = database + .save_block_added( + BlockAdded::new(block_hash, block.clone()), + sse_event.id, + sse_event.source.to_string(), + sse_event.api_version, + ) + .await; + handle_database_save_result( + "BlockAdded", + HexFmt(block_hash.inner()).to_string().as_str(), + res, + &outbound_sse_data_sender, + sse_event.inbound_filter, + sse_event.json_data, + || SseData::BlockAdded { block, block_hash }, + ) + .await; + } + SseData::DeployAccepted { deploy } => { + if enable_event_logging { + let hex_deploy_hash = HexFmt(deploy.hash().inner()); + info!("Deploy Accepted: {:18}", hex_deploy_hash); + debug!("Deploy Accepted: {}", hex_deploy_hash); + } + let deploy_accepted = DeployAccepted::new(deploy.clone()); + count_internal_event("main_inbound_sse_data", "db_save_start"); + let res = database + .save_deploy_accepted( + deploy_accepted, + sse_event.id, + sse_event.source.to_string(), + sse_event.api_version, + ) + .await; + handle_database_save_result( + "DeployAccepted", + HexFmt(deploy.hash().inner()).to_string().as_str(), + res, + &outbound_sse_data_sender, + sse_event.inbound_filter, + sse_event.json_data, + || SseData::DeployAccepted { deploy }, + ) + .await; + } + SseData::DeployExpired { deploy_hash } => { + if enable_event_logging { + let hex_deploy_hash = HexFmt(deploy_hash.inner()); + info!("Deploy Expired: {:18}", hex_deploy_hash); + debug!("Deploy Expired: {}", hex_deploy_hash); + } + count_internal_event("main_inbound_sse_data", "db_save_start"); + let res = database + .save_deploy_expired( + DeployExpired::new(deploy_hash), + sse_event.id, + sse_event.source.to_string(), + sse_event.api_version, + ) + .await; + handle_database_save_result( + "DeployExpired", + HexFmt(deploy_hash.inner()).to_string().as_str(), + res, + &outbound_sse_data_sender, + sse_event.inbound_filter, + sse_event.json_data, + || SseData::DeployExpired { deploy_hash }, + ) + .await; + } + 
SseData::DeployProcessed { + deploy_hash, + account, + timestamp, + ttl, + dependencies, + block_hash, + execution_result, + } => { + if enable_event_logging { + let hex_deploy_hash = HexFmt(deploy_hash.inner()); + info!("Deploy Processed: {:18}", hex_deploy_hash); + debug!("Deploy Processed: {}", hex_deploy_hash); + } + let deploy_processed = DeployProcessed::new( + deploy_hash.clone(), + account.clone(), + timestamp, + ttl, + dependencies.clone(), + block_hash.clone(), + execution_result.clone(), + ); + count_internal_event("main_inbound_sse_data", "db_save_start"); + let res = database + .save_deploy_processed( + deploy_processed.clone(), + sse_event.id, + sse_event.source.to_string(), + sse_event.api_version, + ) + .await; + + handle_database_save_result( + "DeployProcessed", + HexFmt(deploy_hash.inner()).to_string().as_str(), + res, + &outbound_sse_data_sender, + sse_event.inbound_filter, + sse_event.json_data, + || SseData::DeployProcessed { + deploy_hash, + account, + timestamp, + ttl, + dependencies, + block_hash, + execution_result, + }, + ) + .await; + } + SseData::Fault { + era_id, + timestamp, + public_key, + } => { + let fault = Fault::new(era_id, public_key.clone(), timestamp); + warn!(%fault, "Fault reported"); + count_internal_event("main_inbound_sse_data", "db_save_start"); + let res = database + .save_fault( + fault.clone(), + sse_event.id, + sse_event.source.to_string(), + sse_event.api_version, + ) + .await; + + handle_database_save_result( + "Fault", + format!("{:#?}", fault).as_str(), + res, + &outbound_sse_data_sender, + sse_event.inbound_filter, + sse_event.json_data, + || SseData::Fault { + era_id, + timestamp, + public_key, + }, + ) + .await; + } + SseData::FinalitySignature(fs) => { + if enable_event_logging { + debug!( + "Finality Signature: {} for {}", + fs.signature(), + fs.block_hash() + ); + } + let finality_signature = FinalitySignature::new(fs.clone()); + count_internal_event("main_inbound_sse_data", "db_save_start"); + let res = 
database + .save_finality_signature( + finality_signature.clone(), + sse_event.id, + sse_event.source.to_string(), + sse_event.api_version, + ) + .await; + handle_database_save_result( + "FinalitySignature", + "", + res, + &outbound_sse_data_sender, + sse_event.inbound_filter, + sse_event.json_data, + || SseData::FinalitySignature(fs), + ) + .await; + } + SseData::Step { + era_id, + execution_effect, + } => { + let step = Step::new(era_id, execution_effect.clone()); + if enable_event_logging { + info!("Step at era: {}", era_id.value()); + } + count_internal_event("main_inbound_sse_data", "db_save_start"); + let res = database + .save_step( + step, + sse_event.id, + sse_event.source.to_string(), + sse_event.api_version, + ) + .await; + handle_database_save_result( + "Step", + format!("{}", era_id.value()).as_str(), + res, + &outbound_sse_data_sender, + sse_event.inbound_filter, + sse_event.json_data, + || SseData::Step { + era_id, + execution_effect, + }, + ) + .await; + } + SseData::Shutdown => handle_shutdown(sse_event, database, outbound_sse_data_sender).await, + } +} + +async fn handle_shutdown( + sse_event: SseEvent, + sqlite_database: Db, + outbound_sse_data_sender: Sender<(SseData, Option, Option)>, +) { + warn!("Node ({}) is unavailable", sse_event.source.to_string()); + let res = sqlite_database + .save_shutdown( + sse_event.id, + sse_event.source.to_string(), + sse_event.api_version, + ) + .await; + match res { + Ok(_) | Err(DatabaseWriteError::UniqueConstraint(_)) => { + // We push to outbound on UniqueConstraint error because in sse_server we match shutdowns to outbounds based on the filter they came from to prevent duplicates. + // But that also means that we need to pass through all the Shutdown events so the sse_server can determine to which outbound filters they need to be pushed (we + // don't store in DB the information from which filter did shutdown came). 
+ if let Err(error) = outbound_sse_data_sender + .send(( + SseData::Shutdown, + Some(sse_event.inbound_filter), + sse_event.json_data, + )) + .await + { + debug!( + "Error when sending to outbound_sse_data_sender. Error: {}", + error + ); + } + } + Err(other_err) => { + count_error("db_save_error_shutdown"); + warn!(?other_err, "Unexpected error saving Shutdown") + } + } +} + +async fn handle_api_version( + api_version_manager: std::sync::Arc>, + version: casper_types::ProtocolVersion, + outbound_sse_data_sender: &Sender<(SseData, Option, Option)>, + filter: Filter, + enable_event_logging: bool, +) { + let mut manager_guard = api_version_manager.lock().await; + let changed_newest_version = manager_guard.store_version(version); + if changed_newest_version { + if let Err(error) = outbound_sse_data_sender + .send((SseData::ApiVersion(version), Some(filter), None)) + .await + { + debug!( + "Error when sending to outbound_sse_data_sender. Error: {}", + error + ); + } + } + drop(manager_guard); + if enable_event_logging { + info!(%version, "API Version"); + } +} + +async fn sse_processor( + inbound_sse_data_receiver: Receiver, + outbound_sse_data_sender: Sender<(SseData, Option, Option)>, + database: Db, + database_supports_multithreaded_processing: bool, + enable_event_logging: bool, + api_version_manager: GuardedApiVersionManager, +) -> Result<(), Error> { + #[cfg(feature = "additional-metrics")] + let metrics_tx = start_metrics_thread("sse_save".to_string()); + // This task starts the listener pushing events to the sse_data_receiver + if database_supports_multithreaded_processing { + start_multi_threaded_events_consumer( + inbound_sse_data_receiver, + outbound_sse_data_sender, + database, + enable_event_logging, + api_version_manager, + #[cfg(feature = "additional-metrics")] + metrics_tx, + ) + .await; + } else { + start_single_threaded_events_consumer( + inbound_sse_data_receiver, + outbound_sse_data_sender, + database, + enable_event_logging, + api_version_manager, 
+ #[cfg(feature = "additional-metrics")] + metrics_tx, + ) + .await; + } + Ok(()) +} + +fn handle_events_in_thread( + mut queue_rx: Receiver, + database: Db, + outbound_sse_data_sender: Sender<(SseData, Option, Option)>, + api_version_manager: GuardedApiVersionManager, + enable_event_logging: bool, + #[cfg(feature = "additional-metrics")] metrics_sender: Sender<()>, +) { + tokio::spawn(async move { + while let Some(sse_event) = queue_rx.recv().await { + handle_single_event( + sse_event, + database.clone(), + enable_event_logging, + outbound_sse_data_sender.clone(), + api_version_manager.clone(), + ) + .await; + #[cfg(feature = "additional-metrics")] + let _ = metrics_sender.send(()).await; + } + }); +} + +fn build_queues(cache_size: usize) -> HashMap, Receiver)> { + let mut map = HashMap::new(); + map.insert(Filter::Deploys, mpsc_channel(cache_size)); + map.insert(Filter::Events, mpsc_channel(cache_size)); + map.insert(Filter::Main, mpsc_channel(cache_size)); + map.insert(Filter::Sigs, mpsc_channel(cache_size)); + map +} + +async fn start_multi_threaded_events_consumer< + Db: DatabaseReader + DatabaseWriter + Clone + Send + Sync + 'static, +>( + mut inbound_sse_data_receiver: Receiver, + outbound_sse_data_sender: Sender<(SseData, Option, Option)>, + database: Db, + enable_event_logging: bool, + api_version_manager: GuardedApiVersionManager, + #[cfg(feature = "additional-metrics")] metrics_sender: Sender<()>, +) { + let mut senders_and_receivers_map = build_queues(DEFAULT_CHANNEL_SIZE); + let mut senders_map = HashMap::new(); + for (filter, (tx, rx)) in senders_and_receivers_map.drain() { + handle_events_in_thread( + rx, + database.clone(), + outbound_sse_data_sender.clone(), + api_version_manager.clone(), + enable_event_logging, + #[cfg(feature = "additional-metrics")] + metrics_sender.clone(), + ); + senders_map.insert(filter, tx); + } + + while let Some(sse_event) = inbound_sse_data_receiver.recv().await { + if let Some(tx) = 
senders_map.get(&sse_event.inbound_filter) { + tx.send(sse_event).await.unwrap() + } else { + error!( + "Failed to find an sse handler queue for inbound filter {}", + sse_event.inbound_filter + ); + break; + } + } +} + +async fn start_single_threaded_events_consumer< + Db: DatabaseReader + DatabaseWriter + Clone + Send + Sync, +>( + mut inbound_sse_data_receiver: Receiver, + outbound_sse_data_sender: Sender<(SseData, Option, Option)>, + database: Db, + enable_event_logging: bool, + api_version_manager: GuardedApiVersionManager, + #[cfg(feature = "additional-metrics")] metrics_sender: Sender<()>, +) { + while let Some(sse_event) = inbound_sse_data_receiver.recv().await { + handle_single_event( + sse_event, + database.clone(), + enable_event_logging, + outbound_sse_data_sender.clone(), + api_version_manager.clone(), + ) + .await; + #[cfg(feature = "additional-metrics")] + let _ = metrics_sender.send(()).await; + } +} + +fn count_error(reason: &str) { + metrics::ERROR_COUNTS + .with_label_values(&["main", reason]) + .inc(); +} + +/// This metric is used for debugging of possible issues +/// with sidecar to determine at which step of processing there was a hang. +/// If we determine that this issue was fixed completely this can be removed +/// (the corresponding metric also). 
+fn count_internal_event(category: &str, reason: &str) { + metrics::INTERNAL_EVENTS + .with_label_values(&[category, reason]) + .inc(); +} diff --git a/sidecar/src/rest_server.rs b/event_sidecar/src/rest_server.rs similarity index 92% rename from sidecar/src/rest_server.rs rename to event_sidecar/src/rest_server.rs index 8e8507b3..b186163f 100644 --- a/sidecar/src/rest_server.rs +++ b/event_sidecar/src/rest_server.rs @@ -14,14 +14,14 @@ use tower::{buffer::Buffer, make::Shared, ServiceBuilder}; use warp::Filter; use crate::{ - types::{config::RestServerConfig, database::DatabaseReader}, + types::{config::RestApiServerConfig, database::DatabaseReader}, utils::resolve_address, }; const BIND_ALL_INTERFACES: &str = "0.0.0.0"; pub async fn run_server( - config: RestServerConfig, + config: RestApiServerConfig, database: Db, ) -> Result<(), Error> { let api = filters::combined_filters(database); diff --git a/sidecar/src/rest_server/errors.rs b/event_sidecar/src/rest_server/errors.rs similarity index 100% rename from sidecar/src/rest_server/errors.rs rename to event_sidecar/src/rest_server/errors.rs diff --git a/sidecar/src/rest_server/filters.rs b/event_sidecar/src/rest_server/filters.rs similarity index 100% rename from sidecar/src/rest_server/filters.rs rename to event_sidecar/src/rest_server/filters.rs diff --git a/sidecar/src/rest_server/handlers.rs b/event_sidecar/src/rest_server/handlers.rs similarity index 100% rename from sidecar/src/rest_server/handlers.rs rename to event_sidecar/src/rest_server/handlers.rs diff --git a/sidecar/src/rest_server/openapi.rs b/event_sidecar/src/rest_server/openapi.rs similarity index 100% rename from sidecar/src/rest_server/openapi.rs rename to event_sidecar/src/rest_server/openapi.rs diff --git a/sidecar/src/rest_server/openapi/schema_transformation_visitor.rs b/event_sidecar/src/rest_server/openapi/schema_transformation_visitor.rs similarity index 100% rename from sidecar/src/rest_server/openapi/schema_transformation_visitor.rs 
rename to event_sidecar/src/rest_server/openapi/schema_transformation_visitor.rs diff --git a/sidecar/src/rest_server/tests.rs b/event_sidecar/src/rest_server/tests.rs similarity index 100% rename from sidecar/src/rest_server/tests.rs rename to event_sidecar/src/rest_server/tests.rs diff --git a/sidecar/src/sql.rs b/event_sidecar/src/sql.rs similarity index 100% rename from sidecar/src/sql.rs rename to event_sidecar/src/sql.rs diff --git a/sidecar/src/sql/tables.rs b/event_sidecar/src/sql/tables.rs similarity index 100% rename from sidecar/src/sql/tables.rs rename to event_sidecar/src/sql/tables.rs diff --git a/sidecar/src/sql/tables/block_added.rs b/event_sidecar/src/sql/tables/block_added.rs similarity index 100% rename from sidecar/src/sql/tables/block_added.rs rename to event_sidecar/src/sql/tables/block_added.rs diff --git a/sidecar/src/sql/tables/deploy_accepted.rs b/event_sidecar/src/sql/tables/deploy_accepted.rs similarity index 100% rename from sidecar/src/sql/tables/deploy_accepted.rs rename to event_sidecar/src/sql/tables/deploy_accepted.rs diff --git a/sidecar/src/sql/tables/deploy_event.rs b/event_sidecar/src/sql/tables/deploy_event.rs similarity index 100% rename from sidecar/src/sql/tables/deploy_event.rs rename to event_sidecar/src/sql/tables/deploy_event.rs diff --git a/sidecar/src/sql/tables/deploy_expired.rs b/event_sidecar/src/sql/tables/deploy_expired.rs similarity index 100% rename from sidecar/src/sql/tables/deploy_expired.rs rename to event_sidecar/src/sql/tables/deploy_expired.rs diff --git a/sidecar/src/sql/tables/deploy_processed.rs b/event_sidecar/src/sql/tables/deploy_processed.rs similarity index 100% rename from sidecar/src/sql/tables/deploy_processed.rs rename to event_sidecar/src/sql/tables/deploy_processed.rs diff --git a/sidecar/src/sql/tables/event_log.rs b/event_sidecar/src/sql/tables/event_log.rs similarity index 100% rename from sidecar/src/sql/tables/event_log.rs rename to event_sidecar/src/sql/tables/event_log.rs diff --git 
a/sidecar/src/sql/tables/event_type.rs b/event_sidecar/src/sql/tables/event_type.rs similarity index 98% rename from sidecar/src/sql/tables/event_type.rs rename to event_sidecar/src/sql/tables/event_type.rs index 84dd8e26..39326b8e 100644 --- a/sidecar/src/sql/tables/event_type.rs +++ b/event_sidecar/src/sql/tables/event_type.rs @@ -3,6 +3,7 @@ use sea_query::{ TableCreateStatement, }; +#[allow(clippy::enum_variant_names)] #[derive(Iden)] pub(super) enum EventType { Table, diff --git a/sidecar/src/sql/tables/fault.rs b/event_sidecar/src/sql/tables/fault.rs similarity index 100% rename from sidecar/src/sql/tables/fault.rs rename to event_sidecar/src/sql/tables/fault.rs diff --git a/sidecar/src/sql/tables/finality_signature.rs b/event_sidecar/src/sql/tables/finality_signature.rs similarity index 100% rename from sidecar/src/sql/tables/finality_signature.rs rename to event_sidecar/src/sql/tables/finality_signature.rs diff --git a/sidecar/src/sql/tables/migration.rs b/event_sidecar/src/sql/tables/migration.rs similarity index 100% rename from sidecar/src/sql/tables/migration.rs rename to event_sidecar/src/sql/tables/migration.rs diff --git a/sidecar/src/sql/tables/shutdown.rs b/event_sidecar/src/sql/tables/shutdown.rs similarity index 98% rename from sidecar/src/sql/tables/shutdown.rs rename to event_sidecar/src/sql/tables/shutdown.rs index 547bf542..7057c3d6 100644 --- a/sidecar/src/sql/tables/shutdown.rs +++ b/event_sidecar/src/sql/tables/shutdown.rs @@ -5,6 +5,7 @@ use sea_query::{ use super::event_log::EventLog; +#[allow(clippy::enum_variant_names)] #[derive(Iden)] pub(crate) enum Shutdown { #[iden = "Shutdown"] diff --git a/sidecar/src/sql/tables/step.rs b/event_sidecar/src/sql/tables/step.rs similarity index 100% rename from sidecar/src/sql/tables/step.rs rename to event_sidecar/src/sql/tables/step.rs diff --git a/sidecar/src/testing.rs b/event_sidecar/src/testing.rs similarity index 100% rename from sidecar/src/testing.rs rename to event_sidecar/src/testing.rs 
diff --git a/sidecar/src/testing/fake_database.rs b/event_sidecar/src/testing/fake_database.rs similarity index 100% rename from sidecar/src/testing/fake_database.rs rename to event_sidecar/src/testing/fake_database.rs diff --git a/sidecar/src/testing/fake_event_stream.rs b/event_sidecar/src/testing/fake_event_stream.rs similarity index 100% rename from sidecar/src/testing/fake_event_stream.rs rename to event_sidecar/src/testing/fake_event_stream.rs diff --git a/sidecar/src/testing/mock_node.rs b/event_sidecar/src/testing/mock_node.rs similarity index 100% rename from sidecar/src/testing/mock_node.rs rename to event_sidecar/src/testing/mock_node.rs diff --git a/sidecar/src/testing/raw_sse_events_utils.rs b/event_sidecar/src/testing/raw_sse_events_utils.rs similarity index 100% rename from sidecar/src/testing/raw_sse_events_utils.rs rename to event_sidecar/src/testing/raw_sse_events_utils.rs diff --git a/sidecar/src/testing/shared.rs b/event_sidecar/src/testing/shared.rs similarity index 100% rename from sidecar/src/testing/shared.rs rename to event_sidecar/src/testing/shared.rs diff --git a/sidecar/src/testing/simple_sse_server.rs b/event_sidecar/src/testing/simple_sse_server.rs similarity index 100% rename from sidecar/src/testing/simple_sse_server.rs rename to event_sidecar/src/testing/simple_sse_server.rs diff --git a/sidecar/src/testing/test_clock.rs b/event_sidecar/src/testing/test_clock.rs similarity index 100% rename from sidecar/src/testing/test_clock.rs rename to event_sidecar/src/testing/test_clock.rs diff --git a/sidecar/src/testing/testing_config.rs b/event_sidecar/src/testing/testing_config.rs similarity index 79% rename from sidecar/src/testing/testing_config.rs rename to event_sidecar/src/testing/testing_config.rs index 2cd870da..1b9d5400 100644 --- a/sidecar/src/testing/testing_config.rs +++ b/event_sidecar/src/testing/testing_config.rs @@ -3,11 +3,14 @@ use portpicker::Port; use std::sync::{Arc, Mutex}; use tempfile::TempDir; -use 
crate::types::config::{Config, Connection, StorageConfig}; +use crate::types::config::{Connection, RestApiServerConfig, SseEventServerConfig, StorageConfig}; /// A basic wrapper with helper methods for constructing and tweaking [Config]s for use in tests. +#[derive(Clone)] pub struct TestingConfig { - pub(crate) config: Config, + pub(crate) event_server_config: SseEventServerConfig, + pub(crate) storage_config: StorageConfig, + pub(crate) rest_api_server_config: RestApiServerConfig, } #[cfg(test)] @@ -50,19 +53,24 @@ pub(crate) fn prepare_config(temp_storage: &TempDir) -> TestingConfig { impl TestingConfig { /// Creates a Default instance of TestingConfig which contains a Default instance of [Config] pub(crate) fn default() -> Self { - let config = Config::default(); - - Self { config } + let event_server_config = SseEventServerConfig::default(); + let storage_config = StorageConfig::default(); + let rest_api_server_config = RestApiServerConfig::default(); + Self { + event_server_config, + storage_config, + rest_api_server_config, + } } /// Specify where test storage (database, sse cache) should be located. /// By default it is set to `/target/test_storage` however it is recommended to overwrite this with a `TempDir` path for testing purposes. 
pub(crate) fn set_storage_path(&mut self, path: String) { - self.config.storage.set_storage_path(path); + self.storage_config.set_storage_path(path); } pub(crate) fn set_storage(&mut self, storage: StorageConfig) { - self.config.storage = storage; + self.storage_config = storage; } pub(crate) fn add_connection( @@ -85,7 +93,7 @@ impl TestingConfig { sleep_between_keep_alive_checks_in_seconds: Some(100), no_message_timeout_in_seconds: Some(100), }; - self.config.connections.push(connection); + self.event_server_config.connections.push(connection); random_port_for_sse } @@ -95,7 +103,7 @@ impl TestingConfig { port_of_node: u16, allow_partial_connection: bool, ) { - for connection in &mut self.config.connections { + for connection in &mut self.event_server_config.connections { if connection.sse_port == port_of_node { connection.allow_partial_connection = allow_partial_connection; break; @@ -112,7 +120,7 @@ impl TestingConfig { max_attempts: usize, delay_between_retries_in_seconds: usize, ) { - for connection in &mut self.config.connections { + for connection in &mut self.event_server_config.connections { if connection.sse_port == port_of_node { connection.max_attempts = max_attempts; connection.delay_between_retries_in_seconds = delay_between_retries_in_seconds; @@ -129,22 +137,17 @@ impl TestingConfig { pub(crate) fn allocate_available_ports(&mut self) { let rest_server_port = get_port(); let sse_server_port = get_port(); - self.config.rest_server.port = rest_server_port; - self.config.event_stream_server.port = sse_server_port; + self.rest_api_server_config.port = rest_server_port; + self.event_server_config.event_stream_server.port = sse_server_port; } /// Returns the inner [Config] - pub(crate) fn inner(&self) -> Config { - self.config.clone() - } - - /// Returns the port that the sidecar REST server is bound to. 
- pub(crate) fn rest_server_port(&self) -> u16 { - self.config.rest_server.port + pub(crate) fn inner(&self) -> SseEventServerConfig { + self.event_server_config.clone() } /// Returns the port that the sidecar SSE server is bound to. pub(crate) fn event_stream_server_port(&self) -> u16 { - self.config.event_stream_server.port + self.event_server_config.event_stream_server.port } } diff --git a/sidecar/src/tests.rs b/event_sidecar/src/tests.rs similarity index 100% rename from sidecar/src/tests.rs rename to event_sidecar/src/tests.rs diff --git a/sidecar/src/tests/integration_tests.rs b/event_sidecar/src/tests/integration_tests.rs similarity index 95% rename from sidecar/src/tests/integration_tests.rs rename to event_sidecar/src/tests/integration_tests.rs index 9f0799e7..da0e5e1e 100644 --- a/sidecar/src/tests/integration_tests.rs +++ b/event_sidecar/src/tests/integration_tests.rs @@ -24,13 +24,13 @@ use crate::{ testing_config::{prepare_config, TestingConfig}, }, types::{ - database::DatabaseWriter, + database::{Database, DatabaseWriter}, sse_events::{BlockAdded, Fault}, }, utils::tests::{ any_string_contains, build_test_config, build_test_config_with_retries, build_test_config_without_connections, start_nodes_and_wait, start_sidecar, - stop_nodes_and_wait, wait_for_n_messages, + start_sidecar_with_rest_api, stop_nodes_and_wait, wait_for_n_messages, }, }; @@ -43,10 +43,16 @@ async fn should_not_allow_zero_max_attempts() { let sse_port_for_node = testing_config.add_connection(None, None, None); testing_config.set_retries_for_node(sse_port_for_node, 0, 0); - - let shutdown_error = run(testing_config.inner()) + let sqlite_database = SqliteDatabase::new_from_config(&testing_config.storage_config) .await - .expect_err("Sidecar should return an Err on shutdown"); + .expect("database should start"); + let shutdown_error = run( + testing_config.inner(), + Database::SqliteDatabaseWrapper(sqlite_database), + testing_config.storage_config.get_storage_path().clone(), + ) + 
.await + .expect_err("Sidecar should return an Err on shutdown"); assert_eq!( shutdown_error.to_string(), @@ -71,7 +77,7 @@ async fn given_sidecar_when_only_node_shuts_down_then_shut_down() { node_port_for_rest_connection, ); start_nodes_and_wait(vec![&mut node_mock]).await; - let sidecar_join = start_sidecar(testing_config.inner()).await; + let sidecar_join = start_sidecar(testing_config).await; let (_, receiver) = fetch_data_from_endpoint("/events/main?start_from=0", event_stream_server_port).await; wait_for_n_messages(1, receiver, Duration::from_secs(30)).await; @@ -102,7 +108,7 @@ async fn should_allow_client_connection_to_sse() { node_port_for_rest_connection, ); start_nodes_and_wait(vec![&mut node_mock]).await; - start_sidecar(testing_config.inner()).await; + start_sidecar(testing_config).await; let (join_handle, receiver) = fetch_data_from_endpoint("/events/main?start_from=0", event_stream_server_port).await; wait_for_n_messages(1, receiver, Duration::from_secs(30)).await; @@ -125,13 +131,13 @@ async fn should_respond_to_rest_query() { node_port_for_rest_connection, event_stream_server_port, ) = build_test_config(); - let sidecar_rest_server_port = testing_config.rest_server_port(); + let sidecar_rest_server_port = testing_config.rest_api_server_config.port; let mut node_mock = MockNodeBuilder::build_example_1_5_2_node( node_port_for_sse_connection, node_port_for_rest_connection, ); start_nodes_and_wait(vec![&mut node_mock]).await; - start_sidecar(testing_config.inner()).await; + start_sidecar_with_rest_api(testing_config).await; let (_, receiver) = fetch_data_from_endpoint("/events/main?start_from=0", event_stream_server_port).await; wait_for_n_messages(1, receiver, Duration::from_secs(30)).await; @@ -167,7 +173,7 @@ async fn should_allow_partial_connection_on_one_filter() { node_port_for_rest_connection, ); start_nodes_and_wait(vec![&mut node_mock]).await; - start_sidecar(testing_config.inner()).await; + start_sidecar(testing_config).await; let 
(join_handle, receiver) = fetch_data_from_endpoint("/events/main?start_from=0", event_stream_server_port).await; wait_for_n_messages(1, receiver, Duration::from_secs(30)).await; @@ -210,7 +216,7 @@ async fn should_fail_to_reconnect() { } .build(); start_nodes_and_wait(vec![&mut node_mock]).await; - start_sidecar(testing_config.inner()).await; + start_sidecar(testing_config).await; let (join_handle, receiver) = fetch_data_from_endpoint("/events/main?start_from=0", event_stream_server_port).await; wait_for_n_messages(31, receiver, Duration::from_secs(120)).await; @@ -257,7 +263,7 @@ async fn should_reconnect() { } .build(); start_nodes_and_wait(vec![&mut node_mock]).await; - start_sidecar(testing_config.inner()).await; + start_sidecar(testing_config).await; let (join_handle, receiver) = fetch_data_from_endpoint("/events/main?start_from=0", event_stream_server_port).await; let receiver = wait_for_n_messages(31, receiver, Duration::from_secs(120)).await; @@ -300,7 +306,7 @@ async fn shutdown_should_be_passed_through() { } .build(); start_nodes_and_wait(vec![&mut node_mock]).await; - start_sidecar(testing_config.inner()).await; + start_sidecar(testing_config).await; let (join_handle, receiver) = fetch_data_from_endpoint("/events/main?start_from=0", event_stream_server_port).await; wait_for_n_messages(2, receiver, Duration::from_secs(120)).await; @@ -331,7 +337,7 @@ async fn connecting_to_node_prior_to_1_5_2_should_fail() { } .build(); start_nodes_and_wait(vec![&mut node_mock]).await; - start_sidecar(testing_config.inner()).await; + start_sidecar(testing_config).await; let (join_handle, _) = fetch_data_from_endpoint_with_panic_flag( "/events/main?start_from=0", event_stream_server_port, @@ -363,7 +369,7 @@ async fn shutdown_should_be_passed_through_when_versions_change() { } .build(); start_nodes_and_wait(vec![&mut node_mock]).await; - start_sidecar(testing_config.inner()).await; + start_sidecar(testing_config).await; let (join_handle, receiver) = 
fetch_data_from_endpoint("/events/main?start_from=0", event_stream_server_port).await; let receiver = wait_for_n_messages(3, receiver, Duration::from_secs(120)).await; @@ -400,7 +406,7 @@ async fn should_produce_shutdown_to_sidecar_endpoint() { node_port_for_rest_connection, ); start_nodes_and_wait(vec![&mut node_mock]).await; - start_sidecar(testing_config.inner()).await; + start_sidecar(testing_config).await; let (join_handle, receiver) = fetch_data_from_endpoint("/events/sidecar", event_stream_server_port).await; stop_nodes_and_wait(vec![&mut node_mock]).await; @@ -437,7 +443,7 @@ async fn sidecar_should_use_start_from_if_database_is_empty() { } .build(); start_nodes_and_wait(vec![&mut node_mock]).await; - start_sidecar(testing_config.inner()).await; + start_sidecar(testing_config).await; let (join_handle, receiver) = fetch_data_from_endpoint("/events/main?start_from=0", event_stream_server_port).await; wait_for_n_messages(2, receiver, Duration::from_secs(120)).await; @@ -460,7 +466,7 @@ async fn sidecar_should_use_start_from_if_database_is_not_empty() { event_stream_server_port, ) = build_test_config(); //Prepopulating database - let sqlite_database = SqliteDatabase::new_from_config(&testing_config.config.storage) + let sqlite_database = SqliteDatabase::new_from_config(&testing_config.storage_config) .await .expect("database should start"); sqlite_database @@ -481,7 +487,7 @@ async fn sidecar_should_use_start_from_if_database_is_not_empty() { } .build(); start_nodes_and_wait(vec![&mut node_mock]).await; - start_sidecar(testing_config.inner()).await; + start_sidecar(testing_config).await; let (join_handle, receiver) = fetch_data_from_endpoint("/events/main?start_from=0", event_stream_server_port).await; wait_for_n_messages(1, receiver, Duration::from_secs(120)).await; @@ -511,7 +517,7 @@ async fn sidecar_should_connect_to_multiple_nodes() { (sse_port_2, rest_port_2), (sse_port_3, rest_port_3), ]); - start_sidecar(testing_config.inner()).await; + 
start_sidecar(testing_config).await; let (join_handle, receiver) = fetch_data_from_endpoint("/events/main?start_from=0", event_stream_server_port).await; wait_for_n_messages(4, receiver, Duration::from_secs(120)).await; @@ -547,7 +553,7 @@ async fn sidecar_should_not_downgrade_api_version_when_new_nodes_disconnect() { (sse_port_1, rest_port_1), (sse_port_2, rest_port_2), ]); - start_sidecar(testing_config.inner()).await; + start_sidecar(testing_config).await; let (join_handle, receiver) = fetch_data_from_endpoint("/events/main?start_from=0", event_stream_server_port).await; let receiver = wait_for_n_messages(2, receiver, Duration::from_secs(120)).await; @@ -581,7 +587,7 @@ async fn sidecar_should_report_only_one_api_version_if_there_was_no_update() { (sse_port_1, rest_port_1), (sse_port_2, rest_port_2), ]); - start_sidecar(testing_config.inner()).await; + start_sidecar(testing_config).await; let (join_handle, receiver) = fetch_data_from_endpoint("/events/main?start_from=0", event_stream_server_port).await; wait_for_n_messages(3, receiver, Duration::from_secs(120)).await; @@ -613,7 +619,7 @@ async fn sidecar_should_connect_to_multiple_nodes_even_if_some_of_them_dont_resp (sse_port_2, rest_port_2), (8888, 9999), //Ports which should be not occupied ]); - start_sidecar(testing_config.inner()).await; + start_sidecar(testing_config).await; let (join_handle, receiver) = fetch_data_from_endpoint("/events/main?start_from=0", event_stream_server_port).await; wait_for_n_messages(3, receiver, Duration::from_secs(120)).await; @@ -646,7 +652,7 @@ async fn partial_connection_test(allow_partial_connection: bool) -> Vec Result { - let toml_content = - std::fs::read_to_string(config_path).context("Error reading config file contents")?; - toml::from_str(&toml_content).context("Error parsing config into TOML format") -} - // This struct is used to parse the toml-formatted config file so the values can be utilised in the code. 
#[derive(Clone, Debug, Deserialize, PartialEq, Eq)] -#[cfg_attr(test, derive(Default))] -pub struct Config { - pub inbound_channel_size: Option, - pub outbound_channel_size: Option, - pub connections: Vec, - pub storage: StorageConfig, - pub rest_server: RestServerConfig, - pub event_stream_server: EventStreamServerConfig, - pub admin_server: Option, -} -#[derive(Clone, Debug, Deserialize, PartialEq, Eq)] -#[cfg_attr(test, derive(Default))] -pub struct ConfigSerdeTarget { +pub struct SseEventServerConfig { pub inbound_channel_size: Option, pub outbound_channel_size: Option, pub connections: Vec, - pub storage: Option, - pub rest_server: RestServerConfig, pub event_stream_server: EventStreamServerConfig, - pub admin_server: Option, } -impl TryFrom for Config { - type Error = DatabaseConfigError; - fn try_from(value: ConfigSerdeTarget) -> Result { - Ok(Config { - inbound_channel_size: value.inbound_channel_size, - outbound_channel_size: value.outbound_channel_size, - connections: value.connections, - storage: value.storage.unwrap_or_default().try_into()?, - rest_server: value.rest_server, - event_stream_server: value.event_stream_server, - admin_server: value.admin_server, - }) +#[cfg(any(feature = "testing", test))] +impl Default for SseEventServerConfig { + fn default() -> Self { + Self { + inbound_channel_size: Some(100), + outbound_channel_size: Some(100), + connections: vec![], + event_stream_server: EventStreamServerConfig::default(), + } } } + #[derive(Clone, Debug, Deserialize, PartialEq, Eq)] pub struct Connection { pub ip_address: String, @@ -248,7 +223,7 @@ impl TryFrom for PostgresqlConfig { } #[derive(Clone, Debug, Deserialize, PartialEq, Eq)] -pub struct RestServerConfig { +pub struct RestApiServerConfig { pub port: u16, pub max_concurrent_requests: u32, pub max_requests_per_second: u32, @@ -262,98 +237,16 @@ pub struct EventStreamServerConfig { } #[derive(Clone, Debug, Deserialize, PartialEq, Eq)] -pub struct AdminServerConfig { +pub struct 
AdminApiServerConfig { pub port: u16, pub max_concurrent_requests: u32, pub max_requests_per_second: u32, } -#[cfg(test)] +#[cfg(any(feature = "testing", test))] mod tests { use super::*; - #[test] - fn should_parse_nctl_config_toml() { - let expected_config = Config { - inbound_channel_size: None, - outbound_channel_size: None, - connections: vec![ - Connection::example_connection_1(), - Connection::example_connection_2(), - Connection::example_connection_3(), - ], - storage: StorageConfig::SqliteDbConfig { - storage_path: "./target/storage".to_string(), - sqlite_config: SqliteConfig { - file_name: "sqlite_database.db3".to_string(), - max_connections_in_pool: 100, - wal_autocheckpointing_interval: 1000, - }, - }, - rest_server: build_rest_server_config(), - event_stream_server: EventStreamServerConfig::default(), - admin_server: None, - }; - - let parsed_config: Config = read_config("../EXAMPLE_NCTL_CONFIG.toml") - .expect("Error parsing EXAMPLE_NCTL_CONFIG.toml") - .try_into() - .unwrap(); - - assert_eq!(parsed_config, expected_config); - } - - #[test] - fn should_parse_node_config_toml() { - let mut expected_connection = Connection::example_connection_1(); - expected_connection.sse_port = 9999; - expected_connection.rest_port = 8888; - expected_connection.max_attempts = 10; - expected_connection.enable_logging = true; - let mut expected_connection_2 = expected_connection.clone(); - expected_connection_2.ip_address = "168.254.51.2".to_string(); - let mut expected_connection_3 = expected_connection.clone(); - expected_connection_3.ip_address = "168.254.51.3".to_string(); - let expected_config = Config { - inbound_channel_size: None, - outbound_channel_size: None, - connections: vec![ - expected_connection, - expected_connection_2, - expected_connection_3, - ], - storage: StorageConfig::SqliteDbConfig { - storage_path: "/var/lib/casper-event-sidecar".to_string(), - sqlite_config: SqliteConfig { - file_name: "sqlite_database.db3".to_string(), - 
max_connections_in_pool: 100, - wal_autocheckpointing_interval: 1000, - }, - }, - rest_server: build_rest_server_config(), - event_stream_server: EventStreamServerConfig::default(), - admin_server: Some(AdminServerConfig { - port: 18887, - max_concurrent_requests: 1, - max_requests_per_second: 1, - }), - }; - let parsed_config: Config = read_config("../EXAMPLE_NODE_CONFIG.toml") - .expect("Error parsing EXAMPLE_NODE_CONFIG.toml") - .try_into() - .unwrap(); - - assert_eq!(parsed_config, expected_config); - } - - fn build_rest_server_config() -> RestServerConfig { - RestServerConfig { - port: 18888, - max_concurrent_requests: 50, - max_requests_per_second: 50, - } - } - impl Connection { pub fn example_connection_1() -> Connection { Connection { @@ -437,7 +330,7 @@ mod tests { } } - impl Default for RestServerConfig { + impl Default for RestApiServerConfig { fn default() -> Self { Self { port: 17777, diff --git a/sidecar/src/types/database.rs b/event_sidecar/src/types/database.rs similarity index 93% rename from sidecar/src/types/database.rs rename to event_sidecar/src/types/database.rs index 2292d320..4ba48dfb 100644 --- a/sidecar/src/types/database.rs +++ b/event_sidecar/src/types/database.rs @@ -7,12 +7,13 @@ use crate::{ types::sse_events::{ BlockAdded, DeployAccepted, DeployExpired, DeployProcessed, Fault, FinalitySignature, Step, }, + StorageConfig, }; -use anyhow::Error; +use anyhow::{Context, Error}; use async_trait::async_trait; use casper_event_types::FinalitySignature as FinSig; use serde::{Deserialize, Serialize}; -use std::sync::Arc; +use std::{path::Path, sync::Arc}; use utoipa::ToSchema; #[derive(Clone)] @@ -21,6 +22,32 @@ pub enum Database { PostgreSqlDatabaseWrapper(PostgreSqlDatabase), } +impl Database { + pub async fn build(config: &StorageConfig) -> Result { + match config { + StorageConfig::SqliteDbConfig { + storage_path, + sqlite_config, + } => { + let path_to_database_dir = Path::new(storage_path); + let sqlite_database = + 
SqliteDatabase::new(path_to_database_dir, sqlite_config.clone()) + .await + .context("Error instantiating sqlite database")?; + Ok(Database::SqliteDatabaseWrapper(sqlite_database)) + } + StorageConfig::PostgreSqlDbConfig { + postgresql_config, .. + } => { + let postgres_database = PostgreSqlDatabase::new(postgresql_config.clone()) + .await + .context("Error instantiating postgres database")?; + Ok(Database::PostgreSqlDatabaseWrapper(postgres_database)) + } + } + } +} + /// Describes a reference for the writing interface of an 'Event Store' database. /// There is a one-to-one relationship between each method and each event that can be received from the node. /// Each method takes the `data` and `id` fields as well as the source IP address (useful for tying the node-specific `id` to the relevant node). diff --git a/sidecar/src/types/sse_events.rs b/event_sidecar/src/types/sse_events.rs similarity index 100% rename from sidecar/src/types/sse_events.rs rename to event_sidecar/src/types/sse_events.rs diff --git a/sidecar/src/utils.rs b/event_sidecar/src/utils.rs similarity index 86% rename from sidecar/src/utils.rs rename to event_sidecar/src/utils.rs index 83bb940a..9b4d3034 100644 --- a/sidecar/src/utils.rs +++ b/event_sidecar/src/utils.rs @@ -148,12 +148,14 @@ pub fn start_metrics_thread(module_name: String) -> Sender<()> { #[cfg(test)] pub mod tests { use crate::database::postgresql_database::PostgreSqlDatabase; + use crate::database::sqlite_database::SqliteDatabase; use crate::run; + use crate::run_rest_server; use crate::testing::mock_node::tests::MockNode; use crate::testing::testing_config::get_port; use crate::testing::testing_config::prepare_config; use crate::testing::testing_config::TestingConfig; - use crate::types::config::Config; + use crate::Database; use anyhow::Error; use anyhow::Error as AnyhowError; use pg_embed::pg_enums::PgAuthMethod; @@ -163,6 +165,7 @@ pub mod tests { postgres::PgEmbed, }; use std::path::PathBuf; + use std::process::ExitCode; use 
std::time::Duration; use tempfile::{tempdir, TempDir}; use tokio::sync::mpsc::Receiver; @@ -246,7 +249,7 @@ pub mod tests { node_mock.set_sse_port(node_port_for_sse_connection); node_mock.set_rest_port(node_port_for_rest_connection); start_nodes_and_wait(vec![node_mock]).await; - start_sidecar(testing_config.inner()).await; + start_sidecar(testing_config.clone()).await; MockNodeTestProperties { testing_config, temp_storage_dir, @@ -255,9 +258,21 @@ pub mod tests { event_stream_server_port, } } - pub async fn start_sidecar(config: Config) -> tokio::task::JoinHandle> { - tokio::spawn(async move { run(config).await }) // starting event sidecar + + pub async fn start_sidecar_with_rest_api( + config: TestingConfig, + ) -> tokio::task::JoinHandle> { + tokio::spawn(async move { unpack_test_config_and_run(config, true).await }) + // starting event sidecar + } + + pub async fn start_sidecar( + config: TestingConfig, + ) -> tokio::task::JoinHandle> { + tokio::spawn(async move { unpack_test_config_and_run(config, false).await }) + // starting event sidecar } + pub fn build_test_config() -> (TestingConfig, TempDir, u16, u16, u16) { build_test_config_with_retries(10, 1) } @@ -275,10 +290,18 @@ pub mod tests { let (mut testing_config, temp_storage_dir, event_stream_server_port) = build_test_config_without_connections(); testing_config.add_connection(None, None, None); - let node_port_for_sse_connection = - testing_config.config.connections.get(0).unwrap().sse_port; - let node_port_for_rest_connection = - testing_config.config.connections.get(0).unwrap().rest_port; + let node_port_for_sse_connection = testing_config + .event_server_config + .connections + .first() + .unwrap() + .sse_port; + let node_port_for_rest_connection = testing_config + .event_server_config + .connections + .first() + .unwrap() + .rest_port; testing_config.set_retries_for_node( node_port_for_sse_connection, max_attempts, @@ -379,10 +402,18 @@ pub mod tests { let event_stream_server_port = 
testing_config.event_stream_server_port(); testing_config.set_storage(StorageConfig::postgres_with_port(context.port)); testing_config.add_connection(None, None, None); - let node_port_for_sse_connection = - testing_config.config.connections.get(0).unwrap().sse_port; - let node_port_for_rest_connection = - testing_config.config.connections.get(0).unwrap().rest_port; + let node_port_for_sse_connection = testing_config + .event_server_config + .connections + .first() + .unwrap() + .sse_port; + let node_port_for_rest_connection = testing_config + .event_server_config + .connections + .first() + .unwrap() + .rest_port; testing_config.set_retries_for_node( node_port_for_sse_connection, max_attempts, @@ -398,4 +429,24 @@ pub mod tests { context, ) } + + pub async fn unpack_test_config_and_run( + testing_config: TestingConfig, + spin_up_rest_api: bool, + ) -> Result { + let sse_config = testing_config.inner(); + let storage_config = testing_config.storage_config; + let sqlite_database = SqliteDatabase::new_from_config(&storage_config) + .await + .unwrap(); + let database = Database::SqliteDatabaseWrapper(sqlite_database); + if spin_up_rest_api { + let rest_api_server_config = testing_config.rest_api_server_config; + let database_for_rest_api = database.clone(); + tokio::spawn(async move { + run_rest_server(rest_api_server_config, database_for_rest_api).await + }); + } + run(sse_config, database, storage_config.get_storage_path()).await + } } diff --git a/json_rpc/CHANGELOG.md b/json_rpc/CHANGELOG.md new file mode 100644 index 00000000..97e70598 --- /dev/null +++ b/json_rpc/CHANGELOG.md @@ -0,0 +1,28 @@ +# Changelog + +All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog]. 
+ +[comment]: <> (Added: new features) +[comment]: <> (Changed: changes in existing functionality) +[comment]: <> (Deprecated: soon-to-be removed features) +[comment]: <> (Removed: now removed features) +[comment]: <> (Fixed: any bug fixes) +[comment]: <> (Security: in case of vulnerabilities) + + + +## 1.1.0 + +### Added +* Support configuration of CORS Origin. + + + +## 1.0.0 + +### Added +* Add initial content. + + + +[Keep a Changelog]: https://keepachangelog.com/en/1.0.0 diff --git a/json_rpc/Cargo.toml b/json_rpc/Cargo.toml new file mode 100644 index 00000000..2c93191c --- /dev/null +++ b/json_rpc/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "casper-json-rpc" +version = "1.1.0" +authors = ["Fraser Hutchison "] +edition = "2018" +description = "A library suitable for use as the framework for a JSON-RPC server." +readme = "README.md" +documentation = "https://docs.rs/casper-json-rpc" +homepage = "https://casperlabs.io" +repository = "https://github.com/casper-network/casper-node/tree/master/json_rpc" +license = "Apache-2.0" + +[dependencies] +bytes = "1.1.0" +futures = { workspace = true } +http = "0.2.7" +itertools = "0.10.3" +serde = { workspace = true, default-features = true, features = ["derive"] } +serde_json = { version = "1", features = ["preserve_order"] } +tracing = { workspace = true, default-features = true } +warp = "0.3.6" + +[dev-dependencies] +env_logger = "0.9.0" +hyper = "0.14.18" +tokio = { workspace = true, features = ["macros", "rt-multi-thread", "test-util"] } diff --git a/json_rpc/README.md b/json_rpc/README.md new file mode 100644 index 00000000..9b16ca2d --- /dev/null +++ b/json_rpc/README.md @@ -0,0 +1,118 @@ +# `casper-json-rpc` + +[![LOGO](https://raw.githubusercontent.com/casper-network/casper-node/master/images/casper-association-logo-primary.svg)](https://casper.network/) + +[![Build 
Status](https://drone-auto-casper-network.casperlabs.io/api/badges/casper-network/casper-node/status.svg?branch=dev)](http://drone-auto-casper-network.casperlabs.io/casper-network/casper-node) +[![Crates.io](https://img.shields.io/crates/v/casper-json-rpc)](https://crates.io/crates/casper-json-rpc) +[![Documentation](https://docs.rs/casper-node/badge.svg)](https://docs.rs/casper-json-rpc) +[![License](https://img.shields.io/badge/license-Apache-blue)](https://github.com/casper-network/casper-node/blob/master/LICENSE) + +A library suitable for use as the framework for a JSON-RPC server. + +# Usage + +Normally usage will involve two steps: + * construct a set of request handlers using a + [`RequestHandlersBuilder`](https://docs.rs/casper-json-rpc/latest/casper_json_rpc/struct.RequestHandlersBuilder.html) + * call [`casper_json_rpc::route`](https://docs.rs/casper-json-rpc/latest/casper_json_rpc/fn.route.html) to construct a + boxed warp filter ready to be passed to [`warp::service`](https://docs.rs/warp/latest/warp/fn.service.html) for + example + +# Example + +```rust +use casper_json_rpc::{Error, Params, RequestHandlersBuilder}; +use std::{convert::Infallible, sync::Arc}; + +async fn get(params: Option) -> Result { + // * parse params or return `ReservedErrorCode::InvalidParams` error + // * handle request and return result + Ok("got it".to_string()) +} + +async fn put(params: Option, other_input: &str) -> Result { + Ok(other_input.to_string()) +} + +#[tokio::main] +async fn main() { + // Register handlers for methods "get" and "put". + let mut handlers = RequestHandlersBuilder::new(); + handlers.register_handler("get", Arc::new(get)); + let put_handler = move |params| async move { put(params, "other input").await }; + handlers.register_handler("put", Arc::new(put_handler)); + let handlers = handlers.build(); + + // Get the new route. 
+ let path = "rpc"; + let max_body_bytes = 1024; + let route = casper_json_rpc::route(path, max_body_bytes, handlers); + + // Convert it into a `Service` and run it. + let make_svc = hyper::service::make_service_fn(move |_| { + let svc = warp::service(route.clone()); + async move { Ok::<_, Infallible>(svc.clone()) } + }); + + hyper::Server::bind(&([127, 0, 0, 1], 3030).into()) + .serve(make_svc) + .await + .unwrap(); +} +``` + +If this receives a request such as + +``` +curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":"id","method":"get"}' http://127.0.0.1:3030/rpc +``` + +then the server will respond with + +```json +{"jsonrpc":"2.0","id":"id","result":"got it"} +``` + +# Errors + +To return a JSON-RPC response indicating an error, use +[`Error::new`](https://docs.rs/casper-json-rpc/latest/casper_json_rpc/struct.Error.html#method.new). Most error +conditions which require returning a reserved error are already handled in the provided warp filters. The only +exception is +[`ReservedErrorCode::InvalidParams`](https://docs.rs/casper-json-rpc/latest/casper_json_rpc/enum.ReservedErrorCode.html#variant.InvalidParams) +which should be returned by any RPC handler which deems the provided `params: Option` to be invalid for any +reason. + +Generally a set of custom error codes should be provided. These should all implement +[`ErrorCodeT`](https://docs.rs/casper-json-rpc/latest/casper_json_rpc/trait.ErrorCodeT.html). + +## Example custom error code + +```rust +use serde::{Deserialize, Serialize}; +use casper_json_rpc::ErrorCodeT; + +#[derive(Copy, Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] +#[repr(i64)] +pub enum ErrorCode { + /// The requested item was not found. + NoSuchItem = -1, + /// Failed to put the requested item to storage. 
+ FailedToPutItem = -2, +} + +impl From for (i64, &'static str) { + fn from(error_code: ErrorCode) -> Self { + match error_code { + ErrorCode::NoSuchItem => (error_code as i64, "No such item"), + ErrorCode::FailedToPutItem => (error_code as i64, "Failed to put item"), + } + } +} + +impl ErrorCodeT for ErrorCode {} +``` + +# License + +Licensed under the [Apache License Version 2.0](https://github.com/casper-network/casper-node/blob/master/LICENSE). diff --git a/json_rpc/src/error.rs b/json_rpc/src/error.rs new file mode 100644 index 00000000..3ad2bae6 --- /dev/null +++ b/json_rpc/src/error.rs @@ -0,0 +1,282 @@ +use std::{borrow::Cow, fmt::Debug, hash::Hash}; + +use serde::{Deserialize, Serialize}; +use serde_json::Value; +use tracing::{error, warn}; + +/// A marker trait for a type suitable for use as an error code when constructing an [`Error`]. +/// +/// The implementing type must also implement `Into<(i64, &'static str)>` where the tuple represents +/// the "code" and "message" fields of the `Error`. +/// +/// As per the JSON-RPC specification, the code must not fall in the reserved range, i.e. it must +/// not be between -32768 and -32000 inclusive. +/// +/// Generally the "message" will be a brief const &str, where additional request-specific info can +/// be provided via the `additional_info` parameter of [`Error::new`]. +/// +/// # Example +/// +/// ``` +/// use serde::{Deserialize, Serialize}; +/// use casper_json_rpc::ErrorCodeT; +/// +/// #[derive(Copy, Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] +/// #[repr(i64)] +/// pub enum ErrorCode { +/// /// The requested item was not found. +/// NoSuchItem = -1, +/// /// Failed to put the requested item to storage. 
+/// FailedToPutItem = -2, +/// } +/// +/// impl From for (i64, &'static str) { +/// fn from(error_code: ErrorCode) -> Self { +/// match error_code { +/// ErrorCode::NoSuchItem => (error_code as i64, "No such item"), +/// ErrorCode::FailedToPutItem => (error_code as i64, "Failed to put item"), +/// } +/// } +/// } +/// +/// impl ErrorCodeT for ErrorCode {} +/// ``` +pub trait ErrorCodeT: + Into<(i64, &'static str)> + for<'de> Deserialize<'de> + Copy + Eq + Debug +{ + /// Whether this type represents reserved error codes or not. + /// + /// This should normally be left with the default return value of `false`. + #[doc(hidden)] + fn is_reserved() -> bool { + false + } +} + +/// The various reserved codes which can be returned in the JSON-RPC response's "error" object. +/// +/// See [the JSON-RPC Specification](https://www.jsonrpc.org/specification#error_object) for further +/// details. +#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize, Debug)] +#[repr(i64)] +pub enum ReservedErrorCode { + /// Invalid JSON was received by the server. + ParseError = -32700, + /// The JSON sent is not a valid Request object. + InvalidRequest = -32600, + /// The method does not exist or is not available. + MethodNotFound = -32601, + /// Invalid method parameter(s). + InvalidParams = -32602, + /// Internal JSON-RPC error. 
+ InternalError = -32603, +} + +impl From for (i64, &'static str) { + fn from(error_code: ReservedErrorCode) -> Self { + match error_code { + ReservedErrorCode::ParseError => (error_code as i64, "Parse error"), + ReservedErrorCode::InvalidRequest => (error_code as i64, "Invalid Request"), + ReservedErrorCode::MethodNotFound => (error_code as i64, "Method not found"), + ReservedErrorCode::InvalidParams => (error_code as i64, "Invalid params"), + ReservedErrorCode::InternalError => (error_code as i64, "Internal error"), + } + } +} + +impl ErrorCodeT for ReservedErrorCode { + fn is_reserved() -> bool { + true + } +} + +/// An object suitable to be returned in a JSON-RPC response as the "error" field. +/// +/// See [the JSON-RPC Specification](https://www.jsonrpc.org/specification#error_object) for further +/// details. +#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] +#[serde(deny_unknown_fields)] +pub struct Error { + /// A number that indicates the error type that occurred. + code: i64, + /// A short description of the error. + message: Cow<'static, str>, + /// Additional information about the error. + #[serde(skip_serializing_if = "Option::is_none")] + data: Option, +} + +impl Error { + /// Returns a new `Error`, converting `error_code` to the "code" and "message" fields, and + /// JSON-encoding `additional_info` as the "data" field. + /// + /// Other than when providing a [`ReservedErrorCode`], the converted "code" must not fall in the + /// reserved range as defined in the JSON-RPC specification, i.e. it must not be between -32768 + /// and -32100 inclusive. + /// + /// Note that in an upcoming release, the restriction will be tightened to disallow error codes + /// in the implementation-defined server-errors range. I.e. codes in the range -32768 to -32000 + /// inclusive will be disallowed. 
+ /// + /// If the converted code is within the reserved range when it should not be, or if + /// JSON-encoding `additional_data` fails, the returned `Self` is built from + /// [`ReservedErrorCode::InternalError`] with the "data" field being a String providing more + /// info on the underlying error. + pub fn new(error_code: C, additional_info: T) -> Self { + let (code, message): (i64, &'static str) = error_code.into(); + + if !C::is_reserved() && (-32768..=-32100).contains(&code) { + warn!(%code, "provided json-rpc error code is reserved; returning internal error"); + let (code, message) = ReservedErrorCode::InternalError.into(); + return Error { + code, + message: Cow::Borrowed(message), + data: Some(Value::String(format!( + "attempted to return reserved error code {}", + code + ))), + }; + } + + let data = match serde_json::to_value(additional_info) { + Ok(Value::Null) => None, + Ok(value) => Some(value), + Err(error) => { + error!(%error, "failed to json-encode additional info in json-rpc error"); + let (code, message) = ReservedErrorCode::InternalError.into(); + return Error { + code, + message: Cow::Borrowed(message), + data: Some(Value::String(format!( + "failed to json-encode additional info in json-rpc error: {}", + error + ))), + }; + } + }; + + Error { + code, + message: Cow::Borrowed(message), + data, + } + } + + /// Returns the code of the error. + pub fn code(&self) -> i64 { + self.code + } +} + +#[cfg(test)] +mod tests { + use serde::ser::{Error as _, Serializer}; + + use super::*; + + #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize, Debug)] + struct TestErrorCode { + // If `true` the error code will be one in the reserved range. 
+ in_reserved_range: bool, + } + + impl From for (i64, &'static str) { + fn from(error_code: TestErrorCode) -> Self { + if error_code.in_reserved_range { + (-32768, "Invalid test error") + } else { + (-123, "Valid test error") + } + } + } + + impl ErrorCodeT for TestErrorCode {} + + #[derive(Serialize)] + struct AdditionalInfo { + id: u64, + context: &'static str, + } + + impl Default for AdditionalInfo { + fn default() -> Self { + AdditionalInfo { + id: 1314, + context: "TEST", + } + } + } + + struct FailToEncode; + + impl Serialize for FailToEncode { + fn serialize(&self, _serializer: S) -> Result { + Err(S::Error::custom("won't encode")) + } + } + + #[test] + fn should_construct_reserved_error() { + const EXPECTED_WITH_DATA: &str = + r#"{"code":-32700,"message":"Parse error","data":{"id":1314,"context":"TEST"}}"#; + const EXPECTED_WITHOUT_DATA: &str = r#"{"code":-32601,"message":"Method not found"}"#; + const EXPECTED_WITH_BAD_DATA: &str = r#"{"code":-32603,"message":"Internal error","data":"failed to json-encode additional info in json-rpc error: won't encode"}"#; + + let error_with_data = Error::new(ReservedErrorCode::ParseError, AdditionalInfo::default()); + let encoded = serde_json::to_string(&error_with_data).unwrap(); + assert_eq!(encoded, EXPECTED_WITH_DATA); + + let error_without_data = Error::new(ReservedErrorCode::MethodNotFound, None::); + let encoded = serde_json::to_string(&error_without_data).unwrap(); + assert_eq!(encoded, EXPECTED_WITHOUT_DATA); + + let error_with_bad_data = Error::new(ReservedErrorCode::InvalidParams, FailToEncode); + let encoded = serde_json::to_string(&error_with_bad_data).unwrap(); + assert_eq!(encoded, EXPECTED_WITH_BAD_DATA); + } + + #[test] + fn should_construct_custom_error() { + const EXPECTED_WITH_DATA: &str = + r#"{"code":-123,"message":"Valid test error","data":{"id":1314,"context":"TEST"}}"#; + const EXPECTED_WITHOUT_DATA: &str = r#"{"code":-123,"message":"Valid test error"}"#; + const EXPECTED_WITH_BAD_DATA: &str = 
r#"{"code":-32603,"message":"Internal error","data":"failed to json-encode additional info in json-rpc error: won't encode"}"#; + + let good_error_code = TestErrorCode { + in_reserved_range: false, + }; + + let error_with_data = Error::new(good_error_code, AdditionalInfo::default()); + let encoded = serde_json::to_string(&error_with_data).unwrap(); + assert_eq!(encoded, EXPECTED_WITH_DATA); + + let error_without_data = Error::new(good_error_code, ()); + let encoded = serde_json::to_string(&error_without_data).unwrap(); + assert_eq!(encoded, EXPECTED_WITHOUT_DATA); + + let error_with_bad_data = Error::new(good_error_code, FailToEncode); + let encoded = serde_json::to_string(&error_with_bad_data).unwrap(); + assert_eq!(encoded, EXPECTED_WITH_BAD_DATA); + } + + #[test] + fn should_fall_back_to_internal_error_on_bad_custom_error() { + const EXPECTED: &str = r#"{"code":-32603,"message":"Internal error","data":"attempted to return reserved error code -32603"}"#; + + let bad_error_code = TestErrorCode { + in_reserved_range: true, + }; + + let error_with_data = Error::new(bad_error_code, AdditionalInfo::default()); + let encoded = serde_json::to_string(&error_with_data).unwrap(); + assert_eq!(encoded, EXPECTED); + + let error_without_data = Error::new(bad_error_code, None::); + let encoded = serde_json::to_string(&error_without_data).unwrap(); + assert_eq!(encoded, EXPECTED); + + let error_with_bad_data = Error::new(bad_error_code, FailToEncode); + let encoded = serde_json::to_string(&error_with_bad_data).unwrap(); + assert_eq!(encoded, EXPECTED); + } +} diff --git a/json_rpc/src/filters.rs b/json_rpc/src/filters.rs new file mode 100644 index 00000000..940144fe --- /dev/null +++ b/json_rpc/src/filters.rs @@ -0,0 +1,205 @@ +//! Warp filters which can be combined to provide JSON-RPC endpoints. +//! +//! Generally these lower-level filters will not need to be explicitly called. Instead, +//! [`casper_json_rpc::route()`](crate::route) should be sufficient. 
+ +#[cfg(test)] +mod tests; + +use bytes::Bytes; +use http::{header::CONTENT_TYPE, HeaderMap, StatusCode}; +use serde_json::{json, Map, Value}; +use tracing::{debug, trace, warn}; +use warp::{ + body, + filters::BoxedFilter, + reject::{self, Rejection}, + reply::{self, WithStatus}, + Filter, +}; + +use crate::{ + error::{Error, ReservedErrorCode}, + rejections::{BodyTooLarge, MissingContentTypeHeader, MissingId, UnsupportedMediaType}, + request::{ErrorOrRejection, Request}, + request_handlers::RequestHandlers, + response::Response, +}; + +const CONTENT_TYPE_VALUE: &str = "application/json"; + +/// Returns a boxed warp filter which handles the initial setup. +/// +/// This includes: +/// * setting the full path +/// * setting the method to POST +/// * ensuring the "content-type" header exists and is set to "application/json" +/// * ensuring the body has at most `max_body_bytes` bytes +pub fn base_filter>(path: P, max_body_bytes: u32) -> BoxedFilter<()> { + let path = path.as_ref().to_string(); + warp::path::path(path) + .and(warp::path::end()) + .and(warp::filters::method::post()) + .and( + warp::filters::header::headers_cloned().and_then(|headers: HeaderMap| async move { + for (name, value) in headers.iter() { + if name.as_str() == CONTENT_TYPE.as_str() { + if value + .as_bytes() + .eq_ignore_ascii_case(CONTENT_TYPE_VALUE.as_bytes()) + { + return Ok(()); + } else { + trace!(content_type = ?value.to_str(), "invalid {}", CONTENT_TYPE); + return Err(reject::custom(UnsupportedMediaType)); + } + } + } + trace!("missing {}", CONTENT_TYPE); + Err(reject::custom(MissingContentTypeHeader)) + }), + ) + .untuple_one() + .and(body::content_length_limit(max_body_bytes as u64).or_else( + move |_rejection| async move { Err(reject::custom(BodyTooLarge(max_body_bytes))) }, + )) + .boxed() +} + +/// Handles parsing a JSON-RPC request from the given HTTP body, executing it using the appropriate +/// handler, and providing a JSON-RPC response (which could be a success or failure). 
+/// +/// Returns an `Err(Rejection)` only if the request is a Notification as per the JSON-RPC +/// specification, i.e. the request doesn't contain an "id" field. In this case, no JSON-RPC +/// response is sent to the client. +/// +/// If `allow_unknown_fields` is `false`, requests with unknown fields will cause the server to +/// respond with an error. +async fn handle_body( + body: Bytes, + handlers: RequestHandlers, + allow_unknown_fields: bool, +) -> Result { + let response = match serde_json::from_slice::>(&body) { + Ok(unvalidated_request) => match Request::new(unvalidated_request, allow_unknown_fields) { + Ok(request) => handlers.handle_request(request).await, + Err(ErrorOrRejection::Error { id, error }) => { + debug!(?error, "got an invalid request"); + Response::new_failure(id, error) + } + Err(ErrorOrRejection::Rejection(rejection)) => { + debug!(?rejection, "rejecting an invalid request"); + return Err(rejection); + } + }, + Err(error) => { + debug!(%error, "got bad json"); + let error = Error::new(ReservedErrorCode::ParseError, error.to_string()); + Response::new_failure(Value::Null, error) + } + }; + Ok(response) +} + +/// Returns a boxed warp filter which handles parsing a JSON-RPC request from the given HTTP body, +/// executing it using the appropriate handler, and providing a reply. +/// +/// The reply will normally be built from a JSON-RPC response (which could be a success or failure). +/// +/// However, the reply could be built from a [`Rejection`] if the request is a Notification as per +/// the JSON-RPC specification, i.e. the request doesn't contain an "id" field. In this case, no +/// JSON-RPC response is sent to the client, only an HTTP response. +/// +/// If `allow_unknown_fields` is `false`, requests with unknown fields will cause the server to +/// respond with an error. 
+pub fn main_filter( + handlers: RequestHandlers, + allow_unknown_fields: bool, +) -> BoxedFilter<(WithStatus,)> { + body::bytes() + .and_then(move |body| { + let handlers = handlers.clone(); + async move { handle_body(body, handlers, allow_unknown_fields).await } + }) + .map(|response| reply::with_status(reply::json(&response), StatusCode::OK)) + .boxed() +} + +/// Handler for rejections where no JSON-RPC response is sent, but an HTTP response is required. +/// +/// The HTTP response body will be a JSON object of the form: +/// ```json +/// { "message": } +/// ``` +pub async fn handle_rejection(error: Rejection) -> Result, Rejection> { + let code; + let message; + + if let Some(rejection) = error.find::() { + trace!("{:?}", rejection); + message = rejection.to_string(); + code = StatusCode::UNSUPPORTED_MEDIA_TYPE; + } else if let Some(rejection) = error.find::() { + trace!("{:?}", rejection); + message = rejection.to_string(); + code = StatusCode::BAD_REQUEST; + } else if let Some(rejection) = error.find::() { + trace!("{:?}", rejection); + message = rejection.to_string(); + code = StatusCode::BAD_REQUEST; + } else if let Some(rejection) = error.find::() { + trace!("{:?}", rejection); + message = rejection.to_string(); + code = StatusCode::PAYLOAD_TOO_LARGE; + } else if error.is_not_found() { + trace!("{:?}", error); + message = "Path not found".to_string(); + code = StatusCode::NOT_FOUND; + } else if let Some(rejection) = error.find::() { + trace!("{:?}", rejection); + message = rejection.to_string(); + code = StatusCode::METHOD_NOT_ALLOWED; + } else if let Some(rejection) = error.find::() { + trace!("{:?}", rejection); + message = rejection.to_string(); + code = StatusCode::BAD_REQUEST; + } else if let Some(rejection) = error.find::() { + trace!("{:?}", rejection); + message = rejection.to_string(); + code = StatusCode::BAD_REQUEST; + } else if let Some(rejection) = error.find::() { + trace!("{:?}", rejection); + message = rejection.to_string(); + code = 
StatusCode::BAD_REQUEST; + } else if let Some(rejection) = error.find::() { + trace!("{:?}", rejection); + message = rejection.to_string(); + code = StatusCode::BAD_REQUEST; + } else if let Some(rejection) = error.find::() { + trace!("{:?}", rejection); + message = rejection.to_string(); + code = StatusCode::LENGTH_REQUIRED; + } else if let Some(rejection) = error.find::() { + trace!("{:?}", rejection); + message = rejection.to_string(); + code = StatusCode::PAYLOAD_TOO_LARGE; + } else if let Some(rejection) = error.find::() { + trace!("{:?}", rejection); + message = rejection.to_string(); + code = StatusCode::UNSUPPORTED_MEDIA_TYPE; + } else if let Some(rejection) = error.find::() { + trace!("{:?}", rejection); + message = rejection.to_string(); + code = StatusCode::FORBIDDEN; + } else { + // We should handle all rejection types before this. + warn!(?error, "unhandled warp rejection in json-rpc server"); + message = format!("Internal server error: unhandled rejection: {:?}", error); + code = StatusCode::INTERNAL_SERVER_ERROR; + } + + Ok(reply::with_status( + reply::json(&json!({ "message": message })), + code, + )) +} diff --git a/json_rpc/src/filters/tests.rs b/json_rpc/src/filters/tests.rs new file mode 100644 index 00000000..b771a8d4 --- /dev/null +++ b/json_rpc/src/filters/tests.rs @@ -0,0 +1,18 @@ +mod base_filter_with_recovery_tests; +mod main_filter_with_recovery_tests; + +use serde::Deserialize; + +/// The HTTP response body returned in the event of a warp rejection. 
+#[derive(Deserialize)] +#[serde(deny_unknown_fields)] +struct ResponseBodyOnRejection { + message: String, +} + +impl ResponseBodyOnRejection { + async fn from_response(response: http::Response) -> Self { + let body_bytes = hyper::body::to_bytes(response.into_body()).await.unwrap(); + serde_json::from_slice(&body_bytes).unwrap() + } +} diff --git a/json_rpc/src/filters/tests/base_filter_with_recovery_tests.rs b/json_rpc/src/filters/tests/base_filter_with_recovery_tests.rs new file mode 100644 index 00000000..361893eb --- /dev/null +++ b/json_rpc/src/filters/tests/base_filter_with_recovery_tests.rs @@ -0,0 +1,220 @@ +use http::StatusCode; +use warp::{filters::BoxedFilter, reply, test::RequestBuilder, Filter, Reply}; + +use super::ResponseBodyOnRejection; +use crate::filters::{base_filter, handle_rejection, CONTENT_TYPE_VALUE}; + +const PATH: &str = "rpc"; +const MAX_BODY_BYTES: u32 = 10; + +fn base_filter_with_recovery() -> BoxedFilter<(impl Reply,)> { + base_filter(PATH, MAX_BODY_BYTES) + .map(reply) // return an empty body on success + .with(warp::cors().allow_origin("http://a.com")) + .recover(handle_rejection) // or convert a rejection to JSON-encoded `ResponseBody` + .boxed() +} + +fn valid_base_filter_request_builder() -> RequestBuilder { + warp::test::request() + .path(&format!("/{}", PATH)) + .header("content-type", CONTENT_TYPE_VALUE) + .method("POST") + .body([0_u8; MAX_BODY_BYTES as usize]) +} + +#[tokio::test] +async fn should_accept_valid_request() { + let _ = env_logger::try_init(); + + let filter = base_filter_with_recovery(); + + let response = valid_base_filter_request_builder() + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(response.status(), StatusCode::OK); + let body_bytes = hyper::body::to_bytes(response.into_body()).await.unwrap(); + assert!(body_bytes.is_empty()); +} + +#[tokio::test] +async fn should_reject_invalid_path() { + async fn test_with_invalid_path(path: &str) { + let filter = 
base_filter_with_recovery(); + + let response = valid_base_filter_request_builder() + .path(path) + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(response.status(), StatusCode::NOT_FOUND); + let response_body = ResponseBodyOnRejection::from_response(response).await; + assert_eq!(response_body.message, "Path not found"); + } + + let _ = env_logger::try_init(); + + // A root path. + test_with_invalid_path("/").await; + + // A path which doesn't match the server's. + test_with_invalid_path("/not_the_right_path").await; + + // A path which extends the server's + test_with_invalid_path(&format!("/{0}/{0}", PATH)).await; +} + +#[tokio::test] +async fn should_reject_unsupported_http_method() { + async fn test_with_unsupported_method(method: &'static str) { + let filter = base_filter_with_recovery(); + + let response = valid_base_filter_request_builder() + .method(method) + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(response.status(), StatusCode::METHOD_NOT_ALLOWED); + let response_body = ResponseBodyOnRejection::from_response(response).await; + assert_eq!(response_body.message, "HTTP method not allowed"); + } + + let _ = env_logger::try_init(); + + test_with_unsupported_method("GET").await; + test_with_unsupported_method("PUT").await; + test_with_unsupported_method("DELETE").await; + test_with_unsupported_method("HEAD").await; + test_with_unsupported_method("OPTIONS").await; + test_with_unsupported_method("CONNECT").await; + test_with_unsupported_method("PATCH").await; + test_with_unsupported_method("TRACE").await; + test_with_unsupported_method("a").await; +} + +#[tokio::test] +async fn should_reject_missing_content_type_header() { + let _ = env_logger::try_init(); + + let filter = base_filter_with_recovery(); + + let response = warp::test::request() + .path(&format!("/{}", PATH)) + .method("POST") + .body("") + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(response.status(), 
StatusCode::BAD_REQUEST); + let response_body = ResponseBodyOnRejection::from_response(response).await; + assert_eq!( + response_body.message, + "The request's content-type is not set" + ); +} + +#[tokio::test] +async fn should_reject_invalid_content_type() { + async fn test_invalid_content_type(value: &'static str) { + let filter = base_filter_with_recovery(); + + let response = valid_base_filter_request_builder() + .header("content-type", value) + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(response.status(), StatusCode::UNSUPPORTED_MEDIA_TYPE); + let response_body = ResponseBodyOnRejection::from_response(response).await; + assert_eq!( + response_body.message, + "The request's content-type is not supported" + ); + } + + let _ = env_logger::try_init(); + + test_invalid_content_type("text/html").await; + test_invalid_content_type("multipart/form-data").await; + test_invalid_content_type("a").await; + test_invalid_content_type("").await; +} + +#[tokio::test] +async fn should_reject_large_body() { + let _ = env_logger::try_init(); + + let filter = base_filter_with_recovery(); + + let response = valid_base_filter_request_builder() + .body([0_u8; MAX_BODY_BYTES as usize + 1]) + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(response.status(), StatusCode::PAYLOAD_TOO_LARGE); + let response_body = ResponseBodyOnRejection::from_response(response).await; + assert_eq!( + response_body.message, + "The request payload exceeds the maximum allowed of 10 bytes" + ); +} + +#[tokio::test] +async fn should_reject_cors() { + let _ = env_logger::try_init(); + + let filter = base_filter_with_recovery(); + + let response = valid_base_filter_request_builder() + .header("Origin", "http://b.com") + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(response.status(), StatusCode::FORBIDDEN); + let response_body = ResponseBodyOnRejection::from_response(response).await; + assert_eq!( + response_body.message, + "CORS 
request forbidden: origin not allowed" + ); +} + +#[tokio::test] +async fn should_handle_any_case_content_type() { + async fn test_content_type(key: &'static str, value: &'static str) { + let filter = base_filter_with_recovery(); + + let response = valid_base_filter_request_builder() + .header(key, value) + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(response.status(), StatusCode::OK); + let body_bytes = hyper::body::to_bytes(response.into_body()).await.unwrap(); + assert!(body_bytes.is_empty()); + } + + let _ = env_logger::try_init(); + + test_content_type("Content-Type", "application/json").await; + test_content_type("Content-Type", "Application/JSON").await; + test_content_type("content-type", "application/json").await; + test_content_type("content-type", "Application/JSON").await; + test_content_type("CONTENT-TYPE", "APPLICATION/JSON").await; + test_content_type("CoNtEnT-tYpE", "ApPliCaTiOn/JsOn").await; +} diff --git a/json_rpc/src/filters/tests/main_filter_with_recovery_tests.rs b/json_rpc/src/filters/tests/main_filter_with_recovery_tests.rs new file mode 100644 index 00000000..1e158921 --- /dev/null +++ b/json_rpc/src/filters/tests/main_filter_with_recovery_tests.rs @@ -0,0 +1,320 @@ +use std::sync::Arc; + +use http::StatusCode; +use serde::{ + ser::{Error as _, Serializer}, + Deserialize, Serialize, +}; +use serde_json::Value; +use warp::{filters::BoxedFilter, Filter, Reply}; + +use super::ResponseBodyOnRejection; +use crate::{ + filters::{handle_rejection, main_filter}, + Error, Params, RequestHandlersBuilder, ReservedErrorCode, Response, +}; + +const GET_GOOD_THING: &str = "get good thing"; +const GET_BAD_THING: &str = "get bad thing"; + +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug)] +struct GoodThing { + good_thing: String, +} + +/// A type which always errors when being serialized. 
+struct BadThing; + +impl Serialize for BadThing { + fn serialize(&self, _serializer: S) -> Result { + Err(S::Error::custom("won't encode")) + } +} + +async fn get_good_thing(params: Option) -> Result { + match params { + Some(Params::Array(array)) => Ok(GoodThing { + good_thing: array[0].as_str().unwrap().to_string(), + }), + _ => Err(Error::new(ReservedErrorCode::InvalidParams, "no params")), + } +} + +async fn get_bad_thing(_params: Option) -> Result { + Ok(BadThing) +} + +async fn from_http_response(response: http::Response) -> Response { + let body_bytes = hyper::body::to_bytes(response.into_body()).await.unwrap(); + serde_json::from_slice(&body_bytes).unwrap() +} + +fn main_filter_with_recovery() -> BoxedFilter<(impl Reply,)> { + let mut handlers = RequestHandlersBuilder::new(); + handlers.register_handler(GET_GOOD_THING, Arc::new(get_good_thing)); + handlers.register_handler(GET_BAD_THING, Arc::new(get_bad_thing)); + let handlers = handlers.build(); + + main_filter(handlers, false) + .recover(handle_rejection) + .boxed() +} + +#[tokio::test] +async fn should_handle_valid_request() { + let _ = env_logger::try_init(); + + let filter = main_filter_with_recovery(); + + // This should get handled by `fn get_good_thing` and return `Ok` as "params" is Some, causing a + // Response::Success to be returned to the client. 
+ let http_response = warp::test::request() + .body(r#"{"jsonrpc":"2.0","id":"a","method":"get good thing","params":["one"]}"#) + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(http_response.status(), StatusCode::OK); + let rpc_response = from_http_response(http_response).await; + assert_eq!(rpc_response.id(), "a"); + assert_eq!( + rpc_response.result(), + Some(GoodThing { + good_thing: "one".to_string() + }) + ); +} + +#[tokio::test] +async fn should_handle_valid_request_where_rpc_returns_error() { + let _ = env_logger::try_init(); + + let filter = main_filter_with_recovery(); + + // This should get handled by `fn get_good_thing` and return `Err` as "params" is None, causing + // a Response::Failure (invalid params) to be returned to the client. + let http_response = warp::test::request() + .body(r#"{"jsonrpc":"2.0","id":"a","method":"get good thing"}"#) + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(http_response.status(), StatusCode::OK); + let rpc_response = from_http_response(http_response).await; + assert_eq!(rpc_response.id(), "a"); + assert_eq!( + rpc_response.error().unwrap(), + &Error::new(ReservedErrorCode::InvalidParams, "no params") + ); +} + +#[tokio::test] +async fn should_handle_valid_request_where_result_encoding_fails() { + let _ = env_logger::try_init(); + + let filter = main_filter_with_recovery(); + + // This should get handled by `fn get_bad_thing` which returns a type which fails to encode, + // causing a Response::Failure (internal error) to be returned to the client. 
+ let http_response = warp::test::request() + .body(r#"{"jsonrpc":"2.0","id":"a","method":"get bad thing"}"#) + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(http_response.status(), StatusCode::OK); + let rpc_response = from_http_response(http_response).await; + assert_eq!(rpc_response.id(), "a"); + assert_eq!( + rpc_response.error().unwrap(), + &Error::new( + ReservedErrorCode::InternalError, + "failed to encode json-rpc response value: won't encode" + ) + ); +} + +#[tokio::test] +async fn should_handle_request_for_method_not_registered() { + let _ = env_logger::try_init(); + + let filter = main_filter_with_recovery(); + + // This should get handled by `filters::handle_body` and return Response::Failure (invalid + // request) to the client as the ID has fractional parts. + let http_response = warp::test::request() + .body(r#"{"jsonrpc":"2.0","id":1,"method":"not registered","params":["one"]}"#) + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(http_response.status(), StatusCode::OK); + let rpc_response = from_http_response(http_response).await; + assert_eq!(rpc_response.id(), 1); + assert_eq!( + rpc_response.error().unwrap(), + &Error::new( + ReservedErrorCode::MethodNotFound, + "'not registered' is not a supported json-rpc method on this server" + ) + ); +} + +#[tokio::test] +async fn should_handle_request_with_invalid_id() { + let _ = env_logger::try_init(); + + let filter = main_filter_with_recovery(); + + // This should get handled by `filters::handle_body` and return Response::Failure (invalid + // request) to the client as the ID has fractional parts. 
+ let http_response = warp::test::request() + .body(r#"{"jsonrpc":"2.0","id":1.1,"method":"get good thing","params":["one"]}"#) + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(http_response.status(), StatusCode::OK); + let rpc_response = from_http_response(http_response).await; + assert_eq!(rpc_response.id(), &Value::Null); + assert_eq!( + rpc_response.error().unwrap(), + &Error::new( + ReservedErrorCode::InvalidRequest, + "'id' must not contain fractional parts if it is a number" + ) + ); +} + +#[tokio::test] +async fn should_handle_request_with_no_id() { + let _ = env_logger::try_init(); + + let filter = main_filter_with_recovery(); + + // This should get handled by `filters::handle_body` and return no JSON-RPC response, only an + // HTTP response (bad request) to the client as no ID was provided. + let http_response = warp::test::request() + .body(r#"{"jsonrpc":"2.0","method":"get good thing","params":["one"]}"#) + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(http_response.status(), StatusCode::BAD_REQUEST); + let response_body = ResponseBodyOnRejection::from_response(http_response).await; + assert_eq!( + response_body.message, + "The request is missing the 'id' field" + ); +} + +#[tokio::test] +async fn should_handle_request_with_extra_field() { + let _ = env_logger::try_init(); + + let filter = main_filter_with_recovery(); + + // This should get handled by `filters::handle_body` and return Response::Failure (invalid + // request) to the client as the request has an extra field. 
+ let http_response = warp::test::request() + .body(r#"{"jsonrpc":"2.0","id":1,"method":"get good thing","params":[2],"extra":"field"}"#) + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(http_response.status(), StatusCode::OK); + let rpc_response = from_http_response(http_response).await; + assert_eq!(rpc_response.id(), 1); + assert_eq!( + rpc_response.error().unwrap(), + &Error::new( + ReservedErrorCode::InvalidRequest, + "Unexpected field: 'extra'" + ) + ); +} + +#[tokio::test] +async fn should_handle_malformed_request_with_valid_id() { + let _ = env_logger::try_init(); + + let filter = main_filter_with_recovery(); + + // This should get handled by `filters::handle_body` and return Response::Failure (invalid + // request) to the client, but with the ID included in the response as it was able to be parsed. + let http_response = warp::test::request() + .body(r#"{"jsonrpc":"2.0","id":1,"method":{"not":"a string"}}"#) + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(http_response.status(), StatusCode::OK); + let rpc_response = from_http_response(http_response).await; + assert_eq!(rpc_response.id(), 1); + assert_eq!( + rpc_response.error().unwrap(), + &Error::new( + ReservedErrorCode::InvalidRequest, + "Expected 'method' to be a String" + ) + ); +} + +#[tokio::test] +async fn should_handle_malformed_request_but_valid_json() { + let _ = env_logger::try_init(); + + let filter = main_filter_with_recovery(); + + // This should get handled by `filters::handle_body` and return Response::Failure (invalid + // request) to the client as it can't be parsed as a JSON-RPC request. 
+ let http_response = warp::test::request() + .body(r#"{"a":1}"#) + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(http_response.status(), StatusCode::OK); + let rpc_response = from_http_response(http_response).await; + assert_eq!(rpc_response.id(), &Value::Null); + assert_eq!( + rpc_response.error().unwrap(), + &Error::new(ReservedErrorCode::InvalidRequest, "Missing 'jsonrpc' field") + ); +} + +#[tokio::test] +async fn should_handle_invalid_json() { + let _ = env_logger::try_init(); + + let filter = main_filter_with_recovery(); + + // This should get handled by `filters::handle_body` and return Response::Failure (parse error) + // to the client as it cannot be parsed as JSON. + let http_response = warp::test::request() + .body(r#"a"#) + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(http_response.status(), StatusCode::OK); + let rpc_response = from_http_response(http_response).await; + assert_eq!(rpc_response.id(), &Value::Null); + assert_eq!( + rpc_response.error().unwrap(), + &Error::new( + ReservedErrorCode::ParseError, + "expected value at line 1 column 1" + ) + ); +} diff --git a/json_rpc/src/lib.rs b/json_rpc/src/lib.rs new file mode 100644 index 00000000..f82a79cc --- /dev/null +++ b/json_rpc/src/lib.rs @@ -0,0 +1,177 @@ +//! # casper-json-rpc +//! +//! A library suitable for use as the framework for a JSON-RPC server. +//! +//! # Usage +//! +//! Normally usage will involve two steps: +//! * construct a set of request handlers using a [`RequestHandlersBuilder`] +//! * call [`casper_json_rpc::route`](route) to construct a boxed warp filter ready to be passed +//! to [`warp::service`](https://docs.rs/warp/latest/warp/fn.service.html) for example +//! +//! # Example +//! +//! ```no_run +//! use casper_json_rpc::{Error, Params, RequestHandlersBuilder}; +//! use std::{convert::Infallible, sync::Arc}; +//! +//! # #[allow(unused)] +//! async fn get(params: Option) -> Result { +//! 
// * parse params or return `ReservedErrorCode::InvalidParams` error +//! // * handle request and return result +//! Ok("got it".to_string()) +//! } +//! +//! # #[allow(unused)] +//! async fn put(params: Option, other_input: &str) -> Result { +//! Ok(other_input.to_string()) +//! } +//! +//! #[tokio::main] +//! async fn main() { +//! // Register handlers for methods "get" and "put". +//! let mut handlers = RequestHandlersBuilder::new(); +//! handlers.register_handler("get", Arc::new(get)); +//! let put_handler = move |params| async move { put(params, "other input").await }; +//! handlers.register_handler("put", Arc::new(put_handler)); +//! let handlers = handlers.build(); +//! +//! // Get the new route. +//! let path = "rpc"; +//! let max_body_bytes = 1024; +//! let allow_unknown_fields = false; +//! let route = casper_json_rpc::route(path, max_body_bytes, handlers, allow_unknown_fields); +//! +//! // Convert it into a `Service` and run it. +//! let make_svc = hyper::service::make_service_fn(move |_| { +//! let svc = warp::service(route.clone()); +//! async move { Ok::<_, Infallible>(svc.clone()) } +//! }); +//! +//! hyper::Server::bind(&([127, 0, 0, 1], 3030).into()) +//! .serve(make_svc) +//! .await +//! .unwrap(); +//! } +//! ``` +//! +//! # Errors +//! +//! To return a JSON-RPC response indicating an error, use [`Error::new`]. Most error conditions +//! which require returning a reserved error are already handled in the provided warp filters. The +//! only exception is [`ReservedErrorCode::InvalidParams`] which should be returned by any RPC +//! handler which deems the provided `params: Option` to be invalid for any reason. +//! +//! Generally a set of custom error codes should be provided. These should all implement +//! [`ErrorCodeT`]. 
+ +#![doc(html_root_url = "https://docs.rs/casper-json-rpc/1.1.0")] +#![doc( + html_favicon_url = "https://raw.githubusercontent.com/casper-network/casper-node/blob/dev/images/Casper_Logo_Favicon_48.png", + html_logo_url = "https://raw.githubusercontent.com/casper-network/casper-node/blob/dev/images/Casper_Logo_Favicon.png", + test(attr(deny(warnings))) +)] +#![warn( + missing_docs, + trivial_casts, + trivial_numeric_casts, + unused_qualifications +)] + +mod error; +pub mod filters; +mod rejections; +mod request; +mod request_handlers; +mod response; + +use http::{header::CONTENT_TYPE, Method}; +use warp::{filters::BoxedFilter, Filter, Reply}; + +pub use error::{Error, ErrorCodeT, ReservedErrorCode}; +pub use request::Params; +pub use request_handlers::{RequestHandlers, RequestHandlersBuilder}; +pub use response::Response; + +const JSON_RPC_VERSION: &str = "2.0"; + +/// Specifies the CORS origin +pub enum CorsOrigin { + /// Any (*) origin is allowed. + Any, + /// Only the specified origin is allowed. + Specified(String), +} + +/// Constructs a set of warp filters suitable for use in a JSON-RPC server. +/// +/// `path` specifies the exact HTTP path for JSON-RPC requests, e.g. "rpc" will match requests on +/// exactly "/rpc", and not "/rpc/other". +/// +/// `max_body_bytes` sets an upper limit for the number of bytes in the HTTP request body. For +/// further details, see +/// [`warp::filters::body::content_length_limit`](https://docs.rs/warp/latest/warp/filters/body/fn.content_length_limit.html). +/// +/// `handlers` is the map of functions to which incoming requests will be dispatched. These are +/// keyed by the JSON-RPC request's "method". +/// +/// If `allow_unknown_fields` is `false`, requests with unknown fields will cause the server to +/// respond with an error. +/// +/// For further details, see the docs for the [`filters`] functions. 
+pub fn route>( + path: P, + max_body_bytes: u32, + handlers: RequestHandlers, + allow_unknown_fields: bool, +) -> BoxedFilter<(impl Reply,)> { + filters::base_filter(path, max_body_bytes) + .and(filters::main_filter(handlers, allow_unknown_fields)) + .recover(filters::handle_rejection) + .boxed() +} + +/// Constructs a set of warp filters suitable for use in a JSON-RPC server. +/// +/// `path` specifies the exact HTTP path for JSON-RPC requests, e.g. "rpc" will match requests on +/// exactly "/rpc", and not "/rpc/other". +/// +/// `max_body_bytes` sets an upper limit for the number of bytes in the HTTP request body. For +/// further details, see +/// [`warp::filters::body::content_length_limit`](https://docs.rs/warp/latest/warp/filters/body/fn.content_length_limit.html). +/// +/// `handlers` is the map of functions to which incoming requests will be dispatched. These are +/// keyed by the JSON-RPC request's "method". +/// +/// If `allow_unknown_fields` is `false`, requests with unknown fields will cause the server to +/// respond with an error. +/// +/// Note that this is a convenience function combining the lower-level functions in [`filters`] +/// along with [a warp CORS filter](https://docs.rs/warp/latest/warp/filters/cors/index.html) which +/// * allows any origin or specified origin +/// * allows "content-type" as a header +/// * allows the method "POST" +/// +/// For further details, see the docs for the [`filters`] functions. 
+pub fn route_with_cors>( + path: P, + max_body_bytes: u32, + handlers: RequestHandlers, + allow_unknown_fields: bool, + cors_header: &CorsOrigin, +) -> BoxedFilter<(impl Reply,)> { + filters::base_filter(path, max_body_bytes) + .and(filters::main_filter(handlers, allow_unknown_fields)) + .recover(filters::handle_rejection) + .with(match cors_header { + CorsOrigin::Any => warp::cors() + .allow_any_origin() + .allow_header(CONTENT_TYPE) + .allow_method(Method::POST), + CorsOrigin::Specified(origin) => warp::cors() + .allow_origin(origin.as_str()) + .allow_header(CONTENT_TYPE) + .allow_method(Method::POST), + }) + .boxed() +} diff --git a/json_rpc/src/rejections.rs b/json_rpc/src/rejections.rs new file mode 100644 index 00000000..8219abbf --- /dev/null +++ b/json_rpc/src/rejections.rs @@ -0,0 +1,72 @@ +//! These types are used to allow a given warp filter to reject a request. The rejections are +//! handled in a subsequent function, where they are converted into meaningful responses. +//! +//! Rather than being returned to the client as a JSON-RPC response with the `error` field set, +//! they instead indicate a response at the HTTP level only. + +use std::fmt::{self, Display, Formatter}; + +use warp::reject::Reject; + +/// Indicates the "Content-Type" header of the request is not "application/json". +/// +/// This rejection is converted into an HTTP 415 (unsupported media type) error. +#[derive(Debug)] +pub(crate) struct UnsupportedMediaType; + +impl Display for UnsupportedMediaType { + fn fmt(&self, formatter: &mut Formatter<'_>) -> Result<(), fmt::Error> { + formatter.write_str("The request's content-type is not supported") + } +} + +impl Reject for UnsupportedMediaType {} + +/// Indicates the "Content-Type" header is missing from the request. +/// +/// This rejection is converted into an HTTP 400 (bad request) error. 
+#[derive(Debug)] +pub(crate) struct MissingContentTypeHeader; + +impl Display for MissingContentTypeHeader { + fn fmt(&self, formatter: &mut Formatter<'_>) -> Result<(), fmt::Error> { + formatter.write_str("The request's content-type is not set") + } +} + +impl Reject for MissingContentTypeHeader {} + +/// Indicates the JSON-RPC request is missing the `id` field. +/// +/// As per the JSON-RPC specification, this is classed as a Notification and the server should not +/// send a response. While no JSON-RPC response is generated for this error, we return an HTTP 400 +/// (bad request) error, as the node API does not support client Notifications. +#[derive(Debug)] +pub(crate) struct MissingId; + +impl Display for MissingId { + fn fmt(&self, formatter: &mut Formatter<'_>) -> Result<(), fmt::Error> { + formatter.write_str("The request is missing the 'id' field") + } +} + +impl Reject for MissingId {} + +/// Indicates the HTTP request body is greater than the maximum allowed. +/// +/// Wraps the configured maximum allowed on the server, set via the `max_body_bytes` parameter in +/// `base_filter()`. 
+#[derive(Debug)] +pub(crate) struct BodyTooLarge(pub(crate) u32); + +impl Display for BodyTooLarge { + fn fmt(&self, formatter: &mut Formatter<'_>) -> Result<(), fmt::Error> { + write!( + formatter, + "The request payload exceeds the maximum allowed of {} bytes", + self.0 + ) + } +} + +impl Reject for BodyTooLarge {} diff --git a/json_rpc/src/request.rs b/json_rpc/src/request.rs new file mode 100644 index 00000000..b0241603 --- /dev/null +++ b/json_rpc/src/request.rs @@ -0,0 +1,461 @@ +mod params; + +use itertools::Itertools; +use serde_json::{Map, Value}; +use warp::reject::{self, Rejection}; + +use crate::{ + error::{Error, ReservedErrorCode}, + rejections::MissingId, + JSON_RPC_VERSION, +}; +pub use params::Params; + +const JSONRPC_FIELD_NAME: &str = "jsonrpc"; +const METHOD_FIELD_NAME: &str = "method"; +const PARAMS_FIELD_NAME: &str = "params"; +const ID_FIELD_NAME: &str = "id"; + +/// Errors are returned to the client as a JSON-RPC response and HTTP 200 (OK), whereas rejections +/// cause no JSON-RPC response to be sent, but an appropriate HTTP 4xx error will be returned. +#[derive(Debug)] +pub(crate) enum ErrorOrRejection { + Error { id: Value, error: Error }, + Rejection(Rejection), +} + +/// A request which has been validated as conforming to the JSON-RPC specification. +pub(crate) struct Request { + pub id: Value, + pub method: String, + pub params: Option, +} + +/// Returns `Ok` if `id` is a String, Null or a Number with no fractional part. 
+fn is_valid(id: &Value) -> Result<(), Error> { + match id { + Value::String(_) | Value::Null => (), + Value::Number(number) => { + if number.is_f64() { + return Err(Error::new( + ReservedErrorCode::InvalidRequest, + "'id' must not contain fractional parts if it is a number", + )); + } + } + _ => { + return Err(Error::new( + ReservedErrorCode::InvalidRequest, + "'id' should be a string or integer", + )); + } + } + Ok(()) +} + +impl Request { + /// Returns `Ok` if the request is valid as per + /// [the JSON-RPC specification](https://www.jsonrpc.org/specification#request_object). + /// + /// Returns an `Error` in any of the following cases: + /// * "jsonrpc" field is not "2.0" + /// * "method" field is not a String + /// * "params" field is present, but is not an Array or Object + /// * "id" field is not a String, valid Number or Null + /// * "id" field is a Number with fractional part + /// * `allow_unknown_fields` is `false` and extra fields exist + /// + /// Returns a `Rejection` if the "id" field is `None`. + pub(super) fn new( + mut request: Map, + allow_unknown_fields: bool, + ) -> Result { + // Just copy "id" field for now to return verbatim in any errors before we get to actually + // validating the "id" field itself. 
+ let id = request.get(ID_FIELD_NAME).cloned().unwrap_or_default(); + + match request.remove(JSONRPC_FIELD_NAME) { + Some(Value::String(jsonrpc)) => { + if jsonrpc != JSON_RPC_VERSION { + let error = Error::new( + ReservedErrorCode::InvalidRequest, + format!("Expected 'jsonrpc' to be '2.0', but got '{}'", jsonrpc), + ); + return Err(ErrorOrRejection::Error { id, error }); + } + } + Some(Value::Number(jsonrpc)) => { + let error = Error::new( + ReservedErrorCode::InvalidRequest, + format!( + "Expected 'jsonrpc' to be a String with value '2.0', but got a Number '{}'", + jsonrpc + ), + ); + return Err(ErrorOrRejection::Error { id, error }); + } + Some(jsonrpc) => { + let error = Error::new( + ReservedErrorCode::InvalidRequest, + format!( + "Expected 'jsonrpc' to be a String with value '2.0', but got '{}'", + jsonrpc + ), + ); + return Err(ErrorOrRejection::Error { id, error }); + } + None => { + let error = Error::new( + ReservedErrorCode::InvalidRequest, + format!("Missing '{}' field", JSONRPC_FIELD_NAME), + ); + return Err(ErrorOrRejection::Error { id, error }); + } + } + + let method = match request.remove(METHOD_FIELD_NAME) { + Some(Value::String(method)) => method, + Some(_) => { + let error = Error::new( + ReservedErrorCode::InvalidRequest, + format!("Expected '{}' to be a String", METHOD_FIELD_NAME), + ); + return Err(ErrorOrRejection::Error { id, error }); + } + None => { + let error = Error::new( + ReservedErrorCode::InvalidRequest, + format!("Missing '{}' field", METHOD_FIELD_NAME), + ); + return Err(ErrorOrRejection::Error { id, error }); + } + }; + + let params = match request.remove(PARAMS_FIELD_NAME) { + Some(unvalidated_params) => Some(Params::try_from(&id, unvalidated_params)?), + None => None, + }; + + let id = match request.remove(ID_FIELD_NAME) { + Some(id) => { + is_valid(&id).map_err(|error| ErrorOrRejection::Error { + id: Value::Null, + error, + })?; + id + } + None => return Err(ErrorOrRejection::Rejection(reject::custom(MissingId))), + }; + + if 
!allow_unknown_fields && !request.is_empty() { + let error = Error::new( + ReservedErrorCode::InvalidRequest, + format!( + "Unexpected field{}: {}", + if request.len() > 1 { "s" } else { "" }, + request.keys().map(|f| format!("'{}'", f)).join(", ") + ), + ); + return Err(ErrorOrRejection::Error { id, error }); + } + + Ok(Request { id, method, params }) + } +} + +#[cfg(test)] +mod tests { + use serde_json::json; + + use super::*; + + #[test] + fn should_validate_using_valid_id() { + fn run_test(id: Value) { + let method = "a".to_string(); + let params_inner = vec![Value::Bool(true)]; + + let unvalidated = json!({ + JSONRPC_FIELD_NAME: JSON_RPC_VERSION, + ID_FIELD_NAME: id, + METHOD_FIELD_NAME: method, + PARAMS_FIELD_NAME: params_inner, + }) + .as_object() + .cloned() + .unwrap(); + + let request = Request::new(unvalidated, false).unwrap(); + assert_eq!(request.id, id); + assert_eq!(request.method, method); + assert_eq!(request.params.unwrap(), Params::Array(params_inner)); + } + + run_test(Value::String("the id".to_string())); + run_test(json!(1314)); + run_test(Value::Null); + } + + #[test] + fn should_fail_to_validate_id_with_wrong_type() { + let request = json!({ + JSONRPC_FIELD_NAME: JSON_RPC_VERSION, + ID_FIELD_NAME: true, + METHOD_FIELD_NAME: "a", + }) + .as_object() + .cloned() + .unwrap(); + + let error = match Request::new(request, false) { + Err(ErrorOrRejection::Error { + id: Value::Null, + error, + }) => error, + _ => panic!("should be error"), + }; + assert_eq!( + error, + Error::new( + ReservedErrorCode::InvalidRequest, + "'id' should be a string or integer" + ) + ); + } + + #[test] + fn should_fail_to_validate_id_with_fractional_part() { + let request = json!({ + JSONRPC_FIELD_NAME: JSON_RPC_VERSION, + ID_FIELD_NAME: 1.1, + METHOD_FIELD_NAME: "a", + }) + .as_object() + .cloned() + .unwrap(); + + let error = match Request::new(request, false) { + Err(ErrorOrRejection::Error { + id: Value::Null, + error, + }) => error, + _ => panic!("should be error"), 
+ }; + assert_eq!( + error, + Error::new( + ReservedErrorCode::InvalidRequest, + "'id' must not contain fractional parts if it is a number" + ) + ); + } + + #[test] + fn should_reject_with_missing_id() { + let request = json!({ + JSONRPC_FIELD_NAME: JSON_RPC_VERSION, + METHOD_FIELD_NAME: "a", + }) + .as_object() + .cloned() + .unwrap(); + + match Request::new(request, false) { + Err(ErrorOrRejection::Rejection(_)) => (), + _ => panic!("should be rejection"), + }; + } + + #[test] + fn should_fail_to_validate_with_invalid_jsonrpc_field_value() { + let request = json!({ + JSONRPC_FIELD_NAME: "2.1", + ID_FIELD_NAME: "a", + METHOD_FIELD_NAME: "a", + }) + .as_object() + .cloned() + .unwrap(); + + let error = match Request::new(request, false) { + Err(ErrorOrRejection::Error { + id: Value::String(id), + error, + }) if id == "a" => error, + _ => panic!("should be error"), + }; + assert_eq!( + error, + Error::new( + ReservedErrorCode::InvalidRequest, + "Expected 'jsonrpc' to be '2.0', but got '2.1'" + ) + ); + } + + #[test] + fn should_fail_to_validate_with_invalid_jsonrpc_field_type() { + let request = json!({ + JSONRPC_FIELD_NAME: true, + ID_FIELD_NAME: "a", + METHOD_FIELD_NAME: "a", + }) + .as_object() + .cloned() + .unwrap(); + + let error = match Request::new(request, false) { + Err(ErrorOrRejection::Error { + id: Value::String(id), + error, + }) if id == "a" => error, + _ => panic!("should be error"), + }; + assert_eq!( + error, + Error::new( + ReservedErrorCode::InvalidRequest, + "Expected 'jsonrpc' to be a String with value '2.0', but got 'true'" + ) + ); + } + + #[test] + fn should_fail_to_validate_with_missing_jsonrpc_field() { + let request = json!({ + ID_FIELD_NAME: "a", + METHOD_FIELD_NAME: "a", + }) + .as_object() + .cloned() + .unwrap(); + + let error = match Request::new(request, false) { + Err(ErrorOrRejection::Error { + id: Value::String(id), + error, + }) if id == "a" => error, + _ => panic!("should be error"), + }; + assert_eq!( + error, + 
Error::new(ReservedErrorCode::InvalidRequest, "Missing 'jsonrpc' field") + ); + } + + #[test] + fn should_fail_to_validate_with_invalid_method_field_type() { + let request = json!({ + JSONRPC_FIELD_NAME: JSON_RPC_VERSION, + ID_FIELD_NAME: "a", + METHOD_FIELD_NAME: 1, + }) + .as_object() + .cloned() + .unwrap(); + + let error = match Request::new(request, false) { + Err(ErrorOrRejection::Error { + id: Value::String(id), + error, + }) if id == "a" => error, + _ => panic!("should be error"), + }; + assert_eq!( + error, + Error::new( + ReservedErrorCode::InvalidRequest, + "Expected 'method' to be a String" + ) + ); + } + + #[test] + fn should_fail_to_validate_with_missing_method_field() { + let request = json!({ + JSONRPC_FIELD_NAME: JSON_RPC_VERSION, + ID_FIELD_NAME: "a", + }) + .as_object() + .cloned() + .unwrap(); + + let error = match Request::new(request, false) { + Err(ErrorOrRejection::Error { + id: Value::String(id), + error, + }) if id == "a" => error, + _ => panic!("should be error"), + }; + assert_eq!( + error, + Error::new(ReservedErrorCode::InvalidRequest, "Missing 'method' field") + ); + } + + #[test] + fn should_fail_to_validate_with_invalid_params_type() { + let request = json!({ + JSONRPC_FIELD_NAME: JSON_RPC_VERSION, + ID_FIELD_NAME: "a", + METHOD_FIELD_NAME: "a", + PARAMS_FIELD_NAME: "a", + }) + .as_object() + .cloned() + .unwrap(); + + let error = match Request::new(request, false) { + Err(ErrorOrRejection::Error { + id: Value::String(id), + error, + }) if id == "a" => error, + _ => panic!("should be error"), + }; + assert_eq!( + error, + Error::new( + ReservedErrorCode::InvalidRequest, + "If present, 'params' must be an Array or Object, but was a String" + ) + ); + } + + fn request_with_extra_fields() -> Map { + json!({ + JSONRPC_FIELD_NAME: JSON_RPC_VERSION, + ID_FIELD_NAME: "a", + METHOD_FIELD_NAME: "a", + "extra": 1, + "another": true, + }) + .as_object() + .cloned() + .unwrap() + } + + #[test] + fn should_validate_with_extra_fields_if_allowed() 
{ + let request = request_with_extra_fields(); + assert!(Request::new(request, true).is_ok()); + } + + #[test] + fn should_fail_to_validate_with_extra_fields_if_disallowed() { + let request = request_with_extra_fields(); + let error = match Request::new(request, false) { + Err(ErrorOrRejection::Error { + id: Value::String(id), + error, + }) if id == "a" => error, + _ => panic!("should be error"), + }; + assert_eq!( + error, + Error::new( + ReservedErrorCode::InvalidRequest, + "Unexpected fields: 'another', 'extra'" + ) + ); + } +} diff --git a/json_rpc/src/request/params.rs b/json_rpc/src/request/params.rs new file mode 100644 index 00000000..4da0dfb2 --- /dev/null +++ b/json_rpc/src/request/params.rs @@ -0,0 +1,202 @@ +use std::fmt::{self, Display, Formatter}; + +use serde_json::{Map, Value}; + +use super::ErrorOrRejection; +use crate::error::{Error, ReservedErrorCode}; + +/// The "params" field of a JSON-RPC request. +/// +/// As per [the JSON-RPC specification](https://www.jsonrpc.org/specification#parameter_structures), +/// if present these must be a JSON Array or Object. +/// +/// **NOTE:** Currently we treat '"params": null' as '"params": []', but this deviation from the +/// standard will be removed in an upcoming release, and `null` will become an invalid value. +/// +/// `Params` is effectively a restricted [`serde_json::Value`], and can be converted to a `Value` +/// using `Value::from()` if required. +#[derive(Clone, Eq, PartialEq, Debug)] +pub enum Params { + /// Represents a JSON Array. + Array(Vec), + /// Represents a JSON Object. 
+ Object(Map), +} + +impl Params { + pub(super) fn try_from(request_id: &Value, params: Value) -> Result { + let err_invalid_request = |additional_info: &str| { + let error = Error::new(ReservedErrorCode::InvalidRequest, additional_info); + Err(ErrorOrRejection::Error { + id: request_id.clone(), + error, + }) + }; + + match params { + Value::Null => Ok(Params::Array(vec![])), + Value::Bool(false) => err_invalid_request( + "If present, 'params' must be an Array or Object, but was 'false'", + ), + Value::Bool(true) => err_invalid_request( + "If present, 'params' must be an Array or Object, but was 'true'", + ), + Value::Number(_) => err_invalid_request( + "If present, 'params' must be an Array or Object, but was a Number", + ), + Value::String(_) => err_invalid_request( + "If present, 'params' must be an Array or Object, but was a String", + ), + Value::Array(array) => Ok(Params::Array(array)), + Value::Object(map) => Ok(Params::Object(map)), + } + } + + /// Returns `true` if `self` is an Array, otherwise returns `false`. + pub fn is_array(&self) -> bool { + self.as_array().is_some() + } + + /// Returns a reference to the inner `Vec` if `self` is an Array, otherwise returns `None`. + pub fn as_array(&self) -> Option<&Vec> { + match self { + Params::Array(array) => Some(array), + _ => None, + } + } + + /// Returns a mutable reference to the inner `Vec` if `self` is an Array, otherwise returns + /// `None`. + pub fn as_array_mut(&mut self) -> Option<&mut Vec> { + match self { + Params::Array(array) => Some(array), + _ => None, + } + } + + /// Returns `true` if `self` is an Object, otherwise returns `false`. + pub fn is_object(&self) -> bool { + self.as_object().is_some() + } + + /// Returns a reference to the inner `Map` if `self` is an Object, otherwise returns `None`. 
+ pub fn as_object(&self) -> Option<&Map> { + match self { + Params::Object(map) => Some(map), + _ => None, + } + } + + /// Returns a mutable reference to the inner `Map` if `self` is an Object, otherwise returns + /// `None`. + pub fn as_object_mut(&mut self) -> Option<&mut Map> { + match self { + Params::Object(map) => Some(map), + _ => None, + } + } + + /// Returns `true` if `self` is an empty Array or an empty Object, otherwise returns `false`. + pub fn is_empty(&self) -> bool { + match self { + Params::Array(array) => array.is_empty(), + Params::Object(map) => map.is_empty(), + } + } +} + +impl Display for Params { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + Display::fmt(&Value::from(self.clone()), formatter) + } +} + +/// The default value for `Params` is an empty Array. +impl Default for Params { + fn default() -> Self { + Params::Array(vec![]) + } +} + +impl From for Value { + fn from(params: Params) -> Self { + match params { + Params::Array(array) => Value::Array(array), + Params::Object(map) => Value::Object(map), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn should_fail_to_convert_invalid_params(bad_params: Value, expected_invalid_type_msg: &str) { + let original_id = Value::from(1_i8); + match Params::try_from(&original_id, bad_params).unwrap_err() { + ErrorOrRejection::Error { id, error } => { + assert_eq!(id, original_id); + let expected_error = format!( + r#"{{"code":-32600,"message":"Invalid Request","data":"If present, 'params' must be an Array or Object, but was {}"}}"#, + expected_invalid_type_msg + ); + assert_eq!(serde_json::to_string(&error).unwrap(), expected_error); + } + other => panic!("unexpected: {:?}", other), + } + } + + #[test] + fn should_convert_params_from_null() { + let original_id = Value::from(1_i8); + + let params = Params::try_from(&original_id, Value::Null).unwrap(); + assert!(matches!(params, Params::Array(v) if v.is_empty())); + } + + #[test] + fn 
should_fail_to_convert_params_from_false() { + should_fail_to_convert_invalid_params(Value::Bool(false), "'false'") + } + + #[test] + fn should_fail_to_convert_params_from_true() { + should_fail_to_convert_invalid_params(Value::Bool(true), "'true'") + } + + #[test] + fn should_fail_to_convert_params_from_a_number() { + should_fail_to_convert_invalid_params(Value::from(9_u8), "a Number") + } + + #[test] + fn should_fail_to_convert_params_from_a_string() { + should_fail_to_convert_invalid_params(Value::from("s"), "a String") + } + + #[test] + fn should_convert_params_from_an_array() { + let original_id = Value::from(1_i8); + + let params = Params::try_from(&original_id, Value::Array(vec![])).unwrap(); + assert!(matches!(params, Params::Array(v) if v.is_empty())); + + let array = vec![Value::from(9_i16), Value::Bool(false)]; + let params = Params::try_from(&original_id, Value::Array(array.clone())).unwrap(); + assert!(matches!(params, Params::Array(v) if v == array)); + } + + #[test] + fn should_convert_params_from_an_object() { + let original_id = Value::from(1_i8); + + let params = Params::try_from(&original_id, Value::Object(Map::new())).unwrap(); + assert!(matches!(params, Params::Object(v) if v.is_empty())); + + let mut map = Map::new(); + map.insert("a".to_string(), Value::from(9_i16)); + map.insert("b".to_string(), Value::Bool(false)); + let params = Params::try_from(&original_id, Value::Object(map.clone())).unwrap(); + assert!(matches!(params, Params::Object(v) if v == map)); + } +} diff --git a/json_rpc/src/request_handlers.rs b/json_rpc/src/request_handlers.rs new file mode 100644 index 00000000..4eed4856 --- /dev/null +++ b/json_rpc/src/request_handlers.rs @@ -0,0 +1,115 @@ +use std::{collections::HashMap, future::Future, pin::Pin, sync::Arc}; + +use futures::FutureExt; +use serde::Serialize; +use serde_json::Value; +use tracing::{debug, error}; + +use crate::{ + error::{Error, ReservedErrorCode}, + request::{Params, Request}, + response::Response, +}; + 
+/// A boxed future of `Result`; the return type of a request-handling closure. +type HandleRequestFuture = Pin> + Send>>; +/// A request-handling closure. +type RequestHandler = Arc) -> HandleRequestFuture + Send + Sync>; + +/// A collection of request-handlers, indexed by the JSON-RPC "method" applicable to each. +/// +/// There needs to be a unique handler for each JSON-RPC request "method" to be handled. Handlers +/// are added via a [`RequestHandlersBuilder`]. +#[derive(Clone)] +pub struct RequestHandlers(Arc>); + +impl RequestHandlers { + /// Finds the relevant handler for the given request's "method" field, and invokes it with the + /// given "params" value. + /// + /// If a handler cannot be found, a MethodNotFound error is created. In this case, or if + /// invoking the handler yields an [`Error`], the error is converted into a + /// [`Response::Failure`]. + /// + /// Otherwise a [`Response::Success`] is returned. + pub(crate) async fn handle_request(&self, request: Request) -> Response { + let handler = match self.0.get(request.method.as_str()) { + Some(handler) => Arc::clone(handler), + None => { + debug!(requested_method = %request.method.as_str(), "failed to get handler"); + let error = Error::new( + ReservedErrorCode::MethodNotFound, + format!( + "'{}' is not a supported json-rpc method on this server", + request.method.as_str() + ), + ); + return Response::new_failure(request.id, error); + } + }; + + match handler(request.params).await { + Ok(result) => Response::new_success(request.id, result), + Err(error) => Response::new_failure(request.id, error), + } + } +} + +/// A builder for [`RequestHandlers`]. +// +// This builder exists so the internal `HashMap` can be populated before it is made immutable behind +// the `Arc` in the `RequestHandlers`. +#[derive(Default)] +pub struct RequestHandlersBuilder(HashMap<&'static str, RequestHandler>); + +impl RequestHandlersBuilder { + /// Returns a new builder. 
+ pub fn new() -> Self { + Self::default() + } + + /// Adds a new request-handler which will be called to handle all JSON-RPC requests with the + /// given "method" field. + /// + /// The handler should be an async closure or function with a signature like: + /// ```ignore + /// async fn handle_it(params: Option) -> Result + /// ``` + /// where `T` implements `Serialize` and will be used as the JSON-RPC response's "result" field. + pub fn register_handler(&mut self, method: &'static str, handler: Arc) + where + Func: Fn(Option) -> Fut + Send + Sync + 'static, + Fut: Future> + Send, + T: Serialize + 'static, + { + let handler = Arc::clone(&handler); + // The provided handler returns a future with output of `Result`. We need to + // convert that to a boxed future with output `Result` to store it in a + // homogenous collection. + let wrapped_handler = move |maybe_params| { + let handler = Arc::clone(&handler); + async move { + let success = Arc::clone(&handler)(maybe_params).await?; + serde_json::to_value(success).map_err(|error| { + error!(%error, "failed to encode json-rpc response value"); + Error::new( + ReservedErrorCode::InternalError, + format!("failed to encode json-rpc response value: {}", error), + ) + }) + } + .boxed() + }; + if self.0.insert(method, Arc::new(wrapped_handler)).is_some() { + error!( + method, + "already registered a handler for this json-rpc request method" + ); + } + } + + /// Finalize building by converting `self` to a [`RequestHandlers`]. + pub fn build(self) -> RequestHandlers { + RequestHandlers(Arc::new(self.0)) + } +} diff --git a/json_rpc/src/response.rs b/json_rpc/src/response.rs new file mode 100644 index 00000000..b9daf81b --- /dev/null +++ b/json_rpc/src/response.rs @@ -0,0 +1,108 @@ +use std::borrow::Cow; + +use serde::{ + de::{DeserializeOwned, Deserializer}, + Deserialize, Serialize, +}; +use serde_json::Value; +use tracing::error; + +use super::{Error, JSON_RPC_VERSION}; + +/// A JSON-RPC response. 
+#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] +#[serde(deny_unknown_fields, untagged)] +pub enum Response { + /// A successful RPC execution. + Success { + /// The JSON-RPC version field. + #[serde(deserialize_with = "set_jsonrpc_field")] + jsonrpc: Cow<'static, str>, + /// The same ID as was passed in the corresponding request. + id: Value, + /// The successful result of executing the RPC. + result: Value, + }, + /// An RPC execution which failed. + Failure { + /// The JSON-RPC version field. + #[serde(deserialize_with = "set_jsonrpc_field")] + jsonrpc: Cow<'static, str>, + /// The same ID as was passed in the corresponding request. + id: Value, + /// The error encountered while executing the RPC. + error: Error, + }, +} + +impl Response { + /// Returns a new `Response::Success`. + pub fn new_success(id: Value, result: Value) -> Self { + Response::Success { + jsonrpc: Cow::Borrowed(JSON_RPC_VERSION), + id, + result, + } + } + + /// Returns a new `Response::Failure`. + pub fn new_failure(id: Value, error: Error) -> Self { + Response::Failure { + jsonrpc: Cow::Borrowed(JSON_RPC_VERSION), + id, + error, + } + } + + /// Returns `true` is this is a `Response::Success`. + pub fn is_success(&self) -> bool { + matches!(self, Response::Success { .. }) + } + + /// Returns `true` is this is a `Response::Failure`. + pub fn is_failure(&self) -> bool { + matches!(self, Response::Failure { .. }) + } + + /// Returns the "result" field, or `None` if this is a `Response::Failure`. + pub fn raw_result(&self) -> Option<&Value> { + match &self { + Response::Success { result, .. } => Some(result), + Response::Failure { .. } => None, + } + } + + /// Returns the "result" field parsed as `T`, or `None` if this is a `Response::Failure` or if + /// parsing fails. + pub fn result(&self) -> Option { + match &self { + Response::Success { result, .. 
} => serde_json::from_value(result.clone()) + .map_err(|error| { + error!("failed to parse: {}", error); + }) + .ok(), + Response::Failure { .. } => None, + } + } + + /// Returns the "error" field or `None` if this is a `Response::Success`. + pub fn error(&self) -> Option<&Error> { + match &self { + Response::Success { .. } => None, + Response::Failure { error, .. } => Some(error), + } + } + + /// Returns the "id" field. + pub fn id(&self) -> &Value { + match &self { + Response::Success { id, .. } | Response::Failure { id, .. } => id, + } + } +} + +fn set_jsonrpc_field<'de, D: Deserializer<'de>>( + _deserializer: D, +) -> Result, D::Error> { + Ok(Cow::Borrowed(JSON_RPC_VERSION)) +} diff --git a/listener/Cargo.toml b/listener/Cargo.toml index c79f9223..c8e49e60 100644 --- a/listener/Cargo.toml +++ b/listener/Cargo.toml @@ -9,29 +9,29 @@ homepage = "https://github.com/CasperLabs/event-sidecar" repository = "https://github.com/CasperLabs/event-sidecar" [dependencies] -anyhow = "1.0.65" +anyhow = { workspace = true } async-stream = { workspace = true } async-trait = "0.1.72" bytes = "1.2.0" casper-event-types = { path = "../types", version = "1.0.0" } -casper-types = { version = "3.0.0", features = ["std"] } +casper-types = { workspace = true, features = ["std"] } eventsource-stream = "0.2.3" -futures = "0.3.24" +futures = { workspace = true } +futures-util = { workspace = true } +once_cell = { workspace = true } reqwest = { version = "0.11", features = ["json", "stream"] } -serde = { version = "1.0", features = ["derive"] } +serde = { workspace = true, default-features = true, features = ["derive"] } serde_json = "1.0" -thiserror = "1.0.37" -tokio = { version = "1", features = ["full"] } +thiserror = { workspace = true } +tokio = { workspace = true, features = ["full"] } tokio-stream = { version = "0.1.4", features = ["sync"] } tokio-util = "0.7.8" -tracing = "0.1" +tracing = { workspace = true, default-features = true } url = "2.3.1" -once_cell = { workspace = true } 
-futures-util = { workspace = true } [dev-dependencies] -casper-event-types = { path = "../types", version = "1.0.0", features = ["sse-data-testing"]} +casper-event-types = { path = "../types", version = "1.0.0", features = ["sse-data-testing"] } eventsource-stream = "0.2.3" mockito = "1.2.0" portpicker = "0.1.1" -warp = { version = "0.3.6"} +warp = { version = "0.3.6" } diff --git a/resources/ETC_README.md b/resources/ETC_README.md index 99ba0a1d..6683d749 100644 --- a/resources/ETC_README.md +++ b/resources/ETC_README.md @@ -30,7 +30,7 @@ The Sidecar can connect to Casper nodes with versions greater or equal to `1.5.2 The `node_connections` option configures the node (or multiple nodes) to which the Sidecar will connect and the parameters under which it will operate with that node. ``` -[[connections]] +[[sse_server.connections]] ip_address = "127.0.0.1" sse_port = 9999 rest_port = 8888 @@ -44,8 +44,8 @@ sleep_between_keep_alive_checks_in_seconds = 30 ``` * `ip_address` - The IP address of the node to monitor. -* `sse_port` - The node's event stream (SSE) port. This [example configuration](../EXAMPLE_NODE_CONFIG.toml) uses port `9999`. -* `rest_port` - The node's REST endpoint for status and metrics. This [example configuration](../EXAMPLE_NODE_CONFIG.toml) uses port `8888`. +* `sse_port` - The node's event stream (SSE) port. This [example configuration](../resources/example_configs/EXAMPLE_NODE_CONFIG.toml) uses port `9999`. +* `rest_port` - The node's REST endpoint for status and metrics. This [example configuration](../resources/example_configs/EXAMPLE_NODE_CONFIG.toml) uses port `8888`. * `max_attempts` - The maximum number of attempts the Sidecar will make to connect to the node. If set to `0`, the Sidecar will not attempt to connect. * `delay_between_retries_in_seconds` - The delay between attempts to connect to the node. * `allow_partial_connection` - Determining whether the sidecar will allow a partial connection to this node. 
@@ -54,10 +54,10 @@ sleep_between_keep_alive_checks_in_seconds = 30 * `no_message_timeout_in_seconds` - Number of seconds after which the connection will be restarted if no bytes were received. Parameter is optional, defaults to 120 * `sleep_between_keep_alive_checks_in_seconds` - Optional parameter specifying the time intervals (in seconds) for checking if the connection is still alive. Defaults to 60 -Connecting to multiple nodes requires multiple `[[connections]]` sections: +Connecting to multiple nodes requires multiple `[[sse_server.connections]]` sections: ``` -[[connections]] +[[sse_server.connections]] ip_address = "127.0.0.1" sse_port = 9999 rest_port = 8888 @@ -66,7 +66,7 @@ delay_between_retries_in_seconds = 5 allow_partial_connection = false enable_logging = true -[[connections]] +[[sse_server.connections]] ip_address = "18.154.79.193" sse_port = 1234 rest_port = 3456 @@ -167,7 +167,7 @@ This information determines outbound connection criteria for the Sidecar's `rest ``` -[rest_server] +[rest_api_server] port = 18888 max_concurrent_requests = 50 max_requests_per_second = 50 @@ -200,7 +200,7 @@ Additionally, there are the following two options: This optional section configures the Sidecar's administrative REST server. If this section is not specified, the Sidecar will not start an admin server. 
``` -[admin_server] +[admin_api_server] port = 18887 max_concurrent_requests = 1 max_requests_per_second = 1 diff --git a/EXAMPLE_NCTL_CONFIG.toml b/resources/example_configs/EXAMPLE_NCTL_CONFIG.toml similarity index 80% rename from EXAMPLE_NCTL_CONFIG.toml rename to resources/example_configs/EXAMPLE_NCTL_CONFIG.toml index 7dbee9e9..78f31211 100644 --- a/EXAMPLE_NCTL_CONFIG.toml +++ b/resources/example_configs/EXAMPLE_NCTL_CONFIG.toml @@ -1,4 +1,4 @@ -[[connections]] +[[sse_server.connections]] ip_address = "127.0.0.1" sse_port = 18101 rest_port = 14101 @@ -7,7 +7,7 @@ delay_between_retries_in_seconds = 5 allow_partial_connection = false enable_logging = true -[[connections]] +[[sse_server.connections]] ip_address = "127.0.0.1" sse_port = 18102 rest_port = 14102 @@ -16,7 +16,7 @@ delay_between_retries_in_seconds = 5 allow_partial_connection = false enable_logging = false -[[connections]] +[[sse_server.connections]] ip_address = "127.0.0.1" sse_port = 18103 rest_port = 14103 @@ -26,6 +26,11 @@ allow_partial_connection = false enable_logging = false connection_timeout_in_seconds = 3 +[sse_server.event_stream_server] +port = 19999 +max_concurrent_subscribers = 100 +event_stream_buffer_length = 5000 + [storage] storage_path = "./target/storage" @@ -35,12 +40,12 @@ max_connections_in_pool = 100 # https://www.sqlite.org/compile.html#default_wal_autocheckpoint wal_autocheckpointing_interval = 1000 -[rest_server] +[rest_api_server] port = 18888 max_concurrent_requests = 50 max_requests_per_second = 50 -[event_stream_server] -port = 19999 -max_concurrent_subscribers = 100 -event_stream_buffer_length = 5000 +[admin_api_server] +port = 18887 +max_concurrent_requests = 1 +max_requests_per_second = 1 diff --git a/EXAMPLE_NCTL_POSTGRES_CONFIG.toml b/resources/example_configs/EXAMPLE_NCTL_POSTGRES_CONFIG.toml similarity index 87% rename from EXAMPLE_NCTL_POSTGRES_CONFIG.toml rename to resources/example_configs/EXAMPLE_NCTL_POSTGRES_CONFIG.toml index b380eb9e..43a30918 100644 --- 
a/EXAMPLE_NCTL_POSTGRES_CONFIG.toml +++ b/resources/example_configs/EXAMPLE_NCTL_POSTGRES_CONFIG.toml @@ -1,4 +1,4 @@ -[[connections]] +[[sse_server.connections]] ip_address = "127.0.0.1" sse_port = 18101 rest_port = 14101 @@ -7,7 +7,7 @@ delay_between_retries_in_seconds = 5 allow_partial_connection = false enable_logging = true -[[connections]] +[[sse_server.connections]] ip_address = "127.0.0.1" sse_port = 18102 rest_port = 14102 @@ -16,7 +16,7 @@ delay_between_retries_in_seconds = 5 allow_partial_connection = false enable_logging = false -[[connections]] +[[sse_server.connections]] ip_address = "127.0.0.1" sse_port = 18103 rest_port = 14103 @@ -26,6 +26,11 @@ allow_partial_connection = false enable_logging = false connection_timeout_in_seconds = 3 +[sse_server.event_stream_server] +port = 19999 +max_concurrent_subscribers = 100 +event_stream_buffer_length = 5000 + [storage] storage_path = "./target/storage" @@ -36,12 +41,7 @@ database_password = "p@$$w0rd" database_username = "postgres" max_connections_in_pool = 30 -[rest_server] +[rest_api_server] port = 18888 max_concurrent_requests = 50 max_requests_per_second = 50 - -[event_stream_server] -port = 19999 -max_concurrent_subscribers = 100 -event_stream_buffer_length = 5000 diff --git a/EXAMPLE_NODE_CONFIG.toml b/resources/example_configs/EXAMPLE_NODE_CONFIG.toml similarity index 84% rename from EXAMPLE_NODE_CONFIG.toml rename to resources/example_configs/EXAMPLE_NODE_CONFIG.toml index 212db146..be579bce 100644 --- a/EXAMPLE_NODE_CONFIG.toml +++ b/resources/example_configs/EXAMPLE_NODE_CONFIG.toml @@ -1,4 +1,4 @@ -[[connections]] +[[sse_server.connections]] ip_address = "127.0.0.1" sse_port = 9999 rest_port = 8888 @@ -7,7 +7,7 @@ delay_between_retries_in_seconds = 5 allow_partial_connection = false enable_logging = true -[[connections]] +[[sse_server.connections]] ip_address = "168.254.51.2" sse_port = 9999 rest_port = 8888 @@ -16,7 +16,7 @@ delay_between_retries_in_seconds = 5 allow_partial_connection = false 
enable_logging = true -[[connections]] +[[sse_server.connections]] ip_address = "168.254.51.3" sse_port = 9999 rest_port = 8888 @@ -25,6 +25,11 @@ delay_between_retries_in_seconds = 5 allow_partial_connection = false enable_logging = true +[sse_server.event_stream_server] +port = 19999 +max_concurrent_subscribers = 100 +event_stream_buffer_length = 5000 + [storage] storage_path = "/var/lib/casper-event-sidecar" @@ -34,17 +39,12 @@ max_connections_in_pool = 100 # https://www.sqlite.org/compile.html#default_wal_autocheckpoint wal_autocheckpointing_interval = 1000 -[rest_server] +[rest_api_server] port = 18888 max_concurrent_requests = 50 max_requests_per_second = 50 -[event_stream_server] -port = 19999 -max_concurrent_subscribers = 100 -event_stream_buffer_length = 5000 - -[admin_server] +[admin_api_server] port = 18887 max_concurrent_requests = 1 -max_requests_per_second = 1 \ No newline at end of file +max_requests_per_second = 1 diff --git a/resources/example_configs/default_rpc_only_config.toml b/resources/example_configs/default_rpc_only_config.toml new file mode 100644 index 00000000..127110bd --- /dev/null +++ b/resources/example_configs/default_rpc_only_config.toml @@ -0,0 +1,86 @@ +# ================================================== +# Configuration options for the JSON-RPC HTTP server +# ================================================== +[rpc_server.main_server] +# Enables the JSON-RPC HTTP server. +enable_server = true + +# Listening address for JSON-RPC HTTP server. If the port is set to 0, a random port will be used. +# +# If the specified port cannot be bound to, a random port will be tried instead. If binding fails, +# the JSON-RPC HTTP server will not run, but the node will be otherwise unaffected. +# +# The actual bound address will be reported via a log line if logging is enabled. +address = '0.0.0.0:7777' + +# The global max rate of requests (per second) before they are limited. +# Request will be delayed to the next 1 second bucket once limited. 
+qps_limit = 100 + +# Maximum number of bytes to accept in a single request body. +max_body_bytes = 2_621_440 + +# Specifies which origin will be reported as allowed by RPC server. +# +# If left empty, CORS will be disabled. +# If set to '*', any origin is allowed. +# Otherwise, only a specified origin is allowed. The given string must conform to the [origin scheme](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin). +cors_origin = '' + + +# ======================================================================== +# Configuration options for the speculative execution JSON-RPC HTTP server +# ======================================================================== +[rpc_server.speculative_exec_server] + +# Enables the speculative execution JSON-RPC HTTP server. +enable_server = true + +# Listening address for speculative execution JSON-RPC HTTP server. If the port +# is set to 0, a random port will be used. +# +# If the specified port cannot be bound to, a random port will be tried instead. +# If binding fails, the speculative execution JSON-RPC HTTP server will not run, +# but the node will be otherwise unaffected. +# +# The actual bound address will be reported via a log line if logging is enabled. +address = '0.0.0.0:7778' + +# The global max rate of requests (per second) before they are limited. +# Request will be delayed to the next 1 second bucket once limited. +qps_limit = 1 + +# Maximum number of bytes to accept in a single request body. +max_body_bytes = 2_621_440 + +# Specifies which origin will be reported as allowed by speculative execution server. +# +# If left empty, CORS will be disabled. +# If set to '*', any origin is allowed. +# Otherwise, only a specified origin is allowed. The given string must conform to the [origin scheme](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin). 
+cors_origin = '' + +# ========================================= +# Configuration options for the node client +# ========================================= +[rpc_server.node_client] +# The address of the node to connect to. +address = '127.0.0.1:28104' +# Maximum size of a request in bytes. +max_request_size_bytes = 4_194_304 +# Maximum size of a response in bytes. +max_response_size_bytes = 4_194_304 +# Maximum number of in-flight node requests. +request_limit = 3 +# Number of node requests that can be buffered. +request_buffer_size = 16 + +[rpc_server.node_client.exponential_backoff] +# The initial delay in milliseconds before the first retry. +initial_delay_ms = 1000 +# The maximum delay in milliseconds before a retry. +max_delay_ms = 32_000 +# The multiplier to apply to the previous delay to get the next delay. +coefficient = 2 +# Maximum number of connection attempts. +max_attempts = 30 diff --git a/resources/default_config.toml b/resources/example_configs/default_sse_only_config.toml similarity index 89% rename from resources/default_config.toml rename to resources/example_configs/default_sse_only_config.toml index b38ae9f3..45216224 100644 --- a/resources/default_config.toml +++ b/resources/example_configs/default_sse_only_config.toml @@ -1,4 +1,4 @@ -[[connections]] +[[sse_server.connections]] ip_address = "127.0.0.1" sse_port = 9999 rest_port = 8888 @@ -7,6 +7,11 @@ delay_between_retries_in_seconds = 5 allow_partial_connection = false enable_logging = true +[sse_server.event_stream_server] +port = 19999 +max_concurrent_subscribers = 100 +event_stream_buffer_length = 5000 + [storage] storage_path = "/var/lib/casper-event-sidecar" @@ -16,16 +21,11 @@ max_connections_in_pool = 100 # https://www.sqlite.org/compile.html#default_wal_autocheckpoint wal_autocheckpointing_interval = 1000 -[rest_server] +[rest_api_server] port = 18888 max_concurrent_requests = 50 max_requests_per_second = 50 -[event_stream_server] -port = 19999 -max_concurrent_subscribers = 100 
-event_stream_buffer_length = 5000 - [admin_server] port = 18887 max_concurrent_requests = 1 diff --git a/resources/test/rpc_schema.json b/resources/test/rpc_schema.json new file mode 100644 index 00000000..0878d503 --- /dev/null +++ b/resources/test/rpc_schema.json @@ -0,0 +1,7364 @@ +{ + "openrpc": "1.0.0-rc1", + "info": { + "version": "1.5.3", + "title": "Client API of Casper Node", + "description": "This describes the JSON-RPC 2.0 API of a node on the Casper network.", + "contact": { + "name": "Casper Labs", + "url": "https://casperlabs.io" + }, + "license": { + "name": "APACHE LICENSE, VERSION 2.0", + "url": "https://www.apache.org/licenses/LICENSE-2.0" + } + }, + "servers": [ + { + "name": "any Casper Network node", + "url": "http://IP:PORT/rpc/" + } + ], + "methods": [ + { + "name": "account_put_deploy", + "summary": "receives a Deploy to be executed by the network (DEPRECATED: use `account_put_transaction` instead)", + "params": [ + { + "name": "deploy", + "schema": { + "description": "The `Deploy`.", + "$ref": "#/components/schemas/Deploy" + }, + "required": true + } + ], + "result": { + "name": "account_put_deploy_result", + "schema": { + "description": "Result for \"account_put_deploy\" RPC response.", + "type": "object", + "required": [ + "api_version", + "deploy_hash" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "deploy_hash": { + "description": "The deploy hash.", + "$ref": "#/components/schemas/DeployHash" + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "account_put_deploy_example", + "params": [ + { + "name": "deploy", + "value": { + "hash": "5c9b3b099c1378aa8e4a5f07f59ff1fcdc69a83179427c7e67ae0377d94d93fa", + "header": { + "account": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "timestamp": "2020-11-17T00:39:24.072Z", + "ttl": "1h", + "gas_price": 1, + "body_hash": 
"d53cf72d17278fd47d399013ca389c50d589352f1a12593c0b8e01872a641b50", + "dependencies": [ + "0101010101010101010101010101010101010101010101010101010101010101" + ], + "chain_name": "casper-example" + }, + "payment": { + "StoredContractByName": { + "name": "casper-example", + "entry_point": "example-entry-point", + "args": [ + [ + "amount", + { + "cl_type": "I32", + "bytes": "e8030000", + "parsed": 1000 + } + ] + ] + } + }, + "session": { + "Transfer": { + "args": [ + [ + "amount", + { + "cl_type": "I32", + "bytes": "e8030000", + "parsed": 1000 + } + ] + ] + } + }, + "approvals": [ + { + "signer": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "signature": "014c1a89f92e29dd74fc648f741137d9caf4edba97c5f9799ce0c9aa6b0c9b58db368c64098603dbecef645774c05dff057cb1f91f2cf390bbacce78aa6f084007" + } + ] + } + } + ], + "result": { + "name": "account_put_deploy_example_result", + "value": { + "api_version": "1.5.3", + "deploy_hash": "5c9b3b099c1378aa8e4a5f07f59ff1fcdc69a83179427c7e67ae0377d94d93fa" + } + } + } + ] + }, + { + "name": "account_put_transaction", + "summary": "receives a Transaction to be executed by the network", + "params": [ + { + "name": "transaction", + "schema": { + "description": "The `Transaction`.", + "$ref": "#/components/schemas/Transaction" + }, + "required": true + } + ], + "result": { + "name": "account_put_transaction_result", + "schema": { + "description": "Result for \"account_put_transaction\" RPC response.", + "type": "object", + "required": [ + "api_version", + "transaction_hash" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "transaction_hash": { + "description": "The transaction hash.", + "$ref": "#/components/schemas/TransactionHash" + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "account_put_transaction_example", + "params": [ + { + "name": "transaction", + "value": { + "Version1": { + "hash": 
"6aaf4a54499e3757eb4be6967503dcc431e4623bf8bb57a14c1729a114a1aaa2", + "header": { + "chain_name": "casper-example", + "timestamp": "2020-11-17T00:39:24.072Z", + "ttl": "1h", + "body_hash": "d2433e28993036fbdf7c963cd753893fefe619e7dbb5c0cafa5cb03bcf3ff9db", + "pricing_mode": { + "GasPriceMultiplier": 1 + }, + "payment_amount": null, + "initiator_addr": { + "PublicKey": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c" + } + }, + "body": { + "args": [ + [ + "source", + { + "cl_type": "URef", + "bytes": "0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a07", + "parsed": "uref-0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a-007" + } + ], + [ + "target", + { + "cl_type": "URef", + "bytes": "1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b00", + "parsed": "uref-1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b-000" + } + ], + [ + "amount", + { + "cl_type": "U512", + "bytes": "0500ac23fc06", + "parsed": "30000000000" + } + ], + [ + "to", + { + "cl_type": { + "Option": { + "ByteArray": 32 + } + }, + "bytes": "012828282828282828282828282828282828282828282828282828282828282828", + "parsed": "2828282828282828282828282828282828282828282828282828282828282828" + } + ], + [ + "id", + { + "cl_type": { + "Option": "U64" + }, + "bytes": "01e703000000000000", + "parsed": 999 + } + ] + ], + "target": "Native", + "entry_point": "Transfer", + "scheduling": "Standard" + }, + "approvals": [ + { + "signer": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "signature": "012152c1eab67f63faa6a482ec4847ecd145c3b2c3e2affe763303ecb4ccf8618a1b2d24de7313fbf8a2ac1b5256471cc6bbf21745af15516331e5fc3d4a2fa201" + } + ] + } + } + } + ], + "result": { + "name": "account_put_transaction_example_result", + "value": { + "api_version": "1.5.3", + "transaction_hash": { + "Version1": "6aaf4a54499e3757eb4be6967503dcc431e4623bf8bb57a14c1729a114a1aaa2" + } + } + } + } + ] + }, + { + "name": "info_get_deploy", + 
"summary": "returns a Deploy from the network (DEPRECATED: use `info_get_transaction` instead)", + "params": [ + { + "name": "deploy_hash", + "schema": { + "description": "The deploy hash.", + "$ref": "#/components/schemas/DeployHash" + }, + "required": true + }, + { + "name": "finalized_approvals", + "schema": { + "description": "Whether to return the deploy with the finalized approvals substituted. If `false` or omitted, returns the deploy with the approvals that were originally received by the node.", + "default": false, + "type": "boolean" + }, + "required": false + } + ], + "result": { + "name": "info_get_deploy_result", + "schema": { + "description": "Result for \"info_get_deploy\" RPC response.", + "type": "object", + "required": [ + "api_version", + "deploy" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "deploy": { + "description": "The deploy.", + "$ref": "#/components/schemas/Deploy" + }, + "block_hash": { + "description": "The hash of the block in which the deploy was executed.", + "$ref": "#/components/schemas/BlockHash" + }, + "block_height": { + "description": "The height of the block in which the deploy was executed.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "execution_result": { + "description": "The execution result if known.", + "anyOf": [ + { + "$ref": "#/components/schemas/ExecutionResult" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "info_get_deploy_example", + "params": [ + { + "name": "deploy_hash", + "value": "5c9b3b099c1378aa8e4a5f07f59ff1fcdc69a83179427c7e67ae0377d94d93fa" + }, + { + "name": "finalized_approvals", + "value": true + } + ], + "result": { + "name": "info_get_deploy_example_result", + "value": { + "api_version": "1.5.3", + "deploy": { + "hash": "5c9b3b099c1378aa8e4a5f07f59ff1fcdc69a83179427c7e67ae0377d94d93fa", + "header": { + "account": 
"01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "timestamp": "2020-11-17T00:39:24.072Z", + "ttl": "1h", + "gas_price": 1, + "body_hash": "d53cf72d17278fd47d399013ca389c50d589352f1a12593c0b8e01872a641b50", + "dependencies": [ + "0101010101010101010101010101010101010101010101010101010101010101" + ], + "chain_name": "casper-example" + }, + "payment": { + "StoredContractByName": { + "name": "casper-example", + "entry_point": "example-entry-point", + "args": [ + [ + "amount", + { + "cl_type": "I32", + "bytes": "e8030000", + "parsed": 1000 + } + ] + ] + } + }, + "session": { + "Transfer": { + "args": [ + [ + "amount", + { + "cl_type": "I32", + "bytes": "e8030000", + "parsed": 1000 + } + ] + ] + } + }, + "approvals": [ + { + "signer": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "signature": "014c1a89f92e29dd74fc648f741137d9caf4edba97c5f9799ce0c9aa6b0c9b58db368c64098603dbecef645774c05dff057cb1f91f2cf390bbacce78aa6f084007" + } + ] + }, + "block_hash": "9ccc716f5f3c7ac238bf7aaad113c2add3586921a7966faffb3a5a253aa1d75e", + "block_height": 10, + "execution_result": { + "Version2": { + "Success": { + "effects": [ + { + "key": "account-hash-2c4a11c062a8a337bfc97e27fd66291caeb2c65865dcb5d3ef3759c4c97efecb", + "kind": { + "AddUInt64": 8 + } + }, + { + "key": "deploy-af684263911154d26fa05be9963171802801a0b6aff8f199b7391eacb8edc9e1", + "kind": "Identity" + } + ], + "transfers": [ + "transfer-5959595959595959595959595959595959595959595959595959595959595959", + "transfer-8282828282828282828282828282828282828282828282828282828282828282" + ], + "cost": "123456" + } + } + } + } + } + } + ] + }, + { + "name": "info_get_transaction", + "summary": "returns a Transaction from the network", + "params": [ + { + "name": "transaction_hash", + "schema": { + "description": "The transaction hash.", + "$ref": "#/components/schemas/TransactionHash" + }, + "required": true + }, + { + "name": "finalized_approvals", + "schema": { + "description": 
"Whether to return the transaction with the finalized approvals substituted. If `false` or omitted, returns the transaction with the approvals that were originally received by the node.", + "default": false, + "type": "boolean" + }, + "required": false + } + ], + "result": { + "name": "info_get_transaction_result", + "schema": { + "description": "Result for \"info_get_transaction\" RPC response.", + "type": "object", + "required": [ + "api_version", + "transaction" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "transaction": { + "description": "The transaction.", + "$ref": "#/components/schemas/Transaction" + }, + "block_hash": { + "description": "The hash of the block in which the deploy was executed.", + "$ref": "#/components/schemas/BlockHash" + }, + "block_height": { + "description": "The height of the block in which the deploy was executed.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "execution_result": { + "description": "The execution result if known.", + "anyOf": [ + { + "$ref": "#/components/schemas/ExecutionResult" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "info_get_transaction_example", + "params": [ + { + "name": "transaction_hash", + "value": { + "Version1": "6aaf4a54499e3757eb4be6967503dcc431e4623bf8bb57a14c1729a114a1aaa2" + } + }, + { + "name": "finalized_approvals", + "value": true + } + ], + "result": { + "name": "info_get_transaction_example_result", + "value": { + "api_version": "1.5.3", + "transaction": { + "Version1": { + "hash": "6aaf4a54499e3757eb4be6967503dcc431e4623bf8bb57a14c1729a114a1aaa2", + "header": { + "chain_name": "casper-example", + "timestamp": "2020-11-17T00:39:24.072Z", + "ttl": "1h", + "body_hash": "d2433e28993036fbdf7c963cd753893fefe619e7dbb5c0cafa5cb03bcf3ff9db", + "pricing_mode": { + "GasPriceMultiplier": 1 + }, + "payment_amount": null, + "initiator_addr": { + 
"PublicKey": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c" + } + }, + "body": { + "args": [ + [ + "source", + { + "cl_type": "URef", + "bytes": "0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a07", + "parsed": "uref-0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a-007" + } + ], + [ + "target", + { + "cl_type": "URef", + "bytes": "1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b00", + "parsed": "uref-1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b-000" + } + ], + [ + "amount", + { + "cl_type": "U512", + "bytes": "0500ac23fc06", + "parsed": "30000000000" + } + ], + [ + "to", + { + "cl_type": { + "Option": { + "ByteArray": 32 + } + }, + "bytes": "012828282828282828282828282828282828282828282828282828282828282828", + "parsed": "2828282828282828282828282828282828282828282828282828282828282828" + } + ], + [ + "id", + { + "cl_type": { + "Option": "U64" + }, + "bytes": "01e703000000000000", + "parsed": 999 + } + ] + ], + "target": "Native", + "entry_point": "Transfer", + "scheduling": "Standard" + }, + "approvals": [ + { + "signer": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "signature": "012152c1eab67f63faa6a482ec4847ecd145c3b2c3e2affe763303ecb4ccf8618a1b2d24de7313fbf8a2ac1b5256471cc6bbf21745af15516331e5fc3d4a2fa201" + } + ] + } + }, + "block_hash": "9ccc716f5f3c7ac238bf7aaad113c2add3586921a7966faffb3a5a253aa1d75e", + "block_height": 10, + "execution_result": { + "Version2": { + "Success": { + "effects": [ + { + "key": "account-hash-2c4a11c062a8a337bfc97e27fd66291caeb2c65865dcb5d3ef3759c4c97efecb", + "kind": { + "AddUInt64": 8 + } + }, + { + "key": "deploy-af684263911154d26fa05be9963171802801a0b6aff8f199b7391eacb8edc9e1", + "kind": "Identity" + } + ], + "transfers": [ + "transfer-5959595959595959595959595959595959595959595959595959595959595959", + "transfer-8282828282828282828282828282828282828282828282828282828282828282" + ], + "cost": "123456" + } + } + } 
+ } + } + } + ] + }, + { + "name": "state_get_account_info", + "summary": "returns an Account from the network", + "params": [ + { + "name": "account_identifier", + "schema": { + "description": "The public key of the Account.", + "$ref": "#/components/schemas/AccountIdentifier" + }, + "required": true + }, + { + "name": "block_identifier", + "schema": { + "description": "The block identifier.", + "anyOf": [ + { + "$ref": "#/components/schemas/BlockIdentifier" + }, + { + "type": "null" + } + ] + }, + "required": false + } + ], + "result": { + "name": "state_get_account_info_result", + "schema": { + "description": "Result for \"state_get_account_info\" RPC response.", + "type": "object", + "required": [ + "account", + "api_version", + "merkle_proof" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "account": { + "description": "The account.", + "$ref": "#/components/schemas/Account" + }, + "merkle_proof": { + "description": "The Merkle proof.", + "type": "string" + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "state_get_account_info_example", + "params": [ + { + "name": "account_identifier", + "value": "013b6a27bcceb6a42d62a3a8d02a6f0d73653215771de243a63ac048a18b59da29" + }, + { + "name": "block_identifier", + "value": { + "Hash": "0707070707070707070707070707070707070707070707070707070707070707" + } + } + ], + "result": { + "name": "state_get_account_info_example_result", + "value": { + "api_version": "1.5.3", + "account": { + "account_hash": "account-hash-e94daaff79c2ab8d9c31d9c3058d7d0a0dd31204a5638dc1451fa67b2e3fb88c", + "named_keys": [ + { + "name": "main_purse", + "key": "uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-007" + } + ], + "main_purse": "uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-007", + "associated_keys": [ + { + "account_hash": "account-hash-e94daaff79c2ab8d9c31d9c3058d7d0a0dd31204a5638dc1451fa67b2e3fb88c", 
+ "weight": 1 + } + ], + "action_thresholds": { + "deployment": 1, + "key_management": 1 + } + }, + "merkle_proof": "01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41dd035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce94500226a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d000003000000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c1bcbcee522649d2b135fe510fe3" + } + } + } + ] + }, + { + "name": "state_get_dictionary_item", + "summary": "returns an item from a Dictionary", + "params": [ + { + "name": "state_root_hash", + "schema": { + "description": "Hash of the state root", + "$ref": "#/components/schemas/Digest" + }, + "required": true + }, + { + "name": "dictionary_identifier", + "schema": { + "description": "The Dictionary query identifier.", + "$ref": "#/components/schemas/DictionaryIdentifier" + }, + "required": true + } + ], + "result": { + "name": "state_get_dictionary_item_result", + "schema": { + "description": "Result for \"state_get_dictionary_item\" RPC response.", + "type": "object", + "required": [ + "api_version", + "dictionary_key", + "merkle_proof", + "stored_value" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "dictionary_key": { + "description": "The key under which the value is stored.", + "type": 
"string" + }, + "stored_value": { + "description": "The stored value.", + "$ref": "#/components/schemas/StoredValue" + }, + "merkle_proof": { + "description": "The Merkle proof.", + "type": "string" + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "state_get_dictionary_item_example", + "params": [ + { + "name": "state_root_hash", + "value": "0808080808080808080808080808080808080808080808080808080808080808" + }, + { + "name": "dictionary_identifier", + "value": { + "URef": { + "seed_uref": "uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-007", + "dictionary_item_key": "a_unique_entry_identifier" + } + } + } + ], + "result": { + "name": "state_get_dictionary_item_example_result", + "value": { + "api_version": "1.5.3", + "dictionary_key": "dictionary-67518854aa916c97d4e53df8570c8217ccc259da2721b692102d76acd0ee8d1f", + "stored_value": { + "CLValue": { + "cl_type": "U64", + "bytes": "0100000000000000", + "parsed": 1 + } + }, + "merkle_proof": "01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41dd035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce94500226a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d000003000000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c1bcbcee522649d2b135fe510fe3" + } + } + } + ] + }, + { + "name": 
"query_global_state", + "summary": "a query to global state using either a Block hash or state root hash", + "params": [ + { + "name": "key", + "schema": { + "description": "The key under which to query.", + "$ref": "#/components/schemas/Key" + }, + "required": true + }, + { + "name": "state_identifier", + "schema": { + "description": "The identifier used for the query. If not provided, the tip of the chain will be used.", + "anyOf": [ + { + "$ref": "#/components/schemas/GlobalStateIdentifier" + }, + { + "type": "null" + } + ] + }, + "required": false + }, + { + "name": "path", + "schema": { + "description": "The path components starting from the key as base.", + "default": [], + "type": "array", + "items": { + "type": "string" + } + }, + "required": false + } + ], + "result": { + "name": "query_global_state_result", + "schema": { + "description": "Result for \"query_global_state\" RPC response.", + "type": "object", + "required": [ + "api_version", + "merkle_proof", + "stored_value" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "block_header": { + "description": "The block header if a Block hash was provided.", + "anyOf": [ + { + "$ref": "#/components/schemas/BlockHeader" + }, + { + "type": "null" + } + ] + }, + "stored_value": { + "description": "The stored value.", + "$ref": "#/components/schemas/StoredValue" + }, + "merkle_proof": { + "description": "The Merkle proof.", + "type": "string" + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "query_global_state_example", + "params": [ + { + "name": "state_identifier", + "value": { + "BlockHash": "9ccc716f5f3c7ac238bf7aaad113c2add3586921a7966faffb3a5a253aa1d75e" + } + }, + { + "name": "key", + "value": "deploy-af684263911154d26fa05be9963171802801a0b6aff8f199b7391eacb8edc9e1" + }, + { + "name": "path", + "value": [] + } + ], + "result": { + "name": "query_global_state_example_result", + "value": { + "api_version": "1.5.3", + 
"block_header": { + "Version2": { + "parent_hash": "0707070707070707070707070707070707070707070707070707070707070707", + "state_root_hash": "0808080808080808080808080808080808080808080808080808080808080808", + "body_hash": "0505050505050505050505050505050505050505050505050505050505050505", + "random_bit": true, + "accumulated_seed": "ac979f51525cfd979b14aa7dc0737c5154eabe0db9280eceaa8dc8d2905b20d5", + "era_end": { + "equivocators": [ + "013b6a27bcceb6a42d62a3a8d02a6f0d73653215771de243a63ac048a18b59da29" + ], + "inactive_validators": [ + "018139770ea87d175f56a35466c34c7ecccb8d8a91b4ee37a25df60f5b8fc9b394" + ], + "next_era_validator_weights": [ + { + "validator": "013b6a27bcceb6a42d62a3a8d02a6f0d73653215771de243a63ac048a18b59da29", + "weight": "123" + }, + { + "validator": "016e7a1cdd29b0b78fd13af4c5598feff4ef2a97166e3ca6f2e4fbfccd80505bf1", + "weight": "456" + }, + { + "validator": "018a875fff1eb38451577acd5afee405456568dd7c89e090863a0557bc7af49f17", + "weight": "789" + } + ], + "rewards": {} + }, + "timestamp": "2020-11-17T00:39:24.072Z", + "era_id": 1, + "height": 10, + "protocol_version": "1.0.0" + } + }, + "stored_value": { + "Account": { + "account_hash": "account-hash-e94daaff79c2ab8d9c31d9c3058d7d0a0dd31204a5638dc1451fa67b2e3fb88c", + "named_keys": [ + { + "name": "main_purse", + "key": "uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-007" + } + ], + "main_purse": "uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-007", + "associated_keys": [ + { + "account_hash": "account-hash-e94daaff79c2ab8d9c31d9c3058d7d0a0dd31204a5638dc1451fa67b2e3fb88c", + "weight": 1 + } + ], + "action_thresholds": { + "deployment": 1, + "key_management": 1 + } + } + }, + "merkle_proof": 
"01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41dd035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce94500226a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d000003000000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c1bcbcee522649d2b135fe510fe3" + } + } + } + ] + }, + { + "name": "query_balance", + "summary": "query for a balance using a purse identifier and a state identifier", + "params": [ + { + "name": "purse_identifier", + "schema": { + "description": "The identifier to obtain the purse corresponding to balance query.", + "$ref": "#/components/schemas/PurseIdentifier" + }, + "required": true + }, + { + "name": "state_identifier", + "schema": { + "description": "The state identifier used for the query, if none is passed the tip of the chain will be used.", + "anyOf": [ + { + "$ref": "#/components/schemas/GlobalStateIdentifier" + }, + { + "type": "null" + } + ] + }, + "required": false + } + ], + "result": { + "name": "query_balance_result", + "schema": { + "description": "Result for \"query_balance\" RPC response.", + "type": "object", + "required": [ + "api_version", + "balance" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "balance": { + "description": "The balance represented in motes.", + "$ref": 
"#/components/schemas/U512" + } + } + } + }, + "examples": [ + { + "name": "query_balance_example", + "params": [ + { + "name": "state_identifier", + "value": { + "BlockHash": "0707070707070707070707070707070707070707070707070707070707070707" + } + }, + { + "name": "purse_identifier", + "value": { + "main_purse_under_account_hash": "account-hash-0909090909090909090909090909090909090909090909090909090909090909" + } + } + ], + "result": { + "name": "query_balance_example_result", + "value": { + "api_version": "1.5.3", + "balance": "123456" + } + } + } + ] + }, + { + "name": "info_get_peers", + "summary": "returns a list of peers connected to the node", + "params": [], + "result": { + "name": "info_get_peers_result", + "schema": { + "description": "Result for \"info_get_peers\" RPC response.", + "type": "object", + "required": [ + "api_version", + "peers" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "peers": { + "description": "The node ID and network address of each connected peer.", + "$ref": "#/components/schemas/Peers" + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "info_get_peers_example", + "params": [], + "result": { + "name": "info_get_peers_example_result", + "value": { + "api_version": "1.5.3", + "peers": [ + { + "node_id": "tls:0101..0101", + "address": "127.0.0.1:54321" + } + ] + } + } + } + ] + }, + { + "name": "info_get_status", + "summary": "returns the current status of the node", + "params": [], + "result": { + "name": "info_get_status_result", + "schema": { + "description": "Result for \"info_get_status\" RPC response.", + "type": "object", + "required": [ + "api_version", + "available_block_range", + "block_sync", + "build_version", + "chainspec_name", + "last_progress", + "peers", + "reactor_state", + "starting_state_root_hash", + "uptime" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + 
"peers": { + "description": "The node ID and network address of each connected peer.", + "$ref": "#/components/schemas/Peers" + }, + "build_version": { + "description": "The compiled node version.", + "type": "string" + }, + "chainspec_name": { + "description": "The chainspec name.", + "type": "string" + }, + "starting_state_root_hash": { + "description": "The state root hash of the lowest block in the available block range.", + "$ref": "#/components/schemas/Digest" + }, + "last_added_block_info": { + "description": "The minimal info of the last block from the linear chain.", + "anyOf": [ + { + "$ref": "#/components/schemas/MinimalBlockInfo" + }, + { + "type": "null" + } + ] + }, + "our_public_signing_key": { + "description": "Our public signing key.", + "anyOf": [ + { + "$ref": "#/components/schemas/PublicKey" + }, + { + "type": "null" + } + ] + }, + "round_length": { + "description": "The next round length if this node is a validator.", + "anyOf": [ + { + "$ref": "#/components/schemas/TimeDiff" + }, + { + "type": "null" + } + ] + }, + "next_upgrade": { + "description": "Information about the next scheduled upgrade.", + "anyOf": [ + { + "$ref": "#/components/schemas/NextUpgrade" + }, + { + "type": "null" + } + ] + }, + "uptime": { + "description": "Time that passed since the node has started.", + "$ref": "#/components/schemas/TimeDiff" + }, + "reactor_state": { + "description": "The current state of node reactor.", + "$ref": "#/components/schemas/ReactorState" + }, + "last_progress": { + "description": "Timestamp of the last recorded progress in the reactor.", + "$ref": "#/components/schemas/Timestamp" + }, + "available_block_range": { + "description": "The available block range in storage.", + "$ref": "#/components/schemas/AvailableBlockRange" + }, + "block_sync": { + "description": "The status of the block synchronizer builders.", + "$ref": "#/components/schemas/BlockSynchronizerStatus" + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + 
"name": "info_get_status_example", + "params": [], + "result": { + "name": "info_get_status_example_result", + "value": { + "api_version": "1.5.3", + "peers": [ + { + "node_id": "tls:0101..0101", + "address": "127.0.0.1:54321" + } + ], + "build_version": "1.0.0-xxxxxxxxx@DEBUG", + "chainspec_name": "casper-example", + "starting_state_root_hash": "0000000000000000000000000000000000000000000000000000000000000000", + "last_added_block_info": { + "hash": "9ccc716f5f3c7ac238bf7aaad113c2add3586921a7966faffb3a5a253aa1d75e", + "timestamp": "2020-11-17T00:39:24.072Z", + "era_id": 1, + "height": 10, + "state_root_hash": "0808080808080808080808080808080808080808080808080808080808080808", + "creator": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c" + }, + "our_public_signing_key": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "round_length": "1m 5s 536ms", + "next_upgrade": { + "activation_point": 42, + "protocol_version": "2.0.1" + }, + "uptime": "13s", + "reactor_state": "Initialize", + "last_progress": "1970-01-01T00:00:00.000Z", + "available_block_range": { + "low": 0, + "high": 0 + }, + "block_sync": { + "historical": { + "block_hash": "16ddf28e2b3d2e17f4cef36f8b58827eca917af225d139b0c77df3b4a67dc55e", + "block_height": 40, + "acquisition_state": "have strict finality(40) for: block hash 16dd..c55e" + }, + "forward": { + "block_hash": "59907b1e32a9158169c4d89d9ce5ac9164fc31240bfcfb0969227ece06d74983", + "block_height": 6701, + "acquisition_state": "have block body(6701) for: block hash 5990..4983" + } + } + } + } + } + ] + }, + { + "name": "info_get_validator_changes", + "summary": "returns status changes of active validators", + "params": [], + "result": { + "name": "info_get_validator_changes_result", + "schema": { + "description": "Result for the \"info_get_validator_changes\" RPC.", + "type": "object", + "required": [ + "api_version", + "changes" + ], + "properties": { + "api_version": { + "description": "The RPC API 
version.", + "type": "string" + }, + "changes": { + "description": "The validators' status changes.", + "type": "array", + "items": { + "$ref": "#/components/schemas/JsonValidatorChanges" + } + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "info_get_validator_changes_example", + "params": [], + "result": { + "name": "info_get_validator_changes_example_result", + "value": { + "api_version": "1.5.3", + "changes": [ + { + "public_key": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "status_changes": [ + { + "era_id": 1, + "validator_change": "Added" + } + ] + } + ] + } + } + } + ] + }, + { + "name": "info_get_chainspec", + "summary": "returns the raw bytes of the chainspec.toml, genesis accounts.toml, and global_state.toml files", + "params": [], + "result": { + "name": "info_get_chainspec_result", + "schema": { + "description": "Result for the \"info_get_chainspec\" RPC.", + "type": "object", + "required": [ + "api_version", + "chainspec_bytes" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "chainspec_bytes": { + "description": "The chainspec file bytes.", + "$ref": "#/components/schemas/ChainspecRawBytes" + } + } + } + }, + "examples": [ + { + "name": "info_get_chainspec_example", + "params": [], + "result": { + "name": "info_get_chainspec_example_result", + "value": { + "api_version": "1.5.3", + "chainspec_bytes": { + "chainspec_bytes": "2a2a", + "maybe_genesis_accounts_bytes": null, + "maybe_global_state_bytes": null + } + } + } + } + ] + }, + { + "name": "chain_get_block", + "summary": "returns a Block from the network", + "params": [ + { + "name": "block_identifier", + "schema": { + "description": "The block identifier.", + "$ref": "#/components/schemas/BlockIdentifier" + }, + "required": false + } + ], + "result": { + "name": "chain_get_block_result", + "schema": { + "description": "Result for \"chain_get_block\" RPC response.", + "type": 
"object", + "required": [ + "api_version" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "block_with_signatures": { + "description": "The block, if found.", + "anyOf": [ + { + "$ref": "#/components/schemas/JsonBlockWithSignatures" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "chain_get_block_example", + "params": [ + { + "name": "block_identifier", + "value": { + "Hash": "9ccc716f5f3c7ac238bf7aaad113c2add3586921a7966faffb3a5a253aa1d75e" + } + } + ], + "result": { + "name": "chain_get_block_example_result", + "value": { + "api_version": "1.5.3", + "block_with_signatures": { + "block": { + "Version2": { + "hash": "9ccc716f5f3c7ac238bf7aaad113c2add3586921a7966faffb3a5a253aa1d75e", + "header": { + "parent_hash": "0707070707070707070707070707070707070707070707070707070707070707", + "state_root_hash": "0808080808080808080808080808080808080808080808080808080808080808", + "body_hash": "e49c0b878951cb6685cbfe86aa830090b2f8dab96304cb46ffa466879fdc8ae4", + "random_bit": true, + "accumulated_seed": "ac979f51525cfd979b14aa7dc0737c5154eabe0db9280eceaa8dc8d2905b20d5", + "era_end": { + "equivocators": [ + "013b6a27bcceb6a42d62a3a8d02a6f0d73653215771de243a63ac048a18b59da29" + ], + "inactive_validators": [ + "018139770ea87d175f56a35466c34c7ecccb8d8a91b4ee37a25df60f5b8fc9b394" + ], + "next_era_validator_weights": [ + { + "validator": "013b6a27bcceb6a42d62a3a8d02a6f0d73653215771de243a63ac048a18b59da29", + "weight": "123" + }, + { + "validator": "016e7a1cdd29b0b78fd13af4c5598feff4ef2a97166e3ca6f2e4fbfccd80505bf1", + "weight": "456" + }, + { + "validator": "018a875fff1eb38451577acd5afee405456568dd7c89e090863a0557bc7af49f17", + "weight": "789" + } + ], + "rewards": {} + }, + "timestamp": "2020-11-17T00:39:24.072Z", + "era_id": 1, + "height": 10, + "protocol_version": "1.0.0" + }, + "body": { + "proposer": 
"01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "transfer": [ + { + "Version1": "1414141414141414141414141414141414141414141414141414141414141414" + } + ], + "staking": [ + { + "Version1": "1515151515151515151515151515151515151515151515151515151515151515" + } + ], + "install_upgrade": [ + { + "Version1": "1616161616161616161616161616161616161616161616161616161616161616" + } + ], + "standard": [ + { + "Version1": "1717171717171717171717171717171717171717171717171717171717171717" + } + ], + "rewarded_signatures": [] + } + } + }, + "proofs": [ + { + "public_key": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "signature": "010fff61ef78aa2bc5ba549b287b67c50ce23f828e81633a5c0eb832863c101351738d94ad114a74a33fd5872e9fabe1b6a2042dd2c084a53ec75a5316a87bbf0f" + } + ] + } + } + } + } + ] + }, + { + "name": "chain_get_block_transfers", + "summary": "returns all transfers for a Block from the network", + "params": [ + { + "name": "block_identifier", + "schema": { + "description": "The block hash.", + "$ref": "#/components/schemas/BlockIdentifier" + }, + "required": false + } + ], + "result": { + "name": "chain_get_block_transfers_result", + "schema": { + "description": "Result for \"chain_get_block_transfers\" RPC response.", + "type": "object", + "required": [ + "api_version" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "block_hash": { + "description": "The block hash, if found.", + "anyOf": [ + { + "$ref": "#/components/schemas/BlockHash" + }, + { + "type": "null" + } + ] + }, + "transfers": { + "description": "The block's transfers, if found.", + "type": [ + "array", + "null" + ], + "items": { + "$ref": "#/components/schemas/Transfer" + } + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "chain_get_block_transfers_example", + "params": [ + { + "name": "block_identifier", + "value": { + "Hash": 
"0707070707070707070707070707070707070707070707070707070707070707" + } + } + ], + "result": { + "name": "chain_get_block_transfers_example_result", + "value": { + "api_version": "1.5.3", + "block_hash": "0707070707070707070707070707070707070707070707070707070707070707", + "transfers": [ + { + "deploy_hash": "0000000000000000000000000000000000000000000000000000000000000000", + "from": "account-hash-0000000000000000000000000000000000000000000000000000000000000000", + "to": null, + "source": "uref-0000000000000000000000000000000000000000000000000000000000000000-000", + "target": "uref-0000000000000000000000000000000000000000000000000000000000000000-000", + "amount": "0", + "gas": "0", + "id": null + } + ] + } + } + } + ] + }, + { + "name": "chain_get_state_root_hash", + "summary": "returns a state root hash at a given Block", + "params": [ + { + "name": "block_identifier", + "schema": { + "description": "The block hash.", + "$ref": "#/components/schemas/BlockIdentifier" + }, + "required": false + } + ], + "result": { + "name": "chain_get_state_root_hash_result", + "schema": { + "description": "Result for \"chain_get_state_root_hash\" RPC response.", + "type": "object", + "required": [ + "api_version" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "state_root_hash": { + "description": "Hex-encoded hash of the state root.", + "anyOf": [ + { + "$ref": "#/components/schemas/Digest" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "chain_get_state_root_hash_example", + "params": [ + { + "name": "block_identifier", + "value": { + "Height": 10 + } + } + ], + "result": { + "name": "chain_get_state_root_hash_example_result", + "value": { + "api_version": "1.5.3", + "state_root_hash": "0808080808080808080808080808080808080808080808080808080808080808" + } + } + } + ] + }, + { + "name": "state_get_item", + "summary": "returns a stored value from the 
network. This RPC is deprecated, use `query_global_state` instead.", + "params": [ + { + "name": "state_root_hash", + "schema": { + "description": "Hash of the state root.", + "$ref": "#/components/schemas/Digest" + }, + "required": true + }, + { + "name": "key", + "schema": { + "description": "The key under which to query.", + "$ref": "#/components/schemas/Key" + }, + "required": true + }, + { + "name": "path", + "schema": { + "description": "The path components starting from the key as base.", + "default": [], + "type": "array", + "items": { + "type": "string" + } + }, + "required": false + } + ], + "result": { + "name": "state_get_item_result", + "schema": { + "description": "Result for \"state_get_item\" RPC response.", + "type": "object", + "required": [ + "api_version", + "merkle_proof", + "stored_value" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "stored_value": { + "description": "The stored value.", + "$ref": "#/components/schemas/StoredValue" + }, + "merkle_proof": { + "description": "The Merkle proof.", + "type": "string" + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "state_get_item_example", + "params": [ + { + "name": "state_root_hash", + "value": "0808080808080808080808080808080808080808080808080808080808080808" + }, + { + "name": "key", + "value": "deploy-af684263911154d26fa05be9963171802801a0b6aff8f199b7391eacb8edc9e1" + }, + { + "name": "path", + "value": [ + "inner" + ] + } + ], + "result": { + "name": "state_get_item_example_result", + "value": { + "api_version": "1.5.3", + "stored_value": { + "CLValue": { + "cl_type": "U64", + "bytes": "0100000000000000", + "parsed": 1 + } + }, + "merkle_proof": 
"01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41dd035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce94500226a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d000003000000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c1bcbcee522649d2b135fe510fe3" + } + } + } + ] + }, + { + "name": "state_get_balance", + "summary": "returns a purse's balance from the network", + "params": [ + { + "name": "state_root_hash", + "schema": { + "description": "The hash of state root.", + "$ref": "#/components/schemas/Digest" + }, + "required": true + }, + { + "name": "purse_uref", + "schema": { + "description": "Formatted URef.", + "type": "string" + }, + "required": true + } + ], + "result": { + "name": "state_get_balance_result", + "schema": { + "description": "Result for \"state_get_balance\" RPC response.", + "type": "object", + "required": [ + "api_version", + "balance_value", + "merkle_proof" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "balance_value": { + "description": "The balance value.", + "$ref": "#/components/schemas/U512" + }, + "merkle_proof": { + "description": "The Merkle proof.", + "type": "string" + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "state_get_balance_example", + "params": [ + { + 
"name": "state_root_hash", + "value": "0808080808080808080808080808080808080808080808080808080808080808" + }, + { + "name": "purse_uref", + "value": "uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-007" + } + ], + "result": { + "name": "state_get_balance_example_result", + "value": { + "api_version": "1.5.3", + "balance_value": "123456", + "merkle_proof": "01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41dd035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce94500226a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d000003000000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c1bcbcee522649d2b135fe510fe3" + } + } + } + ] + }, + { + "name": "chain_get_era_info_by_switch_block", + "summary": "returns an EraInfo from the network", + "params": [ + { + "name": "block_identifier", + "schema": { + "description": "The block identifier.", + "$ref": "#/components/schemas/BlockIdentifier" + }, + "required": false + } + ], + "result": { + "name": "chain_get_era_info_by_switch_block_result", + "schema": { + "description": "Result for \"chain_get_era_info\" RPC response.", + "type": "object", + "required": [ + "api_version" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "era_summary": { + "description": "The era summary.", + 
"anyOf": [ + { + "$ref": "#/components/schemas/EraSummary" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "chain_get_era_info_by_switch_block_example", + "params": [ + { + "name": "block_identifier", + "value": { + "Hash": "9ccc716f5f3c7ac238bf7aaad113c2add3586921a7966faffb3a5a253aa1d75e" + } + } + ], + "result": { + "name": "chain_get_era_info_by_switch_block_example_result", + "value": { + "api_version": "1.5.3", + "era_summary": { + "block_hash": "9ccc716f5f3c7ac238bf7aaad113c2add3586921a7966faffb3a5a253aa1d75e", + "era_id": 42, + "stored_value": { + "EraInfo": { + "seigniorage_allocations": [ + { + "Delegator": { + "delegator_public_key": "01e1b46a25baa8a5c28beb3c9cfb79b572effa04076f00befa57eb70b016153f18", + "validator_public_key": "012a1732addc639ea43a89e25d3ad912e40232156dcaa4b9edfc709f43d2fb0876", + "amount": "1000" + } + }, + { + "Validator": { + "validator_public_key": "012a1732addc639ea43a89e25d3ad912e40232156dcaa4b9edfc709f43d2fb0876", + "amount": "2000" + } + } + ] + } + }, + "state_root_hash": "0808080808080808080808080808080808080808080808080808080808080808", + "merkle_proof": 
"01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41dd035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce94500226a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d000003000000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c1bcbcee522649d2b135fe510fe3" + } + } + } + } + ] + }, + { + "name": "state_get_auction_info", + "summary": "returns the bids and validators as of either a specific block (by height or hash), or the most recently added block", + "params": [ + { + "name": "block_identifier", + "schema": { + "description": "The block identifier.", + "$ref": "#/components/schemas/BlockIdentifier" + }, + "required": false + } + ], + "result": { + "name": "state_get_auction_info_result", + "schema": { + "description": "Result for \"state_get_auction_info\" RPC response.", + "type": "object", + "required": [ + "api_version", + "auction_state" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "auction_state": { + "description": "The auction state.", + "$ref": "#/components/schemas/AuctionState" + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "state_get_auction_info_example", + "params": [ + { + "name": "block_identifier", + "value": { + "Hash": 
"0707070707070707070707070707070707070707070707070707070707070707" + } + } + ], + "result": { + "name": "state_get_auction_info_example_result", + "value": { + "api_version": "1.5.3", + "auction_state": { + "state_root_hash": "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b", + "block_height": 10, + "era_validators": [ + { + "era_id": 10, + "validator_weights": [ + { + "public_key": "01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61", + "weight": "10" + } + ] + } + ], + "bids": [ + { + "public_key": "01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61", + "bid": { + "validator_public_key": "01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61", + "bonding_purse": "uref-fafafafafafafafafafafafafafafafafafafafafafafafafafafafafafafafa-007", + "staked_amount": "20", + "delegation_rate": 0, + "vesting_schedule": null, + "delegators": [ + { + "delegator_public_key": "014508a07aa941707f3eb2db94c8897a80b2c1197476b6de213ac273df7d86c4ff", + "delegator": { + "delegator_public_key": "014508a07aa941707f3eb2db94c8897a80b2c1197476b6de213ac273df7d86c4ff", + "staked_amount": "10", + "bonding_purse": "uref-fbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfb-007", + "validator_public_key": "01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61", + "vesting_schedule": null + } + } + ], + "inactive": false + } + } + ] + } + } + } + } + ] + }, + { + "name": "chain_get_era_summary", + "summary": "returns the era summary at either a specific block (by height or hash), or the most recently added block", + "params": [ + { + "name": "block_identifier", + "schema": { + "description": "The block identifier.", + "$ref": "#/components/schemas/BlockIdentifier" + }, + "required": false + } + ], + "result": { + "name": "chain_get_era_summary_result", + "schema": { + "description": "Result for \"chain_get_era_summary\" RPC response.", + "type": "object", + "required": [ + "api_version", + "era_summary" + ], + 
"properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "era_summary": { + "description": "The era summary.", + "$ref": "#/components/schemas/EraSummary" + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "chain_get_era_summary_example", + "params": [ + { + "name": "block_identifier", + "value": { + "Hash": "9ccc716f5f3c7ac238bf7aaad113c2add3586921a7966faffb3a5a253aa1d75e" + } + } + ], + "result": { + "name": "chain_get_era_summary_example_result", + "value": { + "api_version": "1.5.3", + "era_summary": { + "block_hash": "9ccc716f5f3c7ac238bf7aaad113c2add3586921a7966faffb3a5a253aa1d75e", + "era_id": 42, + "stored_value": { + "EraInfo": { + "seigniorage_allocations": [ + { + "Delegator": { + "delegator_public_key": "01e1b46a25baa8a5c28beb3c9cfb79b572effa04076f00befa57eb70b016153f18", + "validator_public_key": "012a1732addc639ea43a89e25d3ad912e40232156dcaa4b9edfc709f43d2fb0876", + "amount": "1000" + } + }, + { + "Validator": { + "validator_public_key": "012a1732addc639ea43a89e25d3ad912e40232156dcaa4b9edfc709f43d2fb0876", + "amount": "2000" + } + } + ] + } + }, + "state_root_hash": "0808080808080808080808080808080808080808080808080808080808080808", + "merkle_proof": 
"01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41dd035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce94500226a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d000003000000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c1bcbcee522649d2b135fe510fe3" + } + } + } + } + ] + } + ], + "components": { + "schemas": { + "Deploy": { + "description": "A signed smart contract.", + "type": "object", + "required": [ + "approvals", + "hash", + "header", + "payment", + "session" + ], + "properties": { + "hash": { + "$ref": "#/components/schemas/DeployHash" + }, + "header": { + "$ref": "#/components/schemas/DeployHeader" + }, + "payment": { + "$ref": "#/components/schemas/ExecutableDeployItem" + }, + "session": { + "$ref": "#/components/schemas/ExecutableDeployItem" + }, + "approvals": { + "type": "array", + "items": { + "$ref": "#/components/schemas/DeployApproval" + }, + "uniqueItems": true + } + }, + "additionalProperties": false + }, + "DeployHash": { + "description": "Hex-encoded deploy hash.", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" + } + ] + }, + "Digest": { + "description": "Hex-encoded hash digest.", + "type": "string" + }, + "DeployHeader": { + "description": "The header portion of a [`Deploy`].", + "type": "object", + "required": [ + "account", + "body_hash", + "chain_name", + 
"dependencies", + "gas_price", + "timestamp", + "ttl" + ], + "properties": { + "account": { + "$ref": "#/components/schemas/PublicKey" + }, + "timestamp": { + "$ref": "#/components/schemas/Timestamp" + }, + "ttl": { + "$ref": "#/components/schemas/TimeDiff" + }, + "gas_price": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "body_hash": { + "$ref": "#/components/schemas/Digest" + }, + "dependencies": { + "type": "array", + "items": { + "$ref": "#/components/schemas/DeployHash" + } + }, + "chain_name": { + "type": "string" + } + }, + "additionalProperties": false + }, + "PublicKey": { + "description": "Hex-encoded cryptographic public key, including the algorithm tag prefix.", + "examples": [ + { + "name": "SystemPublicKey", + "description": "A pseudo public key, used for example when the system proposes an immediate switch block after a network upgrade rather than a specific validator. Its hex-encoded value is always '00', as is the corresponding pseudo signature's", + "value": "00" + }, + { + "name": "Ed25519PublicKey", + "description": "An Ed25519 public key. Its hex-encoded value begins '01' and is followed by 64 characters", + "value": "018a88e3dd7409f195fd52db2d3cba5d72ca6709bf1d94121bf3748801b40f6f5c" + }, + { + "name": "Secp256k1PublicKey", + "description": "A secp256k1 public key. 
Its hex-encoded value begins '02' and is followed by 66 characters", + "value": "0203408e9526316fd1f8def480dd45b2cc72ffd732771c9ceb5d92ffa4051e6ee084" + } + ], + "type": "string" + }, + "Timestamp": { + "description": "Timestamp formatted as per RFC 3339", + "type": "string" + }, + "TimeDiff": { + "description": "Human-readable duration.", + "type": "string" + }, + "ExecutableDeployItem": { + "description": "The executable component of a [`Deploy`].", + "oneOf": [ + { + "description": "Executable specified as raw bytes that represent Wasm code and an instance of [`RuntimeArgs`].", + "type": "object", + "required": [ + "ModuleBytes" + ], + "properties": { + "ModuleBytes": { + "type": "object", + "required": [ + "args", + "module_bytes" + ], + "properties": { + "module_bytes": { + "description": "Hex-encoded raw Wasm bytes.", + "allOf": [ + { + "$ref": "#/components/schemas/Bytes" + } + ] + }, + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": "#/components/schemas/RuntimeArgs" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "Stored contract referenced by its [`AddressableEntityHash`], entry point and an instance of [`RuntimeArgs`].", + "type": "object", + "required": [ + "StoredContractByHash" + ], + "properties": { + "StoredContractByHash": { + "type": "object", + "required": [ + "args", + "entry_point", + "hash" + ], + "properties": { + "hash": { + "description": "Hex-encoded contract hash.", + "allOf": [ + { + "$ref": "#/components/schemas/AddressableEntityHash" + } + ] + }, + "entry_point": { + "description": "Name of an entry point.", + "type": "string" + }, + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": "#/components/schemas/RuntimeArgs" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "Stored contract referenced by a named key existing in the signer's account context, 
entry point and an instance of [`RuntimeArgs`].", + "type": "object", + "required": [ + "StoredContractByName" + ], + "properties": { + "StoredContractByName": { + "type": "object", + "required": [ + "args", + "entry_point", + "name" + ], + "properties": { + "name": { + "description": "Named key.", + "type": "string" + }, + "entry_point": { + "description": "Name of an entry point.", + "type": "string" + }, + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": "#/components/schemas/RuntimeArgs" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "Stored versioned contract referenced by its [`PackageHash`], entry point and an instance of [`RuntimeArgs`].", + "type": "object", + "required": [ + "StoredVersionedContractByHash" + ], + "properties": { + "StoredVersionedContractByHash": { + "type": "object", + "required": [ + "args", + "entry_point", + "hash" + ], + "properties": { + "hash": { + "description": "Hex-encoded contract package hash.", + "allOf": [ + { + "$ref": "#/components/schemas/PackageHash" + } + ] + }, + "version": { + "description": "An optional version of the contract to call. 
It will default to the highest enabled version if no value is specified.", + "type": [ + "integer", + "null" + ], + "format": "uint32", + "minimum": 0.0 + }, + "entry_point": { + "description": "Entry point name.", + "type": "string" + }, + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": "#/components/schemas/RuntimeArgs" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "Stored versioned contract referenced by a named key existing in the signer's account context, entry point and an instance of [`RuntimeArgs`].", + "type": "object", + "required": [ + "StoredVersionedContractByName" + ], + "properties": { + "StoredVersionedContractByName": { + "type": "object", + "required": [ + "args", + "entry_point", + "name" + ], + "properties": { + "name": { + "description": "Named key.", + "type": "string" + }, + "version": { + "description": "An optional version of the contract to call. It will default to the highest enabled version if no value is specified.", + "type": [ + "integer", + "null" + ], + "format": "uint32", + "minimum": 0.0 + }, + "entry_point": { + "description": "Entry point name.", + "type": "string" + }, + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": "#/components/schemas/RuntimeArgs" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "A native transfer which does not contain or reference a Wasm code.", + "type": "object", + "required": [ + "Transfer" + ], + "properties": { + "Transfer": { + "type": "object", + "required": [ + "args" + ], + "properties": { + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": "#/components/schemas/RuntimeArgs" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + ] + }, + "Bytes": { + "description": "Hex-encoded bytes.", + "type": "string" + }, + "RuntimeArgs": { + 
"description": "Represents a collection of arguments passed to a smart contract.", + "type": "array", + "items": { + "$ref": "#/components/schemas/NamedArg" + } + }, + "NamedArg": { + "description": "Named arguments to a contract.", + "type": "array", + "items": [ + { + "type": "string" + }, + { + "$ref": "#/components/schemas/CLValue" + } + ], + "maxItems": 2, + "minItems": 2 + }, + "CLValue": { + "description": "A Casper value, i.e. a value which can be stored and manipulated by smart contracts.\n\nIt holds the underlying data as a type-erased, serialized `Vec` and also holds the CLType of the underlying data as a separate member.\n\nThe `parsed` field, representing the original value, is a convenience only available when a CLValue is encoded to JSON, and can always be set to null if preferred.", + "type": "object", + "required": [ + "bytes", + "cl_type" + ], + "properties": { + "cl_type": { + "$ref": "#/components/schemas/CLType" + }, + "bytes": { + "type": "string" + }, + "parsed": true + }, + "additionalProperties": false + }, + "CLType": { + "description": "Casper types, i.e. 
types which can be stored and manipulated by smart contracts.\n\nProvides a description of the underlying data type of a [`CLValue`](crate::CLValue).", + "oneOf": [ + { + "description": "`bool` primitive.", + "type": "string", + "enum": [ + "Bool" + ] + }, + { + "description": "`i32` primitive.", + "type": "string", + "enum": [ + "I32" + ] + }, + { + "description": "`i64` primitive.", + "type": "string", + "enum": [ + "I64" + ] + }, + { + "description": "`u8` primitive.", + "type": "string", + "enum": [ + "U8" + ] + }, + { + "description": "`u32` primitive.", + "type": "string", + "enum": [ + "U32" + ] + }, + { + "description": "`u64` primitive.", + "type": "string", + "enum": [ + "U64" + ] + }, + { + "description": "[`U128`] large unsigned integer type.", + "type": "string", + "enum": [ + "U128" + ] + }, + { + "description": "[`U256`] large unsigned integer type.", + "type": "string", + "enum": [ + "U256" + ] + }, + { + "description": "[`U512`] large unsigned integer type.", + "type": "string", + "enum": [ + "U512" + ] + }, + { + "description": "`()` primitive.", + "type": "string", + "enum": [ + "Unit" + ] + }, + { + "description": "`String` primitive.", + "type": "string", + "enum": [ + "String" + ] + }, + { + "description": "[`Key`] system type.", + "type": "string", + "enum": [ + "Key" + ] + }, + { + "description": "[`URef`] system type.", + "type": "string", + "enum": [ + "URef" + ] + }, + { + "description": "[`PublicKey`](crate::PublicKey) system type.", + "type": "string", + "enum": [ + "PublicKey" + ] + }, + { + "description": "`Option` of a `CLType`.", + "type": "object", + "required": [ + "Option" + ], + "properties": { + "Option": { + "$ref": "#/components/schemas/CLType" + } + }, + "additionalProperties": false + }, + { + "description": "Variable-length list of a single `CLType` (comparable to a `Vec`).", + "type": "object", + "required": [ + "List" + ], + "properties": { + "List": { + "$ref": "#/components/schemas/CLType" + } + }, + 
"additionalProperties": false + }, + { + "description": "Fixed-length list of a single `CLType` (comparable to a Rust array).", + "type": "object", + "required": [ + "ByteArray" + ], + "properties": { + "ByteArray": { + "type": "integer", + "format": "uint32", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + { + "description": "`Result` with `Ok` and `Err` variants of `CLType`s.", + "type": "object", + "required": [ + "Result" + ], + "properties": { + "Result": { + "type": "object", + "required": [ + "err", + "ok" + ], + "properties": { + "ok": { + "$ref": "#/components/schemas/CLType" + }, + "err": { + "$ref": "#/components/schemas/CLType" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "Map with keys of a single `CLType` and values of a single `CLType`.", + "type": "object", + "required": [ + "Map" + ], + "properties": { + "Map": { + "type": "object", + "required": [ + "key", + "value" + ], + "properties": { + "key": { + "$ref": "#/components/schemas/CLType" + }, + "value": { + "$ref": "#/components/schemas/CLType" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "1-ary tuple of a `CLType`.", + "type": "object", + "required": [ + "Tuple1" + ], + "properties": { + "Tuple1": { + "type": "array", + "items": { + "$ref": "#/components/schemas/CLType" + }, + "maxItems": 1, + "minItems": 1 + } + }, + "additionalProperties": false + }, + { + "description": "2-ary tuple of `CLType`s.", + "type": "object", + "required": [ + "Tuple2" + ], + "properties": { + "Tuple2": { + "type": "array", + "items": { + "$ref": "#/components/schemas/CLType" + }, + "maxItems": 2, + "minItems": 2 + } + }, + "additionalProperties": false + }, + { + "description": "3-ary tuple of `CLType`s.", + "type": "object", + "required": [ + "Tuple3" + ], + "properties": { + "Tuple3": { + "type": "array", + "items": { + "$ref": "#/components/schemas/CLType" + }, + 
"maxItems": 3, + "minItems": 3 + } + }, + "additionalProperties": false + }, + { + "description": "Unspecified type.", + "type": "string", + "enum": [ + "Any" + ] + } + ] + }, + "AddressableEntityHash": { + "description": "The hex-encoded address of the addressable entity.", + "type": "string" + }, + "PackageHash": { + "description": "The hex-encoded address of the Package.", + "type": "string" + }, + "DeployApproval": { + "description": "A struct containing a signature of a deploy hash and the public key of the signer.", + "type": "object", + "required": [ + "signature", + "signer" + ], + "properties": { + "signer": { + "$ref": "#/components/schemas/PublicKey" + }, + "signature": { + "$ref": "#/components/schemas/Signature" + } + }, + "additionalProperties": false + }, + "Signature": { + "description": "Hex-encoded cryptographic signature, including the algorithm tag prefix.", + "type": "string" + }, + "Transaction": { + "description": "A versioned wrapper for a transaction or deploy.", + "oneOf": [ + { + "description": "A deploy.", + "type": "object", + "required": [ + "Deploy" + ], + "properties": { + "Deploy": { + "$ref": "#/components/schemas/Deploy" + } + }, + "additionalProperties": false + }, + { + "description": "A version 1 transaction.", + "type": "object", + "required": [ + "Version1" + ], + "properties": { + "Version1": { + "$ref": "#/components/schemas/TransactionV1" + } + }, + "additionalProperties": false + } + ] + }, + "TransactionV1": { + "description": "A unit of work sent by a client to the network, which when executed can cause global state to be altered.", + "type": "object", + "required": [ + "approvals", + "body", + "hash", + "header" + ], + "properties": { + "hash": { + "$ref": "#/components/schemas/TransactionV1Hash" + }, + "header": { + "$ref": "#/components/schemas/TransactionV1Header" + }, + "body": { + "$ref": "#/components/schemas/TransactionV1Body" + }, + "approvals": { + "type": "array", + "items": { + "$ref": 
"#/components/schemas/TransactionV1Approval" + }, + "uniqueItems": true + } + }, + "additionalProperties": false + }, + "TransactionV1Hash": { + "description": "Hex-encoded TransactionV1 hash.", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" + } + ] + }, + "TransactionV1Header": { + "description": "The header portion of a TransactionV1.", + "type": "object", + "required": [ + "body_hash", + "chain_name", + "initiator_addr", + "pricing_mode", + "timestamp", + "ttl" + ], + "properties": { + "chain_name": { + "type": "string" + }, + "timestamp": { + "$ref": "#/components/schemas/Timestamp" + }, + "ttl": { + "$ref": "#/components/schemas/TimeDiff" + }, + "body_hash": { + "$ref": "#/components/schemas/Digest" + }, + "pricing_mode": { + "$ref": "#/components/schemas/PricingMode" + }, + "payment_amount": { + "type": [ + "integer", + "null" + ], + "format": "uint64", + "minimum": 0.0 + }, + "initiator_addr": { + "$ref": "#/components/schemas/InitiatorAddr" + } + }, + "additionalProperties": false + }, + "PricingMode": { + "description": "Pricing mode of a Transaction.", + "oneOf": [ + { + "description": "Multiplies the gas used by the given amount.\n\nThis is the same behaviour as for the `Deploy::gas_price`.", + "type": "object", + "required": [ + "GasPriceMultiplier" + ], + "properties": { + "GasPriceMultiplier": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + { + "description": "First-in-first-out handling of transactions, i.e. 
pricing mode is irrelevant to ordering.", + "type": "string", + "enum": [ + "Fixed" + ] + }, + { + "description": "The payment for this transaction was previously reserved.", + "type": "string", + "enum": [ + "Reserved" + ] + } + ] + }, + "InitiatorAddr": { + "description": "The address of the initiator of a TransactionV1.", + "oneOf": [ + { + "description": "The public key of the initiator.", + "type": "object", + "required": [ + "PublicKey" + ], + "properties": { + "PublicKey": { + "$ref": "#/components/schemas/PublicKey" + } + }, + "additionalProperties": false + }, + { + "description": "The account hash derived from the public key of the initiator.", + "type": "object", + "required": [ + "AccountHash" + ], + "properties": { + "AccountHash": { + "$ref": "#/components/schemas/AccountHash" + } + }, + "additionalProperties": false + }, + { + "description": "Hex-encoded entity address of the initiator.", + "type": "object", + "required": [ + "EntityAddr" + ], + "properties": { + "EntityAddr": { + "type": "string" + } + }, + "additionalProperties": false + } + ] + }, + "AccountHash": { + "description": "Account hash as a formatted string.", + "type": "string" + }, + "TransactionV1Body": { + "description": "Body of a `TransactionV1`.", + "type": "object", + "required": [ + "args", + "entry_point", + "scheduling", + "target" + ], + "properties": { + "args": { + "$ref": "#/components/schemas/RuntimeArgs" + }, + "target": { + "$ref": "#/components/schemas/TransactionTarget" + }, + "entry_point": { + "$ref": "#/components/schemas/TransactionEntryPoint" + }, + "scheduling": { + "$ref": "#/components/schemas/TransactionScheduling" + } + }, + "additionalProperties": false + }, + "TransactionTarget": { + "description": "Execution target of a Transaction.", + "oneOf": [ + { + "description": "The execution target is a native operation (e.g. 
a transfer).", + "type": "string", + "enum": [ + "Native" + ] + }, + { + "description": "The execution target is a stored entity or package.", + "type": "object", + "required": [ + "Stored" + ], + "properties": { + "Stored": { + "type": "object", + "required": [ + "id", + "runtime" + ], + "properties": { + "id": { + "description": "The identifier of the stored execution target.", + "allOf": [ + { + "$ref": "#/components/schemas/TransactionInvocationTarget" + } + ] + }, + "runtime": { + "description": "The execution runtime to use.", + "allOf": [ + { + "$ref": "#/components/schemas/TransactionRuntime" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "The execution target is the included module bytes, i.e. compiled Wasm.", + "type": "object", + "required": [ + "Session" + ], + "properties": { + "Session": { + "type": "object", + "required": [ + "kind", + "module_bytes", + "runtime" + ], + "properties": { + "kind": { + "description": "The kind of session.", + "allOf": [ + { + "$ref": "#/components/schemas/TransactionSessionKind" + } + ] + }, + "module_bytes": { + "description": "The compiled Wasm.", + "allOf": [ + { + "$ref": "#/components/schemas/Bytes" + } + ] + }, + "runtime": { + "description": "The execution runtime to use.", + "allOf": [ + { + "$ref": "#/components/schemas/TransactionRuntime" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + ] + }, + "TransactionInvocationTarget": { + "description": "Identifier of a `Stored` transaction target.", + "oneOf": [ + { + "description": "Hex-encoded entity address identifying the invocable entity.", + "type": "object", + "required": [ + "InvocableEntity" + ], + "properties": { + "InvocableEntity": { + "type": "string" + } + }, + "additionalProperties": false + }, + { + "description": "The alias identifying the invocable entity.", + "type": "object", + "required": [ + "InvocableEntityAlias" + ], + 
"properties": { + "InvocableEntityAlias": { + "type": "string" + } + }, + "additionalProperties": false + }, + { + "description": "The address and optional version identifying the package.", + "type": "object", + "required": [ + "Package" + ], + "properties": { + "Package": { + "type": "object", + "required": [ + "addr" + ], + "properties": { + "addr": { + "description": "Hex-encoded address of the package.", + "type": "string" + }, + "version": { + "description": "The package version.\n\nIf `None`, the latest enabled version is implied.", + "type": [ + "integer", + "null" + ], + "format": "uint32", + "minimum": 0.0 + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "The alias and optional version identifying the package.", + "type": "object", + "required": [ + "PackageAlias" + ], + "properties": { + "PackageAlias": { + "type": "object", + "required": [ + "alias" + ], + "properties": { + "alias": { + "description": "The package alias.", + "type": "string" + }, + "version": { + "description": "The package version.\n\nIf `None`, the latest enabled version is implied.", + "type": [ + "integer", + "null" + ], + "format": "uint32", + "minimum": 0.0 + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + ] + }, + "TransactionRuntime": { + "description": "Runtime used to execute a Transaction.", + "oneOf": [ + { + "description": "The Casper Version 1 Virtual Machine.", + "type": "string", + "enum": [ + "VmCasperV1" + ] + } + ] + }, + "TransactionSessionKind": { + "description": "Session kind of a Transaction.", + "oneOf": [ + { + "description": "A standard (non-special-case) session.\n\nThis kind of session is not allowed to install or upgrade a stored contract, but can call stored contracts.", + "type": "string", + "enum": [ + "Standard" + ] + }, + { + "description": "A session which installs a stored contract.", + "type": "string", + "enum": [ + "Installer" + ] + }, + { + 
"description": "A session which upgrades a previously-installed stored contract. Such a session must have \"package_id: PackageIdentifier\" runtime arg present.", + "type": "string", + "enum": [ + "Upgrader" + ] + }, + { + "description": "A session which doesn't call any stored contracts.\n\nThis kind of session is not allowed to install or upgrade a stored contract.", + "type": "string", + "enum": [ + "Isolated" + ] + } + ] + }, + "TransactionEntryPoint": { + "description": "Entry point of a Transaction.", + "oneOf": [ + { + "description": "A non-native, arbitrary entry point.", + "type": "object", + "required": [ + "Custom" + ], + "properties": { + "Custom": { + "type": "string" + } + }, + "additionalProperties": false + }, + { + "description": "The `transfer` native entry point, used to transfer `Motes` from a source purse to a target purse.", + "type": "string", + "enum": [ + "Transfer" + ] + }, + { + "description": "The `add_bid` native entry point, used to create or top off a bid purse.", + "type": "string", + "enum": [ + "AddBid" + ] + }, + { + "description": "The `withdraw_bid` native entry point, used to decrease a stake.", + "type": "string", + "enum": [ + "WithdrawBid" + ] + }, + { + "description": "The `delegate` native entry point, used to add a new delegator or increase an existing delegator's stake.", + "type": "string", + "enum": [ + "Delegate" + ] + }, + { + "description": "The `undelegate` native entry point, used to reduce a delegator's stake or remove the delegator if the remaining stake is 0.", + "type": "string", + "enum": [ + "Undelegate" + ] + }, + { + "description": "The `redelegate` native entry point, used to reduce a delegator's stake or remove the delegator if the remaining stake is 0, and after the unbonding delay, automatically delegate to a new validator.", + "type": "string", + "enum": [ + "Redelegate" + ] + } + ] + }, + "TransactionScheduling": { + "description": "Scheduling mode of a Transaction.", + "oneOf": [ + { + 
"description": "No special scheduling applied.", + "type": "string", + "enum": [ + "Standard" + ] + }, + { + "description": "Execution should be scheduled for the specified era.", + "type": "object", + "required": [ + "FutureEra" + ], + "properties": { + "FutureEra": { + "$ref": "#/components/schemas/EraId" + } + }, + "additionalProperties": false + }, + { + "description": "Execution should be scheduled for the specified timestamp or later.", + "type": "object", + "required": [ + "FutureTimestamp" + ], + "properties": { + "FutureTimestamp": { + "$ref": "#/components/schemas/Timestamp" + } + }, + "additionalProperties": false + } + ] + }, + "EraId": { + "description": "Era ID newtype.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "TransactionV1Approval": { + "description": "A struct containing a signature of a transaction hash and the public key of the signer.", + "type": "object", + "required": [ + "signature", + "signer" + ], + "properties": { + "signer": { + "$ref": "#/components/schemas/PublicKey" + }, + "signature": { + "$ref": "#/components/schemas/Signature" + } + }, + "additionalProperties": false + }, + "TransactionHash": { + "description": "A versioned wrapper for a transaction hash or deploy hash.", + "oneOf": [ + { + "description": "A deploy hash.", + "type": "object", + "required": [ + "Deploy" + ], + "properties": { + "Deploy": { + "$ref": "#/components/schemas/DeployHash" + } + }, + "additionalProperties": false + }, + { + "description": "A version 1 transaction hash.", + "type": "object", + "required": [ + "Version1" + ], + "properties": { + "Version1": { + "$ref": "#/components/schemas/TransactionV1Hash" + } + }, + "additionalProperties": false + } + ] + }, + "BlockHash": { + "description": "Hex-encoded cryptographic hash of a block.", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" + } + ] + }, + "ExecutionResult": { + "description": "The versioned result of executing a single deploy.", + "oneOf": [ + { + 
"description": "Version 1 of execution result type.", + "type": "object", + "required": [ + "Version1" + ], + "properties": { + "Version1": { + "$ref": "#/components/schemas/ExecutionResultV1" + } + }, + "additionalProperties": false + }, + { + "description": "Version 2 of execution result type.", + "type": "object", + "required": [ + "Version2" + ], + "properties": { + "Version2": { + "$ref": "#/components/schemas/ExecutionResultV2" + } + }, + "additionalProperties": false + } + ] + }, + "ExecutionResultV1": { + "description": "The result of executing a single deploy.", + "oneOf": [ + { + "description": "The result of a failed execution.", + "type": "object", + "required": [ + "Failure" + ], + "properties": { + "Failure": { + "type": "object", + "required": [ + "cost", + "effect", + "error_message", + "transfers" + ], + "properties": { + "effect": { + "description": "The effect of executing the deploy.", + "allOf": [ + { + "$ref": "#/components/schemas/ExecutionEffect" + } + ] + }, + "transfers": { + "description": "A record of Transfers performed while executing the deploy.", + "type": "array", + "items": { + "$ref": "#/components/schemas/TransferAddr" + } + }, + "cost": { + "description": "The cost of executing the deploy.", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + }, + "error_message": { + "description": "The error message associated with executing the deploy.", + "type": "string" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "The result of a successful execution.", + "type": "object", + "required": [ + "Success" + ], + "properties": { + "Success": { + "type": "object", + "required": [ + "cost", + "effect", + "transfers" + ], + "properties": { + "effect": { + "description": "The effect of executing the deploy.", + "allOf": [ + { + "$ref": "#/components/schemas/ExecutionEffect" + } + ] + }, + "transfers": { + "description": "A record of Transfers performed while executing the 
deploy.", + "type": "array", + "items": { + "$ref": "#/components/schemas/TransferAddr" + } + }, + "cost": { + "description": "The cost of executing the deploy.", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + ] + }, + "ExecutionEffect": { + "description": "The sequence of execution transforms from a single deploy.", + "type": "object", + "required": [ + "operations", + "transforms" + ], + "properties": { + "operations": { + "description": "The resulting operations.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Operation" + } + }, + "transforms": { + "description": "The sequence of execution transforms.", + "type": "array", + "items": { + "$ref": "#/components/schemas/TransformEntry" + } + } + }, + "additionalProperties": false + }, + "Operation": { + "description": "An operation performed while executing a deploy.", + "type": "object", + "required": [ + "key", + "kind" + ], + "properties": { + "key": { + "description": "The formatted string of the `Key`.", + "type": "string" + }, + "kind": { + "description": "The type of operation.", + "allOf": [ + { + "$ref": "#/components/schemas/OpKind" + } + ] + } + }, + "additionalProperties": false + }, + "OpKind": { + "description": "The type of operation performed while executing a deploy.", + "oneOf": [ + { + "description": "A read operation.", + "type": "string", + "enum": [ + "Read" + ] + }, + { + "description": "A write operation.", + "type": "string", + "enum": [ + "Write" + ] + }, + { + "description": "An addition.", + "type": "string", + "enum": [ + "Add" + ] + }, + { + "description": "An operation which has no effect.", + "type": "string", + "enum": [ + "NoOp" + ] + }, + { + "description": "A prune operation.", + "type": "string", + "enum": [ + "Prune" + ] + } + ] + }, + "TransformEntry": { + "description": "A transformation performed while executing a deploy.", + "type": "object", + 
"required": [ + "key", + "transform" + ], + "properties": { + "key": { + "description": "The formatted string of the `Key`.", + "type": "string" + }, + "transform": { + "description": "The transformation.", + "allOf": [ + { + "$ref": "#/components/schemas/TransformV1" + } + ] + } + }, + "additionalProperties": false + }, + "TransformV1": { + "description": "The actual transformation performed while executing a deploy.", + "oneOf": [ + { + "description": "A transform having no effect.", + "type": "string", + "enum": [ + "Identity" + ] + }, + { + "description": "Writes the given CLValue to global state.", + "type": "object", + "required": [ + "WriteCLValue" + ], + "properties": { + "WriteCLValue": { + "$ref": "#/components/schemas/CLValue" + } + }, + "additionalProperties": false + }, + { + "description": "Writes the given Account to global state.", + "type": "object", + "required": [ + "WriteAccount" + ], + "properties": { + "WriteAccount": { + "$ref": "#/components/schemas/AccountHash" + } + }, + "additionalProperties": false + }, + { + "description": "Writes a smart contract as Wasm to global state.", + "type": "string", + "enum": [ + "WriteContractWasm" + ] + }, + { + "description": "Writes a smart contract to global state.", + "type": "string", + "enum": [ + "WriteContract" + ] + }, + { + "description": "Writes a smart contract package to global state.", + "type": "string", + "enum": [ + "WriteContractPackage" + ] + }, + { + "description": "Writes the given DeployInfo to global state.", + "type": "object", + "required": [ + "WriteDeployInfo" + ], + "properties": { + "WriteDeployInfo": { + "$ref": "#/components/schemas/DeployInfo" + } + }, + "additionalProperties": false + }, + { + "description": "Writes the given EraInfo to global state.", + "type": "object", + "required": [ + "WriteEraInfo" + ], + "properties": { + "WriteEraInfo": { + "$ref": "#/components/schemas/EraInfo" + } + }, + "additionalProperties": false + }, + { + "description": "Writes the given 
Transfer to global state.", + "type": "object", + "required": [ + "WriteTransfer" + ], + "properties": { + "WriteTransfer": { + "$ref": "#/components/schemas/Transfer" + } + }, + "additionalProperties": false + }, + { + "description": "Writes the given Bid to global state.", + "type": "object", + "required": [ + "WriteBid" + ], + "properties": { + "WriteBid": { + "$ref": "#/components/schemas/Bid" + } + }, + "additionalProperties": false + }, + { + "description": "Writes the given Withdraw to global state.", + "type": "object", + "required": [ + "WriteWithdraw" + ], + "properties": { + "WriteWithdraw": { + "type": "array", + "items": { + "$ref": "#/components/schemas/WithdrawPurse" + } + } + }, + "additionalProperties": false + }, + { + "description": "Adds the given `i32`.", + "type": "object", + "required": [ + "AddInt32" + ], + "properties": { + "AddInt32": { + "type": "integer", + "format": "int32" + } + }, + "additionalProperties": false + }, + { + "description": "Adds the given `u64`.", + "type": "object", + "required": [ + "AddUInt64" + ], + "properties": { + "AddUInt64": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + { + "description": "Adds the given `U128`.", + "type": "object", + "required": [ + "AddUInt128" + ], + "properties": { + "AddUInt128": { + "$ref": "#/components/schemas/U128" + } + }, + "additionalProperties": false + }, + { + "description": "Adds the given `U256`.", + "type": "object", + "required": [ + "AddUInt256" + ], + "properties": { + "AddUInt256": { + "$ref": "#/components/schemas/U256" + } + }, + "additionalProperties": false + }, + { + "description": "Adds the given `U512`.", + "type": "object", + "required": [ + "AddUInt512" + ], + "properties": { + "AddUInt512": { + "$ref": "#/components/schemas/U512" + } + }, + "additionalProperties": false + }, + { + "description": "Adds the given collection of named keys.", + "type": "object", + "required": [ + "AddKeys" + ], + 
"properties": { + "AddKeys": { + "type": "array", + "items": { + "$ref": "#/components/schemas/NamedKey" + } + } + }, + "additionalProperties": false + }, + { + "description": "A failed transformation, containing an error message.", + "type": "object", + "required": [ + "Failure" + ], + "properties": { + "Failure": { + "type": "string" + } + }, + "additionalProperties": false + }, + { + "description": "Writes the given Unbonding to global state.", + "type": "object", + "required": [ + "WriteUnbonding" + ], + "properties": { + "WriteUnbonding": { + "type": "array", + "items": { + "$ref": "#/components/schemas/UnbondingPurse" + } + } + }, + "additionalProperties": false + }, + { + "description": "Writes the addressable entity to global state.", + "type": "string", + "enum": [ + "WriteAddressableEntity" + ] + }, + { + "description": "Removes pathing to keyed value within global state. This is a form of soft delete; the underlying value remains in global state and is reachable from older global state root hashes where it was included in the hash up.", + "type": "object", + "required": [ + "Prune" + ], + "properties": { + "Prune": { + "$ref": "#/components/schemas/Key" + } + }, + "additionalProperties": false + }, + { + "description": "Writes the given BidKind to global state.", + "type": "object", + "required": [ + "WriteBidKind" + ], + "properties": { + "WriteBidKind": { + "$ref": "#/components/schemas/BidKind" + } + }, + "additionalProperties": false + } + ] + }, + "DeployInfo": { + "description": "Information relating to the given Deploy.", + "type": "object", + "required": [ + "deploy_hash", + "from", + "gas", + "source", + "transfers" + ], + "properties": { + "deploy_hash": { + "description": "Hex-encoded Deploy hash.", + "allOf": [ + { + "$ref": "#/components/schemas/DeployHash" + } + ] + }, + "transfers": { + "description": "Transfers performed by the Deploy.", + "type": "array", + "items": { + "$ref": "#/components/schemas/TransferAddr" + } + }, + "from": { + 
"description": "Account identifier of the creator of the Deploy.", + "allOf": [ + { + "$ref": "#/components/schemas/AccountHash" + } + ] + }, + "source": { + "description": "Source purse used for payment of the Deploy.", + "allOf": [ + { + "$ref": "#/components/schemas/URef" + } + ] + }, + "gas": { + "description": "Gas cost of executing the Deploy.", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + } + }, + "additionalProperties": false + }, + "TransferAddr": { + "description": "Hex-encoded transfer address.", + "type": "string" + }, + "URef": { + "description": "Hex-encoded, formatted URef.", + "type": "string" + }, + "U512": { + "description": "Decimal representation of a 512-bit integer.", + "type": "string" + }, + "EraInfo": { + "description": "Auction metadata. Intended to be recorded at each era.", + "type": "object", + "required": [ + "seigniorage_allocations" + ], + "properties": { + "seigniorage_allocations": { + "type": "array", + "items": { + "$ref": "#/components/schemas/SeigniorageAllocation" + } + } + }, + "additionalProperties": false + }, + "SeigniorageAllocation": { + "description": "Information about a seigniorage allocation", + "oneOf": [ + { + "description": "Info about a seigniorage allocation for a validator", + "type": "object", + "required": [ + "Validator" + ], + "properties": { + "Validator": { + "type": "object", + "required": [ + "amount", + "validator_public_key" + ], + "properties": { + "validator_public_key": { + "description": "Validator's public key", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "amount": { + "description": "Allocated amount", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "Info about a seigniorage allocation for a delegator", + "type": "object", + "required": [ + "Delegator" + ], + "properties": { + "Delegator": { + "type": "object", + "required": 
[ + "amount", + "delegator_public_key", + "validator_public_key" + ], + "properties": { + "delegator_public_key": { + "description": "Delegator's public key", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "validator_public_key": { + "description": "Validator's public key", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "amount": { + "description": "Allocated amount", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + ] + }, + "Transfer": { + "description": "Represents a transfer from one purse to another", + "type": "object", + "required": [ + "amount", + "deploy_hash", + "from", + "gas", + "source", + "target" + ], + "properties": { + "deploy_hash": { + "description": "Hex-encoded Deploy hash of Deploy that created the transfer.", + "allOf": [ + { + "$ref": "#/components/schemas/DeployHash" + } + ] + }, + "from": { + "description": "Account from which transfer was executed", + "allOf": [ + { + "$ref": "#/components/schemas/AccountHash" + } + ] + }, + "to": { + "description": "Account to which funds are transferred", + "anyOf": [ + { + "$ref": "#/components/schemas/AccountHash" + }, + { + "type": "null" + } + ] + }, + "source": { + "description": "Source purse", + "allOf": [ + { + "$ref": "#/components/schemas/URef" + } + ] + }, + "target": { + "description": "Target purse", + "allOf": [ + { + "$ref": "#/components/schemas/URef" + } + ] + }, + "amount": { + "description": "Transfer amount", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + }, + "gas": { + "description": "Gas", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + }, + "id": { + "description": "User-defined id", + "type": [ + "integer", + "null" + ], + "format": "uint64", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + "Bid": { + "description": "An entry in the validator map.", + "type": "object", + 
"required": [ + "bonding_purse", + "delegation_rate", + "delegators", + "inactive", + "staked_amount", + "validator_public_key" + ], + "properties": { + "validator_public_key": { + "description": "Validator public key.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "bonding_purse": { + "description": "The purse that was used for bonding.", + "allOf": [ + { + "$ref": "#/components/schemas/URef" + } + ] + }, + "staked_amount": { + "description": "The amount of tokens staked by a validator (not including delegators).", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + }, + "delegation_rate": { + "description": "Delegation rate.", + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, + "vesting_schedule": { + "description": "Vesting schedule for a genesis validator. `None` if non-genesis validator.", + "anyOf": [ + { + "$ref": "#/components/schemas/VestingSchedule" + }, + { + "type": "null" + } + ] + }, + "delegators": { + "description": "This validator's delegators, indexed by their public keys.", + "allOf": [ + { + "$ref": "#/components/schemas/Array_of_PublicKeyAndDelegator" + } + ] + }, + "inactive": { + "description": "`true` if validator has been \"evicted\".", + "type": "boolean" + } + }, + "additionalProperties": false + }, + "VestingSchedule": { + "type": "object", + "required": [ + "initial_release_timestamp_millis" + ], + "properties": { + "initial_release_timestamp_millis": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "locked_amounts": { + "type": [ + "array", + "null" + ], + "items": { + "$ref": "#/components/schemas/U512" + }, + "maxItems": 14, + "minItems": 14 + } + }, + "additionalProperties": false + }, + "Array_of_PublicKeyAndDelegator": { + "type": "array", + "items": { + "$ref": "#/components/schemas/PublicKeyAndDelegator" + } + }, + "PublicKeyAndDelegator": { + "description": "A delegator associated with the given validator.", + "type": "object", + "required": [ + 
"delegator", + "delegator_public_key" + ], + "properties": { + "delegator_public_key": { + "description": "The public key of the delegator.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "delegator": { + "description": "The delegator details.", + "allOf": [ + { + "$ref": "#/components/schemas/Delegator" + } + ] + } + } + }, + "Delegator": { + "description": "Represents a party delegating their stake to a validator (or \"delegatee\")", + "type": "object", + "required": [ + "bonding_purse", + "delegator_public_key", + "staked_amount", + "validator_public_key" + ], + "properties": { + "delegator_public_key": { + "$ref": "#/components/schemas/PublicKey" + }, + "staked_amount": { + "$ref": "#/components/schemas/U512" + }, + "bonding_purse": { + "$ref": "#/components/schemas/URef" + }, + "validator_public_key": { + "$ref": "#/components/schemas/PublicKey" + }, + "vesting_schedule": { + "anyOf": [ + { + "$ref": "#/components/schemas/VestingSchedule" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + }, + "WithdrawPurse": { + "description": "A withdraw purse, a legacy structure.", + "type": "object", + "required": [ + "amount", + "bonding_purse", + "era_of_creation", + "unbonder_public_key", + "validator_public_key" + ], + "properties": { + "bonding_purse": { + "description": "Bonding Purse", + "allOf": [ + { + "$ref": "#/components/schemas/URef" + } + ] + }, + "validator_public_key": { + "description": "Validators public key.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "unbonder_public_key": { + "description": "Unbonders public key.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "era_of_creation": { + "description": "Era in which this unbonding request was created.", + "allOf": [ + { + "$ref": "#/components/schemas/EraId" + } + ] + }, + "amount": { + "description": "Unbonding Amount.", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + } + 
}, + "additionalProperties": false + }, + "U128": { + "description": "Decimal representation of a 128-bit integer.", + "type": "string" + }, + "U256": { + "description": "Decimal representation of a 256-bit integer.", + "type": "string" + }, + "NamedKey": { + "description": "A key with a name.", + "type": "object", + "required": [ + "key", + "name" + ], + "properties": { + "name": { + "description": "The name of the entry.", + "type": "string" + }, + "key": { + "description": "The value of the entry: a casper `Key` type.", + "allOf": [ + { + "$ref": "#/components/schemas/Key" + } + ] + } + }, + "additionalProperties": false + }, + "Key": { + "description": "The key as a formatted string, under which data (e.g. `CLValue`s, smart contracts, user accounts) are stored in global state.", + "type": "string" + }, + "UnbondingPurse": { + "description": "Unbonding purse.", + "type": "object", + "required": [ + "amount", + "bonding_purse", + "era_of_creation", + "unbonder_public_key", + "validator_public_key" + ], + "properties": { + "bonding_purse": { + "description": "Bonding Purse", + "allOf": [ + { + "$ref": "#/components/schemas/URef" + } + ] + }, + "validator_public_key": { + "description": "Validators public key.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "unbonder_public_key": { + "description": "Unbonders public key.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "era_of_creation": { + "description": "Era in which this unbonding request was created.", + "allOf": [ + { + "$ref": "#/components/schemas/EraId" + } + ] + }, + "amount": { + "description": "Unbonding Amount.", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + }, + "new_validator": { + "description": "The validator public key to re-delegate to.", + "anyOf": [ + { + "$ref": "#/components/schemas/PublicKey" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + }, + "BidKind": { + "description": "Auction bid 
variants.", + "oneOf": [ + { + "description": "A unified record indexed on validator data, with an embedded collection of all delegator bids assigned to that validator. The Unified variant is for legacy retrograde support, new instances will not be created going forward.", + "type": "object", + "required": [ + "Unified" + ], + "properties": { + "Unified": { + "$ref": "#/components/schemas/Bid" + } + }, + "additionalProperties": false + }, + { + "description": "A bid record containing only validator data.", + "type": "object", + "required": [ + "Validator" + ], + "properties": { + "Validator": { + "$ref": "#/components/schemas/ValidatorBid" + } + }, + "additionalProperties": false + }, + { + "description": "A bid record containing only delegator data.", + "type": "object", + "required": [ + "Delegator" + ], + "properties": { + "Delegator": { + "$ref": "#/components/schemas/Delegator" + } + }, + "additionalProperties": false + } + ] + }, + "ValidatorBid": { + "description": "An entry in the validator map.", + "type": "object", + "required": [ + "bonding_purse", + "delegation_rate", + "inactive", + "staked_amount", + "validator_public_key" + ], + "properties": { + "validator_public_key": { + "description": "Validator public key", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "bonding_purse": { + "description": "The purse that was used for bonding.", + "allOf": [ + { + "$ref": "#/components/schemas/URef" + } + ] + }, + "staked_amount": { + "description": "The amount of tokens staked by a validator (not including delegators).", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + }, + "delegation_rate": { + "description": "Delegation rate", + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, + "vesting_schedule": { + "description": "Vesting schedule for a genesis validator. 
`None` if non-genesis validator.", + "anyOf": [ + { + "$ref": "#/components/schemas/VestingSchedule" + }, + { + "type": "null" + } + ] + }, + "inactive": { + "description": "`true` if validator has been \"evicted\"", + "type": "boolean" + } + }, + "additionalProperties": false + }, + "ExecutionResultV2": { + "description": "The result of executing a single deploy.", + "oneOf": [ + { + "description": "The result of a failed execution.", + "type": "object", + "required": [ + "Failure" + ], + "properties": { + "Failure": { + "type": "object", + "required": [ + "cost", + "effects", + "error_message", + "transfers" + ], + "properties": { + "effects": { + "description": "The effects of executing the deploy.", + "allOf": [ + { + "$ref": "#/components/schemas/Effects" + } + ] + }, + "transfers": { + "description": "A record of transfers performed while executing the deploy.", + "type": "array", + "items": { + "$ref": "#/components/schemas/TransferAddr" + } + }, + "cost": { + "description": "The cost in Motes of executing the deploy.", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + }, + "error_message": { + "description": "The error message associated with executing the deploy.", + "type": "string" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "The result of a successful execution.", + "type": "object", + "required": [ + "Success" + ], + "properties": { + "Success": { + "type": "object", + "required": [ + "cost", + "effects", + "transfers" + ], + "properties": { + "effects": { + "description": "The effects of executing the deploy.", + "allOf": [ + { + "$ref": "#/components/schemas/Effects" + } + ] + }, + "transfers": { + "description": "A record of transfers performed while executing the deploy.", + "type": "array", + "items": { + "$ref": "#/components/schemas/TransferAddr" + } + }, + "cost": { + "description": "The cost in Motes of executing the deploy.", + "allOf": [ + { + "$ref": 
"#/components/schemas/U512" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + ] + }, + "Effects": { + "description": "A log of all transforms produced during execution.", + "type": "array", + "items": { + "$ref": "#/components/schemas/TransformV2" + } + }, + "TransformV2": { + "description": "A transformation performed while executing a deploy.", + "type": "object", + "required": [ + "key", + "kind" + ], + "properties": { + "key": { + "$ref": "#/components/schemas/Key" + }, + "kind": { + "$ref": "#/components/schemas/TransformKind" + } + }, + "additionalProperties": false + }, + "TransformKind": { + "description": "Representation of a single transformation occurring during execution.\n\nNote that all arithmetic variants of [`TransformKind`] are commutative which means that a given collection of them can be executed in any order to produce the same end result.", + "oneOf": [ + { + "description": "An identity transformation that does not modify a value in the global state.\n\nCreated as a result of reading from the global state.", + "type": "string", + "enum": [ + "Identity" + ] + }, + { + "description": "Writes a new value in the global state.", + "type": "object", + "required": [ + "Write" + ], + "properties": { + "Write": { + "$ref": "#/components/schemas/StoredValue" + } + }, + "additionalProperties": false + }, + { + "description": "A wrapping addition of an `i32` to an existing numeric value (not necessarily an `i32`) in the global state.", + "type": "object", + "required": [ + "AddInt32" + ], + "properties": { + "AddInt32": { + "type": "integer", + "format": "int32" + } + }, + "additionalProperties": false + }, + { + "description": "A wrapping addition of a `u64` to an existing numeric value (not necessarily an `u64`) in the global state.", + "type": "object", + "required": [ + "AddUInt64" + ], + "properties": { + "AddUInt64": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + } + }, + 
"additionalProperties": false + }, + { + "description": "A wrapping addition of a `U128` to an existing numeric value (not necessarily an `U128`) in the global state.", + "type": "object", + "required": [ + "AddUInt128" + ], + "properties": { + "AddUInt128": { + "$ref": "#/components/schemas/U128" + } + }, + "additionalProperties": false + }, + { + "description": "A wrapping addition of a `U256` to an existing numeric value (not necessarily an `U256`) in the global state.", + "type": "object", + "required": [ + "AddUInt256" + ], + "properties": { + "AddUInt256": { + "$ref": "#/components/schemas/U256" + } + }, + "additionalProperties": false + }, + { + "description": "A wrapping addition of a `U512` to an existing numeric value (not necessarily an `U512`) in the global state.", + "type": "object", + "required": [ + "AddUInt512" + ], + "properties": { + "AddUInt512": { + "$ref": "#/components/schemas/U512" + } + }, + "additionalProperties": false + }, + { + "description": "Adds new named keys to an existing entry in the global state.\n\nThis transform assumes that the existing stored value is either an Account or a Contract.", + "type": "object", + "required": [ + "AddKeys" + ], + "properties": { + "AddKeys": { + "$ref": "#/components/schemas/NamedKeys" + } + }, + "additionalProperties": false + }, + { + "description": "Removes the pathing to the global state entry of the specified key. 
The pruned element remains reachable from previously generated global state root hashes, but will not be included in the next generated global state root hash and subsequent state accumulated from it.", + "type": "object", + "required": [ + "Prune" + ], + "properties": { + "Prune": { + "$ref": "#/components/schemas/Key" + } + }, + "additionalProperties": false + }, + { + "description": "Represents the case where applying a transform would cause an error.", + "type": "object", + "required": [ + "Failure" + ], + "properties": { + "Failure": { + "$ref": "#/components/schemas/TransformError" + } + }, + "additionalProperties": false + } + ] + }, + "StoredValue": { + "description": "A value stored in Global State.", + "oneOf": [ + { + "description": "A CLValue.", + "type": "object", + "required": [ + "CLValue" + ], + "properties": { + "CLValue": { + "$ref": "#/components/schemas/CLValue" + } + }, + "additionalProperties": false + }, + { + "description": "An account.", + "type": "object", + "required": [ + "Account" + ], + "properties": { + "Account": { + "$ref": "#/components/schemas/Account" + } + }, + "additionalProperties": false + }, + { + "description": "Contract wasm.", + "type": "object", + "required": [ + "ContractWasm" + ], + "properties": { + "ContractWasm": { + "$ref": "#/components/schemas/ContractWasm" + } + }, + "additionalProperties": false + }, + { + "description": "A contract.", + "type": "object", + "required": [ + "Contract" + ], + "properties": { + "Contract": { + "$ref": "#/components/schemas/Contract" + } + }, + "additionalProperties": false + }, + { + "description": "A contract package.", + "type": "object", + "required": [ + "ContractPackage" + ], + "properties": { + "ContractPackage": { + "$ref": "#/components/schemas/ContractPackage" + } + }, + "additionalProperties": false + }, + { + "description": "A `Transfer`.", + "type": "object", + "required": [ + "Transfer" + ], + "properties": { + "Transfer": { + "$ref": "#/components/schemas/Transfer" + 
} + }, + "additionalProperties": false + }, + { + "description": "Info about a deploy.", + "type": "object", + "required": [ + "DeployInfo" + ], + "properties": { + "DeployInfo": { + "$ref": "#/components/schemas/DeployInfo" + } + }, + "additionalProperties": false + }, + { + "description": "Info about an era.", + "type": "object", + "required": [ + "EraInfo" + ], + "properties": { + "EraInfo": { + "$ref": "#/components/schemas/EraInfo" + } + }, + "additionalProperties": false + }, + { + "description": "Variant that stores [`Bid`].", + "type": "object", + "required": [ + "Bid" + ], + "properties": { + "Bid": { + "$ref": "#/components/schemas/Bid" + } + }, + "additionalProperties": false + }, + { + "description": "Variant that stores withdraw information.", + "type": "object", + "required": [ + "Withdraw" + ], + "properties": { + "Withdraw": { + "type": "array", + "items": { + "$ref": "#/components/schemas/WithdrawPurse" + } + } + }, + "additionalProperties": false + }, + { + "description": "Unbonding information.", + "type": "object", + "required": [ + "Unbonding" + ], + "properties": { + "Unbonding": { + "type": "array", + "items": { + "$ref": "#/components/schemas/UnbondingPurse" + } + } + }, + "additionalProperties": false + }, + { + "description": "An `AddressableEntity`.", + "type": "object", + "required": [ + "AddressableEntity" + ], + "properties": { + "AddressableEntity": { + "$ref": "#/components/schemas/AddressableEntity" + } + }, + "additionalProperties": false + }, + { + "description": "Variant that stores [`BidKind`].", + "type": "object", + "required": [ + "BidKind" + ], + "properties": { + "BidKind": { + "$ref": "#/components/schemas/BidKind" + } + }, + "additionalProperties": false + }, + { + "description": "A `Package`.", + "type": "object", + "required": [ + "Package" + ], + "properties": { + "Package": { + "$ref": "#/components/schemas/Package" + } + }, + "additionalProperties": false + }, + { + "description": "A record of byte code.", + "type": 
"object", + "required": [ + "ByteCode" + ], + "properties": { + "ByteCode": { + "$ref": "#/components/schemas/ByteCode" + } + }, + "additionalProperties": false + }, + { + "description": "Variant that stores a message topic.", + "type": "object", + "required": [ + "MessageTopic" + ], + "properties": { + "MessageTopic": { + "$ref": "#/components/schemas/MessageTopicSummary" + } + }, + "additionalProperties": false + }, + { + "description": "Variant that stores a message digest.", + "type": "object", + "required": [ + "Message" + ], + "properties": { + "Message": { + "$ref": "#/components/schemas/MessageChecksum" + } + }, + "additionalProperties": false + } + ] + }, + "Account": { + "description": "Represents an Account in the global state.", + "type": "object", + "required": [ + "account_hash", + "action_thresholds", + "associated_keys", + "main_purse", + "named_keys" + ], + "properties": { + "account_hash": { + "$ref": "#/components/schemas/AccountHash" + }, + "named_keys": { + "$ref": "#/components/schemas/NamedKeys" + }, + "main_purse": { + "$ref": "#/components/schemas/URef" + }, + "associated_keys": { + "$ref": "#/components/schemas/AccountAssociatedKeys" + }, + "action_thresholds": { + "$ref": "#/components/schemas/AccountActionThresholds" + } + }, + "additionalProperties": false + }, + "NamedKeys": { + "description": "A collection of named keys.", + "type": "array", + "items": { + "$ref": "#/components/schemas/NamedKey" + } + }, + "AccountAssociatedKeys": { + "description": "A collection of weighted public keys (represented as account hashes) associated with an account.", + "allOf": [ + { + "$ref": "#/components/schemas/Array_of_AssociatedKey" + } + ] + }, + "Array_of_AssociatedKey": { + "type": "array", + "items": { + "$ref": "#/components/schemas/AssociatedKey" + } + }, + "AssociatedKey": { + "description": "A weighted public key.", + "type": "object", + "required": [ + "account_hash", + "weight" + ], + "properties": { + "account_hash": { + "description": 
"The account hash of the public key.", + "allOf": [ + { + "$ref": "#/components/schemas/AccountHash" + } + ] + }, + "weight": { + "description": "The weight assigned to the public key.", + "allOf": [ + { + "$ref": "#/components/schemas/AccountAssociatedKeyWeight" + } + ] + } + } + }, + "AccountAssociatedKeyWeight": { + "description": "The weight associated with public keys in an account's associated keys.", + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, + "AccountActionThresholds": { + "description": "Thresholds that have to be met when executing an action of a certain type.", + "type": "object", + "required": [ + "deployment", + "key_management" + ], + "properties": { + "deployment": { + "description": "Threshold for deploy execution.", + "allOf": [ + { + "$ref": "#/components/schemas/AccountAssociatedKeyWeight" + } + ] + }, + "key_management": { + "description": "Threshold for managing action threshold.", + "allOf": [ + { + "$ref": "#/components/schemas/AccountAssociatedKeyWeight" + } + ] + } + } + }, + "ContractWasm": { + "description": "A container for contract's WASM bytes.", + "type": "object", + "required": [ + "bytes" + ], + "properties": { + "bytes": { + "$ref": "#/components/schemas/Bytes" + } + } + }, + "Contract": { + "description": "Methods and type signatures supported by a contract.", + "type": "object", + "required": [ + "contract_package_hash", + "contract_wasm_hash", + "entry_points", + "named_keys", + "protocol_version" + ], + "properties": { + "contract_package_hash": { + "$ref": "#/components/schemas/ContractPackageHash" + }, + "contract_wasm_hash": { + "$ref": "#/components/schemas/ContractWasmHash" + }, + "named_keys": { + "$ref": "#/components/schemas/NamedKeys" + }, + "entry_points": { + "$ref": "#/components/schemas/Array_of_NamedEntryPoint" + }, + "protocol_version": { + "$ref": "#/components/schemas/ProtocolVersion" + } + } + }, + "ContractPackageHash": { + "description": "The hash address of the contract package", + 
"type": "string" + }, + "ContractWasmHash": { + "description": "The hash address of the contract wasm", + "type": "string" + }, + "Array_of_NamedEntryPoint": { + "type": "array", + "items": { + "$ref": "#/components/schemas/NamedEntryPoint" + } + }, + "NamedEntryPoint": { + "type": "object", + "required": [ + "entry_point", + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "entry_point": { + "allOf": [ + { + "$ref": "#/components/schemas/EntryPoint" + } + ] + } + } + }, + "EntryPoint": { + "description": "Type signature of a method. Order of arguments matter since can be referenced by index as well as name.", + "type": "object", + "required": [ + "access", + "args", + "entry_point_type", + "name", + "ret" + ], + "properties": { + "name": { + "type": "string" + }, + "args": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Parameter" + } + }, + "ret": { + "$ref": "#/components/schemas/CLType" + }, + "access": { + "$ref": "#/components/schemas/EntryPointAccess" + }, + "entry_point_type": { + "$ref": "#/components/schemas/EntryPointType" + } + } + }, + "Parameter": { + "description": "Parameter to a method", + "type": "object", + "required": [ + "cl_type", + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "cl_type": { + "$ref": "#/components/schemas/CLType" + } + } + }, + "EntryPointAccess": { + "description": "Enum describing the possible access control options for a contract entry point (method).", + "oneOf": [ + { + "description": "Anyone can call this method (no access controls).", + "type": "string", + "enum": [ + "Public" + ] + }, + { + "description": "Only users from the listed groups may call this method. 
Note: if the list is empty then this method is not callable from outside the contract.", + "type": "object", + "required": [ + "Groups" + ], + "properties": { + "Groups": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Group" + } + } + }, + "additionalProperties": false + }, + { + "description": "Can't be accessed directly but are kept in the derived wasm bytes.", + "type": "string", + "enum": [ + "Template" + ] + } + ] + }, + "Group": { + "description": "A (labelled) \"user group\". Each method of a versioned contract may be associated with one or more user groups which are allowed to call it.", + "type": "string" + }, + "EntryPointType": { + "description": "Context of method execution\n\nMost significant bit represents version i.e. - 0b0 -> 0.x/1.x (session & contracts) - 0b1 -> 2.x and later (introduced installer, utility entry points)", + "oneOf": [ + { + "description": "Runs as session code (caller) Deprecated, retained to allow read back of legacy stored session.", + "type": "string", + "enum": [ + "Session" + ] + }, + { + "description": "Runs within called entity's context (called)", + "type": "string", + "enum": [ + "AddressableEntity" + ] + }, + { + "description": "This entry point is intended to extract a subset of bytecode. 
Runs within called entity's context (called)", + "type": "string", + "enum": [ + "Factory" + ] + } + ] + }, + "ProtocolVersion": { + "description": "Casper Platform protocol version", + "type": "string" + }, + "ContractPackage": { + "description": "Contract definition, metadata, and security container.", + "type": "object", + "required": [ + "access_key", + "disabled_versions", + "groups", + "lock_status", + "versions" + ], + "properties": { + "access_key": { + "description": "Key used to add or disable versions", + "allOf": [ + { + "$ref": "#/components/schemas/URef" + } + ] + }, + "versions": { + "description": "All versions (enabled & disabled)", + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/ContractHash" + } + }, + "disabled_versions": { + "description": "Disabled versions", + "type": "array", + "items": { + "$ref": "#/components/schemas/ContractVersionKey" + }, + "uniqueItems": true + }, + "groups": { + "description": "Mapping maintaining the set of URefs associated with each \"user group\". This can be used to control access to methods in a particular version of the contract. 
A method is callable by any context which \"knows\" any of the URefs associated with the method's user group.", + "allOf": [ + { + "$ref": "#/components/schemas/Array_of_NamedUserGroup" + } + ] + }, + "lock_status": { + "description": "A flag that determines whether a contract is locked", + "allOf": [ + { + "$ref": "#/components/schemas/ContractPackageStatus" + } + ] + } + } + }, + "ContractHash": { + "description": "The hash address of the contract", + "type": "string" + }, + "ContractVersionKey": { + "description": "Major element of `ProtocolVersion` combined with `ContractVersion`.", + "type": "array", + "items": [ + { + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + { + "type": "integer", + "format": "uint32", + "minimum": 0.0 + } + ], + "maxItems": 2, + "minItems": 2 + }, + "Array_of_NamedUserGroup": { + "type": "array", + "items": { + "$ref": "#/components/schemas/NamedUserGroup" + } + }, + "NamedUserGroup": { + "type": "object", + "required": [ + "group_name", + "group_users" + ], + "properties": { + "group_name": { + "allOf": [ + { + "$ref": "#/components/schemas/Group" + } + ] + }, + "group_users": { + "type": "array", + "items": { + "$ref": "#/components/schemas/URef" + }, + "uniqueItems": true + } + } + }, + "ContractPackageStatus": { + "description": "A enum to determine the lock status of the contract package.", + "oneOf": [ + { + "description": "The package is locked and cannot be versioned.", + "type": "string", + "enum": [ + "Locked" + ] + }, + { + "description": "The package is unlocked and can be versioned.", + "type": "string", + "enum": [ + "Unlocked" + ] + } + ] + }, + "AddressableEntity": { + "description": "Methods and type signatures supported by a contract.", + "type": "object", + "required": [ + "action_thresholds", + "associated_keys", + "byte_code_hash", + "entry_points", + "main_purse", + "message_topics", + "named_keys", + "package_hash", + "protocol_version" + ], + "properties": { + "package_hash": { + "$ref": 
"#/components/schemas/PackageHash" + }, + "byte_code_hash": { + "$ref": "#/components/schemas/ByteCodeHash" + }, + "named_keys": { + "$ref": "#/components/schemas/NamedKeys" + }, + "entry_points": { + "$ref": "#/components/schemas/Array_of_NamedEntryPoint" + }, + "protocol_version": { + "$ref": "#/components/schemas/ProtocolVersion" + }, + "main_purse": { + "$ref": "#/components/schemas/URef" + }, + "associated_keys": { + "$ref": "#/components/schemas/EntityAssociatedKeys" + }, + "action_thresholds": { + "$ref": "#/components/schemas/EntityActionThresholds" + }, + "message_topics": { + "$ref": "#/components/schemas/Array_of_MessageTopic" + } + } + }, + "ByteCodeHash": { + "description": "The hash address of the contract wasm", + "type": "string" + }, + "EntityAssociatedKeys": { + "description": "A collection of weighted public keys (represented as account hashes) associated with an account.", + "allOf": [ + { + "$ref": "#/components/schemas/Array_of_AssociatedKey" + } + ] + }, + "EntityActionThresholds": { + "description": "Thresholds that have to be met when executing an action of a certain type.", + "type": "object", + "required": [ + "deployment", + "key_management", + "upgrade_management" + ], + "properties": { + "deployment": { + "description": "Threshold for deploy execution.", + "allOf": [ + { + "$ref": "#/components/schemas/EntityAssociatedKeyWeight" + } + ] + }, + "upgrade_management": { + "description": "Threshold for upgrading contracts.", + "allOf": [ + { + "$ref": "#/components/schemas/EntityAssociatedKeyWeight" + } + ] + }, + "key_management": { + "description": "Threshold for managing action threshold.", + "allOf": [ + { + "$ref": "#/components/schemas/EntityAssociatedKeyWeight" + } + ] + } + } + }, + "EntityAssociatedKeyWeight": { + "description": "The weight associated with public keys in an account's associated keys.", + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, + "Array_of_MessageTopic": { + "type": "array", + "items": { + 
"$ref": "#/components/schemas/MessageTopic" + } + }, + "MessageTopic": { + "type": "object", + "required": [ + "topic_name", + "topic_name_hash" + ], + "properties": { + "topic_name": { + "type": "string" + }, + "topic_name_hash": { + "allOf": [ + { + "$ref": "#/components/schemas/TopicNameHash" + } + ] + } + } + }, + "TopicNameHash": { + "description": "The hash of the name of the message topic.", + "type": "string" + }, + "Package": { + "description": "Entity definition, metadata, and security container.", + "type": "object", + "required": [ + "access_key", + "disabled_versions", + "groups", + "lock_status", + "package_kind", + "versions" + ], + "properties": { + "access_key": { + "description": "Key used to add or disable versions.", + "allOf": [ + { + "$ref": "#/components/schemas/URef" + } + ] + }, + "versions": { + "description": "All versions (enabled & disabled).", + "allOf": [ + { + "$ref": "#/components/schemas/Array_of_EntityVersionAndHash" + } + ] + }, + "disabled_versions": { + "description": "Collection of disabled entity versions. The runtime will not permit disabled entity versions to be executed.", + "type": "array", + "items": { + "$ref": "#/components/schemas/EntityVersionKey" + }, + "uniqueItems": true + }, + "groups": { + "description": "Mapping maintaining the set of URefs associated with each \"user group\". This can be used to control access to methods in a particular version of the entity. 
A method is callable by any context which \"knows\" any of the URefs associated with the method's user group.", + "allOf": [ + { + "$ref": "#/components/schemas/Array_of_NamedUserGroup" + } + ] + }, + "lock_status": { + "description": "A flag that determines whether a entity is locked", + "allOf": [ + { + "$ref": "#/components/schemas/PackageStatus" + } + ] + }, + "package_kind": { + "description": "The kind of package.", + "allOf": [ + { + "$ref": "#/components/schemas/PackageKind" + } + ] + } + } + }, + "Array_of_EntityVersionAndHash": { + "type": "array", + "items": { + "$ref": "#/components/schemas/EntityVersionAndHash" + } + }, + "EntityVersionAndHash": { + "type": "object", + "required": [ + "addressable_entity_hash", + "entity_version_key" + ], + "properties": { + "entity_version_key": { + "allOf": [ + { + "$ref": "#/components/schemas/EntityVersionKey" + } + ] + }, + "addressable_entity_hash": { + "allOf": [ + { + "$ref": "#/components/schemas/AddressableEntityHash" + } + ] + } + } + }, + "EntityVersionKey": { + "description": "Major element of `ProtocolVersion` combined with `EntityVersion`.", + "type": "object", + "required": [ + "entity_version", + "protocol_version_major" + ], + "properties": { + "protocol_version_major": { + "description": "Major element of `ProtocolVersion` a `ContractVersion` is compatible with.", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "entity_version": { + "description": "Automatically incremented value for a contract version within a major `ProtocolVersion`.", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + } + } + }, + "PackageStatus": { + "description": "A enum to determine the lock status of the package.", + "oneOf": [ + { + "description": "The package is locked and cannot be versioned.", + "type": "string", + "enum": [ + "Locked" + ] + }, + { + "description": "The package is unlocked and can be versioned.", + "type": "string", + "enum": [ + "Unlocked" + ] + } + ] + }, + "PackageKind": { 
+ "description": "The type of Package.", + "oneOf": [ + { + "description": "Package associated with a native contract implementation.", + "type": "object", + "required": [ + "System" + ], + "properties": { + "System": { + "$ref": "#/components/schemas/SystemEntityType" + } + }, + "additionalProperties": false + }, + { + "description": "Package associated with an Account hash.", + "type": "object", + "required": [ + "Account" + ], + "properties": { + "Account": { + "$ref": "#/components/schemas/AccountHash" + } + }, + "additionalProperties": false + }, + { + "description": "Packages associated with Wasm stored on chain.", + "type": "string", + "enum": [ + "SmartContract" + ] + } + ] + }, + "SystemEntityType": { + "description": "System contract types.\n\nUsed by converting to a `u32` and passing as the `system_contract_index` argument of `ext_ffi::casper_get_system_contract()`.", + "oneOf": [ + { + "description": "Mint contract.", + "type": "string", + "enum": [ + "Mint" + ] + }, + { + "description": "Handle Payment contract.", + "type": "string", + "enum": [ + "HandlePayment" + ] + }, + { + "description": "Standard Payment contract.", + "type": "string", + "enum": [ + "StandardPayment" + ] + }, + { + "description": "Auction contract.", + "type": "string", + "enum": [ + "Auction" + ] + } + ] + }, + "ByteCode": { + "description": "A container for contract's Wasm bytes.", + "type": "object", + "required": [ + "bytes", + "kind" + ], + "properties": { + "kind": { + "$ref": "#/components/schemas/ByteCodeKind" + }, + "bytes": { + "$ref": "#/components/schemas/Bytes" + } + } + }, + "ByteCodeKind": { + "description": "The type of Byte code.", + "oneOf": [ + { + "description": "Empty byte code.", + "type": "string", + "enum": [ + "Empty" + ] + }, + { + "description": "Byte code to be executed with the version 1 Casper execution engine.", + "type": "string", + "enum": [ + "V1CasperWasm" + ] + } + ] + }, + "MessageTopicSummary": { + "description": "Summary of a message topic 
that will be stored in global state.", + "type": "object", + "required": [ + "blocktime", + "message_count" + ], + "properties": { + "message_count": { + "description": "Number of messages in this topic.", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "blocktime": { + "description": "Block timestamp in which these messages were emitted.", + "allOf": [ + { + "$ref": "#/components/schemas/BlockTime" + } + ] + } + } + }, + "BlockTime": { + "description": "A newtype wrapping a [`u64`] which represents the block time.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "MessageChecksum": { + "description": "Message checksum as a formatted string.", + "type": "string" + }, + "TransformError": { + "description": "Error type for applying and combining transforms.\n\nA `TypeMismatch` occurs when a transform cannot be applied because the types are not compatible (e.g. trying to add a number to a string).", + "oneOf": [ + { + "description": "Error while (de)serializing data.", + "type": "object", + "required": [ + "Serialization" + ], + "properties": { + "Serialization": { + "$ref": "#/components/schemas/BytesreprError" + } + }, + "additionalProperties": false + }, + { + "description": "Type mismatch error.", + "type": "object", + "required": [ + "TypeMismatch" + ], + "properties": { + "TypeMismatch": { + "$ref": "#/components/schemas/TypeMismatch" + } + }, + "additionalProperties": false + }, + { + "description": "Type no longer supported.", + "type": "string", + "enum": [ + "Deprecated" + ] + } + ] + }, + "BytesreprError": { + "description": "Serialization and deserialization errors.", + "oneOf": [ + { + "description": "Early end of stream while deserializing.", + "type": "string", + "enum": [ + "EarlyEndOfStream" + ] + }, + { + "description": "Formatting error while deserializing.", + "type": "string", + "enum": [ + "Formatting" + ] + }, + { + "description": "Not all input bytes were consumed in [`deserialize`].", + "type": "string", + 
"enum": [ + "LeftOverBytes" + ] + }, + { + "description": "Out of memory error.", + "type": "string", + "enum": [ + "OutOfMemory" + ] + }, + { + "description": "No serialized representation is available for a value.", + "type": "string", + "enum": [ + "NotRepresentable" + ] + }, + { + "description": "Exceeded a recursion depth limit.", + "type": "string", + "enum": [ + "ExceededRecursionDepth" + ] + } + ] + }, + "TypeMismatch": { + "description": "An error struct representing a type mismatch in [`StoredValue`](crate::StoredValue) operations.", + "type": "object", + "required": [ + "expected", + "found" + ], + "properties": { + "expected": { + "description": "The name of the expected type.", + "type": "string" + }, + "found": { + "description": "The actual type found.", + "type": "string" + } + } + }, + "AccountIdentifier": { + "description": "Identifier of an account.", + "anyOf": [ + { + "description": "The public key of an account", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + { + "description": "The account hash of an account", + "allOf": [ + { + "$ref": "#/components/schemas/AccountHash" + } + ] + } + ] + }, + "BlockIdentifier": { + "description": "Identifier for possible ways to retrieve a block.", + "oneOf": [ + { + "description": "Identify and retrieve the block with its hash.", + "type": "object", + "required": [ + "Hash" + ], + "properties": { + "Hash": { + "$ref": "#/components/schemas/BlockHash" + } + }, + "additionalProperties": false + }, + { + "description": "Identify and retrieve the block with its height.", + "type": "object", + "required": [ + "Height" + ], + "properties": { + "Height": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + } + }, + "additionalProperties": false + } + ] + }, + "DictionaryIdentifier": { + "description": "Options for dictionary item lookups.", + "oneOf": [ + { + "description": "Lookup a dictionary item via an Account's named keys.", + "type": "object", + "required": [ + 
"AccountNamedKey" + ], + "properties": { + "AccountNamedKey": { + "type": "object", + "required": [ + "dictionary_item_key", + "dictionary_name", + "key" + ], + "properties": { + "key": { + "description": "The account key as a formatted string whose named keys contains dictionary_name.", + "type": "string" + }, + "dictionary_name": { + "description": "The named key under which the dictionary seed URef is stored.", + "type": "string" + }, + "dictionary_item_key": { + "description": "The dictionary item key formatted as a string.", + "type": "string" + } + } + } + }, + "additionalProperties": false + }, + { + "description": "Lookup a dictionary item via a Contract's named keys.", + "type": "object", + "required": [ + "ContractNamedKey" + ], + "properties": { + "ContractNamedKey": { + "type": "object", + "required": [ + "dictionary_item_key", + "dictionary_name", + "key" + ], + "properties": { + "key": { + "description": "The contract key as a formatted string whose named keys contains dictionary_name.", + "type": "string" + }, + "dictionary_name": { + "description": "The named key under which the dictionary seed URef is stored.", + "type": "string" + }, + "dictionary_item_key": { + "description": "The dictionary item key formatted as a string.", + "type": "string" + } + } + } + }, + "additionalProperties": false + }, + { + "description": "Lookup a dictionary item via its seed URef.", + "type": "object", + "required": [ + "URef" + ], + "properties": { + "URef": { + "type": "object", + "required": [ + "dictionary_item_key", + "seed_uref" + ], + "properties": { + "seed_uref": { + "description": "The dictionary's seed URef.", + "type": "string" + }, + "dictionary_item_key": { + "description": "The dictionary item key formatted as a string.", + "type": "string" + } + } + } + }, + "additionalProperties": false + }, + { + "description": "Lookup a dictionary item via its unique key.", + "type": "object", + "required": [ + "Dictionary" + ], + "properties": { + "Dictionary": { 
+ "type": "string" + } + }, + "additionalProperties": false + } + ] + }, + "GlobalStateIdentifier": { + "description": "Identifier for possible ways to query Global State", + "oneOf": [ + { + "description": "Query using a block hash.", + "type": "object", + "required": [ + "BlockHash" + ], + "properties": { + "BlockHash": { + "$ref": "#/components/schemas/BlockHash" + } + }, + "additionalProperties": false + }, + { + "description": "Query using a block height.", + "type": "object", + "required": [ + "BlockHeight" + ], + "properties": { + "BlockHeight": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + { + "description": "Query using the state root hash.", + "type": "object", + "required": [ + "StateRootHash" + ], + "properties": { + "StateRootHash": { + "$ref": "#/components/schemas/Digest" + } + }, + "additionalProperties": false + } + ] + }, + "BlockHeader": { + "description": "The versioned header portion of a block. It encapsulates different variants of the BlockHeader struct.", + "oneOf": [ + { + "description": "The legacy, initial version of the header portion of a block.", + "type": "object", + "required": [ + "Version1" + ], + "properties": { + "Version1": { + "$ref": "#/components/schemas/BlockHeaderV1" + } + }, + "additionalProperties": false + }, + { + "description": "The version 2 of the header portion of a block.", + "type": "object", + "required": [ + "Version2" + ], + "properties": { + "Version2": { + "$ref": "#/components/schemas/BlockHeaderV2" + } + }, + "additionalProperties": false + } + ] + }, + "BlockHeaderV1": { + "description": "The header portion of a block.", + "type": "object", + "required": [ + "accumulated_seed", + "body_hash", + "era_id", + "height", + "parent_hash", + "protocol_version", + "random_bit", + "state_root_hash", + "timestamp" + ], + "properties": { + "parent_hash": { + "description": "The parent block's hash.", + "allOf": [ + { + "$ref": 
"#/components/schemas/BlockHash" + } + ] + }, + "state_root_hash": { + "description": "The root hash of global state after the deploys in this block have been executed.", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" + } + ] + }, + "body_hash": { + "description": "The hash of the block's body.", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" + } + ] + }, + "random_bit": { + "description": "A random bit needed for initializing a future era.", + "type": "boolean" + }, + "accumulated_seed": { + "description": "A seed needed for initializing a future era.", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" + } + ] + }, + "era_end": { + "description": "The `EraEnd` of a block if it is a switch block.", + "anyOf": [ + { + "$ref": "#/components/schemas/EraEndV1" + }, + { + "type": "null" + } + ] + }, + "timestamp": { + "description": "The timestamp from when the block was proposed.", + "allOf": [ + { + "$ref": "#/components/schemas/Timestamp" + } + ] + }, + "era_id": { + "description": "The era ID in which this block was created.", + "allOf": [ + { + "$ref": "#/components/schemas/EraId" + } + ] + }, + "height": { + "description": "The height of this block, i.e. 
the number of ancestors.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "protocol_version": { + "description": "The protocol version of the network from when this block was created.", + "allOf": [ + { + "$ref": "#/components/schemas/ProtocolVersion" + } + ] + } + } + }, + "EraEndV1": { + "description": "Information related to the end of an era, and validator weights for the following era.", + "type": "object", + "required": [ + "era_report", + "next_era_validator_weights" + ], + "properties": { + "era_report": { + "description": "Equivocation, reward and validator inactivity information.", + "allOf": [ + { + "$ref": "#/components/schemas/EraReport_for_PublicKey" + } + ] + }, + "next_era_validator_weights": { + "description": "The validators for the upcoming era and their respective weights.", + "allOf": [ + { + "$ref": "#/components/schemas/Array_of_ValidatorWeight" + } + ] + } + } + }, + "EraReport_for_PublicKey": { + "description": "Equivocation, reward and validator inactivity information.", + "type": "object", + "required": [ + "equivocators", + "inactive_validators", + "rewards" + ], + "properties": { + "equivocators": { + "description": "The set of equivocators.", + "type": "array", + "items": { + "$ref": "#/components/schemas/PublicKey" + } + }, + "rewards": { + "description": "Rewards for finalization of earlier blocks.", + "allOf": [ + { + "$ref": "#/components/schemas/Array_of_EraReward" + } + ] + }, + "inactive_validators": { + "description": "Validators that haven't produced any unit during the era.", + "type": "array", + "items": { + "$ref": "#/components/schemas/PublicKey" + } + } + } + }, + "Array_of_EraReward": { + "type": "array", + "items": { + "$ref": "#/components/schemas/EraReward" + } + }, + "EraReward": { + "description": "A validator's public key paired with a measure of the value of its contribution to consensus, as a fraction of the configured maximum block reward.", + "type": "object", + "required": [ + "amount", + 
"validator" + ], + "properties": { + "validator": { + "description": "The validator's public key.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "amount": { + "description": "The reward amount.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + } + } + }, + "Array_of_ValidatorWeight": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ValidatorWeight" + } + }, + "ValidatorWeight": { + "description": "A validator's public key paired with its weight, i.e. the total number of motes staked by it and its delegators.", + "type": "object", + "required": [ + "validator", + "weight" + ], + "properties": { + "validator": { + "description": "The validator's public key.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "weight": { + "description": "The validator's weight.", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + } + } + }, + "BlockHeaderV2": { + "description": "The header portion of a block.", + "type": "object", + "required": [ + "accumulated_seed", + "body_hash", + "era_id", + "height", + "parent_hash", + "protocol_version", + "random_bit", + "state_root_hash", + "timestamp" + ], + "properties": { + "parent_hash": { + "description": "The parent block's hash.", + "allOf": [ + { + "$ref": "#/components/schemas/BlockHash" + } + ] + }, + "state_root_hash": { + "description": "The root hash of global state after the deploys in this block have been executed.", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" + } + ] + }, + "body_hash": { + "description": "The hash of the block's body.", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" + } + ] + }, + "random_bit": { + "description": "A random bit needed for initializing a future era.", + "type": "boolean" + }, + "accumulated_seed": { + "description": "A seed needed for initializing a future era.", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" + } + ] + }, + "era_end": { + "description": "The 
`EraEnd` of a block if it is a switch block.", + "anyOf": [ + { + "$ref": "#/components/schemas/EraEndV2" + }, + { + "type": "null" + } + ] + }, + "timestamp": { + "description": "The timestamp from when the block was proposed.", + "allOf": [ + { + "$ref": "#/components/schemas/Timestamp" + } + ] + }, + "era_id": { + "description": "The era ID in which this block was created.", + "allOf": [ + { + "$ref": "#/components/schemas/EraId" + } + ] + }, + "height": { + "description": "The height of this block, i.e. the number of ancestors.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "protocol_version": { + "description": "The protocol version of the network from when this block was created.", + "allOf": [ + { + "$ref": "#/components/schemas/ProtocolVersion" + } + ] + } + } + }, + "EraEndV2": { + "description": "Information related to the end of an era, and validator weights for the following era.", + "type": "object", + "required": [ + "equivocators", + "inactive_validators", + "next_era_validator_weights", + "rewards" + ], + "properties": { + "equivocators": { + "description": "The set of equivocators.", + "type": "array", + "items": { + "$ref": "#/components/schemas/PublicKey" + } + }, + "inactive_validators": { + "description": "Validators that haven't produced any unit during the era.", + "type": "array", + "items": { + "$ref": "#/components/schemas/PublicKey" + } + }, + "next_era_validator_weights": { + "description": "The validators for the upcoming era and their respective weights.", + "allOf": [ + { + "$ref": "#/components/schemas/Array_of_ValidatorWeight" + } + ] + }, + "rewards": { + "description": "The rewards distributed to the validators.", + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/U512" + } + } + } + }, + "PurseIdentifier": { + "description": "Identifier of a purse.", + "oneOf": [ + { + "description": "The main purse of the account identified by this public key.", + "type": "object", + "required": 
[ + "main_purse_under_public_key" + ], + "properties": { + "main_purse_under_public_key": { + "$ref": "#/components/schemas/PublicKey" + } + }, + "additionalProperties": false + }, + { + "description": "The main purse of the account identified by this account hash.", + "type": "object", + "required": [ + "main_purse_under_account_hash" + ], + "properties": { + "main_purse_under_account_hash": { + "$ref": "#/components/schemas/AccountHash" + } + }, + "additionalProperties": false + }, + { + "description": "The purse identified by this URef.", + "type": "object", + "required": [ + "purse_uref" + ], + "properties": { + "purse_uref": { + "$ref": "#/components/schemas/URef" + } + }, + "additionalProperties": false + } + ] + }, + "Peers": { + "description": "Map of peer IDs to network addresses.", + "type": "array", + "items": { + "$ref": "#/components/schemas/PeerEntry" + } + }, + "PeerEntry": { + "description": "Node peer entry.", + "type": "object", + "required": [ + "address", + "node_id" + ], + "properties": { + "node_id": { + "description": "Node id.", + "type": "string" + }, + "address": { + "description": "Node address.", + "type": "string" + } + }, + "additionalProperties": false + }, + "MinimalBlockInfo": { + "description": "Minimal info about a `Block` needed to satisfy the node status request.", + "type": "object", + "required": [ + "creator", + "era_id", + "hash", + "height", + "state_root_hash", + "timestamp" + ], + "properties": { + "hash": { + "$ref": "#/components/schemas/BlockHash" + }, + "timestamp": { + "$ref": "#/components/schemas/Timestamp" + }, + "era_id": { + "$ref": "#/components/schemas/EraId" + }, + "height": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "state_root_hash": { + "$ref": "#/components/schemas/Digest" + }, + "creator": { + "$ref": "#/components/schemas/PublicKey" + } + }, + "additionalProperties": false + }, + "NextUpgrade": { + "description": "Information about the next protocol upgrade.", + "type": 
"object", + "required": [ + "activation_point", + "protocol_version" + ], + "properties": { + "activation_point": { + "$ref": "#/components/schemas/ActivationPoint" + }, + "protocol_version": { + "$ref": "#/components/schemas/ProtocolVersion" + } + } + }, + "ActivationPoint": { + "description": "The first era to which the associated protocol version applies.", + "anyOf": [ + { + "description": "Era id.", + "allOf": [ + { + "$ref": "#/components/schemas/EraId" + } + ] + }, + { + "description": "Genesis timestamp.", + "allOf": [ + { + "$ref": "#/components/schemas/Timestamp" + } + ] + } + ] + }, + "ReactorState": { + "description": "The state of the reactor.", + "oneOf": [ + { + "description": "Get all components and reactor state set up on start.", + "type": "string", + "enum": [ + "Initialize" + ] + }, + { + "description": "Orient to the network and attempt to catch up to tip.", + "type": "string", + "enum": [ + "CatchUp" + ] + }, + { + "description": "Running commit upgrade and creating immediate switch block.", + "type": "string", + "enum": [ + "Upgrading" + ] + }, + { + "description": "Stay caught up with tip.", + "type": "string", + "enum": [ + "KeepUp" + ] + }, + { + "description": "Node is currently caught up and is an active validator.", + "type": "string", + "enum": [ + "Validate" + ] + }, + { + "description": "Node should be shut down for upgrade.", + "type": "string", + "enum": [ + "ShutdownForUpgrade" + ] + } + ] + }, + "AvailableBlockRange": { + "description": "An unbroken, inclusive range of blocks.", + "type": "object", + "required": [ + "high", + "low" + ], + "properties": { + "low": { + "description": "The inclusive lower bound of the range.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "high": { + "description": "The inclusive upper bound of the range.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + "BlockSynchronizerStatus": { + "description": "The status of the 
block synchronizer.", + "type": "object", + "properties": { + "historical": { + "description": "The status of syncing a historical block, if any.", + "anyOf": [ + { + "$ref": "#/components/schemas/BlockSyncStatus" + }, + { + "type": "null" + } + ] + }, + "forward": { + "description": "The status of syncing a forward block, if any.", + "anyOf": [ + { + "$ref": "#/components/schemas/BlockSyncStatus" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + }, + "BlockSyncStatus": { + "description": "The status of syncing an individual block.", + "type": "object", + "required": [ + "acquisition_state", + "block_hash" + ], + "properties": { + "block_hash": { + "description": "The block hash.", + "allOf": [ + { + "$ref": "#/components/schemas/BlockHash" + } + ] + }, + "block_height": { + "description": "The height of the block, if known.", + "type": [ + "integer", + "null" + ], + "format": "uint64", + "minimum": 0.0 + }, + "acquisition_state": { + "description": "The state of acquisition of the data associated with the block.", + "type": "string" + } + }, + "additionalProperties": false + }, + "JsonValidatorChanges": { + "description": "The changes in a validator's status.", + "type": "object", + "required": [ + "public_key", + "status_changes" + ], + "properties": { + "public_key": { + "description": "The public key of the validator.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "status_changes": { + "description": "The set of changes to the validator's status.", + "type": "array", + "items": { + "$ref": "#/components/schemas/JsonValidatorStatusChange" + } + } + }, + "additionalProperties": false + }, + "JsonValidatorStatusChange": { + "description": "A single change to a validator's status in the given era.", + "type": "object", + "required": [ + "era_id", + "validator_change" + ], + "properties": { + "era_id": { + "description": "The era in which the change occurred.", + "allOf": [ + { + "$ref": 
"#/components/schemas/EraId" + } + ] + }, + "validator_change": { + "description": "The change in validator status.", + "allOf": [ + { + "$ref": "#/components/schemas/ValidatorChange" + } + ] + } + }, + "additionalProperties": false + }, + "ValidatorChange": { + "description": "A change to a validator's status between two eras.", + "oneOf": [ + { + "description": "The validator got newly added to the validator set.", + "type": "string", + "enum": [ + "Added" + ] + }, + { + "description": "The validator was removed from the validator set.", + "type": "string", + "enum": [ + "Removed" + ] + }, + { + "description": "The validator was banned from this era.", + "type": "string", + "enum": [ + "Banned" + ] + }, + { + "description": "The validator was excluded from proposing new blocks in this era.", + "type": "string", + "enum": [ + "CannotPropose" + ] + }, + { + "description": "We saw the validator misbehave in this era.", + "type": "string", + "enum": [ + "SeenAsFaulty" + ] + } + ] + }, + "ChainspecRawBytes": { + "description": "The raw bytes of the chainspec.toml, genesis accounts.toml, and global_state.toml files.", + "type": "object", + "required": [ + "chainspec_bytes" + ], + "properties": { + "chainspec_bytes": { + "description": "Raw bytes of the current chainspec.toml file.", + "allOf": [ + { + "$ref": "#/components/schemas/Bytes" + } + ] + }, + "maybe_genesis_accounts_bytes": { + "description": "Raw bytes of the current genesis accounts.toml file.", + "anyOf": [ + { + "$ref": "#/components/schemas/Bytes" + }, + { + "type": "null" + } + ] + }, + "maybe_global_state_bytes": { + "description": "Raw bytes of the current global_state.toml file.", + "anyOf": [ + { + "$ref": "#/components/schemas/Bytes" + }, + { + "type": "null" + } + ] + } + } + }, + "JsonBlockWithSignatures": { + "description": "A JSON-friendly representation of a block and the signatures for that block.", + "type": "object", + "required": [ + "block", + "proofs" + ], + "properties": { + "block": { 
+ "description": "The block.", + "allOf": [ + { + "$ref": "#/components/schemas/Block" + } + ] + }, + "proofs": { + "description": "The proofs of the block, i.e. a collection of validators' signatures of the block hash.", + "allOf": [ + { + "$ref": "#/components/schemas/Array_of_BlockProof" + } + ] + } + }, + "additionalProperties": false + }, + "Block": { + "description": "A block after execution.", + "oneOf": [ + { + "description": "The legacy, initial version of the block.", + "type": "object", + "required": [ + "Version1" + ], + "properties": { + "Version1": { + "$ref": "#/components/schemas/BlockV1" + } + }, + "additionalProperties": false + }, + { + "description": "The version 2 of the block.", + "type": "object", + "required": [ + "Version2" + ], + "properties": { + "Version2": { + "$ref": "#/components/schemas/BlockV2" + } + }, + "additionalProperties": false + } + ] + }, + "BlockV1": { + "description": "A block after execution, with the resulting global state root hash. This is the core component of the Casper linear blockchain. Version 1.", + "type": "object", + "required": [ + "body", + "hash", + "header" + ], + "properties": { + "hash": { + "description": "The block hash identifying this block.", + "allOf": [ + { + "$ref": "#/components/schemas/BlockHash" + } + ] + }, + "header": { + "description": "The header portion of the block.", + "allOf": [ + { + "$ref": "#/components/schemas/BlockHeaderV1" + } + ] + }, + "body": { + "description": "The body portion of the block.", + "allOf": [ + { + "$ref": "#/components/schemas/BlockBodyV1" + } + ] + } + } + }, + "BlockBodyV1": { + "description": "The body portion of a block. 
Version 1.", + "type": "object", + "required": [ + "deploy_hashes", + "proposer", + "transfer_hashes" + ], + "properties": { + "proposer": { + "description": "The public key of the validator which proposed the block.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "deploy_hashes": { + "description": "The deploy hashes of the non-transfer deploys within the block.", + "type": "array", + "items": { + "$ref": "#/components/schemas/DeployHash" + } + }, + "transfer_hashes": { + "description": "The deploy hashes of the transfers within the block.", + "type": "array", + "items": { + "$ref": "#/components/schemas/DeployHash" + } + } + } + }, + "BlockV2": { + "description": "A block after execution, with the resulting global state root hash. This is the core component of the Casper linear blockchain. Version 2.", + "type": "object", + "required": [ + "body", + "hash", + "header" + ], + "properties": { + "hash": { + "description": "The block hash identifying this block.", + "allOf": [ + { + "$ref": "#/components/schemas/BlockHash" + } + ] + }, + "header": { + "description": "The header portion of the block.", + "allOf": [ + { + "$ref": "#/components/schemas/BlockHeaderV2" + } + ] + }, + "body": { + "description": "The body portion of the block.", + "allOf": [ + { + "$ref": "#/components/schemas/BlockBodyV2" + } + ] + } + } + }, + "BlockBodyV2": { + "description": "The body portion of a block. 
Version 2.", + "type": "object", + "required": [ + "install_upgrade", + "proposer", + "rewarded_signatures", + "staking", + "standard", + "transfer" + ], + "properties": { + "proposer": { + "description": "The public key of the validator which proposed the block.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "transfer": { + "description": "The hashes of the transfer transactions within the block.", + "type": "array", + "items": { + "$ref": "#/components/schemas/TransactionHash" + } + }, + "staking": { + "description": "The hashes of the non-transfer, native transactions within the block.", + "type": "array", + "items": { + "$ref": "#/components/schemas/TransactionHash" + } + }, + "install_upgrade": { + "description": "The hashes of the installer/upgrader transactions within the block.", + "type": "array", + "items": { + "$ref": "#/components/schemas/TransactionHash" + } + }, + "standard": { + "description": "The hashes of all other transactions within the block.", + "type": "array", + "items": { + "$ref": "#/components/schemas/TransactionHash" + } + }, + "rewarded_signatures": { + "description": "List of identifiers for finality signatures for a particular past block.", + "allOf": [ + { + "$ref": "#/components/schemas/RewardedSignatures" + } + ] + } + } + }, + "RewardedSignatures": { + "description": "Describes finality signatures that will be rewarded in a block. Consists of a vector of `SingleBlockRewardedSignatures`, each of which describes signatures for a single ancestor block. 
The first entry represents the signatures for the parent block, the second for the parent of the parent, and so on.", + "type": "array", + "items": { + "$ref": "#/components/schemas/SingleBlockRewardedSignatures" + } + }, + "SingleBlockRewardedSignatures": { + "description": "List of identifiers for finality signatures for a particular past block.\n\nThat past block height is current_height - signature_rewards_max_delay, the latter being defined in the chainspec.\n\nWe need to wait for a few blocks to pass (`signature_rewards_max_delay`) to store the finality signers because we need a bit of time to get the block finality.", + "type": "array", + "items": { + "type": "integer", + "format": "uint8", + "minimum": 0.0 + } + }, + "Array_of_BlockProof": { + "type": "array", + "items": { + "$ref": "#/components/schemas/BlockProof" + } + }, + "BlockProof": { + "description": "A validator's public key paired with a corresponding signature of a given block hash.", + "type": "object", + "required": [ + "public_key", + "signature" + ], + "properties": { + "public_key": { + "description": "The validator's public key.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "signature": { + "description": "The validator's signature.", + "allOf": [ + { + "$ref": "#/components/schemas/Signature" + } + ] + } + } + }, + "EraSummary": { + "description": "The summary of an era", + "type": "object", + "required": [ + "block_hash", + "era_id", + "merkle_proof", + "state_root_hash", + "stored_value" + ], + "properties": { + "block_hash": { + "description": "The block hash", + "allOf": [ + { + "$ref": "#/components/schemas/BlockHash" + } + ] + }, + "era_id": { + "description": "The era id", + "allOf": [ + { + "$ref": "#/components/schemas/EraId" + } + ] + }, + "stored_value": { + "description": "The StoredValue containing era information", + "allOf": [ + { + "$ref": "#/components/schemas/StoredValue" + } + ] + }, + "state_root_hash": { + "description": "Hex-encoded 
hash of the state root", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" + } + ] + }, + "merkle_proof": { + "description": "The Merkle proof", + "type": "string" + } + }, + "additionalProperties": false + }, + "AuctionState": { + "description": "Data structure summarizing auction contract data.", + "type": "object", + "required": [ + "bids", + "block_height", + "era_validators", + "state_root_hash" + ], + "properties": { + "state_root_hash": { + "description": "Global state hash.", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" + } + ] + }, + "block_height": { + "description": "Block height.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "era_validators": { + "description": "Era validators.", + "type": "array", + "items": { + "$ref": "#/components/schemas/JsonEraValidators" + } + }, + "bids": { + "description": "All bids.", + "allOf": [ + { + "$ref": "#/components/schemas/Array_of_PublicKeyAndBid" + } + ] + } + }, + "additionalProperties": false + }, + "JsonEraValidators": { + "description": "The validators for the given era.", + "type": "object", + "required": [ + "era_id", + "validator_weights" + ], + "properties": { + "era_id": { + "$ref": "#/components/schemas/EraId" + }, + "validator_weights": { + "type": "array", + "items": { + "$ref": "#/components/schemas/JsonValidatorWeights" + } + } + }, + "additionalProperties": false + }, + "JsonValidatorWeights": { + "description": "A validator's weight.", + "type": "object", + "required": [ + "public_key", + "weight" + ], + "properties": { + "public_key": { + "$ref": "#/components/schemas/PublicKey" + }, + "weight": { + "$ref": "#/components/schemas/U512" + } + }, + "additionalProperties": false + }, + "Array_of_PublicKeyAndBid": { + "type": "array", + "items": { + "$ref": "#/components/schemas/PublicKeyAndBid" + } + }, + "PublicKeyAndBid": { + "description": "A bid associated with the given public key.", + "type": "object", + "required": [ + "bid", + "public_key" + ], + 
"properties": { + "public_key": { + "description": "The public key of the bidder.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "bid": { + "description": "The bid details.", + "allOf": [ + { + "$ref": "#/components/schemas/Bid" + } + ] + } + } + } + } + } +} \ No newline at end of file diff --git a/resources/test/schema_chainspec_bytes.json b/resources/test/schema_chainspec_bytes.json new file mode 100644 index 00000000..4ce0a7ac --- /dev/null +++ b/resources/test/schema_chainspec_bytes.json @@ -0,0 +1,69 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "GetChainspecResult", + "description": "Result for the \"info_get_chainspec\" RPC.", + "type": "object", + "required": [ + "api_version", + "chainspec_bytes" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "chainspec_bytes": { + "description": "The chainspec file bytes.", + "allOf": [ + { + "$ref": "#/definitions/ChainspecRawBytes" + } + ] + } + }, + "definitions": { + "ChainspecRawBytes": { + "description": "The raw bytes of the chainspec.toml, genesis accounts.toml, and global_state.toml files.", + "type": "object", + "required": [ + "chainspec_bytes" + ], + "properties": { + "chainspec_bytes": { + "description": "Raw bytes of the current chainspec.toml file.", + "allOf": [ + { + "$ref": "#/definitions/Bytes" + } + ] + }, + "maybe_genesis_accounts_bytes": { + "description": "Raw bytes of the current genesis accounts.toml file.", + "anyOf": [ + { + "$ref": "#/definitions/Bytes" + }, + { + "type": "null" + } + ] + }, + "maybe_global_state_bytes": { + "description": "Raw bytes of the current global_state.toml file.", + "anyOf": [ + { + "$ref": "#/definitions/Bytes" + }, + { + "type": "null" + } + ] + } + } + }, + "Bytes": { + "description": "Hex-encoded bytes.", + "type": "string" + } + } +} \ No newline at end of file diff --git a/resources/test/schema_rpc_schema.json 
b/resources/test/schema_rpc_schema.json new file mode 100644 index 00000000..7e0bf161 --- /dev/null +++ b/resources/test/schema_rpc_schema.json @@ -0,0 +1,642 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "OpenRpcSchema", + "description": "The main schema for the casper node's RPC server, compliant with [the OpenRPC Specification](https://spec.open-rpc.org).", + "type": "object", + "required": [ + "components", + "info", + "methods", + "openrpc", + "servers" + ], + "properties": { + "openrpc": { + "type": "string" + }, + "info": { + "$ref": "#/definitions/OpenRpcInfoField" + }, + "servers": { + "type": "array", + "items": { + "$ref": "#/definitions/OpenRpcServerEntry" + } + }, + "methods": { + "type": "array", + "items": { + "$ref": "#/definitions/Method" + } + }, + "components": { + "$ref": "#/definitions/Components" + } + }, + "definitions": { + "OpenRpcInfoField": { + "type": "object", + "required": [ + "contact", + "description", + "license", + "title", + "version" + ], + "properties": { + "version": { + "type": "string" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "contact": { + "$ref": "#/definitions/OpenRpcContactField" + }, + "license": { + "$ref": "#/definitions/OpenRpcLicenseField" + } + } + }, + "OpenRpcContactField": { + "type": "object", + "required": [ + "name", + "url" + ], + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + } + } + }, + "OpenRpcLicenseField": { + "type": "object", + "required": [ + "name", + "url" + ], + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + } + } + }, + "OpenRpcServerEntry": { + "type": "object", + "required": [ + "name", + "url" + ], + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + } + } + }, + "Method": { + "description": "The struct containing the documentation for the RPCs.", + "type": "object", + "required": [ + "examples", + "name", + 
"params", + "result", + "summary" + ], + "properties": { + "name": { + "type": "string" + }, + "summary": { + "type": "string" + }, + "params": { + "type": "array", + "items": { + "$ref": "#/definitions/SchemaParam" + } + }, + "result": { + "$ref": "#/definitions/ResponseResult" + }, + "examples": { + "type": "array", + "items": { + "$ref": "#/definitions/Example" + } + } + } + }, + "SchemaParam": { + "type": "object", + "required": [ + "name", + "required", + "schema" + ], + "properties": { + "name": { + "type": "string" + }, + "schema": { + "$ref": "#/definitions/Schema" + }, + "required": { + "type": "boolean" + } + } + }, + "Schema": { + "description": "A JSON Schema.", + "anyOf": [ + { + "description": "A trivial boolean JSON Schema.\n\nThe schema `true` matches everything (always passes validation), whereas the schema `false` matches nothing (always fails validation).", + "type": "boolean" + }, + { + "description": "A JSON Schema object.", + "allOf": [ + { + "$ref": "#/definitions/SchemaObject" + } + ] + } + ] + }, + "SchemaObject": { + "description": "A JSON Schema object.", + "type": "object", + "properties": { + "type": { + "description": "The `type` keyword.\n\nSee [JSON Schema Validation 6.1.1. \"type\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-6.1.1) and [JSON Schema 4.2.1. Instance Data Model](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-4.2.1).", + "anyOf": [ + { + "$ref": "#/definitions/SingleOrVec_for_InstanceType" + }, + { + "type": "null" + } + ] + }, + "format": { + "description": "The `format` keyword.\n\nSee [JSON Schema Validation 7. A Vocabulary for Semantic Content With \"format\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-7).", + "type": [ + "string", + "null" + ] + }, + "enum": { + "description": "The `enum` keyword.\n\nSee [JSON Schema Validation 6.1.2. 
\"enum\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-6.1.2)", + "type": [ + "array", + "null" + ], + "items": true + }, + "const": { + "description": "The `const` keyword.\n\nSee [JSON Schema Validation 6.1.3. \"const\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-6.1.3)" + }, + "$ref": { + "description": "The `$ref` keyword.\n\nSee [JSON Schema 8.2.4.1. Direct References with \"$ref\"](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-8.2.4.1).", + "type": [ + "string", + "null" + ] + }, + "$id": { + "description": "The `$id` keyword.\n\nSee [JSON Schema 8.2.2. The \"$id\" Keyword](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-8.2.2).", + "type": [ + "string", + "null" + ] + }, + "title": { + "description": "The `title` keyword.\n\nSee [JSON Schema Validation 9.1. \"title\" and \"description\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-9.1).", + "type": [ + "string", + "null" + ] + }, + "description": { + "description": "The `description` keyword.\n\nSee [JSON Schema Validation 9.1. \"title\" and \"description\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-9.1).", + "type": [ + "string", + "null" + ] + }, + "default": { + "description": "The `default` keyword.\n\nSee [JSON Schema Validation 9.2. \"default\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-9.2)." + }, + "deprecated": { + "description": "The `deprecated` keyword.\n\nSee [JSON Schema Validation 9.3. \"deprecated\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-9.3).", + "type": "boolean" + }, + "readOnly": { + "description": "The `readOnly` keyword.\n\nSee [JSON Schema Validation 9.4. 
\"readOnly\" and \"writeOnly\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-9.4).", + "type": "boolean" + }, + "writeOnly": { + "description": "The `writeOnly` keyword.\n\nSee [JSON Schema Validation 9.4. \"readOnly\" and \"writeOnly\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-9.4).", + "type": "boolean" + }, + "examples": { + "description": "The `examples` keyword.\n\nSee [JSON Schema Validation 9.5. \"examples\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-9.5).", + "type": "array", + "items": true + }, + "allOf": { + "description": "The `allOf` keyword.\n\nSee [JSON Schema 9.2.1.1. \"allOf\"](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-9.2.1.1).", + "type": [ + "array", + "null" + ], + "items": { + "$ref": "#/definitions/Schema" + } + }, + "anyOf": { + "description": "The `anyOf` keyword.\n\nSee [JSON Schema 9.2.1.2. \"anyOf\"](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-9.2.1.2).", + "type": [ + "array", + "null" + ], + "items": { + "$ref": "#/definitions/Schema" + } + }, + "oneOf": { + "description": "The `oneOf` keyword.\n\nSee [JSON Schema 9.2.1.3. \"oneOf\"](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-9.2.1.3).", + "type": [ + "array", + "null" + ], + "items": { + "$ref": "#/definitions/Schema" + } + }, + "not": { + "description": "The `not` keyword.\n\nSee [JSON Schema 9.2.1.4. \"not\"](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-9.2.1.4).", + "anyOf": [ + { + "$ref": "#/definitions/Schema" + }, + { + "type": "null" + } + ] + }, + "if": { + "description": "The `if` keyword.\n\nSee [JSON Schema 9.2.2.1. \"if\"](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-9.2.2.1).", + "anyOf": [ + { + "$ref": "#/definitions/Schema" + }, + { + "type": "null" + } + ] + }, + "then": { + "description": "The `then` keyword.\n\nSee [JSON Schema 9.2.2.2. 
\"then\"](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-9.2.2.2).", + "anyOf": [ + { + "$ref": "#/definitions/Schema" + }, + { + "type": "null" + } + ] + }, + "else": { + "description": "The `else` keyword.\n\nSee [JSON Schema 9.2.2.3. \"else\"](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-9.2.2.3).", + "anyOf": [ + { + "$ref": "#/definitions/Schema" + }, + { + "type": "null" + } + ] + }, + "multipleOf": { + "description": "The `multipleOf` keyword.\n\nSee [JSON Schema Validation 6.2.1. \"multipleOf\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-6.2.1).", + "type": [ + "number", + "null" + ], + "format": "double" + }, + "maximum": { + "description": "The `maximum` keyword.\n\nSee [JSON Schema Validation 6.2.2. \"maximum\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-6.2.2).", + "type": [ + "number", + "null" + ], + "format": "double" + }, + "exclusiveMaximum": { + "description": "The `exclusiveMaximum` keyword.\n\nSee [JSON Schema Validation 6.2.3. \"exclusiveMaximum\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-6.2.3).", + "type": [ + "number", + "null" + ], + "format": "double" + }, + "minimum": { + "description": "The `minimum` keyword.\n\nSee [JSON Schema Validation 6.2.4. \"minimum\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-6.2.4).", + "type": [ + "number", + "null" + ], + "format": "double" + }, + "exclusiveMinimum": { + "description": "The `exclusiveMinimum` keyword.\n\nSee [JSON Schema Validation 6.2.5. \"exclusiveMinimum\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-6.2.5).", + "type": [ + "number", + "null" + ], + "format": "double" + }, + "maxLength": { + "description": "The `maxLength` keyword.\n\nSee [JSON Schema Validation 6.3.1. 
\"maxLength\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-6.3.1).", + "type": [ + "integer", + "null" + ], + "format": "uint32", + "minimum": 0.0 + }, + "minLength": { + "description": "The `minLength` keyword.\n\nSee [JSON Schema Validation 6.3.2. \"minLength\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-6.3.2).", + "type": [ + "integer", + "null" + ], + "format": "uint32", + "minimum": 0.0 + }, + "pattern": { + "description": "The `pattern` keyword.\n\nSee [JSON Schema Validation 6.3.3. \"pattern\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-6.3.3).", + "type": [ + "string", + "null" + ] + }, + "items": { + "description": "The `items` keyword.\n\nSee [JSON Schema 9.3.1.1. \"items\"](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-9.3.1.1).", + "anyOf": [ + { + "$ref": "#/definitions/SingleOrVec_for_Schema" + }, + { + "type": "null" + } + ] + }, + "additionalItems": { + "description": "The `additionalItems` keyword.\n\nSee [JSON Schema 9.3.1.2. \"additionalItems\"](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-9.3.1.2).", + "anyOf": [ + { + "$ref": "#/definitions/Schema" + }, + { + "type": "null" + } + ] + }, + "maxItems": { + "description": "The `maxItems` keyword.\n\nSee [JSON Schema Validation 6.4.1. \"maxItems\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-6.4.1).", + "type": [ + "integer", + "null" + ], + "format": "uint32", + "minimum": 0.0 + }, + "minItems": { + "description": "The `minItems` keyword.\n\nSee [JSON Schema Validation 6.4.2. \"minItems\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-6.4.2).", + "type": [ + "integer", + "null" + ], + "format": "uint32", + "minimum": 0.0 + }, + "uniqueItems": { + "description": "The `uniqueItems` keyword.\n\nSee [JSON Schema Validation 6.4.3. 
\"uniqueItems\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-6.4.3).", + "type": [ + "boolean", + "null" + ] + }, + "contains": { + "description": "The `contains` keyword.\n\nSee [JSON Schema 9.3.1.4. \"contains\"](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-9.3.1.4).", + "anyOf": [ + { + "$ref": "#/definitions/Schema" + }, + { + "type": "null" + } + ] + }, + "maxProperties": { + "description": "The `maxProperties` keyword.\n\nSee [JSON Schema Validation 6.5.1. \"maxProperties\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-6.5.1).", + "type": [ + "integer", + "null" + ], + "format": "uint32", + "minimum": 0.0 + }, + "minProperties": { + "description": "The `minProperties` keyword.\n\nSee [JSON Schema Validation 6.5.2. \"minProperties\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-6.5.2).", + "type": [ + "integer", + "null" + ], + "format": "uint32", + "minimum": 0.0 + }, + "required": { + "description": "The `required` keyword.\n\nSee [JSON Schema Validation 6.5.3. \"required\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-6.5.3).", + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "properties": { + "description": "The `properties` keyword.\n\nSee [JSON Schema 9.3.2.1. \"properties\"](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-9.3.2.1).", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/Schema" + } + }, + "patternProperties": { + "description": "The `patternProperties` keyword.\n\nSee [JSON Schema 9.3.2.2. \"patternProperties\"](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-9.3.2.2).", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/Schema" + } + }, + "additionalProperties": { + "description": "The `additionalProperties` keyword.\n\nSee [JSON Schema 9.3.2.3. 
\"additionalProperties\"](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-9.3.2.3).", + "anyOf": [ + { + "$ref": "#/definitions/Schema" + }, + { + "type": "null" + } + ] + }, + "propertyNames": { + "description": "The `propertyNames` keyword.\n\nSee [JSON Schema 9.3.2.5. \"propertyNames\"](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-9.3.2.5).", + "anyOf": [ + { + "$ref": "#/definitions/Schema" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": true + }, + "SingleOrVec_for_InstanceType": { + "description": "A type which can be serialized as a single item, or multiple items.\n\nIn some contexts, a `Single` may be semantically distinct from a `Vec` containing only item.", + "anyOf": [ + { + "$ref": "#/definitions/InstanceType" + }, + { + "type": "array", + "items": { + "$ref": "#/definitions/InstanceType" + } + } + ] + }, + "InstanceType": { + "description": "The possible types of values in JSON Schema documents.\n\nSee [JSON Schema 4.2.1. 
Instance Data Model](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-4.2.1).", + "type": "string", + "enum": [ + "null", + "boolean", + "object", + "array", + "number", + "string", + "integer" + ] + }, + "SingleOrVec_for_Schema": { + "description": "A type which can be serialized as a single item, or multiple items.\n\nIn some contexts, a `Single` may be semantically distinct from a `Vec` containing only item.", + "anyOf": [ + { + "$ref": "#/definitions/Schema" + }, + { + "type": "array", + "items": { + "$ref": "#/definitions/Schema" + } + } + ] + }, + "ResponseResult": { + "type": "object", + "required": [ + "name", + "schema" + ], + "properties": { + "name": { + "type": "string" + }, + "schema": { + "$ref": "#/definitions/Schema" + } + } + }, + "Example": { + "description": "An example pair of request params and response result.", + "type": "object", + "required": [ + "name", + "params", + "result" + ], + "properties": { + "name": { + "type": "string" + }, + "params": { + "type": "array", + "items": { + "$ref": "#/definitions/ExampleParam" + } + }, + "result": { + "$ref": "#/definitions/ExampleResult" + } + } + }, + "ExampleParam": { + "type": "object", + "required": [ + "name", + "value" + ], + "properties": { + "name": { + "type": "string" + }, + "value": true + } + }, + "ExampleResult": { + "type": "object", + "required": [ + "name", + "value" + ], + "properties": { + "name": { + "type": "string" + }, + "value": true + } + }, + "Components": { + "type": "object", + "required": [ + "schemas" + ], + "properties": { + "schemas": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/Schema" + } + } + } + } + } +} \ No newline at end of file diff --git a/resources/test/schema_status.json b/resources/test/schema_status.json new file mode 100644 index 00000000..78496673 --- /dev/null +++ b/resources/test/schema_status.json @@ -0,0 +1,415 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "GetStatusResult", 
+ "description": "Result for \"info_get_status\" RPC response.", + "type": "object", + "required": [ + "api_version", + "available_block_range", + "block_sync", + "build_version", + "chainspec_name", + "last_progress", + "peers", + "reactor_state", + "starting_state_root_hash", + "uptime" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "peers": { + "description": "The node ID and network address of each connected peer.", + "allOf": [ + { + "$ref": "#/definitions/Peers" + } + ] + }, + "build_version": { + "description": "The compiled node version.", + "type": "string" + }, + "chainspec_name": { + "description": "The chainspec name.", + "type": "string" + }, + "starting_state_root_hash": { + "description": "The state root hash of the lowest block in the available block range.", + "allOf": [ + { + "$ref": "#/definitions/Digest" + } + ] + }, + "last_added_block_info": { + "description": "The minimal info of the last block from the linear chain.", + "anyOf": [ + { + "$ref": "#/definitions/MinimalBlockInfo" + }, + { + "type": "null" + } + ] + }, + "our_public_signing_key": { + "description": "Our public signing key.", + "anyOf": [ + { + "$ref": "#/definitions/PublicKey" + }, + { + "type": "null" + } + ] + }, + "round_length": { + "description": "The next round length if this node is a validator.", + "anyOf": [ + { + "$ref": "#/definitions/TimeDiff" + }, + { + "type": "null" + } + ] + }, + "next_upgrade": { + "description": "Information about the next scheduled upgrade.", + "anyOf": [ + { + "$ref": "#/definitions/NextUpgrade" + }, + { + "type": "null" + } + ] + }, + "uptime": { + "description": "Time that passed since the node has started.", + "allOf": [ + { + "$ref": "#/definitions/TimeDiff" + } + ] + }, + "reactor_state": { + "description": "The current state of node reactor.", + "allOf": [ + { + "$ref": "#/definitions/ReactorState" + } + ] + }, + "last_progress": { + "description": "Timestamp of the last 
recorded progress in the reactor.", + "allOf": [ + { + "$ref": "#/definitions/Timestamp" + } + ] + }, + "available_block_range": { + "description": "The available block range in storage.", + "allOf": [ + { + "$ref": "#/definitions/AvailableBlockRange" + } + ] + }, + "block_sync": { + "description": "The status of the block synchronizer builders.", + "allOf": [ + { + "$ref": "#/definitions/BlockSynchronizerStatus" + } + ] + } + }, + "additionalProperties": false, + "definitions": { + "Peers": { + "description": "Map of peer IDs to network addresses.", + "type": "array", + "items": { + "$ref": "#/definitions/PeerEntry" + } + }, + "PeerEntry": { + "description": "Node peer entry.", + "type": "object", + "required": [ + "address", + "node_id" + ], + "properties": { + "node_id": { + "description": "Node id.", + "type": "string" + }, + "address": { + "description": "Node address.", + "type": "string" + } + }, + "additionalProperties": false + }, + "Digest": { + "description": "Hex-encoded hash digest.", + "type": "string" + }, + "MinimalBlockInfo": { + "description": "Minimal info about a `Block` needed to satisfy the node status request.", + "type": "object", + "required": [ + "creator", + "era_id", + "hash", + "height", + "state_root_hash", + "timestamp" + ], + "properties": { + "hash": { + "$ref": "#/definitions/BlockHash" + }, + "timestamp": { + "$ref": "#/definitions/Timestamp" + }, + "era_id": { + "$ref": "#/definitions/EraId" + }, + "height": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "state_root_hash": { + "$ref": "#/definitions/Digest" + }, + "creator": { + "$ref": "#/definitions/PublicKey" + } + }, + "additionalProperties": false + }, + "BlockHash": { + "description": "Hex-encoded cryptographic hash of a block.", + "allOf": [ + { + "$ref": "#/definitions/Digest" + } + ] + }, + "Timestamp": { + "description": "Timestamp formatted as per RFC 3339", + "type": "string" + }, + "EraId": { + "description": "Era ID newtype.", + "type": 
"integer", + "format": "uint64", + "minimum": 0.0 + }, + "PublicKey": { + "description": "Hex-encoded cryptographic public key, including the algorithm tag prefix.", + "examples": [ + { + "name": "SystemPublicKey", + "description": "A pseudo public key, used for example when the system proposes an immediate switch block after a network upgrade rather than a specific validator. Its hex-encoded value is always '00', as is the corresponding pseudo signature's", + "value": "00" + }, + { + "name": "Ed25519PublicKey", + "description": "An Ed25519 public key. Its hex-encoded value begins '01' and is followed by 64 characters", + "value": "018a88e3dd7409f195fd52db2d3cba5d72ca6709bf1d94121bf3748801b40f6f5c" + }, + { + "name": "Secp256k1PublicKey", + "description": "A secp256k1 public key. Its hex-encoded value begins '02' and is followed by 66 characters", + "value": "0203408e9526316fd1f8def480dd45b2cc72ffd732771c9ceb5d92ffa4051e6ee084" + } + ], + "type": "string" + }, + "TimeDiff": { + "description": "Human-readable duration.", + "type": "string" + }, + "NextUpgrade": { + "description": "Information about the next protocol upgrade.", + "type": "object", + "required": [ + "activation_point", + "protocol_version" + ], + "properties": { + "activation_point": { + "$ref": "#/definitions/ActivationPoint" + }, + "protocol_version": { + "$ref": "#/definitions/ProtocolVersion" + } + } + }, + "ActivationPoint": { + "description": "The first era to which the associated protocol version applies.", + "anyOf": [ + { + "description": "Era id.", + "allOf": [ + { + "$ref": "#/definitions/EraId" + } + ] + }, + { + "description": "Genesis timestamp.", + "allOf": [ + { + "$ref": "#/definitions/Timestamp" + } + ] + } + ] + }, + "ProtocolVersion": { + "description": "Casper Platform protocol version", + "type": "string" + }, + "ReactorState": { + "description": "The state of the reactor.", + "oneOf": [ + { + "description": "Get all components and reactor state set up on start.", + "type": 
"string", + "enum": [ + "Initialize" + ] + }, + { + "description": "Orient to the network and attempt to catch up to tip.", + "type": "string", + "enum": [ + "CatchUp" + ] + }, + { + "description": "Running commit upgrade and creating immediate switch block.", + "type": "string", + "enum": [ + "Upgrading" + ] + }, + { + "description": "Stay caught up with tip.", + "type": "string", + "enum": [ + "KeepUp" + ] + }, + { + "description": "Node is currently caught up and is an active validator.", + "type": "string", + "enum": [ + "Validate" + ] + }, + { + "description": "Node should be shut down for upgrade.", + "type": "string", + "enum": [ + "ShutdownForUpgrade" + ] + } + ] + }, + "AvailableBlockRange": { + "description": "An unbroken, inclusive range of blocks.", + "type": "object", + "required": [ + "high", + "low" + ], + "properties": { + "low": { + "description": "The inclusive lower bound of the range.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "high": { + "description": "The inclusive upper bound of the range.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + "BlockSynchronizerStatus": { + "description": "The status of the block synchronizer.", + "type": "object", + "properties": { + "historical": { + "description": "The status of syncing a historical block, if any.", + "anyOf": [ + { + "$ref": "#/definitions/BlockSyncStatus" + }, + { + "type": "null" + } + ] + }, + "forward": { + "description": "The status of syncing a forward block, if any.", + "anyOf": [ + { + "$ref": "#/definitions/BlockSyncStatus" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + }, + "BlockSyncStatus": { + "description": "The status of syncing an individual block.", + "type": "object", + "required": [ + "acquisition_state", + "block_hash" + ], + "properties": { + "block_hash": { + "description": "The block hash.", + "allOf": [ + { + "$ref": "#/definitions/BlockHash" + } + ] + }, 
+ "block_height": { + "description": "The height of the block, if known.", + "type": [ + "integer", + "null" + ], + "format": "uint64", + "minimum": 0.0 + }, + "acquisition_state": { + "description": "The state of acquisition of the data associated with the block.", + "type": "string" + } + }, + "additionalProperties": false + } + } +} \ No newline at end of file diff --git a/resources/test/schema_validator_changes.json b/resources/test/schema_validator_changes.json new file mode 100644 index 00000000..c7a7340d --- /dev/null +++ b/resources/test/schema_validator_changes.json @@ -0,0 +1,146 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "GetValidatorChangesResult", + "description": "Result for the \"info_get_validator_changes\" RPC.", + "type": "object", + "required": [ + "api_version", + "changes" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "changes": { + "description": "The validators' status changes.", + "type": "array", + "items": { + "$ref": "#/definitions/JsonValidatorChanges" + } + } + }, + "additionalProperties": false, + "definitions": { + "JsonValidatorChanges": { + "description": "The changes in a validator's status.", + "type": "object", + "required": [ + "public_key", + "status_changes" + ], + "properties": { + "public_key": { + "description": "The public key of the validator.", + "allOf": [ + { + "$ref": "#/definitions/PublicKey" + } + ] + }, + "status_changes": { + "description": "The set of changes to the validator's status.", + "type": "array", + "items": { + "$ref": "#/definitions/JsonValidatorStatusChange" + } + } + }, + "additionalProperties": false + }, + "PublicKey": { + "description": "Hex-encoded cryptographic public key, including the algorithm tag prefix.", + "examples": [ + { + "name": "SystemPublicKey", + "description": "A pseudo public key, used for example when the system proposes an immediate switch block after a network upgrade rather than a 
specific validator. Its hex-encoded value is always '00', as is the corresponding pseudo signature's", + "value": "00" + }, + { + "name": "Ed25519PublicKey", + "description": "An Ed25519 public key. Its hex-encoded value begins '01' and is followed by 64 characters", + "value": "018a88e3dd7409f195fd52db2d3cba5d72ca6709bf1d94121bf3748801b40f6f5c" + }, + { + "name": "Secp256k1PublicKey", + "description": "A secp256k1 public key. Its hex-encoded value begins '02' and is followed by 66 characters", + "value": "0203408e9526316fd1f8def480dd45b2cc72ffd732771c9ceb5d92ffa4051e6ee084" + } + ], + "type": "string" + }, + "JsonValidatorStatusChange": { + "description": "A single change to a validator's status in the given era.", + "type": "object", + "required": [ + "era_id", + "validator_change" + ], + "properties": { + "era_id": { + "description": "The era in which the change occurred.", + "allOf": [ + { + "$ref": "#/definitions/EraId" + } + ] + }, + "validator_change": { + "description": "The change in validator status.", + "allOf": [ + { + "$ref": "#/definitions/ValidatorChange" + } + ] + } + }, + "additionalProperties": false + }, + "EraId": { + "description": "Era ID newtype.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "ValidatorChange": { + "description": "A change to a validator's status between two eras.", + "oneOf": [ + { + "description": "The validator got newly added to the validator set.", + "type": "string", + "enum": [ + "Added" + ] + }, + { + "description": "The validator was removed from the validator set.", + "type": "string", + "enum": [ + "Removed" + ] + }, + { + "description": "The validator was banned from this era.", + "type": "string", + "enum": [ + "Banned" + ] + }, + { + "description": "The validator was excluded from proposing new blocks in this era.", + "type": "string", + "enum": [ + "CannotPropose" + ] + }, + { + "description": "We saw the validator misbehave in this era.", + "type": "string", + "enum": [ + "SeenAsFaulty" + 
] } ] } } } \ No newline at end of file diff --git a/rpc_sidecar/Cargo.toml b/rpc_sidecar/Cargo.toml new file mode 100644 index 00000000..46f1ce52 --- /dev/null +++ b/rpc_sidecar/Cargo.toml @@ -0,0 +1,74 @@ +[package] +name = "casper-rpc-sidecar" +version = "1.0.0" +authors = ["Jacek Malec "] +edition = "2018" +description = "The Casper blockchain RPC sidecar" +documentation = "https://docs.rs/casper-rpc-sidecar" +readme = "README.md" +homepage = "https://casperlabs.io" +repository = "https://github.com/CasperLabs/casper-node/tree/master/rpc_sidecar" +license = "Apache-2.0" + +[dependencies] +anyhow = { workspace = true } +async-trait = "0.1.50" +backtrace = "0.3.50" +base16 = "0.2.1" +bincode = "1" +casper-json-rpc = { version = "1.0.0", path = "../json_rpc" } +casper-types-ver-2_0 = { workspace = true, features = ["datasize", "json-schema", "std"] } +datasize = { workspace = true, features = ["detailed", "fake_clock-types"] } +futures = { workspace = true } +http = "0.2.1" +hyper = "0.14.26" +juliet = { version = "0.2", features = ["tracing"] } +num_cpus = "1" +once_cell.workspace = true +rand = "0.8.3" +schemars = { version = "0.8.16", features = ["preserve_order", "impl_json_schema"] } +serde = { workspace = true, default-features = true, features = ["derive"] } +serde_json = { version = "1", features = ["preserve_order"] } +structopt = "0.3.14" +thiserror = { workspace = true } +tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } +toml = { workspace = true } +tower = { version = "0.4.6", features = ["limit"] } +tracing = { workspace = true, default-features = true } +tracing-subscriber = { workspace = true, features = ["env-filter", "fmt", "json"] } +warp = { version = "0.3.6", features = ["compression"] } + +[dev-dependencies] +assert-json-diff = "2" +bytes = "1.5.0" +casper-types-ver-2_0 = { workspace = true, features = ["datasize", "json-schema", "std", "testing"] } +portpicker = "0.1.1" +pretty_assertions = "0.7.2" +regex = "1" 
+tempfile = "3" +tokio = { workspace = true, features = ["test-util"] } + +[build-dependencies] +vergen = { version = "8.2.1", default-features = false, features = [ + "git", + "gitoxide", +] } + +[features] +testing = ["casper-types-ver-2_0/testing"] + +[package.metadata.deb] +revision = "0" +assets = [ + ["../target/release/casper-rpc-sidecar", "/usr/bin/casper-rpc-sidecar", "755"] +] +maintainer-scripts = "../resources/maintainer_scripts/debian" +extended-description = """ +Package for Casper RPC sidecar. + +For information on using package, see https://github.com/casper-network/casper-node +""" + +[package.metadata.deb.systemd-units] +unit-scripts = "../resources/maintainer_scripts/casper_rpc_sidecar" +restart-after-upgrade = false diff --git a/rpc_sidecar/README.md b/rpc_sidecar/README.md new file mode 100644 index 00000000..e5652507 --- /dev/null +++ b/rpc_sidecar/README.md @@ -0,0 +1,28 @@ +# rpc-sidecar + +[![LOGO](https://raw.githubusercontent.com/casper-network/casper-node/master/images/casper-association-logo-primary.svg)](https://casper.network/) + +[![Build Status](https://drone-auto-casper-network.casperlabs.io/api/badges/casper-network/casper-node/status.svg?branch=dev)](http://drone-auto-casper-network.casperlabs.io/casper-network/casper-node) +[![Crates.io](https://img.shields.io/crates/v/casper-rpc-sidecar)](https://crates.io/crates/casper-rpc-sidecar) +[![License](https://img.shields.io/badge/license-Apache-blue)](https://github.com/CasperLabs/casper-node/blob/master/LICENSE) + +## Synopsis + +The sidecar is a process that runs alongside the Casper node and exposes a JSON-RPC interface for interacting with the node. The RPC protocol allows for basic operations like querying global state, sending transactions and deploys etc. All of the RPC methods are documented [here](https://docs.casper.network/developers/json-rpc/). 
+ +## Protocol +The sidecar maintains a TCP connection with the node and communicates using a custom binary protocol built on top of [Juliet](https://github.com/casper-network/juliet). The protocol uses a request-response model where the sidecar sends simple self-contained requests and the node responds to them. The requests can be split into these main categories: +- read requests + - queries for transient in-memory information like the + current block height, peer list, component status etc. + - queries for database items, with both the database and the key + always being explicitly specified by the sidecar +- execute transaction requests + - request to submit a transaction for execution + - request to speculatively execute a transaction + +The node does not interpret the data it sends where it's not necessary. For example, most database items are sent as opaque byte arrays and the sidecar is responsible for interpreting them. This leaves the sidecar in control of the data it receives and allows it to be more flexible in how it handles it. + +## License + +Licensed under the [Apache License Version 2.0](https://github.com/casper-network/casper-node/blob/master/LICENSE). diff --git a/rpc_sidecar/build.rs b/rpc_sidecar/build.rs new file mode 100644 index 00000000..820ad1ce --- /dev/null +++ b/rpc_sidecar/build.rs @@ -0,0 +1,16 @@ +use std::env; + +use vergen::EmitBuilder; + +fn main() { + if let Err(error) = EmitBuilder::builder().fail_on_error().git_sha(true).emit() { + println!("cargo:warning={}", error); + println!("cargo:warning=casper-rpc-sidecar build version will not include git short hash"); + } + + // Make the build profile available to rustc at compile time. 
+ println!( + "cargo:rustc-env=SIDECAR_BUILD_PROFILE={}", + env::var("PROFILE").unwrap() + ); +} diff --git a/rpc_sidecar/src/config.rs b/rpc_sidecar/src/config.rs new file mode 100644 index 00000000..41cffd1a --- /dev/null +++ b/rpc_sidecar/src/config.rs @@ -0,0 +1,363 @@ +use std::{ + convert::{TryFrom, TryInto}, + net::{IpAddr, Ipv4Addr, SocketAddr}, +}; + +use datasize::DataSize; +use serde::Deserialize; +use thiserror::Error; + +use crate::SpeculativeExecConfig; + +/// Default binding address for the JSON-RPC HTTP server. +/// +/// Uses a fixed port per node, but binds on any interface. +const DEFAULT_ADDRESS: &str = "0.0.0.0:0"; +/// Default rate limit in qps. +const DEFAULT_QPS_LIMIT: u64 = 100; +/// Default max body bytes. This is 2.5MB which should be able to accommodate the largest valid +/// JSON-RPC request, which would be an "account_put_deploy". +const DEFAULT_MAX_BODY_BYTES: u32 = 2_621_440; +/// Default CORS origin. +const DEFAULT_CORS_ORIGIN: &str = ""; + +#[derive(Clone, Debug, Deserialize, PartialEq, Eq)] +// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. 
+#[serde(deny_unknown_fields)] +pub struct RpcServerConfigTarget { + pub main_server: RpcConfig, + pub speculative_exec_server: Option, + pub node_client: NodeClientConfigTarget, +} + +impl TryFrom for RpcServerConfig { + type Error = FieldParseError; + fn try_from(value: RpcServerConfigTarget) -> Result { + let node_client = value.node_client.try_into().map_err(|e: FieldParseError| { + FieldParseError::ParseError { + field_name: "node_client", + error: e.to_string(), + } + })?; + Ok(RpcServerConfig { + main_server: value.main_server, + speculative_exec_server: value.speculative_exec_server, + node_client, + }) + } +} + +#[derive(Error, Debug)] +pub enum FieldParseError { + #[error("failed to parse field {} with error: {}", .field_name, .error)] + ParseError { + field_name: &'static str, + error: String, + }, +} + +#[derive(Clone, Debug, Deserialize, PartialEq, Eq)] +// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. +#[serde(deny_unknown_fields)] +#[cfg_attr(any(feature = "testing", test), derive(Default))] +pub struct RpcServerConfig { + pub main_server: RpcConfig, + pub speculative_exec_server: Option, + pub node_client: NodeClientConfig, +} + +/// JSON-RPC HTTP server configuration. +#[derive(Clone, DataSize, Debug, Deserialize, PartialEq, Eq)] +// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. +#[serde(deny_unknown_fields)] +pub struct RpcConfig { + /// Setting to enable the HTTP server. + pub enable_server: bool, + /// Address to bind JSON-RPC HTTP server to. + pub address: String, + /// Maximum rate limit in queries per second. + pub qps_limit: u64, + /// Maximum number of bytes to accept in a single request body. + pub max_body_bytes: u32, + /// CORS origin. + pub cors_origin: String, +} + +impl RpcConfig { + /// Creates a default instance for `RpcServer`. 
+ pub fn new() -> Self { + RpcConfig { + enable_server: true, + address: DEFAULT_ADDRESS.to_string(), + qps_limit: DEFAULT_QPS_LIMIT, + max_body_bytes: DEFAULT_MAX_BODY_BYTES, + cors_origin: DEFAULT_CORS_ORIGIN.to_string(), + } + } +} + +impl Default for RpcConfig { + fn default() -> Self { + RpcConfig::new() + } +} + +/// Default address to connect to the node. +// Change this to SocketAddr, once SocketAddr::new is const stable. +const DEFAULT_NODE_CONNECT_ADDRESS: (IpAddr, u16) = (IpAddr::V4(Ipv4Addr::LOCALHOST), 28104); +/// Default maximum payload size. +const DEFAULT_MAX_NODE_PAYLOAD_SIZE: u32 = 4 * 1024 * 1024; +/// Default request limit. +const DEFAULT_NODE_REQUEST_LIMIT: u16 = 3; +/// Default request buffer size. +const DEFAULT_REQUEST_BUFFER_SIZE: usize = 16; +/// Default exponential backoff base delay. +const DEFAULT_EXPONENTIAL_BACKOFF_BASE_MS: u64 = 1000; +/// Default exponential backoff maximum delay. +const DEFAULT_EXPONENTIAL_BACKOFF_MAX_MS: u64 = 64_000; +/// Default exponential backoff coefficient. +const DEFAULT_EXPONENTIAL_BACKOFF_COEFFICIENT: u64 = 2; + +/// Node client configuration. +#[derive(Clone, DataSize, Debug, Deserialize, PartialEq, Eq)] +// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. +#[serde(deny_unknown_fields)] +pub struct NodeClientConfig { + /// Address of the node. + pub address: SocketAddr, + /// Maximum size of a request in bytes. + pub max_request_size_bytes: u32, + /// Maximum size of a response in bytes. + pub max_response_size_bytes: u32, + /// Maximum number of in-flight node requests. + pub request_limit: u16, + /// Number of node requests that can be buffered. + pub request_buffer_size: usize, + /// Configuration for exponential backoff to be used for re-connects. + pub exponential_backoff: ExponentialBackoffConfig, +} + +impl NodeClientConfig { + /// Creates a default instance for `NodeClientConfig`. 
+ pub fn new() -> Self { + NodeClientConfig { + address: DEFAULT_NODE_CONNECT_ADDRESS.into(), + request_limit: DEFAULT_NODE_REQUEST_LIMIT, + max_request_size_bytes: DEFAULT_MAX_NODE_PAYLOAD_SIZE, + max_response_size_bytes: DEFAULT_MAX_NODE_PAYLOAD_SIZE, + request_buffer_size: DEFAULT_REQUEST_BUFFER_SIZE, + exponential_backoff: ExponentialBackoffConfig { + initial_delay_ms: DEFAULT_EXPONENTIAL_BACKOFF_BASE_MS, + max_delay_ms: DEFAULT_EXPONENTIAL_BACKOFF_MAX_MS, + coefficient: DEFAULT_EXPONENTIAL_BACKOFF_COEFFICIENT, + max_attempts: MaxAttempts::Infinite, + }, + } + } + + #[cfg(test)] + pub fn finite_retries_config(port: u16, num_of_retries: usize) -> Self { + let local_socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port); + NodeClientConfig { + address: local_socket, + request_limit: DEFAULT_NODE_REQUEST_LIMIT, + max_request_size_bytes: DEFAULT_MAX_NODE_PAYLOAD_SIZE, + max_response_size_bytes: DEFAULT_MAX_NODE_PAYLOAD_SIZE, + request_buffer_size: DEFAULT_REQUEST_BUFFER_SIZE, + exponential_backoff: ExponentialBackoffConfig { + initial_delay_ms: 500, + max_delay_ms: 3000, + coefficient: 3, + max_attempts: MaxAttempts::Finite(num_of_retries), + }, + } + } +} + +impl Default for NodeClientConfig { + fn default() -> Self { + Self::new() + } +} + +/// Node client configuration. +#[derive(Clone, DataSize, Debug, Deserialize, PartialEq, Eq)] +// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. +#[serde(deny_unknown_fields)] +pub struct NodeClientConfigTarget { + /// Address of the node. + pub address: SocketAddr, + /// Maximum size of a request in bytes. + pub max_request_size_bytes: u32, + /// Maximum size of a response in bytes. + pub max_response_size_bytes: u32, + /// Maximum number of in-flight node requests. + pub request_limit: u16, + /// Number of node requests that can be buffered. + pub request_buffer_size: usize, + /// Configuration for exponential backoff to be used for re-connects. 
+ pub exponential_backoff: ExponentialBackoffConfigTarget, +} + +impl TryFrom for NodeClientConfig { + type Error = FieldParseError; + fn try_from(value: NodeClientConfigTarget) -> Result { + let exponential_backoff = + value + .exponential_backoff + .try_into() + .map_err(|e: FieldParseError| FieldParseError::ParseError { + field_name: "exponential_backoff", + error: e.to_string(), + })?; + Ok(NodeClientConfig { + address: value.address, + request_limit: value.request_limit, + max_request_size_bytes: value.max_request_size_bytes, + max_response_size_bytes: value.max_response_size_bytes, + request_buffer_size: value.request_buffer_size, + exponential_backoff, + }) + } +} + +/// Exponential backoff configuration for re-connects. +#[derive(Clone, DataSize, Debug, Deserialize, PartialEq, Eq)] +// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. +#[serde(deny_unknown_fields)] +pub struct ExponentialBackoffConfig { + /// Initial wait time before the first re-connect attempt. + pub initial_delay_ms: u64, + /// Maximum wait time between re-connect attempts. + pub max_delay_ms: u64, + /// The multiplier to apply to the previous delay to get the next delay. + pub coefficient: u64, + /// Maximum number of connection attempts. + pub max_attempts: MaxAttempts, +} + +/// Exponential backoff configuration for re-connects. +#[derive(Clone, DataSize, Debug, Deserialize, PartialEq, Eq)] +// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. +#[serde(deny_unknown_fields)] +pub struct ExponentialBackoffConfigTarget { + /// Initial wait time before the first re-connect attempt. + pub initial_delay_ms: u64, + /// Maximum wait time between re-connect attempts. + pub max_delay_ms: u64, + /// The multiplier to apply to the previous delay to get the next delay. + pub coefficient: u64, + /// Maximum number of re-connect attempts. 
+ pub max_attempts: MaxAttemptsTarget, +} + +impl TryFrom for ExponentialBackoffConfig { + type Error = FieldParseError; + fn try_from(value: ExponentialBackoffConfigTarget) -> Result { + let max_attempts = value + .max_attempts + .try_into() + .map_err(|e: MaxAttemptsError| FieldParseError::ParseError { + field_name: "max_attempts", + error: e.to_string(), + })?; + Ok(ExponentialBackoffConfig { + initial_delay_ms: value.initial_delay_ms, + max_delay_ms: value.max_delay_ms, + coefficient: value.coefficient, + max_attempts, + }) + } +} + +#[derive(Clone, DataSize, Debug, Deserialize, PartialEq, Eq)] +pub enum MaxAttempts { + Infinite, + Finite(usize), +} + +impl MaxAttempts { + pub fn can_attempt(&self, current_attempt: usize) -> bool { + match self { + MaxAttempts::Infinite => true, + MaxAttempts::Finite(max_attempts) => *max_attempts >= current_attempt, + } + } +} + +#[derive(Clone, DataSize, Debug, Deserialize, PartialEq, Eq)] +#[serde(untagged)] +pub enum MaxAttemptsTarget { + StringBased(String), + UsizeBased(usize), +} + +impl TryFrom for MaxAttempts { + type Error = MaxAttemptsError; + fn try_from(value: MaxAttemptsTarget) -> Result { + match value { + MaxAttemptsTarget::StringBased(s) => { + if s == "infinite" { + Ok(MaxAttempts::Infinite) + } else { + Err(MaxAttemptsError::UnexpectedValue(s)) + } + } + MaxAttemptsTarget::UsizeBased(u) => { + if u == 0 { + Err(MaxAttemptsError::UnexpectedValue(u.to_string())) + } else { + Ok(MaxAttempts::Finite(u)) + } + } + } + } +} + +#[derive(Error, Debug)] +pub enum MaxAttemptsError { + #[error("Max attempts must be either 'infinite' or a integer > 0. 
Got: {}", .0)] + UnexpectedValue(String), +} + +#[cfg(test)] +mod tests { + use super::*; + #[test] + fn test_should_deserialize_infinite() { + let json = r#""infinite""#.to_string(); + let deserialized: MaxAttempts = serde_json::from_str::(&json) + .unwrap() + .try_into() + .unwrap(); + assert_eq!(deserialized, MaxAttempts::Infinite); + } + + #[test] + fn test_should_deserialize_finite() { + let json = r#"125"#.to_string(); + let deserialized: MaxAttempts = serde_json::from_str::(&json) + .unwrap() + .try_into() + .unwrap(); + assert_eq!(deserialized, MaxAttempts::Finite(125)); + } + + #[test] + fn test_should_fail_on_other_inputs() { + assert_failing_deserialization(r#""x""#); + assert_failing_deserialization(r#""infiniteee""#); + assert_failing_deserialization(r#""infinite ""#); + assert_failing_deserialization(r#"" infinite""#); + let deserialized = serde_json::from_str::(r#"-1"#); + assert!(deserialized.is_err()); + } + + fn assert_failing_deserialization(input: &str) { + let deserialized: Result = + serde_json::from_str::(input) + .unwrap() + .try_into(); + assert!(deserialized.is_err(), "input = {}", input); + } +} diff --git a/rpc_sidecar/src/http_server.rs b/rpc_sidecar/src/http_server.rs new file mode 100644 index 00000000..8fc61ddf --- /dev/null +++ b/rpc_sidecar/src/http_server.rs @@ -0,0 +1,101 @@ +use std::sync::Arc; + +use hyper::server::{conn::AddrIncoming, Builder}; + +use casper_json_rpc::{CorsOrigin, RequestHandlersBuilder}; + +use crate::{ + rpcs::info::{GetPeers, GetStatus, GetTransaction}, + NodeClient, +}; + +use super::rpcs::{ + account::{PutDeploy, PutTransaction}, + chain::{ + GetBlock, GetBlockTransfers, GetEraInfoBySwitchBlock, GetEraSummary, GetStateRootHash, + }, + docs::ListRpcs, + info::{GetChainspec, GetDeploy, GetValidatorChanges}, + state::{ + GetAccountInfo, GetAuctionInfo, GetBalance, GetDictionaryItem, GetItem, GetTrie, + QueryBalance, QueryGlobalState, + }, + RpcWithOptionalParams, RpcWithParams, RpcWithoutParams, +}; + +/// 
The URL path for all JSON-RPC requests. +pub const RPC_API_PATH: &str = "rpc"; + +pub const RPC_API_SERVER_NAME: &str = "JSON RPC"; + +/// Run the JSON-RPC server. +pub async fn run( + node: Arc, + builder: Builder, + qps_limit: u64, + max_body_bytes: u32, + cors_origin: String, +) { + let mut handlers = RequestHandlersBuilder::new(); + PutDeploy::register_as_handler(node.clone(), &mut handlers); + PutTransaction::register_as_handler(node.clone(), &mut handlers); + GetBlock::register_as_handler(node.clone(), &mut handlers); + GetBlockTransfers::register_as_handler(node.clone(), &mut handlers); + GetStateRootHash::register_as_handler(node.clone(), &mut handlers); + GetItem::register_as_handler(node.clone(), &mut handlers); + QueryGlobalState::register_as_handler(node.clone(), &mut handlers); + GetBalance::register_as_handler(node.clone(), &mut handlers); + GetAccountInfo::register_as_handler(node.clone(), &mut handlers); + GetDeploy::register_as_handler(node.clone(), &mut handlers); + GetTransaction::register_as_handler(node.clone(), &mut handlers); + GetPeers::register_as_handler(node.clone(), &mut handlers); + GetStatus::register_as_handler(node.clone(), &mut handlers); + GetEraInfoBySwitchBlock::register_as_handler(node.clone(), &mut handlers); + GetEraSummary::register_as_handler(node.clone(), &mut handlers); + GetAuctionInfo::register_as_handler(node.clone(), &mut handlers); + GetTrie::register_as_handler(node.clone(), &mut handlers); + GetValidatorChanges::register_as_handler(node.clone(), &mut handlers); + ListRpcs::register_as_handler(node.clone(), &mut handlers); + GetDictionaryItem::register_as_handler(node.clone(), &mut handlers); + GetChainspec::register_as_handler(node.clone(), &mut handlers); + QueryBalance::register_as_handler(node, &mut handlers); + let handlers = handlers.build(); + + match cors_origin.as_str() { + "" => { + super::rpcs::run( + builder, + handlers, + qps_limit, + max_body_bytes, + RPC_API_PATH, + RPC_API_SERVER_NAME, + ) + .await + 
} + "*" => { + super::rpcs::run_with_cors( + builder, + handlers, + qps_limit, + max_body_bytes, + RPC_API_PATH, + RPC_API_SERVER_NAME, + CorsOrigin::Any, + ) + .await + } + _ => { + super::rpcs::run_with_cors( + builder, + handlers, + qps_limit, + max_body_bytes, + RPC_API_PATH, + RPC_API_SERVER_NAME, + CorsOrigin::Specified(cors_origin), + ) + .await + } + } +} diff --git a/rpc_sidecar/src/lib.rs b/rpc_sidecar/src/lib.rs new file mode 100644 index 00000000..0a3035aa --- /dev/null +++ b/rpc_sidecar/src/lib.rs @@ -0,0 +1,243 @@ +mod config; +mod http_server; +mod node_client; +mod rpcs; +mod speculative_exec_config; +mod speculative_exec_server; +#[cfg(test)] +pub(crate) mod testing; + +use anyhow::Error; +use casper_types_ver_2_0::ProtocolVersion; +pub use config::{FieldParseError, RpcServerConfig, RpcServerConfigTarget}; +pub use config::{NodeClientConfig, RpcConfig}; +use futures::FutureExt; +pub use http_server::run as run_rpc_server; +use hyper::{ + server::{conn::AddrIncoming, Builder as ServerBuilder}, + Server, +}; +pub use node_client::{Error as ClientError, JulietNodeClient, NodeClient}; +pub use speculative_exec_config::Config as SpeculativeExecConfig; +pub use speculative_exec_server::run as run_speculative_exec_server; +use std::process::ExitCode; +use std::{ + net::{SocketAddr, ToSocketAddrs}, + sync::Arc, +}; +use tracing::warn; + +/// Minimal casper protocol version supported by this sidecar. +pub const SUPPORTED_PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion::from_parts(1, 5, 4); + +/// The exit code is used to indicate that the client has shut down due to version mismatch. 
+pub const CLIENT_SHUTDOWN_EXIT_CODE: u8 = 0x3; + +pub async fn start_rpc_server(config: &RpcServerConfig) -> Result { + let (node_client, client_loop) = JulietNodeClient::new(config.node_client.clone()).await?; + let node_client: Arc = Arc::new(node_client); + + let rpc_server = config + .main_server + .enable_server + .then(|| run_rpc(&config.main_server, node_client.clone()).boxed()) + .unwrap_or_else(|| std::future::pending().boxed()); + + let spec_exec_server = config + .speculative_exec_server + .as_ref() + .filter(|conf| conf.enable_server) + .map_or_else( + || std::future::pending().boxed(), + |conf| run_speculative_exec(conf, node_client.clone()).boxed(), + ); + + tokio::select! { + result = rpc_server => result.map(|()| ExitCode::SUCCESS), + result = spec_exec_server => result.map(|()| ExitCode::SUCCESS), + result = client_loop => result.map(|()| ExitCode::from(CLIENT_SHUTDOWN_EXIT_CODE)), + } +} + +async fn run_rpc(config: &RpcConfig, node_client: Arc) -> Result<(), Error> { + run_rpc_server( + node_client, + start_listening(&config.address)?, + config.qps_limit, + config.max_body_bytes, + config.cors_origin.clone(), + ) + .await; + Ok(()) +} + +async fn run_speculative_exec( + config: &SpeculativeExecConfig, + node_client: Arc, +) -> anyhow::Result<()> { + run_speculative_exec_server( + node_client, + start_listening(&config.address)?, + config.qps_limit, + config.max_body_bytes, + config.cors_origin.clone(), + ) + .await; + Ok(()) +} + +fn start_listening(address: &str) -> anyhow::Result> { + let address = resolve_address(address).map_err(|error| { + warn!(%error, %address, "failed to start HTTP server, cannot parse address"); + error + })?; + + Server::try_bind(&address).map_err(|error| { + warn!(%error, %address, "failed to start HTTP server"); + error.into() + }) +} + +/// Parses a network address from a string, with DNS resolution. +fn resolve_address(address: &str) -> anyhow::Result { + address + .to_socket_addrs()? 
+ .next() + .ok_or_else(|| anyhow::anyhow!("failed to resolve address")) +} + +#[cfg(test)] +mod tests { + use std::fs; + + use assert_json_diff::{assert_json_eq, assert_json_matches_no_panic, CompareMode, Config}; + use regex::Regex; + use serde_json::Value; + use std::io::Write; + + use crate::rpcs::docs::OPEN_RPC_SCHEMA; + + use crate::rpcs::{ + docs::OpenRpcSchema, + info::{GetChainspecResult, GetStatusResult, GetValidatorChangesResult}, + }; + use schemars::schema_for; + + #[test] + fn json_schema_check() { + let schema_path = format!( + "{}/../resources/test/rpc_schema.json", + env!("CARGO_MANIFEST_DIR") + ); + assert_schema( + &schema_path, + &serde_json::to_string_pretty(&*OPEN_RPC_SCHEMA).unwrap(), + ); + + let schema = fs::read_to_string(&schema_path).unwrap(); + + // Check for the following pattern in the JSON as this points to a byte array or vec (e.g. + // a hash digest) not being represented as a hex-encoded string: + // + // ```json + // "type": "array", + // "items": { + // "type": "integer", + // "format": "uint8", + // "minimum": 0.0 + // }, + // ``` + // + // The type/variant in question (most easily identified from the git diff) might be easily + // fixed via application of a serde attribute, e.g. + // `#[serde(with = "serde_helpers::raw_32_byte_array")]`. It will likely require a + // schemars attribute too, indicating it is a hex-encoded string. See for example + // `TransactionInvocationTarget::Package::addr`. 
+ let regex = Regex::new( + r#"\s*"type":\s*"array",\s*"items":\s*\{\s*"type":\s*"integer",\s*"format":\s*"uint8",\s*"minimum":\s*0\.0\s*\},"# + ).unwrap(); + assert!( + !regex.is_match(&schema), + "seems like a byte array is not hex-encoded - see comment in `json_schema_check` for \ + further info" + ); + } + + #[test] + fn json_schema_status_check() { + let schema_path = format!( + "{}/../resources/test/schema_status.json", + env!("CARGO_MANIFEST_DIR") + ); + assert_schema( + &schema_path, + &serde_json::to_string_pretty(&schema_for!(GetStatusResult)).unwrap(), + ); + } + + #[test] + fn json_schema_validator_changes_check() { + let schema_path = format!( + "{}/../resources/test/schema_validator_changes.json", + env!("CARGO_MANIFEST_DIR") + ); + assert_schema( + &schema_path, + &serde_json::to_string_pretty(&schema_for!(GetValidatorChangesResult)).unwrap(), + ); + } + + #[test] + fn json_schema_rpc_schema_check() { + let schema_path = format!( + "{}/../resources/test/schema_rpc_schema.json", + env!("CARGO_MANIFEST_DIR") + ); + assert_schema( + &schema_path, + &serde_json::to_string_pretty(&schema_for!(OpenRpcSchema)).unwrap(), + ); + } + + #[test] + fn json_schema_chainspec_bytes_check() { + let schema_path = format!( + "{}/../resources/test/schema_chainspec_bytes.json", + env!("CARGO_MANIFEST_DIR") + ); + assert_schema( + &schema_path, + &serde_json::to_string_pretty(&schema_for!(GetChainspecResult)).unwrap(), + ); + } + + /// Assert that the file at `schema_path` matches the provided `actual_schema`, which can be + /// derived from `schemars::schema_for!` or `schemars::schema_for_value!`, for example. This + /// method will create a temporary file with the actual schema and print the location if it + /// fails. 
+ pub fn assert_schema(schema_path: &str, actual_schema: &str) { + let expected_schema = fs::read_to_string(schema_path).unwrap(); + let expected_schema: Value = serde_json::from_str(&expected_schema).unwrap(); + let mut temp_file = tempfile::Builder::new() + .suffix(".json") + .tempfile_in(env!("OUT_DIR")) + .unwrap(); + temp_file.write_all(actual_schema.as_bytes()).unwrap(); + let actual_schema: Value = serde_json::from_str(actual_schema).unwrap(); + let (_file, temp_file_path) = temp_file.keep().unwrap(); + + let result = assert_json_matches_no_panic( + &actual_schema, + &expected_schema, + Config::new(CompareMode::Strict), + ); + assert_eq!( + result, + Ok(()), + "schema does not match:\nexpected:\n{}\nactual:\n{}\n", + schema_path, + temp_file_path.display() + ); + assert_json_eq!(actual_schema, expected_schema); + } +} diff --git a/rpc_sidecar/src/node_client.rs b/rpc_sidecar/src/node_client.rs new file mode 100644 index 00000000..29f4bf16 --- /dev/null +++ b/rpc_sidecar/src/node_client.rs @@ -0,0 +1,612 @@ +use anyhow::Error as AnyhowError; +use async_trait::async_trait; +use serde::de::DeserializeOwned; +use std::{ + convert::{TryFrom, TryInto}, + future::Future, + net::SocketAddr, + sync::Arc, + time::Duration, +}; + +use crate::{config::ExponentialBackoffConfig, NodeClientConfig, SUPPORTED_PROTOCOL_VERSION}; +use casper_types_ver_2_0::{ + binary_port::{ + BinaryRequest, BinaryRequestHeader, BinaryResponse, BinaryResponseAndRequest, + ConsensusValidatorChanges, ErrorCode as BinaryPortError, GetRequest, GetTrieFullResult, + GlobalStateQueryResult, GlobalStateRequest, InformationRequest, NodeStatus, PayloadEntity, + RecordId, SpeculativeExecutionResult, TransactionWithExecutionInfo, + }, + bytesrepr::{self, FromBytes, ToBytes}, + AvailableBlockRange, BlockHash, BlockHeader, BlockIdentifier, ChainspecRawBytes, Digest, + GlobalStateIdentifier, Key, KeyTag, Peers, ProtocolVersion, SignedBlock, StoredValue, + Timestamp, Transaction, TransactionHash, Transfer, 
+}; +use juliet::{ + io::IoCoreBuilder, + protocol::ProtocolBuilder, + rpc::{JulietRpcClient, JulietRpcServer, RpcBuilder}, + ChannelConfiguration, ChannelId, +}; +use tokio::{ + net::{ + tcp::{OwnedReadHalf, OwnedWriteHalf}, + TcpStream, + }, + sync::{Notify, RwLock}, +}; +use tracing::{error, info, warn}; + +#[async_trait] +pub trait NodeClient: Send + Sync { + async fn send_request(&self, req: BinaryRequest) -> Result; + + async fn read_record( + &self, + record_id: RecordId, + key: &[u8], + ) -> Result { + let get = GetRequest::Record { + record_type_tag: record_id.into(), + key: key.to_vec(), + }; + self.send_request(BinaryRequest::Get(get)).await + } + + async fn read_info(&self, req: InformationRequest) -> Result { + let get = req.try_into().expect("should always be able to convert"); + self.send_request(BinaryRequest::Get(get)).await + } + + async fn query_global_state( + &self, + state_identifier: Option, + base_key: Key, + path: Vec, + ) -> Result, Error> { + let req = GlobalStateRequest::Item { + state_identifier, + base_key, + path, + }; + let resp = self + .send_request(BinaryRequest::Get(GetRequest::State(req))) + .await?; + parse_response::(&resp.into()) + } + + async fn query_global_state_by_tag( + &self, + state_identifier: Option, + key_tag: KeyTag, + ) -> Result, Error> { + let get = GlobalStateRequest::AllItems { + state_identifier, + key_tag, + }; + let resp = self + .send_request(BinaryRequest::Get(GetRequest::State(get))) + .await?; + parse_response::>(&resp.into())?.ok_or(Error::EmptyEnvelope) + } + + async fn read_trie_bytes(&self, trie_key: Digest) -> Result>, Error> { + let req = GlobalStateRequest::Trie { trie_key }; + let resp = self + .send_request(BinaryRequest::Get(GetRequest::State(req))) + .await?; + let res = parse_response::(&resp.into())?.ok_or(Error::EmptyEnvelope)?; + Ok(res.into_inner().map(>::from)) + } + + async fn try_accept_transaction(&self, transaction: Transaction) -> Result<(), Error> { + let request = 
BinaryRequest::TryAcceptTransaction { transaction }; + let response = self.send_request(request).await?; + + if response.is_success() { + return Ok(()); + } else { + return Err(Error::from_error_code(response.error_code())); + } + } + + async fn exec_speculatively( + &self, + state_root_hash: Digest, + block_time: Timestamp, + protocol_version: ProtocolVersion, + transaction: Transaction, + exec_at_block: BlockHeader, + ) -> Result { + let request = BinaryRequest::TrySpeculativeExec { + transaction, + state_root_hash, + block_time, + protocol_version, + speculative_exec_at_block: exec_at_block, + }; + let resp = self.send_request(request).await?; + parse_response::(&resp.into())?.ok_or(Error::EmptyEnvelope) + } + + async fn read_block_transfers(&self, hash: BlockHash) -> Result>, Error> { + let key = hash.to_bytes().expect("should always serialize a digest"); + let resp = self.read_record(RecordId::Transfer, &key).await?; + parse_response_bincode::>(&resp.into()) + } + + async fn read_block_header( + &self, + block_identifier: Option, + ) -> Result, Error> { + let resp = self + .read_info(InformationRequest::BlockHeader(block_identifier)) + .await?; + parse_response::(&resp.into()) + } + + async fn read_signed_block( + &self, + block_identifier: Option, + ) -> Result, Error> { + let resp = self + .read_info(InformationRequest::SignedBlock(block_identifier)) + .await?; + parse_response::(&resp.into()) + } + + async fn read_transaction_with_execution_info( + &self, + transaction_hash: TransactionHash, + ) -> Result, Error> { + let resp = self + .read_info(InformationRequest::Transaction(transaction_hash)) + .await?; + parse_response::(&resp.into()) + } + + async fn read_peers(&self) -> Result { + let resp = self.read_info(InformationRequest::Peers).await?; + parse_response::(&resp.into())?.ok_or(Error::EmptyEnvelope) + } + + async fn read_available_block_range(&self) -> Result { + let resp = self + .read_info(InformationRequest::AvailableBlockRange) + .await?; + 
parse_response::(&resp.into())?.ok_or(Error::EmptyEnvelope) + } + + async fn read_chainspec_bytes(&self) -> Result { + let resp = self + .read_info(InformationRequest::ChainspecRawBytes) + .await?; + parse_response::(&resp.into())?.ok_or(Error::EmptyEnvelope) + } + + async fn read_validator_changes(&self) -> Result { + let resp = self + .read_info(InformationRequest::ConsensusValidatorChanges) + .await?; + parse_response::(&resp.into())?.ok_or(Error::EmptyEnvelope) + } + + async fn read_node_status(&self) -> Result { + let resp = self.read_info(InformationRequest::NodeStatus).await?; + parse_response::(&resp.into())?.ok_or(Error::EmptyEnvelope) + } +} + +#[derive(Debug, thiserror::Error, PartialEq, Eq)] +pub enum Error { + #[error("request error: {0}")] + RequestFailed(String), + #[error("failed to deserialize the envelope of a response: {0}")] + EnvelopeDeserialization(String), + #[error("failed to deserialize a response: {0}")] + Deserialization(String), + #[error("failed to serialize a request: {0}")] + Serialization(String), + #[error("unexpectedly received no response body")] + NoResponseBody, + #[error("unexpectedly received an empty envelope")] + EmptyEnvelope, + #[error("unexpected payload variant received in the response: {0}")] + UnexpectedVariantReceived(u8), + #[error("attempted to use a function that's disabled on the node")] + FunctionIsDisabled, + #[error("could not find the provided state root hash")] + UnknownStateRootHash, + #[error("the provided global state query failed to execute")] + QueryFailedToExecute, + #[error("could not execute the provided transaction")] + InvalidTransaction, + #[error("speculative execution has failed: {0}")] + SpecExecutionFailed(String), + #[error("received a response with an unsupported protocol version: {0}")] + UnsupportedProtocolVersion(ProtocolVersion), + #[error("received an unexpected node error: {message} ({code})")] + UnexpectedNodeError { message: String, code: u8 }, +} + +impl Error { + fn 
from_error_code(code: u8) -> Self { + match BinaryPortError::try_from(code) { + Ok(BinaryPortError::FunctionDisabled) => Self::FunctionIsDisabled, + Ok(BinaryPortError::InvalidTransaction) => Self::InvalidTransaction, + Ok(BinaryPortError::RootNotFound) => Self::UnknownStateRootHash, + Ok(BinaryPortError::QueryFailedToExecute) => Self::QueryFailedToExecute, + Ok( + err @ (BinaryPortError::WasmPreprocessing + | BinaryPortError::InvalidDeployItemVariant), + ) => Self::SpecExecutionFailed(err.to_string()), + Ok(err) => Self::UnexpectedNodeError { + message: err.to_string(), + code, + }, + Err(err) => Self::UnexpectedNodeError { + message: err.to_string(), + code, + }, + } + } +} + +const CHANNEL_COUNT: usize = 1; + +#[derive(Debug)] +pub struct JulietNodeClient { + client: Arc>>, + shutdown: Arc, +} + +impl JulietNodeClient { + pub async fn new( + config: NodeClientConfig, + ) -> Result<(Self, impl Future>), AnyhowError> { + let protocol_builder = ProtocolBuilder::<1>::with_default_channel_config( + ChannelConfiguration::default() + .with_request_limit(config.request_limit) + .with_max_request_payload_size(config.max_request_size_bytes) + .with_max_response_payload_size(config.max_response_size_bytes), + ); + let io_builder = IoCoreBuilder::new(protocol_builder) + .buffer_size(ChannelId::new(0), config.request_buffer_size); + let rpc_builder = RpcBuilder::new(io_builder); + + let stream = + Self::connect_with_retries(config.address, &config.exponential_backoff).await?; + let (reader, writer) = stream.into_split(); + let (client, server) = rpc_builder.build(reader, writer); + let client = Arc::new(RwLock::new(client)); + let shutdown = Arc::new(Notify::new()); + let server_loop = Self::server_loop( + config.address, + config.exponential_backoff.clone(), + rpc_builder, + Arc::clone(&client), + server, + shutdown.clone(), + ); + + Ok((Self { client, shutdown }, server_loop)) + } + + async fn server_loop( + addr: SocketAddr, + config: ExponentialBackoffConfig, + 
rpc_builder: RpcBuilder, + client: Arc>>, + mut server: JulietRpcServer, + shutdown: Arc, + ) -> Result<(), AnyhowError> { + loop { + tokio::select! { + req = server.next_request() => match req { + Ok(None) | Err(_) => { + error!("node connection closed, will attempt to reconnect"); + let (reader, writer) = + Self::connect_with_retries(addr, &config).await?.into_split(); + let (new_client, new_server) = rpc_builder.build(reader, writer); + + info!("connection with the node has been re-established"); + *client.write().await = new_client; + server = new_server; + } + Ok(Some(_)) => { + error!("node client received a request from the node, it's going to be ignored") + } + }, + _ = shutdown.notified() => { + info!("node client shutdown has been requested"); + return Ok(()) + } + } + } + } + + async fn connect_with_retries( + addr: SocketAddr, + config: &ExponentialBackoffConfig, + ) -> Result { + let mut wait = config.initial_delay_ms; + let mut current_attempt = 1; + loop { + match TcpStream::connect(addr).await { + Ok(server) => return Ok(server), + Err(err) => { + warn!(%err, "failed to connect to the node, waiting {wait}ms before retrying"); + current_attempt += 1; + if !config.max_attempts.can_attempt(current_attempt) { + anyhow::bail!( + "Couldn't connect to node {} after {} attempts", + addr, + current_attempt - 1 + ); + } + tokio::time::sleep(Duration::from_millis(wait)).await; + wait = (wait * config.coefficient).min(config.max_delay_ms); + } + } + } + } +} + +#[async_trait] +impl NodeClient for JulietNodeClient { + async fn send_request(&self, req: BinaryRequest) -> Result { + let payload = encode_request(&req).expect("should always serialize a request"); + let request_guard = self + .client + .read() + .await + .create_request(ChannelId::new(0)) + .with_payload(payload.into()) + .queue_for_sending() + .await; + let response = request_guard + .wait_for_response() + .await + .map_err(|err| Error::RequestFailed(err.to_string()))? 
+ .ok_or(Error::NoResponseBody)?; + let resp = bytesrepr::deserialize_from_slice(&response) + .map_err(|err| Error::EnvelopeDeserialization(err.to_string()))?; + handle_response(resp, &self.shutdown) + } +} + +fn handle_response( + resp: BinaryResponseAndRequest, + shutdown: &Notify, +) -> Result { + let version = resp.response().protocol_version(); + + if version.is_compatible_with(&SUPPORTED_PROTOCOL_VERSION) { + Ok(resp) + } else { + info!("received a response with incompatible major version from the node {version}, shutting down"); + shutdown.notify_one(); + Err(Error::UnsupportedProtocolVersion(version)) + } +} + +fn encode_request(req: &BinaryRequest) -> Result, bytesrepr::Error> { + let header = BinaryRequestHeader::new(SUPPORTED_PROTOCOL_VERSION, req.tag()); + let mut bytes = Vec::with_capacity(header.serialized_length() + req.serialized_length()); + header.write_bytes(&mut bytes)?; + req.write_bytes(&mut bytes)?; + Ok(bytes) +} + +fn parse_response(resp: &BinaryResponse) -> Result, Error> +where + A: FromBytes + PayloadEntity, +{ + if resp.is_not_found() { + return Ok(None); + } + if !resp.is_success() { + return Err(Error::from_error_code(resp.error_code())); + } + match resp.returned_data_type_tag() { + Some(found) if found == u8::from(A::PAYLOAD_TYPE) => { + bytesrepr::deserialize_from_slice(resp.payload()) + .map(Some) + .map_err(|err| Error::Deserialization(err.to_string())) + } + Some(other) => Err(Error::UnexpectedVariantReceived(other)), + _ => Ok(None), + } +} + +fn parse_response_bincode(resp: &BinaryResponse) -> Result, Error> +where + A: DeserializeOwned + PayloadEntity, +{ + if resp.is_not_found() { + return Ok(None); + } + if !resp.is_success() { + return Err(Error::from_error_code(resp.error_code())); + } + match resp.returned_data_type_tag() { + Some(found) if found == u8::from(A::PAYLOAD_TYPE) => bincode::deserialize(resp.payload()) + .map(Some) + .map_err(|err| Error::Deserialization(err.to_string())), + Some(other) => 
Err(Error::UnexpectedVariantReceived(other)), + _ => Ok(None), + } +} + +#[cfg(test)] +mod tests { + use crate::testing::BinaryPortMock; + + use super::*; + use casper_types_ver_2_0::testing::TestRng; + use casper_types_ver_2_0::{CLValue, SemVer}; + use futures::FutureExt; + use tokio::task::JoinHandle; + use tokio::time::sleep; + + #[tokio::test] + async fn should_reject_bad_major_version() { + let notify = Notify::new(); + let bad_version = ProtocolVersion::from_parts(10, 0, 0); + + let result = handle_response( + BinaryResponseAndRequest::new( + BinaryResponse::from_value(AvailableBlockRange::RANGE_0_0, bad_version), + &[], + ), + ¬ify, + ); + + assert_eq!(result, Err(Error::UnsupportedProtocolVersion(bad_version))); + assert_eq!(notify.notified().now_or_never(), Some(())) + } + + #[tokio::test] + async fn should_accept_different_minor_version() { + let notify = Notify::new(); + let version = ProtocolVersion::new(SemVer { + minor: SUPPORTED_PROTOCOL_VERSION.value().minor + 1, + ..SUPPORTED_PROTOCOL_VERSION.value() + }); + + let result = handle_response( + BinaryResponseAndRequest::new( + BinaryResponse::from_value(AvailableBlockRange::RANGE_0_0, version), + &[], + ), + ¬ify, + ); + + assert_eq!( + result, + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(AvailableBlockRange::RANGE_0_0, version), + &[], + )) + ); + assert_eq!(notify.notified().now_or_never(), None) + } + + #[tokio::test] + async fn should_accept_different_patch_version() { + let notify = Notify::new(); + let version = ProtocolVersion::new(SemVer { + patch: SUPPORTED_PROTOCOL_VERSION.value().patch + 1, + ..SUPPORTED_PROTOCOL_VERSION.value() + }); + + let result = handle_response( + BinaryResponseAndRequest::new( + BinaryResponse::from_value(AvailableBlockRange::RANGE_0_0, version), + &[], + ), + ¬ify, + ); + + assert_eq!( + result, + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(AvailableBlockRange::RANGE_0_0, version), + &[], + )) + ); + 
assert_eq!(notify.notified().now_or_never(), None) + } + + #[tokio::test] + async fn given_client_and_no_node_should_fail_after_tries() { + let config = NodeClientConfig::finite_retries_config(1111, 2); + let res = JulietNodeClient::new(config).await; + + assert!(res.is_err()); + let error_message = res.err().unwrap().to_string(); + + assert!(error_message.starts_with("Couldn't connect to node")); + assert!(error_message.ends_with(" after 2 attempts")); + } + + #[tokio::test] + async fn given_client_and_node_should_connect_and_do_request() { + let port = get_port(); + let mut rng = TestRng::new(); + let _mock_server_handle = start_mock_binary_port_responding_with_stored_value(port).await; + let config = NodeClientConfig::finite_retries_config(port, 2); + let (c, server_loop) = JulietNodeClient::new(config).await.unwrap(); + tokio::spawn(async move { + server_loop.await.unwrap(); + }); + + let res = query_global_state_for_string_value(&mut rng, &c) + .await + .unwrap(); + + assert_eq!(res, StoredValue::CLValue(CLValue::from_t("Foo").unwrap())) + } + + #[tokio::test] + async fn given_client_should_try_until_node_starts() { + let mut rng = TestRng::new(); + let port = get_port(); + tokio::spawn(async move { + sleep(Duration::from_secs(5)).await; + let _mock_server_handle = + start_mock_binary_port_responding_with_stored_value(port).await; + }); + let config = NodeClientConfig::finite_retries_config(port, 5); + let (client, server_loop) = JulietNodeClient::new(config).await.unwrap(); + tokio::spawn(async move { + server_loop.await.unwrap(); + }); + + let res = query_global_state_for_string_value(&mut rng, &client) + .await + .unwrap(); + + assert_eq!(res, StoredValue::CLValue(CLValue::from_t("Foo").unwrap())) + } + + async fn query_global_state_for_string_value( + rng: &mut TestRng, + client: &JulietNodeClient, + ) -> Result { + let state_root_hash = Digest::random(rng); + let base_key = Key::ChecksumRegistry; + client + .query_global_state( + 
Some(GlobalStateIdentifier::StateRootHash(state_root_hash)), + base_key, + vec![], + ) + .await? + .ok_or(Error::NoResponseBody) + .map(|query_res| query_res.into_inner().0) + } + + async fn start_mock_binary_port_responding_with_stored_value(port: u16) -> JoinHandle<()> { + let value = StoredValue::CLValue(CLValue::from_t("Foo").unwrap()); + let data = GlobalStateQueryResult::new(value, base16::encode_lower(&vec![])); + let protocol_version = ProtocolVersion::from_parts(1, 5, 4); + let val = BinaryResponse::from_value(data, protocol_version); + let request = []; + let response = BinaryResponseAndRequest::new(val, &request); + start_mock_binary_port(port, response.to_bytes().unwrap()).await + } + + async fn start_mock_binary_port(port: u16, data: Vec) -> JoinHandle<()> { + let handler = tokio::spawn(async move { + let binary_port = BinaryPortMock::new(port, data); + binary_port.start().await; + }); + sleep(Duration::from_secs(3)).await; // This should be handled differently, preferrably the mock binary port should inform that it already bound to the port + handler + } + + pub fn get_port() -> u16 { + portpicker::pick_unused_port().unwrap() + } +} diff --git a/rpc_sidecar/src/rpcs.rs b/rpc_sidecar/src/rpcs.rs new file mode 100644 index 00000000..a1c177d5 --- /dev/null +++ b/rpc_sidecar/src/rpcs.rs @@ -0,0 +1,618 @@ +//! The set of JSON-RPCs which the API server handles. 
+ +use std::convert::{Infallible, TryFrom}; + +pub mod account; +pub mod chain; +mod common; +pub mod docs; +mod error; +mod error_code; +pub mod info; +pub mod speculative_exec; +pub mod state; + +use std::{fmt, str, sync::Arc, time::Duration}; + +use async_trait::async_trait; +use http::header::ACCEPT_ENCODING; +use hyper::server::{conn::AddrIncoming, Builder}; +use schemars::JsonSchema; +use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; +use serde_json::Value; +use tokio::sync::oneshot; +use tower::ServiceBuilder; +use tracing::info; +use warp::Filter; + +use casper_json_rpc::{ + CorsOrigin, Error as RpcError, Params, RequestHandlers, RequestHandlersBuilder, + ReservedErrorCode, +}; +use casper_types_ver_2_0::SemVer; + +pub use common::ErrorData; +use docs::DocExample; +pub use error::Error; +pub use error_code::ErrorCode; + +use crate::{ClientError, NodeClient}; + +pub const CURRENT_API_VERSION: ApiVersion = ApiVersion(SemVer::new(1, 5, 3)); + +/// This setting causes the server to ignore extra fields in JSON-RPC requests other than the +/// standard 'id', 'jsonrpc', 'method', and 'params' fields. +/// +/// It will be changed to `false` for casper-node v2.0.0. +const ALLOW_UNKNOWN_FIELDS_IN_JSON_RPC_REQUEST: bool = true; + +/// A JSON-RPC requiring the "params" field to be present. +#[async_trait] +pub(super) trait RpcWithParams { + /// The JSON-RPC "method" name. + const METHOD: &'static str; + + /// The JSON-RPC request's "params" type. + type RequestParams: Serialize + + for<'de> Deserialize<'de> + + JsonSchema + + DocExample + + Send + + 'static; + + /// The JSON-RPC response's "result" type. + type ResponseResult: Serialize + + for<'de> Deserialize<'de> + + PartialEq + + JsonSchema + + DocExample + + Send + + 'static; + + /// Tries to parse the incoming JSON-RPC request's "params" field as `RequestParams`. 
+ fn try_parse_params(maybe_params: Option) -> Result { + let params = match maybe_params { + Some(params) => Value::from(params), + None => { + return Err(RpcError::new( + ReservedErrorCode::InvalidParams, + "Missing 'params' field", + )) + } + }; + serde_json::from_value::(params).map_err(|error| { + RpcError::new( + ReservedErrorCode::InvalidParams, + format!("Failed to parse 'params' field: {}", error), + ) + }) + } + + /// Registers this RPC as the handler for JSON-RPC requests whose "method" field is the same as + /// `Self::METHOD`. + fn register_as_handler( + node_client: Arc, + handlers_builder: &mut RequestHandlersBuilder, + ) { + let handler = move |maybe_params| { + let node_client = Arc::clone(&node_client); + async move { + let params = Self::try_parse_params(maybe_params)?; + Self::do_handle_request(node_client, params).await + } + }; + handlers_builder.register_handler(Self::METHOD, Arc::new(handler)) + } + + /// Tries to parse the params, and on success, returns the doc example, regardless of the value + /// of the parsed params. + #[cfg(test)] + fn register_as_test_handler(handlers_builder: &mut RequestHandlersBuilder) { + let handler = move |maybe_params| async move { + let _params = Self::try_parse_params(maybe_params)?; + Ok(Self::ResponseResult::doc_example()) + }; + handlers_builder.register_handler(Self::METHOD, Arc::new(handler)) + } + + async fn do_handle_request( + node_client: Arc, + params: Self::RequestParams, + ) -> Result; +} + +/// A JSON-RPC requiring the "params" field to be absent. +#[async_trait] +pub(super) trait RpcWithoutParams { + /// The JSON-RPC "method" name. + const METHOD: &'static str; + + /// The JSON-RPC response's "result" type. + type ResponseResult: Serialize + + for<'de> Deserialize<'de> + + PartialEq + + JsonSchema + + DocExample + + Send + + 'static; + + /// Returns an error if the incoming JSON-RPC request's "params" field is not `None` or an empty + /// Array or Object. 
+ fn check_no_params(maybe_params: Option) -> Result<(), RpcError> { + if !maybe_params.unwrap_or_default().is_empty() { + return Err(RpcError::new( + ReservedErrorCode::InvalidParams, + "'params' field should be an empty Array '[]', an empty Object '{}' or absent", + )); + } + Ok(()) + } + + /// Registers this RPC as the handler for JSON-RPC requests whose "method" field is the same as + /// `Self::METHOD`. + fn register_as_handler( + node_client: Arc, + handlers_builder: &mut RequestHandlersBuilder, + ) { + let handler = move |maybe_params| { + let node_client = Arc::clone(&node_client); + async move { + Self::check_no_params(maybe_params)?; + Self::do_handle_request(node_client.clone()).await + } + }; + handlers_builder.register_handler(Self::METHOD, Arc::new(handler)) + } + + /// Checks the params, and on success, returns the doc example. + #[cfg(test)] + fn register_as_test_handler(handlers_builder: &mut RequestHandlersBuilder) { + let handler = move |maybe_params| async move { + Self::check_no_params(maybe_params)?; + Ok(Self::ResponseResult::doc_example()) + }; + handlers_builder.register_handler(Self::METHOD, Arc::new(handler)) + } + + async fn do_handle_request( + node_client: Arc, + ) -> Result; +} + +/// A JSON-RPC where the "params" field is optional. +/// +/// Note that "params" being an empty JSON Array or empty JSON Object is treated the same as if +/// the "params" field is absent - i.e. it represents the `None` case. +#[async_trait] +pub(super) trait RpcWithOptionalParams { + /// The JSON-RPC "method" name. + const METHOD: &'static str; + + /// The JSON-RPC request's "params" type. This will be passed to the handler wrapped in an + /// `Option`. + type OptionalRequestParams: Serialize + + for<'de> Deserialize<'de> + + JsonSchema + + DocExample + + Send + + 'static; + + /// The JSON-RPC response's "result" type. 
+ type ResponseResult: Serialize + + for<'de> Deserialize<'de> + + PartialEq + + JsonSchema + + DocExample + + Send + + 'static; + + /// Tries to parse the incoming JSON-RPC request's "params" field as + /// `Option`. + fn try_parse_params( + maybe_params: Option, + ) -> Result, RpcError> { + let params = match maybe_params { + Some(params) => { + if params.is_empty() { + Value::Null + } else { + Value::from(params) + } + } + None => Value::Null, + }; + serde_json::from_value::>(params).map_err(|error| { + RpcError::new( + ReservedErrorCode::InvalidParams, + format!("Failed to parse 'params' field: {}", error), + ) + }) + } + + /// Registers this RPC as the handler for JSON-RPC requests whose "method" field is the same as + /// `Self::METHOD`. + fn register_as_handler( + node_client: Arc, + handlers_builder: &mut RequestHandlersBuilder, + ) { + let handler = move |maybe_params| { + let node_client = Arc::clone(&node_client); + async move { + let params = Self::try_parse_params(maybe_params)?; + Self::do_handle_request(node_client, params).await + } + }; + handlers_builder.register_handler(Self::METHOD, Arc::new(handler)) + } + + /// Tries to parse the params, and on success, returns the doc example, regardless of the value + /// of the parsed params. + #[cfg(test)] + fn register_as_test_handler(handlers_builder: &mut RequestHandlersBuilder) { + let handler = move |maybe_params| async move { + let _params = Self::try_parse_params(maybe_params)?; + Ok(Self::ResponseResult::doc_example()) + }; + handlers_builder.register_handler(Self::METHOD, Arc::new(handler)) + } + + async fn do_handle_request( + node_client: Arc, + params: Option, + ) -> Result; +} + +/// Start JSON RPC server with CORS enabled in a background. 
+pub(super) async fn run_with_cors( + builder: Builder, + handlers: RequestHandlers, + qps_limit: u64, + max_body_bytes: u32, + api_path: &'static str, + server_name: &'static str, + cors_header: CorsOrigin, +) { + let make_svc = hyper::service::make_service_fn(move |_| { + let service_routes = casper_json_rpc::route_with_cors( + api_path, + max_body_bytes, + handlers.clone(), + ALLOW_UNKNOWN_FIELDS_IN_JSON_RPC_REQUEST, + &cors_header, + ); + + // Supports content negotiation for gzip responses. This is an interim fix until + // https://github.com/seanmonstar/warp/pull/513 moves forward. + let service_routes_gzip = warp::header::exact(ACCEPT_ENCODING.as_str(), "gzip") + .and(service_routes.clone()) + .with(warp::compression::gzip()); + + let service = warp::service(service_routes_gzip.or(service_routes)); + async move { Ok::<_, Infallible>(service.clone()) } + }); + + let make_svc = ServiceBuilder::new() + .rate_limit(qps_limit, Duration::from_secs(1)) + .service(make_svc); + + let server = builder.serve(make_svc); + info!(address = %server.local_addr(), "started {} server", server_name); + + let (shutdown_sender, shutdown_receiver) = oneshot::channel::<()>(); + let server_with_shutdown = server.with_graceful_shutdown(async { + shutdown_receiver.await.ok(); + }); + + let _ = tokio::spawn(server_with_shutdown).await; + let _ = shutdown_sender.send(()); + info!("{} server shut down", server_name); +} + +/// Start JSON RPC server in a background. +pub(super) async fn run( + builder: Builder, + handlers: RequestHandlers, + qps_limit: u64, + max_body_bytes: u32, + api_path: &'static str, + server_name: &'static str, +) { + let make_svc = hyper::service::make_service_fn(move |_| { + let service_routes = casper_json_rpc::route( + api_path, + max_body_bytes, + handlers.clone(), + ALLOW_UNKNOWN_FIELDS_IN_JSON_RPC_REQUEST, + ); + + // Supports content negotiation for gzip responses. This is an interim fix until + // https://github.com/seanmonstar/warp/pull/513 moves forward. 
+ let service_routes_gzip = warp::header::exact(ACCEPT_ENCODING.as_str(), "gzip") + .and(service_routes.clone()) + .with(warp::compression::gzip()); + + let service = warp::service(service_routes_gzip.or(service_routes)); + async move { Ok::<_, Infallible>(service.clone()) } + }); + + let make_svc = ServiceBuilder::new() + .rate_limit(qps_limit, Duration::from_secs(1)) + .service(make_svc); + + let server = builder.serve(make_svc); + info!(address = %server.local_addr(), "started {} server", server_name); + + let (shutdown_sender, shutdown_receiver) = oneshot::channel::<()>(); + let server_with_shutdown = server.with_graceful_shutdown(async { + shutdown_receiver.await.ok(); + }); + + let _ = tokio::spawn(server_with_shutdown).await; + let _ = shutdown_sender.send(()); + info!("{} server shut down", server_name); +} + +#[derive(Copy, Clone, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord)] +pub struct ApiVersion(SemVer); + +impl Serialize for ApiVersion { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + let str = format!("{}.{}.{}", self.0.major, self.0.minor, self.0.patch); + String::serialize(&str, serializer) + } else { + self.0.serialize(serializer) + } + } +} + +impl<'de> Deserialize<'de> for ApiVersion { + fn deserialize>(deserializer: D) -> Result { + let semver = if deserializer.is_human_readable() { + let value_as_string = String::deserialize(deserializer)?; + SemVer::try_from(value_as_string.as_str()).map_err(SerdeError::custom)? + } else { + SemVer::deserialize(deserializer)? 
+ }; + Ok(ApiVersion(semver)) + } +} + +impl fmt::Display for ApiVersion { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0.fmt(f) + } +} + +#[cfg(test)] +mod tests { + use std::fmt::Write; + + use http::StatusCode; + use warp::{filters::BoxedFilter, Filter, Reply}; + + use casper_json_rpc::{filters, Response}; + use casper_types_ver_2_0::DeployHash; + + use super::*; + + async fn send_request( + method: &str, + maybe_params: Option<&str>, + filter: &BoxedFilter<(impl Reply + 'static,)>, + ) -> Response { + let mut body = format!(r#"{{"jsonrpc":"2.0","id":"a","method":"{}""#, method); + match maybe_params { + Some(params) => write!(body, r#","params":{}}}"#, params).unwrap(), + None => body += "}", + } + + let http_response = warp::test::request() + .body(body) + .filter(filter) + .await + .unwrap() + .into_response(); + + assert_eq!(http_response.status(), StatusCode::OK); + let body_bytes = hyper::body::to_bytes(http_response.into_body()) + .await + .unwrap(); + serde_json::from_slice(&body_bytes).unwrap() + } + + mod rpc_with_params { + use crate::rpcs::info::{GetDeploy, GetDeployParams, GetDeployResult}; + + use super::*; + + fn main_filter_with_recovery() -> BoxedFilter<(impl Reply,)> { + let mut handlers = RequestHandlersBuilder::new(); + GetDeploy::register_as_test_handler(&mut handlers); + let handlers = handlers.build(); + + filters::main_filter(handlers, ALLOW_UNKNOWN_FIELDS_IN_JSON_RPC_REQUEST) + .recover(filters::handle_rejection) + .boxed() + } + + #[tokio::test] + async fn should_parse_params() { + let filter = main_filter_with_recovery(); + + let params = serde_json::to_string(&GetDeployParams { + deploy_hash: DeployHash::default(), + finalized_approvals: false, + }) + .unwrap(); + let params = Some(params.as_str()); + let rpc_response = send_request(GetDeploy::METHOD, params, &filter).await; + assert_eq!( + rpc_response.result().as_ref(), + Some(GetDeployResult::doc_example()) + ); + } + + #[tokio::test] + async fn 
should_return_error_if_missing_params() { + let filter = main_filter_with_recovery(); + + let rpc_response = send_request(GetDeploy::METHOD, None, &filter).await; + assert_eq!( + rpc_response.error().unwrap(), + &RpcError::new(ReservedErrorCode::InvalidParams, "Missing 'params' field") + ); + + let rpc_response = send_request(GetDeploy::METHOD, Some("[]"), &filter).await; + assert_eq!( + rpc_response.error().unwrap(), + &RpcError::new( + ReservedErrorCode::InvalidParams, + "Failed to parse 'params' field: invalid length 0, expected struct \ + GetDeployParams with 2 elements" + ) + ); + } + + #[tokio::test] + async fn should_return_error_on_failure_to_parse_params() { + let filter = main_filter_with_recovery(); + + let rpc_response = send_request(GetDeploy::METHOD, Some("[3]"), &filter).await; + assert_eq!( + rpc_response.error().unwrap(), + &RpcError::new( + ReservedErrorCode::InvalidParams, + "Failed to parse 'params' field: invalid type: integer `3`, expected a string" + ) + ); + } + } + + mod rpc_without_params { + + use crate::rpcs::info::{GetPeers, GetPeersResult}; + + use super::*; + + fn main_filter_with_recovery() -> BoxedFilter<(impl Reply,)> { + let mut handlers = RequestHandlersBuilder::new(); + GetPeers::register_as_test_handler(&mut handlers); + let handlers = handlers.build(); + + filters::main_filter(handlers, ALLOW_UNKNOWN_FIELDS_IN_JSON_RPC_REQUEST) + .recover(filters::handle_rejection) + .boxed() + } + + #[tokio::test] + async fn should_check_no_params() { + let filter = main_filter_with_recovery(); + + let rpc_response = send_request(GetPeers::METHOD, None, &filter).await; + assert_eq!( + rpc_response.result().as_ref(), + Some(GetPeersResult::doc_example()) + ); + + let rpc_response = send_request(GetPeers::METHOD, Some("[]"), &filter).await; + assert_eq!( + rpc_response.result().as_ref(), + Some(GetPeersResult::doc_example()) + ); + + let rpc_response = send_request(GetPeers::METHOD, Some("{}"), &filter).await; + assert_eq!( + 
rpc_response.result().as_ref(), + Some(GetPeersResult::doc_example()) + ); + } + + #[tokio::test] + async fn should_return_error_if_params_not_empty() { + let filter = main_filter_with_recovery(); + + let rpc_response = send_request(GetPeers::METHOD, Some("[3]"), &filter).await; + assert_eq!( + rpc_response.error().unwrap(), + &RpcError::new( + ReservedErrorCode::InvalidParams, + "'params' field should be an empty Array '[]', an empty Object '{}' or absent" + ) + ); + } + } + + mod rpc_with_optional_params { + use casper_types_ver_2_0::BlockIdentifier; + + use crate::rpcs::chain::{GetBlock, GetBlockParams, GetBlockResult}; + + use super::*; + + fn main_filter_with_recovery() -> BoxedFilter<(impl Reply,)> { + let mut handlers = RequestHandlersBuilder::new(); + GetBlock::register_as_test_handler(&mut handlers); + let handlers = handlers.build(); + + filters::main_filter(handlers, ALLOW_UNKNOWN_FIELDS_IN_JSON_RPC_REQUEST) + .recover(filters::handle_rejection) + .boxed() + } + + #[tokio::test] + async fn should_parse_without_params() { + let filter = main_filter_with_recovery(); + + let rpc_response = send_request(GetBlock::METHOD, None, &filter).await; + assert_eq!( + rpc_response.result().as_ref(), + Some(GetBlockResult::doc_example()) + ); + + let rpc_response = send_request(GetBlock::METHOD, Some("[]"), &filter).await; + assert_eq!( + rpc_response.result().as_ref(), + Some(GetBlockResult::doc_example()) + ); + + let rpc_response = send_request(GetBlock::METHOD, Some("{}"), &filter).await; + assert_eq!( + rpc_response.result().as_ref(), + Some(GetBlockResult::doc_example()) + ); + } + + #[tokio::test] + async fn should_parse_with_params() { + let filter = main_filter_with_recovery(); + + let params = serde_json::to_string(&GetBlockParams { + block_identifier: BlockIdentifier::Height(1), + }) + .unwrap(); + let params = Some(params.as_str()); + + let rpc_response = send_request(GetBlock::METHOD, params, &filter).await; + assert_eq!( + rpc_response.result().as_ref(), 
+ Some(GetBlockResult::doc_example()) + ); + } + + #[tokio::test] + async fn should_return_error_on_failure_to_parse_params() { + let filter = main_filter_with_recovery(); + + let rpc_response = send_request(GetBlock::METHOD, Some(r#"["a"]"#), &filter).await; + assert_eq!( + rpc_response.error().unwrap(), + &RpcError::new( + ReservedErrorCode::InvalidParams, + "Failed to parse 'params' field: unknown variant `a`, expected `Hash` or \ + `Height`" + ) + ); + } + } +} diff --git a/rpc_sidecar/src/rpcs/account.rs b/rpc_sidecar/src/rpcs/account.rs new file mode 100644 index 00000000..d18ad81e --- /dev/null +++ b/rpc_sidecar/src/rpcs/account.rs @@ -0,0 +1,286 @@ +//! RPCs related to accounts. + +use std::{str, sync::Arc}; + +use async_trait::async_trait; +use once_cell::sync::Lazy; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use casper_types_ver_2_0::{Deploy, DeployHash, Transaction, TransactionHash}; + +use super::{ + docs::{DocExample, DOCS_EXAMPLE_API_VERSION}, + ApiVersion, ClientError, Error, NodeClient, RpcError, RpcWithParams, CURRENT_API_VERSION, +}; + +static PUT_DEPLOY_PARAMS: Lazy = Lazy::new(|| PutDeployParams { + deploy: Deploy::doc_example().clone(), +}); +static PUT_DEPLOY_RESULT: Lazy = Lazy::new(|| PutDeployResult { + api_version: DOCS_EXAMPLE_API_VERSION, + deploy_hash: *Deploy::doc_example().hash(), +}); + +static PUT_TRANSACTION_PARAMS: Lazy = Lazy::new(|| PutTransactionParams { + transaction: Transaction::doc_example().clone(), +}); +static PUT_TRANSACTION_RESULT: Lazy = Lazy::new(|| PutTransactionResult { + api_version: DOCS_EXAMPLE_API_VERSION, + transaction_hash: Transaction::doc_example().hash(), +}); + +/// Params for "account_put_deploy" RPC request. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct PutDeployParams { + /// The `Deploy`. 
+ pub deploy: Deploy, +} + +impl DocExample for PutDeployParams { + fn doc_example() -> &'static Self { + &PUT_DEPLOY_PARAMS + } +} + +/// Result for "account_put_deploy" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct PutDeployResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The deploy hash. + pub deploy_hash: DeployHash, +} + +impl DocExample for PutDeployResult { + fn doc_example() -> &'static Self { + &PUT_DEPLOY_RESULT + } +} + +/// "account_put_deploy" RPC +pub struct PutDeploy {} + +#[async_trait] +impl RpcWithParams for PutDeploy { + const METHOD: &'static str = "account_put_deploy"; + type RequestParams = PutDeployParams; + type ResponseResult = PutDeployResult; + + async fn do_handle_request( + node_client: Arc, + params: Self::RequestParams, + ) -> Result { + let deploy_hash = *params.deploy.hash(); + match node_client + .try_accept_transaction(params.deploy.into()) + .await + { + Ok(()) => Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + deploy_hash, + }), + Err(err @ ClientError::InvalidTransaction) => { + Err(Error::InvalidDeploy(err.to_string()).into()) + } + Err(err) => Err(Error::NodeRequest("submitting a deploy", err).into()), + } + } +} + +/// Params for "account_put_transaction" RPC request. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct PutTransactionParams { + /// The `Transaction`. + pub transaction: Transaction, +} + +impl DocExample for PutTransactionParams { + fn doc_example() -> &'static Self { + &PUT_TRANSACTION_PARAMS + } +} + +/// Result for "account_put_transaction" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct PutTransactionResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The transaction hash. 
+ pub transaction_hash: TransactionHash, +} + +impl DocExample for PutTransactionResult { + fn doc_example() -> &'static Self { + &PUT_TRANSACTION_RESULT + } +} + +/// "account_put_transaction" RPC +pub struct PutTransaction {} + +#[async_trait] +impl RpcWithParams for PutTransaction { + const METHOD: &'static str = "account_put_transaction"; + type RequestParams = PutTransactionParams; + type ResponseResult = PutTransactionResult; + + async fn do_handle_request( + node_client: Arc, + params: Self::RequestParams, + ) -> Result { + let transaction_hash = params.transaction.hash(); + match node_client.try_accept_transaction(params.transaction).await { + Ok(()) => Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + transaction_hash, + }), + Err(err @ ClientError::InvalidTransaction) => { + Err(Error::InvalidTransaction(err.to_string()).into()) + } + Err(err) => Err(Error::NodeRequest("submitting a transaction", err).into()), + } + } +} + +#[cfg(test)] +mod tests { + use casper_types_ver_2_0::{ + binary_port::{ + BinaryRequest, BinaryResponse, BinaryResponseAndRequest, + ErrorCode as BinaryPortErrorCode, + }, + testing::TestRng, + }; + + use crate::{rpcs::ErrorCode, SUPPORTED_PROTOCOL_VERSION}; + + use super::*; + + #[tokio::test] + async fn should_put_deploy() { + struct ClientMock; + + #[async_trait] + impl NodeClient for ClientMock { + async fn send_request( + &self, + req: BinaryRequest, + ) -> Result { + match req { + BinaryRequest::TryAcceptTransaction { .. 
} => { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::new_empty(SUPPORTED_PROTOCOL_VERSION), + &[], + )) + } + _ => unimplemented!(), + } + } + } + + let rng = &mut TestRng::new(); + let deploy = Deploy::random(rng); + let res = PutDeploy::do_handle_request( + Arc::new(ClientMock), + PutDeployParams { + deploy: deploy.clone(), + }, + ) + .await + .expect("should handle request"); + assert_eq!( + res, + PutDeployResult { + api_version: CURRENT_API_VERSION, + deploy_hash: *deploy.hash(), + } + ) + } + + #[tokio::test] + async fn should_put_transaction() { + struct ClientMock; + + #[async_trait] + impl NodeClient for ClientMock { + async fn send_request( + &self, + req: BinaryRequest, + ) -> Result { + match req { + BinaryRequest::TryAcceptTransaction { .. } => { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::new_empty(SUPPORTED_PROTOCOL_VERSION), + &[], + )) + } + _ => unimplemented!(), + } + } + } + + let rng = &mut TestRng::new(); + let transaction = Transaction::random(rng); + let res = PutTransaction::do_handle_request( + Arc::new(ClientMock), + PutTransactionParams { + transaction: transaction.clone(), + }, + ) + .await + .expect("should handle request"); + assert_eq!( + res, + PutTransactionResult { + api_version: CURRENT_API_VERSION, + transaction_hash: transaction.hash(), + } + ) + } + + #[tokio::test] + async fn should_handle_transaction_error() { + struct ClientMock; + + #[async_trait] + impl NodeClient for ClientMock { + async fn send_request( + &self, + req: BinaryRequest, + ) -> Result { + match req { + BinaryRequest::TryAcceptTransaction { .. 
} => { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::new_error( + BinaryPortErrorCode::InvalidTransaction, + SUPPORTED_PROTOCOL_VERSION, + ), + &[], + )) + } + _ => unimplemented!(), + } + } + } + + let rng = &mut TestRng::new(); + let transaction = Transaction::random(rng); + let err = PutTransaction::do_handle_request( + Arc::new(ClientMock), + PutTransactionParams { + transaction: transaction.clone(), + }, + ) + .await + .expect_err("should reject request"); + + assert_eq!(err.code(), ErrorCode::InvalidTransaction as i64,) + } +} diff --git a/rpc_sidecar/src/rpcs/chain.rs b/rpc_sidecar/src/rpcs/chain.rs new file mode 100644 index 00000000..3c4593bf --- /dev/null +++ b/rpc_sidecar/src/rpcs/chain.rs @@ -0,0 +1,702 @@ +//! RPCs related to the block chain. + +mod era_summary; + +use std::{clone::Clone, str, sync::Arc}; + +use async_trait::async_trait; +use once_cell::sync::Lazy; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use casper_types_ver_2_0::{ + BlockHash, BlockHeader, BlockHeaderV2, BlockIdentifier, Digest, GlobalStateIdentifier, + JsonBlockWithSignatures, Key, StoredValue, Transfer, +}; + +use super::{ + common, + docs::{DocExample, DOCS_EXAMPLE_API_VERSION}, + ApiVersion, Error, NodeClient, RpcError, RpcWithOptionalParams, CURRENT_API_VERSION, +}; +pub use era_summary::EraSummary; +use era_summary::ERA_SUMMARY; + +static GET_BLOCK_PARAMS: Lazy = Lazy::new(|| GetBlockParams { + block_identifier: BlockIdentifier::Hash(*JsonBlockWithSignatures::example().block.hash()), +}); +static GET_BLOCK_RESULT: Lazy = Lazy::new(|| GetBlockResult { + api_version: DOCS_EXAMPLE_API_VERSION, + block_with_signatures: Some(JsonBlockWithSignatures::example().clone()), +}); +static GET_BLOCK_TRANSFERS_PARAMS: Lazy = + Lazy::new(|| GetBlockTransfersParams { + block_identifier: BlockIdentifier::Hash(*BlockHash::example()), + }); +static GET_BLOCK_TRANSFERS_RESULT: Lazy = + Lazy::new(|| GetBlockTransfersResult { + api_version: 
DOCS_EXAMPLE_API_VERSION, + block_hash: Some(*BlockHash::example()), + transfers: Some(vec![Transfer::default()]), + }); +static GET_STATE_ROOT_HASH_PARAMS: Lazy = + Lazy::new(|| GetStateRootHashParams { + block_identifier: BlockIdentifier::Height(BlockHeaderV2::example().height()), + }); +static GET_STATE_ROOT_HASH_RESULT: Lazy = + Lazy::new(|| GetStateRootHashResult { + api_version: DOCS_EXAMPLE_API_VERSION, + state_root_hash: Some(*BlockHeaderV2::example().state_root_hash()), + }); +static GET_ERA_INFO_PARAMS: Lazy = Lazy::new(|| GetEraInfoParams { + block_identifier: BlockIdentifier::Hash(ERA_SUMMARY.block_hash), +}); +static GET_ERA_INFO_RESULT: Lazy = Lazy::new(|| GetEraInfoResult { + api_version: DOCS_EXAMPLE_API_VERSION, + era_summary: Some(ERA_SUMMARY.clone()), +}); +static GET_ERA_SUMMARY_PARAMS: Lazy = Lazy::new(|| GetEraSummaryParams { + block_identifier: BlockIdentifier::Hash(ERA_SUMMARY.block_hash), +}); +static GET_ERA_SUMMARY_RESULT: Lazy = Lazy::new(|| GetEraSummaryResult { + api_version: DOCS_EXAMPLE_API_VERSION, + era_summary: ERA_SUMMARY.clone(), +}); + +/// Params for "chain_get_block" RPC request. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetBlockParams { + /// The block identifier. + pub block_identifier: BlockIdentifier, +} + +impl DocExample for GetBlockParams { + fn doc_example() -> &'static Self { + &GET_BLOCK_PARAMS + } +} + +/// Result for "chain_get_block" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetBlockResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The block, if found. + pub block_with_signatures: Option, +} + +impl DocExample for GetBlockResult { + fn doc_example() -> &'static Self { + &GET_BLOCK_RESULT + } +} + +/// "chain_get_block" RPC. 
+pub struct GetBlock {} + +#[async_trait] +impl RpcWithOptionalParams for GetBlock { + const METHOD: &'static str = "chain_get_block"; + type OptionalRequestParams = GetBlockParams; + type ResponseResult = GetBlockResult; + + async fn do_handle_request( + node_client: Arc, + maybe_params: Option, + ) -> Result { + let identifier = maybe_params.map(|params| params.block_identifier); + let (block, signatures) = common::get_signed_block(&*node_client, identifier) + .await? + .into_inner(); + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + block_with_signatures: Some(JsonBlockWithSignatures::new(block, Some(signatures))), + }) + } +} + +/// Params for "chain_get_block_transfers" RPC request. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetBlockTransfersParams { + /// The block hash. + pub block_identifier: BlockIdentifier, +} + +impl DocExample for GetBlockTransfersParams { + fn doc_example() -> &'static Self { + &GET_BLOCK_TRANSFERS_PARAMS + } +} + +/// Result for "chain_get_block_transfers" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetBlockTransfersResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The block hash, if found. + pub block_hash: Option, + /// The block's transfers, if found. + pub transfers: Option>, +} + +impl DocExample for GetBlockTransfersResult { + fn doc_example() -> &'static Self { + &GET_BLOCK_TRANSFERS_RESULT + } +} + +/// "chain_get_block_transfers" RPC. 
+pub struct GetBlockTransfers {} + +#[async_trait] +impl RpcWithOptionalParams for GetBlockTransfers { + const METHOD: &'static str = "chain_get_block_transfers"; + type OptionalRequestParams = GetBlockTransfersParams; + type ResponseResult = GetBlockTransfersResult; + + async fn do_handle_request( + node_client: Arc, + maybe_params: Option, + ) -> Result { + let identifier = maybe_params.map(|params| params.block_identifier); + let header = common::get_block_header(&*node_client, identifier).await?; + let transfers = node_client + .read_block_transfers(header.block_hash()) + .await + .map_err(|err| Error::NodeRequest("block transfers", err))?; + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + block_hash: Some(header.block_hash()), + transfers, + }) + } +} + +/// Params for "chain_get_state_root_hash" RPC request. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetStateRootHashParams { + /// The block hash. + pub block_identifier: BlockIdentifier, +} + +impl DocExample for GetStateRootHashParams { + fn doc_example() -> &'static Self { + &GET_STATE_ROOT_HASH_PARAMS + } +} + +/// Result for "chain_get_state_root_hash" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetStateRootHashResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// Hex-encoded hash of the state root. + pub state_root_hash: Option, +} + +impl DocExample for GetStateRootHashResult { + fn doc_example() -> &'static Self { + &GET_STATE_ROOT_HASH_RESULT + } +} + +/// "chain_get_state_root_hash" RPC. 
+pub struct GetStateRootHash {} + +#[async_trait] +impl RpcWithOptionalParams for GetStateRootHash { + const METHOD: &'static str = "chain_get_state_root_hash"; + type OptionalRequestParams = GetStateRootHashParams; + type ResponseResult = GetStateRootHashResult; + + async fn do_handle_request( + node_client: Arc, + maybe_params: Option, + ) -> Result { + let identifier = maybe_params.map(|params| params.block_identifier); + let block_header = common::get_block_header(&*node_client, identifier).await?; + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + state_root_hash: Some(*block_header.state_root_hash()), + }) + } +} + +/// Params for "chain_get_era_info" RPC request. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetEraInfoParams { + /// The block identifier. + pub block_identifier: BlockIdentifier, +} + +impl DocExample for GetEraInfoParams { + fn doc_example() -> &'static Self { + &GET_ERA_INFO_PARAMS + } +} + +/// Result for "chain_get_era_info" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetEraInfoResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The era summary. 
+ pub era_summary: Option, +} + +impl DocExample for GetEraInfoResult { + fn doc_example() -> &'static Self { + &GET_ERA_INFO_RESULT + } +} + +/// "chain_get_era_info_by_switch_block" RPC +pub struct GetEraInfoBySwitchBlock {} + +#[async_trait] +impl RpcWithOptionalParams for GetEraInfoBySwitchBlock { + const METHOD: &'static str = "chain_get_era_info_by_switch_block"; + type OptionalRequestParams = GetEraInfoParams; + type ResponseResult = GetEraInfoResult; + + async fn do_handle_request( + node_client: Arc, + maybe_params: Option, + ) -> Result { + let identifier = maybe_params.map(|params| params.block_identifier); + let block_header = common::get_block_header(&*node_client, identifier).await?; + let era_summary = if block_header.is_switch_block() { + Some(get_era_summary_by_block(node_client, &block_header).await?) + } else { + None + }; + + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + era_summary, + }) + } +} + +/// Params for "chain_get_era_summary" RPC response. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetEraSummaryParams { + /// The block identifier. + pub block_identifier: BlockIdentifier, +} + +impl DocExample for GetEraSummaryParams { + fn doc_example() -> &'static Self { + &GET_ERA_SUMMARY_PARAMS + } +} + +/// Result for "chain_get_era_summary" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetEraSummaryResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The era summary. 
+ pub era_summary: EraSummary, +} + +impl DocExample for GetEraSummaryResult { + fn doc_example() -> &'static Self { + &GET_ERA_SUMMARY_RESULT + } +} + +/// "chain_get_era_summary" RPC +pub struct GetEraSummary {} + +#[async_trait] +impl RpcWithOptionalParams for GetEraSummary { + const METHOD: &'static str = "chain_get_era_summary"; + type OptionalRequestParams = GetEraSummaryParams; + type ResponseResult = GetEraSummaryResult; + + async fn do_handle_request( + node_client: Arc, + maybe_params: Option, + ) -> Result { + let identifier = maybe_params.map(|params| params.block_identifier); + let block_header = common::get_block_header(&*node_client, identifier).await?; + let era_summary = get_era_summary_by_block(node_client, &block_header).await?; + + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + era_summary, + }) + } +} + +async fn get_era_summary_by_block( + node_client: Arc, + block_header: &BlockHeader, +) -> Result { + fn create_era_summary( + block_header: &BlockHeader, + stored_value: StoredValue, + merkle_proof: String, + ) -> EraSummary { + EraSummary { + block_hash: block_header.block_hash(), + era_id: block_header.era_id(), + stored_value, + state_root_hash: *block_header.state_root_hash(), + merkle_proof, + } + } + + let state_identifier = GlobalStateIdentifier::StateRootHash(*block_header.state_root_hash()); + let result = node_client + .query_global_state(Some(state_identifier), Key::EraSummary, vec![]) + .await + .map_err(|err| Error::NodeRequest("era summary", err))?; + + let era_summary = if let Some(result) = result { + let (value, merkle_proof) = result.into_inner(); + create_era_summary(block_header, value, merkle_proof) + } else { + let (result, merkle_proof) = node_client + .query_global_state( + Some(state_identifier), + Key::EraInfo(block_header.era_id()), + vec![], + ) + .await + .map_err(|err| Error::NodeRequest("era info", err))? + .ok_or(Error::GlobalStateEntryNotFound)? 
+ .into_inner(); + + create_era_summary(block_header, result, merkle_proof) + }; + Ok(era_summary) +} + +#[cfg(test)] +mod tests { + use std::convert::TryFrom; + + use crate::{ClientError, SUPPORTED_PROTOCOL_VERSION}; + use casper_types_ver_2_0::{ + binary_port::{ + BinaryRequest, BinaryResponse, BinaryResponseAndRequest, GetRequest, + GlobalStateQueryResult, GlobalStateRequest, InformationRequestTag, RecordId, + }, + system::auction::EraInfo, + testing::TestRng, + Block, BlockSignatures, DeployHash, SignedBlock, TestBlockBuilder, TestBlockV1Builder, + }; + use rand::Rng; + + use super::*; + use pretty_assertions::assert_eq; + + #[tokio::test] + async fn should_read_block_v2() { + let rng = &mut TestRng::new(); + let block = Block::V2(TestBlockBuilder::new().build(rng)); + + let resp = GetBlock::do_handle_request( + Arc::new(ValidBlockMock { + block: SignedBlock::new( + block.clone(), + BlockSignatures::new(*block.hash(), block.era_id()), + ), + transfers: vec![], + }), + None, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetBlockResult { + api_version: CURRENT_API_VERSION, + block_with_signatures: Some(JsonBlockWithSignatures::new(block, None)), + } + ); + } + + #[tokio::test] + async fn should_read_block_v1() { + let rng = &mut TestRng::new(); + let block = TestBlockV1Builder::new().build(rng); + + let resp = GetBlock::do_handle_request( + Arc::new(ValidBlockMock { + block: SignedBlock::new( + Block::V1(block.clone()), + BlockSignatures::new(*block.hash(), block.era_id()), + ), + transfers: vec![], + }), + None, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetBlockResult { + api_version: CURRENT_API_VERSION, + block_with_signatures: Some(JsonBlockWithSignatures::new(Block::V1(block), None)), + } + ); + } + + #[tokio::test] + async fn should_read_block_transfers() { + let rng = &mut TestRng::new(); + let block = TestBlockBuilder::new().build(rng); + + let mut transfers = vec![]; + for _ in 
0..rng.gen_range(0..10) { + transfers.push(Transfer::new( + DeployHash::random(rng), + rng.gen(), + Some(rng.gen()), + rng.gen(), + rng.gen(), + rng.gen(), + rng.gen(), + Some(rng.gen()), + )); + } + + let resp = GetBlockTransfers::do_handle_request( + Arc::new(ValidBlockMock { + block: SignedBlock::new( + Block::V2(block.clone()), + BlockSignatures::new(*block.hash(), block.era_id()), + ), + transfers: transfers.clone(), + }), + None, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetBlockTransfersResult { + api_version: CURRENT_API_VERSION, + block_hash: Some(*block.hash()), + transfers: Some(transfers), + } + ); + } + + #[tokio::test] + async fn should_read_block_state_root_hash() { + let rng = &mut TestRng::new(); + let block = TestBlockBuilder::new().build(rng); + + let resp = GetStateRootHash::do_handle_request( + Arc::new(ValidBlockMock { + block: SignedBlock::new( + Block::V2(block.clone()), + BlockSignatures::new(*block.hash(), block.era_id()), + ), + transfers: vec![], + }), + None, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetStateRootHashResult { + api_version: CURRENT_API_VERSION, + state_root_hash: Some(*block.state_root_hash()), + } + ); + } + + #[tokio::test] + async fn should_read_block_era_summary() { + let rng = &mut TestRng::new(); + let block = TestBlockBuilder::new().build(rng); + + let resp = GetEraSummary::do_handle_request( + Arc::new(ValidEraSummaryMock { + block: Block::V2(block.clone()), + }), + None, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetEraSummaryResult { + api_version: CURRENT_API_VERSION, + era_summary: EraSummary { + block_hash: *block.hash(), + era_id: block.era_id(), + stored_value: StoredValue::EraInfo(EraInfo::new()), + state_root_hash: *block.state_root_hash(), + merkle_proof: String::new(), + } + } + ); + } + + #[tokio::test] + async fn should_read_block_era_info_by_switch_block() { + let rng = &mut TestRng::new(); + let block 
= TestBlockBuilder::new().switch_block(true).build(rng); + + let resp = GetEraInfoBySwitchBlock::do_handle_request( + Arc::new(ValidEraSummaryMock { + block: Block::V2(block.clone()), + }), + None, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetEraInfoResult { + api_version: CURRENT_API_VERSION, + era_summary: Some(EraSummary { + block_hash: *block.hash(), + era_id: block.era_id(), + stored_value: StoredValue::EraInfo(EraInfo::new()), + state_root_hash: *block.state_root_hash(), + merkle_proof: String::new(), + }) + } + ); + } + + #[tokio::test] + async fn should_read_none_block_era_info_by_switch_block_for_non_switch() { + let rng = &mut TestRng::new(); + let block = TestBlockBuilder::new().switch_block(false).build(rng); + + let resp = GetEraInfoBySwitchBlock::do_handle_request( + Arc::new(ValidEraSummaryMock { + block: Block::V2(block.clone()), + }), + None, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetEraInfoResult { + api_version: CURRENT_API_VERSION, + era_summary: None + } + ); + } + + struct ValidBlockMock { + block: SignedBlock, + transfers: Vec, + } + + #[async_trait] + impl NodeClient for ValidBlockMock { + async fn send_request( + &self, + req: BinaryRequest, + ) -> Result { + match req { + BinaryRequest::Get(GetRequest::Information { info_type_tag, .. }) + if InformationRequestTag::try_from(info_type_tag) + == Ok(InformationRequestTag::SignedBlock) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(self.block.clone(), SUPPORTED_PROTOCOL_VERSION), + &[], + )) + } + BinaryRequest::Get(GetRequest::Information { info_type_tag, .. }) + if InformationRequestTag::try_from(info_type_tag) + == Ok(InformationRequestTag::BlockHeader) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value( + self.block.block().clone_header(), + SUPPORTED_PROTOCOL_VERSION, + ), + &[], + )) + } + BinaryRequest::Get(GetRequest::Record { + record_type_tag, .. 
+ }) if RecordId::try_from(record_type_tag) == Ok(RecordId::Transfer) => { + Ok(BinaryResponseAndRequest::new_legacy_test_response( + RecordId::Transfer, + &self.transfers, + SUPPORTED_PROTOCOL_VERSION, + )) + } + req => unimplemented!("unexpected request: {:?}", req), + } + } + } + + struct ValidEraSummaryMock { + block: Block, + } + + #[async_trait] + impl NodeClient for ValidEraSummaryMock { + async fn send_request( + &self, + req: BinaryRequest, + ) -> Result { + match req { + BinaryRequest::Get(GetRequest::Information { info_type_tag, .. }) + if InformationRequestTag::try_from(info_type_tag) + == Ok(InformationRequestTag::BlockHeader) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value( + self.block.clone_header(), + SUPPORTED_PROTOCOL_VERSION, + ), + &[], + )) + } + BinaryRequest::Get(GetRequest::State(GlobalStateRequest::Item { + base_key: Key::EraSummary, + .. + })) => Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value( + GlobalStateQueryResult::new( + StoredValue::EraInfo(EraInfo::new()), + String::new(), + ), + SUPPORTED_PROTOCOL_VERSION, + ), + &[], + )), + req => unimplemented!("unexpected request: {:?}", req), + } + } + } +} diff --git a/rpc_sidecar/src/rpcs/chain/era_summary.rs b/rpc_sidecar/src/rpcs/chain/era_summary.rs new file mode 100644 index 00000000..bd861b38 --- /dev/null +++ b/rpc_sidecar/src/rpcs/chain/era_summary.rs @@ -0,0 +1,57 @@ +use once_cell::sync::Lazy; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use casper_types_ver_2_0::{ + system::auction::{EraInfo, SeigniorageAllocation}, + AsymmetricType, BlockHash, BlockV2, Digest, EraId, PublicKey, StoredValue, U512, +}; + +use crate::rpcs::common::MERKLE_PROOF; + +pub(super) static ERA_SUMMARY: Lazy = Lazy::new(|| { + let delegator_amount = U512::from(1000); + let validator_amount = U512::from(2000); + let delegator_public_key = + PublicKey::from_hex("01e1b46a25baa8a5c28beb3c9cfb79b572effa04076f00befa57eb70b016153f18") + .unwrap(); + let 
validator_public_key = + PublicKey::from_hex("012a1732addc639ea43a89e25d3ad912e40232156dcaa4b9edfc709f43d2fb0876") + .unwrap(); + let delegator = SeigniorageAllocation::delegator( + delegator_public_key, + validator_public_key, + delegator_amount, + ); + let validator = SeigniorageAllocation::validator( + PublicKey::from_hex("012a1732addc639ea43a89e25d3ad912e40232156dcaa4b9edfc709f43d2fb0876") + .unwrap(), + validator_amount, + ); + let seigniorage_allocations = vec![delegator, validator]; + let mut era_info = EraInfo::new(); + *era_info.seigniorage_allocations_mut() = seigniorage_allocations; + EraSummary { + block_hash: *BlockV2::example().hash(), + era_id: EraId::from(42), + stored_value: StoredValue::EraInfo(era_info), + state_root_hash: *BlockV2::example().state_root_hash(), + merkle_proof: MERKLE_PROOF.clone(), + } +}); + +/// The summary of an era +#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct EraSummary { + /// The block hash + pub block_hash: BlockHash, + /// The era id + pub era_id: EraId, + /// The StoredValue containing era information + pub stored_value: StoredValue, + /// Hex-encoded hash of the state root + pub state_root_hash: Digest, + /// The Merkle proof + pub merkle_proof: String, +} diff --git a/rpc_sidecar/src/rpcs/common.rs b/rpc_sidecar/src/rpcs/common.rs new file mode 100644 index 00000000..913bd661 --- /dev/null +++ b/rpc_sidecar/src/rpcs/common.rs @@ -0,0 +1,161 @@ +use once_cell::sync::Lazy; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::rpcs::error::Error; +use casper_types_ver_2_0::{ + account::AccountHash, AddressableEntity, AvailableBlockRange, BlockHeader, BlockIdentifier, + GlobalStateIdentifier, Key, SignedBlock, StoredValue, URef, U512, +}; + +use crate::NodeClient; + +use super::state::PurseIdentifier; + +pub(super) static MERKLE_PROOF: Lazy = Lazy::new(|| { + String::from( + 
"01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e\ + 55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3\ + f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a\ + 7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41d\ + d035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce9450022\ + 6a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7\ + 725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60\ + bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d0000030\ + 00000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467\ + a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c\ + 1bcbcee522649d2b135fe510fe3") +}); + +/// An enum to be used as the `data` field of a JSON-RPC error response. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields, untagged)] +pub enum ErrorData { + /// The requested block of state root hash is not available on this node. + MissingBlockOrStateRoot { + /// Additional info. + message: String, + /// The height range (inclusive) of fully available blocks. + available_block_range: AvailableBlockRange, + }, +} + +pub async fn get_signed_block( + node_client: &dyn NodeClient, + identifier: Option, +) -> Result { + match node_client + .read_signed_block(identifier) + .await + .map_err(|err| Error::NodeRequest("signed block", err))? 
+ { + Some(block) => Ok(block), + None => { + let available_range = node_client + .read_available_block_range() + .await + .map_err(|err| Error::NodeRequest("available block range", err))?; + Err(Error::NoBlockFound(identifier, available_range)) + } + } +} + +pub async fn get_block_header( + node_client: &dyn NodeClient, + identifier: Option, +) -> Result { + match node_client + .read_block_header(identifier) + .await + .map_err(|err| Error::NodeRequest("block header", err))? + { + Some(header) => Ok(header), + None => { + let available_range = node_client + .read_available_block_range() + .await + .map_err(|err| Error::NodeRequest("available block range", err))?; + Err(Error::NoBlockFound(identifier, available_range)) + } + } +} + +pub async fn get_account( + node_client: &dyn NodeClient, + account_hash: AccountHash, + state_identifier: Option, +) -> Result { + let account_key = Key::Account(account_hash); + let (value, _) = node_client + .query_global_state(state_identifier, account_key, vec![]) + .await + .map_err(|err| Error::NodeRequest("account stored value", err))? + .ok_or(Error::GlobalStateEntryNotFound)? + .into_inner(); + + match value { + StoredValue::Account(account) => Ok(account.into()), + StoredValue::CLValue(entity_key_as_clvalue) => { + let key: Key = entity_key_as_clvalue + .into_t() + .map_err(|_| Error::InvalidAccountInfo)?; + let (value, _) = node_client + .query_global_state(state_identifier, key, vec![]) + .await + .map_err(|err| Error::NodeRequest("account owning a purse", err))? + .ok_or(Error::GlobalStateEntryNotFound)? 
+ .into_inner(); + value + .into_addressable_entity() + .ok_or(Error::InvalidAccountInfo) + } + _ => Err(Error::InvalidAccountInfo), + } +} + +pub async fn get_main_purse( + node_client: &dyn NodeClient, + identifier: PurseIdentifier, + state_identifier: Option, +) -> Result { + let account_hash = match identifier { + PurseIdentifier::MainPurseUnderPublicKey(account_public_key) => { + account_public_key.to_account_hash() + } + PurseIdentifier::MainPurseUnderAccountHash(account_hash) => account_hash, + PurseIdentifier::PurseUref(purse_uref) => return Ok(purse_uref), + }; + let account = get_account(node_client, account_hash, state_identifier) + .await + .map_err(|_| Error::InvalidMainPurse)?; + Ok(account.main_purse()) +} + +pub async fn get_balance( + node_client: &dyn NodeClient, + uref: URef, + state_identifier: Option, +) -> Result, Error> { + let key = Key::Balance(uref.addr()); + let (value, merkle_proof) = node_client + .query_global_state(state_identifier, key, vec![]) + .await + .map_err(|err| Error::NodeRequest("balance by uref", err))? + .ok_or(Error::GlobalStateEntryNotFound)? + .into_inner(); + let value = value + .into_cl_value() + .ok_or(Error::InvalidPurseBalance)? + .into_t() + .map_err(|_| Error::InvalidPurseBalance)?; + Ok(SuccessfulQueryResult { + value, + merkle_proof, + }) +} + +#[derive(Debug)] +pub struct SuccessfulQueryResult { + pub value: A, + pub merkle_proof: String, +} diff --git a/rpc_sidecar/src/rpcs/docs.rs b/rpc_sidecar/src/rpcs/docs.rs new file mode 100644 index 00000000..9a4ea782 --- /dev/null +++ b/rpc_sidecar/src/rpcs/docs.rs @@ -0,0 +1,600 @@ +//! RPCs related to finding information about currently supported RPCs. 
+ +use std::sync::Arc; + +use async_trait::async_trait; +use once_cell::sync::Lazy; +use schemars::{ + gen::{SchemaGenerator, SchemaSettings}, + schema::Schema, + JsonSchema, Map, MapEntry, +}; +use serde::{Deserialize, Serialize}; +use serde_json::{json, Value}; + +use super::{ + account::{PutDeploy, PutTransaction}, + chain::{ + GetBlock, GetBlockTransfers, GetEraInfoBySwitchBlock, GetEraSummary, GetStateRootHash, + }, + info::{GetChainspec, GetDeploy, GetPeers, GetStatus, GetTransaction, GetValidatorChanges}, + state::{ + GetAccountInfo, GetAuctionInfo, GetBalance, GetDictionaryItem, GetItem, QueryBalance, + QueryGlobalState, + }, + ApiVersion, NodeClient, RpcError, RpcWithOptionalParams, RpcWithParams, RpcWithoutParams, + CURRENT_API_VERSION, +}; + +pub(crate) const DOCS_EXAMPLE_API_VERSION: ApiVersion = CURRENT_API_VERSION; + +const DEFINITIONS_PATH: &str = "#/components/schemas/"; + +// As per https://spec.open-rpc.org/#service-discovery-method. +pub(crate) static OPEN_RPC_SCHEMA: Lazy = Lazy::new(|| { + let contact = OpenRpcContactField { + name: "Casper Labs".to_string(), + url: "https://casperlabs.io".to_string(), + }; + let license = OpenRpcLicenseField { + name: "APACHE LICENSE, VERSION 2.0".to_string(), + url: "https://www.apache.org/licenses/LICENSE-2.0".to_string(), + }; + let info = OpenRpcInfoField { + version: DOCS_EXAMPLE_API_VERSION.to_string(), + title: "Client API of Casper Node".to_string(), + description: "This describes the JSON-RPC 2.0 API of a node on the Casper network." 
+ .to_string(), + contact, + license, + }; + + let server = OpenRpcServerEntry { + name: "any Casper Network node".to_string(), + url: "http://IP:PORT/rpc/".to_string(), + }; + + let mut schema = OpenRpcSchema { + openrpc: "1.0.0-rc1".to_string(), + info, + servers: vec![server], + methods: vec![], + components: Components { + schemas: Map::new(), + }, + }; + + schema.push_with_params::( + "receives a Deploy to be executed by the network (DEPRECATED: use \ + `account_put_transaction` instead)", + ); + schema + .push_with_params::("receives a Transaction to be executed by the network"); + schema.push_with_params::( + "returns a Deploy from the network (DEPRECATED: use `info_get_transaction` instead)", + ); + schema.push_with_params::("returns a Transaction from the network"); + schema.push_with_params::("returns an Account from the network"); + schema.push_with_params::("returns an item from a Dictionary"); + schema.push_with_params::( + "a query to global state using either a Block hash or state root hash", + ); + schema.push_with_params::( + "query for a balance using a purse identifier and a state identifier", + ); + schema.push_without_params::("returns a list of peers connected to the node"); + schema.push_without_params::("returns the current status of the node"); + schema + .push_without_params::("returns status changes of active validators"); + schema.push_without_params::( + "returns the raw bytes of the chainspec.toml, genesis accounts.toml, and \ + global_state.toml files", + ); + schema.push_with_optional_params::("returns a Block from the network"); + schema.push_with_optional_params::( + "returns all transfers for a Block from the network", + ); + schema.push_with_optional_params::( + "returns a state root hash at a given Block", + ); + schema.push_with_params::( + "returns a stored value from the network. 
This RPC is deprecated, use \ + `query_global_state` instead.", + ); + schema.push_with_params::("returns a purse's balance from the network"); + schema.push_with_optional_params::( + "returns an EraInfo from the network", + ); + schema.push_with_optional_params::( + "returns the bids and validators as of either a specific block (by height or hash), or \ + the most recently added block", + ); + schema.push_with_optional_params::( + "returns the era summary at either a specific block (by height or hash), or the most \ + recently added block", + ); + + schema +}); +static LIST_RPCS_RESULT: Lazy = Lazy::new(|| ListRpcsResult { + api_version: DOCS_EXAMPLE_API_VERSION, + name: "OpenRPC Schema".to_string(), + schema: OPEN_RPC_SCHEMA.clone(), +}); + +/// A trait used to generate a static hardcoded example of `Self`. +pub trait DocExample { + /// Generates a hardcoded example of `Self`. + fn doc_example() -> &'static Self; +} + +/// The main schema for the casper node's RPC server, compliant with +/// [the OpenRPC Specification](https://spec.open-rpc.org). 
+#[derive(Clone, PartialEq, Serialize, Deserialize, Debug, JsonSchema)] +pub struct OpenRpcSchema { + openrpc: String, + info: OpenRpcInfoField, + servers: Vec, + methods: Vec, + components: Components, +} + +impl OpenRpcSchema { + fn new_generator() -> SchemaGenerator { + let settings = SchemaSettings::default().with(|settings| { + settings.definitions_path = DEFINITIONS_PATH.to_string(); + }); + settings.into_generator() + } + + fn push_with_params(&mut self, summary: &str) { + let mut generator = Self::new_generator(); + + let params_schema = T::RequestParams::json_schema(&mut generator); + let params = Self::make_params(params_schema); + + let result_schema = T::ResponseResult::json_schema(&mut generator); + let result = ResponseResult { + name: format!("{}_result", T::METHOD), + schema: result_schema, + }; + + let examples = vec![Example::from_rpc_with_params::()]; + + let method = Method { + name: T::METHOD.to_string(), + summary: summary.to_string(), + params, + result, + examples, + }; + + self.methods.push(method); + self.update_schemas::(); + self.update_schemas::(); + } + + fn push_without_params(&mut self, summary: &str) { + let mut generator = Self::new_generator(); + + let result_schema = T::ResponseResult::json_schema(&mut generator); + let result = ResponseResult { + name: format!("{}_result", T::METHOD), + schema: result_schema, + }; + + let examples = vec![Example::from_rpc_without_params::()]; + + let method = Method { + name: T::METHOD.to_string(), + summary: summary.to_string(), + params: vec![], + result, + examples, + }; + + self.methods.push(method); + self.update_schemas::(); + } + + fn push_with_optional_params(&mut self, summary: &str) { + let mut generator = Self::new_generator(); + + let params_schema = T::OptionalRequestParams::json_schema(&mut generator); + let params = Self::make_optional_params(params_schema); + + let result_schema = T::ResponseResult::json_schema(&mut generator); + let result = ResponseResult { + name: 
format!("{}_result", T::METHOD), + schema: result_schema, + }; + + let examples = vec![Example::from_rpc_with_optional_params::()]; + + // TODO - handle adding a description that the params may be omitted if desired. + let method = Method { + name: T::METHOD.to_string(), + summary: summary.to_string(), + params, + result, + examples, + }; + + self.methods.push(method); + self.update_schemas::(); + self.update_schemas::(); + } + + /// Convert the schema for the params type for T into the OpenRpc-compatible map of name, value + /// pairs. + /// + /// As per the standard, the required params must be sorted before the optional ones. + fn make_params(schema: Schema) -> Vec { + let schema_object = schema.into_object().object.expect("should be object"); + let mut required_params = schema_object + .properties + .iter() + .filter(|(name, _)| schema_object.required.contains(*name)) + .map(|(name, schema)| SchemaParam { + name: name.clone(), + schema: schema.clone(), + required: true, + }) + .collect::>(); + let optional_params = schema_object + .properties + .iter() + .filter(|(name, _)| !schema_object.required.contains(*name)) + .map(|(name, schema)| SchemaParam { + name: name.clone(), + schema: schema.clone(), + required: false, + }) + .collect::>(); + required_params.extend(optional_params); + required_params + } + + /// Convert the schema for the optional params type for T into the OpenRpc-compatible map of + /// name, value pairs. + /// + /// Since all params must be unanimously optional, mark all incorrectly tagged "required" fields + /// as false. + fn make_optional_params(schema: Schema) -> Vec { + let schema_object = schema.into_object().object.expect("should be object"); + schema_object + .properties + .iter() + .filter(|(name, _)| schema_object.required.contains(*name)) + .map(|(name, schema)| SchemaParam { + name: name.clone(), + schema: schema.clone(), + required: false, + }) + .collect::>() + } + + /// Insert the new entries into the #/components/schemas/ map. 
Panic if we try to overwrite an + /// entry with a different value. + fn update_schemas(&mut self) { + let generator = Self::new_generator(); + let mut root_schema = generator.into_root_schema_for::(); + for (key, value) in root_schema.definitions.drain(..) { + match self.components.schemas.entry(key) { + MapEntry::Occupied(current_value) => { + assert_eq!( + current_value.get().clone().into_object().metadata, + value.into_object().metadata + ) + } + MapEntry::Vacant(vacant) => { + let _ = vacant.insert(value); + } + } + } + } + + #[cfg(test)] + fn give_params_schema(&self) -> Schema { + let mut generator = Self::new_generator(); + T::OptionalRequestParams::json_schema(&mut generator) + } +} + +#[derive(Clone, PartialEq, Serialize, Deserialize, Debug, JsonSchema)] +struct OpenRpcInfoField { + version: String, + title: String, + description: String, + contact: OpenRpcContactField, + license: OpenRpcLicenseField, +} + +#[derive(Clone, PartialEq, Serialize, Deserialize, Debug, JsonSchema)] +struct OpenRpcContactField { + name: String, + url: String, +} + +#[derive(Clone, PartialEq, Serialize, Deserialize, Debug, JsonSchema)] +struct OpenRpcLicenseField { + name: String, + url: String, +} + +#[derive(Clone, PartialEq, Serialize, Deserialize, Debug, JsonSchema)] +struct OpenRpcServerEntry { + name: String, + url: String, +} + +/// The struct containing the documentation for the RPCs. +#[derive(Clone, PartialEq, Serialize, Deserialize, Debug, JsonSchema)] +pub struct Method { + name: String, + summary: String, + params: Vec, + result: ResponseResult, + examples: Vec, +} + +#[derive(Clone, PartialEq, Serialize, Deserialize, Debug, JsonSchema)] +struct SchemaParam { + name: String, + schema: Schema, + required: bool, +} + +#[derive(Clone, PartialEq, Serialize, Deserialize, Debug, JsonSchema)] +struct ResponseResult { + name: String, + schema: Schema, +} + +/// An example pair of request params and response result. 
+#[derive(Clone, PartialEq, Serialize, Deserialize, Debug, JsonSchema)] +pub struct Example { + name: String, + params: Vec, + result: ExampleResult, +} + +impl Example { + fn new(method_name: &str, maybe_params_obj: Option, result_value: Value) -> Self { + // Break the params struct into an array of param name and value pairs. + let params = match maybe_params_obj { + Some(params_obj) => params_obj + .as_object() + .unwrap() + .iter() + .map(|(name, value)| ExampleParam { + name: name.clone(), + value: value.clone(), + }) + .collect(), + None => vec![], + }; + + Example { + name: format!("{}_example", method_name), + params, + result: ExampleResult { + name: format!("{}_example_result", method_name), + value: result_value, + }, + } + } + + fn from_rpc_with_params() -> Self { + Self::new( + T::METHOD, + Some(json!(T::RequestParams::doc_example())), + json!(T::ResponseResult::doc_example()), + ) + } + + fn from_rpc_without_params() -> Self { + Self::new(T::METHOD, None, json!(T::ResponseResult::doc_example())) + } + + fn from_rpc_with_optional_params() -> Self { + Self::new( + T::METHOD, + Some(json!(T::OptionalRequestParams::doc_example())), + json!(T::ResponseResult::doc_example()), + ) + } +} + +#[derive(Clone, PartialEq, Serialize, Deserialize, Debug, JsonSchema)] +struct ExampleParam { + name: String, + value: Value, +} + +#[derive(Clone, PartialEq, Serialize, Deserialize, Debug, JsonSchema)] +struct ExampleResult { + name: String, + value: Value, +} + +#[derive(Clone, PartialEq, Serialize, Deserialize, Debug, JsonSchema)] +struct Components { + schemas: Map, +} + +/// Result for "rpc.discover" RPC response. +// +// Fields named as per https://spec.open-rpc.org/#service-discovery-method. +#[derive(Clone, PartialEq, Serialize, Deserialize, JsonSchema, Debug)] +#[serde(deny_unknown_fields)] +pub struct ListRpcsResult { + /// The RPC API version. + #[schemars(with = "String")] + api_version: ApiVersion, + name: String, + /// The list of supported RPCs. 
+ #[schemars(skip)] + schema: OpenRpcSchema, +} + +impl DocExample for ListRpcsResult { + fn doc_example() -> &'static Self { + &LIST_RPCS_RESULT + } +} + +/// "rpc.discover" RPC. +#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +pub struct ListRpcs {} + +#[async_trait] +impl RpcWithoutParams for ListRpcs { + // Named as per https://spec.open-rpc.org/#service-discovery-method. + const METHOD: &'static str = "rpc.discover"; + type ResponseResult = ListRpcsResult; + + async fn do_handle_request( + _node_client: Arc, + ) -> Result { + Ok(ListRpcsResult::doc_example().clone()) + } +} + +mod doc_example_impls { + use casper_types_ver_2_0::{ + account::Account, AuctionState, Deploy, EraEndV1, EraEndV2, EraReport, PublicKey, + Timestamp, Transaction, + }; + + use super::DocExample; + + impl DocExample for Deploy { + fn doc_example() -> &'static Self { + Deploy::example() + } + } + + impl DocExample for Transaction { + fn doc_example() -> &'static Self { + Transaction::example() + } + } + + impl DocExample for Account { + fn doc_example() -> &'static Self { + Account::example() + } + } + + impl DocExample for EraEndV1 { + fn doc_example() -> &'static Self { + EraEndV1::example() + } + } + + impl DocExample for EraEndV2 { + fn doc_example() -> &'static Self { + EraEndV2::example() + } + } + + impl DocExample for EraReport { + fn doc_example() -> &'static Self { + EraReport::::example() + } + } + + impl DocExample for Timestamp { + fn doc_example() -> &'static Self { + Timestamp::example() + } + } + + impl DocExample for AuctionState { + fn doc_example() -> &'static Self { + AuctionState::example() + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn check_optional_params_fields() -> Vec { + let contact = OpenRpcContactField { + name: "Casper Labs".to_string(), + url: "https://casperlabs.io".to_string(), + }; + let license = OpenRpcLicenseField { + name: "APACHE LICENSE, VERSION 2.0".to_string(), + url: 
"https://www.apache.org/licenses/LICENSE-2.0".to_string(), + }; + let info = OpenRpcInfoField { + version: DOCS_EXAMPLE_API_VERSION.to_string(), + title: "Client API of Casper Node".to_string(), + description: "This describes the JSON-RPC 2.0 API of a node on the Casper network." + .to_string(), + contact, + license, + }; + + let server = OpenRpcServerEntry { + name: "any Casper Network node".to_string(), + url: "http://IP:PORT/rpc/".to_string(), + }; + + let schema = OpenRpcSchema { + openrpc: "1.0.0-rc1".to_string(), + info, + servers: vec![server], + methods: vec![], + components: Components { + schemas: Map::new(), + }, + }; + let params = schema.give_params_schema::(); + let schema_object = params.into_object().object.expect("should be object"); + schema_object + .properties + .iter() + .filter(|(name, _)| !schema_object.required.contains(*name)) + .map(|(name, schema)| SchemaParam { + name: name.clone(), + schema: schema.clone(), + required: false, + }) + .collect::>() + } + + #[test] + fn check_chain_get_block_required_fields() { + let incorrect_optional_params = check_optional_params_fields::(); + assert!(incorrect_optional_params.is_empty()) + } + + #[test] + fn check_chain_get_block_transfers_required_fields() { + let incorrect_optional_params = check_optional_params_fields::(); + assert!(incorrect_optional_params.is_empty()) + } + + #[test] + fn check_chain_get_state_root_hash_required_fields() { + let incorrect_optional_params = check_optional_params_fields::(); + assert!(incorrect_optional_params.is_empty()) + } + + #[test] + fn check_chain_get_era_info_by_switch_block_required_fields() { + let incorrect_optional_params = check_optional_params_fields::(); + assert!(incorrect_optional_params.is_empty()) + } + + #[test] + fn check_state_get_auction_info_required_fields() { + let incorrect_optional_params = check_optional_params_fields::(); + assert!(incorrect_optional_params.is_empty()) + } +} diff --git a/rpc_sidecar/src/rpcs/error.rs 
b/rpc_sidecar/src/rpcs/error.rs new file mode 100644 index 00000000..30391376 --- /dev/null +++ b/rpc_sidecar/src/rpcs/error.rs @@ -0,0 +1,110 @@ +use crate::node_client::Error as NodeClientError; +use casper_json_rpc::Error as RpcError; +use casper_types_ver_2_0::{ + AvailableBlockRange, BlockIdentifier, DeployHash, KeyFromStrError, KeyTag, TransactionHash, + URefFromStrError, +}; + +use super::{ErrorCode, ErrorData}; + +#[derive(Debug, thiserror::Error)] +pub enum Error { + #[error("request for {0} has failed: {1}")] + NodeRequest(&'static str, NodeClientError), + #[error("no block found for the provided identifier")] + NoBlockFound(Option, AvailableBlockRange), + #[error("no transaction for hash {0}")] + NoTransactionWithHash(TransactionHash), + #[error("no deploy for hash {0}")] + NoDeployWithHash(DeployHash), + #[error("found a transaction when searching for a deploy")] + FoundTransactionInsteadOfDeploy, + #[error("value was not found in the global state")] + GlobalStateEntryNotFound, + #[error("the requested purse URef was invalid: {0}")] + InvalidPurseURef(URefFromStrError), + #[error("the requested purse balance could not be parsed")] + InvalidPurseBalance, + #[error("the requested main purse was invalid")] + InvalidMainPurse, + #[error("the requested account info could not be parsed")] + InvalidAccountInfo, + #[error("the provided dictionary key was invalid: {0}")] + InvalidDictionaryKey(KeyFromStrError), + #[error("the provided dictionary key points at an unexpected type: {0}")] + InvalidTypeUnderDictionaryKey(String), + #[error("the provided dictionary key doesn't exist")] + DictionaryKeyNotFound, + #[error("the provided dictionary name doesn't exist")] + DictionaryNameNotFound, + #[error("the provided dictionary value is {0} instead of a URef")] + DictionaryValueIsNotAUref(KeyTag), + #[error("the provided dictionary key could not be parsed: {0}")] + DictionaryKeyCouldNotBeParsed(String), + #[error("the transaction was invalid: {0}")] + 
InvalidTransaction(String), + #[error("the deploy was invalid: {0}")] + InvalidDeploy(String), + #[error("the auction bids were invalid")] + InvalidAuctionBids, + #[error("the auction contract was invalid")] + InvalidAuctionContract, + #[error("the auction validators were invalid")] + InvalidAuctionValidators, + #[error("speculative execution returned nothing")] + SpecExecReturnedNothing, +} + +impl Error { + fn code(&self) -> ErrorCode { + match self { + Error::NoBlockFound(_, _) => ErrorCode::NoSuchBlock, + Error::NoTransactionWithHash(_) => ErrorCode::NoSuchTransaction, + Error::NoDeployWithHash(_) => ErrorCode::NoSuchDeploy, + Error::FoundTransactionInsteadOfDeploy => ErrorCode::VariantMismatch, + Error::NodeRequest(_, NodeClientError::UnknownStateRootHash) => { + ErrorCode::NoSuchStateRoot + } + Error::GlobalStateEntryNotFound => ErrorCode::QueryFailed, + Error::NodeRequest(_, NodeClientError::QueryFailedToExecute) => { + ErrorCode::QueryFailedToExecute + } + Error::NodeRequest(_, NodeClientError::FunctionIsDisabled) => { + ErrorCode::FunctionIsDisabled + } + Error::InvalidPurseURef(_) => ErrorCode::FailedToParseGetBalanceURef, + Error::InvalidPurseBalance => ErrorCode::FailedToGetBalance, + Error::InvalidAccountInfo => ErrorCode::NoSuchAccount, + Error::InvalidDictionaryKey(_) => ErrorCode::FailedToParseQueryKey, + Error::InvalidMainPurse => ErrorCode::NoSuchMainPurse, + Error::InvalidTypeUnderDictionaryKey(_) + | Error::DictionaryKeyNotFound + | Error::DictionaryNameNotFound + | Error::DictionaryValueIsNotAUref(_) + | Error::DictionaryKeyCouldNotBeParsed(_) => ErrorCode::FailedToGetDictionaryURef, + Error::InvalidTransaction(_) => ErrorCode::InvalidTransaction, + Error::NodeRequest(_, NodeClientError::SpecExecutionFailed(_)) + | Error::InvalidDeploy(_) + | Error::SpecExecReturnedNothing => ErrorCode::InvalidDeploy, + Error::InvalidAuctionBids + | Error::InvalidAuctionContract + | Error::InvalidAuctionValidators => ErrorCode::InvalidAuctionState, + 
Error::NodeRequest(_, _) => ErrorCode::NodeRequestFailed, + } + } +} + +impl From for RpcError { + fn from(value: Error) -> Self { + match value { + Error::NoBlockFound(_, available_block_range) => RpcError::new( + value.code(), + ErrorData::MissingBlockOrStateRoot { + message: value.to_string(), + available_block_range, + }, + ), + _ => RpcError::new(value.code(), value.to_string()), + } + } +} diff --git a/rpc_sidecar/src/rpcs/error_code.rs b/rpc_sidecar/src/rpcs/error_code.rs new file mode 100644 index 00000000..c1bae230 --- /dev/null +++ b/rpc_sidecar/src/rpcs/error_code.rs @@ -0,0 +1,93 @@ +use serde::{Deserialize, Serialize}; + +use casper_json_rpc::ErrorCodeT; + +/// The various codes which can be returned in the JSON-RPC Response's error object. +/// +/// **NOTE:** These values will be changed to lie outside the restricted range as defined in the +/// JSON-RPC spec as of casper-node v2.0.0. +#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize, Debug)] +#[repr(i64)] +pub enum ErrorCode { + /// The requested Deploy was not found. + NoSuchDeploy = -32000, + /// The requested Block was not found. + NoSuchBlock = -32001, + /// Parsing the Key for a query failed. + FailedToParseQueryKey = -32002, + /// The query failed to find a result. + QueryFailed = -32003, + /// Executing the query failed. + QueryFailedToExecute = -32004, + /// Parsing the URef while getting a balance failed. + FailedToParseGetBalanceURef = -32005, + /// Failed to get the requested balance. + FailedToGetBalance = -32006, + /// Executing the query to retrieve the balance failed. + GetBalanceFailedToExecute = -32007, + /// The given Deploy cannot be executed as it is invalid. + InvalidDeploy = -32008, + /// The given account was not found. + NoSuchAccount = -32009, + /// Failed to get the requested dictionary URef. + FailedToGetDictionaryURef = -32010, + /// Failed to get the requested dictionary trie. 
+ FailedToGetTrie = -32011, + /// The requested state root hash was not found. + NoSuchStateRoot = -32012, + /// The main purse for a given account hash does not exist. + NoSuchMainPurse = -32013, + /// The requested Transaction was not found. + NoSuchTransaction = -32014, + /// Variant mismatch. + VariantMismatch = -32015, + /// The given Transaction cannot be executed as it is invalid. + InvalidTransaction = -32016, + /// The given Block could not be verified. + InvalidBlock = -32017, + /// Failed during a node request. + NodeRequestFailed = -32018, + /// Auction state could not be parsed. + InvalidAuctionState = -32019, + /// The request could not be satisfied because an underlying function is disabled. + FunctionIsDisabled = -32020, +} + +impl From for (i64, &'static str) { + fn from(error_code: ErrorCode) -> Self { + match error_code { + ErrorCode::NoSuchDeploy => (error_code as i64, "No such deploy"), + ErrorCode::NoSuchBlock => (error_code as i64, "No such block"), + ErrorCode::FailedToParseQueryKey => (error_code as i64, "Failed to parse query key"), + ErrorCode::QueryFailed => (error_code as i64, "Query failed"), + ErrorCode::QueryFailedToExecute => (error_code as i64, "Query failed to execute"), + ErrorCode::FailedToParseGetBalanceURef => { + (error_code as i64, "Failed to parse get-balance URef") + } + ErrorCode::FailedToGetBalance => (error_code as i64, "Failed to get balance"), + ErrorCode::GetBalanceFailedToExecute => { + (error_code as i64, "get-balance failed to execute") + } + ErrorCode::InvalidDeploy => (error_code as i64, "Invalid Deploy"), + ErrorCode::NoSuchAccount => (error_code as i64, "No such account"), + ErrorCode::FailedToGetDictionaryURef => { + (error_code as i64, "Failed to get dictionary URef") + } + ErrorCode::FailedToGetTrie => (error_code as i64, "Failed to get trie"), + ErrorCode::NoSuchStateRoot => (error_code as i64, "No such state root"), + ErrorCode::NoSuchMainPurse => (error_code as i64, "Failed to get main purse"), + 
ErrorCode::NoSuchTransaction => (error_code as i64, "No such transaction"), + ErrorCode::VariantMismatch => (error_code as i64, "Variant mismatch internal error"), + ErrorCode::InvalidTransaction => (error_code as i64, "Invalid transaction"), + ErrorCode::InvalidBlock => (error_code as i64, "Invalid block"), + ErrorCode::NodeRequestFailed => (error_code as i64, "Node request failure"), + ErrorCode::InvalidAuctionState => (error_code as i64, "Invalid auction state"), + ErrorCode::FunctionIsDisabled => ( + error_code as i64, + "Function needed to execute this request is disabled", + ), + } + } +} + +impl ErrorCodeT for ErrorCode {} diff --git a/rpc_sidecar/src/rpcs/info.rs b/rpc_sidecar/src/rpcs/info.rs new file mode 100644 index 00000000..e2f7fd6d --- /dev/null +++ b/rpc_sidecar/src/rpcs/info.rs @@ -0,0 +1,695 @@ +//! RPCs returning ancillary information. + +use std::{collections::BTreeMap, str, sync::Arc}; + +use async_trait::async_trait; +use once_cell::sync::Lazy; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use casper_types_ver_2_0::{ + binary_port::MinimalBlockInfo, + execution::{ExecutionResult, ExecutionResultV2}, + ActivationPoint, AvailableBlockRange, Block, BlockSynchronizerStatus, ChainspecRawBytes, + Deploy, DeployHash, Digest, EraId, ExecutionInfo, NextUpgrade, Peers, ProtocolVersion, + PublicKey, ReactorState, TimeDiff, Timestamp, Transaction, TransactionHash, ValidatorChange, +}; + +use super::{ + docs::{DocExample, DOCS_EXAMPLE_API_VERSION}, + ApiVersion, Error, NodeClient, RpcError, RpcWithParams, RpcWithoutParams, CURRENT_API_VERSION, +}; + +static GET_DEPLOY_PARAMS: Lazy = Lazy::new(|| GetDeployParams { + deploy_hash: *Deploy::doc_example().hash(), + finalized_approvals: true, +}); +static GET_DEPLOY_RESULT: Lazy = Lazy::new(|| GetDeployResult { + api_version: DOCS_EXAMPLE_API_VERSION, + deploy: Deploy::doc_example().clone(), + execution_info: Some(ExecutionInfo { + block_hash: *Block::example().hash(), + block_height: 
Block::example().clone_header().height(), + execution_result: Some(ExecutionResult::from(ExecutionResultV2::example().clone())), + }), +}); +static GET_TRANSACTION_PARAMS: Lazy = Lazy::new(|| GetTransactionParams { + transaction_hash: Transaction::doc_example().hash(), + finalized_approvals: true, +}); +static GET_TRANSACTION_RESULT: Lazy = Lazy::new(|| GetTransactionResult { + api_version: DOCS_EXAMPLE_API_VERSION, + transaction: Transaction::doc_example().clone(), + execution_info: Some(ExecutionInfo { + block_hash: *Block::example().hash(), + block_height: Block::example().height(), + execution_result: Some(ExecutionResult::from(ExecutionResultV2::example().clone())), + }), +}); +static GET_PEERS_RESULT: Lazy = Lazy::new(|| GetPeersResult { + api_version: DOCS_EXAMPLE_API_VERSION, + peers: Some(("tls:0101..0101".to_owned(), "127.0.0.1:54321".to_owned())) + .into_iter() + .collect::>() + .into(), +}); +static GET_VALIDATOR_CHANGES_RESULT: Lazy = Lazy::new(|| { + let change = JsonValidatorStatusChange::new(EraId::new(1), ValidatorChange::Added); + let public_key = PublicKey::example().clone(); + let changes = vec![JsonValidatorChanges::new(public_key, vec![change])]; + GetValidatorChangesResult { + api_version: DOCS_EXAMPLE_API_VERSION, + changes, + } +}); +static GET_CHAINSPEC_RESULT: Lazy = Lazy::new(|| GetChainspecResult { + api_version: DOCS_EXAMPLE_API_VERSION, + chainspec_bytes: ChainspecRawBytes::new(vec![42, 42].into(), None, None), +}); + +static GET_STATUS_RESULT: Lazy = Lazy::new(|| GetStatusResult { + peers: GET_PEERS_RESULT.peers.clone(), + api_version: DOCS_EXAMPLE_API_VERSION, + chainspec_name: String::from("casper-example"), + starting_state_root_hash: Digest::default(), + last_added_block_info: Some(MinimalBlockInfo::from(Block::example().clone())), + our_public_signing_key: Some(PublicKey::example().clone()), + round_length: Some(TimeDiff::from_millis(1 << 16)), + next_upgrade: Some(NextUpgrade::new( + ActivationPoint::EraId(EraId::from(42)), + 
ProtocolVersion::from_parts(2, 0, 1), + )), + uptime: TimeDiff::from_seconds(13), + reactor_state: ReactorState::Initialize, + last_progress: Timestamp::from(0), + available_block_range: AvailableBlockRange::RANGE_0_0, + block_sync: BlockSynchronizerStatus::example().clone(), + #[cfg(not(test))] + build_version: version_string(), + + // Prevent these values from changing between test sessions + #[cfg(test)] + build_version: String::from("1.0.0-xxxxxxxxx@DEBUG"), +}); + +/// Params for "info_get_deploy" RPC request. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetDeployParams { + /// The deploy hash. + pub deploy_hash: DeployHash, + /// Whether to return the deploy with the finalized approvals substituted. If `false` or + /// omitted, returns the deploy with the approvals that were originally received by the node. + #[serde(default = "finalized_approvals_default")] + pub finalized_approvals: bool, +} + +/// The default for `GetDeployParams::finalized_approvals` and +/// `GetTransactionParams::finalized_approvals`. +fn finalized_approvals_default() -> bool { + false +} + +impl DocExample for GetDeployParams { + fn doc_example() -> &'static Self { + &GET_DEPLOY_PARAMS + } +} + +/// Result for "info_get_deploy" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetDeployResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The deploy. + pub deploy: Deploy, + /// Execution info, if available. + #[serde(skip_serializing_if = "Option::is_none", flatten)] + pub execution_info: Option, +} + +impl DocExample for GetDeployResult { + fn doc_example() -> &'static Self { + &GET_DEPLOY_RESULT + } +} + +/// "info_get_deploy" RPC. 
+pub struct GetDeploy {} + +#[async_trait] +impl RpcWithParams for GetDeploy { + const METHOD: &'static str = "info_get_deploy"; + type RequestParams = GetDeployParams; + type ResponseResult = GetDeployResult; + + async fn do_handle_request( + node_client: Arc, + params: Self::RequestParams, + ) -> Result { + let hash = TransactionHash::from(params.deploy_hash); + let (transaction, execution_info) = node_client + .read_transaction_with_execution_info(hash) + .await + .map_err(|err| Error::NodeRequest("transaction", err))? + .ok_or(Error::NoDeployWithHash(params.deploy_hash))? + .into_inner(); + + let deploy = match transaction { + Transaction::Deploy(deploy) => deploy, + Transaction::V1(_) => return Err(Error::FoundTransactionInsteadOfDeploy.into()), + }; + + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + deploy, + execution_info, + }) + } +} + +/// Params for "info_get_transaction" RPC request. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetTransactionParams { + /// The transaction hash. + pub transaction_hash: TransactionHash, + /// Whether to return the transaction with the finalized approvals substituted. If `false` or + /// omitted, returns the transaction with the approvals that were originally received by the + /// node. + #[serde(default = "finalized_approvals_default")] + pub finalized_approvals: bool, +} + +impl DocExample for GetTransactionParams { + fn doc_example() -> &'static Self { + &GET_TRANSACTION_PARAMS + } +} + +/// Result for "info_get_transaction" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetTransactionResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The transaction. + pub transaction: Transaction, + /// Execution info, if available. 
+ #[serde(skip_serializing_if = "Option::is_none", flatten)] + pub execution_info: Option, +} + +impl DocExample for GetTransactionResult { + fn doc_example() -> &'static Self { + &GET_TRANSACTION_RESULT + } +} + +/// "info_get_transaction" RPC. +pub struct GetTransaction {} + +#[async_trait] +impl RpcWithParams for GetTransaction { + const METHOD: &'static str = "info_get_transaction"; + type RequestParams = GetTransactionParams; + type ResponseResult = GetTransactionResult; + + async fn do_handle_request( + node_client: Arc, + params: Self::RequestParams, + ) -> Result { + let (transaction, execution_info) = node_client + .read_transaction_with_execution_info(params.transaction_hash) + .await + .map_err(|err| Error::NodeRequest("transaction", err))? + .ok_or(Error::NoTransactionWithHash(params.transaction_hash))? + .into_inner(); + + Ok(Self::ResponseResult { + transaction, + api_version: CURRENT_API_VERSION, + execution_info, + }) + } +} + +/// Result for "info_get_peers" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetPeersResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The node ID and network address of each connected peer. + pub peers: Peers, +} + +impl DocExample for GetPeersResult { + fn doc_example() -> &'static Self { + &GET_PEERS_RESULT + } +} + +/// "info_get_peers" RPC. +pub struct GetPeers {} + +#[async_trait] +impl RpcWithoutParams for GetPeers { + const METHOD: &'static str = "info_get_peers"; + type ResponseResult = GetPeersResult; + + async fn do_handle_request( + node_client: Arc, + ) -> Result { + let peers = node_client + .read_peers() + .await + .map_err(|err| Error::NodeRequest("peers", err))?; + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + peers, + }) + } +} + +/// A single change to a validator's status in the given era. 
+#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct JsonValidatorStatusChange { + /// The era in which the change occurred. + era_id: EraId, + /// The change in validator status. + validator_change: ValidatorChange, +} + +impl JsonValidatorStatusChange { + pub(crate) fn new(era_id: EraId, validator_change: ValidatorChange) -> Self { + JsonValidatorStatusChange { + era_id, + validator_change, + } + } +} + +/// The changes in a validator's status. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct JsonValidatorChanges { + /// The public key of the validator. + public_key: PublicKey, + /// The set of changes to the validator's status. + status_changes: Vec, +} + +impl JsonValidatorChanges { + pub(crate) fn new( + public_key: PublicKey, + status_changes: Vec, + ) -> Self { + JsonValidatorChanges { + public_key, + status_changes, + } + } +} + +/// Result for the "info_get_validator_changes" RPC. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetValidatorChangesResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The validators' status changes. 
+ pub changes: Vec, +} + +impl GetValidatorChangesResult { + pub(crate) fn new(changes: BTreeMap>) -> Self { + let changes = changes + .into_iter() + .map(|(public_key, mut validator_changes)| { + validator_changes.sort(); + let status_changes = validator_changes + .into_iter() + .map(|(era_id, validator_change)| { + JsonValidatorStatusChange::new(era_id, validator_change) + }) + .collect(); + JsonValidatorChanges::new(public_key, status_changes) + }) + .collect(); + GetValidatorChangesResult { + api_version: CURRENT_API_VERSION, + changes, + } + } +} + +impl DocExample for GetValidatorChangesResult { + fn doc_example() -> &'static Self { + &GET_VALIDATOR_CHANGES_RESULT + } +} + +/// "info_get_validator_changes" RPC. +pub struct GetValidatorChanges {} + +#[async_trait] +impl RpcWithoutParams for GetValidatorChanges { + const METHOD: &'static str = "info_get_validator_changes"; + type ResponseResult = GetValidatorChangesResult; + + async fn do_handle_request( + node_client: Arc, + ) -> Result { + let changes = node_client + .read_validator_changes() + .await + .map_err(|err| Error::NodeRequest("validator changes", err))?; + Ok(Self::ResponseResult::new(changes.into())) + } +} + +/// Result for the "info_get_chainspec" RPC. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +pub struct GetChainspecResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The chainspec file bytes. + pub chainspec_bytes: ChainspecRawBytes, +} + +impl DocExample for GetChainspecResult { + fn doc_example() -> &'static Self { + &GET_CHAINSPEC_RESULT + } +} + +/// "info_get_chainspec" RPC. 
+pub struct GetChainspec {} + +#[async_trait] +impl RpcWithoutParams for GetChainspec { + const METHOD: &'static str = "info_get_chainspec"; + type ResponseResult = GetChainspecResult; + + async fn do_handle_request( + node_client: Arc, + ) -> Result { + let chainspec_bytes = node_client + .read_chainspec_bytes() + .await + .map_err(|err| Error::NodeRequest("chainspec bytes", err))?; + + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + chainspec_bytes, + }) + } +} + +/// Result for "info_get_status" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetStatusResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The node ID and network address of each connected peer. + pub peers: Peers, + /// The compiled node version. + pub build_version: String, + /// The chainspec name. + pub chainspec_name: String, + /// The state root hash of the lowest block in the available block range. + pub starting_state_root_hash: Digest, + /// The minimal info of the last block from the linear chain. + pub last_added_block_info: Option, + /// Our public signing key. + pub our_public_signing_key: Option, + /// The next round length if this node is a validator. + pub round_length: Option, + /// Information about the next scheduled upgrade. + pub next_upgrade: Option, + /// Time that passed since the node has started. + pub uptime: TimeDiff, + /// The current state of node reactor. + pub reactor_state: ReactorState, + /// Timestamp of the last recorded progress in the reactor. + pub last_progress: Timestamp, + /// The available block range in storage. + pub available_block_range: AvailableBlockRange, + /// The status of the block synchronizer builders. + pub block_sync: BlockSynchronizerStatus, +} + +impl DocExample for GetStatusResult { + fn doc_example() -> &'static Self { + &GET_STATUS_RESULT + } +} + +/// "info_get_status" RPC. 
+pub struct GetStatus {} + +#[async_trait] +impl RpcWithoutParams for GetStatus { + const METHOD: &'static str = "info_get_status"; + type ResponseResult = GetStatusResult; + + async fn do_handle_request( + node_client: Arc, + ) -> Result { + let status = node_client + .read_node_status() + .await + .map_err(|err| Error::NodeRequest("node status", err))?; + + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + peers: status.peers, + chainspec_name: status.chainspec_name, + starting_state_root_hash: status.starting_state_root_hash, + last_added_block_info: status.last_added_block_info, + our_public_signing_key: status.our_public_signing_key, + round_length: status.round_length, + next_upgrade: status.next_upgrade, + uptime: status.uptime, + reactor_state: status.reactor_state, + last_progress: status.last_progress, + available_block_range: status.available_block_range, + block_sync: status.block_sync, + build_version: status.build_version, + }) + } +} + +#[cfg(not(test))] +fn version_string() -> String { + use std::env; + use tracing::warn; + + let mut version = env!("CARGO_PKG_VERSION").to_string(); + if let Ok(git_sha) = env::var("VERGEN_GIT_SHA") { + version = format!("{}-{}", version, git_sha); + } else { + warn!( + "vergen env var unavailable, casper-node build version will not include git short hash" + ); + } + + // Add a `@DEBUG` (or similar) tag to release string on non-release builds. 
+ if env!("SIDECAR_BUILD_PROFILE") != "release" { + version += "@"; + let profile = env!("SIDECAR_BUILD_PROFILE").to_uppercase(); + version.push_str(&profile); + } + + version +} + +#[cfg(test)] +mod tests { + use std::convert::TryFrom; + + use crate::{rpcs::ErrorCode, ClientError, SUPPORTED_PROTOCOL_VERSION}; + use casper_types_ver_2_0::{ + binary_port::{ + BinaryRequest, BinaryResponse, BinaryResponseAndRequest, GetRequest, + InformationRequestTag, TransactionWithExecutionInfo, + }, + testing::TestRng, + BlockHash, TransactionV1, + }; + use pretty_assertions::assert_eq; + use rand::Rng; + + use super::*; + + #[tokio::test] + async fn should_read_transaction() { + let rng = &mut TestRng::new(); + let transaction = Transaction::from(TransactionV1::random(rng)); + let execution_info = ExecutionInfo { + block_hash: BlockHash::random(rng), + block_height: rng.gen(), + execution_result: Some(ExecutionResult::random(rng)), + }; + + let resp = GetTransaction::do_handle_request( + Arc::new(ValidTransactionMock { + transaction: TransactionWithExecutionInfo::new( + transaction.clone(), + Some(execution_info.clone()), + ), + }), + GetTransactionParams { + transaction_hash: transaction.hash(), + finalized_approvals: true, + }, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetTransactionResult { + api_version: CURRENT_API_VERSION, + transaction, + execution_info: Some(execution_info), + } + ); + } + + #[tokio::test] + async fn should_read_deploy_via_get_transaction() { + let rng = &mut TestRng::new(); + let deploy = Deploy::random(rng); + let execution_info = ExecutionInfo { + block_hash: BlockHash::random(rng), + block_height: rng.gen(), + execution_result: Some(ExecutionResult::random(rng)), + }; + + let resp = GetTransaction::do_handle_request( + Arc::new(ValidTransactionMock { + transaction: TransactionWithExecutionInfo::new( + Transaction::Deploy(deploy.clone()), + Some(execution_info.clone()), + ), + }), + GetTransactionParams { + 
transaction_hash: deploy.hash().into(), + finalized_approvals: true, + }, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetTransactionResult { + api_version: CURRENT_API_VERSION, + transaction: deploy.into(), + execution_info: Some(execution_info), + } + ); + } + + #[tokio::test] + async fn should_read_deploy_via_get_deploy() { + let rng = &mut TestRng::new(); + let deploy = Deploy::random(rng); + let execution_info = ExecutionInfo { + block_hash: BlockHash::random(rng), + block_height: rng.gen(), + execution_result: Some(ExecutionResult::random(rng)), + }; + + let resp = GetDeploy::do_handle_request( + Arc::new(ValidTransactionMock { + transaction: TransactionWithExecutionInfo::new( + Transaction::Deploy(deploy.clone()), + Some(execution_info.clone()), + ), + }), + GetDeployParams { + deploy_hash: *deploy.hash(), + finalized_approvals: true, + }, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetDeployResult { + api_version: CURRENT_API_VERSION, + deploy, + execution_info: Some(execution_info), + } + ); + } + + #[tokio::test] + async fn should_reject_transaction_when_asking_for_deploy() { + let rng = &mut TestRng::new(); + let transaction = TransactionV1::random(rng); + let execution_info = ExecutionInfo { + block_hash: BlockHash::random(rng), + block_height: rng.gen(), + execution_result: Some(ExecutionResult::random(rng)), + }; + + let err = GetDeploy::do_handle_request( + Arc::new(ValidTransactionMock { + transaction: TransactionWithExecutionInfo::new( + Transaction::V1(transaction.clone()), + Some(execution_info.clone()), + ), + }), + GetDeployParams { + deploy_hash: DeployHash::new(*transaction.hash().inner()), + finalized_approvals: true, + }, + ) + .await + .expect_err("should reject request"); + + assert_eq!(err.code(), ErrorCode::VariantMismatch as i64); + } + + struct ValidTransactionMock { + transaction: TransactionWithExecutionInfo, + } + + #[async_trait] + impl NodeClient for 
ValidTransactionMock { + async fn send_request( + &self, + req: BinaryRequest, + ) -> Result { + match req { + BinaryRequest::Get(GetRequest::Information { info_type_tag, .. }) + if InformationRequestTag::try_from(info_type_tag) + == Ok(InformationRequestTag::Transaction) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value( + self.transaction.clone(), + SUPPORTED_PROTOCOL_VERSION, + ), + &[], + )) + } + req => unimplemented!("unexpected request: {:?}", req), + } + } + } +} diff --git a/rpc_sidecar/src/rpcs/speculative_exec.rs b/rpc_sidecar/src/rpcs/speculative_exec.rs new file mode 100644 index 00000000..c3fc5d97 --- /dev/null +++ b/rpc_sidecar/src/rpcs/speculative_exec.rs @@ -0,0 +1,272 @@ +//! RPC related to speculative execution. + +use std::{str, sync::Arc}; + +use async_trait::async_trait; +use once_cell::sync::Lazy; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use casper_types_ver_2_0::{ + contract_messages::Messages, execution::ExecutionResultV2, BlockHash, BlockIdentifier, Deploy, + Transaction, +}; + +use super::{ + common, + docs::{DocExample, DOCS_EXAMPLE_API_VERSION}, + ApiVersion, Error, NodeClient, RpcError, RpcWithParams, CURRENT_API_VERSION, +}; + +static SPECULATIVE_EXEC_TXN_PARAMS: Lazy = + Lazy::new(|| SpeculativeExecTxnParams { + block_identifier: Some(BlockIdentifier::Hash(*BlockHash::example())), + transaction: Transaction::doc_example().clone(), + }); +static SPECULATIVE_EXEC_TXN_RESULT: Lazy = + Lazy::new(|| SpeculativeExecTxnResult { + api_version: DOCS_EXAMPLE_API_VERSION, + block_hash: *BlockHash::example(), + execution_result: ExecutionResultV2::example().clone(), + messages: Vec::new(), + }); +static SPECULATIVE_EXEC_PARAMS: Lazy = Lazy::new(|| SpeculativeExecParams { + block_identifier: Some(BlockIdentifier::Hash(*BlockHash::example())), + deploy: Deploy::doc_example().clone(), +}); + +/// Params for "speculative_exec_txn" RPC request. 
+#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct SpeculativeExecTxnParams { + /// Block hash on top of which to execute the transaction. + pub block_identifier: Option, + /// Transaction to execute. + pub transaction: Transaction, +} + +impl DocExample for SpeculativeExecTxnParams { + fn doc_example() -> &'static Self { + &SPECULATIVE_EXEC_TXN_PARAMS + } +} + +/// Result for "speculative_exec_txn" and "speculative_exec" RPC responses. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct SpeculativeExecTxnResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// Hash of the block on top of which the transaction was executed. + pub block_hash: BlockHash, + /// Result of the execution. + pub execution_result: ExecutionResultV2, + /// Messages emitted during execution. + pub messages: Messages, +} + +impl DocExample for SpeculativeExecTxnResult { + fn doc_example() -> &'static Self { + &SPECULATIVE_EXEC_TXN_RESULT + } +} + +/// "speculative_exec_txn" RPC +pub struct SpeculativeExecTxn {} + +#[async_trait] +impl RpcWithParams for SpeculativeExecTxn { + const METHOD: &'static str = "speculative_exec_txn"; + type RequestParams = SpeculativeExecTxnParams; + type ResponseResult = SpeculativeExecTxnResult; + + async fn do_handle_request( + node_client: Arc, + params: Self::RequestParams, + ) -> Result { + handle_request(node_client, params.block_identifier, params.transaction).await + } +} + +/// Params for "speculative_exec" RPC request. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct SpeculativeExecParams { + /// Block hash on top of which to execute the deploy. + pub block_identifier: Option, + /// Deploy to execute. 
+ pub deploy: Deploy, +} + +impl DocExample for SpeculativeExecParams { + fn doc_example() -> &'static Self { + &SPECULATIVE_EXEC_PARAMS + } +} + +/// "speculative_exec" RPC +pub struct SpeculativeExec {} + +#[async_trait] +impl RpcWithParams for SpeculativeExec { + const METHOD: &'static str = "speculative_exec"; + type RequestParams = SpeculativeExecParams; + type ResponseResult = SpeculativeExecTxnResult; + + async fn do_handle_request( + node_client: Arc, + params: Self::RequestParams, + ) -> Result { + handle_request(node_client, params.block_identifier, params.deploy.into()).await + } +} + +async fn handle_request( + node_client: Arc, + identifier: Option, + transaction: Transaction, +) -> Result { + let block_header = common::get_block_header(&*node_client, identifier).await?; + let block_hash = block_header.block_hash(); + let state_root_hash = *block_header.state_root_hash(); + let block_time = block_header.timestamp(); + let protocol_version = block_header.protocol_version(); + + let (execution_result, messages) = node_client + .exec_speculatively( + state_root_hash, + block_time, + protocol_version, + transaction, + block_header, + ) + .await + .map_err(|err| Error::NodeRequest("speculatively executing a transaction", err))? 
+ .into_inner() + .ok_or(Error::SpecExecReturnedNothing)?; + + Ok(SpeculativeExecTxnResult { + api_version: CURRENT_API_VERSION, + block_hash, + execution_result, + messages, + }) +} + +#[cfg(test)] +mod tests { + use std::convert::TryFrom; + + use casper_types_ver_2_0::{ + binary_port::{ + BinaryRequest, BinaryResponse, BinaryResponseAndRequest, GetRequest, + InformationRequestTag, SpeculativeExecutionResult, + }, + testing::TestRng, + Block, TestBlockBuilder, + }; + + use crate::{ClientError, SUPPORTED_PROTOCOL_VERSION}; + + use super::*; + + #[tokio::test] + async fn should_spec_exec() { + let rng = &mut TestRng::new(); + let deploy = Deploy::random(rng); + let block = Block::V2(TestBlockBuilder::new().build(rng)); + let execution_result = ExecutionResultV2::random(rng); + + let res = SpeculativeExec::do_handle_request( + Arc::new(ValidSpecExecMock { + block: block.clone(), + execution_result: execution_result.clone(), + }), + SpeculativeExecParams { + block_identifier: Some(BlockIdentifier::Hash(*block.hash())), + deploy, + }, + ) + .await + .expect("should handle request"); + assert_eq!( + res, + SpeculativeExecTxnResult { + block_hash: *block.hash(), + execution_result, + messages: Messages::new(), + api_version: CURRENT_API_VERSION, + } + ) + } + + #[tokio::test] + async fn should_spec_exec_txn() { + let rng = &mut TestRng::new(); + let transaction = Transaction::random(rng); + let block = Block::V2(TestBlockBuilder::new().build(rng)); + let execution_result = ExecutionResultV2::random(rng); + + let res = SpeculativeExecTxn::do_handle_request( + Arc::new(ValidSpecExecMock { + block: block.clone(), + execution_result: execution_result.clone(), + }), + SpeculativeExecTxnParams { + block_identifier: Some(BlockIdentifier::Hash(*block.hash())), + transaction, + }, + ) + .await + .expect("should handle request"); + assert_eq!( + res, + SpeculativeExecTxnResult { + block_hash: *block.hash(), + execution_result, + messages: Messages::new(), + api_version: 
CURRENT_API_VERSION, + } + ) + } + + struct ValidSpecExecMock { + block: Block, + execution_result: ExecutionResultV2, + } + + #[async_trait] + impl NodeClient for ValidSpecExecMock { + async fn send_request( + &self, + req: BinaryRequest, + ) -> Result { + match req { + BinaryRequest::Get(GetRequest::Information { info_type_tag, .. }) + if InformationRequestTag::try_from(info_type_tag) + == Ok(InformationRequestTag::BlockHeader) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value( + self.block.clone_header(), + SUPPORTED_PROTOCOL_VERSION, + ), + &[], + )) + } + BinaryRequest::TrySpeculativeExec { .. } => Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value( + SpeculativeExecutionResult::new(Some(( + self.execution_result.clone(), + Messages::new(), + ))), + SUPPORTED_PROTOCOL_VERSION, + ), + &[], + )), + req => unimplemented!("unexpected request: {:?}", req), + } + } + } +} diff --git a/rpc_sidecar/src/rpcs/state.rs b/rpc_sidecar/src/rpcs/state.rs new file mode 100644 index 00000000..e614a37f --- /dev/null +++ b/rpc_sidecar/src/rpcs/state.rs @@ -0,0 +1,1385 @@ +//! RPCs related to the state. 
+ +use std::{collections::BTreeMap, str, sync::Arc}; + +use async_trait::async_trait; +use once_cell::sync::Lazy; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use super::{ + common, + common::MERKLE_PROOF, + docs::{DocExample, DOCS_EXAMPLE_API_VERSION}, + ApiVersion, Error, NodeClient, RpcError, RpcWithOptionalParams, RpcWithParams, + CURRENT_API_VERSION, +}; +use casper_types_ver_2_0::{ + account::{Account, AccountHash}, + bytesrepr::Bytes, + package::PackageKindTag, + system::{ + auction::{ + EraValidators, SeigniorageRecipientsSnapshot, ValidatorWeights, + SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY, + }, + AUCTION, + }, + AddressableEntityHash, AuctionState, BlockHash, BlockHeader, BlockHeaderV2, BlockIdentifier, + BlockV2, CLValue, Digest, GlobalStateIdentifier, Key, KeyTag, PublicKey, SecretKey, + StoredValue, Tagged, URef, U512, +}; + +static GET_ITEM_PARAMS: Lazy = Lazy::new(|| GetItemParams { + state_root_hash: *BlockHeaderV2::example().state_root_hash(), + key: Key::from_formatted_str( + "deploy-af684263911154d26fa05be9963171802801a0b6aff8f199b7391eacb8edc9e1", + ) + .unwrap(), + path: vec!["inner".to_string()], +}); +static GET_ITEM_RESULT: Lazy = Lazy::new(|| GetItemResult { + api_version: DOCS_EXAMPLE_API_VERSION, + stored_value: StoredValue::CLValue(CLValue::from_t(1u64).unwrap()), + merkle_proof: MERKLE_PROOF.clone(), +}); +static GET_BALANCE_PARAMS: Lazy = Lazy::new(|| GetBalanceParams { + state_root_hash: *BlockHeaderV2::example().state_root_hash(), + purse_uref: "uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-007" + .to_string(), +}); +static GET_BALANCE_RESULT: Lazy = Lazy::new(|| GetBalanceResult { + api_version: DOCS_EXAMPLE_API_VERSION, + balance_value: U512::from(123_456), + merkle_proof: MERKLE_PROOF.clone(), +}); +static GET_AUCTION_INFO_PARAMS: Lazy = Lazy::new(|| GetAuctionInfoParams { + block_identifier: BlockIdentifier::Hash(*BlockHash::example()), +}); +static GET_AUCTION_INFO_RESULT: Lazy = 
Lazy::new(|| GetAuctionInfoResult { + api_version: DOCS_EXAMPLE_API_VERSION, + auction_state: AuctionState::doc_example().clone(), +}); +static GET_ACCOUNT_INFO_PARAMS: Lazy = Lazy::new(|| { + let secret_key = SecretKey::ed25519_from_bytes([0; 32]).unwrap(); + let public_key = PublicKey::from(&secret_key); + GetAccountInfoParams { + account_identifier: AccountIdentifier::PublicKey(public_key), + block_identifier: Some(BlockIdentifier::Hash(*BlockHash::example())), + } +}); +static GET_ACCOUNT_INFO_RESULT: Lazy = Lazy::new(|| GetAccountInfoResult { + api_version: DOCS_EXAMPLE_API_VERSION, + account: Account::doc_example().clone(), + merkle_proof: MERKLE_PROOF.clone(), +}); +static GET_DICTIONARY_ITEM_PARAMS: Lazy = + Lazy::new(|| GetDictionaryItemParams { + state_root_hash: *BlockHeaderV2::example().state_root_hash(), + dictionary_identifier: DictionaryIdentifier::URef { + seed_uref: "uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-007" + .to_string(), + dictionary_item_key: "a_unique_entry_identifier".to_string(), + }, + }); +static GET_DICTIONARY_ITEM_RESULT: Lazy = + Lazy::new(|| GetDictionaryItemResult { + api_version: DOCS_EXAMPLE_API_VERSION, + dictionary_key: + "dictionary-67518854aa916c97d4e53df8570c8217ccc259da2721b692102d76acd0ee8d1f" + .to_string(), + stored_value: StoredValue::CLValue(CLValue::from_t(1u64).unwrap()), + merkle_proof: MERKLE_PROOF.clone(), + }); +static QUERY_GLOBAL_STATE_PARAMS: Lazy = + Lazy::new(|| QueryGlobalStateParams { + state_identifier: Some(GlobalStateIdentifier::BlockHash(*BlockV2::example().hash())), + key: Key::from_formatted_str( + "deploy-af684263911154d26fa05be9963171802801a0b6aff8f199b7391eacb8edc9e1", + ) + .unwrap(), + path: vec![], + }); +static QUERY_GLOBAL_STATE_RESULT: Lazy = + Lazy::new(|| QueryGlobalStateResult { + api_version: DOCS_EXAMPLE_API_VERSION, + block_header: Some(BlockHeaderV2::example().clone().into()), + stored_value: StoredValue::Account(Account::doc_example().clone()), + 
merkle_proof: MERKLE_PROOF.clone(), + }); +static GET_TRIE_PARAMS: Lazy = Lazy::new(|| GetTrieParams { + trie_key: *BlockHeaderV2::example().state_root_hash(), +}); +static GET_TRIE_RESULT: Lazy = Lazy::new(|| GetTrieResult { + api_version: DOCS_EXAMPLE_API_VERSION, + maybe_trie_bytes: None, +}); +static QUERY_BALANCE_PARAMS: Lazy = Lazy::new(|| QueryBalanceParams { + state_identifier: Some(GlobalStateIdentifier::BlockHash(*BlockHash::example())), + purse_identifier: PurseIdentifier::MainPurseUnderAccountHash(AccountHash::new([9u8; 32])), +}); +static QUERY_BALANCE_RESULT: Lazy = Lazy::new(|| QueryBalanceResult { + api_version: DOCS_EXAMPLE_API_VERSION, + balance: U512::from(123_456), +}); + +/// Params for "state_get_item" RPC request. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetItemParams { + /// Hash of the state root. + pub state_root_hash: Digest, + /// The key under which to query. + pub key: Key, + /// The path components starting from the key as base. + #[serde(default)] + pub path: Vec, +} + +impl DocExample for GetItemParams { + fn doc_example() -> &'static Self { + &GET_ITEM_PARAMS + } +} + +/// Result for "state_get_item" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetItemResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The stored value. + pub stored_value: StoredValue, + /// The Merkle proof. + pub merkle_proof: String, +} + +impl DocExample for GetItemResult { + fn doc_example() -> &'static Self { + &GET_ITEM_RESULT + } +} + +/// "state_get_item" RPC. 
+pub struct GetItem {} + +#[async_trait] +impl RpcWithParams for GetItem { + const METHOD: &'static str = "state_get_item"; + type RequestParams = GetItemParams; + type ResponseResult = GetItemResult; + + async fn do_handle_request( + node_client: Arc, + params: Self::RequestParams, + ) -> Result { + let state_identifier = GlobalStateIdentifier::StateRootHash(params.state_root_hash); + let (stored_value, merkle_proof) = node_client + .query_global_state(Some(state_identifier), params.key, params.path) + .await + .map_err(|err| Error::NodeRequest("global state item", err))? + .ok_or(Error::GlobalStateEntryNotFound)? + .into_inner(); + + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + stored_value, + merkle_proof, + }) + } +} + +/// Params for "state_get_balance" RPC request. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetBalanceParams { + /// The hash of state root. + pub state_root_hash: Digest, + /// Formatted URef. + pub purse_uref: String, +} + +impl DocExample for GetBalanceParams { + fn doc_example() -> &'static Self { + &GET_BALANCE_PARAMS + } +} + +/// Result for "state_get_balance" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetBalanceResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The balance value. + pub balance_value: U512, + /// The Merkle proof. + pub merkle_proof: String, +} + +impl DocExample for GetBalanceResult { + fn doc_example() -> &'static Self { + &GET_BALANCE_RESULT + } +} + +/// "state_get_balance" RPC. 
+pub struct GetBalance {} + +#[async_trait] +impl RpcWithParams for GetBalance { + const METHOD: &'static str = "state_get_balance"; + type RequestParams = GetBalanceParams; + type ResponseResult = GetBalanceResult; + + async fn do_handle_request( + node_client: Arc, + params: Self::RequestParams, + ) -> Result { + let purse_uref = + URef::from_formatted_str(¶ms.purse_uref).map_err(Error::InvalidPurseURef)?; + let state_identifier = GlobalStateIdentifier::StateRootHash(params.state_root_hash); + let result = common::get_balance(&*node_client, purse_uref, Some(state_identifier)).await?; + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + balance_value: result.value, + merkle_proof: result.merkle_proof, + }) + } +} + +/// Params for "state_get_auction_info" RPC request. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetAuctionInfoParams { + /// The block identifier. + pub block_identifier: BlockIdentifier, +} + +impl DocExample for GetAuctionInfoParams { + fn doc_example() -> &'static Self { + &GET_AUCTION_INFO_PARAMS + } +} + +/// Result for "state_get_auction_info" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetAuctionInfoResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The auction state. + pub auction_state: AuctionState, +} + +impl DocExample for GetAuctionInfoResult { + fn doc_example() -> &'static Self { + &GET_AUCTION_INFO_RESULT + } +} + +/// "state_get_auction_info" RPC. 
+pub struct GetAuctionInfo {} + +#[async_trait] +impl RpcWithOptionalParams for GetAuctionInfo { + const METHOD: &'static str = "state_get_auction_info"; + type OptionalRequestParams = GetAuctionInfoParams; + type ResponseResult = GetAuctionInfoResult; + + async fn do_handle_request( + node_client: Arc, + maybe_params: Option, + ) -> Result { + let block_identifier = maybe_params.map(|params| params.block_identifier); + let block_header = node_client + .read_block_header(block_identifier) + .await + .map_err(|err| Error::NodeRequest("block header", err))? + .unwrap(); + + let state_identifier = block_identifier.map(GlobalStateIdentifier::from); + let bid_stored_values = node_client + .query_global_state_by_tag(state_identifier, KeyTag::Bid) + .await + .map_err(|err| Error::NodeRequest("auction bids", err))?; + let bids = bid_stored_values + .into_iter() + .map(|bid| bid.into_bid_kind().ok_or(Error::InvalidAuctionBids)) + .collect::, Error>>()?; + + let (registry_value, _) = node_client + .query_global_state(state_identifier, Key::SystemContractRegistry, vec![]) + .await + .map_err(|err| Error::NodeRequest("system contract registry", err))? + .ok_or(Error::GlobalStateEntryNotFound)? + .into_inner(); + let registry: BTreeMap = registry_value + .into_cl_value() + .ok_or(Error::InvalidAuctionContract)? + .into_t() + .map_err(|_| Error::InvalidAuctionContract)?; + + let &auction_hash = registry.get(AUCTION).ok_or(Error::InvalidAuctionContract)?; + let auction_key = Key::addressable_entity_key(PackageKindTag::System, auction_hash); + let (snapshot_value, _) = node_client + .query_global_state( + state_identifier, + auction_key, + vec![SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY.to_owned()], + ) + .await + .map_err(|err| Error::NodeRequest("auction snapshot", err))? + .ok_or(Error::GlobalStateEntryNotFound)? + .into_inner(); + let snapshot = snapshot_value + .into_cl_value() + .ok_or(Error::InvalidAuctionValidators)? 
+ .into_t() + .map_err(|_| Error::InvalidAuctionValidators)?; + + let validators = era_validators_from_snapshot(snapshot); + let auction_state = AuctionState::new( + *block_header.state_root_hash(), + block_header.height(), + validators, + bids, + ); + + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + auction_state, + }) + } +} + +/// Identifier of an account. +#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)] +#[serde(deny_unknown_fields, untagged)] +pub enum AccountIdentifier { + /// The public key of an account + PublicKey(PublicKey), + /// The account hash of an account + AccountHash(AccountHash), +} + +/// Params for "state_get_account_info" RPC request +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetAccountInfoParams { + /// The public key of the Account. + #[serde(alias = "public_key")] + pub account_identifier: AccountIdentifier, + /// The block identifier. + pub block_identifier: Option, +} + +impl DocExample for GetAccountInfoParams { + fn doc_example() -> &'static Self { + &GET_ACCOUNT_INFO_PARAMS + } +} + +/// Result for "state_get_account_info" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetAccountInfoResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The account. + pub account: Account, + /// The Merkle proof. + pub merkle_proof: String, +} + +impl DocExample for GetAccountInfoResult { + fn doc_example() -> &'static Self { + &GET_ACCOUNT_INFO_RESULT + } +} + +/// "state_get_account_info" RPC. 
+pub struct GetAccountInfo {} + +#[async_trait] +impl RpcWithParams for GetAccountInfo { + const METHOD: &'static str = "state_get_account_info"; + type RequestParams = GetAccountInfoParams; + type ResponseResult = GetAccountInfoResult; + + async fn do_handle_request( + node_client: Arc, + params: Self::RequestParams, + ) -> Result { + let maybe_state_identifier = params.block_identifier.map(GlobalStateIdentifier::from); + let base_key = { + let account_hash = match params.account_identifier { + AccountIdentifier::PublicKey(public_key) => public_key.to_account_hash(), + AccountIdentifier::AccountHash(account_hash) => account_hash, + }; + Key::Account(account_hash) + }; + let (account_value, merkle_proof) = node_client + .query_global_state(maybe_state_identifier, base_key, vec![]) + .await + .map_err(|err| Error::NodeRequest("account info", err))? + .ok_or(Error::GlobalStateEntryNotFound)? + .into_inner(); + let account = account_value + .into_account() + .ok_or(Error::InvalidAccountInfo)?; + + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + account, + merkle_proof, + }) + } +} + +#[derive(Serialize, Deserialize, Debug, JsonSchema, Clone)] +/// Options for dictionary item lookups. +pub enum DictionaryIdentifier { + /// Lookup a dictionary item via an Account's named keys. + AccountNamedKey { + /// The account key as a formatted string whose named keys contains dictionary_name. + key: String, + /// The named key under which the dictionary seed URef is stored. + dictionary_name: String, + /// The dictionary item key formatted as a string. + dictionary_item_key: String, + }, + /// Lookup a dictionary item via a Contract's named keys. + ContractNamedKey { + /// The contract key as a formatted string whose named keys contains dictionary_name. + key: String, + /// The named key under which the dictionary seed URef is stored. + dictionary_name: String, + /// The dictionary item key formatted as a string. 
+ dictionary_item_key: String, + }, + /// Lookup a dictionary item via its seed URef. + URef { + /// The dictionary's seed URef. + seed_uref: String, + /// The dictionary item key formatted as a string. + dictionary_item_key: String, + }, + /// Lookup a dictionary item via its unique key. + Dictionary(String), +} + +impl DictionaryIdentifier { + fn get_dictionary_address( + &self, + maybe_stored_value: Option, + ) -> Result { + match self { + DictionaryIdentifier::AccountNamedKey { + dictionary_name, + dictionary_item_key, + .. + } + | DictionaryIdentifier::ContractNamedKey { + dictionary_name, + dictionary_item_key, + .. + } => { + let named_keys = match &maybe_stored_value { + Some(StoredValue::Account(account)) => account.named_keys(), + Some(StoredValue::AddressableEntity(contract)) => contract.named_keys(), + Some(other) => { + return Err(Error::InvalidTypeUnderDictionaryKey(other.type_name())) + } + None => return Err(Error::DictionaryKeyNotFound), + }; + + let key_bytes = dictionary_item_key.as_str().as_bytes(); + let seed_uref = match named_keys.get(dictionary_name) { + Some(key) => *key + .as_uref() + .ok_or_else(|| Error::DictionaryValueIsNotAUref(key.tag()))?, + None => return Err(Error::DictionaryNameNotFound), + }; + + Ok(Key::dictionary(seed_uref, key_bytes)) + } + DictionaryIdentifier::URef { + seed_uref, + dictionary_item_key, + } => { + let key_bytes = dictionary_item_key.as_str().as_bytes(); + let seed_uref = URef::from_formatted_str(seed_uref) + .map_err(|error| Error::DictionaryKeyCouldNotBeParsed(error.to_string()))?; + Ok(Key::dictionary(seed_uref, key_bytes)) + } + DictionaryIdentifier::Dictionary(address) => Key::from_formatted_str(address) + .map_err(|error| Error::DictionaryKeyCouldNotBeParsed(error.to_string())), + } + } +} + +/// Params for "state_get_dictionary_item" RPC request. 
+#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetDictionaryItemParams { + /// Hash of the state root + pub state_root_hash: Digest, + /// The Dictionary query identifier. + pub dictionary_identifier: DictionaryIdentifier, +} + +impl DocExample for GetDictionaryItemParams { + fn doc_example() -> &'static Self { + &GET_DICTIONARY_ITEM_PARAMS + } +} + +/// Result for "state_get_dictionary_item" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetDictionaryItemResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The key under which the value is stored. + pub dictionary_key: String, + /// The stored value. + pub stored_value: StoredValue, + /// The Merkle proof. + pub merkle_proof: String, +} + +impl DocExample for GetDictionaryItemResult { + fn doc_example() -> &'static Self { + &GET_DICTIONARY_ITEM_RESULT + } +} + +/// "state_get_dictionary_item" RPC. +pub struct GetDictionaryItem {} + +#[async_trait] +impl RpcWithParams for GetDictionaryItem { + const METHOD: &'static str = "state_get_dictionary_item"; + type RequestParams = GetDictionaryItemParams; + type ResponseResult = GetDictionaryItemResult; + + async fn do_handle_request( + node_client: Arc, + params: Self::RequestParams, + ) -> Result { + let state_identifier = GlobalStateIdentifier::StateRootHash(params.state_root_hash); + let dictionary_key = match params.dictionary_identifier { + DictionaryIdentifier::AccountNamedKey { ref key, .. } + | DictionaryIdentifier::ContractNamedKey { ref key, .. } => { + let base_key = Key::from_formatted_str(key).map_err(Error::InvalidDictionaryKey)?; + let (value, _) = node_client + .query_global_state(Some(state_identifier), base_key, vec![]) + .await + .map_err(|err| Error::NodeRequest("dictionary key", err))? + .ok_or(Error::GlobalStateEntryNotFound)? 
+ .into_inner(); + params + .dictionary_identifier + .get_dictionary_address(Some(value))? + } + DictionaryIdentifier::URef { .. } | DictionaryIdentifier::Dictionary(_) => { + params.dictionary_identifier.get_dictionary_address(None)? + } + }; + let (stored_value, merkle_proof) = node_client + .query_global_state(Some(state_identifier), dictionary_key, vec![]) + .await + .map_err(|err| Error::NodeRequest("dictionary item", err))? + .ok_or(Error::GlobalStateEntryNotFound)? + .into_inner(); + + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + dictionary_key: dictionary_key.to_formatted_string(), + stored_value, + merkle_proof, + }) + } +} + +/// Params for "query_global_state" RPC +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct QueryGlobalStateParams { + /// The identifier used for the query. If not provided, the tip of the chain will be used. + pub state_identifier: Option, + /// The key under which to query. + pub key: Key, + /// The path components starting from the key as base. + #[serde(default)] + pub path: Vec, +} + +impl DocExample for QueryGlobalStateParams { + fn doc_example() -> &'static Self { + &QUERY_GLOBAL_STATE_PARAMS + } +} + +/// Result for "query_global_state" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct QueryGlobalStateResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The block header if a Block hash was provided. + pub block_header: Option, + /// The stored value. + pub stored_value: StoredValue, + /// The Merkle proof. 
+ pub merkle_proof: String, +} + +impl DocExample for QueryGlobalStateResult { + fn doc_example() -> &'static Self { + &QUERY_GLOBAL_STATE_RESULT + } +} + +/// "query_global_state" RPC +pub struct QueryGlobalState {} + +#[async_trait] +impl RpcWithParams for QueryGlobalState { + const METHOD: &'static str = "query_global_state"; + type RequestParams = QueryGlobalStateParams; + type ResponseResult = QueryGlobalStateResult; + + async fn do_handle_request( + node_client: Arc, + params: Self::RequestParams, + ) -> Result { + let block_header = match params.state_identifier { + Some(GlobalStateIdentifier::BlockHash(block_hash)) => { + let identifier = BlockIdentifier::Hash(block_hash); + Some(common::get_block_header(&*node_client, Some(identifier)).await?) + } + Some(GlobalStateIdentifier::BlockHeight(block_height)) => { + let identifier = BlockIdentifier::Height(block_height); + Some(common::get_block_header(&*node_client, Some(identifier)).await?) + } + _ => None, + }; + + let (stored_value, merkle_proof) = node_client + .query_global_state(params.state_identifier, params.key, params.path) + .await + .map_err(|err| Error::NodeRequest("global state item", err))? + .ok_or(Error::GlobalStateEntryNotFound)? + .into_inner(); + + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + block_header, + stored_value, + merkle_proof, + }) + } +} + +/// Identifier of a purse. +#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)] +#[serde(deny_unknown_fields, rename_all = "snake_case")] +pub enum PurseIdentifier { + /// The main purse of the account identified by this public key. + MainPurseUnderPublicKey(PublicKey), + /// The main purse of the account identified by this account hash. + MainPurseUnderAccountHash(AccountHash), + /// The purse identified by this URef. + PurseUref(URef), +} + +/// Params for "query_balance" RPC request. 
+#[derive(Serialize, Deserialize, Debug, JsonSchema)] +pub struct QueryBalanceParams { + /// The state identifier used for the query, if none is passed + /// the tip of the chain will be used. + pub state_identifier: Option, + /// The identifier to obtain the purse corresponding to balance query. + pub purse_identifier: PurseIdentifier, +} + +impl DocExample for QueryBalanceParams { + fn doc_example() -> &'static Self { + &QUERY_BALANCE_PARAMS + } +} + +/// Result for "query_balance" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +pub struct QueryBalanceResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The balance represented in motes. + pub balance: U512, +} + +impl DocExample for QueryBalanceResult { + fn doc_example() -> &'static Self { + &QUERY_BALANCE_RESULT + } +} + +/// "query_balance" RPC. +pub struct QueryBalance {} + +#[async_trait] +impl RpcWithParams for QueryBalance { + const METHOD: &'static str = "query_balance"; + type RequestParams = QueryBalanceParams; + type ResponseResult = QueryBalanceResult; + + async fn do_handle_request( + node_client: Arc, + params: Self::RequestParams, + ) -> Result { + let purse = common::get_main_purse( + &*node_client, + params.purse_identifier, + params.state_identifier, + ) + .await?; + let balance = common::get_balance(&*node_client, purse, params.state_identifier).await?; + + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + balance: balance.value, + }) + } +} + +/// Parameters for "state_get_trie" RPC request. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +pub struct GetTrieParams { + /// A trie key. + pub trie_key: Digest, +} + +impl DocExample for GetTrieParams { + fn doc_example() -> &'static Self { + &GET_TRIE_PARAMS + } +} + +/// Result for "state_get_trie" RPC response. 
+#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetTrieResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// A list of keys read under the specified prefix. + #[schemars( + with = "Option", + description = "A trie from global state storage, bytesrepr serialized and hex-encoded." + )] + pub maybe_trie_bytes: Option, +} + +impl DocExample for GetTrieResult { + fn doc_example() -> &'static Self { + &GET_TRIE_RESULT + } +} + +/// `state_get_trie` RPC. +pub struct GetTrie {} + +#[async_trait] +impl RpcWithParams for GetTrie { + const METHOD: &'static str = "state_get_trie"; + type RequestParams = GetTrieParams; + type ResponseResult = GetTrieResult; + + async fn do_handle_request( + node_client: Arc, + params: Self::RequestParams, + ) -> Result { + let maybe_trie = node_client + .read_trie_bytes(params.trie_key) + .await + .map_err(|err| Error::NodeRequest("trie", err))?; + + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + maybe_trie_bytes: maybe_trie.map(Into::into), + }) + } +} + +fn era_validators_from_snapshot(snapshot: SeigniorageRecipientsSnapshot) -> EraValidators { + snapshot + .into_iter() + .map(|(era_id, recipients)| { + let validator_weights = recipients + .into_iter() + .filter_map(|(public_key, bid)| bid.total_stake().map(|stake| (public_key, stake))) + .collect::(); + (era_id, validator_weights) + }) + .collect() +} + +#[cfg(test)] +mod tests { + use std::{convert::TryFrom, iter}; + + use crate::{ClientError, SUPPORTED_PROTOCOL_VERSION}; + use casper_types_ver_2_0::{ + addressable_entity::{ActionThresholds, AssociatedKeys, MessageTopics, NamedKeys}, + binary_port::{ + BinaryRequest, BinaryResponse, BinaryResponseAndRequest, GetRequest, + GlobalStateQueryResult, GlobalStateRequest, InformationRequestTag, + }, + system::auction::BidKind, + testing::TestRng, + AccessRights, AddressableEntity, Block, ByteCodeHash, EntryPoints, 
PackageHash, + ProtocolVersion, TestBlockBuilder, + }; + use rand::Rng; + + use super::*; + + #[tokio::test] + async fn should_read_state_item() { + let rng = &mut TestRng::new(); + let stored_value = StoredValue::CLValue(CLValue::from_t(rng.gen::()).unwrap()); + let merkle_proof = rng.random_string(10..20); + let expected = GlobalStateQueryResult::new(stored_value.clone(), merkle_proof.clone()); + + let resp = GetItem::do_handle_request( + Arc::new(ValidGlobalStateResultMock(expected.clone())), + GetItemParams { + state_root_hash: rng.gen(), + key: rng.gen(), + path: vec![], + }, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetItemResult { + api_version: CURRENT_API_VERSION, + stored_value, + merkle_proof, + } + ); + } + + #[tokio::test] + async fn should_read_balance() { + let rng = &mut TestRng::new(); + let balance_value: U512 = rng.gen(); + let merkle_proof = rng.random_string(10..20); + let result = GlobalStateQueryResult::new( + StoredValue::CLValue(CLValue::from_t(balance_value).unwrap()), + merkle_proof.clone(), + ); + + let resp = GetBalance::do_handle_request( + Arc::new(ValidGlobalStateResultMock(result.clone())), + GetBalanceParams { + state_root_hash: rng.gen(), + purse_uref: URef::new(rng.gen(), AccessRights::empty()).to_formatted_string(), + }, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetBalanceResult { + api_version: CURRENT_API_VERSION, + balance_value, + merkle_proof, + } + ); + } + + #[tokio::test] + async fn should_read_auction_info() { + struct ClientMock { + block: Block, + bids: Vec, + contract_hash: AddressableEntityHash, + snapshot: SeigniorageRecipientsSnapshot, + } + + #[async_trait] + impl NodeClient for ClientMock { + async fn send_request( + &self, + req: BinaryRequest, + ) -> Result { + match req { + BinaryRequest::Get(GetRequest::Information { info_type_tag, .. 
}) + if InformationRequestTag::try_from(info_type_tag) + == Ok(InformationRequestTag::BlockHeader) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value( + self.block.clone_header(), + SUPPORTED_PROTOCOL_VERSION, + ), + &[], + )) + } + BinaryRequest::Get(GetRequest::State(GlobalStateRequest::AllItems { + key_tag: KeyTag::Bid, + .. + })) => { + let bids = self + .bids + .iter() + .cloned() + .map(StoredValue::BidKind) + .collect::>(); + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(bids, SUPPORTED_PROTOCOL_VERSION), + &[], + )) + } + BinaryRequest::Get(GetRequest::State(GlobalStateRequest::Item { + base_key: Key::SystemContractRegistry, + .. + })) => { + let system_contracts = + iter::once((AUCTION.to_string(), self.contract_hash)) + .collect::>(); + let result = GlobalStateQueryResult::new( + StoredValue::CLValue(CLValue::from_t(system_contracts).unwrap()), + String::default(), + ); + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(result, SUPPORTED_PROTOCOL_VERSION), + &[], + )) + } + BinaryRequest::Get(GetRequest::State(GlobalStateRequest::Item { + base_key: Key::AddressableEntity(_, _), + .. 
+ })) => { + let result = GlobalStateQueryResult::new( + StoredValue::CLValue(CLValue::from_t(self.snapshot.clone()).unwrap()), + String::default(), + ); + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(result, SUPPORTED_PROTOCOL_VERSION), + &[], + )) + } + req => unimplemented!("unexpected request: {:?}", req), + } + } + } + + let rng = &mut TestRng::new(); + let block = TestBlockBuilder::new().build(rng); + + let resp = GetAuctionInfo::do_handle_request( + Arc::new(ClientMock { + block: Block::V2(block.clone()), + bids: Default::default(), + contract_hash: rng.gen(), + snapshot: Default::default(), + }), + None, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetAuctionInfoResult { + api_version: CURRENT_API_VERSION, + auction_state: AuctionState::new( + *block.state_root_hash(), + block.height(), + Default::default(), + Default::default() + ), + } + ); + } + + #[tokio::test] + async fn should_read_dictionary_item() { + let rng = &mut TestRng::new(); + let stored_value = StoredValue::CLValue(CLValue::from_t(rng.gen::()).unwrap()); + let merkle_proof = rng.random_string(10..20); + let expected = GlobalStateQueryResult::new(stored_value.clone(), merkle_proof.clone()); + + let uref = URef::new(rng.gen(), AccessRights::empty()); + let item_key = rng.random_string(5..10); + + let resp = GetDictionaryItem::do_handle_request( + Arc::new(ValidGlobalStateResultMock(expected.clone())), + GetDictionaryItemParams { + state_root_hash: rng.gen(), + dictionary_identifier: DictionaryIdentifier::URef { + seed_uref: uref.to_formatted_string(), + dictionary_item_key: item_key.clone(), + }, + }, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetDictionaryItemResult { + api_version: CURRENT_API_VERSION, + dictionary_key: Key::dictionary(uref, item_key.as_bytes()).to_formatted_string(), + stored_value, + merkle_proof, + } + ); + } + + #[tokio::test] + async fn should_read_query_global_state_result() { + let rng = 
&mut TestRng::new(); + let block = Block::V2(TestBlockBuilder::new().build(rng)); + let stored_value = StoredValue::CLValue(CLValue::from_t(rng.gen::()).unwrap()); + let merkle_proof = rng.random_string(10..20); + let expected = GlobalStateQueryResult::new(stored_value.clone(), merkle_proof.clone()); + + let resp = QueryGlobalState::do_handle_request( + Arc::new(ValidGlobalStateResultWithBlockMock { + block: block.clone(), + result: expected.clone(), + }), + QueryGlobalStateParams { + state_identifier: Some(GlobalStateIdentifier::BlockHash(*block.hash())), + key: rng.gen(), + path: vec![], + }, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + QueryGlobalStateResult { + api_version: CURRENT_API_VERSION, + block_header: Some(block.take_header()), + stored_value, + merkle_proof, + } + ); + } + + #[tokio::test] + async fn should_read_query_balance_by_uref_result() { + let rng = &mut TestRng::new(); + let block = Block::V2(TestBlockBuilder::new().build(rng)); + let balance = rng.gen::(); + let stored_value = StoredValue::CLValue(CLValue::from_t(balance).unwrap()); + let expected = GlobalStateQueryResult::new(stored_value.clone(), rng.random_string(10..20)); + + let resp = QueryBalance::do_handle_request( + Arc::new(ValidGlobalStateResultWithBlockMock { + block: block.clone(), + result: expected.clone(), + }), + QueryBalanceParams { + state_identifier: None, + purse_identifier: PurseIdentifier::PurseUref(URef::new( + rng.gen(), + AccessRights::empty(), + )), + }, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + QueryBalanceResult { + api_version: CURRENT_API_VERSION, + balance + } + ); + } + + #[tokio::test] + async fn should_read_query_balance_by_account_result() { + use casper_types_ver_2_0::account::{ActionThresholds, AssociatedKeys}; + + struct ClientMock { + block: Block, + account: Account, + balance: U512, + } + + #[async_trait] + impl NodeClient for ClientMock { + async fn send_request( + &self, + req: 
BinaryRequest, + ) -> Result { + match req { + BinaryRequest::Get(GetRequest::Information { info_type_tag, .. }) + if InformationRequestTag::try_from(info_type_tag) + == Ok(InformationRequestTag::BlockHeader) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value( + self.block.clone_header(), + SUPPORTED_PROTOCOL_VERSION, + ), + &[], + )) + } + BinaryRequest::Get(GetRequest::State(GlobalStateRequest::Item { + base_key: Key::Account(_), + .. + })) => Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value( + GlobalStateQueryResult::new( + StoredValue::Account(self.account.clone()), + String::default(), + ), + SUPPORTED_PROTOCOL_VERSION, + ), + &[], + )), + BinaryRequest::Get(GetRequest::State(GlobalStateRequest::Item { + base_key: Key::Balance(_), + .. + })) => Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value( + GlobalStateQueryResult::new( + StoredValue::CLValue(CLValue::from_t(self.balance).unwrap()), + String::default(), + ), + SUPPORTED_PROTOCOL_VERSION, + ), + &[], + )), + req => unimplemented!("unexpected request: {:?}", req), + } + } + } + + let rng = &mut TestRng::new(); + let block = Block::V2(TestBlockBuilder::new().build(rng)); + let account = Account::new( + rng.gen(), + NamedKeys::default(), + rng.gen(), + AssociatedKeys::default(), + ActionThresholds::default(), + ); + + let balance = rng.gen::(); + + let resp = QueryBalance::do_handle_request( + Arc::new(ClientMock { + block: block.clone(), + account: account.clone(), + balance, + }), + QueryBalanceParams { + state_identifier: None, + purse_identifier: PurseIdentifier::MainPurseUnderAccountHash( + account.account_hash(), + ), + }, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + QueryBalanceResult { + api_version: CURRENT_API_VERSION, + balance + } + ); + } + + #[tokio::test] + async fn should_read_query_balance_by_addressable_entity_result() { + struct ClientMock { + block: Block, + entity_hash: AddressableEntityHash, + entity: 
AddressableEntity, + balance: U512, + } + + #[async_trait] + impl NodeClient for ClientMock { + async fn send_request( + &self, + req: BinaryRequest, + ) -> Result { + match req { + BinaryRequest::Get(GetRequest::Information { info_type_tag, .. }) + if InformationRequestTag::try_from(info_type_tag) + == Ok(InformationRequestTag::BlockHeader) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value( + self.block.clone_header(), + SUPPORTED_PROTOCOL_VERSION, + ), + &[], + )) + } + BinaryRequest::Get(GetRequest::State(GlobalStateRequest::Item { + base_key: Key::Account(_), + .. + })) => { + let key = + Key::addressable_entity_key(PackageKindTag::Account, self.entity_hash); + let value = CLValue::from_t(key).unwrap(); + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value( + GlobalStateQueryResult::new( + StoredValue::CLValue(value), + String::default(), + ), + SUPPORTED_PROTOCOL_VERSION, + ), + &[], + )) + } + BinaryRequest::Get(GetRequest::State(GlobalStateRequest::Item { + base_key: Key::AddressableEntity(_, _), + .. + })) => Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value( + GlobalStateQueryResult::new( + StoredValue::AddressableEntity(self.entity.clone()), + String::default(), + ), + SUPPORTED_PROTOCOL_VERSION, + ), + &[], + )), + BinaryRequest::Get(GetRequest::State(GlobalStateRequest::Item { + base_key: Key::Balance(_), + .. 
+ })) => Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value( + GlobalStateQueryResult::new( + StoredValue::CLValue(CLValue::from_t(self.balance).unwrap()), + String::default(), + ), + SUPPORTED_PROTOCOL_VERSION, + ), + &[], + )), + req => unimplemented!("unexpected request: {:?}", req), + } + } + } + + let rng = &mut TestRng::new(); + let block = Block::V2(TestBlockBuilder::new().build(rng)); + let entity = AddressableEntity::new( + PackageHash::new(rng.gen()), + ByteCodeHash::new(rng.gen()), + NamedKeys::default(), + EntryPoints::default(), + ProtocolVersion::V1_0_0, + rng.gen(), + AssociatedKeys::default(), + ActionThresholds::default(), + MessageTopics::default(), + ); + + let balance: U512 = rng.gen(); + let entity_hash: AddressableEntityHash = rng.gen(); + + let resp = QueryBalance::do_handle_request( + Arc::new(ClientMock { + block: block.clone(), + entity_hash, + entity: entity.clone(), + balance, + }), + QueryBalanceParams { + state_identifier: None, + purse_identifier: PurseIdentifier::MainPurseUnderAccountHash(rng.gen()), + }, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + QueryBalanceResult { + api_version: CURRENT_API_VERSION, + balance + } + ); + } + + struct ValidGlobalStateResultMock(GlobalStateQueryResult); + + #[async_trait] + impl NodeClient for ValidGlobalStateResultMock { + async fn send_request( + &self, + req: BinaryRequest, + ) -> Result { + match req { + BinaryRequest::Get(GetRequest::State { .. 
}) => Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(self.0.clone(), SUPPORTED_PROTOCOL_VERSION), + &[], + )), + req => unimplemented!("unexpected request: {:?}", req), + } + } + } + + struct ValidGlobalStateResultWithBlockMock { + block: Block, + result: GlobalStateQueryResult, + } + + #[async_trait] + impl NodeClient for ValidGlobalStateResultWithBlockMock { + async fn send_request( + &self, + req: BinaryRequest, + ) -> Result { + match req { + BinaryRequest::Get(GetRequest::Information { info_type_tag, .. }) + if InformationRequestTag::try_from(info_type_tag) + == Ok(InformationRequestTag::BlockHeader) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value( + self.block.clone_header(), + SUPPORTED_PROTOCOL_VERSION, + ), + &[], + )) + } + BinaryRequest::Get(GetRequest::State { .. }) => Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(self.result.clone(), SUPPORTED_PROTOCOL_VERSION), + &[], + )), + req => unimplemented!("unexpected request: {:?}", req), + } + } + } +} diff --git a/rpc_sidecar/src/speculative_exec_config.rs b/rpc_sidecar/src/speculative_exec_config.rs new file mode 100644 index 00000000..dea42d0c --- /dev/null +++ b/rpc_sidecar/src/speculative_exec_config.rs @@ -0,0 +1,49 @@ +use datasize::DataSize; +use serde::Deserialize; + +/// Default binding address for the speculative execution RPC HTTP server. +/// +/// Uses a fixed port per node, but binds on any interface. +const DEFAULT_ADDRESS: &str = "0.0.0.0:1"; +/// Default rate limit in qps. +const DEFAULT_QPS_LIMIT: u64 = 1; +/// Default max body bytes (2.5MB). +const DEFAULT_MAX_BODY_BYTES: u32 = 2_621_440; +/// Default CORS origin. +const DEFAULT_CORS_ORIGIN: &str = ""; + +/// JSON-RPC HTTP server configuration. +#[derive(Clone, DataSize, Debug, Deserialize, PartialEq, Eq)] +// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. 
+#[serde(deny_unknown_fields)] +pub struct Config { + /// Setting to enable the HTTP server. + pub enable_server: bool, + /// Address to bind JSON-RPC speculative execution server to. + pub address: String, + /// Maximum rate limit in queries per second. + pub qps_limit: u64, + /// Maximum number of bytes to accept in a single request body. + pub max_body_bytes: u32, + /// CORS origin. + pub cors_origin: String, +} + +impl Config { + /// Creates a default instance for `RpcServer`. + pub fn new() -> Self { + Config { + enable_server: false, + address: DEFAULT_ADDRESS.to_string(), + qps_limit: DEFAULT_QPS_LIMIT, + max_body_bytes: DEFAULT_MAX_BODY_BYTES, + cors_origin: DEFAULT_CORS_ORIGIN.to_string(), + } + } +} + +impl Default for Config { + fn default() -> Self { + Config::new() + } +} diff --git a/rpc_sidecar/src/speculative_exec_server.rs b/rpc_sidecar/src/speculative_exec_server.rs new file mode 100644 index 00000000..5dfde0fc --- /dev/null +++ b/rpc_sidecar/src/speculative_exec_server.rs @@ -0,0 +1,70 @@ +use std::sync::Arc; + +use hyper::server::{conn::AddrIncoming, Builder}; + +use casper_json_rpc::{CorsOrigin, RequestHandlersBuilder}; + +use crate::{ + node_client::NodeClient, + rpcs::{ + speculative_exec::{SpeculativeExec, SpeculativeExecTxn}, + RpcWithParams, + }, +}; + +/// The URL path for all JSON-RPC requests. +pub const SPECULATIVE_EXEC_API_PATH: &str = "rpc"; + +pub const SPECULATIVE_EXEC_SERVER_NAME: &str = "speculative execution"; + +/// Run the speculative execution server. 
+pub async fn run( + node: Arc, + builder: Builder, + qps_limit: u64, + max_body_bytes: u32, + cors_origin: String, +) { + let mut handlers = RequestHandlersBuilder::new(); + SpeculativeExecTxn::register_as_handler(node.clone(), &mut handlers); + SpeculativeExec::register_as_handler(node, &mut handlers); + let handlers = handlers.build(); + + match cors_origin.as_str() { + "" => { + super::rpcs::run( + builder, + handlers, + qps_limit, + max_body_bytes, + SPECULATIVE_EXEC_API_PATH, + SPECULATIVE_EXEC_SERVER_NAME, + ) + .await; + } + "*" => { + super::rpcs::run_with_cors( + builder, + handlers, + qps_limit, + max_body_bytes, + SPECULATIVE_EXEC_API_PATH, + SPECULATIVE_EXEC_SERVER_NAME, + CorsOrigin::Any, + ) + .await + } + _ => { + super::rpcs::run_with_cors( + builder, + handlers, + qps_limit, + max_body_bytes, + SPECULATIVE_EXEC_API_PATH, + SPECULATIVE_EXEC_SERVER_NAME, + CorsOrigin::Specified(cors_origin), + ) + .await + } + } +} diff --git a/rpc_sidecar/src/testing/mod.rs b/rpc_sidecar/src/testing/mod.rs new file mode 100644 index 00000000..ed2dea49 --- /dev/null +++ b/rpc_sidecar/src/testing/mod.rs @@ -0,0 +1,72 @@ +use bytes::{BufMut, BytesMut}; +use juliet::{ + io::IoCoreBuilder, + protocol::ProtocolBuilder, + rpc::{IncomingRequest, RpcBuilder}, + ChannelConfiguration, ChannelId, +}; +use tokio::net::{TcpListener, TcpStream}; + +const LOCALHOST: &str = "127.0.0.1"; + +pub struct BinaryPortMock { + port: u16, + response: Vec, +} + +impl BinaryPortMock { + pub fn new(port: u16, response: Vec) -> Self { + Self { port, response } + } + + pub async fn start(&self) { + let port = self.port; + let addr = format!("{}:{}", LOCALHOST, port); + let protocol_builder = ProtocolBuilder::<1>::with_default_channel_config( + ChannelConfiguration::default() + .with_request_limit(300) + .with_max_request_payload_size(1000) + .with_max_response_payload_size(1000), + ); + + let io_builder = IoCoreBuilder::new(protocol_builder).buffer_size(ChannelId::new(0), 20); + + let 
rpc_builder = Box::leak(Box::new(RpcBuilder::new(io_builder))); + let listener = TcpListener::bind(addr.clone()) + .await + .expect("failed to listen"); + loop { + match listener.accept().await { + Ok((client, _addr)) => { + let response_payload = self.response.clone(); + tokio::spawn(handle_client(client, rpc_builder, response_payload)); + } + Err(io_err) => { + println!("acceptance failure: {:?}", io_err); + } + } + } + } +} + +async fn handle_client( + mut client: TcpStream, + rpc_builder: &RpcBuilder, + response: Vec, +) { + let (reader, writer) = client.split(); + let (client, mut server) = rpc_builder.build(reader, writer); + while let Ok(Some(incoming_request)) = server.next_request().await { + tokio::spawn(handle_request(incoming_request, response.clone())); + } + drop(client); +} + +async fn handle_request(incoming_request: IncomingRequest, response: Vec) { + let mut response_payload = BytesMut::new(); + let byt = response; + for b in byt { + response_payload.put_u8(b); + } + incoming_request.respond(Some(response_payload.freeze())); +} diff --git a/rust-toolchain.toml b/rust-toolchain.toml index bbd3374c..140037d9 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,5 +1,5 @@ [toolchain] -channel = "1.74.0" +channel = "1.75.0" components = [ "rustfmt", "clippy" ] targets = [ "wasm32-unknown-unknown" ] profile = "minimal" \ No newline at end of file diff --git a/sidecar/Cargo.toml b/sidecar/Cargo.toml index ebc56509..f10a6111 100644 --- a/sidecar/Cargo.toml +++ b/sidecar/Cargo.toml @@ -1,94 +1,33 @@ [package] -name = "casper-event-sidecar" -authors = ["George Williamson ", "Jakub Zajkowski "] +name = "casper-sidecar" version = "1.0.0" -edition = "2018" +authors = ["Jakub Zajkowski "] +edition = "2021" +description = "Base module that spins up casper sidecar" readme = "README.md" -description = "App for storing and republishing sse events of a casper node" -license-file = "../LICENSE" -documentation = "README.md" -homepage = 
"https://github.com/CasperLabs/event-sidecar" -repository = "https://github.com/CasperLabs/event-sidecar" - -[features] -additional-metrics = ["casper-event-types/additional-metrics"] +homepage = "https://casperlabs.io" +repository = "https://github.com/CasperLabs/event-sidecar/tree/dev" +license = "Apache-2.0" [dependencies] -anyhow = { version = "1.0.44", default-features = false } -async-trait = "0.1.56" -bytes = "1.2.0" -casper-event-listener = { path = "../listener", version = "1.0.0" } -casper-event-types = { path = "../types", version = "1.0.0" } -casper-types = { version = "3.0.0", features = ["std", "json-schema"] } +anyhow = { workspace = true } +backtrace = "0.3.69" +casper-event-sidecar = { workspace = true } +casper-rpc-sidecar = { workspace = true } clap = { version = "4.0.32", features = ["derive"] } -derive-new = "0.5.9" -eventsource-stream = "0.2.3" -futures = "0.3.17" -hex = "0.4.3" -hex_fmt = "0.3.0" -http = "0.2.1" -hyper = "0.14.4" -indexmap = "2.0.0" -itertools = "0.10.3" -jsonschema = "0.17.1" -rand = "0.8.3" -regex = "1.6.0" -reqwest = "0.11.11" -schemars = "0.8.5" -sea-query = "0.30" -serde = { version = "1.0", features = ["derive", "rc"] } -serde_json = "1.0" -sqlx = { version = "0.7", features = ["runtime-tokio-native-tls", "any", "sqlite", "postgres"] } -thiserror = "1" -tokio = { version = "1.23.1", features = ["full"] } -tokio-stream = { version = "0.1.4", features = ["sync"] } -toml = "0.5.8" -tower = { version = "0.4.13", features = ["buffer", "limit", "make", "timeout"] } -tracing = "0.1" -tracing-subscriber = "0.3" -utoipa = { version = "3.4.4", features = ["rc_schema"]} -utoipa-swagger-ui = { version = "3.1.5" } -warp = { version = "0.3.6", features = ["compression"] } -wheelbuf = "0.2.0" -once_cell = { workspace = true } - -[target.'cfg(not(target_env = "msvc"))'.dependencies] -tikv-jemallocator = "0.5" +datasize = { workspace = true, features = ["detailed", "fake_clock-types"] } +futures = { workspace = true } +num_cpus = "1" 
+serde = { workspace = true, default-features = false, features = ["alloc", "derive"] } +tokio = { workspace = true, features = ["full"] } +toml = { workspace = true } +tracing = { workspace = true, default-features = true } +tracing-subscriber = { workspace = true, features = ["env-filter", "fmt", "json"] } +thiserror = { workspace = true } [dev-dependencies] -async-stream = { workspace = true } -casper-event-types = { path = "../types", version = "1.0.0", features = ["sse-data-testing"] } -casper-types = { version = "3.0.0", features = ["std", "testing"] } -colored = "2.0.0" -futures-util = { workspace = true } -portpicker = "0.1.1" -pretty_assertions = "1.3.0" -reqwest = { version = "0.11.3", features = ["stream"] } -tabled = { version = "0.10.0", features = ["derive", "color"] } -tempfile = "3" -tokio-util = "0.7.8" -pg-embed = { git = "https://github.com/faokunega/pg-embed", tag = "v0.8.0" } - -[package.metadata.deb] -revision = "0" -assets = [ - ["../target/release/casper-event-sidecar", "/usr/bin/casper-event-sidecar", "755"], - ["../resources/ETC_README.md", "/etc/casper-event-sidecar/README.md", "644"], - ["../resources/default_config.toml", "/etc/casper-event-sidecar/config.toml", "644"] -] -maintainer-scripts = "../resources/maintainer_scripts/debian" -extended-description = """ -Package for Casper Event Sidecar -""" +casper-event-sidecar = { workspace = true, features = ["testing"] } +casper-rpc-sidecar = { workspace = true, features = ["testing"] } -[package.metadata.deb.systemd-units] -unit-scripts = "../resources/maintainer_scripts/casper_event_sidecar" -restart-after-upgrade = true - -[package.metadata.deb.variants.bionic] -name = "casper-event-sidecar" -revision = "0+bionic" - -[package.metadata.deb.variants.focal] -name = "casper-event-sidecar" -revision = "0+focal" +[target.'cfg(not(target_env = "msvc"))'.dependencies] +tikv-jemallocator = "0.5" diff --git a/sidecar/src/config.rs b/sidecar/src/config.rs new file mode 100644 index 
00000000..83e800cf --- /dev/null +++ b/sidecar/src/config.rs @@ -0,0 +1,146 @@ +use anyhow::bail; +use casper_event_sidecar::{ + AdminApiServerConfig, DatabaseConfigError, RestApiServerConfig, SseEventServerConfig, + StorageConfig, StorageConfigSerdeTarget, +}; +use casper_rpc_sidecar::{FieldParseError, RpcServerConfig, RpcServerConfigTarget}; +use serde::Deserialize; +use thiserror::Error; + +#[derive(Clone, Debug, Deserialize, PartialEq, Eq)] +pub struct SidecarConfigTarget { + max_thread_count: Option, + max_blocking_thread_count: Option, + storage: Option, + rest_api_server: Option, + admin_api_server: Option, + sse_server: Option, + rpc_server: Option, +} + +#[derive(Clone, Debug, Deserialize, PartialEq, Eq)] +#[cfg_attr(test, derive(Default))] +pub struct SidecarConfig { + pub max_thread_count: Option, + pub max_blocking_thread_count: Option, + pub sse_server: Option, + pub rpc_server: Option, + pub storage: Option, + pub rest_api_server: Option, + pub admin_api_server: Option, +} + +impl SidecarConfig { + pub fn validate(&self) -> Result<(), anyhow::Error> { + if self.rpc_server.is_none() && self.sse_server.is_none() { + bail!("At least one of RPC server or SSE server must be configured") + } + if self.storage.is_none() && self.sse_server.is_some() { + bail!("Can't run SSE server without storage defined") + } + if self.storage.is_none() && self.rest_api_server.is_some() { + bail!("Can't run Rest api server without storage defined") + } + Ok(()) + } +} + +impl TryFrom for SidecarConfig { + type Error = ConfigReadError; + + fn try_from(value: SidecarConfigTarget) -> Result { + let sse_server_config = value.sse_server; + let storage_config_res: Option> = + value.storage.map(|target| target.try_into()); + let storage_config = invert(storage_config_res)?; + let rpc_server_config_res: Option> = + value.rpc_server.map(|target| target.try_into()); + let rpc_server_config = invert(rpc_server_config_res)?; + Ok(SidecarConfig { + max_thread_count: 
value.max_thread_count, + max_blocking_thread_count: value.max_blocking_thread_count, + sse_server: sse_server_config, + rpc_server: rpc_server_config, + storage: storage_config, + rest_api_server: value.rest_api_server, + admin_api_server: value.admin_api_server, + }) + } +} + +fn invert(x: Option>) -> Result, E> { + x.map_or(Ok(None), |v| v.map(Some)) +} + +#[derive(Error, Debug)] +pub enum ConfigReadError { + #[error("failed to read sidecar configuration. Underlying reason: {}", .error)] + GeneralError { error: String }, +} + +impl From for ConfigReadError { + fn from(value: FieldParseError) -> Self { + ConfigReadError::GeneralError { + error: value.to_string(), + } + } +} + +impl From for ConfigReadError { + fn from(value: DatabaseConfigError) -> Self { + ConfigReadError::GeneralError { + error: value.to_string(), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn sidecar_config_should_fail_validation_when_sse_server_and_no_storage() { + let config = SidecarConfig { + sse_server: Some(SseEventServerConfig::default()), + ..Default::default() + }; + let res = config.validate(); + + assert!(res.is_err()); + assert!(res + .err() + .unwrap() + .to_string() + .contains("Can't run SSE server without storage defined")); + } + + #[test] + fn sidecar_config_should_fail_validation_when_rest_api_server_and_no_storage() { + let config = SidecarConfig { + rpc_server: Some(RpcServerConfig::default()), + rest_api_server: Some(RestApiServerConfig::default()), + ..Default::default() + }; + + let res = config.validate(); + + assert!(res.is_err()); + assert!(res + .err() + .unwrap() + .to_string() + .contains("Can't run Rest api server without storage defined")); + } + + #[test] + fn sidecar_config_should_be_ok_if_rpc_is_defined_and_nothing_else() { + let config = SidecarConfig { + rpc_server: Some(RpcServerConfig::default()), + ..Default::default() + }; + + let res = config.validate(); + + assert!(res.is_ok()); + } +} diff --git 
a/sidecar/src/config/speculative_exec_config.rs b/sidecar/src/config/speculative_exec_config.rs new file mode 100644 index 00000000..61cc9839 --- /dev/null +++ b/sidecar/src/config/speculative_exec_config.rs @@ -0,0 +1,49 @@ +use datasize::DataSize; +use serde::Deserialize; + +/// Default binding address for the speculative execution RPC HTTP server. +/// +/// Uses a fixed port per node, but binds on any interface. +const DEFAULT_ADDRESS: &str = "0.0.0.0:1"; +/// Default rate limit in qps. +const DEFAULT_QPS_LIMIT: u64 = 1; +/// Default max body bytes (2.5MB). +const DEFAULT_MAX_BODY_BYTES: u32 = 2_621_440; +/// Default CORS origin. +const DEFAULT_CORS_ORIGIN: &str = ""; + +/// JSON-RPC HTTP server configuration. +#[derive(Clone, DataSize, Debug, Deserialize)] +// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. +#[serde(deny_unknown_fields)] +pub struct SpeculativeExecConfig { + /// Setting to enable the HTTP server. + pub enable_server: bool, + /// Address to bind JSON-RPC speculative execution server to. + pub address: String, + /// Maximum rate limit in queries per second. + pub qps_limit: u64, + /// Maximum number of bytes to accept in a single request body. + pub max_body_bytes: u32, + /// CORS origin. + pub cors_origin: String, +} + +impl SpeculativeExecConfig { + /// Creates a default instance for `RpcServer`. 
+ pub fn new() -> Self { + SpeculativeExecConfig { + enable_server: false, + address: DEFAULT_ADDRESS.to_string(), + qps_limit: DEFAULT_QPS_LIMIT, + max_body_bytes: DEFAULT_MAX_BODY_BYTES, + cors_origin: DEFAULT_CORS_ORIGIN.to_string(), + } + } +} + +impl Default for SpeculativeExecConfig { + fn default() -> Self { + SpeculativeExecConfig::new() + } +} diff --git a/sidecar/src/main.rs b/sidecar/src/main.rs index 413a0d2c..e3f0eb4a 100644 --- a/sidecar/src/main.rs +++ b/sidecar/src/main.rs @@ -1,66 +1,26 @@ -#![deny(clippy::complexity)] -#![deny(clippy::cognitive_complexity)] -#![deny(clippy::too_many_lines)] +mod config; -extern crate core; -mod admin_server; -mod api_version_manager; -mod database; -mod event_stream_server; -pub mod rest_server; -mod sql; -#[cfg(test)] -pub(crate) mod testing; -#[cfg(test)] -pub(crate) mod tests; -mod types; -mod utils; - -use std::collections::HashMap; -use std::convert::TryInto; -use std::{ - net::IpAddr, - path::{Path, PathBuf}, - str::FromStr, - time::Duration, -}; - -use crate::{ - admin_server::run_server as start_admin_server, - database::sqlite_database::SqliteDatabase, - event_stream_server::{Config as SseConfig, EventStreamServer}, - rest_server::run_server as start_rest_server, - types::{ - config::{read_config, Config}, - database::{DatabaseWriteError, DatabaseWriter}, - sse_events::*, - }, -}; use anyhow::{Context, Error}; -use api_version_manager::{ApiVersionManager, GuardedApiVersionManager}; -use casper_event_listener::{ - EventListener, EventListenerBuilder, NodeConnectionInterface, SseEvent, -}; -use casper_event_types::{metrics, sse_data::SseData, Filter}; +use backtrace::Backtrace; +use casper_event_sidecar::{run as run_sse_sidecar, run_admin_server, run_rest_server, Database}; +use casper_rpc_sidecar::start_rpc_server as run_rpc_sidecar; use clap::Parser; -use database::postgresql_database::PostgreSqlDatabase; -use futures::future::join_all; -use hex_fmt::HexFmt; +use config::{SidecarConfig, 
SidecarConfigTarget}; +use futures::FutureExt; +use std::{ + env, fmt, io, + panic::{self, PanicInfo}, + process::{self, ExitCode}, +}; #[cfg(not(target_env = "msvc"))] use tikv_jemallocator::Jemalloc; -use tokio::{ - sync::mpsc::{channel as mpsc_channel, Receiver, Sender}, - task::JoinHandle, - time::sleep, -}; -use tracing::{debug, error, info, trace, warn}; -use types::config::Connection; -use types::{ - config::StorageConfig, - database::{Database, DatabaseReader}, +use tracing::{field::Field, info}; +use tracing_subscriber::{ + fmt::{format, format::Writer}, + EnvFilter, }; -#[cfg(feature = "additional-metrics")] -use utils::start_metrics_thread; + +const MAX_THREAD_COUNT: usize = 512; #[cfg(not(target_env = "msvc"))] #[global_allocator] @@ -74,795 +34,140 @@ struct CmdLineArgs { path_to_config: String, } -const DEFAULT_CHANNEL_SIZE: usize = 1000; - -#[tokio::main] -async fn main() -> Result<(), Error> { +fn main() -> Result { // Install global collector for tracing - tracing_subscriber::fmt::init(); + init_logging()?; let args = CmdLineArgs::parse(); let path_to_config = args.path_to_config; let config_serde = read_config(&path_to_config).context("Error constructing config")?; - let config = config_serde.try_into()?; - + let config: SidecarConfig = config_serde.try_into()?; + config.validate()?; info!("Configuration loaded"); - run(config).await -} - -async fn run(config: Config) -> Result<(), Error> { - validate_config(&config)?; - let (event_listeners, sse_data_receivers) = build_event_listeners(&config)?; - let admin_server_handle = build_and_start_admin_server(&config); - // This channel allows SseData to be sent from multiple connected nodes to the single EventStreamServer. 
- let (outbound_sse_data_sender, outbound_sse_data_receiver) = - mpsc_channel(config.outbound_channel_size.unwrap_or(DEFAULT_CHANNEL_SIZE)); - let connection_configs = config.connections.clone(); - let storage_config = config.storage.clone(); - let database = build_database(&storage_config).await?; - let rest_server_handle = build_and_start_rest_server(&config, database.clone()); - - // Task to manage incoming events from all three filters - let listening_task_handle = start_sse_processors( - connection_configs, - event_listeners, - sse_data_receivers, - database.clone(), - outbound_sse_data_sender.clone(), - ); - - let event_broadcasting_handle = - start_event_broadcasting(&config, &storage_config, outbound_sse_data_receiver); - - tokio::try_join!( - flatten_handle(event_broadcasting_handle), - flatten_handle(rest_server_handle), - flatten_handle(listening_task_handle), - flatten_handle(admin_server_handle), - ) - .map(|_| Ok(()))? -} - -fn start_event_broadcasting( - config: &Config, - storage_config: &StorageConfig, - mut outbound_sse_data_receiver: Receiver<(SseData, Option, Option)>, -) -> JoinHandle> { - let storage_path = storage_config.get_storage_path(); - let event_stream_server_port = config.event_stream_server.port; - let buffer_length = config.event_stream_server.event_stream_buffer_length; - let max_concurrent_subscribers = config.event_stream_server.max_concurrent_subscribers; - tokio::spawn(async move { - // Create new instance for the Sidecar's Event Stream Server - let mut event_stream_server = EventStreamServer::new( - SseConfig::new( - event_stream_server_port, - Some(buffer_length), - Some(max_concurrent_subscribers), - ), - PathBuf::from(storage_path), - ) - .context("Error starting EventStreamServer")?; - while let Some((sse_data, inbound_filter, maybe_json_data)) = - outbound_sse_data_receiver.recv().await - { - event_stream_server.broadcast(sse_data, inbound_filter, maybe_json_data); - } - Err::<(), Error>(Error::msg("Event broadcasting 
finished")) - }) -} - -fn start_sse_processors( - connection_configs: Vec, - event_listeners: Vec, - sse_data_receivers: Vec>, - database: Database, - outbound_sse_data_sender: Sender<(SseData, Option, Option)>, -) -> JoinHandle> { - tokio::spawn(async move { - let mut join_handles = Vec::with_capacity(event_listeners.len()); - let api_version_manager = ApiVersionManager::new(); - - for ((mut event_listener, connection_config), sse_data_receiver) in event_listeners - .into_iter() - .zip(connection_configs) - .zip(sse_data_receivers) - { - tokio::spawn(async move { - let res = event_listener.stream_aggregated_events().await; - if let Err(e) = res { - let addr = event_listener.get_node_interface().ip_address.to_string(); - error!("Disconnected from {}. Reason: {}", addr, e.to_string()); - } - }); - let join_handle = spawn_sse_processor( - &database, - sse_data_receiver, - &outbound_sse_data_sender, - connection_config, - &api_version_manager, - ); - join_handles.push(join_handle); - } - let _ = join_all(join_handles).await; - //Send Shutdown to the sidecar sse endpoint - let _ = outbound_sse_data_sender - .send((SseData::Shutdown, None, None)) - .await; - // Below sleep is a workaround to allow the above Shutdown to propagate. - // If we don't do this there is a race condition between handling of the message and dropping of the outbound server - // which happens when we leave this function and the `tokio::try_join!` exits due to this. This race condition causes 9 of 10 - // tries to not propagate the Shutdown (ususally drop happens faster than message propagation to outbound). - // Fixing this race condition would require rewriting a lot of code. AFAICT the only drawback to this workaround is that the - // rest server and the sse server will exit 200ms later than it would without it. 
- sleep(Duration::from_millis(200)).await; - Err::<(), Error>(Error::msg("Connected node(s) are unavailable")) - }) -} - -fn spawn_sse_processor( - database: &Database, - sse_data_receiver: Receiver, - outbound_sse_data_sender: &Sender<(SseData, Option, Option)>, - connection_config: Connection, - api_version_manager: &std::sync::Arc>, -) -> JoinHandle> { - match database.clone() { - Database::SqliteDatabaseWrapper(db) => tokio::spawn(sse_processor( - sse_data_receiver, - outbound_sse_data_sender.clone(), - db.clone(), - false, - connection_config.enable_logging, - api_version_manager.clone(), - )), - Database::PostgreSqlDatabaseWrapper(db) => tokio::spawn(sse_processor( - sse_data_receiver, - outbound_sse_data_sender.clone(), - db.clone(), - true, - connection_config.enable_logging, - api_version_manager.clone(), - )), - } -} - -fn build_and_start_rest_server( - config: &Config, - database: Database, -) -> JoinHandle> { - let rest_server_config = config.rest_server.clone(); - tokio::spawn(async move { - match database { - Database::SqliteDatabaseWrapper(db) => { - start_rest_server(rest_server_config, db.clone()).await - } - Database::PostgreSqlDatabaseWrapper(db) => { - start_rest_server(rest_server_config, db.clone()).await - } - } - }) -} + let max_worker_threads = config.max_thread_count.unwrap_or_else(num_cpus::get); + let max_blocking_threads = config + .max_thread_count + .unwrap_or(MAX_THREAD_COUNT - max_worker_threads); + panic::set_hook(Box::new(panic_hook)); -fn build_and_start_admin_server(config: &Config) -> JoinHandle> { - let admin_server_config = config.admin_server.clone(); - tokio::spawn(async move { - if let Some(config) = admin_server_config { - start_admin_server(config).await - } else { - Ok(()) - } - }) -} - -async fn build_database(config: &StorageConfig) -> Result { - match config { - StorageConfig::SqliteDbConfig { - storage_path, - sqlite_config, - } => { - let path_to_database_dir = Path::new(storage_path); - let sqlite_database = 
SqliteDatabase::new(path_to_database_dir, sqlite_config.clone()) - .await - .context("Error instantiating sqlite database")?; - Ok(Database::SqliteDatabaseWrapper(sqlite_database)) - } - StorageConfig::PostgreSqlDbConfig { - postgresql_config, .. - } => { - let postgres_database = PostgreSqlDatabase::new(postgresql_config.clone()) - .await - .context("Error instantiating postgres database")?; - Ok(Database::PostgreSqlDatabaseWrapper(postgres_database)) - } - } + tokio::runtime::Builder::new_multi_thread() + .enable_all() + .worker_threads(max_worker_threads) + .max_blocking_threads(max_blocking_threads) + .build() + .expect("Failed building sidecar runtime") + .block_on(run(config)) } -fn build_event_listeners( - config: &Config, -) -> Result<(Vec, Vec>), Error> { - let mut event_listeners = Vec::with_capacity(config.connections.len()); - let mut sse_data_receivers = Vec::new(); - for connection in &config.connections { - let (inbound_sse_data_sender, inbound_sse_data_receiver) = - mpsc_channel(config.inbound_channel_size.unwrap_or(DEFAULT_CHANNEL_SIZE)); - sse_data_receivers.push(inbound_sse_data_receiver); - let event_listener = builder(connection, inbound_sse_data_sender)?.build(); - event_listeners.push(event_listener?); - } - Ok((event_listeners, sse_data_receivers)) +pub fn read_config(config_path: &str) -> Result { + let toml_content = + std::fs::read_to_string(config_path).context("Error reading config file contents")?; + toml::from_str(&toml_content).context("Error parsing config into TOML format") } -fn builder( - connection: &Connection, - inbound_sse_data_sender: Sender, -) -> Result { - let node_interface = NodeConnectionInterface { - ip_address: IpAddr::from_str(&connection.ip_address)?, - sse_port: connection.sse_port, - rest_port: connection.rest_port, +async fn run(config: SidecarConfig) -> Result { + let maybe_database = if let Some(storage_config) = config.storage.as_ref() { + Some(Database::build(storage_config).await?) 
+ } else { + None }; - let event_listener_builder = EventListenerBuilder { - node: node_interface, - max_connection_attempts: connection.max_attempts, - delay_between_attempts: Duration::from_secs( - connection.delay_between_retries_in_seconds as u64, - ), - allow_partial_connection: connection.allow_partial_connection, - sse_event_sender: inbound_sse_data_sender, - connection_timeout: Duration::from_secs( - connection.connection_timeout_in_seconds.unwrap_or(5) as u64, - ), - sleep_between_keep_alive_checks: Duration::from_secs( - connection - .sleep_between_keep_alive_checks_in_seconds - .unwrap_or(60) as u64, - ), - no_message_timeout: Duration::from_secs( - connection.no_message_timeout_in_seconds.unwrap_or(120) as u64, - ), + let admin_server = if let Some(config) = config.admin_api_server { + run_admin_server(config.clone()).boxed() + } else { + std::future::pending().boxed() }; - Ok(event_listener_builder) -} - -fn validate_config(config: &Config) -> Result<(), Error> { - if config - .connections - .iter() - .any(|connection| connection.max_attempts < 1) + let rest_server = if let (Some(rest_config), Some(database)) = + (config.rest_api_server, maybe_database.clone()) { - return Err(Error::msg( - "Unable to run: max_attempts setting must be above 0 for the sidecar to attempt connection" - )); - } - Ok(()) -} - -async fn flatten_handle(handle: JoinHandle>) -> Result { - match handle.await { - Ok(Ok(result)) => Ok(result), - Ok(Err(err)) => Err(err), - Err(join_err) => Err(Error::from(join_err)), - } -} - -async fn handle_database_save_result( - entity_name: &str, - entity_identifier: &str, - res: Result, - outbound_sse_data_sender: &Sender<(SseData, Option, Option)>, - inbound_filter: Filter, - json_data: Option, - build_sse_data: F, -) where - F: FnOnce() -> SseData, -{ - match res { - Ok(_) => { - count_internal_event("main_inbound_sse_data", "db_save_end"); - count_internal_event("main_inbound_sse_data", "outbound_sse_data_send_start"); - if let Err(error) 
= outbound_sse_data_sender - .send((build_sse_data(), Some(inbound_filter), json_data)) - .await - { - count_internal_event("main_inbound_sse_data", "outbound_sse_data_send_end"); - debug!( - "Error when sending to outbound_sse_data_sender. Error: {}", - error - ); - } else { - count_internal_event("main_inbound_sse_data", "outbound_sse_data_send_end"); - } - } - Err(DatabaseWriteError::UniqueConstraint(uc_err)) => { - count_internal_event("main_inbound_sse_data", "db_save_end"); - debug!( - "Already received {} ({}), logged in event_log", - entity_name, entity_identifier, - ); - trace!(?uc_err); - } - Err(other_err) => { - count_internal_event("main_inbound_sse_data", "db_save_end"); - count_error(format!("db_save_error_{}", entity_name).as_str()); - warn!(?other_err, "Unexpected error saving {}", entity_identifier); - } - } - count_internal_event("main_inbound_sse_data", "event_received_end"); -} + run_rest_server(rest_config.clone(), database).boxed() + } else { + std::future::pending().boxed() + }; -/// Function to handle single event in the sse_processor. -/// Returns false if the handling indicated that no other messages should be processed. -/// Returns true otherwise. 
-#[allow(clippy::too_many_lines)] -async fn handle_single_event( - sse_event: SseEvent, - database: Db, - enable_event_logging: bool, - outbound_sse_data_sender: Sender<(SseData, Option, Option)>, - api_version_manager: GuardedApiVersionManager, -) { - match sse_event.data { - SseData::ApiVersion(_) | SseData::Shutdown => { - //don't do debug counting for ApiVersion since we don't store it - } - _ => { - count_internal_event("main_inbound_sse_data", "event_received_start"); - } - } - match sse_event.data { - SseData::SidecarVersion(_) => { - //Do nothing -> the inbound shouldn't produce this endpoint, it can be only produced by sidecar to the outbound - } - SseData::ApiVersion(version) => { - handle_api_version( - api_version_manager, - version, - &outbound_sse_data_sender, - sse_event.inbound_filter, - enable_event_logging, - ) - .await; - } - SseData::BlockAdded { block, block_hash } => { - if enable_event_logging { - let hex_block_hash = HexFmt(block_hash.inner()); - info!("Block Added: {:18}", hex_block_hash); - debug!("Block Added: {}", hex_block_hash); - } - count_internal_event("main_inbound_sse_data", "db_save_start"); - let res = database - .save_block_added( - BlockAdded::new(block_hash, block.clone()), - sse_event.id, - sse_event.source.to_string(), - sse_event.api_version, - ) - .await; - handle_database_save_result( - "BlockAdded", - HexFmt(block_hash.inner()).to_string().as_str(), - res, - &outbound_sse_data_sender, - sse_event.inbound_filter, - sse_event.json_data, - || SseData::BlockAdded { block, block_hash }, - ) - .await; - } - SseData::DeployAccepted { deploy } => { - if enable_event_logging { - let hex_deploy_hash = HexFmt(deploy.hash().inner()); - info!("Deploy Accepted: {:18}", hex_deploy_hash); - debug!("Deploy Accepted: {}", hex_deploy_hash); - } - let deploy_accepted = DeployAccepted::new(deploy.clone()); - count_internal_event("main_inbound_sse_data", "db_save_start"); - let res = database - .save_deploy_accepted( - deploy_accepted, - 
sse_event.id, - sse_event.source.to_string(), - sse_event.api_version, - ) - .await; - handle_database_save_result( - "DeployAccepted", - HexFmt(deploy.hash().inner()).to_string().as_str(), - res, - &outbound_sse_data_sender, - sse_event.inbound_filter, - sse_event.json_data, - || SseData::DeployAccepted { deploy }, - ) - .await; - } - SseData::DeployExpired { deploy_hash } => { - if enable_event_logging { - let hex_deploy_hash = HexFmt(deploy_hash.inner()); - info!("Deploy Expired: {:18}", hex_deploy_hash); - debug!("Deploy Expired: {}", hex_deploy_hash); - } - count_internal_event("main_inbound_sse_data", "db_save_start"); - let res = database - .save_deploy_expired( - DeployExpired::new(deploy_hash), - sse_event.id, - sse_event.source.to_string(), - sse_event.api_version, - ) - .await; - handle_database_save_result( - "DeployExpired", - HexFmt(deploy_hash.inner()).to_string().as_str(), - res, - &outbound_sse_data_sender, - sse_event.inbound_filter, - sse_event.json_data, - || SseData::DeployExpired { deploy_hash }, - ) - .await; - } - SseData::DeployProcessed { - deploy_hash, - account, - timestamp, - ttl, - dependencies, - block_hash, - execution_result, - } => { - if enable_event_logging { - let hex_deploy_hash = HexFmt(deploy_hash.inner()); - info!("Deploy Processed: {:18}", hex_deploy_hash); - debug!("Deploy Processed: {}", hex_deploy_hash); - } - let deploy_processed = DeployProcessed::new( - deploy_hash.clone(), - account.clone(), - timestamp, - ttl, - dependencies.clone(), - block_hash.clone(), - execution_result.clone(), - ); - count_internal_event("main_inbound_sse_data", "db_save_start"); - let res = database - .save_deploy_processed( - deploy_processed.clone(), - sse_event.id, - sse_event.source.to_string(), - sse_event.api_version, - ) - .await; + let sse_server = if let (Some(storage_config), Some(database), Some(sse_server_config)) = + (config.storage, maybe_database, config.sse_server) + { + // If sse server is configured, both storage config and 
database must be "Some" here. This should be ensured by prior validation. + run_sse_sidecar( + sse_server_config, + database.clone(), + storage_config.get_storage_path(), + ) + .boxed() + } else { + std::future::pending().boxed() + }; - handle_database_save_result( - "DeployProcessed", - HexFmt(deploy_hash.inner()).to_string().as_str(), - res, - &outbound_sse_data_sender, - sse_event.inbound_filter, - sse_event.json_data, - || SseData::DeployProcessed { - deploy_hash, - account, - timestamp, - ttl, - dependencies, - block_hash, - execution_result, - }, - ) - .await; - } - SseData::Fault { - era_id, - timestamp, - public_key, - } => { - let fault = Fault::new(era_id, public_key.clone(), timestamp); - warn!(%fault, "Fault reported"); - count_internal_event("main_inbound_sse_data", "db_save_start"); - let res = database - .save_fault( - fault.clone(), - sse_event.id, - sse_event.source.to_string(), - sse_event.api_version, - ) - .await; + let rpc_server = config.rpc_server.as_ref().map_or_else( + || std::future::pending().boxed(), + |conf| run_rpc_sidecar(conf).boxed(), + ); - handle_database_save_result( - "Fault", - format!("{:#?}", fault).as_str(), - res, - &outbound_sse_data_sender, - sse_event.inbound_filter, - sse_event.json_data, - || SseData::Fault { - era_id, - timestamp, - public_key, - }, - ) - .await; - } - SseData::FinalitySignature(fs) => { - if enable_event_logging { - debug!( - "Finality Signature: {} for {}", - fs.signature(), - fs.block_hash() - ); - } - let finality_signature = FinalitySignature::new(fs.clone()); - count_internal_event("main_inbound_sse_data", "db_save_start"); - let res = database - .save_finality_signature( - finality_signature.clone(), - sse_event.id, - sse_event.source.to_string(), - sse_event.api_version, - ) - .await; - handle_database_save_result( - "FinalitySignature", - "", - res, - &outbound_sse_data_sender, - sse_event.inbound_filter, - sse_event.json_data, - || SseData::FinalitySignature(fs), - ) - .await; - } - 
SseData::Step { - era_id, - execution_effect, - } => { - let step = Step::new(era_id, execution_effect.clone()); - if enable_event_logging { - info!("Step at era: {}", era_id.value()); - } - count_internal_event("main_inbound_sse_data", "db_save_start"); - let res = database - .save_step( - step, - sse_event.id, - sse_event.source.to_string(), - sse_event.api_version, - ) - .await; - handle_database_save_result( - "Step", - format!("{}", era_id.value()).as_str(), - res, - &outbound_sse_data_sender, - sse_event.inbound_filter, - sse_event.json_data, - || SseData::Step { - era_id, - execution_effect, - }, - ) - .await; - } - SseData::Shutdown => handle_shutdown(sse_event, database, outbound_sse_data_sender).await, - } + let result = tokio::select! { + result = admin_server => result, + result = rest_server => result, + result = sse_server => result, + result = rpc_server => result, + }; + if let Err(error) = &result { + info!("The server has exited with an error: {}", error); + }; + result } -async fn handle_shutdown( - sse_event: SseEvent, - sqlite_database: Db, - outbound_sse_data_sender: Sender<(SseData, Option, Option)>, -) { - warn!("Node ({}) is unavailable", sse_event.source.to_string()); - let res = sqlite_database - .save_shutdown( - sse_event.id, - sse_event.source.to_string(), - sse_event.api_version, - ) - .await; - match res { - Ok(_) | Err(DatabaseWriteError::UniqueConstraint(_)) => { - // We push to outbound on UniqueConstraint error because in sse_server we match shutdowns to outbounds based on the filter they came from to prevent duplicates. - // But that also means that we need to pass through all the Shutdown events so the sse_server can determine to which outbound filters they need to be pushed (we - // don't store in DB the information from which filter did shutdown came). 
- if let Err(error) = outbound_sse_data_sender - .send(( - SseData::Shutdown, - Some(sse_event.inbound_filter), - sse_event.json_data, - )) - .await - { - debug!( - "Error when sending to outbound_sse_data_sender. Error: {}", - error - ); - } - } - Err(other_err) => { - count_error("db_save_error_shutdown"); - warn!(?other_err, "Unexpected error saving Shutdown") - } - } -} +fn panic_hook(info: &PanicInfo) { + let backtrace = Backtrace::new(); -async fn handle_api_version( - api_version_manager: std::sync::Arc>, - version: casper_types::ProtocolVersion, - outbound_sse_data_sender: &Sender<(SseData, Option, Option)>, - filter: Filter, - enable_event_logging: bool, -) { - let mut manager_guard = api_version_manager.lock().await; - let changed_newest_version = manager_guard.store_version(version); - if changed_newest_version { - if let Err(error) = outbound_sse_data_sender - .send((SseData::ApiVersion(version), Some(filter), None)) - .await - { - debug!( - "Error when sending to outbound_sse_data_sender. 
Error: {}", - error - ); - } - } - drop(manager_guard); - if enable_event_logging { - info!(%version, "API Version"); - } -} + eprintln!("{:?}", backtrace); -async fn sse_processor( - inbound_sse_data_receiver: Receiver, - outbound_sse_data_sender: Sender<(SseData, Option, Option)>, - database: Db, - database_supports_multithreaded_processing: bool, - enable_event_logging: bool, - api_version_manager: GuardedApiVersionManager, -) -> Result<(), Error> { - #[cfg(feature = "additional-metrics")] - let metrics_tx = start_metrics_thread("sse_save".to_string()); - // This task starts the listener pushing events to the sse_data_receiver - if database_supports_multithreaded_processing { - start_multi_threaded_events_consumer( - inbound_sse_data_receiver, - outbound_sse_data_sender, - database, - enable_event_logging, - api_version_manager, - #[cfg(feature = "additional-metrics")] - metrics_tx, - ) - .await; + // Print panic info + if let Some(s) = info.payload().downcast_ref::<&str>() { + eprintln!("sidecar panicked: {}", s); } else { - start_single_threaded_events_consumer( - inbound_sse_data_receiver, - outbound_sse_data_sender, - database, - enable_event_logging, - api_version_manager, - #[cfg(feature = "additional-metrics")] - metrics_tx, - ) - .await; + eprintln!("{}", info); } - Ok(()) + process::abort() } -fn handle_events_in_thread( - mut queue_rx: Receiver, - database: Db, - outbound_sse_data_sender: Sender<(SseData, Option, Option)>, - api_version_manager: GuardedApiVersionManager, - enable_event_logging: bool, - #[cfg(feature = "additional-metrics")] metrics_sender: Sender<()>, -) { - tokio::spawn(async move { - while let Some(sse_event) = queue_rx.recv().await { - handle_single_event( - sse_event, - database.clone(), - enable_event_logging, - outbound_sse_data_sender.clone(), - api_version_manager.clone(), - ) - .await; - #[cfg(feature = "additional-metrics")] - let _ = metrics_sender.send(()).await; - } - }); -} +fn init_logging() -> anyhow::Result<()> { + 
const LOG_CONFIGURATION_ENVVAR: &str = "RUST_LOG"; -fn build_queues(cache_size: usize) -> HashMap, Receiver)> { - let mut map = HashMap::new(); - map.insert(Filter::Deploys, mpsc_channel(cache_size)); - map.insert(Filter::Events, mpsc_channel(cache_size)); - map.insert(Filter::Main, mpsc_channel(cache_size)); - map.insert(Filter::Sigs, mpsc_channel(cache_size)); - map -} + const LOG_FIELD_MESSAGE: &str = "message"; + const LOG_FIELD_TARGET: &str = "log.target"; + const LOG_FIELD_MODULE: &str = "log.module_path"; + const LOG_FIELD_FILE: &str = "log.file"; + const LOG_FIELD_LINE: &str = "log.line"; -async fn start_multi_threaded_events_consumer< - Db: DatabaseReader + DatabaseWriter + Clone + Send + Sync + 'static, ->( - mut inbound_sse_data_receiver: Receiver, - outbound_sse_data_sender: Sender<(SseData, Option, Option)>, - database: Db, - enable_event_logging: bool, - api_version_manager: GuardedApiVersionManager, - #[cfg(feature = "additional-metrics")] metrics_sender: Sender<()>, -) { - let mut senders_and_receivers_map = build_queues(DEFAULT_CHANNEL_SIZE); - let mut senders_map = HashMap::new(); - for (filter, (tx, rx)) in senders_and_receivers_map.drain() { - handle_events_in_thread( - rx, - database.clone(), - outbound_sse_data_sender.clone(), - api_version_manager.clone(), - enable_event_logging, - #[cfg(feature = "additional-metrics")] - metrics_sender.clone(), - ); - senders_map.insert(filter, tx); - } + type FormatDebugFn = fn(&mut Writer, &Field, &dyn std::fmt::Debug) -> fmt::Result; - while let Some(sse_event) = inbound_sse_data_receiver.recv().await { - if let Some(tx) = senders_map.get(&sse_event.inbound_filter) { - tx.send(sse_event).await.unwrap() - } else { - error!( - "Failed to find an sse handler queue for inbound filter {}", - sse_event.inbound_filter - ); - break; + fn format_into_debug_writer( + writer: &mut Writer, + field: &Field, + value: &dyn fmt::Debug, + ) -> fmt::Result { + match field.name() { + LOG_FIELD_MESSAGE => write!(writer, 
"{:?}", value), + LOG_FIELD_TARGET | LOG_FIELD_MODULE | LOG_FIELD_FILE | LOG_FIELD_LINE => Ok(()), + _ => write!(writer, "; {}={:?}", field, value), } } -} -async fn start_single_threaded_events_consumer< - Db: DatabaseReader + DatabaseWriter + Clone + Send + Sync, ->( - mut inbound_sse_data_receiver: Receiver, - outbound_sse_data_sender: Sender<(SseData, Option, Option)>, - database: Db, - enable_event_logging: bool, - api_version_manager: GuardedApiVersionManager, - #[cfg(feature = "additional-metrics")] metrics_sender: Sender<()>, -) { - while let Some(sse_event) = inbound_sse_data_receiver.recv().await { - handle_single_event( - sse_event, - database.clone(), - enable_event_logging, - outbound_sse_data_sender.clone(), - api_version_manager.clone(), - ) - .await; - #[cfg(feature = "additional-metrics")] - let _ = metrics_sender.send(()).await; - } -} + let formatter = format::debug_fn(format_into_debug_writer as FormatDebugFn); -fn count_error(reason: &str) { - metrics::ERROR_COUNTS - .with_label_values(&["main", reason]) - .inc(); -} + let filter = EnvFilter::new( + env::var(LOG_CONFIGURATION_ENVVAR) + .as_deref() + .unwrap_or("warn,casper_rpc_sidecar=info"), + ); -/// This metric is used for debugging of possible issues -/// with sidecar to determine at which step of processing there was a hang. -/// If we determine that this issue was fixed completely this can be removed -/// (the corresponding metric also). 
-fn count_internal_event(category: &str, reason: &str) { - metrics::INTERNAL_EVENTS - .with_label_values(&[category, reason]) - .inc(); + let builder = tracing_subscriber::fmt() + .with_writer(io::stdout as fn() -> io::Stdout) + .with_env_filter(filter) + .fmt_fields(formatter) + .with_filter_reloading(); + builder.try_init().map_err(|error| anyhow::anyhow!(error))?; + Ok(()) } diff --git a/types/Cargo.toml b/types/Cargo.toml index 6e396279..4384c197 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -11,16 +11,16 @@ repository = "https://github.com/CasperLabs/event-sidecar" [dependencies] base16 = "0.2.1" blake2 = { version = "0.9.0", optional = true } -casper-types = { version = "3.0.0", features = ["std"] } +casper-types = { workspace = true, features = ["std"] } hex-buffer-serde = "0.3.0" hex_fmt = "0.3.0" -once_cell = {workspace = true} -prometheus = { version = "0.13.3", features = ["process"]} +once_cell = { workspace = true } +prometheus = { version = "0.13.3", features = ["process"] } rand = { version = "0.8.5", optional = true } -serde = { version = "1", features = ["derive", "rc"] } +serde = { workspace = true, default-features = true, features = ["derive", "rc"] } serde_json = { version = "1.0", default-features = false, features = ["alloc", "raw_value"] } -thiserror = "1.0.39" -utoipa = { version = "3.4.4", features = ["rc_schema"]} +thiserror = { workspace = true } +utoipa = { version = "3.4.4", features = ["rc_schema"] } [features] sse-data-testing = ["blake2", "casper-types/testing", "rand"] diff --git a/types/src/block.rs b/types/src/block.rs index 12441360..51359ad5 100644 --- a/types/src/block.rs +++ b/types/src/block.rs @@ -1,8 +1,6 @@ -use casper_types::{ - bytesrepr, EraId, ProtocolVersion, PublicKey, SecretKey, Signature, Timestamp, U512, -}; #[cfg(feature = "sse-data-testing")] -use casper_types::{bytesrepr::ToBytes, crypto, testing::TestRng}; +use casper_types::{bytesrepr, bytesrepr::ToBytes, crypto, testing::TestRng, SecretKey}; +use 
casper_types::{EraId, ProtocolVersion, PublicKey, Signature, Timestamp, U512}; #[cfg(feature = "sse-data-testing")] use rand::Rng; use serde::{Deserialize, Serialize}; diff --git a/types/src/deploy.rs b/types/src/deploy.rs index 148fbeb1..a5a39f7f 100644 --- a/types/src/deploy.rs +++ b/types/src/deploy.rs @@ -10,9 +10,11 @@ use rand::Rng; use serde::{Deserialize, Serialize}; #[cfg(feature = "sse-data-testing")] -use casper_types::{bytesrepr::ToBytes, testing::TestRng}; use casper_types::{ - bytesrepr::{self}, + bytesrepr::{self, ToBytes}, + testing::TestRng, +}; +use casper_types::{ runtime_args, PublicKey, RuntimeArgs, SecretKey, Signature, TimeDiff, Timestamp, U512, }; use utoipa::ToSchema; diff --git a/types/src/digest.rs b/types/src/digest.rs index c76675bd..7c14fdd6 100644 --- a/types/src/digest.rs +++ b/types/src/digest.rs @@ -13,9 +13,12 @@ use hex_fmt::HexFmt; use rand::Rng; use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; +use casper_types::checksummed_hex; #[cfg(feature = "sse-data-testing")] -use casper_types::bytesrepr::{self, ToBytes}; -use casper_types::{checksummed_hex, testing::TestRng}; +use casper_types::{ + bytesrepr::{self, ToBytes}, + testing::TestRng, +}; use utoipa::ToSchema; /// The output of the hash function. 
@@ -95,7 +98,6 @@ impl Digest { } } -#[cfg(feature = "sse-data-testing")] impl AsRef<[u8]> for Digest { fn as_ref(&self) -> &[u8] { self.0.as_ref() diff --git a/types/src/executable_deploy_item.rs b/types/src/executable_deploy_item.rs index 4b15b2ec..0fffb857 100644 --- a/types/src/executable_deploy_item.rs +++ b/types/src/executable_deploy_item.rs @@ -6,11 +6,14 @@ use rand::{ }; use serde::{Deserialize, Serialize}; +use casper_types::{ + bytesrepr::Bytes, CLValue, ContractHash, ContractPackageHash, ContractVersion, RuntimeArgs, + U512, +}; #[cfg(feature = "sse-data-testing")] -use casper_types::bytesrepr::{self, Bytes, ToBytes}; use casper_types::{ - system::auction::ARG_AMOUNT, CLValue, ContractHash, ContractPackageHash, ContractVersion, - RuntimeArgs, U512, + bytesrepr::{self, ToBytes}, + system::auction::ARG_AMOUNT, }; use utoipa::ToSchema;