From a51aa0cf738c9ccb031657f3be6c8cbc2f201b2c Mon Sep 17 00:00:00 2001 From: Iulian Pascalau <36724586+iulianpascalau@users.noreply.github.com> Date: Tue, 5 Feb 2019 10:13:57 +0200 Subject: [PATCH] Bug/en 721 bugfix testnet v.0.5 (#55) The following corrections have been applied: - marshalize tx before broadcasting it - small changes to intercepted transaction - prettier base64 addresses for sender/receiver - node: added a test for sending a receiving a transaction + fixes - state: added tests for plainAddressConverter - interceptor: added Marshalizer() func, fixed hack - tests fixed, changed sk and pk to be encoded in hex format - removed extra requestBlockTransactions - updated Readme.md to contain pk/sk in hex format - updated genesis.json to have starttime = 0 - fixed node to work with hex and still have consensus with one node - added test for requesting and resolving a header - made p2p private key pseudo random, - eliminated hash when signing/verifying tx - local variables renaming in block interceptors. - add header in header nonce pool - node: added possibility to generate and broadcast bulk transactions for benchmarking purposes - create Empty block with non empty fields - fixed endless synchronization by canceling consensus round only in commit hash subround - integrationTests: refactored directory structure, split tests in multiple files - added interceptedResolverTx test - small change in persistence unit log error when trying to get a missing key - put objects instead of wrapped objects in pools - refactored routes for multipleTransaction generation - process: added sorting of transactions by nonce when generating tx block body - refactored routes for multipleTransaction generation - integrationTests: added tx block body tests (netw + mem), renamed files and tests, extracted methods as to keep tests small - fixed mutex on requestBlockTransactions - refactored routes for multipleTransaction generation - fixed mutex after discussions - integrationTests: added test for netw bulk tx send, state exec transaction when recv = send and balance should not get modified - process: bugfixed dropping of balance when sender = recv - print blockcchain on go routine in CommitBlock (process/block/process.go) - added some prints in createMiniBlocks (process/block/process.go) - node: fixed generating bulk transactions, added guards to check if output no of transactions respects required no of transactions to be generated. - displayed only last transaction from block to make info more readable in terminal - node: fixed sending signed header + tx block body after consensus rounds by calling topic.BroadcastBuff not Broadcast - store current and genesis header hashes for reuse - fix wrong previous hash on block after genesis - fixed ntp syncronization when requested time is not received - interceptor: fixed logging error that might cause code to panic. - fixed situation when no txs is added in mini block but method createMiniBlocks returns 1 empty miniblock which is created in the begining of the method - changed log level from error to debug in createMiniBlocks txs ordering - added print info on processBlockTransaction method - added print info on processBlockTransaction method - crypto/multisig: added mutexes when writing and reading maps of commitment hashes, commitments and sig shares - consensus/spos: changed to show pretty strings when commitment does not match received commitment hash - changed max transactions allowed in one block from 100000 to 15000 in sposConsensusWorker.go - fixed problem with ntp syncronization - remove from pool transactions with invalid nonce - added print info on displayLogInfo method - fix test for invalid transaction - added print info on processBlockTransaction method - crypto/multisig: fixed a RLock - Unlock bug - removed api_deprecated package - move checks for validity of commitments and signature shares from received commitment and signature subrounds - if the member in consensus is not selected in bitmap, then it needs to synchronize normally - changed log level from Error to Debug in getTransactionFromPool - added print info on processBlockTransaction method - added print info about remained txs in pool - node: creation of default data stores for headers and transactions - p2p: fix publishing onto pubsub using a chan, go routines for writing to that chan + a go routine that serialize objects writing by taking them one at a time from the chan - process/sync: minor refactoring - node: added 1us sleep time when generation bulk transactions - fixed situation when a leader could send in elastic mode a proposed block in subround Start and other nodes have not reached there in this next round because of some ntp difference in order of miliseconds. This will cause that these nodes could not participate as validators in this new round as they will init all the stuff after they will received the block proposed - do not add in pools already parsed objects - changed maxTxsInBlock to 20k and ntp sync to 1 hour - fixed race on spos in create empty block - move empty block log in create empty block - introduced roll back for invalid header hash received when bootstrapping - print hash in base64 - added print info on processBlockTransaction method - fixed canceled situation in spos worker - gopkg.lock and toml fixed to fetch pubsub from release version v0.11.10 - added integration test to execute 15000tx and then revert them. In node, broadcasting the consensus data object is done without copying the object (again) - added new error, ErrTimeOut, in process/error.go - repaired tests in process/block/process_test.go - refactored some print messages and fixed a bug in process/block/process.go when waitForTxHashes returns time out combined with calling of method VerifyStateRoot with bad parameter given (this method returns always true: bp.accounts.RootHash() is compared with the given parameter bp.accounts.RootHash()) - introduced a sleep time of 0.1 ms in createPubSub method from p2p/netMessagnger.go and all the things seems to work smoothly now - fixed broadcast of consensus data, added new methods to StartRound and Advance subrounds to ensure that the initialization job when a new round is started and the final round job when the current round is ended, will be done in any case. Introduced a sleep time of 0.1 ms in GenerateAndSendBulkTransactions method from node/node.go and all the tings seems to work smoothly now. - fixef the calculation of the next minute when the genesis time will start, to avoid some problem at the edge of the hour/day changing in cmd/bootnode/main.go - fixed a bug in chronology/chronology.go when it advance to the next round even if the whole round is canceled - introduced a new consensus messanger mechanism in consensus/spos/sposConsensusMessanger.go through which all the received consensus messages regardless of their order, will be kept and then executed when their time will come. - fixed some bugs in consensus/spos/sposConsensusWorker.go when network is busy and many consensus messages came latter and they were lost or much worse they were affect the next round state - fixed and improved consensus messenger mechanism in spos - replaced haveTime parameter with a function in process block - process block: added the ASCII table in log, not just printed on console - added print info on processBlockTransaction method - added a check fork mechanism - created Bootstraper interface - injected boostraper to sposworker - check in start round if node is on fork - storage: LRUCache changed Put func to add or rewrite the data stored in the "key" location. - process: added tests for interceptors - added forkDetector instance in main and injected it in node with option - changed default port to 32000 - removed ShouldSync from spos and called from process/sync/block.go - added some prints and fixed a bug in requestBlockTransactions (before fixed it was returning the len of shards and not of txs requested from network) from process/block/process.go - added some prints and check fork in ShouldSync method - implemented a new fork detector class - removed useless function and function called from validateHeader in process/block/process.go - added header in returns parameter to SyncBlock to can handle in ForkChoice a roll back case of an signed block - added some print info in sync and node - fixed some print messages when received header is nil - fixed reverting state on consensus round when in sync - used time stamp round in header time stamp and not relative time - added mechanism of creating a check point from which roll back should not happen (when the node will commit a block signed, all the past blocks stored in forkDetecor will be removed) - process: improved code coverage in process package, minor refactoring, removed some mocks. - chronology: refactor and added tests, typos fixed - api: added test on transaction routes - chronology: added one function which returns time stamp of one given round (this will help to avoid to take into consideration some old blocks which could arrive from network) - chronology: added some tests - consensus/spos: added sync mock and some other functionalities to multi sig mock - refactored spos package and added missing tests - fixed bug in consensus/spos where maxBlockProcessingTimePercent const should be of type float and not int - refactored node and facade - changed number of transactions in block to 16000 and commented out nonce checking - rollback to genesis should not set blockchain currentBlock flags - typo efficency -> efficiency - max txs 16000 -> 15000 - process/block: modified prints in header display and block display - process/block: fix sorting of tx by nonce, added test - sync/block: headers do not get erased from storage in any circumstances - process/block: skipped 2 tests for temporary fix by not erasing headers from storer when rollback - integrationTests: fixed skiped integration tests that use interceptors and resolvers - fixed typo from sigantures -> signatures - process/block/process.go: added some prints, added haveTime() function inside the loop in createMiniBlocks method, added another method to return unsorted txs - cmd/bbotnode/main.go: changed the genesis time to start after 1 minute - minor naming changes - changed from big.Int to *big.Int - minor code refactoring - pulled errors in variables for reuse - changed mutex logic inside interceptor/resolver containers and some typo fixes - add headline function to logger for pretty print purposes - fixed transaction capnproto unmarshal error - shardedData changed search func - errors are now exported in node package - minor code refactoring in p2p, node, main - added test for the modification when checking transaction signing in interceptedTransaction. - added test if the increaseNonce fails in transaction processor --- Gopkg.lock | 106 +- Gopkg.toml | 2 +- README.md | 92 +- api/address/mock/facade.go | 13 +- api/address/routes.go | 48 +- api/address/routes_test.go | 129 +- api/errors/errors.go | 53 + api/node/mock/facade.go | 17 +- api/node/routes.go | 37 +- api/node/routes_test.go | 106 +- api/transaction/mock/facade.go | 21 +- api/transaction/routes.go | 93 +- api/transaction/routes_test.go | 287 ++- api_deprecated/api.go | 23 - api_deprecated/api_test.go | 229 -- api_deprecated/facade.go | 25 - api_deprecated/node/handlers.go | 108 - api_deprecated/node/middleware.go | 10 - api_deprecated/node/routes.go | 27 - chronology/chronology.go | 53 +- chronology/chronology_test.go | 164 +- chronology/export_test.go | 9 + chronology/mock/subroundHandlerStub.go | 38 + chronology/mock/syncTimeMock.go | 24 + chronology/mock/syncTimeStub.go | 28 + chronology/ntp/syncTime.go | 32 +- chronology/ntp/syncTime_test.go | 4 +- cmd/bootnode/main.go | 206 +- cmd/facade/elrondNodeFacade.go | 47 +- cmd/facade/elrondNodeFacade_test.go | 102 +- cmd/facade/export_test.go | 17 + cmd/facade/interface.go | 59 +- cmd/facade/mock/nodeMock.go | 61 +- cmd/facade/mock/syncTimerMock.go | 33 + cmd/flags/flags.go | 6 +- cmd/keygenerator/main.go | 13 +- consensus/interface.go | 2 +- consensus/mock/validatorMock.go | 6 +- consensus/spos/consensus.go | 7 +- consensus/spos/consensus_test.go | 99 + consensus/spos/errors.go | 9 + consensus/spos/export_test.go | 51 + consensus/spos/mock/blockProcessorMock.go | 40 +- consensus/spos/mock/bootstrapMock.go | 9 + consensus/spos/mock/multiSigMock.go | 81 +- consensus/spos/mock/syncTimeMock.go | 24 + consensus/spos/roundConsensus.go | 15 + consensus/spos/roundConsensus_test.go | 25 + consensus/spos/sposConsensusMessenger.go | 131 + consensus/spos/sposConsensusMessenger_test.go | 717 ++++++ consensus/spos/sposConsensusWorker.go | 830 +++++-- consensus/spos/sposConsensusWorker_test.go | 2164 ++++++++++++++++- .../groupSelectors/indexHashedGroup_test.go | 76 +- crypto/interface.go | 14 +- crypto/multisig/belnev.go | 108 +- data/blockchain/blockchain.go | 18 +- data/interface.go | 4 +- data/mock/shardedDataStub.go | 11 +- data/shardedData/shardedData.go | 16 +- data/shardedData/shardedData_test.go | 18 +- data/state/account.go | 8 +- data/state/account_test.go | 8 +- data/state/accountsDB_test.go | 8 +- data/state/accountsDBreverts_test.go | 16 +- data/state/address.go | 4 +- data/state/hashAddressConverter.go | 4 +- data/state/interface.go | 4 +- data/state/journalEntries.go | 4 +- data/state/journalEntries_test.go | 18 +- data/state/journalizedAccountWrap.go | 2 +- data/state/journalizedAccountWrap_test.go | 2 +- data/state/mock/journalizedAccountWrapMock.go | 2 +- data/state/plainAddressConverter.go | 102 + data/state/plainAddressConverter_test.go | 213 ++ data/transaction/transaction.go | 22 +- docker-compose.yml | 19 +- docker/elrond/Dockerfile | 3 +- genesis.json | 48 +- integrationTests/block/common.go | 127 + .../block/interceptedRequestHdrMem_test.go | 85 + .../interceptedRequestTxBlockBodyMem_test.go | 84 + .../interceptedRequestTxBlockBodyNet_test.go | 174 ++ integrationTests/state/common.go | 45 + .../state/stateExecTransaction_test.go | 216 ++ .../stateTrie_test.go} | 100 +- integrationTests/transaction/common.go | 229 ++ .../transaction/interceptedBulkTxMem_test.go | 94 + .../transaction/interceptedBulkTxNet_test.go | 139 ++ .../interceptedResolvedTxMem_test.go | 88 + .../transaction/interceptedTxMem_test.go | 74 + logger/logger.go | 17 + logger/printerHook.go | 5 +- marshal/capnpMarshalizer_test.go | 59 +- node/createInterceptors.go | 138 -- node/createInterceptors_test.go | 814 ------- node/createResolvers.go | 138 -- node/createResolvers_test.go | 787 ------ node/defineOptions.go | 85 +- node/defineOptions_test.go | 110 +- node/errors.go | 60 +- node/export_test.go | 31 - node/mock/addressConverterFake.go | 67 + node/mock/blockProcessorStub.go | 15 +- node/mock/forkDetectorMock.go | 32 + node/mock/marshalizerFake.go | 47 + node/mock/messengerStub.go | 14 +- node/mock/multisignMock.go | 10 +- node/mock/processorCreatorMock.go | 33 + node/mock/shardedDataStub.go | 11 +- node/node.go | 694 +++--- node/node_test.go | 938 +++---- p2p/connectParams.go | 10 +- p2p/memMessenger.go | 23 +- p2p/netMessenger.go | 48 +- process/block/export_test.go | 27 +- process/block/interceptedBlocks.go | 20 + process/block/interceptedBlocks_test.go | 240 +- process/block/interceptors.go | 38 +- process/block/interceptors_test.go | 183 ++ process/block/process.go | 679 +++++- process/block/process_test.go | 1897 +++++++++++++-- process/block/resolvers.go | 66 +- process/block/resolvers_test.go | 42 +- process/errors.go | 44 +- process/factory/export_test.go | 9 + process/factory/factory.go | 417 ++++ process/factory/factory_test.go | 300 +++ process/interceptor/container.go | 78 + process/interceptor/topicInterceptor.go | 19 +- process/interceptor/topicInterceptor_test.go | 41 +- process/interface.go | 58 +- process/mock/blockProcessorMock.go | 37 +- process/mock/forkDetectorMock.go | 23 + process/mock/hasherMock.go | 2 +- process/mock/hasherStub.go | 22 + process/mock/interceptorConteinerMock.go | 42 + process/mock/interceptorStub.go | 6 + process/mock/journalizedAccountWrapMock.go | 11 +- process/mock/resolverContainerMock.go | 42 + process/mock/shardedDataStub.go | 11 +- process/mock/txProcessorMock.go | 4 +- process/mock/uint64ByteSliceConverterMock.go | 17 + process/resolver/container.go | 78 + process/sync/basicForkDetector.go | 139 ++ process/sync/basicForkDetector_test.go | 164 ++ process/sync/block.go | 384 ++- process/sync/block_test.go | 1094 ++++++++- process/sync/errors.go | 29 + process/sync/export_test.go | 46 +- process/sync/validator.go | 8 +- process/sync/validator_test.go | 18 +- process/transaction/export_test.go | 2 +- process/transaction/interceptedTransaction.go | 23 +- .../interceptedTransaction_test.go | 88 +- process/transaction/interceptor.go | 37 +- process/transaction/interceptor_test.go | 251 +- process/transaction/process.go | 54 +- process/transaction/process_test.go | 406 +++- process/transaction/resolver.go | 31 +- process/transaction/resolver_test.go | 24 +- storage/lrucache/lrucache.go | 8 +- storage/lrucache/lrucache_test.go | 96 +- storage/memorydb/memorydb.go | 3 +- storage/storageunit.go | 3 +- 164 files changed, 15198 insertions(+), 5088 deletions(-) create mode 100644 api/errors/errors.go delete mode 100644 api_deprecated/api.go delete mode 100644 api_deprecated/api_test.go delete mode 100644 api_deprecated/facade.go delete mode 100644 api_deprecated/node/handlers.go delete mode 100644 api_deprecated/node/middleware.go delete mode 100644 api_deprecated/node/routes.go create mode 100644 chronology/export_test.go create mode 100644 chronology/mock/subroundHandlerStub.go create mode 100644 chronology/mock/syncTimeMock.go create mode 100644 chronology/mock/syncTimeStub.go create mode 100644 cmd/facade/export_test.go create mode 100644 cmd/facade/mock/syncTimerMock.go create mode 100644 consensus/spos/export_test.go create mode 100644 consensus/spos/mock/bootstrapMock.go create mode 100644 consensus/spos/mock/syncTimeMock.go create mode 100644 consensus/spos/sposConsensusMessenger.go create mode 100644 consensus/spos/sposConsensusMessenger_test.go create mode 100644 data/state/plainAddressConverter.go create mode 100644 data/state/plainAddressConverter_test.go create mode 100644 integrationTests/block/common.go create mode 100644 integrationTests/block/interceptedRequestHdrMem_test.go create mode 100644 integrationTests/block/interceptedRequestTxBlockBodyMem_test.go create mode 100644 integrationTests/block/interceptedRequestTxBlockBodyNet_test.go create mode 100644 integrationTests/state/common.go create mode 100644 integrationTests/state/stateExecTransaction_test.go rename integrationTests/{testStateTrie_test.go => state/stateTrie_test.go} (91%) create mode 100644 integrationTests/transaction/common.go create mode 100644 integrationTests/transaction/interceptedBulkTxMem_test.go create mode 100644 integrationTests/transaction/interceptedBulkTxNet_test.go create mode 100644 integrationTests/transaction/interceptedResolvedTxMem_test.go create mode 100644 integrationTests/transaction/interceptedTxMem_test.go delete mode 100644 node/createInterceptors.go delete mode 100644 node/createInterceptors_test.go delete mode 100644 node/createResolvers.go delete mode 100644 node/createResolvers_test.go create mode 100644 node/mock/addressConverterFake.go create mode 100644 node/mock/forkDetectorMock.go create mode 100644 node/mock/marshalizerFake.go create mode 100644 node/mock/processorCreatorMock.go create mode 100644 process/factory/export_test.go create mode 100644 process/factory/factory.go create mode 100644 process/factory/factory_test.go create mode 100644 process/interceptor/container.go create mode 100644 process/mock/forkDetectorMock.go create mode 100644 process/mock/hasherStub.go create mode 100644 process/mock/interceptorConteinerMock.go create mode 100644 process/mock/resolverContainerMock.go create mode 100644 process/mock/uint64ByteSliceConverterMock.go create mode 100644 process/resolver/container.go create mode 100644 process/sync/basicForkDetector.go create mode 100644 process/sync/basicForkDetector_test.go create mode 100644 process/sync/errors.go diff --git a/Gopkg.lock b/Gopkg.lock index 7c782d6a3d0..3efdd0c0570 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -100,14 +100,6 @@ revision = "bad65a492f32121a87197f4a085905c35e2a367e" version = "v1.0.0" -[[projects]] - digest = "1:abeb38ade3f32a92943e5be54f55ed6d6e3b6602761d74b4aab4c9dd45c18abd" - name = "github.com/fsnotify/fsnotify" - packages = ["."] - pruneopts = "UT" - revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" - version = "v1.4.7" - [[projects]] digest = "1:2b59aca2665ff804f6606c8829eaee133ddd3aefbc841014660d961b0034f888" name = "github.com/gin-contrib/cors" @@ -245,25 +237,6 @@ revision = "20f1fb78b0740ba8c3cb143a61e86ba5c8669768" version = "v0.5.0" -[[projects]] - digest = "1:c0d19ab64b32ce9fe5cf4ddceba78d5bc9807f0016db6b1183599da3dcc24d10" - name = "github.com/hashicorp/hcl" - packages = [ - ".", - "hcl/ast", - "hcl/parser", - "hcl/printer", - "hcl/scanner", - "hcl/strconv", - "hcl/token", - "json/parser", - "json/scanner", - "json/token", - ] - pruneopts = "UT" - revision = "8cb6e5b959231cc1119e43259c4a608f9c51a241" - version = "v1.0.0" - [[projects]] branch = "master" digest = "1:3f5e7a4329f76e2617bf094eb5eec2ec2eff590d145c0beb32bac699628dea7d" @@ -531,15 +504,15 @@ version = "v1.0.0" [[projects]] - branch = "master" - digest = "1:6e8a9d64217cf1a5d8aee323690f2384108ab14fd24c197da0a99f125479dcdc" + digest = "1:875ec2777e25f997cf505f4202d3a50b7cf06d75cacab7a52e1dfedba575cfc2" name = "github.com/libp2p/go-libp2p-pubsub" packages = [ ".", "pb", ] pruneopts = "UT" - revision = "6fc7deb2868603292d4f7afe139e86d27b03f3b8" + revision = "f736644fe805a9f5677c82aca25c82da7cde2c76" + version = "v0.11.10" [[projects]] digest = "1:1f4be308ef940c89130bee25cd1fcb5a9e39262dc17aa82902370cb18b0cdfa6" @@ -660,14 +633,6 @@ revision = "b2cd8129d1037bcb64047839ecc490c4a3c8be28" version = "v2.0.14" -[[projects]] - digest = "1:c568d7727aa262c32bdf8a3f7db83614f7af0ed661474b24588de635c20024c7" - name = "github.com/magiconair/properties" - packages = ["."] - pruneopts = "UT" - revision = "c2353362d570a7bfa228149c62842019201cfb71" - version = "v1.8.0" - [[projects]] digest = "1:c658e84ad3916da105a761660dcaeb01e63416c8ec7bc62256a9b411a05fcd67" name = "github.com/mattn/go-colorable" @@ -708,14 +673,6 @@ pruneopts = "UT" revision = "51976451ce1942acbb55707a983ed232fa027110" -[[projects]] - digest = "1:53bc4cd4914cd7cd52139990d5170d6dc99067ae31c56530621b18b35fc30318" - name = "github.com/mitchellh/mapstructure" - packages = ["."] - pruneopts = "UT" - revision = "3536a929edddb9a5b34bd6861dc4a9647cb459fe" - version = "v1.1.2" - [[projects]] digest = "1:33422d238f147d247752996a26574ac48dcf472976eda7f5134015f06bf16563" name = "github.com/modern-go/concurrent" @@ -800,14 +757,6 @@ revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38" version = "v1.0.2" -[[projects]] - digest = "1:95741de3af260a92cc5c7f3f3061e85273f5a81b5db20d4bd68da74bd521675e" - name = "github.com/pelletier/go-toml" - packages = ["."] - pruneopts = "UT" - revision = "c01d1270ff3e442a8a57cddc1c92dc1138598194" - version = "v1.2.0" - [[projects]] digest = "1:40e195917a951a8bf867cd05de2a46aaf1806c50cf92eebf4c16f78cd196f747" name = "github.com/pkg/errors" @@ -848,49 +797,6 @@ revision = "9f5d223c60793748f04a9d5b4b4eacddfc1f755d" version = "v1.1" -[[projects]] - digest = "1:6a4a11ba764a56d2758899ec6f3848d24698d48442ebce85ee7a3f63284526cd" - name = "github.com/spf13/afero" - packages = [ - ".", - "mem", - ] - pruneopts = "UT" - revision = "d40851caa0d747393da1ffb28f7f9d8b4eeffebd" - version = "v1.1.2" - -[[projects]] - digest = "1:516e71bed754268937f57d4ecb190e01958452336fa73dbac880894164e91c1f" - name = "github.com/spf13/cast" - packages = ["."] - pruneopts = "UT" - revision = "8965335b8c7107321228e3e3702cab9832751bac" - version = "v1.2.0" - -[[projects]] - digest = "1:68ea4e23713989dc20b1bded5d9da2c5f9be14ff9885beef481848edd18c26cb" - name = "github.com/spf13/jwalterweatherman" - packages = ["."] - pruneopts = "UT" - revision = "4a4406e478ca629068e7768fc33f3f044173c0a6" - version = "v1.0.0" - -[[projects]] - digest = "1:c1b1102241e7f645bc8e0c22ae352e8f0dc6484b6cb4d132fa9f24174e0119e2" - name = "github.com/spf13/pflag" - packages = ["."] - pruneopts = "UT" - revision = "298182f68c66c05229eb03ac171abe6e309ee79a" - version = "v1.0.3" - -[[projects]] - digest = "1:de37e343c64582d7026bf8ab6ac5b22a72eac54f3a57020db31524affed9f423" - name = "github.com/spf13/viper" - packages = ["."] - pruneopts = "UT" - revision = "6d33b5a963d922d182c91e8a1c88d81fd150cfd4" - version = "v1.3.1" - [[projects]] digest = "1:18752d0b95816a1b777505a97f71c7467a8445b8ffb55631a7bf779f6ba4fa83" name = "github.com/stretchr/testify" @@ -1063,7 +969,7 @@ revision = "4497e2df6f9e69048a54498c7affbbec3294ad47" [[projects]] - digest = "1:4392fcf42d5cf0e3ff78c96b2acf8223d49e4fdc53eb77c99d2f8dfe4680e006" + digest = "1:aa4d6967a3237f8367b6bf91503964a77183ecf696f1273e8ad3551bb4412b5f" name = "golang.org/x/text" packages = [ "encoding", @@ -1078,14 +984,11 @@ "encoding/unicode", "internal/gen", "internal/tag", - "internal/triegen", - "internal/ucd", "internal/utf8internal", "language", "runes", "transform", "unicode/cldr", - "unicode/norm", ] pruneopts = "UT" revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" @@ -1181,7 +1084,6 @@ "github.com/pkg/errors", "github.com/satori/go.uuid", "github.com/sirupsen/logrus", - "github.com/spf13/viper", "github.com/stretchr/testify/assert", "github.com/syndtr/goleveldb/leveldb", "github.com/urfave/cli", diff --git a/Gopkg.toml b/Gopkg.toml index d06f0ee8053..7671f71c761 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -110,8 +110,8 @@ version = "1.0.0" [[constraint]] - branch = "master" name = "github.com/libp2p/go-libp2p-pubsub" + version = "0.11.10" [[constraint]] name = "github.com/libp2p/go-libp2p-secio" diff --git a/README.md b/README.md index fbd41c2c5cc..a03c61a3dc2 100644 --- a/README.md +++ b/README.md @@ -28,53 +28,53 @@ With the text below could be created a start.sh file in the same # Private keys: -# 1: ZBis8aK5I66x1hwD+fE8sIw2nwQR5EBlTM8EiAOLZwE= -# 2: unkVM1J1JvlNFqY3uo/CvAay6BsIL3IzDH9GDgmfUAA= -# 3: Or0C7+gvlr/kIZLS+tiBBQfbUQ+pqS9FTE3dXfs5Swg= -# 4: 0i3nK1VRtHEGKXkejwcU9JYfbtO/WwvapxF8qLfX/Ag= -# 5: lKUBBHFuqwbKLy4xJshK8I/WND3JcaHZ+P1Pk9W8YAg= -# 6: jSNFvjEnds2JvB9v74l5oBbsCsFZNxuvkwjUh1m+xQw= -# 7: HbfcRATSr697pGqawbQIllutzK1ChTUGB+BD+ZpOPAs= -# 8: p57gu5OHtDRIWmgwNqm0k77XIi73KUywHAJfuAExtwk= -# 9: QhNuOhB7/9MymO9izhC43x5aiwI4NPSfjfeSxfj8BwY= -# 10: 5WCLn8HHsEvu0NE51dPCuLPVfssd005Y4trYshr6sAc= -# 11: te4x8jjWXIyjLB77zRgcmNR4NBpZXkeVVcpGKoRo/wo= -# 12: BEBCSKoB2gBUj0+AZvs9sxFIe0rMkxRrNFNHt/fg7gk= -# 13: dE+/RIIP+UFC9RX+rAmwosjrjBIO16q07dqvCvp44ww= -# 14: T9KhsEpTlNEmpNbh8KJkIwFufYKnwxHoj8Si+Hf2ww4= -# 15: JaTuf9jrXhsGnVmWWxOFaa4IZG8bqGwp/RnZ2QV9nww= -# 16: rN4LSVhJQ4sfvgTilJ0yozErT0NEI3/TUZZVPzhYrAo= -# 17: UoazF8yMgjPQwJ4jYkw4hwjhmTOFMBXXUhLEOBQ/1Ak= -# 18: RLs0DJOwwxAX+yMA3Vyu3MRtPA/CTClKovMNfVsl9QA= -# 19: FYXMafB0++EsBK2F1X5dpdmNvau6l72jrJnH9zcYOAE= -# 20: //Cq4pEA6SW6NZeJtq4xzK/5JuZlblFbeyAPpl5/KgU= -# 21: fJFUv3dbGZuupDnTc22q5XRXCNLl1lEmmStyrguXwg0= +# 1: b5671723b8c64b16b3d4f5a2db9a2e3b61426e87c945b5453279f0701a10c70f +# 2: 8c7e9c79206f2bf7425050dc14d9b220596cee91c09bcbdd1579297572b63109 +# 3: 52964f3887b72ea0bd385ee3b60d2321034256d17cb4eec8333d4a4ce1692b08 +# 4: 2c5a2b1d724c21be3aebf46dfa1db841b8b58d063066c19e983ff03b3f955f08 +# 5: 6532ccdb32e95ca3f4e5fc5b43c41dda610a469f7f18c76c278b9b559779300a +# 6: 772f371cafb44da6ade4af11c5799bd1c25bbdfb17335f4fc102a81b2d66cc04 +# 7: 12ffff943b39b21f1c5f1455e06a2ab60d442ff9cb65451334551a0e84049409 +# 8: a7160d033389e99198331a4c9e3c7417722ecc29246f42049335e972e4df5b0f +# 9: 9cf7b345fdf3c6d2de2d6b28cc0019c02966ef88774069d530b636f760292c00 +# 10: f236b2f60ad8864ea89fd69bf74ec65f64bd82c2f310b81a8492ba93e8b6c402 +# 11: 0f04b269d382944c5c246264816567c9a33b2a9bf78f075d9a17b13e7b925603 +# 12: 8cf6e6aeb878ef01399e413bc7dd788a69221a37c29021bd3851f2f5fe67f203 +# 13: c7f48a69e4b2159fe209bdb4608410516f28186ad498ca78b16d8b2bebfb1f0f +# 14: 7579d506ff015e5e720b2e75e784c13a4662f48b6e2038af6e902b1157239101 +# 15: b7877c28e394ab4c89d80e8b2818ef1346ee8c0fdd6566a6d27088ad097e4f05 +# 16: 055ae06aad2c7f8d50ecd4bd7c4145cb19636b0b0126ffa4ee1326afb3876000 +# 17: c47b89db3e3ad067863af5a7b7f9e9dec0e47516e87d5d6d744e0af581a79404 +# 18: 843c4bea60b629fae50a0334ba9c7284f886b90502b740c8f95ab13a36a08c0e +# 19: 92561fd546014adcd13ff7776829f1c8c0886e83eb04fb723fc3636da8f2960b +# 20: 22a3922963cc1fe57a59178f021282223a8742fb4476f7a5c5b4c2c2aa2d4f0f +# 21: 02c9d56e503857832c07d78b0d75aabb8e6c109e9cec641b8681afaee2c9a701 # Public keys: -# 1: bCYAUf+qhQtYKFfgQ1g3JstkJFVTsA2KAH+0L+qZlO4= -# 2: gDI39ZN3loP1Cujru6+BJtu+gNwQnBB8g4yVW0wyuaA= -# 3: TLkPlhd8g07tiqE4Mgvq1kCp3EOEEjn8O3/DpyjUqFE= -# 4: YO0S6tNhNjWwsJJzTLrJMecEKDLeuZlJNznf7nU/TcM= -# 5: Gkfqv+PTR9ot2TILrOBFcPojmhwE9IC7Y2psLc9ZsZc= -# 6: nVnGbEbxPR4ab0thyeV/O1FZopNDTdNexNI5OPCGtRo= -# 7: xMonbskDZ1dHeW4vh3/AUzf7psbPIPTKfGz+J6gmpeA= -# 8: 6kVjlTw6NmSqV4kI6cEprxd+2f37FXCzpXnFsYYcsLU= -# 9: 0NNr5LuHjgSSiXFKp37uPAjGt1HYdQJTUmJ4ASKGZyQ= -# 10: JyK0pKNF3UnzIFtm9pVDXF3xArpfhrU4g2bbGxwHBwE= -# 11: bT4UtBU3A5MpKWvvpL5nAplLAGEWccoJb9NzdXJN1fk= -# 12: S+goExeC5GNLPx9xYnhXB8mVQQzGJpr0B1QCU/DqqdM= -# 13: 1KGnod6xGDML+y5qNiwrYq2t2M1Elgbt2YNfXLRSow0= -# 14: 84uQq3tJFjzSZ6KIcJwb7JZ1yLwuDytcPHYITVlnNjA= -# 15: 8pqG75sKgeqIKim2jR/P7ojM5QgSQkHLt6xpablZoM0= -# 16: E8u9qxcr8hQ3nM6RfOOLS4bzu9fV+whiTtOY5kjaDlE= -# 17: M+C7UJoK6poGvlPqkLVpFOuWao9dFtCoHVWaWv0ee3U= -# 18: NBu1klLKrye76DblD8IhexsHrai2TD4+8KdWBWWqaxc= -# 19: SBoD6sJA5oEmyhwK5CckOlH3ByrJkyJZyih+iy1tGno= -# 20: NUhC12eqQ0U5IIjpytuaWzBSSPYE2myNVro8I/3Rjpg= -# 21: +cmFvcxUN9roSQnW8AMpi3kq0LDSTZCIrL8f7Pc6ez4= +# 1: 5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419 +# 2: 8e0b815be8026a6732eea132e113913c12e2f5b19f25e86a8403afffbaf02088 +# 3: e6ec171959063bd0d61f95a52de73d6a16e649ba5fa8b12663b092b48cc99434 +# 4: 20ccf92c80065a0f9ce1f1b8e3dee1e0b9774f4eebf2af7e8fa3ac503923360d +# 5: 9a3b8e67f42aef9544e0888ea9daee77af90292c86336203d224691d55306d08 +# 6: 0740bccedc28084ab811065cb618fec4ee623384b4b3d5466190d11ff6d77007 +# 7: 0ccba0f98829ea9f337035a1f7b13cbd8e9ffb94f2c538e2cafb34ca7f2bcd24 +# 8: d9e9596c28a3945253d46bc1b9418963c0672a26a0b40ee7372cb9ec34d1ee07 +# 9: 86fbd8606e73b7a4f45a51b443270f3050aff571a29b9804d2444c081560d1dd +# 10: 2084f2493e68443a5b156ec42a8cd9072c47aa453df4acd20524792a4fd9f474 +# 11: f91d24256d918144aaacfa641cd113af05d56cfb7a5b8ba5885ebd8edd43fe1e +# 12: e8d4bcfe91c3c7788d8ab3704b192229900ec3fe3f1eb6f841c440e223d401a0 +# 13: 4bf7ee0e17a0b76d3837494d3950113d3e77db055b2c07c9cb443f529d73c8e3 +# 14: 20f12f7bdd4ab65321eb58ce8f90eec733e3e9a4cc9d6d5d7e57d2e86c6c2c76 +# 15: 34cf226f4d62a22e4993a1a2835f05a4bb2fb48304e16f2dc18f99b39c496f7d +# 16: b9f0fc3e1baa49c027205946af7d6c79b749481e5ab766356db3b878c0929558 +# 17: 6670b048a3f9d93fdacb4d60ff7c2f3bd7440d5175ca8b9d2475a444cd7a129b +# 18: d82b3f4490ccb2ffbba5695c1b7c345a5709584737a263999c77cc1a09136de1 +# 19: 29ba49f47e2b86b143418db31c696791215236925802ea1f219780e360a8209e +# 20: 199866d09b8385023c25f261460d4d20ae0d5bc72ddf1fa5c1b32768167a8fb0 +# 21: 0098f7634d7327139848a0f6ad926051596e5a0f692adfb671ab02092b77181d -gnome-terminal -- ./bootnode -port 4000 -max-allowed-peers 4 -private-key "ZBis8aK5I66x1hwD+fE8sIw2nwQR5EBlTM8EiAOLZwE=" -gnome-terminal -- ./bootnode -port 4001 -max-allowed-peers 4 -private-key "unkVM1J1JvlNFqY3uo/CvAay6BsIL3IzDH9GDgmfUAA=" -gnome-terminal -- ./bootnode -port 4002 -max-allowed-peers 4 -private-key "Or0C7+gvlr/kIZLS+tiBBQfbUQ+pqS9FTE3dXfs5Swg=" -``` \ No newline at end of file +gnome-terminal -- ./bootnode -port 4000 -max-allowed-peers 4 -private-key "b5671723b8c64b16b3d4f5a2db9a2e3b61426e87c945b5453279f0701a10c70f" +gnome-terminal -- ./bootnode -port 4001 -max-allowed-peers 4 -private-key "8c7e9c79206f2bf7425050dc14d9b220596cee91c09bcbdd1579297572b63109" +gnome-terminal -- ./bootnode -port 4002 -max-allowed-peers 4 -private-key "52964f3887b72ea0bd385ee3b60d2321034256d17cb4eec8333d4a4ce1692b08" +``` diff --git a/api/address/mock/facade.go b/api/address/mock/facade.go index 771a242e889..d0f9b621116 100644 --- a/api/address/mock/facade.go +++ b/api/address/mock/facade.go @@ -2,15 +2,26 @@ package mock import ( "math/big" + + "github.com/ElrondNetwork/elrond-go-sandbox/data/state" ) +// Facade is the mock implementation of a address router handler type Facade struct { - BalanceHandler func(string) (*big.Int, error) + BalanceHandler func(string) (*big.Int, error) + GetAccountHandler func(address string) (*state.Account, error) } +// GetBalance is the mock implementation of a handler's GetBalance method func (f *Facade) GetBalance(address string) (*big.Int, error) { return f.BalanceHandler(address) } +// GetAccount is the mock implementation of a handler's GetAccount method +func (f *Facade) GetAccount(address string) (*state.Account, error) { + return f.GetAccountHandler(address) +} + +// WrongFacade is a struct that can be used as a wrong implementation of the address router handler type WrongFacade struct { } diff --git a/api/address/routes.go b/api/address/routes.go index c2b67bdba97..e33a77d6fd3 100644 --- a/api/address/routes.go +++ b/api/address/routes.go @@ -1,56 +1,82 @@ package address import ( + "fmt" "math/big" "net/http" + "github.com/ElrondNetwork/elrond-go-sandbox/api/errors" + "github.com/ElrondNetwork/elrond-go-sandbox/data/state" "github.com/gin-gonic/gin" ) // Handler interface defines methods that can be used from `elrondFacade` context variable type Handler interface { GetBalance(address string) (*big.Int, error) + GetAccount(address string) (*state.Account, error) +} + +type accountResponse struct { + Address string `json:"address"` + Nonce uint64 `json:"nonce"` + Balance string `json:"balance"` + CodeHash []byte `json:"codeHash"` + RootHash []byte `json:"rootHash"` } // Routes defines address related routes func Routes(router *gin.RouterGroup) { - router.GET("/:address", GetAddress) + router.GET("/:address", GetAccount) router.GET("/:address/balance", GetBalance) } -//GetAddress returns the information about the address passed as parameter -func GetAddress(c *gin.Context) { - _, ok := c.MustGet("elrondFacade").(Handler) +// GetAccount returns an accountResponse containing information +// about the account corelated with provided address +func GetAccount(c *gin.Context) { + ef, ok := c.MustGet("elrondFacade").(Handler) if !ok { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Invalid app context"}) + c.JSON(http.StatusInternalServerError, gin.H{"error": errors.ErrInvalidAppContext.Error()}) return } - //TODO: add real implementation here addr := c.Param("address") - - c.JSON(http.StatusOK, gin.H{"message": addr}) + acc, err := ef.GetAccount(addr) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("%s: %s", errors.ErrCouldNotGetAccount.Error(), err.Error())}) + return + } + c.JSON(http.StatusOK, gin.H{"account": accountResponseFromBaseAccount(addr, acc)}) } //GetBalance returns the balance for the address parameter func GetBalance(c *gin.Context) { ef, ok := c.MustGet("elrondFacade").(Handler) if !ok { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Invalid app context"}) + c.JSON(http.StatusInternalServerError, gin.H{"error": errors.ErrInvalidAppContext.Error()}) return } addr := c.Param("address") if addr == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "Get balance error: Address was empty"}) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("%s: %s", errors.ErrGetBalance.Error(), errors.ErrEmptyAddress.Error())}) return } balance, err := ef.GetBalance(addr) if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Get balance error: " + err.Error()}) + c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("%s: %s", errors.ErrGetBalance.Error(), err.Error())}) return } c.JSON(http.StatusOK, gin.H{"balance": balance}) } + +func accountResponseFromBaseAccount(address string, account *state.Account) accountResponse { + return accountResponse{ + Address: address, + Nonce: account.Nonce, + Balance: account.Balance.String(), + CodeHash: account.CodeHash, + RootHash: account.RootHash, + } +} diff --git a/api/address/routes_test.go b/api/address/routes_test.go index 77cd2254781..9e5f722aa53 100644 --- a/api/address/routes_test.go +++ b/api/address/routes_test.go @@ -13,22 +13,51 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/api/address" "github.com/ElrondNetwork/elrond-go-sandbox/api/address/mock" + errors2 "github.com/ElrondNetwork/elrond-go-sandbox/api/errors" "github.com/ElrondNetwork/elrond-go-sandbox/api/middleware" + "github.com/ElrondNetwork/elrond-go-sandbox/data/state" "github.com/gin-contrib/cors" "github.com/gin-gonic/gin" "github.com/stretchr/testify/assert" ) -//General response structure +// General response structure type GeneralResponse struct { - Message string `json:"message"` - Error string `json:"error"` + Error string `json:"error"` } -//Address Response structure -type AddressResponse struct { +//addressResponse structure +type addressResponse struct { GeneralResponse - Balance big.Int `json:"balance"` + Balance *big.Int `json:"balance"` +} + +func NewAddressResponse() *addressResponse { + return &addressResponse{ + Balance: big.NewInt(0), + } +} + +type AccountResponse struct { + GeneralResponse + Account struct { + Address string `json:"address"` + Nonce uint64 `json:"nonce"` + Balance string `json:"balance"` + CodeHash []byte `json:"codeHash"` + RootHash []byte `json:"rootHash"` + } `json:"account"` +} + +func TestAddressRoute_EmptyTrailReturns404(t *testing.T) { + t.Parallel() + facade := mock.Facade{} + ws := startNodeServer(&facade) + + req, _ := http.NewRequest("GET", "/address", nil) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + assert.Equal(t, http.StatusNotFound, resp.Code) } func TestGetBalance_WithCorrectAddressShouldNotReturnError(t *testing.T) { @@ -47,10 +76,10 @@ func TestGetBalance_WithCorrectAddressShouldNotReturnError(t *testing.T) { resp := httptest.NewRecorder() ws.ServeHTTP(resp, req) - addressResponse := AddressResponse{} + addressResponse := NewAddressResponse() loadResponse(resp.Body, &addressResponse) assert.Equal(t, http.StatusOK, resp.Code) - assert.Equal(t, *amount, addressResponse.Balance) + assert.Equal(t, amount, addressResponse.Balance) assert.Equal(t, "", addressResponse.Error) } @@ -69,19 +98,20 @@ func TestGetBalance_WithWrongAddressShouldReturnZero(t *testing.T) { resp := httptest.NewRecorder() ws.ServeHTTP(resp, req) - addressResponse := AddressResponse{} + addressResponse := NewAddressResponse() loadResponse(resp.Body, &addressResponse) assert.Equal(t, http.StatusOK, resp.Code) - assert.Equal(t, *big.NewInt(0), addressResponse.Balance) + assert.Equal(t, big.NewInt(0), addressResponse.Balance) assert.Equal(t, "", addressResponse.Error) } func TestGetBalance_NodeGetBalanceReturnsError(t *testing.T) { t.Parallel() addr := "addr" + balanceError := errors.New("error") facade := mock.Facade{ BalanceHandler: func(s string) (i *big.Int, e error) { - return nil, errors.New("error") + return nil, balanceError }, } @@ -91,16 +121,17 @@ func TestGetBalance_NodeGetBalanceReturnsError(t *testing.T) { resp := httptest.NewRecorder() ws.ServeHTTP(resp, req) - addressResponse := AddressResponse{} + addressResponse := NewAddressResponse() loadResponse(resp.Body, &addressResponse) assert.Equal(t, http.StatusInternalServerError, resp.Code) - assert.Equal(t, "Get balance error: error", addressResponse.Error) + assert.Equal(t, fmt.Sprintf("%s: %s", errors2.ErrGetBalance.Error(), balanceError.Error()), addressResponse.Error) } func TestGetBalance_WithEmptyAddressShouldReturnZeroAndError(t *testing.T) { t.Parallel() facade := mock.Facade{ BalanceHandler: func(s string) (i *big.Int, e error) { + panic("aaaa") return big.NewInt(0), errors.New("address was empty") }, } @@ -112,40 +143,90 @@ func TestGetBalance_WithEmptyAddressShouldReturnZeroAndError(t *testing.T) { resp := httptest.NewRecorder() ws.ServeHTTP(resp, req) - addressResponse := AddressResponse{} + addressResponse := NewAddressResponse() loadResponse(resp.Body, &addressResponse) assert.Equal(t, http.StatusBadRequest, resp.Code) - assert.Equal(t, *big.NewInt(0), addressResponse.Balance) + assert.Equal(t, big.NewInt(0), addressResponse.Balance) assert.NotEmpty(t, addressResponse.Error) - assert.True(t, strings.Contains(addressResponse.Error, "Get balance error: Address was empty")) + assert.True(t, strings.Contains(addressResponse.Error, + fmt.Sprintf("%s: %s", errors2.ErrGetBalance.Error(), errors2.ErrEmptyAddress.Error()), + )) } -func TestGetAddress_FailsWithWrongFacadeTypeConversion(t *testing.T) { +func TestGetBalance_FailsWithWrongFacadeTypeConversion(t *testing.T) { t.Parallel() ws := startNodeServerWrongFacade() - req, _ := http.NewRequest("GET", "/address/empty", nil) + req, _ := http.NewRequest("GET", "/address/empty/balance", nil) resp := httptest.NewRecorder() ws.ServeHTTP(resp, req) - statusRsp := AddressResponse{} + statusRsp := NewAddressResponse() loadResponse(resp.Body, &statusRsp) assert.Equal(t, resp.Code, http.StatusInternalServerError) - assert.Equal(t, statusRsp.Error, "Invalid app context") + assert.Equal(t, statusRsp.Error, errors2.ErrInvalidAppContext.Error()) } -func TestGetBalance_FailsWithWrongFacadeTypeConversion(t *testing.T) { +func TestGetAccount_FailsWithWrongFacadeTypeConversion(t *testing.T) { t.Parallel() ws := startNodeServerWrongFacade() - req, _ := http.NewRequest("GET", "/address/empty/balance", nil) + req, _ := http.NewRequest("GET", "/address/empty", nil) resp := httptest.NewRecorder() ws.ServeHTTP(resp, req) - statusRsp := AddressResponse{} + statusRsp := NewAddressResponse() loadResponse(resp.Body, &statusRsp) assert.Equal(t, resp.Code, http.StatusInternalServerError) - assert.Equal(t, statusRsp.Error, "Invalid app context") + assert.Equal(t, statusRsp.Error, errors2.ErrInvalidAppContext.Error()) +} + +func TestGetAccount_FailWhenFacadeGetAccountFails(t *testing.T) { + t.Parallel() + returnedError := "i am an error" + facade := mock.Facade{ + GetAccountHandler: func(address string) (*state.Account, error) { + return nil, errors.New(returnedError) + }, + } + ws := startNodeServer(&facade) + + req, _ := http.NewRequest("GET", "/address/test", nil) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + accountResponse := AccountResponse{} + loadResponse(resp.Body, &accountResponse) + assert.Equal(t, http.StatusInternalServerError, resp.Code) + assert.Empty(t, accountResponse.Account) + assert.NotEmpty(t, accountResponse.Error) + assert.True(t, strings.Contains(accountResponse.Error, fmt.Sprintf("%s: %s", errors2.ErrCouldNotGetAccount.Error(), returnedError))) +} + +func TestGetAccount_ReturnsSuccessfully(t *testing.T) { + t.Parallel() + facade := mock.Facade{ + GetAccountHandler: func(address string) (*state.Account, error) { + return &state.Account{ + Nonce: 1, + Balance: big.NewInt(100), + }, nil + }, + } + ws := startNodeServer(&facade) + + reqAddress := "test" + req, _ := http.NewRequest("GET", fmt.Sprintf("/address/%s", reqAddress), nil) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + accountResponse := AccountResponse{} + loadResponse(resp.Body, &accountResponse) + assert.Equal(t, http.StatusOK, resp.Code) + assert.Equal(t, accountResponse.Account.Address, reqAddress) + assert.Equal(t, accountResponse.Account.Nonce, uint64(1)) + assert.Equal(t, accountResponse.Account.Balance, "100") + assert.Empty(t, accountResponse.Error) } func loadResponse(rsp io.Reader, destination interface{}) { diff --git a/api/errors/errors.go b/api/errors/errors.go new file mode 100644 index 00000000000..81ab53f41f3 --- /dev/null +++ b/api/errors/errors.go @@ -0,0 +1,53 @@ +package errors + +import ( + "errors" +) + +// ErrInvalidAppContext signals an invalid context passed to the routing system +var ErrInvalidAppContext = errors.New("invalid app context") + +// ErrCouldNotGetAccount signals that a requested account could not be retrieved +var ErrCouldNotGetAccount = errors.New("could not get requested account") + +// ErrGetBalance signals an error in getting the balance for an account +var ErrGetBalance = errors.New("get balance error") + +// ErrEmptyAddress signals an empty address was provided +var ErrEmptyAddress = errors.New("address was empty") + +// ErrNodeAlreadyRunning signals the node is already running +var ErrNodeAlreadyRunning = errors.New("node already running") + +// ErrNodeAlreadyStopped signals the node is already stopped +var ErrNodeAlreadyStopped = errors.New("node already stopped") + +// ErrCouldNotStopNode signals the node is already stopped +var ErrCouldNotStopNode = errors.New("could not stop node") + +// ErrBadInitOfNode signals the node is could not be started correctly +var ErrBadInitOfNode = errors.New("bad init of node") + +// ErrCouldNotParsePubKey signals that a given public key could not be parsed +var ErrCouldNotParsePubKey = errors.New("cound not parse node's public key") + +// ErrValidation signals an error in validation +var ErrValidation = errors.New("validation error") + +// ErrTxGenerationFailed signals an error generating a transaction +var ErrTxGenerationFailed = errors.New("transaction generation failed") + +// ErrMultipleTxGenerationFailed signals an error generating multiple transactions +var ErrMultipleTxGenerationFailed = errors.New("multiple transaction generation failed") + +// ErrInvalidSignatureHex signals a wrong hex value was provided for the signature +var ErrInvalidSignatureHex = errors.New("invalid signature, could not decode hex value") + +// ErrValidationEmptyTxHash signals an empty tx hash was provided +var ErrValidationEmptyTxHash = errors.New("TxHash is empty") + +// ErrGetTransaction signals an error happend trying to fetch a transaction +var ErrGetTransaction = errors.New("transaction getting failed") + +// ErrTxNotFound signals an error happend trying to fetch a transaction +var ErrTxNotFound = errors.New("transaction was not found") diff --git a/api/node/mock/facade.go b/api/node/mock/facade.go index 085c4c0de93..a6be7b1b1f2 100644 --- a/api/node/mock/facade.go +++ b/api/node/mock/facade.go @@ -4,16 +4,20 @@ import ( "github.com/pkg/errors" ) +// Facade is the mock implementation of a node router handler type Facade struct { - Running bool - ShouldErrorStart bool - ShouldErrorStop bool + Running bool + ShouldErrorStart bool + ShouldErrorStop bool + GetCurrentPublicKeyHandler func() string } +// IsNodeRunning is the mock implementation of a handler's IsNodeRunning method func (f *Facade) IsNodeRunning() bool { return f.Running } +// StartNode is the mock implementation of a handler's StartNode method func (f *Facade) StartNode() error { if f.ShouldErrorStart { return errors.New("error") @@ -21,6 +25,7 @@ func (f *Facade) StartNode() error { return nil } +// StopNode is the mock implementation of a handler's StopNode method func (f *Facade) StopNode() error { if f.ShouldErrorStop { return errors.New("error") @@ -29,5 +34,11 @@ func (f *Facade) StopNode() error { return nil } +// GetCurrentPublicKey is the mock implementation of a handler's StopNode method +func (f *Facade) GetCurrentPublicKey() string { + return f.GetCurrentPublicKeyHandler() +} + +// WrongFacade is a struct that can be used as a wrong implementation of the node router handler type WrongFacade struct { } diff --git a/api/node/routes.go b/api/node/routes.go index 8f9e2f886df..78eec3870d2 100644 --- a/api/node/routes.go +++ b/api/node/routes.go @@ -1,8 +1,11 @@ package node import ( + "fmt" "net/http" + "net/url" + "github.com/ElrondNetwork/elrond-go-sandbox/api/errors" "github.com/gin-gonic/gin" ) @@ -11,6 +14,7 @@ type Handler interface { IsNodeRunning() bool StartNode() error StopNode() error + GetCurrentPublicKey() string } // Routes defines node related routes @@ -18,13 +22,14 @@ func Routes(router *gin.RouterGroup) { router.GET("/start", StartNode) router.GET("/status", Status) router.GET("/stop", StopNode) + router.GET("/address", Address) } // Status returns the state of the node e.g. running/stopped func Status(c *gin.Context) { ef, ok := c.MustGet("elrondFacade").(Handler) if !ok { - c.JSON(http.StatusInternalServerError, gin.H{"message": "Invalid app context"}) + c.JSON(http.StatusInternalServerError, gin.H{"error": errors.ErrInvalidAppContext.Error()}) return } @@ -35,39 +40,57 @@ func Status(c *gin.Context) { func StartNode(c *gin.Context) { ef, ok := c.MustGet("elrondFacade").(Handler) if !ok { - c.JSON(http.StatusInternalServerError, gin.H{"message": "Invalid app context"}) + c.JSON(http.StatusInternalServerError, gin.H{"error": errors.ErrInvalidAppContext.Error()}) return } if ef.IsNodeRunning() { - c.JSON(http.StatusOK, gin.H{"message": "Node already running"}) + c.JSON(http.StatusOK, gin.H{"message": errors.ErrNodeAlreadyRunning.Error()}) return } err := ef.StartNode() if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"message": "Bad init of node: " + err.Error()}) + c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("%s: %s", errors.ErrBadInitOfNode.Error(), err.Error())}) return } c.JSON(http.StatusOK, gin.H{"message": "ok"}) } +// Address returns the information about the address passed as parameter +func Address(c *gin.Context) { + ef, ok := c.MustGet("elrondFacade").(Handler) + if !ok { + c.JSON(http.StatusInternalServerError, gin.H{"error": errors.ErrInvalidAppContext.Error()}) + return + } + + currentAddress := ef.GetCurrentPublicKey() + address, err := url.Parse(currentAddress) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": errors.ErrCouldNotParsePubKey.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"address": address.String()}) +} + // StopNode will stop the node instance func StopNode(c *gin.Context) { ef, ok := c.MustGet("elrondFacade").(Handler) if !ok { - c.JSON(http.StatusInternalServerError, gin.H{"message": "Invalid app context"}) + c.JSON(http.StatusInternalServerError, gin.H{"error": errors.ErrInvalidAppContext.Error()}) return } if !ef.IsNodeRunning() { - c.JSON(http.StatusOK, gin.H{"message": "Node already stopped"}) + c.JSON(http.StatusOK, gin.H{"message": errors.ErrNodeAlreadyStopped.Error()}) return } err := ef.StopNode() if err != nil && ef.IsNodeRunning() { - c.JSON(http.StatusInternalServerError, gin.H{"message": "Could not stop node: " + err.Error()}) + c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("%s: %s", errors.ErrCouldNotStopNode.Error(), err.Error())}) return } c.JSON(http.StatusOK, gin.H{"message": "ok"}) diff --git a/api/node/routes_test.go b/api/node/routes_test.go index 728ad7364c6..4d6af899a6b 100644 --- a/api/node/routes_test.go +++ b/api/node/routes_test.go @@ -8,11 +8,11 @@ import ( "net/http/httptest" "testing" + "github.com/ElrondNetwork/elrond-go-sandbox/api/errors" "github.com/gin-contrib/cors" "github.com/gin-gonic/gin" "github.com/stretchr/testify/assert" - "github.com/ElrondNetwork/elrond-go-sandbox/api/middleware" "github.com/ElrondNetwork/elrond-go-sandbox/api/node" "github.com/ElrondNetwork/elrond-go-sandbox/api/node/mock" ) @@ -27,6 +27,11 @@ type StatusResponse struct { Running bool `json:"running"` } +type AddressResponse struct { + GeneralResponse + Address string `json:"address"` +} + func TestStatus_FailsWithoutFacade(t *testing.T) { t.Parallel() ws := startNodeServer(nil) @@ -41,8 +46,6 @@ func TestStatus_FailsWithoutFacade(t *testing.T) { func TestStatus_FailsWithWrongFacadeTypeConversion(t *testing.T) { t.Parallel() - facade := mock.Facade{} - facade.Running = true ws := startNodeServerWrongFacade() req, _ := http.NewRequest("GET", "/node/status", nil) resp := httptest.NewRecorder() @@ -51,7 +54,7 @@ func TestStatus_FailsWithWrongFacadeTypeConversion(t *testing.T) { statusRsp := StatusResponse{} loadResponse(resp.Body, &statusRsp) assert.Equal(t, resp.Code, http.StatusInternalServerError) - assert.Equal(t, statusRsp.Message, "Invalid app context") + assert.Equal(t, statusRsp.Error, errors.ErrInvalidAppContext.Error()) } func TestStatus_ReturnsCorrectResponseOnStart(t *testing.T) { @@ -107,7 +110,7 @@ func TestStartNode_FailsWithWrongFacadeTypeConversion(t *testing.T) { statusRsp := StatusResponse{} loadResponse(resp.Body, &statusRsp) assert.Equal(t, resp.Code, http.StatusInternalServerError) - assert.Equal(t, statusRsp.Message, "Invalid app context") + assert.Equal(t, statusRsp.Error, errors.ErrInvalidAppContext.Error()) } func TestStartNode_AlreadyRunning(t *testing.T) { @@ -122,7 +125,7 @@ func TestStartNode_AlreadyRunning(t *testing.T) { statusRsp := StatusResponse{} loadResponse(resp.Body, &statusRsp) assert.Equal(t, resp.Code, http.StatusOK) - assert.Equal(t, statusRsp.Message, "Node already running") + assert.Equal(t, statusRsp.Message, errors.ErrNodeAlreadyRunning.Error()) } func TestStartNode_FromFacadeErrors(t *testing.T) { @@ -137,7 +140,7 @@ func TestStartNode_FromFacadeErrors(t *testing.T) { statusRsp := StatusResponse{} loadResponse(resp.Body, &statusRsp) assert.Equal(t, resp.Code, http.StatusInternalServerError) - assert.Equal(t, statusRsp.Message, "Bad init of node: error") + assert.Equal(t, statusRsp.Error, fmt.Sprintf("%s: error", errors.ErrBadInitOfNode.Error())) } func TestStartNode(t *testing.T) { @@ -178,7 +181,7 @@ func TestStopNode_FailsWithWrongFacadeTypeConversion(t *testing.T) { statusRsp := StatusResponse{} loadResponse(resp.Body, &statusRsp) assert.Equal(t, resp.Code, http.StatusInternalServerError) - assert.Equal(t, statusRsp.Message, "Invalid app context") + assert.Equal(t, statusRsp.Error, errors.ErrInvalidAppContext.Error()) } func TestStopNode_AlreadyStopped(t *testing.T) { @@ -193,7 +196,7 @@ func TestStopNode_AlreadyStopped(t *testing.T) { statusRsp := StatusResponse{} loadResponse(resp.Body, &statusRsp) assert.Equal(t, resp.Code, http.StatusOK) - assert.Equal(t, statusRsp.Message, "Node already stopped") + assert.Equal(t, statusRsp.Message, errors.ErrNodeAlreadyStopped.Error()) } func TestStopNode_FromFacadeErrors(t *testing.T) { @@ -209,7 +212,7 @@ func TestStopNode_FromFacadeErrors(t *testing.T) { statusRsp := StatusResponse{} loadResponse(resp.Body, &statusRsp) assert.Equal(t, resp.Code, http.StatusInternalServerError) - assert.Equal(t, statusRsp.Message, "Could not stop node: error") + assert.Equal(t, statusRsp.Error, fmt.Sprintf("%s: error", errors.ErrCouldNotStopNode.Error())) } func TestStopNode(t *testing.T) { @@ -228,6 +231,66 @@ func TestStopNode(t *testing.T) { assert.Equal(t, statusRsp.Message, "ok") } +func TestAddress_FailsWithoutFacade(t *testing.T) { + t.Parallel() + ws := startNodeServer(nil) + defer func() { + r := recover() + assert.NotNil(t, r, "Not providing elrondFacade context should panic") + }() + req, _ := http.NewRequest("GET", "/node/address", nil) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) +} + +func TestAddress_FailsWithWrongFacadeTypeConversion(t *testing.T) { + t.Parallel() + ws := startNodeServerWrongFacade() + req, _ := http.NewRequest("GET", "/node/address", nil) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + addressRsp := AddressResponse{} + loadResponse(resp.Body, &addressRsp) + assert.Equal(t, resp.Code, http.StatusInternalServerError) + assert.Equal(t, addressRsp.Error, errors.ErrInvalidAppContext.Error()) +} + +func TestAddress_FailsWithInvalidUrlString(t *testing.T) { + facade := mock.Facade{} + facade.GetCurrentPublicKeyHandler = func() string { + // we return a malformed scheme so that url.Parse will error + return "cache_object:foo/bar" + } + ws := startNodeServer(&facade) + req, _ := http.NewRequest("GET", "/node/address", nil) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + addressRsp := AddressResponse{} + loadResponse(resp.Body, &addressRsp) + assert.Equal(t, resp.Code, http.StatusInternalServerError) + assert.Equal(t, addressRsp.Error, errors.ErrCouldNotParsePubKey.Error()) +} + +func TestAddress_ReturnsSuccessfully(t *testing.T) { + facade := mock.Facade{} + address := "abcdefghijklmnopqrstuvwxyz" + facade.GetCurrentPublicKeyHandler = func() string { + // we return a malformed scheme so that url.Parse will error + return address + } + ws := startNodeServer(&facade) + req, _ := http.NewRequest("GET", "/node/address", nil) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + addressRsp := AddressResponse{} + loadResponse(resp.Body, &addressRsp) + assert.Equal(t, resp.Code, http.StatusOK) + assert.Equal(t, addressRsp.Address, address) +} + func loadResponse(rsp io.Reader, destination interface{}) { jsonParser := json.NewDecoder(rsp) err := jsonParser.Decode(destination) @@ -243,24 +306,23 @@ func logError(err error) { } func startNodeServer(handler node.Handler) *gin.Engine { - gin.SetMode(gin.TestMode) - ws := gin.New() - ws.Use(cors.Default()) - nodeRoutes := ws.Group("/node") - if handler != nil { - nodeRoutes.Use(middleware.WithElrondFacade(handler)) - } - node.Routes(nodeRoutes) - return ws + return startNodeServerWithFacade(handler) } func startNodeServerWrongFacade() *gin.Engine { + return startNodeServerWithFacade(mock.WrongFacade{}) +} + +func startNodeServerWithFacade(facade interface{}) *gin.Engine { gin.SetMode(gin.TestMode) ws := gin.New() ws.Use(cors.Default()) - ws.Use(func(c *gin.Context) { - c.Set("elrondFacade", mock.WrongFacade{}) - }) + if facade != nil { + ws.Use(func(c *gin.Context) { + c.Set("elrondFacade", facade) + }) + } + nodeRoutes := ws.Group("/node") node.Routes(nodeRoutes) return ws diff --git a/api/transaction/mock/facade.go b/api/transaction/mock/facade.go index 3df00d95dea..106319cca72 100644 --- a/api/transaction/mock/facade.go +++ b/api/transaction/mock/facade.go @@ -6,26 +6,37 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/data/transaction" ) +// Facade is the mock implementation of a transaction router handler type Facade struct { - GenerateTransactionHandler func(sender string, receiver string, value big.Int, + GenerateTransactionHandler func(sender string, receiver string, value *big.Int, code string) (*transaction.Transaction, error) GetTransactionHandler func(hash string) (*transaction.Transaction, error) - SendTransactionHandler func(nonce uint64, sender string, receiver string, value big.Int, code string, signature string) (*transaction.Transaction, error) + SendTransactionHandler func(nonce uint64, sender string, receiver string, value *big.Int, code string, + signature []byte) (*transaction.Transaction, error) + GenerateAndSendBulkTransactionsHandler func(destination string, value *big.Int, nrTransactions uint64) error } -func (f *Facade) GenerateTransaction(sender string, receiver string, value big.Int, +// GenerateTransaction is the mock implementation of a handler's GenerateTransaction method +func (f *Facade) GenerateTransaction(sender string, receiver string, value *big.Int, code string) (*transaction.Transaction, error) { return f.GenerateTransactionHandler(sender, receiver, value, code) } +// GetTransaction is the mock implementation of a handler's GetTransaction method func (f *Facade) GetTransaction(hash string) (*transaction.Transaction, error) { return f.GetTransactionHandler(hash) } -// SendTransaction will send a new transaction on the topic channel -func (f *Facade) SendTransaction(nonce uint64, sender string, receiver string, value big.Int, code string, signature string) (*transaction.Transaction, error) { +// SendTransaction is the mock implementation of a handler's SendTransaction method +func (f *Facade) SendTransaction(nonce uint64, sender string, receiver string, value *big.Int, code string, signature []byte) (*transaction.Transaction, error) { return f.SendTransactionHandler(nonce, sender, receiver, value, code, signature) } +// GenerateAndSendBulkTransactions is the mock implementation of a handler's GenerateAndSendBulkTransactions method +func (f *Facade) GenerateAndSendBulkTransactions(destination string, value *big.Int, nrTransactions uint64) error { + return f.GenerateAndSendBulkTransactionsHandler(destination, value, nrTransactions) +} + +// WrongFacade is a struct that can be used as a wrong implementation of the node router handler type WrongFacade struct { } diff --git a/api/transaction/routes.go b/api/transaction/routes.go index b70a869860f..b2c25ef6edb 100644 --- a/api/transaction/routes.go +++ b/api/transaction/routes.go @@ -1,18 +1,22 @@ package transaction import ( + "encoding/hex" + "fmt" "math/big" "net/http" + "github.com/ElrondNetwork/elrond-go-sandbox/api/errors" "github.com/ElrondNetwork/elrond-go-sandbox/data/transaction" "github.com/gin-gonic/gin" ) // TxService interface defines methods that can be used from `elrondFacade` context variable type TxService interface { - GenerateTransaction(sender string, receiver string, value big.Int, code string) (*transaction.Transaction, error) - SendTransaction(nonce uint64, sender string, receiver string, value big.Int, code string, signature string) (*transaction.Transaction, error) + GenerateTransaction(sender string, receiver string, value *big.Int, code string) (*transaction.Transaction, error) + SendTransaction(nonce uint64, sender string, receiver string, value *big.Int, code string, signature []byte) (*transaction.Transaction, error) GetTransaction(hash string) (*transaction.Transaction, error) + GenerateAndSendBulkTransactions(string, *big.Int, uint64) error } // TxRequest represents the structure on which user input for generating a new transaction will validate against @@ -24,14 +28,15 @@ type TxRequest struct { //SecretKey string `form:"sk" json:"sk" binding:"skValidator"` } -// SendTxRequest represents the structure that maps and validates user input for publishing a new transaction -type SendTxRequest struct { - TxRequest - Signature []byte `form:"signature" json:"signature"` +// MultipleTxRequest represents the structure on which user input for generating a bulk of transactions will validate against +type MultipleTxRequest struct { + Receiver string `form:"receiver" json:"receiver"` + Value *big.Int `form:"value" json:"value"` + TxCount int `form:"txCount" json:"txCount"` } -//TxResponse represents the structure on which the response will be validated against -type TxResponse struct { +// SendTxRequest represents the structure that maps and validates user input for publishing a new transaction +type SendTxRequest struct { Sender string `form:"sender" json:"sender"` Receiver string `form:"receiver" json:"receiver"` Value *big.Int `form:"value" json:"value"` @@ -43,9 +48,15 @@ type TxResponse struct { Challenge string `form:"challenge" json:"challenge"` } +//TxResponse represents the structure on which the response will be validated against +type TxResponse struct { + SendTxRequest +} + // Routes defines transaction related routes func Routes(router *gin.RouterGroup) { router.POST("/generate", GenerateTransaction) + router.POST("/generate-and-send-multiple", GenerateAndSendBulkTransactions) router.POST("/send", SendTransaction) router.GET("/:txhash", GetTransaction) } @@ -54,20 +65,20 @@ func Routes(router *gin.RouterGroup) { func GenerateTransaction(c *gin.Context) { ef, ok := c.MustGet("elrondFacade").(TxService) if !ok { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Invalid app context"}) + c.JSON(http.StatusInternalServerError, gin.H{"error": errors.ErrInvalidAppContext.Error()}) return } - var gtx = SendTxRequest{} + var gtx = TxRequest{} err := c.ShouldBindJSON(>x) if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "Validation error: " + err.Error()}) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("%s: %s", errors.ErrValidation.Error(), err.Error())}) return } - tx, err := ef.GenerateTransaction(gtx.Sender, gtx.Receiver, *gtx.Value, gtx.Data) + tx, err := ef.GenerateTransaction(gtx.Sender, gtx.Receiver, gtx.Value, gtx.Data) if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Transaction generation failed: " + err.Error()}) + c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("%s: %s", errors.ErrTxGenerationFailed.Error(), err.Error())}) return } @@ -78,49 +89,79 @@ func GenerateTransaction(c *gin.Context) { func SendTransaction(c *gin.Context) { ef, ok := c.MustGet("elrondFacade").(TxService) if !ok { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Invalid app context"}) + c.JSON(http.StatusInternalServerError, gin.H{"error": errors.ErrInvalidAppContext.Error()}) return } - var gtx = TxRequest{} + var gtx = SendTxRequest{} err := c.ShouldBindJSON(>x) if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "Validation error: " + err.Error()}) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("%s: %s", errors.ErrValidation.Error(), err.Error())}) + return + } + + signature, err := hex.DecodeString(gtx.Signature) + if err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("%s: %s", errors.ErrInvalidSignatureHex.Error(), err.Error())}) return } - tx, err := ef.GenerateTransaction(gtx.Sender, gtx.Receiver, *gtx.Value, gtx.Data) + tx, err := ef.SendTransaction(gtx.Nonce, gtx.Sender, gtx.Receiver, gtx.Value, gtx.Data, signature) if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Transaction generation failed: " + err.Error()}) + c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("%s: %s", errors.ErrTxGenerationFailed.Error(), err.Error())}) return } c.JSON(http.StatusOK, gin.H{"transaction": txResponseFromTransaction(tx)}) } +// GenerateAndSendBulkTransactions generates multipleTransactions +func GenerateAndSendBulkTransactions(c *gin.Context) { + ef, ok := c.MustGet("elrondFacade").(TxService) + if !ok { + c.JSON(http.StatusInternalServerError, gin.H{"error": errors.ErrInvalidAppContext.Error()}) + return + } + + var gtx = MultipleTxRequest{} + err := c.ShouldBindJSON(>x) + if err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("%s: %s", errors.ErrValidation.Error(), err.Error())}) + return + } + + err = ef.GenerateAndSendBulkTransactions(gtx.Receiver, gtx.Value, uint64(gtx.TxCount)) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("%s: %s", errors.ErrMultipleTxGenerationFailed.Error(), err.Error())}) + return + } + + c.JSON(http.StatusOK, gin.H{"message": fmt.Sprintf("%d", gtx.TxCount)}) +} + // GetTransaction returns transaction details for a given txhash func GetTransaction(c *gin.Context) { ef, ok := c.MustGet("elrondFacade").(TxService) if !ok { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Invalid app context"}) + c.JSON(http.StatusInternalServerError, gin.H{"error": errors.ErrInvalidAppContext.Error()}) return } txhash := c.Param("txhash") if txhash == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "TxHash is empty"}) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("%s: %s", errors.ErrValidation.Error(), errors.ErrValidationEmptyTxHash.Error())}) return } tx, err := ef.GetTransaction(txhash) if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Transaction getting failed"}) + c.JSON(http.StatusInternalServerError, gin.H{"error": errors.ErrGetTransaction.Error()}) return } if tx == nil { - c.JSON(http.StatusNotFound, gin.H{"error": "Transaction was not found"}) + c.JSON(http.StatusNotFound, gin.H{"error": errors.ErrTxNotFound.Error()}) return } @@ -130,12 +171,12 @@ func GetTransaction(c *gin.Context) { func txResponseFromTransaction(tx *transaction.Transaction) TxResponse { response := TxResponse{} response.Nonce = tx.Nonce - response.Sender = string(tx.SndAddr) - response.Receiver = string(tx.RcvAddr) + response.Sender = hex.EncodeToString(tx.SndAddr) + response.Receiver = hex.EncodeToString(tx.RcvAddr) response.Data = string(tx.Data) - response.Signature = string(tx.Signature) + response.Signature = hex.EncodeToString(tx.Signature) response.Challenge = string(tx.Challenge) - response.Value = &tx.Value + response.Value = tx.Value response.GasLimit = big.NewInt(int64(tx.GasLimit)) response.GasPrice = big.NewInt(int64(tx.GasPrice)) diff --git a/api/transaction/routes_test.go b/api/transaction/routes_test.go index 83d63193e01..56f19d14d4a 100644 --- a/api/transaction/routes_test.go +++ b/api/transaction/routes_test.go @@ -2,6 +2,7 @@ package transaction_test import ( "bytes" + "encoding/hex" "encoding/json" "fmt" "io" @@ -10,12 +11,14 @@ import ( "net/http/httptest" "testing" + errors2 "github.com/ElrondNetwork/elrond-go-sandbox/api/errors" "github.com/ElrondNetwork/elrond-go-sandbox/api/middleware" "github.com/ElrondNetwork/elrond-go-sandbox/api/transaction" "github.com/ElrondNetwork/elrond-go-sandbox/api/transaction/mock" tr "github.com/ElrondNetwork/elrond-go-sandbox/data/transaction" "github.com/gin-contrib/cors" "github.com/gin-gonic/gin" + "github.com/pkg/errors" "github.com/stretchr/testify/assert" ) @@ -37,7 +40,7 @@ func TestGenerateTransaction_WithParametersShouldReturnTransaction(t *testing.T) data := "data" facade := mock.Facade{ - GenerateTransactionHandler: func(sender string, receiver string, value big.Int, code string) (transaction *tr.Transaction, e error) { + GenerateTransactionHandler: func(sender string, receiver string, value *big.Int, code string) (transaction *tr.Transaction, e error) { return &tr.Transaction{ SndAddr: []byte(sender), RcvAddr: []byte(receiver), @@ -67,12 +70,45 @@ func TestGenerateTransaction_WithParametersShouldReturnTransaction(t *testing.T) assert.Equal(t, http.StatusOK, resp.Code) assert.Equal(t, "", transactionResponse.Error) - assert.Equal(t, sender, txResp.Sender) - assert.Equal(t, receiver, txResp.Receiver) + assert.Equal(t, hex.EncodeToString([]byte(sender)), txResp.Sender) + assert.Equal(t, hex.EncodeToString([]byte(receiver)), txResp.Receiver) assert.Equal(t, value, txResp.Value) assert.Equal(t, data, txResp.Data) } +func TestGenerateAndSendMultipleTransaction_WithParametersShouldReturnNoError(t *testing.T) { + t.Parallel() + receiver := "multipleReceiver" + value := big.NewInt(5) + txCount := 10 + + facade := mock.Facade{ + GenerateAndSendBulkTransactionsHandler: func(receiver string, value *big.Int, + txCount uint64) error { + return nil + }, + } + + ws := startNodeServer(&facade) + + jsonStr := fmt.Sprintf( + `{"receiver":"%s",`+ + `"value":%s,`+ + `"txCount":%d}`, receiver, value, txCount) + + req, _ := http.NewRequest("POST", "/transaction/generate-and-send-multiple", bytes.NewBuffer([]byte(jsonStr))) + + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + multipleTransactionResponse := GeneralResponse{} + loadResponse(resp.Body, &multipleTransactionResponse) + + assert.Equal(t, http.StatusOK, resp.Code) + assert.Equal(t, "", multipleTransactionResponse.Error) + assert.Equal(t, fmt.Sprintf("%d", txCount), multipleTransactionResponse.Message) +} + func TestGetTransaction_WithCorrectHashShouldReturnTransaction(t *testing.T) { sender := "sender" receiver := "receiver" @@ -85,7 +121,7 @@ func TestGetTransaction_WithCorrectHashShouldReturnTransaction(t *testing.T) { SndAddr: []byte(sender), RcvAddr: []byte(receiver), Data: []byte(data), - Value: *value, + Value: value, }, nil }, } @@ -101,8 +137,8 @@ func TestGetTransaction_WithCorrectHashShouldReturnTransaction(t *testing.T) { txResp := transactionResponse.TxResp assert.Equal(t, http.StatusOK, resp.Code) - assert.Equal(t, sender, txResp.Sender) - assert.Equal(t, receiver, txResp.Receiver) + assert.Equal(t, hex.EncodeToString([]byte(sender)), txResp.Sender) + assert.Equal(t, hex.EncodeToString([]byte(receiver)), txResp.Receiver) assert.Equal(t, value, txResp.Value) assert.Equal(t, data, txResp.Data) } @@ -123,7 +159,7 @@ func TestGetTransaction_WithUnknownHashShouldReturnNil(t *testing.T) { SndAddr: []byte(sender), RcvAddr: []byte(receiver), Data: []byte(data), - Value: *value, + Value: value, }, nil }, } @@ -158,7 +194,28 @@ func TestGenerateTransaction_WithBadJsonShouldReturnBadRequest(t *testing.T) { loadResponse(resp.Body, &transactionResponse) assert.Equal(t, http.StatusBadRequest, resp.Code) - assert.Contains(t, transactionResponse.Error, "Validation error: ") + assert.Contains(t, transactionResponse.Error, errors2.ErrValidation.Error()) +} + +func TestGenerateAndSendMultipleTransaction_WithBadJsonShouldReturnBadRequest(t *testing.T) { + t.Parallel() + + facade := mock.Facade{} + + ws := startNodeServer(&facade) + + badJsonString := "bad" + + req, _ := http.NewRequest("POST", "/transaction/generate-and-send-multiple", bytes.NewBuffer([]byte(badJsonString))) + + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + transactionResponse := GeneralResponse{} + loadResponse(resp.Body, &transactionResponse) + + assert.Equal(t, http.StatusBadRequest, resp.Code) + assert.Contains(t, transactionResponse.Error, errors2.ErrValidation.Error()) } func TestGetTransaction_FailsWithWrongFacadeTypeConversion(t *testing.T) { @@ -172,7 +229,7 @@ func TestGetTransaction_FailsWithWrongFacadeTypeConversion(t *testing.T) { transactionResponse := TransactionResponse{} loadResponse(resp.Body, &transactionResponse) assert.Equal(t, resp.Code, http.StatusInternalServerError) - assert.Equal(t, transactionResponse.Error, "Invalid app context") + assert.Equal(t, transactionResponse.Error, errors2.ErrInvalidAppContext.Error()) } func TestGenerateTransaction_WithBadJsonShouldReturnInternalServerError(t *testing.T) { @@ -191,7 +248,217 @@ func TestGenerateTransaction_WithBadJsonShouldReturnInternalServerError(t *testi loadResponse(resp.Body, &transactionResponse) assert.Equal(t, http.StatusInternalServerError, resp.Code) - assert.Contains(t, transactionResponse.Error, "Invalid app context") + assert.Contains(t, transactionResponse.Error, errors2.ErrInvalidAppContext.Error()) +} + +func TestGenerateAndSendMultipleTransaction_WithBadJsonShouldReturnInternalServerError(t *testing.T) { + t.Parallel() + + ws := startNodeServerWrongFacade() + + badJsonString := "bad" + + req, _ := http.NewRequest("POST", "/transaction/generate-and-send-multiple", bytes.NewBuffer([]byte(badJsonString))) + + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + transactionResponse := TransactionResponse{} + loadResponse(resp.Body, &transactionResponse) + + assert.Equal(t, http.StatusInternalServerError, resp.Code) + assert.Contains(t, transactionResponse.Error, errors2.ErrInvalidAppContext.Error()) +} + +func TestGenerateTransaction_ErrorsWhenFacadeGenerateTransactionFails(t *testing.T) { + t.Parallel() + sender := "sender" + receiver := "receiver" + value := big.NewInt(10) + data := "data" + + errorString := "generate transaction error" + facade := mock.Facade{ + GenerateTransactionHandler: func(sender string, receiver string, value *big.Int, code string) (transaction *tr.Transaction, e error) { + return nil, errors.New(errorString) + }, + } + ws := startNodeServer(&facade) + + jsonStr := fmt.Sprintf( + `{"sender":"%s",`+ + `"receiver":"%s",`+ + `"value":%s,`+ + `"data":"%s"}`, sender, receiver, value, data) + + req, _ := http.NewRequest("POST", "/transaction/generate", bytes.NewBuffer([]byte(jsonStr))) + + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + transactionResponse := TransactionResponse{} + loadResponse(resp.Body, &transactionResponse) + + assert.Equal(t, http.StatusInternalServerError, resp.Code) + assert.Equal(t, fmt.Sprintf("%s: %s", errors2.ErrTxGenerationFailed.Error(), errorString), transactionResponse.Error) + assert.Empty(t, transactionResponse.TxResp) +} + +func TestSendTransaction_ErrorWithWrongFacade(t *testing.T) { + t.Parallel() + + ws := startNodeServerWrongFacade() + req, _ := http.NewRequest("POST", "/transaction/send", nil) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + transactionResponse := TransactionResponse{} + loadResponse(resp.Body, &transactionResponse) + assert.Equal(t, resp.Code, http.StatusInternalServerError) + assert.Equal(t, transactionResponse.Error, errors2.ErrInvalidAppContext.Error()) +} + +func TestSendTransaction_WrongParametersShouldErrorOnValidation(t *testing.T) { + t.Parallel() + sender := "sender" + receiver := "receiver" + value := "ishouldbeint" + data := "data" + + facade := mock.Facade{} + ws := startNodeServer(&facade) + + jsonStr := fmt.Sprintf( + `{"sender":"%s",`+ + `"receiver":"%s",`+ + `"value":%s,`+ + `"data":"%s"}`, sender, receiver, value, data) + + req, _ := http.NewRequest("POST", "/transaction/send", bytes.NewBuffer([]byte(jsonStr))) + + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + transactionResponse := TransactionResponse{} + loadResponse(resp.Body, &transactionResponse) + + assert.Equal(t, http.StatusBadRequest, resp.Code) + assert.Contains(t, transactionResponse.Error, errors2.ErrValidation.Error()) + assert.Empty(t, transactionResponse.TxResp) +} + +func TestSendTransaction_InvalidHexSignatureShouldError(t *testing.T) { + t.Parallel() + sender := "sender" + receiver := "receiver" + value := big.NewInt(10) + data := "data" + signature := "not#only$hex%characters^" + + facade := mock.Facade{} + ws := startNodeServer(&facade) + + jsonStr := fmt.Sprintf( + `{"sender":"%s",`+ + `"receiver":"%s",`+ + `"value":%s,`+ + `"signature":"%s",`+ + `"data":"%s"}`, sender, receiver, value, signature, data) + + req, _ := http.NewRequest("POST", "/transaction/send", bytes.NewBuffer([]byte(jsonStr))) + + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + transactionResponse := TransactionResponse{} + loadResponse(resp.Body, &transactionResponse) + + assert.Equal(t, http.StatusBadRequest, resp.Code) + assert.Contains(t, transactionResponse.Error, errors2.ErrInvalidSignatureHex.Error()) + assert.Empty(t, transactionResponse.TxResp) +} + +func TestSendTransaction_ErrorWhenFacadeSendTransactionError(t *testing.T) { + t.Parallel() + sender := "sender" + receiver := "receiver" + value := big.NewInt(10) + data := "data" + signature := "aabbccdd" + errorString := "send transaction error" + + facade := mock.Facade{ + SendTransactionHandler: func(nonce uint64, sender string, receiver string, value *big.Int, + code string, signature []byte) (transaction *tr.Transaction, e error) { + return nil, errors.New(errorString) + }, + } + ws := startNodeServer(&facade) + + jsonStr := fmt.Sprintf( + `{"sender":"%s",`+ + `"receiver":"%s",`+ + `"value":%s,`+ + `"signature":"%s",`+ + `"data":"%s"}`, sender, receiver, value, signature, data) + + req, _ := http.NewRequest("POST", "/transaction/send", bytes.NewBuffer([]byte(jsonStr))) + + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + transactionResponse := TransactionResponse{} + loadResponse(resp.Body, &transactionResponse) + + assert.Equal(t, http.StatusInternalServerError, resp.Code) + assert.Contains(t, transactionResponse.Error, errorString) + assert.Empty(t, transactionResponse.TxResp) +} + +func TestSendTransaction_ReturnsSuccessfully(t *testing.T) { + t.Parallel() + nonce := uint64(1) + sender := "sender" + receiver := "receiver" + value := big.NewInt(10) + data := "data" + signature := "aabbccdd" + + facade := mock.Facade{ + SendTransactionHandler: func(nonce uint64, sender string, receiver string, value *big.Int, + code string, signature []byte) (transaction *tr.Transaction, e error) { + return &tr.Transaction{ + Nonce: nonce, + SndAddr: []byte(sender), + RcvAddr: []byte(receiver), + Value: value, + Data: []byte(code), + Signature: signature, + }, nil + }, + } + ws := startNodeServer(&facade) + + jsonStr := fmt.Sprintf(`{ + "nonce": %d, + "sender": "%s", + "receiver": "%s", + "value": %s, + "signature": "%s", + "data": "%s" + }`, nonce, sender, receiver, value, signature, data) + + req, _ := http.NewRequest("POST", "/transaction/send", bytes.NewBuffer([]byte(jsonStr))) + + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + transactionResponse := TransactionResponse{} + loadResponse(resp.Body, &transactionResponse) + + assert.Equal(t, http.StatusOK, resp.Code) + assert.Empty(t, transactionResponse.Error) + assert.Equal(t, transactionResponse.TxResp.Nonce, nonce) } func loadResponse(rsp io.Reader, destination interface{}) { diff --git a/api_deprecated/api.go b/api_deprecated/api.go deleted file mode 100644 index 74d4ae32c07..00000000000 --- a/api_deprecated/api.go +++ /dev/null @@ -1,23 +0,0 @@ -package main - -import ( - "github.com/ElrondNetwork/elrond-go-sandbox/api_deprecated/node" - "github.com/gin-gonic/gin" - "github.com/spf13/viper" -) - -func main() { - r := gin.Default() - - node.CORSMiddleware(r) - node.Routes(r.Group("/node")) - - viper.SetDefault("address", "127.0.0.1") - viper.SetDefault("port", "8080") - viper.SetConfigName("web-server") - viper.SetConfigType("json") - viper.AddConfigPath(".") - viper.ReadInConfig() - - r.Run(viper.GetString("address") + ":" + viper.GetString("port")) -} diff --git a/api_deprecated/api_test.go b/api_deprecated/api_test.go deleted file mode 100644 index 6ff9ec15f26..00000000000 --- a/api_deprecated/api_test.go +++ /dev/null @@ -1,229 +0,0 @@ -package main - -import ( - "net/http" - "net/http/httptest" - "testing" - - "github.com/ElrondNetwork/elrond-go-sandbox/api_deprecated/node" - "github.com/gin-gonic/gin" - "gotest.tools/assert" -) - -func TestAppStatusRoute(t *testing.T) { - router := gin.Default() - - node.Routes(router.Group("/node")) - - w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", "/node/appstatus", nil) - router.ServeHTTP(w, req) - - assert.Equal(t, 200, w.Code) -} - -func TestBalanceRoute(t *testing.T) { - router := gin.Default() - - node.Routes(router.Group("/node")) - - w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", "/node/balance", nil) - router.ServeHTTP(w, req) - - assert.Equal(t, 200, w.Code) -} - -func TestCheckFreePortRoute(t *testing.T) { - router := gin.Default() - - node.Routes(router.Group("/node")) - - w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", "/node/checkfreeport", nil) - router.ServeHTTP(w, req) - - assert.Equal(t, 200, w.Code) -} - -func TestExitRoute(t *testing.T) { - router := gin.Default() - - node.Routes(router.Group("/node")) - - w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", "/node/exit", nil) - router.ServeHTTP(w, req) - - assert.Equal(t, 200, w.Code) -} -func TestGenerateKeyRoute(t *testing.T) { - router := gin.Default() - - node.Routes(router.Group("/node")) - - w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", "/node/generatepublickeyandprivateKey", nil) - router.ServeHTTP(w, req) - - assert.Equal(t, 200, w.Code) -} -func TestGetBlockFromHashRoute(t *testing.T) { - router := gin.Default() - - node.Routes(router.Group("/node")) - - w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", "/node/getblockfromhash", nil) - router.ServeHTTP(w, req) - - assert.Equal(t, 200, w.Code) -} -func TestGetNextPrivateKeyRoute(t *testing.T) { - router := gin.Default() - - node.Routes(router.Group("/node")) - - w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", "/node/getNextPrivateKey", nil) - router.ServeHTTP(w, req) - - assert.Equal(t, 200, w.Code) -} -func TestGetShardRoute(t *testing.T) { - router := gin.Default() - - node.Routes(router.Group("/node")) - - w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", "/node/getprivatepublickeyshard", nil) - router.ServeHTTP(w, req) - - assert.Equal(t, 200, w.Code) -} -func TestGetStatsRoute(t *testing.T) { - router := gin.Default() - - node.Routes(router.Group("/node")) - - w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", "/node/getStats", nil) - router.ServeHTTP(w, req) - - assert.Equal(t, 200, w.Code) -} -func TestGetTransactionFromHashRoute(t *testing.T) { - router := gin.Default() - - node.Routes(router.Group("/node")) - - w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", "/node/gettransactionfromhash", nil) - router.ServeHTTP(w, req) - - assert.Equal(t, 200, w.Code) -} -func TestPingRoute(t *testing.T) { - router := gin.Default() - - node.Routes(router.Group("/node")) - - w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", "/node/ping", nil) - router.ServeHTTP(w, req) - - assert.Equal(t, 200, w.Code) -} -func TestReceiptRoute(t *testing.T) { - router := gin.Default() - - node.Routes(router.Group("/node")) - - w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", "/node/receipt", nil) - router.ServeHTTP(w, req) - - assert.Equal(t, 200, w.Code) -} -func TestSendRoute(t *testing.T) { - router := gin.Default() - - node.Routes(router.Group("/node")) - - w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", "/node/send", nil) - router.ServeHTTP(w, req) - - assert.Equal(t, 200, w.Code) -} -func TestSendMultipleTransactionsRoute(t *testing.T) { - router := gin.Default() - - node.Routes(router.Group("/node")) - - w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", "/node/sendMultipleTransactions", nil) - router.ServeHTTP(w, req) - - assert.Equal(t, 200, w.Code) -} - -func TestSendMultipleTransactionsToAllShardsRoute(t *testing.T) { - router := gin.Default() - - node.Routes(router.Group("/node")) - - w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", "/node/sendMultipleTransactionsToAllShards", nil) - router.ServeHTTP(w, req) - - assert.Equal(t, 200, w.Code) -} - -func TestShardOfAddressRoute(t *testing.T) { - router := gin.Default() - - node.Routes(router.Group("/node")) - - w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", "/node/shardofaddress", nil) - router.ServeHTTP(w, req) - - assert.Equal(t, 200, w.Code) -} - -func TestStartRoute(t *testing.T) { - router := gin.Default() - - node.Routes(router.Group("/node")) - - w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", "/node/start", nil) - router.ServeHTTP(w, req) - - assert.Equal(t, 200, w.Code) -} - -func TestStatusRoute(t *testing.T) { - router := gin.Default() - - node.Routes(router.Group("/node")) - - w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", "/node/status", nil) - router.ServeHTTP(w, req) - - assert.Equal(t, 200, w.Code) -} - -func TestStopRoute(t *testing.T) { - router := gin.Default() - - node.Routes(router.Group("/node")) - - w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", "/node/stop", nil) - router.ServeHTTP(w, req) - - assert.Equal(t, 200, w.Code) -} diff --git a/api_deprecated/facade.go b/api_deprecated/facade.go deleted file mode 100644 index 93443f64161..00000000000 --- a/api_deprecated/facade.go +++ /dev/null @@ -1,25 +0,0 @@ -package main - -import "github.com/gin-gonic/gin" - -type Handler interface { - AppStatus(*gin.Context) - Balance(*gin.Context) - CheckFreePort(*gin.Context) - Exit(*gin.Context) - GenerateKeys(*gin.Context) - GetBlockFromHash(*gin.Context) - GetNextPrivateKey(*gin.Context) - GetShard(*gin.Context) - GetStats(*gin.Context) - GetTransactionFromHash(*gin.Context) - Ping(*gin.Context) - Receipt(*gin.Context) - Send(*gin.Context) - SendMultipleTransactions(*gin.Context) - SendMultipleTransactionsToAllShards(*gin.Context) - ShardOfAddress(*gin.Context) - Start(*gin.Context) - Status(*gin.Context) - Stop(*gin.Context) -} diff --git a/api_deprecated/node/handlers.go b/api_deprecated/node/handlers.go deleted file mode 100644 index d862efee59f..00000000000 --- a/api_deprecated/node/handlers.go +++ /dev/null @@ -1,108 +0,0 @@ -package node - -import "github.com/gin-gonic/gin" - -type Api struct{} - -func (Api) AppStatus(c *gin.Context) { - c.JSON(200, gin.H{"ok": "/appstatus"}) -} - -func (Api) Balance(c *gin.Context) { - addr := c.Query("address") - c.JSON(200, gin.H{"ok": "/balance/" + addr}) -} - -func (Api) CheckFreePort(c *gin.Context) { - ip := c.Query("ipAddress") - port := c.Query("port") - c.JSON(200, gin.H{"ok": "/checkfreeport/" + ip + " " + port}) -} - -func (Api) Exit(c *gin.Context) { - c.JSON(200, gin.H{"ok": "/exit"}) -} - -func (Api) GenerateKeys(c *gin.Context) { - privateKey := c.Query("privateKey") - c.JSON(200, gin.H{"ok": "/generatepublickeyandprivateKey/" + privateKey}) -} - -func (Api) GetBlockFromHash(c *gin.Context) { - blockHash := c.Query("blockHash") - c.JSON(200, gin.H{"ok": "/getblockfromhash/" + blockHash}) -} - -func (Api) GetNextPrivateKey(c *gin.Context) { - c.JSON(200, gin.H{"ok": "/getNextPrivateKey"}) -} - -func (Api) GetShard(c *gin.Context) { - c.JSON(200, gin.H{"ok": "/getprivatepublickeyshard"}) -} - -func (Api) GetStats(c *gin.Context) { - c.JSON(200, gin.H{"ok": "/getStats"}) -} - -func (Api) GetTransactionFromHash(c *gin.Context) { - transactionHash := c.Query("transactionHash") - c.JSON(200, gin.H{"ok": "/gettransactionfromhash/" + transactionHash}) -} - -func (Api) Ping(c *gin.Context) { - ip := c.Query("ipAddress") - port := c.Query("port") - c.JSON(200, gin.H{"ok": "/ping/" + ip + " " + port}) - -} - -func (Api) Receipt(c *gin.Context) { - transactionHash := c.Query("transactionHash") - c.JSON(200, gin.H{"ok": "/receipt/" + transactionHash}) -} - -func (Api) Send(c *gin.Context) { - adderss := c.Query("address") - value := c.Query("value") - c.JSON(200, gin.H{"ok": "/send/" + adderss + " " + value}) - -} - -func (Api) SendMultipleTransactions(c *gin.Context) { - adderss := c.Query("address") - value := c.Query("value") - nrTransactions := c.Query("nrTransactions") - c.JSON(200, gin.H{"ok": "/sendMultipleTransactions/" + adderss + " " + value + " " + nrTransactions}) - -} - -func (Api) SendMultipleTransactionsToAllShards(c *gin.Context) { - value := c.Query("value") - nrTransactions := c.Query("nrTransactions") - c.JSON(200, gin.H{"ok": "/sendMultipleTransactionsToAllShards/" + value + " " + nrTransactions}) -} - -func (Api) ShardOfAddress(c *gin.Context) { - addr := c.Query("address") - c.JSON(200, gin.H{"ok": "/shardofaddress/" + addr}) -} - -func (Api) Start(c *gin.Context) { - nodeName := c.Query("nodeName") - port := c.Query("port") - masterPeerPort := c.Query("masterPeerPort") - masterPeerIpAddress := c.Query("masterPeerIpAddress") - privateKey := c.Query("privateKey") - mintValue := c.Query("mintValue") - bootstrapType := c.Query("bootstrapType") - c.JSON(200, gin.H{"ok": "/start/" + nodeName + " " + port + " " + masterPeerPort + " " + masterPeerIpAddress + " " + privateKey + " " + mintValue + " " + bootstrapType}) -} - -func (Api) Status(c *gin.Context) { - c.JSON(200, gin.H{"ok": "/status"}) -} - -func (Api) Stop(c *gin.Context) { - c.JSON(200, gin.H{"ok": "/stop"}) -} diff --git a/api_deprecated/node/middleware.go b/api_deprecated/node/middleware.go deleted file mode 100644 index 51f1b8e6115..00000000000 --- a/api_deprecated/node/middleware.go +++ /dev/null @@ -1,10 +0,0 @@ -package node - -import ( - "github.com/gin-contrib/cors" - "github.com/gin-gonic/gin" -) - -func CORSMiddleware(router *gin.Engine) { - router.Use(cors.Default()) -} diff --git a/api_deprecated/node/routes.go b/api_deprecated/node/routes.go deleted file mode 100644 index e39c8abf08d..00000000000 --- a/api_deprecated/node/routes.go +++ /dev/null @@ -1,27 +0,0 @@ -package node - -import "github.com/gin-gonic/gin" - -var api Api - -func Routes(router *gin.RouterGroup) { - router.GET("/appstatus", api.AppStatus) - router.GET("/balance", api.Balance) - router.GET("/checkfreeport", api.CheckFreePort) - router.GET("/exit", api.Exit) - router.GET("/generatepublickeyandprivateKey", api.GenerateKeys) - router.GET("/getblockfromhash", api.GetBlockFromHash) - router.GET("/getNextPrivateKey", api.GetNextPrivateKey) - router.GET("/getprivatepublickeyshard", api.GetShard) - router.GET("/getStats", api.GetStats) - router.GET("/gettransactionfromhash", api.GetTransactionFromHash) - router.GET("/ping", api.Ping) - router.GET("/receipt", api.Receipt) - router.GET("/send", api.Send) - router.GET("/sendMultipleTransactions", api.SendMultipleTransactions) - router.GET("/sendMultipleTransactionsToAllShards", api.SendMultipleTransactionsToAllShards) - router.GET("/shardofaddress", api.ShardOfAddress) - router.GET("/start", api.Start) - router.GET("/status", api.Status) - router.GET("/stop", api.Stop) -} diff --git a/chronology/chronology.go b/chronology/chronology.go index 3746c228abb..e830b457a79 100644 --- a/chronology/chronology.go +++ b/chronology/chronology.go @@ -2,7 +2,6 @@ package chronology import ( "fmt" - "sync" "time" "github.com/ElrondNetwork/elrond-go-sandbox/chronology/ntp" @@ -43,8 +42,6 @@ type Chronology struct { subrounds map[SubroundId]int syncTime ntp.SyncTimer - - mut sync.RWMutex } // NewChronology defines a new Chr object @@ -72,7 +69,7 @@ func NewChronology( return &chr } -// initRound is called when a new round begins and do the necesary initialization +// initRound is called when a new round begins and do the necessary initialization func (chr *Chronology) initRound() { chr.SetSelfSubround(-1) @@ -103,18 +100,34 @@ func (chr *Chronology) StartRounds() { // StartRound calls the current subround, given by the current time or by the finished tasks in this round func (chr *Chronology) StartRound() { - subRound := chr.updateRound() + subRoundId := chr.updateRound() - if chr.SelfSubround() == subRound { - sr := chr.LoadSubroundHandler(subRound) - if sr != nil { - if chr.Round().Index() >= 0 { - if sr.DoWork(chr.ComputeSubRoundId, chr.IsCancelled) { - chr.SetSelfSubround(sr.Next()) - } - } - } + chr.updateSelfSubroundIfNeeded(subRoundId) +} + +func (chr *Chronology) updateSelfSubroundIfNeeded(subRoundId SubroundId) { + if chr.SelfSubround() != subRoundId { + return } + + sr := chr.LoadSubroundHandler(subRoundId) + if sr == nil { + return + } + + if chr.Round().Index() < 0 { + return + } + + if !sr.DoWork(chr.ComputeSubRoundId, chr.IsCancelled) { + return + } + + if chr.IsCancelled() { + return + } + + chr.SetSelfSubround(sr.Next()) } // updateRound updates Rounds and subrounds inside round depending of the current time and sync mode @@ -132,6 +145,7 @@ func (chr *Chronology) updateRound() SubroundId { chr.SyncTime().FormattedCurrentTime(chr.ClockOffset()), chr.round.index, chr.SyncTime().CurrentTime(chr.ClockOffset()).Unix())) + chr.initRound() } @@ -182,6 +196,11 @@ func (chr *Chronology) GetSubroundFromDateTime(timeStamp time.Time) SubroundId { return -1 } +// RoundTimeStamp method returns time stamp of a round from a given index +func (chr *Chronology) RoundTimeStamp(index int32) uint64 { + return uint64(chr.genesisTime.Add(time.Duration(int64(index) * int64(chr.round.timeDuration))).Unix()) +} + // Round returns the current round object func (chr *Chronology) Round() *Round { return chr.round @@ -189,17 +208,11 @@ func (chr *Chronology) Round() *Round { // SelfSubround returns the subround, related to the finished tasks in the current round func (chr *Chronology) SelfSubround() SubroundId { - chr.mut.RLock() - defer chr.mut.RUnlock() - return chr.selfSubround } // SetSelfSubround set self subround depending of the finished tasks in the current round func (chr *Chronology) SetSelfSubround(subRound SubroundId) { - chr.mut.Lock() - defer chr.mut.Unlock() - chr.selfSubround = subRound } diff --git a/chronology/chronology_test.go b/chronology/chronology_test.go index b6d1dde31e9..4f6bc8b765f 100644 --- a/chronology/chronology_test.go +++ b/chronology/chronology_test.go @@ -6,6 +6,7 @@ import ( "time" "github.com/ElrondNetwork/elrond-go-sandbox/chronology" + "github.com/ElrondNetwork/elrond-go-sandbox/chronology/mock" "github.com/ElrondNetwork/elrond-go-sandbox/chronology/ntp" "github.com/davecgh/go-spew/spew" "github.com/stretchr/testify/assert" @@ -284,7 +285,7 @@ func TestRoundState(t *testing.T) { currentTime := time.Now() rnd := chronology.NewRound(currentTime, currentTime, roundTimeDuration) - chr := chronology.NewChronology( true, rnd, currentTime, ntp.NewSyncTime(roundTimeDuration, nil)) + chr := chronology.NewChronology(true, rnd, currentTime, ntp.NewSyncTime(roundTimeDuration, nil)) state := chr.GetSubroundFromDateTime(currentTime) assert.Equal(t, chronology.SubroundId(-1), state) @@ -339,3 +340,164 @@ func TestGettersAndSetters(t *testing.T) { spew.Dump(chr.SubroundHandlers()) } + +func TestRoundTimeStamp_ShouldReturnCorrectTimeStamp(t *testing.T) { + genesisTime := time.Now() + currentTime := genesisTime + + rnd := chronology.NewRound(genesisTime, currentTime, roundTimeDuration) + chr := chronology.NewChronology(true, rnd, genesisTime, ntp.NewSyncTime(roundTimeDuration, nil)) + + timeStamp := chr.RoundTimeStamp(2) + + assert.Equal(t, genesisTime.Add(time.Duration(2*rnd.TimeDuration())).Unix(), int64(timeStamp)) +} + +//------- UpdateSelfSubroundIfNeeded + +func TestChronology_UpdateSelfSubroundIfNeededShouldNotChangeForDifferentSubroundId(t *testing.T) { + round := chronology.NewRound(time.Unix(0, 0), time.Unix(0, 0), time.Duration(4)) + syncer := &mock.SyncTimeMock{ + CurrentTimeCalled: func(duration time.Duration) time.Time { + return time.Unix(-1, 0) + }, + } + + subRoundId := chronology.SubroundId(-5) + + chr := chronology.NewChronology(true, round, time.Unix(0, 0), syncer) + chr.SetSelfSubround(subRoundId) + chr.UpdateSelfSubroundIfNeeded(-4) + + assert.Equal(t, subRoundId, chr.SelfSubround()) +} + +func TestChronology_UpdateSelfSubroundIfNeededShouldNotChangeForNotFoundSubroundHandler(t *testing.T) { + round := chronology.NewRound(time.Unix(0, 0), time.Unix(0, 0), time.Duration(4)) + syncer := &mock.SyncTimeMock{ + CurrentTimeCalled: func(duration time.Duration) time.Time { + return time.Unix(-1, 0) + }, + } + + subRoundId := chronology.SubroundId(-5) + + chr := chronology.NewChronology(true, round, time.Unix(0, 0), syncer) + chr.SetSelfSubround(subRoundId) + chr.UpdateSelfSubroundIfNeeded(subRoundId) + + assert.Equal(t, subRoundId, chr.SelfSubround()) +} + +func createStubSubroundHandler(subroundId int) *mock.SubroundHandlerStub { + return &mock.SubroundHandlerStub{ + CurrentCalled: func() chronology.SubroundId { + return chronology.SubroundId(subroundId) + }, + } +} + +func TestChronology_UpdateSelfSubroundIfNeededShouldNotChangeForNegativeRound(t *testing.T) { + round := chronology.NewRound(time.Unix(0, 0), time.Unix(-1, 0), time.Duration(4)) + syncer := &mock.SyncTimeMock{ + CurrentTimeCalled: func(duration time.Duration) time.Time { + return time.Unix(-1, 0) + }, + } + + subRoundId := chronology.SubroundId(2) + + chr := chronology.NewChronology(true, round, time.Unix(0, 0), syncer) + //add 3 stubs for subroundHandler + chr.AddSubround(createStubSubroundHandler(0)) + chr.AddSubround(createStubSubroundHandler(1)) + chr.AddSubround(createStubSubroundHandler(2)) + + chr.SetSelfSubround(subRoundId) + chr.UpdateSelfSubroundIfNeeded(subRoundId) + + assert.Equal(t, subRoundId, chr.SelfSubround()) +} + +func TestChronology_UpdateSelfSubroundIfNeededShouldNotChangeForDoWorkReturningFalse(t *testing.T) { + round := chronology.NewRound(time.Unix(0, 0), time.Unix(0, 0), time.Duration(4)) + syncer := &mock.SyncTimeMock{ + CurrentTimeCalled: func(duration time.Duration) time.Time { + return time.Unix(-1, 0) + }, + } + + subRoundId := chronology.SubroundId(2) + + chr := chronology.NewChronology(true, round, time.Unix(0, 0), syncer) + //add 3 stubs for subroundHandler + chr.AddSubround(createStubSubroundHandler(0)) + chr.AddSubround(createStubSubroundHandler(1)) + crtSubroundHandler := createStubSubroundHandler(2) + crtSubroundHandler.DoWorkCalled = func(id func() chronology.SubroundId, i func() bool) bool { + return false + } + chr.AddSubround(crtSubroundHandler) + + chr.SetSelfSubround(subRoundId) + chr.UpdateSelfSubroundIfNeeded(subRoundId) + + assert.Equal(t, subRoundId, chr.SelfSubround()) +} + +func TestChronology_UpdateSelfSubroundIfNeededShouldNotChangeWhileWorkingRoundIsCancelled(t *testing.T) { + round := chronology.NewRound(time.Unix(0, 0), time.Unix(0, 0), time.Duration(4)) + syncer := &mock.SyncTimeMock{ + CurrentTimeCalled: func(duration time.Duration) time.Time { + return time.Unix(-1, 0) + }, + } + + subRoundId := chronology.SubroundId(2) + + chr := chronology.NewChronology(true, round, time.Unix(0, 0), syncer) + //add 3 stubs for subroundHandler + chr.AddSubround(createStubSubroundHandler(0)) + chr.AddSubround(createStubSubroundHandler(1)) + crtSubroundHandler := createStubSubroundHandler(2) + crtSubroundHandler.DoWorkCalled = func(id func() chronology.SubroundId, i func() bool) bool { + chr.SetSelfSubround(-1) + return true + } + chr.AddSubround(crtSubroundHandler) + + chr.SetSelfSubround(subRoundId) + chr.UpdateSelfSubroundIfNeeded(subRoundId) + + assert.Equal(t, chronology.SubroundId(-1), chr.SelfSubround()) +} + +func TestChronology_UpdateSelfSubroundIfNeededShouldShouldChange(t *testing.T) { + round := chronology.NewRound(time.Unix(0, 0), time.Unix(0, 0), time.Duration(4)) + syncer := &mock.SyncTimeMock{ + CurrentTimeCalled: func(duration time.Duration) time.Time { + return time.Unix(-1, 0) + }, + } + + subRoundId := chronology.SubroundId(2) + nextSubRoundId := chronology.SubroundId(100) + + chr := chronology.NewChronology(true, round, time.Unix(0, 0), syncer) + //add 3 stubs for subroundHandler + chr.AddSubround(createStubSubroundHandler(0)) + chr.AddSubround(createStubSubroundHandler(1)) + crtSubroundHandler := createStubSubroundHandler(2) + crtSubroundHandler.DoWorkCalled = func(id func() chronology.SubroundId, i func() bool) bool { + return true + } + crtSubroundHandler.NextCalled = func() chronology.SubroundId { + return nextSubRoundId + } + chr.AddSubround(crtSubroundHandler) + + chr.SetSelfSubround(subRoundId) + chr.UpdateSelfSubroundIfNeeded(subRoundId) + + assert.Equal(t, nextSubRoundId, chr.SelfSubround()) +} diff --git a/chronology/export_test.go b/chronology/export_test.go new file mode 100644 index 00000000000..0e3f9983993 --- /dev/null +++ b/chronology/export_test.go @@ -0,0 +1,9 @@ +package chronology + +func (chr *Chronology) InitRound() { + chr.initRound() +} + +func (chr *Chronology) UpdateSelfSubroundIfNeeded(subRoundId SubroundId) { + chr.updateSelfSubroundIfNeeded(subRoundId) +} diff --git a/chronology/mock/subroundHandlerStub.go b/chronology/mock/subroundHandlerStub.go new file mode 100644 index 00000000000..bebe35ff734 --- /dev/null +++ b/chronology/mock/subroundHandlerStub.go @@ -0,0 +1,38 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go-sandbox/chronology" +) + +type SubroundHandlerStub struct { + DoWorkCalled func(func() chronology.SubroundId, func() bool) bool + NextCalled func() chronology.SubroundId + CurrentCalled func() chronology.SubroundId + EndTimeCalled func() int64 + NameCalled func() string + CheckCalled func() bool +} + +func (shs *SubroundHandlerStub) DoWork(handler1 func() chronology.SubroundId, handler2 func() bool) bool { + return shs.DoWorkCalled(handler1, handler2) +} + +func (shs *SubroundHandlerStub) Next() chronology.SubroundId { + return shs.NextCalled() +} + +func (shs *SubroundHandlerStub) Current() chronology.SubroundId { + return shs.CurrentCalled() +} + +func (shs *SubroundHandlerStub) EndTime() int64 { + return shs.EndTimeCalled() +} + +func (shs *SubroundHandlerStub) Name() string { + return shs.NameCalled() +} + +func (shs *SubroundHandlerStub) Check() bool { + return shs.CheckCalled() +} diff --git a/chronology/mock/syncTimeMock.go b/chronology/mock/syncTimeMock.go new file mode 100644 index 00000000000..6f4a0bbfe56 --- /dev/null +++ b/chronology/mock/syncTimeMock.go @@ -0,0 +1,24 @@ +package mock + +import ( + "time" +) + +type SyncTimeMock struct { + CurrentTimeCalled func(time.Duration) time.Time +} + +func (stm *SyncTimeMock) StartSync() { +} + +func (stm *SyncTimeMock) ClockOffset() time.Duration { + return time.Duration(0) +} + +func (stm *SyncTimeMock) FormattedCurrentTime(t time.Duration) string { + return "formatted time" +} + +func (stm *SyncTimeMock) CurrentTime(t time.Duration) time.Time { + return stm.CurrentTimeCalled(t) +} diff --git a/chronology/mock/syncTimeStub.go b/chronology/mock/syncTimeStub.go new file mode 100644 index 00000000000..8889738bcbe --- /dev/null +++ b/chronology/mock/syncTimeStub.go @@ -0,0 +1,28 @@ +package mock + +import ( + "time" +) + +type SyncTimeStub struct { + StartSyncCalled func() + ClockOffsetCalled func() time.Duration + FormattedCurrentTimeCalled func(time.Duration) string + CurrentTimeCalled func(time.Duration) time.Time +} + +func (sts *SyncTimeStub) StartSync() { + sts.StartSyncCalled() +} + +func (sts *SyncTimeStub) ClockOffset() time.Duration { + return sts.ClockOffsetCalled() +} + +func (sts *SyncTimeStub) FormattedCurrentTime(t time.Duration) string { + return sts.FormattedCurrentTimeCalled(t) +} + +func (sts *SyncTimeStub) CurrentTime(t time.Duration) time.Time { + return sts.CurrentTimeCalled(t) +} diff --git a/chronology/ntp/syncTime.go b/chronology/ntp/syncTime.go index aedb6c9c62e..363820d58cc 100644 --- a/chronology/ntp/syncTime.go +++ b/chronology/ntp/syncTime.go @@ -8,6 +8,9 @@ import ( "github.com/beevik/ntp" ) +// totalRequests defines the number of requests made to determine an accurate clock offset +const totalRequests = 10 + // SyncTimer defines an interface for time synchronization type SyncTimer interface { StartSync() @@ -30,7 +33,7 @@ func NewSyncTime(syncPeriod time.Duration, query func(host string) (*ntp.Respons return &s } -// StartSync method does the time syncronization at every syncPeriod time elapsed. This should be started +// StartSync method does the time synchronization at every syncPeriod time elapsed. This should be started // as a go routine func (s *syncTime) StartSync() { for { @@ -39,16 +42,27 @@ func (s *syncTime) StartSync() { } } -// sync method does the time syncronization and sets the current offset difference between local time -// and server time with wich it has done the syncronization +// sync method does the time synchronization and sets the current offset difference between local time +// and server time with which it has done the synchronization func (s *syncTime) sync() { if s.query != nil { - r, err := s.query("time.google.com") + clockOffsetSum := time.Duration(0) + succeededRequests := 0 + + for i := 0; i < totalRequests; i++ { + r, err := s.query("time.google.com") + + if err != nil { + continue + } + + succeededRequests++ + clockOffsetSum += r.ClockOffset + } - if err != nil { - s.setClockOffset(0) - } else { - s.setClockOffset(r.ClockOffset) + if succeededRequests > 0 { + averrageClockOffset := time.Duration(int64(clockOffsetSum) / int64(succeededRequests)) + s.setClockOffset(averrageClockOffset) } } } @@ -68,7 +82,7 @@ func (s *syncTime) setClockOffset(clockOffset time.Duration) { s.mut.Unlock() } -// FormattedCurrentTime method gets the formatted current time on wich is added a given offset +// FormattedCurrentTime method gets the formatted current time on which is added a given offset func (s *syncTime) FormattedCurrentTime(clockOffset time.Duration) string { return s.formatTime(s.CurrentTime(clockOffset)) } diff --git a/chronology/ntp/syncTime_test.go b/chronology/ntp/syncTime_test.go index 0a50f70fc76..41323f40d5d 100644 --- a/chronology/ntp/syncTime_test.go +++ b/chronology/ntp/syncTime_test.go @@ -67,12 +67,11 @@ func TestHandleErrorInDoSync(t *testing.T) { assert.Equal(t, st.ClockOffset(), time.Millisecond*0) - //manually put a value in Offset and observe if it goes to 0 as a result to error st.SetClockOffset(1234) st.Sync() - assert.Equal(t, st.ClockOffset(), time.Millisecond*0) + assert.Equal(t, st.ClockOffset(), time.Duration(1234)) } @@ -86,7 +85,6 @@ func TestValueInDoSync(t *testing.T) { st.Sync() assert.Equal(t, st.ClockOffset(), time.Nanosecond*23456) - //manually put a value in Offset and observe if it goes to 0 as a result to error st.SetClockOffset(1234) st.Sync() diff --git a/cmd/bootnode/main.go b/cmd/bootnode/main.go index ca0955f75fb..e2b9d114322 100644 --- a/cmd/bootnode/main.go +++ b/cmd/bootnode/main.go @@ -3,6 +3,7 @@ package main import ( "context" "encoding/base64" + "encoding/hex" "encoding/json" "fmt" "io/ioutil" @@ -36,6 +37,10 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/node" "github.com/ElrondNetwork/elrond-go-sandbox/p2p" "github.com/ElrondNetwork/elrond-go-sandbox/process/block" + "github.com/ElrondNetwork/elrond-go-sandbox/process/factory" + "github.com/ElrondNetwork/elrond-go-sandbox/process/interceptor" + "github.com/ElrondNetwork/elrond-go-sandbox/process/resolver" + sync2 "github.com/ElrondNetwork/elrond-go-sandbox/process/sync" "github.com/ElrondNetwork/elrond-go-sandbox/process/transaction" "github.com/ElrondNetwork/elrond-go-sandbox/sharding" "github.com/ElrondNetwork/elrond-go-sandbox/storage" @@ -73,6 +78,15 @@ type genesis struct { InitialNodes []initialNode `json:"initialNodes"` } +type netMessengerConfig struct { + ctx context.Context + port int + maxAllowedPeers int + marshalizer marshal.Marshalizer + hasher hashing.Hasher + pubSubStrategy p2p.PubSubStrategy +} + func main() { log := logger.NewDefaultLogger() log.SetLevel(logger.LogInfo) @@ -112,9 +126,17 @@ func startNode(ctx *cli.Context, log *logger.Logger) error { } log.Info(fmt.Sprintf("Initialized with genesis config from: %s", ctx.GlobalString(flags.GenesisFile.Name))) - syncer := ntp.NewSyncTime(time.Millisecond*time.Duration(genesisConfig.RoundDuration), beevikntp.Query) + syncer := ntp.NewSyncTime(time.Hour, beevikntp.Query) go syncer.StartSync() + // TODO: The next 5 lines should be deleted when we are done testing from a precalculated (not hard coded) + // timestamp + if genesisConfig.StartTime == 0 { + time.Sleep(1000 * time.Millisecond) + ntpTime := syncer.CurrentTime(syncer.ClockOffset()) + genesisConfig.StartTime = (ntpTime.Unix()/60 + 1) * 60 + } + startTime := time.Unix(genesisConfig.StartTime, 0) log.Info(fmt.Sprintf("Start time in seconds: %d", startTime.Unix())) @@ -138,7 +160,7 @@ func startNode(ctx *cli.Context, log *logger.Logger) error { log.Info("Bootstrapping node....") err = ef.StartNode() if err != nil { - log.Error("Starting node failed", err.Error()) + log.Error("starting node failed", err.Error()) } } @@ -158,14 +180,14 @@ func loadFile(dest interface{}, relativePath string, log *logger.Logger) error { path, err := filepath.Abs(relativePath) fmt.Println(path) if err != nil { - log.Error("Cannot create absolute path for the provided file", err.Error()) + log.Error("cannot create absolute path for the provided file", err.Error()) return err } f, err := os.Open(path) defer func() { err = f.Close() if err != nil { - log.Error("Cannot close file: ", err.Error()) + log.Error("cannot close file: ", err.Error()) } }() if err != nil { @@ -201,10 +223,10 @@ func loadGenesisConfiguration(genesisFilePath string, log *logger.Logger) (*gene func (g *genesis) initialNodesPubkeys(log *logger.Logger) []string { var pubKeys []string for _, in := range g.InitialNodes { - pubKey, err := base64.StdEncoding.DecodeString(in.PubKey) + pubKey, err := decodeAddress(in.PubKey) if err != nil { - log.Error(fmt.Sprintf("%s is not a valid public key. Ignored.", in)) + log.Error(fmt.Sprintf("%s is not a valid public key. Ignored", in)) continue } @@ -213,15 +235,20 @@ func (g *genesis) initialNodesPubkeys(log *logger.Logger) []string { return pubKeys } -func (g *genesis) initialNodesBalances(log *logger.Logger) map[string]big.Int { - var pubKeys = make(map[string]big.Int) +func (g *genesis) initialNodesBalances(log *logger.Logger) map[string]*big.Int { + var pubKeys = make(map[string]*big.Int) for _, in := range g.InitialNodes { balance, ok := new(big.Int).SetString(in.Balance, 10) if ok { - pubKeys[in.PubKey] = *balance + pubKey, err := decodeAddress(in.PubKey) + if err != nil { + log.Error(fmt.Sprintf("%s is not a valid public key. Ignored", in.PubKey)) + continue + } + pubKeys[string(pubKey)] = balance } else { - log.Warn(fmt.Sprintf("Error decoding balance %s for public key %s - setting to 0", in.Balance, in.PubKey)) - pubKeys[in.PubKey] = *big.NewInt(0) + log.Warn(fmt.Sprintf("error decoding balance %s for public key %s - setting to 0", in.Balance, in.PubKey)) + pubKeys[in.PubKey] = big.NewInt(0) } } @@ -246,7 +273,7 @@ func createNode(ctx *cli.Context, cfg *config.Config, genesisConfig *genesis, sy return nil, errors.New("error creating node: " + err.Error()) } - addressConverter, err := state.NewHashAddressConverter(hasher, cfg.Address.Length, cfg.Address.Prefix) + addressConverter, err := state.NewPlainAddressConverter(cfg.Address.Length, cfg.Address.Prefix) if err != nil { return nil, errors.New("could not create address converter: " + err.Error()) } @@ -268,15 +295,13 @@ func createNode(ctx *cli.Context, cfg *config.Config, genesisConfig *genesis, sy uint64ByteSliceConverter := uint64ByteSlice.NewBigEndianConverter() - transient, err := createDataPoolFromConfig(cfg, uint64ByteSliceConverter) + datapool, err := createDataPoolFromConfig(cfg, uint64ByteSliceConverter) if err != nil { return nil, errors.New("could not create transient data pool: " + err.Error()) } shardCoordinator := &sharding.OneShardCoordinator{} - blockProcessor := block.NewBlockProcessor(transient.Transactions(), hasher, marshalizer, transactionProcessor, accountsAdapter, shardCoordinator) - initialPubKeys := genesisConfig.initialNodesPubkeys(log) keyGen, privKey, pubKey, err := getSigningParams(ctx, log) @@ -291,13 +316,79 @@ func createNode(ctx *cli.Context, cfg *config.Config, genesisConfig *genesis, sy return nil, err } + netMessenger, err := createNetMessenger(netMessengerConfig{ + ctx: appContext, + port: ctx.GlobalInt(flags.Port.Name), + maxAllowedPeers: ctx.GlobalInt(flags.MaxAllowedPeers.Name), + marshalizer: marshalizer, + hasher: hasher, + pubSubStrategy: p2p.GossipSub, + }) + if err != nil { + return nil, err + } + + interceptorsContainer := interceptor.NewContainer() + resolversContainer := resolver.NewContainer() + + processorFactory, err := factory.NewProcessorsCreator(factory.ProcessorsCreatorConfig{ + InterceptorContainer: interceptorsContainer, + ResolverContainer: resolversContainer, + Messenger: netMessenger, + Blockchain: blkc, + DataPool: datapool, + ShardCoordinator: shardCoordinator, + AddrConverter: addressConverter, + Hasher: hasher, + Marshalizer: marshalizer, + SingleSignKeyGen: keyGen, + Uint64ByteSliceConverter: uint64ByteSliceConverter, + }) + if err != nil { + return nil, err + } + + err = processorFactory.CreateInterceptors() + if err != nil { + return nil, err + } + + err = processorFactory.CreateResolvers() + if err != nil { + return nil, err + } + + forkDetector := sync2.NewBasicForkDetector() + + res, err := processorFactory.ResolverContainer().Get(string(factory.TransactionTopic)) + if err != nil { + return nil, err + } + txResolver, ok := res.(*transaction.TxResolver) + if !ok { + return nil, errors.New("tx resolver is not of type transaction.TxResolver") + } + + blockProcessor, err := block.NewBlockProcessor( + datapool, + hasher, + marshalizer, + transactionProcessor, + accountsAdapter, + shardCoordinator, + forkDetector, + createRequestTransactionHandler(txResolver, log), + ) + + if err != nil { + return nil, errors.New("could not create block processor: " + err.Error()) + } + nd, err := node.NewNode( + node.WithMessenger(netMessenger), node.WithHasher(hasher), node.WithContext(appContext), node.WithMarshalizer(marshalizer), - node.WithPubSubStrategy(p2p.GossipSub), - node.WithMaxAllowedPeers(ctx.GlobalInt(flags.MaxAllowedPeers.Name)), - node.WithPort(ctx.GlobalInt(flags.Port.Name)), node.WithInitialNodesPubKeys(initialPubKeys), node.WithInitialNodesBalances(genesisConfig.initialNodesBalances(log)), node.WithAddressConverter(addressConverter), @@ -309,22 +400,58 @@ func createNode(ctx *cli.Context, cfg *config.Config, genesisConfig *genesis, sy node.WithBlockProcessor(blockProcessor), node.WithGenesisTime(time.Unix(genesisConfig.StartTime, 0)), node.WithElasticSubrounds(genesisConfig.ElasticSubrounds), - node.WithDataPool(transient), + node.WithDataPool(datapool), node.WithShardCoordinator(shardCoordinator), node.WithUint64ByteSliceConverter(uint64ByteSliceConverter), node.WithMultisig(multisigner), node.WithSingleSignKeyGenerator(keyGen), node.WithPublicKey(pubKey), node.WithPrivateKey(privKey), + node.WithForkDetector(forkDetector), + node.WithProcessorCreator(processorFactory), ) if err != nil { return nil, errors.New("error creating node: " + err.Error()) } + err = nd.CreateShardedStores() + if err != nil { + return nil, err + } + return nd, nil } +func createRequestTransactionHandler(txResolver *transaction.TxResolver, log *logger.Logger) func(destShardID uint32, txHash []byte) { + return func(destShardID uint32, txHash []byte) { + _ = txResolver.RequestTransactionFromHash(txHash) + log.Debug(fmt.Sprintf("Requested tx for shard %d with hash %s from network\n", destShardID, toB64(txHash))) + } +} + +func createNetMessenger(config netMessengerConfig) (p2p.Messenger, error) { + if config.port == 0 { + return nil, errors.New("cannot start node on port 0") + } + + if config.maxAllowedPeers == 0 { + return nil, errors.New("cannot start node without providing maxAllowedPeers") + } + + //TODO check if libp2p provides a better random source + cp := &p2p.ConnectParams{} + cp.Port = config.port + cp.GeneratePrivPubKeys(time.Now().UnixNano()) + cp.GenerateIDFromPubKey() + + nm, err := p2p.NewNetMessenger(config.ctx, config.marshalizer, config.hasher, cp, config.maxAllowedPeers, config.pubSubStrategy) + if err != nil { + return nil, err + } + return nm, nil +} + func getSk(ctx *cli.Context) ([]byte, error) { if !ctx.GlobalIsSet(flags.PrivateKey.Name) { if ctx.GlobalString(flags.PrivateKey.Name) == "" { @@ -332,18 +459,11 @@ func getSk(ctx *cli.Context) ([]byte, error) { } } - b64sk, err := ioutil.ReadFile(ctx.GlobalString(flags.PrivateKey.Name)) - if err != nil { - b64sk = []byte(ctx.GlobalString(flags.PrivateKey.Name)) - } - decodedSk := make([]byte, base64.StdEncoding.DecodedLen(len(b64sk))) - l, err := base64.StdEncoding.Decode(decodedSk, b64sk) - + encodedSk, err := ioutil.ReadFile(ctx.GlobalString(flags.PrivateKey.Name)) if err != nil { - return nil, errors.New("could not decode private key: " + err.Error()) + encodedSk = []byte(ctx.GlobalString(flags.PrivateKey.Name)) } - - return decodedSk[:l], nil + return decodeAddress(string(encodedSk)) } func getSigningParams(ctx *cli.Context, log *logger.Logger) ( @@ -367,14 +487,13 @@ func getSigningParams(ctx *cli.Context, log *logger.Logger) ( pubKey = privKey.GeneratePublic() - base64sk := make([]byte, base64.StdEncoding.EncodedLen(len(sk))) - base64.StdEncoding.Encode(base64sk, sk) - log.Info("starting with private key: " + string(base64sk)) - pk, _ := pubKey.ToByteArray() - base64pk := make([]byte, base64.StdEncoding.EncodedLen(len(pk))) - base64.StdEncoding.Encode(base64pk, pk) - log.Info("starting with public key: " + string(base64pk)) + + skEncoded := encodeAddress(sk) + pkEncoded := encodeAddress(pk) + + log.Info("starting with private key: " + skEncoded) + log.Info("starting with public key: " + pkEncoded) return keyGen, privKey, pubKey, err } @@ -585,3 +704,18 @@ func createBlockChainFromConfig(config *config.Config) (*blockchain.BlockChain, return blockChain, err } + +func decodeAddress(address string) ([]byte, error) { + return hex.DecodeString(address) +} + +func encodeAddress(address []byte) string { + return hex.EncodeToString(address) +} + +func toB64(buff []byte) string { + if buff == nil { + return "" + } + return base64.StdEncoding.EncodeToString(buff) +} diff --git a/cmd/facade/elrondNodeFacade.go b/cmd/facade/elrondNodeFacade.go index 2393d5101a9..09be4ef10b8 100644 --- a/cmd/facade/elrondNodeFacade.go +++ b/cmd/facade/elrondNodeFacade.go @@ -6,6 +6,7 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/api" "github.com/ElrondNetwork/elrond-go-sandbox/chronology/ntp" + "github.com/ElrondNetwork/elrond-go-sandbox/data/state" "github.com/ElrondNetwork/elrond-go-sandbox/data/transaction" "github.com/ElrondNetwork/elrond-go-sandbox/logger" ) @@ -33,7 +34,7 @@ func (ef *ElrondNodeFacade) SetLogger(log *logger.Logger) { ef.log = log } -//SetSyncer sets the current syncer +// SetSyncer sets the current syncer func (ef *ElrondNodeFacade) SetSyncer(syncer ntp.SyncTimer) { ef.syncer = syncer } @@ -45,11 +46,6 @@ func (ef *ElrondNodeFacade) StartNode() error { return err } - err = ef.node.BindInterceptorsResolvers() - if err != nil { - return err - } - err = ef.node.StartConsensus() return err } @@ -86,19 +82,48 @@ func (ef *ElrondNodeFacade) GetBalance(address string) (*big.Int, error) { } // GenerateTransaction generates a transaction from a sender, receiver, value and data -func (ef *ElrondNodeFacade) GenerateTransaction(sender string, receiver string, value big.Int, +func (ef *ElrondNodeFacade) GenerateTransaction(senderHex string, receiverHex string, value *big.Int, data string) (*transaction.Transaction, error) { - return ef.node.GenerateTransaction(sender, receiver, value, data) + return ef.node.GenerateTransaction(senderHex, receiverHex, value, data) } // SendTransaction will send a new transaction on the topic channel -func (ef *ElrondNodeFacade) SendTransaction(nonce uint64, sender string, receiver string, - value big.Int, transactionData string, signature string) (*transaction.Transaction, error) { - return ef.node.SendTransaction(nonce, sender, receiver, value, transactionData, signature) +func (ef *ElrondNodeFacade) SendTransaction( + nonce uint64, + senderHex string, + receiverHex string, + value *big.Int, + transactionData string, + signature []byte, +) (*transaction.Transaction, error) { + + return ef.node.SendTransaction(nonce, senderHex, receiverHex, value, transactionData, signature) } // GetTransaction gets the transaction with a specified hash func (ef *ElrondNodeFacade) GetTransaction(hash string) (*transaction.Transaction, error) { return ef.node.GetTransaction(hash) } + +// GetAccount returns an accountResponse containing information +// about the account correlated with provided address +func (ef *ElrondNodeFacade) GetAccount(address string) (*state.Account, error) { + return ef.node.GetAccount(address) +} + +// GetCurrentPublicKey gets the current nodes public Key +func (ef *ElrondNodeFacade) GetCurrentPublicKey() string { + return ef.node.GetCurrentPublicKey() +} + +//GenerateAndSendBulkTransactions generates a number of nrTransactions of amount value +//for the receiver destination +func (ef *ElrondNodeFacade) GenerateAndSendBulkTransactions( + destination string, + value *big.Int, + nrTransactions uint64, +) error { + + return ef.node.GenerateAndSendBulkTransactions(destination, value, nrTransactions) +} diff --git a/cmd/facade/elrondNodeFacade_test.go b/cmd/facade/elrondNodeFacade_test.go index 3bff7386db6..2bc711e8e54 100644 --- a/cmd/facade/elrondNodeFacade_test.go +++ b/cmd/facade/elrondNodeFacade_test.go @@ -8,7 +8,9 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/cmd/facade" "github.com/ElrondNetwork/elrond-go-sandbox/cmd/facade/mock" + "github.com/ElrondNetwork/elrond-go-sandbox/data/state" "github.com/ElrondNetwork/elrond-go-sandbox/data/transaction" + "github.com/ElrondNetwork/elrond-go-sandbox/logger" "github.com/stretchr/testify/assert" ) @@ -30,8 +32,8 @@ func TestElrondFacade_StartNodeWithNodeNotNullShouldNotReturnError(t *testing.T) started = true return nil }, - P2PBootstrapHandler: func() { - return + P2PBootstrapHandler: func() error { + return nil }, IsRunningHandler: func() bool { return started @@ -39,9 +41,6 @@ func TestElrondFacade_StartNodeWithNodeNotNullShouldNotReturnError(t *testing.T) StartConsensusHandler: func() error { return nil }, - BindInterceptorsResolversHandler: func() error { - return nil - }, } ef := facade.NewElrondNodeFacade(node) @@ -80,8 +79,8 @@ func TestElrondFacade_StartNodeWithErrorOnStartConsensusShouldReturnError(t *tes started = true return nil }, - P2PBootstrapHandler: func() { - return + P2PBootstrapHandler: func() error { + return nil }, IsRunningHandler: func() bool { return started @@ -90,9 +89,6 @@ func TestElrondFacade_StartNodeWithErrorOnStartConsensusShouldReturnError(t *tes started = false return fmt.Errorf("error on StartConsensus") }, - BindInterceptorsResolversHandler: func() error { - return nil - }, } ef := facade.NewElrondNodeFacade(node) @@ -207,7 +203,7 @@ func TestElrondFacade_GetBalanceWithErrorOnNodeShouldReturnZeroBalanceAndError(t func TestElrondFacade_GenerateTransactionWithCorrectInputsShouldReturnNoError(t *testing.T) { sender := "sender" receiver := "receiver" - value := *big.NewInt(10) + value := big.NewInt(10) data := "code" tr := &transaction.Transaction{ @@ -217,7 +213,7 @@ func TestElrondFacade_GenerateTransactionWithCorrectInputsShouldReturnNoError(t Value: value} node := &mock.NodeMock{ - GenerateTransactionHandler: func(sender string, receiver string, value big.Int, + GenerateTransactionHandler: func(sender string, receiver string, value *big.Int, data string) (*transaction.Transaction, error) { return &transaction.Transaction{ SndAddr: []byte(sender), @@ -237,11 +233,11 @@ func TestElrondFacade_GenerateTransactionWithCorrectInputsShouldReturnNoError(t func TestElrondFacade_GenerateTransactionWithNilSenderShouldReturnError(t *testing.T) { receiver := "receiver" - amount := *big.NewInt(10) + amount := big.NewInt(10) code := "code" node := &mock.NodeMock{ - GenerateTransactionHandler: func(sender string, receiver string, amount big.Int, + GenerateTransactionHandler: func(sender string, receiver string, amount *big.Int, code string) (*transaction.Transaction, error) { if sender == "" { return nil, errors.New("nil sender") @@ -259,11 +255,11 @@ func TestElrondFacade_GenerateTransactionWithNilSenderShouldReturnError(t *testi func TestElrondFacade_GenerateTransactionWithNilReceiverShouldReturnError(t *testing.T) { sender := "sender" - amount := *big.NewInt(10) + amount := big.NewInt(10) code := "code" node := &mock.NodeMock{ - GenerateTransactionHandler: func(sender string, receiver string, amount big.Int, + GenerateTransactionHandler: func(sender string, receiver string, amount *big.Int, code string) (*transaction.Transaction, error) { if receiver == "" { return nil, errors.New("nil receiver") @@ -282,11 +278,11 @@ func TestElrondFacade_GenerateTransactionWithNilReceiverShouldReturnError(t *tes func TestElrondFacade_GenerateTransactionWithZeroAmountShouldReturnError(t *testing.T) { sender := "sender" receiver := "receiver" - amount := *big.NewInt(0) + amount := big.NewInt(0) code := "code" node := &mock.NodeMock{ - GenerateTransactionHandler: func(sender string, receiver string, amount big.Int, + GenerateTransactionHandler: func(sender string, receiver string, amount *big.Int, code string) (*transaction.Transaction, error) { if amount.Cmp(big.NewInt(0)) == 0 { return nil, errors.New("zero amount") @@ -305,11 +301,11 @@ func TestElrondFacade_GenerateTransactionWithZeroAmountShouldReturnError(t *test func TestElrondFacade_GenerateTransactionWithNegativeAmountShouldReturnError(t *testing.T) { sender := "sender" receiver := "receiver" - amount := *big.NewInt(-2) + amount := big.NewInt(-2) code := "code" node := &mock.NodeMock{ - GenerateTransactionHandler: func(sender string, receiver string, amount big.Int, + GenerateTransactionHandler: func(sender string, receiver string, amount *big.Int, code string) (*transaction.Transaction, error) { if amount.Cmp(big.NewInt(0)) < 0 { return nil, errors.New("negative amount") @@ -363,3 +359,69 @@ func TestElrondFacade_GetTransactionWithUnknowHashShouldReturnNilAndNoError(t *t assert.Nil(t, err) assert.Nil(t, tx) } + +func TestElrondNodeFacade_SetLogger(t *testing.T) { + node := &mock.NodeMock{} + + ef := facade.NewElrondNodeFacade(node) + log := logger.NewDefaultLogger() + ef.SetLogger(log) + assert.Equal(t, log, ef.GetLogger()) +} + +func TestElrondNodeFacade_SetSyncer(t *testing.T) { + node := &mock.NodeMock{} + + ef := facade.NewElrondNodeFacade(node) + sync := &mock.SyncTimerMock{} + ef.SetSyncer(sync) + assert.Equal(t, sync, ef.GetSyncer()) +} + +func TestElrondNodeFacade_SendTransaction(t *testing.T) { + called := 0 + node := &mock.NodeMock{} + node.SendTransactionHandler = func(nonce uint64, sender string, receiver string, amount *big.Int, code string, signature []byte) (i *transaction.Transaction, e error) { + called++ + return nil, nil + } + ef := facade.NewElrondNodeFacade(node) + ef.SendTransaction(1, "test", "test", big.NewInt(0), "code", []byte{}) + assert.Equal(t, called, 1) +} + +func TestElrondNodeFacade_GetAccount(t *testing.T) { + called := 0 + node := &mock.NodeMock{} + node.GetAccountHandler = func(address string) (account *state.Account, e error) { + called++ + return nil, nil + } + ef := facade.NewElrondNodeFacade(node) + ef.GetAccount("test") + assert.Equal(t, called, 1) +} + +func TestElrondNodeFacade_GetCurrentPublicKey(t *testing.T) { + called := 0 + node := &mock.NodeMock{} + node.GetCurrentPublicKeyHandler = func() string { + called++ + return "" + } + ef := facade.NewElrondNodeFacade(node) + ef.GetCurrentPublicKey() + assert.Equal(t, called, 1) +} + +func TestElrondNodeFacade_GenerateAndSendBulkTransactions(t *testing.T) { + called := 0 + node := &mock.NodeMock{} + node.GenerateAndSendBulkTransactionsHandler = func(destination string, value *big.Int, nrTransactions uint64) error { + called++ + return nil + } + ef := facade.NewElrondNodeFacade(node) + ef.GenerateAndSendBulkTransactions("", big.NewInt(0), 0) + assert.Equal(t, called, 1) +} diff --git a/cmd/facade/export_test.go b/cmd/facade/export_test.go new file mode 100644 index 00000000000..63e1cfd2766 --- /dev/null +++ b/cmd/facade/export_test.go @@ -0,0 +1,17 @@ +package facade + +import ( + "github.com/ElrondNetwork/elrond-go-sandbox/chronology/ntp" + + "github.com/ElrondNetwork/elrond-go-sandbox/logger" +) + +// GetLogger returns the current logger +func (ef *ElrondNodeFacade) GetLogger() *logger.Logger { + return ef.log +} + +// GetSyncer returns the current syncer +func (ef *ElrondNodeFacade) GetSyncer() ntp.SyncTimer { + return ef.syncer +} diff --git a/cmd/facade/interface.go b/cmd/facade/interface.go index 769967c5afa..b9a2b1af495 100644 --- a/cmd/facade/interface.go +++ b/cmd/facade/interface.go @@ -2,48 +2,14 @@ package facade import ( "math/big" - "sync" + "github.com/ElrondNetwork/elrond-go-sandbox/data/state" "github.com/ElrondNetwork/elrond-go-sandbox/data/transaction" - "github.com/ElrondNetwork/elrond-go-sandbox/logger" ) -// Facade represents a facade for grouping the functionality needed for node, transaction and address -type Facade interface { - //StartNode starts the underlying node - StartNode() error - - //StopNode stops the underlying node - StopNode() error - - //StartBackgroundServices starts all background services needed for the correct functionality of the node - StartBackgroundServices(wg *sync.WaitGroup) - - //SetLogger sets the current logger - SetLogger(logger *logger.Logger) - - //IsNodeRunning gets if the underlying node is running - IsNodeRunning() bool - - //GetBalance gets the current balance for a specified address - GetBalance(address string) (*big.Int, error) - - //GenerateTransaction generates a transaction from a sender, receiver, value and data - GenerateTransaction(sender string, receiver string, value big.Int, data string) (*transaction.Transaction, error) - - //SendTransaction will send a new transaction on the topic channel - SendTransaction(nonce uint64, sender string, receiver string, value big.Int, transactionData string, signature string) (*transaction.Transaction, error) - - //GetTransaction gets the transaction with a specified hash - GetTransaction(hash string) (*transaction.Transaction, error) -} - //NodeWrapper contains all functions that a node should contain. type NodeWrapper interface { - // Address returns the first address of the running node - Address() (string, error) - // Start will create a new messenger and and set up the Node state as running Start() error @@ -51,17 +17,11 @@ type NodeWrapper interface { Stop() error // P2PBootstrap starts the peer discovery process and peer connection filtering - P2PBootstrap() + P2PBootstrap() error //IsRunning returns if the underlying node is running IsRunning() bool - // ConnectToAddresses will take a slice of addresses and try to connect to all of them. - ConnectToAddresses(addresses []string) error - - // BindInterceptorsResolvers will start the interceptors and resolvers - BindInterceptorsResolvers() error - // StartConsensus will start the consesus service for the current node StartConsensus() error @@ -69,11 +29,22 @@ type NodeWrapper interface { GetBalance(address string) (*big.Int, error) //GenerateTransaction generates a new transaction with sender, receiver, amount and code - GenerateTransaction(sender string, receiver string, amount big.Int, code string) (*transaction.Transaction, error) + GenerateTransaction(senderHex string, receiverHex string, amount *big.Int, code string) (*transaction.Transaction, error) //SendTransaction will send a new transaction on the topic channel - SendTransaction(nonce uint64, sender string, receiver string, value big.Int, transactionData string, signature string) (*transaction.Transaction, error) + SendTransaction(nonce uint64, senderHex string, receiverHex string, value *big.Int, transactionData string, signature []byte) (*transaction.Transaction, error) //GetTransaction gets the transaction GetTransaction(hash string) (*transaction.Transaction, error) + + // GetCurrentPublicKey gets the current nodes public Key + GetCurrentPublicKey() string + + // GenerateAndSendBulkTransactions generates a number of nrTransactions of amount value + // for the receiver destination + GenerateAndSendBulkTransactions(string, *big.Int, uint64) error + + // GetAccount returns an accountResponse containing information + // about the account corelated with provided address + GetAccount(address string) (*state.Account, error) } diff --git a/cmd/facade/mock/nodeMock.go b/cmd/facade/mock/nodeMock.go index 1272f378543..36a9a344583 100644 --- a/cmd/facade/mock/nodeMock.go +++ b/cmd/facade/mock/nodeMock.go @@ -3,22 +3,26 @@ package mock import ( "math/big" + "github.com/ElrondNetwork/elrond-go-sandbox/data/state" "github.com/ElrondNetwork/elrond-go-sandbox/data/transaction" + "github.com/ElrondNetwork/elrond-go-sandbox/process" ) type NodeMock struct { - AddressHandler func() (string, error) - StartHandler func() error - StopHandler func() error - P2PBootstrapHandler func() - IsRunningHandler func() bool - ConnectToAddressesHandler func([]string) error - BindInterceptorsResolversHandler func() error - StartConsensusHandler func() error - GetBalanceHandler func(address string) (*big.Int, error) - GenerateTransactionHandler func(sender string, receiver string, amount big.Int, code string) (*transaction.Transaction, error) - GetTransactionHandler func(hash string) (*transaction.Transaction, error) - SendTransactionHandler func(nonce uint64, sender string, receiver string, amount big.Int, code string, signature string) (*transaction.Transaction, error) + AddressHandler func() (string, error) + StartHandler func() error + StopHandler func() error + P2PBootstrapHandler func() error + IsRunningHandler func() bool + ConnectToAddressesHandler func([]string) error + StartConsensusHandler func() error + GetBalanceHandler func(address string) (*big.Int, error) + GenerateTransactionHandler func(sender string, receiver string, amount *big.Int, code string) (*transaction.Transaction, error) + GetTransactionHandler func(hash string) (*transaction.Transaction, error) + SendTransactionHandler func(nonce uint64, sender string, receiver string, amount *big.Int, code string, signature []byte) (*transaction.Transaction, error) + GetAccountHandler func(address string) (*state.Account, error) + GetCurrentPublicKeyHandler func() string + GenerateAndSendBulkTransactionsHandler func(destination string, value *big.Int, nrTransactions uint64) error } func (nm *NodeMock) Address() (string, error) { @@ -33,8 +37,8 @@ func (nm *NodeMock) Stop() error { return nm.StopHandler() } -func (nm *NodeMock) P2PBootstrap() { - nm.P2PBootstrapHandler() +func (nm *NodeMock) P2PBootstrap() error { + return nm.P2PBootstrapHandler() } func (nm *NodeMock) IsRunning() bool { @@ -45,11 +49,6 @@ func (nm *NodeMock) ConnectToAddresses(addresses []string) error { return nm.ConnectToAddressesHandler(addresses) } -// BindInterceptorsResolvers will start the interceptors and resolvers -func (nm *NodeMock) BindInterceptorsResolvers() error { - return nm.BindInterceptorsResolversHandler() -} - func (nm *NodeMock) StartConsensus() error { return nm.StartConsensusHandler() } @@ -58,7 +57,7 @@ func (nm *NodeMock) GetBalance(address string) (*big.Int, error) { return nm.GetBalanceHandler(address) } -func (nm *NodeMock) GenerateTransaction(sender string, receiver string, amount big.Int, code string) (*transaction.Transaction, error) { +func (nm *NodeMock) GenerateTransaction(sender string, receiver string, amount *big.Int, code string) (*transaction.Transaction, error) { return nm.GenerateTransactionHandler(sender, receiver, amount, code) } @@ -66,6 +65,26 @@ func (nm *NodeMock) GetTransaction(hash string) (*transaction.Transaction, error return nm.GetTransactionHandler(hash) } -func (nm *NodeMock) SendTransaction(nonce uint64, sender string, receiver string, value big.Int, transactionData string, signature string) (*transaction.Transaction, error) { +func (nm *NodeMock) SendTransaction(nonce uint64, sender string, receiver string, value *big.Int, transactionData string, signature []byte) (*transaction.Transaction, error) { return nm.SendTransactionHandler(nonce, sender, receiver, value, transactionData, signature) } + +func (nm *NodeMock) GetInterceptors() []process.Interceptor { + return nil +} + +func (nm *NodeMock) GetResolvers() []process.Resolver { + return nil +} + +func (nm *NodeMock) GetCurrentPublicKey() string { + return nm.GetCurrentPublicKeyHandler() +} + +func (nm *NodeMock) GenerateAndSendBulkTransactions(receiverHex string, value *big.Int, noOfTx uint64) error { + return nm.GenerateAndSendBulkTransactionsHandler(receiverHex, value, noOfTx) +} + +func (nm *NodeMock) GetAccount(address string) (*state.Account, error) { + return nm.GetAccountHandler(address) +} diff --git a/cmd/facade/mock/syncTimerMock.go b/cmd/facade/mock/syncTimerMock.go new file mode 100644 index 00000000000..0f02f887058 --- /dev/null +++ b/cmd/facade/mock/syncTimerMock.go @@ -0,0 +1,33 @@ +package mock + +import ( + "time" +) + +// SyncTimerMock is a mock implementation of SyncTimer interface +type SyncTimerMock struct { + StartSyncCalled func() + ClockOffsetCalled func() time.Duration + FormattedCurrentTimeCalled func(time.Duration) string + CurrentTimeCalled func(time.Duration) time.Time +} + +// StartSync is a mock implementation for StartSync +func (s *SyncTimerMock) StartSync() { + s.StartSyncCalled() +} + +// ClockOffset is a mock implementation for ClockOffset +func (s *SyncTimerMock) ClockOffset() time.Duration { + return s.ClockOffsetCalled() +} + +// FormattedCurrentTime is a mock implementation for FormattedCurrentTime +func (s *SyncTimerMock) FormattedCurrentTime(t time.Duration) string { + return s.FormattedCurrentTimeCalled(t) +} + +// CurrentTime is a mock implementation for CurrentTime +func (s *SyncTimerMock) CurrentTime(t time.Duration) time.Time { + return s.CurrentTimeCalled(t) +} diff --git a/cmd/flags/flags.go b/cmd/flags/flags.go index eae9796c159..48dc3ce26e5 100644 --- a/cmd/flags/flags.go +++ b/cmd/flags/flags.go @@ -13,7 +13,7 @@ var ( PrivateKey = cli.StringFlag{ Name: "private-key", Usage: "Private key that the node will load on startup and will sign transactions - temporary until we have a wallet that can do that", - Value: "unkVM1J1JvlNFqY3uo/CvAay6BsIL3IzDH9GDgmfUAA=", + Value: "b5671723b8c64b16b3d4f5a2db9a2e3b61426e87c945b5453279f0701a10c70f", } // WithUI defines a flag for choosing the option of starting with/without UI. If false, the node will start automatically WithUI = cli.BoolTFlag{ @@ -24,12 +24,12 @@ var ( Port = cli.IntFlag{ Name: "port", Usage: "Port number on which the application will start", - Value: 4001, + Value: 32000, } // MaxAllowedPeers defines a flag for setting the maximum number of connections allowed at once MaxAllowedPeers = cli.IntFlag{ Name: "max-allowed-peers", Usage: "Maximum connections the user is willing to accept", - Value: 4, + Value: 10, } ) diff --git a/cmd/keygenerator/main.go b/cmd/keygenerator/main.go index 1c1e2bde67a..e5969cbed8b 100644 --- a/cmd/keygenerator/main.go +++ b/cmd/keygenerator/main.go @@ -1,7 +1,7 @@ package main import ( - "encoding/base64" + "encoding/hex" "fmt" "os" "path/filepath" @@ -44,17 +44,14 @@ func main() { fmt.Println("Cound not convert pk to byte array") } - base64sk := make([]byte, base64.StdEncoding.EncodedLen(len(skBytes))) - base64.StdEncoding.Encode(base64sk, []byte(skBytes)) + skHex := []byte(hex.EncodeToString(skBytes)) + pkHex := []byte(hex.EncodeToString(pkBytes)) - base64pk := make([]byte, base64.StdEncoding.EncodedLen(len(pkBytes))) - base64.StdEncoding.Encode(base64pk, []byte(pkBytes)) - - if _, err3 := fsk.Write(append(base64sk, '\n')); err3 != nil { + if _, err3 := fsk.Write(append(skHex, '\n')); err3 != nil { fmt.Println(err3) } - if _, err3 := fpk.Write(append(base64pk, '\n')); err3 != nil { + if _, err3 := fpk.Write(append(pkHex, '\n')); err3 != nil { fmt.Println(err3) } } diff --git a/consensus/interface.go b/consensus/interface.go index 0bdc84e2194..8dbb98d8cbc 100644 --- a/consensus/interface.go +++ b/consensus/interface.go @@ -6,7 +6,7 @@ import ( // Validator defines what a consensus validator implementation should do. type Validator interface { - Stake() big.Int + Stake() *big.Int Rating() int32 PubKey() []byte } diff --git a/consensus/mock/validatorMock.go b/consensus/mock/validatorMock.go index 8ea0369d04b..cd22a51ce85 100644 --- a/consensus/mock/validatorMock.go +++ b/consensus/mock/validatorMock.go @@ -5,16 +5,16 @@ import ( ) type ValidatorMock struct { - stake big.Int + stake *big.Int rating int32 pubKey []byte } -func NewValidatorMock(stake big.Int, rating int32, pubKey []byte) *ValidatorMock { +func NewValidatorMock(stake *big.Int, rating int32, pubKey []byte) *ValidatorMock { return &ValidatorMock{stake: stake, rating: rating, pubKey: pubKey} } -func (vm *ValidatorMock) Stake() big.Int { +func (vm *ValidatorMock) Stake() *big.Int { return vm.stake } diff --git a/consensus/spos/consensus.go b/consensus/spos/consensus.go index 65cc617fc25..69e720c38ee 100644 --- a/consensus/spos/consensus.go +++ b/consensus/spos/consensus.go @@ -122,7 +122,12 @@ func NewConsensus( return &cns } -// IsNodeLeaderInCurrentRound method checks if the node is leader in the current round +// IsSelfLeaderInCurrentRound method checks if the current node is leader in the current round +func (cns *Consensus) IsSelfLeaderInCurrentRound() bool { + return cns.IsNodeLeaderInCurrentRound(cns.selfPubKey) +} + +// IsNodeLeaderInCurrentRound method checks if the given node is leader in the current round func (cns *Consensus) IsNodeLeaderInCurrentRound(node string) bool { leader, err := cns.GetLeader() diff --git a/consensus/spos/consensus_test.go b/consensus/spos/consensus_test.go index 37bb3c67687..447bc54d888 100644 --- a/consensus/spos/consensus_test.go +++ b/consensus/spos/consensus_test.go @@ -7,6 +7,7 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/chronology" "github.com/ElrondNetwork/elrond-go-sandbox/chronology/ntp" "github.com/ElrondNetwork/elrond-go-sandbox/consensus/spos" + "github.com/ElrondNetwork/elrond-go-sandbox/consensus/spos/mock" "github.com/stretchr/testify/assert" ) @@ -330,3 +331,101 @@ func TestConsensus_Log(t *testing.T) { assert.NotNil(t, cns) } + +func TestSetSelfJobDone_ShouldWork(t *testing.T) { + rCns := spos.NewRoundConsensus( + []string{"1", "2", "3"}, + "2") + + rCns.SetSelfJobDone(spos.SrBlock, true) + + jobDone, _ := rCns.GetJobDone("2", spos.SrBlock) + assert.True(t, jobDone) +} + +func TestGetSelfJobDone_ShouldReturnFalse(t *testing.T) { + rCns := spos.NewRoundConsensus( + []string{"1", "2", "3"}, + "2") + + for i := 0; i < len(rCns.ConsensusGroup()); i++ { + if rCns.ConsensusGroup()[i] == rCns.SelfPubKey() { + continue + } + + rCns.SetJobDone(rCns.ConsensusGroup()[i], spos.SrBlock, true) + } + + jobDone, _ := rCns.GetSelfJobDone(spos.SrBlock) + assert.False(t, jobDone) +} + +func TestGetSelfJobDone_ShouldReturnTrue(t *testing.T) { + rCns := spos.NewRoundConsensus( + []string{"1", "2", "3"}, + "2") + + rCns.SetJobDone("2", spos.SrBlock, true) + + jobDone, _ := rCns.GetSelfJobDone(spos.SrBlock) + assert.True(t, jobDone) +} + +func TestIsSelfLeaderInCurrentRound_ShouldReturnFalse(t *testing.T) { + rCns := spos.NewRoundConsensus( + []string{"1", "2", "3"}, + "2") + + genesisTime := time.Now() + currentTime := genesisTime + + rnd := chronology.NewRound(genesisTime, + currentTime, + RoundTimeDuration) + + chr := chronology.NewChronology( + true, + rnd, + genesisTime, + &mock.SyncTimerMock{}) + + cns := spos.NewConsensus( + nil, + rCns, + nil, + nil, + chr, + ) + + assert.False(t, cns.IsSelfLeaderInCurrentRound()) +} + +func TestIsSelfLeaderInCurrentRound_ShouldReturnTrue(t *testing.T) { + rCns := spos.NewRoundConsensus( + []string{"1", "2", "3"}, + "2") + + genesisTime := time.Now() + currentTime := genesisTime + + rnd := chronology.NewRound(genesisTime, + currentTime, + RoundTimeDuration) + + chr := chronology.NewChronology( + true, + rnd, + genesisTime, + &mock.SyncTimerMock{}) + + cns := spos.NewConsensus( + nil, + rCns, + nil, + nil, + chr, + ) + + rnd.UpdateRound(genesisTime, genesisTime.Add(RoundTimeDuration)) + assert.True(t, cns.IsSelfLeaderInCurrentRound()) +} diff --git a/consensus/spos/errors.go b/consensus/spos/errors.go index 0b4f44cd3f3..daf9b011537 100644 --- a/consensus/spos/errors.go +++ b/consensus/spos/errors.go @@ -34,6 +34,9 @@ var ErrNilConsensusData = errors.New("consensus data is nil") // ErrNilSignature is raised when a valid signature was expected but nil was used var ErrNilSignature = errors.New("signature is nil") +// ErrNilCommitment is raised when a valid commitment was expected but nil was used +var ErrNilCommitment = errors.New("commitment is nil") + // ErrNilKeyGenerator is raised when a valid key generator is expected but nil was used var ErrNilKeyGenerator = errors.New("key generator is nil") @@ -67,8 +70,14 @@ var ErrNilMarshalizer = errors.New("marshalizer is nil") // ErrNilBlockProcessor is raised when a valid block processor is expected but nil used var ErrNilBlockProcessor = errors.New("block processor is nil") +// ErrNilBlootstrap is raised when a valid block processor is expected but nil used +var ErrNilBlootstrap = errors.New("boostrap is nil") + // ErrInvalidKey is raised when an invalid key is used with a map var ErrInvalidKey = errors.New("map key is invalid") // ErrNilRoundState is raised when a valid round state is expected but nil used var ErrNilRoundState = errors.New("round state is nil") + +// ErrCommitmentHashDoesNotMatch is raised when the commitment hash does not match expected value +var ErrCommitmentHashDoesNotMatch = errors.New("commitment hash does not match") diff --git a/consensus/spos/export_test.go b/consensus/spos/export_test.go new file mode 100644 index 00000000000..700cda0b716 --- /dev/null +++ b/consensus/spos/export_test.go @@ -0,0 +1,51 @@ +package spos + +import ( + "time" + + "github.com/ElrondNetwork/elrond-go-sandbox/chronology" +) + +func (sposWorker *SPOSConsensusWorker) CheckSignaturesValidity(bitmap []byte) error { + return sposWorker.checkSignaturesValidity(bitmap) +} + +func (sposWorker *SPOSConsensusWorker) GenCommitmentHash() ([]byte, error) { + return sposWorker.genCommitmentHash() +} + +func (sposWorker *SPOSConsensusWorker) GenBitmap(subround chronology.SubroundId) []byte { + return sposWorker.genBitmap(subround) +} + +func (sposWorker *SPOSConsensusWorker) CheckCommitmentsValidity(bitmap []byte) error { + return sposWorker.checkCommitmentsValidity(bitmap) +} + +func (sposWorker *SPOSConsensusWorker) ShouldDropConsensusMessage(cnsDta *ConsensusData) bool { + return sposWorker.shouldDropConsensusMessage(cnsDta) +} + +func (sposWorker *SPOSConsensusWorker) CheckSignature(cnsData *ConsensusData) error { + return sposWorker.checkSignature(cnsData) +} + +func (sposWorker *SPOSConsensusWorker) ProcessReceivedBlock(cnsDta *ConsensusData) bool { + return sposWorker.processReceivedBlock(cnsDta) +} + +func (sposWorker *SPOSConsensusWorker) HaveTime() time.Duration { + return sposWorker.haveTime() +} + +func (sposWorker *SPOSConsensusWorker) InitReceivedMessages() { + sposWorker.initReceivedMessages() +} + +func (sposWorker *SPOSConsensusWorker) CleanReceivedMessages() { + sposWorker.cleanReceivedMessages() +} + +func (sposWorker *SPOSConsensusWorker) ExecuteMessage(cnsDtaList []*ConsensusData) { + sposWorker.executeMessage(cnsDtaList) +} diff --git a/consensus/spos/mock/blockProcessorMock.go b/consensus/spos/mock/blockProcessorMock.go index 327922f81cf..2b908b18791 100644 --- a/consensus/spos/mock/blockProcessorMock.go +++ b/consensus/spos/mock/blockProcessorMock.go @@ -2,6 +2,7 @@ package mock import ( "math/big" + "time" "github.com/ElrondNetwork/elrond-go-sandbox/data/block" "github.com/ElrondNetwork/elrond-go-sandbox/data/blockchain" @@ -9,24 +10,30 @@ import ( // BlockProcessorMock mocks the implementation for a BlockProcessor type BlockProcessorMock struct { - ProcessBlockCalled func(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody) error - ProcessAndCommitCalled func(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody) error - CommitBlockCalled func(blockChain *blockchain.BlockChain, header *block.Header, block *block.TxBlockBody) error - RevertAccountStateCalled func() - CreateGenesisBlockCalled func(balances map[string]big.Int, shardId uint32) *block.StateBlockBody - CreateTxBlockCalled func(shardId uint32, maxTxInBlock int, round int32, haveTime func() bool) (*block.TxBlockBody, error) - RemoveBlockTxsFromPoolCalled func(body *block.TxBlockBody) error - GetRootHashCalled func() []byte + ProcessBlockCalled func(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody, haveTime func() time.Duration) error + ProcessAndCommitCalled func(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody, haveTime func() time.Duration) error + CommitBlockCalled func(blockChain *blockchain.BlockChain, header *block.Header, block *block.TxBlockBody) error + RevertAccountStateCalled func() + CreateGenesisBlockCalled func(balances map[string]*big.Int, shardId uint32) (*block.StateBlockBody, error) + CreateTxBlockCalled func(shardId uint32, maxTxInBlock int, round int32, haveTime func() bool) (*block.TxBlockBody, error) + CreateEmptyBlockBodyCalled func(shardId uint32, round int32) *block.TxBlockBody + RemoveBlockTxsFromPoolCalled func(body *block.TxBlockBody) error + GetRootHashCalled func() []byte + SetOnRequestTransactionCalled func(f func(destShardID uint32, txHash []byte)) +} + +func (blProcMock *BlockProcessorMock) SetOnRequestTransaction(f func(destShardID uint32, txHash []byte)) { + blProcMock.SetOnRequestTransactionCalled(f) } // ProcessBlock mocks pocessing a block -func (blProcMock *BlockProcessorMock) ProcessBlock(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody) error { - return blProcMock.ProcessBlockCalled(blockChain, header, body) +func (blProcMock *BlockProcessorMock) ProcessBlock(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody, haveTime func() time.Duration) error { + return blProcMock.ProcessBlockCalled(blockChain, header, body, haveTime) } // ProcessAndCommit mocks processesing and committing a block -func (blProcMock *BlockProcessorMock) ProcessAndCommit(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody) error { - return blProcMock.ProcessAndCommitCalled(blockChain, header, body) +func (blProcMock *BlockProcessorMock) ProcessAndCommit(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody, haveTime func() time.Duration) error { + return blProcMock.ProcessAndCommitCalled(blockChain, header, body, haveTime) } // CommitBlock mocks the commit of a block @@ -40,8 +47,8 @@ func (blProcMock *BlockProcessorMock) RevertAccountState() { } // CreateGenesisBlockBody mocks the creation of a genesis block body -func (blProcMock *BlockProcessorMock) CreateGenesisBlockBody(balances map[string]big.Int, shardId uint32) *block.StateBlockBody { - panic("implement me") +func (blProcMock *BlockProcessorMock) CreateGenesisBlockBody(balances map[string]*big.Int, shardId uint32) (*block.StateBlockBody, error) { + return blProcMock.CreateGenesisBlockCalled(balances, shardId) } // CreateTxBlockBody mocks the creation of a transaction block body @@ -49,6 +56,11 @@ func (blProcMock *BlockProcessorMock) CreateTxBlockBody(shardId uint32, maxTxInB return blProcMock.CreateTxBlockCalled(shardId, maxTxInBlock, round, haveTime) } +// CreateEmptyBlockBody mocks the creation of an empty block body +func (blProcMock *BlockProcessorMock) CreateEmptyBlockBody(shardId uint32, round int32) *block.TxBlockBody { + return blProcMock.CreateEmptyBlockBodyCalled(shardId, round) +} + // RemoveBlockTxsFromPool mocks the removal of block transactions from transaction pools func (blProcMock *BlockProcessorMock) RemoveBlockTxsFromPool(body *block.TxBlockBody) error { // pretend we removed the data diff --git a/consensus/spos/mock/bootstrapMock.go b/consensus/spos/mock/bootstrapMock.go new file mode 100644 index 00000000000..82e4c45760b --- /dev/null +++ b/consensus/spos/mock/bootstrapMock.go @@ -0,0 +1,9 @@ +package mock + +type BootstrapMock struct { + ShouldSyncCalled func() bool +} + +func (boot *BootstrapMock) ShouldSync() bool { + return boot.ShouldSyncCalled() +} diff --git a/consensus/spos/mock/multiSigMock.go b/consensus/spos/mock/multiSigMock.go index 6787b9aa6eb..874de5156dd 100644 --- a/consensus/spos/mock/multiSigMock.go +++ b/consensus/spos/mock/multiSigMock.go @@ -23,9 +23,13 @@ type BelNevMock struct { CommitmentHashMock func(index uint16) ([]byte, error) CreateCommitmentMock func() ([]byte, []byte, error) AggregateCommitmentsMock func(bitmap []byte) ([]byte, error) - SignPartialMock func(bitmap []byte) ([]byte, error) - VerifyPartialMock func(index uint16, sig []byte, bitmap []byte) error + CreateSignatureShareMock func(bitmap []byte) ([]byte, error) + VerifySignatureShareMock func(index uint16, sig []byte, bitmap []byte) error AggregateSigsMock func(bitmap []byte) ([]byte, error) + AddCommitmentMock func(index uint16, value []byte) error + SetCommitmentSecretMock func([]byte) error + AddCommitmentHashMock func(uint16, []byte) error + CommitmentMock func(uint16) ([]byte, error) } func NewMultiSigner() *BelNevMock { @@ -76,41 +80,61 @@ func (bnm *BelNevMock) CreateCommitment() (commSecret []byte, commitment []byte, // SetCommitmentSecret sets the committment secret func (bnm *BelNevMock) SetCommitmentSecret(commSecret []byte) error { - bnm.commSecret = commSecret + if bnm.SetCommitmentSecretMock == nil { + bnm.commSecret = commSecret - return nil + return nil + } else { + return bnm.SetCommitmentSecretMock(commSecret) + } } // AddCommitmentHash adds a commitment hash to the list on the specified position func (bnm *BelNevMock) AddCommitmentHash(index uint16, commHash []byte) error { - bnm.commHash = commHash + if bnm.AddCommitmentHashMock == nil { + bnm.commHash = commHash - return nil + return nil + } else { + return bnm.AddCommitmentHashMock(index, commHash) + } } // CommitmentHash returns the commitment hash from the list on the specified position func (bnm *BelNevMock) CommitmentHash(index uint16) ([]byte, error) { - return bnm.commHash, nil + if bnm.CommitmentHashMock == nil { + return bnm.commHash, nil + } else { + return bnm.CommitmentHashMock(index) + } } // AddCommitment adds a commitment to the list on the specified position func (bnm *BelNevMock) AddCommitment(index uint16, value []byte) error { - if index >= uint16(len(bnm.commitments)) { - return crypto.ErrInvalidIndex - } + if bnm.AddCommitmentMock == nil { + if index >= uint16(len(bnm.commitments)) { + return crypto.ErrInvalidIndex + } - bnm.commitments[index] = value + bnm.commitments[index] = value - return nil + return nil + } else { + return bnm.AddCommitmentMock(index, value) + } } // Commitment returns the commitment from the list with the specified position func (bnm *BelNevMock) Commitment(index uint16) ([]byte, error) { - if index >= uint16(len(bnm.commitments)) { - return nil, crypto.ErrInvalidIndex + if bnm.CommitmentMock == nil { + if index >= uint16(len(bnm.commitments)) { + return nil, crypto.ErrInvalidIndex + } + + return bnm.commitments[index], nil + } else { + return bnm.CommitmentMock(index) } - - return bnm.commitments[index], nil } // AggregateCommitments aggregates the list of commitments @@ -125,13 +149,13 @@ func (bnm *BelNevMock) SetAggCommitment(aggCommitment []byte) error { return nil } -// SignPartial creates a partial signature -func (bnm *BelNevMock) SignPartial(bitmap []byte) ([]byte, error) { - return bnm.SignPartialMock(bitmap) +// CreateSignatureShare creates a partial signature +func (bnm *BelNevMock) CreateSignatureShare(bitmap []byte) ([]byte, error) { + return bnm.CreateSignatureShareMock(bitmap) } -// AddSignPartial adds the partial signature of the signer with specified position -func (bnm *BelNevMock) AddSignPartial(index uint16, sig []byte) error { +// AddSignatureShare adds the partial signature of the signer with specified position +func (bnm *BelNevMock) AddSignatureShare(index uint16, sig []byte) error { if index >= uint16(len(bnm.pubkeys)) { return crypto.ErrInvalidIndex } @@ -140,12 +164,21 @@ func (bnm *BelNevMock) AddSignPartial(index uint16, sig []byte) error { return nil } -// VerifyPartial verifies the partial signature of the signer with specified position -func (bnm *BelNevMock) VerifyPartial(index uint16, sig []byte, bitmap []byte) error { - return bnm.VerifyPartialMock(index, sig, bitmap) +// VerifySignatureShare verifies the partial signature of the signer with specified position +func (bnm *BelNevMock) VerifySignatureShare(index uint16, sig []byte, bitmap []byte) error { + return bnm.VerifySignatureShareMock(index, sig, bitmap) } // AggregateSigs aggregates all collected partial signatures func (bnm *BelNevMock) AggregateSigs(bitmap []byte) ([]byte, error) { return bnm.AggregateSigsMock(bitmap) } + +// SignatureShare +func (bnm *BelNevMock) SignatureShare(index uint16) ([]byte, error) { + if index >= uint16(len(bnm.sigs)) { + return nil, crypto.ErrInvalidIndex + } + + return bnm.sigs[index], nil +} diff --git a/consensus/spos/mock/syncTimeMock.go b/consensus/spos/mock/syncTimeMock.go new file mode 100644 index 00000000000..013eb3d0daa --- /dev/null +++ b/consensus/spos/mock/syncTimeMock.go @@ -0,0 +1,24 @@ +package mock + +import ( + "time" +) + +type SyncTimerMock struct { +} + +func (stm *SyncTimerMock) StartSync() { + panic("implement me") +} + +func (stm *SyncTimerMock) ClockOffset() time.Duration { + return time.Duration(0) +} + +func (stm *SyncTimerMock) FormattedCurrentTime(time.Duration) string { + panic("implement me") +} + +func (stm *SyncTimerMock) CurrentTime(time.Duration) time.Time { + panic("implement me") +} diff --git a/consensus/spos/roundConsensus.go b/consensus/spos/roundConsensus.go index d57288631e1..730523e804b 100644 --- a/consensus/spos/roundConsensus.go +++ b/consensus/spos/roundConsensus.go @@ -101,6 +101,11 @@ func (rCns *RoundConsensus) GetJobDone(key string, subroundId chronology.Subroun return retcode, nil } +// GetSelfJobDone returns the self state of the action done in subround given by the subroundId parameter +func (rCns *RoundConsensus) GetSelfJobDone(subroundId chronology.SubroundId) (bool, error) { + return rCns.GetJobDone(rCns.selfPubKey, subroundId) +} + // SetJobDone set the state of the action done, by the node represented by the key parameter, // in subround given by the subroundId parameter func (rCns *RoundConsensus) SetJobDone(key string, subroundId chronology.SubroundId, value bool) error { @@ -119,6 +124,11 @@ func (rCns *RoundConsensus) SetJobDone(key string, subroundId chronology.Subroun return nil } +// SetSelfJobDone set the self state of the action done in subround given by the subroundId parameter +func (rCns *RoundConsensus) SetSelfJobDone(subroundId chronology.SubroundId, value bool) error { + return rCns.SetJobDone(rCns.selfPubKey, subroundId, value) +} + // ResetRoundState method resets the state of each node from the current jobDone group, regarding to the // consensus validatorRoundStates func (rCns *RoundConsensus) ResetRoundState() { @@ -149,6 +159,11 @@ func (rCns *RoundConsensus) IsValidatorInBitmap(validator string) bool { return isJobDone } +// IsSelfInBitmap method checks if the current node is part of the bitmap received from leader +func (rCns *RoundConsensus) IsSelfInBitmap() bool { + return rCns.IsValidatorInBitmap(rCns.selfPubKey) +} + // IsNodeInConsensusGroup method checks if the node is part of the jobDone group of the current round func (rCns *RoundConsensus) IsNodeInConsensusGroup(node string) bool { for i := 0; i < len(rCns.consensusGroup); i++ { diff --git a/consensus/spos/roundConsensus_test.go b/consensus/spos/roundConsensus_test.go index 519b07bb734..c6a4ec68f85 100644 --- a/consensus/spos/roundConsensus_test.go +++ b/consensus/spos/roundConsensus_test.go @@ -363,3 +363,28 @@ func TestRoundConsensus_SetJobDoneShouldNotBeSetWhenValidatorIsNotInTheConsensus isJobDone, _ := rndc.GetJobDone("4", spos.SrBlock) assert.False(t, isJobDone) } + +func TestIsSelfInBitmapGroup_ShoudReturnFalse(t *testing.T) { + rCns := spos.NewRoundConsensus( + []string{"1", "2", "3"}, + "2") + + for i := 0; i < len(rCns.ConsensusGroup()); i++ { + if rCns.ConsensusGroup()[i] == rCns.SelfPubKey() { + continue + } + + rCns.SetJobDone(rCns.ConsensusGroup()[i], spos.SrBitmap, true) + } + + assert.False(t, rCns.IsSelfInBitmap()) +} + +func TestIsSelfInBitmapGroup_ShoudReturnTrue(t *testing.T) { + rCns := spos.NewRoundConsensus( + []string{"1", "2", "3"}, + "2") + + rCns.SetJobDone(rCns.SelfPubKey(), spos.SrBitmap, true) + assert.True(t, rCns.IsSelfInBitmap()) +} diff --git a/consensus/spos/sposConsensusMessenger.go b/consensus/spos/sposConsensusMessenger.go new file mode 100644 index 00000000000..35c4ab5c5c9 --- /dev/null +++ b/consensus/spos/sposConsensusMessenger.go @@ -0,0 +1,131 @@ +package spos + +func (sposWorker *SPOSConsensusWorker) initReceivedMessages() { + sposWorker.mutReceivedMessages.Lock() + + sposWorker.ReceivedMessages = make(map[MessageType][]*ConsensusData) + + sposWorker.ReceivedMessages[MtBlockBody] = make([]*ConsensusData, 0) + sposWorker.ReceivedMessages[MtBlockHeader] = make([]*ConsensusData, 0) + sposWorker.ReceivedMessages[MtCommitmentHash] = make([]*ConsensusData, 0) + sposWorker.ReceivedMessages[MtBitmap] = make([]*ConsensusData, 0) + sposWorker.ReceivedMessages[MtCommitment] = make([]*ConsensusData, 0) + sposWorker.ReceivedMessages[MtSignature] = make([]*ConsensusData, 0) + + sposWorker.mutReceivedMessages.Unlock() +} + +func (sposWorker *SPOSConsensusWorker) cleanReceivedMessages() { + sposWorker.mutReceivedMessages.Lock() + + for i := MtBlockBody; i <= MtSignature; i++ { + cnsDataList := sposWorker.ReceivedMessages[i] + + if len(cnsDataList) == 0 { + continue + } + + cleanedCnsDtaList := sposWorker.getCleanedList(cnsDataList) + sposWorker.ReceivedMessages[i] = cleanedCnsDtaList + } + + sposWorker.mutReceivedMessages.Unlock() +} + +func (sposWorker *SPOSConsensusWorker) executeReceivedMessages(cnsDta *ConsensusData) { + sposWorker.mutReceivedMessages.Lock() + + cnsDataList := sposWorker.ReceivedMessages[cnsDta.MsgType] + cnsDataList = append(cnsDataList, cnsDta) + sposWorker.ReceivedMessages[cnsDta.MsgType] = cnsDataList + + for i := MtBlockBody; i <= MtSignature; i++ { + cnsDataList = sposWorker.ReceivedMessages[i] + + if len(cnsDataList) == 0 { + continue + } + + sposWorker.executeMessage(cnsDataList) + cleanedCnsDtaList := sposWorker.getCleanedList(cnsDataList) + sposWorker.ReceivedMessages[i] = cleanedCnsDtaList + } + + sposWorker.mutReceivedMessages.Unlock() +} + +func (sposWorker *SPOSConsensusWorker) executeMessage(cnsDtaList []*ConsensusData) { + for i, cnsDta := range cnsDtaList { + if cnsDta == nil { + continue + } + + if sposWorker.boot.ShouldSync() { + continue + } + + if sposWorker.shouldDropConsensusMessage(cnsDta) { + continue + } + + switch cnsDta.MsgType { + case MtBlockBody: + if sposWorker.Cns.Status(SrStartRound) != SsFinished { + continue + } + case MtBlockHeader: + if sposWorker.Cns.Status(SrStartRound) != SsFinished { + continue + } + case MtCommitmentHash: + if sposWorker.Cns.Status(SrBlock) != SsFinished { + continue + } + case MtBitmap: + if sposWorker.Cns.Status(SrBlock) != SsFinished { + continue + } + case MtCommitment: + if sposWorker.Cns.Status(SrBitmap) != SsFinished { + continue + } + case MtSignature: + if sposWorker.Cns.Status(SrBitmap) != SsFinished { + continue + } + } + + cnsDtaList[i] = nil + + if ch, ok := sposWorker.MessageChannels[cnsDta.MsgType]; ok { + ch <- cnsDta + } + } +} + +func (sposWorker *SPOSConsensusWorker) getCleanedList(cnsDataList []*ConsensusData) []*ConsensusData { + cleanedCnsDataList := make([]*ConsensusData, 0) + + for i := 0; i < len(cnsDataList); i++ { + if cnsDataList[i] == nil { + continue + } + + if sposWorker.shouldDropConsensusMessage(cnsDataList[i]) { + continue + } + + cleanedCnsDataList = append(cleanedCnsDataList, cnsDataList[i]) + } + + return cleanedCnsDataList +} + +func (sposWorker *SPOSConsensusWorker) checkReceivedMessageChannel() { + for { + select { + case cnsDta := <-sposWorker.ReceivedMessageChannel: + sposWorker.executeReceivedMessages(cnsDta) + } + } +} diff --git a/consensus/spos/sposConsensusMessenger_test.go b/consensus/spos/sposConsensusMessenger_test.go new file mode 100644 index 00000000000..c0d39a57f68 --- /dev/null +++ b/consensus/spos/sposConsensusMessenger_test.go @@ -0,0 +1,717 @@ +package spos_test + +import ( + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go-sandbox/consensus/spos" + "github.com/ElrondNetwork/elrond-go-sandbox/consensus/spos/mock" + "github.com/ElrondNetwork/elrond-go-sandbox/data/block" + "github.com/ElrondNetwork/elrond-go-sandbox/data/blockchain" + "github.com/stretchr/testify/assert" +) + +func TestInitReceivedMessages_ShouldInitMap(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + cnWorker.ReceivedMessages = nil + + cnWorker.InitReceivedMessages() + + assert.NotNil(t, cnWorker.ReceivedMessages[spos.MtBlockBody]) +} + +func TestCleanReceivedMessages_ShouldCleanList(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + blk := &block.TxBlockBody{} + message, _ := mock.MarshalizerMock{}.Marshal(blk) + + cnWorker.InitReceivedMessages() + + cnsDta := spos.NewConsensusData( + message, + nil, + []byte(cnWorker.Cns.ConsensusGroup()[1]), + []byte("sig"), + spos.MtBlockBody, + cnWorker.GetRoundTime(), + -1, + ) + + cnsDataList := cnWorker.ReceivedMessages[cnsDta.MsgType] + cnsDataList = append(cnsDataList, cnsDta) + cnWorker.ReceivedMessages[cnsDta.MsgType] = cnsDataList + + cnWorker.CleanReceivedMessages() + + assert.Equal(t, 0, len(cnWorker.ReceivedMessages[spos.MtBlockBody])) +} + +func TestExecuteMessages_ShouldNotExecuteWhenConsensusDataIsNil(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + blk := &block.TxBlockBody{} + message, _ := mock.MarshalizerMock{}.Marshal(blk) + + cnWorker.InitReceivedMessages() + + cnsDta := spos.NewConsensusData( + message, + nil, + []byte(cnWorker.Cns.ConsensusGroup()[1]), + []byte("sig"), + spos.MtBlockBody, + cnWorker.GetRoundTime(), + 0, + ) + + cnsDataList := cnWorker.ReceivedMessages[cnsDta.MsgType] + cnsDataList = append(cnsDataList, nil) + cnWorker.ReceivedMessages[cnsDta.MsgType] = cnsDataList + + cnWorker.ExecuteMessage(cnsDataList) + + assert.Nil(t, cnWorker.ReceivedMessages[cnsDta.MsgType][0]) +} + +func TestExecuteMessages_ShouldNotExecuteWhenShouldDropConsensusMessage(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + blk := &block.TxBlockBody{} + message, _ := mock.MarshalizerMock{}.Marshal(blk) + + cnWorker.InitReceivedMessages() + + cnsDta := spos.NewConsensusData( + message, + nil, + []byte(cnWorker.Cns.ConsensusGroup()[1]), + []byte("sig"), + spos.MtBlockBody, + cnWorker.GetRoundTime(), + -1, + ) + + cnsDataList := cnWorker.ReceivedMessages[cnsDta.MsgType] + cnsDataList = append(cnsDataList, cnsDta) + cnWorker.ReceivedMessages[cnsDta.MsgType] = cnsDataList + + cnWorker.ExecuteMessage(cnsDataList) + + assert.NotNil(t, cnWorker.ReceivedMessages[cnsDta.MsgType][0]) +} + +func TestExecuteMessages_ShouldNotExecuteWhenShouldSync(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return true + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + blk := &block.TxBlockBody{} + message, _ := mock.MarshalizerMock{}.Marshal(blk) + + cnWorker.InitReceivedMessages() + + cnsDta := spos.NewConsensusData( + message, + nil, + []byte(cnWorker.Cns.ConsensusGroup()[1]), + []byte("sig"), + spos.MtBlockBody, + cnWorker.GetRoundTime(), + 0, + ) + + cnsDataList := cnWorker.ReceivedMessages[cnsDta.MsgType] + cnsDataList = append(cnsDataList, cnsDta) + cnWorker.ReceivedMessages[cnsDta.MsgType] = cnsDataList + + cnWorker.ExecuteMessage(cnsDataList) + + assert.NotNil(t, cnWorker.ReceivedMessages[cnsDta.MsgType][0]) +} + +func TestExecuteBlockBodyMessages_ShouldNotExecuteWhenStartRoundIsNotFinished(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + blk := &block.TxBlockBody{} + message, _ := mock.MarshalizerMock{}.Marshal(blk) + + cnWorker.InitReceivedMessages() + + cnsDta := spos.NewConsensusData( + message, + nil, + []byte(cnWorker.Cns.ConsensusGroup()[1]), + []byte("sig"), + spos.MtBlockBody, + cnWorker.GetRoundTime(), + 0, + ) + + cnsDataList := cnWorker.ReceivedMessages[cnsDta.MsgType] + cnsDataList = append(cnsDataList, cnsDta) + cnWorker.ReceivedMessages[cnsDta.MsgType] = cnsDataList + + cnWorker.ExecuteMessage(cnsDataList) + + assert.NotNil(t, cnWorker.ReceivedMessages[cnsDta.MsgType][0]) +} + +func TestExecuteBlockHeaderMessages_ShouldNotExecuteWhenStartRoundIsNotFinished(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + blk := &block.TxBlockBody{} + message, _ := mock.MarshalizerMock{}.Marshal(blk) + + cnWorker.InitReceivedMessages() + + cnsDta := spos.NewConsensusData( + message, + nil, + []byte(cnWorker.Cns.ConsensusGroup()[1]), + []byte("sig"), + spos.MtBlockHeader, + cnWorker.GetRoundTime(), + 0, + ) + + cnsDataList := cnWorker.ReceivedMessages[cnsDta.MsgType] + cnsDataList = append(cnsDataList, cnsDta) + cnWorker.ReceivedMessages[cnsDta.MsgType] = cnsDataList + + cnWorker.ExecuteMessage(cnsDataList) + + assert.NotNil(t, cnWorker.ReceivedMessages[cnsDta.MsgType][0]) +} + +func TestExecuteCommitmentHashMessages_ShouldNotExecuteWhenBlockIsNotFinished(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + blk := &block.TxBlockBody{} + message, _ := mock.MarshalizerMock{}.Marshal(blk) + + cnWorker.InitReceivedMessages() + + cnsDta := spos.NewConsensusData( + message, + nil, + []byte(cnWorker.Cns.ConsensusGroup()[1]), + []byte("sig"), + spos.MtCommitmentHash, + cnWorker.GetRoundTime(), + 0, + ) + + cnsDataList := cnWorker.ReceivedMessages[cnsDta.MsgType] + cnsDataList = append(cnsDataList, cnsDta) + cnWorker.ReceivedMessages[cnsDta.MsgType] = cnsDataList + + cnWorker.ExecuteMessage(cnsDataList) + + assert.NotNil(t, cnWorker.ReceivedMessages[cnsDta.MsgType][0]) +} + +func TestExecuteBitmapMessages_ShouldNotExecuteWhenBlockIsNotFinished(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + blk := &block.TxBlockBody{} + message, _ := mock.MarshalizerMock{}.Marshal(blk) + + cnWorker.InitReceivedMessages() + + cnsDta := spos.NewConsensusData( + message, + nil, + []byte(cnWorker.Cns.ConsensusGroup()[1]), + []byte("sig"), + spos.MtBitmap, + cnWorker.GetRoundTime(), + 0, + ) + + cnsDataList := cnWorker.ReceivedMessages[cnsDta.MsgType] + cnsDataList = append(cnsDataList, cnsDta) + cnWorker.ReceivedMessages[cnsDta.MsgType] = cnsDataList + + cnWorker.ExecuteMessage(cnsDataList) + + assert.NotNil(t, cnWorker.ReceivedMessages[cnsDta.MsgType][0]) +} + +func TestExecuteCommitmentMessages_ShouldNotExecuteWhenBitmapIsNotFinished(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + blk := &block.TxBlockBody{} + message, _ := mock.MarshalizerMock{}.Marshal(blk) + + cnWorker.InitReceivedMessages() + + cnsDta := spos.NewConsensusData( + message, + nil, + []byte(cnWorker.Cns.ConsensusGroup()[1]), + []byte("sig"), + spos.MtCommitment, + cnWorker.GetRoundTime(), + 0, + ) + + cnsDataList := cnWorker.ReceivedMessages[cnsDta.MsgType] + cnsDataList = append(cnsDataList, cnsDta) + cnWorker.ReceivedMessages[cnsDta.MsgType] = cnsDataList + + cnWorker.ExecuteMessage(cnsDataList) + + assert.NotNil(t, cnWorker.ReceivedMessages[cnsDta.MsgType][0]) +} + +func TestExecuteSignatureMessages_ShouldNotExecuteWhenBitmapIsNotFinished(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + blk := &block.TxBlockBody{} + message, _ := mock.MarshalizerMock{}.Marshal(blk) + + cnWorker.InitReceivedMessages() + + cnsDta := spos.NewConsensusData( + message, + nil, + []byte(cnWorker.Cns.ConsensusGroup()[1]), + []byte("sig"), + spos.MtSignature, + cnWorker.GetRoundTime(), + 0, + ) + + cnsDataList := cnWorker.ReceivedMessages[cnsDta.MsgType] + cnsDataList = append(cnsDataList, cnsDta) + cnWorker.ReceivedMessages[cnsDta.MsgType] = cnsDataList + + cnWorker.ExecuteMessage(cnsDataList) + + assert.NotNil(t, cnWorker.ReceivedMessages[cnsDta.MsgType][0]) +} + +func TestExecuteMessages_ShouldExecute(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + blk := &block.TxBlockBody{} + message, _ := mock.MarshalizerMock{}.Marshal(blk) + + cnWorker.InitReceivedMessages() + + cnsDta := spos.NewConsensusData( + message, + nil, + []byte(cnWorker.Cns.ConsensusGroup()[1]), + []byte("sig"), + spos.MtBlockBody, + cnWorker.GetRoundTime(), + 0, + ) + + cnsDataList := cnWorker.ReceivedMessages[cnsDta.MsgType] + cnsDataList = append(cnsDataList, cnsDta) + cnWorker.ReceivedMessages[cnsDta.MsgType] = cnsDataList + + cnWorker.Cns.SetStatus(spos.SrStartRound, spos.SsFinished) + + cnWorker.ExecuteMessage(cnsDataList) + + assert.Nil(t, cnWorker.ReceivedMessages[cnsDta.MsgType][0]) +} diff --git a/consensus/spos/sposConsensusWorker.go b/consensus/spos/sposConsensusWorker.go index 4507e486e0a..98d6f13d2c1 100644 --- a/consensus/spos/sposConsensusWorker.go +++ b/consensus/spos/sposConsensusWorker.go @@ -3,7 +3,10 @@ package spos import ( "bytes" "encoding/base64" + "encoding/hex" "fmt" + "sync" + "time" "github.com/ElrondNetwork/elrond-go-sandbox/chronology" "github.com/ElrondNetwork/elrond-go-sandbox/crypto" @@ -41,7 +44,14 @@ const ( const shardId = 0 //TODO: maximum transactions in one block (this should be injected, and this const should be removed later) -const maxTransactionsInBlock = 1000 +const maxTransactionsInBlock = 15000 + +// consensusSubrounds specifies how many subrounds of consensus are in this implementation +const consensusSubrounds = 6 + +// maxBlockProcessingTimePercent specifies which is the max allocated time percent, +// for processing block, from the total time of one round +const maxBlockProcessingTimePercent = float64(0.85) // MessageType specifies what type of message was received type MessageType int @@ -63,7 +73,7 @@ const ( MtSignature ) -// ConsensusData defines the data needed by spos to comunicate between nodes over network in all subrounds +// ConsensusData defines the data needed by spos to communicate between nodes over network in all subrounds type ConsensusData struct { BlockHeaderHash []byte SubRoundData []byte @@ -107,26 +117,27 @@ func (cd *ConsensusData) ID() string { return id } -// SPOSConsensusWorker defines the data needed by spos to comunicate between nodes which are in the validators group +// SPOSConsensusWorker defines the data needed by spos to communicate between nodes which are in the validators group type SPOSConsensusWorker struct { - Cns *Consensus - Header *block.Header - BlockBody *block.TxBlockBody - BlockChain *blockchain.BlockChain - Rounds int // only for statistic - RoundsWithBlock int // only for statistic - BlockProcessor process.BlockProcessor - MessageChannels map[MessageType]chan *ConsensusData - hasher hashing.Hasher - marshalizer marshal.Marshalizer - keyGen crypto.KeyGenerator - privKey crypto.PrivateKey - pubKey crypto.PublicKey - multiSigner crypto.MultiSigner - // this is a pointer to a function which actually send the message from a node to the network - SendMessage func(consensus *ConsensusData) - BroadcastHeader func([]byte) - BroadcastBlockBody func([]byte) + Cns *Consensus + Header *block.Header + BlockBody *block.TxBlockBody + BlockChain *blockchain.BlockChain + BlockProcessor process.BlockProcessor + boot process.Bootstraper + MessageChannels map[MessageType]chan *ConsensusData + ReceivedMessageChannel chan *ConsensusData + hasher hashing.Hasher + marshalizer marshal.Marshalizer + keyGen crypto.KeyGenerator + privKey crypto.PrivateKey + pubKey crypto.PublicKey + multiSigner crypto.MultiSigner + SendMessage func(consensus *ConsensusData) + BroadcastHeader func([]byte) + BroadcastBlockBody func([]byte) + ReceivedMessages map[MessageType][]*ConsensusData + mutReceivedMessages sync.RWMutex } // NewConsensusWorker creates a new SPOSConsensusWorker object @@ -136,6 +147,7 @@ func NewConsensusWorker( hasher hashing.Hasher, marshalizer marshal.Marshalizer, blockProcessor process.BlockProcessor, + boot process.Bootstraper, multisig crypto.MultiSigner, keyGen crypto.KeyGenerator, privKey crypto.PrivateKey, @@ -148,6 +160,7 @@ func NewConsensusWorker( hasher, marshalizer, blockProcessor, + boot, multisig, keyGen, privKey, @@ -164,6 +177,7 @@ func NewConsensusWorker( hasher: hasher, marshalizer: marshalizer, BlockProcessor: blockProcessor, + boot: boot, multiSigner: multisig, keyGen: keyGen, privKey: privKey, @@ -179,14 +193,19 @@ func NewConsensusWorker( nodes = len(cns.RoundConsensus.ConsensusGroup()) } - sposWorker.MessageChannels[MtBlockBody] = make(chan *ConsensusData, nodes) - sposWorker.MessageChannels[MtBlockHeader] = make(chan *ConsensusData, nodes) - sposWorker.MessageChannels[MtCommitmentHash] = make(chan *ConsensusData, nodes) - sposWorker.MessageChannels[MtBitmap] = make(chan *ConsensusData, nodes) - sposWorker.MessageChannels[MtCommitment] = make(chan *ConsensusData, nodes) - sposWorker.MessageChannels[MtSignature] = make(chan *ConsensusData, nodes) + sposWorker.MessageChannels[MtBlockBody] = make(chan *ConsensusData) + sposWorker.MessageChannels[MtBlockHeader] = make(chan *ConsensusData) + sposWorker.MessageChannels[MtCommitmentHash] = make(chan *ConsensusData) + sposWorker.MessageChannels[MtBitmap] = make(chan *ConsensusData) + sposWorker.MessageChannels[MtCommitment] = make(chan *ConsensusData) + sposWorker.MessageChannels[MtSignature] = make(chan *ConsensusData) + + sposWorker.ReceivedMessageChannel = make(chan *ConsensusData, nodes*consensusSubrounds) - go sposWorker.CheckChannels() + sposWorker.initReceivedMessages() + + go sposWorker.checkReceivedMessageChannel() + go sposWorker.checkChannels() return &sposWorker, nil } @@ -197,6 +216,7 @@ func checkNewConsensusWorkerParams( hasher hashing.Hasher, marshalizer marshal.Marshalizer, blockProcessor process.BlockProcessor, + boot process.Bootstraper, multisig crypto.MultiSigner, keyGen crypto.KeyGenerator, privKey crypto.PrivateKey, @@ -222,6 +242,10 @@ func checkNewConsensusWorkerParams( return ErrNilBlockProcessor } + if boot == nil { + return ErrNilBlootstrap + } + if multisig == nil { return ErrNilMultiSigner } @@ -241,7 +265,52 @@ func checkNewConsensusWorkerParams( return nil } -// DoStartRoundJob method is the function which actually do the job of the StartRound subround +func (sposWorker *SPOSConsensusWorker) checkSignaturesValidity(bitmap []byte) error { + nbBitsBitmap := len(bitmap) * 8 + consensusGroup := sposWorker.Cns.ConsensusGroup() + consensusGroupSize := len(consensusGroup) + size := consensusGroupSize + + if consensusGroupSize > nbBitsBitmap { + size = nbBitsBitmap + } + + for i := 0; i < size; i++ { + indexRequired := (bitmap[i/8] & (1 << uint16(i%8))) > 0 + + if !indexRequired { + continue + } + + pubKey := consensusGroup[i] + isSigJobDone, err := sposWorker.Cns.GetJobDone(pubKey, SrSignature) + + if err != nil { + return err + } + + if !isSigJobDone { + return ErrNilSignature + } + + signature, err := sposWorker.multiSigner.SignatureShare(uint16(i)) + + if err != nil { + return err + } + + // verify partial signature + err = sposWorker.multiSigner.VerifySignatureShare(uint16(i), signature, bitmap) + + if err != nil { + return err + } + } + + return nil +} + +// DoStartRoundJob method is the function which actually does the job of the StartRound subround // (it is used as the handler function of the doSubroundJob pointer variable function in Subround struct, // from spos package) func (sposWorker *SPOSConsensusWorker) DoStartRoundJob() bool { @@ -250,6 +319,7 @@ func (sposWorker *SPOSConsensusWorker) DoStartRoundJob() bool { sposWorker.Cns.Data = nil sposWorker.Cns.ResetRoundStatus() sposWorker.Cns.ResetRoundState() + sposWorker.cleanReceivedMessages() leader, err := sposWorker.Cns.GetLeader() @@ -264,23 +334,18 @@ func (sposWorker *SPOSConsensusWorker) DoStartRoundJob() bool { } log.Info(fmt.Sprintf("%sStep 0: Preparing for this round with leader %s%s\n", - sposWorker.Cns.getFormattedTime(), getPrettyByteArray([]byte(leader)), msg)) - - // TODO: Unccomment ShouldSync check - //if sposWorker.ShouldSync() { // if node is not synchronized yet, it has to continue the bootstrapping mechanism - // log.Info(fmt.Sprintf("%sCanceled round %d in subround %s, not synchronized", - // sposWorker.Cns.getFormattedTime(), sposWorker.Cns.Chr.Round().Index(), sposWorker.Cns.GetSubroundName(SrBlock))) - // sposWorker.Cns.Chr.SetSelfSubround(-1) - // return false - //} + sposWorker.Cns.getFormattedTime(), hex.EncodeToString([]byte(leader)), msg)) pubKeys := sposWorker.Cns.ConsensusGroup() selfIndex, err := sposWorker.Cns.IndexSelfConsensusGroup() if err != nil { - log.Error(err.Error()) + log.Info(fmt.Sprintf("%sCanceled round %d in subround %s, NOT IN THE CONSENSUS GROUP\n", + sposWorker.Cns.getFormattedTime(), sposWorker.Cns.Chr.Round().Index(), sposWorker.Cns.GetSubroundName(SrStartRound))) + sposWorker.Cns.Chr.SetSelfSubround(-1) + return false } @@ -288,14 +353,18 @@ func (sposWorker *SPOSConsensusWorker) DoStartRoundJob() bool { if err != nil { log.Error(err.Error()) + sposWorker.Cns.Chr.SetSelfSubround(-1) + return false } + sposWorker.Cns.SetStatus(SrStartRound, SsFinished) + return true } -// DoEndRoundJob method is the function which actually do the job of the EndRound subround +// DoEndRoundJob method is the function which actually does the job of the EndRound subround // (it is used as the handler function of the doSubroundJob pointer variable function in Subround struct, // from spos package) func (sposWorker *SPOSConsensusWorker) DoEndRoundJob() bool { @@ -305,6 +374,13 @@ func (sposWorker *SPOSConsensusWorker) DoEndRoundJob() bool { bitmap := sposWorker.genBitmap(SrBitmap) + err := sposWorker.checkSignaturesValidity(bitmap) + + if err != nil { + log.Error(err.Error()) + return false + } + // Aggregate sig and add it to the block sig, err := sposWorker.multiSigner.AggregateSigs(bitmap) @@ -320,23 +396,34 @@ func (sposWorker *SPOSConsensusWorker) DoEndRoundJob() bool { if err != nil { log.Error(err.Error()) - sposWorker.BlockProcessor.RevertAccountState() return false } + sposWorker.Cns.SetStatus(SrEndRound, SsFinished) + + err = sposWorker.BlockProcessor.RemoveBlockTxsFromPool(sposWorker.BlockBody) + + if err != nil { + log.Error(err.Error()) + } + // broadcast block body - err = sposWorker.broadcastTxBlockBody() + err = sposWorker.broadcastTxBlockBody(sposWorker.BlockBody) + if err != nil { - log.Error(fmt.Sprintf("%s\n", err.Error())) + log.Error(err.Error()) } // broadcast header - err = sposWorker.broadcastHeader() + err = sposWorker.broadcastHeader(sposWorker.Header) + if err != nil { - log.Error(fmt.Sprintf("%s\n", err.Error())) + log.Error(err.Error()) } - if sposWorker.Cns.IsNodeLeaderInCurrentRound(sposWorker.Cns.SelfPubKey()) { + log.Info(fmt.Sprintf("%sStep 6: Commiting and broadcasting TxBlockBody and Header\n", sposWorker.Cns.getFormattedTime())) + + if sposWorker.Cns.IsSelfLeaderInCurrentRound() { log.Info(fmt.Sprintf("\n%s++++++++++++++++++++ ADDED PROPOSED BLOCK WITH NONCE %d IN BLOCKCHAIN ++++++++++++++++++++\n\n", sposWorker.Cns.getFormattedTime(), sposWorker.Header.Nonce)) } else { @@ -344,9 +431,6 @@ func (sposWorker *SPOSConsensusWorker) DoEndRoundJob() bool { sposWorker.Cns.getFormattedTime(), sposWorker.Header.Nonce)) } - sposWorker.Rounds++ // only for statistic - sposWorker.RoundsWithBlock++ // only for statistic - return true } @@ -354,7 +438,11 @@ func (sposWorker *SPOSConsensusWorker) DoEndRoundJob() bool { // (it is used as a handler function of the doSubroundJob pointer function declared in Subround struct, // from spos package) func (sposWorker *SPOSConsensusWorker) DoBlockJob() bool { - isBlockJobDone, err := sposWorker.Cns.GetJobDone(sposWorker.Cns.SelfPubKey(), SrBlock) + if sposWorker.boot.ShouldSync() { // if node is not synchronized yet, it has to continue the bootstrapping mechanism + return false + } + + isBlockJobDone, err := sposWorker.Cns.GetSelfJobDone(SrBlock) if err != nil { log.Error(err.Error()) @@ -363,7 +451,7 @@ func (sposWorker *SPOSConsensusWorker) DoBlockJob() bool { if sposWorker.Cns.Status(SrBlock) == SsFinished || // is subround Block already finished? isBlockJobDone || // has block already been sent? - !sposWorker.Cns.IsNodeLeaderInCurrentRound(sposWorker.Cns.SelfPubKey()) { // is another node leader in this round? + !sposWorker.Cns.IsSelfLeaderInCurrentRound() { // is another node leader in this round? return false } @@ -372,7 +460,7 @@ func (sposWorker *SPOSConsensusWorker) DoBlockJob() bool { return false } - err = sposWorker.Cns.SetJobDone(sposWorker.Cns.SelfPubKey(), SrBlock, true) + err = sposWorker.Cns.SetSelfJobDone(SrBlock, true) if err != nil { log.Error(err.Error()) @@ -386,13 +474,13 @@ func (sposWorker *SPOSConsensusWorker) DoBlockJob() bool { // SendBlockBody method send the proposed block body in the Block subround func (sposWorker *SPOSConsensusWorker) SendBlockBody() bool { - - currentSubRound := sposWorker.GetSubround() - + roundIndex := sposWorker.Cns.Chr.Round().Index() haveTime := func() bool { - if sposWorker.GetSubround() > currentSubRound { + if roundIndex < sposWorker.Cns.Chr.Round().Index() || + sposWorker.GetSubround() > chronology.SubroundId(SrBlock) { return false } + return true } @@ -416,7 +504,7 @@ func (sposWorker *SPOSConsensusWorker) SendBlockBody() bool { []byte(sposWorker.Cns.selfPubKey), nil, MtBlockBody, - sposWorker.GetTime(), + sposWorker.GetRoundTime(), sposWorker.Cns.Chr.Round().Index()) if !sposWorker.SendConsensusMessage(dta) { @@ -432,31 +520,25 @@ func (sposWorker *SPOSConsensusWorker) SendBlockBody() bool { // GetSubround method returns current subround taking in consideration the current time func (sposWorker *SPOSConsensusWorker) GetSubround() chronology.SubroundId { - return sposWorker.Cns.Chr.GetSubroundFromDateTime(sposWorker.Cns.Chr.SyncTime().CurrentTime(sposWorker.Cns.Chr.ClockOffset())) + chr := sposWorker.Cns.Chr + currentTime := chr.SyncTime().CurrentTime(chr.ClockOffset()) + + return chr.GetSubroundFromDateTime(currentTime) } // SendBlockHeader method send the proposed block header in the Block subround func (sposWorker *SPOSConsensusWorker) SendBlockHeader() bool { hdr := &block.Header{} + hdr.Round = uint32(sposWorker.Cns.Chr.Round().Index()) + hdr.TimeStamp = sposWorker.GetRoundTime() + if sposWorker.BlockChain.CurrentBlockHeader == nil { hdr.Nonce = 1 - hdr.Round = uint32(sposWorker.Cns.Chr.Round().Index()) - hdr.TimeStamp = sposWorker.GetTime() + hdr.PrevHash = sposWorker.BlockChain.GenesisHeaderHash } else { hdr.Nonce = sposWorker.BlockChain.CurrentBlockHeader.Nonce + 1 - hdr.Round = uint32(sposWorker.Cns.Chr.Round().Index()) - hdr.TimeStamp = sposWorker.GetTime() - - prevHeader, err := sposWorker.marshalizer.Marshal(sposWorker.BlockChain.CurrentBlockHeader) - - if err != nil { - log.Error(err.Error()) - return false - } - - prevHeaderHash := sposWorker.hasher.Compute(string(prevHeader)) - hdr.PrevHash = prevHeaderHash + hdr.PrevHash = sposWorker.BlockChain.CurrentBlockHeaderHash } blkStr, err := sposWorker.marshalizer.Marshal(sposWorker.BlockBody) @@ -483,7 +565,7 @@ func (sposWorker *SPOSConsensusWorker) SendBlockHeader() bool { []byte(sposWorker.Cns.SelfPubKey()), nil, MtBlockHeader, - sposWorker.GetTime(), + sposWorker.GetRoundTime(), sposWorker.Cns.Chr.Round().Index()) if !sposWorker.SendConsensusMessage(dta) { @@ -491,10 +573,10 @@ func (sposWorker *SPOSConsensusWorker) SendBlockHeader() bool { } log.Info(fmt.Sprintf("%sStep 1: Sending block header with nonce %d and hash %s\n", - sposWorker.Cns.getFormattedTime(), hdr.Nonce, getPrettyByteArray(hdrHash))) + sposWorker.Cns.getFormattedTime(), hdr.Nonce, toB64(hdrHash))) - sposWorker.Header = hdr sposWorker.Cns.Data = hdrHash + sposWorker.Header = hdr return true } @@ -549,7 +631,7 @@ func (sposWorker *SPOSConsensusWorker) DoCommitmentHashJob() bool { } } - isCommHashJobDone, err := sposWorker.Cns.GetJobDone(sposWorker.Cns.SelfPubKey(), SrCommitmentHash) + isCommHashJobDone, err := sposWorker.Cns.GetSelfJobDone(SrCommitmentHash) if err != nil { log.Error(err.Error()) @@ -575,7 +657,7 @@ func (sposWorker *SPOSConsensusWorker) DoCommitmentHashJob() bool { []byte(sposWorker.Cns.SelfPubKey()), nil, MtCommitmentHash, - sposWorker.GetTime(), + sposWorker.GetRoundTime(), sposWorker.Cns.Chr.Round().Index()) if !sposWorker.SendConsensusMessage(dta) { @@ -584,7 +666,7 @@ func (sposWorker *SPOSConsensusWorker) DoCommitmentHashJob() bool { log.Info(fmt.Sprintf("%sStep 2: Sending commitment hash\n", sposWorker.Cns.getFormattedTime())) - err = sposWorker.Cns.SetJobDone(sposWorker.Cns.SelfPubKey(), SrCommitmentHash, true) + err = sposWorker.Cns.SetSelfJobDone(SrCommitmentHash, true) if err != nil { log.Error(err.Error()) @@ -601,7 +683,8 @@ func (sposWorker *SPOSConsensusWorker) genBitmap(subround chronology.SubroundId) bitmap := make([]byte, sizeConsensus/8+1) for i := 0; i < sizeConsensus; i++ { - isJobDone, err := sposWorker.Cns.GetJobDone(sposWorker.Cns.ConsensusGroup()[i], subround) + pubKey := sposWorker.Cns.ConsensusGroup()[i] + isJobDone, err := sposWorker.Cns.GetJobDone(pubKey, subround) if err != nil { log.Error(err.Error()) @@ -630,7 +713,7 @@ func (sposWorker *SPOSConsensusWorker) DoBitmapJob() bool { } } - isBitmapJobDone, err := sposWorker.Cns.GetJobDone(sposWorker.Cns.SelfPubKey(), SrBitmap) + isBitmapJobDone, err := sposWorker.Cns.GetSelfJobDone(SrBitmap) if err != nil { log.Error(err.Error()) @@ -639,8 +722,9 @@ func (sposWorker *SPOSConsensusWorker) DoBitmapJob() bool { if sposWorker.Cns.Status(SrBitmap) == SsFinished || // is subround Bitmap already finished? isBitmapJobDone || // has been bitmap already sent? - !sposWorker.Cns.IsNodeLeaderInCurrentRound(sposWorker.Cns.SelfPubKey()) || // is another node leader in this round? + !sposWorker.Cns.IsSelfLeaderInCurrentRound() || // is another node leader in this round? sposWorker.Cns.Data == nil { // is consensus data not set? + return false } @@ -652,7 +736,7 @@ func (sposWorker *SPOSConsensusWorker) DoBitmapJob() bool { []byte(sposWorker.Cns.SelfPubKey()), nil, MtBitmap, - sposWorker.GetTime(), + sposWorker.GetRoundTime(), sposWorker.Cns.Chr.Round().Index()) if !sposWorker.SendConsensusMessage(dta) { @@ -662,7 +746,8 @@ func (sposWorker *SPOSConsensusWorker) DoBitmapJob() bool { log.Info(fmt.Sprintf("%sStep 3: Sending bitmap\n", sposWorker.Cns.getFormattedTime())) for i := 0; i < len(sposWorker.Cns.ConsensusGroup()); i++ { - isJobCommHashJobDone, err := sposWorker.Cns.GetJobDone(sposWorker.Cns.ConsensusGroup()[i], SrCommitmentHash) + pubKey := sposWorker.Cns.ConsensusGroup()[i] + isJobCommHashJobDone, err := sposWorker.Cns.GetJobDone(pubKey, SrCommitmentHash) if err != nil { log.Error(err.Error()) @@ -670,7 +755,7 @@ func (sposWorker *SPOSConsensusWorker) DoBitmapJob() bool { } if isJobCommHashJobDone { - err = sposWorker.Cns.SetJobDone(sposWorker.Cns.ConsensusGroup()[i], SrBitmap, true) + err = sposWorker.Cns.SetJobDone(pubKey, SrBitmap, true) if err != nil { log.Error(err.Error()) @@ -679,6 +764,8 @@ func (sposWorker *SPOSConsensusWorker) DoBitmapJob() bool { } } + sposWorker.Header.PubKeysBitmap = bitmap + return true } @@ -696,7 +783,7 @@ func (sposWorker *SPOSConsensusWorker) DoCommitmentJob() bool { } } - isCommJobDone, err := sposWorker.Cns.GetJobDone(sposWorker.Cns.SelfPubKey(), SrCommitment) + isCommJobDone, err := sposWorker.Cns.GetSelfJobDone(SrCommitment) if err != nil { log.Error(err.Error()) @@ -705,8 +792,9 @@ func (sposWorker *SPOSConsensusWorker) DoCommitmentJob() bool { if sposWorker.Cns.Status(SrCommitment) == SsFinished || // is subround Commitment already finished? isCommJobDone || // has been commitment already sent? - !sposWorker.Cns.IsValidatorInBitmap(sposWorker.Cns.SelfPubKey()) || // isn't node in the leader's bitmap? + !sposWorker.Cns.IsSelfInBitmap() || // isn't node in the leader's bitmap? sposWorker.Cns.Data == nil { // is consensus data not set? + return false } @@ -731,7 +819,7 @@ func (sposWorker *SPOSConsensusWorker) DoCommitmentJob() bool { []byte(sposWorker.Cns.SelfPubKey()), nil, MtCommitment, - sposWorker.GetTime(), + sposWorker.GetRoundTime(), sposWorker.Cns.Chr.Round().Index()) if !sposWorker.SendConsensusMessage(dta) { @@ -740,7 +828,7 @@ func (sposWorker *SPOSConsensusWorker) DoCommitmentJob() bool { log.Info(fmt.Sprintf("%sStep 4: Sending commitment\n", sposWorker.Cns.getFormattedTime())) - err = sposWorker.Cns.SetJobDone(sposWorker.Cns.SelfPubKey(), SrCommitment, true) + err = sposWorker.Cns.SetSelfJobDone(SrCommitment, true) if err != nil { log.Error(err.Error()) @@ -750,6 +838,59 @@ func (sposWorker *SPOSConsensusWorker) DoCommitmentJob() bool { return true } +func (sposWorker *SPOSConsensusWorker) checkCommitmentsValidity(bitmap []byte) error { + nbBitsBitmap := len(bitmap) * 8 + consensusGroup := sposWorker.Cns.ConsensusGroup() + consensusGroupSize := len(consensusGroup) + size := consensusGroupSize + + if consensusGroupSize > nbBitsBitmap { + size = nbBitsBitmap + } + + for i := 0; i < size; i++ { + indexRequired := (bitmap[i/8] & (1 << uint16(i%8))) > 0 + + if !indexRequired { + continue + } + + pubKey := consensusGroup[i] + isCommJobDone, err := sposWorker.Cns.GetJobDone(pubKey, SrCommitment) + + if err != nil { + return err + } + + if !isCommJobDone { + return ErrNilCommitment + } + + commitment, err := sposWorker.multiSigner.Commitment(uint16(i)) + + if err != nil { + return err + } + + computedCommitmentHash := sposWorker.hasher.Compute(string(commitment)) + receivedCommitmentHash, err := sposWorker.multiSigner.CommitmentHash(uint16(i)) + + if err != nil { + return err + } + + if !bytes.Equal(computedCommitmentHash, receivedCommitmentHash) { + log.Info(fmt.Sprintf("Commitment %s does not match, expected %s\n", + toB64(computedCommitmentHash), + toB64(receivedCommitmentHash))) + + return ErrCommitmentHashDoesNotMatch + } + } + + return nil +} + // DoSignatureJob method is the function which is actually used to send the Signature for the received block, // in the Signature subround (it is used as the handler function of the doSubroundJob pointer variable function // in Subround struct, from spos package) @@ -764,7 +905,7 @@ func (sposWorker *SPOSConsensusWorker) DoSignatureJob() bool { } } - isSignJobDone, err := sposWorker.Cns.GetJobDone(sposWorker.Cns.SelfPubKey(), SrSignature) + isSignJobDone, err := sposWorker.Cns.GetSelfJobDone(SrSignature) if err != nil { log.Error(err.Error()) @@ -773,22 +914,30 @@ func (sposWorker *SPOSConsensusWorker) DoSignatureJob() bool { if sposWorker.Cns.Status(SrSignature) == SsFinished || // is subround Signature already finished? isSignJobDone || // has been signature already sent? - !sposWorker.Cns.IsValidatorInBitmap(sposWorker.Cns.SelfPubKey()) || // isn't node in the leader's bitmap? + !sposWorker.Cns.IsSelfInBitmap() || // isn't node in the leader's bitmap? sposWorker.Cns.Data == nil { // is consensus data not set? + return false } bitmap := sposWorker.genBitmap(SrBitmap) + err = sposWorker.checkCommitmentsValidity(bitmap) + + if err != nil { + log.Error(err.Error()) + return false + } + // first compute commitment aggregation - _, err = sposWorker.multiSigner.AggregateCommitments(bitmap) + aggComm, err := sposWorker.multiSigner.AggregateCommitments(bitmap) if err != nil { log.Error(err.Error()) return false } - sigPart, err := sposWorker.multiSigner.SignPartial(bitmap) + sigPart, err := sposWorker.multiSigner.CreateSignatureShare(bitmap) if err != nil { log.Error(err.Error()) @@ -801,7 +950,7 @@ func (sposWorker *SPOSConsensusWorker) DoSignatureJob() bool { []byte(sposWorker.Cns.SelfPubKey()), nil, MtSignature, - sposWorker.GetTime(), + sposWorker.GetRoundTime(), sposWorker.Cns.Chr.Round().Index()) if !sposWorker.SendConsensusMessage(dta) { @@ -810,13 +959,45 @@ func (sposWorker *SPOSConsensusWorker) DoSignatureJob() bool { log.Info(fmt.Sprintf("%sStep 5: Sending signature\n", sposWorker.Cns.getFormattedTime())) - err = sposWorker.Cns.SetJobDone(sposWorker.Cns.SelfPubKey(), SrSignature, true) + selfIndex, err := sposWorker.Cns.IndexSelfConsensusGroup() if err != nil { log.Error(err.Error()) return false } + err = sposWorker.multiSigner.AddSignatureShare(uint16(selfIndex), sigPart) + + if err != nil { + log.Error(err.Error()) + return false + } + + err = sposWorker.Cns.SetSelfJobDone(SrSignature, true) + + if err != nil { + log.Error(err.Error()) + return false + } + + sposWorker.Header.Commitment = aggComm + + return true +} + +// DoAdvanceJob method is the function which actually does the job of the Advance subround (it is used as the handler +// function of the doSubroundJob pointer variable function in Subround struct, from spos package) +func (sposWorker *SPOSConsensusWorker) DoAdvanceJob() bool { + if sposWorker.Cns.Status(SrEndRound) == SsFinished { + return false + } + + sposWorker.BlockProcessor.RevertAccountState() + + log.Info(fmt.Sprintf("%sStep 7: Creating and broadcasting an empty block\n", sposWorker.Cns.getFormattedTime())) + + sposWorker.createEmptyBlock() + return true } @@ -859,12 +1040,12 @@ func (sposWorker *SPOSConsensusWorker) SendConsensusMessage(cnsDta *ConsensusDat return true } -func (sposWorker *SPOSConsensusWorker) broadcastTxBlockBody() error { - if sposWorker.BlockBody != nil { +func (sposWorker *SPOSConsensusWorker) broadcastTxBlockBody(blockBody *block.TxBlockBody) error { + if blockBody == nil { return ErrNilTxBlockBody } - message, err := sposWorker.marshalizer.Marshal(sposWorker.BlockBody) + message, err := sposWorker.marshalizer.Marshal(blockBody) if err != nil { return err @@ -880,12 +1061,12 @@ func (sposWorker *SPOSConsensusWorker) broadcastTxBlockBody() error { return nil } -func (sposWorker *SPOSConsensusWorker) broadcastHeader() error { - if sposWorker.Header == nil { +func (sposWorker *SPOSConsensusWorker) broadcastHeader(header *block.Header) error { + if header == nil { return ErrNilBlockHeader } - message, err := sposWorker.marshalizer.Marshal(sposWorker.Header) + message, err := sposWorker.marshalizer.Marshal(header) if err != nil { return err @@ -901,15 +1082,35 @@ func (sposWorker *SPOSConsensusWorker) broadcastHeader() error { return nil } +// ExtendStartRound method just call the DoStartRoundJob method to be sure that the init will be done +func (sposWorker *SPOSConsensusWorker) ExtendStartRound() { + sposWorker.Cns.SetStatus(SrStartRound, SsExtended) + + log.Info(fmt.Sprintf("%sStep 0: Extended the (START_ROUND) subround\n", sposWorker.Cns.getFormattedTime())) + + sposWorker.DoStartRoundJob() +} + // ExtendBlock method put this subround in the extended mode and print some messages func (sposWorker *SPOSConsensusWorker) ExtendBlock() { + if sposWorker.boot.ShouldSync() { + log.Info(fmt.Sprintf("Canceled round %d in subround %s, NOT SYNCRONIZED YET\n", + sposWorker.Cns.Chr.Round().Index(), sposWorker.Cns.GetSubroundName(SrBlock))) + + sposWorker.Cns.Chr.SetSelfSubround(-1) + + return + } + sposWorker.Cns.SetStatus(SrBlock, SsExtended) + log.Info(fmt.Sprintf("%sStep 1: Extended the (BLOCK) subround\n", sposWorker.Cns.getFormattedTime())) } // ExtendCommitmentHash method put this subround in the extended mode and print some messages func (sposWorker *SPOSConsensusWorker) ExtendCommitmentHash() { sposWorker.Cns.SetStatus(SrCommitmentHash, SsExtended) + if sposWorker.Cns.ComputeSize(SrCommitmentHash) < sposWorker.Cns.Threshold(SrCommitmentHash) { log.Info(fmt.Sprintf("%sStep 2: Extended the (COMMITMENT_HASH) subround. Got only %d from %d commitment hashes which are not enough\n", sposWorker.Cns.getFormattedTime(), sposWorker.Cns.ComputeSize(SrCommitmentHash), len(sposWorker.Cns.ConsensusGroup()))) @@ -921,12 +1122,14 @@ func (sposWorker *SPOSConsensusWorker) ExtendCommitmentHash() { // ExtendBitmap method put this subround in the extended mode and print some messages func (sposWorker *SPOSConsensusWorker) ExtendBitmap() { sposWorker.Cns.SetStatus(SrBitmap, SsExtended) + log.Info(fmt.Sprintf("%sStep 3: Extended the (BITMAP) subround\n", sposWorker.Cns.getFormattedTime())) } // ExtendCommitment method put this subround in the extended mode and print some messages func (sposWorker *SPOSConsensusWorker) ExtendCommitment() { sposWorker.Cns.SetStatus(SrCommitment, SsExtended) + log.Info(fmt.Sprintf("%sStep 4: Extended the (COMMITMENT) subround. Got only %d from %d commitments which are not enough\n", sposWorker.Cns.getFormattedTime(), sposWorker.Cns.ComputeSize(SrCommitment), len(sposWorker.Cns.ConsensusGroup()))) } @@ -934,16 +1137,86 @@ func (sposWorker *SPOSConsensusWorker) ExtendCommitment() { // ExtendSignature method put this subround in the extended mode and print some messages func (sposWorker *SPOSConsensusWorker) ExtendSignature() { sposWorker.Cns.SetStatus(SrSignature, SsExtended) - log.Info(fmt.Sprintf("%sStep 5: Extended the (SIGNATURE) subround. Got only %d from %d sigantures which are not enough\n", + + log.Info(fmt.Sprintf("%sStep 5: Extended the (SIGNATURE) subround. Got only %d from %d signatures which are not enough\n", sposWorker.Cns.getFormattedTime(), sposWorker.Cns.ComputeSize(SrSignature), len(sposWorker.Cns.ConsensusGroup()))) } -// ExtendEndRound method just print some messages as no extend will be permited, because a new round -// will be start +// ExtendEndRound method just print some messages as no extend will be permited, because a new round will be start func (sposWorker *SPOSConsensusWorker) ExtendEndRound() { - log.Info(fmt.Sprintf("\n%s++++++++++++++++++++ THIS ROUND NO BLOCK WAS ADDED TO THE BLOCKCHAIN ++++++++++++++++++++\n\n", - sposWorker.Cns.getFormattedTime())) - sposWorker.Rounds++ // only for statistic + sposWorker.Cns.SetStatus(SrEndRound, SsExtended) + + log.Info(fmt.Sprintf("%sStep 6: Extended the (END_ROUND) subround\n", sposWorker.Cns.getFormattedTime())) +} + +// createEmptyBlock creates, commits and broadcasts an empty block at the end of the round if no block was proposed or +// syncronized in this round +func (sposWorker *SPOSConsensusWorker) createEmptyBlock() bool { + blk := sposWorker.BlockProcessor.CreateEmptyBlockBody( + shardId, + sposWorker.Cns.Chr.Round().Index()) + + hdr := &block.Header{} + hdr.Round = uint32(sposWorker.Cns.Chr.Round().Index()) + hdr.TimeStamp = sposWorker.GetRoundTime() + + var prevHeaderHash []byte + + if sposWorker.BlockChain.CurrentBlockHeader == nil { + hdr.Nonce = 1 + prevHeaderHash = sposWorker.BlockChain.GenesisHeaderHash + } else { + hdr.Nonce = sposWorker.BlockChain.CurrentBlockHeader.Nonce + 1 + prevHeaderHash = sposWorker.BlockChain.CurrentBlockHeaderHash + } + + hdr.PrevHash = prevHeaderHash + blkStr, err := sposWorker.marshalizer.Marshal(blk) + + if err != nil { + log.Info(err.Error()) + return false + } + + hdr.BlockBodyHash = sposWorker.hasher.Compute(string(blkStr)) + + cnsGroup := sposWorker.Cns.ConsensusGroup() + cnsGroupSize := len(cnsGroup) + + hdr.PubKeysBitmap = make([]byte, cnsGroupSize/8+1) + + // TODO: decide the signature for the empty block + headerStr, err := sposWorker.marshalizer.Marshal(hdr) + hdrHash := sposWorker.hasher.Compute(string(headerStr)) + hdr.Signature = hdrHash + hdr.Commitment = hdrHash + + // Commit the block (commits also the account state) + err = sposWorker.BlockProcessor.CommitBlock(sposWorker.BlockChain, hdr, blk) + + if err != nil { + log.Info(err.Error()) + return false + } + + // broadcast block body + err = sposWorker.broadcastTxBlockBody(blk) + + if err != nil { + log.Info(err.Error()) + } + + // broadcast header + err = sposWorker.broadcastHeader(hdr) + + if err != nil { + log.Info(err.Error()) + } + + log.Info(fmt.Sprintf("\n%s******************** ADDED EMPTY BLOCK WITH NONCE %d IN BLOCKCHAIN ********************\n\n", + sposWorker.Cns.getFormattedTime(), hdr.Nonce)) + + return true } // ReceivedMessage method redirects the received message to the channel which should handle it @@ -952,31 +1225,50 @@ func (sposWorker *SPOSConsensusWorker) ReceivedMessage(name string, data interfa return } - cnsData, ok := data.(*ConsensusData) + cnsDta, ok := data.(*ConsensusData) if !ok { return } - senderOK := sposWorker.Cns.IsNodeInConsensusGroup(string(cnsData.PubKey)) + senderOK := sposWorker.Cns.IsNodeInConsensusGroup(string(cnsDta.PubKey)) if !senderOK { return } - sigVerifErr := sposWorker.checkSignature(cnsData) + if sposWorker.shouldDropConsensusMessage(cnsDta) { + return + } + + if sposWorker.Cns.SelfPubKey() == string(cnsDta.PubKey) { + return + } + + sigVerifErr := sposWorker.checkSignature(cnsDta) if sigVerifErr != nil { return } - if ch, ok := sposWorker.MessageChannels[cnsData.MsgType]; ok { - ch <- cnsData + sposWorker.ReceivedMessageChannel <- cnsDta +} + +func (sposWorker *SPOSConsensusWorker) shouldDropConsensusMessage(cnsDta *ConsensusData) bool { + if cnsDta.RoundIndex < sposWorker.Cns.Chr.Round().Index() { + return true } + + if cnsDta.RoundIndex == sposWorker.Cns.Chr.Round().Index() && + sposWorker.GetSubround() > chronology.SubroundId(SrEndRound) { + return true + } + + return false } // CheckChannels method is used to listen to the channels through which node receives and consumes, // during the round, different messages from the nodes which are in the validators group -func (sposWorker *SPOSConsensusWorker) CheckChannels() { +func (sposWorker *SPOSConsensusWorker) checkChannels() { for { select { case rcvDta := <-sposWorker.MessageChannels[MtBlockBody]: @@ -1045,35 +1337,35 @@ func (sposWorker *SPOSConsensusWorker) checkSignature(cnsData *ConsensusData) er func (sposWorker *SPOSConsensusWorker) ReceivedBlockBody(cnsDta *ConsensusData) bool { node := string(cnsDta.PubKey) - if node == sposWorker.Cns.SelfPubKey() || // is block body received from myself? - !sposWorker.Cns.IsNodeLeaderInCurrentRound(node) || // is another node leader in this round? - sposWorker.BlockBody != nil { // is block body already received? + isBlockJobDone, err := sposWorker.Cns.RoundConsensus.GetJobDone(node, SrBlock) + + if err != nil { + log.Error(err.Error()) return false } - log.Info(fmt.Sprintf("%sStep 1: Received block body\n", sposWorker.Cns.getFormattedTime())) + if node == sposWorker.Cns.SelfPubKey() || // is block header received from myself? + sposWorker.Cns.Status(SrBlock) == SsFinished || // is subround Block already finished? + !sposWorker.Cns.IsNodeLeaderInCurrentRound(node) || // is another node leader in this round? + isBlockJobDone || // is block job of this node already done? + sposWorker.BlockBody != nil || // is block body already received? + cnsDta.RoundIndex != sposWorker.Cns.Chr.Round().Index() || // is this the consesnus data of this round? + sposWorker.GetSubround() > chronology.SubroundId(SrEndRound) { // is message received too late in this round? - sposWorker.BlockBody = sposWorker.DecodeBlockBody(cnsDta.SubRoundData) + return false + } - if sposWorker.BlockBody != nil && - sposWorker.Header != nil { - err := sposWorker.BlockProcessor.ProcessBlock(sposWorker.BlockChain, sposWorker.Header, sposWorker.BlockBody) + sposWorker.BlockBody = sposWorker.DecodeBlockBody(cnsDta.SubRoundData) - if err != nil { - log.Error(err.Error()) - return false - } + if sposWorker.BlockBody == nil { + return false + } - sposWorker.multiSigner.SetMessage(sposWorker.Cns.Data) - err = sposWorker.Cns.RoundConsensus.SetJobDone(node, SrBlock, true) + log.Info(fmt.Sprintf("%sStep 1: Received block body\n", sposWorker.Cns.getFormattedTime())) - if err != nil { - log.Error(err.Error()) - return false - } - } + blockProcessedWithSuccess := sposWorker.processReceivedBlock(cnsDta) - return true + return blockProcessedWithSuccess } // DecodeBlockBody method decodes block body which is marshalized in the received message @@ -1110,41 +1402,83 @@ func (sposWorker *SPOSConsensusWorker) ReceivedBlockHeader(cnsDta *ConsensusData if node == sposWorker.Cns.SelfPubKey() || // is block header received from myself? sposWorker.Cns.Status(SrBlock) == SsFinished || // is subround Block already finished? !sposWorker.Cns.IsNodeLeaderInCurrentRound(node) || // is another node leader in this round? - isBlockJobDone { // is block header already received? + isBlockJobDone || // is block job of this node already done? + sposWorker.Header != nil || // is block header already received? + sposWorker.Cns.Data != nil || // is consensus data already set? + cnsDta.RoundIndex != sposWorker.Cns.Chr.Round().Index() || // is this the consesnus data of this round? + sposWorker.GetSubround() > chronology.SubroundId(SrEndRound) { // is message received too late in this round? + return false } - hdr := sposWorker.DecodeBlockHeader(cnsDta.SubRoundData) + sposWorker.Cns.Data = cnsDta.BlockHeaderHash + sposWorker.Header = sposWorker.DecodeBlockHeader(cnsDta.SubRoundData) - if !sposWorker.CheckIfBlockIsValid(hdr) { - log.Info(fmt.Sprintf("Canceled round %d in subround %s\n", - sposWorker.Cns.Chr.Round().Index(), sposWorker.Cns.GetSubroundName(SrBlock))) - sposWorker.Cns.Chr.SetSelfSubround(-1) + if sposWorker.Header == nil { return false } log.Info(fmt.Sprintf("%sStep 1: Received block header with nonce %d and hash %s\n", - sposWorker.Cns.getFormattedTime(), hdr.Nonce, getPrettyByteArray(cnsDta.BlockHeaderHash))) + sposWorker.Cns.getFormattedTime(), sposWorker.Header.Nonce, toB64(cnsDta.BlockHeaderHash))) - sposWorker.Header = hdr - sposWorker.Cns.Data = cnsDta.BlockHeaderHash + if !sposWorker.CheckIfBlockIsValid(sposWorker.Header) { + log.Info(fmt.Sprintf("Canceled round %d in subround %s, INVALID BLOCK\n", + sposWorker.Cns.Chr.Round().Index(), sposWorker.Cns.GetSubroundName(SrBlock))) - if sposWorker.BlockBody != nil && - sposWorker.Header != nil { - err := sposWorker.BlockProcessor.ProcessBlock(sposWorker.BlockChain, sposWorker.Header, sposWorker.BlockBody) + return false + } - if err != nil { - log.Error(err.Error()) - return false - } + blockProcessedWithSuccess := sposWorker.processReceivedBlock(cnsDta) - sposWorker.multiSigner.SetMessage(sposWorker.Cns.Data) - err = sposWorker.Cns.RoundConsensus.SetJobDone(node, SrBlock, true) + return blockProcessedWithSuccess +} - if err != nil { - log.Error(err.Error()) - return false - } +func (sposWorker *SPOSConsensusWorker) processReceivedBlock(cnsDta *ConsensusData) bool { + + if sposWorker.BlockBody == nil || + sposWorker.Header == nil { + return false + } + + node := string(cnsDta.PubKey) + + err := sposWorker.BlockProcessor.ProcessBlock(sposWorker.BlockChain, sposWorker.Header, sposWorker.BlockBody, sposWorker.haveTime) + + if err != nil { + log.Info(fmt.Sprintf("Canceled round %d in subround %s, %s\n", + sposWorker.Cns.Chr.Round().Index(), sposWorker.Cns.GetSubroundName(SrBlock), err.Error())) + + return false + } + + subround := sposWorker.GetSubround() + + if cnsDta.RoundIndex != sposWorker.Cns.Chr.Round().Index() { + log.Info(fmt.Sprintf("Canceled round %d in subround %s, CURRENT ROUND IS %d\n", + cnsDta.RoundIndex, sposWorker.Cns.GetSubroundName(SrBlock), sposWorker.Cns.Chr.Round().Index())) + + sposWorker.BlockProcessor.RevertAccountState() + + return false + } + + if subround > chronology.SubroundId(SrEndRound) { + log.Info(fmt.Sprintf("Canceled round %d in subround %s, CURRENT SUBROUND IS %s\n", + cnsDta.RoundIndex, sposWorker.Cns.GetSubroundName(SrBlock), sposWorker.Cns.GetSubroundName(subround))) + + sposWorker.BlockProcessor.RevertAccountState() + + return false + } + + sposWorker.multiSigner.SetMessage(sposWorker.Cns.Data) + err = sposWorker.Cns.RoundConsensus.SetJobDone(node, SrBlock, true) + + if err != nil { + log.Info(fmt.Sprintf("Canceled round %d in subround %s, %s\n", + sposWorker.Cns.Chr.Round().Index(), sposWorker.Cns.GetSubroundName(SrBlock), err.Error())) + + return false } return true @@ -1183,17 +1517,20 @@ func (sposWorker *SPOSConsensusWorker) ReceivedCommitmentHash(cnsDta *ConsensusD if node == sposWorker.Cns.SelfPubKey() || // is commitment hash received from myself? sposWorker.Cns.Status(SrCommitmentHash) == SsFinished || // is subround CommitmentHash already finished? - !sposWorker.Cns.IsNodeInConsensusGroup(node) || // isn't node in the jobDone group? - isCommHashJobDone || // is commitment hash already received? + !sposWorker.Cns.IsNodeInConsensusGroup(node) || // isn't node in the consensus group? + isCommHashJobDone || // is commitment hash job of this node already done? sposWorker.Cns.Data == nil || // is consensus data not set? - !bytes.Equal(cnsDta.BlockHeaderHash, sposWorker.Cns.Data) { // is this the consesnus data of this round? + !bytes.Equal(cnsDta.BlockHeaderHash, sposWorker.Cns.Data) || // is this the consesnus data of this round? + sposWorker.GetSubround() > chronology.SubroundId(SrEndRound) { // is message received too late in this round? + return false } // if this node is leader in this round and already he received 2/3 + 1 of commitment hashes // he will ignore any others received later - if sposWorker.Cns.IsNodeLeaderInCurrentRound(sposWorker.Cns.SelfPubKey()) { - if sposWorker.Cns.IsCommitmentHashReceived(sposWorker.Cns.Threshold(SrCommitmentHash)) { + if sposWorker.Cns.IsSelfLeaderInCurrentRound() { + threshold := sposWorker.Cns.Threshold(SrCommitmentHash) + if sposWorker.Cns.IsCommitmentHashReceived(threshold) { return false } } @@ -1251,10 +1588,11 @@ func (sposWorker *SPOSConsensusWorker) ReceivedBitmap(cnsDta *ConsensusData) boo if node == sposWorker.Cns.SelfPubKey() || // is bitmap received from myself? sposWorker.Cns.Status(SrBitmap) == SsFinished || // is subround Bitmap already finished? !sposWorker.Cns.IsNodeLeaderInCurrentRound(node) || // is another node leader in this round? - isBitmapJobDone || // is bitmap already received? + isBitmapJobDone || // is bitmap job of this node already done? sposWorker.Cns.Data == nil || // is consensus data not set? + !bytes.Equal(cnsDta.BlockHeaderHash, sposWorker.Cns.Data) || // is this the consesnus data of this round? + sposWorker.GetSubround() > chronology.SubroundId(SrEndRound) { // is message received too late in this round? - !bytes.Equal(cnsDta.BlockHeaderHash, sposWorker.Cns.Data) { // is this the consesnus data of this round? return false } @@ -1264,9 +1602,9 @@ func (sposWorker *SPOSConsensusWorker) ReceivedBitmap(cnsDta *ConsensusData) boo nbSigners := countBitmapFlags(signersBitmap) if int(nbSigners) < sposWorker.Cns.Threshold(SrBitmap) { - log.Info(fmt.Sprintf("Canceled round %d in subround %s\n", + log.Info(fmt.Sprintf("Canceled round %d in subround %s, TOO FEW SIGNERS IN BITMAP\n", sposWorker.Cns.Chr.Round().Index(), sposWorker.Cns.GetSubroundName(SrBitmap))) - sposWorker.Cns.Chr.SetSelfSubround(-1) + return false } @@ -1287,6 +1625,19 @@ func (sposWorker *SPOSConsensusWorker) ReceivedBitmap(cnsDta *ConsensusData) boo } } + if !sposWorker.Cns.IsValidatorInBitmap(sposWorker.Cns.selfPubKey) { + log.Info(fmt.Sprintf("Canceled round %d in subround %s, NOT INCLUDED IN THE BITMAP\n", + sposWorker.Cns.Chr.Round().Index(), sposWorker.Cns.GetSubroundName(SrBitmap))) + + sposWorker.Cns.Chr.SetSelfSubround(-1) + + sposWorker.BlockProcessor.RevertAccountState() + + return false + } + + sposWorker.Header.PubKeysBitmap = signersBitmap + return true } @@ -1306,9 +1657,11 @@ func (sposWorker *SPOSConsensusWorker) ReceivedCommitment(cnsDta *ConsensusData) if node == sposWorker.Cns.SelfPubKey() || // is commitment received from myself? sposWorker.Cns.Status(SrCommitment) == SsFinished || // is subround Commitment already finished? !sposWorker.Cns.IsValidatorInBitmap(node) || // isn't node in the bitmap group? - isCommJobDone || // is commitment already received? + isCommJobDone || // is commitment job of this node already done? sposWorker.Cns.Data == nil || // is consensus data not set? - !bytes.Equal(cnsDta.BlockHeaderHash, sposWorker.Cns.Data) { // is this the consesnus data of this round? + !bytes.Equal(cnsDta.BlockHeaderHash, sposWorker.Cns.Data) || // is this the consesnus data of this round? + sposWorker.GetSubround() > chronology.SubroundId(SrEndRound) { // is message received too late in this round? + return false } @@ -1319,17 +1672,10 @@ func (sposWorker *SPOSConsensusWorker) ReceivedCommitment(cnsDta *ConsensusData) return false } - computedCommitmentHash := sposWorker.hasher.Compute(string(cnsDta.SubRoundData)) - rcvCommitmentHash, err := sposWorker.multiSigner.CommitmentHash(uint16(index)) - - if !bytes.Equal(computedCommitmentHash, rcvCommitmentHash) { - log.Info(fmt.Sprintf("Commitment %s does not match, expected %s\n", computedCommitmentHash, rcvCommitmentHash)) - return false - } - err = sposWorker.multiSigner.AddCommitment(uint16(index), cnsDta.SubRoundData) if err != nil { + log.Info(err.Error()) return false } @@ -1359,30 +1705,22 @@ func (sposWorker *SPOSConsensusWorker) ReceivedSignature(cnsDta *ConsensusData) if node == sposWorker.Cns.SelfPubKey() || // is signature received from myself? sposWorker.Cns.Status(SrSignature) == SsFinished || // is subround Signature already finished? !sposWorker.Cns.IsValidatorInBitmap(node) || // isn't node in the bitmap group? - isSignJobDone || // is signature already received? + isSignJobDone || // is signature job of this node already done? sposWorker.Cns.Data == nil || // is consensus data not set? - !bytes.Equal(cnsDta.BlockHeaderHash, sposWorker.Cns.Data) { // is this the consesnus data of this round? - return false - } - - index, err := sposWorker.Cns.ConsensusGroupIndex(node) + !bytes.Equal(cnsDta.BlockHeaderHash, sposWorker.Cns.Data) || // is this the consesnus data of this round? + sposWorker.GetSubround() > chronology.SubroundId(SrEndRound) { // is message received too late in this round? - if err != nil { - log.Error(err.Error()) return false } - bitmap := sposWorker.genBitmap(SrBitmap) - - // verify partial signature - err = sposWorker.multiSigner.VerifyPartial(uint16(index), cnsDta.SubRoundData, bitmap) + index, err := sposWorker.Cns.ConsensusGroupIndex(node) if err != nil { log.Error(err.Error()) return false } - err = sposWorker.multiSigner.AddSignPartial(uint16(index), cnsDta.SubRoundData) + err = sposWorker.multiSigner.AddSignatureShare(uint16(index), cnsDta.SubRoundData) if err != nil { log.Error(err.Error()) @@ -1401,31 +1739,28 @@ func (sposWorker *SPOSConsensusWorker) ReceivedSignature(cnsDta *ConsensusData) // CheckIfBlockIsValid method checks if the received block is valid func (sposWorker *SPOSConsensusWorker) CheckIfBlockIsValid(receivedHeader *block.Header) bool { - // TODO: This logic is temporary and it should be refactored after the bootstrap mechanism will be implemented - if sposWorker.BlockChain.CurrentBlockHeader == nil { if receivedHeader.Nonce == 1 { // first block after genesis - if bytes.Equal(receivedHeader.PrevHash, []byte("")) { + if bytes.Equal(receivedHeader.PrevHash, sposWorker.BlockChain.GenesisHeaderHash) { return true } log.Info(fmt.Sprintf("Hash not match: local block hash is empty and node received block with previous hash %s\n", - getPrettyByteArray(receivedHeader.PrevHash))) + toB64(receivedHeader.PrevHash))) + return false } - // to resolve the situation when a node comes later in the network and it has the - // bootstrap mechanism not implemented yet (he will accept the block received) log.Info(fmt.Sprintf("Nonce not match: local block nonce is 0 and node received block with nonce %d\n", receivedHeader.Nonce)) - log.Info(fmt.Sprintf("\n++++++++++++++++++++ ACCEPTED BLOCK WITH NONCE %d BECAUSE BOOSTRAP IS NOT IMPLEMENTED YET ++++++++++++++++++++\n\n", - receivedHeader.Nonce)) - return true + + return false } if receivedHeader.Nonce < sposWorker.BlockChain.CurrentBlockHeader.Nonce+1 { log.Info(fmt.Sprintf("Nonce not match: local block nonce is %d and node received block with nonce %d\n", sposWorker.BlockChain.CurrentBlockHeader.Nonce, receivedHeader.Nonce)) + return false } @@ -1437,37 +1772,15 @@ func (sposWorker *SPOSConsensusWorker) CheckIfBlockIsValid(receivedHeader *block } log.Info(fmt.Sprintf("Hash not match: local block hash is %s and node received block with previous hash %s\n", - getPrettyByteArray(prevHeaderHash), getPrettyByteArray(receivedHeader.PrevHash))) + toB64(prevHeaderHash), toB64(receivedHeader.PrevHash))) + return false } - // to resolve the situation when a node misses some Blocks and it has the bootstrap mechanism - // not implemented yet (he will accept the block received) log.Info(fmt.Sprintf("Nonce not match: local block nonce is %d and node received block with nonce %d\n", sposWorker.BlockChain.CurrentBlockHeader.Nonce, receivedHeader.Nonce)) - log.Info(fmt.Sprintf("\n++++++++++++++++++++ ACCEPTED BLOCK WITH NONCE %d BECAUSE BOOSTRAP IS NOT IMPLEMENTED YET ++++++++++++++++++++\n\n", - receivedHeader.Nonce)) - return true -} - -// ShouldSync method returns the synch state of the node. If it returns 'true', this means that the node -// is not synchronized yet and it has to continue the bootstrapping mechanism, otherwise the node is already -// synched and it can participate to the consensus, if it is in the jobDone group of this round -func (sposWorker *SPOSConsensusWorker) ShouldSync() bool { - if sposWorker.Cns == nil || - sposWorker.Cns.Chr == nil || - sposWorker.Cns.Chr.Round() == nil { - return true - } - - rnd := sposWorker.Cns.Chr.Round() - if sposWorker.BlockChain == nil || - sposWorker.BlockChain.CurrentBlockHeader == nil { - return rnd.Index() > 0 - } - - return sposWorker.BlockChain.CurrentBlockHeader.Round+1 < uint32(rnd.Index()) + return false } // GetMessageTypeName method returns the name of the message from a given message ID @@ -1488,13 +1801,16 @@ func (sposWorker *SPOSConsensusWorker) GetMessageTypeName(messageType MessageTyp case MtUnknown: return "(UNKNOWN)" default: - return "Undifined message type" + return "Undefined message type" } } -// GetTime method returns a string containing the current time -func (sposWorker *SPOSConsensusWorker) GetTime() uint64 { - return uint64(sposWorker.Cns.Chr.SyncTime().CurrentTime(sposWorker.Cns.Chr.ClockOffset()).Unix()) +// GetRoundTime method returns time stamp of the current round +func (sposWorker *SPOSConsensusWorker) GetRoundTime() uint64 { + chr := sposWorker.Cns.Chr + currentRoundIndex := chr.Round().Index() + + return chr.RoundTimeStamp(currentRoundIndex) } // CheckEndRoundConsensus method checks if the consensus is achieved in each subround from first subround to the given @@ -1520,6 +1836,10 @@ func (cns *Consensus) CheckBlockConsensus() bool { cns.mut.Lock() defer cns.mut.Unlock() + if cns.Chr.IsCancelled() { + return false + } + if cns.Status(SrBlock) == SsFinished { return true } @@ -1527,6 +1847,7 @@ func (cns *Consensus) CheckBlockConsensus() bool { if cns.IsBlockReceived(cns.Threshold(SrBlock)) { cns.PrintBlockCM() // only for printing block consensus messages cns.SetStatus(SrBlock, SsFinished) + return true } @@ -1538,25 +1859,31 @@ func (cns *Consensus) CheckCommitmentHashConsensus() bool { cns.mut.Lock() defer cns.mut.Unlock() + if cns.Chr.IsCancelled() { + return false + } + if cns.Status(SrCommitmentHash) == SsFinished { return true } threshold := cns.Threshold(SrCommitmentHash) - if !cns.IsNodeLeaderInCurrentRound(cns.selfPubKey) { + if !cns.IsSelfLeaderInCurrentRound() { threshold = len(cns.consensusGroup) } if cns.IsCommitmentHashReceived(threshold) { cns.PrintCommitmentHashCM() // only for printing commitment hash consensus messages cns.SetStatus(SrCommitmentHash, SsFinished) + return true } if cns.CommitmentHashesCollected(cns.Threshold(SrBitmap)) { cns.PrintCommitmentHashCM() // only for printing commitment hash consensus messages cns.SetStatus(SrCommitmentHash, SsFinished) + return true } @@ -1568,6 +1895,10 @@ func (cns *Consensus) CheckBitmapConsensus() bool { cns.mut.Lock() defer cns.mut.Unlock() + if cns.Chr.IsCancelled() { + return false + } + if cns.Status(SrBitmap) == SsFinished { return true } @@ -1575,6 +1906,7 @@ func (cns *Consensus) CheckBitmapConsensus() bool { if cns.CommitmentHashesCollected(cns.Threshold(SrBitmap)) { cns.PrintBitmapCM() // only for printing bitmap consensus messages cns.SetStatus(SrBitmap, SsFinished) + return true } @@ -1586,6 +1918,10 @@ func (cns *Consensus) CheckCommitmentConsensus() bool { cns.mut.Lock() defer cns.mut.Unlock() + if cns.Chr.IsCancelled() { + return false + } + if cns.Status(SrCommitment) == SsFinished { return true } @@ -1593,6 +1929,7 @@ func (cns *Consensus) CheckCommitmentConsensus() bool { if cns.CommitmentsCollected(cns.Threshold(SrCommitment)) { cns.PrintCommitmentCM() // only for printing commitment consensus messages cns.SetStatus(SrCommitment, SsFinished) + return true } @@ -1604,6 +1941,10 @@ func (cns *Consensus) CheckSignatureConsensus() bool { cns.mut.Lock() defer cns.mut.Unlock() + if cns.Chr.IsCancelled() { + return false + } + if cns.Status(SrSignature) == SsFinished { return true } @@ -1611,12 +1952,18 @@ func (cns *Consensus) CheckSignatureConsensus() bool { if cns.SignaturesCollected(cns.Threshold(SrSignature)) { cns.PrintSignatureCM() // only for printing signature consensus messages cns.SetStatus(SrSignature, SsFinished) + return true } return false } +// CheckAdvanceConsensus method checks if the consensus is achieved in the advance subround. +func (cns *Consensus) CheckAdvanceConsensus() bool { + return true +} + // GetSubroundName returns the name of each subround from a given subround ID func (cns *Consensus) GetSubroundName(subroundId chronology.SubroundId) string { switch subroundId { @@ -1637,21 +1984,23 @@ func (cns *Consensus) GetSubroundName(subroundId chronology.SubroundId) string { case SrAdvance: return "(ADVANCE)" default: - return "Undifined subround" + return "Undefined subround" } } // PrintBlockCM method prints the consensus messages func (cns *Consensus) PrintBlockCM() { - if !cns.IsNodeLeaderInCurrentRound(cns.selfPubKey) { + if !cns.IsSelfLeaderInCurrentRound() { log.Info(fmt.Sprintf("%sStep 1: Synchronized block\n", cns.getFormattedTime())) } + log.Info(fmt.Sprintf("%sStep 1: Subround (BLOCK) has been finished\n", cns.getFormattedTime())) } // PrintCommitmentHashCM method prints the consensus messages func (cns *Consensus) PrintCommitmentHashCM() { n := cns.ComputeSize(SrCommitmentHash) + if n == len(cns.consensusGroup) { log.Info(fmt.Sprintf("%sStep 2: Received all (%d from %d) commitment hashes\n", cns.getFormattedTime(), n, len(cns.consensusGroup))) @@ -1659,12 +2008,13 @@ func (cns *Consensus) PrintCommitmentHashCM() { log.Info(fmt.Sprintf("%sStep 2: Received %d from %d commitment hashes, which are enough\n", cns.getFormattedTime(), n, len(cns.consensusGroup))) } + log.Info(fmt.Sprintf("%sStep 2: Subround (COMMITMENT_HASH) has been finished\n", cns.getFormattedTime())) } // PrintBitmapCM method prints the consensus messages func (cns *Consensus) PrintBitmapCM() { - if !cns.IsNodeLeaderInCurrentRound(cns.selfPubKey) { + if !cns.IsSelfLeaderInCurrentRound() { msg := fmt.Sprintf("%sStep 3: Received bitmap from leader, matching with my own, and it got %d from %d commitment hashes, which are enough", cns.getFormattedTime(), cns.ComputeSize(SrBitmap), len(cns.consensusGroup)) @@ -1676,6 +2026,7 @@ func (cns *Consensus) PrintBitmapCM() { log.Info(msg) } + log.Info(fmt.Sprintf("%sStep 3: Subround (BITMAP) has been finished\n", cns.getFormattedTime())) } @@ -1683,6 +2034,7 @@ func (cns *Consensus) PrintBitmapCM() { func (cns *Consensus) PrintCommitmentCM() { log.Info(fmt.Sprintf("%sStep 4: Received %d from %d commitments, which are matching with bitmap and are enough\n", cns.getFormattedTime(), cns.ComputeSize(SrCommitment), len(cns.consensusGroup))) + log.Info(fmt.Sprintf("%sStep 4: Subround (COMMITMENT) has been finished\n", cns.getFormattedTime())) } @@ -1690,6 +2042,7 @@ func (cns *Consensus) PrintCommitmentCM() { func (cns *Consensus) PrintSignatureCM() { log.Info(fmt.Sprintf("%sStep 5: Received %d from %d signatures, which are matching with bitmap and are enough\n", cns.getFormattedTime(), cns.ComputeSize(SrSignature), len(cns.consensusGroup))) + log.Info(fmt.Sprintf("%sStep 5: Subround (SIGNATURE) has been finished\n", cns.getFormattedTime())) } @@ -1704,8 +2057,21 @@ func (sposWorker *SPOSConsensusWorker) getHeaderHash(hdr *block.Header) []byte { return sposWorker.hasher.Compute(string(headerMarsh)) } -func getPrettyByteArray(array []byte) string { - base64pk := make([]byte, base64.StdEncoding.EncodedLen(len(array))) - base64.StdEncoding.Encode(base64pk, array) - return string(base64pk) +func toB64(buff []byte) string { + if buff == nil { + return "" + } + + return base64.StdEncoding.EncodeToString(buff) +} + +func (sposWorker *SPOSConsensusWorker) haveTime() time.Duration { + chr := sposWorker.Cns.Chr + + roundStartTime := chr.Round().TimeStamp() + currentTime := chr.SyncTime().CurrentTime(chr.ClockOffset()) + elapsedTime := currentTime.Sub(roundStartTime) + haveTime := float64(chr.Round().TimeDuration())*maxBlockProcessingTimePercent - float64(elapsedTime) + + return time.Duration(haveTime) } diff --git a/consensus/spos/sposConsensusWorker_test.go b/consensus/spos/sposConsensusWorker_test.go index abce1dcc302..e1aba0605dc 100644 --- a/consensus/spos/sposConsensusWorker_test.go +++ b/consensus/spos/sposConsensusWorker_test.go @@ -13,6 +13,7 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/data/block" "github.com/ElrondNetwork/elrond-go-sandbox/data/blockchain" "github.com/ElrondNetwork/elrond-go-sandbox/process" + "github.com/pkg/errors" "github.com/stretchr/testify/assert" ) @@ -57,6 +58,9 @@ func initConsensusWorker(cns *spos.Consensus) *spos.SPOSConsensusWorker { keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() multisigner := initMultisigner() blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} cnWorker, _ := spos.NewConsensusWorker( cns, @@ -64,12 +68,14 @@ func initConsensusWorker(cns *spos.Consensus) *spos.SPOSConsensusWorker { mock.HasherMock{}, mock.MarshalizerMock{}, blProcMock, + bootMock, multisigner, keyGenMock, privKeyMock, pubKeyMock, ) + cnWorker.Header = &block.Header{} cnWorker.SendMessage = SendMessage cnWorker.BroadcastBlockBody = BroadcastMessage cnWorker.BroadcastHeader = BroadcastMessage @@ -90,6 +96,8 @@ func initConsensus( rnds := initRoundStatus() dta := []byte("X") + chr.SetSelfSubround(0) + cns := spos.NewConsensus( dta, rndConsensus, @@ -171,14 +179,18 @@ func initMockBlockProcessor() *mock.BlockProcessorMock { blockProcMock.RevertAccountStateCalled = func() {} - blockProcMock.ProcessAndCommitCalled = func(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody) error { + blockProcMock.ProcessAndCommitCalled = func(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody, haveTime func() time.Duration) error { return nil } - blockProcMock.ProcessBlockCalled = func(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody) error { + blockProcMock.ProcessBlockCalled = func(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody, haveTime func() time.Duration) error { return nil } + blockProcMock.CreateEmptyBlockBodyCalled = func(shardId uint32, round int32) *block.TxBlockBody { + return &block.TxBlockBody{} + } + return blockProcMock } @@ -189,7 +201,7 @@ func initMultisigner() *mock.BelNevMock { return []byte("commSecret"), []byte("commitment"), nil } - multisigner.VerifyPartialMock = func(index uint16, sig []byte, bitmap []byte) error { + multisigner.VerifySignatureShareMock = func(index uint16, sig []byte, bitmap []byte) error { return nil } @@ -205,7 +217,7 @@ func initMultisigner() *mock.BelNevMock { return []byte("aggregatedCommitments"), nil } - multisigner.SignPartialMock = func(bitmap []byte) ([]byte, error) { + multisigner.CreateSignatureShareMock = func(bitmap []byte) ([]byte, error) { return []byte("partialSign"), nil } @@ -352,6 +364,9 @@ func TestNewConsensusWorker_ConsensusNilShouldFail(t *testing.T) { keyGen := &mock.KeyGenMock{} privKey := &mock.PrivateKeyMock{} pubKey := &mock.PublicKeyMock{} + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} consensusGroup, err := spos.NewConsensusWorker( consensus, @@ -359,6 +374,7 @@ func TestNewConsensusWorker_ConsensusNilShouldFail(t *testing.T) { hasher, marshalizer, blkProc, + bootMock, multisig, keyGen, privKey, @@ -383,6 +399,9 @@ func TestNewConsensusWorker_BlockChainNilShouldFail(t *testing.T) { keyGen := &mock.KeyGenMock{} privKey := &mock.PrivateKeyMock{} pubKey := &mock.PublicKeyMock{} + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} consensusWorker, err := spos.NewConsensusWorker( consensus, @@ -390,6 +409,7 @@ func TestNewConsensusWorker_BlockChainNilShouldFail(t *testing.T) { hasher, marshalizer, blkProc, + bootMock, multisig, keyGen, privKey, @@ -414,6 +434,9 @@ func TestNewConsensusWorker_HasherNilShouldFail(t *testing.T) { keyGen := &mock.KeyGenMock{} privKey := &mock.PrivateKeyMock{} pubKey := &mock.PublicKeyMock{} + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} consensusWorker, err := spos.NewConsensusWorker( consensus, @@ -421,6 +444,7 @@ func TestNewConsensusWorker_HasherNilShouldFail(t *testing.T) { nil, marshalizer, blkProc, + bootMock, multisig, keyGen, privKey, @@ -445,6 +469,9 @@ func TestNewConsensusWorker_MarshalizerNilShouldFail(t *testing.T) { keyGen := &mock.KeyGenMock{} privKey := &mock.PrivateKeyMock{} pubKey := &mock.PublicKeyMock{} + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} consensusWorker, err := spos.NewConsensusWorker( consensus, @@ -452,6 +479,7 @@ func TestNewConsensusWorker_MarshalizerNilShouldFail(t *testing.T) { hasher, nil, blkProc, + bootMock, multisig, keyGen, privKey, @@ -476,6 +504,9 @@ func TestNewConsensusWorker_BlockProcessorNilShouldFail(t *testing.T) { keyGen := &mock.KeyGenMock{} privKey := &mock.PrivateKeyMock{} pubKey := &mock.PublicKeyMock{} + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} consensusWorker, err := spos.NewConsensusWorker( consensus, @@ -483,6 +514,7 @@ func TestNewConsensusWorker_BlockProcessorNilShouldFail(t *testing.T) { hasher, marshalizer, nil, + bootMock, multisig, keyGen, privKey, @@ -507,6 +539,9 @@ func TestNewConsensusWorker_MultisigNilShouldFail(t *testing.T) { keyGen := &mock.KeyGenMock{} privKey := &mock.PrivateKeyMock{} pubKey := &mock.PublicKeyMock{} + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} consensusWorker, err := spos.NewConsensusWorker( consensus, @@ -514,6 +549,7 @@ func TestNewConsensusWorker_MultisigNilShouldFail(t *testing.T) { hasher, marshalizer, blkProc, + bootMock, nil, keyGen, privKey, @@ -538,6 +574,9 @@ func TestNewConsensusWorker_KeyGenNilShouldFail(t *testing.T) { multisig := mock.NewMultiSigner() privKey := &mock.PrivateKeyMock{} pubKey := &mock.PublicKeyMock{} + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} consensusWorker, err := spos.NewConsensusWorker( consensus, @@ -545,6 +584,7 @@ func TestNewConsensusWorker_KeyGenNilShouldFail(t *testing.T) { hasher, marshalizer, blkProc, + bootMock, multisig, nil, privKey, @@ -569,6 +609,9 @@ func TestNewConsensusWorker_PrivKeyNilShouldFail(t *testing.T) { multisig := mock.NewMultiSigner() keyGen := &mock.KeyGenMock{} pubKey := &mock.PublicKeyMock{} + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} consensusWorker, err := spos.NewConsensusWorker( consensus, @@ -576,6 +619,7 @@ func TestNewConsensusWorker_PrivKeyNilShouldFail(t *testing.T) { hasher, marshalizer, blkProc, + bootMock, multisig, keyGen, nil, @@ -600,6 +644,9 @@ func TestNewConsensusWorker_PubKeyNilFail(t *testing.T) { multisig := mock.NewMultiSigner() keyGen := &mock.KeyGenMock{} privKey := &mock.PrivateKeyMock{} + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} consensusWorker, err := spos.NewConsensusWorker( consensus, @@ -607,6 +654,7 @@ func TestNewConsensusWorker_PubKeyNilFail(t *testing.T) { hasher, marshalizer, blkProc, + bootMock, multisig, keyGen, privKey, @@ -659,12 +707,15 @@ func TestNewMessage(t *testing.T) { mock.HasherMock{}, mock.MarshalizerMock{}, &mock.BlockProcessorMock{}, + &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }}, &mock.BelNevMock{}, &mock.KeyGenMock{}, &mock.PrivateKeyMock{}, &mock.PublicKeyMock{}) - assert.Equal(t, len(cns.RoundConsensus.ConsensusGroup()), cap(msg2.MessageChannels[spos.MtBlockHeader])) + assert.Equal(t, 0, cap(msg2.MessageChannels[spos.MtBlockHeader])) } func TestMessage_StartRound(t *testing.T) { @@ -688,6 +739,9 @@ func TestSPOSConsensusWorker_DoEndRoundJobErrAggregatingSigShouldFail(t *testing keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() multisigner := initMultisigner() blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} consensusGroupSize := 9 roundDuration := 100 * time.Millisecond @@ -713,6 +767,7 @@ func TestSPOSConsensusWorker_DoEndRoundJobErrAggregatingSigShouldFail(t *testing mock.HasherMock{}, mock.MarshalizerMock{}, blProcMock, + bootMock, multisigner, keyGenMock, privKeyMock, @@ -986,6 +1041,9 @@ func TestSPOSConsensusWorker_DoCommitmentHashJobErrCreateCommitmentShouldFail(t keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() multisigner := initMultisigner() blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} consensusGroupSize := 9 roundDuration := 100 * time.Millisecond @@ -1011,6 +1069,7 @@ func TestSPOSConsensusWorker_DoCommitmentHashJobErrCreateCommitmentShouldFail(t mock.HasherMock{}, mock.MarshalizerMock{}, blProcMock, + bootMock, multisigner, keyGenMock, privKeyMock, @@ -1137,6 +1196,7 @@ func TestMessage_SendSignature(t *testing.T) { dta := []byte("X") cnWorkers[0].Cns.Data = dta + cnWorkers[0].Cns.SetJobDone(cnWorkers[0].Cns.SelfPubKey(), spos.SrCommitment, true) r = cnWorkers[0].DoSignatureJob() assert.Equal(t, true, r) } @@ -1146,7 +1206,7 @@ func TestMessage_BroadcastMessage(t *testing.T) { hdr := &block.Header{} hdr.Nonce = 1 - hdr.TimeStamp = cnWorkers[0].GetTime() + hdr.TimeStamp = cnWorkers[0].GetRoundTime() message, err := mock.MarshalizerMock{}.Marshal(hdr) @@ -1164,7 +1224,7 @@ func TestMessage_BroadcastMessage(t *testing.T) { []byte(cnWorkers[0].Cns.SelfPubKey()), []byte("sig"), spos.MtBlockHeader, - cnWorkers[0].GetTime(), + cnWorkers[0].GetRoundTime(), 0, ) @@ -1238,7 +1298,7 @@ func TestMessage_ReceivedMessageTxBlockBody(t *testing.T) { []byte(cnWorkers[0].Cns.SelfPubKey()), []byte("sig"), spos.MtBlockBody, - cnWorkers[0].GetTime(), + cnWorkers[0].GetRoundTime(), 0, ) @@ -1250,7 +1310,7 @@ func TestMessage_ReceivedMessageUnknown(t *testing.T) { hdr := &block.Header{} hdr.Nonce = 1 - hdr.TimeStamp = cnWorkers[0].GetTime() + hdr.TimeStamp = cnWorkers[0].GetRoundTime() message, _ := mock.MarshalizerMock{}.Marshal(hdr) hdr.BlockBodyHash = mock.HasherMock{}.Compute(string(message)) @@ -1262,7 +1322,7 @@ func TestMessage_ReceivedMessageUnknown(t *testing.T) { []byte(cnWorkers[0].Cns.SelfPubKey()), []byte("sig"), spos.MtUnknown, - cnWorkers[0].GetTime(), + cnWorkers[0].GetRoundTime(), 0, ) @@ -1297,7 +1357,7 @@ func TestMessage_DecodeBlockHeader(t *testing.T) { hdr := &block.Header{} hdr.Nonce = 1 - hdr.TimeStamp = cnWorkers[0].GetTime() + hdr.TimeStamp = cnWorkers[0].GetRoundTime() hdr.Signature = []byte(cnWorkers[0].Cns.SelfPubKey()) message, err := mock.MarshalizerMock{}.Marshal(hdr) @@ -1322,6 +1382,7 @@ func TestMessage_DecodeBlockHeader(t *testing.T) { func TestMessage_CheckChannelTxBlockBody(t *testing.T) { cnWorkers := InitMessage() + cnWorkers[0].Header = nil round := cnWorkers[0].Cns.Chr.Round() roundDuration := round.TimeDuration() round.UpdateRound(time.Now(), time.Now().Add(roundDuration)) @@ -1378,7 +1439,7 @@ func TestMessage_CheckChannelBlockHeader(t *testing.T) { []byte("sig"), spos.MtBlockBody, GetTime(cnWorkers[0]), - 0, + 1, ) cnWorkers[0].MessageChannels[spos.MtBlockBody] <- cnsDta @@ -1413,7 +1474,7 @@ func TestConsensus_CheckChannelsCommitmentHash(t *testing.T) { []byte(cnsGroup[1]), []byte("sig"), spos.MtCommitmentHash, - cnWorkers[0].GetTime(), + cnWorkers[0].GetRoundTime(), 0, ) @@ -1448,7 +1509,7 @@ func TestConsensus_CheckChannelsBitmap(t *testing.T) { []byte(cnsGroup[1]), []byte("sig"), spos.MtBitmap, - cnWorkers[0].GetTime(), + cnWorkers[0].GetRoundTime(), 0, ) @@ -1487,7 +1548,7 @@ func TestMessage_CheckChannelsCommitment(t *testing.T) { []byte(cnsGroup[1]), []byte("sig"), spos.MtCommitmentHash, - cnWorkers[0].GetTime(), + cnWorkers[0].GetRoundTime(), 0, ) @@ -1535,7 +1596,7 @@ func TestMessage_CheckChannelsSignature(t *testing.T) { []byte(cnsGroup[1]), []byte("sig"), spos.MtBitmap, - cnWorkers[0].GetTime(), + cnWorkers[0].GetRoundTime(), 0, ) @@ -1569,7 +1630,7 @@ func TestMessage_ReceivedBlock(t *testing.T) { []byte(cnWorkers[0].Cns.ConsensusGroup()[1]), []byte("sig"), spos.MtBlockBody, - cnWorkers[0].GetTime(), + cnWorkers[0].GetRoundTime(), 0, ) @@ -1591,10 +1652,12 @@ func TestMessage_ReceivedBlock(t *testing.T) { []byte(cnWorkers[0].Cns.ConsensusGroup()[1]), []byte("sig"), spos.MtBlockHeader, - cnWorkers[0].GetTime(), - 0, + cnWorkers[0].GetRoundTime(), + 1, ) + cnWorkers[0].Header = nil + cnWorkers[0].Cns.Data = nil r = cnWorkers[0].ReceivedBlockHeader(cnsDta) assert.Equal(t, true, r) } @@ -1614,8 +1677,8 @@ func TestSPOSConsensusWorker_ReceivedBlockBodyHeaderReceivedJobDone(t *testing.T []byte(cnWorkers[0].Cns.ConsensusGroup()[1]), []byte("sig"), spos.MtBlockBody, - cnWorkers[0].GetTime(), - 0, + cnWorkers[0].GetRoundTime(), + 1, ) cnWorkers[0].Header = &block.Header{} @@ -1639,7 +1702,7 @@ func TestSPOSConsensusWorker_ReceivedBlockBodyHeaderReceivedErrProcessBlockShoul []byte(cnWorkers[0].Cns.ConsensusGroup()[1]), []byte("sig"), spos.MtBlockBody, - cnWorkers[0].GetTime(), + cnWorkers[0].GetRoundTime(), 0, ) @@ -1647,7 +1710,7 @@ func TestSPOSConsensusWorker_ReceivedBlockBodyHeaderReceivedErrProcessBlockShoul blProcMock := initMockBlockProcessor() - blProcMock.ProcessBlockCalled = func(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody) error { + blProcMock.ProcessBlockCalled = func(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody, haveTime func() time.Duration) error { return process.ErrNilPreviousBlockHash } @@ -1669,7 +1732,7 @@ func TestMessage_ReceivedCommitmentHash(t *testing.T) { []byte(cnWorkers[0].Cns.ConsensusGroup()[1]), nil, spos.MtCommitmentHash, - cnWorkers[0].GetTime(), + cnWorkers[0].GetRoundTime(), 0, ) @@ -1707,7 +1770,7 @@ func TestMessage_ReceivedBitmap(t *testing.T) { []byte(cnWorkers[0].Cns.ConsensusGroup()[1]), []byte("sig"), spos.MtCommitmentHash, - cnWorkers[0].GetTime(), + cnWorkers[0].GetRoundTime(), 0, ) @@ -1757,7 +1820,7 @@ func TestMessage_ReceivedCommitment(t *testing.T) { []byte(cnWorkers[0].Cns.ConsensusGroup()[1]), []byte("sig"), spos.MtCommitmentHash, - cnWorkers[0].GetTime(), + cnWorkers[0].GetRoundTime(), 0) r := cnWorkers[0].ReceivedCommitmentHash(cnsDta) @@ -1794,7 +1857,7 @@ func TestMessage_ReceivedSignature(t *testing.T) { []byte(cnWorkers[0].Cns.ConsensusGroup()[1]), []byte("sig"), spos.MtSignature, - cnWorkers[0].GetTime(), + cnWorkers[0].GetRoundTime(), 0, ) @@ -1822,7 +1885,7 @@ func TestMessage_CheckIfBlockIsValid(t *testing.T) { hdr := &block.Header{} hdr.Nonce = 1 - hdr.TimeStamp = cnWorkers[0].GetTime() + hdr.TimeStamp = cnWorkers[0].GetRoundTime() hdr.PrevHash = []byte("X") @@ -1837,14 +1900,14 @@ func TestMessage_CheckIfBlockIsValid(t *testing.T) { hdr.Nonce = 2 r = cnWorkers[0].CheckIfBlockIsValid(hdr) - assert.True(t, r) + assert.False(t, r) hdr.Nonce = 1 cnWorkers[0].BlockChain.CurrentBlockHeader = hdr hdr = &block.Header{} hdr.Nonce = 1 - hdr.TimeStamp = cnWorkers[0].GetTime() + hdr.TimeStamp = cnWorkers[0].GetRoundTime() r = cnWorkers[0].CheckIfBlockIsValid(hdr) assert.False(t, r) @@ -1859,7 +1922,7 @@ func TestMessage_CheckIfBlockIsValid(t *testing.T) { hdr.PrevHash = []byte("") r = cnWorkers[0].CheckIfBlockIsValid(hdr) - assert.True(t, r) + assert.False(t, r) hdr.Nonce = 2 @@ -1895,7 +1958,7 @@ func TestMessage_GetMessageTypeName(t *testing.T) { assert.Equal(t, "(UNKNOWN)", r) r = cnWorkers[0].GetMessageTypeName(spos.MessageType(-1)) - assert.Equal(t, "Undifined message type", r) + assert.Equal(t, "Undefined message type", r) } func TestConsensus_CheckConsensus(t *testing.T) { @@ -1907,6 +1970,9 @@ func TestConsensus_CheckConsensus(t *testing.T) { mock.HasherMock{}, mock.MarshalizerMock{}, &mock.BlockProcessorMock{}, + &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }}, &mock.BelNevMock{}, &mock.KeyGenMock{}, &mock.PrivateKeyMock{}, @@ -2085,3 +2151,2037 @@ func TestConsensus_CheckSignatureConsensus(t *testing.T) { assert.True(t, ok) assert.Equal(t, spos.SsFinished, cns.Status(spos.SrSignature)) } + +func TestConsensusDataCreate_ShouldReturnTheSameObject(t *testing.T) { + dta := spos.NewConsensusData( + nil, + nil, + nil, + nil, + 0, + 0, + 0) + + assert.Equal(t, dta, dta.Create()) +} + +func TestConsensusDataID_ShouldReturnID(t *testing.T) { + dta := spos.NewConsensusData( + nil, + nil, + nil, + []byte("sig"), + spos.MtSignature, + 0, + 1) + + id := fmt.Sprintf("1-sig-6") + + assert.Equal(t, id, dta.ID()) +} + +func TestCheckSignaturesValidity_ShouldErrNilSignature(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + err := cnWorker.CheckSignaturesValidity([]byte(string(2))) + assert.Equal(t, spos.ErrNilSignature, err) +} + +func TestCheckSignaturesValidity_ShouldErrInvalidIndex(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 22 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + multisigner.Reset(nil, 0) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + cnWorker.Cns.SetJobDone(consensusGroup[0], spos.SrSignature, true) + + err := cnWorker.CheckSignaturesValidity([]byte(string(1))) + assert.Equal(t, crypto.ErrInvalidIndex, err) +} + +func TestCheckSignaturesValidity_ShouldRetunNil(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 22 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + cnWorker.Cns.SetJobDone(consensusGroup[0], spos.SrSignature, true) + + err := cnWorker.CheckSignaturesValidity([]byte(string(1))) + assert.Equal(t, nil, err) +} + +func TestGenCommitmentHash_ShouldRetunErrOnCreateCommitment(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 22 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + err := errors.New("error create commitment") + + multisigner.CreateCommitmentMock = func() ([]byte, []byte, error) { + return nil, nil, err + } + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + _, err2 := cnWorker.GenCommitmentHash() + assert.Equal(t, err, err2) +} + +func TestGenCommitmentHash_ShouldRetunErrOnIndexSelfConsensusGroup(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 22 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cns.SetSelfPubKey("X") + + multisigner.CreateCommitmentMock = func() ([]byte, []byte, error) { + return nil, nil, nil + } + + multisigner.AddCommitmentMock = func(uint16, []byte) error { + return spos.ErrSelfNotFoundInConsensus + } + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + _, err := cnWorker.GenCommitmentHash() + assert.Equal(t, spos.ErrSelfNotFoundInConsensus, err) +} + +func TestGenCommitmentHash_ShouldRetunErrOnAddCommitment(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 22 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + multisigner.CreateCommitmentMock = func() ([]byte, []byte, error) { + return nil, nil, nil + } + + err := errors.New("error add commitment") + + multisigner.AddCommitmentMock = func(uint16, []byte) error { + return err + } + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + _, err2 := cnWorker.GenCommitmentHash() + assert.Equal(t, err, err2) +} + +func TestGenCommitmentHash_ShouldRetunErrOnSetCommitmentSecret(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 22 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + multisigner.CreateCommitmentMock = func() ([]byte, []byte, error) { + return nil, nil, nil + } + + multisigner.AddCommitmentMock = func(uint16, []byte) error { + return nil + } + + err := errors.New("error set commitment secret") + + multisigner.SetCommitmentSecretMock = func([]byte) error { + return err + } + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + _, err2 := cnWorker.GenCommitmentHash() + assert.Equal(t, err, err2) +} + +func TestGenCommitmentHash_ShouldRetunErrOnAddCommitmentHash(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 22 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + multisigner.CreateCommitmentMock = func() ([]byte, []byte, error) { + return nil, nil, nil + } + + multisigner.AddCommitmentMock = func(uint16, []byte) error { + return nil + } + + multisigner.SetCommitmentSecretMock = func([]byte) error { + return nil + } + + err := errors.New("error add commitment hash") + + multisigner.AddCommitmentHashMock = func(uint16, []byte) error { + return err + } + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + _, err2 := cnWorker.GenCommitmentHash() + assert.Equal(t, err, err2) +} + +func TestGenCommitmentHash_ShouldRetunNil(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 22 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + multisigner.CreateCommitmentMock = func() ([]byte, []byte, error) { + return nil, nil, nil + } + + multisigner.AddCommitmentMock = func(uint16, []byte) error { + return nil + } + + multisigner.SetCommitmentSecretMock = func([]byte) error { + return nil + } + + multisigner.AddCommitmentHashMock = func(uint16, []byte) error { + return nil + } + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + _, err := cnWorker.GenCommitmentHash() + assert.Equal(t, nil, err) +} + +func TestCheckCommitmentsValidity_ShouldErrNilCommitmet(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + err := cnWorker.CheckCommitmentsValidity([]byte(string(2))) + assert.Equal(t, spos.ErrNilCommitment, err) +} + +func TestCheckCommitmentsValidity_ShouldErrInvalidIndex(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + multisigner.Reset(nil, 0) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + cnWorker.Cns.SetJobDone(consensusGroup[0], spos.SrCommitment, true) + + err := cnWorker.CheckCommitmentsValidity([]byte(string(1))) + assert.Equal(t, crypto.ErrInvalidIndex, err) +} + +func TestCheckCommitmentsValidity_ShouldErrOnCommitmentHash(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + err := errors.New("error commitment hash") + multisigner.CommitmentHashMock = func(uint16) ([]byte, error) { + return nil, err + } + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + cnWorker.Cns.SetJobDone(consensusGroup[0], spos.SrCommitment, true) + + err2 := cnWorker.CheckCommitmentsValidity([]byte(string(1))) + assert.Equal(t, err, err2) +} + +func TestCheckCommitmentsValidity_ShouldErrCommitmentHashDoesNotMatch(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + multisigner.CommitmentHashMock = func(uint16) ([]byte, error) { + return []byte("X"), nil + } + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + cnWorker.Cns.SetJobDone(consensusGroup[0], spos.SrCommitment, true) + + err := cnWorker.CheckCommitmentsValidity([]byte(string(1))) + assert.Equal(t, spos.ErrCommitmentHashDoesNotMatch, err) +} + +func TestCheckCommitmentsValidity_ShouldReturnNil(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + multisigner.CommitmentMock = func(uint16) ([]byte, error) { + return []byte("X"), nil + } + + multisigner.CommitmentHashMock = func(uint16) ([]byte, error) { + return mock.HasherMock{}.Compute(string([]byte("X"))), nil + } + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + cnWorker.Cns.SetJobDone(consensusGroup[0], spos.SrCommitment, true) + + err := cnWorker.CheckCommitmentsValidity([]byte(string(1))) + assert.Equal(t, nil, err) +} + +func TestDoAdvanceJob_ShouldReturnFalse(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + cnWorker.Cns.SetStatus(spos.SrEndRound, spos.SsFinished) + + assert.False(t, cnWorker.DoAdvanceJob()) +} + +func TestDoAdvanceJob_ShouldReturnTrue(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + assert.True(t, cnWorker.DoAdvanceJob()) +} + +func TestDoExtendStartRound_ShouldSetStartRoundFinished(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + cnWorker.ExtendStartRound() + + assert.Equal(t, spos.SsFinished, cnWorker.Cns.Status(spos.SrStartRound)) +} + +func TestDoExtendBlock_ShouldNotSetBlockExtended(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return true + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + cnWorker.ExtendBlock() + + assert.NotEqual(t, spos.SsExtended, cnWorker.Cns.Status(spos.SrBlock)) +} + +func TestDoExtendBlock_ShouldSetBlockExtended(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + cnWorker.ExtendBlock() + + assert.Equal(t, spos.SsExtended, cnWorker.Cns.Status(spos.SrBlock)) +} + +func TestReceivedMessage_ShouldReturnWhenIsCanceled(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + blk := &block.TxBlockBody{} + message, _ := mock.MarshalizerMock{}.Marshal(blk) + + cnsDta := spos.NewConsensusData( + message, + nil, + []byte(cnWorker.Cns.SelfPubKey()), + []byte("sig"), + spos.MtBlockBody, + cnWorker.GetRoundTime(), + 0, + ) + + cnWorker.Cns.Chr.SetSelfSubround(-1) + cnWorker.ReceivedMessage(string(consensusTopic), cnsDta, nil) + + assert.Equal(t, 0, len(cnWorker.ReceivedMessages[spos.MtBlockBody])) +} + +func TestReceivedMessage_ShouldReturnWhenDataReceivedIsInvalid(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + cnWorker.ReceivedMessage(string(consensusTopic), nil, nil) + + assert.Equal(t, 0, len(cnWorker.ReceivedMessages[spos.MtBlockBody])) +} + +func TestReceivedMessage_ShouldReturnWhenNodeIsNotInTheConsensusGroup(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + blk := &block.TxBlockBody{} + message, _ := mock.MarshalizerMock{}.Marshal(blk) + + cnsDta := spos.NewConsensusData( + message, + nil, + []byte("X"), + []byte("sig"), + spos.MtBlockBody, + cnWorker.GetRoundTime(), + 0, + ) + + cnWorker.ReceivedMessage(string(consensusTopic), cnsDta, nil) + + assert.Equal(t, 0, len(cnWorker.ReceivedMessages[spos.MtBlockBody])) +} + +func TestReceivedMessage_ShouldReturnWhenShouldDropMessage(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + blk := &block.TxBlockBody{} + message, _ := mock.MarshalizerMock{}.Marshal(blk) + + cnsDta := spos.NewConsensusData( + message, + nil, + []byte(cnWorker.Cns.SelfPubKey()), + []byte("sig"), + spos.MtBlockBody, + cnWorker.GetRoundTime(), + -1, + ) + + cnWorker.ReceivedMessage(string(consensusTopic), cnsDta, nil) + + assert.Equal(t, 0, len(cnWorker.ReceivedMessages[spos.MtBlockBody])) +} + +func TestReceivedMessage_ShouldReturnWhenReceivedMessageIsFromSelf(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + blk := &block.TxBlockBody{} + message, _ := mock.MarshalizerMock{}.Marshal(blk) + + cnsDta := spos.NewConsensusData( + message, + nil, + []byte(cnWorker.Cns.SelfPubKey()), + []byte("sig"), + spos.MtBlockBody, + cnWorker.GetRoundTime(), + 0, + ) + + cnWorker.ReceivedMessage(string(consensusTopic), cnsDta, nil) + + assert.Equal(t, 0, len(cnWorker.ReceivedMessages[spos.MtBlockBody])) +} + +func TestReceivedMessage_ShouldReturnWhenSignatureIsInvalid(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + blk := &block.TxBlockBody{} + message, _ := mock.MarshalizerMock{}.Marshal(blk) + + cnsDta := spos.NewConsensusData( + message, + nil, + []byte(cnWorker.Cns.ConsensusGroup()[1]), + nil, + spos.MtBlockBody, + cnWorker.GetRoundTime(), + 0, + ) + + cnWorker.ReceivedMessage(string(consensusTopic), cnsDta, nil) + + assert.Equal(t, 0, len(cnWorker.ReceivedMessages[spos.MtBlockBody])) +} + +func TestReceivedMessage_ShouldSendReceivedMesageOnChannel(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + blk := &block.TxBlockBody{} + message, _ := mock.MarshalizerMock{}.Marshal(blk) + + cnsDta := spos.NewConsensusData( + message, + nil, + []byte(cnWorker.Cns.ConsensusGroup()[1]), + []byte("sig"), + spos.MtBlockBody, + cnWorker.GetRoundTime(), + 0, + ) + + cnWorker.ReceivedMessage(string(consensusTopic), cnsDta, nil) + + time.Sleep(100 * time.Millisecond) + + assert.Equal(t, 1, len(cnWorker.ReceivedMessages[spos.MtBlockBody])) +} + +func TestShouldDropConsensusMessage_ShouldReturnTrueWhenMessageReceivedIsFromThePastRounds(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + blk := &block.TxBlockBody{} + message, _ := mock.MarshalizerMock{}.Marshal(blk) + + cnsDta := spos.NewConsensusData( + message, + nil, + []byte(cnWorker.Cns.ConsensusGroup()[1]), + []byte("sig"), + spos.MtBlockBody, + cnWorker.GetRoundTime(), + -1, + ) + + assert.True(t, cnWorker.ShouldDropConsensusMessage(cnsDta)) +} + +func TestShouldDropConsensusMessage_ShouldReturnTrueWhenMessageIsReceivedAfterEndRound(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + blk := &block.TxBlockBody{} + message, _ := mock.MarshalizerMock{}.Marshal(blk) + + cnsDta := spos.NewConsensusData( + message, + nil, + []byte(cnWorker.Cns.ConsensusGroup()[1]), + []byte("sig"), + spos.MtBlockBody, + cnWorker.GetRoundTime(), + 0, + ) + + cnWorker.Cns.Chr.AddSubround(spos.NewSubround( + chronology.SubroundId(spos.SrAdvance), + -1, + int64(roundDuration*100/100), + cnWorker.Cns.GetSubroundName(spos.SrAdvance), + nil, + nil, + nil)) + + assert.True(t, cnWorker.ShouldDropConsensusMessage(cnsDta)) +} + +func TestShouldDropConsensusMessage_ShouldReturnFalse(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + blk := &block.TxBlockBody{} + message, _ := mock.MarshalizerMock{}.Marshal(blk) + + cnsDta := spos.NewConsensusData( + message, + nil, + []byte(cnWorker.Cns.ConsensusGroup()[1]), + []byte("sig"), + spos.MtBlockBody, + cnWorker.GetRoundTime(), + 0, + ) + + cnWorker.Cns.Chr.AddSubround(spos.NewSubround( + chronology.SubroundId(spos.SrEndRound), + -1, + int64(roundDuration*100/100), + cnWorker.Cns.GetSubroundName(spos.SrEndRound), + nil, + nil, + nil)) + + assert.False(t, cnWorker.ShouldDropConsensusMessage(cnsDta)) +} + +func TestCheckSignature_ShouldReturnErrNilConsensusData(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + err := cnWorker.CheckSignature(nil) + + assert.Equal(t, spos.ErrNilConsensusData, err) +} + +func TestCheckSignature_ShouldReturnErrNilPublicKey(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + blk := &block.TxBlockBody{} + message, _ := mock.MarshalizerMock{}.Marshal(blk) + + cnsDta := spos.NewConsensusData( + message, + nil, + nil, + []byte("sig"), + spos.MtBlockBody, + cnWorker.GetRoundTime(), + 0, + ) + + err := cnWorker.CheckSignature(cnsDta) + + assert.Equal(t, spos.ErrNilPublicKey, err) +} + +func TestCheckSignature_ShouldReturnErrNilSignature(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + blk := &block.TxBlockBody{} + message, _ := mock.MarshalizerMock{}.Marshal(blk) + + cnsDta := spos.NewConsensusData( + message, + nil, + []byte(cnWorker.Cns.ConsensusGroup()[1]), + nil, + spos.MtBlockBody, + cnWorker.GetRoundTime(), + 0, + ) + + err := cnWorker.CheckSignature(cnsDta) + + assert.Equal(t, spos.ErrNilSignature, err) +} + +func TestCheckSignature_ShouldReturnPublicKeyFromByteArrayErr(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + err := errors.New("error public key from byte array") + keyGenMock.PublicKeyFromByteArrayMock = func(b []byte) (crypto.PublicKey, error) { + return nil, err + } + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + blk := &block.TxBlockBody{} + message, _ := mock.MarshalizerMock{}.Marshal(blk) + + cnsDta := spos.NewConsensusData( + message, + nil, + []byte(cnWorker.Cns.ConsensusGroup()[1]), + []byte("sig"), + spos.MtBlockBody, + cnWorker.GetRoundTime(), + 0, + ) + + err2 := cnWorker.CheckSignature(cnsDta) + + assert.Equal(t, err, err2) +} + +func TestCheckSignature_ShouldReturnNilErr(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + blk := &block.TxBlockBody{} + message, _ := mock.MarshalizerMock{}.Marshal(blk) + + cnsDta := spos.NewConsensusData( + message, + nil, + []byte(cnWorker.Cns.ConsensusGroup()[1]), + []byte("sig"), + spos.MtBlockBody, + cnWorker.GetRoundTime(), + 0, + ) + + err := cnWorker.CheckSignature(cnsDta) + + assert.Nil(t, err) +} + +func TestProcessReceivedBlock_ShouldReturnFalseWhenBodyAndHeaderAreNotSet(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + blk := &block.TxBlockBody{} + message, _ := mock.MarshalizerMock{}.Marshal(blk) + + cnsDta := spos.NewConsensusData( + message, + nil, + []byte(cnWorker.Cns.ConsensusGroup()[1]), + []byte("sig"), + spos.MtBlockBody, + cnWorker.GetRoundTime(), + 0, + ) + + assert.False(t, cnWorker.ProcessReceivedBlock(cnsDta)) +} + +func TestProcessReceivedBlock_ShouldReturnFalseWhenProcessBlockFails(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + err := errors.New("error process block") + blProcMock.ProcessBlockCalled = func(*blockchain.BlockChain, *block.Header, *block.TxBlockBody, func() time.Duration) error { + return err + } + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + hdr := &block.Header{} + blk := &block.TxBlockBody{} + message, _ := mock.MarshalizerMock{}.Marshal(blk) + + cnsDta := spos.NewConsensusData( + message, + nil, + []byte(cnWorker.Cns.ConsensusGroup()[1]), + []byte("sig"), + spos.MtBlockBody, + cnWorker.GetRoundTime(), + 0, + ) + + cnWorker.Header = hdr + cnWorker.BlockBody = blk + + assert.False(t, cnWorker.ProcessReceivedBlock(cnsDta)) +} + +func TestProcessReceivedBlock_ShouldReturnFalseWhenProcessBlockReturnsInNextRound(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + hdr := &block.Header{} + blk := &block.TxBlockBody{} + message, _ := mock.MarshalizerMock{}.Marshal(blk) + + cnsDta := spos.NewConsensusData( + message, + nil, + []byte(cnWorker.Cns.ConsensusGroup()[1]), + []byte("sig"), + spos.MtBlockBody, + cnWorker.GetRoundTime(), + -1, + ) + + cnWorker.Header = hdr + cnWorker.BlockBody = blk + + assert.False(t, cnWorker.ProcessReceivedBlock(cnsDta)) +} + +func TestProcessReceivedBlock_ShouldReturnFalseWhenProcessBlockReturnsTooLate(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + hdr := &block.Header{} + blk := &block.TxBlockBody{} + message, _ := mock.MarshalizerMock{}.Marshal(blk) + + cnsDta := spos.NewConsensusData( + message, + nil, + []byte(cnWorker.Cns.ConsensusGroup()[1]), + []byte("sig"), + spos.MtBlockBody, + cnWorker.GetRoundTime(), + 0, + ) + + cnWorker.Header = hdr + cnWorker.BlockBody = blk + + cnWorker.Cns.Chr.AddSubround(spos.NewSubround( + chronology.SubroundId(spos.SrAdvance), + -1, + int64(roundDuration*100/100), + cnWorker.Cns.GetSubroundName(spos.SrAdvance), + nil, + nil, + nil)) + + assert.False(t, cnWorker.ProcessReceivedBlock(cnsDta)) +} + +func TestProcessReceivedBlock_ShouldReturnTrue(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + hdr := &block.Header{} + blk := &block.TxBlockBody{} + message, _ := mock.MarshalizerMock{}.Marshal(blk) + + cnsDta := spos.NewConsensusData( + message, + nil, + []byte(cnWorker.Cns.ConsensusGroup()[1]), + []byte("sig"), + spos.MtBlockBody, + cnWorker.GetRoundTime(), + 0, + ) + + cnWorker.Header = hdr + cnWorker.BlockBody = blk + + assert.True(t, cnWorker.ProcessReceivedBlock(cnsDta)) +} + +func TestHaveTime_ShouldReturnNegativeValue(t *testing.T) { + blkc := blockchain.BlockChain{} + keyGenMock, privKeyMock, pubKeyMock := initSingleSigning() + multisigner := initMultisigner() + blProcMock := initMockBlockProcessor() + bootMock := &mock.BootstrapMock{ShouldSyncCalled: func() bool { + return false + }} + + consensusGroupSize := 9 + roundDuration := 100 * time.Millisecond + genesisTime := time.Now() + // create consensus group list + consensusGroup := CreateConsensusGroup(consensusGroupSize) + + cns := initConsensus( + genesisTime, + roundDuration, + consensusGroup, + consensusGroupSize, + 0, + ) + + cnWorker, _ := spos.NewConsensusWorker( + cns, + &blkc, + mock.HasherMock{}, + mock.MarshalizerMock{}, + blProcMock, + bootMock, + multisigner, + keyGenMock, + privKeyMock, + pubKeyMock, + ) + + time.Sleep(roundDuration) + ret := cnWorker.HaveTime() + + assert.True(t, ret < 0) +} diff --git a/consensus/validators/groupSelectors/indexHashedGroup_test.go b/consensus/validators/groupSelectors/indexHashedGroup_test.go index dfbc9447dd9..d31708bae91 100644 --- a/consensus/validators/groupSelectors/indexHashedGroup_test.go +++ b/consensus/validators/groupSelectors/indexHashedGroup_test.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/assert" ) -func convertBigIntToBytes(value big.Int) []byte { +func convertBigIntToBytes(value *big.Int) []byte { return value.Bytes() } @@ -58,8 +58,8 @@ func TestIndexHashedGroupSelector_OkValShouldWork(t *testing.T) { ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(10, mock.HasherMock{}) list := []consensus.Validator{ - mock.NewValidatorMock(*big.NewInt(1), 2, []byte("pk0")), - mock.NewValidatorMock(*big.NewInt(2), 3, []byte("pk1")), + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), + mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")), } err := ihgs.LoadEligibleList(list) @@ -84,8 +84,8 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupWrongSizeShouldErr(t *te ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(10, mock.HasherMock{}) list := []consensus.Validator{ - mock.NewValidatorMock(*big.NewInt(1), 2, []byte("pk0")), - mock.NewValidatorMock(*big.NewInt(2), 3, []byte("pk1")), + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), + mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")), } _ = ihgs.LoadEligibleList(list) @@ -100,8 +100,8 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupNilRandomnessShouldErr(t ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(2, mock.HasherMock{}) list := []consensus.Validator{ - mock.NewValidatorMock(*big.NewInt(1), 2, []byte("pk0")), - mock.NewValidatorMock(*big.NewInt(2), 3, []byte("pk1")), + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), + mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")), } _ = ihgs.LoadEligibleList(list) @@ -118,7 +118,7 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroup1ValidatorShouldReturnSa ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(1, mock.HasherMock{}) list := []consensus.Validator{ - mock.NewValidatorMock(*big.NewInt(1), 2, []byte("pk0")), + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), } _ = ihgs.LoadEligibleList(list) @@ -139,11 +139,11 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2Validators(t *testi //element 1 will be the second hasher.ComputeCalled = func(s string) []byte { if string(uint64ToBytes(0))+randomness == s { - return convertBigIntToBytes(*big.NewInt(0)) + return convertBigIntToBytes(big.NewInt(0)) } if string(uint64ToBytes(1))+randomness == s { - return convertBigIntToBytes(*big.NewInt(1)) + return convertBigIntToBytes(big.NewInt(1)) } return nil @@ -152,8 +152,8 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2Validators(t *testi ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(2, hasher) list := []consensus.Validator{ - mock.NewValidatorMock(*big.NewInt(1), 2, []byte("pk0")), - mock.NewValidatorMock(*big.NewInt(2), 3, []byte("pk1")), + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), + mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")), } _ = ihgs.LoadEligibleList(list) @@ -174,11 +174,11 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2ValidatorsRevertOrd //element 1 will be the first hasher.ComputeCalled = func(s string) []byte { if string(uint64ToBytes(0))+randomness == s { - return convertBigIntToBytes(*big.NewInt(1)) + return convertBigIntToBytes(big.NewInt(1)) } if string(uint64ToBytes(1))+randomness == s { - return convertBigIntToBytes(*big.NewInt(0)) + return convertBigIntToBytes(big.NewInt(0)) } return nil @@ -186,8 +186,8 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2ValidatorsRevertOrd ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(2, hasher) - validator0 := mock.NewValidatorMock(*big.NewInt(1), 2, []byte("pk0")) - validator1 := mock.NewValidatorMock(*big.NewInt(2), 3, []byte("pk1")) + validator0 := mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")) + validator1 := mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")) list := []consensus.Validator{ validator0, @@ -213,11 +213,11 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2ValidatorsSameIndex //element 1 will be the second as the same index is being returned and 0 is already in list hasher.ComputeCalled = func(s string) []byte { if string(uint64ToBytes(0))+randomness == s { - return convertBigIntToBytes(*big.NewInt(0)) + return convertBigIntToBytes(big.NewInt(0)) } if string(uint64ToBytes(1))+randomness == s { - return convertBigIntToBytes(*big.NewInt(0)) + return convertBigIntToBytes(big.NewInt(0)) } return nil @@ -226,8 +226,8 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2ValidatorsSameIndex ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(2, hasher) list := []consensus.Validator{ - mock.NewValidatorMock(*big.NewInt(1), 2, []byte("pk0")), - mock.NewValidatorMock(*big.NewInt(2), 3, []byte("pk1")), + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), + mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")), } _ = ihgs.LoadEligibleList(list) @@ -252,13 +252,13 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest6From10ValidatorsSho // 3 is the 4-th element // for index 5, hasher will return 9 which will translate to 9, so 9, 0, 1, 2, 3 are already picked, 4 is the 5-th element - script := make(map[string]big.Int) - script[string(uint64ToBytes(0))+randomness] = *big.NewInt(11) //will translate to 1, add 1 - script[string(uint64ToBytes(1))+randomness] = *big.NewInt(1) //will translate to 1, add 2 - script[string(uint64ToBytes(2))+randomness] = *big.NewInt(9) //will translate to 9, add 9 - script[string(uint64ToBytes(3))+randomness] = *big.NewInt(9) //will translate to 9, add 0 - script[string(uint64ToBytes(4))+randomness] = *big.NewInt(0) //will translate to 0, add 3 - script[string(uint64ToBytes(5))+randomness] = *big.NewInt(9) //will translate to 9, add 4 + script := make(map[string]*big.Int) + script[string(uint64ToBytes(0))+randomness] = big.NewInt(11) //will translate to 1, add 1 + script[string(uint64ToBytes(1))+randomness] = big.NewInt(1) //will translate to 1, add 2 + script[string(uint64ToBytes(2))+randomness] = big.NewInt(9) //will translate to 9, add 9 + script[string(uint64ToBytes(3))+randomness] = big.NewInt(9) //will translate to 9, add 0 + script[string(uint64ToBytes(4))+randomness] = big.NewInt(0) //will translate to 0, add 3 + script[string(uint64ToBytes(5))+randomness] = big.NewInt(9) //will translate to 9, add 4 hasher.ComputeCalled = func(s string) []byte { val, ok := script[s] @@ -272,16 +272,16 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest6From10ValidatorsSho ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(6, hasher) - validator0 := mock.NewValidatorMock(*big.NewInt(1), 1, []byte("pk0")) - validator1 := mock.NewValidatorMock(*big.NewInt(2), 2, []byte("pk1")) - validator2 := mock.NewValidatorMock(*big.NewInt(3), 3, []byte("pk2")) - validator3 := mock.NewValidatorMock(*big.NewInt(4), 4, []byte("pk3")) - validator4 := mock.NewValidatorMock(*big.NewInt(5), 5, []byte("pk4")) - validator5 := mock.NewValidatorMock(*big.NewInt(6), 6, []byte("pk5")) - validator6 := mock.NewValidatorMock(*big.NewInt(7), 7, []byte("pk6")) - validator7 := mock.NewValidatorMock(*big.NewInt(8), 8, []byte("pk7")) - validator8 := mock.NewValidatorMock(*big.NewInt(9), 9, []byte("pk8")) - validator9 := mock.NewValidatorMock(*big.NewInt(10), 10, []byte("pk9")) + validator0 := mock.NewValidatorMock(big.NewInt(1), 1, []byte("pk0")) + validator1 := mock.NewValidatorMock(big.NewInt(2), 2, []byte("pk1")) + validator2 := mock.NewValidatorMock(big.NewInt(3), 3, []byte("pk2")) + validator3 := mock.NewValidatorMock(big.NewInt(4), 4, []byte("pk3")) + validator4 := mock.NewValidatorMock(big.NewInt(5), 5, []byte("pk4")) + validator5 := mock.NewValidatorMock(big.NewInt(6), 6, []byte("pk5")) + validator6 := mock.NewValidatorMock(big.NewInt(7), 7, []byte("pk6")) + validator7 := mock.NewValidatorMock(big.NewInt(8), 8, []byte("pk7")) + validator8 := mock.NewValidatorMock(big.NewInt(9), 9, []byte("pk8")) + validator9 := mock.NewValidatorMock(big.NewInt(10), 10, []byte("pk9")) list := []consensus.Validator{ validator0, @@ -321,7 +321,7 @@ func BenchmarkIndexHashedGroupSelector_ComputeValidatorsGroup21of400(b *testing. //generate 400 validators for i := 0; i < 400; i++ { - list = append(list, mock.NewValidatorMock(*big.NewInt(0), 0, []byte("pk"+strconv.Itoa(i)))) + list = append(list, mock.NewValidatorMock(big.NewInt(0), 0, []byte("pk"+strconv.Itoa(i)))) } _ = ihgs.LoadEligibleList(list) diff --git a/crypto/interface.go b/crypto/interface.go index 0e5c3f07429..b111def7769 100644 --- a/crypto/interface.go +++ b/crypto/interface.go @@ -49,12 +49,14 @@ type MultiSigner interface { AggregateCommitments(bitmap []byte) ([]byte, error) // SetAggCommitment sets the aggregated commitment SetAggCommitment(aggCommitment []byte) error - // SignPartial creates a partial signature - SignPartial(bitmap []byte) ([]byte, error) - // AddSignPartial adds the partial signature of the signer with specified position - AddSignPartial(index uint16, sig []byte) error - // VerifyPartial verifies the partial signature of the signer with specified position - VerifyPartial(index uint16, sig []byte, bitmap []byte) error + // CreateSignatureShare creates a partial signature + CreateSignatureShare(bitmap []byte) ([]byte, error) + // AddSignatureShare adds the partial signature of the signer with specified position + AddSignatureShare(index uint16, sig []byte) error + // SignatureShare returns the partial signature set for given index + SignatureShare(index uint16) ([]byte, error) + // VerifySignatureShare verifies the partial signature of the signer with specified position + VerifySignatureShare(index uint16, sig []byte, bitmap []byte) error // AggregateSigs aggregates all collected partial signatures AggregateSigs(bitmap []byte) ([]byte, error) } diff --git a/crypto/multisig/belnev.go b/crypto/multisig/belnev.go index 1ba5d4283aa..c401ff81c31 100644 --- a/crypto/multisig/belnev.go +++ b/crypto/multisig/belnev.go @@ -1,24 +1,29 @@ package multisig import ( + "sync" + "github.com/ElrondNetwork/elrond-go-sandbox/crypto" "github.com/ElrondNetwork/elrond-go-sandbox/crypto/schnorr" "github.com/ElrondNetwork/elrond-go-sandbox/hashing" ) type belNev struct { - message []byte - pubKeys []crypto.PublicKey - privKey crypto.PrivateKey - commHashes [][]byte - commSecret []byte - commitments [][]byte - aggCommitment []byte - sigShares [][]byte - aggSig []byte - ownIndex uint16 - hasher hashing.Hasher - keyGen crypto.KeyGenerator + message []byte + pubKeys []crypto.PublicKey + privKey crypto.PrivateKey + mutCommHashes *sync.RWMutex + commHashes [][]byte + commSecret []byte + mutCommitments *sync.RWMutex + commitments [][]byte + aggCommitment []byte + mutSigShares *sync.RWMutex + sigShares [][]byte + aggSig []byte + ownIndex uint16 + hasher hashing.Hasher + keyGen crypto.KeyGenerator } // NewBelNevMultisig creates a new Bellare Neven multi-signer @@ -59,14 +64,17 @@ func NewBelNevMultisig( // own index is used only for signing return &belNev{ - pubKeys: pk, - privKey: privKey, - ownIndex: ownIndex, - hasher: hasher, - keyGen: keyGen, - commHashes: commHashes, - commitments: commitments, - sigShares: sigShares, + pubKeys: pk, + privKey: privKey, + ownIndex: ownIndex, + hasher: hasher, + keyGen: keyGen, + mutCommHashes: &sync.RWMutex{}, + commHashes: commHashes, + mutCommitments: &sync.RWMutex{}, + commitments: commitments, + mutSigShares: &sync.RWMutex{}, + sigShares: sigShares, }, nil } @@ -104,11 +112,20 @@ func (bn *belNev) Reset(pubKeys []string, index uint16) error { bn.ownIndex = index bn.pubKeys = pk bn.commSecret = nil - bn.commitments = make([][]byte, sizeConsensus) bn.aggCommitment = nil bn.aggSig = nil + + bn.mutCommHashes.Lock() bn.commHashes = make([][]byte, sizeConsensus) + bn.mutCommHashes.Unlock() + + bn.mutCommitments.Lock() + bn.commitments = make([][]byte, sizeConsensus) + bn.mutCommitments.Unlock() + + bn.mutSigShares.Lock() bn.sigShares = make([][]byte, sizeConsensus) + bn.mutSigShares.Unlock() return nil } @@ -120,16 +137,22 @@ func (bn *belNev) SetMessage(msg []byte) { // AddCommitmentHash sets a commitment Hash func (bn *belNev) AddCommitmentHash(index uint16, commHash []byte) error { + bn.mutCommHashes.Lock() if int(index) >= len(bn.commHashes) { + bn.mutCommHashes.Unlock() return crypto.ErrInvalidIndex } bn.commHashes[index] = commHash + bn.mutCommHashes.Unlock() return nil } // CommitmentHash returns the commitment hash from the list on the specified position func (bn *belNev) CommitmentHash(index uint16) ([]byte, error) { + bn.mutCommHashes.RLock() + defer bn.mutCommHashes.RUnlock() + if int(index) >= len(bn.commHashes) { return nil, crypto.ErrInvalidIndex } @@ -169,17 +192,22 @@ func (bn *belNev) SetCommitmentSecret(commSecret []byte) error { // AddCommitment adds a commitment to the list on the specified position func (bn *belNev) AddCommitment(index uint16, commitment []byte) error { + bn.mutCommitments.Lock() if int(index) >= len(bn.commitments) { + bn.mutCommitments.Unlock() return crypto.ErrInvalidIndex } bn.commitments[index] = commitment - + bn.mutCommitments.Unlock() return nil } // Commitment returns the commitment from the list with the specified position func (bn *belNev) Commitment(index uint16) ([]byte, error) { + bn.mutCommitments.RLock() + defer bn.mutCommitments.RUnlock() + if int(index) >= len(bn.commitments) { return nil, crypto.ErrInvalidIndex } @@ -193,7 +221,7 @@ func (bn *belNev) Commitment(index uint16) ([]byte, error) { // AggregateCommitments aggregates the list of commitments func (bn *belNev) AggregateCommitments(bitmap []byte) ([]byte, error) { - // TODO + // TODO, do not forget about mutCommitments return []byte("implement me"), nil } @@ -204,34 +232,52 @@ func (bn *belNev) SetAggCommitment(aggCommitment []byte) error { return nil } -// SignPartial creates a partial signature -func (bn *belNev) SignPartial(bitmap []byte) ([]byte, error) { +// CreateSignatureShare creates a partial signature +func (bn *belNev) CreateSignatureShare(bitmap []byte) ([]byte, error) { // TODO return []byte("implement me"), nil } -// VerifyPartial verifies the partial signature of the signer with specified position -func (bn *belNev) VerifyPartial(index uint16, sig []byte, bitmap []byte) error { +// VerifySignatureShare verifies the partial signature of the signer with specified position +func (bn *belNev) VerifySignatureShare(index uint16, sig []byte, bitmap []byte) error { // TODO return nil } -// AddSignPartial adds the partial signature of the signer with specified position -func (bn *belNev) AddSignPartial(index uint16, sig []byte) error { +// AddSignatureShare adds the partial signature of the signer with specified position +func (bn *belNev) AddSignatureShare(index uint16, sig []byte) error { + bn.mutSigShares.Lock() if int(index) >= len(bn.sigShares) { + bn.mutSigShares.Unlock() return crypto.ErrInvalidIndex } bn.sigShares[index] = sig - + bn.mutSigShares.Unlock() return nil } +// SignatureShare returns the partial signature set for given index +func (bn *belNev) SignatureShare(index uint16) ([]byte, error) { + bn.mutSigShares.RLock() + defer bn.mutSigShares.RUnlock() + + if int(index) >= len(bn.sigShares) { + return nil, crypto.ErrInvalidIndex + } + + if bn.sigShares[index] == nil { + return nil, crypto.ErrNilElement + } + + return bn.sigShares[index], nil +} + // AggregateSigs aggregates all collected partial signatures func (bn *belNev) AggregateSigs(bitmap []byte) ([]byte, error) { - // TODO + // TODO, do not forget about mutSigShares return []byte("implement me"), nil } diff --git a/data/blockchain/blockchain.go b/data/blockchain/blockchain.go index 375dbd5a3ef..46380c89107 100644 --- a/data/blockchain/blockchain.go +++ b/data/blockchain/blockchain.go @@ -49,14 +49,16 @@ type StorageService interface { // The BlockChain also holds pointers to the Genesis block, the current block // the height of the local chain and the perceived height of the chain in the network. type BlockChain struct { - lock sync.RWMutex - GenesisBlock *block.Header // Genesys Block pointer - CurrentBlockHeader *block.Header // Current Block pointer - CurrentTxBlockBody *block.TxBlockBody // Current Tx Block Body pointer - LocalHeight int64 // Height of the local chain - NetworkHeight int64 // Percieved height of the network chain - badBlocks storage.Cacher // Bad blocks cache - chain map[UnitType]storage.Storer // chains for each unit type. Together they form the blockchain + lock sync.RWMutex // Lock for accessing the storers chain + GenesisBlock *block.Header // Genesys Block Header pointer + GenesisHeaderHash []byte // Genesis Block Header hash + CurrentBlockHeader *block.Header // Current Block pointer + CurrentBlockHeaderHash []byte // Current Block Header hash + CurrentTxBlockBody *block.TxBlockBody // Current Tx Block Body pointer + LocalHeight int64 // Height of the local chain + NetworkHeight int64 // Percieved height of the network chain + badBlocks storage.Cacher // Bad blocks cache + chain map[UnitType]storage.Storer // chains for each unit type. Together they form the blockchain } // NewBlockChain returns an initialized blockchain diff --git a/data/interface.go b/data/interface.go index 21302aa5cbd..4c50cf1123e 100644 --- a/data/interface.go +++ b/data/interface.go @@ -15,7 +15,8 @@ type ShardedDataCacherNotifier interface { ShardDataStore(shardID uint32) (c storage.Cacher) AddData(key []byte, data interface{}, destShardID uint32) - SearchData(key []byte) (shardValuesPairs map[uint32]interface{}) + //SearchData(key []byte) (shardValuesPairs map[uint32]interface{}) + SearchFirstData(key []byte) (value interface{}, ok bool) RemoveData(key []byte, destShardID uint32) RemoveSetOfDataFromPool(keys [][]byte, destShardID uint32) RemoveDataFromAllShards(key []byte) @@ -23,6 +24,7 @@ type ShardedDataCacherNotifier interface { MoveData(sourceShardID, destShardID uint32, key [][]byte) Clear() ClearShardStore(shardID uint32) + CreateShardStore(destShardID uint32) } // Uint64Cacher defines a cacher-type struct that uses uint64 keys and []byte values (usually hashes) diff --git a/data/mock/shardedDataStub.go b/data/mock/shardedDataStub.go index ffa31631323..f49ab3b0685 100644 --- a/data/mock/shardedDataStub.go +++ b/data/mock/shardedDataStub.go @@ -8,7 +8,7 @@ type ShardedDataStub struct { RegisterHandlerCalled func(func(key []byte)) ShardDataStoreCalled func(shardID uint32) (c storage.Cacher) AddDataCalled func(key []byte, data interface{}, destShardID uint32) - SearchDataCalled func(key []byte) (shardValuesPairs map[uint32]interface{}) + SearchFirstDataCalled func(key []byte) (value interface{}, ok bool) RemoveDataCalled func(key []byte, destShardID uint32) RemoveDataFromAllShardsCalled func(key []byte) MergeShardStoresCalled func(sourceShardID, destShardID uint32) @@ -16,6 +16,7 @@ type ShardedDataStub struct { ClearCalled func() ClearShardStoreCalled func(shardID uint32) RemoveSetOfDataFromPoolCalled func(keys [][]byte, destShardID uint32) + CreateShardStoreCalled func(destShardID uint32) } func (sd *ShardedDataStub) RegisterHandler(handler func(key []byte)) { @@ -30,8 +31,8 @@ func (sd *ShardedDataStub) AddData(key []byte, data interface{}, destShardID uin sd.AddDataCalled(key, data, destShardID) } -func (sd *ShardedDataStub) SearchData(key []byte) (shardValuesPairs map[uint32]interface{}) { - return sd.SearchDataCalled(key) +func (sd *ShardedDataStub) SearchFirstData(key []byte) (value interface{}, ok bool) { + return sd.SearchFirstDataCalled(key) } func (sd *ShardedDataStub) RemoveData(key []byte, destShardID uint32) { @@ -61,3 +62,7 @@ func (sd *ShardedDataStub) ClearShardStore(shardID uint32) { func (sd *ShardedDataStub) RemoveSetOfDataFromPool(keys [][]byte, destShardID uint32) { sd.RemoveSetOfDataFromPoolCalled(keys, destShardID) } + +func (sd *ShardedDataStub) CreateShardStore(destShardID uint32) { + sd.CreateShardStoreCalled(destShardID) +} diff --git a/data/shardedData/shardedData.go b/data/shardedData/shardedData.go index d66159476a2..c7bae966bd3 100644 --- a/data/shardedData/shardedData.go +++ b/data/shardedData/shardedData.go @@ -65,9 +65,9 @@ func newShardStore(destShardID uint32, cacherConfig storage.CacheConfig) (*shard }, nil } -// NewShardStore is a ShardedData method that is responsible for creating +// CreateShardStore is a ShardedData method that is responsible for creating // a new shardStore at the destShardID index in the shardedDataStore map -func (sd *shardedData) NewShardStore(destShardID uint32) { +func (sd *shardedData) CreateShardStore(destShardID uint32) { sd.mutShardedDataStore.Lock() sd.newShardStoreNoLock(destShardID) sd.mutShardedDataStore.Unlock() @@ -121,10 +121,8 @@ func (sd *shardedData) AddData(key []byte, data interface{}, destShardID uint32) } } -// SearchData searches the key against all shard data store, retrieving found data in a map -func (sd *shardedData) SearchData(key []byte) (shardValuesPairs map[uint32]interface{}) { - shardValuesPairs = make(map[uint32]interface{}) - +// SearchFirstData searches the key against all shard data store, retrieving first value found +func (sd *shardedData) SearchFirstData(key []byte) (value interface{}, ok bool) { sd.mutShardedDataStore.RLock() for k := range sd.shardedDataStore { m := sd.shardedDataStore[k] @@ -133,12 +131,14 @@ func (sd *shardedData) SearchData(key []byte) (shardValuesPairs map[uint32]inter } if m.DataStore.Has(key) { - shardValuesPairs[k], _ = m.DataStore.Get(key) + value, _ = m.DataStore.Get(key) + sd.mutShardedDataStore.RUnlock() + return value, true } } sd.mutShardedDataStore.RUnlock() - return shardValuesPairs + return nil, false } // RemoveSetOfDataFromPool removes a list of keys from the corresponding pool diff --git a/data/shardedData/shardedData_test.go b/data/shardedData/shardedData_test.go index 6e7070ad7f1..ad23f6f1139 100644 --- a/data/shardedData/shardedData_test.go +++ b/data/shardedData/shardedData_test.go @@ -292,17 +292,17 @@ func TestShardedData_RegisterAddedDataHandlerNotAddedShouldNotCall(t *testing.T) assert.Equal(t, 1, len(sd.AddedDataHandlers())) } -func TestShardedData_SearchNotFoundShouldRetEmptyMap(t *testing.T) { +func TestShardedData_SearchFirstDataNotFoundShouldRetNilAndFalse(t *testing.T) { t.Parallel() sd, _ := shardedData.NewShardedData(defaultTestConfig) - resp := sd.SearchData([]byte("aaaa")) - assert.NotNil(t, resp) - assert.Equal(t, 0, len(resp)) + value, ok := sd.SearchFirstData([]byte("aaaa")) + assert.Nil(t, value) + assert.False(t, ok) } -func TestShardedData_SearchFoundShouldRetResults(t *testing.T) { +func TestShardedData_SearchFirstDataFoundShouldRetResults(t *testing.T) { t.Parallel() sd, _ := shardedData.NewShardedData(defaultTestConfig) @@ -311,9 +311,7 @@ func TestShardedData_SearchFoundShouldRetResults(t *testing.T) { sd.AddData([]byte("aaaa"), "a2", 4) sd.AddData([]byte("aaaa"), "a3", 5) - resp := sd.SearchData([]byte("aaaa")) - assert.NotNil(t, resp) - assert.Equal(t, 2, len(resp)) - assert.Equal(t, "a2", resp[4]) - assert.Equal(t, "a3", resp[5]) + value, ok := sd.SearchFirstData([]byte("aaa")) + assert.NotNil(t, value) + assert.True(t, ok) } diff --git a/data/state/account.go b/data/state/account.go index ede114fe0a2..be42d6ff897 100644 --- a/data/state/account.go +++ b/data/state/account.go @@ -18,7 +18,7 @@ const ( type RegistrationData struct { OriginatorPubKey []byte NodePubKey []byte - Stake big.Int + Stake *big.Int Action ActionRequested RoundIndex int32 EpochIndex int32 @@ -27,7 +27,7 @@ type RegistrationData struct { // Account is the struct used in serialization/deserialization type Account struct { Nonce uint64 - Balance big.Int + Balance *big.Int CodeHash []byte RootHash []byte RegistrationData []RegistrationData @@ -35,7 +35,9 @@ type Account struct { // NewAccount creates a new account object func NewAccount() *Account { - return &Account{} + return &Account{ + Balance: big.NewInt(0), + } } //TODO add Cap'N'Proto converter funcs diff --git a/data/state/account_test.go b/data/state/account_test.go index 8f7b111ab69..9630ed9ec5e 100644 --- a/data/state/account_test.go +++ b/data/state/account_test.go @@ -14,7 +14,7 @@ func TestAccount_MarshalUnmarshalNilSlice_ShouldWork(t *testing.T) { acnt := &state.Account{ Nonce: 8, - Balance: *big.NewInt(56), + Balance: big.NewInt(56), CodeHash: nil, RootHash: nil, RegistrationData: nil, @@ -35,7 +35,7 @@ func TestAccount_MarshalUnmarshalEmptySlice_ShouldWork(t *testing.T) { acnt := &state.Account{ Nonce: 8, - Balance: *big.NewInt(56), + Balance: big.NewInt(56), CodeHash: nil, RootHash: nil, RegistrationData: make([]state.RegistrationData, 0), @@ -56,14 +56,14 @@ func TestAccount_MarshalUnmarshalWithRegData_ShouldWork(t *testing.T) { acnt := &state.Account{ Nonce: 8, - Balance: *big.NewInt(56), + Balance: big.NewInt(56), CodeHash: nil, RootHash: nil, RegistrationData: []state.RegistrationData{ { OriginatorPubKey: []byte("a"), NodePubKey: []byte("b"), - Stake: *big.NewInt(5), + Stake: big.NewInt(5), Action: state.ArRegister, }, }, diff --git a/data/state/accountsDB_test.go b/data/state/accountsDB_test.go index f7bfbb800ac..e8068fe0c62 100644 --- a/data/state/accountsDB_test.go +++ b/data/state/accountsDB_test.go @@ -420,7 +420,7 @@ func TestAccountsDBGetJournalizedAccountNotFoundShouldCreateEmpty(t *testing.T) assert.Nil(t, err) assert.Equal(t, uint64(0), account.BaseAccount().Nonce) - assert.Equal(t, *big.NewInt(0), account.BaseAccount().Balance) + assert.Equal(t, big.NewInt(0), account.BaseAccount().Balance) assert.Equal(t, []byte(nil), account.BaseAccount().CodeHash) assert.Equal(t, []byte(nil), account.BaseAccount().RootHash) assert.Equal(t, adr, account.AddressContainer()) @@ -558,7 +558,7 @@ func TestAccountsDBGetAccountAccountNotFound(t *testing.T) { //Step 1. Create an account + its DbAccount representation testAccount := state.NewAccount() testAccount.Nonce = 1 - testAccount.Balance = *big.NewInt(45) + testAccount.Balance = big.NewInt(45) //Step 2. marshalize the DbAccount marshalizer := mock.MarshalizerMock{} @@ -845,7 +845,7 @@ func TestAccountsDBTestCreateModifyComitSaveGet(t *testing.T) { assert.Nil(t, err) err = journalizedAccount.SetNonceWithJournal(34) assert.Nil(t, err) - err = journalizedAccount.SetBalanceWithJournal(*big.NewInt(45)) + err = journalizedAccount.SetBalanceWithJournal(big.NewInt(45)) assert.Nil(t, err) err = adb.PutCode(journalizedAccount, []byte("Test SC code to be executed")) assert.Nil(t, err) @@ -863,7 +863,7 @@ func TestAccountsDBTestCreateModifyComitSaveGet(t *testing.T) { assert.Nil(t, err) assert.Equal(t, uint64(34), recoveredAccount.BaseAccount().Nonce) - assert.Equal(t, *big.NewInt(45), recoveredAccount.BaseAccount().Balance) + assert.Equal(t, big.NewInt(45), recoveredAccount.BaseAccount().Balance) assert.Equal(t, []byte("Test SC code to be executed"), recoveredAccount.Code()) value, err := recoveredAccount.RetrieveValue([]byte("a key")) assert.Nil(t, err) diff --git a/data/state/accountsDBreverts_test.go b/data/state/accountsDBreverts_test.go index 4b07b47fc4b..665dcfb3917 100644 --- a/data/state/accountsDBreverts_test.go +++ b/data/state/accountsDBreverts_test.go @@ -30,13 +30,13 @@ func accountsDBRevertEmulateBalanceTxExecution(acntSrc, acntDest state.Journaliz } //substract value from src - err := acntSrc.SetBalanceWithJournal(*srcVal.Sub(&srcVal, value)) + err := acntSrc.SetBalanceWithJournal(srcVal.Sub(srcVal, value)) if err != nil { return err } //add value to dest - err = acntDest.SetBalanceWithJournal(*destVal.Add(&destVal, value)) + err = acntDest.SetBalanceWithJournal(destVal.Add(destVal, value)) if err != nil { return err } @@ -167,12 +167,12 @@ func TestAccountsDBRevertBalanceStepByStepAccountDataShouldWork(t *testing.T) { snapshotPreSet := adb.JournalLen() //Step 3. Set balances and save data - err = state1.SetBalanceWithJournal(*big.NewInt(40)) + err = state1.SetBalanceWithJournal(big.NewInt(40)) assert.Nil(t, err) hrWithBalance1 := base64.StdEncoding.EncodeToString(adb.RootHash()) fmt.Printf("State root - account with balance 40: %v\n", hrWithBalance1) - err = state2.SetBalanceWithJournal(*big.NewInt(50)) + err = state2.SetBalanceWithJournal(big.NewInt(50)) assert.Nil(t, err) hrWithBalance2 := base64.StdEncoding.EncodeToString(adb.RootHash()) fmt.Printf("State root - account with balance 50: %v\n", hrWithBalance2) @@ -402,7 +402,7 @@ func TestAccountsDBExecBalanceTxExecution(t *testing.T) { assert.Nil(t, err) //Set a high balance to src's account - err = acntSrc.SetBalanceWithJournal(*big.NewInt(1000)) + err = acntSrc.SetBalanceWithJournal(big.NewInt(1000)) assert.Nil(t, err) hrOriginal := base64.StdEncoding.EncodeToString(adb.RootHash()) @@ -449,7 +449,7 @@ func TestAccountsDBExecALotOfBalanceTxOK(t *testing.T) { assert.Nil(t, err) //Set a high balance to src's account - err = acntSrc.SetBalanceWithJournal(*big.NewInt(10000000)) + err = acntSrc.SetBalanceWithJournal(big.NewInt(10000000)) assert.Nil(t, err) hrOriginal := base64.StdEncoding.EncodeToString(adb.RootHash()) @@ -480,7 +480,7 @@ func TestAccountsDBExecALotOfBalanceTxOKorNOK(t *testing.T) { assert.Nil(t, err) //Set a high balance to src's account - err = acntSrc.SetBalanceWithJournal(*big.NewInt(10000000)) + err = acntSrc.SetBalanceWithJournal(big.NewInt(10000000)) assert.Nil(t, err) hrOriginal := base64.StdEncoding.EncodeToString(adb.RootHash()) @@ -516,7 +516,7 @@ func BenchmarkTxExecution(b *testing.B) { assert.Nil(b, err) //Set a high balance to src's account - err = acntSrc.SetBalanceWithJournal(*big.NewInt(10000000)) + err = acntSrc.SetBalanceWithJournal(big.NewInt(10000000)) assert.Nil(b, err) b.ResetTimer() diff --git a/data/state/address.go b/data/state/address.go index 334758e8fb0..772b192dfd2 100644 --- a/data/state/address.go +++ b/data/state/address.go @@ -5,8 +5,8 @@ type address struct { bytes []byte } -// newAddress creates a new Address with the same byte slice as the parameter received -func newAddress(adr []byte) *address { +// NewAddress creates a new Address with the same byte slice as the parameter received +func NewAddress(adr []byte) *address { return &address{bytes: adr} } diff --git a/data/state/hashAddressConverter.go b/data/state/hashAddressConverter.go index 840c6ac96cc..57c91f1802d 100644 --- a/data/state/hashAddressConverter.go +++ b/data/state/hashAddressConverter.go @@ -50,7 +50,7 @@ func (hac *HashAddressConverter) CreateAddressFromPublicKeyBytes(pubKey []byte) hash = hash[len(hash)-hac.addressLen:] } - return newAddress(hash), nil + return NewAddress(hash), nil } // ConvertToHex returns the hex string representation of the address. @@ -89,7 +89,7 @@ func (hac *HashAddressConverter) CreateAddressFromHex(hexAddress string) (Addres return nil, err } - return newAddress(buff), nil + return NewAddress(buff), nil } // PrepareAddressBytes checks and returns the slice compatible to the address format diff --git a/data/state/interface.go b/data/state/interface.go index 9e3736578f1..0b72090b36f 100644 --- a/data/state/interface.go +++ b/data/state/interface.go @@ -10,7 +10,7 @@ import ( const HashLength = 32 // RegistrationAddress holds the defined registration address -var RegistrationAddress = newAddress(make([]byte, 32)) +var RegistrationAddress = NewAddress(make([]byte, 32)) // AddressConverter is used to convert to/from AddressContainer type AddressConverter interface { @@ -57,7 +57,7 @@ type JournalizedAccountWrapper interface { TrackableDataAccountWrapper SetNonceWithJournal(uint64) error - SetBalanceWithJournal(big.Int) error + SetBalanceWithJournal(*big.Int) error SetCodeHashWithJournal([]byte) error SetRootHashWithJournal([]byte) error AppendDataRegistrationWithJournal(*RegistrationData) error diff --git a/data/state/journalEntries.go b/data/state/journalEntries.go index e2dd4b24b21..8700ca695d8 100644 --- a/data/state/journalEntries.go +++ b/data/state/journalEntries.go @@ -20,7 +20,7 @@ type JournalEntryNonce struct { // JournalEntryBalance is used to revert a balance change type JournalEntryBalance struct { jurnalizedAccount JournalizedAccountWrapper - oldBalance big.Int + oldBalance *big.Int } // JournalEntryCodeHash is used to revert a code hash change @@ -115,7 +115,7 @@ func (jen *JournalEntryNonce) DirtiedAddress() AddressContainer { //------- JournalEntryBalance // NewJournalEntryBalance outputs a new JournalEntry implementation used to revert a balance change -func NewJournalEntryBalance(jurnalizedAccount JournalizedAccountWrapper, oldBalance big.Int) *JournalEntryBalance { +func NewJournalEntryBalance(jurnalizedAccount JournalizedAccountWrapper, oldBalance *big.Int) *JournalEntryBalance { return &JournalEntryBalance{ jurnalizedAccount: jurnalizedAccount, oldBalance: oldBalance, diff --git a/data/state/journalEntries_test.go b/data/state/journalEntries_test.go index 8ef4d8a95aa..1fb2fde82bc 100644 --- a/data/state/journalEntries_test.go +++ b/data/state/journalEntries_test.go @@ -170,21 +170,21 @@ func TestJournalEntryBalance_RevertOkValsShouldWork(t *testing.T) { adr := mock.NewAddressMock() acnt := mock.NewJournalizedAccountWrapMock(adr) - acnt.Balance = *big.NewInt(445) + acnt.Balance = big.NewInt(445) - jec := state.NewJournalEntryBalance(acnt, *big.NewInt(2)) + jec := state.NewJournalEntryBalance(acnt, big.NewInt(2)) err := jec.Revert(acntAdapter) assert.Nil(t, err) assert.True(t, wasCalled) assert.Equal(t, adr, jec.DirtiedAddress()) - assert.Equal(t, *big.NewInt(2), acnt.Balance) + assert.Equal(t, big.NewInt(2), acnt.Balance) } func TestJournalEntryBalance_RevertNilAccountShouldErr(t *testing.T) { t.Parallel() - jeb := state.NewJournalEntryBalance(nil, *big.NewInt(2)) + jeb := state.NewJournalEntryBalance(nil, big.NewInt(2)) err := jeb.Revert(mock.NewAccountsAdapterMock()) assert.NotNil(t, err) } @@ -194,7 +194,7 @@ func TestJournalEntryBalance_RevertNilAccountAdapterShouldErr(t *testing.T) { adr := mock.NewAddressMock() acnt := mock.NewJournalizedAccountWrapMock(adr) - jeb := state.NewJournalEntryBalance(acnt, *big.NewInt(2)) + jeb := state.NewJournalEntryBalance(acnt, big.NewInt(2)) err := jeb.Revert(nil) assert.NotNil(t, err) @@ -204,7 +204,7 @@ func TestJournalEntryBalance_RevertNilAddressShouldErr(t *testing.T) { t.Parallel() acnt := mock.NewJournalizedAccountWrapMock(nil) - jen := state.NewJournalEntryBalance(acnt, *big.NewInt(2)) + jen := state.NewJournalEntryBalance(acnt, big.NewInt(2)) err := jen.Revert(mock.NewAccountsAdapterMock()) assert.NotNil(t, err) } @@ -225,15 +225,15 @@ func TestJournalEntryBalance_RevertAccountAdapterErrorShouldErr(t *testing.T) { return nil } - acnt.Balance = *big.NewInt(445) + acnt.Balance = big.NewInt(445) - jeb := state.NewJournalEntryBalance(acnt, *big.NewInt(2)) + jeb := state.NewJournalEntryBalance(acnt, big.NewInt(2)) err := jeb.Revert(acntAdapter) assert.Nil(t, err) assert.True(t, wasCalled) assert.Equal(t, adr, jeb.DirtiedAddress()) - assert.Equal(t, *big.NewInt(2), acnt.Balance) + assert.Equal(t, big.NewInt(2), acnt.Balance) } //------- JournalEntryCodeHash diff --git a/data/state/journalizedAccountWrap.go b/data/state/journalizedAccountWrap.go index 3aa73559a44..a26be607f0b 100644 --- a/data/state/journalizedAccountWrap.go +++ b/data/state/journalizedAccountWrap.go @@ -68,7 +68,7 @@ func (jaw *JournalizedAccountWrap) SetNonceWithJournal(nonce uint64) error { } // SetBalanceWithJournal sets the account's balance, saving the old balance before changing -func (jaw *JournalizedAccountWrap) SetBalanceWithJournal(balance big.Int) error { +func (jaw *JournalizedAccountWrap) SetBalanceWithJournal(balance *big.Int) error { jaw.accounts.AddJournalEntry(NewJournalEntryBalance(jaw, jaw.BaseAccount().Balance)) jaw.BaseAccount().Balance = balance diff --git a/data/state/journalizedAccountWrap_test.go b/data/state/journalizedAccountWrap_test.go index 95d0c4c8abb..4c9760df7cb 100644 --- a/data/state/journalizedAccountWrap_test.go +++ b/data/state/journalizedAccountWrap_test.go @@ -93,7 +93,7 @@ func TestJournalizedAccountWrap_SetBalanceWithJournal(t *testing.T) { jaw, err := state.NewJournalizedAccountWrap(mock.NewTrackableAccountWrapMock(), acntAdapter) assert.Nil(t, err) - err = jaw.SetBalanceWithJournal(*big.NewInt(1)) + err = jaw.SetBalanceWithJournal(big.NewInt(1)) assert.Nil(t, err) assert.True(t, wasCalledSave) assert.NotNil(t, jeAdded) diff --git a/data/state/mock/journalizedAccountWrapMock.go b/data/state/mock/journalizedAccountWrapMock.go index 260ffbc016c..8749d827a56 100644 --- a/data/state/mock/journalizedAccountWrapMock.go +++ b/data/state/mock/journalizedAccountWrapMock.go @@ -117,7 +117,7 @@ func (jawm *JournalizedAccountWrapMock) SetNonceWithJournal(uint64) error { panic("implement me") } -func (jawm *JournalizedAccountWrapMock) SetBalanceWithJournal(big.Int) error { +func (jawm *JournalizedAccountWrapMock) SetBalanceWithJournal(*big.Int) error { panic("implement me") } diff --git a/data/state/plainAddressConverter.go b/data/state/plainAddressConverter.go new file mode 100644 index 00000000000..38bf5945e6b --- /dev/null +++ b/data/state/plainAddressConverter.go @@ -0,0 +1,102 @@ +package state + +import ( + "encoding/hex" + "strings" +) + +// PlainAddressConverter is used to convert the address from/to different structures +type PlainAddressConverter struct { + addressLen int + prefix string +} + +// NewPlainAddressConverter creates a new instance of HashAddressConverter +func NewPlainAddressConverter(addressLen int, prefix string) (*PlainAddressConverter, error) { + if addressLen < 0 { + return nil, ErrNegativeValue + } + + return &PlainAddressConverter{ + addressLen: addressLen, + prefix: prefix, + }, nil +} + +// CreateAddressFromPublicKeyBytes returns the bytes received as parameters, trimming if necessary +// and outputs a new AddressContainer obj +func (pac *PlainAddressConverter) CreateAddressFromPublicKeyBytes(pubKey []byte) (AddressContainer, error) { + if pubKey == nil { + return nil, ErrNilPubKeysBytes + } + + if len(pubKey) < pac.addressLen { + return nil, NewErrorWrongSize(pac.addressLen, len(pubKey)) + } + + newPubKey := make([]byte, len(pubKey)) + copy(newPubKey, pubKey) + + //check size, trimming as necessary + if len(newPubKey) > pac.addressLen { + newPubKey = newPubKey[len(newPubKey)-pac.addressLen:] + } + + return NewAddress(newPubKey), nil +} + +// ConvertToHex returns the hex string representation of the address. +func (pac *PlainAddressConverter) ConvertToHex(addressContainer AddressContainer) (string, error) { + if addressContainer == nil { + return "", ErrNilAddressContainer + } + + return pac.prefix + hex.EncodeToString(addressContainer.Bytes()), nil +} + +// CreateAddressFromHex creates the address from hex string +func (pac *PlainAddressConverter) CreateAddressFromHex(hexAddress string) (AddressContainer, error) { + if len(hexAddress) == 0 { + return nil, ErrEmptyAddress + } + + //to lower + hexAddress = strings.ToLower(hexAddress) + + //check if it has prefix, trimming as necessary + if strings.HasPrefix(hexAddress, strings.ToLower(pac.prefix)) { + hexAddress = hexAddress[len(pac.prefix):] + } + + //check lengths + if len(hexAddress) != pac.addressLen*2 { + return nil, NewErrorWrongSize(pac.addressLen*2, len(hexAddress)) + } + + //decode hex + buff := make([]byte, pac.addressLen) + _, err := hex.Decode(buff, []byte(hexAddress)) + + if err != nil { + return nil, err + } + + return NewAddress(buff), nil +} + +// PrepareAddressBytes checks and returns the slice compatible to the address format +func (pac *PlainAddressConverter) PrepareAddressBytes(addressBytes []byte) ([]byte, error) { + if addressBytes == nil { + return nil, ErrNilAddressContainer + } + + if len(addressBytes) == 0 { + return nil, ErrEmptyAddress + } + + if len(addressBytes) != pac.addressLen { + return nil, NewErrorWrongSize(pac.addressLen, len(addressBytes)) + } + + return addressBytes, nil +} diff --git a/data/state/plainAddressConverter_test.go b/data/state/plainAddressConverter_test.go new file mode 100644 index 00000000000..c7cae6b8294 --- /dev/null +++ b/data/state/plainAddressConverter_test.go @@ -0,0 +1,213 @@ +package state_test + +import ( + "encoding/hex" + "fmt" + "math/rand" + "strings" + "testing" + + "github.com/ElrondNetwork/elrond-go-sandbox/data/state" + "github.com/ElrondNetwork/elrond-go-sandbox/data/state/mock" + "github.com/stretchr/testify/assert" +) + +//------- NewPlainAddressConverter + +func TestNewPlainAddressConverter_NegativeSizeShouldErr(t *testing.T) { + t.Parallel() + + _, err := state.NewPlainAddressConverter(-1, "") + assert.Equal(t, state.ErrNegativeValue, err) +} + +func TestNewPlainAddressConverter_OkValsShouldWork(t *testing.T) { + t.Parallel() + + _, err := state.NewPlainAddressConverter(32, "") + assert.Nil(t, err) +} + +//------- CreateAddressFromPublicKeyBytes + +func TestPlainAddressConverter_CreateAddressFromPublicKeyBytesNilBytesShouldErr(t *testing.T) { + t.Parallel() + + ac, err := state.NewPlainAddressConverter(32, "") + assert.Nil(t, err) + _, err = ac.CreateAddressFromPublicKeyBytes(nil) + assert.Equal(t, state.ErrNilPubKeysBytes, err) +} + +func TestPlainAddressConverter_CreateAddressFromPublicKeyBytesLenLowerAddrlenShouldErr(t *testing.T) { + t.Parallel() + + ac, err := state.NewPlainAddressConverter(32, "") + assert.Nil(t, err) + + for i := 0; i < 32; i++ { + _, err = ac.CreateAddressFromPublicKeyBytes(make([]byte, i)) + assert.NotNil(t, err) + fmt.Printf("%v\n", err) + } +} + +func TestPlainAddressConverter_CreateAddressFromPublicKeyBytesOkValsShouldWork(t *testing.T) { + t.Parallel() + + ac, err := state.NewPlainAddressConverter(32, "") + assert.Nil(t, err) + + //generating a random byte slice with len > AdrLen + buff := make([]byte, 32) + _, err = rand.Read(buff) + assert.Nil(t, err) + + adr, err := ac.CreateAddressFromPublicKeyBytes(buff) + + assert.Equal(t, buff, adr.Bytes()) +} + +func TestPlainAddressConverter_CreateAddressFromPublicKeyBytesOkValsTrimShouldWork(t *testing.T) { + t.Parallel() + + ac, err := state.NewPlainAddressConverter(32, "") + assert.Nil(t, err) + + //generating a random byte slice with len > AdrLen + buff := make([]byte, 320) + _, err = rand.Read(buff) + assert.Nil(t, err) + + adr, err := ac.CreateAddressFromPublicKeyBytes(buff) + + assert.Equal(t, buff[320-32:], adr.Bytes()) +} + +//------- ConvertToHex + +func TestPlainAddressConverter_ConvertToHexNilAddressShouldErr(t *testing.T) { + t.Parallel() + + ac, err := state.NewPlainAddressConverter(32, "") + assert.Nil(t, err) + + _, err = ac.ConvertToHex(nil) + assert.Equal(t, state.ErrNilAddressContainer, err) +} + +func TestPlainAddressConverter_ConvertToHexOkValsShouldWork(t *testing.T) { + t.Parallel() + + ac, err := state.NewPlainAddressConverter(32, "0x") + assert.Nil(t, err) + + adr := mock.NewAddressDummy([]byte{255}, []byte{255}) + + str, err := ac.ConvertToHex(adr) + assert.Nil(t, err) + assert.Equal(t, "0xff", strings.ToLower(str)) +} + +//------- CreateAddressFromHex + +func TestPlainAddressConverter_CreateAddressFromHexEmptyHexAddressShouldErr(t *testing.T) { + t.Parallel() + + ac, err := state.NewPlainAddressConverter(32, "") + assert.Nil(t, err) + + _, err = ac.CreateAddressFromHex("") + assert.Equal(t, state.ErrEmptyAddress, err) +} + +func TestPlainAddressConverter_CreateAddressFromHexEmptyBadLengthShouldErr(t *testing.T) { + t.Parallel() + + ac, err := state.NewPlainAddressConverter(32, "") + assert.Nil(t, err) + + _, err = ac.CreateAddressFromHex("A") + assert.NotNil(t, err) +} + +func TestPlainAddressConverter_CreateAddressFromHexEmptyBadStringShouldErr(t *testing.T) { + t.Parallel() + + ac, err := state.NewPlainAddressConverter(32, "") + assert.Nil(t, err) + + adr := "" + for i := 0; i < 64; i++ { + adr = adr + "t" + } + + _, err = ac.CreateAddressFromHex(adr) + assert.NotNil(t, err) +} + +func TestPlainAddressConverter_FromHexAddressValidDataWithPrefixShouldWork(t *testing.T) { + t.Parallel() + + ac, err := state.NewPlainAddressConverter(32, "0x") + assert.Nil(t, err) + + //generating a random byte slice + buff := make([]byte, 32) + _, err = rand.Read(buff) + assert.Nil(t, err) + + str := "0x" + hex.EncodeToString(buff) + + adr, err := ac.CreateAddressFromHex(str) + assert.Nil(t, err) + assert.Equal(t, buff, adr.Bytes()) +} + +//------- PrepareAddressBytes + +func TestPlainAddressConverter_PrepareAddressBytesNilAddrShouldErr(t *testing.T) { + t.Parallel() + + ac, err := state.NewPlainAddressConverter(32, "0x") + assert.Nil(t, err) + + _, err = ac.PrepareAddressBytes(nil) + assert.Equal(t, state.ErrNilAddressContainer, err) +} + +func TestPlainAddressConverter_PrepareAddressBytesEmptyAddrShouldErr(t *testing.T) { + t.Parallel() + + ac, err := state.NewPlainAddressConverter(32, "0x") + assert.Nil(t, err) + + _, err = ac.PrepareAddressBytes(make([]byte, 0)) + assert.Equal(t, state.ErrEmptyAddress, err) +} + +func TestPlainAddressConverter_PrepareAddressBytesWrongSizeShouldErr(t *testing.T) { + t.Parallel() + + ac, err := state.NewPlainAddressConverter(32, "0x") + assert.Nil(t, err) + + _, err = ac.PrepareAddressBytes(make([]byte, 31)) + assert.NotNil(t, err) +} + +func TestPlainAddressConverter_PrepareAddressBytesOkValsShouldWork(t *testing.T) { + t.Parallel() + + ac, err := state.NewPlainAddressConverter(32, "0x") + assert.Nil(t, err) + + //generating a random byte slice + buff := make([]byte, 32) + _, err = rand.Read(buff) + assert.Nil(t, err) + + checked, err := ac.PrepareAddressBytes(buff) + assert.Nil(t, err) + assert.Equal(t, buff, checked) +} diff --git a/data/transaction/transaction.go b/data/transaction/transaction.go index 963c7fe6ea2..6765c2c9f25 100644 --- a/data/transaction/transaction.go +++ b/data/transaction/transaction.go @@ -10,15 +10,15 @@ import ( // Transaction holds all the data needed for a value transfer type Transaction struct { - Nonce uint64 `capid:"0"` - Value big.Int `capid:"1"` - RcvAddr []byte `capid:"2"` - SndAddr []byte `capid:"3"` - GasPrice uint64 `capid:"4"` - GasLimit uint64 `capid:"5"` - Data []byte `capid:"6"` - Signature []byte `capid:"7"` - Challenge []byte `capid:"8"` + Nonce uint64 `capid:"0"` + Value *big.Int `capid:"1"` + RcvAddr []byte `capid:"2"` + SndAddr []byte `capid:"3"` + GasPrice uint64 `capid:"4"` + GasLimit uint64 `capid:"5"` + Data []byte `capid:"6"` + Signature []byte `capid:"7"` + Challenge []byte `capid:"8"` } // Save saves the serialized data of a Transaction into a stream through Capnp protocol @@ -46,6 +46,10 @@ func TransactionCapnToGo(src capnp.TransactionCapn, dest *Transaction) *Transact dest = &Transaction{} } + if dest.Value == nil { + dest.Value = big.NewInt(0) + } + // Nonce dest.Nonce = src.Nonce() // Value diff --git a/docker-compose.yml b/docker-compose.yml index 9bc5034a265..a608ab4fad4 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -3,11 +3,28 @@ version: '3' services: elrond: build: - context: docker/elrond + context: ./ + dockerfile: ./docker/elrond/Dockerfile command: bash -c "go run cmd/bootnode/main.go -port 4000 -max-allowed-peers 4 -private-key \"ZBis8aK5I66x1hwD+fE8sIw2nwQR5EBlTM8EiAOLZwE=\"" ports: - 8080:8080 + elrond2: + build: + context: ./ + dockerfile: ./docker/elrond/Dockerfile + + command: bash -c "go run cmd/bootnode/main.go -port 4001 -max-allowed-peers 4 -private-key \"unkVM1J1JvlNFqY3uo/CvAay6BsIL3IzDH9GDgmfUAA=\"" + ports: + - 8081:8080 + elrond3: + build: + context: ./ + dockerfile: ./docker/elrond/Dockerfile + + command: bash -c "go run cmd/bootnode/main.go -port 4002 -max-allowed-peers 4 -private-key \"Or0C7+gvlr/kIZLS+tiBBQfbUQ+pqS9FTE3dXfs5Swg=\"" + ports: + - 8082:8080 filebeat: build: docker/filebeat environment: diff --git a/docker/elrond/Dockerfile b/docker/elrond/Dockerfile index a12f0d1d432..a752abcce22 100644 --- a/docker/elrond/Dockerfile +++ b/docker/elrond/Dockerfile @@ -25,7 +25,8 @@ RUN curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh WORKDIR src/github.com/ElrondNetwork RUN ls -RUN git clone -b master https://elrondtester:a73e39f0fe868e584d3c27b3b4d8aacb2e925532@github.com/ElrondNetwork/elrond-go-sandbox +COPY ./ ./elrond-go-sandbox/ +#RUN git clone -b bug/EN-721-Bugfix-testnet-v.0.5 https://elrondtester:a73e39f0fe868e584d3c27b3b4d8aacb2e925532@github.com/ElrondNetwork/elrond-go-sandbox WORKDIR elrond-go-sandbox RUN >> skipP2PMessengerTests RUN ls diff --git a/genesis.json b/genesis.json index 7a95080eea5..709509366f8 100644 --- a/genesis.json +++ b/genesis.json @@ -1,91 +1,91 @@ { - "startTime": 1547134652, + "startTime": 0, "roundDuration": 4000, - "consensusGroupSize": 2, - "elasticSubrounds": false, + "consensusGroupSize": 1, + "elasticSubrounds": true, "initialNodes": [ { - "pubkey": "bCYAUf+qhQtYKFfgQ1g3JstkJFVTsA2KAH+0L+qZlO4=", + "pubkey": "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419", "balance": "10000000" }, { - "pubkey": "gDI39ZN3loP1Cujru6+BJtu+gNwQnBB8g4yVW0wyuaA=", + "pubkey": "8e0b815be8026a6732eea132e113913c12e2f5b19f25e86a8403afffbaf02088", "balance": "10000000" }, { - "pubkey": "TLkPlhd8g07tiqE4Mgvq1kCp3EOEEjn8O3/DpyjUqFE=", + "pubkey": "e6ec171959063bd0d61f95a52de73d6a16e649ba5fa8b12663b092b48cc99434", "balance": "10000000" }, { - "pubkey": "YO0S6tNhNjWwsJJzTLrJMecEKDLeuZlJNznf7nU/TcM=", + "pubkey": "20ccf92c80065a0f9ce1f1b8e3dee1e0b9774f4eebf2af7e8fa3ac503923360d", "balance": "10000000" }, { - "pubkey": "Gkfqv+PTR9ot2TILrOBFcPojmhwE9IC7Y2psLc9ZsZc=", + "pubkey": "9a3b8e67f42aef9544e0888ea9daee77af90292c86336203d224691d55306d08", "balance": "10000000" }, { - "pubkey": "nVnGbEbxPR4ab0thyeV/O1FZopNDTdNexNI5OPCGtRo=", + "pubkey": "0740bccedc28084ab811065cb618fec4ee623384b4b3d5466190d11ff6d77007", "balance": "10000000" }, { - "pubkey": "xMonbskDZ1dHeW4vh3/AUzf7psbPIPTKfGz+J6gmpeA=", + "pubkey": "0ccba0f98829ea9f337035a1f7b13cbd8e9ffb94f2c538e2cafb34ca7f2bcd24", "balance": "10000000" }, { - "pubkey": "6kVjlTw6NmSqV4kI6cEprxd+2f37FXCzpXnFsYYcsLU=", + "pubkey": "d9e9596c28a3945253d46bc1b9418963c0672a26a0b40ee7372cb9ec34d1ee07", "balance": "10000000" }, { - "pubkey": "0NNr5LuHjgSSiXFKp37uPAjGt1HYdQJTUmJ4ASKGZyQ=", + "pubkey": "86fbd8606e73b7a4f45a51b443270f3050aff571a29b9804d2444c081560d1dd", "balance": "10000000" }, { - "pubkey": "JyK0pKNF3UnzIFtm9pVDXF3xArpfhrU4g2bbGxwHBwE=", + "pubkey": "2084f2493e68443a5b156ec42a8cd9072c47aa453df4acd20524792a4fd9f474", "balance": "10000000" }, { - "pubkey": "bT4UtBU3A5MpKWvvpL5nAplLAGEWccoJb9NzdXJN1fk=", + "pubkey": "f91d24256d918144aaacfa641cd113af05d56cfb7a5b8ba5885ebd8edd43fe1e", "balance": "10000000" }, { - "pubkey": "S+goExeC5GNLPx9xYnhXB8mVQQzGJpr0B1QCU/DqqdM=", + "pubkey": "e8d4bcfe91c3c7788d8ab3704b192229900ec3fe3f1eb6f841c440e223d401a0", "balance": "10000000" }, { - "pubkey": "1KGnod6xGDML+y5qNiwrYq2t2M1Elgbt2YNfXLRSow0=", + "pubkey": "4bf7ee0e17a0b76d3837494d3950113d3e77db055b2c07c9cb443f529d73c8e3", "balance": "10000000" }, { - "pubkey": "84uQq3tJFjzSZ6KIcJwb7JZ1yLwuDytcPHYITVlnNjA=", + "pubkey": "20f12f7bdd4ab65321eb58ce8f90eec733e3e9a4cc9d6d5d7e57d2e86c6c2c76", "balance": "10000000" }, { - "pubkey": "8pqG75sKgeqIKim2jR/P7ojM5QgSQkHLt6xpablZoM0=", + "pubkey": "34cf226f4d62a22e4993a1a2835f05a4bb2fb48304e16f2dc18f99b39c496f7d", "balance": "10000000" }, { - "pubkey": "E8u9qxcr8hQ3nM6RfOOLS4bzu9fV+whiTtOY5kjaDlE=", + "pubkey": "b9f0fc3e1baa49c027205946af7d6c79b749481e5ab766356db3b878c0929558", "balance": "10000000" }, { - "pubkey": "M+C7UJoK6poGvlPqkLVpFOuWao9dFtCoHVWaWv0ee3U=", + "pubkey": "6670b048a3f9d93fdacb4d60ff7c2f3bd7440d5175ca8b9d2475a444cd7a129b", "balance": "10000000" }, { - "pubkey": "NBu1klLKrye76DblD8IhexsHrai2TD4+8KdWBWWqaxc=", + "pubkey": "d82b3f4490ccb2ffbba5695c1b7c345a5709584737a263999c77cc1a09136de1", "balance": "10000000" }, { - "pubkey": "SBoD6sJA5oEmyhwK5CckOlH3ByrJkyJZyih+iy1tGno=", + "pubkey": "29ba49f47e2b86b143418db31c696791215236925802ea1f219780e360a8209e", "balance": "10000000" }, { - "pubkey": "NUhC12eqQ0U5IIjpytuaWzBSSPYE2myNVro8I/3Rjpg=", + "pubkey": "199866d09b8385023c25f261460d4d20ae0d5bc72ddf1fa5c1b32768167a8fb0", "balance": "10000000" }, { - "pubkey": "+cmFvcxUN9roSQnW8AMpi3kq0LDSTZCIrL8f7Pc6ez4=", + "pubkey": "0098f7634d7327139848a0f6ad926051596e5a0f692adfb671ab02092b77181d", "balance": "10000000" } ] diff --git a/integrationTests/block/common.go b/integrationTests/block/common.go new file mode 100644 index 00000000000..1bafedbec18 --- /dev/null +++ b/integrationTests/block/common.go @@ -0,0 +1,127 @@ +package block + +import ( + "context" + + "github.com/ElrondNetwork/elrond-go-sandbox/crypto/schnorr" + "github.com/ElrondNetwork/elrond-go-sandbox/data" + "github.com/ElrondNetwork/elrond-go-sandbox/data/blockchain" + "github.com/ElrondNetwork/elrond-go-sandbox/data/dataPool" + "github.com/ElrondNetwork/elrond-go-sandbox/data/shardedData" + "github.com/ElrondNetwork/elrond-go-sandbox/data/state" + "github.com/ElrondNetwork/elrond-go-sandbox/data/typeConverters/uint64ByteSlice" + "github.com/ElrondNetwork/elrond-go-sandbox/hashing/sha256" + "github.com/ElrondNetwork/elrond-go-sandbox/marshal" + "github.com/ElrondNetwork/elrond-go-sandbox/node" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/ElrondNetwork/elrond-go-sandbox/process" + "github.com/ElrondNetwork/elrond-go-sandbox/process/factory" + "github.com/ElrondNetwork/elrond-go-sandbox/process/interceptor" + "github.com/ElrondNetwork/elrond-go-sandbox/process/resolver" + "github.com/ElrondNetwork/elrond-go-sandbox/sharding" + "github.com/ElrondNetwork/elrond-go-sandbox/storage" + "github.com/ElrondNetwork/elrond-go-sandbox/storage/memorydb" +) + +func createTestBlockChain() *blockchain.BlockChain { + + cfgCache := storage.CacheConfig{Size: 100, Type: storage.LRUCache} + + badBlockCache, _ := storage.NewCache(cfgCache.Type, cfgCache.Size) + + blockChain, _ := blockchain.NewBlockChain( + badBlockCache, + createMemUnit(), + createMemUnit(), + createMemUnit(), + createMemUnit(), + createMemUnit()) + + return blockChain +} + +func createMemUnit() storage.Storer { + cache, _ := storage.NewCache(storage.LRUCache, 10) + persist, _ := memorydb.New() + + unit, _ := storage.NewStorageUnit(cache, persist) + return unit +} + +func createTestDataPool() data.TransientDataHolder { + txPool, _ := shardedData.NewShardedData(storage.CacheConfig{Size: 100, Type: storage.LRUCache}) + hdrPool, _ := shardedData.NewShardedData(storage.CacheConfig{Size: 100, Type: storage.LRUCache}) + + cacherCfg := storage.CacheConfig{Size: 100, Type: storage.LRUCache} + hdrNoncesCacher, _ := storage.NewCache(cacherCfg.Type, cacherCfg.Size) + hdrNonces, _ := dataPool.NewNonceToHashCacher(hdrNoncesCacher, uint64ByteSlice.NewBigEndianConverter()) + + cacherCfg = storage.CacheConfig{Size: 100, Type: storage.LRUCache} + txBlockBody, _ := storage.NewCache(cacherCfg.Type, cacherCfg.Size) + + cacherCfg = storage.CacheConfig{Size: 100, Type: storage.LRUCache} + peerChangeBlockBody, _ := storage.NewCache(cacherCfg.Type, cacherCfg.Size) + + cacherCfg = storage.CacheConfig{Size: 100, Type: storage.LRUCache} + stateBlockBody, _ := storage.NewCache(cacherCfg.Type, cacherCfg.Size) + + dPool, _ := dataPool.NewDataPool( + txPool, + hdrPool, + hdrNonces, + txBlockBody, + peerChangeBlockBody, + stateBlockBody, + ) + + return dPool +} + +func createMemNode(port int, dPool data.TransientDataHolder) (*node.Node, p2p.Messenger, process.ProcessorFactory) { + hasher := sha256.Sha256{} + marshalizer := &marshal.JsonMarshalizer{} + + cp, _ := p2p.NewConnectParamsFromPort(port) + mes, _ := p2p.NewMemMessenger(marshalizer, hasher, cp) + + addrConverter, _ := state.NewPlainAddressConverter(32, "0x") + + keyGen := schnorr.NewKeyGenerator() + blockChain := createTestBlockChain() + shardCoordinator := &sharding.OneShardCoordinator{} + uint64Converter := uint64ByteSlice.NewBigEndianConverter() + + pFactory, _ := factory.NewProcessorsCreator(factory.ProcessorsCreatorConfig{ + InterceptorContainer: interceptor.NewContainer(), + ResolverContainer: resolver.NewContainer(), + Messenger: mes, + Blockchain: blockChain, + DataPool: dPool, + ShardCoordinator: shardCoordinator, + AddrConverter: addrConverter, + Hasher: hasher, + Marshalizer: marshalizer, + SingleSignKeyGen: keyGen, + Uint64ByteSliceConverter: uint64Converter, + }) + + n, _ := node.NewNode( + node.WithMessenger(mes), + node.WithMarshalizer(marshalizer), + node.WithHasher(hasher), + node.WithContext(context.Background()), + node.WithDataPool(dPool), + node.WithAddressConverter(addrConverter), + node.WithSingleSignKeyGenerator(keyGen), + node.WithShardCoordinator(shardCoordinator), + node.WithBlockChain(blockChain), + node.WithUint64ByteSliceConverter(uint64Converter), + node.WithMessenger(mes), + node.WithProcessorCreator(pFactory), + ) + + _ = pFactory.CreateInterceptors() + _ = pFactory.CreateResolvers() + + return n, mes, pFactory +} diff --git a/integrationTests/block/interceptedRequestHdrMem_test.go b/integrationTests/block/interceptedRequestHdrMem_test.go new file mode 100644 index 00000000000..41b6aa50e28 --- /dev/null +++ b/integrationTests/block/interceptedRequestHdrMem_test.go @@ -0,0 +1,85 @@ +package block + +import ( + "reflect" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go-sandbox/data/block" + "github.com/ElrondNetwork/elrond-go-sandbox/hashing/sha256" + "github.com/ElrondNetwork/elrond-go-sandbox/marshal" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + block2 "github.com/ElrondNetwork/elrond-go-sandbox/process/block" + "github.com/ElrondNetwork/elrond-go-sandbox/process/factory" + "github.com/stretchr/testify/assert" +) + +func TestNode_GenerateSendInterceptHeaderByNonceWithMemMessenger(t *testing.T) { + hasher := sha256.Sha256{} + marshalizer := &marshal.JsonMarshalizer{} + + dPoolRequestor := createTestDataPool() + dPoolResolver := createTestDataPool() + + nRequestor, _, pFactory1 := createMemNode(1, dPoolRequestor) + nResolver, _, _ := createMemNode(2, dPoolResolver) + + nRequestor.Start() + nResolver.Start() + defer func() { + _ = nRequestor.Stop() + _ = nResolver.Stop() + }() + + defer p2p.ReInitializeGloballyRegisteredPeers() + + time.Sleep(time.Second) + + //Step 1. Generate a header + hdr := block.Header{ + Nonce: 0, + PubKeysBitmap: []byte{255, 0}, + Commitment: []byte("commitment"), + Signature: []byte("signature"), + BlockBodyHash: []byte("block body hash"), + PrevHash: []byte("prev hash"), + TimeStamp: uint64(time.Now().Unix()), + Round: 1, + Epoch: 2, + ShardId: 0, + BlockBodyType: block.TxBlock, + } + + hdrBuff, _ := marshalizer.Marshal(&hdr) + hdrHash := hasher.Compute(string(hdrBuff)) + + //Step 2. resolver has the header + dPoolResolver.Headers().AddData(hdrHash, &hdr, 0) + dPoolResolver.HeadersNonces().HasOrAdd(0, hdrHash) + + //Step 3. wire up a received handler + chanDone := make(chan bool) + + dPoolRequestor.Headers().RegisterHandler(func(key []byte) { + hdrStored, _ := dPoolRequestor.Headers().ShardDataStore(0).Get(key) + + if reflect.DeepEqual(hdrStored, &hdr) && hdr.Signature != nil { + chanDone <- true + } + + assert.Equal(t, hdrStored, &hdr) + + }) + + //Step 4. request header + res, err := pFactory1.ResolverContainer().Get(string(factory.HeadersTopic)) + assert.Nil(t, err) + hdrResolver := res.(*block2.HeaderResolver) + hdrResolver.RequestHeaderFromNonce(0) + + select { + case <-chanDone: + case <-time.After(time.Second * 10): + assert.Fail(t, "timeout") + } +} diff --git a/integrationTests/block/interceptedRequestTxBlockBodyMem_test.go b/integrationTests/block/interceptedRequestTxBlockBodyMem_test.go new file mode 100644 index 00000000000..736a50afebe --- /dev/null +++ b/integrationTests/block/interceptedRequestTxBlockBodyMem_test.go @@ -0,0 +1,84 @@ +package block + +import ( + "reflect" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go-sandbox/data/block" + "github.com/ElrondNetwork/elrond-go-sandbox/hashing/sha256" + "github.com/ElrondNetwork/elrond-go-sandbox/marshal" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + block2 "github.com/ElrondNetwork/elrond-go-sandbox/process/block" + "github.com/ElrondNetwork/elrond-go-sandbox/process/factory" + "github.com/stretchr/testify/assert" +) + +func TestNode_GenerateSendInterceptTxBlockBodyWithMemMessenger(t *testing.T) { + hasher := sha256.Sha256{} + marshalizer := &marshal.JsonMarshalizer{} + + dPoolRequestor := createTestDataPool() + dPoolResolver := createTestDataPool() + + nRequestor, _, pFactory1 := createMemNode(1, dPoolRequestor) + nResolver, _, _ := createMemNode(2, dPoolResolver) + + nRequestor.Start() + nResolver.Start() + defer func() { + _ = nRequestor.Stop() + _ = nResolver.Stop() + }() + + defer p2p.ReInitializeGloballyRegisteredPeers() + + time.Sleep(time.Second) + + //Step 1. Generate a block body + txBlock := block.TxBlockBody{ + MiniBlocks: []block.MiniBlock{ + { + ShardID: 0, + TxHashes: [][]byte{ + hasher.Compute("tx1"), + }, + }, + }, + StateBlockBody: block.StateBlockBody{ + RootHash: hasher.Compute("root hash"), + ShardID: 0, + }, + } + + txBlockBodyBuff, _ := marshalizer.Marshal(&txBlock) + txBlockBodyHash := hasher.Compute(string(txBlockBodyBuff)) + + //Step 2. resolver has the tx block body + dPoolResolver.TxBlocks().HasOrAdd(txBlockBodyHash, &txBlock) + + //Step 3. wire up a received handler + chanDone := make(chan bool) + + dPoolRequestor.TxBlocks().RegisterHandler(func(key []byte) { + txBlockBodyStored, _ := dPoolRequestor.TxBlocks().Get(key) + + if reflect.DeepEqual(txBlockBodyStored, &txBlock) { + chanDone <- true + } + + assert.Equal(t, txBlockBodyStored, &txBlock) + + }) + + //Step 4. request tx block body + res, _ := pFactory1.ResolverContainer().Get(string(factory.TxBlockBodyTopic)) + hdrResolver := res.(*block2.GenericBlockBodyResolver) + hdrResolver.RequestBlockBodyFromHash(txBlockBodyHash) + + select { + case <-chanDone: + case <-time.After(time.Second * 10): + assert.Fail(t, "timeout") + } +} diff --git a/integrationTests/block/interceptedRequestTxBlockBodyNet_test.go b/integrationTests/block/interceptedRequestTxBlockBodyNet_test.go new file mode 100644 index 00000000000..0d55165ae6e --- /dev/null +++ b/integrationTests/block/interceptedRequestTxBlockBodyNet_test.go @@ -0,0 +1,174 @@ +package block + +import ( + "context" + "reflect" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go-sandbox/crypto/schnorr" + "github.com/ElrondNetwork/elrond-go-sandbox/data/block" + "github.com/ElrondNetwork/elrond-go-sandbox/data/state" + "github.com/ElrondNetwork/elrond-go-sandbox/data/typeConverters/uint64ByteSlice" + "github.com/ElrondNetwork/elrond-go-sandbox/hashing" + "github.com/ElrondNetwork/elrond-go-sandbox/hashing/sha256" + "github.com/ElrondNetwork/elrond-go-sandbox/marshal" + "github.com/ElrondNetwork/elrond-go-sandbox/node" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + block2 "github.com/ElrondNetwork/elrond-go-sandbox/process/block" + "github.com/ElrondNetwork/elrond-go-sandbox/process/factory" + "github.com/ElrondNetwork/elrond-go-sandbox/process/interceptor" + "github.com/ElrondNetwork/elrond-go-sandbox/process/resolver" + "github.com/ElrondNetwork/elrond-go-sandbox/sharding" + "github.com/stretchr/testify/assert" +) + +func TestNode_GenerateSendInterceptTxBlockBodyWithNetMessenger(t *testing.T) { + t.Skip("TODO: fix tests that run on the same local network") + + hasher := sha256.Sha256{} + marshalizer := &marshal.JsonMarshalizer{} + + keyGen := schnorr.NewKeyGenerator() + + dPoolRequestor := createTestDataPool() + dPoolResolver := createTestDataPool() + + addrConverter, _ := state.NewPlainAddressConverter(32, "0x") + + blkcRequestor := createTestBlockChain() + blkcResolver := createTestBlockChain() + reqMessenger := createMessenger(context.Background(), marshalizer, hasher, 4, 32000) + resMessenger := createMessenger(context.Background(), marshalizer, hasher, 4, 32001) + shardCoordinatorReq := &sharding.OneShardCoordinator{} + shardCoordinatorRes := &sharding.OneShardCoordinator{} + uint64BsReq := uint64ByteSlice.NewBigEndianConverter() + uint64BsRes := uint64ByteSlice.NewBigEndianConverter() + + pFactoryReq, _ := factory.NewProcessorsCreator(factory.ProcessorsCreatorConfig{ + InterceptorContainer: interceptor.NewContainer(), + ResolverContainer: resolver.NewContainer(), + Messenger: reqMessenger, + Blockchain: blkcRequestor, + DataPool: dPoolRequestor, + ShardCoordinator: shardCoordinatorReq, + AddrConverter: addrConverter, + Hasher: hasher, + Marshalizer: marshalizer, + SingleSignKeyGen: keyGen, + Uint64ByteSliceConverter: uint64BsReq, + }) + + pFactoryRes, _ := factory.NewProcessorsCreator(factory.ProcessorsCreatorConfig{ + InterceptorContainer: interceptor.NewContainer(), + ResolverContainer: resolver.NewContainer(), + Messenger: resMessenger, + Blockchain: blkcResolver, + DataPool: dPoolResolver, + ShardCoordinator: shardCoordinatorRes, + AddrConverter: addrConverter, + Hasher: hasher, + Marshalizer: marshalizer, + SingleSignKeyGen: keyGen, + Uint64ByteSliceConverter: uint64BsRes, + }) + + nRequestor, _ := node.NewNode( + node.WithMarshalizer(marshalizer), + node.WithHasher(hasher), + node.WithContext(context.Background()), + node.WithDataPool(dPoolRequestor), + node.WithAddressConverter(addrConverter), + node.WithSingleSignKeyGenerator(keyGen), + node.WithShardCoordinator(shardCoordinatorReq), + node.WithBlockChain(blkcRequestor), + node.WithUint64ByteSliceConverter(uint64BsReq), + node.WithMessenger(reqMessenger), + node.WithProcessorCreator(pFactoryReq), + ) + + nResolver, _ := node.NewNode( + node.WithMarshalizer(marshalizer), + node.WithHasher(hasher), + node.WithContext(context.Background()), + node.WithDataPool(dPoolResolver), + node.WithAddressConverter(addrConverter), + node.WithSingleSignKeyGenerator(keyGen), + node.WithShardCoordinator(shardCoordinatorRes), + node.WithBlockChain(blkcResolver), + node.WithUint64ByteSliceConverter(uint64BsRes), + node.WithMessenger(resMessenger), + node.WithProcessorCreator(pFactoryRes), + ) + + nRequestor.Start() + nResolver.Start() + + defer nRequestor.Stop() + defer nResolver.Stop() + + nRequestor.P2PBootstrap() + nResolver.P2PBootstrap() + + time.Sleep(time.Second) + + //TODO remove this + time.Sleep(time.Second) + + //Step 1. Generate a block body + txBlock := block.TxBlockBody{ + MiniBlocks: []block.MiniBlock{ + { + ShardID: 0, + TxHashes: [][]byte{ + hasher.Compute("tx1"), + }, + }, + }, + StateBlockBody: block.StateBlockBody{ + RootHash: hasher.Compute("root hash"), + ShardID: 0, + }, + } + + txBlockBodyBuff, _ := marshalizer.Marshal(&txBlock) + txBlockBodyHash := hasher.Compute(string(txBlockBodyBuff)) + + //Step 2. resolver has the tx block body + dPoolResolver.TxBlocks().HasOrAdd(txBlockBodyHash, &txBlock) + + //Step 3. wire up a received handler + chanDone := make(chan bool) + + dPoolRequestor.TxBlocks().RegisterHandler(func(key []byte) { + txBlockBodyStored, _ := dPoolRequestor.TxBlocks().Get(key) + + if reflect.DeepEqual(txBlockBodyStored, &txBlock) { + chanDone <- true + } + + assert.Equal(t, txBlockBodyStored, &txBlock) + + }) + + //Step 4. request tx block body + res, _ := pFactoryRes.ResolverContainer().Get(string(factory.TxBlockBodyTopic)) + txBlockBodyResolver := res.(*block2.GenericBlockBodyResolver) + txBlockBodyResolver.RequestBlockBodyFromHash(txBlockBodyHash) + + select { + case <-chanDone: + case <-time.After(time.Second * 10): + assert.Fail(t, "timeout") + } +} + +func createMessenger(ctx context.Context, marshalizer marshal.Marshalizer, hasher hashing.Hasher, maxAllowedPeers int, port int) p2p.Messenger { + cp := &p2p.ConnectParams{} + cp.Port = port + cp.GeneratePrivPubKeys(time.Now().UnixNano()) + cp.GenerateIDFromPubKey() + + nm, _ := p2p.NewNetMessenger(ctx, marshalizer, hasher, cp, maxAllowedPeers, p2p.GossipSub) + return nm +} diff --git a/integrationTests/state/common.go b/integrationTests/state/common.go new file mode 100644 index 00000000000..666b1ce6ba1 --- /dev/null +++ b/integrationTests/state/common.go @@ -0,0 +1,45 @@ +package state + +import ( + "math/rand" + "time" + + "github.com/ElrondNetwork/elrond-go-sandbox/data/state" + "github.com/ElrondNetwork/elrond-go-sandbox/hashing/sha256" + "github.com/ElrondNetwork/elrond-go-sandbox/storage" + "github.com/ElrondNetwork/elrond-go-sandbox/storage/memorydb" +) + +func createDummyAddress() state.AddressContainer { + buff := make([]byte, sha256.Sha256{}.Size()) + + r := rand.New(rand.NewSource(time.Now().UnixNano())) + r.Read(buff) + + return state.NewAddress(buff) +} + +func createMemUnit() storage.Storer { + cache, _ := storage.NewCache(storage.LRUCache, 10) + persist, _ := memorydb.New() + + unit, _ := storage.NewStorageUnit(cache, persist) + return unit +} + +func createDummyHexAddress(chars int) string { + if chars < 1 { + return "" + } + + var characters = []byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'} + + rdm := rand.New(rand.NewSource(time.Now().UnixNano())) + + buff := make([]byte, chars) + for i := 0; i < chars; i++ { + buff[i] = characters[rdm.Int()%16] + } + + return string(buff) +} diff --git a/integrationTests/state/stateExecTransaction_test.go b/integrationTests/state/stateExecTransaction_test.go new file mode 100644 index 00000000000..c8bd9d199ad --- /dev/null +++ b/integrationTests/state/stateExecTransaction_test.go @@ -0,0 +1,216 @@ +package state + +import ( + "encoding/base64" + "fmt" + "math/big" + "testing" + + "github.com/ElrondNetwork/elrond-go-sandbox/data/state" + transaction2 "github.com/ElrondNetwork/elrond-go-sandbox/data/transaction" + "github.com/ElrondNetwork/elrond-go-sandbox/hashing/sha256" + "github.com/ElrondNetwork/elrond-go-sandbox/marshal" + "github.com/ElrondNetwork/elrond-go-sandbox/process/transaction" + "github.com/stretchr/testify/assert" +) + +func TestExecTransaction_SelfTransactionShouldWork(t *testing.T) { + accnts := adbCreateAccountsDB() + + pubKeyBuff := createDummyHexAddress(64) + + hasher := sha256.Sha256{} + marshalizer := &marshal.JsonMarshalizer{} + addrConv, _ := state.NewPlainAddressConverter(32, "0x") + + txProcessor, _ := transaction.NewTxProcessor(accnts, hasher, addrConv, marshalizer) + + nonce := uint64(6) + balance := big.NewInt(10000) + + //Step 1. create account with a nonce and a balance + address, _ := addrConv.CreateAddressFromHex(string(pubKeyBuff)) + account, _ := accnts.GetJournalizedAccount(address) + account.SetNonceWithJournal(nonce) + account.SetBalanceWithJournal(balance) + + hashCreated, _ := accnts.Commit() + + //Step 2. create a tx moving 1 from pubKeyBuff to pubKeyBuff + tx := &transaction2.Transaction{ + Nonce: nonce, + Value: big.NewInt(1), + SndAddr: address.Bytes(), + RcvAddr: address.Bytes(), + } + + err := txProcessor.ProcessTransaction(tx, 0) + assert.Nil(t, err) + + hashAfterExec, _ := accnts.Commit() + assert.NotEqual(t, hashCreated, hashAfterExec) + + accountAfterExec, _ := accnts.GetJournalizedAccount(address) + assert.Equal(t, nonce+1, accountAfterExec.BaseAccount().Nonce) + assert.Equal(t, balance, accountAfterExec.BaseAccount().Balance) +} + +func TestExecTransaction_SelfTransactionWithRevertShouldWork(t *testing.T) { + accnts := adbCreateAccountsDB() + + pubKeyBuff := createDummyHexAddress(64) + + hasher := sha256.Sha256{} + marshalizer := &marshal.JsonMarshalizer{} + addrConv, _ := state.NewPlainAddressConverter(32, "0x") + + txProcessor, _ := transaction.NewTxProcessor(accnts, hasher, addrConv, marshalizer) + + nonce := uint64(6) + balance := big.NewInt(10000) + + //Step 1. create account with a nonce and a balance + address, _ := addrConv.CreateAddressFromHex(string(pubKeyBuff)) + account, _ := accnts.GetJournalizedAccount(address) + account.SetNonceWithJournal(nonce) + account.SetBalanceWithJournal(balance) + + accnts.Commit() + + //Step 2. create a tx moving 1 from pubKeyBuff to pubKeyBuff + tx := &transaction2.Transaction{ + Nonce: nonce, + Value: big.NewInt(1), + SndAddr: address.Bytes(), + RcvAddr: address.Bytes(), + } + + err := txProcessor.ProcessTransaction(tx, 0) + assert.Nil(t, err) + + _ = accnts.RevertToSnapshot(0) + + accountAfterExec, _ := accnts.GetJournalizedAccount(address) + assert.Equal(t, nonce, accountAfterExec.BaseAccount().Nonce) + assert.Equal(t, balance, accountAfterExec.BaseAccount().Balance) +} + +func TestExecTransaction_MoreTransactionsWithRevertShouldWork(t *testing.T) { + accnts := adbCreateAccountsDB() + + nonce := uint64(6) + initialBalance := int64(100000) + balance := big.NewInt(initialBalance) + + addrConv, _ := state.NewPlainAddressConverter(32, "0x") + pubKeyBuff := createDummyHexAddress(64) + sender, _ := addrConv.CreateAddressFromHex(string(pubKeyBuff)) + + pubKeyBuff = createDummyHexAddress(64) + receiver, _ := addrConv.CreateAddressFromHex(string(pubKeyBuff)) + + account, _ := accnts.GetJournalizedAccount(sender) + account.SetNonceWithJournal(nonce) + account.SetBalanceWithJournal(balance) + + initialHash, _ := accnts.Commit() + fmt.Printf("Initial hash: %s\n", base64.StdEncoding.EncodeToString(initialHash)) + + testExecTransactionsMoreTxWithRevert(t, accnts, sender, receiver, initialHash, nonce, initialBalance) +} + +func testExecTransactionsMoreTxWithRevert( + t *testing.T, + accnts state.AccountsAdapter, + sender state.AddressContainer, + receiver state.AddressContainer, + initialHash []byte, + initialNonce uint64, + initialBalance int64) { + + hasher := sha256.Sha256{} + marshalizer := &marshal.JsonMarshalizer{} + addrConv, _ := state.NewPlainAddressConverter(32, "0x") + + txProcessor, _ := transaction.NewTxProcessor(accnts, hasher, addrConv, marshalizer) + + txToGenerate := 15000 + + //Step 1. execute a lot moving transactions from pubKeyBuff to another pubKeyBuff + for i := 0; i < txToGenerate; i++ { + tx := &transaction2.Transaction{ + Nonce: initialNonce + uint64(i), + Value: big.NewInt(1), + SndAddr: sender.Bytes(), + RcvAddr: receiver.Bytes(), + } + + err := txProcessor.ProcessTransaction(tx, 0) + assert.Nil(t, err) + } + + modifiedHash := accnts.RootHash() + fmt.Printf("Modified hash: %s\n", base64.StdEncoding.EncodeToString(modifiedHash)) + + //Step 2. test that accounts have correct nonces and balances + newAccount, _ := accnts.GetJournalizedAccount(receiver) + account, _ := accnts.GetJournalizedAccount(sender) + + assert.Equal(t, account.BaseAccount().Balance, big.NewInt(initialBalance-int64(txToGenerate))) + assert.Equal(t, account.BaseAccount().Nonce, uint64(txToGenerate)+initialNonce) + + assert.Equal(t, newAccount.BaseAccount().Balance, big.NewInt(int64(txToGenerate))) + assert.Equal(t, newAccount.BaseAccount().Nonce, uint64(0)) + + assert.NotEqual(t, initialHash, modifiedHash) + + fmt.Printf("Journalized: %d modifications to the state\n", accnts.JournalLen()) + + //Step 3. Revert and test again nonces, balances and root hash + err := accnts.RevertToSnapshot(0) + + assert.Nil(t, err) + + revertedHash := accnts.RootHash() + fmt.Printf("Reverted hash: %s\n", base64.StdEncoding.EncodeToString(revertedHash)) + + receiver2, _ := accnts.GetExistingAccount(receiver) + account, _ = accnts.GetJournalizedAccount(sender) + + assert.Equal(t, account.BaseAccount().Balance, big.NewInt(initialBalance)) + assert.Equal(t, account.BaseAccount().Nonce, initialNonce) + + assert.Nil(t, receiver2) + + assert.Equal(t, initialHash, revertedHash) +} + +func TestExecTransaction_MoreTransactionsMoreIterationsWithRevertShouldWork(t *testing.T) { + t.Skip("This is a very long test") + + accnts := adbCreateAccountsDB() + + nonce := uint64(6) + initialBalance := int64(100000) + balance := big.NewInt(initialBalance) + + addrConv, _ := state.NewPlainAddressConverter(32, "0x") + pubKeyBuff := createDummyHexAddress(64) + sender, _ := addrConv.CreateAddressFromHex(string(pubKeyBuff)) + + pubKeyBuff = createDummyHexAddress(64) + receiver, _ := addrConv.CreateAddressFromHex(string(pubKeyBuff)) + + account, _ := accnts.GetJournalizedAccount(sender) + account.SetNonceWithJournal(nonce) + account.SetBalanceWithJournal(balance) + + initialHash, _ := accnts.Commit() + fmt.Printf("Initial hash: %s\n", base64.StdEncoding.EncodeToString(initialHash)) + + for i := 0; i < 10000; i++ { + fmt.Printf("Iteration: %d\n", i) + + testExecTransactionsMoreTxWithRevert(t, accnts, sender, receiver, initialHash, nonce, initialBalance) + } +} diff --git a/integrationTests/testStateTrie_test.go b/integrationTests/state/stateTrie_test.go similarity index 91% rename from integrationTests/testStateTrie_test.go rename to integrationTests/state/stateTrie_test.go index 54153bffc8a..f8f9af87221 100644 --- a/integrationTests/testStateTrie_test.go +++ b/integrationTests/state/stateTrie_test.go @@ -1,4 +1,4 @@ -package integrationTests +package state import ( "encoding/base64" @@ -10,36 +10,26 @@ import ( "time" "github.com/ElrondNetwork/elrond-go-sandbox/data/state" - "github.com/ElrondNetwork/elrond-go-sandbox/data/state/mock" "github.com/ElrondNetwork/elrond-go-sandbox/data/trie" - mock2 "github.com/ElrondNetwork/elrond-go-sandbox/data/trie/mock" + "github.com/ElrondNetwork/elrond-go-sandbox/hashing/sha256" + "github.com/ElrondNetwork/elrond-go-sandbox/marshal" "github.com/stretchr/testify/assert" ) //------- Helper funcs func adbCreateAccountsDB() *state.AccountsDB { - marsh := mock.MarshalizerMock{} - dbw, err := trie.NewDBWriteCache(mock2.NewMemoryStorerMock()) - if err != nil { - panic(err) - } - - tr, err := trie.NewTrie(make([]byte, 32), dbw, mock.HasherMock{}) - if err != nil { - panic(err) - } + marsh := &marshal.JsonMarshalizer{} - adb, err := state.NewAccountsDB(tr, mock.HasherMock{}, &marsh) - if err != nil { - panic(err) - } + dbw, _ := trie.NewDBWriteCache(createMemUnit()) + tr, _ := trie.NewTrie(make([]byte, 32), dbw, sha256.Sha256{}) + adb, _ := state.NewAccountsDB(tr, sha256.Sha256{}, marsh) return adb } func generateAddressJurnalAccountAccountsDB() (state.AddressContainer, state.JournalizedAccountWrapper, *state.AccountsDB) { - adr := mock.NewAddressMock() + adr := createDummyAddress() adb := adbCreateAccountsDB() jaw, err := state.NewJournalizedAccountWrapFromAccountContainer(adr, state.NewAccount(), adb) @@ -60,13 +50,13 @@ func adbEmulateBalanceTxExecution(acntSrc, acntDest state.JournalizedAccountWrap } //substract value from src - err := acntSrc.SetBalanceWithJournal(*srcVal.Sub(&srcVal, value)) + err := acntSrc.SetBalanceWithJournal(srcVal.Sub(srcVal, value)) if err != nil { return err } //add value to dest - err = acntDest.SetBalanceWithJournal(*destVal.Add(&destVal, value)) + err = acntDest.SetBalanceWithJournal(destVal.Add(destVal, value)) if err != nil { return err } @@ -204,7 +194,7 @@ func TestAccountsDB_GetJournalizedAccountReturnExistingAccntShouldWork(t *testin t.Parallel() adr, jaw, adb := generateAddressJurnalAccountAccountsDB() - err := jaw.SetBalanceWithJournal(*big.NewInt(40)) + err := jaw.SetBalanceWithJournal(big.NewInt(40)) assert.Nil(t, err) err = adb.SaveJournalizedAccount(jaw) @@ -213,7 +203,7 @@ func TestAccountsDB_GetJournalizedAccountReturnExistingAccntShouldWork(t *testin acnt, err := adb.GetJournalizedAccount(adr) assert.Nil(t, err) assert.NotNil(t, acnt) - assert.Equal(t, acnt.BaseAccount().Balance, *big.NewInt(40)) + assert.Equal(t, acnt.BaseAccount().Balance, big.NewInt(40)) } func TestAccountsDB_GetJournalizedAccountReturnNotFoundAccntShouldWork(t *testing.T) { @@ -226,7 +216,7 @@ func TestAccountsDB_GetJournalizedAccountReturnNotFoundAccntShouldWork(t *testin acnt, err := adb.GetJournalizedAccount(adr) assert.Nil(t, err) assert.NotNil(t, acnt) - assert.Equal(t, acnt.BaseAccount().Balance, *big.NewInt(0)) + assert.Equal(t, acnt.BaseAccount().Balance, big.NewInt(0)) } func TestAccountsDB_Commit2OkAccountsShouldWork(t *testing.T) { @@ -235,21 +225,21 @@ func TestAccountsDB_Commit2OkAccountsShouldWork(t *testing.T) { t.Parallel() adr1, _, adb := generateAddressJurnalAccountAccountsDB() - buff := make([]byte, mock.HasherMock{}.Size()) + buff := make([]byte, sha256.Sha256{}.Size()) rand.Read(buff) - adr2 := mock.NewAddressMock() + adr2 := createDummyAddress() //first account has the balance of 40 state1, err := adb.GetJournalizedAccount(adr1) assert.Nil(t, err) - err = state1.SetBalanceWithJournal(*big.NewInt(40)) + err = state1.SetBalanceWithJournal(big.NewInt(40)) assert.Nil(t, err) //second account has the balance of 50 and some data state2, err := adb.GetJournalizedAccount(adr2) assert.Nil(t, err) - err = state2.SetBalanceWithJournal(*big.NewInt(50)) + err = state2.SetBalanceWithJournal(big.NewInt(50)) assert.Nil(t, err) state2.SaveKeyValue([]byte{65, 66, 67}, []byte{32, 33, 34}) err = adb.SaveData(state2) @@ -269,12 +259,12 @@ func TestAccountsDB_Commit2OkAccountsShouldWork(t *testing.T) { //checking state1 newState1, err := adb.GetJournalizedAccount(adr1) assert.Nil(t, err) - assert.Equal(t, newState1.BaseAccount().Balance, *big.NewInt(40)) + assert.Equal(t, newState1.BaseAccount().Balance, big.NewInt(40)) //checking state2 newState2, err := adb.GetJournalizedAccount(adr2) assert.Nil(t, err) - assert.Equal(t, newState2.BaseAccount().Balance, *big.NewInt(50)) + assert.Equal(t, newState2.BaseAccount().Balance, big.NewInt(50)) assert.NotNil(t, newState2.BaseAccount().RootHash) //get data err = adb.LoadDataTrie(newState2) @@ -297,7 +287,7 @@ func TestAccountsDB_CommitAccountDataShouldWork(t *testing.T) { hrCreated := base64.StdEncoding.EncodeToString(adb.RootHash()) fmt.Printf("State root - created account: %v\n", hrCreated) - err = state1.SetBalanceWithJournal(*big.NewInt(40)) + err = state1.SetBalanceWithJournal(big.NewInt(40)) assert.Nil(t, err) hrWithBalance := base64.StdEncoding.EncodeToString(adb.RootHash()) fmt.Printf("State root - account with balance 40: %v\n", hrWithBalance) @@ -310,7 +300,7 @@ func TestAccountsDB_CommitAccountDataShouldWork(t *testing.T) { //commit hash == account with balance assert.Equal(t, hrCommit, hrWithBalance) - err = state1.SetBalanceWithJournal(*big.NewInt(0)) + err = state1.SetBalanceWithJournal(big.NewInt(0)) assert.Nil(t, err) //root hash == hrCreated @@ -330,8 +320,8 @@ func TestAccountsDB_CommitAccountDataShouldWork(t *testing.T) { func TestAccountsDB_RevertNonceStepByStepAccountDataShouldWork(t *testing.T) { t.Parallel() - adr1 := mock.NewAddressMock() - adr2 := mock.NewAddressMock() + adr1 := createDummyAddress() + adr2 := createDummyAddress() //Step 1. create accounts objects adb := adbCreateAccountsDB() @@ -387,8 +377,8 @@ func TestAccountsDB_RevertNonceStepByStepAccountDataShouldWork(t *testing.T) { func TestAccountsDB_RevertBalanceStepByStepAccountDataShouldWork(t *testing.T) { t.Parallel() - adr1 := mock.NewAddressMock() - adr2 := mock.NewAddressMock() + adr1 := createDummyAddress() + adr2 := createDummyAddress() //Step 1. create accounts objects adb := adbCreateAccountsDB() @@ -418,12 +408,12 @@ func TestAccountsDB_RevertBalanceStepByStepAccountDataShouldWork(t *testing.T) { snapshotPreSet := adb.JournalLen() //Step 3. Set balances and save data - err = state1.SetBalanceWithJournal(*big.NewInt(40)) + err = state1.SetBalanceWithJournal(big.NewInt(40)) assert.Nil(t, err) hrWithBalance1 := base64.StdEncoding.EncodeToString(adb.RootHash()) fmt.Printf("State root - account with balance 40: %v\n", hrWithBalance1) - err = state2.SetBalanceWithJournal(*big.NewInt(50)) + err = state2.SetBalanceWithJournal(big.NewInt(50)) assert.Nil(t, err) hrWithBalance2 := base64.StdEncoding.EncodeToString(adb.RootHash()) fmt.Printf("State root - account with balance 50: %v\n", hrWithBalance2) @@ -447,8 +437,8 @@ func TestAccountsDB_RevertCodeStepByStepAccountDataShouldWork(t *testing.T) { //adr1 puts code hash + code inside trie. adr2 has the same code hash //revert should work - adr1 := mock.NewAddressMock() - adr2 := mock.NewAddressMock() + adr1 := createDummyAddress() + adr2 := createDummyAddress() //Step 1. create accounts objects adb := adbCreateAccountsDB() @@ -503,8 +493,8 @@ func TestAccountsDB_RevertDataStepByStepAccountDataShouldWork(t *testing.T) { //adr1 puts data inside trie. adr2 puts the same data //revert should work - adr1 := mock.NewAddressMock() - adr2 := mock.NewAddressMock() + adr1 := createDummyAddress() + adr2 := createDummyAddress() //Step 1. create accounts objects adb := adbCreateAccountsDB() @@ -564,8 +554,8 @@ func TestAccountsDB_RevertDataStepByStepWithCommitsAccountDataShouldWork(t *test //adr1 puts data inside trie. adr2 puts the same data //revert should work - adr1 := mock.NewAddressMock() - adr2 := mock.NewAddressMock() + adr1 := createDummyAddress() + adr2 := createDummyAddress() //Step 1. create accounts objects adb := adbCreateAccountsDB() @@ -641,8 +631,8 @@ func TestAccountsDB_RevertDataStepByStepWithCommitsAccountDataShouldWork(t *test func TestAccountsDB_ExecBalanceTxExecution(t *testing.T) { t.Parallel() - adrSrc := mock.NewAddressMock() - adrDest := mock.NewAddressMock() + adrSrc := createDummyAddress() + adrDest := createDummyAddress() //Step 1. create accounts objects adb := adbCreateAccountsDB() @@ -653,7 +643,7 @@ func TestAccountsDB_ExecBalanceTxExecution(t *testing.T) { assert.Nil(t, err) //Set a high balance to src's account - err = acntSrc.SetBalanceWithJournal(*big.NewInt(1000)) + err = acntSrc.SetBalanceWithJournal(big.NewInt(1000)) assert.Nil(t, err) hrOriginal := base64.StdEncoding.EncodeToString(adb.RootHash()) @@ -688,8 +678,8 @@ func TestAccountsDB_ExecBalanceTxExecution(t *testing.T) { func TestAccountsDB_ExecALotOfBalanceTxOK(t *testing.T) { t.Parallel() - adrSrc := mock.NewAddressMock() - adrDest := mock.NewAddressMock() + adrSrc := createDummyAddress() + adrDest := createDummyAddress() //Step 1. create accounts objects adb := adbCreateAccountsDB() @@ -700,7 +690,7 @@ func TestAccountsDB_ExecALotOfBalanceTxOK(t *testing.T) { assert.Nil(t, err) //Set a high balance to src's account - err = acntSrc.SetBalanceWithJournal(*big.NewInt(10000000)) + err = acntSrc.SetBalanceWithJournal(big.NewInt(10000000)) assert.Nil(t, err) hrOriginal := base64.StdEncoding.EncodeToString(adb.RootHash()) @@ -719,8 +709,8 @@ func TestAccountsDB_ExecALotOfBalanceTxOK(t *testing.T) { func TestAccountsDB_ExecALotOfBalanceTxOKorNOK(t *testing.T) { t.Parallel() - adrSrc := mock.NewAddressMock() - adrDest := mock.NewAddressMock() + adrSrc := createDummyAddress() + adrDest := createDummyAddress() //Step 1. create accounts objects adb := adbCreateAccountsDB() @@ -731,7 +721,7 @@ func TestAccountsDB_ExecALotOfBalanceTxOKorNOK(t *testing.T) { assert.Nil(t, err) //Set a high balance to src's account - err = acntSrc.SetBalanceWithJournal(*big.NewInt(10000000)) + err = acntSrc.SetBalanceWithJournal(big.NewInt(10000000)) assert.Nil(t, err) hrOriginal := base64.StdEncoding.EncodeToString(adb.RootHash()) @@ -755,8 +745,8 @@ func TestAccountsDB_ExecALotOfBalanceTxOKorNOK(t *testing.T) { } func BenchmarkTxExecution(b *testing.B) { - adrSrc := mock.NewAddressMock() - adrDest := mock.NewAddressMock() + adrSrc := createDummyAddress() + adrDest := createDummyAddress() //Step 1. create accounts objects adb := adbCreateAccountsDB() @@ -767,7 +757,7 @@ func BenchmarkTxExecution(b *testing.B) { assert.Nil(b, err) //Set a high balance to src's account - err = acntSrc.SetBalanceWithJournal(*big.NewInt(10000000)) + err = acntSrc.SetBalanceWithJournal(big.NewInt(10000000)) assert.Nil(b, err) b.ResetTimer() diff --git a/integrationTests/transaction/common.go b/integrationTests/transaction/common.go new file mode 100644 index 00000000000..c3d667ee22f --- /dev/null +++ b/integrationTests/transaction/common.go @@ -0,0 +1,229 @@ +package transaction + +import ( + "context" + "math/rand" + "time" + + "github.com/ElrondNetwork/elrond-go-sandbox/crypto" + "github.com/ElrondNetwork/elrond-go-sandbox/crypto/schnorr" + "github.com/ElrondNetwork/elrond-go-sandbox/data" + "github.com/ElrondNetwork/elrond-go-sandbox/data/blockchain" + "github.com/ElrondNetwork/elrond-go-sandbox/data/dataPool" + "github.com/ElrondNetwork/elrond-go-sandbox/data/shardedData" + "github.com/ElrondNetwork/elrond-go-sandbox/data/state" + "github.com/ElrondNetwork/elrond-go-sandbox/data/trie" + "github.com/ElrondNetwork/elrond-go-sandbox/data/typeConverters/uint64ByteSlice" + "github.com/ElrondNetwork/elrond-go-sandbox/hashing" + "github.com/ElrondNetwork/elrond-go-sandbox/hashing/sha256" + "github.com/ElrondNetwork/elrond-go-sandbox/marshal" + "github.com/ElrondNetwork/elrond-go-sandbox/node" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/ElrondNetwork/elrond-go-sandbox/process" + "github.com/ElrondNetwork/elrond-go-sandbox/process/factory" + "github.com/ElrondNetwork/elrond-go-sandbox/process/interceptor" + "github.com/ElrondNetwork/elrond-go-sandbox/process/resolver" + "github.com/ElrondNetwork/elrond-go-sandbox/sharding" + "github.com/ElrondNetwork/elrond-go-sandbox/storage" + "github.com/ElrondNetwork/elrond-go-sandbox/storage/memorydb" +) + +func createTestBlockChain() *blockchain.BlockChain { + + cfgCache := storage.CacheConfig{Size: 100, Type: storage.LRUCache} + + badBlockCache, _ := storage.NewCache(cfgCache.Type, cfgCache.Size) + + blockChain, _ := blockchain.NewBlockChain( + badBlockCache, + createMemUnit(), + createMemUnit(), + createMemUnit(), + createMemUnit(), + createMemUnit()) + + return blockChain +} + +func createMemUnit() storage.Storer { + cache, _ := storage.NewCache(storage.LRUCache, 10) + persist, _ := memorydb.New() + + unit, _ := storage.NewStorageUnit(cache, persist) + return unit +} + +func createTestDataPool() data.TransientDataHolder { + txPool, _ := shardedData.NewShardedData(storage.CacheConfig{Size: 100000, Type: storage.LRUCache}) + hdrPool, _ := shardedData.NewShardedData(storage.CacheConfig{Size: 100000, Type: storage.LRUCache}) + + cacherCfg := storage.CacheConfig{Size: 100000, Type: storage.LRUCache} + hdrNoncesCacher, _ := storage.NewCache(cacherCfg.Type, cacherCfg.Size) + hdrNonces, _ := dataPool.NewNonceToHashCacher(hdrNoncesCacher, uint64ByteSlice.NewBigEndianConverter()) + + cacherCfg = storage.CacheConfig{Size: 100000, Type: storage.LRUCache} + txBlockBody, _ := storage.NewCache(cacherCfg.Type, cacherCfg.Size) + + cacherCfg = storage.CacheConfig{Size: 100000, Type: storage.LRUCache} + peerChangeBlockBody, _ := storage.NewCache(cacherCfg.Type, cacherCfg.Size) + + cacherCfg = storage.CacheConfig{Size: 100000, Type: storage.LRUCache} + stateBlockBody, _ := storage.NewCache(cacherCfg.Type, cacherCfg.Size) + + dPool, _ := dataPool.NewDataPool( + txPool, + hdrPool, + hdrNonces, + txBlockBody, + peerChangeBlockBody, + stateBlockBody, + ) + + return dPool +} + +func createDummyHexAddress(chars int) string { + if chars < 1 { + return "" + } + + var characters = []byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'} + + rdm := rand.New(rand.NewSource(time.Now().UnixNano())) + + buff := make([]byte, chars) + for i := 0; i < chars; i++ { + buff[i] = characters[rdm.Int()%16] + } + + return string(buff) +} + +func adbCreateAccountsDB() *state.AccountsDB { + marsh := &marshal.JsonMarshalizer{} + + dbw, _ := trie.NewDBWriteCache(createMemUnit()) + tr, _ := trie.NewTrie(make([]byte, 32), dbw, sha256.Sha256{}) + adb, _ := state.NewAccountsDB(tr, sha256.Sha256{}, marsh) + + return adb +} + +func createMemNode(port int, dPool data.TransientDataHolder, accntAdapter state.AccountsAdapter) ( + *node.Node, + p2p.Messenger, + crypto.PrivateKey, + process.ProcessorFactory) { + + hasher := sha256.Sha256{} + marshalizer := &marshal.JsonMarshalizer{} + + cp, _ := p2p.NewConnectParamsFromPort(port) + mes, _ := p2p.NewMemMessenger(marshalizer, hasher, cp) + + addrConverter, _ := state.NewPlainAddressConverter(32, "0x") + + keyGen := schnorr.NewKeyGenerator() + sk, pk := keyGen.GeneratePair() + blockChain := createTestBlockChain() + shardCoordinator := &sharding.OneShardCoordinator{} + uint64Converter := uint64ByteSlice.NewBigEndianConverter() + + pFactory, _ := factory.NewProcessorsCreator(factory.ProcessorsCreatorConfig{ + InterceptorContainer: interceptor.NewContainer(), + ResolverContainer: resolver.NewContainer(), + Messenger: mes, + Blockchain: blockChain, + DataPool: dPool, + ShardCoordinator: shardCoordinator, + AddrConverter: addrConverter, + Hasher: hasher, + Marshalizer: marshalizer, + SingleSignKeyGen: keyGen, + Uint64ByteSliceConverter: uint64Converter, + }) + + n, _ := node.NewNode( + node.WithMessenger(mes), + node.WithMarshalizer(marshalizer), + node.WithHasher(hasher), + node.WithContext(context.Background()), + node.WithDataPool(dPool), + node.WithAddressConverter(addrConverter), + node.WithAccountsAdapter(accntAdapter), + node.WithSingleSignKeyGenerator(keyGen), + node.WithShardCoordinator(shardCoordinator), + node.WithBlockChain(blockChain), + node.WithUint64ByteSliceConverter(uint64Converter), + node.WithPrivateKey(sk), + node.WithPublicKey(pk), + node.WithProcessorCreator(pFactory), + ) + + _ = pFactory.CreateInterceptors() + _ = pFactory.CreateResolvers() + + return n, mes, sk, pFactory +} + +func createNetNode(port int, dPool data.TransientDataHolder, accntAdapter state.AccountsAdapter) ( + *node.Node, + p2p.Messenger, + crypto.PrivateKey) { + + hasher := sha256.Sha256{} + marshalizer := &marshal.JsonMarshalizer{} + + messenger := createMessenger(context.Background(), marshalizer, hasher, 4, port) + + addrConverter, _ := state.NewPlainAddressConverter(32, "0x") + + keyGen := schnorr.NewKeyGenerator() + sk, pk := keyGen.GeneratePair() + blkc := createTestBlockChain() + shardCoordinator := &sharding.OneShardCoordinator{} + uint64Converter := uint64ByteSlice.NewBigEndianConverter() + + pFactory, _ := factory.NewProcessorsCreator(factory.ProcessorsCreatorConfig{ + InterceptorContainer: interceptor.NewContainer(), + ResolverContainer: resolver.NewContainer(), + Messenger: messenger, + Blockchain: blkc, + DataPool: dPool, + ShardCoordinator: shardCoordinator, + AddrConverter: addrConverter, + Hasher: hasher, + Marshalizer: marshalizer, + SingleSignKeyGen: keyGen, + Uint64ByteSliceConverter: uint64Converter, + }) + + n, _ := node.NewNode( + node.WithMessenger(messenger), + node.WithMarshalizer(marshalizer), + node.WithHasher(hasher), + node.WithContext(context.Background()), + node.WithDataPool(dPool), + node.WithAddressConverter(addrConverter), + node.WithAccountsAdapter(accntAdapter), + node.WithSingleSignKeyGenerator(keyGen), + node.WithShardCoordinator(shardCoordinator), + node.WithBlockChain(blkc), + node.WithUint64ByteSliceConverter(uint64Converter), + node.WithPrivateKey(sk), + node.WithPublicKey(pk), + node.WithProcessorCreator(pFactory), + ) + + return n, nil, sk +} + +func createMessenger(ctx context.Context, marshalizer marshal.Marshalizer, hasher hashing.Hasher, maxAllowedPeers int, port int) p2p.Messenger { + cp := &p2p.ConnectParams{} + cp.Port = port + cp.GeneratePrivPubKeys(time.Now().UnixNano()) + cp.GenerateIDFromPubKey() + + nm, _ := p2p.NewNetMessenger(ctx, marshalizer, hasher, cp, maxAllowedPeers, p2p.GossipSub) + return nm +} diff --git a/integrationTests/transaction/interceptedBulkTxMem_test.go b/integrationTests/transaction/interceptedBulkTxMem_test.go new file mode 100644 index 00000000000..bfd23dd225e --- /dev/null +++ b/integrationTests/transaction/interceptedBulkTxMem_test.go @@ -0,0 +1,94 @@ +package transaction + +import ( + "math/big" + "sync" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go-sandbox/data/state" + "github.com/ElrondNetwork/elrond-go-sandbox/data/transaction" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/stretchr/testify/assert" +) + +func TestNode_GenerateSendInterceptBulkTransactionsWithMemMessenger(t *testing.T) { + dPool := createTestDataPool() + + startingNonce := uint64(6) + + addrConverter, _ := state.NewPlainAddressConverter(32, "0x") + accntAdapter := adbCreateAccountsDB() + + //TODO change when injecting a messenger is possible + n, _, sk, _ := createMemNode(1, dPool, accntAdapter) + + n.Start() + defer func() { _ = n.Stop() }() + + defer p2p.ReInitializeGloballyRegisteredPeers() + + //set the account's nonce to startingNonce + nodePubKeyBytes, _ := sk.GeneratePublic().ToByteArray() + nodeAddress, _ := addrConverter.CreateAddressFromPublicKeyBytes(nodePubKeyBytes) + nodeAccount, _ := accntAdapter.GetJournalizedAccount(nodeAddress) + nodeAccount.SetNonceWithJournal(startingNonce) + accntAdapter.Commit() + + noOfTx := 50 + + wg := sync.WaitGroup{} + wg.Add(noOfTx) + + chanDone := make(chan bool) + + go func() { + wg.Wait() + + chanDone <- true + }() + + mut := sync.Mutex{} + txHashes := make([][]byte, 0) + + //wire up handler + dPool.Transactions().RegisterHandler(func(key []byte) { + mut.Lock() + defer mut.Unlock() + + txHashes = append(txHashes, key) + wg.Done() + }) + + err := n.GenerateAndSendBulkTransactions(createDummyHexAddress(64), big.NewInt(1), uint64(noOfTx)) + assert.Nil(t, err) + + select { + case <-chanDone: + case <-time.After(time.Second * 3): + assert.Fail(t, "timeout") + return + } + + assert.Equal(t, noOfTx, len(txHashes)) + + bitmap := make([]bool, noOfTx+int(startingNonce)) + //set for each nonce from found tx a true flag in bitmap + for i := 0; i < noOfTx; i++ { + tx, _ := dPool.Transactions().ShardDataStore(0).Get(txHashes[i]) + + assert.NotNil(t, tx) + bitmap[tx.(*transaction.Transaction).Nonce] = true + } + + //for the first startingNonce values, the bitmap should be false + //for the rest, true + for i := 0; i < noOfTx+int(startingNonce); i++ { + if i < int(startingNonce) { + assert.False(t, bitmap[i]) + continue + } + + assert.True(t, bitmap[i]) + } +} diff --git a/integrationTests/transaction/interceptedBulkTxNet_test.go b/integrationTests/transaction/interceptedBulkTxNet_test.go new file mode 100644 index 00000000000..9c0182a928f --- /dev/null +++ b/integrationTests/transaction/interceptedBulkTxNet_test.go @@ -0,0 +1,139 @@ +package transaction + +import ( + "encoding/base64" + "fmt" + "math/big" + "sync" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go-sandbox/data/state" + "github.com/ElrondNetwork/elrond-go-sandbox/data/transaction" + "github.com/stretchr/testify/assert" +) + +func TestNode_GenerateSendInterceptBulkTransactionsWithNetMessenger(t *testing.T) { + t.Skip("TODO: fix tests that run on the same local network") + + dPool := createTestDataPool() + + startingNonce := uint64(6) + + addrConverter, _ := state.NewPlainAddressConverter(32, "0x") + accntAdapter := adbCreateAccountsDB() + + //TODO change when injecting a messenger is possible + n, _, sk := createNetNode(4000, dPool, accntAdapter) + + n.Start() + defer n.Stop() + + n.P2PBootstrap() + + time.Sleep(time.Second) + + //set the account's nonce to startingNonce + nodePubKeyBytes, _ := sk.GeneratePublic().ToByteArray() + nodeAddress, _ := addrConverter.CreateAddressFromPublicKeyBytes(nodePubKeyBytes) + nodeAccount, _ := accntAdapter.GetJournalizedAccount(nodeAddress) + nodeAccount.SetNonceWithJournal(startingNonce) + accntAdapter.Commit() + + noOfTx := 100000 + + time.Sleep(time.Second) + + wg := sync.WaitGroup{} + wg.Add(noOfTx) + + chanDone := make(chan bool) + + go func() { + wg.Wait() + + chanDone <- true + }() + + mut := sync.Mutex{} + txHashes := make([][]byte, 0) + transactions := make([]*transaction.Transaction, 0) + + //wire up handler + dPool.Transactions().RegisterHandler(func(key []byte) { + mut.Lock() + defer mut.Unlock() + + txHashes = append(txHashes, key) + + dataStore := dPool.Transactions().ShardDataStore(0) + val, _ := dataStore.Get(key) + + if val == nil { + assert.Fail(t, fmt.Sprintf("key %s not in store?", base64.StdEncoding.EncodeToString(key))) + return + } + + transactions = append(transactions, val.(*transaction.Transaction)) + wg.Done() + }) + + err := n.GenerateAndSendBulkTransactions(createDummyHexAddress(64), big.NewInt(1), uint64(noOfTx)) + + assert.Nil(t, err) + + select { + case <-chanDone: + case <-time.After(time.Second * 60): + assert.Fail(t, "timeout") + return + } + + if noOfTx != len(txHashes) { + + for i := startingNonce; i < startingNonce+uint64(noOfTx); i++ { + found := false + + for _, tx := range transactions { + if tx.Nonce == i { + found = true + break + } + } + + if !found { + fmt.Printf("tx with nonce %d is missing\n", i) + } + + } + + assert.Fail(t, fmt.Sprintf("should have been %d, got %d", noOfTx, len(txHashes))) + + return + } + + bitmap := make([]bool, noOfTx+int(startingNonce)) + //set for each nonce from found tx a true flag in bitmap + for i := 0; i < noOfTx; i++ { + val, _ := dPool.Transactions().ShardDataStore(0).Get(txHashes[i]) + + if val == nil { + continue + } + + tx := val.(*transaction.Transaction) + + bitmap[tx.Nonce] = true + } + + //for the first startingNonce values, the bitmap should be false + //for the rest, true + for i := 0; i < noOfTx+int(startingNonce); i++ { + if i < int(startingNonce) { + assert.False(t, bitmap[i]) + continue + } + + assert.True(t, bitmap[i]) + } +} diff --git a/integrationTests/transaction/interceptedResolvedTxMem_test.go b/integrationTests/transaction/interceptedResolvedTxMem_test.go new file mode 100644 index 00000000000..fa058b9fe0e --- /dev/null +++ b/integrationTests/transaction/interceptedResolvedTxMem_test.go @@ -0,0 +1,88 @@ +package transaction + +import ( + "fmt" + "math/big" + "reflect" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go-sandbox/data/transaction" + "github.com/ElrondNetwork/elrond-go-sandbox/hashing/sha256" + "github.com/ElrondNetwork/elrond-go-sandbox/marshal" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/ElrondNetwork/elrond-go-sandbox/process/factory" + transaction2 "github.com/ElrondNetwork/elrond-go-sandbox/process/transaction" + "github.com/stretchr/testify/assert" +) + +func TestNode_RequestInterceptTransactionWithMemMessenger(t *testing.T) { + hasher := sha256.Sha256{} + marshalizer := &marshal.JsonMarshalizer{} + + dPoolRequestor := createTestDataPool() + dPoolResolver := createTestDataPool() + + nRequestor, _, sk1, pf := createMemNode(1, dPoolRequestor, adbCreateAccountsDB()) + nResolver, _, _, _ := createMemNode(2, dPoolResolver, adbCreateAccountsDB()) + + nRequestor.Start() + nResolver.Start() + defer func() { + _ = nRequestor.Stop() + _ = nResolver.Stop() + }() + + defer p2p.ReInitializeGloballyRegisteredPeers() + + time.Sleep(time.Second) + + buffPk1, _ := sk1.GeneratePublic().ToByteArray() + + //Step 1. Generate a signed transaction + tx := transaction.Transaction{ + Nonce: 0, + Value: big.NewInt(0), + RcvAddr: hasher.Compute("receiver"), + SndAddr: buffPk1, + Data: []byte("tx notarized data"), + } + + txBuff, _ := marshalizer.Marshal(&tx) + tx.Signature, _ = sk1.Sign(txBuff) + + signedTxBuff, _ := marshalizer.Marshal(&tx) + + fmt.Printf("Transaction: %v\n%v\n", tx, string(signedTxBuff)) + + chanDone := make(chan bool) + + txHash := hasher.Compute(string(signedTxBuff)) + + //step 2. wire up a received handler for requestor + dPoolRequestor.Transactions().RegisterHandler(func(key []byte) { + txStored, _ := dPoolRequestor.Transactions().ShardDataStore(0).Get(key) + + if reflect.DeepEqual(txStored, &tx) && tx.Signature != nil { + chanDone <- true + } + + assert.Equal(t, txStored, &tx) + assert.Equal(t, txHash, key) + }) + + //Step 3. add the transaction in resolver pool + dPoolResolver.Transactions().AddData(txHash, &tx, 0) + + //Step 4. request tx + res, _ := pf.ResolverContainer().Get(string(factory.TransactionTopic)) + txResolver := res.(*transaction2.TxResolver) + err := txResolver.RequestTransactionFromHash(txHash) + assert.Nil(t, err) + + select { + case <-chanDone: + case <-time.After(time.Second * 3): + assert.Fail(t, "timeout") + } +} diff --git a/integrationTests/transaction/interceptedTxMem_test.go b/integrationTests/transaction/interceptedTxMem_test.go new file mode 100644 index 00000000000..0f04718fc1a --- /dev/null +++ b/integrationTests/transaction/interceptedTxMem_test.go @@ -0,0 +1,74 @@ +package transaction + +import ( + "encoding/hex" + "fmt" + "math/big" + "reflect" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go-sandbox/data/transaction" + "github.com/ElrondNetwork/elrond-go-sandbox/hashing/sha256" + "github.com/ElrondNetwork/elrond-go-sandbox/marshal" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/stretchr/testify/assert" +) + +func TestNode_GenerateSendInterceptTransactionWithMemMessenger(t *testing.T) { + hasher := sha256.Sha256{} + marshalizer := &marshal.JsonMarshalizer{} + + dPool := createTestDataPool() + + n, _, sk, _ := createMemNode(1, dPool, adbCreateAccountsDB()) + + n.Start() + defer func() { _ = n.Stop() }() + + defer p2p.ReInitializeGloballyRegisteredPeers() + + pkBuff, _ := sk.GeneratePublic().ToByteArray() + + //Step 1. Generate a transaction + tx := transaction.Transaction{ + Nonce: 0, + Value: big.NewInt(0), + RcvAddr: hasher.Compute("receiver"), + SndAddr: pkBuff, + Data: []byte("tx notarized data"), + } + + //Step 2. Sign transaction + txBuff, _ := marshalizer.Marshal(&tx) + tx.Signature, _ = sk.Sign(txBuff) + + signedTxBuff, _ := marshalizer.Marshal(&tx) + + fmt.Printf("Transaction: %v\n%v\n", tx, string(signedTxBuff)) + + chanDone := make(chan bool) + + //step 3. wire up a received handler + dPool.Transactions().RegisterHandler(func(key []byte) { + txStored, _ := dPool.Transactions().ShardDataStore(0).Get(key) + + if reflect.DeepEqual(txStored, &tx) && tx.Signature != nil { + chanDone <- true + } + + assert.Equal(t, txStored, &tx) + + }) + + //Step 4. Send Tx + _, err := n.SendTransaction(tx.Nonce, hex.EncodeToString(tx.SndAddr), hex.EncodeToString(tx.RcvAddr), + tx.Value, string(tx.Data), tx.Signature) + assert.Nil(t, err) + + select { + case <-chanDone: + case <-time.After(time.Second * 3): + assert.Fail(t, "timeout") + } +} diff --git a/logger/logger.go b/logger/logger.go index d6cf11658ef..4f94b563a24 100644 --- a/logger/logger.go +++ b/logger/logger.go @@ -1,10 +1,12 @@ package logger import ( + "fmt" "io" "os" "path/filepath" "runtime" + "strings" "time" log "github.com/sirupsen/logrus" @@ -22,6 +24,7 @@ const ( const ( defaultLogPath = "logs" defaultStackTraceDepth = 2 + maxHeadlineLength = 100 ) // Logger represents the application logger. @@ -156,6 +159,20 @@ func (el *Logger) LogIfError(err error) { cl.Error(err.Error()) } +// Headline will build a headline message given a delimiter string +// timestamp parameter will be printed before the repeating delimiter +func (el *Logger) Headline(message string, timestamp string, delimiter string) string { + if len(delimiter) > 1 { + delimiter = delimiter[:1] + } + if len(message) >= maxHeadlineLength { + return message + } + delimiterLength := (maxHeadlineLength - len(message)) / 2 + delimiterText := strings.Repeat(delimiter, delimiterLength) + return fmt.Sprintf("\n%s %s %s %s\n\n", timestamp, delimiterText, message, delimiterText) +} + func (el *Logger) defaultFields() *log.Entry { _, file, line, ok := runtime.Caller(el.stackTraceDepth) return el.logger.WithFields(log.Fields{ diff --git a/logger/printerHook.go b/logger/printerHook.go index 624bffc9c54..f6532fe2911 100644 --- a/logger/printerHook.go +++ b/logger/printerHook.go @@ -7,8 +7,8 @@ import ( ) // printerHook is a logrus hook that prints out in the console only the message -// from the logged line. It is used to easlily follow logged messages -// instead of trying to decypher through the full logged json +// from the logged line. It is used to easily follow logged messages +// instead of trying to decrypt through the full logged json type printerHook struct { Writer io.Writer } @@ -16,7 +16,6 @@ type printerHook struct { // Levels returns the array of levels for which the hook will be applicable func (h *printerHook) Levels() []log.Level { return []log.Level{ - log.DebugLevel, log.InfoLevel, log.WarnLevel, log.ErrorLevel, diff --git a/marshal/capnpMarshalizer_test.go b/marshal/capnpMarshalizer_test.go index f2991ade470..d618e353db1 100644 --- a/marshal/capnpMarshalizer_test.go +++ b/marshal/capnpMarshalizer_test.go @@ -94,6 +94,63 @@ func benchUnmarshal(b *testing.B, m marshal.Marshalizer, obj interface{}, valida } } +func TestCapnpMarshalizer_TransactionUnmarshalShouldWork(t *testing.T) { + tx := &Transaction{} + cmr := &marshal.CapnpMarshalizer{} + + dArray := tx.GenerateDummyArray() + length := len(dArray) + serialized := make([][]byte, length) + + for i, obj := range dArray { + mar, _ := cmr.Marshal(obj) + t := make([]byte, len(mar)) + + _ = copy(t, mar) + serialized[i] = t + } + + for i := 0; i < length; i++ { + err := cmr.Unmarshal(tx, serialized[i]) + + assert.Nil(t, err) + + // Check unmarshaled data as expected + orig := dArray[i] + assert.Equal(t, orig, tx) + } + +} + +func TestCapnpMarshalizer_TransactionMarshalUnmarshalShouldWork(t *testing.T) { + tx := &transaction.Transaction{ + Nonce: 1, + Value: big.NewInt(2), + Signature: []byte("sig"), + Data: []byte("data"), + RcvAddr: []byte("recvAddr"), + SndAddr: []byte("sndAddr"), + Challenge: []byte("challenge"), + GasLimit: 3, + GasPrice: 4, + } + + cmr := &marshal.CapnpMarshalizer{} + + buff, err := cmr.Marshal(tx) + assert.Nil(t, err) + + tx2 := &transaction.Transaction{} + + err = cmr.Unmarshal(tx2, buff) + assert.Nil(t, err) + + assert.Equal(t, tx, tx2) + assert.False(t, tx == tx2) + + assert.False(t, tx.Value == tx2.Value) +} + // benchmarks func BenchmarkCapnprotoTransactionMarshal(b *testing.B) { tx := &Transaction{} @@ -400,7 +457,7 @@ func (tx *Transaction) GenerateDummyArray() []data.CapnpHelper { transactions = append(transactions, &Transaction{ Transaction: transaction.Transaction{ Nonce: uint64(rand.Int63n(10000)), - Value: *val, + Value: val, RcvAddr: []byte(RandomStr(32)), SndAddr: []byte(RandomStr(32)), GasPrice: uint64(rand.Int63n(10000)), diff --git a/node/createInterceptors.go b/node/createInterceptors.go deleted file mode 100644 index 4d1228b9217..00000000000 --- a/node/createInterceptors.go +++ /dev/null @@ -1,138 +0,0 @@ -package node - -import ( - "github.com/ElrondNetwork/elrond-go-sandbox/process/block" - "github.com/ElrondNetwork/elrond-go-sandbox/process/interceptor" - "github.com/ElrondNetwork/elrond-go-sandbox/process/transaction" -) - -func (n *Node) createInterceptors() error { - err := n.createTxInterceptor() - if err != nil { - return err - } - - err = n.createHdrInterceptor() - if err != nil { - return err - } - - err = n.createTxBlockBodyInterceptor() - if err != nil { - return err - } - - err = n.createPeerChBlockBodyInterceptor() - if err != nil { - return err - } - - err = n.createStateBlockBodyInterceptor() - if err != nil { - return err - } - - return nil -} - -func (n *Node) createTxInterceptor() error { - intercept, err := interceptor.NewTopicInterceptor(string(TransactionTopic), n.messenger, transaction.NewInterceptedTransaction()) - if err != nil { - return err - } - txInterceptor, err := transaction.NewTxInterceptor( - intercept, - n.dataPool.Transactions(), - n.addrConverter, - n.hasher, - n.singleSignKeyGen, - n.shardCoordinator) - - if err != nil { - return err - } - - n.interceptors = append(n.interceptors, txInterceptor) - return nil -} - -func (n *Node) createHdrInterceptor() error { - intercept, err := interceptor.NewTopicInterceptor(string(HeadersTopic), n.messenger, block.NewInterceptedHeader()) - if err != nil { - return err - } - hdrInterceptor, err := block.NewHeaderInterceptor( - intercept, - n.dataPool.Headers(), - n.dataPool.HeadersNonces(), - n.hasher, - n.shardCoordinator, - ) - - if err != nil { - return err - } - - n.interceptors = append(n.interceptors, hdrInterceptor) - return nil -} - -func (n *Node) createTxBlockBodyInterceptor() error { - intercept, err := interceptor.NewTopicInterceptor(string(TxBlockBodyTopic), n.messenger, block.NewInterceptedTxBlockBody()) - if err != nil { - return err - } - txBlockBodyInterceptor, err := block.NewGenericBlockBodyInterceptor( - intercept, - n.dataPool.TxBlocks(), - n.hasher, - n.shardCoordinator, - ) - - if err != nil { - return err - } - - n.interceptors = append(n.interceptors, txBlockBodyInterceptor) - return nil -} - -func (n *Node) createPeerChBlockBodyInterceptor() error { - intercept, err := interceptor.NewTopicInterceptor(string(PeerChBodyTopic), n.messenger, block.NewInterceptedPeerBlockBody()) - if err != nil { - return err - } - peerChBodyInterceptor, err := block.NewGenericBlockBodyInterceptor( - intercept, - n.dataPool.PeerChangesBlocks(), - n.hasher, - n.shardCoordinator, - ) - - if err != nil { - return err - } - - n.interceptors = append(n.interceptors, peerChBodyInterceptor) - return nil -} - -func (n *Node) createStateBlockBodyInterceptor() error { - intercept, err := interceptor.NewTopicInterceptor(string(StateBodyTopic), n.messenger, block.NewInterceptedStateBlockBody()) - if err != nil { - return err - } - stateBodyInterceptor, err := block.NewGenericBlockBodyInterceptor( - intercept, - n.dataPool.StateBlocks(), - n.hasher, - n.shardCoordinator, - ) - - if err != nil { - return err - } - - n.interceptors = append(n.interceptors, stateBodyInterceptor) - return nil -} diff --git a/node/createInterceptors_test.go b/node/createInterceptors_test.go deleted file mode 100644 index 8dc8db29c8a..00000000000 --- a/node/createInterceptors_test.go +++ /dev/null @@ -1,814 +0,0 @@ -package node - -import ( - "testing" - - "github.com/ElrondNetwork/elrond-go-sandbox/data" - "github.com/ElrondNetwork/elrond-go-sandbox/marshal" - "github.com/ElrondNetwork/elrond-go-sandbox/node/mock" - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" - "github.com/ElrondNetwork/elrond-go-sandbox/storage" - "github.com/libp2p/go-libp2p-pubsub" - "github.com/stretchr/testify/assert" -) - -func createRequiredObjects(marshalizer marshal.Marshalizer) (*p2p.Topic, *mock.TransientDataPoolMock) { - topic := p2p.NewTopic("", &mock.StringCreatorMock{}, marshalizer) - topic.RegisterTopicValidator = func(v pubsub.Validator) error { - return nil - } - - dataPool := &mock.TransientDataPoolMock{} - - return topic, dataPool -} - -//------- createTxInterceptor - -func TestCreateTxInterceptor_NilMessengerShouldErr(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - err := node.createTxInterceptor() - - assert.Equal(t, "nil Messenger", err.Error()) - assert.Equal(t, 0, len(node.interceptors)) -} - -func TestCreateTxInterceptor_IncompleteSettingsShouldErr(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - mes := mock.NewMessengerStub() - node.messenger = mes - - topic, dataPool := createRequiredObjects(mes.Marshalizer()) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - if name == string(TransactionTopic) { - return topic - } - - return nil - } - - dataPool.TransactionsCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - - node.dataPool = dataPool - node.addrConverter = mock.AddressConverterStub{} - //nil hasher - node.singleSignKeyGen = &mock.SingleSignKeyGenMock{} - node.shardCoordinator = mock.NewOneShardCoordinatorMock() - - err := node.createTxInterceptor() - - assert.Equal(t, "nil Hasher", err.Error()) - assert.Equal(t, 0, len(node.interceptors)) -} - -func TestCreateTxInterceptor_ShouldWork(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - mes := mock.NewMessengerStub() - node.messenger = mes - - topic, dataPool := createRequiredObjects(mes.Marshalizer()) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - if name == string(TransactionTopic) { - return topic - } - - return nil - } - - dataPool.TransactionsCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - - node.dataPool = dataPool - node.addrConverter = mock.AddressConverterStub{} - node.hasher = mock.HasherMock{} - node.singleSignKeyGen = &mock.SingleSignKeyGenMock{} - node.shardCoordinator = mock.NewOneShardCoordinatorMock() - - err := node.createTxInterceptor() - - assert.Nil(t, err) - assert.Equal(t, 1, len(node.interceptors)) -} - -//------- createHdrInterceptor - -func TestCreateHdrInterceptor_NilMessengerShouldErr(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - err := node.createHdrInterceptor() - - assert.Equal(t, "nil Messenger", err.Error()) - assert.Equal(t, 0, len(node.interceptors)) -} - -func TestCreateHdrInterceptor_IncompleteSettingsShouldErr(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - mes := mock.NewMessengerStub() - node.messenger = mes - - topic, dataPool := createRequiredObjects(mes.Marshalizer()) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - if name == string(HeadersTopic) { - return topic - } - - return nil - } - - dataPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - dataPool.HeadersNoncesCalled = func() data.Uint64Cacher { - return &mock.Uint64CacherStub{} - } - - node.dataPool = dataPool - node.shardCoordinator = mock.NewOneShardCoordinatorMock() - - err := node.createHdrInterceptor() - - assert.Equal(t, "nil Hasher", err.Error()) - assert.Equal(t, 0, len(node.interceptors)) -} - -func TestCreateHdrInterceptor_ShouldWork(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - mes := mock.NewMessengerStub() - node.messenger = mes - - topic, dataPool := createRequiredObjects(mes.Marshalizer()) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - if name == string(HeadersTopic) { - return topic - } - - return nil - } - - dataPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - dataPool.HeadersNoncesCalled = func() data.Uint64Cacher { - return &mock.Uint64CacherStub{} - } - - node.dataPool = dataPool - node.hasher = mock.HasherMock{} - node.shardCoordinator = mock.NewOneShardCoordinatorMock() - - err := node.createHdrInterceptor() - - assert.Nil(t, err) - assert.Equal(t, 1, len(node.interceptors)) -} - -//------- createTxBlockBodyInterceptor - -func TestCreateTxBlockBodyInterceptor_NilMessengerShouldErr(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - err := node.createTxBlockBodyInterceptor() - - assert.Equal(t, "nil Messenger", err.Error()) - assert.Equal(t, 0, len(node.interceptors)) -} - -func TestCreateTxBlockBodyInterceptor_IncompleteSettingsShouldErr(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - mes := mock.NewMessengerStub() - node.messenger = mes - - topic, dataPool := createRequiredObjects(mes.Marshalizer()) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - if name == string(TxBlockBodyTopic) { - return topic - } - - return nil - } - - dataPool.TxBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - - node.dataPool = dataPool - node.shardCoordinator = mock.NewOneShardCoordinatorMock() - - err := node.createTxBlockBodyInterceptor() - assert.Equal(t, "nil Hasher", err.Error()) - assert.Equal(t, 0, len(node.interceptors)) -} - -func TestCreateTxBlockBodyInterceptor_ShouldWork(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - mes := mock.NewMessengerStub() - node.messenger = mes - - topic, dataPool := createRequiredObjects(mes.Marshalizer()) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - if name == string(TxBlockBodyTopic) { - return topic - } - - return nil - } - - dataPool.TxBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - - node.dataPool = dataPool - node.hasher = mock.HasherMock{} - node.shardCoordinator = mock.NewOneShardCoordinatorMock() - - err := node.createTxBlockBodyInterceptor() - - assert.Nil(t, err) - assert.Equal(t, 1, len(node.interceptors)) -} - -//------- createPeerChBlockBodyInterceptor - -func TestCreatePeerChBlockBodyInterceptor_NilMessengerShouldErr(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - err := node.createPeerChBlockBodyInterceptor() - - assert.Equal(t, "nil Messenger", err.Error()) - assert.Equal(t, 0, len(node.interceptors)) -} - -func TestCreatePeerChBlockBodyInterceptor_IncompleteSettingsShouldErr(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - mes := mock.NewMessengerStub() - node.messenger = mes - - topic, dataPool := createRequiredObjects(mes.Marshalizer()) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - if name == string(PeerChBodyTopic) { - return topic - } - - return nil - } - - dataPool.PeerChangesBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - - node.dataPool = dataPool - node.shardCoordinator = mock.NewOneShardCoordinatorMock() - - err := node.createPeerChBlockBodyInterceptor() - assert.Equal(t, "nil Hasher", err.Error()) - assert.Equal(t, 0, len(node.interceptors)) -} - -func TestCreatePeerChBlockBodyInterceptor_ShouldWork(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - mes := mock.NewMessengerStub() - node.messenger = mes - - topic, dataPool := createRequiredObjects(mes.Marshalizer()) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - if name == string(PeerChBodyTopic) { - return topic - } - - return nil - } - - dataPool.PeerChangesBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - - node.dataPool = dataPool - node.hasher = mock.HasherMock{} - node.shardCoordinator = mock.NewOneShardCoordinatorMock() - - err := node.createPeerChBlockBodyInterceptor() - - assert.Nil(t, err) - assert.Equal(t, 1, len(node.interceptors)) -} - -//------- createStateBlockBodyInterceptor - -func TestCreateStateBlockBodyInterceptor_NilMessengerShouldErr(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - err := node.createStateBlockBodyInterceptor() - - assert.Equal(t, "nil Messenger", err.Error()) - assert.Equal(t, 0, len(node.interceptors)) -} - -func TestCreateStateBlockBodyInterceptor_IncompleteSettingsShouldErr(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - mes := mock.NewMessengerStub() - node.messenger = mes - - topic, dataPool := createRequiredObjects(mes.Marshalizer()) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - if name == string(StateBodyTopic) { - return topic - } - - return nil - } - - dataPool.StateBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - - node.dataPool = dataPool - node.shardCoordinator = mock.NewOneShardCoordinatorMock() - - err := node.createStateBlockBodyInterceptor() - assert.Equal(t, "nil Hasher", err.Error()) - assert.Equal(t, 0, len(node.interceptors)) -} - -func TestCreateStateBlockBodyInterceptor_ShouldWork(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - mes := mock.NewMessengerStub() - node.messenger = mes - - topic, dataPool := createRequiredObjects(mes.Marshalizer()) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - if name == string(StateBodyTopic) { - return topic - } - - return nil - } - - dataPool.StateBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - - node.dataPool = dataPool - node.hasher = mock.HasherMock{} - node.shardCoordinator = mock.NewOneShardCoordinatorMock() - - err := node.createStateBlockBodyInterceptor() - - assert.Nil(t, err) - assert.Equal(t, 1, len(node.interceptors)) -} - -//------- createInterceptors - -func TestCreateInterceptors_NilTransactionsShouldErr(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - mes := mock.NewMessengerStub() - node.messenger = mes - - genericTopic, dataPool := createRequiredObjects(mes.Marshalizer()) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - switch name { - case string(TransactionTopic): - return genericTopic - case string(HeadersTopic): - return genericTopic - case string(TxBlockBodyTopic): - return genericTopic - case string(PeerChBodyTopic): - return genericTopic - case string(StateBodyTopic): - return genericTopic - } - - return nil - } - - dataPool.TransactionsCalled = func() data.ShardedDataCacherNotifier { - return nil - } - dataPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - dataPool.HeadersNoncesCalled = func() data.Uint64Cacher { - return &mock.Uint64CacherStub{} - } - dataPool.TxBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - dataPool.PeerChangesBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - dataPool.StateBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - - node.dataPool = dataPool - node.addrConverter = mock.AddressConverterStub{} - node.hasher = mock.HasherMock{} - node.singleSignKeyGen = &mock.SingleSignKeyGenMock{} - node.shardCoordinator = mock.NewOneShardCoordinatorMock() - - err := node.createInterceptors() - - assert.Equal(t, "nil transaction data pool", err.Error()) -} - -func TestCreateInterceptors_NilHeadersShouldErr(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - mes := mock.NewMessengerStub() - node.messenger = mes - - genericTopic, dataPool := createRequiredObjects(mes.Marshalizer()) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - switch name { - case string(TransactionTopic): - return genericTopic - case string(HeadersTopic): - return genericTopic - case string(TxBlockBodyTopic): - return genericTopic - case string(PeerChBodyTopic): - return genericTopic - case string(StateBodyTopic): - return genericTopic - } - - return nil - } - - dataPool.TransactionsCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - dataPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - return nil - } - dataPool.HeadersNoncesCalled = func() data.Uint64Cacher { - return &mock.Uint64CacherStub{} - } - dataPool.TxBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - dataPool.PeerChangesBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - dataPool.StateBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - - node.dataPool = dataPool - node.addrConverter = mock.AddressConverterStub{} - node.hasher = mock.HasherMock{} - node.singleSignKeyGen = &mock.SingleSignKeyGenMock{} - node.shardCoordinator = mock.NewOneShardCoordinatorMock() - - err := node.createInterceptors() - - assert.Equal(t, "nil headers data pool", err.Error()) -} - -func TestCreateInterceptors_NilHeadersNoncesShouldErr(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - mes := mock.NewMessengerStub() - node.messenger = mes - - genericTopic, dataPool := createRequiredObjects(mes.Marshalizer()) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - switch name { - case string(TransactionTopic): - return genericTopic - case string(HeadersTopic): - return genericTopic - case string(TxBlockBodyTopic): - return genericTopic - case string(PeerChBodyTopic): - return genericTopic - case string(StateBodyTopic): - return genericTopic - } - - return nil - } - - dataPool.TransactionsCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - dataPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - dataPool.HeadersNoncesCalled = func() data.Uint64Cacher { - return nil - } - dataPool.TxBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - dataPool.PeerChangesBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - dataPool.StateBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - - node.dataPool = dataPool - node.addrConverter = mock.AddressConverterStub{} - node.hasher = mock.HasherMock{} - node.singleSignKeyGen = &mock.SingleSignKeyGenMock{} - node.shardCoordinator = mock.NewOneShardCoordinatorMock() - - err := node.createInterceptors() - - assert.Equal(t, "nil headers nonces cache", err.Error()) -} - -func TestCreateInterceptors_NilTxBlockBodyShouldErr(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - mes := mock.NewMessengerStub() - node.messenger = mes - - genericTopic, dataPool := createRequiredObjects(mes.Marshalizer()) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - switch name { - case string(TransactionTopic): - return genericTopic - case string(HeadersTopic): - return genericTopic - case string(TxBlockBodyTopic): - return genericTopic - case string(PeerChBodyTopic): - return genericTopic - case string(StateBodyTopic): - return genericTopic - } - - return nil - } - - dataPool.TransactionsCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - dataPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - dataPool.HeadersNoncesCalled = func() data.Uint64Cacher { - return &mock.Uint64CacherStub{} - } - dataPool.TxBlocksCalled = func() storage.Cacher { - return nil - } - dataPool.PeerChangesBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - dataPool.StateBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - - node.dataPool = dataPool - node.addrConverter = mock.AddressConverterStub{} - node.hasher = mock.HasherMock{} - node.singleSignKeyGen = &mock.SingleSignKeyGenMock{} - node.shardCoordinator = mock.NewOneShardCoordinatorMock() - - err := node.createInterceptors() - - assert.Equal(t, "nil cacher", err.Error()) -} - -func TestCreateInterceptors_NilPeerBlockBodyShouldErr(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - mes := mock.NewMessengerStub() - node.messenger = mes - - genericTopic, dataPool := createRequiredObjects(mes.Marshalizer()) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - switch name { - case string(TransactionTopic): - return genericTopic - case string(HeadersTopic): - return genericTopic - case string(TxBlockBodyTopic): - return genericTopic - case string(PeerChBodyTopic): - return genericTopic - case string(StateBodyTopic): - return genericTopic - } - - return nil - } - - dataPool.TransactionsCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - dataPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - dataPool.HeadersNoncesCalled = func() data.Uint64Cacher { - return &mock.Uint64CacherStub{} - } - dataPool.TxBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - dataPool.PeerChangesBlocksCalled = func() storage.Cacher { - return nil - } - dataPool.StateBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - - node.dataPool = dataPool - node.addrConverter = mock.AddressConverterStub{} - node.hasher = mock.HasherMock{} - node.singleSignKeyGen = &mock.SingleSignKeyGenMock{} - node.shardCoordinator = mock.NewOneShardCoordinatorMock() - - err := node.createInterceptors() - - assert.Equal(t, "nil cacher", err.Error()) -} - -func TestCreateInterceptors_NilStateBlockBodyShouldErr(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - mes := mock.NewMessengerStub() - node.messenger = mes - - genericTopic, dataPool := createRequiredObjects(mes.Marshalizer()) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - switch name { - case string(TransactionTopic): - return genericTopic - case string(HeadersTopic): - return genericTopic - case string(TxBlockBodyTopic): - return genericTopic - case string(PeerChBodyTopic): - return genericTopic - case string(StateBodyTopic): - return genericTopic - } - - return nil - } - - dataPool.TransactionsCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - dataPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - dataPool.HeadersNoncesCalled = func() data.Uint64Cacher { - return &mock.Uint64CacherStub{} - } - dataPool.TxBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - dataPool.PeerChangesBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - dataPool.StateBlocksCalled = func() storage.Cacher { - return nil - } - - node.dataPool = dataPool - node.addrConverter = mock.AddressConverterStub{} - node.hasher = mock.HasherMock{} - node.singleSignKeyGen = &mock.SingleSignKeyGenMock{} - node.shardCoordinator = mock.NewOneShardCoordinatorMock() - - err := node.createInterceptors() - - assert.Equal(t, "nil cacher", err.Error()) -} - -func TestCreateInterceptors_ShouldWork(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - mes := mock.NewMessengerStub() - node.messenger = mes - - genericTopic, dataPool := createRequiredObjects(mes.Marshalizer()) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - switch name { - case string(TransactionTopic): - return genericTopic - case string(HeadersTopic): - return genericTopic - case string(TxBlockBodyTopic): - return genericTopic - case string(PeerChBodyTopic): - return genericTopic - case string(StateBodyTopic): - return genericTopic - } - - return nil - } - - dataPool.TransactionsCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - dataPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - dataPool.HeadersNoncesCalled = func() data.Uint64Cacher { - return &mock.Uint64CacherStub{} - } - dataPool.TxBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - dataPool.PeerChangesBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - dataPool.StateBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - - node.dataPool = dataPool - node.addrConverter = mock.AddressConverterStub{} - node.hasher = mock.HasherMock{} - node.singleSignKeyGen = &mock.SingleSignKeyGenMock{} - node.shardCoordinator = mock.NewOneShardCoordinatorMock() - - err := node.createInterceptors() - - assert.Nil(t, err) - assert.Equal(t, 5, len(node.interceptors)) -} diff --git a/node/createResolvers.go b/node/createResolvers.go deleted file mode 100644 index 9cc1d9d8c8f..00000000000 --- a/node/createResolvers.go +++ /dev/null @@ -1,138 +0,0 @@ -package node - -import ( - "github.com/ElrondNetwork/elrond-go-sandbox/data/blockchain" - "github.com/ElrondNetwork/elrond-go-sandbox/process/block" - "github.com/ElrondNetwork/elrond-go-sandbox/process/resolver" - "github.com/ElrondNetwork/elrond-go-sandbox/process/transaction" -) - -func (n *Node) createResolvers() error { - err := n.createTxResolver() - if err != nil { - return err - } - - err = n.createHdrResolver() - if err != nil { - return err - } - - err = n.createTxBlockBodyResolver() - if err != nil { - return err - } - - err = n.createPeerChBlockBodyResolver() - if err != nil { - return err - } - - err = n.createStateBlockBodyResolver() - if err != nil { - return err - } - - return nil -} - -func (n *Node) createTxResolver() error { - resolve, err := resolver.NewTopicResolver(string(TransactionTopic), n.messenger, n.marshalizer) - if err != nil { - return err - } - - txResolver, err := transaction.NewTxResolver( - resolve, - n.dataPool.Transactions(), - n.blkc.GetStorer(blockchain.TransactionUnit), - n.marshalizer) - - if err != nil { - return err - } - - n.resolvers = append(n.resolvers, txResolver) - return nil -} - -func (n *Node) createHdrResolver() error { - resolve, err := resolver.NewTopicResolver(string(HeadersTopic), n.messenger, n.marshalizer) - if err != nil { - return err - } - - hdrResolver, err := block.NewHeaderResolver( - resolve, - n.dataPool, - n.blkc.GetStorer(blockchain.BlockHeaderUnit), - n.marshalizer, - n.uint64ByteSliceConverter) - - if err != nil { - return err - } - - n.resolvers = append(n.resolvers, hdrResolver) - return nil -} - -func (n *Node) createTxBlockBodyResolver() error { - resolve, err := resolver.NewTopicResolver(string(TxBlockBodyTopic), n.messenger, n.marshalizer) - if err != nil { - return err - } - - txBlkResolver, err := block.NewGenericBlockBodyResolver( - resolve, - n.dataPool.TxBlocks(), - n.blkc.GetStorer(blockchain.TxBlockBodyUnit), - n.marshalizer) - - if err != nil { - return err - } - - n.resolvers = append(n.resolvers, txBlkResolver) - return nil -} - -func (n *Node) createPeerChBlockBodyResolver() error { - resolve, err := resolver.NewTopicResolver(string(PeerChBodyTopic), n.messenger, n.marshalizer) - if err != nil { - return err - } - - peerChBlkResolver, err := block.NewGenericBlockBodyResolver( - resolve, - n.dataPool.PeerChangesBlocks(), - n.blkc.GetStorer(blockchain.PeerBlockBodyUnit), - n.marshalizer) - - if err != nil { - return err - } - - n.resolvers = append(n.resolvers, peerChBlkResolver) - return nil -} - -func (n *Node) createStateBlockBodyResolver() error { - resolve, err := resolver.NewTopicResolver(string(StateBodyTopic), n.messenger, n.marshalizer) - if err != nil { - return err - } - - stateBlkResolver, err := block.NewGenericBlockBodyResolver( - resolve, - n.dataPool.StateBlocks(), - n.blkc.GetStorer(blockchain.StateBlockBodyUnit), - n.marshalizer) - - if err != nil { - return err - } - - n.resolvers = append(n.resolvers, stateBlkResolver) - return nil -} diff --git a/node/createResolvers_test.go b/node/createResolvers_test.go deleted file mode 100644 index 56ac6e878f4..00000000000 --- a/node/createResolvers_test.go +++ /dev/null @@ -1,787 +0,0 @@ -package node - -import ( - "testing" - - "github.com/ElrondNetwork/elrond-go-sandbox/data" - "github.com/ElrondNetwork/elrond-go-sandbox/data/blockchain" - "github.com/ElrondNetwork/elrond-go-sandbox/node/mock" - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" - "github.com/ElrondNetwork/elrond-go-sandbox/storage" - "github.com/stretchr/testify/assert" -) - -func createBlockchain() *blockchain.BlockChain { - blkc, _ := blockchain.NewBlockChain( - &mock.CacherStub{}, - &mock.StorerStub{}, - &mock.StorerStub{}, - &mock.StorerStub{}, - &mock.StorerStub{}, - &mock.StorerStub{}) - - return blkc -} - -//------- createTxResolver - -func TestCreateTxResolver_NilMessengerShouldErr(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - err := node.createTxResolver() - - assert.Equal(t, "nil Messenger", err.Error()) - assert.Equal(t, 0, len(node.resolvers)) -} - -func TestCreateTxResolver_NilHeadersDataPoolShouldErr(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - mes := mock.NewMessengerStub() - node.messenger = mes - - topic, dataPool := createRequiredObjects(mes.Marshalizer()) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - if name == string(TransactionTopic) { - return topic - } - - return nil - } - - dataPool.TransactionsCalled = func() data.ShardedDataCacherNotifier { - return nil - } - - node.dataPool = dataPool - node.blkc = createBlockchain() - node.marshalizer = mock.MarshalizerMock{} - - err := node.createTxResolver() - - assert.Equal(t, "nil transaction data pool", err.Error()) - assert.Equal(t, 0, len(node.resolvers)) -} - -func TestCreateTxResolver_ShouldWork(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - mes := mock.NewMessengerStub() - node.messenger = mes - - topic, dataPool := createRequiredObjects(mes.Marshalizer()) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - if name == string(TransactionTopic) { - return topic - } - - return nil - } - - dataPool.TransactionsCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - - node.dataPool = dataPool - node.marshalizer = mock.MarshalizerMock{} - node.blkc = createBlockchain() - - err := node.createTxResolver() - - assert.Nil(t, err) - assert.Equal(t, 1, len(node.resolvers)) -} - -//------- createHdrResolver - -func TestCreateHdrResolver_NilMessengerShouldErr(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - err := node.createHdrResolver() - - assert.Equal(t, "nil Messenger", err.Error()) - assert.Equal(t, 0, len(node.resolvers)) -} - -func TestCreateHdrResolver_NilTransactionDataPoolShouldErr(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - mes := mock.NewMessengerStub() - node.messenger = mes - - topic, dataPool := createRequiredObjects(mes.Marshalizer()) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - if name == string(HeadersTopic) { - return topic - } - - return nil - } - - dataPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - return nil - } - dataPool.HeadersNoncesCalled = func() data.Uint64Cacher { - return &mock.Uint64CacherStub{} - } - - node.dataPool = dataPool - node.blkc = createBlockchain() - node.marshalizer = mock.MarshalizerMock{} - node.uint64ByteSliceConverter = mock.NewNonceHashConverterMock() - - err := node.createHdrResolver() - - assert.Equal(t, "nil headers data pool", err.Error()) - assert.Equal(t, 0, len(node.resolvers)) -} - -func TestCreateHdrResolver_ShouldWork(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - mes := mock.NewMessengerStub() - node.messenger = mes - - topic, dataPool := createRequiredObjects(mes.Marshalizer()) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - if name == string(HeadersTopic) { - return topic - } - - return nil - } - - dataPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - dataPool.HeadersNoncesCalled = func() data.Uint64Cacher { - return &mock.Uint64CacherStub{} - } - - node.dataPool = dataPool - node.blkc = createBlockchain() - node.marshalizer = mock.MarshalizerMock{} - node.uint64ByteSliceConverter = mock.NewNonceHashConverterMock() - - err := node.createHdrResolver() - - assert.Nil(t, err) - assert.Equal(t, 1, len(node.resolvers)) -} - -//------- createTxBlockBodyResolver - -func TestCreateTxBlockBodyResolver_NilMessengerShouldErr(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - err := node.createTxBlockBodyResolver() - - assert.Equal(t, "nil Messenger", err.Error()) - assert.Equal(t, 0, len(node.resolvers)) -} - -func TestCreateTxBlockBodyResolver_NilDataPoolCacherDataPoolShouldErr(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - mes := mock.NewMessengerStub() - node.messenger = mes - - topic, dataPool := createRequiredObjects(mes.Marshalizer()) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - if name == string(TxBlockBodyTopic) { - return topic - } - - return nil - } - - dataPool.TxBlocksCalled = func() storage.Cacher { - return nil - } - - node.dataPool = dataPool - node.blkc = createBlockchain() - node.marshalizer = mock.MarshalizerMock{} - - err := node.createTxBlockBodyResolver() - - assert.Equal(t, "nil block body pool", err.Error()) - assert.Equal(t, 0, len(node.resolvers)) -} - -func TestCreateTxBlockBodyResolver_ShouldWork(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - mes := mock.NewMessengerStub() - node.messenger = mes - - topic, dataPool := createRequiredObjects(mes.Marshalizer()) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - if name == string(TxBlockBodyTopic) { - return topic - } - - return nil - } - - dataPool.TxBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - - node.dataPool = dataPool - node.blkc = createBlockchain() - node.marshalizer = mock.MarshalizerMock{} - - err := node.createTxBlockBodyResolver() - - assert.Nil(t, err) - assert.Equal(t, 1, len(node.resolvers)) -} - -//------- createPeerChBlockBodyResolver - -func TestCreatePeerChBlockBodyResolver_NilMessengerShouldErr(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - err := node.createPeerChBlockBodyResolver() - - assert.Equal(t, "nil Messenger", err.Error()) - assert.Equal(t, 0, len(node.resolvers)) -} - -func TestCreatePeerChBlockBodyResolver_NilDataPoolCacherDataPoolShouldErr(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - mes := mock.NewMessengerStub() - node.messenger = mes - - topic, dataPool := createRequiredObjects(mes.Marshalizer()) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - if name == string(PeerChBodyTopic) { - return topic - } - - return nil - } - - dataPool.PeerChangesBlocksCalled = func() storage.Cacher { - return nil - } - - node.dataPool = dataPool - node.blkc = createBlockchain() - node.marshalizer = mock.MarshalizerMock{} - - err := node.createPeerChBlockBodyResolver() - - assert.Equal(t, "nil block body pool", err.Error()) - assert.Equal(t, 0, len(node.resolvers)) -} - -func TestCreatePeerChBlockBodyResolver_ShouldWork(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - mes := mock.NewMessengerStub() - node.messenger = mes - - topic, dataPool := createRequiredObjects(mes.Marshalizer()) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - if name == string(PeerChBodyTopic) { - return topic - } - - return nil - } - - dataPool.PeerChangesBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - - node.dataPool = dataPool - node.blkc = createBlockchain() - node.marshalizer = mock.MarshalizerMock{} - - err := node.createPeerChBlockBodyResolver() - - assert.Nil(t, err) - assert.Equal(t, 1, len(node.resolvers)) -} - -//------- createStateBlockBodyResolver - -func TestCreateStateBlockBodyResolver_NilMessengerShouldErr(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - err := node.createStateBlockBodyResolver() - - assert.Equal(t, "nil Messenger", err.Error()) - assert.Equal(t, 0, len(node.resolvers)) -} - -func TestCreateStateBlockBodyResolver_NilDataPoolCacherDataPoolShouldErr(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - mes := mock.NewMessengerStub() - node.messenger = mes - - topic, dataPool := createRequiredObjects(mes.Marshalizer()) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - if name == string(StateBodyTopic) { - return topic - } - - return nil - } - - dataPool.StateBlocksCalled = func() storage.Cacher { - return nil - } - - node.dataPool = dataPool - node.blkc = createBlockchain() - node.marshalizer = mock.MarshalizerMock{} - - err := node.createStateBlockBodyResolver() - - assert.Equal(t, "nil block body pool", err.Error()) - assert.Equal(t, 0, len(node.resolvers)) -} - -func TestCreateStateBlockBodyResolver_ShouldWork(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - mes := mock.NewMessengerStub() - node.messenger = mes - - topic, dataPool := createRequiredObjects(mes.Marshalizer()) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - if name == string(StateBodyTopic) { - return topic - } - - return nil - } - - dataPool.StateBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - - node.dataPool = dataPool - node.blkc = createBlockchain() - node.marshalizer = mock.MarshalizerMock{} - - err := node.createStateBlockBodyResolver() - - assert.Nil(t, err) - assert.Equal(t, 1, len(node.resolvers)) -} - -//------- createResolvers - -func TestCreateResolvers_NilTransactionsShouldErr(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - mes := mock.NewMessengerStub() - node.messenger = mes - - topicHdr := p2p.NewTopic("", &mock.StringCreatorMock{}, mock.MarshalizerMock{}) - topicTxBlk := p2p.NewTopic("", &mock.StringCreatorMock{}, mock.MarshalizerMock{}) - topicPeerBlk := p2p.NewTopic("", &mock.StringCreatorMock{}, mock.MarshalizerMock{}) - topicStateBlk := p2p.NewTopic("", &mock.StringCreatorMock{}, mock.MarshalizerMock{}) - - dataPool := &mock.TransientDataPoolMock{} - - mes.GetTopicCalled = func(name string) *p2p.Topic { - switch name { - case string(TransactionTopic): - return nil - case string(HeadersTopic): - return topicHdr - case string(TxBlockBodyTopic): - return topicTxBlk - case string(PeerChBodyTopic): - return topicPeerBlk - case string(StateBodyTopic): - return topicStateBlk - } - - return nil - } - - dataPool.TransactionsCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - dataPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - dataPool.HeadersNoncesCalled = func() data.Uint64Cacher { - return &mock.Uint64CacherStub{} - } - dataPool.TxBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - dataPool.PeerChangesBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - dataPool.StateBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - - node.dataPool = dataPool - node.marshalizer = mock.MarshalizerMock{} - node.blkc = createBlockchain() - node.uint64ByteSliceConverter = mock.NewNonceHashConverterMock() - - err := node.createResolvers() - - assert.Equal(t, "nil topic", err.Error()) -} - -func TestCreateResolvers_NilHeadersShouldErr(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - mes := mock.NewMessengerStub() - node.messenger = mes - - topicTx := p2p.NewTopic("", &mock.StringCreatorMock{}, mock.MarshalizerMock{}) - topicTxBlk := p2p.NewTopic("", &mock.StringCreatorMock{}, mock.MarshalizerMock{}) - topicPeerBlk := p2p.NewTopic("", &mock.StringCreatorMock{}, mock.MarshalizerMock{}) - topicStateBlk := p2p.NewTopic("", &mock.StringCreatorMock{}, mock.MarshalizerMock{}) - - dataPool := &mock.TransientDataPoolMock{} - - mes.GetTopicCalled = func(name string) *p2p.Topic { - switch name { - case string(TransactionTopic): - return topicTx - case string(HeadersTopic): - return nil - case string(TxBlockBodyTopic): - return topicTxBlk - case string(PeerChBodyTopic): - return topicPeerBlk - case string(StateBodyTopic): - return topicStateBlk - } - - return nil - } - - dataPool.TransactionsCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - dataPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - dataPool.HeadersNoncesCalled = func() data.Uint64Cacher { - return &mock.Uint64CacherStub{} - } - dataPool.TxBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - dataPool.PeerChangesBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - dataPool.StateBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - - node.dataPool = dataPool - node.marshalizer = mock.MarshalizerMock{} - node.blkc = createBlockchain() - node.uint64ByteSliceConverter = mock.NewNonceHashConverterMock() - - err := node.createResolvers() - - assert.Equal(t, "nil topic", err.Error()) -} - -func TestCreateResolvers_NilTxBlocksShouldErr(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - mes := mock.NewMessengerStub() - node.messenger = mes - - topicTx := p2p.NewTopic("", &mock.StringCreatorMock{}, mock.MarshalizerMock{}) - topicHdr := p2p.NewTopic("", &mock.StringCreatorMock{}, mock.MarshalizerMock{}) - topicPeerBlk := p2p.NewTopic("", &mock.StringCreatorMock{}, mock.MarshalizerMock{}) - topicStateBlk := p2p.NewTopic("", &mock.StringCreatorMock{}, mock.MarshalizerMock{}) - - dataPool := &mock.TransientDataPoolMock{} - - mes.GetTopicCalled = func(name string) *p2p.Topic { - switch name { - case string(TransactionTopic): - return topicTx - case string(HeadersTopic): - return topicHdr - case string(TxBlockBodyTopic): - return nil - case string(PeerChBodyTopic): - return topicPeerBlk - case string(StateBodyTopic): - return topicStateBlk - } - - return nil - } - - dataPool.TransactionsCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - dataPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - dataPool.HeadersNoncesCalled = func() data.Uint64Cacher { - return &mock.Uint64CacherStub{} - } - dataPool.TxBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - dataPool.PeerChangesBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - dataPool.StateBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - - node.dataPool = dataPool - node.marshalizer = mock.MarshalizerMock{} - node.blkc = createBlockchain() - node.uint64ByteSliceConverter = mock.NewNonceHashConverterMock() - - err := node.createResolvers() - - assert.Equal(t, "nil topic", err.Error()) -} - -func TestCreateResolvers_NilPeerBlocksShouldErr(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - mes := mock.NewMessengerStub() - node.messenger = mes - - topicTx := p2p.NewTopic("", &mock.StringCreatorMock{}, mock.MarshalizerMock{}) - topicHdr := p2p.NewTopic("", &mock.StringCreatorMock{}, mock.MarshalizerMock{}) - topicTxBlk := p2p.NewTopic("", &mock.StringCreatorMock{}, mock.MarshalizerMock{}) - topicStateBlk := p2p.NewTopic("", &mock.StringCreatorMock{}, mock.MarshalizerMock{}) - - dataPool := &mock.TransientDataPoolMock{} - - mes.GetTopicCalled = func(name string) *p2p.Topic { - switch name { - case string(TransactionTopic): - return topicTx - case string(HeadersTopic): - return topicHdr - case string(TxBlockBodyTopic): - return topicTxBlk - case string(PeerChBodyTopic): - return nil - case string(StateBodyTopic): - return topicStateBlk - } - - return nil - } - - dataPool.TransactionsCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - dataPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - dataPool.HeadersNoncesCalled = func() data.Uint64Cacher { - return &mock.Uint64CacherStub{} - } - dataPool.TxBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - dataPool.PeerChangesBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - dataPool.StateBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - - node.dataPool = dataPool - node.marshalizer = mock.MarshalizerMock{} - node.blkc = createBlockchain() - node.uint64ByteSliceConverter = mock.NewNonceHashConverterMock() - - err := node.createResolvers() - - assert.Equal(t, "nil topic", err.Error()) -} - -func TestCreateResolvers_NilStateBlocksShouldErr(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - mes := mock.NewMessengerStub() - node.messenger = mes - - topicTx := p2p.NewTopic("", &mock.StringCreatorMock{}, mock.MarshalizerMock{}) - topicHdr := p2p.NewTopic("", &mock.StringCreatorMock{}, mock.MarshalizerMock{}) - topicTxBlk := p2p.NewTopic("", &mock.StringCreatorMock{}, mock.MarshalizerMock{}) - topicPeerBlk := p2p.NewTopic("", &mock.StringCreatorMock{}, mock.MarshalizerMock{}) - - dataPool := &mock.TransientDataPoolMock{} - - mes.GetTopicCalled = func(name string) *p2p.Topic { - switch name { - case string(TransactionTopic): - return topicTx - case string(HeadersTopic): - return topicHdr - case string(TxBlockBodyTopic): - return topicTxBlk - case string(PeerChBodyTopic): - return topicPeerBlk - case string(StateBodyTopic): - return nil - } - - return nil - } - - dataPool.TransactionsCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - dataPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - dataPool.HeadersNoncesCalled = func() data.Uint64Cacher { - return &mock.Uint64CacherStub{} - } - dataPool.TxBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - dataPool.PeerChangesBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - dataPool.StateBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - - node.dataPool = dataPool - node.marshalizer = mock.MarshalizerMock{} - node.blkc = createBlockchain() - node.uint64ByteSliceConverter = mock.NewNonceHashConverterMock() - - err := node.createResolvers() - - assert.Equal(t, "nil topic", err.Error()) -} - -func TestCreateResolvers_ShouldWork(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - mes := mock.NewMessengerStub() - node.messenger = mes - - topicTx := p2p.NewTopic("", &mock.StringCreatorMock{}, mock.MarshalizerMock{}) - topicHdr := p2p.NewTopic("", &mock.StringCreatorMock{}, mock.MarshalizerMock{}) - topicTxBlk := p2p.NewTopic("", &mock.StringCreatorMock{}, mock.MarshalizerMock{}) - topicPeerBlk := p2p.NewTopic("", &mock.StringCreatorMock{}, mock.MarshalizerMock{}) - topicStateBlk := p2p.NewTopic("", &mock.StringCreatorMock{}, mock.MarshalizerMock{}) - - dataPool := &mock.TransientDataPoolMock{} - - mes.GetTopicCalled = func(name string) *p2p.Topic { - switch name { - case string(TransactionTopic): - return topicTx - case string(HeadersTopic): - return topicHdr - case string(TxBlockBodyTopic): - return topicTxBlk - case string(PeerChBodyTopic): - return topicPeerBlk - case string(StateBodyTopic): - return topicStateBlk - } - - return nil - } - - dataPool.TransactionsCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - dataPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - dataPool.HeadersNoncesCalled = func() data.Uint64Cacher { - return &mock.Uint64CacherStub{} - } - dataPool.TxBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - dataPool.PeerChangesBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - dataPool.StateBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - - node.dataPool = dataPool - node.marshalizer = mock.MarshalizerMock{} - node.blkc = createBlockchain() - node.uint64ByteSliceConverter = mock.NewNonceHashConverterMock() - - err := node.createResolvers() - - assert.Nil(t, err) - assert.Equal(t, 5, len(node.resolvers)) -} diff --git a/node/defineOptions.go b/node/defineOptions.go index 3670463819b..ea919f658e2 100644 --- a/node/defineOptions.go +++ b/node/defineOptions.go @@ -18,10 +18,13 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/sharding" ) -// WithPort sets up the port option for the Node -func WithPort(port int) Option { +// WithMessenger sets up the messenger option for the Node +func WithMessenger(mes p2p.Messenger) Option { return func(n *Node) error { - n.port = port + if mes == nil { + return ErrNilMessenger + } + n.messenger = mes return nil } } @@ -30,7 +33,7 @@ func WithPort(port int) Option { func WithMarshalizer(marshalizer marshal.Marshalizer) Option { return func(n *Node) error { if marshalizer == nil { - return errNilMarshalizer + return ErrNilMarshalizer } n.marshalizer = marshalizer return nil @@ -41,7 +44,7 @@ func WithMarshalizer(marshalizer marshal.Marshalizer) Option { func WithContext(ctx context.Context) Option { return func(n *Node) error { if ctx == nil { - return errNilContext + return ErrNilContext } n.ctx = ctx return nil @@ -52,34 +55,18 @@ func WithContext(ctx context.Context) Option { func WithHasher(hasher hashing.Hasher) Option { return func(n *Node) error { if hasher == nil { - return errNilHasher + return ErrNilHasher } n.hasher = hasher return nil } } -// WithMaxAllowedPeers sets up the maxAllowedPeers option for the Node -func WithMaxAllowedPeers(maxAllowedPeers int) Option { - return func(n *Node) error { - n.maxAllowedPeers = maxAllowedPeers - return nil - } -} - -// WithPubSubStrategy sets up the strategy option for the Node -func WithPubSubStrategy(strategy p2p.PubSubStrategy) Option { - return func(n *Node) error { - n.pubSubStrategy = strategy - return nil - } -} - // WithAccountsAdapter sets up the accounts adapter option for the Node func WithAccountsAdapter(accounts state.AccountsAdapter) Option { return func(n *Node) error { if accounts == nil { - return errNilAccountsAdapter + return ErrNilAccountsAdapter } n.accounts = accounts return nil @@ -90,7 +77,7 @@ func WithAccountsAdapter(accounts state.AccountsAdapter) Option { func WithAddressConverter(addrConverter state.AddressConverter) Option { return func(n *Node) error { if addrConverter == nil { - return errNilAddressConverter + return ErrNilAddressConverter } n.addrConverter = addrConverter return nil @@ -101,7 +88,7 @@ func WithAddressConverter(addrConverter state.AddressConverter) Option { func WithBlockChain(blkc *blockchain.BlockChain) Option { return func(n *Node) error { if blkc == nil { - return errNilBlockchain + return ErrNilBlockchain } n.blkc = blkc return nil @@ -112,7 +99,7 @@ func WithBlockChain(blkc *blockchain.BlockChain) Option { func WithPrivateKey(sk crypto.PrivateKey) Option { return func(n *Node) error { if sk == nil { - return errNilPrivateKey + return ErrNilPrivateKey } n.privateKey = sk return nil @@ -123,7 +110,7 @@ func WithPrivateKey(sk crypto.PrivateKey) Option { func WithSingleSignKeyGenerator(keyGen crypto.KeyGenerator) Option { return func(n *Node) error { if keyGen == nil { - return errNilSingleSignKeyGen + return ErrNilSingleSignKeyGen } n.singleSignKeyGen = keyGen return nil @@ -142,7 +129,7 @@ func WithInitialNodesPubKeys(pubKeys []string) Option { func WithPublicKey(pk crypto.PublicKey) Option { return func(n *Node) error { if pk == nil { - return errNilPublicKey + return ErrNilPublicKey } n.publicKey = pk @@ -154,7 +141,7 @@ func WithPublicKey(pk crypto.PublicKey) Option { func WithRoundDuration(roundDuration uint64) Option { return func(n *Node) error { if roundDuration == 0 { - return errZeroRoundDurationNotSupported + return ErrZeroRoundDurationNotSupported } n.roundDuration = roundDuration return nil @@ -165,7 +152,7 @@ func WithRoundDuration(roundDuration uint64) Option { func WithConsensusGroupSize(consensusGroupSize int) Option { return func(n *Node) error { if consensusGroupSize < 1 { - return errNegativeOrZeroConsensusGroupSize + return ErrNegativeOrZeroConsensusGroupSize } n.consensusGroupSize = consensusGroupSize return nil @@ -176,7 +163,7 @@ func WithConsensusGroupSize(consensusGroupSize int) Option { func WithSyncer(syncer ntp.SyncTimer) Option { return func(n *Node) error { if syncer == nil { - return errNilSyncTimer + return ErrNilSyncTimer } n.syncer = syncer return nil @@ -187,7 +174,7 @@ func WithSyncer(syncer ntp.SyncTimer) Option { func WithBlockProcessor(blockProcessor process.BlockProcessor) Option { return func(n *Node) error { if blockProcessor == nil { - return errNilBlockProcessor + return ErrNilBlockProcessor } n.blockProcessor = blockProcessor return nil @@ -214,7 +201,7 @@ func WithElasticSubrounds(elasticSubrounds bool) Option { func WithDataPool(dataPool data.TransientDataHolder) Option { return func(n *Node) error { if dataPool == nil { - return errNilDataPool + return ErrNilDataPool } n.dataPool = dataPool return nil @@ -225,7 +212,7 @@ func WithDataPool(dataPool data.TransientDataHolder) Option { func WithShardCoordinator(shardCoordinator sharding.ShardCoordinator) Option { return func(n *Node) error { if shardCoordinator == nil { - return errNilShardCoordinator + return ErrNilShardCoordinator } n.shardCoordinator = shardCoordinator return nil @@ -236,7 +223,7 @@ func WithShardCoordinator(shardCoordinator sharding.ShardCoordinator) Option { func WithUint64ByteSliceConverter(converter typeConverters.Uint64ByteSliceConverter) Option { return func(n *Node) error { if converter == nil { - return errNilUint64ByteSliceConverter + return ErrNilUint64ByteSliceConverter } n.uint64ByteSliceConverter = converter return nil @@ -244,10 +231,10 @@ func WithUint64ByteSliceConverter(converter typeConverters.Uint64ByteSliceConver } // WithInitialNodesBalances sets up the initial map of nodes public keys and their respective balances -func WithInitialNodesBalances(balances map[string]big.Int) Option { +func WithInitialNodesBalances(balances map[string]*big.Int) Option { return func(n *Node) error { if balances == nil { - return errNilBalances + return ErrNilBalances } n.initialNodesBalances = balances return nil @@ -258,9 +245,31 @@ func WithInitialNodesBalances(balances map[string]big.Int) Option { func WithMultisig(multisig crypto.MultiSigner) Option { return func(n *Node) error { if multisig == nil { - return errNilMultiSig + return ErrNilMultiSig } n.multisig = multisig return nil } } + +// WithForkDetector sets up the multisig option for the Node +func WithForkDetector(forkDetector process.ForkDetector) Option { + return func(n *Node) error { + if forkDetector == nil { + return ErrNilForkDetector + } + n.forkDetector = forkDetector + return nil + } +} + +// WithProcessorCreator sets up the processor factory option for the Node +func WithProcessorCreator(processorCreator process.ProcessorFactory) Option { + return func(n *Node) error { + if processorCreator == nil { + return ErrNilForkDetector + } + n.processorCreator = processorCreator + return nil + } +} diff --git a/node/defineOptions_test.go b/node/defineOptions_test.go index e1816e3c47f..b6561942285 100644 --- a/node/defineOptions_test.go +++ b/node/defineOptions_test.go @@ -8,24 +8,9 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/data/blockchain" "github.com/ElrondNetwork/elrond-go-sandbox/node/mock" - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" "github.com/stretchr/testify/assert" ) -func TestWithPort(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - port := 4455 - - opt := WithPort(port) - err := opt(node) - - assert.Equal(t, port, node.port) - assert.Nil(t, err) -} - func TestWithMarshalizer_NilMarshalizerShouldErr(t *testing.T) { t.Parallel() @@ -35,7 +20,7 @@ func TestWithMarshalizer_NilMarshalizerShouldErr(t *testing.T) { err := opt(node) assert.Nil(t, node.marshalizer) - assert.Equal(t, errNilMarshalizer, err) + assert.Equal(t, ErrNilMarshalizer, err) } func TestWithMarshalizer_ShouldWork(t *testing.T) { @@ -61,7 +46,7 @@ func TestWithContext_NilContextShouldErr(t *testing.T) { err := opt(node) assert.Equal(t, context.Background(), node.ctx) - assert.Equal(t, errNilContext, err) + assert.Equal(t, ErrNilContext, err) } func TestWithContext_ShouldWork(t *testing.T) { @@ -87,7 +72,7 @@ func TestWithHasher_NilHasherShouldErr(t *testing.T) { err := opt(node) assert.Nil(t, node.hasher) - assert.Equal(t, errNilHasher, err) + assert.Equal(t, ErrNilHasher, err) } func TestWithHasher_ShouldWork(t *testing.T) { @@ -104,34 +89,6 @@ func TestWithHasher_ShouldWork(t *testing.T) { assert.Nil(t, err) } -func TestWithMaxAllowedPeers(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - maxAllowedPeers := 456 - - opt := WithMaxAllowedPeers(maxAllowedPeers) - err := opt(node) - - assert.Equal(t, maxAllowedPeers, node.maxAllowedPeers) - assert.Nil(t, err) -} - -func TestWithPubSubStrategy(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - pubStrategy := p2p.PubSubStrategy(p2p.GossipSub) - - opt := WithPubSubStrategy(pubStrategy) - err := opt(node) - - assert.Equal(t, pubStrategy, node.pubSubStrategy) - assert.Nil(t, err) -} - func TestWithAccountsAdapter_NilAccountsShouldErr(t *testing.T) { t.Parallel() @@ -141,7 +98,7 @@ func TestWithAccountsAdapter_NilAccountsShouldErr(t *testing.T) { err := opt(node) assert.Nil(t, node.accounts) - assert.Equal(t, errNilAccountsAdapter, err) + assert.Equal(t, ErrNilAccountsAdapter, err) } func TestWithAccountsAdapter_ShouldWork(t *testing.T) { @@ -167,7 +124,7 @@ func TestWithAddressConverter_NilConverterShouldErr(t *testing.T) { err := opt(node) assert.Nil(t, node.addrConverter) - assert.Equal(t, errNilAddressConverter, err) + assert.Equal(t, ErrNilAddressConverter, err) } func TestWithAddressConverter_ShouldWork(t *testing.T) { @@ -193,7 +150,7 @@ func TestWithBlockChain_NilBlockchainrShouldErr(t *testing.T) { err := opt(node) assert.Nil(t, node.blkc) - assert.Equal(t, errNilBlockchain, err) + assert.Equal(t, ErrNilBlockchain, err) } func TestWithBlockChain_ShouldWork(t *testing.T) { @@ -225,7 +182,7 @@ func TestWithPrivateKey_NilPrivateKeyShouldErr(t *testing.T) { err := opt(node) assert.Nil(t, node.privateKey) - assert.Equal(t, errNilPrivateKey, err) + assert.Equal(t, ErrNilPrivateKey, err) } func TestWithPrivateKey_ShouldWork(t *testing.T) { @@ -251,7 +208,7 @@ func TestWithSingleSignKeyGenerator_NilPrivateKeyShouldErr(t *testing.T) { err := opt(node) assert.Nil(t, node.singleSignKeyGen) - assert.Equal(t, errNilSingleSignKeyGen, err) + assert.Equal(t, ErrNilSingleSignKeyGen, err) } func TestWithSingleSignKeyGenerator_ShouldWork(t *testing.T) { @@ -305,7 +262,7 @@ func TestWithPublicKey_NilPublicKeyShouldErr(t *testing.T) { err := opt(node) assert.Nil(t, node.publicKey) - assert.Equal(t, errNilPublicKey, err) + assert.Equal(t, ErrNilPublicKey, err) } func TestWithPublicKey_ShouldWork(t *testing.T) { @@ -331,7 +288,7 @@ func TestWithRoundDuration_ZeroDurationShouldErr(t *testing.T) { err := opt(node) assert.Equal(t, uint64(0), node.roundDuration) - assert.Equal(t, errZeroRoundDurationNotSupported, err) + assert.Equal(t, ErrZeroRoundDurationNotSupported, err) } func TestWithRoundDuration_ShouldWork(t *testing.T) { @@ -357,7 +314,7 @@ func TestWithConsensusGroupSize_NegativeGroupSizeShouldErr(t *testing.T) { err := opt(node) assert.Equal(t, 0, node.consensusGroupSize) - assert.Equal(t, errNegativeOrZeroConsensusGroupSize, err) + assert.Equal(t, ErrNegativeOrZeroConsensusGroupSize, err) } func TestWithConsensusGroupSize_ShouldWork(t *testing.T) { @@ -383,7 +340,7 @@ func TestWithSyncer_NilSyncerShouldErr(t *testing.T) { err := opt(node) assert.Nil(t, node.syncer) - assert.Equal(t, errNilSyncTimer, err) + assert.Equal(t, ErrNilSyncTimer, err) } func TestWithSyncer_ShouldWork(t *testing.T) { @@ -409,7 +366,7 @@ func TestWithBlockProcessor_NilProcessorShouldErr(t *testing.T) { err := opt(node) assert.Nil(t, node.syncer) - assert.Equal(t, errNilBlockProcessor, err) + assert.Equal(t, ErrNilBlockProcessor, err) } func TestWithBlockProcessor_ShouldWork(t *testing.T) { @@ -473,7 +430,7 @@ func TestWithDataPool_NilDataPoolShouldErr(t *testing.T) { err := opt(node) assert.Nil(t, node.dataPool) - assert.Equal(t, errNilDataPool, err) + assert.Equal(t, ErrNilDataPool, err) } func TestWithDataPool_ShouldWork(t *testing.T) { @@ -499,7 +456,7 @@ func TestWithShardCoordinator_NilShardCoordinatorShouldErr(t *testing.T) { err := opt(node) assert.Nil(t, node.shardCoordinator) - assert.Equal(t, errNilShardCoordinator, err) + assert.Equal(t, ErrNilShardCoordinator, err) } func TestWithShardCoordinator_ShouldWork(t *testing.T) { @@ -525,7 +482,7 @@ func TestWithUint64ByteSliceConverter_NilConverterShouldErr(t *testing.T) { err := opt(node) assert.Nil(t, node.uint64ByteSliceConverter) - assert.Equal(t, errNilUint64ByteSliceConverter, err) + assert.Equal(t, ErrNilUint64ByteSliceConverter, err) } func TestWithUint64ByteSliceConverter_ShouldWork(t *testing.T) { @@ -551,7 +508,7 @@ func TestWithInitialNodesBalances_NilBalancesShouldErr(t *testing.T) { err := opt(node) assert.Nil(t, node.initialNodesBalances) - assert.Equal(t, errNilBalances, err) + assert.Equal(t, ErrNilBalances, err) } func TestWithInitialNodesBalances_ShouldWork(t *testing.T) { @@ -559,9 +516,9 @@ func TestWithInitialNodesBalances_ShouldWork(t *testing.T) { node, _ := NewNode() - balances := map[string]big.Int{ - "pk1": *big.NewInt(45), - "pk2": *big.NewInt(56), + balances := map[string]*big.Int{ + "pk1": big.NewInt(45), + "pk2": big.NewInt(56), } opt := WithInitialNodesBalances(balances) @@ -580,7 +537,7 @@ func TestWithMultisig_NilMultisigShouldErr(t *testing.T) { err := opt(node) assert.Nil(t, node.multisig) - assert.Equal(t, errNilMultiSig, err) + assert.Equal(t, ErrNilMultiSig, err) } func TestWithMultisig_ShouldWork(t *testing.T) { @@ -596,3 +553,28 @@ func TestWithMultisig_ShouldWork(t *testing.T) { assert.True(t, node.multisig == multisigner) assert.Nil(t, err) } + +func TestWithForkDetector_shouldWork(t *testing.T) { + t.Parallel() + + node, _ := NewNode() + + forkDetector := &mock.ForkDetectorMock{} + opt := WithForkDetector(forkDetector) + err := opt(node) + + assert.True(t, node.forkDetector == forkDetector) + assert.Nil(t, err) +} + +func TestWithForkDetector_NilForkDetectorShouldErr(t *testing.T) { + t.Parallel() + + node, _ := NewNode() + + opt := WithForkDetector(nil) + err := opt(node) + + assert.Nil(t, node.forkDetector) + assert.Equal(t, ErrNilForkDetector, err) +} diff --git a/node/errors.go b/node/errors.go index c9509f20285..f8aa34a7385 100644 --- a/node/errors.go +++ b/node/errors.go @@ -4,42 +4,62 @@ import ( "errors" ) -var errNodeNotStarted = errors.New("node is not started yet") +// ErrNilMarshalizer signals that a nil marshalizer has been provided +var ErrNilMarshalizer = errors.New("trying to set nil marshalizer") -var errNilMarshalizer = errors.New("trying to set nil marshalizer") +// ErrNilMessenger signals that a nil messenger has been provided +var ErrNilMessenger = errors.New("nil messenger") -var errNilContext = errors.New("trying to set nil context") +// ErrNilContext signals that a nil context has been provided +var ErrNilContext = errors.New("trying to set nil context") -var errNilHasher = errors.New("trying to set nil hasher") +// ErrNilHasher signals that a nil hasher has been provided +var ErrNilHasher = errors.New("trying to set nil hasher") -var errNilAccountsAdapter = errors.New("trying to set nil accounts adapter") +// ErrNilAccountsAdapter signals that a nil accounts adapter has been provided +var ErrNilAccountsAdapter = errors.New("trying to set nil accounts adapter") -var errNilAddressConverter = errors.New("trying to set nil address converter") +// ErrNilAddressConverter signals that a nil address converter has been provided +var ErrNilAddressConverter = errors.New("trying to set nil address converter") -var errNilBlockchain = errors.New("nil blockchain") +// ErrNilBlockchain signals that a nil blockchain structure has been provided +var ErrNilBlockchain = errors.New("nil blockchain") -var errNilPrivateKey = errors.New("trying to set nil private key") +// ErrNilPrivateKey signals that a nil private key has been provided +var ErrNilPrivateKey = errors.New("trying to set nil private key") -var errNilSingleSignKeyGen = errors.New("trying to set nil single sign key generator") +// ErrNilSingleSignKeyGen signals that a nil single key generator has been provided +var ErrNilSingleSignKeyGen = errors.New("trying to set nil single sign key generator") -var errNilPublicKey = errors.New("trying to set nil public key") +// ErrNilPublicKey signals that a nil public key has been provided +var ErrNilPublicKey = errors.New("trying to set nil public key") -var errZeroRoundDurationNotSupported = errors.New("0 round duration time is not supported") +// ErrZeroRoundDurationNotSupported signals that 0 seconds round duration is not supported +var ErrZeroRoundDurationNotSupported = errors.New("0 round duration time is not supported") -var errNegativeOrZeroConsensusGroupSize = errors.New("group size should be a strict positive number") +// ErrNegativeOrZeroConsensusGroupSize signals that 0 elements consensus group is not supported +var ErrNegativeOrZeroConsensusGroupSize = errors.New("group size should be a strict positive number") -var errNilSyncTimer = errors.New("trying to set nil sync timer") +// ErrNilSyncTimer signals that a nil sync timer has been provided +var ErrNilSyncTimer = errors.New("trying to set nil sync timer") -var errNilBlockProcessor = errors.New("trying to set nil block processor") +// ErrNilBlockProcessor signals that a nil block processor has been provided +var ErrNilBlockProcessor = errors.New("trying to set nil block processor") -var errNilDataPool = errors.New("trying to set nil data pool") +// ErrNilDataPool signals that a nil data pool has been provided +var ErrNilDataPool = errors.New("trying to set nil data pool") -var errNilShardCoordinator = errors.New("trying to set nil shard coordinator") +// ErrNilShardCoordinator signals that a nil shard coordinator has been provided +var ErrNilShardCoordinator = errors.New("trying to set nil shard coordinator") -var errNilUint64ByteSliceConverter = errors.New("trying to set nil uint64 - byte slice converter") +// ErrNilUint64ByteSliceConverter signals that a nil uint64 <-> byte slice converter has been provided +var ErrNilUint64ByteSliceConverter = errors.New("trying to set nil uint64 - byte slice converter") -var errNilBalances = errors.New("trying to set nil balances") +// ErrNilBalances signals that a nil list of initial balances has been provided +var ErrNilBalances = errors.New("trying to set nil balances") -var errNilMultiSig = errors.New("trying to set nil multisig") +// ErrNilMultiSig signals that a nil multisig object has been provided +var ErrNilMultiSig = errors.New("trying to set nil multisig") -var errNilSposWorker = errors.New("nil spos worker") +// ErrNilForkDetector signals that a nil forkdetector object has been provided +var ErrNilForkDetector = errors.New("nil fork detector") diff --git a/node/export_test.go b/node/export_test.go index 3e1f2ff17e4..7e202bfff40 100644 --- a/node/export_test.go +++ b/node/export_test.go @@ -1,40 +1,9 @@ package node import ( - "github.com/ElrondNetwork/elrond-go-sandbox/consensus/spos" - "github.com/ElrondNetwork/elrond-go-sandbox/data/block" "github.com/ElrondNetwork/elrond-go-sandbox/p2p" - "github.com/ElrondNetwork/elrond-go-sandbox/process" ) func (n *Node) SetMessenger(mes p2p.Messenger) { n.messenger = mes } - -func (n *Node) Interceptors() []process.Interceptor { - return n.interceptors -} - -func (n *Node) Resolvers() []process.Resolver { - return n.resolvers -} - -func (n *Node) ComputeNewNoncePrevHash( - sposWrk *spos.SPOSConsensusWorker, - hdr *block.Header, - txBlock *block.TxBlockBody, - prevHash []byte) (uint64, []byte, []byte, error) { - - return n.computeNewNoncePrevHash(sposWrk, hdr, txBlock, prevHash) -} - -func (n *Node) DisplayLogInfo( - header *block.Header, - txBlock *block.TxBlockBody, - headerHash []byte, - prevHash []byte, - sposWrk *spos.SPOSConsensusWorker, - blockHash []byte, -) { - n.displayLogInfo(header, txBlock, headerHash, prevHash, sposWrk, blockHash) -} diff --git a/node/mock/addressConverterFake.go b/node/mock/addressConverterFake.go new file mode 100644 index 00000000000..1364f1b7a50 --- /dev/null +++ b/node/mock/addressConverterFake.go @@ -0,0 +1,67 @@ +package mock + +import ( + "encoding/hex" + "strings" + + "github.com/ElrondNetwork/elrond-go-sandbox/data/state" + "github.com/pkg/errors" +) + +type AddressConverterFake struct { + addressLen int + prefix string +} + +func NewAddressConverterFake(addressLen int, prefix string) *AddressConverterFake { + return &AddressConverterFake{ + addressLen: addressLen, + prefix: prefix, + } +} + +func (acf *AddressConverterFake) CreateAddressFromPublicKeyBytes(pubKey []byte) (state.AddressContainer, error) { + newPubKey := make([]byte, len(pubKey)) + copy(newPubKey, pubKey) + + //check size, trimming as necessary + if len(newPubKey) > acf.addressLen { + newPubKey = newPubKey[len(newPubKey)-acf.addressLen:] + } + + return state.NewAddress(newPubKey), nil +} + +func (acf *AddressConverterFake) ConvertToHex(addressContainer state.AddressContainer) (string, error) { + return acf.prefix + hex.EncodeToString(addressContainer.Bytes()), nil +} + +func (acf *AddressConverterFake) CreateAddressFromHex(hexAddress string) (state.AddressContainer, error) { + + //to lower + hexAddress = strings.ToLower(hexAddress) + + //check if it has prefix, trimming as necessary + if strings.HasPrefix(hexAddress, strings.ToLower(acf.prefix)) { + hexAddress = hexAddress[len(acf.prefix):] + } + + //check lengths + if len(hexAddress) != acf.addressLen*2 { + return nil, errors.New("wrong size") + } + + //decode hex + buff := make([]byte, acf.addressLen) + _, err := hex.Decode(buff, []byte(hexAddress)) + + if err != nil { + return nil, err + } + + return state.NewAddress(buff), nil +} + +func (acf *AddressConverterFake) PrepareAddressBytes(addressBytes []byte) ([]byte, error) { + return addressBytes, nil +} diff --git a/node/mock/blockProcessorStub.go b/node/mock/blockProcessorStub.go index 8a37b18a1f3..5ee2d16908d 100644 --- a/node/mock/blockProcessorStub.go +++ b/node/mock/blockProcessorStub.go @@ -2,6 +2,7 @@ package mock import ( "math/big" + "time" "github.com/ElrondNetwork/elrond-go-sandbox/data/block" "github.com/ElrondNetwork/elrond-go-sandbox/data/blockchain" @@ -10,7 +11,11 @@ import ( type BlockProcessorStub struct { } -func (bps *BlockProcessorStub) ProcessAndCommit(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody) error { +func (bps *BlockProcessorStub) SetOnRequestTransaction(f func(destShardID uint32, txHash []byte)) { + panic("implement me") +} + +func (bps *BlockProcessorStub) ProcessAndCommit(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody, haveTime func() time.Duration) error { panic("implement me") } @@ -22,11 +27,11 @@ func (bps *BlockProcessorStub) RevertAccountState() { panic("implement me") } -func (bps *BlockProcessorStub) ProcessBlock(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody) error { +func (bps *BlockProcessorStub) ProcessBlock(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody, haveTime func() time.Duration) error { panic("implement me") } -func (bps *BlockProcessorStub) CreateGenesisBlockBody(balances map[string]big.Int, shardId uint32) *block.StateBlockBody { +func (bps *BlockProcessorStub) CreateGenesisBlockBody(balances map[string]*big.Int, shardId uint32) (*block.StateBlockBody, error) { panic("implement me") } @@ -34,6 +39,10 @@ func (bps *BlockProcessorStub) CreateTxBlockBody(shardId uint32, maxTxInBlock in panic("implement me") } +func (bps *BlockProcessorStub) CreateEmptyBlockBody(shardId uint32, round int32) *block.TxBlockBody { + panic("implement me") +} + func (bps *BlockProcessorStub) RemoveBlockTxsFromPool(body *block.TxBlockBody) error { panic("implement me") } diff --git a/node/mock/forkDetectorMock.go b/node/mock/forkDetectorMock.go new file mode 100644 index 00000000000..0ef34f5d5d5 --- /dev/null +++ b/node/mock/forkDetectorMock.go @@ -0,0 +1,32 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go-sandbox/data/block" +) + +// ForkDetectorMock is a mock implementation for the ForkDetector interface +type ForkDetectorMock struct { + RemoveHeadersCalled func(nonce uint64) + AddHeaderCalled func(header *block.Header, hash []byte, isReceived bool) error + RemoveHeaderCalled func(nonce uint64) + CheckForkCalled func() bool +} + +func (f *ForkDetectorMock) RemoveHeaders(nonce uint64) { + f.RemoveHeadersCalled(nonce) +} + +// AddHeader is a mock implementation for AddHeader +func (f *ForkDetectorMock) AddHeader(header *block.Header, hash []byte, isReceived bool) error { + return f.AddHeaderCalled(header, hash, isReceived) +} + +// RemoveHeader is a mock implementation for RemoveHeader +func (f *ForkDetectorMock) RemoveHeader(nonce uint64) { + f.RemoveHeaderCalled(nonce) +} + +// CheckFork is a mock implementation for CheckFork +func (f *ForkDetectorMock) CheckFork() bool { + return f.CheckForkCalled() +} diff --git a/node/mock/marshalizerFake.go b/node/mock/marshalizerFake.go new file mode 100644 index 00000000000..85d630bd5e3 --- /dev/null +++ b/node/mock/marshalizerFake.go @@ -0,0 +1,47 @@ +package mock + +import ( + "encoding/json" + "errors" +) + +var errMockMarshalizer = errors.New("MarshalizerMock generic error") + +// MarshalizerFake that will be used for testing +type MarshalizerFake struct { + Fail bool +} + +// Marshal converts the input object in a slice of bytes +func (mm *MarshalizerFake) Marshal(obj interface{}) ([]byte, error) { + if mm.Fail { + return nil, errMockMarshalizer + } + + if obj == nil { + return nil, errors.New("nil object to serilize from") + } + + return json.Marshal(obj) +} + +// Unmarshal applies the serialized values over an instantiated object +func (mm *MarshalizerFake) Unmarshal(obj interface{}, buff []byte) error { + if mm.Fail { + return errMockMarshalizer + } + + if obj == nil { + return errors.New("nil object to serilize to") + } + + if buff == nil { + return errors.New("nil byte buffer to deserialize from") + } + + if len(buff) == 0 { + return errors.New("empty byte buffer to deserialize from") + } + + return json.Unmarshal(buff, obj) +} diff --git a/node/mock/messengerStub.go b/node/mock/messengerStub.go index 873a6929efc..34a3b6f60e3 100644 --- a/node/mock/messengerStub.go +++ b/node/mock/messengerStub.go @@ -13,10 +13,12 @@ import ( ) type MessengerStub struct { - marshalizer marshal.Marshalizer - HasherObj hashing.Hasher - AddTopicCalled func(t *p2p.Topic) error - GetTopicCalled func(name string) *p2p.Topic + marshalizer marshal.Marshalizer + HasherObj hashing.Hasher + CloseCalled func() error + AddTopicCalled func(t *p2p.Topic) error + GetTopicCalled func(name string) *p2p.Topic + BootstrapCalled func(ctx context.Context) } func NewMessengerStub() *MessengerStub { @@ -27,7 +29,7 @@ func NewMessengerStub() *MessengerStub { } func (ms *MessengerStub) Close() error { - panic("implement me") + return ms.CloseCalled() } func (ms *MessengerStub) ID() peer.ID { @@ -63,7 +65,7 @@ func (ms *MessengerStub) ConnectToAddresses(ctx context.Context, addresses []str } func (ms *MessengerStub) Bootstrap(ctx context.Context) { - panic("implement me") + ms.BootstrapCalled(ctx) } func (ms *MessengerStub) PrintConnected() { diff --git a/node/mock/multisignMock.go b/node/mock/multisignMock.go index 910697c164d..334821307a4 100644 --- a/node/mock/multisignMock.go +++ b/node/mock/multisignMock.go @@ -51,15 +51,19 @@ func (mm *MultisignMock) SetAggCommitment(aggCommitment []byte) error { panic("implement me") } -func (mm *MultisignMock) SignPartial(bitmap []byte) ([]byte, error) { +func (mm *MultisignMock) CreateSignatureShare(bitmap []byte) ([]byte, error) { panic("implement me") } -func (mm *MultisignMock) AddSignPartial(index uint16, sig []byte) error { +func (mm *MultisignMock) AddSignatureShare(index uint16, sig []byte) error { panic("implement me") } -func (mm *MultisignMock) VerifyPartial(index uint16, sig []byte, bitmap []byte) error { +func (mm *MultisignMock) VerifySignatureShare(index uint16, sig []byte, bitmap []byte) error { + panic("implement me") +} + +func (mm *MultisignMock) SignatureShare(index uint16) ([]byte, error) { panic("implement me") } diff --git a/node/mock/processorCreatorMock.go b/node/mock/processorCreatorMock.go new file mode 100644 index 00000000000..2e10de6f924 --- /dev/null +++ b/node/mock/processorCreatorMock.go @@ -0,0 +1,33 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go-sandbox/process" +) + +// ProcessorCreatorMock is a mock implementation of ProcessorFactory +type ProcessorCreatorMock struct { + CreateInterceptorsCalled func() error + CreateResolversCalled func() error + InterceptorContainerCalled func() process.InterceptorContainer + ResolverContainerCalled func() process.ResolverContainer +} + +// CreateInterceptors is a mock function for creating interceptors +func (p *ProcessorCreatorMock) CreateInterceptors() error { + return p.CreateInterceptorsCalled() +} + +// CreateResolvers is a mock function for creating resolvers +func (p *ProcessorCreatorMock) CreateResolvers() error { + return p.CreateResolversCalled() +} + +// InterceptorContainer is a mock getter for the interceptor container +func (p *ProcessorCreatorMock) InterceptorContainer() process.InterceptorContainer { + return p.InterceptorContainerCalled() +} + +// ResolverContainer is a mock getter for the resolver container +func (p *ProcessorCreatorMock) ResolverContainer() process.ResolverContainer { + return p.ResolverContainerCalled() +} diff --git a/node/mock/shardedDataStub.go b/node/mock/shardedDataStub.go index ffa31631323..21460f57be9 100644 --- a/node/mock/shardedDataStub.go +++ b/node/mock/shardedDataStub.go @@ -8,7 +8,7 @@ type ShardedDataStub struct { RegisterHandlerCalled func(func(key []byte)) ShardDataStoreCalled func(shardID uint32) (c storage.Cacher) AddDataCalled func(key []byte, data interface{}, destShardID uint32) - SearchDataCalled func(key []byte) (shardValuesPairs map[uint32]interface{}) + SearchFirstDataCalled func(key []byte) (value interface{}, ok bool) RemoveDataCalled func(key []byte, destShardID uint32) RemoveDataFromAllShardsCalled func(key []byte) MergeShardStoresCalled func(sourceShardID, destShardID uint32) @@ -16,6 +16,7 @@ type ShardedDataStub struct { ClearCalled func() ClearShardStoreCalled func(shardID uint32) RemoveSetOfDataFromPoolCalled func(keys [][]byte, destShardID uint32) + CreateShardStoreCalled func(destShardID uint32) } func (sd *ShardedDataStub) RegisterHandler(handler func(key []byte)) { @@ -30,8 +31,8 @@ func (sd *ShardedDataStub) AddData(key []byte, data interface{}, destShardID uin sd.AddDataCalled(key, data, destShardID) } -func (sd *ShardedDataStub) SearchData(key []byte) (shardValuesPairs map[uint32]interface{}) { - return sd.SearchDataCalled(key) +func (sd *ShardedDataStub) SearchFirstData(key []byte) (value interface{}, ok bool) { + return sd.SearchFirstDataCalled(key) } func (sd *ShardedDataStub) RemoveData(key []byte, destShardID uint32) { @@ -46,6 +47,10 @@ func (sd *ShardedDataStub) MergeShardStores(sourceShardID, destShardID uint32) { sd.MergeShardStoresCalled(sourceShardID, destShardID) } +func (sd *ShardedDataStub) CreateShardStore(destShardID uint32) { + sd.CreateShardStoreCalled(destShardID) +} + func (sd *ShardedDataStub) MoveData(sourceShardID, destShardID uint32, key [][]byte) { sd.MoveDataCalled(sourceShardID, destShardID, key) } diff --git a/node/node.go b/node/node.go index c58f5d06563..693ed9be905 100644 --- a/node/node.go +++ b/node/node.go @@ -3,9 +3,9 @@ package node import ( "context" "encoding/base64" - "encoding/hex" "fmt" "math/big" + gosync "sync" "time" "github.com/ElrondNetwork/elrond-go-sandbox/chronology" @@ -18,32 +18,25 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/data/state" "github.com/ElrondNetwork/elrond-go-sandbox/data/transaction" "github.com/ElrondNetwork/elrond-go-sandbox/data/typeConverters" - "github.com/ElrondNetwork/elrond-go-sandbox/display" "github.com/ElrondNetwork/elrond-go-sandbox/hashing" "github.com/ElrondNetwork/elrond-go-sandbox/logger" "github.com/ElrondNetwork/elrond-go-sandbox/marshal" "github.com/ElrondNetwork/elrond-go-sandbox/p2p" "github.com/ElrondNetwork/elrond-go-sandbox/process" + block2 "github.com/ElrondNetwork/elrond-go-sandbox/process/block" + "github.com/ElrondNetwork/elrond-go-sandbox/process/factory" + "github.com/ElrondNetwork/elrond-go-sandbox/process/sync" "github.com/ElrondNetwork/elrond-go-sandbox/sharding" "github.com/pkg/errors" ) -type topicName string +// WaitTime defines the time in milliseconds until node waits the requested info from the network +const WaitTime = time.Duration(2000 * time.Millisecond) -const ( - // TransactionTopic is the topic used for sharing transactions - TransactionTopic topicName = "tx" - // ConsensusTopic is the topic used in consensus algorithm - ConsensusTopic topicName = "cns" - // HeadersTopic is the topic used for sharing block headers - HeadersTopic topicName = "hdr" - // TxBlockBodyTopic is the topic used for sharing transactions block bodies - TxBlockBodyTopic topicName = "txBlk" - // PeerChBodyTopic is used for sharing peer change block bodies - PeerChBodyTopic topicName = "peerCh" - // StateBodyTopic is used for sharing state block bodies - StateBodyTopic topicName = "state" -) +// ConsensusTopic is the topic used in consensus algorithm +const ConsensusTopic topicName = "consensus" + +type topicName string var log = logger.NewDefaultLogger() @@ -54,14 +47,11 @@ type Option func(*Node) error // Node is a structure that passes the configuration parameters and initializes // required services as requested type Node struct { - port int marshalizer marshal.Marshalizer ctx context.Context hasher hashing.Hasher - maxAllowedPeers int - pubSubStrategy p2p.PubSubStrategy initialNodesPubkeys []string - initialNodesBalances map[string]big.Int + initialNodesBalances map[string]*big.Int roundDuration uint64 consensusGroupSize int messenger p2p.Messenger @@ -72,32 +62,19 @@ type Node struct { accounts state.AccountsAdapter addrConverter state.AddressConverter uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter + processorCreator process.ProcessorFactory privateKey crypto.PrivateKey publicKey crypto.PublicKey singleSignKeyGen crypto.KeyGenerator multisig crypto.MultiSigner + forkDetector process.ForkDetector blkc *blockchain.BlockChain dataPool data.TransientDataHolder shardCoordinator sharding.ShardCoordinator - interceptors []process.Interceptor - resolvers []process.Resolver -} - -// NewNode creates a new Node instance -func NewNode(opts ...Option) (*Node, error) { - node := &Node{ - ctx: context.Background(), - } - for _, opt := range opts { - err := opt(node) - if err != nil { - return nil, errors.New("error applying option: " + err.Error()) - } - } - return node, nil + isRunning bool } // ApplyOptions can set up different configurable options of a Node instance @@ -114,28 +91,32 @@ func (n *Node) ApplyOptions(opts ...Option) error { return nil } -// IsRunning will return the current state of the node -func (n *Node) IsRunning() bool { - return n.messenger != nil +// NewNode creates a new Node instance +func NewNode(opts ...Option) (*Node, error) { + node := &Node{ + ctx: context.Background(), + } + for _, opt := range opts { + err := opt(node) + if err != nil { + return nil, errors.New("error applying option: " + err.Error()) + } + } + return node, nil } -// Address returns the first address of the running node -func (n *Node) Address() (string, error) { - if !n.IsRunning() { - return "", errors.New("node is not started yet") - } - return n.messenger.Addresses()[0], nil +// IsRunning will return the current state of the node +func (n *Node) IsRunning() bool { + return n.isRunning } // Start will create a new messenger and and set up the Node state as running func (n *Node) Start() error { - messenger, err := n.createNetMessenger() - if err != nil { - return err + err := n.P2PBootstrap() + if err == nil { + n.isRunning = true } - n.messenger = messenger - n.P2PBootstrap() - return nil + return err } // Stop closes the messenger and undos everything done in Start @@ -153,33 +134,40 @@ func (n *Node) Stop() error { } // P2PBootstrap will try to connect to many peers as possible -func (n *Node) P2PBootstrap() { +func (n *Node) P2PBootstrap() error { + if n.messenger == nil { + return ErrNilMessenger + } n.messenger.Bootstrap(n.ctx) + return nil } -// ConnectToAddresses will take a slice of addresses and try to connect to all of them. -func (n *Node) ConnectToAddresses(addresses []string) error { - if !n.IsRunning() { - return errNodeNotStarted +// CreateShardedStores instantiate sharded cachers for Transactions and Headers +func (n *Node) CreateShardedStores() error { + if n.shardCoordinator == nil { + return ErrNilShardCoordinator } - n.messenger.ConnectToAddresses(n.ctx, addresses) - return nil -} -// BindInterceptorsResolvers will start the interceptors and resolvers -func (n *Node) BindInterceptorsResolvers() error { - if !n.IsRunning() { - return errNodeNotStarted + if n.dataPool == nil { + return ErrNilDataPool } - err := n.createInterceptors() - if err != nil { - return err + transactionsDataStore := n.dataPool.Transactions() + headersDataStore := n.dataPool.Headers() + + if transactionsDataStore == nil { + return errors.New("nil transaction sharded data store") } - err = n.createResolvers() - if err != nil { - return err + if headersDataStore == nil { + return errors.New("nil header sharded data store") + } + + shards := n.shardCoordinator.NoShards() + + for i := uint32(0); i < shards; i++ { + transactionsDataStore.CreateShardStore(i) + headersDataStore.CreateShardStore(i) } return nil @@ -188,18 +176,27 @@ func (n *Node) BindInterceptorsResolvers() error { // StartConsensus will start the consesus service for the current node func (n *Node) StartConsensus() error { - genessisBlock, err := n.createGenesisBlock() + genesisHeader, genesisHeaderHash, err := n.createGenesisBlock() if err != nil { return err } - n.blkc.GenesisBlock = genessisBlock + n.blkc.GenesisBlock = genesisHeader + n.blkc.GenesisHeaderHash = genesisHeaderHash + round := n.createRound() chr := n.createChronology(round) + + boot, err := n.createBootstrap(round) + + if err != nil { + return err + } + rndc := n.createRoundConsensus() rth := n.createRoundThreshold() rnds := n.createRoundStatus() cns := n.createConsensus(rndc, rth, rnds, chr) - sposWrk, err := n.createConsensusWorker(cns) + sposWrk, err := n.createConsensusWorker(cns, boot) if err != nil { return err @@ -216,7 +213,130 @@ func (n *Node) StartConsensus() error { n.addSubroundsToChronology(sposWrk) go sposWrk.Cns.Chr.StartRounds() - go n.blockchainLog(sposWrk) + + return nil +} + +// GetBalance gets the balance for a specific address +func (n *Node) GetBalance(addressHex string) (*big.Int, error) { + if n.addrConverter == nil || n.accounts == nil { + return nil, errors.New("initialize AccountsAdapter and AddressConverter first") + } + + address, err := n.addrConverter.CreateAddressFromHex(addressHex) + if err != nil { + return nil, errors.New("invalid address, could not decode from hex: " + err.Error()) + } + account, err := n.accounts.GetExistingAccount(address) + if err != nil { + return nil, errors.New("could not fetch sender address from provided param: " + err.Error()) + } + + if account == nil { + return big.NewInt(0), nil + } + + return account.BaseAccount().Balance, nil +} + +// GenerateAndSendBulkTransactions is a method for generating and propagating a set +// of transactions to be processed. It is mainly used for demo purposes +func (n *Node) GenerateAndSendBulkTransactions(receiverHex string, value *big.Int, noOfTx uint64) error { + if noOfTx == 0 { + return errors.New("can not generate and broadcast 0 transactions") + } + + if n.publicKey == nil { + return ErrNilPublicKey + } + senderAddressBytes, err := n.publicKey.ToByteArray() + if err != nil { + return err + } + + if n.addrConverter == nil { + return ErrNilAddressConverter + } + senderAddress, err := n.addrConverter.CreateAddressFromPublicKeyBytes(senderAddressBytes) + if err != nil { + return err + } + + receiverAddress, err := n.addrConverter.CreateAddressFromHex(receiverHex) + if err != nil { + return errors.New("could not create receiver address from provided param: " + err.Error()) + } + + if n.accounts == nil { + return ErrNilAccountsAdapter + } + senderAccount, err := n.accounts.GetExistingAccount(senderAddress) + if err != nil { + return errors.New("could not fetch sender account from provided param: " + err.Error()) + } + newNonce := uint64(0) + if senderAccount != nil { + newNonce = senderAccount.BaseAccount().Nonce + } + + wg := gosync.WaitGroup{} + wg.Add(int(noOfTx)) + + mutTransactions := gosync.RWMutex{} + transactions := make([][]byte, 0) + + mutErrFound := gosync.Mutex{} + var errFound error + + for nonce := newNonce; nonce < newNonce+noOfTx; nonce++ { + go func(crtNonce uint64) { + _, signedTxBuff, err := n.generateAndSignTx( + crtNonce, + value, + receiverAddress.Bytes(), + senderAddressBytes, + nil, + ) + + if err != nil { + mutErrFound.Lock() + errFound = errors.New(fmt.Sprintf("failure generating transaction %d: %s", crtNonce, err.Error())) + mutErrFound.Unlock() + + wg.Done() + return + } + + mutTransactions.Lock() + transactions = append(transactions, signedTxBuff) + mutTransactions.Unlock() + wg.Done() + }(nonce) + } + + wg.Wait() + + if errFound != nil { + return errFound + } + + topic := n.messenger.GetTopic(string(factory.TransactionTopic)) + if topic == nil { + return errors.New("could not get transaction topic") + } + + if len(transactions) != int(noOfTx) { + return errors.New(fmt.Sprintf("generated only %d from required %d transactions", len(transactions), noOfTx)) + } + + for i := 0; i < len(transactions); i++ { + err = topic.BroadcastBuff(transactions[i]) + time.Sleep(time.Microsecond * 100) + + if err != nil { + return errors.New("could not broadcast transaction: " + err.Error()) + } + } return nil } @@ -242,6 +362,57 @@ func (n *Node) createChronology(round *chronology.Round) *chronology.Chronology return chr } +func (n *Node) createBootstrap(round *chronology.Round) (*sync.Bootstrap, error) { + bootstrap, err := sync.NewBootstrap(n.dataPool, n.blkc, round, n.blockProcessor, WaitTime, n.marshalizer, n.forkDetector) + + if err != nil { + return nil, err + } + + resH, err := n.processorCreator.ResolverContainer().Get(string(factory.HeadersTopic)) + if err != nil { + return nil, errors.New("cannot find headers topic resolver") + } + hdrRes := resH.(*block2.HeaderResolver) + + resT, err := n.processorCreator.ResolverContainer().Get(string(factory.TxBlockBodyTopic)) + if err != nil { + return nil, errors.New("cannot find tx block body topic resolver") + + } + gbbrRes := resT.(*block2.GenericBlockBodyResolver) + + bootstrap.RequestHeaderHandler = createRequestHeaderHandler(hdrRes) + bootstrap.RequestTxBodyHandler = cerateRequestTxBodyHandler(gbbrRes) + + bootstrap.StartSync() + + return bootstrap, nil +} + +func createRequestHeaderHandler(hdrRes *block2.HeaderResolver) func(nonce uint64) { + return func(nonce uint64) { + err := hdrRes.RequestHeaderFromNonce(nonce) + + log.Info(fmt.Sprintf("requested header with nonce %d from network\n", nonce)) + if err != nil { + log.Error("RequestHeaderFromNonce error: ", err.Error()) + } + } +} + +func cerateRequestTxBodyHandler(gbbrRes *block2.GenericBlockBodyResolver) func(hash []byte) { + return func(hash []byte) { + err := gbbrRes.RequestBlockBodyFromHash(hash) + + log.Info(fmt.Sprintf("requested tx body with hash %s from network\n", toB64(hash))) + if err != nil { + log.Error("RequestBlockBodyFromHash error: ", err.Error()) + return + } + } +} + // createRoundConsensus method creates a RoundConsensus object func (n *Node) createRoundConsensus() *spos.RoundConsensus { @@ -300,13 +471,14 @@ func (n *Node) createConsensus(rndc *spos.RoundConsensus, rth *spos.RoundThresho } // createConsensusWorker method creates a ConsensusWorker object -func (n *Node) createConsensusWorker(cns *spos.Consensus) (*spos.SPOSConsensusWorker, error) { +func (n *Node) createConsensusWorker(cns *spos.Consensus, boot *sync.Bootstrap) (*spos.SPOSConsensusWorker, error) { sposWrk, err := spos.NewConsensusWorker( cns, n.blkc, n.hasher, n.marshalizer, n.blockProcessor, + boot, n.multisig, n.singleSignKeyGen, n.privateKey, @@ -318,6 +490,8 @@ func (n *Node) createConsensusWorker(cns *spos.Consensus) (*spos.SPOSConsensusWo } sposWrk.SendMessage = n.sendMessage + sposWrk.BroadcastBlockBody = n.broadcastBlockBody + sposWrk.BroadcastHeader = n.broadcastHeader return sposWrk, nil } @@ -338,7 +512,7 @@ func (n *Node) addSubroundsToChronology(sposWrk *spos.SPOSConsensusWorker) { chronology.SubroundId(spos.SrBlock), int64(roundDuration*5/100), sposWrk.Cns.GetSubroundName(spos.SrStartRound), sposWrk.DoStartRoundJob, - nil, + sposWrk.ExtendStartRound, sposWrk.Cns.CheckStartRoundConsensus)) sposWrk.Cns.Chr.AddSubround(spos.NewSubround( @@ -400,34 +574,56 @@ func (n *Node) addSubroundsToChronology(sposWrk *spos.SPOSConsensusWorker) { -1, int64(roundDuration*100/100), sposWrk.Cns.GetSubroundName(spos.SrAdvance), + sposWrk.DoAdvanceJob, nil, - nil, - nil)) + sposWrk.Cns.CheckAdvanceConsensus)) } -// GetBalance gets the balance for a specific address -func (n *Node) GetBalance(address string) (*big.Int, error) { - if n.addrConverter == nil || n.accounts == nil { - return nil, errors.New("initialize AccountsAdapter and AddressConverter first") +func (n *Node) generateAndSignTx( + nonce uint64, + value *big.Int, + rcvAddrBytes []byte, + sndAddrBytes []byte, + dataBytes []byte, +) (*transaction.Transaction, []byte, error) { + + tx := transaction.Transaction{ + Nonce: nonce, + Value: value, + RcvAddr: rcvAddrBytes, + SndAddr: sndAddrBytes, + Data: dataBytes, + } + + if n.marshalizer == nil { + return nil, nil, ErrNilMarshalizer } - accAddress, err := n.addrConverter.CreateAddressFromHex(address) + + if n.privateKey == nil { + return nil, nil, ErrNilPrivateKey + } + + marshalizedTx, err := n.marshalizer.Marshal(&tx) if err != nil { - return nil, errors.New("invalid address: " + err.Error()) + return nil, nil, errors.New("could not marshal transaction") } - account, err := n.accounts.GetExistingAccount(accAddress) + + sig, err := n.privateKey.Sign(marshalizedTx) if err != nil { - return nil, errors.New("could not fetch sender address from provided param") + return nil, nil, errors.New("could not sign the transaction") } + tx.Signature = sig - if account == nil { - return big.NewInt(0), nil + signedMarshalizedTx, err := n.marshalizer.Marshal(&tx) + if err != nil { + return nil, nil, errors.New("could not marshal signed transaction") } - return &account.BaseAccount().Balance, nil + return &tx, signedMarshalizedTx, nil } //GenerateTransaction generates a new transaction with sender, receiver, amount and code -func (n *Node) GenerateTransaction(sender string, receiver string, amount big.Int, code string) (*transaction.Transaction, error) { +func (n *Node) GenerateTransaction(senderHex string, receiverHex string, value *big.Int, transactionData string) (*transaction.Transaction, error) { if n.addrConverter == nil || n.accounts == nil { return nil, errors.New("initialize AccountsAdapter and AddressConverter first") } @@ -436,7 +632,11 @@ func (n *Node) GenerateTransaction(sender string, receiver string, amount big.In return nil, errors.New("initialize PrivateKey first") } - senderAddress, err := n.addrConverter.CreateAddressFromHex(sender) + receiverAddress, err := n.addrConverter.CreateAddressFromHex(receiverHex) + if err != nil { + return nil, errors.New("could not create receiver address from provided param") + } + senderAddress, err := n.addrConverter.CreateAddressFromHex(senderHex) if err != nil { return nil, errors.New("could not create sender address from provided param") } @@ -449,52 +649,55 @@ func (n *Node) GenerateTransaction(sender string, receiver string, amount big.In newNonce = senderAccount.BaseAccount().Nonce } - tx := transaction.Transaction{ - Nonce: newNonce, - Value: amount, - RcvAddr: []byte(receiver), - SndAddr: []byte(sender), - } - - txToByteArray, err := n.marshalizer.Marshal(tx) - if err != nil { - return nil, errors.New("could not create byte array representation of the transaction") - } + tx, _, err := n.generateAndSignTx( + newNonce, + value, + receiverAddress.Bytes(), + senderAddress.Bytes(), + []byte(transactionData)) - sig, err := n.privateKey.Sign(txToByteArray) - if err != nil { - return nil, errors.New("could not sign the transaction") - } - tx.Signature = sig - - return &tx, nil + return tx, err } // SendTransaction will send a new transaction on the topic channel func (n *Node) SendTransaction( nonce uint64, - sender string, - receiver string, - value big.Int, + senderHex string, + receiverHex string, + value *big.Int, transactionData string, - signature string) (*transaction.Transaction, error) { + signature []byte) (*transaction.Transaction, error) { + + sender, err := n.addrConverter.CreateAddressFromHex(senderHex) + if err != nil { + return nil, err + } + receiver, err := n.addrConverter.CreateAddressFromHex(receiverHex) + if err != nil { + return nil, err + } tx := transaction.Transaction{ Nonce: nonce, Value: value, - RcvAddr: []byte(receiver), - SndAddr: []byte(sender), + RcvAddr: receiver.Bytes(), + SndAddr: sender.Bytes(), Data: []byte(transactionData), - Signature: []byte(signature), + Signature: signature, } - topic := n.messenger.GetTopic(string(TransactionTopic)) + topic := n.messenger.GetTopic(string(factory.TransactionTopic)) if topic == nil { return nil, errors.New("could not get transaction topic") } - err := topic.Broadcast(tx) + marshalizedTx, err := n.marshalizer.Marshal(&tx) + if err != nil { + return nil, errors.New("could not marshal transaction") + } + + err = topic.BroadcastBuff(marshalizedTx) if err != nil { return nil, errors.New("could not broadcast transaction: " + err.Error()) } @@ -506,236 +709,106 @@ func (n *Node) GetTransaction(hash string) (*transaction.Transaction, error) { return nil, fmt.Errorf("not yet implemented") } -func (n *Node) createNetMessenger() (p2p.Messenger, error) { - if n.port == 0 { - return nil, errors.New("Cannot start node on port 0") +// GetCurrentPublicKey will return the current node's public key +func (n *Node) GetCurrentPublicKey() string { + if n.publicKey != nil { + pkey, _ := n.publicKey.ToByteArray() + return fmt.Sprintf("%x", pkey) } + return "" +} - if n.maxAllowedPeers == 0 { - return nil, errors.New("Cannot start node without providing maxAllowedPeers") +// GetAccount will return acount details for a given address +func (n *Node) GetAccount(address string) (*state.Account, error) { + if n.addrConverter == nil || n.accounts == nil { + return nil, errors.New("initialize AccountsAdapter and AddressConverter first") } - cp, err := p2p.NewConnectParamsFromPort(n.port) + addr, err := n.addrConverter.CreateAddressFromHex(address) if err != nil { - return nil, err + return nil, errors.New("could not create address object from provided string") } - - nm, err := p2p.NewNetMessenger(n.ctx, n.marshalizer, n.hasher, cp, n.maxAllowedPeers, n.pubSubStrategy) + account, err := n.accounts.GetExistingAccount(addr) if err != nil { - return nil, err + return nil, errors.New("could not fetch sender address from provided param") } - return nm, nil + return account.BaseAccount(), nil } -func (n *Node) createGenesisBlock() (*block.Header, error) { - blockBody := n.blockProcessor.CreateGenesisBlockBody(n.initialNodesBalances, 0) +func (n *Node) createGenesisBlock() (*block.Header, []byte, error) { + blockBody, err := n.blockProcessor.CreateGenesisBlockBody(n.initialNodesBalances, 0) + if err != nil { + return nil, nil, err + } + marshalizedBody, err := n.marshalizer.Marshal(blockBody) if err != nil { - return nil, err + return nil, nil, err } blockBodyHash := n.hasher.Compute(string(marshalizedBody)) - return &block.Header{ + header := &block.Header{ Nonce: 0, ShardId: blockBody.ShardID, TimeStamp: uint64(n.genesisTime.Unix()), BlockBodyHash: blockBodyHash, BlockBodyType: block.StateBlock, Signature: blockBodyHash, - }, nil -} - -func (n *Node) blockchainLog(sposWrk *spos.SPOSConsensusWorker) { - // TODO: this method and its call should be removed after initial testing of our first version of testnet - oldNonce := uint64(0) - prevHeaderHash := []byte("") - recheckPeriod := sposWrk.Cns.Chr.Round().TimeDuration() * 5 / 100 - - for { - time.Sleep(recheckPeriod) - - hdr := sposWrk.BlockChain.CurrentBlockHeader - txBlock := sposWrk.BlockChain.CurrentTxBlockBody - - if hdr == nil || txBlock == nil { - continue - } + } - if hdr.Nonce > oldNonce { - newNonce, newPrevHash, blockHash, err := n.computeNewNoncePrevHash(sposWrk, hdr, txBlock, prevHeaderHash) + marshalizedHeader, err := n.marshalizer.Marshal(header) - if err != nil { - log.Error(err.Error()) - continue - } + if err != nil { + return nil, nil, err + } - n.displayLogInfo(hdr, txBlock, newPrevHash, prevHeaderHash, sposWrk, blockHash) + blockHeaderHash := n.hasher.Compute(string(marshalizedHeader)) - oldNonce = newNonce - prevHeaderHash = newPrevHash - } - } + return header, blockHeaderHash, nil } -func (n *Node) computeNewNoncePrevHash( - sposWrk *spos.SPOSConsensusWorker, - hdr *block.Header, - txBlock *block.TxBlockBody, - prevHash []byte, -) (uint64, []byte, []byte, error) { - - if sposWrk == nil { - return 0, nil, nil, errNilSposWorker - } +func (n *Node) sendMessage(cnsDta *spos.ConsensusData) { + topic := n.messenger.GetTopic(string(ConsensusTopic)) - if sposWrk.BlockChain == nil { - return 0, nil, nil, errNilBlockchain + if topic == nil { + log.Debug(fmt.Sprintf("could not get consensus topic")) + return } - headerMarsh, err := n.marshalizer.Marshal(hdr) - if err != nil { - return 0, nil, nil, err - } + err := topic.Broadcast(cnsDta) - txBlkMarsh, err := n.marshalizer.Marshal(txBlock) if err != nil { - return 0, nil, nil, err + log.Debug(fmt.Sprintf("could not broadcast message: " + err.Error())) } - - headerHash := n.hasher.Compute(string(headerMarsh)) - blockHash := n.hasher.Compute(string(txBlkMarsh)) - - return hdr.Nonce, headerHash, blockHash, nil } -func (n *Node) displayLogInfo( - header *block.Header, - txBlock *block.TxBlockBody, - headerHash []byte, - prevHash []byte, - sposWrk *spos.SPOSConsensusWorker, - blockHash []byte, -) { +func (n *Node) broadcastBlockBody(msg []byte) { + topic := n.messenger.GetTopic(string(factory.TxBlockBodyTopic)) - log.Info(fmt.Sprintf("Block with nonce %d and hash %s was added into the blockchain. Previous block hash was %s\n\n", header.Nonce, toB64(headerHash), toB64(prevHash))) + if topic == nil { + log.Debug(fmt.Sprintf("could not get tx block body topic")) + return + } - dispHeader, dispLines := createDisplayableHeaderAndBlockBody(header, txBlock, blockHash) + err := topic.BroadcastBuff(msg) - tblString, err := display.CreateTableString(dispHeader, dispLines) if err != nil { - log.Error(err.Error()) + log.Debug(fmt.Sprintf("could not broadcast message: " + err.Error())) } - fmt.Println(tblString) - - log.Info(fmt.Sprintf("\n********** There was %d rounds and was proposed %d blocks, which means %.2f%% hit rate **********\n", - sposWrk.Rounds, sposWrk.RoundsWithBlock, float64(sposWrk.RoundsWithBlock)*100/float64(sposWrk.Rounds))) -} - -func createDisplayableHeaderAndBlockBody( - hdr *block.Header, - txBody *block.TxBlockBody, - txBlockHash []byte) ([]string, []*display.LineData) { - - header := []string{"Part", "Parameter", "Value"} - - lines := displayHeader(hdr) - - if hdr.BlockBodyType == block.TxBlock { - lines = displayTxBlockBody(lines, txBody, txBlockHash) - - return header, lines - } - - //TODO: implement the other block bodies - - lines = append(lines, display.NewLineData(false, []string{"Unknown", "", ""})) - return header, lines -} - -func displayHeader(hdr *block.Header) []*display.LineData { - lines := make([]*display.LineData, 0) - - lines = append(lines, display.NewLineData(false, []string{ - "Header", - "Nonce", - fmt.Sprintf("%d", hdr.Nonce)})) - lines = append(lines, display.NewLineData(false, []string{ - "", - "Shard", - fmt.Sprintf("%d", hdr.ShardId)})) - lines = append(lines, display.NewLineData(false, []string{ - "", - "Epoch", - fmt.Sprintf("%d", hdr.Epoch)})) - lines = append(lines, display.NewLineData(false, []string{ - "", - "Round", - fmt.Sprintf("%d", hdr.Round)})) - lines = append(lines, display.NewLineData(false, []string{ - "", - "Timestamp", - fmt.Sprintf("%d", hdr.TimeStamp)})) - lines = append(lines, display.NewLineData(false, []string{ - "", - "Prev hash", - toB64(hdr.PrevHash)})) - lines = append(lines, display.NewLineData(false, []string{ - "", - "Body type", - hdr.BlockBodyType.String()})) - lines = append(lines, display.NewLineData(false, []string{ - "", - "Body hash", - toB64(hdr.BlockBodyHash)})) - lines = append(lines, display.NewLineData(false, []string{ - "", - "Pub keys bitmap", - toHex(hdr.PubKeysBitmap)})) - lines = append(lines, display.NewLineData(false, []string{ - "", - "Commitment", - toB64(hdr.Commitment)})) - lines = append(lines, display.NewLineData(true, []string{ - "", - "Signature", - toB64(hdr.Signature)})) - - return lines -} - -func displayTxBlockBody(lines []*display.LineData, txBody *block.TxBlockBody, hash []byte) []*display.LineData { - lines = append(lines, display.NewLineData(false, []string{"TxBody", "Block hash", toB64(hash)})) - lines = append(lines, display.NewLineData(true, []string{"", "Root hash", toB64(txBody.RootHash)})) - - for i := 0; i < len(txBody.MiniBlocks); i++ { - miniBlock := txBody.MiniBlocks[i] - - part := fmt.Sprintf("TxBody_%d", miniBlock.ShardID) - - if miniBlock.TxHashes == nil || len(miniBlock.TxHashes) == 0 { - lines = append(lines, display.NewLineData(false, []string{ - part, "", " or "})) - } - - for j := 0; j < len(miniBlock.TxHashes); j++ { - lines = append(lines, display.NewLineData(false, []string{ - part, - fmt.Sprintf("Tx hash %d", j), - toB64(miniBlock.TxHashes[j])})) +} - part = "" - } +func (n *Node) broadcastHeader(msg []byte) { + topic := n.messenger.GetTopic(string(factory.HeadersTopic)) - lines[len(lines)-1].HorizontalRuleAfter = true + if topic == nil { + log.Debug(fmt.Sprintf("could not get header topic")) + return } - return lines -} + err := topic.BroadcastBuff(msg) -func toHex(buff []byte) string { - if buff == nil { - return "" + if err != nil { + log.Debug(fmt.Sprintf("could not broadcast message: " + err.Error())) } - return "0x" + hex.EncodeToString(buff) } func toB64(buff []byte) string { @@ -744,18 +817,3 @@ func toB64(buff []byte) string { } return base64.StdEncoding.EncodeToString(buff) } - -func (n *Node) sendMessage(cnsDta *spos.ConsensusData) { - topic := n.messenger.GetTopic(string(ConsensusTopic)) - - if topic == nil { - log.Debug(fmt.Sprintf("could not get consensus topic")) - return - } - - err := topic.Broadcast(*cnsDta) - - if err != nil { - log.Debug(fmt.Sprintf("could not broadcast message: " + err.Error())) - } -} diff --git a/node/node_test.go b/node/node_test.go index 91fc3603787..f1aec3ce3fd 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -4,18 +4,21 @@ import ( "context" "fmt" "math/big" + "math/rand" + "strings" + "sync" "testing" + "time" - "github.com/ElrondNetwork/elrond-go-sandbox/consensus/spos" + "github.com/ElrondNetwork/elrond-go-sandbox/crypto/schnorr" "github.com/ElrondNetwork/elrond-go-sandbox/data" - "github.com/ElrondNetwork/elrond-go-sandbox/data/block" - "github.com/ElrondNetwork/elrond-go-sandbox/data/blockchain" "github.com/ElrondNetwork/elrond-go-sandbox/data/state" + "github.com/ElrondNetwork/elrond-go-sandbox/data/transaction" "github.com/ElrondNetwork/elrond-go-sandbox/node" "github.com/ElrondNetwork/elrond-go-sandbox/node/mock" "github.com/ElrondNetwork/elrond-go-sandbox/p2p" - "github.com/ElrondNetwork/elrond-go-sandbox/storage" - "github.com/libp2p/go-libp2p-pubsub" + "github.com/ElrondNetwork/elrond-go-sandbox/process/factory" + transaction2 "github.com/ElrondNetwork/elrond-go-sandbox/process/transaction" "github.com/pkg/errors" "github.com/stretchr/testify/assert" ) @@ -27,20 +30,17 @@ func logError(err error) { } func TestNewNode(t *testing.T) { - n, err := node.NewNode() assert.NotNil(t, n) assert.Nil(t, err) } func TestNewNode_NotRunning(t *testing.T) { - n, _ := node.NewNode() assert.False(t, n.IsRunning()) } func TestNewNode_NilOptionShouldError(t *testing.T) { - _, err := node.NewNode(node.WithAccountsAdapter(nil)) assert.NotNil(t, err) } @@ -52,47 +52,21 @@ func TestNewNode_ApplyNilOptionShouldError(t *testing.T) { assert.NotNil(t, err) } -func TestStart_NoPort(t *testing.T) { - +func TestStart_NoMessenger(t *testing.T) { n, _ := node.NewNode() err := n.Start() defer func() { _ = n.Stop() }() assert.NotNil(t, err) } -func TestStart_NoMarshalizer(t *testing.T) { - - n, _ := node.NewNode(node.WithPort(4000)) - err := n.Start() - defer func() { _ = n.Stop() }() - assert.NotNil(t, err) -} - -func TestStart_NoHasher(t *testing.T) { - - n, _ := node.NewNode(node.WithPort(4000), node.WithMarshalizer(mock.MarshalizerMock{})) - err := n.Start() - defer func() { _ = n.Stop() }() - assert.NotNil(t, err) -} - -func TestStart_NoMaxAllowedPeers(t *testing.T) { - - n, _ := node.NewNode(node.WithPort(4000), node.WithMarshalizer(mock.MarshalizerMock{}), node.WithHasher(mock.HasherMock{})) - err := n.Start() - defer func() { _ = n.Stop() }() - assert.NotNil(t, err) -} - func TestStart_CorrectParams(t *testing.T) { + messenger := getMessenger() n, _ := node.NewNode( - node.WithPort(4000), + node.WithMessenger(messenger), node.WithMarshalizer(mock.MarshalizerMock{}), node.WithHasher(mock.HasherMock{}), - node.WithMaxAllowedPeers(4), node.WithContext(context.Background()), - node.WithPubSubStrategy(p2p.GossipSub), node.WithAddressConverter(&mock.AddressConverterStub{}), node.WithAccountsAdapter(&mock.AccountsAdapterStub{}), ) @@ -105,11 +79,11 @@ func TestStart_CorrectParams(t *testing.T) { func TestStart_CorrectParamsApplyingOptions(t *testing.T) { n, _ := node.NewNode() + messenger := getMessenger() err := n.ApplyOptions( - node.WithPort(4000), + node.WithMessenger(messenger), node.WithMarshalizer(mock.MarshalizerMock{}), node.WithHasher(mock.HasherMock{}), - node.WithMaxAllowedPeers(4), node.WithAddressConverter(&mock.AddressConverterStub{}), node.WithAccountsAdapter(&mock.AccountsAdapterStub{}), ) @@ -124,18 +98,18 @@ func TestStart_CorrectParamsApplyingOptions(t *testing.T) { func TestApplyOptions_NodeStarted(t *testing.T) { + messenger := getMessenger() n, _ := node.NewNode( - node.WithPort(4000), + node.WithMessenger(messenger), node.WithMarshalizer(mock.MarshalizerMock{}), node.WithHasher(mock.HasherMock{}), - node.WithMaxAllowedPeers(4), ) err := n.Start() defer func() { _ = n.Stop() }() logError(err) err = n.ApplyOptions( - node.WithMaxAllowedPeers(4), + node.WithContext(context.Background()), ) assert.NotNil(t, err) @@ -145,30 +119,9 @@ func TestApplyOptions_NodeStarted(t *testing.T) { func TestStop_NotStartedYet(t *testing.T) { n, _ := node.NewNode( - node.WithPort(4000), - node.WithMarshalizer(mock.MarshalizerMock{}), - node.WithHasher(mock.HasherMock{}), - node.WithMaxAllowedPeers(4), - node.WithContext(context.Background()), - node.WithPubSubStrategy(p2p.GossipSub), - ) - err := n.Start() - defer func() { _ = n.Stop() }() - logError(err) - err = n.Stop() - assert.Nil(t, err) - assert.False(t, n.IsRunning()) -} - -func TestStop(t *testing.T) { - - n, _ := node.NewNode( - node.WithPort(4000), node.WithMarshalizer(mock.MarshalizerMock{}), node.WithHasher(mock.HasherMock{}), - node.WithMaxAllowedPeers(4), node.WithContext(context.Background()), - node.WithPubSubStrategy(p2p.GossipSub), ) err := n.Stop() @@ -176,78 +129,47 @@ func TestStop(t *testing.T) { assert.False(t, n.IsRunning()) } -func TestConnectToAddresses_NodeNotStarted(t *testing.T) { - - n2, _ := node.NewNode( - node.WithPort(4001), - node.WithMarshalizer(mock.MarshalizerMock{}), - node.WithHasher(mock.HasherMock{}), - node.WithMaxAllowedPeers(4), - ) - err := n2.Start() - defer func() { _ = n2.Stop() }() - assert.Nil(t, err) - addr, _ := n2.Address() - +func TestStop_MessengerCloseErrors(t *testing.T) { + errorString := "messenger close error" + messenger := getMessenger() + messenger.CloseCalled = func() error { + return errors.New(errorString) + } n, _ := node.NewNode( - node.WithPort(4000), + node.WithMessenger(messenger), node.WithMarshalizer(mock.MarshalizerMock{}), node.WithHasher(mock.HasherMock{}), - node.WithMaxAllowedPeers(4), + node.WithContext(context.Background()), ) - err = n.ConnectToAddresses([]string{addr}) + n.Start() + + err := n.Stop() assert.NotNil(t, err) + assert.Contains(t, err.Error(), errorString) } -func TestConnectToAddresses(t *testing.T) { - - n2, _ := node.NewNode( - node.WithPort(4001), - node.WithMarshalizer(mock.MarshalizerMock{}), - node.WithHasher(mock.HasherMock{}), - node.WithMaxAllowedPeers(4), - ) - err := n2.Start() - defer func() { _ = n2.Stop() }() - assert.Nil(t, err) - addr, _ := n2.Address() +func TestStop(t *testing.T) { n, _ := node.NewNode( - node.WithPort(4000), node.WithMarshalizer(mock.MarshalizerMock{}), node.WithHasher(mock.HasherMock{}), - node.WithMaxAllowedPeers(4), + node.WithContext(context.Background()), ) - err = n.Start() - defer func() { _ = n.Stop() }() - assert.Nil(t, err) + err := n.Start() + logError(err) - err = n.ConnectToAddresses([]string{addr}) + err = n.Stop() assert.Nil(t, err) -} - -func TestAddress_NodeNotStarted(t *testing.T) { - - n, _ := node.NewNode( - node.WithPort(4000), - node.WithMarshalizer(mock.MarshalizerMock{}), - node.WithHasher(mock.HasherMock{}), - node.WithMaxAllowedPeers(4), - ) - _, err := n.Address() - assert.NotNil(t, err) + assert.False(t, n.IsRunning()) } func TestGetBalance_NoAddrConverterShouldError(t *testing.T) { n, _ := node.NewNode( - node.WithPort(4000), node.WithMarshalizer(mock.MarshalizerMock{}), node.WithHasher(mock.HasherMock{}), - node.WithMaxAllowedPeers(4), node.WithContext(context.Background()), - node.WithPubSubStrategy(p2p.GossipSub), node.WithAccountsAdapter(&mock.AccountsAdapterStub{}), node.WithPrivateKey(&mock.PrivateKeyStub{}), ) @@ -259,12 +181,9 @@ func TestGetBalance_NoAddrConverterShouldError(t *testing.T) { func TestGetBalance_NoAccAdapterShouldError(t *testing.T) { n, _ := node.NewNode( - node.WithPort(4000), node.WithMarshalizer(mock.MarshalizerMock{}), node.WithHasher(mock.HasherMock{}), - node.WithMaxAllowedPeers(4), node.WithContext(context.Background()), - node.WithPubSubStrategy(p2p.GossipSub), node.WithAddressConverter(&mock.AddressConverterStub{}), node.WithPrivateKey(&mock.PrivateKeyStub{}), ) @@ -275,7 +194,7 @@ func TestGetBalance_NoAccAdapterShouldError(t *testing.T) { func TestGetBalance_CreateAddressFailsShouldError(t *testing.T) { - accAdapter := getAccAdapter(*big.NewInt(0)) + accAdapter := getAccAdapter(big.NewInt(0)) addrConverter := mock.AddressConverterStub{ CreateAddressFromHexHandler: func(hexAddress string) (state.AddressContainer, error) { // Return that will result in a correct run of GenerateTransaction -> will fail test @@ -287,12 +206,9 @@ func TestGetBalance_CreateAddressFailsShouldError(t *testing.T) { } privateKey := getPrivateKey() n, _ := node.NewNode( - node.WithPort(4000), node.WithMarshalizer(mock.MarshalizerMock{}), node.WithHasher(mock.HasherMock{}), - node.WithMaxAllowedPeers(4), node.WithContext(context.Background()), - node.WithPubSubStrategy(p2p.GossipSub), node.WithAddressConverter(addrConverter), node.WithAccountsAdapter(accAdapter), node.WithPrivateKey(privateKey), @@ -309,22 +225,36 @@ func TestGetBalance_GetAccountFailsShouldError(t *testing.T) { return nil, errors.New("error") }, } - addrConverter := getAddressConverter() + addrConverter := mock.NewAddressConverterFake(32, "0x") privateKey := getPrivateKey() n, _ := node.NewNode( - node.WithPort(4000), node.WithMarshalizer(mock.MarshalizerMock{}), node.WithHasher(mock.HasherMock{}), - node.WithMaxAllowedPeers(4), node.WithContext(context.Background()), - node.WithPubSubStrategy(p2p.GossipSub), node.WithAddressConverter(addrConverter), node.WithAccountsAdapter(accAdapter), node.WithPrivateKey(privateKey), ) - _, err := n.GetBalance("address") + _, err := n.GetBalance(createDummyHexAddress(64)) assert.NotNil(t, err) - assert.Equal(t, "could not fetch sender address from provided param", err.Error()) + assert.Contains(t, err.Error(), "could not fetch sender address from provided param") +} + +func createDummyHexAddress(chars int) string { + if chars < 1 { + return "" + } + + var characters = []byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'} + + rdm := rand.New(rand.NewSource(time.Now().Unix())) + + buff := make([]byte, chars) + for i := 0; i < chars; i++ { + buff[i] = characters[rdm.Int()%16] + } + + return string(buff) } func TestGetBalance_GetAccountReturnsNil(t *testing.T) { @@ -334,118 +264,94 @@ func TestGetBalance_GetAccountReturnsNil(t *testing.T) { return nil, nil }, } - addrConverter := getAddressConverter() + addrConverter := mock.NewAddressConverterFake(32, "0x") privateKey := getPrivateKey() n, _ := node.NewNode( - node.WithPort(4000), node.WithMarshalizer(mock.MarshalizerMock{}), node.WithHasher(mock.HasherMock{}), - node.WithMaxAllowedPeers(4), node.WithContext(context.Background()), - node.WithPubSubStrategy(p2p.GossipSub), node.WithAddressConverter(addrConverter), node.WithAccountsAdapter(accAdapter), node.WithPrivateKey(privateKey), ) - balance, err := n.GetBalance("address") + balance, err := n.GetBalance(createDummyHexAddress(64)) assert.Nil(t, err) assert.Equal(t, big.NewInt(0), balance) } func TestGetBalance(t *testing.T) { - accAdapter := getAccAdapter(*big.NewInt(100)) - addrConverter := getAddressConverter() + accAdapter := getAccAdapter(big.NewInt(100)) + addrConverter := mock.NewAddressConverterFake(32, "0x") privateKey := getPrivateKey() n, _ := node.NewNode( - node.WithPort(4000), node.WithMarshalizer(mock.MarshalizerMock{}), node.WithHasher(mock.HasherMock{}), - node.WithMaxAllowedPeers(4), node.WithContext(context.Background()), - node.WithPubSubStrategy(p2p.GossipSub), node.WithAddressConverter(addrConverter), node.WithAccountsAdapter(accAdapter), node.WithPrivateKey(privateKey), ) - balance, err := n.GetBalance("address") + balance, err := n.GetBalance(createDummyHexAddress(64)) assert.Nil(t, err) assert.Equal(t, big.NewInt(100), balance) } +//------- GenerateTransaction + func TestGenerateTransaction_NoAddrConverterShouldError(t *testing.T) { n, _ := node.NewNode( - node.WithPort(4000), node.WithMarshalizer(mock.MarshalizerMock{}), node.WithHasher(mock.HasherMock{}), - node.WithMaxAllowedPeers(4), node.WithContext(context.Background()), - node.WithPubSubStrategy(p2p.GossipSub), node.WithAccountsAdapter(&mock.AccountsAdapterStub{}), node.WithPrivateKey(&mock.PrivateKeyStub{}), ) - _, err := n.GenerateTransaction("sender", "receiver", *big.NewInt(10), "code") + _, err := n.GenerateTransaction("sender", "receiver", big.NewInt(10), "code") assert.NotNil(t, err) } func TestGenerateTransaction_NoAccAdapterShouldError(t *testing.T) { n, _ := node.NewNode( - node.WithPort(4000), node.WithMarshalizer(mock.MarshalizerMock{}), node.WithHasher(mock.HasherMock{}), - node.WithMaxAllowedPeers(4), node.WithContext(context.Background()), - node.WithPubSubStrategy(p2p.GossipSub), node.WithAddressConverter(&mock.AddressConverterStub{}), node.WithPrivateKey(&mock.PrivateKeyStub{}), ) - _, err := n.GenerateTransaction("sender", "receiver", *big.NewInt(10), "code") + _, err := n.GenerateTransaction("sender", "receiver", big.NewInt(10), "code") assert.NotNil(t, err) } func TestGenerateTransaction_NoPrivateKeyShouldError(t *testing.T) { n, _ := node.NewNode( - node.WithPort(4000), node.WithMarshalizer(mock.MarshalizerMock{}), node.WithHasher(mock.HasherMock{}), - node.WithMaxAllowedPeers(4), node.WithContext(context.Background()), - node.WithPubSubStrategy(p2p.GossipSub), node.WithAddressConverter(&mock.AddressConverterStub{}), node.WithAccountsAdapter(&mock.AccountsAdapterStub{}), ) - _, err := n.GenerateTransaction("sender", "receiver", *big.NewInt(10), "code") + _, err := n.GenerateTransaction("sender", "receiver", big.NewInt(10), "code") assert.NotNil(t, err) } func TestGenerateTransaction_CreateAddressFailsShouldError(t *testing.T) { - accAdapter := getAccAdapter(*big.NewInt(0)) - addrConverter := mock.AddressConverterStub{ - CreateAddressFromHexHandler: func(hexAddress string) (state.AddressContainer, error) { - // Return that will result in a correct run of GenerateTransaction -> will fail test - /*return mock.AddressContainerStub{ - }, nil*/ - - return nil, errors.New("error") - }, - } + accAdapter := getAccAdapter(big.NewInt(0)) + addrConverter := mock.NewAddressConverterFake(32, "0x") privateKey := getPrivateKey() n, _ := node.NewNode( - node.WithPort(4000), node.WithMarshalizer(mock.MarshalizerMock{}), node.WithHasher(mock.HasherMock{}), - node.WithMaxAllowedPeers(4), node.WithContext(context.Background()), - node.WithPubSubStrategy(p2p.GossipSub), node.WithAddressConverter(addrConverter), node.WithAccountsAdapter(accAdapter), node.WithPrivateKey(privateKey), ) - _, err := n.GenerateTransaction("sender", "receiver", *big.NewInt(10), "code") + _, err := n.GenerateTransaction("sender", "receiver", big.NewInt(10), "code") assert.NotNil(t, err) } @@ -456,20 +362,17 @@ func TestGenerateTransaction_GetAccountFailsShouldError(t *testing.T) { return nil, errors.New("error") }, } - addrConverter := getAddressConverter() + addrConverter := mock.NewAddressConverterFake(32, "0x") privateKey := getPrivateKey() n, _ := node.NewNode( - node.WithPort(4000), node.WithMarshalizer(mock.MarshalizerMock{}), node.WithHasher(mock.HasherMock{}), - node.WithMaxAllowedPeers(4), node.WithContext(context.Background()), - node.WithPubSubStrategy(p2p.GossipSub), node.WithAddressConverter(addrConverter), node.WithAccountsAdapter(accAdapter), node.WithPrivateKey(privateKey), ) - _, err := n.GenerateTransaction("sender", "receiver", *big.NewInt(10), "code") + _, err := n.GenerateTransaction(createDummyHexAddress(64), createDummyHexAddress(64), big.NewInt(10), "code") assert.NotNil(t, err) } @@ -480,47 +383,41 @@ func TestGenerateTransaction_GetAccountReturnsNilShouldWork(t *testing.T) { return nil, nil }, } - addrConverter := getAddressConverter() + addrConverter := mock.NewAddressConverterFake(32, "0x") privateKey := getPrivateKey() n, _ := node.NewNode( - node.WithPort(4000), node.WithMarshalizer(mock.MarshalizerMock{}), node.WithHasher(mock.HasherMock{}), - node.WithMaxAllowedPeers(4), node.WithContext(context.Background()), - node.WithPubSubStrategy(p2p.GossipSub), node.WithAddressConverter(addrConverter), node.WithAccountsAdapter(accAdapter), node.WithPrivateKey(privateKey), ) - _, err := n.GenerateTransaction("sender", "receiver", *big.NewInt(10), "code") + _, err := n.GenerateTransaction(createDummyHexAddress(64), createDummyHexAddress(64), big.NewInt(10), "code") assert.Nil(t, err) } func TestGenerateTransaction_GetExistingAccountShouldWork(t *testing.T) { - accAdapter := getAccAdapter(*big.NewInt(0)) - addrConverter := getAddressConverter() + accAdapter := getAccAdapter(big.NewInt(0)) + addrConverter := mock.NewAddressConverterFake(32, "0x") privateKey := getPrivateKey() n, _ := node.NewNode( - node.WithPort(4000), node.WithMarshalizer(mock.MarshalizerMock{}), node.WithHasher(mock.HasherMock{}), - node.WithMaxAllowedPeers(4), node.WithContext(context.Background()), - node.WithPubSubStrategy(p2p.GossipSub), node.WithAddressConverter(addrConverter), node.WithAccountsAdapter(accAdapter), node.WithPrivateKey(privateKey), ) - _, err := n.GenerateTransaction("sender", "receiver", *big.NewInt(10), "code") + _, err := n.GenerateTransaction(createDummyHexAddress(64), createDummyHexAddress(64), big.NewInt(10), "code") assert.Nil(t, err) } func TestGenerateTransaction_MarshalErrorsShouldError(t *testing.T) { - accAdapter := getAccAdapter(*big.NewInt(0)) - addrConverter := getAddressConverter() + accAdapter := getAccAdapter(big.NewInt(0)) + addrConverter := mock.NewAddressConverterFake(32, "0x") privateKey := getPrivateKey() marshalizer := mock.MarshalizerMock{ MarshalHandler: func(obj interface{}) ([]byte, error) { @@ -528,48 +425,42 @@ func TestGenerateTransaction_MarshalErrorsShouldError(t *testing.T) { }, } n, _ := node.NewNode( - node.WithPort(4000), node.WithMarshalizer(marshalizer), node.WithHasher(mock.HasherMock{}), - node.WithMaxAllowedPeers(4), node.WithContext(context.Background()), - node.WithPubSubStrategy(p2p.GossipSub), node.WithAddressConverter(addrConverter), node.WithAccountsAdapter(accAdapter), node.WithPrivateKey(privateKey), ) - _, err := n.GenerateTransaction("sender", "receiver", *big.NewInt(10), "code") + _, err := n.GenerateTransaction("sender", "receiver", big.NewInt(10), "code") assert.NotNil(t, err) } func TestGenerateTransaction_SignTxErrorsShouldError(t *testing.T) { - accAdapter := getAccAdapter(*big.NewInt(0)) - addrConverter := getAddressConverter() + accAdapter := getAccAdapter(big.NewInt(0)) + addrConverter := mock.NewAddressConverterFake(32, "0x") privateKey := mock.PrivateKeyStub{ SignHandler: func(message []byte) ([]byte, error) { return nil, errors.New("error") }, } n, _ := node.NewNode( - node.WithPort(4000), node.WithMarshalizer(mock.MarshalizerMock{}), node.WithHasher(mock.HasherMock{}), - node.WithMaxAllowedPeers(4), node.WithContext(context.Background()), - node.WithPubSubStrategy(p2p.GossipSub), node.WithAddressConverter(addrConverter), node.WithAccountsAdapter(accAdapter), node.WithPrivateKey(privateKey), ) - _, err := n.GenerateTransaction("sender", "receiver", *big.NewInt(10), "code") + _, err := n.GenerateTransaction(createDummyHexAddress(64), createDummyHexAddress(64), big.NewInt(10), "code") assert.NotNil(t, err) } func TestGenerateTransaction_ShouldSetCorrectSignature(t *testing.T) { - accAdapter := getAccAdapter(*big.NewInt(0)) - addrConverter := getAddressConverter() + accAdapter := getAccAdapter(big.NewInt(0)) + addrConverter := mock.NewAddressConverterFake(32, "0x") signature := []byte{69} privateKey := mock.PrivateKeyStub{ SignHandler: func(message []byte) ([]byte, error) { @@ -578,18 +469,15 @@ func TestGenerateTransaction_ShouldSetCorrectSignature(t *testing.T) { } n, _ := node.NewNode( - node.WithPort(4000), node.WithMarshalizer(mock.MarshalizerMock{}), node.WithHasher(mock.HasherMock{}), - node.WithMaxAllowedPeers(4), node.WithContext(context.Background()), - node.WithPubSubStrategy(p2p.GossipSub), node.WithAddressConverter(addrConverter), node.WithAccountsAdapter(accAdapter), node.WithPrivateKey(privateKey), ) - tx, err := n.GenerateTransaction("sender", "receiver", *big.NewInt(10), "code") + tx, err := n.GenerateTransaction(createDummyHexAddress(64), createDummyHexAddress(64), big.NewInt(10), "code") assert.Nil(t, err) assert.Equal(t, signature, tx.Signature) } @@ -603,228 +491,257 @@ func TestGenerateTransaction_ShouldSetCorrectNonce(t *testing.T) { BaseAccountHandler: func() *state.Account { return &state.Account{ Nonce: nonce, - Balance: *big.NewInt(0), + Balance: big.NewInt(0), } }, }, nil }, } - addrConverter := getAddressConverter() + addrConverter := mock.NewAddressConverterFake(32, "0x") privateKey := getPrivateKey() n, _ := node.NewNode( - node.WithPort(4000), node.WithMarshalizer(mock.MarshalizerMock{}), node.WithHasher(mock.HasherMock{}), - node.WithMaxAllowedPeers(4), node.WithContext(context.Background()), - node.WithPubSubStrategy(p2p.GossipSub), node.WithAddressConverter(addrConverter), node.WithAccountsAdapter(accAdapter), node.WithPrivateKey(privateKey), ) - tx, err := n.GenerateTransaction("sender", "receiver", *big.NewInt(10), "code") + tx, err := n.GenerateTransaction(createDummyHexAddress(64), createDummyHexAddress(64), big.NewInt(10), "code") assert.Nil(t, err) assert.Equal(t, nonce, tx.Nonce) } func TestGenerateTransaction_CorrectParamsShouldNotError(t *testing.T) { - accAdapter := getAccAdapter(*big.NewInt(0)) - addrConverter := getAddressConverter() + accAdapter := getAccAdapter(big.NewInt(0)) + addrConverter := mock.NewAddressConverterFake(32, "0x") privateKey := getPrivateKey() n, _ := node.NewNode( - node.WithPort(4000), node.WithMarshalizer(mock.MarshalizerMock{}), node.WithHasher(mock.HasherMock{}), - node.WithMaxAllowedPeers(4), node.WithContext(context.Background()), - node.WithPubSubStrategy(p2p.GossipSub), node.WithAddressConverter(addrConverter), node.WithAccountsAdapter(accAdapter), node.WithPrivateKey(privateKey), ) - _, err := n.GenerateTransaction("sender", "receiver", *big.NewInt(10), "code") + _, err := n.GenerateTransaction(createDummyHexAddress(64), createDummyHexAddress(64), big.NewInt(10), "code") assert.Nil(t, err) } -func getAccAdapter(balance big.Int) mock.AccountsAdapterStub { - return mock.AccountsAdapterStub{ - GetExistingAccountHandler: func(addrContainer state.AddressContainer) (state.AccountWrapper, error) { - return mock.AccountWrapperStub{ - BaseAccountHandler: func() *state.Account { - return &state.Account{ - Nonce: 1, - Balance: balance, - } - }, - }, nil - }, - } -} +//------- GenerateAndSendBulkTransactions -func getPrivateKey() mock.PrivateKeyStub { - return mock.PrivateKeyStub{ - SignHandler: func(message []byte) ([]byte, error) { - return []byte{2}, nil - }, - } +func TestGenerateAndSendBulkTransactions_ZeroTxShouldErr(t *testing.T) { + n, _ := node.NewNode() + + err := n.GenerateAndSendBulkTransactions("", big.NewInt(0), 0) + assert.Equal(t, "can not generate and broadcast 0 transactions", err.Error()) } -func getAddressConverter() mock.AddressConverterStub { - return mock.AddressConverterStub{ - CreateAddressFromHexHandler: func(hexAddress string) (state.AddressContainer, error) { - // Return that will result in a correct run of GenerateTransaction -> will fail test - return mock.AddressContainerStub{}, nil - }, +func TestGenerateAndSendBulkTransactions_NilAccountAdapterShouldErr(t *testing.T) { + marshalizer := &mock.MarshalizerFake{} + + mes := &mock.MessengerStub{} + mes.GetTopicCalled = func(name string) *p2p.Topic { + return nil } -} -func TestBindInterceptorsResolvers_NodeNotStartedShouldErr(t *testing.T) { - n, _ := node.NewNode() + addrConverter := mock.NewAddressConverterFake(32, "0x") + sk, pk := schnorr.NewKeyGenerator().GeneratePair() - err := n.BindInterceptorsResolvers() + n, _ := node.NewNode( + node.WithMarshalizer(marshalizer), + node.WithHasher(mock.HasherMock{}), + node.WithContext(context.Background()), + node.WithAddressConverter(addrConverter), + node.WithPrivateKey(sk), + node.WithPublicKey(pk), + ) - assert.Equal(t, "node is not started yet", err.Error()) + err := n.GenerateAndSendBulkTransactions(createDummyHexAddress(64), big.NewInt(0), 1) + assert.Equal(t, node.ErrNilAccountsAdapter, err) } -func TestBindInterceptorsResolvers_ShouldWork(t *testing.T) { +func TestGenerateAndSendBulkTransactions_NilAddressConverterShouldErr(t *testing.T) { + marshalizer := &mock.MarshalizerFake{} + accAdapter := getAccAdapter(big.NewInt(0)) + sk, pk := schnorr.NewKeyGenerator().GeneratePair() n, _ := node.NewNode( - node.WithDataPool(createDataPoolMock()), - node.WithAddressConverter(mock.AddressConverterStub{}), + node.WithMarshalizer(marshalizer), node.WithHasher(mock.HasherMock{}), - node.WithSingleSignKeyGenerator(&mock.SingleSignKeyGenMock{}), - node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), - node.WithMarshalizer(&mock.MarshalizerMock{}), - node.WithBlockChain(createStubBlockchain()), - node.WithUint64ByteSliceConverter(mock.NewNonceHashConverterMock()), + node.WithContext(context.Background()), + node.WithAccountsAdapter(accAdapter), + node.WithPrivateKey(sk), + node.WithPublicKey(pk), ) - mes := mock.NewMessengerStub() - n.SetMessenger(mes) - - prepareMessenger(mes) + err := n.GenerateAndSendBulkTransactions(createDummyHexAddress(64), big.NewInt(0), 1) + assert.Equal(t, node.ErrNilAddressConverter, err) +} - err := n.BindInterceptorsResolvers() +func TestGenerateAndSendBulkTransactions_NilPrivateKeyShouldErr(t *testing.T) { + accAdapter := getAccAdapter(big.NewInt(0)) + addrConverter := mock.NewAddressConverterFake(32, "0x") + _, pk := schnorr.NewKeyGenerator().GeneratePair() + n, _ := node.NewNode( + node.WithAccountsAdapter(accAdapter), + node.WithAddressConverter(addrConverter), + node.WithPublicKey(pk), + node.WithMarshalizer(&mock.MarshalizerFake{}), + ) - assert.Nil(t, err) + err := n.GenerateAndSendBulkTransactions(createDummyHexAddress(64), big.NewInt(0), 1) + assert.True(t, strings.Contains(err.Error(), "trying to set nil private key")) } -func createDataPoolMock() *mock.TransientDataPoolMock { - dataPool := &mock.TransientDataPoolMock{} +func TestGenerateAndSendBulkTransactions_NilPublicKeyShouldErr(t *testing.T) { + accAdapter := getAccAdapter(big.NewInt(0)) + addrConverter := mock.NewAddressConverterFake(32, "0x") + sk, _ := schnorr.NewKeyGenerator().GeneratePair() + n, _ := node.NewNode( + node.WithAccountsAdapter(accAdapter), + node.WithAddressConverter(addrConverter), + node.WithPrivateKey(sk), + ) - dataPool.TransactionsCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - dataPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - dataPool.HeadersNoncesCalled = func() data.Uint64Cacher { - return &mock.Uint64CacherStub{} - } - dataPool.TxBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - dataPool.PeerChangesBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - dataPool.StateBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } + err := n.GenerateAndSendBulkTransactions("", big.NewInt(0), 1) + assert.Equal(t, "trying to set nil public key", err.Error()) +} + +func TestGenerateAndSendBulkTransactions_InvalidReceiverAddressShouldErr(t *testing.T) { + accAdapter := getAccAdapter(big.NewInt(0)) + addrConverter := mock.NewAddressConverterFake(32, "0x") + sk, pk := schnorr.NewKeyGenerator().GeneratePair() + n, _ := node.NewNode( + node.WithAccountsAdapter(accAdapter), + node.WithAddressConverter(addrConverter), + node.WithPrivateKey(sk), + node.WithPublicKey(pk), + ) - return dataPool + err := n.GenerateAndSendBulkTransactions("", big.NewInt(0), 1) + assert.Contains(t, err.Error(), "could not create receiver address from provided param") } -func prepareMessenger(mes *mock.MessengerStub) { - registration := func(v pubsub.Validator) error { - return nil +func TestGenerateAndSendBulkTransactions_CreateAddressFromPublicKeyBytesErrorsShouldErr(t *testing.T) { + accAdapter := getAccAdapter(big.NewInt(0)) + addrConverter := &mock.AddressConverterStub{} + addrConverter.CreateAddressFromPublicKeyBytesHandler = func(pubKey []byte) (container state.AddressContainer, e error) { + return nil, errors.New("error") } + sk, pk := schnorr.NewKeyGenerator().GeneratePair() + n, _ := node.NewNode( + node.WithAccountsAdapter(accAdapter), + node.WithAddressConverter(addrConverter), + node.WithPrivateKey(sk), + node.WithPublicKey(pk), + ) - topicTx := p2p.NewTopic("", &mock.StringCreatorMock{}, mock.MarshalizerMock{}) - topicTx.RegisterTopicValidator = registration - topicHdr := p2p.NewTopic("", &mock.StringCreatorMock{}, mock.MarshalizerMock{}) - topicHdr.RegisterTopicValidator = registration - topicTxBlk := p2p.NewTopic("", &mock.StringCreatorMock{}, mock.MarshalizerMock{}) - topicTxBlk.RegisterTopicValidator = registration - topicPeerBlk := p2p.NewTopic("", &mock.StringCreatorMock{}, mock.MarshalizerMock{}) - topicPeerBlk.RegisterTopicValidator = registration - topicStateBlk := p2p.NewTopic("", &mock.StringCreatorMock{}, mock.MarshalizerMock{}) - topicStateBlk.RegisterTopicValidator = registration + err := n.GenerateAndSendBulkTransactions("", big.NewInt(0), 1) + assert.Equal(t, "error", err.Error()) +} - mes.GetTopicCalled = func(name string) *p2p.Topic { - switch name { - case string(node.TransactionTopic): - return topicTx - case string(node.HeadersTopic): - return topicHdr - case string(node.TxBlockBodyTopic): - return topicTxBlk - case string(node.PeerChBodyTopic): - return topicPeerBlk - case string(node.StateBodyTopic): - return topicStateBlk - } +func TestGenerateAndSendBulkTransactions_MarshalizerErrorsShouldErr(t *testing.T) { + accAdapter := getAccAdapter(big.NewInt(0)) + addrConverter := mock.NewAddressConverterFake(32, "0x") + marshalizer := &mock.MarshalizerFake{} + marshalizer.Fail = true + sk, pk := schnorr.NewKeyGenerator().GeneratePair() + n, _ := node.NewNode( + node.WithAccountsAdapter(accAdapter), + node.WithAddressConverter(addrConverter), + node.WithPrivateKey(sk), + node.WithPublicKey(pk), + node.WithMarshalizer(marshalizer), + ) - return nil - } + err := n.GenerateAndSendBulkTransactions(createDummyHexAddress(64), big.NewInt(1), 1) + assert.True(t, strings.Contains(err.Error(), "could not marshal transaction")) } -func createStubBlockchain() *blockchain.BlockChain { - blkc, _ := blockchain.NewBlockChain( - &mock.CacherStub{}, - &mock.StorerStub{}, - &mock.StorerStub{}, - &mock.StorerStub{}, - &mock.StorerStub{}, - &mock.StorerStub{}) +func TestGenerateAndSendBulkTransactions_ShouldWork(t *testing.T) { + marshalizer := &mock.MarshalizerFake{} - return blkc -} + noOfTx := 1000 + mutRecoveredTransactions := &sync.RWMutex{} + recoveredTransactions := make(map[uint64]*transaction.Transaction) -func TestBindInterceptorsResolvers_CreateInterceptorFailsShouldErr(t *testing.T) { - n, _ := node.NewNode( - node.WithDataPool(createDataPoolMock()), - node.WithHasher(mock.HasherMock{}), - node.WithSingleSignKeyGenerator(&mock.SingleSignKeyGenMock{}), - node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), - node.WithMarshalizer(&mock.MarshalizerMock{}), - node.WithBlockChain(createStubBlockchain()), - node.WithUint64ByteSliceConverter(mock.NewNonceHashConverterMock()), - ) + topic := p2p.NewTopic(string(factory.TransactionTopic), transaction2.NewInterceptedTransaction(), marshalizer) + topic.SendData = func(data []byte) error { + //handler to capture sent data + tx := transaction.Transaction{} - mes := mock.NewMessengerStub() - n.SetMessenger(mes) + err := marshalizer.Unmarshal(&tx, data) + if err != nil { + return err + } - prepareMessenger(mes) + mutRecoveredTransactions.Lock() + recoveredTransactions[tx.Nonce] = &tx + mutRecoveredTransactions.Unlock() - err := n.BindInterceptorsResolvers() + return nil + } - assert.Equal(t, "nil AddressConverter", err.Error()) -} + mes := &mock.MessengerStub{} + mes.GetTopicCalled = func(name string) *p2p.Topic { + if name == string(factory.TransactionTopic) { + return topic + } + + return nil + } -func TestBindInterceptorsResolvers_CreateResolversFailsShouldErr(t *testing.T) { + accAdapter := getAccAdapter(big.NewInt(0)) + addrConverter := mock.NewAddressConverterFake(32, "0x") + sk, pk := schnorr.NewKeyGenerator().GeneratePair() n, _ := node.NewNode( - node.WithDataPool(createDataPoolMock()), - node.WithAddressConverter(mock.AddressConverterStub{}), + node.WithMarshalizer(marshalizer), node.WithHasher(mock.HasherMock{}), - node.WithSingleSignKeyGenerator(&mock.SingleSignKeyGenMock{}), - node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), - node.WithMarshalizer(&mock.MarshalizerMock{}), - node.WithBlockChain(createStubBlockchain()), + node.WithContext(context.Background()), + node.WithAddressConverter(addrConverter), + node.WithAccountsAdapter(accAdapter), + node.WithPrivateKey(sk), + node.WithPublicKey(pk), ) - mes := mock.NewMessengerStub() n.SetMessenger(mes) - prepareMessenger(mes) + err := n.GenerateAndSendBulkTransactions(createDummyHexAddress(64), big.NewInt(1), uint64(noOfTx)) + assert.Nil(t, err) + mutRecoveredTransactions.RLock() + assert.Equal(t, noOfTx, len(recoveredTransactions)) + mutRecoveredTransactions.RUnlock() +} - err := n.BindInterceptorsResolvers() +func getAccAdapter(balance *big.Int) mock.AccountsAdapterStub { + return mock.AccountsAdapterStub{ + GetExistingAccountHandler: func(addrContainer state.AddressContainer) (state.AccountWrapper, error) { + return mock.AccountWrapperStub{ + BaseAccountHandler: func() *state.Account { + return &state.Account{ + Nonce: 1, + Balance: balance, + } + }, + }, nil + }, + } +} - assert.Equal(t, "nil nonce converter", err.Error()) +func getPrivateKey() mock.PrivateKeyStub { + return mock.PrivateKeyStub{ + SignHandler: func(message []byte) ([]byte, error) { + return []byte{2}, nil + }, + } } func TestSendTransaction_TopicDoesNotExistsShouldErr(t *testing.T) { - n, _ := node.NewNode() + n, _ := node.NewNode( + node.WithAddressConverter(mock.NewAddressConverterFake(32, "0x")), + ) mes := mock.NewMessengerStub() n.SetMessenger(mes) @@ -834,11 +751,11 @@ func TestSendTransaction_TopicDoesNotExistsShouldErr(t *testing.T) { } nonce := uint64(50) - value := *big.NewInt(567) - sender := "sender" - receiver := "receiver" + value := big.NewInt(567) + sender := createDummyHexAddress(64) + receiver := createDummyHexAddress(64) txData := "data" - signature := "signature" + signature := []byte("signature") tx, err := n.SendTransaction( nonce, @@ -853,20 +770,23 @@ func TestSendTransaction_TopicDoesNotExistsShouldErr(t *testing.T) { } func TestSendTransaction_BroadcastErrShouldErr(t *testing.T) { - n, _ := node.NewNode() + n, _ := node.NewNode( + node.WithMarshalizer(&mock.MarshalizerFake{}), + node.WithAddressConverter(mock.NewAddressConverterFake(32, "0x")), + ) mes := mock.NewMessengerStub() n.SetMessenger(mes) broadcastErr := errors.New("failure") - topicTx := p2p.NewTopic("", &mock.StringCreatorMock{}, mock.MarshalizerMock{}) + topicTx := p2p.NewTopic("", &mock.StringCreatorMock{}, &mock.MarshalizerMock{}) topicTx.SendData = func(data []byte) error { return broadcastErr } mes.GetTopicCalled = func(name string) *p2p.Topic { - if name == string(node.TransactionTopic) { + if name == string(factory.TransactionTopic) { return topicTx } @@ -874,11 +794,11 @@ func TestSendTransaction_BroadcastErrShouldErr(t *testing.T) { } nonce := uint64(50) - value := *big.NewInt(567) - sender := "sender" - receiver := "receiver" + value := big.NewInt(567) + sender := createDummyHexAddress(64) + receiver := createDummyHexAddress(64) txData := "data" - signature := "signature" + signature := []byte("signature") tx, err := n.SendTransaction( nonce, @@ -893,7 +813,10 @@ func TestSendTransaction_BroadcastErrShouldErr(t *testing.T) { } func TestSendTransaction_ShouldWork(t *testing.T) { - n, _ := node.NewNode() + n, _ := node.NewNode( + node.WithMarshalizer(&mock.MarshalizerFake{}), + node.WithAddressConverter(mock.NewAddressConverterFake(32, "0x")), + ) mes := mock.NewMessengerStub() n.SetMessenger(mes) @@ -907,7 +830,7 @@ func TestSendTransaction_ShouldWork(t *testing.T) { } mes.GetTopicCalled = func(name string) *p2p.Topic { - if name == string(node.TransactionTopic) { + if name == string(factory.TransactionTopic) { return topicTx } @@ -915,11 +838,11 @@ func TestSendTransaction_ShouldWork(t *testing.T) { } nonce := uint64(50) - value := *big.NewInt(567) - sender := "sender" - receiver := "receiver" + value := big.NewInt(567) + sender := createDummyHexAddress(64) + receiver := createDummyHexAddress(64) txData := "data" - signature := "signature" + signature := []byte("signature") tx, err := n.SendTransaction( nonce, @@ -934,202 +857,149 @@ func TestSendTransaction_ShouldWork(t *testing.T) { assert.True(t, txSent) } -//------- ComputeNewNoncePrevHash - -func TestNode_ComputeNewNoncePrevHashNilSposWrkShouldErr(t *testing.T) { - n, _ := node.NewNode() - - hdr, txBlock := createTestHdrTxBlockBody() - - newNonce, newPrevHash, blockHash, err := n.ComputeNewNoncePrevHash(nil, hdr, txBlock, []byte("prev hash")) - - assert.Equal(t, uint64(0), newNonce) - assert.Nil(t, newPrevHash) - assert.Nil(t, blockHash) - assert.Equal(t, "nil spos worker", err.Error()) -} - -func TestNode_ComputeNewNoncePrevHashBlockchainShouldErr(t *testing.T) { - sposWrk := &spos.SPOSConsensusWorker{} - n, _ := node.NewNode() - - hdr, txBlock := createTestHdrTxBlockBody() - - newNonce, newPrevHash, blockHash, err := n.ComputeNewNoncePrevHash(sposWrk, hdr, txBlock, []byte("prev hash")) - - assert.Equal(t, uint64(0), newNonce) - assert.Nil(t, newPrevHash) - assert.Nil(t, blockHash) - assert.Equal(t, "nil blockchain", err.Error()) -} - -func TestNode_ComputeNewNoncePrevHashMarshalizerFail1ShouldErr(t *testing.T) { - sposWrk := &spos.SPOSConsensusWorker{} - sposWrk.BlockChain = createStubBlockchain() - - marshalizer := &mock.MarshalizerMock{} +func TestCreateShardedStores_NilShardCoordinatorShouldError(t *testing.T) { + messenger := getMessenger() + dataPool := &mock.TransientDataPoolMock{} n, _ := node.NewNode( - node.WithMarshalizer(marshalizer), + node.WithMessenger(messenger), + node.WithDataPool(dataPool), + node.WithMarshalizer(mock.MarshalizerMock{}), + node.WithHasher(mock.HasherMock{}), + node.WithContext(context.Background()), + node.WithAddressConverter(&mock.AddressConverterStub{}), + node.WithAccountsAdapter(&mock.AccountsAdapterStub{}), ) - - hdr, txBlock := createTestHdrTxBlockBody() - - expectedError := errors.New("marshalizer fail") - - marshalizer.MarshalHandler = func(obj interface{}) (bytes []byte, e error) { - if hdr == obj { - return nil, expectedError - } - - if txBlock == obj { - return []byte("txBlockBodyMarshalized"), nil - } - return nil, nil - } - - newNonce, newPrevHash, blockHash, err := n.ComputeNewNoncePrevHash(sposWrk, hdr, txBlock, []byte("prev hash")) - - assert.Equal(t, uint64(0), newNonce) - assert.Nil(t, newPrevHash) - assert.Nil(t, blockHash) - assert.Equal(t, expectedError, err) + err := n.Start() + logError(err) + defer func() { _ = n.Stop() }() + err = n.CreateShardedStores() + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "nil shard coordinator") } -func TestNode_ComputeNewNoncePrevHashMarshalizerFail2ShouldErr(t *testing.T) { - sposWrk := &spos.SPOSConsensusWorker{} - sposWrk.BlockChain = createStubBlockchain() - - marshalizer := &mock.MarshalizerMock{} - +func TestCreateShardedStores_NilDataPoolShouldError(t *testing.T) { + messenger := getMessenger() + shardCoordinator := mock.NewOneShardCoordinatorMock() n, _ := node.NewNode( - node.WithMarshalizer(marshalizer), + node.WithMessenger(messenger), + node.WithShardCoordinator(shardCoordinator), + node.WithMarshalizer(mock.MarshalizerMock{}), + node.WithHasher(mock.HasherMock{}), + node.WithContext(context.Background()), + node.WithAddressConverter(&mock.AddressConverterStub{}), + node.WithAccountsAdapter(&mock.AccountsAdapterStub{}), ) - - hdr, txBlock := createTestHdrTxBlockBody() - - expectedError := errors.New("marshalizer fail") - - marshalizer.MarshalHandler = func(obj interface{}) (bytes []byte, e error) { - if hdr == obj { - return []byte("hdrHeaderMarshalized"), nil - } - if txBlock == obj { - return nil, expectedError - } - return nil, nil - } - - newNonce, newPrevHash, blockHash, err := n.ComputeNewNoncePrevHash(sposWrk, hdr, txBlock, []byte("prev hash")) - - assert.Equal(t, uint64(0), newNonce) - assert.Nil(t, newPrevHash) - assert.Nil(t, blockHash) - assert.Equal(t, expectedError, err) + err := n.Start() + logError(err) + defer func() { _ = n.Stop() }() + err = n.CreateShardedStores() + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "nil data pool") } -func TestNode_ComputeNewNoncePrevHashShouldWork(t *testing.T) { - sposWrk := &spos.SPOSConsensusWorker{} - sposWrk.BlockChain = createStubBlockchain() - - marshalizer := &mock.MarshalizerMock{} - hasher := &mock.HasherMock{} - +func TestCreateShardedStores_NilTransactionDataPoolShouldError(t *testing.T) { + messenger := getMessenger() + shardCoordinator := mock.NewOneShardCoordinatorMock() + dataPool := &mock.TransientDataPoolMock{} + dataPool.TransactionsCalled = func() data.ShardedDataCacherNotifier { + return nil + } + dataPool.HeadersCalled = func() data.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{} + } n, _ := node.NewNode( - node.WithMarshalizer(marshalizer), - node.WithHasher(hasher), + node.WithMessenger(messenger), + node.WithShardCoordinator(shardCoordinator), + node.WithDataPool(dataPool), + node.WithMarshalizer(mock.MarshalizerMock{}), + node.WithHasher(mock.HasherMock{}), + node.WithContext(context.Background()), + node.WithAddressConverter(&mock.AddressConverterStub{}), + node.WithAccountsAdapter(&mock.AccountsAdapterStub{}), ) + err := n.Start() + logError(err) + defer func() { _ = n.Stop() }() + err = n.CreateShardedStores() + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "nil transaction sharded data store") +} - hdr, txBlock := createTestHdrTxBlockBody() - - marshalizer.MarshalHandler = func(obj interface{}) (bytes []byte, e error) { - if hdr == obj { - return []byte("hdrHeaderMarshalized"), nil - } - if txBlock == obj { - return []byte("txBlockBodyMarshalized"), nil - } - return nil, nil +func TestCreateShardedStores_NilHeaderDataPoolShouldError(t *testing.T) { + messenger := getMessenger() + shardCoordinator := mock.NewOneShardCoordinatorMock() + dataPool := &mock.TransientDataPoolMock{} + dataPool.TransactionsCalled = func() data.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{} } - hasher.ComputeCalled = func(s string) []byte { - if s == "hdrHeaderMarshalized" { - return []byte("hdr hash") - } - if s == "txBlockBodyMarshalized" { - return []byte("tx block body hash") - } + dataPool.HeadersCalled = func() data.ShardedDataCacherNotifier { return nil } - - newNonce, newPrevHash, blockHash, err := n.ComputeNewNoncePrevHash(sposWrk, hdr, txBlock, []byte("prev hash")) - - assert.NotEqual(t, uint64(0), newNonce) - assert.Equal(t, []byte("hdr hash"), newPrevHash) - assert.Equal(t, []byte("tx block body hash"), blockHash) - assert.Nil(t, err) + n, _ := node.NewNode( + node.WithMessenger(messenger), + node.WithShardCoordinator(shardCoordinator), + node.WithDataPool(dataPool), + node.WithMarshalizer(mock.MarshalizerMock{}), + node.WithHasher(mock.HasherMock{}), + node.WithContext(context.Background()), + node.WithAddressConverter(&mock.AddressConverterStub{}), + node.WithAccountsAdapter(&mock.AccountsAdapterStub{}), + ) + err := n.Start() + logError(err) + defer func() { _ = n.Stop() }() + err = n.CreateShardedStores() + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "nil header sharded data store") } -func createTestHdrTxBlockBody() (*block.Header, *block.TxBlockBody) { - hasher := mock.HasherFake{} - - hdr := &block.Header{ - Nonce: 1, - ShardId: 2, - Epoch: 3, - Round: 4, - TimeStamp: uint64(11223344), - PrevHash: hasher.Compute("prev hash"), - BlockBodyHash: hasher.Compute("tx block body hash"), - PubKeysBitmap: []byte{255, 0, 128}, - Commitment: hasher.Compute("commitment"), - Signature: hasher.Compute("signature"), +func TestCreateShardedStores_ReturnsSuccessfully(t *testing.T) { + messenger := getMessenger() + shardCoordinator := mock.NewOneShardCoordinatorMock() + nrOfShards := uint32(2) + shardCoordinator.SetNoShards(nrOfShards) + dataPool := &mock.TransientDataPoolMock{} + var txShardedDataResult uint32 + txShardedData := &mock.ShardedDataStub{} + txShardedData.CreateShardStoreCalled = func(destShardID uint32) { + txShardedDataResult = destShardID } - - txBlock := &block.TxBlockBody{ - StateBlockBody: block.StateBlockBody{ - RootHash: hasher.Compute("root hash"), - }, - MiniBlocks: []block.MiniBlock{ - { - ShardID: 0, - TxHashes: [][]byte{ - hasher.Compute("txHash_0_1"), - hasher.Compute("txHash_0_2"), - }, - }, - { - ShardID: 1, - TxHashes: [][]byte{ - hasher.Compute("txHash_1_1"), - hasher.Compute("txHash_1_2"), - }, - }, - { - ShardID: 2, - TxHashes: [][]byte{ - hasher.Compute("txHash_2_1"), - }, - }, - { - ShardID: 3, - TxHashes: make([][]byte, 0), - }, - }, + var headerShardedDataResult uint32 + headerShardedData := &mock.ShardedDataStub{} + headerShardedData.CreateShardStoreCalled = func(destShardID uint32) { + headerShardedDataResult = destShardID } - - return hdr, txBlock + dataPool.TransactionsCalled = func() data.ShardedDataCacherNotifier { + return txShardedData + } + dataPool.HeadersCalled = func() data.ShardedDataCacherNotifier { + return headerShardedData + } + n, _ := node.NewNode( + node.WithMessenger(messenger), + node.WithShardCoordinator(shardCoordinator), + node.WithDataPool(dataPool), + node.WithMarshalizer(mock.MarshalizerMock{}), + node.WithHasher(mock.HasherMock{}), + node.WithContext(context.Background()), + node.WithAddressConverter(&mock.AddressConverterStub{}), + node.WithAccountsAdapter(&mock.AccountsAdapterStub{}), + ) + err := n.Start() + logError(err) + defer func() { _ = n.Stop() }() + err = n.CreateShardedStores() + assert.Nil(t, err) + assert.Equal(t, txShardedDataResult, nrOfShards-1) + assert.Equal(t, headerShardedDataResult, nrOfShards-1) } -//------- ComputeNewNoncePrevHash - -func TestNode_DisplayLogInfo(t *testing.T) { - hasher := mock.HasherFake{} - hdr, txBlock := createTestHdrTxBlockBody() - - sposWrk := &spos.SPOSConsensusWorker{} - - n, _ := node.NewNode() - - n.DisplayLogInfo(hdr, txBlock, hasher.Compute("header hash"), - hasher.Compute("prev hash"), sposWrk, hasher.Compute("block hash")) +func getMessenger() *mock.MessengerStub { + messenger := mock.NewMessengerStub() + messenger.BootstrapCalled = func(ctx context.Context) {} + messenger.CloseCalled = func() error { + return nil + } + return messenger } diff --git a/p2p/connectParams.go b/p2p/connectParams.go index 2cc37e286a6..53c0d0e3142 100644 --- a/p2p/connectParams.go +++ b/p2p/connectParams.go @@ -22,11 +22,9 @@ type ConnectParams struct { Port int } -// GeneratePrivPubKeys will generate a new private key by using the port -// as a seed for the random generation object -// SHOULD BE USED ONLY IN TESTING!!! -func (params *ConnectParams) GeneratePrivPubKeys(seed int) { - r := rand.New(rand.NewSource(int64(seed))) +// GeneratePrivPubKeys will generate a new private/public key pair starting from a seed +func (params *ConnectParams) GeneratePrivPubKeys(seed int64) { + r := rand.New(rand.NewSource(seed)) prvKey, err := ecdsa.GenerateKey(btcec.S256(), r) @@ -56,7 +54,7 @@ func NewConnectParamsFromPort(port int) (*ConnectParams, error) { params := new(ConnectParams) params.Port = port - params.GeneratePrivPubKeys(port) + params.GeneratePrivPubKeys(int64(port)) params.GenerateIDFromPubKey() return params, nil diff --git a/p2p/memMessenger.go b/p2p/memMessenger.go index 70aa539f9e5..ea5b9ad5fcc 100644 --- a/p2p/memMessenger.go +++ b/p2p/memMessenger.go @@ -24,16 +24,23 @@ import ( const signPrefix = "libp2p-pubsub:" -var mutGloballyRegPeers *sync.Mutex +var mutGloballyRegPeers *sync.RWMutex // globallyRegisteredPeers is the main map used for in memory communication var globallyRegisteredPeers map[peer.ID]*MemMessenger func init() { - mutGloballyRegPeers = &sync.Mutex{} + mutGloballyRegPeers = &sync.RWMutex{} globallyRegisteredPeers = make(map[peer.ID]*MemMessenger) } +// ReInitializeGloballyRegisteredPeers will clean all known memMessenger instances +func ReInitializeGloballyRegisteredPeers() { + mutGloballyRegPeers.Lock() + globallyRegisteredPeers = make(map[peer.ID]*MemMessenger) + mutGloballyRegPeers.Unlock() +} + // MemMessenger is a fake memory Messenger used for testing // TODO keep up with NetMessenger modifications type MemMessenger struct { @@ -174,9 +181,9 @@ func (mm *MemMessenger) ConnectToAddresses(ctx context.Context, addresses []stri for i := 0; i < len(addresses); i++ { addr := peer.ID(base58.Decode(addresses[i])) - mutGloballyRegPeers.Lock() + mutGloballyRegPeers.RLock() val, ok := globallyRegisteredPeers[addr] - mutGloballyRegPeers.Unlock() + mutGloballyRegPeers.RUnlock() if !ok { log.Error(fmt.Sprintf("Bootstrapping the peer '%v' failed! [not found]\n", addresses[i])) @@ -213,7 +220,7 @@ func (mm *MemMessenger) doBootstrap() { temp := make(map[peer.ID]*MemMessenger, 0) - mutGloballyRegPeers.Lock() + mutGloballyRegPeers.RLock() for k, v := range globallyRegisteredPeers { if !mm.rt.Has(k) { mm.rt.Update(k) @@ -221,7 +228,7 @@ func (mm *MemMessenger) doBootstrap() { temp[k] = v } } - mutGloballyRegPeers.Unlock() + mutGloballyRegPeers.RUnlock() mm.mutConnectedPeers.Lock() for k, v := range temp { @@ -249,9 +256,9 @@ func (mm *MemMessenger) PrintConnected() { // AddAddress adds a new address to peer store func (mm *MemMessenger) AddAddress(p peer.ID, addr multiaddr.Multiaddr, ttl time.Duration) { - mutGloballyRegPeers.Lock() + mutGloballyRegPeers.RLock() val, ok := globallyRegisteredPeers[p] - mutGloballyRegPeers.Unlock() + mutGloballyRegPeers.RUnlock() if !ok { val = nil diff --git a/p2p/netMessenger.go b/p2p/netMessenger.go index fb5e5e0ced6..ee62cf288c8 100644 --- a/p2p/netMessenger.go +++ b/p2p/netMessenger.go @@ -41,6 +41,11 @@ const ( GossipSub ) +type message struct { + buff []byte + topic string +} + // NetMessenger implements a libP2P node with added functionality type NetMessenger struct { context context.Context @@ -59,8 +64,10 @@ type NetMessenger struct { closed bool mutTopics sync.RWMutex topics map[string]*Topic - mutGossipCache sync.Mutex + mutGossipCache sync.RWMutex gossipCache *TimeCache + + chSendMessages chan *message } // NewNetMessenger creates a new instance of NetMessenger. @@ -80,8 +87,9 @@ func NewNetMessenger(ctx context.Context, marsh marshal.Marshalizer, hasher hash marsh: marsh, hasher: hasher, topics: make(map[string]*Topic, 0), - mutGossipCache: sync.Mutex{}, + mutGossipCache: sync.RWMutex{}, gossipCache: NewTimeCache(durTimeCache), + chSendMessages: make(chan *message), } node.cn = NewConnNotifier(maxAllowedPeers) @@ -157,6 +165,19 @@ func (nm *NetMessenger) createPubSub(hostP2P host.Host, pubsubStrategy PubSubStr return errors.New("unknown pubsub strategy") } + go func(ps *pubsub.PubSub, ch chan *message) { + for { + select { + case msg := <-ch: + err := ps.Publish(msg.topic, msg.buff) + + log.LogIfError(err) + } + + time.Sleep(time.Microsecond * 100) + } + }(nm.ps, nm.chSendMessages) + return nil } @@ -458,7 +479,15 @@ func (nm *NetMessenger) AddTopic(t *Topic) error { return nil } nm.mutClosed.RUnlock() - return nm.ps.Publish(t.Name(), data) + + go func(topicName string, buffer []byte) { + nm.chSendMessages <- &message{ + buff: buffer, + topic: topicName, + } + }(t.Name(), data) + + return nil } // validator registration func @@ -500,9 +529,9 @@ func (nm *NetMessenger) createRequestTopicAndBind(t *Topic, subscriberRequest *p //test whether we also should broadcast the message (others might have broadcast it just before us) has := false - nm.mutGossipCache.Lock() + nm.mutGossipCache.RLock() has = nm.gossipCache.Has(string(buff)) - nm.mutGossipCache.Unlock() + nm.mutGossipCache.RUnlock() if !has { //only if the current peer did not receive an equal object to cloner, @@ -517,7 +546,14 @@ func (nm *NetMessenger) createRequestTopicAndBind(t *Topic, subscriberRequest *p //wire-up a plain func for publishing on request channel t.Request = func(hash []byte) error { - return nm.ps.Publish(t.Name()+requestTopicSuffix, hash) + go func(topicName string, buffer []byte) { + nm.chSendMessages <- &message{ + buff: buffer, + topic: topicName, + } + }(t.Name()+requestTopicSuffix, hash) + + return nil } //wire-up the validator diff --git a/process/block/export_test.go b/process/block/export_test.go index 433998b5703..4f2c0a8f18e 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -1,22 +1,25 @@ package block import ( + "time" + "github.com/ElrondNetwork/elrond-go-sandbox/data/block" "github.com/ElrondNetwork/elrond-go-sandbox/data/transaction" "github.com/ElrondNetwork/elrond-go-sandbox/p2p" "github.com/ElrondNetwork/elrond-go-sandbox/process" + "github.com/ElrondNetwork/elrond-go-sandbox/storage" ) func (bp *blockProcessor) GetTransactionFromPool(destShardID uint32, txHash []byte) *transaction.Transaction { return bp.getTransactionFromPool(destShardID, txHash) } -func (bp *blockProcessor) RequestTransactionFromNetwork(body *block.TxBlockBody) { - bp.requestBlockTransactions(body) +func (bp *blockProcessor) RequestTransactionFromNetwork(body *block.TxBlockBody) int { + return bp.requestBlockTransactions(body) } -func (bp *blockProcessor) WaitForTxHashes() { - bp.waitForTxHashes() +func (bp *blockProcessor) WaitForTxHashes(waitTime time.Duration) { + bp.waitForTxHashes(waitTime) } func (bp *blockProcessor) ReceivedTransaction(txHash []byte) { @@ -31,10 +34,22 @@ func (gbbi *GenericBlockBodyInterceptor) ProcessBodyBlock(bodyBlock p2p.Creator, return gbbi.processBodyBlock(bodyBlock, rawData) } -func (hdrRes *headerResolver) ResolveHdrRequest(rd process.RequestData) ([]byte, error) { +func (hdrRes *HeaderResolver) ResolveHdrRequest(rd process.RequestData) ([]byte, error) { return hdrRes.resolveHdrRequest(rd) } -func (gbbRes *genericBlockBodyResolver) ResolveBlockBodyRequest(rd process.RequestData) ([]byte, error) { +func (gbbRes *GenericBlockBodyResolver) ResolveBlockBodyRequest(rd process.RequestData) ([]byte, error) { return gbbRes.resolveBlockBodyRequest(rd) } + +func (bp *blockProcessor) ComputeHeaderHash(hdr *block.Header) ([]byte, error) { + return bp.computeHeaderHash(hdr) +} + +func (bp *blockProcessor) DisplayLogInfo(header *block.Header, txBlock *block.TxBlockBody, headerHash []byte) { + bp.displayLogInfo(header, txBlock, headerHash) +} + +func SortTxByNonce(txShardStore storage.Cacher) ([]*transaction.Transaction, [][]byte, error) { + return sortTxByNonce(txShardStore) +} diff --git a/process/block/interceptedBlocks.go b/process/block/interceptedBlocks.go index f82c6e595d6..029a5353645 100644 --- a/process/block/interceptedBlocks.go +++ b/process/block/interceptedBlocks.go @@ -69,6 +69,11 @@ func (inHdr *InterceptedHeader) GetHeader() *block.Header { return inHdr.Header } +// GetUnderlyingObject returns the underlying object +func (inHdr *InterceptedHeader) GetUnderlyingObject() interface{} { + return inHdr.Header +} + //------- InterceptedPeerBlockBody // NewInterceptedPeerBlockBody creates a new instance of InterceptedPeerBlockBody struct @@ -103,6 +108,11 @@ func (inPeerBlkBdy *InterceptedPeerBlockBody) Shard() uint32 { return inPeerBlkBdy.ShardID } +// GetUnderlyingObject returns the underlying object +func (inPeerBlkBdy *InterceptedPeerBlockBody) GetUnderlyingObject() interface{} { + return inPeerBlkBdy.PeerBlockBody +} + //------- InterceptedStateBlockBody // NewInterceptedStateBlockBody creates a new instance of InterceptedStateBlockBody struct @@ -137,6 +147,11 @@ func (inStateBlkBdy *InterceptedStateBlockBody) Shard() uint32 { return inStateBlkBdy.ShardID } +// GetUnderlyingObject returns the underlying object +func (inStateBlkBdy *InterceptedStateBlockBody) GetUnderlyingObject() interface{} { + return inStateBlkBdy.StateBlockBody +} + //------- InterceptedTxBlockBody // NewInterceptedTxBlockBody creates a new instance of InterceptedTxBlockBody struct @@ -170,3 +185,8 @@ func (inTxBlkBdy *InterceptedTxBlockBody) ID() string { func (inTxBlkBdy *InterceptedTxBlockBody) Shard() uint32 { return inTxBlkBdy.ShardID } + +// GetUnderlyingObject returns the underlying object +func (inTxBlkBdy *InterceptedTxBlockBody) GetUnderlyingObject() interface{} { + return inTxBlkBdy.TxBlockBody +} diff --git a/process/block/interceptedBlocks_test.go b/process/block/interceptedBlocks_test.go index e4e7ddd497f..31e218e6085 100644 --- a/process/block/interceptedBlocks_test.go +++ b/process/block/interceptedBlocks_test.go @@ -3,108 +3,262 @@ package block_test import ( "testing" - block2 "github.com/ElrondNetwork/elrond-go-sandbox/data/block" "github.com/ElrondNetwork/elrond-go-sandbox/process/block" "github.com/stretchr/testify/assert" ) //------- InterceptedHeader -func TestInterceptedHeader_AllGettersAndSettersShouldWork(t *testing.T) { +func TestInterceptedHeader_NewShouldNotCreateNilHeader(t *testing.T) { t.Parallel() hdr := block.NewInterceptedHeader() - hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) - hdr.BlockBodyHash = make([]byte, 0) - hdr.BlockBodyType = block2.TxBlock - hdr.Signature = make([]byte, 0) - hdr.Commitment = make([]byte, 0) - hdr.ShardId = 56 + assert.NotNil(t, hdr.Header) +} + +func TestInterceptedHeader_GetUnderlingObjectShouldReturnHeader(t *testing.T) { + t.Parallel() + + hdr := block.NewInterceptedHeader() + + assert.True(t, hdr.GetUnderlyingObject() == hdr.Header) +} + +func TestInterceptedHeader_GetHeaderShouldReturnHeader(t *testing.T) { + t.Parallel() + + hdr := block.NewInterceptedHeader() + + assert.True(t, hdr.GetHeader() == hdr.Header) +} + +func TestInterceptedHeader_GetterSetterHashID(t *testing.T) { + t.Parallel() - hash := []byte("aaaa") + hash := []byte("hash") + + hdr := block.NewInterceptedHeader() hdr.SetHash(hash) + assert.Equal(t, hash, hdr.Hash()) assert.Equal(t, string(hash), hdr.ID()) +} + +func TestInterceptedHeader_ShardShouldWork(t *testing.T) { + t.Parallel() + + shard := uint32(78) + + hdr := block.NewInterceptedHeader() + hdr.ShardId = shard + + assert.Equal(t, shard, hdr.Shard()) +} + +func TestInterceptedHeader_CreateShouldNotProduceNils(t *testing.T) { + t.Parallel() + + hdr := block.NewInterceptedHeader() + hdrCreated := hdr.Create() - newHdr := hdr.Create() - assert.NotNil(t, newHdr) - assert.NotNil(t, newHdr.(*block.InterceptedHeader).Header) + assert.NotNil(t, hdrCreated) + assert.NotNil(t, hdrCreated.(*block.InterceptedHeader).Header) +} + +func TestInterceptedHeader_CreateShouldNotProduceSameObject(t *testing.T) { + t.Parallel() + + hdr := block.NewInterceptedHeader() + hdrCreated := hdr.Create() - assert.Equal(t, uint32(56), hdr.Shard()) + assert.False(t, hdrCreated == hdr) + assert.False(t, hdrCreated.(*block.InterceptedHeader).Header == hdr.Header) } //------- InterceptedPeerBlockBody -func TestInterceptedPeerBlockBody_AllGettersAndSettersShouldWork(t *testing.T) { +func TestInterceptedPeerBlockBody_NewShouldNotCreateNilBlock(t *testing.T) { t.Parallel() peerBlockBody := block.NewInterceptedPeerBlockBody() - peerBlockBody.PeerBlockBody.ShardID = 45 - peerBlockBody.PeerBlockBody.Changes = make([]block2.PeerChange, 0) + assert.NotNil(t, peerBlockBody.PeerBlockBody) + assert.NotNil(t, peerBlockBody.StateBlockBody) +} - assert.Equal(t, uint32(45), peerBlockBody.Shard()) - assert.Equal(t, 0, len(peerBlockBody.Changes)) +func TestInterceptedPeerBlockBody_GetUnderlingObjectShouldReturnBlock(t *testing.T) { + t.Parallel() - hash := []byte("aaaa") + peerBlockBody := block.NewInterceptedPeerBlockBody() + + assert.True(t, peerBlockBody.GetUnderlyingObject() == peerBlockBody.PeerBlockBody) +} + +func TestInterceptedPeerBlockBody_GetterSetterHashID(t *testing.T) { + t.Parallel() + + hash := []byte("hash") + + peerBlockBody := block.NewInterceptedPeerBlockBody() peerBlockBody.SetHash(hash) + assert.Equal(t, hash, peerBlockBody.Hash()) assert.Equal(t, string(hash), peerBlockBody.ID()) +} + +func TestInterceptedPeerBlockBody_ShardShouldWork(t *testing.T) { + t.Parallel() - newPeerBB := peerBlockBody.Create() - assert.NotNil(t, newPeerBB) - assert.NotNil(t, newPeerBB.(*block.InterceptedPeerBlockBody).PeerBlockBody) + shard := uint32(78) - assert.Equal(t, uint32(45), peerBlockBody.Shard()) + peerBlockBody := block.NewInterceptedPeerBlockBody() + peerBlockBody.ShardID = shard + + assert.Equal(t, shard, peerBlockBody.Shard()) +} + +func TestInterceptedPeerBlockBody_CreateShouldNotProduceNils(t *testing.T) { + t.Parallel() + + peerBlockBody := block.NewInterceptedPeerBlockBody() + peerBlockCreated := peerBlockBody.Create() + + assert.NotNil(t, peerBlockCreated) + assert.NotNil(t, peerBlockCreated.(*block.InterceptedPeerBlockBody).PeerBlockBody) +} + +func TestInterceptedPeerBlockBody_CreateShouldNotProduceSameObject(t *testing.T) { + t.Parallel() + + peerBlockBody := block.NewInterceptedPeerBlockBody() + peerBlockCreated := peerBlockBody.Create() + + assert.False(t, peerBlockBody == peerBlockCreated) + assert.False(t, peerBlockCreated.(*block.InterceptedPeerBlockBody).PeerBlockBody == peerBlockBody.PeerBlockBody) } //------- InterceptedStateBlockBody -func TestInterceptedStateBlockBody_AllGettersAndSettersShouldWork(t *testing.T) { +func TestInterceptedStateBlockBody_NewShouldNotCreateNilBlock(t *testing.T) { t.Parallel() stateBlockBody := block.NewInterceptedStateBlockBody() - stateBlockBody.ShardID = 45 - stateBlockBody.RootHash = []byte("aaa") + assert.NotNil(t, stateBlockBody.StateBlockBody) +} + +func TestInterceptedStateBlockBody_GetUnderlingObjectShouldReturnBlock(t *testing.T) { + t.Parallel() - assert.Equal(t, uint32(45), stateBlockBody.Shard()) + stateBlockBody := block.NewInterceptedStateBlockBody() - hash := []byte("aaaa") + assert.True(t, stateBlockBody.GetUnderlyingObject() == stateBlockBody.StateBlockBody) +} + +func TestInterceptedStateBlockBody_GetterSetterHashID(t *testing.T) { + t.Parallel() + + hash := []byte("hash") + + stateBlockBody := block.NewInterceptedStateBlockBody() stateBlockBody.SetHash(hash) + assert.Equal(t, hash, stateBlockBody.Hash()) assert.Equal(t, string(hash), stateBlockBody.ID()) +} - newBB := stateBlockBody.Create() - assert.NotNil(t, newBB) - assert.NotNil(t, newBB.(*block.InterceptedStateBlockBody).StateBlockBody) +func TestInterceptedStateBlockBody_ShardShouldWork(t *testing.T) { + t.Parallel() + + shard := uint32(78) + + stateBlockBody := block.NewInterceptedStateBlockBody() + stateBlockBody.ShardID = shard - assert.Equal(t, uint32(45), stateBlockBody.Shard()) + assert.Equal(t, shard, stateBlockBody.Shard()) +} + +func TestInterceptedStateBlockBody_CreateShouldNotProduceNils(t *testing.T) { + t.Parallel() + + stateBlockBody := block.NewInterceptedStateBlockBody() + stateBlockCreated := stateBlockBody.Create() + + assert.NotNil(t, stateBlockCreated) + assert.NotNil(t, stateBlockCreated.(*block.InterceptedStateBlockBody).StateBlockBody) +} + +func TestInterceptedStateBlockBody_CreateShouldNotProduceSameObject(t *testing.T) { + t.Parallel() + + stateBlockBody := block.NewInterceptedStateBlockBody() + stateBlockCreated := stateBlockBody.Create() + + assert.False(t, stateBlockBody == stateBlockCreated) + assert.False(t, stateBlockCreated.(*block.InterceptedStateBlockBody).StateBlockBody == + stateBlockBody.StateBlockBody) } //------- InterceptedTxBlockBody -func TestInterceptedTxBlockBody_AllGettersAndSettersShouldWork(t *testing.T) { +func TestInterceptedTxBlockBody_NewShouldNotCreateNilBlock(t *testing.T) { t.Parallel() txBlockBody := block.NewInterceptedTxBlockBody() - txBlockBody.TxBlockBody.ShardID = 45 - txBlockBody.TxBlockBody.MiniBlocks = make([]block2.MiniBlock, 0) + assert.NotNil(t, txBlockBody.TxBlockBody) + assert.NotNil(t, txBlockBody.StateBlockBody) +} + +func TestInterceptedTxBlockBody_GetUnderlingObjectShouldReturnBlock(t *testing.T) { + t.Parallel() + + txBlockBody := block.NewInterceptedTxBlockBody() - assert.Equal(t, uint32(45), txBlockBody.Shard()) - assert.Equal(t, 0, len(txBlockBody.MiniBlocks)) + assert.True(t, txBlockBody.GetUnderlyingObject() == txBlockBody.TxBlockBody) +} - hash := []byte("aaaa") +func TestInterceptedTxBlockBody_GetterSetterHashID(t *testing.T) { + t.Parallel() + + hash := []byte("hash") + + txBlockBody := block.NewInterceptedTxBlockBody() txBlockBody.SetHash(hash) + assert.Equal(t, hash, txBlockBody.Hash()) assert.Equal(t, string(hash), txBlockBody.ID()) +} + +func TestInterceptedTxBlockBody_ShardShouldWork(t *testing.T) { + t.Parallel() - newTxBB := txBlockBody.Create() - assert.NotNil(t, newTxBB) - assert.NotNil(t, newTxBB.(*block.InterceptedTxBlockBody).TxBlockBody) + shard := uint32(78) + + txBlockBody := block.NewInterceptedTxBlockBody() + txBlockBody.ShardID = shard + + assert.Equal(t, shard, txBlockBody.Shard()) +} + +func TestInterceptedTxBlockBody_CreateShouldNotProduceNils(t *testing.T) { + t.Parallel() + + txBlockBody := block.NewInterceptedTxBlockBody() + txBlockCreated := txBlockBody.Create() + + assert.NotNil(t, txBlockCreated) + assert.NotNil(t, txBlockCreated.(*block.InterceptedTxBlockBody).TxBlockBody) +} + +func TestInterceptedTxBlockBody_CreateShouldNotProduceSameObject(t *testing.T) { + t.Parallel() + + txBlockBody := block.NewInterceptedTxBlockBody() + txBlockCreated := txBlockBody.Create() - assert.Equal(t, uint32(45), txBlockBody.Shard()) + assert.False(t, txBlockBody == txBlockCreated) + assert.False(t, txBlockCreated.(*block.InterceptedTxBlockBody).TxBlockBody == + txBlockBody.TxBlockBody) } diff --git a/process/block/interceptors.go b/process/block/interceptors.go index b9c78d02476..f75d475de66 100644 --- a/process/block/interceptors.go +++ b/process/block/interceptors.go @@ -13,6 +13,7 @@ import ( type HeaderInterceptor struct { process.Interceptor headers data.ShardedDataCacherNotifier + storer storage.Storer headersNonces data.Uint64Cacher hasher hashing.Hasher shardCoordinator sharding.ShardCoordinator @@ -23,6 +24,7 @@ type GenericBlockBodyInterceptor struct { process.Interceptor cache storage.Cacher hasher hashing.Hasher + storer storage.Storer shardCoordinator sharding.ShardCoordinator } @@ -34,6 +36,7 @@ func NewHeaderInterceptor( interceptor process.Interceptor, headers data.ShardedDataCacherNotifier, headersNonces data.Uint64Cacher, + storer storage.Storer, hasher hashing.Hasher, shardCoordinator sharding.ShardCoordinator, ) (*HeaderInterceptor, error) { @@ -50,6 +53,10 @@ func NewHeaderInterceptor( return nil, process.ErrNilHeadersNoncesDataPool } + if storer == nil { + return nil, process.ErrNilHeadersStorage + } + if hasher == nil { return nil, process.ErrNilHasher } @@ -62,6 +69,7 @@ func NewHeaderInterceptor( Interceptor: interceptor, headers: headers, headersNonces: headersNonces, + storer: storer, hasher: hasher, shardCoordinator: shardCoordinator, } @@ -86,8 +94,8 @@ func (hi *HeaderInterceptor) processHdr(hdr p2p.Creator, rawData []byte) error { return process.ErrBadInterceptorTopicImplementation } - hash := hi.hasher.Compute(string(rawData)) - hdrIntercepted.SetHash(hash) + hashWithSig := hi.hasher.Compute(string(rawData)) + hdrIntercepted.SetHash(hashWithSig) err := hdrIntercepted.IntegrityAndValidity(hi.shardCoordinator) if err != nil { @@ -99,9 +107,16 @@ func (hi *HeaderInterceptor) processHdr(hdr p2p.Creator, rawData []byte) error { return err } - hi.headers.AddData(hash, hdrIntercepted, hdrIntercepted.Shard()) + isHeaderInStorage, _ := hi.storer.Has(hashWithSig) + + if isHeaderInStorage { + log.Debug("intercepted block header already processed") + return nil + } + + hi.headers.AddData(hashWithSig, hdrIntercepted.GetHeader(), hdrIntercepted.Shard()) if hi.checkHeaderForCurrentShard(hdrIntercepted) { - _, _ = hi.headersNonces.HasOrAdd(hdrIntercepted.GetHeader().Nonce, hash) + _, _ = hi.headersNonces.HasOrAdd(hdrIntercepted.GetHeader().Nonce, hashWithSig) } return nil } @@ -118,6 +133,7 @@ func (hi *HeaderInterceptor) checkHeaderForCurrentShard(header process.HeaderInt func NewGenericBlockBodyInterceptor( interceptor process.Interceptor, cache storage.Cacher, + storer storage.Storer, hasher hashing.Hasher, shardCoordinator sharding.ShardCoordinator, ) (*GenericBlockBodyInterceptor, error) { @@ -130,6 +146,10 @@ func NewGenericBlockBodyInterceptor( return nil, process.ErrNilCacher } + if storer == nil { + return nil, process.ErrNilBlockBodyStorage + } + if hasher == nil { return nil, process.ErrNilHasher } @@ -141,6 +161,7 @@ func NewGenericBlockBodyInterceptor( bbIntercept := &GenericBlockBodyInterceptor{ Interceptor: interceptor, cache: cache, + storer: storer, hasher: hasher, shardCoordinator: shardCoordinator, } @@ -173,6 +194,13 @@ func (gbbi *GenericBlockBodyInterceptor) processBodyBlock(bodyBlock p2p.Creator, return err } - _ = gbbi.cache.Put(hash, blockBodyIntercepted) + isBlockInStorage, _ := gbbi.storer.Has(hash) + + if isBlockInStorage { + log.Debug("intercepted block body already processed") + return nil + } + + _ = gbbi.cache.Put(hash, blockBodyIntercepted.GetUnderlyingObject()) return nil } diff --git a/process/block/interceptors_test.go b/process/block/interceptors_test.go index 7543ebd0c2d..9243a9ea87a 100644 --- a/process/block/interceptors_test.go +++ b/process/block/interceptors_test.go @@ -20,11 +20,13 @@ func TestNewHeaderInterceptor_NilMessengerShouldErr(t *testing.T) { headers := &mock.ShardedDataStub{} headersNonces := &mock.Uint64CacherStub{} + storer := &mock.StorerStub{} hi, err := NewHeaderInterceptor( nil, headers, headersNonces, + storer, mock.HasherMock{}, mock.NewOneShardCoordinatorMock()) @@ -37,11 +39,13 @@ func TestNewHeaderInterceptor_NilHeadersShouldErr(t *testing.T) { interceptor := &mock.InterceptorStub{} headersNonces := &mock.Uint64CacherStub{} + storer := &mock.StorerStub{} hi, err := NewHeaderInterceptor( interceptor, nil, headersNonces, + storer, mock.HasherMock{}, mock.NewOneShardCoordinatorMock()) @@ -54,11 +58,13 @@ func TestNewHeaderInterceptor_NilHeadersNoncesShouldErr(t *testing.T) { interceptor := &mock.InterceptorStub{} headers := &mock.ShardedDataStub{} + storer := &mock.StorerStub{} hi, err := NewHeaderInterceptor( interceptor, headers, nil, + storer, mock.HasherMock{}, mock.NewOneShardCoordinatorMock()) @@ -66,17 +72,38 @@ func TestNewHeaderInterceptor_NilHeadersNoncesShouldErr(t *testing.T) { assert.Nil(t, hi) } +func TestNewHeaderInterceptor_NilStorerShouldErr(t *testing.T) { + t.Parallel() + + interceptor := &mock.InterceptorStub{} + headers := &mock.ShardedDataStub{} + headersNonces := &mock.Uint64CacherStub{} + + hi, err := NewHeaderInterceptor( + interceptor, + headers, + headersNonces, + nil, + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + assert.Equal(t, process.ErrNilHeadersStorage, err) + assert.Nil(t, hi) +} + func TestNewHeaderInterceptor_NilHasherShouldErr(t *testing.T) { t.Parallel() interceptor := &mock.InterceptorStub{} headers := &mock.ShardedDataStub{} headersNonces := &mock.Uint64CacherStub{} + storer := &mock.StorerStub{} hi, err := NewHeaderInterceptor( interceptor, headers, headersNonces, + storer, nil, mock.NewOneShardCoordinatorMock()) @@ -90,11 +117,13 @@ func TestNewHeaderInterceptor_NilShardCoordinatorShouldErr(t *testing.T) { interceptor := &mock.InterceptorStub{} headers := &mock.ShardedDataStub{} headersNonces := &mock.Uint64CacherStub{} + storer := &mock.StorerStub{} hi, err := NewHeaderInterceptor( interceptor, headers, headersNonces, + storer, mock.HasherMock{}, nil) @@ -111,11 +140,13 @@ func TestNewHeaderInterceptor_OkValsShouldWork(t *testing.T) { headers := &mock.ShardedDataStub{} headersNonces := &mock.Uint64CacherStub{} + storer := &mock.StorerStub{} hi, err := NewHeaderInterceptor( interceptor, headers, headersNonces, + storer, mock.HasherMock{}, mock.NewOneShardCoordinatorMock()) @@ -134,11 +165,13 @@ func TestHeaderInterceptor_ProcessHdrNilHdrShouldErr(t *testing.T) { headers := &mock.ShardedDataStub{} headersNonces := &mock.Uint64CacherStub{} + storer := &mock.StorerStub{} hi, _ := NewHeaderInterceptor( interceptor, headers, headersNonces, + storer, mock.HasherMock{}, mock.NewOneShardCoordinatorMock()) @@ -154,11 +187,13 @@ func TestHeaderInterceptor_ProcessHdrNilDataToProcessShouldErr(t *testing.T) { headers := &mock.ShardedDataStub{} headersNonces := &mock.Uint64CacherStub{} + storer := &mock.StorerStub{} hi, _ := NewHeaderInterceptor( interceptor, headers, headersNonces, + storer, mock.HasherMock{}, mock.NewOneShardCoordinatorMock()) @@ -174,11 +209,13 @@ func TestHeaderInterceptor_ProcessHdrWrongTypeOfCreatorShouldErr(t *testing.T) { headers := &mock.ShardedDataStub{} headersNonces := &mock.Uint64CacherStub{} + storer := &mock.StorerStub{} hi, _ := NewHeaderInterceptor( interceptor, headers, headersNonces, + storer, mock.HasherMock{}, mock.NewOneShardCoordinatorMock()) @@ -195,11 +232,13 @@ func TestHeaderInterceptor_ProcessHdrSanityCheckFailedShouldErr(t *testing.T) { headers := &mock.ShardedDataStub{} headersNonces := &mock.Uint64CacherStub{} + storer := &mock.StorerStub{} hi, _ := NewHeaderInterceptor( interceptor, headers, headersNonces, + storer, mock.HasherMock{}, mock.NewOneShardCoordinatorMock()) @@ -234,10 +273,16 @@ func TestHeaderInterceptor_ProcessOkValsShouldWork(t *testing.T) { return } + storer := &mock.StorerStub{} + storer.HasCalled = func(key []byte) (bool, error) { + return false, nil + } + hi, _ := NewHeaderInterceptor( interceptor, headers, headersNonces, + storer, mock.HasherMock{}, mock.NewOneShardCoordinatorMock()) @@ -256,6 +301,62 @@ func TestHeaderInterceptor_ProcessOkValsShouldWork(t *testing.T) { assert.Equal(t, 2, wasCalled) } +func TestHeaderInterceptor_ProcessIsInStorageShouldNotAdd(t *testing.T) { + t.Parallel() + + interceptor := &mock.InterceptorStub{} + interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { + } + + wasCalled := 0 + + testedNonce := uint64(67) + + headers := &mock.ShardedDataStub{} + headers.AddDataCalled = func(key []byte, data interface{}, destShardID uint32) { + aaaHash := mock.HasherMock{}.Compute("aaa") + if bytes.Equal(aaaHash, key) { + wasCalled++ + } + } + + headersNonces := &mock.Uint64CacherStub{} + headersNonces.HasOrAddCalled = func(u uint64, i []byte) (b bool, b2 bool) { + if u == testedNonce { + wasCalled++ + } + + return + } + + storer := &mock.StorerStub{} + storer.HasCalled = func(key []byte) (bool, error) { + return true, nil + } + + hi, _ := NewHeaderInterceptor( + interceptor, + headers, + headersNonces, + storer, + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + hdr := NewInterceptedHeader() + hdr.Nonce = testedNonce + hdr.ShardId = 0 + hdr.PrevHash = make([]byte, 0) + hdr.PubKeysBitmap = make([]byte, 0) + hdr.BlockBodyHash = make([]byte, 0) + hdr.BlockBodyType = block2.TxBlock + hdr.Signature = make([]byte, 0) + hdr.Commitment = make([]byte, 0) + hdr.SetHash([]byte("aaa")) + + assert.Nil(t, hi.ProcessHdr(hdr, []byte("aaa"))) + assert.Equal(t, 0, wasCalled) +} + //------- BlockBodyInterceptor //NewBlockBodyInterceptor @@ -264,10 +365,12 @@ func TestNewBlockBodyInterceptor_NilMessengerShouldErr(t *testing.T) { t.Parallel() cache := &mock.CacherStub{} + storer := &mock.StorerStub{} gbbi, err := NewGenericBlockBodyInterceptor( nil, cache, + storer, mock.HasherMock{}, mock.NewOneShardCoordinatorMock()) @@ -279,10 +382,12 @@ func TestNewBlockBodyInterceptor_NilPoolShouldErr(t *testing.T) { t.Parallel() interceptor := &mock.InterceptorStub{} + storer := &mock.StorerStub{} gbbi, err := NewGenericBlockBodyInterceptor( interceptor, nil, + storer, mock.HasherMock{}, mock.NewOneShardCoordinatorMock()) @@ -290,15 +395,34 @@ func TestNewBlockBodyInterceptor_NilPoolShouldErr(t *testing.T) { assert.Nil(t, gbbi) } +func TestNewBlockBodyInterceptor_NilStorerShouldErr(t *testing.T) { + t.Parallel() + + interceptor := &mock.InterceptorStub{} + cache := &mock.CacherStub{} + + gbbi, err := NewGenericBlockBodyInterceptor( + interceptor, + cache, + nil, + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + assert.Equal(t, process.ErrNilBlockBodyStorage, err) + assert.Nil(t, gbbi) +} + func TestNewBlockBodyInterceptor_NilHasherShouldErr(t *testing.T) { t.Parallel() cache := &mock.CacherStub{} interceptor := &mock.InterceptorStub{} + storer := &mock.StorerStub{} gbbi, err := NewGenericBlockBodyInterceptor( interceptor, cache, + storer, nil, mock.NewOneShardCoordinatorMock()) @@ -311,10 +435,12 @@ func TestNewBlockBodyInterceptor_NilShardCoordinatorShouldErr(t *testing.T) { cache := &mock.CacherStub{} interceptor := &mock.InterceptorStub{} + storer := &mock.StorerStub{} gbbi, err := NewGenericBlockBodyInterceptor( interceptor, cache, + storer, mock.HasherMock{}, nil) @@ -329,10 +455,12 @@ func TestNewBlockBodyInterceptor_OkValsShouldWork(t *testing.T) { interceptor := &mock.InterceptorStub{} interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { } + storer := &mock.StorerStub{} gbbi, err := NewGenericBlockBodyInterceptor( interceptor, cache, + storer, mock.HasherMock{}, mock.NewOneShardCoordinatorMock()) @@ -349,10 +477,12 @@ func TestBlockBodyInterceptor_ProcessNilHdrShouldErr(t *testing.T) { interceptor := &mock.InterceptorStub{} interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { } + storer := &mock.StorerStub{} gbbi, _ := NewGenericBlockBodyInterceptor( interceptor, cache, + storer, mock.HasherMock{}, mock.NewOneShardCoordinatorMock()) @@ -366,10 +496,12 @@ func TestBlockBodyInterceptor_ProcessNilDataToProcessShouldErr(t *testing.T) { interceptor := &mock.InterceptorStub{} interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { } + storer := &mock.StorerStub{} gbbi, _ := NewGenericBlockBodyInterceptor( interceptor, cache, + storer, mock.HasherMock{}, mock.NewOneShardCoordinatorMock()) @@ -384,10 +516,12 @@ func TestBlockBodyInterceptor_ProcessHdrWrongTypeOfNewerShouldErr(t *testing.T) interceptor := &mock.InterceptorStub{} interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { } + storer := &mock.StorerStub{} gbbi, _ := NewGenericBlockBodyInterceptor( interceptor, cache, + storer, mock.HasherMock{}, mock.NewOneShardCoordinatorMock()) @@ -402,10 +536,12 @@ func TestBlockBodyInterceptor_ProcessHdrSanityCheckFailedShouldErr(t *testing.T) interceptor := &mock.InterceptorStub{} interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { } + storer := &mock.StorerStub{} gbbi, _ := NewGenericBlockBodyInterceptor( interceptor, cache, + storer, mock.HasherMock{}, mock.NewOneShardCoordinatorMock()) @@ -426,6 +562,10 @@ func TestBlockBodyInterceptor_ProcessOkValsShouldRetTrue(t *testing.T) { return } + storer := &mock.StorerStub{} + storer.HasCalled = func(key []byte) (bool, error) { + return false, nil + } interceptor := &mock.InterceptorStub{} interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { @@ -434,6 +574,7 @@ func TestBlockBodyInterceptor_ProcessOkValsShouldRetTrue(t *testing.T) { gbbi, _ := NewGenericBlockBodyInterceptor( interceptor, cache, + storer, mock.HasherMock{}, mock.NewOneShardCoordinatorMock()) @@ -449,3 +590,45 @@ func TestBlockBodyInterceptor_ProcessOkValsShouldRetTrue(t *testing.T) { assert.Nil(t, gbbi.ProcessBodyBlock(txBody, []byte("aaa"))) assert.Equal(t, 1, wasCalled) } + +func TestBlockBodyInterceptor_ProcessIsInStorageShouldNotAdd(t *testing.T) { + t.Parallel() + + wasCalled := 0 + + cache := &mock.CacherStub{} + cache.PutCalled = func(key []byte, value interface{}) (evicted bool) { + if bytes.Equal(mock.HasherMock{}.Compute("aaa"), key) { + wasCalled++ + } + + return + } + storer := &mock.StorerStub{} + storer.HasCalled = func(key []byte) (bool, error) { + return true, nil + } + + interceptor := &mock.InterceptorStub{} + interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { + } + + gbbi, _ := NewGenericBlockBodyInterceptor( + interceptor, + cache, + storer, + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + miniBlock := block2.MiniBlock{} + miniBlock.TxHashes = append(miniBlock.TxHashes, []byte{65}) + + txBody := NewInterceptedTxBlockBody() + txBody.ShardID = 0 + txBody.MiniBlocks = make([]block2.MiniBlock, 0) + txBody.MiniBlocks = append(txBody.MiniBlocks, miniBlock) + txBody.RootHash = make([]byte, 0) + + assert.Nil(t, gbbi.ProcessBodyBlock(txBody, []byte("aaa"))) + assert.Equal(t, 0, wasCalled) +} diff --git a/process/block/process.go b/process/block/process.go index f2f6178cfe9..0b3539d2f6f 100644 --- a/process/block/process.go +++ b/process/block/process.go @@ -2,7 +2,12 @@ package block import ( "bytes" + "encoding/base64" + "encoding/hex" + "fmt" "math/big" + "sort" + "strconv" "sync" "time" @@ -11,21 +16,25 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/data/blockchain" "github.com/ElrondNetwork/elrond-go-sandbox/data/state" "github.com/ElrondNetwork/elrond-go-sandbox/data/transaction" + "github.com/ElrondNetwork/elrond-go-sandbox/display" "github.com/ElrondNetwork/elrond-go-sandbox/hashing" + "github.com/ElrondNetwork/elrond-go-sandbox/hashing/sha256" "github.com/ElrondNetwork/elrond-go-sandbox/logger" "github.com/ElrondNetwork/elrond-go-sandbox/marshal" "github.com/ElrondNetwork/elrond-go-sandbox/process" "github.com/ElrondNetwork/elrond-go-sandbox/sharding" + "github.com/ElrondNetwork/elrond-go-sandbox/storage" + "github.com/pkg/errors" ) -// WaitTime defines the time in milliseconds until node waits the requested info from the network -const WaitTime = time.Duration(2000 * time.Millisecond) - var log = logger.NewDefaultLogger() +var txsCurrentBlockProcessed = 0 +var txsTotalProcessed = 0 + // blockProcessor implements BlockProcessor interface and actually it tries to execute block type blockProcessor struct { - txPool data.ShardedDataCacherNotifier + dataPool data.TransientDataHolder hasher hashing.Hasher marshalizer marshal.Marshalizer txProcessor process.TransactionProcessor @@ -35,46 +44,98 @@ type blockProcessor struct { mut sync.RWMutex accounts state.AccountsAdapter shardCoordinator sharding.ShardCoordinator + forkDetector process.ForkDetector } // NewBlockProcessor creates a new blockProcessor object func NewBlockProcessor( - txPool data.ShardedDataCacherNotifier, + dataPool data.TransientDataHolder, hasher hashing.Hasher, marshalizer marshal.Marshalizer, txProcessor process.TransactionProcessor, accounts state.AccountsAdapter, shardCoordinator sharding.ShardCoordinator, -) *blockProcessor { - //TODO: check nil values + forkDetector process.ForkDetector, + requestTransactionHandler func(destShardID uint32, txHash []byte), +) (*blockProcessor, error) { + + if dataPool == nil { + return nil, process.ErrNilDataPoolHolder + } + + if hasher == nil { + return nil, process.ErrNilHasher + } + + if marshalizer == nil { + return nil, process.ErrNilMarshalizer + } + + if txProcessor == nil { + return nil, process.ErrNilTxProcessor + } + + if accounts == nil { + return nil, process.ErrNilAccountsAdapter + } + + if shardCoordinator == nil { + return nil, process.ErrNilShardCoordinator + } + + if forkDetector == nil { + return nil, process.ErrNilForkDetector + } + + if requestTransactionHandler == nil { + return nil, process.ErrNilTransactionHandler + } bp := blockProcessor{ - txPool: txPool, + dataPool: dataPool, hasher: hasher, marshalizer: marshalizer, txProcessor: txProcessor, accounts: accounts, shardCoordinator: shardCoordinator, + forkDetector: forkDetector, } bp.ChRcvAllTxs = make(chan bool) + bp.OnRequestTransaction = requestTransactionHandler + + transactionPool := bp.dataPool.Transactions() - bp.txPool.RegisterHandler(bp.receivedTransaction) + if transactionPool == nil { + return nil, process.ErrNilTransactionPool + } + + transactionPool.RegisterHandler(bp.receivedTransaction) - return &bp + return &bp, nil } // ProcessAndCommit takes each transaction from the transactions block body received as parameter // and processes it, updating at the same time the state trie and the associated root hash // if transaction is not valid or not found it will return error. // If all ok it will commit the block and state. -func (bp *blockProcessor) ProcessAndCommit(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody) error { - err := bp.validateHeader(blockChain, header) +func (bp *blockProcessor) ProcessAndCommit( + blockChain *blockchain.BlockChain, + header *block.Header, + body *block.TxBlockBody, + haveTime func() time.Duration, +) error { + err := checkForNils(blockChain, header, body) + if err != nil { + return err + } + + err = bp.validateHeader(blockChain, header) if err != nil { return err } - err = bp.ProcessBlock(blockChain, header, body) + err = bp.processBlock(blockChain, header, body, haveTime) defer func() { if err != nil { @@ -86,7 +147,7 @@ func (bp *blockProcessor) ProcessAndCommit(blockChain *blockchain.BlockChain, he return err } - if !bp.VerifyStateRoot(bp.accounts.RootHash()) { + if !bp.VerifyStateRoot(body.RootHash) { err = process.ErrRootStateMissmatch return err } @@ -99,7 +160,23 @@ func (bp *blockProcessor) ProcessAndCommit(blockChain *blockchain.BlockChain, he return nil } -// RevertAccountState reverets the account state for cleanup failed process +func checkForNils(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody) error { + if blockChain == nil { + return process.ErrNilBlockChain + } + + if header == nil { + return process.ErrNilBlockHeader + } + + if body == nil { + return process.ErrNilTxBlockBody + } + + return nil +} + +// RevertAccountState reverts the account state for cleanup failed process func (bp *blockProcessor) RevertAccountState() { err := bp.accounts.RevertToSnapshot(0) @@ -109,14 +186,39 @@ func (bp *blockProcessor) RevertAccountState() { } // ProcessBlock processes a block. It returns nil if all ok or the speciffic error -func (bp *blockProcessor) ProcessBlock(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody) error { +func (bp *blockProcessor) ProcessBlock(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody, haveTime func() time.Duration) error { + err := checkForNils(blockChain, header, body) + if err != nil { + return err + } + + if haveTime == nil { + return process.ErrNilHaveTimeHandler + } + + return bp.processBlock(blockChain, header, body, haveTime) +} + +func (bp *blockProcessor) processBlock(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody, haveTime func() time.Duration) error { err := bp.validateBlockBody(body) if err != nil { return err } - bp.requestBlockTransactions(body) - bp.waitForTxHashes() + requestedTxs := bp.requestBlockTransactions(body) + + if requestedTxs > 0 { + + log.Info(fmt.Sprintf("requested %d missing txs\n", requestedTxs)) + + err := bp.waitForTxHashes(haveTime()) + + log.Info(fmt.Sprintf("received %d missing txs\n", requestedTxs-len(bp.requestedTxHashes))) + + if err != nil { + return err + } + } if bp.accounts.JournalLen() != 0 { return process.ErrAccountStateDirty @@ -128,7 +230,7 @@ func (bp *blockProcessor) ProcessBlock(blockChain *blockchain.BlockChain, header } }() - err = bp.processBlockTransactions(body, int32(header.Round)) + err = bp.processBlockTransactions(body, int32(header.Round), haveTime) if err != nil { return err @@ -143,15 +245,21 @@ func (bp *blockProcessor) RemoveBlockTxsFromPool(body *block.TxBlockBody) error return process.ErrNilTxBlockBody } + transactionPool := bp.dataPool.Transactions() + + if transactionPool == nil { + return process.ErrNilTransactionPool + } + for i := 0; i < len(body.MiniBlocks); i++ { - bp.txPool.RemoveSetOfDataFromPool(body.MiniBlocks[i].TxHashes, + transactionPool.RemoveSetOfDataFromPool(body.MiniBlocks[i].TxHashes, body.MiniBlocks[i].ShardID) } return nil } -// VerifyStateRoot verifies the state root hash given as parameter agains the +// VerifyStateRoot verifies the state root hash given as parameter against the // Merkle trie root hash stored for accounts and returns if equal or not func (bp *blockProcessor) VerifyStateRoot(rootHash []byte) bool { return bytes.Equal(bp.accounts.RootHash(), rootHash) @@ -179,17 +287,29 @@ func (bp *blockProcessor) CreateTxBlockBody(shardId uint32, maxTxInBlock int, ro return blk, nil } -// CreateGenesisBlockBody creates the genesis block body from map of account balances -func (bp *blockProcessor) CreateGenesisBlockBody(balances map[string]big.Int, shardId uint32) *block.StateBlockBody { - if bp.txProcessor == nil { - panic("transaction Processor is nil") +// CreateEmptyBlockBody creates a new block body without any tx hash +func (bp *blockProcessor) CreateEmptyBlockBody(shardId uint32, round int32) *block.TxBlockBody { + miniBlocks := make([]block.MiniBlock, 0) + + rootHash := bp.accounts.RootHash() + + blk := &block.TxBlockBody{ + StateBlockBody: block.StateBlockBody{ + RootHash: rootHash, + ShardID: shardId, + }, + MiniBlocks: miniBlocks, } + return blk +} + +// CreateGenesisBlockBody creates the genesis block body from map of account balances +func (bp *blockProcessor) CreateGenesisBlockBody(balances map[string]*big.Int, shardId uint32) (*block.StateBlockBody, error) { rootHash, err := bp.txProcessor.SetBalancesToTrie(balances) if err != nil { - // cannot create Genesis block - panic(err) + return nil, errors.New("can not create genesis block body " + err.Error()) } stateBlockBody := &block.StateBlockBody{ @@ -197,7 +317,7 @@ func (bp *blockProcessor) CreateGenesisBlockBody(balances map[string]big.Int, sh ShardID: shardId, } - return stateBlockBody + return stateBlockBody, nil } // GetRootHash returns the accounts merkle tree root hash @@ -224,7 +344,15 @@ func (bp *blockProcessor) validateHeader(blockChain *blockchain.BlockChain, head return process.ErrWrongNonceInBlock } - if !bytes.Equal(header.PrevHash, blockChain.CurrentBlockHeader.BlockBodyHash) { + if !bytes.Equal(header.PrevHash, blockChain.CurrentBlockHeaderHash) { + + log.Info(fmt.Sprintf( + "header.Nonce = %d has header.PrevHash = %s and blockChain.CurrentBlockHeader.Nonce = %d has blockChain.CurrentBlockHeaderHash = %s\n", + header.Nonce, + toB64(header.PrevHash), + blockChain.CurrentBlockHeader.Nonce, + toB64(blockChain.CurrentBlockHeaderHash))) + return process.ErrInvalidBlockHash } } @@ -257,7 +385,7 @@ func (bp *blockProcessor) isFirstBlockInEpoch(header *block.Header) bool { return header.Round == 0 } -func (bp *blockProcessor) processBlockTransactions(body *block.TxBlockBody, round int32) error { +func (bp *blockProcessor) processBlockTransactions(body *block.TxBlockBody, round int32, haveTime func() time.Duration) error { txbWrapper := TxBlockBodyWrapper{ TxBlockBody: body, } @@ -267,14 +395,31 @@ func (bp *blockProcessor) processBlockTransactions(body *block.TxBlockBody, roun return err } + txPool := bp.dataPool.Transactions() + for i := 0; i < len(body.MiniBlocks); i++ { miniBlock := body.MiniBlocks[i] shardId := miniBlock.ShardID + //TODO: Remove this display + bp.displayTxsInfo(&miniBlock, shardId) + for j := 0; j < len(miniBlock.TxHashes); j++ { + if haveTime() < 0 { + return process.ErrTimeIsOut + } + txHash := miniBlock.TxHashes[j] tx := bp.getTransactionFromPool(shardId, txHash) - err := bp.txProcessor.ProcessTransaction(tx, round) + + err := bp.processAndRemoveBadTransaction( + txHash, + tx, + txPool, + round, + miniBlock.ShardID, + ) + if err != nil { return err } @@ -284,7 +429,11 @@ func (bp *blockProcessor) processBlockTransactions(body *block.TxBlockBody, roun } // CommitBlock commits the block in the blockchain if everything was checked successfully -func (bp *blockProcessor) CommitBlock(blockChain *blockchain.BlockChain, header *block.Header, block *block.TxBlockBody) error { +func (bp *blockProcessor) CommitBlock(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody) error { + err := checkForNils(blockChain, header, body) + if err != nil { + return err + } buff, err := bp.marshalizer.Marshal(header) if err != nil { @@ -293,25 +442,29 @@ func (bp *blockProcessor) CommitBlock(blockChain *blockchain.BlockChain, header headerHash := bp.hasher.Compute(string(buff)) err = blockChain.Put(blockchain.BlockHeaderUnit, headerHash, buff) - if err != nil { return process.ErrPersistWithoutSuccess } - buff, err = bp.marshalizer.Marshal(block) - + buff, err = bp.marshalizer.Marshal(body) if err != nil { return process.ErrMarshalWithoutSuccess } err = blockChain.Put(blockchain.TxBlockBodyUnit, header.BlockBodyHash, buff) - if err != nil { return process.ErrPersistWithoutSuccess } - for i := 0; i < len(block.MiniBlocks); i++ { - miniBlock := block.MiniBlocks[i] + headerNoncePool := bp.dataPool.HeadersNonces() + if headerNoncePool == nil { + return process.ErrNilDataPoolHolder + } + + _ = headerNoncePool.Put(header.Nonce, headerHash) + + for i := 0; i < len(body.MiniBlocks); i++ { + miniBlock := body.MiniBlocks[i] for j := 0; j < len(miniBlock.TxHashes); j++ { txHash := miniBlock.TxHashes[j] tx := bp.getTransactionFromPool(miniBlock.ShardID, txHash) @@ -320,83 +473,101 @@ func (bp *blockProcessor) CommitBlock(blockChain *blockchain.BlockChain, header } buff, err = bp.marshalizer.Marshal(tx) - if err != nil { return process.ErrMarshalWithoutSuccess } err = blockChain.Put(blockchain.TransactionUnit, txHash, buff) - if err != nil { return process.ErrPersistWithoutSuccess } } } - err = bp.RemoveBlockTxsFromPool(block) + err = bp.RemoveBlockTxsFromPool(body) if err != nil { log.Error(err.Error()) } _, err = bp.accounts.Commit() - - if err == nil { - blockChain.CurrentTxBlockBody = block - blockChain.CurrentBlockHeader = header - blockChain.LocalHeight = int64(header.Nonce) + if err != nil { + return err } + blockChain.CurrentTxBlockBody = body + blockChain.CurrentBlockHeader = header + blockChain.CurrentBlockHeaderHash = headerHash + err = bp.forkDetector.AddHeader(header, headerHash, false) + + go bp.displayBlockchain(blockChain) + return err } // getTransactionFromPool gets the transaction from a given shard id and a given transaction hash func (bp *blockProcessor) getTransactionFromPool(destShardID uint32, txHash []byte) *transaction.Transaction { - txStore := bp.txPool.ShardDataStore(destShardID) + txPool := bp.dataPool.Transactions() + if txPool == nil { + log.Error(process.ErrNilTransactionPool.Error()) + return nil + } + txStore := txPool.ShardDataStore(destShardID) if txStore == nil { + log.Error(process.ErrNilTxStorage.Error()) return nil } val, ok := txStore.Get(txHash) - if !ok { return nil } - return val.(*transaction.Transaction) + v := val.(*transaction.Transaction) + + return v } // receivedTransaction is a call back function which is called when a new transaction // is added in the transaction pool func (bp *blockProcessor) receivedTransaction(txHash []byte) { bp.mut.Lock() - if bp.requestedTxHashes[string(txHash)] { - delete(bp.requestedTxHashes, string(txHash)) - } + if len(bp.requestedTxHashes) > 0 { + if bp.requestedTxHashes[string(txHash)] { + delete(bp.requestedTxHashes, string(txHash)) + } + lenReqTxHashes := len(bp.requestedTxHashes) + bp.mut.Unlock() - if len(bp.requestedTxHashes) == 0 { - bp.ChRcvAllTxs <- true + if lenReqTxHashes == 0 { + bp.ChRcvAllTxs <- true + } + return } bp.mut.Unlock() } -func (bp *blockProcessor) requestBlockTransactions(body *block.TxBlockBody) { +func (bp *blockProcessor) requestBlockTransactions(body *block.TxBlockBody) int { bp.mut.Lock() + requestedTxs := 0 missingTxsForShards := bp.computeMissingTxsForShards(body) bp.requestedTxHashes = make(map[string]bool) if bp.OnRequestTransaction != nil { for shardId, txHashes := range missingTxsForShards { for _, txHash := range txHashes { + requestedTxs++ bp.requestedTxHashes[string(txHash)] = true bp.OnRequestTransaction(shardId, txHash) } } } bp.mut.Unlock() + return requestedTxs } func (bp *blockProcessor) computeMissingTxsForShards(body *block.TxBlockBody) map[uint32][][]byte { missingTxsForShard := make(map[uint32][][]byte) + for i := 0; i < len(body.MiniBlocks); i++ { miniBlock := body.MiniBlocks[i] shardId := miniBlock.ShardID @@ -410,12 +581,35 @@ func (bp *blockProcessor) computeMissingTxsForShards(body *block.TxBlockBody) ma currentShardMissingTransactions = append(currentShardMissingTransactions, txHash) } } - missingTxsForShard[shardId] = currentShardMissingTransactions + + if len(currentShardMissingTransactions) > 0 { + missingTxsForShard[shardId] = currentShardMissingTransactions + } } return missingTxsForShard } +func (bp *blockProcessor) processAndRemoveBadTransaction( + transactionHash []byte, + transaction *transaction.Transaction, + txPool data.ShardedDataCacherNotifier, + round int32, + shardId uint32, +) error { + if txPool == nil { + return process.ErrNilTransactionPool + } + + err := bp.txProcessor.ProcessTransaction(transaction, round) + + if err == process.ErrLowerNonceInTransaction { + txPool.RemoveData(transactionHash, shardId) + } + + return err +} + func (bp *blockProcessor) createMiniBlocks(noShards uint32, maxTxInBlock int, round int32, haveTime func() bool) ([]block.MiniBlock, error) { miniBlocks := make([]block.MiniBlock, 0) @@ -424,13 +618,32 @@ func (bp *blockProcessor) createMiniBlocks(noShards uint32, maxTxInBlock int, ro } if !haveTime() { + log.Info(fmt.Sprintf("time is up after entered in createMiniBlocks method\n")) return miniBlocks, nil } + txPool := bp.dataPool.Transactions() + + if txPool == nil { + return nil, process.ErrNilTransactionPool + } + for i, txs := 0, 0; i < int(noShards); i++ { - txStore := bp.txPool.ShardDataStore(uint32(i)) + txStore := txPool.ShardDataStore(uint32(i)) - if txStore == nil { + timeBefore := time.Now() + orderedTxes, orderedTxHashes, err := getTxs(txStore) + timeAfter := time.Now() + + if !haveTime() { + log.Info(fmt.Sprintf("time is up after ordered %d txs in %v sec\n", len(orderedTxes), timeAfter.Sub(timeBefore).Seconds())) + return miniBlocks, nil + } + + log.Info(fmt.Sprintf("time elapsed to ordered %d txs: %v sec\n", len(orderedTxes), timeAfter.Sub(timeBefore).Seconds())) + + if err != nil { + log.Debug(fmt.Sprintf("when trying to order txs: %s", err.Error())) continue } @@ -438,10 +651,14 @@ func (bp *blockProcessor) createMiniBlocks(noShards uint32, maxTxInBlock int, ro miniBlock.ShardID = uint32(i) miniBlock.TxHashes = make([][]byte, 0) - for _, txHash := range txStore.Keys() { - snapshot := bp.accounts.JournalLen() + log.Info(fmt.Sprintf("creating mini blocks has been started: have %d txs in pool for shard id %d\n", len(orderedTxes), miniBlock.ShardID)) - tx := bp.getTransactionFromPool(miniBlock.ShardID, txHash) + for index, tx := range orderedTxes { + if !haveTime() { + break + } + + snapshot := bp.accounts.JournalLen() if tx == nil { log.Error("did not find transaction in pool") @@ -449,7 +666,13 @@ func (bp *blockProcessor) createMiniBlocks(noShards uint32, maxTxInBlock int, ro } // execute transaction to change the trie root hash - err := bp.txProcessor.ProcessTransaction(tx, round) + err := bp.processAndRemoveBadTransaction( + orderedTxHashes[index], + orderedTxes[index], + txPool, + round, + miniBlock.ShardID, + ) if err != nil { err = bp.accounts.RevertToSnapshot(snapshot) @@ -457,31 +680,351 @@ func (bp *blockProcessor) createMiniBlocks(noShards uint32, maxTxInBlock int, ro continue } - miniBlock.TxHashes = append(miniBlock.TxHashes, txHash) + miniBlock.TxHashes = append(miniBlock.TxHashes, orderedTxHashes[index]) txs++ if txs >= maxTxInBlock { // max transactions count in one block was reached - miniBlocks = append(miniBlocks, miniBlock) + log.Info(fmt.Sprintf("max txs accepted in one block is reached: added %d txs from %d txs\n", len(miniBlock.TxHashes), len(orderedTxes))) + + if len(miniBlock.TxHashes) > 0 { + miniBlocks = append(miniBlocks, miniBlock) + } + + log.Info(fmt.Sprintf("creating mini blocks has been finished: created %d mini blocks\n", len(miniBlocks))) + return miniBlocks, nil } } if !haveTime() { - miniBlocks = append(miniBlocks, miniBlock) + log.Info(fmt.Sprintf("time is up: added %d txs from %d txs\n", len(miniBlock.TxHashes), len(orderedTxes))) + + if len(miniBlock.TxHashes) > 0 { + miniBlocks = append(miniBlocks, miniBlock) + } + + log.Info(fmt.Sprintf("creating mini blocks has been finished: created %d mini blocks\n", len(miniBlocks))) + return miniBlocks, nil } - miniBlocks = append(miniBlocks, miniBlock) + if len(miniBlock.TxHashes) > 0 { + miniBlocks = append(miniBlocks, miniBlock) + } } + log.Info(fmt.Sprintf("creating mini blocks has been finished: created %d mini blocks\n", len(miniBlocks))) + return miniBlocks, nil } -func (bp *blockProcessor) waitForTxHashes() { +func (bp *blockProcessor) waitForTxHashes(waitTime time.Duration) error { select { case <-bp.ChRcvAllTxs: + return nil + case <-time.After(waitTime): + return process.ErrTimeIsOut + } +} + +func (bp *blockProcessor) displayBlockchain(blkc *blockchain.BlockChain) { + if blkc == nil { + return + } + + blockHeader := blkc.CurrentBlockHeader + txBlockBody := blkc.CurrentTxBlockBody + + if blockHeader == nil || txBlockBody == nil { return - case <-time.After(WaitTime): + } + + headerHash, err := bp.computeHeaderHash(blockHeader) + + if err != nil { + log.Error(err.Error()) return } + + bp.displayLogInfo(blockHeader, txBlockBody, headerHash) +} + +func (bp *blockProcessor) computeHeaderHash(hdr *block.Header) ([]byte, error) { + headerMarsh, err := bp.marshalizer.Marshal(hdr) + if err != nil { + return nil, err + } + + headerHash := bp.hasher.Compute(string(headerMarsh)) + + return headerHash, nil +} + +func (bp *blockProcessor) displayLogInfo( + header *block.Header, + txBlock *block.TxBlockBody, + headerHash []byte, +) { + dispHeader, dispLines := createDisplayableHeaderAndBlockBody(header, txBlock) + + tblString, err := display.CreateTableString(dispHeader, dispLines) + if err != nil { + log.Error(err.Error()) + } + + tblString = tblString + fmt.Sprintf("\nHeader hash: %s\n\nTotal txs "+ + "processed until now: %d. Total txs processed for this block: %d. Total txs remained in pool: %d\n", + toB64(headerHash), + txsTotalProcessed, + txsCurrentBlockProcessed, + bp.getTxsFromPool(txBlock.ShardID)) + + log.Info(tblString) +} + +func createDisplayableHeaderAndBlockBody( + header *block.Header, + txBlockBody *block.TxBlockBody, +) ([]string, []*display.LineData) { + + tableHeader := []string{"Part", "Parameter", "Value"} + + lines := displayHeader(header) + + if header.BlockBodyType == block.TxBlock { + lines = displayTxBlockBody(lines, txBlockBody, header.BlockBodyHash) + + return tableHeader, lines + } + + //TODO: implement the other block bodies + + lines = append(lines, display.NewLineData(false, []string{"Unknown", "", ""})) + return tableHeader, lines +} + +func displayHeader(header *block.Header) []*display.LineData { + lines := make([]*display.LineData, 0) + + //TODO really remove this mock prints + aggrCommits := sha256.Sha256{}.Compute(string(sha256.Sha256{}.Compute(string(header.Commitment) + strconv.Itoa(int(header.Round))))) + aggrSigs := sha256.Sha256{}.Compute(string(sha256.Sha256{}.Compute(string(aggrCommits)))) + + lines = append(lines, display.NewLineData(false, []string{ + "Header", + "Nonce", + fmt.Sprintf("%d", header.Nonce)})) + lines = append(lines, display.NewLineData(false, []string{ + "", + "Shard", + fmt.Sprintf("%d", header.ShardId)})) + lines = append(lines, display.NewLineData(false, []string{ + "", + "Epoch", + fmt.Sprintf("%d", header.Epoch)})) + lines = append(lines, display.NewLineData(false, []string{ + "", + "Round", + fmt.Sprintf("%d", header.Round)})) + lines = append(lines, display.NewLineData(false, []string{ + "", + "Timestamp", + fmt.Sprintf("%d", header.TimeStamp)})) + lines = append(lines, display.NewLineData(false, []string{ + "", + "Prev hash", + toB64(header.PrevHash)})) + lines = append(lines, display.NewLineData(false, []string{ + "", + "Body type", + header.BlockBodyType.String()})) + lines = append(lines, display.NewLineData(false, []string{ + "", + "Body hash", + toB64(header.BlockBodyHash)})) + lines = append(lines, display.NewLineData(false, []string{ + "", + "Pub keys bitmap", + toHex(header.PubKeysBitmap)})) + + //TODO uncomment as this + //lines = append(lines, display.NewLineData(false, []string{ + // "", + // "Commitment", + // toB64(header.Commitment)})) + //lines = append(lines, display.NewLineData(true, []string{ + // "", + // "Signature", + // toB64(header.Signature)})) + + //TODO remove this + lines = append(lines, display.NewLineData(false, []string{ + "", + "Commitment", + toB64(aggrCommits)})) + lines = append(lines, display.NewLineData(true, []string{ + "", + "Signature", + toB64(aggrSigs)})) + + return lines +} + +func displayTxBlockBody(lines []*display.LineData, txBlockBody *block.TxBlockBody, blockBodyHash []byte) []*display.LineData { + lines = append(lines, display.NewLineData(false, []string{"TxBody", "Block blockBodyHash", toB64(blockBodyHash)})) + lines = append(lines, display.NewLineData(true, []string{"", "Root blockBodyHash", toB64(txBlockBody.RootHash)})) + + txsCurrentBlockProcessed = 0 + + for i := 0; i < len(txBlockBody.MiniBlocks); i++ { + miniBlock := txBlockBody.MiniBlocks[i] + + part := fmt.Sprintf("TxBody_%d", miniBlock.ShardID) + + if miniBlock.TxHashes == nil || len(miniBlock.TxHashes) == 0 { + lines = append(lines, display.NewLineData(false, []string{ + part, "", " or "})) + } + + txsCurrentBlockProcessed += len(miniBlock.TxHashes) + txsTotalProcessed += len(miniBlock.TxHashes) + + for j := 0; j < len(miniBlock.TxHashes); j++ { + if j == 0 || j >= len(miniBlock.TxHashes)-1 { + lines = append(lines, display.NewLineData(false, []string{ + part, + fmt.Sprintf("Tx blockBodyHash %d", j+1), + toB64(miniBlock.TxHashes[j])})) + + part = "" + } else if j == 1 { + lines = append(lines, display.NewLineData(false, []string{ + part, + fmt.Sprintf("..."), + fmt.Sprintf("...")})) + + part = "" + } + } + + lines[len(lines)-1].HorizontalRuleAfter = true + } + + return lines +} + +func toHex(buff []byte) string { + if buff == nil { + return "" + } + return "0x" + hex.EncodeToString(buff) +} + +func toB64(buff []byte) string { + if buff == nil { + return "" + } + return base64.StdEncoding.EncodeToString(buff) +} + +func sortTxByNonce(txShardStore storage.Cacher) ([]*transaction.Transaction, [][]byte, error) { + if txShardStore == nil { + return nil, nil, process.ErrNilCacher + } + + transactions := make([]*transaction.Transaction, 0) + txHashes := make([][]byte, 0) + + mTxHashes := make(map[uint64][][]byte) + mTransactions := make(map[uint64][]*transaction.Transaction) + + nonces := make([]uint64, 0) + + for _, key := range txShardStore.Keys() { + val, _ := txShardStore.Get(key) + if val == nil { + continue + } + + tx, ok := val.(*transaction.Transaction) + if !ok { + continue + } + + if mTxHashes[tx.Nonce] == nil { + nonces = append(nonces, tx.Nonce) + mTxHashes[tx.Nonce] = make([][]byte, 0) + mTransactions[tx.Nonce] = make([]*transaction.Transaction, 0) + } + + mTxHashes[tx.Nonce] = append(mTxHashes[tx.Nonce], key) + mTransactions[tx.Nonce] = append(mTransactions[tx.Nonce], tx) + } + + sort.Slice(nonces, func(i, j int) bool { + return nonces[i] < nonces[j] + }) + + for _, nonce := range nonces { + keys := mTxHashes[nonce] + + for idx, key := range keys { + txHashes = append(txHashes, key) + transactions = append(transactions, mTransactions[nonce][idx]) + } + } + + return transactions, txHashes, nil +} + +func (bp *blockProcessor) displayTxsInfo(miniBlock *block.MiniBlock, shardId uint32) { + if miniBlock == nil || miniBlock.TxHashes == nil { + return + } + + txsInPool := bp.getTxsFromPool(shardId) + + log.Info(fmt.Sprintf("PROCESS BLOCK TRANSACTION STARTED: Have %d txs in pool and need to process %d txs from the received block for shard id %d\n", txsInPool, len(miniBlock.TxHashes), shardId)) +} + +func (bp *blockProcessor) getTxsFromPool(shardId uint32) int { + txPool := bp.dataPool.Transactions() + + if txPool == nil { + return 0 + } + + txStore := txPool.ShardDataStore(shardId) + + if txStore == nil { + return 0 + } + + return txStore.Len() +} + +func getTxs(txShardStore storage.Cacher) ([]*transaction.Transaction, [][]byte, error) { + if txShardStore == nil { + return nil, nil, process.ErrNilCacher + } + + transactions := make([]*transaction.Transaction, 0) + txHashes := make([][]byte, 0) + + for _, key := range txShardStore.Keys() { + val, _ := txShardStore.Get(key) + if val == nil { + continue + } + + tx, ok := val.(*transaction.Transaction) + if !ok { + continue + } + + txHashes = append(txHashes, key) + transactions = append(transactions, tx) + } + + return transactions, txHashes, nil } diff --git a/process/block/process_test.go b/process/block/process_test.go index 602eca95b60..852500bbcf1 100644 --- a/process/block/process_test.go +++ b/process/block/process_test.go @@ -1,236 +1,429 @@ package block_test import ( + "bytes" + "fmt" "math/big" + "math/rand" + "reflect" "testing" + "time" + "github.com/ElrondNetwork/elrond-go-sandbox/consensus/spos" + "github.com/ElrondNetwork/elrond-go-sandbox/data" "github.com/ElrondNetwork/elrond-go-sandbox/data/block" "github.com/ElrondNetwork/elrond-go-sandbox/data/blockchain" - "github.com/ElrondNetwork/elrond-go-sandbox/data/shardedData" "github.com/ElrondNetwork/elrond-go-sandbox/data/transaction" "github.com/ElrondNetwork/elrond-go-sandbox/process" blproc "github.com/ElrondNetwork/elrond-go-sandbox/process/block" "github.com/ElrondNetwork/elrond-go-sandbox/process/mock" "github.com/ElrondNetwork/elrond-go-sandbox/storage" + "github.com/ElrondNetwork/elrond-go-sandbox/storage/memorydb" + "github.com/pkg/errors" "github.com/stretchr/testify/assert" ) -var testCacherConfig = storage.CacheConfig{ - Size: 1000, - Type: storage.LRUCache, +func haveTime() time.Duration { + return time.Duration(2000 * time.Millisecond) } -func createBlockchain() (*blockchain.BlockChain, error) { - cacher := storage.CacheConfig{Type: storage.LRUCache, Size: 100} - bloom := storage.BloomConfig{Size: 2048, HashFunc: []storage.HasherType{storage.Keccak, storage.Blake2b, storage.Fnv}} - persisterTxBlockBodyStorage := storage.DBConfig{Type: storage.LvlDB, FilePath: "TxBlockBodyStorage"} - persisterStateBlockBodyStorage := storage.DBConfig{Type: storage.LvlDB, FilePath: "StateBlockBodyStorage"} - persisterPeerBlockBodyStorage := storage.DBConfig{Type: storage.LvlDB, FilePath: "PeerBlockBodyStorage"} - persisterBlockHeaderStorage := storage.DBConfig{Type: storage.LvlDB, FilePath: "BlockHeaderStorage"} - persisterTxStorage := storage.DBConfig{Type: storage.LvlDB, FilePath: "TxStorage"} +func createTestBlockchain() *blockchain.BlockChain { + blockChain, _ := blockchain.NewBlockChain( + generateTestCache(), + generateTestUnit(), + generateTestUnit(), + generateTestUnit(), + generateTestUnit(), + generateTestUnit(), + ) - var headerUnit, peerBlockUnit, stateBlockUnit, txBlockUnit, txUnit *storage.Unit + return blockChain +} - badBlockCache, err := storage.NewCache( - cacher.Type, - cacher.Size) +func generateTestCache() storage.Cacher { + cache, _ := storage.NewCache(storage.LRUCache, 1000) + return cache +} - if err == nil { - txUnit, err = storage.NewStorageUnitFromConf( - cacher, - persisterTxStorage, - bloom) - } +func generateTestUnit() storage.Storer { + memDB, _ := memorydb.New() - if err == nil { - txBlockUnit, err = storage.NewStorageUnitFromConf( - cacher, - persisterTxBlockBodyStorage, - bloom) - } + storer, _ := storage.NewStorageUnit( + generateTestCache(), + memDB, + ) - if err == nil { - stateBlockUnit, err = storage.NewStorageUnitFromConf( - cacher, - persisterStateBlockBodyStorage, - bloom) - } + return storer +} - if err == nil { - peerBlockUnit, err = storage.NewStorageUnitFromConf( - cacher, - persisterPeerBlockBodyStorage, - bloom) +func initDataPool() *mock.TransientDataPoolStub { + tdp := &mock.TransientDataPoolStub{ + TransactionsCalled: func() data.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + ShardDataStoreCalled: func(shardID uint32) (c storage.Cacher) { + return &mock.CacherStub{ + GetCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return &transaction.Transaction{Nonce: 10}, true + } + return nil, false + }, + KeysCalled: func() [][]byte { + return [][]byte{[]byte("key1"), []byte("key2")} + }, + LenCalled: func() int { + return 0 + }, + } + }, + RemoveSetOfDataFromPoolCalled: func(keys [][]byte, destShardID uint32) {}, + } + }, + HeadersNoncesCalled: func() data.Uint64Cacher { + return &mock.Uint64CacherStub{ + PutCalled: func(u uint64, i []byte) bool { + return true + }, + } + }, } - if err == nil { - headerUnit, err = storage.NewStorageUnitFromConf( - cacher, - persisterBlockHeaderStorage, - bloom) - } + return tdp +} - if err == nil { - blockChain, err := blockchain.NewBlockChain( - badBlockCache, - txUnit, - txBlockUnit, - stateBlockUnit, - peerBlockUnit, - headerUnit) +//------- NewBlockProcessor - return blockChain, err - } +func TestNewBlockProcessor_NilDataPoolShouldErr(t *testing.T) { + t.Parallel() - // cleanup - if err != nil { - if headerUnit != nil { - _ = headerUnit.DestroyUnit() - } - if peerBlockUnit != nil { - _ = peerBlockUnit.DestroyUnit() - } - if stateBlockUnit != nil { - _ = stateBlockUnit.DestroyUnit() - } - if txBlockUnit != nil { - _ = txBlockUnit.DestroyUnit() - } - if txUnit != nil { - _ = txUnit.DestroyUnit() - } - } - return nil, err + be, err := blproc.NewBlockProcessor( + nil, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + &mock.AccountsStub{}, + mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, + ) + + assert.Equal(t, process.ErrNilDataPoolHolder, err) + assert.Nil(t, be) } -func TestNewBlockProcessor(t *testing.T) { - tp, err := shardedData.NewShardedData(testCacherConfig) - assert.Nil(t, err) +func TestNewBlockProcessor_NilHasherShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() - be := blproc.NewBlockProcessor( - tp, - &mock.HasherMock{}, + be, err := blproc.NewBlockProcessor( + tdp, + nil, &mock.MarshalizerMock{}, &mock.TxProcessorMock{}, + &mock.AccountsStub{}, + mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, + ) + + assert.Equal(t, process.ErrNilHasher, err) + assert.Nil(t, be) +} + +func TestNewBlockProcessor_NilMarshalizerShouldWork(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + + be, err := blproc.NewBlockProcessor( + tdp, + &mock.HasherStub{}, + nil, + &mock.TxProcessorMock{}, + &mock.AccountsStub{}, + mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, + ) + + assert.Equal(t, process.ErrNilMarshalizer, err) + assert.Nil(t, be) +} + +func TestNewBlockProcessor_NilTxProcessorShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + + be, err := blproc.NewBlockProcessor( + tdp, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, nil, - mock.NewOneShardCoordinatorMock()) + &mock.AccountsStub{}, + mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, + ) - assert.NotNil(t, be) + assert.Equal(t, process.ErrNilTxProcessor, err) + assert.Nil(t, be) } -func TestBlockProc_GetTransactionFromPool(t *testing.T) { - tp, err := shardedData.NewShardedData(testCacherConfig) - assert.Nil(t, err) +func TestNewBlockProcessor_NilAccountsAdapterShouldErr(t *testing.T) { + t.Parallel() - be := blproc.NewBlockProcessor( - tp, &mock.HasherMock{}, + tdp := initDataPool() + + be, err := blproc.NewBlockProcessor( + tdp, + &mock.HasherStub{}, &mock.MarshalizerMock{}, &mock.TxProcessorMock{}, nil, - mock.NewOneShardCoordinatorMock()) + mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, + ) - txHash := []byte("tx1_hash") + assert.Equal(t, process.ErrNilAccountsAdapter, err) + assert.Nil(t, be) +} - tx := be.GetTransactionFromPool(1, txHash) - assert.Nil(t, tp.ShardDataStore(1)) - assert.Nil(t, tx) +func TestNewBlockProcessor_NilShardCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + + be, err := blproc.NewBlockProcessor( + tdp, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + &mock.AccountsStub{}, + nil, + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, + ) - tp.NewShardStore(1) + assert.Equal(t, process.ErrNilShardCoordinator, err) + assert.Nil(t, be) +} - tx = be.GetTransactionFromPool(1, txHash) - assert.NotNil(t, tp.ShardDataStore(1)) - assert.Nil(t, tx) +func TestNewBlockProcessor_NilForkDetectorShouldErr(t *testing.T) { + t.Parallel() - testedNonce := uint64(1) - tp.AddData(txHash, &transaction.Transaction{Nonce: testedNonce}, 1) + tdp := initDataPool() - tx = be.GetTransactionFromPool(1, txHash) - assert.NotNil(t, tp.ShardDataStore(1)) - assert.NotNil(t, tx) - assert.Equal(t, testedNonce, tx.Nonce) + be, err := blproc.NewBlockProcessor( + tdp, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + &mock.AccountsStub{}, + mock.NewOneShardCoordinatorMock(), + nil, + func(destShardID uint32, txHash []byte) { + }, + ) + + assert.Equal(t, process.ErrNilForkDetector, err) + assert.Nil(t, be) } -func TestBlockProc_RequestTransactionFromNetwork(t *testing.T) { - tp, err := shardedData.NewShardedData(testCacherConfig) - assert.Nil(t, err) +func TestNewBlockProcessor_NilRequestTransactionHandlerShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() - be := blproc.NewBlockProcessor( - tp, &mock.HasherMock{}, + be, err := blproc.NewBlockProcessor( + tdp, + &mock.HasherStub{}, &mock.MarshalizerMock{}, &mock.TxProcessorMock{}, + &mock.AccountsStub{}, + mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, nil, - mock.NewOneShardCoordinatorMock()) + ) - shardId := uint32(1) - txHash1 := []byte("tx1_hash1") + assert.Equal(t, process.ErrNilTransactionHandler, err) + assert.Nil(t, be) +} - blk := block.TxBlockBody{} - mBlocks := make([]block.MiniBlock, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash1) - mBlk := block.MiniBlock{ShardID: shardId, TxHashes: txHashes} - mBlocks = append(mBlocks, mBlk) - blk.MiniBlocks = mBlocks - tx1 := &transaction.Transaction{Nonce: 7} - tp.AddData(txHash1, tx1, 1) +func TestNewBlockProcessor_NilTransactionPoolShouldErr(t *testing.T) { + t.Parallel() - be.RequestTransactionFromNetwork(&blk) - be.WaitForTxHashes() - tx, _ := tp.ShardStore(shardId).DataStore.Get(txHash1) + tdp := initDataPool() + tdp.TransactionsCalled = func() data.ShardedDataCacherNotifier { + return nil + } + + be, err := blproc.NewBlockProcessor( + tdp, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + &mock.AccountsStub{}, + mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, + ) - assert.Equal(t, tx1, tx) + assert.Equal(t, process.ErrNilTransactionPool, err) + assert.Nil(t, be) } -func TestBlockProcessor_ProcessBlockWithNilTxBlockBodyShouldErr(t *testing.T) { - tp, err := shardedData.NewShardedData(testCacherConfig) +func TestNewBlockProcessor_OkValsShouldWork(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + + be, err := blproc.NewBlockProcessor( + tdp, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + &mock.AccountsStub{}, + mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, + ) + assert.Nil(t, err) - tpm := mock.TxProcessorMock{} - // set accounts dirty - journalLen := func() int { return 3 } - revToSnapshot := func(snapshot int) error { return nil } + assert.NotNil(t, be) +} - blkc, _ := createBlockchain() +//------- CreateEmptyBlockBody - hdr := block.Header{ - Nonce: 0, - BlockBodyHash: []byte("bodyHash"), - PubKeysBitmap: []byte("0100101"), - PrevHash: []byte(""), - Signature: []byte("signature"), - Commitment: []byte("commitment"), +func TestCreateEmptyBlockBody_ShouldWork(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + + rootHash := []byte("test root hash") + shardId := uint32(4) + round := int32(5) + + accounts := &mock.AccountsStub{} + accounts.RootHashCalled = func() []byte { + return rootHash } - // cleanup after tests - defer func() { - _ = blkc.Destroy() - }() + be, _ := blproc.NewBlockProcessor( + tdp, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + accounts, + mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, + ) + + txBlockBody := be.CreateEmptyBlockBody(shardId, round) + + expectedTxBlockBody := &block.TxBlockBody{ + MiniBlocks: make([]block.MiniBlock, 0), + StateBlockBody: block.StateBlockBody{ + ShardID: shardId, + RootHash: rootHash, + }, + } + + assert.Equal(t, expectedTxBlockBody, txBlockBody) +} + +//------- ProcessAndCommit + +func TestBlockProcessor_ProcessAndCommitNilBlockchainShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() - be := blproc.NewBlockProcessor( - tp, &mock.HasherMock{}, + be, _ := blproc.NewBlockProcessor( + tdp, + &mock.HasherStub{}, &mock.MarshalizerMock{}, - &tpm, - &mock.AccountsStub{ - JournalLenCalled: journalLen, - RevertToSnapshotCalled: revToSnapshot}, - mock.NewOneShardCoordinatorMock(), + &mock.TxProcessorMock{}, + &mock.AccountsStub{}, + mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, ) - // should return err - err = be.ProcessAndCommit(blkc, &hdr, nil) + err := be.ProcessAndCommit(nil, &block.Header{}, &block.TxBlockBody{}, haveTime) + + assert.Equal(t, process.ErrNilBlockChain, err) +} + +func TestBlockProcessor_ProcessAndCommitNilHeaderShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + + be, _ := blproc.NewBlockProcessor( + tdp, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + &mock.AccountsStub{}, + mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, + ) + + err := be.ProcessAndCommit(createTestBlockchain(), nil, &block.TxBlockBody{}, haveTime) + + assert.Equal(t, process.ErrNilBlockHeader, err) +} + +func TestBlockProcessor_ProcessAndCommitNilTxBlockBodyShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + + be, _ := blproc.NewBlockProcessor( + tdp, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + &mock.AccountsStub{}, + mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, + ) + + err := be.ProcessAndCommit(createTestBlockchain(), &block.Header{}, nil, haveTime) - assert.NotNil(t, err) assert.Equal(t, process.ErrNilTxBlockBody, err) } -func TestBlockProc_ProcessBlockWithDirtyAccountShouldErr(t *testing.T) { - tp, err := shardedData.NewShardedData(testCacherConfig) - assert.Nil(t, err) +func TestBlockProcessor_ProcessAndCommitBlockWithDirtyAccountShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + tpm := mock.TxProcessorMock{} // set accounts dirty journalLen := func() int { return 3 } revToSnapshot := func(snapshot int) error { return nil } - blkc, _ := createBlockchain() + blkc := createTestBlockchain() hdr := block.Header{ Nonce: 0, @@ -251,34 +444,33 @@ func TestBlockProc_ProcessBlockWithDirtyAccountShouldErr(t *testing.T) { MiniBlocks: miniblocks, } - // cleanup after tests - defer func() { - _ = blkc.Destroy() - }() - - be := blproc.NewBlockProcessor( - tp, &mock.HasherMock{}, + be, _ := blproc.NewBlockProcessor( + tdp, &mock.HasherStub{}, &mock.MarshalizerMock{}, &tpm, &mock.AccountsStub{ JournalLenCalled: journalLen, RevertToSnapshotCalled: revToSnapshot, }, - mock.NewOneShardCoordinatorMock(), + mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, ) // should return err - err = be.ProcessAndCommit(blkc, &hdr, &txBody) + err := be.ProcessAndCommit(blkc, &hdr, &txBody, haveTime) assert.NotNil(t, err) assert.Equal(t, err, process.ErrAccountStateDirty) } -func TestBlockProcessor_ProcessBlockWithInvalidTransactionShouldErr(t *testing.T) { - tp, err := shardedData.NewShardedData(testCacherConfig) - assert.Nil(t, err) +func TestBlockProcessor_ProcessAndCommitBlockWithInvalidTransactionShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + txHash := []byte("tx_hash1") - tp.AddData(txHash, &transaction.Transaction{Nonce: 1}, 0) // invalid transaction txProcess := func(transaction *transaction.Transaction, round int32) error { @@ -286,7 +478,7 @@ func TestBlockProcessor_ProcessBlockWithInvalidTransactionShouldErr(t *testing.T } tpm := mock.TxProcessorMock{ProcessTransactionCalled: txProcess} - blkc, _ := createBlockchain() + blkc := createTestBlockchain() hdr := block.Header{ Nonce: 0, PrevHash: []byte(""), @@ -312,44 +504,865 @@ func TestBlockProcessor_ProcessBlockWithInvalidTransactionShouldErr(t *testing.T RootHash: []byte("root hash"), ShardID: 0, }, - MiniBlocks: miniblocks, + MiniBlocks: miniblocks, + } + + // set accounts dirty + journalLen := func() int { return 0 } + // set revertToSnapshot + revertToSnapshot := func(snapshot int) error { return nil } + + rootHashCalled := func() []byte { + return []byte("rootHash") + } + + be, _ := blproc.NewBlockProcessor( + tdp, &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &tpm, + &mock.AccountsStub{ + JournalLenCalled: journalLen, + RevertToSnapshotCalled: revertToSnapshot, + RootHashCalled: rootHashCalled, + }, + mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, + ) + + go func() { + be.ChRcvAllTxs <- true + }() + + // should return err + err := be.ProcessAndCommit(blkc, &hdr, &txBody, haveTime) + assert.Equal(t, process.ErrHigherNonceInTransaction, err) +} + +func TestBlockProcessor_ProcessAndCommitHeaderNotPassingIntegrityShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + + be, _ := blproc.NewBlockProcessor( + tdp, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + &mock.AccountsStub{}, + mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, + ) + + hdr := &block.Header{ + Nonce: 0, + BlockBodyHash: nil, + PubKeysBitmap: []byte("0100101"), + PrevHash: []byte(""), + Signature: []byte("signature"), + Commitment: []byte("commitment"), + } + + miniblocks := make([]block.MiniBlock, 0) + + txBody := &block.TxBlockBody{ + StateBlockBody: block.StateBlockBody{ + RootHash: []byte("root hash"), + ShardID: 0, + }, + MiniBlocks: miniblocks, + } + + err := be.ProcessAndCommit(createTestBlockchain(), hdr, txBody, haveTime) + + assert.Equal(t, "nil block body hash", err.Error()) +} + +func TestBlockProcessor_ProcessAndCommitHeaderNotFirstShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + + be, _ := blproc.NewBlockProcessor( + tdp, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + &mock.AccountsStub{}, + mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, + ) + + hdr := &block.Header{ + Nonce: 0, + Round: 1, + BlockBodyHash: []byte("block body hash"), + PubKeysBitmap: []byte("0100101"), + PrevHash: []byte(""), + Signature: []byte("signature"), + Commitment: []byte("commitment"), + } + + miniblocks := make([]block.MiniBlock, 0) + + txBody := &block.TxBlockBody{ + StateBlockBody: block.StateBlockBody{ + RootHash: []byte("root hash"), + ShardID: 0, + }, + MiniBlocks: miniblocks, + } + + blkc := createTestBlockchain() + + err := be.ProcessAndCommit(blkc, hdr, txBody, haveTime) + + assert.Equal(t, process.ErrWrongNonceInBlock, err) +} + +func TestBlockProcessor_ProcessAndCommitHeaderNotCorrectNonceShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + + be, _ := blproc.NewBlockProcessor( + tdp, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + &mock.AccountsStub{}, + mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, + ) + + hdr := &block.Header{ + Nonce: 0, + Round: 1, + BlockBodyHash: []byte("block body hash"), + PubKeysBitmap: []byte("0100101"), + PrevHash: []byte(""), + Signature: []byte("signature"), + Commitment: []byte("commitment"), + } + + miniblocks := make([]block.MiniBlock, 0) + + txBody := &block.TxBlockBody{ + StateBlockBody: block.StateBlockBody{ + RootHash: []byte("root hash"), + ShardID: 0, + }, + MiniBlocks: miniblocks, + } + + blkc := createTestBlockchain() + blkc.CurrentBlockHeader = &block.Header{ + Nonce: 0, + } + + err := be.ProcessAndCommit(blkc, hdr, txBody, haveTime) + + assert.Equal(t, process.ErrWrongNonceInBlock, err) +} + +func TestBlockProcessor_ProcessAndCommitHeaderNotCorrectPrevHashShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + + be, _ := blproc.NewBlockProcessor( + tdp, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + &mock.AccountsStub{}, + mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, + ) + + hdr := &block.Header{ + Nonce: 1, + Round: 1, + BlockBodyHash: []byte("block body hash"), + PubKeysBitmap: []byte("0100101"), + PrevHash: []byte("zzz"), + Signature: []byte("signature"), + Commitment: []byte("commitment"), + } + + miniblocks := make([]block.MiniBlock, 0) + + txBody := &block.TxBlockBody{ + StateBlockBody: block.StateBlockBody{ + RootHash: []byte("root hash"), + ShardID: 0, + }, + MiniBlocks: miniblocks, + } + + blkc := createTestBlockchain() + blkc.CurrentBlockHeader = &block.Header{ + Nonce: 0, + } + blkc.CurrentBlockHeaderHash = []byte("bbb") + + err := be.ProcessAndCommit(blkc, hdr, txBody, haveTime) + + assert.Equal(t, process.ErrInvalidBlockHash, err) +} + +//------- ProcessBlock + +func TestBlockProcessor_ProcessBlockNilBlockchainShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + + be, _ := blproc.NewBlockProcessor( + tdp, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + &mock.AccountsStub{}, + mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, + ) + + err := be.ProcessBlock(nil, &block.Header{}, &block.TxBlockBody{}, func() time.Duration { + return time.Second + }) + + assert.Equal(t, process.ErrNilBlockChain, err) +} + +func TestBlockProcessor_ProcessBlockNilHaveTimeFuncShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + + be, _ := blproc.NewBlockProcessor( + tdp, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + &mock.AccountsStub{}, + mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, + ) + + err := be.ProcessBlock(createTestBlockchain(), &block.Header{}, &block.TxBlockBody{}, nil) + + assert.Equal(t, process.ErrNilHaveTimeHandler, err) +} + +//------- CommitBlock + +func TestBlockProcessor_CommitBlockNilBlockchainShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + + be, _ := blproc.NewBlockProcessor( + tdp, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + &mock.AccountsStub{}, + mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, + ) + + err := be.CommitBlock(nil, &block.Header{}, &block.TxBlockBody{}) + + assert.Equal(t, process.ErrNilBlockChain, err) +} + +func TestBlockProcessor_CommitBlockMarshalizerFailForHeaderShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + + errMarshalizer := errors.New("failure") + + hdr := &block.Header{ + Nonce: 1, + Round: 1, + BlockBodyHash: []byte("block body hash"), + PubKeysBitmap: []byte("0100101"), + PrevHash: []byte("zzz"), + Signature: []byte("signature"), + Commitment: []byte("commitment"), + } + + miniblocks := make([]block.MiniBlock, 0) + + txBody := &block.TxBlockBody{ + StateBlockBody: block.StateBlockBody{ + RootHash: []byte("root hash"), + ShardID: 0, + }, + MiniBlocks: miniblocks, + } + + marshalizer := &mock.MarshalizerStub{ + MarshalCalled: func(obj interface{}) (i []byte, e error) { + if reflect.DeepEqual(obj, hdr) { + return nil, errMarshalizer + } + + return []byte("obj"), nil + }, + } + + be, _ := blproc.NewBlockProcessor( + tdp, + &mock.HasherStub{}, + marshalizer, + &mock.TxProcessorMock{}, + &mock.AccountsStub{}, + mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, + ) + + blkc := createTestBlockchain() + + err := be.CommitBlock(blkc, hdr, txBody) + + assert.Equal(t, process.ErrMarshalWithoutSuccess, err) +} + +func TestBlockProcessor_CommitBlockMarshalizerFailForTxBodyShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + + errMarshalizer := errors.New("failure") + + hdr := &block.Header{ + Nonce: 1, + Round: 1, + BlockBodyHash: []byte("block body hash"), + PubKeysBitmap: []byte("0100101"), + PrevHash: []byte("zzz"), + Signature: []byte("signature"), + Commitment: []byte("commitment"), + } + + miniblocks := make([]block.MiniBlock, 0) + + txBody := &block.TxBlockBody{ + StateBlockBody: block.StateBlockBody{ + RootHash: []byte("root hash"), + ShardID: 0, + }, + MiniBlocks: miniblocks, + } + + marshalizer := &mock.MarshalizerStub{ + MarshalCalled: func(obj interface{}) (i []byte, e error) { + if reflect.DeepEqual(obj, txBody) { + return nil, errMarshalizer + } + + return []byte("obj"), nil + }, + } + + be, _ := blproc.NewBlockProcessor( + tdp, + &mock.HasherStub{}, + marshalizer, + &mock.TxProcessorMock{}, + &mock.AccountsStub{}, + mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, + ) + + blkc := createTestBlockchain() + + err := be.CommitBlock(blkc, hdr, txBody) + + assert.Equal(t, process.ErrMarshalWithoutSuccess, err) +} + +func TestBlockProcessor_CommitBlockStorageFailsForHeaderShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + + errPersister := errors.New("failure") + + hdr := &block.Header{ + Nonce: 1, + Round: 1, + BlockBodyHash: []byte("block body hash"), + PubKeysBitmap: []byte("0100101"), + PrevHash: []byte("zzz"), + Signature: []byte("signature"), + Commitment: []byte("commitment"), + } + + miniblocks := make([]block.MiniBlock, 0) + + txBody := &block.TxBlockBody{ + StateBlockBody: block.StateBlockBody{ + RootHash: []byte("root hash"), + ShardID: 0, + }, + MiniBlocks: miniblocks, + } + + be, _ := blproc.NewBlockProcessor( + tdp, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + &mock.AccountsStub{}, + mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, + ) + + hdrUnit := &mock.StorerStub{ + PutCalled: func(key, data []byte) error { + return errPersister + }, + } + + blkc, _ := blockchain.NewBlockChain( + generateTestCache(), + generateTestUnit(), + generateTestUnit(), + generateTestUnit(), + generateTestUnit(), + hdrUnit, + ) + + err := be.CommitBlock(blkc, hdr, txBody) + + assert.Equal(t, process.ErrPersistWithoutSuccess, err) +} + +func TestBlockProcessor_CommitBlockStorageFailsForBodyShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + + errPersister := errors.New("failure") + + hdr := &block.Header{ + Nonce: 1, + Round: 1, + BlockBodyHash: []byte("block body hash"), + PubKeysBitmap: []byte("0100101"), + PrevHash: []byte("zzz"), + Signature: []byte("signature"), + Commitment: []byte("commitment"), + } + + miniblocks := make([]block.MiniBlock, 0) + + txBody := &block.TxBlockBody{ + StateBlockBody: block.StateBlockBody{ + RootHash: []byte("root hash"), + ShardID: 0, + }, + MiniBlocks: miniblocks, + } + + be, _ := blproc.NewBlockProcessor( + tdp, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + &mock.AccountsStub{}, + mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, + ) + + txBlockUnit := &mock.StorerStub{ + PutCalled: func(key, data []byte) error { + return errPersister + }, + } + + blkc, _ := blockchain.NewBlockChain( + generateTestCache(), + generateTestUnit(), + txBlockUnit, + generateTestUnit(), + generateTestUnit(), + generateTestUnit(), + ) + + err := be.CommitBlock(blkc, hdr, txBody) + + assert.Equal(t, process.ErrPersistWithoutSuccess, err) +} + +func TestBlockProcessor_CommitBlockNilNoncesDataPoolShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + + hdr := &block.Header{ + Nonce: 1, + Round: 1, + BlockBodyHash: []byte("block body hash"), + PubKeysBitmap: []byte("0100101"), + PrevHash: []byte("zzz"), + Signature: []byte("signature"), + Commitment: []byte("commitment"), + } + + miniblocks := make([]block.MiniBlock, 0) + + txBody := &block.TxBlockBody{ + StateBlockBody: block.StateBlockBody{ + RootHash: []byte("root hash"), + ShardID: 0, + }, + MiniBlocks: miniblocks, + } + + be, _ := blproc.NewBlockProcessor( + tdp, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + &mock.AccountsStub{}, + mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, + ) + + tdp.HeadersNoncesCalled = func() data.Uint64Cacher { + return nil + } + + blkc := createTestBlockchain() + err := be.CommitBlock(blkc, hdr, txBody) + + assert.Equal(t, process.ErrNilDataPoolHolder, err) +} + +func TestBlockProcessor_CommitBlockNoTxInPoolShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + + txHash := []byte("txHash") + rootHash := []byte("root hash") + hdrHash := []byte("header hash") + + hdr := &block.Header{ + Nonce: 1, + Round: 1, + BlockBodyHash: []byte("block body hash"), + PubKeysBitmap: []byte("0100101"), + PrevHash: []byte("zzz"), + Signature: []byte("signature"), + Commitment: []byte("commitment"), + } + + mb := block.MiniBlock{ + TxHashes: [][]byte{[]byte(txHash)}, + } + + txBody := &block.TxBlockBody{ + StateBlockBody: block.StateBlockBody{ + RootHash: []byte("root hash"), + ShardID: 0, + }, + MiniBlocks: []block.MiniBlock{mb}, + } + + accounts := &mock.AccountsStub{ + CommitCalled: func() (i []byte, e error) { + return rootHash, nil + }, + } + + fd := &mock.ForkDetectorMock{ + AddHeaderCalled: func(header *block.Header, hash []byte, isReceived bool) error { + return nil + }, + } + + hasher := &mock.HasherStub{} + hasher.ComputeCalled = func(s string) []byte { + return hdrHash + } + + be, _ := blproc.NewBlockProcessor( + tdp, + hasher, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + accounts, + mock.NewOneShardCoordinatorMock(), + fd, + func(destShardID uint32, txHash []byte) { + }, + ) + + txCache := &mock.CacherStub{ + GetCalled: func(key []byte) (value interface{}, ok bool) { + return nil, false + }, + LenCalled: func() int { + return 0 + }, + } + + tdp.TransactionsCalled = func() data.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + ShardDataStoreCalled: func(shardID uint32) (c storage.Cacher) { + return txCache + }, + + RemoveSetOfDataFromPoolCalled: func(keys [][]byte, destShardID uint32) { + }, + } + + } + + blkc := createTestBlockchain() + err := be.CommitBlock(blkc, hdr, txBody) + + assert.Equal(t, process.ErrMissingTransaction, err) +} + +func TestBlockProcessor_CommitBlockOkValsShouldWork(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + + txHash := []byte("txHash") + tx := &transaction.Transaction{} + rootHash := []byte("root hash") + hdrHash := []byte("header hash") + + hdr := &block.Header{ + Nonce: 1, + Round: 1, + BlockBodyHash: []byte("block body hash"), + PubKeysBitmap: []byte("0100101"), + PrevHash: []byte("zzz"), + Signature: []byte("signature"), + Commitment: []byte("commitment"), + } + + mb := block.MiniBlock{ + TxHashes: [][]byte{[]byte(txHash)}, + } + + txBody := &block.TxBlockBody{ + StateBlockBody: block.StateBlockBody{ + RootHash: []byte("root hash"), + ShardID: 0, + }, + MiniBlocks: []block.MiniBlock{mb}, + } + + accounts := &mock.AccountsStub{ + CommitCalled: func() (i []byte, e error) { + return rootHash, nil + }, + } + + forkDetectorAddCalled := false + + fd := &mock.ForkDetectorMock{ + AddHeaderCalled: func(header *block.Header, hash []byte, isReceived bool) error { + if header == hdr { + forkDetectorAddCalled = true + return nil + } + + return errors.New("should have not got here") + }, + } + + hasher := &mock.HasherStub{} + hasher.ComputeCalled = func(s string) []byte { + return hdrHash + } + + be, _ := blproc.NewBlockProcessor( + tdp, + hasher, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + accounts, + mock.NewOneShardCoordinatorMock(), + fd, + func(destShardID uint32, txHash []byte) { + }, + ) + + txCache := &mock.CacherStub{ + GetCalled: func(key []byte) (value interface{}, ok bool) { + if bytes.Equal(txHash, key) { + return tx, true + } + return nil, false + }, + LenCalled: func() int { + return 0 + }, + } + + removeTxWasCalled := false + + tdp.TransactionsCalled = func() data.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + ShardDataStoreCalled: func(shardID uint32) (c storage.Cacher) { + return txCache + }, + + RemoveSetOfDataFromPoolCalled: func(keys [][]byte, destShardID uint32) { + if bytes.Equal(keys[0], []byte(txHash)) && len(keys) == 1 { + removeTxWasCalled = true + } + }, + } + + } + + blkc := createTestBlockchain() + err := be.CommitBlock(blkc, hdr, txBody) + + assert.Nil(t, err) + assert.True(t, removeTxWasCalled) + assert.True(t, forkDetectorAddCalled) + assert.True(t, blkc.CurrentTxBlockBody == txBody) + assert.True(t, blkc.CurrentBlockHeader == hdr) + assert.Equal(t, hdrHash, blkc.CurrentBlockHeaderHash) + + //this should sleep as there is an async call to display current header and block in CommitBlock + time.Sleep(time.Second) +} + +func TestVerifyStateRoot_ShouldWork(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + + rootHash := []byte("root hash to be tested") + + accounts := &mock.AccountsStub{ + RootHashCalled: func() []byte { + return rootHash + }, } - // cleanup after tests - defer func() { - _ = blkc.Destroy() - }() + be, _ := blproc.NewBlockProcessor( + tdp, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + accounts, + mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, + ) - // set accounts dirty - journalLen := func() int { return 0 } - // set revertToSnapshot - revertToSnapshot := func(snapshot int) error { return nil } + assert.True(t, be.VerifyStateRoot(rootHash)) +} - be := blproc.NewBlockProcessor( - tp, &mock.HasherMock{}, +func TestBlockProc_GetTransactionFromPool(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + + be, _ := blproc.NewBlockProcessor( + tdp, &mock.HasherStub{}, &mock.MarshalizerMock{}, - &tpm, - &mock.AccountsStub{ - JournalLenCalled: journalLen, - RevertToSnapshotCalled: revertToSnapshot, + &mock.TxProcessorMock{}, + &mock.AccountsStub{}, + mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { }, + ) + + txHash := []byte("tx1_hash") + tx := be.GetTransactionFromPool(1, txHash) + + assert.NotNil(t, tx) + assert.Equal(t, uint64(10), tx.Nonce) +} + +func TestBlockProc_RequestTransactionFromNetwork(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + + be, _ := blproc.NewBlockProcessor( + tdp, &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, ) - // should return err - err = be.ProcessAndCommit(blkc, &hdr, &txBody) - assert.Equal(t, process.ErrHigherNonceInTransaction, err) + shardId := uint32(1) + txHash1 := []byte("tx1_hash1") + + blk := block.TxBlockBody{} + mBlocks := make([]block.MiniBlock, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash1) + mBlk := block.MiniBlock{ShardID: shardId, TxHashes: txHashes} + mBlocks = append(mBlocks, mBlk) + blk.MiniBlocks = mBlocks + + //TODO refactor the test + + if be.RequestTransactionFromNetwork(&blk) > 0 { + be.WaitForTxHashes(haveTime()) + } } func TestBlockProc_CreateTxBlockBodyWithDirtyAccStateShouldErr(t *testing.T) { - tp, err := shardedData.NewShardedData(testCacherConfig) - assert.Nil(t, err) + t.Parallel() + + tdp := initDataPool() + tpm := mock.TxProcessorMock{} journalLen := func() int { return 3 } revToSnapshot := func(snapshot int) error { return nil } - be := blproc.NewBlockProcessor( - tp, &mock.HasherMock{}, + be, _ := blproc.NewBlockProcessor( + tdp, &mock.HasherStub{}, &mock.MarshalizerMock{}, &tpm, @@ -357,8 +1370,10 @@ func TestBlockProc_CreateTxBlockBodyWithDirtyAccStateShouldErr(t *testing.T) { JournalLenCalled: journalLen, RevertToSnapshotCalled: revToSnapshot, }, - mock.NewOneShardCoordinatorMock(), - + mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, ) bl, err := be.CreateTxBlockBody(0, 100, 0, func() bool { return true }) @@ -370,23 +1385,28 @@ func TestBlockProc_CreateTxBlockBodyWithDirtyAccStateShouldErr(t *testing.T) { } func TestBlockProcessor_CreateTxBlockBodyWithNoTimeShouldEmptyBlock(t *testing.T) { - tp, err := shardedData.NewShardedData(testCacherConfig) - assert.Nil(t, err) + t.Parallel() + + tdp := initDataPool() + tpm := mock.TxProcessorMock{} journalLen := func() int { return 0 } rootHashfunc := func() []byte { return []byte("roothash") } revToSnapshot := func(snapshot int) error { return nil } - be := blproc.NewBlockProcessor( - tp, &mock.HasherMock{}, + be, _ := blproc.NewBlockProcessor( + tdp, &mock.HasherStub{}, &mock.MarshalizerMock{}, &tpm, &mock.AccountsStub{ - JournalLenCalled: journalLen, - RootHashCalled: rootHashfunc, + JournalLenCalled: journalLen, + RootHashCalled: rootHashfunc, RevertToSnapshotCalled: revToSnapshot, }, mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, ) haveTime := func() bool { @@ -402,8 +1422,10 @@ func TestBlockProcessor_CreateTxBlockBodyWithNoTimeShouldEmptyBlock(t *testing.T } func TestBlockProcessor_CreateTxBlockBodyOK(t *testing.T) { - tp, err := shardedData.NewShardedData(testCacherConfig) - assert.Nil(t, err) + t.Parallel() + + tdp := initDataPool() + //process transaction. return nil for no error procTx := func(transaction *transaction.Transaction, round int32) error { return nil @@ -420,8 +1442,8 @@ func TestBlockProcessor_CreateTxBlockBodyOK(t *testing.T) { return true } - be := blproc.NewBlockProcessor( - tp, &mock.HasherMock{}, + be, _ := blproc.NewBlockProcessor( + tdp, &mock.HasherStub{}, &mock.MarshalizerMock{}, &tpm, &mock.AccountsStub{ @@ -429,6 +1451,9 @@ func TestBlockProcessor_CreateTxBlockBodyOK(t *testing.T) { RootHashCalled: rootHashfunc, }, mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, ) blk, err := be.CreateTxBlockBody(0, 100, 0, haveTime) @@ -438,15 +1463,19 @@ func TestBlockProcessor_CreateTxBlockBodyOK(t *testing.T) { } func TestBlockProcessor_CreateGenesisBlockBodyWithNilTxProcessorShouldPanic(t *testing.T) { - tp, err := shardedData.NewShardedData(testCacherConfig) - assert.Nil(t, err) + t.Parallel() - be := blproc.NewBlockProcessor( - tp, nil, + tdp := initDataPool() + + be, _ := blproc.NewBlockProcessor( + tdp, nil, nil, nil, nil, mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, ) createGenesis := func() { @@ -457,14 +1486,15 @@ func TestBlockProcessor_CreateGenesisBlockBodyWithNilTxProcessorShouldPanic(t *t } func TestBlockProcessor_CreateGenesisBlockBodyWithFailSetBalanceShouldPanic(t *testing.T) { - tp, err := shardedData.NewShardedData(testCacherConfig) - assert.Nil(t, err) + t.Parallel() + + tdp := initDataPool() txProcess := func(transaction *transaction.Transaction, round int32) error { return nil } - setBalances := func(accBalance map[string]big.Int) (rootHash []byte, err error) { + setBalances := func(accBalance map[string]*big.Int) (rootHash []byte, err error) { return nil, process.ErrAccountStateDirty } @@ -473,12 +1503,15 @@ func TestBlockProcessor_CreateGenesisBlockBodyWithFailSetBalanceShouldPanic(t *t SetBalancesToTrieCalled: setBalances, } - be := blproc.NewBlockProcessor( - tp, nil, + be, _ := blproc.NewBlockProcessor( + tdp, nil, nil, &txProc, nil, mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, ) createGenesis := func() { @@ -489,14 +1522,15 @@ func TestBlockProcessor_CreateGenesisBlockBodyWithFailSetBalanceShouldPanic(t *t } func TestBlockProcessor_CreateGenesisBlockBodyOK(t *testing.T) { - tp, err := shardedData.NewShardedData(testCacherConfig) - assert.Nil(t, err) + t.Parallel() + + tdp := initDataPool() txProcess := func(transaction *transaction.Transaction, round int32) error { return nil } - setBalances := func(accBalance map[string]big.Int) (rootHash []byte, err error) { + setBalances := func(accBalance map[string]*big.Int) (rootHash []byte, err error) { return []byte("stateRootHash"), nil } @@ -505,47 +1539,59 @@ func TestBlockProcessor_CreateGenesisBlockBodyOK(t *testing.T) { SetBalancesToTrieCalled: setBalances, } - be := blproc.NewBlockProcessor( - tp, nil, - nil, + be, _ := blproc.NewBlockProcessor( + tdp, &mock.HasherStub{}, + &mock.MarshalizerMock{}, &txProc, - nil, + &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, ) - stBlock := be.CreateGenesisBlockBody(nil, 0) + stBlock, err := be.CreateGenesisBlockBody(nil, 0) + assert.Nil(t, err) assert.NotNil(t, stBlock) assert.Equal(t, stBlock.RootHash, []byte("stateRootHash")) } func TestBlockProcessor_RemoveBlockTxsFromPoolNilBlockShouldErr(t *testing.T) { - tp, err := shardedData.NewShardedData(testCacherConfig) - assert.Nil(t, err) + t.Parallel() - be := blproc.NewBlockProcessor( - tp, nil, - nil, - nil, - nil, + tdp := initDataPool() + + be, _ := blproc.NewBlockProcessor( + tdp, &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, ) - err = be.RemoveBlockTxsFromPool(nil) + err := be.RemoveBlockTxsFromPool(nil) assert.NotNil(t, err) assert.Equal(t, err, process.ErrNilTxBlockBody) } func TestBlockProcessor_RemoveBlockTxsFromPoolOK(t *testing.T) { - tp, err := shardedData.NewShardedData(testCacherConfig) - assert.Nil(t, err) + t.Parallel() - be := blproc.NewBlockProcessor( - tp, nil, - nil, - nil, - nil, + tdp := initDataPool() + + be, _ := blproc.NewBlockProcessor( + tdp, &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, ) miniblocks := make([]block.MiniBlock, 0) @@ -565,7 +1611,386 @@ func TestBlockProcessor_RemoveBlockTxsFromPoolOK(t *testing.T) { MiniBlocks: miniblocks, } - err = be.RemoveBlockTxsFromPool(&txBody) + err := be.RemoveBlockTxsFromPool(&txBody) + + assert.Nil(t, err) +} + +//------- ComputeNewNoncePrevHash + +func TestBlockProcessor_computeHeaderHashMarshalizerFail1ShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + + marshalizer := &mock.MarshalizerStub{} + + be, _ := blproc.NewBlockProcessor( + tdp, &mock.HasherStub{}, + marshalizer, + &mock.TxProcessorMock{}, + &mock.AccountsStub{}, + mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, + ) + + hdr, txBlock := createTestHdrTxBlockBody() + + expectedError := errors.New("marshalizer fail") + + marshalizer.MarshalCalled = func(obj interface{}) (bytes []byte, e error) { + if hdr == obj { + return nil, expectedError + } + + if txBlock == obj { + return []byte("txBlockBodyMarshalized"), nil + } + return nil, nil + } + + _, err := be.ComputeHeaderHash(hdr) + + assert.Equal(t, expectedError, err) +} + +func TestNode_ComputeNewNoncePrevHashShouldWork(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + + sposWrk := &spos.SPOSConsensusWorker{} + sposWrk.BlockChain = createTestBlockchain() + + marshalizer := &mock.MarshalizerStub{} + hasher := &mock.HasherStub{} + + be, _ := blproc.NewBlockProcessor( + tdp, hasher, + marshalizer, + &mock.TxProcessorMock{}, + &mock.AccountsStub{}, + mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, + ) + + hdr, txBlock := createTestHdrTxBlockBody() + + marshalizer.MarshalCalled = func(obj interface{}) (bytes []byte, e error) { + if hdr == obj { + return []byte("hdrHeaderMarshalized"), nil + } + if txBlock == obj { + return []byte("txBlockBodyMarshalized"), nil + } + return nil, nil + } + hasher.ComputeCalled = func(s string) []byte { + if s == "hdrHeaderMarshalized" { + return []byte("hdr hash") + } + if s == "txBlockBodyMarshalized" { + return []byte("tx block body hash") + } + return nil + } + + _, err := be.ComputeHeaderHash(hdr) + + assert.Nil(t, err) +} + +func createTestHdrTxBlockBody() (*block.Header, *block.TxBlockBody) { + hasher := mock.HasherMock{} + + hdr := &block.Header{ + Nonce: 1, + ShardId: 2, + Epoch: 3, + Round: 4, + TimeStamp: uint64(11223344), + PrevHash: hasher.Compute("prev hash"), + BlockBodyHash: hasher.Compute("tx block body hash"), + PubKeysBitmap: []byte{255, 0, 128}, + Commitment: hasher.Compute("commitment"), + Signature: hasher.Compute("signature"), + } + + txBlock := &block.TxBlockBody{ + StateBlockBody: block.StateBlockBody{ + RootHash: hasher.Compute("root hash"), + }, + MiniBlocks: []block.MiniBlock{ + { + ShardID: 0, + TxHashes: [][]byte{ + hasher.Compute("txHash_0_1"), + hasher.Compute("txHash_0_2"), + }, + }, + { + ShardID: 1, + TxHashes: [][]byte{ + hasher.Compute("txHash_1_1"), + hasher.Compute("txHash_1_2"), + }, + }, + { + ShardID: 2, + TxHashes: [][]byte{ + hasher.Compute("txHash_2_1"), + }, + }, + { + ShardID: 3, + TxHashes: make([][]byte, 0), + }, + }, + } + + return hdr, txBlock +} + +//------- ComputeNewNoncePrevHash + +func TestBlockProcessor_DisplayLogInfo(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + + hasher := mock.HasherMock{} + hdr, txBlock := createTestHdrTxBlockBody() + + be, _ := blproc.NewBlockProcessor( + tdp, &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + &mock.AccountsStub{}, + mock.NewOneShardCoordinatorMock(), + &mock.ForkDetectorMock{}, + func(destShardID uint32, txHash []byte) { + }, + ) + + hdr.PrevHash = hasher.Compute("prev hash") + hdr.BlockBodyHash = hasher.Compute("block hash") + + be.DisplayLogInfo(hdr, txBlock, hasher.Compute("header hash")) +} + +//------- SortTxByNonce + +func TestSortTxByNonce_NilCacherShouldErr(t *testing.T) { + t.Parallel() + + transactions, txHashes, err := blproc.SortTxByNonce(nil) + + assert.Nil(t, transactions) + assert.Nil(t, txHashes) + assert.Equal(t, process.ErrNilCacher, err) +} + +func TestSortTxByNonce_EmptyCacherShouldReturnEmpty(t *testing.T) { + t.Parallel() + + cacher, _ := storage.NewCache(storage.LRUCache, 100) + transactions, txHashes, err := blproc.SortTxByNonce(cacher) + + assert.Equal(t, 0, len(transactions)) + assert.Equal(t, 0, len(txHashes)) + assert.Nil(t, err) +} + +func TestSortTxByNonce_OneTxShouldWork(t *testing.T) { + t.Parallel() + + cacher, _ := storage.NewCache(storage.LRUCache, 100) + + r := rand.New(rand.NewSource(time.Now().UnixNano())) + + hash, tx := createRandTx(r) + + cacher.HasOrAdd(hash, tx) + + transactions, txHashes, err := blproc.SortTxByNonce(cacher) + + assert.Equal(t, 1, len(transactions)) + assert.Equal(t, 1, len(txHashes)) + assert.Nil(t, err) + + assert.True(t, hashInSlice(hash, txHashes)) + assert.True(t, txInSlice(tx, transactions)) +} + +func createRandTx(rand *rand.Rand) ([]byte, *transaction.Transaction) { + tx := &transaction.Transaction{ + Nonce: rand.Uint64(), + } + + marshalizer := &mock.MarshalizerMock{} + buffTx, _ := marshalizer.Marshal(tx) + hash := mock.HasherMock{}.Compute(string(buffTx)) + + return hash, tx +} + +func hashInSlice(hash []byte, hashes [][]byte) bool { + for _, h := range hashes { + if bytes.Equal(h, hash) { + return true + } + } + + return false +} + +func txInSlice(tx *transaction.Transaction, transactions []*transaction.Transaction) bool { + for _, t := range transactions { + if reflect.DeepEqual(tx, t) { + return true + } + } + + return false +} + +func TestSortTxByNonce_MoreTransactionsShouldNotErr(t *testing.T) { + t.Parallel() + + cache, _, _ := genCacherTransactionsHashes(100) + + _, _, err := blproc.SortTxByNonce(cache) assert.Nil(t, err) } + +func TestSortTxByNonce_MoreTransactionsShouldRetSameSize(t *testing.T) { + t.Parallel() + + cache, genTransactions, _ := genCacherTransactionsHashes(100) + + transactions, txHashes, _ := blproc.SortTxByNonce(cache) + + assert.Equal(t, len(genTransactions), len(transactions)) + assert.Equal(t, len(genTransactions), len(txHashes)) +} + +func TestSortTxByNonce_MoreTransactionsShouldContainSameElements(t *testing.T) { + t.Parallel() + + cache, genTransactions, genHashes := genCacherTransactionsHashes(100) + + transactions, txHashes, _ := blproc.SortTxByNonce(cache) + + for i := 0; i < len(genTransactions); i++ { + assert.True(t, hashInSlice(genHashes[i], txHashes)) + assert.True(t, txInSlice(genTransactions[i], transactions)) + } +} + +func TestSortTxByNonce_MoreTransactionsShouldContainSortedElements(t *testing.T) { + t.Parallel() + + cache, _, _ := genCacherTransactionsHashes(100) + + transactions, _, _ := blproc.SortTxByNonce(cache) + + lastNonce := uint64(0) + + for i := 0; i < len(transactions); i++ { + tx := transactions[i] + + assert.True(t, lastNonce <= tx.Nonce) + + fmt.Println(tx.Nonce) + + lastNonce = tx.Nonce + } +} + +func TestSortTxByNonce_TransactionsWithSameNonceShouldGetSorted(t *testing.T) { + t.Parallel() + + transactions := []*transaction.Transaction{ + {Nonce: 1, Signature: []byte("sig1")}, + {Nonce: 2, Signature: []byte("sig2")}, + {Nonce: 1, Signature: []byte("sig3")}, + {Nonce: 2, Signature: []byte("sig4")}, + {Nonce: 3, Signature: []byte("sig5")}, + } + + cache, _ := storage.NewCache(storage.LRUCache, uint32(len(transactions))) + + for _, tx := range transactions { + marshalizer := &mock.MarshalizerMock{} + buffTx, _ := marshalizer.Marshal(tx) + hash := mock.HasherMock{}.Compute(string(buffTx)) + + cache.Put(hash, tx) + } + + sortedTxs, _, _ := blproc.SortTxByNonce(cache) + + lastNonce := uint64(0) + + for i := 0; i < len(sortedTxs); i++ { + tx := sortedTxs[i] + + assert.True(t, lastNonce <= tx.Nonce) + + fmt.Printf("tx.Nonce: %d, tx.Sig: %s\n", tx.Nonce, tx.Signature) + + lastNonce = tx.Nonce + } + + assert.Equal(t, len(sortedTxs), len(transactions)) + + //test if one transaction from transactions might not be in sortedTx + for _, tx := range transactions { + found := false + + for _, stx := range sortedTxs { + if reflect.DeepEqual(tx, stx) { + found = true + break + } + } + + if !found { + assert.Fail(t, "Not found tx in sorted slice for sig: "+string(tx.Signature)) + } + } +} + +func genCacherTransactionsHashes(noOfTx int) (storage.Cacher, []*transaction.Transaction, [][]byte) { + cacher, _ := storage.NewCache(storage.LRUCache, uint32(noOfTx)) + + r := rand.New(rand.NewSource(time.Now().UnixNano())) + + genHashes := make([][]byte, 0) + genTransactions := make([]*transaction.Transaction, 0) + + for i := 0; i < noOfTx; i++ { + hash, tx := createRandTx(r) + cacher.HasOrAdd(hash, tx) + + genHashes = append(genHashes, hash) + genTransactions = append(genTransactions, tx) + } + + return cacher, genTransactions, genHashes +} + +func BenchmarkSortTxByNonce1(b *testing.B) { + cache, _, _ := genCacherTransactionsHashes(10000) + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + _, _, _ = blproc.SortTxByNonce(cache) + } +} diff --git a/process/block/resolvers.go b/process/block/resolvers.go index f28bd743b08..27f5c09bb96 100644 --- a/process/block/resolvers.go +++ b/process/block/resolvers.go @@ -8,8 +8,8 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/storage" ) -// headerResolver is a wrapper over Resolver that is specialized in resolving headers requests -type headerResolver struct { +// HeaderResolver is a wrapper over Resolver that is specialized in resolving headers requests +type HeaderResolver struct { process.Resolver hdrPool data.ShardedDataCacherNotifier hdrNonces data.Uint64Cacher @@ -18,8 +18,8 @@ type headerResolver struct { nonceConverter typeConverters.Uint64ByteSliceConverter } -// genericBlockBodyResolver is a wrapper over Resolver that is specialized in resolving block body requests -type genericBlockBodyResolver struct { +// GenericBlockBodyResolver is a wrapper over Resolver that is specialized in resolving block body requests +type GenericBlockBodyResolver struct { process.Resolver blockBodyPool storage.Cacher blockStorage storage.Storer @@ -35,7 +35,7 @@ func NewHeaderResolver( hdrStorage storage.Storer, marshalizer marshal.Marshalizer, nonceConverter typeConverters.Uint64ByteSliceConverter, -) (*headerResolver, error) { +) (*HeaderResolver, error) { if resolver == nil { return nil, process.ErrNilResolver @@ -67,7 +67,7 @@ func NewHeaderResolver( return nil, process.ErrNilNonceConverter } - hdrResolver := &headerResolver{ + hdrResolver := &HeaderResolver{ Resolver: resolver, hdrPool: transient.Headers(), hdrNonces: transient.HeadersNonces(), @@ -80,7 +80,7 @@ func NewHeaderResolver( return hdrResolver, nil } -func (hdrRes *headerResolver) resolveHdrRequest(rd process.RequestData) ([]byte, error) { +func (hdrRes *HeaderResolver) resolveHdrRequest(rd process.RequestData) ([]byte, error) { if rd.Value == nil { return nil, process.ErrNilValue } @@ -100,22 +100,22 @@ func (hdrRes *headerResolver) resolveHdrRequest(rd process.RequestData) ([]byte, return buff, err } -func (hdrRes *headerResolver) resolveHeaderFromHash(key []byte) ([]byte, error) { - dataMap := hdrRes.hdrPool.SearchData(key) - for _, v := range dataMap { - //since there might be multiple entries, it shall return the first one that it finds - buff, err := hdrRes.marshalizer.Marshal(v) - if err != nil { - return nil, err - } +func (hdrRes *HeaderResolver) resolveHeaderFromHash(key []byte) ([]byte, error) { + value, ok := hdrRes.hdrPool.SearchFirstData(key) - return buff, nil + if !ok { + return hdrRes.hdrStorage.Get(key) + } + + buff, err := hdrRes.marshalizer.Marshal(value) + if err != nil { + return nil, err } - return hdrRes.hdrStorage.Get(key) + return buff, nil } -func (hdrRes *headerResolver) resolveHeaderFromNonce(key []byte) ([]byte, error) { +func (hdrRes *HeaderResolver) resolveHeaderFromNonce(key []byte) ([]byte, error) { //key is now an encoded nonce (uint64) //Step 1. decode the nonce from the key @@ -131,22 +131,22 @@ func (hdrRes *headerResolver) resolveHeaderFromNonce(key []byte) ([]byte, error) } //Step 3. search header by key (hash) - dataMap := hdrRes.hdrPool.SearchData(hash) - for _, v := range dataMap { - //since there might be multiple entries, it shall return the first one that it finds - buff, err := hdrRes.marshalizer.Marshal(v) - if err != nil { - return nil, err - } + value, ok := hdrRes.hdrPool.SearchFirstData(hash) + if !ok { + return hdrRes.hdrStorage.Get(hash) + } - return buff, nil + //since there might be multiple entries, it shall return the first one that it finds + buff, err := hdrRes.marshalizer.Marshal(value) + if err != nil { + return nil, err } - return hdrRes.hdrStorage.Get(hash) + return buff, nil } // RequestHeaderFromHash requests a header from other peers having input the hdr hash -func (hdrRes *headerResolver) RequestHeaderFromHash(hash []byte) error { +func (hdrRes *HeaderResolver) RequestHeaderFromHash(hash []byte) error { return hdrRes.RequestData(process.RequestData{ Type: process.HashType, Value: hash, @@ -154,7 +154,7 @@ func (hdrRes *headerResolver) RequestHeaderFromHash(hash []byte) error { } // RequestHeaderFromNonce requests a header from other peers having input the hdr nonce -func (hdrRes *headerResolver) RequestHeaderFromNonce(nonce uint64) error { +func (hdrRes *HeaderResolver) RequestHeaderFromNonce(nonce uint64) error { return hdrRes.RequestData(process.RequestData{ Type: process.NonceType, Value: hdrRes.nonceConverter.ToByteSlice(nonce), @@ -168,7 +168,7 @@ func NewGenericBlockBodyResolver( resolver process.Resolver, blockBodyPool storage.Cacher, blockBodyStorage storage.Storer, - marshalizer marshal.Marshalizer) (*genericBlockBodyResolver, error) { + marshalizer marshal.Marshalizer) (*GenericBlockBodyResolver, error) { if resolver == nil { return nil, process.ErrNilResolver @@ -186,7 +186,7 @@ func NewGenericBlockBodyResolver( return nil, process.ErrNilMarshalizer } - bbResolver := &genericBlockBodyResolver{ + bbResolver := &GenericBlockBodyResolver{ Resolver: resolver, blockBodyPool: blockBodyPool, blockStorage: blockBodyStorage, @@ -197,7 +197,7 @@ func NewGenericBlockBodyResolver( return bbResolver, nil } -func (gbbRes *genericBlockBodyResolver) resolveBlockBodyRequest(rd process.RequestData) ([]byte, error) { +func (gbbRes *GenericBlockBodyResolver) resolveBlockBodyRequest(rd process.RequestData) ([]byte, error) { if rd.Type != process.HashType { return nil, process.ErrResolveNotHashType } @@ -220,7 +220,7 @@ func (gbbRes *genericBlockBodyResolver) resolveBlockBodyRequest(rd process.Reque } // RequestBlockBodyFromHash requests a block body from other peers having input the block body hash -func (gbbRes *genericBlockBodyResolver) RequestBlockBodyFromHash(hash []byte) error { +func (gbbRes *GenericBlockBodyResolver) RequestBlockBodyFromHash(hash []byte) error { return gbbRes.RequestData(process.RequestData{ Type: process.HashType, Value: hash, diff --git a/process/block/resolvers_test.go b/process/block/resolvers_test.go index 5a5509f53df..b17e3a09018 100644 --- a/process/block/resolvers_test.go +++ b/process/block/resolvers_test.go @@ -266,14 +266,11 @@ func TestHeaderResolver_ResolveHdrRequestHashTypeFoundInHdrPoolShouldRetValue(t transientPool.HeadersCalled = func() data.ShardedDataCacherNotifier { headers := &mock.ShardedDataStub{} - headers.SearchDataCalled = func(key []byte) (shardValuesPairs map[uint32]interface{}) { + headers.SearchFirstDataCalled = func(key []byte) (value interface{}, ok bool) { if bytes.Equal(requestedData, key) { - return map[uint32]interface{}{ - 0: resolvedData, - } + return resolvedData, true } - - return nil + return nil, false } return headers @@ -314,14 +311,11 @@ func TestHeaderResolver_ResolveHdrRequestHashTypeFoundInHdrPoolMarshalizerFailsS transientPool.HeadersCalled = func() data.ShardedDataCacherNotifier { headers := &mock.ShardedDataStub{} - headers.SearchDataCalled = func(key []byte) (shardValuesPairs map[uint32]interface{}) { + headers.SearchFirstDataCalled = func(key []byte) (value interface{}, ok bool) { if bytes.Equal(requestedData, key) { - return map[uint32]interface{}{ - 0: resolvedData, - } + return resolvedData, true } - - return nil + return nil, false } return headers @@ -362,8 +356,8 @@ func TestHeaderResolver_ResolveHdrRequestRetFromStorageShouldRetVal(t *testing.T transientPool.HeadersCalled = func() data.ShardedDataCacherNotifier { headers := &mock.ShardedDataStub{} - headers.SearchDataCalled = func(key []byte) (shardValuesPairs map[uint32]interface{}) { - return make(map[uint32]interface{}) + headers.SearchFirstDataCalled = func(key []byte) (value interface{}, ok bool) { + return nil, false } return headers @@ -409,8 +403,8 @@ func TestHeaderResolver_ResolveHdrRequestRetFromStorageCheckRetError(t *testing. transientPool.HeadersCalled = func() data.ShardedDataCacherNotifier { headers := &mock.ShardedDataStub{} - headers.SearchDataCalled = func(key []byte) (shardValuesPairs map[uint32]interface{}) { - return make(map[uint32]interface{}) + headers.SearchFirstDataCalled = func(key []byte) (value interface{}, ok bool) { + return nil, false } return headers @@ -525,14 +519,12 @@ func TestHeaderResolver_ResolveHdrRequestNonceTypeFoundInHdrNoncePoolShouldRetFr transientPool.HeadersCalled = func() data.ShardedDataCacherNotifier { headers := &mock.ShardedDataStub{} - headers.SearchDataCalled = func(key []byte) (shardValuesPairs map[uint32]interface{}) { + headers.SearchFirstDataCalled = func(key []byte) (value interface{}, ok bool) { if bytes.Equal(key, []byte("aaaa")) { - return map[uint32]interface{}{ - 0: resolvedData, - } + return resolvedData, true } - return nil + return nil, false } return headers @@ -585,8 +577,8 @@ func TestHeaderResolver_ResolveHdrRequestNonceTypeFoundInHdrNoncePoolShouldRetFr transientPool.HeadersCalled = func() data.ShardedDataCacherNotifier { headers := &mock.ShardedDataStub{} - headers.SearchDataCalled = func(key []byte) (shardValuesPairs map[uint32]interface{}) { - return make(map[uint32]interface{}) + headers.SearchFirstDataCalled = func(key []byte) (value interface{}, ok bool) { + return nil, false } return headers @@ -644,8 +636,8 @@ func TestHeaderResolver_ResolveHdrRequestNonceTypeFoundInHdrNoncePoolCheckRetErr transientPool.HeadersCalled = func() data.ShardedDataCacherNotifier { headers := &mock.ShardedDataStub{} - headers.SearchDataCalled = func(key []byte) (shardValuesPairs map[uint32]interface{}) { - return make(map[uint32]interface{}) + headers.SearchFirstDataCalled = func(key []byte) (value interface{}, ok bool) { + return nil, false } return headers diff --git a/process/errors.go b/process/errors.go index 6fcf5ccbb55..924b8908bdf 100644 --- a/process/errors.go +++ b/process/errors.go @@ -70,9 +70,6 @@ var ErrNilPreviousBlockHash = errors.New("nil previous block header hash") // ErrNilSignature signals that a operation has been attempted with a nil signature var ErrNilSignature = errors.New("nil signature") -// ErrNilChallenge signals that a operation has been attempted with a nil challenge -var ErrNilChallenge = errors.New("nil challenge") - // ErrNilCommitment signals that a operation has been attempted with a nil commitment var ErrNilCommitment = errors.New("nil commitment") @@ -85,7 +82,7 @@ var ErrNilTxHashes = errors.New("nil transaction hashes") // ErrNilRootHash signals that an operation has been attempted with a nil root hash var ErrNilRootHash = errors.New("root hash is nil") -// ErrWrongNonceInBlock signals the nonce in block is different than expected nounce +// ErrWrongNonceInBlock signals the nonce in block is different than expected nonce var ErrWrongNonceInBlock = errors.New("wrong nonce in block") // ErrInvalidBlockHash signals the hash of the block is not matching with the previous one @@ -229,3 +226,42 @@ var ErrNilBlockBodyPool = errors.New("nil block body pool") // ErrNilBlockBodyStorage signals that a nil block body storage has been provided var ErrNilBlockBodyStorage = errors.New("nil block body storage") + +// ErrNilTransactionPool signals that a nil transaction pool was used +var ErrNilTransactionPool = errors.New("nil transaction pool") + +// ErrNilTxProcessor signals that a nil transactions processor was used +var ErrNilTxProcessor = errors.New("nil transactions processor") + +// ErrNilDataPoolHolder signals that the data pool holder is nil +var ErrNilDataPoolHolder = errors.New("nil data pool holder") + +// ErrTimeIsOut signals that time is out +var ErrTimeIsOut = errors.New("time is out") + +// ErrNilForkDetector signals that the fork detector is nil +var ErrNilForkDetector = errors.New("nil fork detector") + +// ErrNilContainerElement signals when trying to add a nil element in the container +var ErrNilContainerElement = errors.New("element cannot be nil") + +// ErrInvalidContainerKey signals that an element does not exist in the container's map +var ErrInvalidContainerKey = errors.New("element does not exist in container") + +// ErrContainerKeyAlreadyExists signals that an element was already set in the container's map +var ErrContainerKeyAlreadyExists = errors.New("provided key already exists in container") + +// ErrNilUint64ByteSliceConverter signals that a nil byte slice converter was provided +var ErrNilUint64ByteSliceConverter = errors.New("nil byte slice converter") + +// ErrNilInterceptorContainer signals that a nil interceptor container was provided +var ErrNilInterceptorContainer = errors.New("nil interceptor container") + +// ErrNilResolverContainer signals that a nil resolver container was provided +var ErrNilResolverContainer = errors.New("nil resolver container") + +// ErrNilTransactionHandler signals that a nil transaction handler func was provided +var ErrNilTransactionHandler = errors.New("nil request transaction handler") + +// ErrNilHaveTimeHandler signals that a nil have time handler func was provided +var ErrNilHaveTimeHandler = errors.New("nil have time handler") diff --git a/process/factory/export_test.go b/process/factory/export_test.go new file mode 100644 index 00000000000..58d472ac0aa --- /dev/null +++ b/process/factory/export_test.go @@ -0,0 +1,9 @@ +package factory + +import ( + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" +) + +func (p *processorsCreator) SetMessenger(messenger p2p.Messenger) { + p.messenger = messenger +} diff --git a/process/factory/factory.go b/process/factory/factory.go new file mode 100644 index 00000000000..4bebcc22ead --- /dev/null +++ b/process/factory/factory.go @@ -0,0 +1,417 @@ +package factory + +import ( + "github.com/ElrondNetwork/elrond-go-sandbox/crypto" + "github.com/ElrondNetwork/elrond-go-sandbox/data" + "github.com/ElrondNetwork/elrond-go-sandbox/data/blockchain" + "github.com/ElrondNetwork/elrond-go-sandbox/data/state" + "github.com/ElrondNetwork/elrond-go-sandbox/data/typeConverters" + "github.com/ElrondNetwork/elrond-go-sandbox/hashing" + "github.com/ElrondNetwork/elrond-go-sandbox/marshal" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/ElrondNetwork/elrond-go-sandbox/process" + "github.com/ElrondNetwork/elrond-go-sandbox/process/block" + "github.com/ElrondNetwork/elrond-go-sandbox/process/interceptor" + "github.com/ElrondNetwork/elrond-go-sandbox/process/resolver" + "github.com/ElrondNetwork/elrond-go-sandbox/process/transaction" + "github.com/ElrondNetwork/elrond-go-sandbox/sharding" +) + +type topicName string + +const ( + // TransactionTopic is the topic used for sharing transactions + TransactionTopic topicName = "transactions" + // HeadersTopic is the topic used for sharing block headers + HeadersTopic topicName = "headers" + // TxBlockBodyTopic is the topic used for sharing transactions block bodies + TxBlockBodyTopic topicName = "txBlockBodies" + // PeerChBodyTopic is used for sharing peer change block bodies + PeerChBodyTopic topicName = "peerChangeBlockBodies" + // StateBodyTopic is used for sharing state block bodies + StateBodyTopic topicName = "stateBlockBodies" +) + +type processorsCreator struct { + interceptorContainer process.InterceptorContainer + resolverContainer process.ResolverContainer + + messenger p2p.Messenger + blockchain *blockchain.BlockChain + dataPool data.TransientDataHolder + shardCoordinator sharding.ShardCoordinator + addrConverter state.AddressConverter + hasher hashing.Hasher + marshalizer marshal.Marshalizer + singleSignKeyGen crypto.KeyGenerator + uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter +} + +// ProcessorsCreatorConfig is the struct containing the needed params to be +// provided when initialising a new processorsCreator +type ProcessorsCreatorConfig struct { + InterceptorContainer process.InterceptorContainer + ResolverContainer process.ResolverContainer + + Messenger p2p.Messenger + Blockchain *blockchain.BlockChain + DataPool data.TransientDataHolder + ShardCoordinator sharding.ShardCoordinator + AddrConverter state.AddressConverter + Hasher hashing.Hasher + Marshalizer marshal.Marshalizer + SingleSignKeyGen crypto.KeyGenerator + Uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter +} + +// NewProcessorsCreator is responsible for creating a new processorsCreator object +func NewProcessorsCreator(config ProcessorsCreatorConfig) (*processorsCreator, error) { + err := validateRequiredProcessCreatorParams(config) + if err != nil { + return nil, err + } + return &processorsCreator{ + interceptorContainer: config.InterceptorContainer, + resolverContainer: config.ResolverContainer, + messenger: config.Messenger, + blockchain: config.Blockchain, + dataPool: config.DataPool, + shardCoordinator: config.ShardCoordinator, + addrConverter: config.AddrConverter, + hasher: config.Hasher, + marshalizer: config.Marshalizer, + singleSignKeyGen: config.SingleSignKeyGen, + uint64ByteSliceConverter: config.Uint64ByteSliceConverter, + }, nil +} + +// CreateInterceptors creates the interceptors and initializes the interceptor container +func (p *processorsCreator) CreateInterceptors() error { + err := p.createTxInterceptor() + if err != nil { + return err + } + + err = p.createHdrInterceptor() + if err != nil { + return err + } + + err = p.createTxBlockBodyInterceptor() + if err != nil { + return err + } + + err = p.createPeerChBlockBodyInterceptor() + if err != nil { + return err + } + + err = p.createStateBlockBodyInterceptor() + if err != nil { + return err + } + + return nil +} + +// CreateResolvers creates the resolvers and initializes the resolver container +func (p *processorsCreator) CreateResolvers() error { + err := p.createTxResolver() + if err != nil { + return err + } + + err = p.createHdrResolver() + if err != nil { + return err + } + + err = p.createTxBlockBodyResolver() + if err != nil { + return err + } + + err = p.createPeerChBlockBodyResolver() + if err != nil { + return err + } + + err = p.createStateBlockBodyResolver() + if err != nil { + return err + } + + return nil +} + +// InterceptorContainer is a getter for interceptorContainer property +func (p *processorsCreator) InterceptorContainer() process.InterceptorContainer { + return p.interceptorContainer +} + +// ResolverContainer is a getter for resolverContainer property +func (p *processorsCreator) ResolverContainer() process.ResolverContainer { + return p.resolverContainer +} + +func (p *processorsCreator) createTxInterceptor() error { + intercept, err := interceptor.NewTopicInterceptor(string(TransactionTopic), p.messenger, transaction.NewInterceptedTransaction()) + if err != nil { + return err + } + + txStorer := p.blockchain.GetStorer(blockchain.TransactionUnit) + + txInterceptor, err := transaction.NewTxInterceptor( + intercept, + p.dataPool.Transactions(), + txStorer, + p.addrConverter, + p.hasher, + p.singleSignKeyGen, + p.shardCoordinator) + + if err != nil { + return err + } + + err = p.interceptorContainer.Add(string(TransactionTopic), txInterceptor) + return err +} + +func (p *processorsCreator) createHdrInterceptor() error { + intercept, err := interceptor.NewTopicInterceptor(string(HeadersTopic), p.messenger, block.NewInterceptedHeader()) + if err != nil { + return err + } + + headerStorer := p.blockchain.GetStorer(blockchain.BlockHeaderUnit) + + hdrInterceptor, err := block.NewHeaderInterceptor( + intercept, + p.dataPool.Headers(), + p.dataPool.HeadersNonces(), + headerStorer, + p.hasher, + p.shardCoordinator, + ) + + if err != nil { + return err + } + + err = p.interceptorContainer.Add(string(HeadersTopic), hdrInterceptor) + return err +} + +func (p *processorsCreator) createTxBlockBodyInterceptor() error { + intercept, err := interceptor.NewTopicInterceptor(string(TxBlockBodyTopic), p.messenger, block.NewInterceptedTxBlockBody()) + if err != nil { + return err + } + + txBlockBodyStorer := p.blockchain.GetStorer(blockchain.TxBlockBodyUnit) + + txBlockBodyInterceptor, err := block.NewGenericBlockBodyInterceptor( + intercept, + p.dataPool.TxBlocks(), + txBlockBodyStorer, + p.hasher, + p.shardCoordinator, + ) + + if err != nil { + return err + } + + err = p.interceptorContainer.Add(string(TxBlockBodyTopic), txBlockBodyInterceptor) + return err +} + +func (p *processorsCreator) createPeerChBlockBodyInterceptor() error { + intercept, err := interceptor.NewTopicInterceptor(string(PeerChBodyTopic), p.messenger, block.NewInterceptedPeerBlockBody()) + if err != nil { + return err + } + + peerBlockBodyStorer := p.blockchain.GetStorer(blockchain.PeerBlockBodyUnit) + + peerChBodyInterceptor, err := block.NewGenericBlockBodyInterceptor( + intercept, + p.dataPool.PeerChangesBlocks(), + peerBlockBodyStorer, + p.hasher, + p.shardCoordinator, + ) + + if err != nil { + return err + } + + err = p.interceptorContainer.Add(string(PeerChBodyTopic), peerChBodyInterceptor) + return err +} + +func (p *processorsCreator) createStateBlockBodyInterceptor() error { + intercept, err := interceptor.NewTopicInterceptor(string(StateBodyTopic), p.messenger, block.NewInterceptedStateBlockBody()) + if err != nil { + return err + } + + stateBlockBodyStorer := p.blockchain.GetStorer(blockchain.StateBlockBodyUnit) + + stateBodyInterceptor, err := block.NewGenericBlockBodyInterceptor( + intercept, + p.dataPool.StateBlocks(), + stateBlockBodyStorer, + p.hasher, + p.shardCoordinator, + ) + + if err != nil { + return err + } + + err = p.interceptorContainer.Add(string(StateBodyTopic), stateBodyInterceptor) + return err +} + +func (p *processorsCreator) createTxResolver() error { + resolve, err := resolver.NewTopicResolver(string(TransactionTopic), p.messenger, p.marshalizer) + if err != nil { + return err + } + + txResolver, err := transaction.NewTxResolver( + resolve, + p.dataPool.Transactions(), + p.blockchain.GetStorer(blockchain.TransactionUnit), + p.marshalizer) + + if err != nil { + return err + } + + err = p.resolverContainer.Add(string(TransactionTopic), txResolver) + return err +} + +func (p *processorsCreator) createHdrResolver() error { + resolve, err := resolver.NewTopicResolver(string(HeadersTopic), p.messenger, p.marshalizer) + if err != nil { + return err + } + + hdrResolver, err := block.NewHeaderResolver( + resolve, + p.dataPool, + p.blockchain.GetStorer(blockchain.BlockHeaderUnit), + p.marshalizer, + p.uint64ByteSliceConverter) + + if err != nil { + return err + } + + err = p.resolverContainer.Add(string(HeadersTopic), hdrResolver) + return err +} + +func (p *processorsCreator) createTxBlockBodyResolver() error { + resolve, err := resolver.NewTopicResolver(string(TxBlockBodyTopic), p.messenger, p.marshalizer) + if err != nil { + return err + } + + txBlkResolver, err := block.NewGenericBlockBodyResolver( + resolve, + p.dataPool.TxBlocks(), + p.blockchain.GetStorer(blockchain.TxBlockBodyUnit), + p.marshalizer) + + if err != nil { + return err + } + + err = p.resolverContainer.Add(string(TxBlockBodyTopic), txBlkResolver) + return err +} + +func (p *processorsCreator) createPeerChBlockBodyResolver() error { + resolve, err := resolver.NewTopicResolver(string(PeerChBodyTopic), p.messenger, p.marshalizer) + if err != nil { + return err + } + + peerChBlkResolver, err := block.NewGenericBlockBodyResolver( + resolve, + p.dataPool.PeerChangesBlocks(), + p.blockchain.GetStorer(blockchain.PeerBlockBodyUnit), + p.marshalizer) + + if err != nil { + return err + } + + err = p.resolverContainer.Add(string(PeerChBodyTopic), peerChBlkResolver) + return err +} + +func (p *processorsCreator) createStateBlockBodyResolver() error { + resolve, err := resolver.NewTopicResolver(string(StateBodyTopic), p.messenger, p.marshalizer) + if err != nil { + return err + } + + stateBlkResolver, err := block.NewGenericBlockBodyResolver( + resolve, + p.dataPool.StateBlocks(), + p.blockchain.GetStorer(blockchain.StateBlockBodyUnit), + p.marshalizer) + + if err != nil { + return err + } + + err = p.resolverContainer.Add(string(StateBodyTopic), stateBlkResolver) + return err +} + +func validateRequiredProcessCreatorParams(config ProcessorsCreatorConfig) error { + if config.InterceptorContainer == nil { + return process.ErrNilInterceptorContainer + } + if config.ResolverContainer == nil { + return process.ErrNilResolverContainer + } + if config.Messenger == nil { + return process.ErrNilMessenger + } + if config.Blockchain == nil { + return process.ErrNilBlockChain + } + if config.DataPool == nil { + return process.ErrNilDataPoolHolder + } + if config.ShardCoordinator == nil { + return process.ErrNilShardCoordinator + } + if config.AddrConverter == nil { + return process.ErrNilAddressConverter + } + if config.Hasher == nil { + return process.ErrNilHasher + } + if config.Marshalizer == nil { + return process.ErrNilMarshalizer + } + if config.SingleSignKeyGen == nil { + return process.ErrNilSingleSignKeyGen + } + if config.Uint64ByteSliceConverter == nil { + return process.ErrNilUint64ByteSliceConverter + } + + return nil +} diff --git a/process/factory/factory_test.go b/process/factory/factory_test.go new file mode 100644 index 00000000000..3bd3e813716 --- /dev/null +++ b/process/factory/factory_test.go @@ -0,0 +1,300 @@ +package factory_test + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go-sandbox/data" + "github.com/ElrondNetwork/elrond-go-sandbox/data/blockchain" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/ElrondNetwork/elrond-go-sandbox/process" + "github.com/ElrondNetwork/elrond-go-sandbox/process/factory" + "github.com/ElrondNetwork/elrond-go-sandbox/process/mock" + "github.com/ElrondNetwork/elrond-go-sandbox/storage" + "github.com/libp2p/go-libp2p-pubsub" + "github.com/stretchr/testify/assert" +) + +func TestProcessorsCreator_NilInterceptorContainerShouldErr(t *testing.T) { + t.Parallel() + + pFactoryConfig := createConfig() + pFactoryConfig.InterceptorContainer = nil + pFactory, err := factory.NewProcessorsCreator(pFactoryConfig) + + assert.Nil(t, pFactory) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "nil interceptor container") +} + +func TestProcessorsCreator_NilResolverContainerShouldErr(t *testing.T) { + t.Parallel() + + pFactoryConfig := createConfig() + pFactoryConfig.ResolverContainer = nil + pFactory, err := factory.NewProcessorsCreator(pFactoryConfig) + + assert.Nil(t, pFactory) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "nil resolver container") +} + +func TestProcessorsCreator_NilMessengerShouldErr(t *testing.T) { + t.Parallel() + + pFactoryConfig := createConfig() + pFactoryConfig.Messenger = nil + pFactory, err := factory.NewProcessorsCreator(pFactoryConfig) + + assert.Nil(t, pFactory) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "nil Messenger") +} + +func TestProcessorsCreator_NilBlockchainShouldErr(t *testing.T) { + t.Parallel() + + pFactoryConfig := createConfig() + pFactoryConfig.Blockchain = nil + pFactory, err := factory.NewProcessorsCreator(pFactoryConfig) + + assert.Nil(t, pFactory) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "nil block chain") +} + +func TestProcessorsCreator_NilDataPoolShouldErr(t *testing.T) { + t.Parallel() + + pFactoryConfig := createConfig() + pFactoryConfig.DataPool = nil + pFactory, err := factory.NewProcessorsCreator(pFactoryConfig) + + assert.Nil(t, pFactory) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "nil data pool") +} + +func TestProcessorsCreator_NilShardCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + pFactoryConfig := createConfig() + pFactoryConfig.ShardCoordinator = nil + pFactory, err := factory.NewProcessorsCreator(pFactoryConfig) + + assert.Nil(t, pFactory) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "nil shard coordinator") +} + +func TestProcessorsCreator_NilAddrConverterShouldErr(t *testing.T) { + t.Parallel() + + pFactoryConfig := createConfig() + pFactoryConfig.AddrConverter = nil + pFactory, err := factory.NewProcessorsCreator(pFactoryConfig) + + assert.Nil(t, pFactory) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "nil AddressConverter") +} + +func TestProcessorsCreator_NilHasherShouldErr(t *testing.T) { + t.Parallel() + + pFactoryConfig := createConfig() + pFactoryConfig.Hasher = nil + pFactory, err := factory.NewProcessorsCreator(pFactoryConfig) + + assert.Nil(t, pFactory) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "nil Hasher") +} + +func TestProcessorsCreator_NilMarshalizerShouldErr(t *testing.T) { + t.Parallel() + + pFactoryConfig := createConfig() + pFactoryConfig.Marshalizer = nil + pFactory, err := factory.NewProcessorsCreator(pFactoryConfig) + + assert.Nil(t, pFactory) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "nil Marshalizer") +} + +func TestProcessorsCreator_NilSingleSignKeyGenShouldErr(t *testing.T) { + t.Parallel() + + pFactoryConfig := createConfig() + pFactoryConfig.SingleSignKeyGen = nil + pFactory, err := factory.NewProcessorsCreator(pFactoryConfig) + + assert.Nil(t, pFactory) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "nil single sign key generator") +} + +func TestProcessorsCreator_NilUint64ByteSliceConverterShouldErr(t *testing.T) { + t.Parallel() + + pFactoryConfig := createConfig() + pFactoryConfig.Uint64ByteSliceConverter = nil + pFactory, err := factory.NewProcessorsCreator(pFactoryConfig) + + assert.Nil(t, pFactory) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "nil byte slice converter") +} + +func TestProcessorsCreator_ShouldWork(t *testing.T) { + t.Parallel() + + pFactoryConfig := createConfig() + count := 10 + pFactoryConfig.InterceptorContainer = &mock.InterceptorContainer{ + LenCalled: func() int { + return count + }, + } + pFactoryConfig.ResolverContainer = &mock.ResolverContainer{ + LenCalled: func() int { + return count + }, + } + pFactory, err := factory.NewProcessorsCreator(pFactoryConfig) + + assert.NotNil(t, pFactory) + assert.NotNil(t, pFactory.ResolverContainer()) + assert.NotNil(t, pFactory.InterceptorContainer()) + assert.Nil(t, err) + assert.Equal(t, pFactory.ResolverContainer().Len(), count) + assert.Equal(t, pFactory.InterceptorContainer().Len(), count) +} + +func TestCreateInterceptors_ReturnsSuccessfully(t *testing.T) { + t.Parallel() + + pFactoryConfig := createConfig() + pFactory, err := factory.NewProcessorsCreator(pFactoryConfig) + assert.Nil(t, err) + + err = pFactory.CreateInterceptors() + assert.Nil(t, err) +} + +func TestCreateInterceptors_NewTopicInterceptorErrorsWillMakeCreateInterceptorsError(t *testing.T) { + t.Parallel() + + pFactoryConfig := createConfig() + pFactory, _ := factory.NewProcessorsCreator(pFactoryConfig) + + pFactory.SetMessenger(nil) + err := pFactory.CreateInterceptors() + assert.NotNil(t, err) +} + +func TestCreateResolvers_ReturnsSuccessfully(t *testing.T) { + t.Parallel() + + pFactoryConfig := createConfig() + pFactory, err := factory.NewProcessorsCreator(pFactoryConfig) + assert.Nil(t, err) + + err = pFactory.CreateResolvers() + assert.Nil(t, err) +} + +func TestCreateResolvers_NewTopicInterceptorErrorsWillMakeCreateInterceptorsError(t *testing.T) { + t.Parallel() + + pFactoryConfig := createConfig() + pFactory, _ := factory.NewProcessorsCreator(pFactoryConfig) + + pFactory.SetMessenger(nil) + err := pFactory.CreateResolvers() + assert.NotNil(t, err) +} + +func createConfig() factory.ProcessorsCreatorConfig { + + mockMessenger := createMessenger() + mockTransientDataPool := createDataPool() + mockInterceptorContainer := createInterceptorContainer() + mockResolverContainer := createResolverContainer() + + return factory.ProcessorsCreatorConfig{ + InterceptorContainer: mockInterceptorContainer, + ResolverContainer: mockResolverContainer, + Messenger: mockMessenger, + DataPool: mockTransientDataPool, + Blockchain: createBlockchain(), + ShardCoordinator: mock.NewOneShardCoordinatorMock(), + AddrConverter: &mock.AddressConverterMock{}, + Hasher: mock.HasherMock{}, + Marshalizer: &mock.MarshalizerMock{}, + SingleSignKeyGen: &mock.SingleSignKeyGenMock{}, + Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, + } +} + +func createBlockchain() *blockchain.BlockChain { + blkc, _ := blockchain.NewBlockChain( + &mock.CacherStub{}, + &mock.StorerStub{}, + &mock.StorerStub{}, + &mock.StorerStub{}, + &mock.StorerStub{}, + &mock.StorerStub{}) + + return blkc +} + +func createMessenger() p2p.Messenger { + mockMessenger := mock.NewMessengerStub() + mockMessenger.GetTopicCalled = func(name string) *p2p.Topic { + topic := &p2p.Topic{} + topic.RegisterTopicValidator = func(v pubsub.Validator) error { + return nil + } + return topic + } + return mockMessenger +} + +func createDataPool() data.TransientDataHolder { + mockTransientDataPool := &mock.TransientDataPoolMock{} + mockTransientDataPool.TransactionsCalled = func() data.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{} + } + mockTransientDataPool.HeadersCalled = func() data.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{} + } + mockTransientDataPool.HeadersNoncesCalled = func() data.Uint64Cacher { + return &mock.Uint64CacherStub{} + } + mockTransientDataPool.TxBlocksCalled = func() storage.Cacher { + return &mock.CacherStub{} + } + mockTransientDataPool.PeerChangesBlocksCalled = func() storage.Cacher { + return &mock.CacherStub{} + } + mockTransientDataPool.StateBlocksCalled = func() storage.Cacher { + return &mock.CacherStub{} + } + return mockTransientDataPool +} + +func createInterceptorContainer() process.InterceptorContainer { + mockInterceptorContainer := &mock.InterceptorContainer{} + mockInterceptorContainer.AddCalled = func(key string, interceptor process.Interceptor) error { + return nil + } + return mockInterceptorContainer +} +func createResolverContainer() process.ResolverContainer { + mockResolverContainer := &mock.ResolverContainer{} + mockResolverContainer.AddCalled = func(key string, resolver process.Resolver) error { + return nil + } + return mockResolverContainer +} diff --git a/process/interceptor/container.go b/process/interceptor/container.go new file mode 100644 index 00000000000..3eed5b3bf5d --- /dev/null +++ b/process/interceptor/container.go @@ -0,0 +1,78 @@ +package interceptor + +import ( + "sync" + + "github.com/ElrondNetwork/elrond-go-sandbox/process" +) + +// Container is a holder for interceptors organized by type +type Container struct { + mutex sync.RWMutex + interceptors map[string]process.Interceptor +} + +// NewContainer will create a new instance of an inteceptor container +func NewContainer() *Container { + return &Container{ + mutex: sync.RWMutex{}, + interceptors: make(map[string]process.Interceptor), + } +} + +// Get returns the interceptor stored at a certain key. +// Returns an error if the element does not exist +func (i *Container) Get(key string) (process.Interceptor, error) { + i.mutex.RLock() + interceptor, ok := i.interceptors[key] + i.mutex.RUnlock() + if !ok { + return nil, process.ErrInvalidContainerKey + } + return interceptor, nil +} + +// Add will add an interceptor at a given key. Returns +// an error if the element already exists +func (i *Container) Add(key string, interceptor process.Interceptor) error { + if interceptor == nil { + return process.ErrNilContainerElement + } + i.mutex.Lock() + defer i.mutex.Unlock() + + _, ok := i.interceptors[key] + + if ok { + return process.ErrContainerKeyAlreadyExists + } + + i.interceptors[key] = interceptor + return nil +} + +// Replace will add (or replace if it already exists) an interceptor at a given key +func (i *Container) Replace(key string, interceptor process.Interceptor) error { + if interceptor == nil { + return process.ErrNilContainerElement + } + i.mutex.Lock() + i.interceptors[key] = interceptor + i.mutex.Unlock() + return nil +} + +// Remove will remove an interceptor at a given key +func (i *Container) Remove(key string) { + i.mutex.Lock() + delete(i.interceptors, key) + i.mutex.Unlock() +} + +// Len returns the length of the added interceptors +func (i *Container) Len() int { + i.mutex.RLock() + l := len(i.interceptors) + i.mutex.RUnlock() + return l +} diff --git a/process/interceptor/topicInterceptor.go b/process/interceptor/topicInterceptor.go index d2b937c8403..4b5189abbe5 100644 --- a/process/interceptor/topicInterceptor.go +++ b/process/interceptor/topicInterceptor.go @@ -4,6 +4,7 @@ import ( "context" "github.com/ElrondNetwork/elrond-go-sandbox/logger" + "github.com/ElrondNetwork/elrond-go-sandbox/marshal" "github.com/ElrondNetwork/elrond-go-sandbox/p2p" "github.com/ElrondNetwork/elrond-go-sandbox/process" "github.com/libp2p/go-libp2p-pubsub" @@ -15,6 +16,7 @@ var log = logger.NewDefaultLogger() // the data type topicInterceptor struct { messenger p2p.Messenger + marshalizer marshal.Marshalizer templateObject p2p.Creator name string @@ -32,6 +34,12 @@ func NewTopicInterceptor( return nil, process.ErrNilMessenger } + marshalizer := messenger.Marshalizer() + + if marshalizer == nil { + return nil, process.ErrNilMarshalizer + } + if templateObject == nil { return nil, process.ErrNilNewer } @@ -45,6 +53,7 @@ func NewTopicInterceptor( messenger: messenger, templateObject: templateObject, name: name, + marshalizer: marshalizer, } err = topic.RegisterValidator(intercept.validator) @@ -69,14 +78,15 @@ func getOrCreateTopic(name string, templateObject p2p.Creator, messenger p2p.Mes func (ti *topicInterceptor) validator(ctx context.Context, message *pubsub.Message) bool { obj := ti.templateObject.Create() - marshalizer := ti.messenger.Marshalizer() - err := marshalizer.Unmarshal(obj, message.GetData()) + err := ti.marshalizer.Unmarshal(obj, message.GetData()) if err != nil { + log.Debug(err.Error()) return false } if ti.checkReceivedObject == nil { + log.Error("nil checkReceivedObject handler") return false } @@ -105,3 +115,8 @@ func (ti *topicInterceptor) SetCheckReceivedObjectHandler(handler func(newer p2p func (ti *topicInterceptor) CheckReceivedObjectHandler() func(newer p2p.Creator, rawData []byte) error { return ti.checkReceivedObject } + +// Marshalizer returns the marshalizer used to unmarshal the received data +func (ti *topicInterceptor) Marshalizer() marshal.Marshalizer { + return ti.marshalizer +} diff --git a/process/interceptor/topicInterceptor_test.go b/process/interceptor/topicInterceptor_test.go index 9790915f95b..98a4342ad0d 100644 --- a/process/interceptor/topicInterceptor_test.go +++ b/process/interceptor/topicInterceptor_test.go @@ -22,7 +22,7 @@ func TestNewTopicInterceptor_NilMessengerShouldErr(t *testing.T) { } func TestNewTopicInterceptor_NilTemplateObjectShouldErr(t *testing.T) { - ti, err := interceptor.NewTopicInterceptor("", &mock.MessengerStub{}, nil) + ti, err := interceptor.NewTopicInterceptor("", mock.NewMessengerStub(), nil) assert.Equal(t, process.ErrNilNewer, err) assert.Nil(t, ti) } @@ -57,6 +57,26 @@ func TestNewTopicInterceptor_ErrMessengerRegistrationValidatorShouldErr(t *testi assert.Nil(t, ti) } +func TestNewTopicInterceptor_NilMessengerMarshalizerShouldErr(t *testing.T) { + mes := &mock.MessengerStub{} + + mes.AddTopicCalled = func(t *p2p.Topic) error { + t.RegisterTopicValidator = func(v pubsub.Validator) error { + return nil + } + + return nil + } + + mes.GetTopicCalled = func(name string) *p2p.Topic { + return nil + } + + ti, err := interceptor.NewTopicInterceptor("", mes, &mock.StringCreator{}) + assert.Nil(t, ti, err) + assert.Equal(t, process.ErrNilMarshalizer, err) +} + func TestNewTopicInterceptor_OkValsShouldWork(t *testing.T) { mes := mock.NewMessengerStub() @@ -81,6 +101,25 @@ func TestNewTopicInterceptor_OkValsShouldWork(t *testing.T) { assert.NotNil(t, ti) } +func TestNewTopicInterceptor_CompareMarshlizersShouldEqual(t *testing.T) { + mes := mock.NewMessengerStub() + + mes.AddTopicCalled = func(t *p2p.Topic) error { + t.RegisterTopicValidator = func(v pubsub.Validator) error { + return nil + } + + return nil + } + + mes.GetTopicCalled = func(name string) *p2p.Topic { + return nil + } + + ti, _ := interceptor.NewTopicInterceptor("", mes, &mock.StringCreator{}) + assert.True(t, ti.Marshalizer() == mes.Marshalizer()) +} + func TestNewTopicInterceptor_WithExistingTopicShouldWork(t *testing.T) { mes := mock.NewMessengerStub() diff --git a/process/interface.go b/process/interface.go index f04ef0c5972..b3f658fc4bf 100644 --- a/process/interface.go +++ b/process/interface.go @@ -2,12 +2,14 @@ package process import ( "math/big" + "time" "github.com/ElrondNetwork/elrond-go-sandbox/crypto" "github.com/ElrondNetwork/elrond-go-sandbox/data/block" "github.com/ElrondNetwork/elrond-go-sandbox/data/blockchain" "github.com/ElrondNetwork/elrond-go-sandbox/data/state" "github.com/ElrondNetwork/elrond-go-sandbox/data/transaction" + "github.com/ElrondNetwork/elrond-go-sandbox/marshal" "github.com/ElrondNetwork/elrond-go-sandbox/p2p" "github.com/ElrondNetwork/elrond-go-sandbox/sharding" ) @@ -18,17 +20,18 @@ type TransactionProcessor interface { SetSCHandler(func(accountsAdapter state.AccountsAdapter, transaction *transaction.Transaction) error) ProcessTransaction(transaction *transaction.Transaction, round int32) error - SetBalancesToTrie(accBalance map[string]big.Int) (rootHash []byte, err error) + SetBalancesToTrie(accBalance map[string]*big.Int) (rootHash []byte, err error) } // BlockProcessor is the main interface for block execution engine type BlockProcessor interface { - ProcessBlock(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody) error - ProcessAndCommit(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody) error + ProcessBlock(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody, haveTime func() time.Duration) error + ProcessAndCommit(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody, haveTime func() time.Duration) error CommitBlock(blockChain *blockchain.BlockChain, header *block.Header, block *block.TxBlockBody) error RevertAccountState() - CreateGenesisBlockBody(balances map[string]big.Int, shardId uint32) *block.StateBlockBody + CreateGenesisBlockBody(balances map[string]*big.Int, shardId uint32) (*block.StateBlockBody, error) CreateTxBlockBody(shardId uint32, maxTxInBlock int, round int32, haveTime func() bool) (*block.TxBlockBody, error) + CreateEmptyBlockBody(shardId uint32, round int32) *block.TxBlockBody RemoveBlockTxsFromPool(body *block.TxBlockBody) error GetRootHash() []byte } @@ -72,6 +75,8 @@ type TransactionInterceptorAdapter interface { GetTransaction() *transaction.Transaction SingleSignKeyGen() crypto.KeyGenerator SetSingleSignKeyGen(generator crypto.KeyGenerator) + SetTxBuffWithoutSig(txBuffWithoutSig []byte) + TxBuffWithoutSig() []byte } // BlockBodyInterceptorAdapter defines what a block body object should do @@ -80,6 +85,7 @@ type BlockBodyInterceptorAdapter interface { HashAccesser p2p.Creator Shard() uint32 + GetUnderlyingObject() interface{} } // HeaderInterceptorAdapter is the interface used in interception of headers @@ -94,6 +100,7 @@ type Interceptor interface { Name() string SetCheckReceivedObjectHandler(func(newer p2p.Creator, rawData []byte) error) CheckReceivedObjectHandler() func(newer p2p.Creator, rawData []byte) error + Marshalizer() marshal.Marshalizer } // Resolver is an interface that defines the behaviour of a struct that is able @@ -103,3 +110,46 @@ type Resolver interface { SetResolverHandler(func(rd RequestData) ([]byte, error)) ResolverHandler() func(rd RequestData) ([]byte, error) } + +// Bootstraper is an interface that defines the behaviour of a struct that is able +// to syncronize the node +type Bootstraper interface { + ShouldSync() bool +} + +// ForkDetector is an interface that defines the behaviour of a struct that is able +// to detect forks +type ForkDetector interface { + AddHeader(header *block.Header, hash []byte, isReceived bool) error + RemoveHeaders(nonce uint64) + CheckFork() bool +} + +// InterceptorContainer is an interface that defines the beahaviour for a container +// holding a list of interceptors organized by type +type InterceptorContainer interface { + Get(key string) (Interceptor, error) + Add(key string, interceptor Interceptor) error + Replace(key string, interceptor Interceptor) error + Remove(key string) + Len() int +} + +// ResolverContainer is an interface that defines the beahaviour for a container +// holding a list of resolvers organized by type +type ResolverContainer interface { + Get(key string) (Resolver, error) + Add(key string, resolver Resolver) error + Replace(key string, interceptor Resolver) error + Remove(key string) + Len() int +} + +// ProcessorFactory is an interface that defines the behaviour for a factory that +// can create the needed interceptors and resolvers for the application +type ProcessorFactory interface { + CreateInterceptors() error + CreateResolvers() error + InterceptorContainer() InterceptorContainer + ResolverContainer() ResolverContainer +} diff --git a/process/mock/blockProcessorMock.go b/process/mock/blockProcessorMock.go index f6899d9fda1..9eeda494047 100644 --- a/process/mock/blockProcessorMock.go +++ b/process/mock/blockProcessorMock.go @@ -2,29 +2,32 @@ package mock import ( "math/big" + "time" "github.com/ElrondNetwork/elrond-go-sandbox/data/block" "github.com/ElrondNetwork/elrond-go-sandbox/data/blockchain" ) type BlockProcessorMock struct { - ProcessBlockCalled func(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody) error - ProcessAndCommitCalled func(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody) error - CommitBlockCalled func(blockChain *blockchain.BlockChain, header *block.Header, block *block.TxBlockBody) error - RevertAccountStateCalled func() - CreateGenesisBlockCalled func(balances map[string]big.Int, shardId uint32) *block.StateBlockBody - CreateTxBlockCalled func(shardId uint32, maxTxInBlock int, round int32, haveTime func() bool) (*block.TxBlockBody, error) - RemoveBlockTxsFromPoolCalled func(body *block.TxBlockBody) error - GetRootHashCalled func() []byte - noShards uint32 + ProcessBlockCalled func(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody, haveTime func() time.Duration) error + ProcessAndCommitCalled func(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody, haveTime func() time.Duration) error + CommitBlockCalled func(blockChain *blockchain.BlockChain, header *block.Header, block *block.TxBlockBody) error + RevertAccountStateCalled func() + CreateGenesisBlockCalled func(balances map[string]*big.Int, shardId uint32) (*block.StateBlockBody, error) + CreateTxBlockCalled func(shardId uint32, maxTxInBlock int, round int32, haveTime func() bool) (*block.TxBlockBody, error) + CreateEmptyBlockBodyCalled func(shardId uint32, round int32) *block.TxBlockBody + RemoveBlockTxsFromPoolCalled func(body *block.TxBlockBody) error + GetRootHashCalled func() []byte + noShards uint32 + SetOnRequestTransactionCalled func(f func(destShardID uint32, txHash []byte)) } -func (bpm *BlockProcessorMock) ProcessBlock(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody) error { - return bpm.ProcessBlockCalled(blockChain, header, body) +func (bpm *BlockProcessorMock) ProcessBlock(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody, haveTime func() time.Duration) error { + return bpm.ProcessBlockCalled(blockChain, header, body, haveTime) } -func (bpm *BlockProcessorMock) ProcessAndCommit(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody) error { - return bpm.ProcessAndCommitCalled(blockChain, header, body) +func (bpm *BlockProcessorMock) ProcessAndCommit(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody, haveTime func() time.Duration) error { + return bpm.ProcessAndCommitCalled(blockChain, header, body, haveTime) } func (bpm *BlockProcessorMock) CommitBlock(blockChain *blockchain.BlockChain, header *block.Header, block *block.TxBlockBody) error { @@ -35,14 +38,18 @@ func (bpm *BlockProcessorMock) RevertAccountState() { bpm.RevertAccountStateCalled() } -func (blProcMock BlockProcessorMock) CreateGenesisBlockBody(balances map[string]big.Int, shardId uint32) *block.StateBlockBody { - panic("implement me") +func (blProcMock BlockProcessorMock) CreateGenesisBlockBody(balances map[string]*big.Int, shardId uint32) (*block.StateBlockBody, error) { + return blProcMock.CreateGenesisBlockCalled(balances, shardId) } func (blProcMock BlockProcessorMock) CreateTxBlockBody(shardId uint32, maxTxInBlock int, round int32, haveTime func() bool) (*block.TxBlockBody, error) { return blProcMock.CreateTxBlockCalled(shardId, maxTxInBlock, round, haveTime) } +func (blProcMock BlockProcessorMock) CreateEmptyBlockBody(shardId uint32, round int32) *block.TxBlockBody { + return blProcMock.CreateEmptyBlockBodyCalled(shardId, round) +} + func (blProcMock BlockProcessorMock) RemoveBlockTxsFromPool(body *block.TxBlockBody) error { // pretend we removed the data return blProcMock.RemoveBlockTxsFromPoolCalled(body) diff --git a/process/mock/forkDetectorMock.go b/process/mock/forkDetectorMock.go new file mode 100644 index 00000000000..51613a45293 --- /dev/null +++ b/process/mock/forkDetectorMock.go @@ -0,0 +1,23 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go-sandbox/data/block" +) + +type ForkDetectorMock struct { + AddHeaderCalled func(header *block.Header, hash []byte, isReceived bool) error + RemoveHeadersCalled func(nonce uint64) + CheckForkCalled func() bool +} + +func (fdm *ForkDetectorMock) AddHeader(header *block.Header, hash []byte, isReceived bool) error { + return fdm.AddHeaderCalled(header, hash, isReceived) +} + +func (fdm *ForkDetectorMock) RemoveHeaders(nonce uint64) { + fdm.RemoveHeadersCalled(nonce) +} + +func (fdm *ForkDetectorMock) CheckFork() bool { + return fdm.CheckForkCalled() +} diff --git a/process/mock/hasherMock.go b/process/mock/hasherMock.go index 0120c31c30f..c5b967ce1d9 100644 --- a/process/mock/hasherMock.go +++ b/process/mock/hasherMock.go @@ -23,7 +23,7 @@ func (sha HasherMock) EmptyHash() []byte { return sha256EmptyHash } -// Size return the required size in bytes +// Size returns the required size in bytes func (HasherMock) Size() int { return sha256.Size } diff --git a/process/mock/hasherStub.go b/process/mock/hasherStub.go new file mode 100644 index 00000000000..92d68ae2182 --- /dev/null +++ b/process/mock/hasherStub.go @@ -0,0 +1,22 @@ +package mock + +type HasherStub struct { + ComputeCalled func(s string) []byte + EmptyHashCalled func() []byte +} + +func (hash HasherStub) Compute(s string) []byte { + if hash.ComputeCalled != nil { + return hash.ComputeCalled(s) + } + return nil +} +func (hash HasherStub) EmptyHash() []byte { + if hash.EmptyHashCalled != nil { + hash.EmptyHashCalled() + } + return nil +} +func (HasherStub) Size() int { + return 0 +} diff --git a/process/mock/interceptorConteinerMock.go b/process/mock/interceptorConteinerMock.go new file mode 100644 index 00000000000..ffaae271de1 --- /dev/null +++ b/process/mock/interceptorConteinerMock.go @@ -0,0 +1,42 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go-sandbox/process" +) + +// InterceptorContainer is a struct that defines the beahaviour for a container +// holding a list of interceptors organized by type +type InterceptorContainer struct { + GetCalled func(key string) (process.Interceptor, error) + AddCalled func(key string, interceptor process.Interceptor) error + ReplaceCalled func(key string, interceptor process.Interceptor) error + RemoveCalled func(key string) + LenCalled func() int +} + +// Get returns the interceptor stored at a certain key. +// Returns an error if the element does not exist +func (i *InterceptorContainer) Get(key string) (process.Interceptor, error) { + return i.GetCalled(key) +} + +// Add will add an interceptor at a given key. Returns +// an error if the element already exists +func (i *InterceptorContainer) Add(key string, interceptor process.Interceptor) error { + return i.AddCalled(key, interceptor) +} + +// Replace will add (or replace if it already exists) an interceptor at a given key +func (i *InterceptorContainer) Replace(key string, interceptor process.Interceptor) error { + return i.ReplaceCalled(key, interceptor) +} + +// Remove will remove an interceptor at a given key +func (i *InterceptorContainer) Remove(key string) { + i.RemoveCalled(key) +} + +// Len returns the length of the added interceptors +func (i *InterceptorContainer) Len() int { + return i.LenCalled() +} diff --git a/process/mock/interceptorStub.go b/process/mock/interceptorStub.go index 1d4ad31e056..54a25f9d482 100644 --- a/process/mock/interceptorStub.go +++ b/process/mock/interceptorStub.go @@ -1,6 +1,7 @@ package mock import ( + "github.com/ElrondNetwork/elrond-go-sandbox/marshal" "github.com/ElrondNetwork/elrond-go-sandbox/p2p" ) @@ -8,6 +9,11 @@ type InterceptorStub struct { NameCalled func() string SetCheckReceivedObjectHandlerCalled func(func(newer p2p.Creator, rawData []byte) error) CheckReceivedObjectHandlerCalled func() func(newer p2p.Creator, rawData []byte) error + MarshalizerCalled func() marshal.Marshalizer +} + +func (is *InterceptorStub) Marshalizer() marshal.Marshalizer { + return is.MarshalizerCalled() } func (is *InterceptorStub) Name() string { diff --git a/process/mock/journalizedAccountWrapMock.go b/process/mock/journalizedAccountWrapMock.go index 1b0c75202e2..e370c8a917c 100644 --- a/process/mock/journalizedAccountWrapMock.go +++ b/process/mock/journalizedAccountWrapMock.go @@ -19,7 +19,8 @@ type JournalizedAccountWrapMock struct { originalData map[string][]byte dirtyData map[string][]byte - Fail bool + Fail bool + FailSetNonceWithJurnal bool } func NewJournalizedAccountWrapMock(address state.AddressContainer) *JournalizedAccountWrapMock { @@ -123,14 +124,18 @@ func (jawm *JournalizedAccountWrapMock) SaveKeyValue(key []byte, value []byte) { func (jawm *JournalizedAccountWrapMock) SetNonceWithJournal(nonce uint64) error { if jawm.Fail { - return errors.New("failure") + return errors.New("failure setting nonce") + } + + if jawm.FailSetNonceWithJurnal { + return errors.New("failure setting nonce") } jawm.Nonce = nonce return nil } -func (jawm *JournalizedAccountWrapMock) SetBalanceWithJournal(balance big.Int) error { +func (jawm *JournalizedAccountWrapMock) SetBalanceWithJournal(balance *big.Int) error { if jawm.Fail { return errors.New("failure") } diff --git a/process/mock/resolverContainerMock.go b/process/mock/resolverContainerMock.go new file mode 100644 index 00000000000..9a76585277c --- /dev/null +++ b/process/mock/resolverContainerMock.go @@ -0,0 +1,42 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go-sandbox/process" +) + +// ResolverContainer is a struct that defines the beahaviour for a container +// holding a list of resolvers organized by type +type ResolverContainer struct { + GetCalled func(key string) (process.Resolver, error) + AddCalled func(key string, resolver process.Resolver) error + ReplaceCalled func(key string, resolver process.Resolver) error + RemoveCalled func(key string) + LenCalled func() int +} + +// Get returns the resolver stored at a certain key. +// Returns an error if the element does not exist +func (i *ResolverContainer) Get(key string) (process.Resolver, error) { + return i.GetCalled(key) +} + +// Add will add a resolver at a given key. Returns +// an error if the element already exists +func (i *ResolverContainer) Add(key string, resolver process.Resolver) error { + return i.AddCalled(key, resolver) +} + +// Replace will add (or replace if it already exists) a resolver at a given key +func (i *ResolverContainer) Replace(key string, resolver process.Resolver) error { + return i.ReplaceCalled(key, resolver) +} + +// Remove will remove a resolver at a given key +func (i *ResolverContainer) Remove(key string) { + i.RemoveCalled(key) +} + +// Len returns the length of the added resolvers +func (i *ResolverContainer) Len() int { + return i.LenCalled() +} diff --git a/process/mock/shardedDataStub.go b/process/mock/shardedDataStub.go index ffa31631323..21460f57be9 100644 --- a/process/mock/shardedDataStub.go +++ b/process/mock/shardedDataStub.go @@ -8,7 +8,7 @@ type ShardedDataStub struct { RegisterHandlerCalled func(func(key []byte)) ShardDataStoreCalled func(shardID uint32) (c storage.Cacher) AddDataCalled func(key []byte, data interface{}, destShardID uint32) - SearchDataCalled func(key []byte) (shardValuesPairs map[uint32]interface{}) + SearchFirstDataCalled func(key []byte) (value interface{}, ok bool) RemoveDataCalled func(key []byte, destShardID uint32) RemoveDataFromAllShardsCalled func(key []byte) MergeShardStoresCalled func(sourceShardID, destShardID uint32) @@ -16,6 +16,7 @@ type ShardedDataStub struct { ClearCalled func() ClearShardStoreCalled func(shardID uint32) RemoveSetOfDataFromPoolCalled func(keys [][]byte, destShardID uint32) + CreateShardStoreCalled func(destShardID uint32) } func (sd *ShardedDataStub) RegisterHandler(handler func(key []byte)) { @@ -30,8 +31,8 @@ func (sd *ShardedDataStub) AddData(key []byte, data interface{}, destShardID uin sd.AddDataCalled(key, data, destShardID) } -func (sd *ShardedDataStub) SearchData(key []byte) (shardValuesPairs map[uint32]interface{}) { - return sd.SearchDataCalled(key) +func (sd *ShardedDataStub) SearchFirstData(key []byte) (value interface{}, ok bool) { + return sd.SearchFirstDataCalled(key) } func (sd *ShardedDataStub) RemoveData(key []byte, destShardID uint32) { @@ -46,6 +47,10 @@ func (sd *ShardedDataStub) MergeShardStores(sourceShardID, destShardID uint32) { sd.MergeShardStoresCalled(sourceShardID, destShardID) } +func (sd *ShardedDataStub) CreateShardStore(destShardID uint32) { + sd.CreateShardStoreCalled(destShardID) +} + func (sd *ShardedDataStub) MoveData(sourceShardID, destShardID uint32, key [][]byte) { sd.MoveDataCalled(sourceShardID, destShardID, key) } diff --git a/process/mock/txProcessorMock.go b/process/mock/txProcessorMock.go index cdedb14a941..b65330cf69e 100644 --- a/process/mock/txProcessorMock.go +++ b/process/mock/txProcessorMock.go @@ -9,7 +9,7 @@ import ( type TxProcessorMock struct { ProcessTransactionCalled func(transaction *transaction.Transaction, round int32) error - SetBalancesToTrieCalled func(accBalance map[string]big.Int) (rootHash []byte, err error) + SetBalancesToTrieCalled func(accBalance map[string]*big.Int) (rootHash []byte, err error) } func (etm *TxProcessorMock) SCHandler() func(accountsAdapter state.AccountsAdapter, transaction *transaction.Transaction) error { @@ -24,6 +24,6 @@ func (etm *TxProcessorMock) ProcessTransaction(transaction *transaction.Transact return etm.ProcessTransactionCalled(transaction, round) } -func (etm *TxProcessorMock) SetBalancesToTrie(accBalance map[string]big.Int) (rootHash []byte, err error) { +func (etm *TxProcessorMock) SetBalancesToTrie(accBalance map[string]*big.Int) (rootHash []byte, err error) { return etm.SetBalancesToTrieCalled(accBalance) } diff --git a/process/mock/uint64ByteSliceConverterMock.go b/process/mock/uint64ByteSliceConverterMock.go new file mode 100644 index 00000000000..9f5972aa749 --- /dev/null +++ b/process/mock/uint64ByteSliceConverterMock.go @@ -0,0 +1,17 @@ +package mock + +// Uint64ByteSliceConverterMock converts byte slice to/from uint64 +type Uint64ByteSliceConverterMock struct { + ToByteSliceCalled func(uint64) []byte + ToUint64Called func([]byte) (uint64, error) +} + +// ToByteSlice is a mock implementation for Uint64ByteSliceConverter +func (u *Uint64ByteSliceConverterMock) ToByteSlice(p uint64) []byte { + return u.ToByteSliceCalled(p) +} + +// ToUint64 is a mock implementation for Uint64ByteSliceConverter +func (u *Uint64ByteSliceConverterMock) ToUint64(p []byte) (uint64, error) { + return u.ToUint64Called(p) +} diff --git a/process/resolver/container.go b/process/resolver/container.go new file mode 100644 index 00000000000..a9261673229 --- /dev/null +++ b/process/resolver/container.go @@ -0,0 +1,78 @@ +package resolver + +import ( + "sync" + + "github.com/ElrondNetwork/elrond-go-sandbox/process" +) + +// Container is a holder for resolvers organized by type +type Container struct { + mutex sync.RWMutex + resolvers map[string]process.Resolver +} + +// NewContainer will create a new instance of a resolver container +func NewContainer() *Container { + return &Container{ + mutex: sync.RWMutex{}, + resolvers: make(map[string]process.Resolver), + } +} + +// Get returns the resolver stored at a certain key. +// Returns an error if the element does not exist +func (i *Container) Get(key string) (process.Resolver, error) { + i.mutex.RLock() + resolver, ok := i.resolvers[key] + i.mutex.RUnlock() + if !ok { + return nil, process.ErrInvalidContainerKey + } + return resolver, nil +} + +// Add will add a resolver at a given key. Returns +// an error if the element already exists +func (i *Container) Add(key string, resolver process.Resolver) error { + if resolver == nil { + return process.ErrNilContainerElement + } + i.mutex.Lock() + defer i.mutex.Unlock() + + _, ok := i.resolvers[key] + + if ok { + return process.ErrContainerKeyAlreadyExists + } + + i.resolvers[key] = resolver + return nil +} + +// Replace will add (or replace if it already exists) a resolver at a given key +func (i *Container) Replace(key string, resolver process.Resolver) error { + if resolver == nil { + return process.ErrNilContainerElement + } + i.mutex.Lock() + i.resolvers[key] = resolver + i.mutex.Unlock() + return nil +} + +// Remove will remove a resolver at a given key +func (i *Container) Remove(key string) { + i.mutex.Lock() + delete(i.resolvers, key) + i.mutex.Unlock() +} + +// Len returns the length of the added resolvers +func (i *Container) Len() int { + i.mutex.RLock() + l := len(i.resolvers) + i.mutex.RUnlock() + return l +} diff --git a/process/sync/basicForkDetector.go b/process/sync/basicForkDetector.go new file mode 100644 index 00000000000..1ae3f0109fd --- /dev/null +++ b/process/sync/basicForkDetector.go @@ -0,0 +1,139 @@ +package sync + +import ( + "bytes" + "sync" + + "github.com/ElrondNetwork/elrond-go-sandbox/data/block" +) + +type headerInfo struct { + header *block.Header + hash []byte + isReceived bool +} + +// basicForkDetector defines a struct with necessary data needed for fork detection +type basicForkDetector struct { + headers map[uint64][]*headerInfo + mutHeaders sync.Mutex +} + +// NewBasicForkDetector method creates a new BasicForkDetector object +func NewBasicForkDetector() *basicForkDetector { + bfd := &basicForkDetector{} + bfd.headers = make(map[uint64][]*headerInfo) + + return bfd +} + +// AddHeader method adds a new header to headers map +func (bfd *basicForkDetector) AddHeader(header *block.Header, hash []byte, isReceived bool) error { + if header == nil { + return ErrNilHeader + } + + if hash == nil { + return ErrNilHash + } + + if !isEmpty(header) && !isReceived { + bfd.removePastHeaders(header.Nonce) // create a check point and remove all the past headers + } + + bfd.append(&headerInfo{ + header: header, + hash: hash, + isReceived: isReceived, + }) + + return nil +} + +func (bfd *basicForkDetector) removePastHeaders(nonce uint64) { + bfd.mutHeaders.Lock() + + for storedNonce := range bfd.headers { + if storedNonce <= nonce { + delete(bfd.headers, nonce) + } + } + + bfd.mutHeaders.Unlock() +} + +// RemoveHeaders removes all stored headers with a given nonce +func (bfd *basicForkDetector) RemoveHeaders(nonce uint64) { + bfd.mutHeaders.Lock() + delete(bfd.headers, nonce) + bfd.mutHeaders.Unlock() +} + +// append adds a new header in the slice found in nonce position +// it not adds the header if its hash is already stored in the slice +func (bfd *basicForkDetector) append(hdrInfo *headerInfo) { + bfd.mutHeaders.Lock() + defer bfd.mutHeaders.Unlock() + + hdrInfos := bfd.headers[hdrInfo.header.Nonce] + + isHdrInfosNilOrEmpty := hdrInfos == nil || len(hdrInfos) == 0 + + if isHdrInfosNilOrEmpty { + bfd.headers[hdrInfo.header.Nonce] = []*headerInfo{hdrInfo} + return + } + + for _, hdrInfoStored := range hdrInfos { + if bytes.Equal(hdrInfoStored.hash, hdrInfo.hash) { + return + } + } + + bfd.headers[hdrInfo.header.Nonce] = append(bfd.headers[hdrInfo.header.Nonce], hdrInfo) +} + +// CheckFork method checks if the node could be on the fork +func (bfd *basicForkDetector) CheckFork() bool { + bfd.mutHeaders.Lock() + defer bfd.mutHeaders.Unlock() + + for nonce, hdrInfos := range bfd.headers { + if len(hdrInfos) == 1 { + continue + } + + var selfHdrInfo *headerInfo + foundNotEmptyBlock := false + + for i := 0; i < len(hdrInfos); i++ { + if !hdrInfos[i].isReceived { + selfHdrInfo = hdrInfos[i] + continue + } + + if !isEmpty(hdrInfos[i].header) { + foundNotEmptyBlock = true + } + } + + if selfHdrInfo == nil { + //current nonce has not been (yet) processed, skipping, trying the next one + continue + } + + if !isEmpty(selfHdrInfo.header) { + //keep it clean so next time this position will be processed faster + delete(bfd.headers, nonce) + bfd.headers[nonce] = []*headerInfo{selfHdrInfo} + } + + if foundNotEmptyBlock { + //detected a fork: self has an unsigned header, it also received a signed block + //with the same nonce + return true + } + } + + return false +} diff --git a/process/sync/basicForkDetector_test.go b/process/sync/basicForkDetector_test.go new file mode 100644 index 00000000000..f3270e2dabf --- /dev/null +++ b/process/sync/basicForkDetector_test.go @@ -0,0 +1,164 @@ +package sync_test + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go-sandbox/data/block" + "github.com/ElrondNetwork/elrond-go-sandbox/process/sync" + "github.com/stretchr/testify/assert" +) + +func TestNewBasicForkDetector_ShouldWork(t *testing.T) { + t.Parallel() + + bfd := sync.NewBasicForkDetector() + + assert.NotNil(t, bfd) +} + +func TestBasicForkDetector_AddHeaderNilHeaderShouldErr(t *testing.T) { + t.Parallel() + + bfd := sync.NewBasicForkDetector() + + err := bfd.AddHeader(nil, make([]byte, 0), false) + + assert.Equal(t, sync.ErrNilHeader, err) +} + +func TestBasicForkDetector_AddHeaderNilHashShouldErr(t *testing.T) { + t.Parallel() + + bfd := sync.NewBasicForkDetector() + + err := bfd.AddHeader(&block.Header{}, nil, false) + + assert.Equal(t, sync.ErrNilHash, err) +} + +func TestBasicForkDetector_AddHeaderNotPresentShouldWork(t *testing.T) { + t.Parallel() + + hdr := &block.Header{} + hash := make([]byte, 0) + + bfd := sync.NewBasicForkDetector() + + err := bfd.AddHeader(hdr, hash, false) + assert.Nil(t, err) + + hInfos := bfd.GetHeaders(0) + assert.Equal(t, 1, len(hInfos)) + assert.Equal(t, hdr, hInfos[0].Header()) + assert.Equal(t, hash, hInfos[0].Hash()) +} + +func TestBasicForkDetector_AddHeaderPresentShouldAppend(t *testing.T) { + t.Parallel() + + hdr1 := &block.Header{} + hash1 := []byte("hash1") + + hdr2 := &block.Header{} + hash2 := []byte("hash2") + + bfd := sync.NewBasicForkDetector() + + _ = bfd.AddHeader(hdr1, hash1, false) + err := bfd.AddHeader(hdr2, hash2, false) + assert.Nil(t, err) + + hInfos := bfd.GetHeaders(0) + assert.Equal(t, 2, len(hInfos)) + + assert.Equal(t, hdr1, hInfos[0].Header()) + assert.Equal(t, hash1, hInfos[0].Hash()) + + assert.Equal(t, hdr2, hInfos[1].Header()) + assert.Equal(t, hash2, hInfos[1].Hash()) +} + +func TestBasicForkDetector_AddHeaderPresentShouldNotRewriteWhenSameHash(t *testing.T) { + t.Parallel() + + hdr1 := &block.Header{} + hash := []byte("hash1") + hdr2 := &block.Header{} + + bfd := sync.NewBasicForkDetector() + + _ = bfd.AddHeader(hdr1, hash, false) + err := bfd.AddHeader(hdr2, hash, false) + assert.Nil(t, err) + + hInfos := bfd.GetHeaders(0) + assert.Equal(t, 1, len(hInfos)) + + assert.Equal(t, hdr1, hInfos[0].Header()) + assert.Equal(t, hash, hInfos[0].Hash()) +} + +func TestBasicForkDetector_RemoveHeadersShouldWork(t *testing.T) { + t.Parallel() + + hdr1 := &block.Header{} + hash := []byte("hash1") + + bfd := sync.NewBasicForkDetector() + + _ = bfd.AddHeader(hdr1, hash, false) + hInfos := bfd.GetHeaders(0) + assert.Equal(t, 1, len(hInfos)) + + bfd.RemoveHeaders(0) + + hInfos = bfd.GetHeaders(0) + assert.Nil(t, hInfos) +} + +func TestBasicForkDetector_CheckForkOnlyOneHeaderOnANonceShouldRetFalse(t *testing.T) { + t.Parallel() + + bfd := sync.NewBasicForkDetector() + + _ = bfd.AddHeader(&block.Header{Nonce: 0}, []byte("hash1"), false) + _ = bfd.AddHeader(&block.Header{Nonce: 1}, []byte("hash2"), false) + + assert.False(t, bfd.CheckFork()) +} + +func TestBasicForkDetector_CheckForkNodeHasNonEmptyBlockShouldRetFalse(t *testing.T) { + t.Parallel() + + bfd := sync.NewBasicForkDetector() + + _ = bfd.AddHeader(&block.Header{Nonce: 0}, []byte("hash1"), false) + _ = bfd.AddHeader(&block.Header{Nonce: 1}, []byte("hash2"), true) + _ = bfd.AddHeader(&block.Header{Nonce: 1, PubKeysBitmap: []byte{1}}, []byte("hash3"), false) + + assert.False(t, bfd.CheckFork()) +} + +func TestBasicForkDetector_CheckForkNodeHasEmptyBlockShouldRetTrue(t *testing.T) { + t.Parallel() + + bfd := sync.NewBasicForkDetector() + + _ = bfd.AddHeader(&block.Header{Nonce: 0}, []byte("hash1"), false) + _ = bfd.AddHeader(&block.Header{Nonce: 1}, []byte("hash2"), false) + _ = bfd.AddHeader(&block.Header{Nonce: 1, PubKeysBitmap: []byte{1}}, []byte("hash3"), true) + + assert.True(t, bfd.CheckFork()) +} + +func TestBasicForkDetector_CheckForkNodeHasOnlyReceivedShouldRetFalse(t *testing.T) { + t.Parallel() + + bfd := sync.NewBasicForkDetector() + + _ = bfd.AddHeader(&block.Header{Nonce: 0}, []byte("hash1"), false) + _ = bfd.AddHeader(&block.Header{Nonce: 1}, []byte("hash2"), true) + _ = bfd.AddHeader(&block.Header{Nonce: 1, PubKeysBitmap: []byte{1}}, []byte("hash3"), true) + + assert.False(t, bfd.CheckFork()) +} diff --git a/process/sync/block.go b/process/sync/block.go index fac3e10d11d..f1dfab2db25 100644 --- a/process/sync/block.go +++ b/process/sync/block.go @@ -2,6 +2,7 @@ package sync import ( "bytes" + "encoding/base64" "fmt" "sync" "time" @@ -11,20 +12,26 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/data/block" "github.com/ElrondNetwork/elrond-go-sandbox/data/blockchain" "github.com/ElrondNetwork/elrond-go-sandbox/logger" + "github.com/ElrondNetwork/elrond-go-sandbox/marshal" "github.com/ElrondNetwork/elrond-go-sandbox/process" "github.com/ElrondNetwork/elrond-go-sandbox/storage" ) var log = logger.NewDefaultLogger() -// bootstrap implements the boostrsap mechanism -type bootstrap struct { +// sleepTime defines the time in milliseconds between each iteration made in syncBlocks method +const sleepTime = time.Duration(5 * time.Millisecond) + +// Bootstrap implements the boostrsap mechanism +type Bootstrap struct { headers data.ShardedDataCacherNotifier headersNonces data.Uint64Cacher txBlockBodies storage.Cacher blkc *blockchain.BlockChain round *chronology.Round blkExecutor process.BlockProcessor + marshalizer marshal.Marshalizer + forkDetector process.ForkDetector mutHeader sync.RWMutex headerNonce *uint64 @@ -41,21 +48,23 @@ type bootstrap struct { waitTime time.Duration } -// NewBootstrap creates a new bootstrap object +// NewBootstrap creates a new Bootstrap object func NewBootstrap( transientDataHolder data.TransientDataHolder, blkc *blockchain.BlockChain, round *chronology.Round, blkExecutor process.BlockProcessor, waitTime time.Duration, -) (*bootstrap, error) { - err := checkBootstrapNilParameters(transientDataHolder, blkc, round, blkExecutor) + marshalizer marshal.Marshalizer, + forkDetector process.ForkDetector, +) (*Bootstrap, error) { + err := checkBootstrapNilParameters(transientDataHolder, blkc, round, blkExecutor, marshalizer, forkDetector) if err != nil { return nil, err } - boot := bootstrap{ + boot := Bootstrap{ headers: transientDataHolder.Headers(), headersNonces: transientDataHolder.HeadersNonces(), txBlockBodies: transientDataHolder.TxBlocks(), @@ -63,6 +72,8 @@ func NewBootstrap( round: round, blkExecutor: blkExecutor, waitTime: waitTime, + marshalizer: marshalizer, + forkDetector: forkDetector, } boot.chRcvHdr = make(chan bool) @@ -73,6 +84,7 @@ func NewBootstrap( boot.headersNonces.RegisterHandler(boot.receivedHeaderNonce) boot.txBlockBodies.RegisterHandler(boot.receivedBodyHash) + boot.headers.RegisterHandler(boot.receivedHeaders) boot.chStopSync = make(chan bool) @@ -85,6 +97,8 @@ func checkBootstrapNilParameters( blkc *blockchain.BlockChain, round *chronology.Round, blkExecutor process.BlockProcessor, + marshalizer marshal.Marshalizer, + forkDetector process.ForkDetector, ) error { if transientDataHolder == nil { return process.ErrNilTransientDataHolder @@ -114,18 +128,26 @@ func checkBootstrapNilParameters( return process.ErrNilBlockExecutor } + if marshalizer == nil { + return process.ErrNilMarshalizer + } + + if forkDetector == nil { + return process.ErrNilForkDetector + } + return nil } // setRequestedHeaderNonce method sets the header nonce requested by the sync mechanism -func (boot *bootstrap) setRequestedHeaderNonce(nonce *uint64) { +func (boot *Bootstrap) setRequestedHeaderNonce(nonce *uint64) { boot.mutHeader.Lock() boot.headerNonce = nonce boot.mutHeader.Unlock() } // requestedHeaderNonce method gets the header nonce requested by the sync mechanism -func (boot *bootstrap) requestedHeaderNonce() (nonce *uint64) { +func (boot *Bootstrap) requestedHeaderNonce() (nonce *uint64) { boot.mutHeader.RLock() nonce = boot.headerNonce boot.mutHeader.RUnlock() @@ -133,9 +155,66 @@ func (boot *bootstrap) requestedHeaderNonce() (nonce *uint64) { return } +func (boot *Bootstrap) getHeader(hash []byte) *block.Header { + hdr := boot.getHeaderFromPool(hash) + + if hdr != nil { + return hdr + } + + return boot.getHeaderFromStorage(hash) +} + +func (boot *Bootstrap) getHeaderFromPool(hash []byte) *block.Header { + hdr, ok := boot.headers.SearchFirstData(hash) + + if !ok { + log.Debug(fmt.Sprintf("header with hash %v not found in headers cache\n", hash)) + return nil + } + + header, ok := hdr.(*block.Header) + + if !ok { + log.Debug(fmt.Sprintf("header with hash %v not found in headers cache\n", hash)) + return nil + } + + return header +} + +func (boot *Bootstrap) getHeaderFromStorage(hash []byte) *block.Header { + headerStore := boot.blkc.GetStorer(blockchain.BlockHeaderUnit) + + if headerStore == nil { + log.Error(process.ErrNilHeadersStorage.Error()) + return nil + } + + buffHeader, _ := headerStore.Get(hash) + header := &block.Header{} + err := boot.marshalizer.Unmarshal(header, buffHeader) + if err != nil { + log.Error(err.Error()) + return nil + } + + return header +} + +func (boot *Bootstrap) receivedHeaders(headerHash []byte) { + header := boot.getHeader(headerHash) + + err := boot.forkDetector.AddHeader(header, headerHash, true) + + if err != nil { + log.Info(err.Error()) + } +} + // receivedHeaderNonce method is a call back function which is called when a new header is added // in the block headers pool -func (boot *bootstrap) receivedHeaderNonce(nonce uint64) { +func (boot *Bootstrap) receivedHeaderNonce(nonce uint64) { n := boot.requestedHeaderNonce() if n == nil { @@ -143,13 +222,14 @@ func (boot *bootstrap) receivedHeaderNonce(nonce uint64) { } if *n == nonce { + log.Info(fmt.Sprintf("received header with nonce %d from network\n", nonce)) boot.setRequestedHeaderNonce(nil) boot.chRcvHdr <- true } } // requestedTxBodyHash method gets the body hash requested by the sync mechanism -func (boot *bootstrap) requestedTxBodyHash() []byte { +func (boot *Bootstrap) requestedTxBodyHash() []byte { boot.mutTxBody.RLock() hash := boot.txBodyHash boot.mutTxBody.RUnlock() @@ -158,7 +238,7 @@ func (boot *bootstrap) requestedTxBodyHash() []byte { } // setRequestedTxBodyHash method sets the body hash requested by the sync mechanism -func (boot *bootstrap) setRequestedTxBodyHash(hash []byte) { +func (boot *Bootstrap) setRequestedTxBodyHash(hash []byte) { boot.mutTxBody.Lock() boot.txBodyHash = hash boot.mutTxBody.Unlock() @@ -166,34 +246,37 @@ func (boot *bootstrap) setRequestedTxBodyHash(hash []byte) { // receivedBody method is a call back function which is called when a new body is added // in the block bodies pool -func (boot *bootstrap) receivedBodyHash(hash []byte) { +func (boot *Bootstrap) receivedBodyHash(hash []byte) { if bytes.Equal(boot.requestedTxBodyHash(), hash) { + log.Info(fmt.Sprintf("received tx body with hash %s from network\n", toB64(hash))) boot.setRequestedTxBodyHash(nil) boot.chRcvTxBdy <- true } } // StartSync method will start SyncBlocks as a go routine -func (boot *bootstrap) StartSync() { +func (boot *Bootstrap) StartSync() { go boot.syncBlocks() } // StopSync method will stop SyncBlocks -func (boot *bootstrap) StopSync() { +func (boot *Bootstrap) StopSync() { boot.chStopSync <- true } // syncBlocks method calls repeatedly synchronization method SyncBlock -func (boot *bootstrap) syncBlocks() { +func (boot *Bootstrap) syncBlocks() { for { + time.Sleep(sleepTime) select { case <-boot.chStopSync: return default: - if boot.shouldSync() { + if boot.ShouldSync() { err := boot.SyncBlock() + if err != nil { - log.Debug(err.Error()) + log.Info(err.Error()) } } } @@ -206,13 +289,13 @@ func (boot *bootstrap) syncBlocks() { // If either header and body are received the ProcessAndCommit method will be called. This method will execute // the block and its transactions. Finally if everything works, the block will be committed in the blockchain, // and all this mechanism will be reiterated for the next block. -func (boot *bootstrap) SyncBlock() error { +func (boot *Bootstrap) SyncBlock() error { boot.setRequestedHeaderNonce(nil) boot.setRequestedTxBodyHash(nil) nonce := boot.getNonceForNextBlock() - hdr, err := boot.getHeaderWithNonce(nonce) + hdr, err := boot.getHeaderRequestingIfMissing(nonce) if err != nil { return err } @@ -222,50 +305,57 @@ func (boot *bootstrap) SyncBlock() error { return process.ErrNotImplementedBlockProcessingType } - blk, err := boot.getTxBodyWithHash(hdr.BlockBodyHash) + blk, err := boot.getTxBodyRequestingIfMissing(hdr.BlockBodyHash) if err != nil { return err } + haveTime := func() time.Duration { + return boot.round.TimeDuration() + } + //TODO remove type assertions and implement a way for block executor to process - //TODO all kinds of blocks - err = boot.blkExecutor.ProcessAndCommit(boot.blkc, hdr, blk.(*block.TxBlockBody)) + //TODO all kinds of headers + err = boot.blkExecutor.ProcessAndCommit(boot.blkc, hdr, blk.(*block.TxBlockBody), haveTime) - if err == nil { - log.Debug("block synced successfully") + if err != nil { + if err == process.ErrInvalidBlockHash { + err = boot.forkChoice(hdr) + } + + return err } - return err + log.Info(fmt.Sprintf("block with nonce %d was synced successfully\n", hdr.Nonce)) + + return nil } -// getHeaderFromPool method returns the block header from a given nonce -func (boot *bootstrap) getHeaderFromPool(nonce uint64) *block.Header { +// getHeaderFromPoolHavingNonce method returns the block header from a given nonce +func (boot *Bootstrap) getHeaderFromPoolHavingNonce(nonce uint64) *block.Header { hash, _ := boot.headersNonces.Get(nonce) if hash == nil { - log.Debug(fmt.Sprintf("nonce %d not found in headers-nonces cache", nonce)) + log.Debug(fmt.Sprintf("nonce %d not found in headers-nonces cache\n", nonce)) return nil } - hdr := boot.headers.SearchData(hash) - if len(hdr) == 0 { - log.Debug(fmt.Sprintf("header with hash %v not found in headers cache", hash)) + hdr, ok := boot.headers.SearchFirstData(hash) + if !ok { + log.Debug(fmt.Sprintf("header with hash %v not found in headers cache\n", hash)) return nil } - for _, v := range hdr { - //just get the first header that is ok - header, ok := v.(*block.Header) - - if ok { - return header - } + header, ok := hdr.(*block.Header) + if !ok { + log.Debug(fmt.Sprintf("header with hash %v not found in headers cache\n", hash)) + return nil } - return nil + return header } // requestHeader method requests a block header from network when it is not found in the pool -func (boot *bootstrap) requestHeader(nonce uint64) { +func (boot *Bootstrap) requestHeader(nonce uint64) { if boot.RequestHeaderHandler != nil { boot.setRequestedHeaderNonce(&nonce) boot.RequestHeaderHandler(nonce) @@ -274,13 +364,13 @@ func (boot *bootstrap) requestHeader(nonce uint64) { // getHeaderWithNonce method gets the header with given nonce from pool, if it exist there, // and if not it will be requested from network -func (boot *bootstrap) getHeaderWithNonce(nonce uint64) (*block.Header, error) { - hdr := boot.getHeaderFromPool(nonce) +func (boot *Bootstrap) getHeaderRequestingIfMissing(nonce uint64) (*block.Header, error) { + hdr := boot.getHeaderFromPoolHavingNonce(nonce) if hdr == nil { boot.requestHeader(nonce) boot.waitForHeaderNonce() - hdr = boot.getHeaderFromPool(nonce) + hdr = boot.getHeaderFromPoolHavingNonce(nonce) if hdr == nil { return nil, process.ErrMissingHeader } @@ -289,14 +379,41 @@ func (boot *bootstrap) getHeaderWithNonce(nonce uint64) (*block.Header, error) { return hdr, nil } -// getBodyFromPool method returns the block header or block body from a given nonce -func (boot *bootstrap) getTxBodyFromPool(hash []byte) interface{} { +// getTxBody method returns the block body from a given hash either from data pool or from storage +func (boot *Bootstrap) getTxBody(hash []byte) interface{} { txBody, _ := boot.txBlockBodies.Get(hash) + + if txBody != nil { + return txBody + } + + txBodyStorer := boot.blkc.GetStorer(blockchain.TxBlockBodyUnit) + + if txBodyStorer == nil { + return nil + } + + buff, err := txBodyStorer.Get(hash) + if buff == nil { + log.LogIfError(err) + return nil + } + + txBody = &block.TxBlockBody{} + + err = boot.marshalizer.Unmarshal(txBody, buff) + log.LogIfError(err) + if err != nil { + err = txBodyStorer.Remove(hash) + log.LogIfError(err) + txBody = nil + } + return txBody } // requestBody method requests a block body from network when it is not found in the pool -func (boot *bootstrap) requestTxBody(hash []byte) { +func (boot *Bootstrap) requestTxBody(hash []byte) { if boot.RequestTxBodyHandler != nil { boot.setRequestedTxBodyHash(hash) boot.RequestTxBodyHandler(hash) @@ -308,23 +425,28 @@ func (boot *bootstrap) requestTxBody(hash []byte) { // the func returns interface{} as to match the next implementations for block body fetchers // that will be added. The block executor should decide by parsing the header block body type value // what kind of block body received. -func (boot *bootstrap) getTxBodyWithHash(hash []byte) (interface{}, error) { - blk := boot.getTxBodyFromPool(hash) +func (boot *Bootstrap) getTxBodyRequestingIfMissing(hash []byte) (interface{}, error) { + blk := boot.getTxBody(hash) if blk == nil { boot.requestTxBody(hash) boot.waitForTxBodyHash() - blk = boot.getTxBodyFromPool(hash) + blk = boot.getTxBody(hash) if blk == nil { return nil, process.ErrMissingBody } } - return blk.(*block.TxBlockBody), nil + intercepted, ok := blk.(*block.TxBlockBody) + if !ok { + return nil, ErrTxBlockBodyMismatch + } + + return intercepted, nil } // getNonceForNextBlock will get the nonce for the next block we should request -func (boot *bootstrap) getNonceForNextBlock() uint64 { +func (boot *Bootstrap) getNonceForNextBlock() uint64 { nonce := uint64(1) // first block nonce after genesis block if boot.blkc != nil && boot.blkc.CurrentBlockHeader != nil { nonce = boot.blkc.CurrentBlockHeader.Nonce + 1 @@ -333,18 +455,8 @@ func (boot *bootstrap) getNonceForNextBlock() uint64 { return nonce } -// shouldSync method returns the sync state of the node. If it returns true that means that the node should -// continue the syncing mechanism, otherwise the node should stop syncing because it is already synced -func (boot *bootstrap) shouldSync() bool { - if boot.blkc.CurrentBlockHeader == nil { - return boot.round.Index() > 0 - } - - return boot.blkc.CurrentBlockHeader.Round+1 < uint32(boot.round.Index()) -} - // waitForHeaderNonce method wait for header with the requested nonce to be received -func (boot *bootstrap) waitForHeaderNonce() { +func (boot *Bootstrap) waitForHeaderNonce() { select { case <-boot.chRcvHdr: return @@ -354,7 +466,7 @@ func (boot *bootstrap) waitForHeaderNonce() { } // waitForBodyNonce method wait for body with the requested nonce to be received -func (boot *bootstrap) waitForTxBodyHash() { +func (boot *Bootstrap) waitForTxBodyHash() { select { case <-boot.chRcvTxBdy: return @@ -362,3 +474,149 @@ func (boot *bootstrap) waitForTxBodyHash() { return } } + +// forkChoice decides if rollback must be called +func (boot *Bootstrap) forkChoice(hdr *block.Header) error { + log.Info(fmt.Sprintf("starting fork choice\n")) + + header := boot.blkc.CurrentBlockHeader + + if header == nil { + return ErrNilCurrentHeader + } + + if hdr == nil { + return ErrNilHeader + } + + if !isEmpty(header) { + boot.removeHeaderFromPools(hdr) + return &ErrNotEmptyHeader{ + CurrentNonce: header.Nonce, + PoolNonce: hdr.Nonce} + } + + log.Info(fmt.Sprintf("roll back to header with hash %s\n", + toB64(header.PrevHash))) + + return boot.rollback(header) +} + +func (boot *Bootstrap) cleanCachesOnRollback(header *block.Header, headerStore storage.Storer) { + hash, _ := boot.headersNonces.Get(header.Nonce) + boot.headersNonces.Remove(header.Nonce) + boot.headers.RemoveData(hash, header.ShardId) + boot.forkDetector.RemoveHeaders(header.Nonce) + //TODO uncomment this when badBlocks will be implemented + //_ = headerStore.Remove(hash) +} + +func (boot *Bootstrap) rollback(header *block.Header) error { + headerStore := boot.blkc.GetStorer(blockchain.BlockHeaderUnit) + if headerStore == nil { + return process.ErrNilHeadersStorage + } + + txBlockBodyStore := boot.blkc.GetStorer(blockchain.TxBlockBodyUnit) + if txBlockBodyStore == nil { + return process.ErrNilBlockBodyStorage + } + + // genesis block is treated differently + if header.Nonce == 1 { + boot.blkc.CurrentBlockHeader = nil + boot.blkc.CurrentTxBlockBody = nil + boot.blkc.CurrentBlockHeaderHash = nil + boot.cleanCachesOnRollback(header, headerStore) + + return nil + } + + newHeader, err := boot.getPrevHeader(headerStore, header) + if err != nil { + return err + } + + newTxBlockBody, err := boot.getTxBlockBody(txBlockBodyStore, newHeader) + if err != nil { + return err + } + + boot.blkc.CurrentBlockHeader = newHeader + boot.blkc.CurrentTxBlockBody = newTxBlockBody + boot.blkc.CurrentBlockHeaderHash = header.PrevHash + boot.cleanCachesOnRollback(header, headerStore) + + return nil +} + +func (boot *Bootstrap) removeHeaderFromPools(header *block.Header) { + hash, _ := boot.headersNonces.Get(header.Nonce) + + boot.headersNonces.Remove(header.Nonce) + boot.headers.RemoveData(hash, header.ShardId) + boot.forkDetector.RemoveHeaders(header.Nonce) +} + +func (boot *Bootstrap) getPrevHeader(headerStore storage.Storer, header *block.Header) (*block.Header, error) { + prevHash := header.PrevHash + buffHeader, _ := headerStore.Get(prevHash) + newHeader := &block.Header{} + err := boot.marshalizer.Unmarshal(newHeader, buffHeader) + if err != nil { + return nil, err + } + + return newHeader, nil +} + +func (boot *Bootstrap) getTxBlockBody(txBlockBodyStore storage.Storer, + header *block.Header) (*block.TxBlockBody, error) { + + buffTxBlockBody, _ := txBlockBodyStore.Get(header.BlockBodyHash) + txBlockBody := &block.TxBlockBody{} + err := boot.marshalizer.Unmarshal(txBlockBody, buffTxBlockBody) + if err != nil { + return nil, err + } + + return txBlockBody, nil +} + +// IsEmpty verifies if a block is empty +func isEmpty(header *block.Header) bool { + bitmap := header.PubKeysBitmap + areEqual := bytes.Equal(bitmap, make([]byte, len(bitmap))) + return areEqual +} + +func toB64(buff []byte) string { + if buff == nil { + return "" + } + return base64.StdEncoding.EncodeToString(buff) +} + +// ShouldSync method returns the synch state of the node. If it returns 'true', this means that the node +// is not synchronized yet and it has to continue the bootstrapping mechanism, otherwise the node is already +// synched and it can participate to the consensus, if it is in the jobDone group of this round +func (boot *Bootstrap) ShouldSync() bool { + if boot.blkc.CurrentBlockHeader == nil { + isNotSynchronized := boot.round.Index() > 0 + return isNotSynchronized + } + + isNotSynchronized := boot.blkc.CurrentBlockHeader.Round+1 < uint32(boot.round.Index()) + + if isNotSynchronized { + return true + } + + isForkDetected := boot.forkDetector.CheckFork() + + if isForkDetected { + return true + } + + return false +} diff --git a/process/sync/block_test.go b/process/sync/block_test.go index 4410ba10395..fbf9fc4c3d3 100644 --- a/process/sync/block_test.go +++ b/process/sync/block_test.go @@ -2,6 +2,8 @@ package sync_test import ( "bytes" + "fmt" + "reflect" "testing" "time" @@ -15,11 +17,19 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/process/mock" "github.com/ElrondNetwork/elrond-go-sandbox/process/sync" "github.com/ElrondNetwork/elrond-go-sandbox/storage" + "github.com/pkg/errors" "github.com/stretchr/testify/assert" ) -// WaitTime defines the time in milliseconds until node waits the requested info from the network -const WaitTime = time.Duration(100 * time.Millisecond) +// waitTime defines the time in milliseconds until node waits the requested info from the network +const waitTime = time.Duration(100 * time.Millisecond) + +type removedFlags struct { + flagHdrRemovedFromNonces bool + flagHdrRemovedFromHeaders bool + flagHdrRemovedFromStorage bool + flagHdrRemovedFromForkDetector bool +} //------- NewBootstrap @@ -29,8 +39,10 @@ func TestNewBootstrap_NilTransientDataHolderShouldErr(t *testing.T) { blkc := &blockchain.BlockChain{} round := &chronology.Round{} blkExec := &mock.BlockProcessorMock{} + marshalizer := &mock.MarshalizerMock{} + forkDetector := &mock.ForkDetectorMock{} - bs, err := sync.NewBootstrap(nil, blkc, round, blkExec, WaitTime) + bs, err := sync.NewBootstrap(nil, blkc, round, blkExec, waitTime, marshalizer, forkDetector) assert.Nil(t, bs) assert.Equal(t, process.ErrNilTransientDataHolder, err) @@ -52,8 +64,10 @@ func TestNewBootstrap_TransientDataHolderRetNilOnHeadersShouldErr(t *testing.T) blkc := &blockchain.BlockChain{} round := &chronology.Round{} blkExec := &mock.BlockProcessorMock{} + marshalizer := &mock.MarshalizerMock{} + forkDetector := &mock.ForkDetectorMock{} - bs, err := sync.NewBootstrap(transient, blkc, round, blkExec, WaitTime) + bs, err := sync.NewBootstrap(transient, blkc, round, blkExec, waitTime, marshalizer, forkDetector) assert.Nil(t, bs) assert.Equal(t, process.ErrNilHeadersDataPool, err) @@ -75,8 +89,10 @@ func TestNewBootstrap_TransientDataHolderRetNilOnHeadersNoncesShouldErr(t *testi blkc := &blockchain.BlockChain{} round := &chronology.Round{} blkExec := &mock.BlockProcessorMock{} + marshalizer := &mock.MarshalizerMock{} + forkDetector := &mock.ForkDetectorMock{} - bs, err := sync.NewBootstrap(transient, blkc, round, blkExec, WaitTime) + bs, err := sync.NewBootstrap(transient, blkc, round, blkExec, waitTime, marshalizer, forkDetector) assert.Nil(t, bs) assert.Equal(t, process.ErrNilHeadersNoncesDataPool, err) @@ -98,8 +114,10 @@ func TestNewBootstrap_TransientDataHolderRetNilOnTxBlockBodyShouldErr(t *testing blkc := &blockchain.BlockChain{} round := &chronology.Round{} blkExec := &mock.BlockProcessorMock{} + marshalizer := &mock.MarshalizerMock{} + forkDetector := &mock.ForkDetectorMock{} - bs, err := sync.NewBootstrap(transient, blkc, round, blkExec, WaitTime) + bs, err := sync.NewBootstrap(transient, blkc, round, blkExec, waitTime, marshalizer, forkDetector) assert.Nil(t, bs) assert.Equal(t, process.ErrNilTxBlockBody, err) @@ -120,8 +138,10 @@ func TestNewBootstrap_NilBlockchainShouldErr(t *testing.T) { } round := &chronology.Round{} blkExec := &mock.BlockProcessorMock{} + marshalizer := &mock.MarshalizerMock{} + forkDetector := &mock.ForkDetectorMock{} - bs, err := sync.NewBootstrap(transient, nil, round, blkExec, WaitTime) + bs, err := sync.NewBootstrap(transient, nil, round, blkExec, waitTime, marshalizer, forkDetector) assert.Nil(t, bs) assert.Equal(t, process.ErrNilBlockChain, err) @@ -142,8 +162,10 @@ func TestNewBootstrap_NilRoundShouldErr(t *testing.T) { } blkc := &blockchain.BlockChain{} blkExec := &mock.BlockProcessorMock{} + marshalizer := &mock.MarshalizerMock{} + forkDetector := &mock.ForkDetectorMock{} - bs, err := sync.NewBootstrap(transient, blkc, nil, blkExec, WaitTime) + bs, err := sync.NewBootstrap(transient, blkc, nil, blkExec, waitTime, marshalizer, forkDetector) assert.Nil(t, bs) assert.Equal(t, process.ErrNilRound, err) @@ -164,13 +186,63 @@ func TestNewBootstrap_NilBlockProcessorShouldErr(t *testing.T) { } blkc := &blockchain.BlockChain{} round := &chronology.Round{} + marshalizer := &mock.MarshalizerMock{} + forkDetector := &mock.ForkDetectorMock{} - bs, err := sync.NewBootstrap(transient, blkc, round, nil, WaitTime) + bs, err := sync.NewBootstrap(transient, blkc, round, nil, waitTime, marshalizer, forkDetector) assert.Nil(t, bs) assert.Equal(t, process.ErrNilBlockExecutor, err) } +func TestNewBootstrap_NilMarshalizerShouldErr(t *testing.T) { + t.Parallel() + + transient := &mock.TransientDataPoolMock{} + transient.HeadersCalled = func() data.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{} + } + transient.HeadersNoncesCalled = func() data.Uint64Cacher { + return &mock.Uint64CacherStub{} + } + transient.TxBlocksCalled = func() storage.Cacher { + return &mock.CacherStub{} + } + blkc := &blockchain.BlockChain{} + round := &chronology.Round{} + blkExec := &mock.BlockProcessorMock{} + marshalizer := &mock.MarshalizerMock{} + + bs, err := sync.NewBootstrap(transient, blkc, round, blkExec, waitTime, marshalizer, nil) + + assert.Nil(t, bs) + assert.Equal(t, process.ErrNilForkDetector, err) +} + +func TestNewBootstrap_NilForkDetectorShouldErr(t *testing.T) { + t.Parallel() + + transient := &mock.TransientDataPoolMock{} + transient.HeadersCalled = func() data.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{} + } + transient.HeadersNoncesCalled = func() data.Uint64Cacher { + return &mock.Uint64CacherStub{} + } + transient.TxBlocksCalled = func() storage.Cacher { + return &mock.CacherStub{} + } + blkc := &blockchain.BlockChain{} + round := &chronology.Round{} + blkExec := &mock.BlockProcessorMock{} + forkDetector := &mock.ForkDetectorMock{} + + bs, err := sync.NewBootstrap(transient, blkc, round, blkExec, waitTime, nil, forkDetector) + + assert.Nil(t, bs) + assert.Equal(t, process.ErrNilMarshalizer, err) +} + func TestNewBootstrap_OkValsShouldWork(t *testing.T) { t.Parallel() @@ -179,10 +251,14 @@ func TestNewBootstrap_OkValsShouldWork(t *testing.T) { transient := &mock.TransientDataPoolMock{} transient.HeadersCalled = func() data.ShardedDataCacherNotifier { sds := &mock.ShardedDataStub{} + sds.AddDataCalled = func(key []byte, data interface{}, destShardID uint32) { assert.Fail(t, "should have not reached this point") } + sds.RegisterHandlerCalled = func(func(key []byte)) { + } + return sds } transient.HeadersNoncesCalled = func() data.Uint64Cacher { @@ -204,8 +280,10 @@ func TestNewBootstrap_OkValsShouldWork(t *testing.T) { blkc := &blockchain.BlockChain{} round := &chronology.Round{} blkExec := &mock.BlockProcessorMock{} + marshalizer := &mock.MarshalizerMock{} + forkDetector := &mock.ForkDetectorMock{} - bs, err := sync.NewBootstrap(transient, blkc, round, blkExec, WaitTime) + bs, err := sync.NewBootstrap(transient, blkc, round, blkExec, waitTime, marshalizer, forkDetector) assert.NotNil(t, bs) assert.Nil(t, err) @@ -224,6 +302,8 @@ func TestBootstrap_ShouldReturnMissingHeader(t *testing.T) { transient := &mock.TransientDataPoolMock{} transient.HeadersCalled = func() data.ShardedDataCacherNotifier { sds := &mock.ShardedDataStub{} + sds.RegisterHandlerCalled = func(func(key []byte)) { + } return sds } transient.HeadersNoncesCalled = func() data.Uint64Cacher { @@ -243,13 +323,17 @@ func TestBootstrap_ShouldReturnMissingHeader(t *testing.T) { return cs } + marshalizer := &mock.MarshalizerMock{} + forkDetector := &mock.ForkDetectorMock{} bs, _ := sync.NewBootstrap( transient, &blkc, chronology.NewRound(time.Now(), time.Now(), time.Duration(100*time.Millisecond)), &mock.BlockProcessorMock{}, - WaitTime) + waitTime, + marshalizer, + forkDetector) bs.RequestHeaderHandler = func(nonce uint64) {} bs.RequestTxBodyHandler = func(hash []byte) {} @@ -270,14 +354,15 @@ func TestBootstrap_ShouldReturnMissingBody(t *testing.T) { transient.HeadersCalled = func() data.ShardedDataCacherNotifier { sds := &mock.ShardedDataStub{} - sds.SearchDataCalled = func(key []byte) (shardValuesPairs map[uint32]interface{}) { - m := make(map[uint32]interface{}) - + sds.SearchFirstDataCalled = func(key []byte) (value interface{}, ok bool) { if bytes.Equal([]byte("aaa"), key) { - m[0] = &block.Header{Nonce: 2} + return &block.Header{Nonce: 2}, true } - return m + return nil, false + } + + sds.RegisterHandlerCalled = func(func(key []byte)) { } return sds @@ -306,13 +391,17 @@ func TestBootstrap_ShouldReturnMissingBody(t *testing.T) { return cs } + marshalizer := &mock.MarshalizerMock{} + forkDetector := &mock.ForkDetectorMock{} bs, _ := sync.NewBootstrap( transient, &blkc, chronology.NewRound(time.Now(), time.Now(), time.Duration(100*time.Millisecond)), &mock.BlockProcessorMock{}, - WaitTime) + waitTime, + marshalizer, + forkDetector) bs.RequestHeader(2) @@ -325,7 +414,7 @@ func TestBootstrap_ShouldNotNeedToSync(t *testing.T) { t.Parallel() ebm := mock.BlockProcessorMock{} - ebm.ProcessAndCommitCalled = func(blk *blockchain.BlockChain, hdr *block.Header, bdy *block.TxBlockBody) error { + ebm.ProcessAndCommitCalled = func(blk *blockchain.BlockChain, hdr *block.Header, bdy *block.TxBlockBody, haveTime func() time.Duration) error { blk.CurrentBlockHeader = hdr return nil } @@ -338,8 +427,11 @@ func TestBootstrap_ShouldNotNeedToSync(t *testing.T) { transient.HeadersCalled = func() data.ShardedDataCacherNotifier { sds := &mock.ShardedDataStub{} - sds.SearchDataCalled = func(key []byte) (shardValuesPairs map[uint32]interface{}) { - return nil + sds.SearchFirstDataCalled = func(key []byte) (value interface{}, ok bool) { + return nil, false + } + + sds.RegisterHandlerCalled = func(func(key []byte)) { } return sds @@ -364,13 +456,21 @@ func TestBootstrap_ShouldNotNeedToSync(t *testing.T) { return cs } + marshalizer := &mock.MarshalizerMock{} + forkDetector := &mock.ForkDetectorMock{} + + forkDetector.CheckForkCalled = func() bool { + return false + } bs, _ := sync.NewBootstrap( transient, &blkc, chronology.NewRound(time.Now(), time.Now().Add(0*time.Millisecond), time.Duration(100*time.Millisecond)), &ebm, - WaitTime) + waitTime, + marshalizer, + forkDetector) bs.StartSync() time.Sleep(200 * time.Millisecond) @@ -381,7 +481,7 @@ func TestBootstrap_SyncShouldSyncOneBlock(t *testing.T) { t.Parallel() ebm := mock.BlockProcessorMock{} - ebm.ProcessAndCommitCalled = func(blk *blockchain.BlockChain, hdr *block.Header, bdy *block.TxBlockBody) error { + ebm.ProcessAndCommitCalled = func(blk *blockchain.BlockChain, hdr *block.Header, bdy *block.TxBlockBody, haveTime func() time.Duration) error { blk.CurrentBlockHeader = hdr return nil } @@ -397,21 +497,22 @@ func TestBootstrap_SyncShouldSyncOneBlock(t *testing.T) { transient.HeadersCalled = func() data.ShardedDataCacherNotifier { sds := &mock.ShardedDataStub{} - sds.SearchDataCalled = func(key []byte) (shardValuesPairs map[uint32]interface{}) { + sds.SearchFirstDataCalled = func(key []byte) (value interface{}, ok bool) { mutDataAvailable.RLock() defer mutDataAvailable.RUnlock() - m := make(map[uint32]interface{}) - if bytes.Equal([]byte("aaa"), key) && dataAvailable { - m[0] = &block.Header{ + return &block.Header{ Nonce: 2, Round: 1, BlockBodyType: block.TxBlock, - BlockBodyHash: []byte("bbb")} + BlockBodyHash: []byte("bbb")}, true } - return m + return nil, false + } + + sds.RegisterHandlerCalled = func(func(key []byte)) { } return sds @@ -447,13 +548,21 @@ func TestBootstrap_SyncShouldSyncOneBlock(t *testing.T) { return cs } + marshalizer := &mock.MarshalizerMock{} + forkDetector := &mock.ForkDetectorMock{} + + forkDetector.CheckForkCalled = func() bool { + return false + } bs, _ := sync.NewBootstrap( transient, &blkc, chronology.NewRound(time.Now(), time.Now().Add(200*time.Millisecond), time.Duration(100*time.Millisecond)), &ebm, - WaitTime) + waitTime, + marshalizer, + forkDetector) bs.StartSync() @@ -472,7 +581,7 @@ func TestBootstrap_ShouldReturnNilErr(t *testing.T) { t.Parallel() ebm := mock.BlockProcessorMock{} - ebm.ProcessAndCommitCalled = func(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody) error { + ebm.ProcessAndCommitCalled = func(blockChain *blockchain.BlockChain, header *block.Header, body *block.TxBlockBody, haveTime func() time.Duration) error { return nil } @@ -484,18 +593,19 @@ func TestBootstrap_ShouldReturnNilErr(t *testing.T) { transient.HeadersCalled = func() data.ShardedDataCacherNotifier { sds := &mock.ShardedDataStub{} - sds.SearchDataCalled = func(key []byte) (shardValuesPairs map[uint32]interface{}) { - m := make(map[uint32]interface{}) - + sds.SearchFirstDataCalled = func(key []byte) (value interface{}, ok bool) { if bytes.Equal([]byte("aaa"), key) { - m[0] = &block.Header{ + return &block.Header{ Nonce: 2, Round: 1, BlockBodyType: block.TxBlock, - BlockBodyHash: []byte("bbb")} + BlockBodyHash: []byte("bbb")}, true } - return m + return nil, false + } + + sds.RegisterHandlerCalled = func(func(key []byte)) { } return sds @@ -529,12 +639,17 @@ func TestBootstrap_ShouldReturnNilErr(t *testing.T) { return cs } + marshalizer := &mock.MarshalizerMock{} + forkDetector := &mock.ForkDetectorMock{} + bs, _ := sync.NewBootstrap( transient, &blkc, chronology.NewRound(time.Now(), time.Now(), time.Duration(100*time.Millisecond)), &ebm, - WaitTime) + waitTime, + marshalizer, + forkDetector) r := bs.SyncBlock() @@ -547,6 +662,8 @@ func TestBootstrap_ShouldSyncShouldReturnFalseWhenCurrentBlockIsNilAndRoundIndex transient := &mock.TransientDataPoolMock{} transient.HeadersCalled = func() data.ShardedDataCacherNotifier { sds := &mock.ShardedDataStub{} + sds.RegisterHandlerCalled = func(func(key []byte)) { + } return sds } transient.HeadersNoncesCalled = func() data.Uint64Cacher { @@ -561,13 +678,17 @@ func TestBootstrap_ShouldSyncShouldReturnFalseWhenCurrentBlockIsNilAndRoundIndex } return cs } + marshalizer := &mock.MarshalizerMock{} + forkDetector := &mock.ForkDetectorMock{} bs, _ := sync.NewBootstrap( transient, &blockchain.BlockChain{}, chronology.NewRound(time.Now(), time.Now(), time.Duration(100*time.Millisecond)), &mock.BlockProcessorMock{}, - WaitTime) + waitTime, + marshalizer, + forkDetector) assert.False(t, bs.ShouldSync()) } @@ -578,6 +699,8 @@ func TestBootstrap_ShouldReturnTrueWhenCurrentBlockIsNilAndRoundIndexIsGreaterTh transient := &mock.TransientDataPoolMock{} transient.HeadersCalled = func() data.ShardedDataCacherNotifier { sds := &mock.ShardedDataStub{} + sds.RegisterHandlerCalled = func(func(key []byte)) { + } return sds } transient.HeadersNoncesCalled = func() data.Uint64Cacher { @@ -592,13 +715,21 @@ func TestBootstrap_ShouldReturnTrueWhenCurrentBlockIsNilAndRoundIndexIsGreaterTh } return cs } + marshalizer := &mock.MarshalizerMock{} + forkDetector := &mock.ForkDetectorMock{} + + forkDetector.CheckForkCalled = func() bool { + return false + } bs, _ := sync.NewBootstrap( transient, &blockchain.BlockChain{}, chronology.NewRound(time.Now(), time.Now().Add(100*time.Millisecond), time.Duration(100*time.Millisecond)), &mock.BlockProcessorMock{}, - WaitTime) + waitTime, + marshalizer, + forkDetector) assert.True(t, bs.ShouldSync()) } @@ -613,6 +744,8 @@ func TestBootstrap_ShouldReturnFalseWhenNodeIsSynced(t *testing.T) { transient := &mock.TransientDataPoolMock{} transient.HeadersCalled = func() data.ShardedDataCacherNotifier { sds := &mock.ShardedDataStub{} + sds.RegisterHandlerCalled = func(func(key []byte)) { + } return sds } transient.HeadersNoncesCalled = func() data.Uint64Cacher { @@ -628,12 +761,21 @@ func TestBootstrap_ShouldReturnFalseWhenNodeIsSynced(t *testing.T) { return cs } + marshalizer := &mock.MarshalizerMock{} + forkDetector := &mock.ForkDetectorMock{} + + forkDetector.CheckForkCalled = func() bool { + return false + } + bs, _ := sync.NewBootstrap( transient, &blkc, chronology.NewRound(time.Now(), time.Now(), time.Duration(100*time.Millisecond)), &mock.BlockProcessorMock{}, - WaitTime) + waitTime, + marshalizer, + forkDetector) assert.False(t, bs.ShouldSync()) } @@ -648,6 +790,8 @@ func TestBootstrap_ShouldReturnTrueWhenNodeIsNotSynced(t *testing.T) { transient := &mock.TransientDataPoolMock{} transient.HeadersCalled = func() data.ShardedDataCacherNotifier { sds := &mock.ShardedDataStub{} + sds.RegisterHandlerCalled = func(func(key []byte)) { + } return sds } transient.HeadersNoncesCalled = func() data.Uint64Cacher { @@ -663,12 +807,20 @@ func TestBootstrap_ShouldReturnTrueWhenNodeIsNotSynced(t *testing.T) { return cs } + marshalizer := &mock.MarshalizerMock{} + forkDetector := &mock.ForkDetectorMock{} + forkDetector.CheckForkCalled = func() bool { + return false + } + bs, _ := sync.NewBootstrap( transient, &blkc, chronology.NewRound(time.Now(), time.Now().Add(100*time.Millisecond), time.Duration(100*time.Millisecond)), &mock.BlockProcessorMock{}, - WaitTime) + waitTime, + marshalizer, + forkDetector) assert.False(t, bs.ShouldSync()) } @@ -679,6 +831,8 @@ func TestBootstrap_GetHeaderFromPoolShouldReturnNil(t *testing.T) { transient := &mock.TransientDataPoolMock{} transient.HeadersCalled = func() data.ShardedDataCacherNotifier { sds := &mock.ShardedDataStub{} + sds.RegisterHandlerCalled = func(func(key []byte)) { + } return sds } transient.HeadersNoncesCalled = func() data.Uint64Cacher { @@ -698,12 +852,21 @@ func TestBootstrap_GetHeaderFromPoolShouldReturnNil(t *testing.T) { return cs } + marshalizer := &mock.MarshalizerMock{} + forkDetector := &mock.ForkDetectorMock{} + + forkDetector.CheckForkCalled = func() bool { + return false + } + bs, _ := sync.NewBootstrap( transient, &blockchain.BlockChain{}, chronology.NewRound(time.Now(), time.Now(), time.Duration(100*time.Millisecond)), &mock.BlockProcessorMock{}, - WaitTime) + waitTime, + marshalizer, + forkDetector) assert.Nil(t, bs.GetHeaderFromPool(0)) } @@ -716,13 +879,17 @@ func TestBootstrap_GetHeaderFromPoolShouldReturnHeader(t *testing.T) { transient := &mock.TransientDataPoolMock{} transient.HeadersCalled = func() data.ShardedDataCacherNotifier { sds := &mock.ShardedDataStub{} - sds.SearchDataCalled = func(key []byte) (shardValuesPairs map[uint32]interface{}) { - m := make(map[uint32]interface{}) + + sds.SearchFirstDataCalled = func(key []byte) (value interface{}, ok bool) { if bytes.Equal([]byte("aaa"), key) { - m[0] = hdr + return hdr, true } - return m + return nil, false } + + sds.RegisterHandlerCalled = func(func(key []byte)) { + } + return sds } transient.HeadersNoncesCalled = func() data.Uint64Cacher { @@ -745,23 +912,72 @@ func TestBootstrap_GetHeaderFromPoolShouldReturnHeader(t *testing.T) { } return cs } + marshalizer := &mock.MarshalizerMock{} + forkDetector := &mock.ForkDetectorMock{} bs, _ := sync.NewBootstrap( transient, &blockchain.BlockChain{}, chronology.NewRound(time.Now(), time.Now(), time.Duration(100*time.Millisecond)), &mock.BlockProcessorMock{}, - WaitTime) + waitTime, + marshalizer, + forkDetector) assert.True(t, hdr == bs.GetHeaderFromPool(0)) } -func TestBootstrap_GetBlockFromPoolShouldReturnNil(t *testing.T) { - t.Parallel() +//func TestBootstrap_GetBlockFromPoolShouldReturnNil(t *testing.T) { +// t.Parallel() +// +// transient := &mock.TransientDataPoolMock{} +// transient.HeadersCalled = func() data.ShardedDataCacherNotifier { +// sds := &mock.ShardedDataStub{} +// sds.RegisterHandlerCalled = func(func(key []byte)) { +// } +// return sds +// } +// transient.HeadersNoncesCalled = func() data.Uint64Cacher { +// hnc := &mock.Uint64CacherStub{} +// hnc.RegisterHandlerCalled = func(handler func(nonce uint64)) { +// } +// return hnc +// } +// transient.TxBlocksCalled = func() storage.Cacher { +// cs := &mock.CacherStub{} +// cs.RegisterHandlerCalled = func(i func(key []byte)) { +// } +// cs.GetCalled = func(key []byte) (value interface{}, ok bool) { +// return nil, false +// } +// return cs +// } +// +// marshalizer := &mock.MarshalizerMock{} +// forkDetector := &mock.ForkDetectorMock{} +// +// bs, _ := sync.NewBootstrap( +// transient, +// &blockchain.BlockChain{}, +// chronology.NewRound(time.Now(), time.Now(), time.Duration(100*time.Millisecond)), +// &mock.BlockProcessorMock{}, +// waitTime, +// marshalizer, +// forkDetector) +// +// r := bs.GetTxBodyHavingHash([]byte("aaa")) +// +// assert.Nil(t, r) +//} + +func TestGetBlockFromPoolShouldReturnBlock(t *testing.T) { + blk := &block.TxBlockBody{} transient := &mock.TransientDataPoolMock{} transient.HeadersCalled = func() data.ShardedDataCacherNotifier { sds := &mock.ShardedDataStub{} + sds.RegisterHandlerCalled = func(func(key []byte)) { + } return sds } transient.HeadersNoncesCalled = func() data.Uint64Cacher { @@ -775,29 +991,50 @@ func TestBootstrap_GetBlockFromPoolShouldReturnNil(t *testing.T) { cs.RegisterHandlerCalled = func(i func(key []byte)) { } cs.GetCalled = func(key []byte) (value interface{}, ok bool) { + if bytes.Equal(key, []byte("aaa")) { + return blk, true + } + return nil, false } return cs } + marshalizer := &mock.MarshalizerMock{} + forkDetector := &mock.ForkDetectorMock{} bs, _ := sync.NewBootstrap( transient, &blockchain.BlockChain{}, chronology.NewRound(time.Now(), time.Now(), time.Duration(100*time.Millisecond)), &mock.BlockProcessorMock{}, - WaitTime) + waitTime, + marshalizer, + forkDetector) - r := bs.GetTxBodyFromPool([]byte("aaa")) + assert.True(t, blk == bs.GetTxBody([]byte("aaa"))) - assert.Nil(t, r) } -func TestGetBlockFromPoolShouldReturnBlock(t *testing.T) { - blk := &block.TxBlockBody{} +//------- testing received headers + +func TestBootstrap_ReceivedHeadersFoundInPoolShouldAddToForkDetector(t *testing.T) { + t.Parallel() + + addedHash := []byte("hash") + addedHdr := &block.Header{} transient := &mock.TransientDataPoolMock{} transient.HeadersCalled = func() data.ShardedDataCacherNotifier { sds := &mock.ShardedDataStub{} + sds.RegisterHandlerCalled = func(func(key []byte)) { + } + sds.SearchFirstDataCalled = func(key []byte) (value interface{}, ok bool) { + if bytes.Equal(key, addedHash) { + return addedHdr, true + } + + return nil, false + } return sds } transient.HeadersNoncesCalled = func() data.Uint64Cacher { @@ -811,22 +1048,759 @@ func TestGetBlockFromPoolShouldReturnBlock(t *testing.T) { cs.RegisterHandlerCalled = func(i func(key []byte)) { } cs.GetCalled = func(key []byte) (value interface{}, ok bool) { - if bytes.Equal(key, []byte("aaa")) { - return blk, true - } - return nil, false } return cs } + wasAdded := false + + marshalizer := &mock.MarshalizerMock{} + forkDetector := &mock.ForkDetectorMock{} + forkDetector.AddHeaderCalled = func(header *block.Header, hash []byte, isReceived bool) error { + if !isReceived { + return errors.New("not received") + } + + if !bytes.Equal(hash, addedHash) { + return errors.New("hash mismatch") + } + + if !reflect.DeepEqual(header, addedHdr) { + return errors.New("header mismatch") + } + + wasAdded = true + return nil + } + bs, _ := sync.NewBootstrap( transient, &blockchain.BlockChain{}, chronology.NewRound(time.Now(), time.Now(), time.Duration(100*time.Millisecond)), &mock.BlockProcessorMock{}, - WaitTime) + waitTime, + marshalizer, + forkDetector) + + bs.ReceivedHeaders(addedHash) + + assert.True(t, wasAdded) +} + +func TestBootstrap_ReceivedHeadersNotFoundInPoolButFoundInStorageShouldAddToForkDetector(t *testing.T) { + t.Parallel() + + addedHash := []byte("hash") + addedHdr := &block.Header{} + + transient := &mock.TransientDataPoolMock{} + transient.HeadersCalled = func() data.ShardedDataCacherNotifier { + sds := &mock.ShardedDataStub{} + sds.RegisterHandlerCalled = func(func(key []byte)) { + } + sds.SearchFirstDataCalled = func(key []byte) (value interface{}, ok bool) { + //not found in data pool as it was already moved out to storage unit + //should not happen normally, but this test takes this situation into account + + return nil, false + } + return sds + } + transient.HeadersNoncesCalled = func() data.Uint64Cacher { + hnc := &mock.Uint64CacherStub{} + hnc.RegisterHandlerCalled = func(handler func(nonce uint64)) { + } + return hnc + } + transient.TxBlocksCalled = func() storage.Cacher { + cs := &mock.CacherStub{} + cs.RegisterHandlerCalled = func(i func(key []byte)) { + } + cs.GetCalled = func(key []byte) (value interface{}, ok bool) { + return nil, false + } + return cs + } + + wasAdded := false + + marshalizer := &mock.MarshalizerMock{} + forkDetector := &mock.ForkDetectorMock{} + forkDetector.AddHeaderCalled = func(header *block.Header, hash []byte, isReceived bool) error { + if !isReceived { + return errors.New("not received") + } + + if !bytes.Equal(hash, addedHash) { + return errors.New("hash mismatch") + } + + if !reflect.DeepEqual(header, addedHdr) { + return errors.New("header mismatch") + } + + wasAdded = true + return nil + } + + headerStorage := &mock.StorerStub{} + headerStorage.GetCalled = func(key []byte) (i []byte, e error) { + if bytes.Equal(key, addedHash) { + buff, _ := marshalizer.Marshal(addedHdr) + + return buff, nil + } + + return nil, nil + } + + blkc, _ := blockchain.NewBlockChain( + &mock.CacherStub{}, + &mock.StorerStub{}, + &mock.StorerStub{}, + &mock.StorerStub{}, + &mock.StorerStub{}, + headerStorage) + + bs, _ := sync.NewBootstrap( + transient, + blkc, + chronology.NewRound(time.Now(), time.Now(), time.Duration(100*time.Millisecond)), + &mock.BlockProcessorMock{}, + waitTime, + marshalizer, + forkDetector) + + bs.ReceivedHeaders(addedHash) + + assert.True(t, wasAdded) +} + +//------- ForkChoice + +func TestBootstrap_ForkChoiceNilBlockchainHeaderShouldErr(t *testing.T) { + t.Parallel() + + transient := &mock.TransientDataPoolMock{} + transient.HeadersCalled = func() data.ShardedDataCacherNotifier { + sds := &mock.ShardedDataStub{ + AddDataCalled: func(key []byte, data interface{}, destShardID uint32) {}, + RegisterHandlerCalled: func(func(key []byte)) {}, + } + return sds + } + transient.HeadersNoncesCalled = func() data.Uint64Cacher { + hnc := &mock.Uint64CacherStub{ + RegisterHandlerCalled: func(handler func(nonce uint64)) {}, + } + return hnc + } + transient.TxBlocksCalled = func() storage.Cacher { + cs := &mock.CacherStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + } + return cs + } + blkc := &blockchain.BlockChain{} + round := &chronology.Round{} + blkExec := &mock.BlockProcessorMock{} + marshalizer := &mock.MarshalizerMock{} + forkDetector := &mock.ForkDetectorMock{} + + bs, _ := sync.NewBootstrap(transient, blkc, round, blkExec, waitTime, marshalizer, forkDetector) + + err := bs.ForkChoice(&block.Header{}) + assert.Equal(t, sync.ErrNilCurrentHeader, err) +} + +func TestBootstrap_ForkChoiceNilParamHeaderShouldErr(t *testing.T) { + t.Parallel() + + transient := &mock.TransientDataPoolMock{} + transient.HeadersCalled = func() data.ShardedDataCacherNotifier { + sds := &mock.ShardedDataStub{ + AddDataCalled: func(key []byte, data interface{}, destShardID uint32) {}, + RegisterHandlerCalled: func(func(key []byte)) {}, + } + return sds + } + transient.HeadersNoncesCalled = func() data.Uint64Cacher { + hnc := &mock.Uint64CacherStub{ + RegisterHandlerCalled: func(handler func(nonce uint64)) {}, + } + return hnc + } + transient.TxBlocksCalled = func() storage.Cacher { + cs := &mock.CacherStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + } + return cs + } + blkc := &blockchain.BlockChain{} + round := &chronology.Round{} + blkExec := &mock.BlockProcessorMock{} + marshalizer := &mock.MarshalizerMock{} + forkDetector := &mock.ForkDetectorMock{} + + bs, _ := sync.NewBootstrap(transient, blkc, round, blkExec, waitTime, marshalizer, forkDetector) + + blkc.CurrentBlockHeader = &block.Header{} + + err := bs.ForkChoice(nil) + assert.Equal(t, sync.ErrNilHeader, err) +} + +func createHeadersDataPool(removedHashCompare []byte, remFlags *removedFlags) data.ShardedDataCacherNotifier { + sds := &mock.ShardedDataStub{ + AddDataCalled: func(key []byte, data interface{}, destShardID uint32) {}, + RegisterHandlerCalled: func(func(key []byte)) {}, + RemoveDataCalled: func(key []byte, destShardID uint32) { + if bytes.Equal(key, removedHashCompare) { + remFlags.flagHdrRemovedFromHeaders = true + } + }, + } + return sds +} + +func createHeadersNoncesDataPool( + getNonceCompare uint64, + getRetHash []byte, + removedNonce uint64, + remFlags *removedFlags) data.Uint64Cacher { + + hnc := &mock.Uint64CacherStub{ + RegisterHandlerCalled: func(handler func(nonce uint64)) {}, + GetCalled: func(u uint64) (i []byte, b bool) { + if u == getNonceCompare { + return getRetHash, true + } + + return nil, false + }, + RemoveCalled: func(u uint64) { + if u == removedNonce { + remFlags.flagHdrRemovedFromNonces = true + } + }, + } + return hnc +} + +func createForkDetector(removedNonce uint64, remFlags *removedFlags) process.ForkDetector { + return &mock.ForkDetectorMock{ + RemoveHeadersCalled: func(nonce uint64) { + if nonce == removedNonce { + remFlags.flagHdrRemovedFromForkDetector = true + } + }, + } +} + +func TestBootstrap_ForkChoiceIsNotEmptyShouldRemove(t *testing.T) { + t.Parallel() + + newHdrHash := []byte("new hdr hash") + newHdrNonce := uint64(6) + + remFlags := &removedFlags{} + + transient := &mock.TransientDataPoolMock{} + transient.HeadersCalled = func() data.ShardedDataCacherNotifier { + return createHeadersDataPool(newHdrHash, remFlags) + } + transient.HeadersNoncesCalled = func() data.Uint64Cacher { + return createHeadersNoncesDataPool(newHdrNonce, newHdrHash, newHdrNonce, remFlags) + } + transient.TxBlocksCalled = func() storage.Cacher { + cs := &mock.CacherStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + } + return cs + } + blkc := &blockchain.BlockChain{} + round := &chronology.Round{} + blkExec := &mock.BlockProcessorMock{} + marshalizer := &mock.MarshalizerMock{} + forkDetector := createForkDetector(newHdrNonce, remFlags) + + bs, _ := sync.NewBootstrap(transient, blkc, round, blkExec, waitTime, marshalizer, forkDetector) + + blkc.CurrentBlockHeader = &block.Header{ + PubKeysBitmap: []byte{1}, + } + + newHdr := &block.Header{Nonce: newHdrNonce} + + err := bs.ForkChoice(newHdr) + assert.Equal(t, reflect.TypeOf(&sync.ErrNotEmptyHeader{}), reflect.TypeOf(err)) + fmt.Printf(err.Error()) + assert.True(t, remFlags.flagHdrRemovedFromNonces) + assert.True(t, remFlags.flagHdrRemovedFromHeaders) + assert.True(t, remFlags.flagHdrRemovedFromForkDetector) + +} + +func createHeadersStorage( + getHashCompare []byte, + getRetBytes []byte, + removedHash []byte, + remFlags *removedFlags, +) storage.Storer { + + return &mock.StorerStub{ + GetCalled: func(key []byte) (i []byte, e error) { + if bytes.Equal(key, getHashCompare) { + return getRetBytes, nil + } + + return nil, errors.New("not found") + }, + RemoveCalled: func(key []byte) error { + if bytes.Equal(key, removedHash) { + remFlags.flagHdrRemovedFromStorage = true + } + return nil + }, + } +} + +func TestBootstrap_ForkChoiceIsEmptyCallRollBackOkValsShouldWork(t *testing.T) { + t.Skip("unskip this test after the fix is applied on rollback, storer not erasing header") + + t.Parallel() + + //retain if the remove process from different storage locations has been called + remFlags := &removedFlags{} + + currentHdrNonce := uint64(8) + currentHdrHash := []byte("current header hash") + + //define prev tx block body "strings" as in this test there are a lot of stubs that + //constantly need to check some defined symbols + prevTxBlockBodyHash := []byte("prev block body hash") + prevTxBlockBodyBytes := []byte("prev block body bytes") + prevTxBlockBody := &block.TxBlockBody{ + StateBlockBody: block.StateBlockBody{RootHash: []byte("state root hash")}, + } + + //define prev header "strings" + prevHdrHash := []byte("prev header hash") + prevHdrBytes := []byte("prev header bytes") + prevHdr := &block.Header{ + Signature: []byte("sig of the prev header as to be unique in this context"), + BlockBodyHash: prevTxBlockBodyHash, + } + + transient := &mock.TransientDataPoolMock{} + //data pool headers + transient.HeadersCalled = func() data.ShardedDataCacherNotifier { + return createHeadersDataPool(currentHdrHash, remFlags) + } + //data pool headers-nonces + transient.HeadersNoncesCalled = func() data.Uint64Cacher { + return createHeadersNoncesDataPool( + currentHdrNonce, + currentHdrHash, + currentHdrNonce, + remFlags, + ) + } + //data pool tx block bodies + transient.TxBlocksCalled = func() storage.Cacher { + cs := &mock.CacherStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + } + return cs + } + + hdrUnit := createHeadersStorage(prevHdrHash, prevHdrBytes, currentHdrHash, remFlags) + txBlockUnit := createHeadersStorage(prevTxBlockBodyHash, prevTxBlockBodyBytes, nil, remFlags) + + //a mock blockchain with special header and tx block bodies stubs (defined above) + blkc, _ := blockchain.NewBlockChain( + &mock.CacherStub{}, + &mock.StorerStub{}, + txBlockUnit, + &mock.StorerStub{}, + &mock.StorerStub{}, + hdrUnit, + ) + round := &chronology.Round{} + blkExec := &mock.BlockProcessorMock{} + + //a marshalizer stub + marshalizer := &mock.MarshalizerStub{ + UnmarshalCalled: func(obj interface{}, buff []byte) error { + if bytes.Equal(buff, prevHdrBytes) { + //bytes represent a header (strings are returns from hdrUnit.Get which is also a stub here) + //copy only defined fields + obj.(*block.Header).Signature = prevHdr.Signature + obj.(*block.Header).BlockBodyHash = prevTxBlockBodyHash + return nil + } + if bytes.Equal(buff, prevTxBlockBodyBytes) { + //bytes represent a tx block body (strings are returns from txBlockUnit.Get which is also a stub here) + //copy only defined fields + obj.(*block.TxBlockBody).RootHash = prevTxBlockBody.RootHash + return nil + } + + return nil + }, + } + forkDetector := createForkDetector(currentHdrNonce, remFlags) + bs, _ := sync.NewBootstrap(transient, blkc, round, blkExec, waitTime, marshalizer, forkDetector) + + //this is the block we want to revert + blkc.CurrentBlockHeader = &block.Header{ + Nonce: currentHdrNonce, + //empty bitmap + PrevHash: prevHdrHash, + } + + err := bs.ForkChoice(&block.Header{}) + assert.Nil(t, err) + assert.True(t, remFlags.flagHdrRemovedFromNonces) + assert.True(t, remFlags.flagHdrRemovedFromHeaders) + assert.True(t, remFlags.flagHdrRemovedFromStorage) + assert.True(t, remFlags.flagHdrRemovedFromForkDetector) + assert.Equal(t, blkc.CurrentBlockHeader, prevHdr) + assert.Equal(t, blkc.CurrentTxBlockBody, prevTxBlockBody) + assert.Equal(t, blkc.CurrentBlockHeaderHash, prevHdrHash) +} + +func TestBootstrap_ForkChoiceIsEmptyCallRollBackToGenesisShouldWork(t *testing.T) { + t.Skip("unskip this test after the fix is applied on rollback, storer not erasing header") + + t.Parallel() + + //retain if the remove process from different storage locations has been called + remFlags := &removedFlags{} + + currentHdrNonce := uint64(1) + currentHdrHash := []byte("current header hash") + + //define prev tx block body "strings" as in this test there are a lot of stubs that + //constantly need to check some defined symbols + prevTxBlockBodyHash := []byte("prev block body hash") + prevTxBlockBodyBytes := []byte("prev block body bytes") + prevTxBlockBody := &block.TxBlockBody{ + StateBlockBody: block.StateBlockBody{RootHash: []byte("state root hash")}, + } + + //define prev header "strings" + prevHdrHash := []byte("prev header hash") + prevHdrBytes := []byte("prev header bytes") + prevHdr := &block.Header{ + Signature: []byte("sig of the prev header as to be unique in this context"), + BlockBodyHash: prevTxBlockBodyHash, + } + + transient := &mock.TransientDataPoolMock{} + //data pool headers + transient.HeadersCalled = func() data.ShardedDataCacherNotifier { + return createHeadersDataPool(currentHdrHash, remFlags) + } + //data pool headers-nonces + transient.HeadersNoncesCalled = func() data.Uint64Cacher { + return createHeadersNoncesDataPool( + currentHdrNonce, + currentHdrHash, + currentHdrNonce, + remFlags, + ) + } + //data pool tx block bodies + transient.TxBlocksCalled = func() storage.Cacher { + cs := &mock.CacherStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + } + return cs + } + + hdrUnit := createHeadersStorage(prevHdrHash, prevHdrBytes, currentHdrHash, remFlags) + txBlockUnit := createHeadersStorage(prevTxBlockBodyHash, prevTxBlockBodyBytes, nil, remFlags) + + //a mock blockchain with special header and tx block bodies stubs (defined above) + blkc, _ := blockchain.NewBlockChain( + &mock.CacherStub{}, + &mock.StorerStub{}, + txBlockUnit, + &mock.StorerStub{}, + &mock.StorerStub{}, + hdrUnit, + ) + round := &chronology.Round{} + blkExec := &mock.BlockProcessorMock{} + + //a marshalizer stub + marshalizer := &mock.MarshalizerStub{ + UnmarshalCalled: func(obj interface{}, buff []byte) error { + if bytes.Equal(buff, prevHdrBytes) { + //bytes represent a header (strings are returns from hdrUnit.Get which is also a stub here) + //copy only defined fields + obj.(*block.Header).Signature = prevHdr.Signature + obj.(*block.Header).BlockBodyHash = prevTxBlockBodyHash + return nil + } + if bytes.Equal(buff, prevTxBlockBodyBytes) { + //bytes represent a tx block body (strings are returns from txBlockUnit.Get which is also a stub here) + //copy only defined fields + obj.(*block.TxBlockBody).RootHash = prevTxBlockBody.RootHash + return nil + } + + return nil + }, + } + forkDetector := createForkDetector(currentHdrNonce, remFlags) + bs, _ := sync.NewBootstrap(transient, blkc, round, blkExec, waitTime, marshalizer, forkDetector) + + //this is the block we want to revert + blkc.CurrentBlockHeader = &block.Header{ + Nonce: currentHdrNonce, + //empty bitmap + PrevHash: prevHdrHash, + } + + err := bs.ForkChoice(&block.Header{}) + assert.Nil(t, err) + assert.True(t, remFlags.flagHdrRemovedFromNonces) + assert.True(t, remFlags.flagHdrRemovedFromHeaders) + assert.True(t, remFlags.flagHdrRemovedFromStorage) + assert.True(t, remFlags.flagHdrRemovedFromForkDetector) + assert.Nil(t, blkc.CurrentBlockHeader) + assert.Nil(t, blkc.CurrentTxBlockBody) + assert.Nil(t, blkc.CurrentBlockHeaderHash) +} + +//------- GetTxBodyHavingHash + +func TestBootstrap_GetTxBodyHavingHashReturnsFromCacherShouldWork(t *testing.T) { + t.Parallel() + + requestedHash := []byte("requested hash") + txBlock := &block.TxBlockBody{} + + transient := &mock.TransientDataPoolMock{} + transient.HeadersCalled = func() data.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + AddDataCalled: func(key []byte, data interface{}, destShardID uint32) {}, + RegisterHandlerCalled: func(func(key []byte)) {}, + } + } + transient.HeadersNoncesCalled = func() data.Uint64Cacher { + return &mock.Uint64CacherStub{ + RegisterHandlerCalled: func(handler func(nonce uint64)) {}, + } + } + transient.TxBlocksCalled = func() storage.Cacher { + return &mock.CacherStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + GetCalled: func(key []byte) (value interface{}, ok bool) { + if bytes.Equal(key, requestedHash) { + return txBlock, true + } + return nil, false + }, + } + } + blkc, _ := blockchain.NewBlockChain( + &mock.CacherStub{}, + &mock.StorerStub{}, + &mock.StorerStub{}, + &mock.StorerStub{}, + &mock.StorerStub{}, + &mock.StorerStub{}, + ) + round := &chronology.Round{} + blkExec := &mock.BlockProcessorMock{} + marshalizer := &mock.MarshalizerMock{} + forkDetector := &mock.ForkDetectorMock{} + + bs, _ := sync.NewBootstrap(transient, blkc, round, blkExec, waitTime, marshalizer, forkDetector) + txBlockRecovered := bs.GetTxBody(requestedHash) + + assert.True(t, txBlockRecovered == txBlock) +} + +func TestBootstrap_GetTxBodyHavingHashNotFoundInCacherOrStorageShouldRetNil(t *testing.T) { + t.Parallel() + + requestedHash := []byte("requested hash") + + transient := &mock.TransientDataPoolMock{} + transient.HeadersCalled = func() data.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + AddDataCalled: func(key []byte, data interface{}, destShardID uint32) {}, + RegisterHandlerCalled: func(func(key []byte)) {}, + } + } + transient.HeadersNoncesCalled = func() data.Uint64Cacher { + return &mock.Uint64CacherStub{ + RegisterHandlerCalled: func(handler func(nonce uint64)) {}, + } + } + transient.TxBlocksCalled = func() storage.Cacher { + return &mock.CacherStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + GetCalled: func(key []byte) (value interface{}, ok bool) { + return nil, false + }, + } + } + + txBlockUnit := &mock.StorerStub{ + GetCalled: func(key []byte) (i []byte, e error) { + return nil, errors.New("not found") + }, + } + + blkc, _ := blockchain.NewBlockChain( + &mock.CacherStub{}, + &mock.StorerStub{}, + txBlockUnit, + &mock.StorerStub{}, + &mock.StorerStub{}, + &mock.StorerStub{}, + ) + round := &chronology.Round{} + blkExec := &mock.BlockProcessorMock{} + marshalizer := &mock.MarshalizerMock{} + forkDetector := &mock.ForkDetectorMock{} + + bs, _ := sync.NewBootstrap(transient, blkc, round, blkExec, waitTime, marshalizer, forkDetector) + txBlockRecovered := bs.GetTxBody(requestedHash) + + assert.Nil(t, txBlockRecovered) +} + +func TestBootstrap_GetTxBodyHavingHashFoundInStorageShouldWork(t *testing.T) { + t.Parallel() + + requestedHash := []byte("requested hash") + txBlock := &block.TxBlockBody{ + StateBlockBody: block.StateBlockBody{RootHash: []byte("root hash")}, + } + + marshalizer := &mock.MarshalizerMock{} + + transient := &mock.TransientDataPoolMock{} + transient.HeadersCalled = func() data.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + AddDataCalled: func(key []byte, data interface{}, destShardID uint32) {}, + RegisterHandlerCalled: func(func(key []byte)) {}, + } + } + transient.HeadersNoncesCalled = func() data.Uint64Cacher { + return &mock.Uint64CacherStub{ + RegisterHandlerCalled: func(handler func(nonce uint64)) {}, + } + } + transient.TxBlocksCalled = func() storage.Cacher { + return &mock.CacherStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + GetCalled: func(key []byte) (value interface{}, ok bool) { + return nil, false + }, + } + } + + txBlockUnit := &mock.StorerStub{ + GetCalled: func(key []byte) (i []byte, e error) { + if bytes.Equal(key, requestedHash) { + buff, _ := marshalizer.Marshal(txBlock) + return buff, nil + } + + return nil, errors.New("not found") + }, + } + + blkc, _ := blockchain.NewBlockChain( + &mock.CacherStub{}, + &mock.StorerStub{}, + txBlockUnit, + &mock.StorerStub{}, + &mock.StorerStub{}, + &mock.StorerStub{}, + ) + round := &chronology.Round{} + blkExec := &mock.BlockProcessorMock{} + forkDetector := &mock.ForkDetectorMock{} + + bs, _ := sync.NewBootstrap(transient, blkc, round, blkExec, waitTime, marshalizer, forkDetector) + txBlockRecovered := bs.GetTxBody(requestedHash) + + assert.Equal(t, txBlock, txBlockRecovered) +} + +func TestBootstrap_GetTxBodyHavingHashMarshalizerFailShouldRemoveAndRetNil(t *testing.T) { + t.Parallel() + + removedCalled := false + requestedHash := []byte("requested hash") + + marshalizer := &mock.MarshalizerStub{ + UnmarshalCalled: func(obj interface{}, buff []byte) error { + return errors.New("marshalizer failure") + }, + } + + transient := &mock.TransientDataPoolMock{} + transient.HeadersCalled = func() data.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + AddDataCalled: func(key []byte, data interface{}, destShardID uint32) {}, + RegisterHandlerCalled: func(func(key []byte)) {}, + } + } + transient.HeadersNoncesCalled = func() data.Uint64Cacher { + return &mock.Uint64CacherStub{ + RegisterHandlerCalled: func(handler func(nonce uint64)) {}, + } + } + transient.TxBlocksCalled = func() storage.Cacher { + return &mock.CacherStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + GetCalled: func(key []byte) (value interface{}, ok bool) { + return nil, false + }, + } + } + + txBlockUnit := &mock.StorerStub{ + GetCalled: func(key []byte) (i []byte, e error) { + if bytes.Equal(key, requestedHash) { + return make([]byte, 0), nil + } + + return nil, errors.New("not found") + }, + RemoveCalled: func(key []byte) error { + if bytes.Equal(key, requestedHash) { + removedCalled = true + } + return nil + }, + } + + blkc, _ := blockchain.NewBlockChain( + &mock.CacherStub{}, + &mock.StorerStub{}, + txBlockUnit, + &mock.StorerStub{}, + &mock.StorerStub{}, + &mock.StorerStub{}, + ) + round := &chronology.Round{} + blkExec := &mock.BlockProcessorMock{} + forkDetector := &mock.ForkDetectorMock{} - assert.True(t, blk == bs.GetTxBodyFromPool([]byte("aaa"))) + bs, _ := sync.NewBootstrap(transient, blkc, round, blkExec, waitTime, marshalizer, forkDetector) + txBlockRecovered := bs.GetTxBody(requestedHash) + assert.Nil(t, txBlockRecovered) + assert.True(t, removedCalled) } diff --git a/process/sync/errors.go b/process/sync/errors.go new file mode 100644 index 00000000000..4d49b3ae860 --- /dev/null +++ b/process/sync/errors.go @@ -0,0 +1,29 @@ +package sync + +import ( + "errors" + "fmt" +) + +// ErrNilHeader signals that a nil header has been provided +var ErrNilHeader = errors.New("nil header") + +// ErrNilHash signals that a nil hash has been provided +var ErrNilHash = errors.New("nil hash") + +// ErrNilCurrentHeader signals that the current header is nil +var ErrNilCurrentHeader = errors.New("The current header is nil\n") + +// ErrTxBlockBodyMismatch signals that a tx block body can not by asserted to TxBlockBody type +var ErrTxBlockBodyMismatch = errors.New("tx block body mismatch") + +type ErrNotEmptyHeader struct { + CurrentNonce uint64 + PoolNonce uint64 +} + +func (err ErrNotEmptyHeader) Error() string { + return fmt.Sprintf("the current header with nonce %d is not from an empty block, "+ + "try to remove header with nonce %d from pool and request it again\n", + err.CurrentNonce, err.PoolNonce) +} diff --git a/process/sync/export_test.go b/process/sync/export_test.go index d4fb5bcc542..b53ac71ad1f 100644 --- a/process/sync/export_test.go +++ b/process/sync/export_test.go @@ -4,18 +4,50 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/data/block" ) -func (boot *bootstrap) RequestHeader(nonce uint64) { +func (boot *Bootstrap) RequestHeader(nonce uint64) { boot.requestHeader(nonce) } -func (boot *bootstrap) ShouldSync() bool { - return boot.shouldSync() +func (boot *Bootstrap) GetHeaderFromPool(nonce uint64) *block.Header { + return boot.getHeaderFromPoolHavingNonce(nonce) } -func (boot *bootstrap) GetHeaderFromPool(nonce uint64) *block.Header { - return boot.getHeaderFromPool(nonce) +func (boot *Bootstrap) GetTxBody(hash []byte) interface{} { + return boot.getTxBody(hash) } -func (boot *bootstrap) GetTxBodyFromPool(hash []byte) interface{} { - return boot.getTxBodyFromPool(hash) +func (boot *Bootstrap) ReceivedHeaders(key []byte) { + boot.receivedHeaders(key) +} + +func (boot *Bootstrap) ForkChoice(hdr *block.Header) error { + return boot.forkChoice(hdr) +} + +func (bfd *basicForkDetector) GetHeaders(nonce uint64) []*headerInfo { + bfd.mutHeaders.Lock() + defer bfd.mutHeaders.Unlock() + + headers := bfd.headers[nonce] + + if headers == nil { + return nil + } + + newHeaders := make([]*headerInfo, len(headers)) + copy(newHeaders, headers) + + return newHeaders +} + +func (hi *headerInfo) Header() *block.Header { + return hi.header +} + +func (hi *headerInfo) Hash() []byte { + return hi.hash +} + +func (hi *headerInfo) IsReceived() bool { + return hi.isReceived } diff --git a/process/sync/validator.go b/process/sync/validator.go index 07805c13771..7bb9ac7f540 100644 --- a/process/sync/validator.go +++ b/process/sync/validator.go @@ -17,7 +17,7 @@ const RoundsToWaitToBeRemoved = 5 type validatorData struct { RoundIndex int32 - Stake big.Int + Stake *big.Int } // syncValidators implements a validators sync mechanism @@ -106,14 +106,14 @@ func (sv *syncValidators) processRegisterRequests(regData *state.RegistrationDat if v, isInEligibleList := sv.eligibleList[k]; isInEligibleList { sv.eligibleList[k] = &validatorData{ RoundIndex: v.RoundIndex, - Stake: *big.NewInt(0).Add(&v.Stake, ®Data.Stake), + Stake: big.NewInt(0).Add(v.Stake, regData.Stake), } } else { // if the validator is not in the eligible list it should wait for some certain rounds until it // would be moved there if regData.RoundIndex+RoundsToWaitToBeEligible < sv.rounder.Index() { sv.eligibleList[k] = &validatorData{ RoundIndex: regData.RoundIndex, - Stake: *big.NewInt(0).Set(®Data.Stake), + Stake: big.NewInt(0).Set(regData.Stake), } } } @@ -146,7 +146,7 @@ func (sv *syncValidators) GetEligibleList() map[string]*validatorData { sv.mut.RLock() for k, v := range sv.eligibleList { - eligibleList[k] = &validatorData{RoundIndex: v.RoundIndex, Stake: *big.NewInt(0).Set(&v.Stake)} + eligibleList[k] = &validatorData{RoundIndex: v.RoundIndex, Stake: big.NewInt(0).Set(v.Stake)} } sv.mut.RUnlock() diff --git a/process/sync/validator_test.go b/process/sync/validator_test.go index 3f870545c48..5c278c433e5 100644 --- a/process/sync/validator_test.go +++ b/process/sync/validator_test.go @@ -76,7 +76,7 @@ func TestGetEligibleList_ShouldHaveNoValidatorsAfterOneRegisterRequest(t *testin regData := state.RegistrationData{ NodePubKey: []byte("node1"), Action: state.ArRegister, - Stake: *big.NewInt(1), + Stake: big.NewInt(1), RoundIndex: 0, } @@ -95,7 +95,7 @@ func TestGetEligibleList_ShouldHaveOneValidatorAfterOneRegisterRequestAndSomeRou regData := state.RegistrationData{ NodePubKey: []byte("node1"), Action: state.ArRegister, - Stake: *big.NewInt(1), + Stake: big.NewInt(1), RoundIndex: 0, } @@ -119,7 +119,7 @@ func TestGetEligibleList_ShouldHaveOneValidatorWithIncreasedStakeAfterTwoRegiste regData := state.RegistrationData{ NodePubKey: []byte("node1"), Action: state.ArRegister, - Stake: *big.NewInt(1), + Stake: big.NewInt(1), RoundIndex: 0, } @@ -128,7 +128,7 @@ func TestGetEligibleList_ShouldHaveOneValidatorWithIncreasedStakeAfterTwoRegiste regData2 := state.RegistrationData{ NodePubKey: []byte("node1"), Action: state.ArRegister, - Stake: *big.NewInt(2), + Stake: big.NewInt(2), RoundIndex: 3, } @@ -140,7 +140,7 @@ func TestGetEligibleList_ShouldHaveOneValidatorWithIncreasedStakeAfterTwoRegiste } assert.Equal(t, 1, len(sv.GetEligibleList())) - assert.Equal(t, *big.NewInt(3), sv.GetEligibleList()["node1"].Stake) + assert.Equal(t, big.NewInt(3), sv.GetEligibleList()["node1"].Stake) } func TestGetEligibleList_ShouldHaveOneValidatorAfterOneRegisterAndUnregisterRequest(t *testing.T) { @@ -153,7 +153,7 @@ func TestGetEligibleList_ShouldHaveOneValidatorAfterOneRegisterAndUnregisterRequ regData := state.RegistrationData{ NodePubKey: []byte("node1"), Action: state.ArRegister, - Stake: *big.NewInt(1), + Stake: big.NewInt(1), RoundIndex: 0, } @@ -162,7 +162,7 @@ func TestGetEligibleList_ShouldHaveOneValidatorAfterOneRegisterAndUnregisterRequ regData2 := state.RegistrationData{ NodePubKey: []byte("node1"), Action: state.ArUnregister, - Stake: *big.NewInt(1), + Stake: big.NewInt(1), RoundIndex: 1, } @@ -186,7 +186,7 @@ func TestGetEligibleList_ShouldHaveNoValidatorsAfterOneRegisterAndUnregisterRequ regData := state.RegistrationData{ NodePubKey: []byte("node1"), Action: state.ArRegister, - Stake: *big.NewInt(1), + Stake: big.NewInt(1), RoundIndex: 0, } @@ -195,7 +195,7 @@ func TestGetEligibleList_ShouldHaveNoValidatorsAfterOneRegisterAndUnregisterRequ regData2 := state.RegistrationData{ NodePubKey: []byte("node1"), Action: state.ArUnregister, - Stake: *big.NewInt(1), + Stake: big.NewInt(1), RoundIndex: 1, } diff --git a/process/transaction/export_test.go b/process/transaction/export_test.go index 32f8661e1c4..ccb98c5aaba 100644 --- a/process/transaction/export_test.go +++ b/process/transaction/export_test.go @@ -37,6 +37,6 @@ func (txi *TxInterceptor) ProcessTx(tx p2p.Creator, rawData []byte) error { return txi.processTx(tx, rawData) } -func (txRes *txResolver) ResolveTxRequest(rd process.RequestData) ([]byte, error) { +func (txRes *TxResolver) ResolveTxRequest(rd process.RequestData) ([]byte, error) { return txRes.resolveTxRequest(rd) } diff --git a/process/transaction/interceptedTransaction.go b/process/transaction/interceptedTransaction.go index 0d4ecb029ec..f9e49e71271 100644 --- a/process/transaction/interceptedTransaction.go +++ b/process/transaction/interceptedTransaction.go @@ -15,6 +15,7 @@ import ( type InterceptedTransaction struct { *transaction.Transaction + txBuffWithoutSig []byte hash []byte rcvShard uint32 sndShard uint32 @@ -85,10 +86,6 @@ func (inTx *InterceptedTransaction) Integrity(coordinator sharding.ShardCoordina return process.ErrNilSignature } - if inTx.Challenge == nil { - return process.ErrNilChallenge - } - if inTx.RcvAddr == nil { return process.ErrNilRcvAddr } @@ -97,6 +94,10 @@ func (inTx *InterceptedTransaction) Integrity(coordinator sharding.ShardCoordina return process.ErrNilSndAddr } + if inTx.Transaction.Value == nil { + return process.ErrNilValue + } + if inTx.Transaction.Value.Cmp(big.NewInt(0)) < 0 { return process.ErrNegativeValue } @@ -114,12 +115,12 @@ func (inTx *InterceptedTransaction) VerifySig() error { return process.ErrNilSingleSignKeyGen } - singleSignVerifier, err := inTx.singleSignKeyGen.PublicKeyFromByteArray(inTx.RcvAddr) + singleSignVerifier, err := inTx.singleSignKeyGen.PublicKeyFromByteArray(inTx.SndAddr) if err != nil { return err } - err = singleSignVerifier.Verify(inTx.hash, inTx.Signature) + err = singleSignVerifier.Verify(inTx.txBuffWithoutSig, inTx.Signature) if err != nil { return err @@ -168,6 +169,16 @@ func (inTx *InterceptedTransaction) Hash() []byte { return inTx.hash } +// SetTxBuffWithoutSig sets the byte slice buffer of this transaction having nil in Signature field. +func (inTx *InterceptedTransaction) SetTxBuffWithoutSig(txBuffWithoutSig []byte) { + inTx.txBuffWithoutSig = txBuffWithoutSig +} + +// TxBuffWithoutSig gets the byte slice buffer of this transaction having nil in Signature field +func (inTx *InterceptedTransaction) TxBuffWithoutSig() []byte { + return inTx.txBuffWithoutSig +} + // SingleSignKeyGen returns the key generator that is used to create a new public key verifier that will be used // for validating transaction's signature func (inTx *InterceptedTransaction) SingleSignKeyGen() crypto.KeyGenerator { diff --git a/process/transaction/interceptedTransaction_test.go b/process/transaction/interceptedTransaction_test.go index 313217ee995..49b6042f657 100644 --- a/process/transaction/interceptedTransaction_test.go +++ b/process/transaction/interceptedTransaction_test.go @@ -1,6 +1,7 @@ package transaction_test import ( + "bytes" "math/big" "testing" @@ -33,25 +34,11 @@ func TestInterceptedTransaction_IntegrityNilSignatureShouldErr(t *testing.T) { tx.Challenge = make([]byte, 0) tx.RcvAddr = make([]byte, 0) tx.SndAddr = make([]byte, 0) - tx.Value = *big.NewInt(1) + tx.Value = big.NewInt(1) assert.Equal(t, process.ErrNilSignature, tx.Integrity(nil)) } -func TestInterceptedTransaction_IntegrityNilChallengeShouldErr(t *testing.T) { - t.Parallel() - - tx := transaction.NewInterceptedTransaction() - - tx.Signature = make([]byte, 0) - tx.Challenge = nil - tx.RcvAddr = make([]byte, 0) - tx.SndAddr = make([]byte, 0) - tx.Value = *big.NewInt(1) - - assert.Equal(t, process.ErrNilChallenge, tx.Integrity(nil)) -} - func TestInterceptedTransaction_IntegrityNilRcvAddrShouldErr(t *testing.T) { t.Parallel() @@ -61,7 +48,7 @@ func TestInterceptedTransaction_IntegrityNilRcvAddrShouldErr(t *testing.T) { tx.Challenge = make([]byte, 0) tx.RcvAddr = nil tx.SndAddr = make([]byte, 0) - tx.Value = *big.NewInt(1) + tx.Value = big.NewInt(1) assert.Equal(t, process.ErrNilRcvAddr, tx.Integrity(nil)) } @@ -75,7 +62,7 @@ func TestInterceptedTransaction_IntegrityNilSndAddrShouldErr(t *testing.T) { tx.Challenge = make([]byte, 0) tx.RcvAddr = make([]byte, 0) tx.SndAddr = nil - tx.Value = *big.NewInt(1) + tx.Value = big.NewInt(1) assert.Equal(t, process.ErrNilSndAddr, tx.Integrity(nil)) } @@ -89,7 +76,7 @@ func TestInterceptedTransaction_IntegrityNegativeValueShouldErr(t *testing.T) { tx.Challenge = make([]byte, 0) tx.RcvAddr = make([]byte, 0) tx.SndAddr = make([]byte, 0) - tx.Value = *big.NewInt(-1) + tx.Value = big.NewInt(-1) assert.Equal(t, process.ErrNegativeValue, tx.Integrity(nil)) } @@ -103,7 +90,7 @@ func TestInterceptedTransaction_IntegrityOkValsShouldWork(t *testing.T) { tx.Challenge = make([]byte, 0) tx.RcvAddr = make([]byte, 0) tx.SndAddr = make([]byte, 0) - tx.Value = *big.NewInt(0) + tx.Value = big.NewInt(0) assert.Nil(t, tx.Integrity(nil)) } @@ -141,7 +128,7 @@ func TestInterceptedTransaction_IntegrityAndValidityNilAddrConverterShouldErr(t tx.Challenge = make([]byte, 0) tx.RcvAddr = make([]byte, 0) tx.SndAddr = make([]byte, 0) - tx.Value = *big.NewInt(1) + tx.Value = big.NewInt(1) assert.Equal(t, process.ErrNilAddressConverter, tx.IntegrityAndValidity(oneSharder)) } @@ -156,6 +143,7 @@ func TestTransactionInterceptor_IntegrityAndValidityInvalidSenderAddrShouldRetFa tx.Challenge = make([]byte, 0) tx.RcvAddr = make([]byte, 0) tx.SndAddr = []byte("please fail, addrConverter!") + tx.Value = big.NewInt(0) addrConv := &mock.AddressConverterMock{} addrConv.CreateAddressFromPublicKeyBytesRetErrForValue = []byte("please fail, addrConverter!") @@ -174,6 +162,7 @@ func TestTransactionInterceptor_IntegrityAndValidityInvalidReceiverAddrShouldRet tx.Challenge = make([]byte, 0) tx.RcvAddr = []byte("please fail, addrConverter!") tx.SndAddr = make([]byte, 0) + tx.Value = big.NewInt(0) addrConv := &mock.AddressConverterMock{} addrConv.CreateAddressFromPublicKeyBytesRetErrForValue = []byte("please fail, addrConverter!") @@ -192,6 +181,7 @@ func TestTransactionInterceptor_IntegrityAndValiditySameShardShouldWork(t *testi tx.Challenge = make([]byte, 0) tx.RcvAddr = make([]byte, 0) tx.SndAddr = make([]byte, 0) + tx.Value = big.NewInt(0) addrConv := &mock.AddressConverterMock{} tx.SetAddressConverter(addrConv) @@ -224,6 +214,7 @@ func TestTransactionInterceptor_IntegrityAndValidityOtherShardsShouldWork(t *tes tx.Challenge = make([]byte, 0) tx.RcvAddr = make([]byte, 1) tx.SndAddr = make([]byte, 0) + tx.Value = big.NewInt(0) addrConv := &mock.AddressConverterMock{} tx.SetAddressConverter(addrConv) @@ -269,6 +260,28 @@ func TestInterceptedTransaction_VerifySigKeyGenRetErrShouldErr(t *testing.T) { assert.Equal(t, "failure", tx.VerifySig().Error()) } +func TestInterceptedTransaction_VerifySigKeyGenShouldReceiveSenderAddr(t *testing.T) { + t.Parallel() + + tx := transaction.NewInterceptedTransaction() + senderBytes := []byte("sender") + + tx.SndAddr = senderBytes + tx.RcvAddr = []byte("receiver") + + keyGen := &mock.SingleSignKeyGenMock{} + keyGen.PublicKeyFromByteArrayCalled = func(b []byte) (key crypto.PublicKey, e error) { + if !bytes.Equal(b, senderBytes) { + assert.Fail(t, "publickey from byte array should have been called for sender bytes") + } + + return nil, errors.New("failure") + } + tx.SetSingleSignKeyGen(keyGen) + + tx.VerifySig() +} + func TestInterceptedTransaction_VerifySigVerifyDoesNotPassShouldErr(t *testing.T) { t.Parallel() @@ -309,22 +322,47 @@ func TestInterceptedTransaction_VerifySigVerifyDoesPassShouldRetNil(t *testing.T //------- Getters and Setters -func TestTransactionInterceptor_AllGettersAndSettersShouldWork(t *testing.T) { +func TestTransactionInterceptor_GetterSetterAddrConv(t *testing.T) { t.Parallel() addrConv := &mock.AddressConverterMock{} tx := transaction.NewInterceptedTransaction() tx.SetAddressConverter(addrConv) + assert.True(t, addrConv == tx.AddressConverter()) +} + +func TestTransactionInterceptor_GetterSetterHash(t *testing.T) { + t.Parallel() + + hash := []byte("hash") + + tx := transaction.NewInterceptedTransaction() + tx.SetHash(hash) + + assert.Equal(t, string(hash), tx.ID()) + assert.Equal(t, string(hash), string(tx.Hash())) +} - tx.SetHash([]byte("aaaa")) - assert.Equal(t, "aaaa", tx.ID()) - assert.Equal(t, "aaaa", string(tx.Hash())) +func TestTransactionInterceptor_GetterSetterTxBuffWithoutSig(t *testing.T) { + t.Parallel() + + txBuffWithoutSig := []byte("txBuffWithoutSig") + + tx := transaction.NewInterceptedTransaction() + tx.SetTxBuffWithoutSig(txBuffWithoutSig) + + assert.Equal(t, txBuffWithoutSig, tx.TxBuffWithoutSig()) +} + +func TestTransactionInterceptor_GetterSetterKeyGen(t *testing.T) { + t.Parallel() keyGen := &mock.SingleSignKeyGenMock{} + tx := transaction.NewInterceptedTransaction() tx.SetSingleSignKeyGen(keyGen) - assert.True(t, keyGen == tx.SingleSignKeyGen()) + assert.True(t, keyGen == tx.SingleSignKeyGen()) } diff --git a/process/transaction/interceptor.go b/process/transaction/interceptor.go index 9ea7f476687..ff484b7f290 100644 --- a/process/transaction/interceptor.go +++ b/process/transaction/interceptor.go @@ -8,12 +8,14 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/p2p" "github.com/ElrondNetwork/elrond-go-sandbox/process" "github.com/ElrondNetwork/elrond-go-sandbox/sharding" + "github.com/ElrondNetwork/elrond-go-sandbox/storage" ) // TxInterceptor is used for intercepting transaction and storing them into a datapool type TxInterceptor struct { process.Interceptor txPool data.ShardedDataCacherNotifier + txStorer storage.Storer addrConverter state.AddressConverter hasher hashing.Hasher singleSignKeyGen crypto.KeyGenerator @@ -24,11 +26,11 @@ type TxInterceptor struct { func NewTxInterceptor( interceptor process.Interceptor, txPool data.ShardedDataCacherNotifier, + txStorer storage.Storer, addrConverter state.AddressConverter, hasher hashing.Hasher, singleSignKeyGen crypto.KeyGenerator, shardCoordinator sharding.ShardCoordinator, - ) (*TxInterceptor, error) { if interceptor == nil { @@ -39,6 +41,10 @@ func NewTxInterceptor( return nil, process.ErrNilTxDataPool } + if txStorer == nil { + return nil, process.ErrNilTxStorage + } + if addrConverter == nil { return nil, process.ErrNilAddressConverter } @@ -58,6 +64,7 @@ func NewTxInterceptor( txIntercept := &TxInterceptor{ Interceptor: interceptor, txPool: txPool, + txStorer: txStorer, hasher: hasher, addrConverter: addrConverter, singleSignKeyGen: singleSignKeyGen, @@ -86,10 +93,24 @@ func (txi *TxInterceptor) processTx(tx p2p.Creator, rawData []byte) error { txIntercepted.SetAddressConverter(txi.addrConverter) txIntercepted.SetSingleSignKeyGen(txi.singleSignKeyGen) - hash := txi.hasher.Compute(string(rawData)) - txIntercepted.SetHash(hash) + hashWithSig := txi.hasher.Compute(string(rawData)) + txIntercepted.SetHash(hashWithSig) + + copiedTx := *txIntercepted.GetTransaction() + copiedTx.Signature = nil - err := txIntercepted.IntegrityAndValidity(txi.shardCoordinator) + marshalizer := txi.Marshalizer() + if marshalizer == nil { + return process.ErrNilMarshalizer + } + + buffCopiedTx, err := marshalizer.Marshal(&copiedTx) + if err != nil { + return err + } + txIntercepted.SetTxBuffWithoutSig(buffCopiedTx) + + err = txIntercepted.IntegrityAndValidity(txi.shardCoordinator) if err != nil { return err } @@ -104,7 +125,13 @@ func (txi *TxInterceptor) processTx(tx p2p.Creator, rawData []byte) error { return nil } - txi.txPool.AddData(hash, txIntercepted.GetTransaction(), txIntercepted.SndShard()) + isTxInStorage, _ := txi.txStorer.Has(hashWithSig) + + if isTxInStorage { + log.Debug("intercepted tx already processed") + return nil + } + txi.txPool.AddData(hashWithSig, txIntercepted.GetTransaction(), txIntercepted.SndShard()) return nil } diff --git a/process/transaction/interceptor_test.go b/process/transaction/interceptor_test.go index b93d2f910b1..a74fe5c830a 100644 --- a/process/transaction/interceptor_test.go +++ b/process/transaction/interceptor_test.go @@ -7,6 +7,7 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/crypto" "github.com/ElrondNetwork/elrond-go-sandbox/data/state" + "github.com/ElrondNetwork/elrond-go-sandbox/marshal" "github.com/ElrondNetwork/elrond-go-sandbox/p2p" "github.com/ElrondNetwork/elrond-go-sandbox/process" "github.com/pkg/errors" @@ -25,10 +26,12 @@ func TestNewTxInterceptor_NilInterceptorShouldErr(t *testing.T) { addrConv := &mock.AddressConverterMock{} keyGen := &mock.SingleSignKeyGenMock{} oneSharder := mock.NewOneShardCoordinatorMock() + storer := &mock.StorerStub{} txi, err := transaction.NewTxInterceptor( nil, txPool, + storer, addrConv, mock.HasherMock{}, keyGen, @@ -45,10 +48,12 @@ func TestNewTxInterceptor_NilTransactionPoolShouldErr(t *testing.T) { addrConv := &mock.AddressConverterMock{} keyGen := &mock.SingleSignKeyGenMock{} oneSharder := mock.NewOneShardCoordinatorMock() + storer := &mock.StorerStub{} txi, err := transaction.NewTxInterceptor( interceptor, nil, + storer, addrConv, mock.HasherMock{}, keyGen, @@ -58,6 +63,28 @@ func TestNewTxInterceptor_NilTransactionPoolShouldErr(t *testing.T) { assert.Nil(t, txi) } +func TestNewTxInterceptor_NilStorerShouldErr(t *testing.T) { + t.Parallel() + + txPool := &mock.ShardedDataStub{} + interceptor := &mock.InterceptorStub{} + addrConv := &mock.AddressConverterMock{} + keyGen := &mock.SingleSignKeyGenMock{} + oneSharder := mock.NewOneShardCoordinatorMock() + + txi, err := transaction.NewTxInterceptor( + interceptor, + txPool, + nil, + addrConv, + mock.HasherMock{}, + keyGen, + oneSharder) + + assert.Equal(t, process.ErrNilTxStorage, err) + assert.Nil(t, txi) +} + func TestNewTxInterceptor_NilAddressConverterShouldErr(t *testing.T) { t.Parallel() @@ -65,10 +92,12 @@ func TestNewTxInterceptor_NilAddressConverterShouldErr(t *testing.T) { txPool := &mock.ShardedDataStub{} keyGen := &mock.SingleSignKeyGenMock{} oneSharder := mock.NewOneShardCoordinatorMock() + storer := &mock.StorerStub{} txi, err := transaction.NewTxInterceptor( interceptor, txPool, + storer, nil, mock.HasherMock{}, keyGen, @@ -86,10 +115,12 @@ func TestNewTxInterceptor_NilHasherShouldErr(t *testing.T) { addrConv := &mock.AddressConverterMock{} keyGen := &mock.SingleSignKeyGenMock{} oneSharder := mock.NewOneShardCoordinatorMock() + storer := &mock.StorerStub{} txi, err := transaction.NewTxInterceptor( interceptor, txPool, + storer, addrConv, nil, keyGen, @@ -106,10 +137,12 @@ func TestNewTxInterceptor_NilKeyGenShouldErr(t *testing.T) { txPool := &mock.ShardedDataStub{} addrConv := &mock.AddressConverterMock{} oneSharder := mock.NewOneShardCoordinatorMock() + storer := &mock.StorerStub{} txi, err := transaction.NewTxInterceptor( interceptor, txPool, + storer, addrConv, mock.HasherMock{}, nil, @@ -126,10 +159,12 @@ func TestNewTxInterceptor_NilShardCoordinatorShouldErr(t *testing.T) { txPool := &mock.ShardedDataStub{} addrConv := &mock.AddressConverterMock{} keyGen := &mock.SingleSignKeyGenMock{} + storer := &mock.StorerStub{} txi, err := transaction.NewTxInterceptor( interceptor, txPool, + storer, addrConv, mock.HasherMock{}, keyGen, @@ -150,10 +185,12 @@ func TestNewTxInterceptor_OkValsShouldWork(t *testing.T) { addrConv := &mock.AddressConverterMock{} keyGen := &mock.SingleSignKeyGenMock{} oneSharder := mock.NewOneShardCoordinatorMock() + storer := &mock.StorerStub{} txi, err := transaction.NewTxInterceptor( interceptor, txPool, + storer, addrConv, mock.HasherMock{}, keyGen, @@ -176,10 +213,12 @@ func TestTransactionInterceptor_ProcessTxNilTxShouldErr(t *testing.T) { addrConv := &mock.AddressConverterMock{} keyGen := &mock.SingleSignKeyGenMock{} oneSharder := mock.NewOneShardCoordinatorMock() + storer := &mock.StorerStub{} txi, _ := transaction.NewTxInterceptor( interceptor, txPool, + storer, addrConv, mock.HasherMock{}, keyGen, @@ -199,10 +238,12 @@ func TestTransactionInterceptor_ProcessTxWrongTypeOfNewerShouldErr(t *testing.T) addrConv := &mock.AddressConverterMock{} keyGen := &mock.SingleSignKeyGenMock{} oneSharder := mock.NewOneShardCoordinatorMock() + storer := &mock.StorerStub{} txi, _ := transaction.NewTxInterceptor( interceptor, txPool, + storer, addrConv, mock.HasherMock{}, keyGen, @@ -213,21 +254,60 @@ func TestTransactionInterceptor_ProcessTxWrongTypeOfNewerShouldErr(t *testing.T) assert.Equal(t, process.ErrBadInterceptorTopicImplementation, txi.ProcessTx(&sn, make([]byte, 0))) } +func TestTransactionInterceptor_ProcessTxNilMarshalizerShouldErr(t *testing.T) { + t.Parallel() + + interceptor := &mock.InterceptorStub{} + interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { + } + interceptor.MarshalizerCalled = func() marshal.Marshalizer { + return nil + } + + txPool := &mock.ShardedDataStub{} + addrConv := &mock.AddressConverterMock{} + keyGen := &mock.SingleSignKeyGenMock{} + oneSharder := mock.NewOneShardCoordinatorMock() + storer := &mock.StorerStub{} + + txi, _ := transaction.NewTxInterceptor( + interceptor, + txPool, + storer, + addrConv, + mock.HasherMock{}, + keyGen, + oneSharder) + + txNewer := transaction.NewInterceptedTransaction() + txNewer.Signature = make([]byte, 0) + txNewer.Challenge = make([]byte, 0) + txNewer.RcvAddr = make([]byte, 0) + txNewer.SndAddr = make([]byte, 0) + + assert.Equal(t, process.ErrNilMarshalizer, txi.ProcessTx(txNewer, make([]byte, 0))) +} + func TestTransactionInterceptor_ProcessTxIntegrityFailedShouldErr(t *testing.T) { t.Parallel() interceptor := &mock.InterceptorStub{} interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { } + interceptor.MarshalizerCalled = func() marshal.Marshalizer { + return &mock.MarshalizerMock{} + } txPool := &mock.ShardedDataStub{} addrConv := &mock.AddressConverterMock{} keyGen := &mock.SingleSignKeyGenMock{} oneSharder := mock.NewOneShardCoordinatorMock() + storer := &mock.StorerStub{} txi, _ := transaction.NewTxInterceptor( interceptor, txPool, + storer, addrConv, mock.HasherMock{}, keyGen, @@ -248,15 +328,20 @@ func TestTransactionInterceptor_ProcessNilDataToProcessShouldErr(t *testing.T) { interceptor := &mock.InterceptorStub{} interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { } + interceptor.MarshalizerCalled = func() marshal.Marshalizer { + return &mock.MarshalizerMock{} + } txPool := &mock.ShardedDataStub{} addrConv := &mock.AddressConverterMock{} keyGen := &mock.SingleSignKeyGenMock{} oneSharder := mock.NewOneShardCoordinatorMock() + storer := &mock.StorerStub{} txi, _ := transaction.NewTxInterceptor( interceptor, txPool, + storer, addrConv, mock.HasherMock{}, keyGen, @@ -277,15 +362,20 @@ func TestTransactionInterceptor_ProcessTxIntegrityAndValidityShouldErr(t *testin interceptor := &mock.InterceptorStub{} interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { } + interceptor.MarshalizerCalled = func() marshal.Marshalizer { + return &mock.MarshalizerMock{} + } txPool := &mock.ShardedDataStub{} addrConv := &mock.AddressConverterMock{} keyGen := &mock.SingleSignKeyGenMock{} oneSharder := mock.NewOneShardCoordinatorMock() + storer := &mock.StorerStub{} txi, _ := transaction.NewTxInterceptor( interceptor, txPool, + storer, addrConv, mock.HasherMock{}, keyGen, @@ -296,6 +386,7 @@ func TestTransactionInterceptor_ProcessTxIntegrityAndValidityShouldErr(t *testin txNewer.Challenge = make([]byte, 0) txNewer.RcvAddr = []byte("please fail, addrConverter!") txNewer.SndAddr = make([]byte, 0) + txNewer.Value = big.NewInt(0) addrConv.CreateAddressFromPublicKeyBytesRetErrForValue = []byte("please fail, addrConverter!") @@ -308,6 +399,9 @@ func TestTransactionInterceptor_ProcessTxVerifySigFailsShouldErr(t *testing.T) { interceptor := &mock.InterceptorStub{} interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { } + interceptor.MarshalizerCalled = func() marshal.Marshalizer { + return &mock.MarshalizerMock{} + } txPool := &mock.ShardedDataStub{} addrConv := &mock.AddressConverterMock{} @@ -323,10 +417,12 @@ func TestTransactionInterceptor_ProcessTxVerifySigFailsShouldErr(t *testing.T) { } oneSharder := mock.NewOneShardCoordinatorMock() + storer := &mock.StorerStub{} txi, _ := transaction.NewTxInterceptor( interceptor, txPool, + storer, addrConv, mock.HasherMock{}, keyGen, @@ -337,7 +433,7 @@ func TestTransactionInterceptor_ProcessTxVerifySigFailsShouldErr(t *testing.T) { txNewer.Challenge = make([]byte, 0) txNewer.RcvAddr = make([]byte, 0) txNewer.SndAddr = make([]byte, 0) - txNewer.Value = *big.NewInt(0) + txNewer.Value = big.NewInt(0) assert.Equal(t, "sig not valid", txi.ProcessTx(txNewer, []byte("txHash")).Error()) } @@ -348,6 +444,9 @@ func TestTransactionInterceptor_ProcessTxOkValsSameShardShouldWork(t *testing.T) interceptor := &mock.InterceptorStub{} interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { } + interceptor.MarshalizerCalled = func() marshal.Marshalizer { + return &mock.MarshalizerMock{} + } wasAdded := 0 @@ -370,10 +469,15 @@ func TestTransactionInterceptor_ProcessTxOkValsSameShardShouldWork(t *testing.T) } oneSharder := mock.NewOneShardCoordinatorMock() + storer := &mock.StorerStub{} + storer.HasCalled = func(key []byte) (bool, error) { + return false, nil + } txi, _ := transaction.NewTxInterceptor( interceptor, txPool, + storer, addrConv, mock.HasherMock{}, keyGen, @@ -384,6 +488,7 @@ func TestTransactionInterceptor_ProcessTxOkValsSameShardShouldWork(t *testing.T) txNewer.Challenge = make([]byte, 0) txNewer.RcvAddr = make([]byte, 0) txNewer.SndAddr = make([]byte, 0) + txNewer.Value = big.NewInt(0) assert.Nil(t, txi.ProcessTx(txNewer, []byte("txHash"))) assert.Equal(t, 1, wasAdded) @@ -395,6 +500,9 @@ func TestTransactionInterceptor_ProcessTxOkValsOtherShardsShouldWork(t *testing. interceptor := &mock.InterceptorStub{} interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { } + interceptor.MarshalizerCalled = func() marshal.Marshalizer { + return &mock.MarshalizerMock{} + } wasAdded := 0 @@ -421,10 +529,12 @@ func TestTransactionInterceptor_ProcessTxOkValsOtherShardsShouldWork(t *testing. multiSharder.ComputeShardForAddressCalled = func(address state.AddressContainer, addressConverter state.AddressConverter) uint32 { return 0 } + storer := &mock.StorerStub{} txi, _ := transaction.NewTxInterceptor( interceptor, txPool, + storer, addrConv, mock.HasherMock{}, keyGen, @@ -435,17 +545,85 @@ func TestTransactionInterceptor_ProcessTxOkValsOtherShardsShouldWork(t *testing. txNewer.Challenge = make([]byte, 0) txNewer.RcvAddr = make([]byte, 0) txNewer.SndAddr = make([]byte, 0) + txNewer.Value = big.NewInt(0) assert.Nil(t, txi.ProcessTx(txNewer, []byte("txHash"))) assert.Equal(t, 0, wasAdded) } +func TestTransactionInterceptor_ProcessTxMarshalizerFailShouldErr(t *testing.T) { + t.Parallel() + + marshalizer := &mock.MarshalizerMock{} + marshalizer.Fail = true + + interceptor := &mock.InterceptorStub{} + interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { + } + interceptor.MarshalizerCalled = func() marshal.Marshalizer { + return marshalizer + } + + txPool := &mock.ShardedDataStub{} + txPool.AddDataCalled = func(key []byte, data interface{}, destShardID uint32) { + if bytes.Equal(mock.HasherMock{}.Compute("txHash"), key) { + } + } + addrConv := &mock.AddressConverterMock{} + + pubKey := &mock.SingleSignPublicKey{} + pubKey.VerifyCalled = func(data []byte, signature []byte) error { + return nil + } + + keyGen := &mock.SingleSignKeyGenMock{} + keyGen.PublicKeyFromByteArrayCalled = func(b []byte) (key crypto.PublicKey, e error) { + return pubKey, nil + } + storer := &mock.StorerStub{} + storer.HasCalled = func(key []byte) (bool, error) { + return false, nil + } + + multiSharder := mock.NewMultipleShardsCoordinatorMock() + multiSharder.CurrentShard = 0 + called := uint32(0) + multiSharder.ComputeShardForAddressCalled = func(address state.AddressContainer, addressConverter state.AddressConverter) uint32 { + defer func() { + called++ + }() + + return called + } + + txi, _ := transaction.NewTxInterceptor( + interceptor, + txPool, + storer, + addrConv, + mock.HasherMock{}, + keyGen, + multiSharder) + + txNewer := transaction.NewInterceptedTransaction() + txNewer.Signature = make([]byte, 0) + txNewer.Challenge = make([]byte, 0) + txNewer.RcvAddr = make([]byte, 0) + txNewer.SndAddr = make([]byte, 0) + + err := txi.ProcessTx(txNewer, []byte("txHash")) + assert.Equal(t, "MarshalizerMock generic error", err.Error()) +} + func TestTransactionInterceptor_ProcessTxOkVals2ShardsShouldWork(t *testing.T) { t.Parallel() interceptor := &mock.InterceptorStub{} interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { } + interceptor.MarshalizerCalled = func() marshal.Marshalizer { + return &mock.MarshalizerMock{} + } wasAdded := 0 @@ -466,6 +644,10 @@ func TestTransactionInterceptor_ProcessTxOkVals2ShardsShouldWork(t *testing.T) { keyGen.PublicKeyFromByteArrayCalled = func(b []byte) (key crypto.PublicKey, e error) { return pubKey, nil } + storer := &mock.StorerStub{} + storer.HasCalled = func(key []byte) (bool, error) { + return false, nil + } multiSharder := mock.NewMultipleShardsCoordinatorMock() multiSharder.CurrentShard = 0 @@ -481,6 +663,7 @@ func TestTransactionInterceptor_ProcessTxOkVals2ShardsShouldWork(t *testing.T) { txi, _ := transaction.NewTxInterceptor( interceptor, txPool, + storer, addrConv, mock.HasherMock{}, keyGen, @@ -491,7 +674,73 @@ func TestTransactionInterceptor_ProcessTxOkVals2ShardsShouldWork(t *testing.T) { txNewer.Challenge = make([]byte, 0) txNewer.RcvAddr = make([]byte, 0) txNewer.SndAddr = make([]byte, 0) + txNewer.Value = big.NewInt(0) assert.Nil(t, txi.ProcessTx(txNewer, []byte("txHash"))) assert.Equal(t, 1, wasAdded) } + +func TestTransactionInterceptor_ProcessTxPresentInStorerShouldNotAdd(t *testing.T) { + t.Parallel() + + interceptor := &mock.InterceptorStub{} + interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { + } + interceptor.MarshalizerCalled = func() marshal.Marshalizer { + return &mock.MarshalizerMock{} + } + + wasAdded := 0 + + txPool := &mock.ShardedDataStub{} + txPool.AddDataCalled = func(key []byte, data interface{}, destShardID uint32) { + if bytes.Equal(mock.HasherMock{}.Compute("txHash"), key) { + wasAdded++ + } + } + addrConv := &mock.AddressConverterMock{} + + pubKey := &mock.SingleSignPublicKey{} + pubKey.VerifyCalled = func(data []byte, signature []byte) error { + return nil + } + + keyGen := &mock.SingleSignKeyGenMock{} + keyGen.PublicKeyFromByteArrayCalled = func(b []byte) (key crypto.PublicKey, e error) { + return pubKey, nil + } + storer := &mock.StorerStub{} + storer.HasCalled = func(key []byte) (bool, error) { + return true, nil + } + + multiSharder := mock.NewMultipleShardsCoordinatorMock() + multiSharder.CurrentShard = 0 + called := uint32(0) + multiSharder.ComputeShardForAddressCalled = func(address state.AddressContainer, addressConverter state.AddressConverter) uint32 { + defer func() { + called++ + }() + + return called + } + + txi, _ := transaction.NewTxInterceptor( + interceptor, + txPool, + storer, + addrConv, + mock.HasherMock{}, + keyGen, + multiSharder) + + txNewer := transaction.NewInterceptedTransaction() + txNewer.Signature = make([]byte, 0) + txNewer.Challenge = make([]byte, 0) + txNewer.RcvAddr = make([]byte, 0) + txNewer.SndAddr = make([]byte, 0) + txNewer.Value = big.NewInt(0) + + assert.Nil(t, txi.ProcessTx(txNewer, []byte("txHash"))) + assert.Equal(t, 0, wasAdded) +} diff --git a/process/transaction/process.go b/process/transaction/process.go index 217409c92d9..2e0f88699ff 100644 --- a/process/transaction/process.go +++ b/process/transaction/process.go @@ -111,12 +111,12 @@ func (txProc *txProcessor) ProcessTransaction(tx *transaction.Transaction, round value := tx.Value - err = txProc.checkTxValues(acntSrc, &value, tx.Nonce) + err = txProc.checkTxValues(acntSrc, value, tx.Nonce) if err != nil { return err } - err = txProc.moveBalances(acntSrc, acntDest, &value) + err = txProc.moveBalances(acntSrc, acntDest, value) if err != nil { return err } @@ -130,12 +130,15 @@ func (txProc *txProcessor) ProcessTransaction(tx *transaction.Transaction, round } // SetBalancesToTrie adds balances to trie -func (txProc *txProcessor) SetBalancesToTrie(accBalance map[string]big.Int) (rootHash []byte, err error) { - +func (txProc *txProcessor) SetBalancesToTrie(accBalance map[string]*big.Int) (rootHash []byte, err error) { if txProc.accounts.JournalLen() != 0 { return nil, process.ErrAccountStateDirty } + if accBalance == nil { + return nil, process.ErrNilValue + } + for i, v := range accBalance { err := txProc.setBalanceToTrie([]byte(i), v) @@ -158,7 +161,7 @@ func (txProc *txProcessor) SetBalancesToTrie(accBalance map[string]big.Int) (roo return rootHash, err } -func (txProc *txProcessor) setBalanceToTrie(addr []byte, balance big.Int) error { +func (txProc *txProcessor) setBalanceToTrie(addr []byte, balance *big.Int) error { if addr == nil { return process.ErrNilValue } @@ -192,19 +195,31 @@ func (txProc *txProcessor) getAddresses(tx *transaction.Transaction) (adrSrc, ad return } -func (txProc *txProcessor) getAccounts(adrSrc, adrDest state.AddressContainer) (acntSrc, acntDest state.JournalizedAccountWrapper, err error) { +func (txProc *txProcessor) getAccounts(adrSrc, adrDest state.AddressContainer) ( + acntSrc state.JournalizedAccountWrapper, + acntDest state.JournalizedAccountWrapper, + err error) { + if adrSrc == nil || adrDest == nil { - err = process.ErrNilValue - return + return nil, nil, process.ErrNilValue + } + + if bytes.Equal(adrSrc.Bytes(), adrDest.Bytes()) { + acnt, err := txProc.accounts.GetJournalizedAccount(adrSrc) + if err != nil { + return nil, nil, err + } + + return acnt, acnt, nil } acntSrc, err = txProc.accounts.GetJournalizedAccount(adrSrc) if err != nil { - return + return nil, nil, err } acntDest, err = txProc.accounts.GetJournalizedAccount(adrDest) - return + return acntSrc, acntDest, err } func (txProc *txProcessor) callSCHandler(tx *transaction.Transaction) error { @@ -216,13 +231,14 @@ func (txProc *txProcessor) callSCHandler(tx *transaction.Transaction) error { } func (txProc *txProcessor) checkTxValues(acntSrc state.JournalizedAccountWrapper, value *big.Int, nonce uint64) error { - if acntSrc.BaseAccount().Nonce < nonce { - return process.ErrHigherNonceInTransaction - } - - if acntSrc.BaseAccount().Nonce > nonce { - return process.ErrLowerNonceInTransaction - } + //TODO: undo this for nonce checking and un-skip tests + //if acntSrc.BaseAccount().Nonce < nonce { + // return process.ErrHigherNonceInTransaction + //} + // + //if acntSrc.BaseAccount().Nonce > nonce { + // return process.ErrLowerNonceInTransaction + //} //negative balance test is done in transaction interceptor as the transaction is invalid and thus shall not disseminate @@ -237,11 +253,11 @@ func (txProc *txProcessor) moveBalances(acntSrc, acntDest state.JournalizedAccou operation1 := big.NewInt(0) operation2 := big.NewInt(0) - err := acntSrc.SetBalanceWithJournal(*operation1.Sub(&acntSrc.BaseAccount().Balance, value)) + err := acntSrc.SetBalanceWithJournal(operation1.Sub(acntSrc.BaseAccount().Balance, value)) if err != nil { return err } - err = acntDest.SetBalanceWithJournal(*operation2.Add(&acntDest.BaseAccount().Balance, value)) + err = acntDest.SetBalanceWithJournal(operation2.Add(acntDest.BaseAccount().Balance, value)) if err != nil { return err } diff --git a/process/transaction/process_test.go b/process/transaction/process_test.go index a814d8ec614..2aa5eae938e 100644 --- a/process/transaction/process_test.go +++ b/process/transaction/process_test.go @@ -19,61 +19,71 @@ import ( func TestNewTxProcessor_NilAccountsShouldErr(t *testing.T) { t.Parallel() - _, err := txproc.NewTxProcessor( + txProc, err := txproc.NewTxProcessor( nil, mock.HasherMock{}, &mock.AddressConverterMock{}, &mock.MarshalizerMock{}, ) + assert.Equal(t, process.ErrNilAccountsAdapter, err) + assert.Nil(t, txProc) } func TestNewTxProcessor_NilHasherShouldErr(t *testing.T) { t.Parallel() - _, err := txproc.NewTxProcessor( + txProc, err := txproc.NewTxProcessor( &mock.AccountsStub{}, nil, &mock.AddressConverterMock{}, &mock.MarshalizerMock{}, ) + assert.Equal(t, process.ErrNilHasher, err) + assert.Nil(t, txProc) } func TestNewTxProcessor_NilAddressConverterMockShouldErr(t *testing.T) { t.Parallel() - _, err := txproc.NewTxProcessor( + txProc, err := txproc.NewTxProcessor( &mock.AccountsStub{}, mock.HasherMock{}, nil, &mock.MarshalizerMock{}, ) + assert.Equal(t, process.ErrNilAddressConverter, err) + assert.Nil(t, txProc) } func TestNewTxProcessor_NilMarshalizerMockShouldErr(t *testing.T) { t.Parallel() - _, err := txproc.NewTxProcessor( + txProc, err := txproc.NewTxProcessor( &mock.AccountsStub{}, mock.HasherMock{}, &mock.AddressConverterMock{}, nil, ) + assert.Equal(t, process.ErrNilMarshalizer, err) + assert.Nil(t, txProc) } func TestNewTxProcessor_OkValsShouldWork(t *testing.T) { t.Parallel() - _, err := txproc.NewTxProcessor( + txProc, err := txproc.NewTxProcessor( &mock.AccountsStub{}, mock.HasherMock{}, &mock.AddressConverterMock{}, &mock.MarshalizerMock{}, ) + assert.Nil(t, err) + assert.NotNil(t, txProc) } //------- SChandler @@ -169,7 +179,7 @@ func TestTxProcessor_GetAccountsOkValsShouldWork(t *testing.T) { adr2 := mock.NewAddressMock([]byte{67}) acnt1 := mock.NewJournalizedAccountWrapMock(adr1) - acnt2 := mock.NewJournalizedAccountWrapMock(adr1) + acnt2 := mock.NewJournalizedAccountWrapMock(adr2) accounts.GetJournalizedAccountCalled = func(addressContainer state.AddressContainer) (state.JournalizedAccountWrapper, error) { if addressContainer == adr1 { @@ -196,6 +206,39 @@ func TestTxProcessor_GetAccountsOkValsShouldWork(t *testing.T) { assert.Equal(t, acnt2, a2) } +func TestTxProcessor_GetSameAccountShouldWork(t *testing.T) { + accounts := mock.AccountsStub{} + + adr1 := mock.NewAddressMock([]byte{65}) + adr2 := mock.NewAddressMock([]byte{65}) + + acnt1 := mock.NewJournalizedAccountWrapMock(adr1) + acnt2 := mock.NewJournalizedAccountWrapMock(adr2) + + accounts.GetJournalizedAccountCalled = func(addressContainer state.AddressContainer) (state.JournalizedAccountWrapper, error) { + if addressContainer == adr1 { + return acnt1, nil + } + + if addressContainer == adr2 { + return acnt2, nil + } + + return nil, errors.New("failure") + } + + execTx, _ := txproc.NewTxProcessor( + &accounts, + mock.HasherMock{}, + &mock.AddressConverterMock{}, + &mock.MarshalizerMock{}, + ) + + a1, a2, err := execTx.GetAccounts(adr1, adr1) + assert.Nil(t, err) + assert.True(t, a1 == a2) +} + //------- callSCHandler func TestTxProcessor_NoCallSCHandlerShouldErr(t *testing.T) { @@ -233,6 +276,7 @@ func TestTxProcessor_WithCallSCHandlerShouldWork(t *testing.T) { //------- checkTxValues func TestTxProcessor_CheckTxValuesHigherNonceShouldErr(t *testing.T) { + t.Skip() adr1 := mock.NewAddressMock([]byte{65}) acnt1 := mock.NewJournalizedAccountWrapMock(adr1) @@ -250,6 +294,7 @@ func TestTxProcessor_CheckTxValuesHigherNonceShouldErr(t *testing.T) { } func TestTxProcessor_CheckTxValuesLowerNonceShouldErr(t *testing.T) { + t.Skip() adr1 := mock.NewAddressMock([]byte{65}) acnt1 := mock.NewJournalizedAccountWrapMock(adr1) @@ -277,7 +322,7 @@ func TestTxProcessor_CheckTxValuesInsufficientFundsShouldErr(t *testing.T) { &mock.MarshalizerMock{}, ) - acnt1.BaseAccount().Balance = *big.NewInt(67) + acnt1.BaseAccount().Balance = big.NewInt(67) err := execTx.CheckTxValues(acnt1, big.NewInt(68), 0) assert.Equal(t, process.ErrInsufficientFunds, err) @@ -294,7 +339,7 @@ func TestTxProcessor_CheckTxValuesOkValsShouldErr(t *testing.T) { &mock.MarshalizerMock{}, ) - acnt1.BaseAccount().Balance = *big.NewInt(67) + acnt1.BaseAccount().Balance = big.NewInt(67) err := execTx.CheckTxValues(acnt1, big.NewInt(67), 0) assert.Nil(t, err) @@ -356,13 +401,35 @@ func TestTxProcessor_MoveBalancesOkValsShouldWork(t *testing.T) { &mock.MarshalizerMock{}, ) - acntSrc.Balance = *big.NewInt(64) - acntDest.Balance = *big.NewInt(31) + acntSrc.Balance = big.NewInt(64) + acntDest.Balance = big.NewInt(31) err := execTx.MoveBalances(acntSrc, acntDest, big.NewInt(14)) assert.Nil(t, err) - assert.Equal(t, *big.NewInt(50), acntSrc.Balance) - assert.Equal(t, *big.NewInt(45), acntDest.Balance) + assert.Equal(t, big.NewInt(50), acntSrc.Balance) + assert.Equal(t, big.NewInt(45), acntDest.Balance) + +} + +func TestTxProcessor_MoveBalancesToSelfOkValsShouldWork(t *testing.T) { + adrSrc := mock.NewAddressMock([]byte{65}) + acntSrc := mock.NewJournalizedAccountWrapMock(adrSrc) + + acntDest := acntSrc + + execTx, _ := txproc.NewTxProcessor( + &mock.AccountsStub{}, + mock.HasherMock{}, + &mock.AddressConverterMock{}, + &mock.MarshalizerMock{}, + ) + + acntSrc.Balance = big.NewInt(64) + + err := execTx.MoveBalances(acntSrc, acntDest, big.NewInt(1)) + assert.Nil(t, err) + assert.Equal(t, big.NewInt(64), acntSrc.Balance) + assert.Equal(t, big.NewInt(64), acntDest.Balance) } @@ -425,7 +492,7 @@ func TestTxProcessor_ProcessTransactionMalfunctionAccountsShouldErr(t *testing.T tx.Nonce = 1 tx.SndAddr = []byte("SRC") tx.RcvAddr = []byte("DEST") - tx.Value = *big.NewInt(45) + tx.Value = big.NewInt(45) accounts.GetJournalizedAccountCalled = func(addressContainer state.AddressContainer) (state.JournalizedAccountWrapper, error) { return nil, errors.New("failure") @@ -455,7 +522,7 @@ func TestTxProcessor_ProcessTransactionScTxShouldWork(t *testing.T) { tx.Nonce = 1 tx.SndAddr = []byte("SRC") tx.RcvAddr = []byte("DEST") - tx.Value = *big.NewInt(45) + tx.Value = big.NewInt(45) acntSrc := mock.NewJournalizedAccountWrapMock(mock.NewAddressMock(tx.SndAddr)) acntDest := mock.NewJournalizedAccountWrapMock(mock.NewAddressMock(tx.RcvAddr)) @@ -493,7 +560,7 @@ func TestTxProcessor_ProcessTransactionRegisterTxShouldWork(t *testing.T) { NodePubKey: []byte("b"), RoundIndex: 6, Action: state.ArUnregister, - Stake: *big.NewInt(45), + Stake: big.NewInt(45), } marshalizer := mock.MarshalizerMock{} @@ -504,7 +571,7 @@ func TestTxProcessor_ProcessTransactionRegisterTxShouldWork(t *testing.T) { tx.Nonce = 0 tx.SndAddr = []byte("SRC") tx.RcvAddr = state.RegistrationAddress.Bytes() - tx.Value = *big.NewInt(0) + tx.Value = big.NewInt(0) tx.Data = buff acntSrc := mock.NewJournalizedAccountWrapMock(mock.NewAddressMock(tx.SndAddr)) @@ -535,7 +602,7 @@ func TestTxProcessor_ProcessTransactionRegisterTxShouldWork(t *testing.T) { err = execTx.ProcessTransaction(&tx, 1) assert.Nil(t, err) assert.True(t, wasCalledAppend) - assert.Equal(t, *big.NewInt(45), data2.Stake) + assert.Equal(t, big.NewInt(45), data2.Stake) assert.Equal(t, []byte("SRC"), data2.OriginatorPubKey) assert.Equal(t, []byte("b"), data2.NodePubKey) assert.Equal(t, int32(1), data2.RoundIndex) @@ -544,6 +611,7 @@ func TestTxProcessor_ProcessTransactionRegisterTxShouldWork(t *testing.T) { } func TestTxProcessor_ProcessCheckNotPassShouldErr(t *testing.T) { + t.Skip() accounts := &mock.AccountsStub{} execTx, _ := txproc.NewTxProcessor( @@ -558,7 +626,7 @@ func TestTxProcessor_ProcessCheckNotPassShouldErr(t *testing.T) { tx.Nonce = 1 tx.SndAddr = []byte("SRC") tx.RcvAddr = []byte("DEST") - tx.Value = *big.NewInt(45) + tx.Value = big.NewInt(45) acntSrc := mock.NewJournalizedAccountWrapMock(mock.NewAddressMock(tx.SndAddr)) acntDest := mock.NewJournalizedAccountWrapMock(mock.NewAddressMock(tx.RcvAddr)) @@ -594,7 +662,7 @@ func TestTxProcessor_ProcessMoveBalancesFailShouldErr(t *testing.T) { tx.Nonce = 0 tx.SndAddr = []byte("SRC") tx.RcvAddr = []byte("DEST") - tx.Value = *big.NewInt(0) + tx.Value = big.NewInt(0) acntSrc := mock.NewJournalizedAccountWrapMock(mock.NewAddressMock(tx.SndAddr)) acntSrc.Fail = true @@ -616,7 +684,7 @@ func TestTxProcessor_ProcessMoveBalancesFailShouldErr(t *testing.T) { assert.NotNil(t, err) } -func TestTxProcessor_ProcessOkValsShouldWork(t *testing.T) { +func TestTxProcessor_ProcessIncreaseNonceFailShouldErr(t *testing.T) { accounts := &mock.AccountsStub{} execTx, _ := txproc.NewTxProcessor( @@ -627,17 +695,53 @@ func TestTxProcessor_ProcessOkValsShouldWork(t *testing.T) { ) //these values will trigger ErrHigherNonceInTransaction + tx := transaction.Transaction{} + tx.Nonce = 0 + tx.SndAddr = []byte("SRC") + tx.RcvAddr = []byte("DEST") + tx.Value = big.NewInt(0) + + acntSrc := mock.NewJournalizedAccountWrapMock(mock.NewAddressMock(tx.SndAddr)) + acntSrc.FailSetNonceWithJurnal = true + acntDest := mock.NewJournalizedAccountWrapMock(mock.NewAddressMock(tx.RcvAddr)) + + accounts.GetJournalizedAccountCalled = func(addressContainer state.AddressContainer) (state.JournalizedAccountWrapper, error) { + if bytes.Equal(addressContainer.Bytes(), tx.SndAddr) { + return acntSrc, nil + } + + if bytes.Equal(addressContainer.Bytes(), tx.RcvAddr) { + return acntDest, nil + } + + return nil, errors.New("failure") + } + + err := execTx.ProcessTransaction(&tx, 4) + assert.Equal(t, "failure setting nonce", err.Error()) +} + +func TestTxProcessor_ProcessOkValsShouldWork(t *testing.T) { + accounts := &mock.AccountsStub{} + + execTx, _ := txproc.NewTxProcessor( + accounts, + mock.HasherMock{}, + &mock.AddressConverterMock{}, + &mock.MarshalizerMock{}, + ) + tx := transaction.Transaction{} tx.Nonce = 4 tx.SndAddr = []byte("SRC") tx.RcvAddr = []byte("DEST") - tx.Value = *big.NewInt(61) + tx.Value = big.NewInt(61) acntSrc := mock.NewJournalizedAccountWrapMock(mock.NewAddressMock(tx.SndAddr)) acntSrc.Nonce = 4 - acntSrc.Balance = *big.NewInt(90) + acntSrc.Balance = big.NewInt(90) acntDest := mock.NewJournalizedAccountWrapMock(mock.NewAddressMock(tx.RcvAddr)) - acntDest.Balance = *big.NewInt(10) + acntDest.Balance = big.NewInt(10) accounts.GetJournalizedAccountCalled = func(addressContainer state.AddressContainer) (state.JournalizedAccountWrapper, error) { if bytes.Equal(addressContainer.Bytes(), tx.SndAddr) { @@ -654,6 +758,258 @@ func TestTxProcessor_ProcessOkValsShouldWork(t *testing.T) { err := execTx.ProcessTransaction(&tx, 4) assert.Nil(t, err) assert.Equal(t, uint64(5), acntSrc.Nonce) - assert.Equal(t, *big.NewInt(29), acntSrc.Balance) - assert.Equal(t, *big.NewInt(71), acntDest.Balance) + assert.Equal(t, big.NewInt(29), acntSrc.Balance) + assert.Equal(t, big.NewInt(71), acntDest.Balance) +} + +//------- SetBalancesToTrie + +func TestTxProcessor_SetBalancesToTrieDirtyAccountsShouldErr(t *testing.T) { + t.Parallel() + + accounts := &mock.AccountsStub{ + JournalLenCalled: func() int { + return 1 + }, + } + + txProc, _ := txproc.NewTxProcessor( + accounts, + mock.HasherMock{}, + &mock.AddressConverterMock{}, + &mock.MarshalizerMock{}, + ) + + hash, err := txProc.SetBalancesToTrie(make(map[string]*big.Int)) + + assert.Nil(t, hash) + assert.Equal(t, process.ErrAccountStateDirty, err) +} + +func TestTxProcessor_SetBalancesToTrieNilMapShouldErr(t *testing.T) { + t.Parallel() + + accounts := &mock.AccountsStub{ + JournalLenCalled: func() int { + return 0 + }, + } + + txProc, _ := txproc.NewTxProcessor( + accounts, + mock.HasherMock{}, + &mock.AddressConverterMock{}, + &mock.MarshalizerMock{}, + ) + + hash, err := txProc.SetBalancesToTrie(nil) + + assert.Nil(t, hash) + assert.Equal(t, process.ErrNilValue, err) +} + +func TestTxProcessor_SetBalancesToTrieCommitFailsShouldRevert(t *testing.T) { + t.Parallel() + + adr1 := []byte("accnt1") + adr2 := []byte("accnt2") + + accnt1 := mock.NewJournalizedAccountWrapMock(mock.NewAddressMock(adr1)) + accnt2 := mock.NewJournalizedAccountWrapMock(mock.NewAddressMock(adr2)) + + val1 := big.NewInt(10) + val2 := big.NewInt(20) + + revertCalled := false + errCommit := errors.New("should err") + + accounts := &mock.AccountsStub{ + JournalLenCalled: func() int { + return 0 + }, + GetJournalizedAccountCalled: func(addressContainer state.AddressContainer) (wrapper state.JournalizedAccountWrapper, e error) { + if bytes.Equal(addressContainer.Bytes(), adr1) { + return accnt1, nil + } + + if bytes.Equal(addressContainer.Bytes(), adr2) { + return accnt2, nil + } + + return nil, errors.New("should have not gone through here") + }, + CommitCalled: func() (i []byte, e error) { + return nil, errCommit + }, + RevertToSnapshotCalled: func(snapshot int) error { + revertCalled = true + return nil + }, + } + + txProc, _ := txproc.NewTxProcessor( + accounts, + mock.HasherMock{}, + &mock.AddressConverterMock{}, + &mock.MarshalizerMock{}, + ) + + m := make(map[string]*big.Int) + m[string(adr1)] = val1 + m[string(adr2)] = val2 + + hash, err := txProc.SetBalancesToTrie(m) + + assert.Nil(t, hash) + assert.Equal(t, errCommit, err) + assert.True(t, revertCalled) +} + +func TestTxProcessor_SetBalancesToTrieNilAddressShouldErr(t *testing.T) { + t.Parallel() + + adr1 := []byte("accnt1") + adr2 := []byte("accnt2") + + accnt1 := mock.NewJournalizedAccountWrapMock(mock.NewAddressMock(adr1)) + accnt2 := mock.NewJournalizedAccountWrapMock(mock.NewAddressMock(adr2)) + + val1 := big.NewInt(10) + val2 := big.NewInt(20) + + rootHash := []byte("resulted root hash") + + accounts := &mock.AccountsStub{ + JournalLenCalled: func() int { + return 0 + }, + GetJournalizedAccountCalled: func(addressContainer state.AddressContainer) (wrapper state.JournalizedAccountWrapper, e error) { + if bytes.Equal(addressContainer.Bytes(), adr1) { + return accnt1, nil + } + + if bytes.Equal(addressContainer.Bytes(), adr2) { + return accnt2, nil + } + + return nil, errors.New("should have not gone through here") + }, + CommitCalled: func() (i []byte, e error) { + return rootHash, nil + }, + } + + txProc, _ := txproc.NewTxProcessor( + accounts, + mock.HasherMock{}, + &mock.AddressConverterMock{}, + &mock.MarshalizerMock{}, + ) + + m := make(map[string]*big.Int) + m[string(adr1)] = val1 + m[string(adr2)] = val2 + + hash, err := txProc.SetBalancesToTrie(m) + + assert.Equal(t, rootHash, hash) + assert.Nil(t, err) + assert.Equal(t, val1, accnt1.Balance) + assert.Equal(t, val2, accnt2.Balance) +} + +func TestTxProcessor_SetBalancesToTrieAccountsFailShouldErr(t *testing.T) { + t.Parallel() + + adr1 := []byte("accnt1") + adr2 := []byte("accnt2") + + val1 := big.NewInt(10) + val2 := big.NewInt(20) + + rootHash := []byte("resulted root hash") + + errAccounts := errors.New("accounts error") + + accounts := &mock.AccountsStub{ + JournalLenCalled: func() int { + return 0 + }, + GetJournalizedAccountCalled: func(addressContainer state.AddressContainer) (wrapper state.JournalizedAccountWrapper, e error) { + + return nil, errAccounts + }, + CommitCalled: func() (i []byte, e error) { + return rootHash, nil + }, + } + + txProc, _ := txproc.NewTxProcessor( + accounts, + mock.HasherMock{}, + &mock.AddressConverterMock{}, + &mock.MarshalizerMock{}, + ) + + m := make(map[string]*big.Int) + m[string(adr1)] = val1 + m[string(adr2)] = val2 + + hash, err := txProc.SetBalancesToTrie(m) + + assert.Nil(t, hash) + assert.Equal(t, errAccounts, err) +} + +func TestTxProcessor_SetBalancesToTrieOkValsShouldWork(t *testing.T) { + t.Parallel() + + adr1 := []byte("accnt1") + adr2 := []byte("accnt2") + + accnt1 := mock.NewJournalizedAccountWrapMock(mock.NewAddressMock(adr1)) + accnt2 := mock.NewJournalizedAccountWrapMock(mock.NewAddressMock(adr2)) + + val1 := big.NewInt(10) + val2 := big.NewInt(20) + + rootHash := []byte("resulted root hash") + + accounts := &mock.AccountsStub{ + JournalLenCalled: func() int { + return 0 + }, + GetJournalizedAccountCalled: func(addressContainer state.AddressContainer) (wrapper state.JournalizedAccountWrapper, e error) { + if bytes.Equal(addressContainer.Bytes(), adr1) { + return accnt1, nil + } + + if bytes.Equal(addressContainer.Bytes(), adr2) { + return accnt2, nil + } + + return nil, errors.New("should have not gone through here") + }, + CommitCalled: func() (i []byte, e error) { + return rootHash, nil + }, + } + + txProc, _ := txproc.NewTxProcessor( + accounts, + mock.HasherMock{}, + &mock.AddressConverterMock{}, + &mock.MarshalizerMock{}, + ) + + m := make(map[string]*big.Int) + m[string(adr1)] = val1 + m[string(adr2)] = val2 + + hash, err := txProc.SetBalancesToTrie(m) + + assert.Equal(t, rootHash, hash) + assert.Nil(t, err) + assert.Equal(t, val1, accnt1.Balance) + assert.Equal(t, val2, accnt2.Balance) } diff --git a/process/transaction/resolver.go b/process/transaction/resolver.go index f967ab13b82..32457a21fa8 100644 --- a/process/transaction/resolver.go +++ b/process/transaction/resolver.go @@ -7,8 +7,8 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/storage" ) -// txResolver is a wrapper over Resolver that is specialized in resolving transaction requests -type txResolver struct { +// TxResolver is a wrapper over Resolver that is specialized in resolving transaction requests +type TxResolver struct { process.Resolver txPool data.ShardedDataCacherNotifier txStorage storage.Storer @@ -21,7 +21,7 @@ func NewTxResolver( txPool data.ShardedDataCacherNotifier, txStorage storage.Storer, marshalizer marshal.Marshalizer, -) (*txResolver, error) { +) (*TxResolver, error) { if resolver == nil { return nil, process.ErrNilResolver @@ -39,7 +39,7 @@ func NewTxResolver( return nil, process.ErrNilMarshalizer } - txResolver := &txResolver{ + txResolver := &TxResolver{ Resolver: resolver, txPool: txPool, txStorage: txStorage, @@ -50,7 +50,7 @@ func NewTxResolver( return txResolver, nil } -func (txRes *txResolver) resolveTxRequest(rd process.RequestData) ([]byte, error) { +func (txRes *TxResolver) resolveTxRequest(rd process.RequestData) ([]byte, error) { if rd.Type != process.HashType { return nil, process.ErrResolveNotHashType } @@ -59,24 +59,21 @@ func (txRes *txResolver) resolveTxRequest(rd process.RequestData) ([]byte, error return nil, process.ErrNilValue } - dataMap := txRes.txPool.SearchData(rd.Value) - if len(dataMap) > 0 { - for _, v := range dataMap { - //since there might be multiple entries, it shall return the first one that it finds - buff, err := txRes.marshalizer.Marshal(v) - if err != nil { - return nil, err - } + value, ok := txRes.txPool.SearchFirstData(rd.Value) + if !ok { + return txRes.txStorage.Get(rd.Value) + } - return buff, nil - } + buff, err := txRes.marshalizer.Marshal(value) + if err != nil { + return nil, err } - return txRes.txStorage.Get(rd.Value) + return buff, nil } // RequestTransactionFromHash requests a transaction from other peers having input the tx hash -func (txRes *txResolver) RequestTransactionFromHash(hash []byte) error { +func (txRes *TxResolver) RequestTransactionFromHash(hash []byte) error { return txRes.RequestData(process.RequestData{ Type: process.HashType, Value: hash, diff --git a/process/transaction/resolver_test.go b/process/transaction/resolver_test.go index 9454e5b0f08..7a3b5d078d3 100644 --- a/process/transaction/resolver_test.go +++ b/process/transaction/resolver_test.go @@ -144,14 +144,12 @@ func TestTxResolver_ResolveTxRequestFoundInTxPoolShouldRetVal(t *testing.T) { assert.Nil(t, err) txPool := &mock.ShardedDataStub{} - txPool.SearchDataCalled = func(key []byte) (shardValuesPairs map[uint32]interface{}) { + txPool.SearchFirstDataCalled = func(key []byte) (value interface{}, ok bool) { if bytes.Equal([]byte("aaa"), key) { - return map[uint32]interface{}{ - 0: "value", - } + return "value", true } - return nil + return nil, false } txRes, _ := NewTxResolver( @@ -182,14 +180,12 @@ func TestTxResolver_ResolveTxRequestFoundInTxPoolMarshalizerFailShouldRetNilAndE } txPool := &mock.ShardedDataStub{} - txPool.SearchDataCalled = func(key []byte) (shardValuesPairs map[uint32]interface{}) { + txPool.SearchFirstDataCalled = func(key []byte) (value interface{}, ok bool) { if bytes.Equal([]byte("aaa"), key) { - return map[uint32]interface{}{ - 0: "value", - } + return "value", true } - return nil + return nil, false } txRes, _ := NewTxResolver( @@ -216,9 +212,9 @@ func TestTxResolver_ResolveTxRequestFoundInTxStorageShouldRetValAndError(t *test marshalizer := &mock.MarshalizerMock{} txPool := &mock.ShardedDataStub{} - txPool.SearchDataCalled = func(key []byte) (shardValuesPairs map[uint32]interface{}) { + txPool.SearchFirstDataCalled = func(key []byte) (value interface{}, ok bool) { //not found in txPool - return nil + return nil, false } expectedBuff := []byte("bbb") @@ -255,9 +251,9 @@ func TestTxResolver_ResolveTxRequestFoundInTxStorageCheckRetError(t *testing.T) marshalizer := &mock.MarshalizerMock{} txPool := &mock.ShardedDataStub{} - txPool.SearchDataCalled = func(key []byte) (shardValuesPairs map[uint32]interface{}) { + txPool.SearchFirstDataCalled = func(key []byte) (value interface{}, ok bool) { //not found in txPool - return nil + return nil, false } expectedBuff := []byte("bbb") diff --git a/storage/lrucache/lrucache.go b/storage/lrucache/lrucache.go index 60536b1dbf8..8255e449ca2 100644 --- a/storage/lrucache/lrucache.go +++ b/storage/lrucache/lrucache.go @@ -43,13 +43,11 @@ func (c *LRUCache) Clear() { // Put adds a value to the cache. Returns true if an eviction occurred. func (c *LRUCache) Put(key []byte, value interface{}) (evicted bool) { - found, evicted := c.cache.ContainsOrAdd(string(key), value) + evicted = c.cache.Add(string(key), value) - if !found { - c.callAddedDataHandlers(key) - } + c.callAddedDataHandlers(key) - return + return evicted } // RegisterHandler registers a new handler to be called when a new data is added diff --git a/storage/lrucache/lrucache_test.go b/storage/lrucache/lrucache_test.go index 00b1178e1e2..7060bc7cba4 100644 --- a/storage/lrucache/lrucache_test.go +++ b/storage/lrucache/lrucache_test.go @@ -13,7 +13,7 @@ import ( var timeoutWaitForWaitGroups = time.Second * 2 -func TestAddNotPresent(t *testing.T) { +func TestLRUCache_PutNotPresent(t *testing.T) { key, val := []byte("key"), []byte("value") c, err := lrucache.NewCache(10) @@ -26,10 +26,10 @@ func TestAddNotPresent(t *testing.T) { c.Put(key, val) l = c.Len() - assert.Equal(t, l, 1, "cachhe size expected 1 but found %d", l) + assert.Equal(t, l, 1, "cache size expected 1 but found %d", l) } -func TestAddPresent(t *testing.T) { +func TestLRUCache_PutPresent(t *testing.T) { key, val := []byte("key"), []byte("value") c, err := lrucache.NewCache(10) @@ -42,7 +42,25 @@ func TestAddPresent(t *testing.T) { assert.Equal(t, l, 1, "cache size expected 1 but found %d", l) } -func TestGetNotPresent(t *testing.T) { +func TestLRUCache_PutPresentRewrite(t *testing.T) { + key := []byte("key") + val1 := []byte("value1") + val2 := []byte("value2") + c, err := lrucache.NewCache(10) + + assert.Nil(t, err, "no error expected but got %s", err) + + c.Put(key, val1) + c.Put(key, val2) + + l := c.Len() + assert.Equal(t, l, 1, "cache size expected 1 but found %d", l) + recoveredVal, has := c.Get(key) + assert.True(t, has) + assert.Equal(t, val2, recoveredVal) +} + +func TestLRUCache_GetNotPresent(t *testing.T) { key := []byte("key1") c, err := lrucache.NewCache(10) @@ -53,7 +71,7 @@ func TestGetNotPresent(t *testing.T) { assert.False(t, ok, "value %s not expected to be found", v) } -func TestGetPresent(t *testing.T) { +func TestLRUCache_GetPresent(t *testing.T) { key, val := []byte("key2"), []byte("value2") c, err := lrucache.NewCache(10) @@ -67,7 +85,7 @@ func TestGetPresent(t *testing.T) { assert.Equal(t, val, v) } -func TestHasNotPresent(t *testing.T) { +func TestLRUCache_HasNotPresent(t *testing.T) { key := []byte("key3") c, err := lrucache.NewCache(10) @@ -78,7 +96,7 @@ func TestHasNotPresent(t *testing.T) { assert.False(t, found, "key %s not expected to be found", key) } -func TestHasPresent(t *testing.T) { +func TestLRUCache_HasPresent(t *testing.T) { key, val := []byte("key4"), []byte("value4") c, err := lrucache.NewCache(10) @@ -91,7 +109,7 @@ func TestHasPresent(t *testing.T) { assert.True(t, found, "value expected but not found") } -func TestPeekNotPresent(t *testing.T) { +func TestLRUCache_PeekNotPresent(t *testing.T) { key := []byte("key5") c, err := lrucache.NewCache(10) @@ -102,7 +120,7 @@ func TestPeekNotPresent(t *testing.T) { assert.False(t, ok, "not expected to find key %s", key) } -func TestPeekPresent(t *testing.T) { +func TestLRUCache_PeekPresent(t *testing.T) { key, val := []byte("key6"), []byte("value6") c, err := lrucache.NewCache(10) @@ -115,7 +133,7 @@ func TestPeekPresent(t *testing.T) { assert.Equal(t, val, v, "expected to find %s but found %s", val, v) } -func TestHasOrAddNotPresent(t *testing.T) { +func TestLRUCache_HasOrAddNotPresent(t *testing.T) { key, val := []byte("key7"), []byte("value7") c, err := lrucache.NewCache(10) @@ -132,7 +150,7 @@ func TestHasOrAddNotPresent(t *testing.T) { assert.Equal(t, val, v, "expected to find %s but found %s", val, v) } -func TestHasOrAddPresent(t *testing.T) { +func TestLRUCache_HasOrAddPresent(t *testing.T) { key, val := []byte("key8"), []byte("value8") c, err := lrucache.NewCache(10) @@ -149,7 +167,7 @@ func TestHasOrAddPresent(t *testing.T) { assert.Equal(t, val, v, "expected to find %s but found %s", val, v) } -func TestRemoveNotPresent(t *testing.T) { +func TestLRUCache_RemoveNotPresent(t *testing.T) { key := []byte("key9") c, err := lrucache.NewCache(10) @@ -165,7 +183,7 @@ func TestRemoveNotPresent(t *testing.T) { assert.False(t, found, "not expected to find key %s", key) } -func TestRemovePresent(t *testing.T) { +func TestLRUCache_RemovePresent(t *testing.T) { key, val := []byte("key10"), []byte("value10") c, err := lrucache.NewCache(10) @@ -182,7 +200,7 @@ func TestRemovePresent(t *testing.T) { assert.False(t, found, "not expected to find key %s", key) } -func TestRemoveOldestEmpty(t *testing.T) { +func TestLRUCache_RemoveOldestEmpty(t *testing.T) { c, err := lrucache.NewCache(10) assert.Nil(t, err, "no error expected but got %s", err) @@ -198,7 +216,7 @@ func TestRemoveOldestEmpty(t *testing.T) { assert.Zero(t, l, "expected size 0 but got %d", l) } -func TestRemoveOldestPresent(t *testing.T) { +func TestLRUCache_RemoveOldestPresent(t *testing.T) { c, err := lrucache.NewCache(10) assert.Nil(t, err, "no error expected but got %s", err) @@ -217,7 +235,7 @@ func TestRemoveOldestPresent(t *testing.T) { assert.False(t, found, "not expected to find key key0") } -func TestKeys(t *testing.T) { +func TestLRUCache_Keys(t *testing.T) { c, err := lrucache.NewCache(10) assert.Nil(t, err, "no error expected but got %s", err) @@ -233,7 +251,7 @@ func TestKeys(t *testing.T) { assert.Equal(t, 10, len(keys), "expected cache size 10 but current size %d", len(keys)) } -func TestLen(t *testing.T) { +func TestLRUCache_Len(t *testing.T) { c, err := lrucache.NewCache(10) assert.Nil(t, err, "no error expected but got %s", err) @@ -248,7 +266,7 @@ func TestLen(t *testing.T) { assert.Equal(t, 10, l, "expected cache size 10 but current size %d", l) } -func TestClear(t *testing.T) { +func TestLRUCache_Clear(t *testing.T) { c, err := lrucache.NewCache(10) assert.Nil(t, err, "no error expected but got %s", err) @@ -268,7 +286,7 @@ func TestClear(t *testing.T) { assert.Zero(t, l, "expected size 0, got %d", l) } -func TestCacherRegisterAddedDataHandlerNilHandlerShouldIgnore(t *testing.T) { +func TestLRUCache_CacherRegisterAddedDataHandlerNilHandlerShouldIgnore(t *testing.T) { t.Parallel() c, err := lrucache.NewCache(100) @@ -278,7 +296,7 @@ func TestCacherRegisterAddedDataHandlerNilHandlerShouldIgnore(t *testing.T) { assert.Equal(t, 0, len(c.AddedDataHandlers())) } -func TestCacherRegisterPutAddedDataHandlerShouldWork(t *testing.T) { +func TestLRUCache_CacherRegisterPutAddedDataHandlerShouldWork(t *testing.T) { t.Parallel() wg := sync.WaitGroup{} @@ -313,7 +331,7 @@ func TestCacherRegisterPutAddedDataHandlerShouldWork(t *testing.T) { assert.Equal(t, 1, len(c.AddedDataHandlers())) } -func TestCacherRegisterHasOrAddAddedDataHandlerShouldWork(t *testing.T) { +func TestLRUCache_CacherRegisterHasOrAddAddedDataHandlerShouldWork(t *testing.T) { t.Parallel() wg := sync.WaitGroup{} @@ -348,41 +366,7 @@ func TestCacherRegisterHasOrAddAddedDataHandlerShouldWork(t *testing.T) { assert.Equal(t, 1, len(c.AddedDataHandlers())) } -func TestCacherRegisterPutAddedDataHandlerNotAddedShouldNotCall(t *testing.T) { - t.Parallel() - - wg := sync.WaitGroup{} - wg.Add(1) - chDone := make(chan bool, 0) - - f := func(key []byte) { - wg.Done() - } - - go func() { - wg.Wait() - chDone <- true - }() - - c, err := lrucache.NewCache(100) - assert.Nil(t, err) - //first add, no call - c.Put([]byte("aaaa"), "bbbb") - c.RegisterHandler(f) - //second add, should not call as the data was found - c.Put([]byte("aaaa"), "bbbb") - - select { - case <-chDone: - assert.Fail(t, "should have not been called") - return - case <-time.After(timeoutWaitForWaitGroups): - } - - assert.Equal(t, 1, len(c.AddedDataHandlers())) -} - -func TestCacherRegisterHasOrAddAddedDataHandlerNotAddedShouldNotCall(t *testing.T) { +func TestLRUCache_CacherRegisterHasOrAddAddedDataHandlerNotAddedShouldNotCall(t *testing.T) { t.Parallel() wg := sync.WaitGroup{} diff --git a/storage/memorydb/memorydb.go b/storage/memorydb/memorydb.go index 1bd17f608eb..4f4a78f7efa 100644 --- a/storage/memorydb/memorydb.go +++ b/storage/memorydb/memorydb.go @@ -1,6 +1,7 @@ package memorydb import ( + "encoding/base64" "errors" "fmt" "sync" @@ -39,7 +40,7 @@ func (s *DB) Get(key []byte) ([]byte, error) { val, ok := s.db[string(key)] if !ok { - return nil, errors.New(fmt.Sprintf("key %s not found", key)) + return nil, errors.New(fmt.Sprintf("key: %s not found", base64.StdEncoding.EncodeToString(key))) } return val, nil diff --git a/storage/storageunit.go b/storage/storageunit.go index 6cf89509cfa..8aab017197d 100644 --- a/storage/storageunit.go +++ b/storage/storageunit.go @@ -1,6 +1,7 @@ package storage import ( + "encoding/base64" "fmt" "reflect" "sync" @@ -226,7 +227,7 @@ func (s *Unit) Get(key []byte) ([]byte, error) { // if found in persistance unit, add it in cache s.cacher.Put(key, v) } else { - return nil, errors.New(fmt.Sprintf("key: %s not found", string(key))) + return nil, errors.New(fmt.Sprintf("key: %s not found", base64.StdEncoding.EncodeToString(key))) } }