From 809fc3d84b2a2d2c4e4d99149525d04f2704c189 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Thu, 5 Dec 2019 18:17:15 +0200 Subject: [PATCH 01/35] added antiflooding component implementation and first integration tests --- consensus/mock/sposWorkerMock.go | 2 +- consensus/spos/interface.go | 2 +- consensus/spos/worker.go | 2 +- consensus/spos/worker_test.go | 23 +- dataRetriever/interface.go | 2 +- dataRetriever/mock/hashSliceResolverStub.go | 2 +- dataRetriever/mock/headerResolverStub.go | 2 +- dataRetriever/mock/resolverStub.go | 2 +- .../resolvers/genericBlockBodyResolver.go | 2 +- .../genericBlockBodyResolver_test.go | 10 +- dataRetriever/resolvers/headerResolver.go | 2 +- .../resolvers/headerResolver_test.go | 18 +- .../resolvers/transactionResolver.go | 2 +- .../resolvers/transactionResolver_test.go | 18 +- integrationTests/mock/headerResolverMock.go | 2 +- .../mock/miniBlocksResolverMock.go | 2 +- .../p2p/antiflood/antiflooding_test.go | 125 +++++++++ .../p2p/antiflood/messageProcessor.go | 77 ++++++ .../p2p/peerDiscovery/messageProcessor.go | 2 +- .../p2p/pubsub/messageProcessor.go | 38 +++ .../p2p/pubsub/peerReceivingMessages_test.go | 65 ++++- integrationTests/testInitializer.go | 79 ++++++ node/heartbeat/monitor.go | 2 +- node/heartbeat/monitor_test.go | 12 +- node/node_test.go | 4 +- p2p/libp2p/directSender.go | 10 +- p2p/libp2p/directSender_test.go | 36 +-- p2p/libp2p/export_test.go | 5 +- p2p/libp2p/netMessenger.go | 6 +- p2p/memp2p/memp2p.go | 10 +- p2p/mock/messageProcessorStub.go | 2 +- p2p/mock/mockMessageProcessor.go | 2 +- p2p/mock/streamMock.go | 7 +- p2p/p2p.go | 2 +- process/errors.go | 3 + process/interceptors/multiDataInterceptor.go | 2 +- .../interceptors/multiDataInterceptor_test.go | 19 +- process/interceptors/singleDataInterceptor.go | 2 +- process/interface.go | 10 +- process/mock/headerResolverMock.go | 2 +- process/mock/interceptorStub.go | 2 +- process/sync/metablock_test.go | 8 +- process/throttle/antiflood/countersMap.go | 80 
++++++ .../throttle/antiflood/countersMap_test.go | 243 ++++++++++++++++++ process/throttle/antiflood/export_test.go | 3 + 45 files changed, 856 insertions(+), 95 deletions(-) create mode 100644 integrationTests/p2p/antiflood/antiflooding_test.go create mode 100644 integrationTests/p2p/antiflood/messageProcessor.go create mode 100644 integrationTests/p2p/pubsub/messageProcessor.go create mode 100644 process/throttle/antiflood/countersMap.go create mode 100644 process/throttle/antiflood/countersMap_test.go create mode 100644 process/throttle/antiflood/export_test.go diff --git a/consensus/mock/sposWorkerMock.go b/consensus/mock/sposWorkerMock.go index b29a1cf38e4..d715cd3e977 100644 --- a/consensus/mock/sposWorkerMock.go +++ b/consensus/mock/sposWorkerMock.go @@ -30,7 +30,7 @@ func (sposWorkerMock *SposWorkerMock) RemoveAllReceivedMessagesCalls() { sposWorkerMock.RemoveAllReceivedMessagesCallsCalled() } -func (sposWorkerMock *SposWorkerMock) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error { +func (sposWorkerMock *SposWorkerMock) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID, _ func(buffToSend []byte)) error { return sposWorkerMock.ProcessReceivedMessageCalled(message) } diff --git a/consensus/spos/interface.go b/consensus/spos/interface.go index 22b6bb341e4..939ddac488c 100644 --- a/consensus/spos/interface.go +++ b/consensus/spos/interface.go @@ -83,7 +83,7 @@ type WorkerHandler interface { //RemoveAllReceivedMessagesCalls removes all the functions handlers RemoveAllReceivedMessagesCalls() //ProcessReceivedMessage method redirects the received message to the channel which should handle it - ProcessReceivedMessage(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error + ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID, broadcastHandler func(buffToSend []byte)) error //Extend does an extension for the subround with subroundId Extend(subroundId int) 
//GetConsensusStateChangedChannel gets the channel for the consensusStateChanged diff --git a/consensus/spos/worker.go b/consensus/spos/worker.go index 1e2a872305e..b54858a9673 100644 --- a/consensus/spos/worker.go +++ b/consensus/spos/worker.go @@ -216,7 +216,7 @@ func (wrk *Worker) getCleanedList(cnsDataList []*consensus.Message) []*consensus } // ProcessReceivedMessage method redirects the received message to the channel which should handle it -func (wrk *Worker) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error { +func (wrk *Worker) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID, _ func(buffToSend []byte)) error { if message == nil || message.IsInterfaceNil() { return ErrNilMessage } diff --git a/consensus/spos/worker_test.go b/consensus/spos/worker_test.go index 67ff7cb79fd..a16b91fa017 100644 --- a/consensus/spos/worker_test.go +++ b/consensus/spos/worker_test.go @@ -13,12 +13,15 @@ import ( "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/p2p" "github.com/ElrondNetwork/elrond-go/process" "github.com/stretchr/testify/assert" ) const roundTimeDuration = 100 * time.Millisecond +var fromConnectedPeerId = p2p.PeerID("connected peer id") + func initWorker() *spos.Worker { blockchainMock := &mock.BlockChainMock{} blockProcessor := &mock.BlockProcessorMock{ @@ -662,7 +665,7 @@ func TestWorker_ProcessReceivedMessageTxBlockBodyShouldRetNil(t *testing.T) { ) buff, _ := wrk.Marshalizer().Marshal(cnsMsg) time.Sleep(time.Second) - err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, nil) + err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, fromConnectedPeerId, nil) assert.Nil(t, err) } @@ -686,7 +689,7 @@ func TestWorker_ProcessReceivedMessageHeaderShouldRetNil(t *testing.T) { ) buff, _ := wrk.Marshalizer().Marshal(cnsMsg) time.Sleep(time.Second) - err 
:= wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, nil) + err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, fromConnectedPeerId, nil) assert.Nil(t, err) } @@ -694,7 +697,7 @@ func TestWorker_ProcessReceivedMessageHeaderShouldRetNil(t *testing.T) { func TestWorker_ProcessReceivedMessageNilMessageShouldErr(t *testing.T) { t.Parallel() wrk := *initWorker() - err := wrk.ProcessReceivedMessage(nil, nil) + err := wrk.ProcessReceivedMessage(nil, fromConnectedPeerId, nil) time.Sleep(time.Second) assert.Equal(t, 0, len(wrk.ReceivedMessages()[bn.MtBlockBody])) @@ -704,7 +707,7 @@ func TestWorker_ProcessReceivedMessageNilMessageShouldErr(t *testing.T) { func TestWorker_ProcessReceivedMessageNilMessageDataFieldShouldErr(t *testing.T) { t.Parallel() wrk := *initWorker() - err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{}, nil) + err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{}, fromConnectedPeerId, nil) time.Sleep(time.Second) assert.Equal(t, 0, len(wrk.ReceivedMessages()[bn.MtBlockBody])) @@ -726,7 +729,7 @@ func TestWorker_ProcessReceivedMessageNodeNotInEligibleListShouldErr(t *testing. 0, ) buff, _ := wrk.Marshalizer().Marshal(cnsMsg) - err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, nil) + err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, fromConnectedPeerId, nil) time.Sleep(time.Second) assert.Equal(t, 0, len(wrk.ReceivedMessages()[bn.MtBlockBody])) @@ -748,7 +751,7 @@ func TestWorker_ProcessReceivedMessageMessageIsForPastRoundShouldErr(t *testing. 
-1, ) buff, _ := wrk.Marshalizer().Marshal(cnsMsg) - err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, nil) + err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, fromConnectedPeerId, nil) time.Sleep(time.Second) assert.Equal(t, 0, len(wrk.ReceivedMessages()[bn.MtBlockBody])) @@ -770,7 +773,7 @@ func TestWorker_ProcessReceivedMessageInvalidSignatureShouldErr(t *testing.T) { 0, ) buff, _ := wrk.Marshalizer().Marshal(cnsMsg) - err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, nil) + err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, fromConnectedPeerId, nil) time.Sleep(time.Second) assert.Equal(t, 0, len(wrk.ReceivedMessages()[bn.MtBlockBody])) @@ -792,7 +795,7 @@ func TestWorker_ProcessReceivedMessageReceivedMessageIsFromSelfShouldRetNilAndNo 0, ) buff, _ := wrk.Marshalizer().Marshal(cnsMsg) - err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, nil) + err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, fromConnectedPeerId, nil) time.Sleep(time.Second) assert.Equal(t, 0, len(wrk.ReceivedMessages()[bn.MtBlockBody])) @@ -815,7 +818,7 @@ func TestWorker_ProcessReceivedMessageWhenRoundIsCanceledShouldRetNilAndNotProce 0, ) buff, _ := wrk.Marshalizer().Marshal(cnsMsg) - err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, nil) + err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, fromConnectedPeerId, nil) time.Sleep(time.Second) assert.Equal(t, 0, len(wrk.ReceivedMessages()[bn.MtBlockBody])) @@ -837,7 +840,7 @@ func TestWorker_ProcessReceivedMessageOkValsShouldWork(t *testing.T) { 0, ) buff, _ := wrk.Marshalizer().Marshal(cnsMsg) - err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, nil) + err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, fromConnectedPeerId, nil) time.Sleep(time.Second) assert.Equal(t, 1, len(wrk.ReceivedMessages()[bn.MtBlockHeader])) diff 
--git a/dataRetriever/interface.go b/dataRetriever/interface.go index d84b56f8dee..b2b1761115b 100644 --- a/dataRetriever/interface.go +++ b/dataRetriever/interface.go @@ -49,7 +49,7 @@ const ( // Resolver defines what a data resolver should do type Resolver interface { RequestDataFromHash(hash []byte) error - ProcessReceivedMessage(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error + ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID, broadcastHandler func(buffToSend []byte)) error IsInterfaceNil() bool } diff --git a/dataRetriever/mock/hashSliceResolverStub.go b/dataRetriever/mock/hashSliceResolverStub.go index 4fad62f78ed..9467f4be070 100644 --- a/dataRetriever/mock/hashSliceResolverStub.go +++ b/dataRetriever/mock/hashSliceResolverStub.go @@ -16,7 +16,7 @@ func (hsrs *HashSliceResolverStub) RequestDataFromHash(hash []byte) error { return errNotImplemented } -func (hsrs *HashSliceResolverStub) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error { +func (hsrs *HashSliceResolverStub) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID, _ func(buffToSend []byte)) error { if hsrs.ProcessReceivedMessageCalled != nil { return hsrs.ProcessReceivedMessageCalled(message) } diff --git a/dataRetriever/mock/headerResolverStub.go b/dataRetriever/mock/headerResolverStub.go index 0bfe959d45e..91a1e48f9ca 100644 --- a/dataRetriever/mock/headerResolverStub.go +++ b/dataRetriever/mock/headerResolverStub.go @@ -21,7 +21,7 @@ func (hrs *HeaderResolverStub) RequestDataFromHash(hash []byte) error { return errNotImplemented } -func (hrs *HeaderResolverStub) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error { +func (hrs *HeaderResolverStub) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID, _ func(buffToSend []byte)) error { if hrs.ProcessReceivedMessageCalled != nil { return hrs.ProcessReceivedMessageCalled(message) } diff --git a/dataRetriever/mock/resolverStub.go 
b/dataRetriever/mock/resolverStub.go index cae00d52cde..588215cc51b 100644 --- a/dataRetriever/mock/resolverStub.go +++ b/dataRetriever/mock/resolverStub.go @@ -13,7 +13,7 @@ func (rs *ResolverStub) RequestDataFromHash(hash []byte) error { return rs.RequestDataFromHashCalled(hash) } -func (rs *ResolverStub) ProcessReceivedMessage(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error { +func (rs *ResolverStub) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID, broadcastHandler func(buffToSend []byte)) error { return rs.ProcessReceivedMessageCalled(message, broadcastHandler) } diff --git a/dataRetriever/resolvers/genericBlockBodyResolver.go b/dataRetriever/resolvers/genericBlockBodyResolver.go index 3053d57e42d..ad7976e71e0 100644 --- a/dataRetriever/resolvers/genericBlockBodyResolver.go +++ b/dataRetriever/resolvers/genericBlockBodyResolver.go @@ -52,7 +52,7 @@ func NewGenericBlockBodyResolver( // ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to, usually a request topic) -func (gbbRes *genericBlockBodyResolver) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error { +func (gbbRes *genericBlockBodyResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID, _ func(buffToSend []byte)) error { rd := &dataRetriever.RequestData{} err := rd.Unmarshal(gbbRes.marshalizer, message) if err != nil { diff --git a/dataRetriever/resolvers/genericBlockBodyResolver_test.go b/dataRetriever/resolvers/genericBlockBodyResolver_test.go index 74d4157e7ad..5f4d8706e4b 100644 --- a/dataRetriever/resolvers/genericBlockBodyResolver_test.go +++ b/dataRetriever/resolvers/genericBlockBodyResolver_test.go @@ -13,6 +13,8 @@ import ( "github.com/stretchr/testify/assert" ) +var fromConnectedPeerId = p2p.PeerID("from connected peer Id") + //------- NewBlockBodyResolver func 
TestNewGenericBlockBodyResolver_NilSenderResolverShouldErr(t *testing.T) { @@ -97,7 +99,7 @@ func TestNewGenericBlockBodyResolver_ProcessReceivedMessageNilValueShouldErr(t * &mock.MarshalizerMock{}, ) - err := gbbRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, nil), nil) + err := gbbRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, nil), fromConnectedPeerId, nil) assert.Equal(t, dataRetriever.ErrNilValue, err) } @@ -111,7 +113,7 @@ func TestGenericBlockBodyResolver_ProcessReceivedMessageWrongTypeShouldErr(t *te &mock.MarshalizerMock{}, ) - err := gbbRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, make([]byte, 0)), nil) + err := gbbRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, make([]byte, 0)), fromConnectedPeerId, nil) assert.Equal(t, dataRetriever.ErrInvalidRequestType, err) } @@ -155,6 +157,7 @@ func TestGenericBlockBodyResolver_ProcessReceivedMessageFoundInPoolShouldRetValA err := gbbRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.HashArrayType, requestedBuff), + fromConnectedPeerId, nil, ) @@ -206,6 +209,7 @@ func TestGenericBlockBodyResolver_ProcessReceivedMessageFoundInPoolMarshalizerFa err := gbbRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.HashArrayType, requestedBuff), + fromConnectedPeerId, nil, ) @@ -251,6 +255,7 @@ func TestGenericBlockBodyResolver_ProcessReceivedMessageNotFoundInPoolShouldRetF err := gbbRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.HashType, requestedBuff), + fromConnectedPeerId, nil, ) @@ -294,6 +299,7 @@ func TestGenericBlockBodyResolver_ProcessReceivedMessageMissingDataShouldNotSend _ = gbbRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.HashType, requestedBuff), + fromConnectedPeerId, nil, ) diff --git a/dataRetriever/resolvers/headerResolver.go b/dataRetriever/resolvers/headerResolver.go index 64944696ef3..6f24c85ec1f 100644 --- a/dataRetriever/resolvers/headerResolver.go +++ 
b/dataRetriever/resolvers/headerResolver.go @@ -70,7 +70,7 @@ func NewHeaderResolver( // ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to, usually a request topic) -func (hdrRes *HeaderResolver) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error { +func (hdrRes *HeaderResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID, _ func(buffToSend []byte)) error { rd, err := hdrRes.parseReceivedMessage(message) if err != nil { return err diff --git a/dataRetriever/resolvers/headerResolver_test.go b/dataRetriever/resolvers/headerResolver_test.go index 967f3f78516..e95d725998a 100644 --- a/dataRetriever/resolvers/headerResolver_test.go +++ b/dataRetriever/resolvers/headerResolver_test.go @@ -13,6 +13,8 @@ import ( "github.com/stretchr/testify/assert" ) +var fromConnectedPeerId = p2p.PeerID("from connected peer Id") + //------- NewHeaderResolver func TestNewHeaderResolver_NilSenderResolverShouldErr(t *testing.T) { @@ -166,7 +168,7 @@ func TestHeaderResolver_ProcessReceivedMessageNilValueShouldErr(t *testing.T) { mock.NewNonceHashConverterMock(), ) - err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, nil), nil) + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, nil), fromConnectedPeerId, nil) assert.Equal(t, dataRetriever.ErrNilValue, err) } @@ -183,7 +185,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestUnknownTypeShouldErr(t *tes mock.NewNonceHashConverterMock(), ) - err := hdrRes.ProcessReceivedMessage(createRequestMsg(254, make([]byte, 0)), nil) + err := hdrRes.ProcessReceivedMessage(createRequestMsg(254, make([]byte, 0)), fromConnectedPeerId, nil) assert.Equal(t, dataRetriever.ErrResolveTypeUnknown, err) } @@ -223,7 +225,7 @@ func TestHeaderResolver_ValidateRequestHashTypeFoundInHdrPoolShouldSearchAndSend 
mock.NewNonceHashConverterMock(), ) - err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedData), nil) + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedData), fromConnectedPeerId, nil) assert.Nil(t, err) assert.True(t, searchWasCalled) assert.True(t, sendWasCalled) @@ -270,7 +272,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestHashTypeFoundInHdrPoolMarsh mock.NewNonceHashConverterMock(), ) - err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedData), nil) + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedData), fromConnectedPeerId, nil) assert.Equal(t, errExpected, err) } @@ -315,7 +317,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestRetFromStorageShouldRetValA mock.NewNonceHashConverterMock(), ) - err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedData), nil) + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedData), fromConnectedPeerId, nil) assert.Nil(t, err) assert.True(t, wasGotFromStorage) assert.True(t, wasSent) @@ -338,7 +340,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeInvalidSliceShould mock.NewNonceHashConverterMock(), ) - err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, []byte("aaa")), nil) + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, []byte("aaa")), fromConnectedPeerId, nil) assert.Equal(t, dataRetriever.ErrInvalidNonceByteSlice, err) } @@ -377,6 +379,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeNotFoundInHdrNonce err := hdrRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.NonceType, nonceConverter.ToByteSlice(requestedNonce)), + fromConnectedPeerId, nil, ) assert.Nil(t, err) @@ -441,6 +444,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo err := 
hdrRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.NonceType, nonceConverter.ToByteSlice(requestedNonce)), + fromConnectedPeerId, nil, ) @@ -512,6 +516,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo err := hdrRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.NonceType, nonceConverter.ToByteSlice(requestedNonce)), + fromConnectedPeerId, nil, ) @@ -580,6 +585,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo err := hdrRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.NonceType, nonceConverter.ToByteSlice(requestedNonce)), + fromConnectedPeerId, nil, ) diff --git a/dataRetriever/resolvers/transactionResolver.go b/dataRetriever/resolvers/transactionResolver.go index 3597bff7d66..a3178fabf29 100644 --- a/dataRetriever/resolvers/transactionResolver.go +++ b/dataRetriever/resolvers/transactionResolver.go @@ -57,7 +57,7 @@ func NewTxResolver( // ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to, usually a request topic) -func (txRes *TxResolver) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error { +func (txRes *TxResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID, _ func(buffToSend []byte)) error { rd := &dataRetriever.RequestData{} err := rd.Unmarshal(txRes.marshalizer, message) if err != nil { diff --git a/dataRetriever/resolvers/transactionResolver_test.go b/dataRetriever/resolvers/transactionResolver_test.go index 62b06a5ab1c..cfd5ac0f67a 100644 --- a/dataRetriever/resolvers/transactionResolver_test.go +++ b/dataRetriever/resolvers/transactionResolver_test.go @@ -12,6 +12,8 @@ import ( "github.com/stretchr/testify/assert" ) +var connectedPeerId = p2p.PeerID("connected peer id") + //------- NewTxResolver func TestNewTxResolver_NilResolverShouldErr(t *testing.T) { @@ -121,7 
+123,7 @@ func TestTxResolver_ProcessReceivedMessageNilMessageShouldErr(t *testing.T) { &mock.DataPackerStub{}, ) - err := txRes.ProcessReceivedMessage(nil, nil) + err := txRes.ProcessReceivedMessage(nil, connectedPeerId, nil) assert.Equal(t, dataRetriever.ErrNilMessage, err) } @@ -143,7 +145,7 @@ func TestTxResolver_ProcessReceivedMessageWrongTypeShouldErr(t *testing.T) { msg := &mock.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, nil) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId, nil) assert.Equal(t, dataRetriever.ErrRequestTypeNotImplemented, err) } @@ -165,7 +167,7 @@ func TestTxResolver_ProcessReceivedMessageNilValueShouldErr(t *testing.T) { msg := &mock.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, nil) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId, nil) assert.Equal(t, dataRetriever.ErrNilValue, err) } @@ -206,7 +208,7 @@ func TestTxResolver_ProcessReceivedMessageFoundInTxPoolShouldSearchAndSend(t *te msg := &mock.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, nil) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId, nil) assert.Nil(t, err) assert.True(t, searchWasCalled) @@ -251,7 +253,7 @@ func TestTxResolver_ProcessReceivedMessageFoundInTxPoolMarshalizerFailShouldRetN msg := &mock.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, nil) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId, nil) assert.Equal(t, errExpected, err) } @@ -299,7 +301,7 @@ func TestTxResolver_ProcessReceivedMessageFoundInTxStorageShouldRetValAndSend(t msg := &mock.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, nil) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId, nil) assert.Nil(t, err) assert.True(t, searchWasCalled) @@ -340,7 +342,7 @@ func TestTxResolver_ProcessReceivedMessageFoundInTxStorageCheckRetError(t *testi msg := &mock.P2PMessageMock{DataField: data} - err := 
txRes.ProcessReceivedMessage(msg, nil) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId, nil) assert.Equal(t, errExpected, err) @@ -399,7 +401,7 @@ func TestTxResolver_ProcessReceivedMessageRequestedTwoSmallTransactionsShouldCal msg := &mock.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, nil) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId, nil) assert.Nil(t, err) assert.True(t, sendSliceWasCalled) diff --git a/integrationTests/mock/headerResolverMock.go b/integrationTests/mock/headerResolverMock.go index d811bad7004..c9d751a1a30 100644 --- a/integrationTests/mock/headerResolverMock.go +++ b/integrationTests/mock/headerResolverMock.go @@ -17,7 +17,7 @@ func (hrm *HeaderResolverMock) RequestDataFromHash(hash []byte) error { return hrm.RequestDataFromHashCalled(hash) } -func (hrm *HeaderResolverMock) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error { +func (hrm *HeaderResolverMock) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID, _ func(buffToSend []byte)) error { if hrm.ProcessReceivedMessageCalled == nil { return nil } diff --git a/integrationTests/mock/miniBlocksResolverMock.go b/integrationTests/mock/miniBlocksResolverMock.go index 9b51a31dc49..6c30bc4a27b 100644 --- a/integrationTests/mock/miniBlocksResolverMock.go +++ b/integrationTests/mock/miniBlocksResolverMock.go @@ -21,7 +21,7 @@ func (hrm *MiniBlocksResolverMock) RequestDataFromHashArray(hashes [][]byte) err return hrm.RequestDataFromHashArrayCalled(hashes) } -func (hrm *MiniBlocksResolverMock) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error { +func (hrm *MiniBlocksResolverMock) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID, _ func(buffToSend []byte)) error { return hrm.ProcessReceivedMessageCalled(message) } diff --git a/integrationTests/p2p/antiflood/antiflooding_test.go b/integrationTests/p2p/antiflood/antiflooding_test.go new file mode 100644 index 
00000000000..43221175aed --- /dev/null +++ b/integrationTests/p2p/antiflood/antiflooding_test.go @@ -0,0 +1,125 @@ +package antiflood + +import ( + "fmt" + "sync/atomic" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/integrationTests" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/process/throttle/antiflood" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" + "github.com/stretchr/testify/assert" +) + +var durationBootstrapingTime = 2 * time.Second + +// TestAntifloodWithMessagesFromTheSamePeer tests what happens if a peer decide to send a large number of transactions +// All directed peers should prevent the flooding to the rest of the network and process only a limited number of messages +func TestAntifloodWithMessagesFromTheSamePeer(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + peers, err := integrationTests.CreateFixedNetworkOf7Peers() + assert.Nil(t, err) + + defer func() { + integrationTests.ClosePeers(peers) + }() + + //node 3 is connected to 0, 2, 4 and 6 (check integrationTests.CreateFixedNetworkOf7Peers function) + //large number of broadcast messages from 3 might flood above mentioned peers but should not flood 5 and 7 + + topic := "test_topic" + broadcastMessageDuration := time.Second * 2 + maxMumProcessMessages := 5 + interceptors, err := createTopicsAndMockInterceptors(peers, topic, maxMumProcessMessages) + assert.Nil(t, err) + + fmt.Println("bootstrapping nodes") + time.Sleep(durationBootstrapingTime) + + flooderIdx := 3 + floodedIdxes := []int{0, 2, 4, 6} + protectedIdexes := []int{5, 7} + + //flooder will deactivate its flooding mechanism as to be able to flood the network + interceptors[flooderIdx].CountersMap = nil + + fmt.Println("flooding the network") + isFlooding := atomic.Value{} + isFlooding.Store(true) + go func() { + for { + peers[flooderIdx].Broadcast(topic, []byte("floodMessage")) + + if !isFlooding.Load().(bool) { + return + } + } + }() + 
time.Sleep(broadcastMessageDuration) + + isFlooding.Store(false) + + checkFunctionForFloodedPeers := func(interceptor *messageProcessor) { + assert.Equal(t, uint64(maxMumProcessMessages), interceptor.MessagesProcessed()) + //can not precisely determine how many message have been received + assert.True(t, uint64(maxMumProcessMessages) < interceptor.MessagesReceived()) + } + checkFunctionForProtectedPeers := func(interceptor *messageProcessor) { + assert.Equal(t, uint64(maxMumProcessMessages), interceptor.MessagesProcessed()) + assert.Equal(t, uint64(maxMumProcessMessages), interceptor.MessagesReceived()) + } + + fmt.Println("checking flooded peers") + checkPeers(peers, interceptors, floodedIdxes, checkFunctionForFloodedPeers) + fmt.Println("checking protected peers") + checkPeers(peers, interceptors, protectedIdexes, checkFunctionForProtectedPeers) +} + +func createTopicsAndMockInterceptors(peers []p2p.Messenger, topic string, maxNumMessages int) ([]*messageProcessor, error) { + interceptors := make([]*messageProcessor, len(peers)) + + for idx, p := range peers { + err := p.CreateTopic(topic, true) + if err != nil { + return nil, fmt.Errorf("%w, pid: %s", err, p.ID()) + } + + cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache, Shards: 1} + antifloodPool, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + + interceptors[idx] = newMessageProcessor() + interceptors[idx].CountersMap, _ = antiflood.NewCountersMap(antifloodPool, maxNumMessages) + err = p.RegisterMessageProcessor(topic, interceptors[idx]) + if err != nil { + return nil, fmt.Errorf("%w, pid: %s", err, p.ID()) + } + } + + return interceptors, nil + +} + +func checkPeers( + peers []p2p.Messenger, + interceptors []*messageProcessor, + indexes []int, + checkFunction func(interceptor *messageProcessor), +) { + + for _, idx := range indexes { + peer := peers[idx] + interceptor := interceptors[idx] + fmt.Printf("%s got %d total messages and processed %d\n", + 
peer.ID().Pretty(), + interceptor.MessagesReceived(), + interceptor.MessagesProcessed(), + ) + + checkFunction(interceptor) + } +} diff --git a/integrationTests/p2p/antiflood/messageProcessor.go b/integrationTests/p2p/antiflood/messageProcessor.go new file mode 100644 index 00000000000..273af2b1147 --- /dev/null +++ b/integrationTests/p2p/antiflood/messageProcessor.go @@ -0,0 +1,77 @@ +package antiflood + +import ( + "fmt" + "sync" + "sync/atomic" + + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/process" +) + +type messageProcessor struct { + numMessagesReceived uint64 + mutMessages sync.Mutex + messages map[p2p.PeerID][]p2p.MessageP2P + CountersMap process.AntifloodProtector +} + +func newMessageProcessor() *messageProcessor { + return &messageProcessor{ + messages: make(map[p2p.PeerID][]p2p.MessageP2P), + } +} + +func (mp *messageProcessor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID, _ func(buffToSend []byte)) error { + atomic.AddUint64(&mp.numMessagesReceived, 1) + + if mp.CountersMap != nil { + //protect from directly connected peer + ok := mp.CountersMap.TryIncrement(string(fromConnectedPeer)) + if !ok { + return fmt.Errorf("system flooded") + } + + if fromConnectedPeer != message.Peer() { + //protect from the flooding messages that originate from the same source but come from different peers + ok = mp.CountersMap.TryIncrement(string(message.Peer())) + if !ok { + return fmt.Errorf("system flooded") + } + } + } + + mp.mutMessages.Lock() + defer mp.mutMessages.Unlock() + + mp.messages[fromConnectedPeer] = append(mp.messages[fromConnectedPeer], message) + + return nil +} + +func (mp *messageProcessor) Messages(pid p2p.PeerID) []p2p.MessageP2P { + mp.mutMessages.Lock() + defer mp.mutMessages.Unlock() + + return mp.messages[pid] +} + +func (mp *messageProcessor) MessagesReceived() uint64 { + return atomic.LoadUint64(&mp.numMessagesReceived) +} + +func (mp *messageProcessor) MessagesProcessed() 
uint64 { + mp.mutMessages.Lock() + defer mp.mutMessages.Unlock() + + count := 0 + for _, msgs := range mp.messages { + count += len(msgs) + } + + return uint64(count) +} + +func (mp *messageProcessor) IsInterfaceNil() bool { + return mp == nil +} diff --git a/integrationTests/p2p/peerDiscovery/messageProcessor.go b/integrationTests/p2p/peerDiscovery/messageProcessor.go index ac026da4d66..1c1e52085d7 100644 --- a/integrationTests/p2p/peerDiscovery/messageProcessor.go +++ b/integrationTests/p2p/peerDiscovery/messageProcessor.go @@ -21,7 +21,7 @@ func NewMessageProcessor(chanDone chan struct{}, requiredVal []byte) *MessagePro } } -func (mp *MessageProcesssor) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error { +func (mp *MessageProcesssor) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID, _ func(buffToSend []byte)) error { if bytes.Equal(mp.RequiredValue, message.Data()) { mp.mutDataReceived.Lock() mp.wasDataReceived = true diff --git a/integrationTests/p2p/pubsub/messageProcessor.go b/integrationTests/p2p/pubsub/messageProcessor.go new file mode 100644 index 00000000000..16980d6d8e8 --- /dev/null +++ b/integrationTests/p2p/pubsub/messageProcessor.go @@ -0,0 +1,38 @@ +package peerDisconnecting + +import ( + "sync" + + "github.com/ElrondNetwork/elrond-go/p2p" +) + +type messageProcessor struct { + mutMessages sync.Mutex + messages map[p2p.PeerID][]p2p.MessageP2P +} + +func newMessageProcessor() *messageProcessor { + return &messageProcessor{ + messages: make(map[p2p.PeerID][]p2p.MessageP2P), + } +} + +func (mp *messageProcessor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID, _ func(buffToSend []byte)) error { + mp.mutMessages.Lock() + defer mp.mutMessages.Unlock() + + mp.messages[fromConnectedPeer] = append(mp.messages[fromConnectedPeer], message) + + return nil +} + +func (mp *messageProcessor) Messages(pid p2p.PeerID) []p2p.MessageP2P { + mp.mutMessages.Lock() + defer mp.mutMessages.Unlock() + + 
return mp.messages[pid] +} + +func (mp *messageProcessor) IsInterfaceNil() bool { + return mp == nil +} diff --git a/integrationTests/p2p/pubsub/peerReceivingMessages_test.go b/integrationTests/p2p/pubsub/peerReceivingMessages_test.go index 8d48df40958..b959ff3c8ec 100644 --- a/integrationTests/p2p/pubsub/peerReceivingMessages_test.go +++ b/integrationTests/p2p/pubsub/peerReceivingMessages_test.go @@ -20,7 +20,7 @@ type messageProcessorStub struct { ProcessReceivedMessageCalled func(message p2p.MessageP2P) error } -func (mps *messageProcessorStub) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error { +func (mps *messageProcessorStub) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID, _ func(buffToSend []byte)) error { return mps.ProcessReceivedMessageCalled(message) } @@ -126,3 +126,66 @@ func TestPeerReceivesTheSameMessageMultipleTimesShouldNotHappen(t *testing.T) { time.Sleep(time.Millisecond) } } + +// TestBroadcastMessageComesFormTheConnectedPeers tests what happens in a network when a message comes through pubsub +// The receiving peer should get the message only from one of the connected peers +func TestBroadcastMessageComesFormTheConnectedPeers(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + topic := "test_topic" + broadcastMessageDuration := time.Second * 2 + peers, err := integrationTests.CreateFixedNetworkOf7Peers() + assert.Nil(t, err) + + defer func() { + integrationTests.ClosePeers(peers) + }() + + //node 0 is connected only to 1 and 3 (check integrationTests.CreateFixedNetworkOf7Peers function) + //a broadcast message from 6 should be received on node 0 only through peers 1 and 3 + + interceptors, err := createTopicsAndMockInterceptors(peers, topic) + assert.Nil(t, err) + + fmt.Println("bootstrapping nodes") + time.Sleep(durationBootstrapingTime) + + broadcastIdx := 6 + receiverIdx := 0 + shouldReceiveFrom := []int{1, 3} + + broadcastPeer := peers[broadcastIdx] + 
fmt.Printf("broadcasting message from pid %s\n", broadcastPeer.ID().Pretty()) + broadcastPeer.Broadcast(topic, []byte("dummy")) + time.Sleep(broadcastMessageDuration) + + countReceivedMessages := 0 + receiverInterceptor := interceptors[receiverIdx] + for _, idx := range shouldReceiveFrom { + connectedPid := peers[idx].ID() + countReceivedMessages += len(receiverInterceptor.Messages(connectedPid)) + } + + assert.Equal(t, 1, countReceivedMessages) +} + +func createTopicsAndMockInterceptors(peers []p2p.Messenger, topic string) ([]*messageProcessor, error) { + interceptors := make([]*messageProcessor, len(peers)) + + for idx, p := range peers { + err := p.CreateTopic(topic, true) + if err != nil { + return nil, fmt.Errorf("%w, pid: %s", err, p.ID()) + } + + interceptors[idx] = newMessageProcessor() + err = p.RegisterMessageProcessor(topic, interceptors[idx]) + if err != nil { + return nil, fmt.Errorf("%w, pid: %s", err, p.ID()) + } + } + + return interceptors, nil +} diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 6afbd87b59c..a1338765d79 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -89,6 +89,85 @@ func CreateMessengerWithKadDht(ctx context.Context, initialAddr string) p2p.Mess return libP2PMes } +// CreateMessengerWithNoDiscovery creates a new libp2p messenger with no peer discovery +func CreateMessengerWithNoDiscovery(ctx context.Context) p2p.Messenger { + prvKey, _ := ecdsa.GenerateKey(btcec.S256(), rand.Reader) + sk := (*libp2pCrypto.Secp256k1PrivateKey)(prvKey) + + libP2PMes, err := libp2p.NewNetworkMessengerOnFreePort( + ctx, + sk, + nil, + loadBalancer.NewOutgoingChannelLoadBalancer(), + discovery.NewNullDiscoverer(), + ) + if err != nil { + fmt.Println(err.Error()) + } + + return libP2PMes +} + +// CreateFixedNetworkOf7Peers assembles a network as following: +// +// 0------------------- 1 +// | | +// 2 ------------------ 3 ------------------ 4 +// | | | +// 5 6 7 +func 
CreateFixedNetworkOf7Peers() ([]p2p.Messenger, error) { + numPeers := 7 + peers := make([]p2p.Messenger, numPeers+1) + + for i := 0; i <= numPeers; i++ { + peers[i] = CreateMessengerWithNoDiscovery(context.Background()) + } + + connections := map[int][]int{ + 0: {1, 3}, + 1: {4}, + 2: {5, 3}, + 3: {4, 6}, + 4: {7}, + } + + err := createConnections(peers, connections) + if err != nil { + return nil, err + } + + return peers, nil +} + +func createConnections(peers []p2p.Messenger, connections map[int][]int) error { + for pid, connectTo := range connections { + err := connectPeerToOthers(peers, pid, connectTo) + if err != nil { + return err + } + } + + return nil +} + +func connectPeerToOthers(peers []p2p.Messenger, idx int, connectToIdxes []int) error { + for _, connectToIdx := range connectToIdxes { + err := peers[idx].ConnectToPeer(peers[connectToIdx].Addresses()[0]) + if err != nil { + return fmt.Errorf("%w connecting %s to %s", err, peers[idx].ID(), peers[connectToIdx].ID()) + } + } + + return nil +} + +// ClosePeers calls Messenger.Close on the provided peers +func ClosePeers(peers []p2p.Messenger) { + for _, p := range peers { + _ = p.Close() + } +} + // CreateTestShardDataPool creates a test data pool for shard nodes func CreateTestShardDataPool(txPool dataRetriever.ShardedDataCacherNotifier) dataRetriever.PoolsHolder { if txPool == nil { diff --git a/node/heartbeat/monitor.go b/node/heartbeat/monitor.go index f07f6ae823b..97cb9c4a88b 100644 --- a/node/heartbeat/monitor.go +++ b/node/heartbeat/monitor.go @@ -201,7 +201,7 @@ func (m *Monitor) SetAppStatusHandler(ash core.AppStatusHandler) error { // ProcessReceivedMessage satisfies the p2p.MessageProcessor interface so it can be called // by the p2p subsystem each time a new heartbeat message arrives -func (m *Monitor) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error { +func (m *Monitor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID, _ func(buffToSend 
[]byte)) error { hbRecv, err := m.messageHandler.CreateHeartbeatFromP2pMessage(message) if err != nil { return err diff --git a/node/heartbeat/monitor_test.go b/node/heartbeat/monitor_test.go index fef6cdd8e8e..573c28b88e3 100644 --- a/node/heartbeat/monitor_test.go +++ b/node/heartbeat/monitor_test.go @@ -14,6 +14,8 @@ import ( "github.com/stretchr/testify/assert" ) +var fromConnectedPeerId = p2p.PeerID("from connected peer Id") + //------- NewMonitor func TestNewMonitor_NilMarshalizerShouldErr(t *testing.T) { @@ -228,7 +230,7 @@ func TestMonitor_ProcessReceivedMessageShouldWork(t *testing.T) { Pubkey: []byte(pubKey), } hbBytes, _ := json.Marshal(hb) - err := mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: hbBytes}, nil) + err := mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: hbBytes}, fromConnectedPeerId, nil) assert.Nil(t, err) //a delay is mandatory for the go routine to finish its job @@ -286,7 +288,7 @@ func TestMonitor_ProcessReceivedMessageWithNewPublicKey(t *testing.T) { Pubkey: []byte(pubKey), } hbBytes, _ := json.Marshal(hb) - err := mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: hbBytes}, nil) + err := mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: hbBytes}, fromConnectedPeerId, nil) assert.Nil(t, err) //a delay is mandatory for the go routine to finish its job @@ -353,7 +355,7 @@ func TestMonitor_ProcessReceivedMessageWithNewShardID(t *testing.T) { buffToSend, err := json.Marshal(hb) assert.Nil(t, err) - err = mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: buffToSend}, nil) + err = mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: buffToSend}, fromConnectedPeerId, nil) assert.Nil(t, err) //a delay is mandatory for the go routine to finish its job @@ -373,7 +375,7 @@ func TestMonitor_ProcessReceivedMessageWithNewShardID(t *testing.T) { assert.Nil(t, err) - err = mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: buffToSend}, nil) + err = 
mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: buffToSend}, fromConnectedPeerId, nil) time.Sleep(1 * time.Second) @@ -452,6 +454,6 @@ func sendHbMessageFromPubKey(pubKey string, mon *heartbeat.Monitor) error { Pubkey: []byte(pubKey), } buffToSend, _ := json.Marshal(hb) - err := mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: buffToSend}, nil) + err := mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: buffToSend}, fromConnectedPeerId, nil) return err } diff --git a/node/node_test.go b/node/node_test.go index 4ab76c6d9ee..814c7f0fc89 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -31,6 +31,8 @@ import ( "github.com/stretchr/testify/assert" ) +var fromConnectedPeerId = p2p.PeerID("from connected peer Id") + func logError(err error) { if err != nil { fmt.Println(err.Error()) @@ -1435,7 +1437,7 @@ func TestNode_StartHeartbeatShouldWorkAndCanCallProcessMessage(t *testing.T) { assert.Nil(t, err) assert.NotNil(t, registeredHandler) - err = registeredHandler.ProcessReceivedMessage(nil, nil) + err = registeredHandler.ProcessReceivedMessage(nil, fromConnectedPeerId, nil) assert.NotNil(t, err) assert.Contains(t, "nil message", err.Error()) } diff --git a/p2p/libp2p/directSender.go b/p2p/libp2p/directSender.go index ab7600a0566..074484919c0 100644 --- a/p2p/libp2p/directSender.go +++ b/p2p/libp2p/directSender.go @@ -29,7 +29,7 @@ type directSender struct { counter uint64 ctx context.Context hostP2P host.Host - messageHandler func(msg p2p.MessageP2P) error + messageHandler func(msg p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error mutSeenMesages sync.Mutex seenMessages *timecache.TimeCache mutexForPeer *MutexHolder @@ -39,7 +39,7 @@ type directSender struct { func NewDirectSender( ctx context.Context, h host.Host, - messageHandler func(msg p2p.MessageP2P) error, + messageHandler func(msg p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error, ) (*directSender, error) { if h == nil { @@ -97,7 +97,7 @@ func (ds *directSender) 
directStreamHandler(s network.Stream) { return } - err = ds.processReceivedDirectMessage(msg) + err = ds.processReceivedDirectMessage(msg, s.Conn().RemotePeer()) if err != nil { log.Trace("p2p processReceivedDirectMessage", "error", err.Error()) } @@ -105,7 +105,7 @@ func (ds *directSender) directStreamHandler(s network.Stream) { }(reader) } -func (ds *directSender) processReceivedDirectMessage(message *pubsubPb.Message) error { +func (ds *directSender) processReceivedDirectMessage(message *pubsubPb.Message, fromConnectedPeer peer.ID) error { if message == nil { return p2p.ErrNilMessage } @@ -120,7 +120,7 @@ func (ds *directSender) processReceivedDirectMessage(message *pubsubPb.Message) } p2pMsg := NewMessage(&pubsub.Message{Message: message}) - return ds.messageHandler(p2pMsg) + return ds.messageHandler(p2pMsg, p2p.PeerID(fromConnectedPeer)) } func (ds *directSender) checkAndSetSeenMessage(msg *pubsubPb.Message) bool { diff --git a/p2p/libp2p/directSender_test.go b/p2p/libp2p/directSender_test.go index 8d53b754acf..2e75d1de3a4 100644 --- a/p2p/libp2p/directSender_test.go +++ b/p2p/libp2p/directSender_test.go @@ -25,7 +25,7 @@ import ( const timeout = time.Second * 5 -var blankMessageHandler = func(msg p2p.MessageP2P) error { +var blankMessageHandler = func(msg p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { return nil } @@ -69,7 +69,7 @@ func createLibP2PCredentialsDirectSender() (peer.ID, libp2pCrypto.PrivKey) { func TestNewDirectSender_NilContextShouldErr(t *testing.T) { hs := &mock.ConnectableHostStub{} - ds, err := libp2p.NewDirectSender(nil, hs, func(msg p2p.MessageP2P) error { + ds, err := libp2p.NewDirectSender(nil, hs, func(msg p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { return nil }) @@ -78,7 +78,7 @@ func TestNewDirectSender_NilContextShouldErr(t *testing.T) { } func TestNewDirectSender_NilHostShouldErr(t *testing.T) { - ds, err := libp2p.NewDirectSender(context.Background(), nil, func(msg p2p.MessageP2P) error { + ds, err := 
libp2p.NewDirectSender(context.Background(), nil, func(msg p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { return nil }) @@ -94,7 +94,7 @@ func TestNewDirectSender_NilMessageHandlerShouldErr(t *testing.T) { } func TestNewDirectSender_OkValsShouldWork(t *testing.T) { - ds, err := libp2p.NewDirectSender(context.Background(), generateHostStub(), func(msg p2p.MessageP2P) error { + ds, err := libp2p.NewDirectSender(context.Background(), generateHostStub(), func(msg p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { return nil }) @@ -113,7 +113,7 @@ func TestNewDirectSender_OkValsShouldCallSetStreamHandlerWithCorrectValues(t *te }, } - _, _ = libp2p.NewDirectSender(context.Background(), hs, func(msg p2p.MessageP2P) error { + _, _ = libp2p.NewDirectSender(context.Background(), hs, func(msg p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { return nil }) @@ -130,7 +130,7 @@ func TestDirectSender_ProcessReceivedDirectMessageNilMessageShouldErr(t *testing blankMessageHandler, ) - err := ds.ProcessReceivedDirectMessage(nil) + err := ds.ProcessReceivedDirectMessage(nil, "peer id") assert.Equal(t, p2p.ErrNilMessage, err) } @@ -150,7 +150,7 @@ func TestDirectSender_ProcessReceivedDirectMessageNilTopicIdsShouldErr(t *testin msg.From = []byte(id) msg.TopicIDs = nil - err := ds.ProcessReceivedDirectMessage(msg) + err := ds.ProcessReceivedDirectMessage(msg, "peer id") assert.Equal(t, p2p.ErrNilTopic, err) } @@ -170,7 +170,7 @@ func TestDirectSender_ProcessReceivedDirectMessageEmptyTopicIdsShouldErr(t *test msg.From = []byte(id) msg.TopicIDs = make([]string, 0) - err := ds.ProcessReceivedDirectMessage(msg) + err := ds.ProcessReceivedDirectMessage(msg, "peer id") assert.Equal(t, p2p.ErrEmptyTopicList, err) } @@ -193,7 +193,7 @@ func TestDirectSender_ProcessReceivedDirectMessageAlreadySeenMsgShouldErr(t *tes msgId := string(msg.GetFrom()) + string(msg.GetSeqno()) ds.SeenMessages().Add(msgId) - err := ds.ProcessReceivedDirectMessage(msg) + err := 
ds.ProcessReceivedDirectMessage(msg, "peer id") assert.Equal(t, p2p.ErrAlreadySeenMessage, err) } @@ -213,7 +213,7 @@ func TestDirectSender_ProcessReceivedDirectMessageShouldWork(t *testing.T) { msg.From = []byte(id) msg.TopicIDs = []string{"topic"} - err := ds.ProcessReceivedDirectMessage(msg) + err := ds.ProcessReceivedDirectMessage(msg, "peer id") assert.Nil(t, err) } @@ -224,7 +224,7 @@ func TestDirectSender_ProcessReceivedDirectMessageShouldCallMessageHandler(t *te ds, _ := libp2p.NewDirectSender( context.Background(), generateHostStub(), - func(msg p2p.MessageP2P) error { + func(msg p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { wasCalled = true return nil }, @@ -238,7 +238,7 @@ func TestDirectSender_ProcessReceivedDirectMessageShouldCallMessageHandler(t *te msg.From = []byte(id) msg.TopicIDs = []string{"topic"} - _ = ds.ProcessReceivedDirectMessage(msg) + _ = ds.ProcessReceivedDirectMessage(msg, "peer id") assert.True(t, wasCalled) } @@ -249,7 +249,7 @@ func TestDirectSender_ProcessReceivedDirectMessageShouldReturnHandlersError(t *t ds, _ := libp2p.NewDirectSender( context.Background(), generateHostStub(), - func(msg p2p.MessageP2P) error { + func(msg p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { return checkErr }, ) @@ -262,7 +262,7 @@ func TestDirectSender_ProcessReceivedDirectMessageShouldReturnHandlersError(t *t msg.From = []byte(id) msg.TopicIDs = []string{"topic"} - err := ds.ProcessReceivedDirectMessage(msg) + err := ds.ProcessReceivedDirectMessage(msg, "peer id") assert.Equal(t, checkErr, err) } @@ -516,7 +516,7 @@ func TestDirectSender_ReceivedSentMessageShouldCallMessageHandlerTestFullCycle(t ds, _ := libp2p.NewDirectSender( context.Background(), hs, - func(msg p2p.MessageP2P) error { + func(msg p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { receivedMsg = msg chanDone <- true return nil @@ -527,6 +527,12 @@ func TestDirectSender_ReceivedSentMessageShouldCallMessageHandlerTestFullCycle(t remotePeer := peer.ID("remote peer") 
stream := mock.NewStreamMock() + stream.SetConn( + &mock.ConnStub{ + RemotePeerCalled: func() peer.ID { + return "remote peer ID" + }, + }) stream.SetProtocol(libp2p.DirectSendID) streamHandler(stream) diff --git a/p2p/libp2p/export_test.go b/p2p/libp2p/export_test.go index b82e79dd330..a12c33f5732 100644 --- a/p2p/libp2p/export_test.go +++ b/p2p/libp2p/export_test.go @@ -3,6 +3,7 @@ package libp2p import ( "github.com/ElrondNetwork/elrond-go/storage/lrucache" "github.com/libp2p/go-libp2p-core/connmgr" + "github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-pubsub/pb" "github.com/whyrusleeping/timecache" ) @@ -18,8 +19,8 @@ func (netMes *networkMessenger) SetHost(newHost ConnectableHost) { netMes.ctxProvider.connHost = newHost } -func (ds *directSender) ProcessReceivedDirectMessage(message *pubsub_pb.Message) error { - return ds.processReceivedDirectMessage(message) +func (ds *directSender) ProcessReceivedDirectMessage(message *pubsub_pb.Message, fromConnectedPeer peer.ID) error { + return ds.processReceivedDirectMessage(message, fromConnectedPeer) } func (ds *directSender) SeenMessages() *timecache.TimeCache { diff --git a/p2p/libp2p/netMessenger.go b/p2p/libp2p/netMessenger.go index 37554d34347..7ee78de8514 100644 --- a/p2p/libp2p/netMessenger.go +++ b/p2p/libp2p/netMessenger.go @@ -456,7 +456,7 @@ func (netMes *networkMessenger) RegisterMessageProcessor(topic string, handler p } err := netMes.pb.RegisterTopicValidator(topic, func(ctx context.Context, pid peer.ID, message *pubsub.Message) bool { - err := handler.ProcessReceivedMessage(NewMessage(message), broadcastHandler) + err := handler.ProcessReceivedMessage(NewMessage(message), p2p.PeerID(pid), broadcastHandler) if err != nil { log.Trace("p2p validator", "error", err.Error(), "topics", message.TopicIDs) } @@ -499,7 +499,7 @@ func (netMes *networkMessenger) SendToConnectedPeer(topic string, buff []byte, p return netMes.ds.Send(topic, buff, peerID) } -func (netMes *networkMessenger) 
directMessageHandler(message p2p.MessageP2P) error { +func (netMes *networkMessenger) directMessageHandler(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { var processor p2p.MessageProcessor netMes.mutTopics.RLock() @@ -511,7 +511,7 @@ func (netMes *networkMessenger) directMessageHandler(message p2p.MessageP2P) err } go func(msg p2p.MessageP2P) { - err := processor.ProcessReceivedMessage(msg, nil) + err := processor.ProcessReceivedMessage(msg, fromConnectedPeer, nil) if err != nil { log.Trace("p2p validator", "error", err.Error(), "topics", msg.TopicIDs()) } diff --git a/p2p/memp2p/memp2p.go b/p2p/memp2p/memp2p.go index e186fee1ea3..47e2641855e 100644 --- a/p2p/memp2p/memp2p.go +++ b/p2p/memp2p/memp2p.go @@ -292,11 +292,11 @@ func (messenger *Messenger) parametricBroadcast(topic string, data []byte, async for _, peer := range messenger.Network.Peers() { if async { go func(receivingPeer *Messenger) { - err := receivingPeer.ReceiveMessage(topic, message, true) + err := receivingPeer.ReceiveMessage(topic, message, messenger.P2PID, true) log.LogIfError(err) }(peer) } else { - err = peer.ReceiveMessage(topic, message, true) + err = peer.ReceiveMessage(topic, message, messenger.P2PID, true) } if err != nil { break @@ -322,7 +322,7 @@ func (messenger *Messenger) SendToConnectedPeer(topic string, buff []byte, peerI return ErrReceivingPeerNotConnected } - return receivingPeer.ReceiveMessage(topic, message, false) + return receivingPeer.ReceiveMessage(topic, message, messenger.P2PID, false) } return ErrNotConnectedToNetwork @@ -333,7 +333,7 @@ func (messenger *Messenger) SendToConnectedPeer(topic string, buff []byte, peerI // previously registered a message processor for that topic. The Network will // log the message only if the Network.LogMessages flag is set and only if the // Messenger has the requested topic and MessageProcessor. 
-func (messenger *Messenger) ReceiveMessage(topic string, message p2p.MessageP2P, allowBroadcast bool) error { +func (messenger *Messenger) ReceiveMessage(topic string, message p2p.MessageP2P, fromConnectedPeer p2p.PeerID, allowBroadcast bool) error { messenger.TopicsMutex.Lock() validator, found := messenger.Topics[topic] messenger.TopicsMutex.Unlock() @@ -357,7 +357,7 @@ func (messenger *Messenger) ReceiveMessage(topic string, message p2p.MessageP2P, } } - return validator.ProcessReceivedMessage(message, handler) + return validator.ProcessReceivedMessage(message, fromConnectedPeer, handler) } // IsConnectedToTheNetwork returns true as this implementation is always connected to its network diff --git a/p2p/mock/messageProcessorStub.go b/p2p/mock/messageProcessorStub.go index 2299101234d..9313598b6e8 100644 --- a/p2p/mock/messageProcessorStub.go +++ b/p2p/mock/messageProcessorStub.go @@ -8,7 +8,7 @@ type MessageProcessorStub struct { ProcessMessageCalled func(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error } -func (mps *MessageProcessorStub) ProcessReceivedMessage(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error { +func (mps *MessageProcessorStub) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID, broadcastHandler func(buffToSend []byte)) error { return mps.ProcessMessageCalled(message, broadcastHandler) } diff --git a/p2p/mock/mockMessageProcessor.go b/p2p/mock/mockMessageProcessor.go index d0c976e8cce..015ab0aaaa8 100644 --- a/p2p/mock/mockMessageProcessor.go +++ b/p2p/mock/mockMessageProcessor.go @@ -16,7 +16,7 @@ func NewMockMessageProcessor(peer p2p.PeerID) *MockMessageProcessor { return &processor } -func (processor *MockMessageProcessor) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error { +func (processor *MockMessageProcessor) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID, _ func(buffToSend []byte)) error { fmt.Printf("Message received by %s 
from %s: %s\n", string(processor.Peer), string(message.Peer()), string(message.Data())) return nil } diff --git a/p2p/mock/streamMock.go b/p2p/mock/streamMock.go index f5bcfc307a6..6c2ccbaef49 100644 --- a/p2p/mock/streamMock.go +++ b/p2p/mock/streamMock.go @@ -16,6 +16,7 @@ type streamMock struct { pid protocol.ID streamClosed bool canRead bool + conn network.Conn } func NewStreamMock() *streamMock { @@ -104,5 +105,9 @@ func (sm *streamMock) Stat() network.Stat { } func (sm *streamMock) Conn() network.Conn { - panic("implement me") + return sm.conn +} + +func (sm *streamMock) SetConn(conn network.Conn) { + sm.conn = conn } diff --git a/p2p/p2p.go b/p2p/p2p.go index c65fb883b2a..1c968d7eab7 100644 --- a/p2p/p2p.go +++ b/p2p/p2p.go @@ -11,7 +11,7 @@ import ( // All implementations that will be called from Messenger implementation will need to satisfy this interface // If the function returns a non nil value, the received message will not be propagated to its connected peers type MessageProcessor interface { - ProcessReceivedMessage(message MessageP2P, broadcastHandler func(buffToSend []byte)) error + ProcessReceivedMessage(message MessageP2P, fromConnectedPeer PeerID, broadcastHandler func(buffToSend []byte)) error IsInterfaceNil() bool } diff --git a/process/errors.go b/process/errors.go index a7791d29fce..a28f7eda58c 100644 --- a/process/errors.go +++ b/process/errors.go @@ -574,3 +574,6 @@ var ErrSCDeployFromSCRIsNotPermitted = errors.New("it is not permitted to deploy // ErrNotEnoughGas signals that not enough gas has been provided var ErrNotEnoughGas = errors.New("not enough gas was sent in the transaction") + +// ErrInvalidValue signals that an invalid value was provided +var ErrInvalidValue = errors.New("invalid value provided") diff --git a/process/interceptors/multiDataInterceptor.go b/process/interceptors/multiDataInterceptor.go index 948c2a3b94e..ba9777e932c 100644 --- a/process/interceptors/multiDataInterceptor.go +++ 
b/process/interceptors/multiDataInterceptor.go @@ -53,7 +53,7 @@ func NewMultiDataInterceptor( // ProcessReceivedMessage is the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to) -func (mdi *MultiDataInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error { +func (mdi *MultiDataInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID, broadcastHandler func(buffToSend []byte)) error { err := preProcessMesage(mdi.throttler, message) if err != nil { return err diff --git a/process/interceptors/multiDataInterceptor_test.go b/process/interceptors/multiDataInterceptor_test.go index 8bd8014eab9..d53d1b7bd87 100644 --- a/process/interceptors/multiDataInterceptor_test.go +++ b/process/interceptors/multiDataInterceptor_test.go @@ -8,12 +8,15 @@ import ( "time" "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/p2p" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/interceptors" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/stretchr/testify/assert" ) +var fromConnectedPeerId = p2p.PeerID("from connected peer Id") + func TestNewMultiDataInterceptor_NilMarshalizerShouldErr(t *testing.T) { t.Parallel() @@ -96,7 +99,7 @@ func TestMultiDataInterceptor_ProcessReceivedMessageNilMessageShouldErr(t *testi &mock.InterceptorThrottlerStub{}, ) - err := mdi.ProcessReceivedMessage(nil, nil) + err := mdi.ProcessReceivedMessage(nil, fromConnectedPeerId, nil) assert.Equal(t, process.ErrNilMessage, err) } @@ -119,7 +122,7 @@ func TestMultiDataInterceptor_ProcessReceivedMessageUnmarshalFailsShouldErr(t *t msg := &mock.P2PMessageMock{ DataField: []byte("data to be processed"), } - err := mdi.ProcessReceivedMessage(msg, nil) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, nil) assert.Equal(t, errExpeced, err) } @@ 
-141,7 +144,7 @@ func TestMultiDataInterceptor_ProcessReceivedMessageUnmarshalReturnsEmptySliceSh msg := &mock.P2PMessageMock{ DataField: []byte("data to be processed"), } - err := mdi.ProcessReceivedMessage(msg, nil) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, nil) assert.Equal(t, process.ErrNoDataInMessage, err) } @@ -175,7 +178,7 @@ func TestMultiDataInterceptor_ProcessReceivedCreateFailsShouldNotResend(t *testi msg := &mock.P2PMessageMock{ DataField: dataField, } - err := mdi.ProcessReceivedMessage(msg, bradcastCallback) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, bradcastCallback) time.Sleep(time.Second) @@ -242,7 +245,7 @@ func TestMultiDataInterceptor_ProcessReceivedPartiallyCorrectDataShouldSendOnlyC msg := &mock.P2PMessageMock{ DataField: dataField, } - err := mdi.ProcessReceivedMessage(msg, bradcastCallback) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, bradcastCallback) time.Sleep(time.Second) @@ -287,7 +290,7 @@ func TestMultiDataInterceptor_ProcessReceivedMessageNotValidShouldErrAndNotProce msg := &mock.P2PMessageMock{ DataField: dataField, } - err := mdi.ProcessReceivedMessage(msg, nil) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, nil) time.Sleep(time.Second) @@ -330,7 +333,7 @@ func TestMultiDataInterceptor_ProcessReceivedMessageIsAddressedToOtherShardShoul msg := &mock.P2PMessageMock{ DataField: dataField, } - err := mdi.ProcessReceivedMessage(msg, nil) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, nil) time.Sleep(time.Second) @@ -373,7 +376,7 @@ func TestMultiDataInterceptor_ProcessReceivedMessageOkMessageShouldRetNil(t *tes msg := &mock.P2PMessageMock{ DataField: dataField, } - err := mdi.ProcessReceivedMessage(msg, nil) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, nil) time.Sleep(time.Second) diff --git a/process/interceptors/singleDataInterceptor.go b/process/interceptors/singleDataInterceptor.go index 3f9a01d0188..18e5b6abdd7 100644 --- 
a/process/interceptors/singleDataInterceptor.go +++ b/process/interceptors/singleDataInterceptor.go @@ -43,7 +43,7 @@ func NewSingleDataInterceptor( // ProcessReceivedMessage is the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to) -func (sdi *SingleDataInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error { +func (sdi *SingleDataInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID, _ func(buffToSend []byte)) error { err := preProcessMesage(sdi.throttler, message) if err != nil { return err diff --git a/process/interface.go b/process/interface.go index 962e98a408e..0452ae49d3a 100644 --- a/process/interface.go +++ b/process/interface.go @@ -346,7 +346,7 @@ type BlockChainHookHandler interface { // Interceptor defines what a data interceptor should do // It should also adhere to the p2p.MessageProcessor interface so it can wire to a p2p.Messenger type Interceptor interface { - ProcessReceivedMessage(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error + ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID, broadcastHandler func(buffToSend []byte)) error IsInterfaceNil() bool } @@ -530,3 +530,11 @@ type BootstrapperFromStorage interface { type RequestBlockBodyHandler interface { GetBlockBodyFromPool(headerHandler data.HeaderHandler) (data.BodyHandler, error) } + +// AntifloodProtector defines the behavior of a component that is able to signal that too many events occurred +// on a provided identifier between Reset calls +type AntifloodProtector interface { + TryIncrement(identifier string) bool + Reset() + IsInterfaceNil() bool +} diff --git a/process/mock/headerResolverMock.go b/process/mock/headerResolverMock.go index 12cda07ab34..8c858f74096 100644 --- a/process/mock/headerResolverMock.go +++ b/process/mock/headerResolverMock.go @@ -14,7 +14,7 @@ func (hrm 
*HeaderResolverMock) RequestDataFromHash(hash []byte) error { return hrm.RequestDataFromHashCalled(hash) } -func (hrm *HeaderResolverMock) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error { +func (hrm *HeaderResolverMock) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID, _ func(buffToSend []byte)) error { return hrm.ProcessReceivedMessageCalled(message) } diff --git a/process/mock/interceptorStub.go b/process/mock/interceptorStub.go index 72c81cedacb..279f042dade 100644 --- a/process/mock/interceptorStub.go +++ b/process/mock/interceptorStub.go @@ -8,7 +8,7 @@ type InterceptorStub struct { ProcessReceivedMessageCalled func(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error } -func (is *InterceptorStub) ProcessReceivedMessage(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error { +func (is *InterceptorStub) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID, broadcastHandler func(buffToSend []byte)) error { return is.ProcessReceivedMessageCalled(message, broadcastHandler) } diff --git a/process/sync/metablock_test.go b/process/sync/metablock_test.go index 42e22c75266..8a7e6eabcae 100644 --- a/process/sync/metablock_test.go +++ b/process/sync/metablock_test.go @@ -752,8 +752,14 @@ func TestMetaBootstrap_SyncBlockShouldCallRollBack(t *testing.T) { marshalizer := &mock.MarshalizerMock{} forkDetector := &mock.ForkDetectorMock{} forkDetector.CheckForkCalled = func() *process.ForkInfo { - return &process.ForkInfo{true, 90, 90, []byte("hash")} + return &process.ForkInfo{ + IsDetected: true, + Nonce: 90, + Round: 90, + Hash: []byte("hash"), + } } + forkDetector.RemoveHeadersCalled = func(nonce uint64, hash []byte) { } forkDetector.GetHighestFinalBlockNonceCalled = func() uint64 { diff --git a/process/throttle/antiflood/countersMap.go b/process/throttle/antiflood/countersMap.go new file mode 100644 index 00000000000..9c14936c34c --- /dev/null +++ 
b/process/throttle/antiflood/countersMap.go @@ -0,0 +1,80 @@ +package antiflood + +import ( + "fmt" + "sync" + + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/storage" +) + +const minOperations = 1 + +// countersMap represents a cache of counters used in antiflooding mechanism +type countersMap struct { + mutOperation sync.Mutex + cacher storage.Cacher + maxOperations int +} + +// NewCountersMap creates a new countersMap instance +func NewCountersMap(cacher storage.Cacher, maxOperations int) (*countersMap, error) { + if cacher == nil { + return nil, process.ErrNilCacher + } + if maxOperations < minOperations { + return nil, + fmt.Errorf("%w raised in NewCountersMap, provided %d, minimum %d", + process.ErrInvalidValue, + maxOperations, + minOperations, + ) + } + + return &countersMap{ + cacher: cacher, + maxOperations: maxOperations, + }, nil +} + +// TryIncrement tries to increment the counter value held at "identifier" position +// It returns true if it had succeeded (existing counter value is lower or equal with provided maxOperations) +func (cm *countersMap) TryIncrement(identifier string) bool { + //we need the mutOperation here as the get and put should be done atomically. 
+ // Otherwise we might yield a slightly higher number of false valid increments + cm.mutOperation.Lock() + defer cm.mutOperation.Unlock() + + value, ok := cm.cacher.Get([]byte(identifier)) + if !ok { + cm.cacher.Put([]byte(identifier), 1) + return true + } + + intVal, isInt := value.(int) + if !isInt { + cm.cacher.Put([]byte(identifier), 1) + return true + } + + if intVal < cm.maxOperations { + cm.cacher.Put([]byte(identifier), intVal+1) + return true + } + + return false +} + +// Reset clears all map values +func (cm *countersMap) Reset() { + cm.mutOperation.Lock() + defer cm.mutOperation.Unlock() + + //TODO change this if cacher.Clear() is time consuming + cm.cacher.Clear() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (cm *countersMap) IsInterfaceNil() bool { + return cm == nil +} diff --git a/process/throttle/antiflood/countersMap_test.go b/process/throttle/antiflood/countersMap_test.go new file mode 100644 index 00000000000..c37b09e6f24 --- /dev/null +++ b/process/throttle/antiflood/countersMap_test.go @@ -0,0 +1,243 @@ +package antiflood_test + +import ( + "errors" + "fmt" + "sync" + "testing" + + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/ElrondNetwork/elrond-go/process/throttle/antiflood" + "github.com/stretchr/testify/assert" +) + +//------- NewCountersMap + +func TestNewCountersMap_NilCacherShouldErr(t *testing.T) { + t.Parallel() + + cm, err := antiflood.NewCountersMap(nil, antiflood.MinOperations) + + assert.True(t, check.IfNil(cm)) + assert.Equal(t, process.ErrNilCacher, err) +} + +func TestNewCountersMap_LowerMinOperationsShouldErr(t *testing.T) { + t.Parallel() + + cm, err := antiflood.NewCountersMap(&mock.CacherStub{}, antiflood.MinOperations-1) + + assert.True(t, check.IfNil(cm)) + assert.True(t, errors.Is(err, process.ErrInvalidValue)) +} + +func 
TestNewCountersMap_EqualMinOperationsShouldWork(t *testing.T) { + t.Parallel() + + cm, err := antiflood.NewCountersMap(&mock.CacherStub{}, antiflood.MinOperations) + + assert.False(t, check.IfNil(cm)) + assert.Nil(t, err) +} + +func TestNewCountersMap_HigherMinOperationsShouldWork(t *testing.T) { + t.Parallel() + + cm, err := antiflood.NewCountersMap(&mock.CacherStub{}, antiflood.MinOperations+1) + + assert.False(t, check.IfNil(cm)) + assert.Nil(t, err) +} + +//------- TryIncrement + +func TestCountersMap_TryIncrementIdentifierNotPresentPutOneAndReturnTrue(t *testing.T) { + t.Parallel() + + putWasCalled := false + cm, _ := antiflood.NewCountersMap( + &mock.CacherStub{ + GetCalled: func(key []byte) (value interface{}, ok bool) { + return nil, false + }, + PutCalled: func(key []byte, value interface{}) (evicted bool) { + valInt, isInt := value.(int) + if isInt && valInt == 1 { + putWasCalled = true + } + + return false + }, + }, + antiflood.MinOperations) + + ok := cm.TryIncrement("identifier") + + assert.True(t, ok) + assert.True(t, putWasCalled) +} + +func TestCountersMap_TryIncrementNotIntCounterShouldPutOneAndReturnTrue(t *testing.T) { + t.Parallel() + + putWasCalled := false + cm, _ := antiflood.NewCountersMap( + &mock.CacherStub{ + GetCalled: func(key []byte) (value interface{}, ok bool) { + return "bad value", true + }, + PutCalled: func(key []byte, value interface{}) (evicted bool) { + valInt, isInt := value.(int) + if isInt && valInt == 1 { + putWasCalled = true + } + + return false + }, + }, + antiflood.MinOperations) + + ok := cm.TryIncrement("identifier") + + assert.True(t, ok) + assert.True(t, putWasCalled) +} + +func TestCountersMap_TryIncrementUnderMaxValueShouldIncrementAndReturnTrue(t *testing.T) { + t.Parallel() + + putWasCalled := false + existingValue := antiflood.MinOperations + cm, _ := antiflood.NewCountersMap( + &mock.CacherStub{ + GetCalled: func(key []byte) (value interface{}, ok bool) { + return existingValue, true + }, + PutCalled: 
func(key []byte, value interface{}) (evicted bool) { + valInt, isInt := value.(int) + if isInt && valInt == existingValue+1 { + putWasCalled = true + } + + return false + }, + }, + antiflood.MinOperations+10) + + ok := cm.TryIncrement("identifier") + + assert.True(t, ok) + assert.True(t, putWasCalled) +} + +func TestCountersMap_TryIncrementEqualMaxValueShouldNotPutAndReturnFalse(t *testing.T) { + t.Parallel() + + existingValue := antiflood.MinOperations + 10 + cm, _ := antiflood.NewCountersMap( + &mock.CacherStub{ + GetCalled: func(key []byte) (value interface{}, ok bool) { + return existingValue, true + }, + PutCalled: func(key []byte, value interface{}) (evicted bool) { + assert.Fail(t, "should have not called put") + + return false + }, + }, + antiflood.MinOperations+10) + + ok := cm.TryIncrement("identifier") + + assert.False(t, ok) +} + +func TestCountersMap_TryIncrementOverMaxValueShouldNotPutAndReturnFalse(t *testing.T) { + t.Parallel() + + existingValue := antiflood.MinOperations + 11 + cm, _ := antiflood.NewCountersMap( + &mock.CacherStub{ + GetCalled: func(key []byte) (value interface{}, ok bool) { + return existingValue, true + }, + PutCalled: func(key []byte, value interface{}) (evicted bool) { + assert.Fail(t, "should have not called put") + + return false + }, + }, + antiflood.MinOperations+10) + + ok := cm.TryIncrement("identifier") + + assert.False(t, ok) +} + +func TestCountersMap_TryIncrementShouldWorkConcurrently(t *testing.T) { + t.Parallel() + + cm, _ := antiflood.NewCountersMap( + mock.NewCacherMock(), + antiflood.MinOperations) + numIterations := 1000 + wg := sync.WaitGroup{} + wg.Add(numIterations) + for i := 0; i < numIterations; i++ { + go func(idx int) { + ok := cm.TryIncrement(fmt.Sprintf("%d", idx)) + assert.True(t, ok) + wg.Done() + }(i) + } + + wg.Wait() +} + +//------- Reset + +func TestCountersMap_ResetShouldCallCacherClear(t *testing.T) { + t.Parallel() + + clearCalled := false + cm, _ := antiflood.NewCountersMap( + 
&mock.CacherStub{ + ClearCalled: func() { + clearCalled = true + }, + }, + antiflood.MinOperations) + + cm.Reset() + + assert.True(t, clearCalled) +} + +func TestCountersMap_TryIncrementAndResetShouldWorkConcurrently(t *testing.T) { + t.Parallel() + + cm, _ := antiflood.NewCountersMap( + mock.NewCacherMock(), + antiflood.MinOperations) + numIterations := 1000 + wg := sync.WaitGroup{} + wg.Add(numIterations + numIterations/10) + for i := 0; i < numIterations; i++ { + go func(idx int) { + ok := cm.TryIncrement(fmt.Sprintf("%d", idx)) + assert.True(t, ok) + wg.Done() + }(i) + + if i%10 == 0 { + go func() { + cm.Reset() + wg.Done() + }() + } + } + + wg.Wait() +} diff --git a/process/throttle/antiflood/export_test.go b/process/throttle/antiflood/export_test.go new file mode 100644 index 00000000000..ffc097f713c --- /dev/null +++ b/process/throttle/antiflood/export_test.go @@ -0,0 +1,3 @@ +package antiflood + +const MinOperations = minOperations From 2c349a3dc20266d081231f997f28bd8c8c505588 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Thu, 5 Dec 2019 19:30:59 +0200 Subject: [PATCH 02/35] fixed the rest of the project --- dataRetriever/resolvers/headerResolver_test.go | 2 -- process/interceptors/singleDataInterceptor_test.go | 10 +++++----- process/mock/miniBlocksResolverMock.go | 2 +- process/mock/resolverStub.go | 2 +- process/sync/shardblock_test.go | 10 ++++++++-- 5 files changed, 15 insertions(+), 11 deletions(-) diff --git a/dataRetriever/resolvers/headerResolver_test.go b/dataRetriever/resolvers/headerResolver_test.go index e95d725998a..8e1f0c92106 100644 --- a/dataRetriever/resolvers/headerResolver_test.go +++ b/dataRetriever/resolvers/headerResolver_test.go @@ -13,8 +13,6 @@ import ( "github.com/stretchr/testify/assert" ) -var fromConnectedPeerId = p2p.PeerID("from connected peer Id") - //------- NewHeaderResolver func TestNewHeaderResolver_NilSenderResolverShouldErr(t *testing.T) { diff --git a/process/interceptors/singleDataInterceptor_test.go 
b/process/interceptors/singleDataInterceptor_test.go index 853c5113e61..07ba7b4e184 100644 --- a/process/interceptors/singleDataInterceptor_test.go +++ b/process/interceptors/singleDataInterceptor_test.go @@ -103,7 +103,7 @@ func TestSingleDataInterceptor_ProcessReceivedMessageNilMessageShouldErr(t *test &mock.InterceptorThrottlerStub{}, ) - err := sdi.ProcessReceivedMessage(nil, nil) + err := sdi.ProcessReceivedMessage(nil, fromConnectedPeerId, nil) assert.Equal(t, process.ErrNilMessage, err) } @@ -129,7 +129,7 @@ func TestSingleDataInterceptor_ProcessReceivedMessageFactoryCreationErrorShouldE msg := &mock.P2PMessageMock{ DataField: []byte("data to be processed"), } - err := sdi.ProcessReceivedMessage(msg, nil) + err := sdi.ProcessReceivedMessage(msg, fromConnectedPeerId, nil) assert.Equal(t, errExpected, err) } @@ -163,7 +163,7 @@ func TestSingleDataInterceptor_ProcessReceivedMessageIsNotValidShouldNotCallProc msg := &mock.P2PMessageMock{ DataField: []byte("data to be processed"), } - err := sdi.ProcessReceivedMessage(msg, nil) + err := sdi.ProcessReceivedMessage(msg, fromConnectedPeerId, nil) time.Sleep(time.Second) @@ -202,7 +202,7 @@ func TestSingleDataInterceptor_ProcessReceivedMessageIsNotForCurrentShardShouldN msg := &mock.P2PMessageMock{ DataField: []byte("data to be processed"), } - err := sdi.ProcessReceivedMessage(msg, nil) + err := sdi.ProcessReceivedMessage(msg, fromConnectedPeerId, nil) time.Sleep(time.Second) @@ -241,7 +241,7 @@ func TestSingleDataInterceptor_ProcessReceivedMessageShouldWork(t *testing.T) { msg := &mock.P2PMessageMock{ DataField: []byte("data to be processed"), } - err := sdi.ProcessReceivedMessage(msg, nil) + err := sdi.ProcessReceivedMessage(msg, fromConnectedPeerId, nil) time.Sleep(time.Second) diff --git a/process/mock/miniBlocksResolverMock.go b/process/mock/miniBlocksResolverMock.go index 9b51a31dc49..6c30bc4a27b 100644 --- a/process/mock/miniBlocksResolverMock.go +++ b/process/mock/miniBlocksResolverMock.go @@ -21,7 +21,7 @@ 
func (hrm *MiniBlocksResolverMock) RequestDataFromHashArray(hashes [][]byte) err return hrm.RequestDataFromHashArrayCalled(hashes) } -func (hrm *MiniBlocksResolverMock) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error { +func (hrm *MiniBlocksResolverMock) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID, _ func(buffToSend []byte)) error { return hrm.ProcessReceivedMessageCalled(message) } diff --git a/process/mock/resolverStub.go b/process/mock/resolverStub.go index cae00d52cde..588215cc51b 100644 --- a/process/mock/resolverStub.go +++ b/process/mock/resolverStub.go @@ -13,7 +13,7 @@ func (rs *ResolverStub) RequestDataFromHash(hash []byte) error { return rs.RequestDataFromHashCalled(hash) } -func (rs *ResolverStub) ProcessReceivedMessage(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error { +func (rs *ResolverStub) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID, broadcastHandler func(buffToSend []byte)) error { return rs.ProcessReceivedMessageCalled(message, broadcastHandler) } diff --git a/process/sync/shardblock_test.go b/process/sync/shardblock_test.go index a62f3e079b1..56c034a4d00 100644 --- a/process/sync/shardblock_test.go +++ b/process/sync/shardblock_test.go @@ -1041,7 +1041,13 @@ func TestBootstrap_SyncBlockShouldCallForkChoice(t *testing.T) { marshalizer := &mock.MarshalizerMock{} forkDetector := &mock.ForkDetectorMock{} forkDetector.CheckForkCalled = func() *process.ForkInfo { - return &process.ForkInfo{true, 90, 90, []byte("hash")} + return &process.ForkInfo{ + IsDetected: true, + Nonce: 90, + Round: 90, + Hash: []byte("hash"), + } + } forkDetector.RemoveHeadersCalled = func(nonce uint64, hash []byte) { } @@ -1412,7 +1418,7 @@ func TestBootstrap_SyncShouldSyncOneBlock(t *testing.T) { return nil, nil } - rnd, _ := round.NewRound(time.Now(), time.Now().Add(200*time.Millisecond), time.Duration(100*time.Millisecond), &mock.SyncTimerMock{}) + rnd, _ := round.NewRound(time.Now(), 
time.Now().Add(200*time.Millisecond), 100*time.Millisecond, &mock.SyncTimerMock{}) bs, _ := sync.NewShardBootstrap( pools, From e8d78384e10789d1b44da948b2f808765cc66c02 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Thu, 5 Dec 2019 21:27:29 +0200 Subject: [PATCH 03/35] added one more integration test --- .../p2p/antiflood/antiflooding_test.go | 68 +++++++++++++++++-- integrationTests/testInitializer.go | 31 +++++++++ 2 files changed, 94 insertions(+), 5 deletions(-) diff --git a/integrationTests/p2p/antiflood/antiflooding_test.go b/integrationTests/p2p/antiflood/antiflooding_test.go index 43221175aed..c776e1fadc2 100644 --- a/integrationTests/p2p/antiflood/antiflooding_test.go +++ b/integrationTests/p2p/antiflood/antiflooding_test.go @@ -15,7 +15,8 @@ import ( var durationBootstrapingTime = 2 * time.Second -// TestAntifloodWithMessagesFromTheSamePeer tests what happens if a peer decide to send a large number of transactions +// TestAntifloodWithMessagesFromTheSamePeer tests what happens if a peer decide to send a large number of messages +// all originating from its peer ID // All directed peers should prevent the flooding to the rest of the network and process only a limited number of messages func TestAntifloodWithMessagesFromTheSamePeer(t *testing.T) { if testing.Short() { @@ -64,14 +65,71 @@ func TestAntifloodWithMessagesFromTheSamePeer(t *testing.T) { isFlooding.Store(false) + checkMessagesOnPeers(t, peers, interceptors, uint64(maxMumProcessMessages), floodedIdxes, protectedIdexes) +} + +// TestAntifloodWithMessagesFromOtherPeers tests what happens if a peer decide to send a number of messages +// originating form other peer IDs. 
Since this is exceptionally hard to accomplish in integration tests because it needs +// 3-rd party library tweaking, the test is reduced to 10 peers generating 1 message through one peer that acts as a flooder +// All directed peers should prevent the flooding to the rest of the network and process only a limited number of messages +func TestAntifloodWithMessagesFromOtherPeers(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + peers, err := integrationTests.CreateFixedNetworkOf14Peers() + assert.Nil(t, err) + + defer func() { + integrationTests.ClosePeers(peers) + }() + + //peer 2 acts as a flooder that propagates 10 messages from 10 different peers. Peer 1 should prevent flooding to peer 0 + // (check integrationTests.CreateFixedNetworkOf14Peers function) + topic := "test_topic" + broadcastMessageDuration := time.Second * 2 + maxMumProcessMessages := 5 + interceptors, err := createTopicsAndMockInterceptors(peers, topic, maxMumProcessMessages) + assert.Nil(t, err) + + fmt.Println("bootstrapping nodes") + time.Sleep(durationBootstrapingTime) + + flooderIdxes := []int{2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13} + floodedIdxes := []int{1} + protectedIdexes := []int{0} + + //flooders will deactivate their flooding mechanism as to be able to flood the network + for _, idx := range flooderIdxes { + interceptors[idx].CountersMap = nil + } + + //generate a message from connected peers of the main flooder (peer 2) + fmt.Println("flooding the network") + for i := 3; i <= 13; i++ { + peers[i].Broadcast(topic, []byte("floodMessage")) + } + time.Sleep(broadcastMessageDuration) + + checkMessagesOnPeers(t, peers, interceptors, uint64(maxMumProcessMessages), floodedIdxes, protectedIdexes) +} + +func checkMessagesOnPeers( + t *testing.T, + peers []p2p.Messenger, + interceptors []*messageProcessor, + maxMumProcessMessages uint64, + floodedIdxes []int, + protectedIdexes []int, +) { checkFunctionForFloodedPeers := func(interceptor *messageProcessor) { - 
assert.Equal(t, uint64(maxMumProcessMessages), interceptor.MessagesProcessed()) + assert.Equal(t, maxMumProcessMessages, interceptor.MessagesProcessed()) //can not precisely determine how many message have been received - assert.True(t, uint64(maxMumProcessMessages) < interceptor.MessagesReceived()) + assert.True(t, maxMumProcessMessages < interceptor.MessagesReceived()) } checkFunctionForProtectedPeers := func(interceptor *messageProcessor) { - assert.Equal(t, uint64(maxMumProcessMessages), interceptor.MessagesProcessed()) - assert.Equal(t, uint64(maxMumProcessMessages), interceptor.MessagesReceived()) + assert.Equal(t, maxMumProcessMessages, interceptor.MessagesProcessed()) + assert.Equal(t, maxMumProcessMessages, interceptor.MessagesReceived()) } fmt.Println("checking flooded peers") diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index a1338765d79..9f0ca045b4c 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -139,6 +139,37 @@ func CreateFixedNetworkOf7Peers() ([]p2p.Messenger, error) { return peers, nil } +// CreateFixedNetworkOf14Peers assembles a network as following: +// +// 0 +// | +// 1 +// | +// +--+--+--+--+--2--+--+--+--+--+ +// | | | | | | | | | | | +// 3 4 5 6 7 8 9 10 11 12 13 +func CreateFixedNetworkOf14Peers() ([]p2p.Messenger, error) { + numPeers := 13 + peers := make([]p2p.Messenger, numPeers+1) + + for i := 0; i <= numPeers; i++ { + peers[i] = CreateMessengerWithNoDiscovery(context.Background()) + } + + connections := map[int][]int{ + 0: {1}, + 1: {2}, + 2: {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, + } + + err := createConnections(peers, connections) + if err != nil { + return nil, err + } + + return peers, nil +} + func createConnections(peers []p2p.Messenger, connections map[int][]int) error { for pid, connectTo := range connections { err := connectPeerToOthers(peers, pid, connectTo) From c7952d8bb02b8015002049b7f2530a0c10c67df2 Mon Sep 17 00:00:00 2001 From: 
iulianpascalau Date: Fri, 6 Dec 2019 22:15:33 +0200 Subject: [PATCH 04/35] refactor anti flooding component to take into account the size of the message --- .../p2p/antiflood/antiflooding_test.go | 3 +- .../p2p/antiflood/messageProcessor.go | 6 +- process/interface.go | 6 +- process/throttle/antiflood/countersMap.go | 80 ----- .../throttle/antiflood/countersMap_test.go | 243 --------------- process/throttle/antiflood/export_test.go | 3 - .../throttle/antiflood/quotaFloodPreventer.go | 113 +++++++ .../antiflood/quotaFloodPreventer_test.go | 284 ++++++++++++++++++ 8 files changed, 405 insertions(+), 333 deletions(-) delete mode 100644 process/throttle/antiflood/countersMap.go delete mode 100644 process/throttle/antiflood/countersMap_test.go delete mode 100644 process/throttle/antiflood/export_test.go create mode 100644 process/throttle/antiflood/quotaFloodPreventer.go create mode 100644 process/throttle/antiflood/quotaFloodPreventer_test.go diff --git a/integrationTests/p2p/antiflood/antiflooding_test.go b/integrationTests/p2p/antiflood/antiflooding_test.go index c776e1fadc2..88996904b43 100644 --- a/integrationTests/p2p/antiflood/antiflooding_test.go +++ b/integrationTests/p2p/antiflood/antiflooding_test.go @@ -14,6 +14,7 @@ import ( ) var durationBootstrapingTime = 2 * time.Second +var maxSize = 1 << 20 //1MB // TestAntifloodWithMessagesFromTheSamePeer tests what happens if a peer decide to send a large number of messages // all originating from its peer ID @@ -151,7 +152,7 @@ func createTopicsAndMockInterceptors(peers []p2p.Messenger, topic string, maxNum antifloodPool, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) interceptors[idx] = newMessageProcessor() - interceptors[idx].CountersMap, _ = antiflood.NewCountersMap(antifloodPool, maxNumMessages) + interceptors[idx].CountersMap, _ = antiflood.NewQuotaFloodPreventer(antifloodPool, uint32(maxNumMessages), uint64(maxSize)) err = p.RegisterMessageProcessor(topic, interceptors[idx]) if 
err != nil { return nil, fmt.Errorf("%w, pid: %s", err, p.ID()) diff --git a/integrationTests/p2p/antiflood/messageProcessor.go b/integrationTests/p2p/antiflood/messageProcessor.go index 273af2b1147..75b9ccd894d 100644 --- a/integrationTests/p2p/antiflood/messageProcessor.go +++ b/integrationTests/p2p/antiflood/messageProcessor.go @@ -13,7 +13,7 @@ type messageProcessor struct { numMessagesReceived uint64 mutMessages sync.Mutex messages map[p2p.PeerID][]p2p.MessageP2P - CountersMap process.AntifloodProtector + CountersMap process.FloodPreventer } func newMessageProcessor() *messageProcessor { @@ -27,14 +27,14 @@ func (mp *messageProcessor) ProcessReceivedMessage(message p2p.MessageP2P, fromC if mp.CountersMap != nil { //protect from directly connected peer - ok := mp.CountersMap.TryIncrement(string(fromConnectedPeer)) + ok := mp.CountersMap.TryIncrement(string(fromConnectedPeer), uint64(len(message.Data()))) if !ok { return fmt.Errorf("system flooded") } if fromConnectedPeer != message.Peer() { //protect from the flooding messages that originate from the same source but come from different peers - ok = mp.CountersMap.TryIncrement(string(message.Peer())) + ok = mp.CountersMap.TryIncrement(string(message.Peer()), uint64(len(message.Data()))) if !ok { return fmt.Errorf("system flooded") } diff --git a/process/interface.go b/process/interface.go index 0452ae49d3a..6bc98dff504 100644 --- a/process/interface.go +++ b/process/interface.go @@ -531,10 +531,10 @@ type RequestBlockBodyHandler interface { GetBlockBodyFromPool(headerHandler data.HeaderHandler) (data.BodyHandler, error) } -// AntifloodProtector defines the behavior of a component that is able to signal that too many events occurred +// FloodPreventer defines the behavior of a component that is able to signal that too many events occurred // on a provided identifier between Reset calls -type AntifloodProtector interface { - TryIncrement(identifier string) bool +type FloodPreventer interface { + 
TryIncrement(identifier string, size uint64) bool Reset() IsInterfaceNil() bool } diff --git a/process/throttle/antiflood/countersMap.go b/process/throttle/antiflood/countersMap.go deleted file mode 100644 index 9c14936c34c..00000000000 --- a/process/throttle/antiflood/countersMap.go +++ /dev/null @@ -1,80 +0,0 @@ -package antiflood - -import ( - "fmt" - "sync" - - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/storage" -) - -const minOperations = 1 - -// countersMap represents a cache of counters used in antiflooding mechanism -type countersMap struct { - mutOperation sync.Mutex - cacher storage.Cacher - maxOperations int -} - -// NewCountersMap creates a new countersMap instance -func NewCountersMap(cacher storage.Cacher, maxOperations int) (*countersMap, error) { - if cacher == nil { - return nil, process.ErrNilCacher - } - if maxOperations < minOperations { - return nil, - fmt.Errorf("%w raised in NewCountersMap, provided %d, minimum %d", - process.ErrInvalidValue, - maxOperations, - minOperations, - ) - } - - return &countersMap{ - cacher: cacher, - maxOperations: maxOperations, - }, nil -} - -// TryIncrement tries to increment the counter value held at "identifier" position -// It returns true if it had succeeded (existing counter value is lower or equal with provided maxOperations) -func (cm *countersMap) TryIncrement(identifier string) bool { - //we need the mutOperation here as the get and put should be done atomically. 
- // Otherwise we might yield a slightly higher number of false valid increments - cm.mutOperation.Lock() - defer cm.mutOperation.Unlock() - - value, ok := cm.cacher.Get([]byte(identifier)) - if !ok { - cm.cacher.Put([]byte(identifier), 1) - return true - } - - intVal, isInt := value.(int) - if !isInt { - cm.cacher.Put([]byte(identifier), 1) - return true - } - - if intVal < cm.maxOperations { - cm.cacher.Put([]byte(identifier), intVal+1) - return true - } - - return false -} - -// Reset clears all map values -func (cm *countersMap) Reset() { - cm.mutOperation.Lock() - defer cm.mutOperation.Unlock() - - //TODO change this if cacher.Clear() is time consuming - cm.cacher.Clear() -} - -// IsInterfaceNil returns true if there is no value under the interface -func (cm *countersMap) IsInterfaceNil() bool { - return cm == nil -} diff --git a/process/throttle/antiflood/countersMap_test.go b/process/throttle/antiflood/countersMap_test.go deleted file mode 100644 index c37b09e6f24..00000000000 --- a/process/throttle/antiflood/countersMap_test.go +++ /dev/null @@ -1,243 +0,0 @@ -package antiflood_test - -import ( - "errors" - "fmt" - "sync" - "testing" - - "github.com/ElrondNetwork/elrond-go/core/check" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/mock" - "github.com/ElrondNetwork/elrond-go/process/throttle/antiflood" - "github.com/stretchr/testify/assert" -) - -//------- NewCountersMap - -func TestNewCountersMap_NilCacherShouldErr(t *testing.T) { - t.Parallel() - - cm, err := antiflood.NewCountersMap(nil, antiflood.MinOperations) - - assert.True(t, check.IfNil(cm)) - assert.Equal(t, process.ErrNilCacher, err) -} - -func TestNewCountersMap_LowerMinOperationsShouldErr(t *testing.T) { - t.Parallel() - - cm, err := antiflood.NewCountersMap(&mock.CacherStub{}, antiflood.MinOperations-1) - - assert.True(t, check.IfNil(cm)) - assert.True(t, errors.Is(err, process.ErrInvalidValue)) -} - -func 
TestNewCountersMap_EqualMinOperationsShouldWork(t *testing.T) { - t.Parallel() - - cm, err := antiflood.NewCountersMap(&mock.CacherStub{}, antiflood.MinOperations) - - assert.False(t, check.IfNil(cm)) - assert.Nil(t, err) -} - -func TestNewCountersMap_HigherMinOperationsShouldWork(t *testing.T) { - t.Parallel() - - cm, err := antiflood.NewCountersMap(&mock.CacherStub{}, antiflood.MinOperations+1) - - assert.False(t, check.IfNil(cm)) - assert.Nil(t, err) -} - -//------- TryIncrement - -func TestCountersMap_TryIncrementIdentifierNotPresentPutOneAndReturnTrue(t *testing.T) { - t.Parallel() - - putWasCalled := false - cm, _ := antiflood.NewCountersMap( - &mock.CacherStub{ - GetCalled: func(key []byte) (value interface{}, ok bool) { - return nil, false - }, - PutCalled: func(key []byte, value interface{}) (evicted bool) { - valInt, isInt := value.(int) - if isInt && valInt == 1 { - putWasCalled = true - } - - return false - }, - }, - antiflood.MinOperations) - - ok := cm.TryIncrement("identifier") - - assert.True(t, ok) - assert.True(t, putWasCalled) -} - -func TestCountersMap_TryIncrementNotIntCounterShouldPutOneAndReturnTrue(t *testing.T) { - t.Parallel() - - putWasCalled := false - cm, _ := antiflood.NewCountersMap( - &mock.CacherStub{ - GetCalled: func(key []byte) (value interface{}, ok bool) { - return "bad value", true - }, - PutCalled: func(key []byte, value interface{}) (evicted bool) { - valInt, isInt := value.(int) - if isInt && valInt == 1 { - putWasCalled = true - } - - return false - }, - }, - antiflood.MinOperations) - - ok := cm.TryIncrement("identifier") - - assert.True(t, ok) - assert.True(t, putWasCalled) -} - -func TestCountersMap_TryIncrementUnderMaxValueShouldIncrementAndReturnTrue(t *testing.T) { - t.Parallel() - - putWasCalled := false - existingValue := antiflood.MinOperations - cm, _ := antiflood.NewCountersMap( - &mock.CacherStub{ - GetCalled: func(key []byte) (value interface{}, ok bool) { - return existingValue, true - }, - PutCalled: 
func(key []byte, value interface{}) (evicted bool) { - valInt, isInt := value.(int) - if isInt && valInt == existingValue+1 { - putWasCalled = true - } - - return false - }, - }, - antiflood.MinOperations+10) - - ok := cm.TryIncrement("identifier") - - assert.True(t, ok) - assert.True(t, putWasCalled) -} - -func TestCountersMap_TryIncrementEqualMaxValueShouldNotPutAndReturnFalse(t *testing.T) { - t.Parallel() - - existingValue := antiflood.MinOperations + 10 - cm, _ := antiflood.NewCountersMap( - &mock.CacherStub{ - GetCalled: func(key []byte) (value interface{}, ok bool) { - return existingValue, true - }, - PutCalled: func(key []byte, value interface{}) (evicted bool) { - assert.Fail(t, "should have not called put") - - return false - }, - }, - antiflood.MinOperations+10) - - ok := cm.TryIncrement("identifier") - - assert.False(t, ok) -} - -func TestCountersMap_TryIncrementOverMaxValueShouldNotPutAndReturnFalse(t *testing.T) { - t.Parallel() - - existingValue := antiflood.MinOperations + 11 - cm, _ := antiflood.NewCountersMap( - &mock.CacherStub{ - GetCalled: func(key []byte) (value interface{}, ok bool) { - return existingValue, true - }, - PutCalled: func(key []byte, value interface{}) (evicted bool) { - assert.Fail(t, "should have not called put") - - return false - }, - }, - antiflood.MinOperations+10) - - ok := cm.TryIncrement("identifier") - - assert.False(t, ok) -} - -func TestCountersMap_TryIncrementShouldWorkConcurrently(t *testing.T) { - t.Parallel() - - cm, _ := antiflood.NewCountersMap( - mock.NewCacherMock(), - antiflood.MinOperations) - numIterations := 1000 - wg := sync.WaitGroup{} - wg.Add(numIterations) - for i := 0; i < numIterations; i++ { - go func(idx int) { - ok := cm.TryIncrement(fmt.Sprintf("%d", idx)) - assert.True(t, ok) - wg.Done() - }(i) - } - - wg.Wait() -} - -//------- Reset - -func TestCountersMap_ResetShouldCallCacherClear(t *testing.T) { - t.Parallel() - - clearCalled := false - cm, _ := antiflood.NewCountersMap( - 
&mock.CacherStub{ - ClearCalled: func() { - clearCalled = true - }, - }, - antiflood.MinOperations) - - cm.Reset() - - assert.True(t, clearCalled) -} - -func TestCountersMap_TryIncrementAndResetShouldWorkConcurrently(t *testing.T) { - t.Parallel() - - cm, _ := antiflood.NewCountersMap( - mock.NewCacherMock(), - antiflood.MinOperations) - numIterations := 1000 - wg := sync.WaitGroup{} - wg.Add(numIterations + numIterations/10) - for i := 0; i < numIterations; i++ { - go func(idx int) { - ok := cm.TryIncrement(fmt.Sprintf("%d", idx)) - assert.True(t, ok) - wg.Done() - }(i) - - if i%10 == 0 { - go func() { - cm.Reset() - wg.Done() - }() - } - } - - wg.Wait() -} diff --git a/process/throttle/antiflood/export_test.go b/process/throttle/antiflood/export_test.go deleted file mode 100644 index ffc097f713c..00000000000 --- a/process/throttle/antiflood/export_test.go +++ /dev/null @@ -1,3 +0,0 @@ -package antiflood - -const MinOperations = minOperations diff --git a/process/throttle/antiflood/quotaFloodPreventer.go b/process/throttle/antiflood/quotaFloodPreventer.go new file mode 100644 index 00000000000..38e1fc54a21 --- /dev/null +++ b/process/throttle/antiflood/quotaFloodPreventer.go @@ -0,0 +1,113 @@ +package antiflood + +import ( + "fmt" + "sync" + + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/storage" +) + +const minMessages = 1 +const minTotalSize = 1 //1Byte + +type quota struct { + numMessages uint32 + totalSize uint64 +} + +// qoutaFloodPreventer represents a cache of quotas per peer used in antiflooding mechanism +type quotaFloodPreventer struct { + mutOperation sync.Mutex + cacher storage.Cacher + maxMessages uint32 + maxSize uint64 +} + +// NewQuotaFloodPreventer creates a new flood preventer based on quota / peer +func NewQuotaFloodPreventer( + cacher storage.Cacher, + maxMessagesPerPeer uint32, + maxTotalSizePerPeer uint64, +) (*quotaFloodPreventer, error) { + + if cacher == nil { + return nil, process.ErrNilCacher + } + 
if maxMessagesPerPeer < minMessages { + return nil, fmt.Errorf("%w raised in NewCountersMap, maxMessages: provided %d, minimum %d", + process.ErrInvalidValue, + maxMessagesPerPeer, + minMessages, + ) + } + if maxTotalSizePerPeer < minTotalSize { + return nil, fmt.Errorf("%w raised in NewCountersMap, maxTotalSize: provided %d, minimum %d", + process.ErrInvalidValue, + maxTotalSizePerPeer, + minTotalSize, + ) + } + + return "aFloodPreventer{ + cacher: cacher, + maxMessages: maxMessagesPerPeer, + maxSize: maxTotalSizePerPeer, + }, nil +} + +// TryIncrement tries to increment the counter values held at "identifier" position +// It returns true if it had succeeded incrementing (existing counter value is lower or equal with provided maxOperations) +func (qfp *quotaFloodPreventer) TryIncrement(identifier string, size uint64) bool { + //we need the mutOperation here as the get and put should be done atomically. + // Otherwise we might yield a slightly higher number of false valid increments + qfp.mutOperation.Lock() + defer qfp.mutOperation.Unlock() + + valueQuota, ok := qfp.cacher.Get([]byte(identifier)) + if !ok { + qfp.putDefaultQuota(qfp.cacher, identifier, size) + + return true + } + + q, isQuota := valueQuota.(*quota) + if !isQuota { + qfp.putDefaultQuota(qfp.cacher, identifier, size) + + return true + } + + q.numMessages++ + q.totalSize += size + isQuotaReached := q.numMessages > qfp.maxMessages || q.totalSize > qfp.maxSize + if !isQuotaReached { + qfp.cacher.Put([]byte(identifier), q) + + return true + } + + return false +} + +func (qfp *quotaFloodPreventer) putDefaultQuota(cacher storage.Cacher, identifier string, size uint64) { + q := "a{ + numMessages: 1, + totalSize: size, + } + qfp.cacher.Put([]byte(identifier), q) +} + +// Reset clears all map values +func (qfp *quotaFloodPreventer) Reset() { + qfp.mutOperation.Lock() + defer qfp.mutOperation.Unlock() + + //TODO change this if cacher.Clear() is time consuming + qfp.cacher.Clear() +} + +// IsInterfaceNil 
returns true if there is no value under the interface +func (qfp *quotaFloodPreventer) IsInterfaceNil() bool { + return qfp == nil +} diff --git a/process/throttle/antiflood/quotaFloodPreventer_test.go b/process/throttle/antiflood/quotaFloodPreventer_test.go new file mode 100644 index 00000000000..289500d11af --- /dev/null +++ b/process/throttle/antiflood/quotaFloodPreventer_test.go @@ -0,0 +1,284 @@ +package antiflood + +import ( + "errors" + "fmt" + "sync" + "testing" + + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/stretchr/testify/assert" +) + +//------- NewQuotaFloodPreventer + +func TestNewQuotaFloodPreventer_NilCacherShouldErr(t *testing.T) { + t.Parallel() + + qfp, err := NewQuotaFloodPreventer(nil, minMessages, minTotalSize) + + assert.True(t, check.IfNil(qfp)) + assert.Equal(t, process.ErrNilCacher, err) +} + +func TestNewQuotaFloodPreventer_LowerMinMessagesShouldErr(t *testing.T) { + t.Parallel() + + qfp, err := NewQuotaFloodPreventer(&mock.CacherStub{}, minMessages-1, minTotalSize) + + assert.True(t, check.IfNil(qfp)) + assert.True(t, errors.Is(err, process.ErrInvalidValue)) +} + +func TestNewQuotaFloodPreventer_LowerMinSizeShouldErr(t *testing.T) { + t.Parallel() + + qfp, err := NewQuotaFloodPreventer(&mock.CacherStub{}, minMessages, minTotalSize-1) + + assert.True(t, check.IfNil(qfp)) + assert.True(t, errors.Is(err, process.ErrInvalidValue)) +} + +func TestNewQuotaFloodPreventer_ShouldWork(t *testing.T) { + t.Parallel() + + qfp, err := NewQuotaFloodPreventer(&mock.CacherStub{}, minMessages, minTotalSize) + + assert.False(t, check.IfNil(qfp)) + assert.Nil(t, err) +} + +//------- TryIncrement + +func TestNewQuotaFloodPreventer_TryIncrementIdentifierNotPresentPutQuotaAndReturnTrue(t *testing.T) { + t.Parallel() + + putWasCalled := false + size := uint64(minTotalSize * 5) + qfp, _ := NewQuotaFloodPreventer( + &mock.CacherStub{ + GetCalled: 
func(key []byte) (value interface{}, ok bool) { + return nil, false + }, + PutCalled: func(key []byte, value interface{}) (evicted bool) { + q, isQuota := value.(*quota) + if !isQuota { + return + } + if q.numMessages == 1 && q.totalSize == size { + putWasCalled = true + } + + return + }, + }, + minMessages*4, + minTotalSize*10, + ) + + ok := qfp.TryIncrement("identifier", size) + + assert.True(t, ok) + assert.True(t, putWasCalled) +} + +func TestNewQuotaFloodPreventer_TryIncrementNotQuotaSavedInCacheShouldPutQuotaAndReturnTrue(t *testing.T) { + t.Parallel() + + putWasCalled := false + size := uint64(minTotalSize * 5) + qfp, _ := NewQuotaFloodPreventer( + &mock.CacherStub{ + GetCalled: func(key []byte) (value interface{}, ok bool) { + return "bad value", true + }, + PutCalled: func(key []byte, value interface{}) (evicted bool) { + q, isQuota := value.(*quota) + if !isQuota { + return + } + if q.numMessages == 1 && q.totalSize == size { + putWasCalled = true + } + + return + }, + }, + minMessages*4, + minTotalSize*10, + ) + + ok := qfp.TryIncrement("identifier", size) + + assert.True(t, ok) + assert.True(t, putWasCalled) +} + +func TestNewQuotaFloodPreventer_TryIncrementUnderMaxValuesShouldIncrementAndReturnTrue(t *testing.T) { + t.Parallel() + + putWasCalled := false + existingSize := uint64(minTotalSize * 5) + existingMessages := uint32(minMessages * 2) + existingQuota := "a{ + numMessages: existingMessages, + totalSize: existingSize, + } + size := uint64(minTotalSize * 2) + qfp, _ := NewQuotaFloodPreventer( + &mock.CacherStub{ + GetCalled: func(key []byte) (value interface{}, ok bool) { + return existingQuota, true + }, + PutCalled: func(key []byte, value interface{}) (evicted bool) { + q, isQuota := value.(*quota) + if !isQuota { + return + } + if q.numMessages == existingMessages+1 && q.totalSize == existingSize+size { + putWasCalled = true + } + + return + }, + }, + minMessages*4, + minTotalSize*10, + ) + + ok := qfp.TryIncrement("identifier", size) + + 
assert.True(t, ok) + assert.True(t, putWasCalled) +} + +func TestNewQuotaFloodPreventer_TryIncrementOverMaxNumMessagesShouldNotPutAndReturnFalse(t *testing.T) { + t.Parallel() + + existingMessages := uint32(minMessages + 11) + existingSize := uint64(minTotalSize * 3) + existingQuota := "a{ + numMessages: existingMessages, + totalSize: existingSize, + } + qfp, _ := NewQuotaFloodPreventer( + &mock.CacherStub{ + GetCalled: func(key []byte) (value interface{}, ok bool) { + return existingQuota, true + }, + PutCalled: func(key []byte, value interface{}) (evicted bool) { + assert.Fail(t, "should have not called put") + + return false + }, + }, + minMessages*4, + minTotalSize*10, + ) + + ok := qfp.TryIncrement("identifier", minTotalSize) + + assert.False(t, ok) +} + +func TestNewQuotaFloodPreventer_TryIncrementOverMaxSizeShouldNotPutAndReturnFalse(t *testing.T) { + t.Parallel() + + existingMessages := uint32(minMessages) + existingSize := uint64(minTotalSize * 11) + existingQuota := "a{ + numMessages: existingMessages, + totalSize: existingSize, + } + qfp, _ := NewQuotaFloodPreventer( + &mock.CacherStub{ + GetCalled: func(key []byte) (value interface{}, ok bool) { + return existingQuota, true + }, + PutCalled: func(key []byte, value interface{}) (evicted bool) { + assert.Fail(t, "should have not called put") + + return false + }, + }, + minMessages*4, + minTotalSize*10, + ) + + ok := qfp.TryIncrement("identifier", minTotalSize) + + assert.False(t, ok) +} + +func TestCountersMap_TryIncrementShouldWorkConcurrently(t *testing.T) { + t.Parallel() + + qfp, _ := NewQuotaFloodPreventer( + mock.NewCacherMock(), + minMessages, + minTotalSize) + numIterations := 1000 + wg := sync.WaitGroup{} + wg.Add(numIterations) + for i := 0; i < numIterations; i++ { + go func(idx int) { + ok := qfp.TryIncrement(fmt.Sprintf("%d", idx), minTotalSize) + assert.True(t, ok) + wg.Done() + }(i) + } + + wg.Wait() +} + +//------- Reset + +func TestCountersMap_ResetShouldCallCacherClear(t *testing.T) { + 
t.Parallel() + + clearCalled := false + qfp, _ := NewQuotaFloodPreventer( + &mock.CacherStub{ + ClearCalled: func() { + clearCalled = true + }, + }, + minTotalSize, + minMessages, + ) + + qfp.Reset() + + assert.True(t, clearCalled) +} + +func TestCountersMap_TryIncrementAndResetShouldWorkConcurrently(t *testing.T) { + t.Parallel() + + qfp, _ := NewQuotaFloodPreventer( + mock.NewCacherMock(), + minMessages, + minTotalSize, + ) + numIterations := 1000 + wg := sync.WaitGroup{} + wg.Add(numIterations + numIterations/10) + for i := 0; i < numIterations; i++ { + go func(idx int) { + ok := qfp.TryIncrement(fmt.Sprintf("%d", idx), minTotalSize) + assert.True(t, ok) + wg.Done() + }(i) + + if i%10 == 0 { + go func() { + qfp.Reset() + wg.Done() + }() + } + } + + wg.Wait() +} From 4aa456eb197b0f3f10ec8439751db6092b615de5 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Sat, 7 Dec 2019 16:06:29 +0200 Subject: [PATCH 05/35] refactored integration tests --- .../p2p/antiflood/antiflooding_test.go | 38 +++++++------- .../p2p/antiflood/messageProcessor.go | 50 ++++++++++--------- .../throttle/antiflood/quotaFloodPreventer.go | 2 +- 3 files changed, 47 insertions(+), 43 deletions(-) diff --git a/integrationTests/p2p/antiflood/antiflooding_test.go b/integrationTests/p2p/antiflood/antiflooding_test.go index 88996904b43..22cd25a838b 100644 --- a/integrationTests/p2p/antiflood/antiflooding_test.go +++ b/integrationTests/p2p/antiflood/antiflooding_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/integrationTests" "github.com/ElrondNetwork/elrond-go/p2p" "github.com/ElrondNetwork/elrond-go/process/throttle/antiflood" @@ -14,7 +15,7 @@ import ( ) var durationBootstrapingTime = 2 * time.Second -var maxSize = 1 << 20 //1MB +var maxSize = uint64(1 << 20) //1MB // TestAntifloodWithMessagesFromTheSamePeer tests what happens if a peer decide to send a large number of messages // all originating from its peer ID @@ 
-36,7 +37,7 @@ func TestAntifloodWithMessagesFromTheSamePeer(t *testing.T) { topic := "test_topic" broadcastMessageDuration := time.Second * 2 - maxMumProcessMessages := 5 + maxMumProcessMessages := uint32(5) interceptors, err := createTopicsAndMockInterceptors(peers, topic, maxMumProcessMessages) assert.Nil(t, err) @@ -48,7 +49,7 @@ func TestAntifloodWithMessagesFromTheSamePeer(t *testing.T) { protectedIdexes := []int{5, 7} //flooder will deactivate its flooding mechanism as to be able to flood the network - interceptors[flooderIdx].CountersMap = nil + interceptors[flooderIdx].FloodPreventer = nil fmt.Println("flooding the network") isFlooding := atomic.Value{} @@ -66,7 +67,7 @@ func TestAntifloodWithMessagesFromTheSamePeer(t *testing.T) { isFlooding.Store(false) - checkMessagesOnPeers(t, peers, interceptors, uint64(maxMumProcessMessages), floodedIdxes, protectedIdexes) + checkMessagesOnPeers(t, peers, interceptors, maxMumProcessMessages, floodedIdxes, protectedIdexes) } // TestAntifloodWithMessagesFromOtherPeers tests what happens if a peer decide to send a number of messages @@ -89,7 +90,7 @@ func TestAntifloodWithMessagesFromOtherPeers(t *testing.T) { // (check integrationTests.CreateFixedNetworkOf14Peers function) topic := "test_topic" broadcastMessageDuration := time.Second * 2 - maxMumProcessMessages := 5 + maxMumProcessMessages := uint32(5) interceptors, err := createTopicsAndMockInterceptors(peers, topic, maxMumProcessMessages) assert.Nil(t, err) @@ -102,7 +103,7 @@ func TestAntifloodWithMessagesFromOtherPeers(t *testing.T) { //flooders will deactivate their flooding mechanism as to be able to flood the network for _, idx := range flooderIdxes { - interceptors[idx].CountersMap = nil + interceptors[idx].FloodPreventer = nil } //generate a message from connected peers of the main flooder (peer 2) @@ -112,25 +113,25 @@ func TestAntifloodWithMessagesFromOtherPeers(t *testing.T) { } time.Sleep(broadcastMessageDuration) - checkMessagesOnPeers(t, peers, 
interceptors, uint64(maxMumProcessMessages), floodedIdxes, protectedIdexes) + checkMessagesOnPeers(t, peers, interceptors, maxMumProcessMessages, floodedIdxes, protectedIdexes) } func checkMessagesOnPeers( t *testing.T, peers []p2p.Messenger, interceptors []*messageProcessor, - maxMumProcessMessages uint64, + maxMumProcessMessages uint32, floodedIdxes []int, protectedIdexes []int, ) { checkFunctionForFloodedPeers := func(interceptor *messageProcessor) { - assert.Equal(t, maxMumProcessMessages, interceptor.MessagesProcessed()) + assert.Equal(t, maxMumProcessMessages, interceptor.NumMessagesProcessed()) //can not precisely determine how many message have been received - assert.True(t, maxMumProcessMessages < interceptor.MessagesReceived()) + assert.True(t, maxMumProcessMessages < interceptor.NumMessagesReceived()) } checkFunctionForProtectedPeers := func(interceptor *messageProcessor) { - assert.Equal(t, maxMumProcessMessages, interceptor.MessagesProcessed()) - assert.Equal(t, maxMumProcessMessages, interceptor.MessagesReceived()) + assert.Equal(t, maxMumProcessMessages, interceptor.NumMessagesProcessed()) + assert.Equal(t, maxMumProcessMessages, interceptor.NumMessagesReceived()) } fmt.Println("checking flooded peers") @@ -139,7 +140,7 @@ func checkMessagesOnPeers( checkPeers(peers, interceptors, protectedIdexes, checkFunctionForProtectedPeers) } -func createTopicsAndMockInterceptors(peers []p2p.Messenger, topic string, maxNumMessages int) ([]*messageProcessor, error) { +func createTopicsAndMockInterceptors(peers []p2p.Messenger, topic string, maxNumMessages uint32) ([]*messageProcessor, error) { interceptors := make([]*messageProcessor, len(peers)) for idx, p := range peers { @@ -152,7 +153,7 @@ func createTopicsAndMockInterceptors(peers []p2p.Messenger, topic string, maxNum antifloodPool, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) interceptors[idx] = newMessageProcessor() - interceptors[idx].CountersMap, _ = 
antiflood.NewQuotaFloodPreventer(antifloodPool, uint32(maxNumMessages), uint64(maxSize)) + interceptors[idx].FloodPreventer, _ = antiflood.NewQuotaFloodPreventer(antifloodPool, maxNumMessages, maxSize) err = p.RegisterMessageProcessor(topic, interceptors[idx]) if err != nil { return nil, fmt.Errorf("%w, pid: %s", err, p.ID()) @@ -160,7 +161,6 @@ func createTopicsAndMockInterceptors(peers []p2p.Messenger, topic string, maxNum } return interceptors, nil - } func checkPeers( @@ -173,10 +173,12 @@ func checkPeers( for _, idx := range indexes { peer := peers[idx] interceptor := interceptors[idx] - fmt.Printf("%s got %d total messages and processed %d\n", + fmt.Printf("%s got %d (%s) total messages and processed %d (%s)\n", peer.ID().Pretty(), - interceptor.MessagesReceived(), - interceptor.MessagesProcessed(), + interceptor.NumMessagesReceived(), + core.ConvertBytes(interceptor.SizeMessagesReceived()), + interceptor.NumMessagesProcessed(), + core.ConvertBytes(interceptor.SizeMessagesProcessed()), ) checkFunction(interceptor) diff --git a/integrationTests/p2p/antiflood/messageProcessor.go b/integrationTests/p2p/antiflood/messageProcessor.go index 75b9ccd894d..65316e82335 100644 --- a/integrationTests/p2p/antiflood/messageProcessor.go +++ b/integrationTests/p2p/antiflood/messageProcessor.go @@ -10,10 +10,15 @@ import ( ) type messageProcessor struct { - numMessagesReceived uint64 - mutMessages sync.Mutex - messages map[p2p.PeerID][]p2p.MessageP2P - CountersMap process.FloodPreventer + numMessagesProcessed uint32 + sizeMessagesProcessed uint64 + + numMessagesReceived uint32 + sizeMessagesReceived uint64 + + mutMessages sync.Mutex + messages map[p2p.PeerID][]p2p.MessageP2P + FloodPreventer process.FloodPreventer } func newMessageProcessor() *messageProcessor { @@ -23,24 +28,28 @@ func newMessageProcessor() *messageProcessor { } func (mp *messageProcessor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID, _ func(buffToSend []byte)) error { - 
atomic.AddUint64(&mp.numMessagesReceived, 1) + atomic.AddUint32(&mp.numMessagesReceived, 1) + atomic.AddUint64(&mp.sizeMessagesReceived, uint64(len(message.Data()))) - if mp.CountersMap != nil { + if mp.FloodPreventer != nil { //protect from directly connected peer - ok := mp.CountersMap.TryIncrement(string(fromConnectedPeer), uint64(len(message.Data()))) + ok := mp.FloodPreventer.TryIncrement(string(fromConnectedPeer), uint64(len(message.Data()))) if !ok { return fmt.Errorf("system flooded") } if fromConnectedPeer != message.Peer() { //protect from the flooding messages that originate from the same source but come from different peers - ok = mp.CountersMap.TryIncrement(string(message.Peer()), uint64(len(message.Data()))) + ok = mp.FloodPreventer.TryIncrement(string(message.Peer()), uint64(len(message.Data()))) if !ok { return fmt.Errorf("system flooded") } } } + atomic.AddUint32(&mp.numMessagesProcessed, 1) + atomic.AddUint64(&mp.sizeMessagesProcessed, uint64(len(message.Data()))) + mp.mutMessages.Lock() defer mp.mutMessages.Unlock() @@ -49,27 +58,20 @@ func (mp *messageProcessor) ProcessReceivedMessage(message p2p.MessageP2P, fromC return nil } -func (mp *messageProcessor) Messages(pid p2p.PeerID) []p2p.MessageP2P { - mp.mutMessages.Lock() - defer mp.mutMessages.Unlock() - - return mp.messages[pid] +func (mp *messageProcessor) NumMessagesProcessed() uint32 { + return atomic.LoadUint32(&mp.numMessagesProcessed) } -func (mp *messageProcessor) MessagesReceived() uint64 { - return atomic.LoadUint64(&mp.numMessagesReceived) +func (mp *messageProcessor) SizeMessagesProcessed() uint64 { + return atomic.LoadUint64(&mp.sizeMessagesProcessed) } -func (mp *messageProcessor) MessagesProcessed() uint64 { - mp.mutMessages.Lock() - defer mp.mutMessages.Unlock() - - count := 0 - for _, msgs := range mp.messages { - count += len(msgs) - } +func (mp *messageProcessor) NumMessagesReceived() uint32 { + return atomic.LoadUint32(&mp.numMessagesReceived) +} - return uint64(count) +func 
(mp *messageProcessor) SizeMessagesReceived() uint64 { + return atomic.LoadUint64(&mp.sizeMessagesReceived) } func (mp *messageProcessor) IsInterfaceNil() bool { diff --git a/process/throttle/antiflood/quotaFloodPreventer.go b/process/throttle/antiflood/quotaFloodPreventer.go index 38e1fc54a21..216ae69b763 100644 --- a/process/throttle/antiflood/quotaFloodPreventer.go +++ b/process/throttle/antiflood/quotaFloodPreventer.go @@ -18,7 +18,7 @@ type quota struct { // qoutaFloodPreventer represents a cache of quotas per peer used in antiflooding mechanism type quotaFloodPreventer struct { - mutOperation sync.Mutex + mutOperation sync.RWMutex cacher storage.Cacher maxMessages uint32 maxSize uint64 From d6efd15a6f50acf70ed809bb52b2e1ddb7b18410 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Sun, 8 Dec 2019 21:23:47 +0200 Subject: [PATCH 06/35] added integration test for flooding with large messages --- .../p2p/antiflood/antiflooding_test.go | 77 ++++++++++++++++--- 1 file changed, 66 insertions(+), 11 deletions(-) diff --git a/integrationTests/p2p/antiflood/antiflooding_test.go b/integrationTests/p2p/antiflood/antiflooding_test.go index 22cd25a838b..17b2b999886 100644 --- a/integrationTests/p2p/antiflood/antiflooding_test.go +++ b/integrationTests/p2p/antiflood/antiflooding_test.go @@ -15,12 +15,11 @@ import ( ) var durationBootstrapingTime = 2 * time.Second -var maxSize = uint64(1 << 20) //1MB // TestAntifloodWithMessagesFromTheSamePeer tests what happens if a peer decide to send a large number of messages // all originating from its peer ID // All directed peers should prevent the flooding to the rest of the network and process only a limited number of messages -func TestAntifloodWithMessagesFromTheSamePeer(t *testing.T) { +func TestAntifloodWithNumMessagesFromTheSamePeer(t *testing.T) { if testing.Short() { t.Skip("this is not a short test") } @@ -38,7 +37,8 @@ func TestAntifloodWithMessagesFromTheSamePeer(t *testing.T) { topic := "test_topic" 
broadcastMessageDuration := time.Second * 2 maxMumProcessMessages := uint32(5) - interceptors, err := createTopicsAndMockInterceptors(peers, topic, maxMumProcessMessages) + maxMessageSize := uint64(1 << 20) //1MB + interceptors, err := createTopicsAndMockInterceptors(peers, topic, maxMumProcessMessages, maxMessageSize) assert.Nil(t, err) fmt.Println("bootstrapping nodes") @@ -74,7 +74,7 @@ func TestAntifloodWithMessagesFromTheSamePeer(t *testing.T) { // originating form other peer IDs. Since this is exceptionally hard to accomplish in integration tests because it needs // 3-rd party library tweaking, the test is reduced to 10 peers generating 1 message through one peer that acts as a flooder // All directed peers should prevent the flooding to the rest of the network and process only a limited number of messages -func TestAntifloodWithMessagesFromOtherPeers(t *testing.T) { +func TestAntifloodWithNumMessagesFromOtherPeers(t *testing.T) { if testing.Short() { t.Skip("this is not a short test") } @@ -91,7 +91,8 @@ func TestAntifloodWithMessagesFromOtherPeers(t *testing.T) { topic := "test_topic" broadcastMessageDuration := time.Second * 2 maxMumProcessMessages := uint32(5) - interceptors, err := createTopicsAndMockInterceptors(peers, topic, maxMumProcessMessages) + maxMessageSize := uint64(1 << 20) //1MB + interceptors, err := createTopicsAndMockInterceptors(peers, topic, maxMumProcessMessages, maxMessageSize) assert.Nil(t, err) fmt.Println("bootstrapping nodes") @@ -116,22 +117,76 @@ func TestAntifloodWithMessagesFromOtherPeers(t *testing.T) { checkMessagesOnPeers(t, peers, interceptors, maxMumProcessMessages, floodedIdxes, protectedIdexes) } +// TestAntifloodWithMessagesFromTheSamePeer tests what happens if a peer decide to send large messages +// all originating from its peer ID +// All directed peers should prevent the flooding to the rest of the network and process only a limited number of messages +func TestAntifloodWithLargeSizeMessagesFromTheSamePeer(t 
*testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + peers, err := integrationTests.CreateFixedNetworkOf7Peers() + assert.Nil(t, err) + + defer func() { + integrationTests.ClosePeers(peers) + }() + + //node 3 is connected to 0, 2, 4 and 6 (check integrationTests.CreateFixedNetworkOf7Peers function) + //large number of broadcast messages from 3 might flood above mentioned peers but should not flood 5 and 7 + + topic := "test_topic" + broadcastMessageDuration := time.Second * 2 + maxMumProcessMessages := uint32(100000) + maxMessageSize := uint64(1 << 10) //1KB + interceptors, err := createTopicsAndMockInterceptors(peers, topic, maxMumProcessMessages, maxMessageSize) + assert.Nil(t, err) + + fmt.Println("bootstrapping nodes") + time.Sleep(durationBootstrapingTime) + + flooderIdx := 3 + floodedIdxes := []int{0, 2, 4, 6} + protectedIdexes := []int{5, 7} + + //flooder will deactivate its flooding mechanism as to be able to flood the network + interceptors[flooderIdx].FloodPreventer = nil + + fmt.Println("flooding the network") + isFlooding := atomic.Value{} + isFlooding.Store(true) + go func() { + for { + peers[flooderIdx].Broadcast(topic, make([]byte, maxMessageSize+1)) + + if !isFlooding.Load().(bool) { + return + } + } + }() + time.Sleep(broadcastMessageDuration) + + isFlooding.Store(false) + + checkMessagesOnPeers(t, peers, interceptors, 1, floodedIdxes, protectedIdexes) +} + func checkMessagesOnPeers( t *testing.T, peers []p2p.Messenger, interceptors []*messageProcessor, - maxMumProcessMessages uint32, + maxNumProcessMessages uint32, floodedIdxes []int, protectedIdexes []int, ) { checkFunctionForFloodedPeers := func(interceptor *messageProcessor) { - assert.Equal(t, maxMumProcessMessages, interceptor.NumMessagesProcessed()) + assert.Equal(t, maxNumProcessMessages, interceptor.NumMessagesProcessed()) //can not precisely determine how many message have been received - assert.True(t, maxMumProcessMessages < 
interceptor.NumMessagesReceived()) + assert.True(t, maxNumProcessMessages < interceptor.NumMessagesReceived()) } checkFunctionForProtectedPeers := func(interceptor *messageProcessor) { - assert.Equal(t, maxMumProcessMessages, interceptor.NumMessagesProcessed()) - assert.Equal(t, maxMumProcessMessages, interceptor.NumMessagesReceived()) + assert.Equal(t, maxNumProcessMessages, interceptor.NumMessagesProcessed()) + assert.Equal(t, maxNumProcessMessages, interceptor.NumMessagesReceived()) } fmt.Println("checking flooded peers") @@ -140,7 +195,7 @@ func checkMessagesOnPeers( checkPeers(peers, interceptors, protectedIdexes, checkFunctionForProtectedPeers) } -func createTopicsAndMockInterceptors(peers []p2p.Messenger, topic string, maxNumMessages uint32) ([]*messageProcessor, error) { +func createTopicsAndMockInterceptors(peers []p2p.Messenger, topic string, maxNumMessages uint32, maxSize uint64) ([]*messageProcessor, error) { interceptors := make([]*messageProcessor, len(peers)) for idx, p := range peers { From 0df51be681624595a1f938266e8f1a520c3fd251 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Sun, 8 Dec 2019 21:37:08 +0200 Subject: [PATCH 07/35] updated a function comment --- integrationTests/testInitializer.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 64616ad95a3..2b572e23876 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -113,8 +113,8 @@ func CreateMessengerWithNoDiscovery(ctx context.Context) p2p.Messenger { // 0------------------- 1 // | | // 2 ------------------ 3 ------------------ 4 -// | | | -// 5 6 7 +// | | | +// 5 6 7 func CreateFixedNetworkOf7Peers() ([]p2p.Messenger, error) { numPeers := 7 peers := make([]p2p.Messenger, numPeers+1) From 0b6689b43da6ec21ae729c4f5ab32a0bdcc489aa Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Mon, 9 Dec 2019 17:17:08 +0200 Subject: [PATCH 08/35] removed 
broadcast handler function from MessageProcessor renamed function from TryIncrement to Increment in floodPreventer interace + implementations minor code changes: added constants instead of magic numbers, added check.IfNil checks, moved comments --- consensus/mock/sposWorkerMock.go | 2 +- consensus/spos/interface.go | 2 +- consensus/spos/worker.go | 2 +- consensus/spos/worker_test.go | 20 ++-- dataRetriever/interface.go | 2 +- dataRetriever/mock/hashSliceResolverStub.go | 2 +- dataRetriever/mock/headerResolverStub.go | 2 +- dataRetriever/mock/resolverStub.go | 6 +- .../resolvers/genericBlockBodyResolver.go | 2 +- .../genericBlockBodyResolver_test.go | 8 +- dataRetriever/resolvers/headerResolver.go | 2 +- .../resolvers/headerResolver_test.go | 16 ++-- .../resolvers/transactionResolver.go | 2 +- .../resolvers/transactionResolver_test.go | 16 ++-- integrationTests/mock/headerResolverMock.go | 2 +- .../mock/miniBlocksResolverMock.go | 2 +- .../p2p/antiflood/antiflooding_test.go | 12 +-- .../p2p/antiflood/messageProcessor.go | 16 +++- .../p2p/peerDiscovery/messageProcessor.go | 2 +- .../p2p/pubsub/messageProcessor.go | 2 +- .../p2p/pubsub/peerReceivingMessages_test.go | 4 +- integrationTests/testInitializer.go | 4 +- node/heartbeat/monitor.go | 2 +- node/heartbeat/monitor_test.go | 10 +- node/node_test.go | 2 +- .../internalBroadcastSpeedMeasure/main.go | 6 +- p2p/libp2p/issues_test.go | 2 +- p2p/libp2p/netMessenger.go | 8 +- p2p/libp2p/netMessenger_test.go | 93 +------------------ p2p/memp2p/memp2p.go | 17 +--- p2p/mock/messageProcessorStub.go | 6 +- p2p/mock/mockMessageProcessor.go | 2 +- p2p/p2p.go | 9 +- process/interceptors/multiDataInterceptor.go | 18 +--- .../interceptors/multiDataInterceptor_test.go | 42 ++------- process/interceptors/singleDataInterceptor.go | 2 +- .../singleDataInterceptor_test.go | 10 +- process/interface.go | 4 +- process/mock/headerResolverMock.go | 2 +- process/mock/interceptorStub.go | 6 +- process/mock/miniBlocksResolverMock.go | 2 +- 
process/mock/resolverStub.go | 6 +- .../throttle/antiflood/quotaFloodPreventer.go | 14 +-- .../antiflood/quotaFloodPreventer_test.go | 30 +++--- 44 files changed, 137 insertions(+), 284 deletions(-) diff --git a/consensus/mock/sposWorkerMock.go b/consensus/mock/sposWorkerMock.go index d715cd3e977..fc010c14d17 100644 --- a/consensus/mock/sposWorkerMock.go +++ b/consensus/mock/sposWorkerMock.go @@ -30,7 +30,7 @@ func (sposWorkerMock *SposWorkerMock) RemoveAllReceivedMessagesCalls() { sposWorkerMock.RemoveAllReceivedMessagesCallsCalled() } -func (sposWorkerMock *SposWorkerMock) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID, _ func(buffToSend []byte)) error { +func (sposWorkerMock *SposWorkerMock) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID) error { return sposWorkerMock.ProcessReceivedMessageCalled(message) } diff --git a/consensus/spos/interface.go b/consensus/spos/interface.go index 939ddac488c..ae487961238 100644 --- a/consensus/spos/interface.go +++ b/consensus/spos/interface.go @@ -83,7 +83,7 @@ type WorkerHandler interface { //RemoveAllReceivedMessagesCalls removes all the functions handlers RemoveAllReceivedMessagesCalls() //ProcessReceivedMessage method redirects the received message to the channel which should handle it - ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID, broadcastHandler func(buffToSend []byte)) error + ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error //Extend does an extension for the subround with subroundId Extend(subroundId int) //GetConsensusStateChangedChannel gets the channel for the consensusStateChanged diff --git a/consensus/spos/worker.go b/consensus/spos/worker.go index b54858a9673..9070d49c5fa 100644 --- a/consensus/spos/worker.go +++ b/consensus/spos/worker.go @@ -216,7 +216,7 @@ func (wrk *Worker) getCleanedList(cnsDataList []*consensus.Message) []*consensus } // ProcessReceivedMessage method redirects the received message to the channel 
which should handle it -func (wrk *Worker) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID, _ func(buffToSend []byte)) error { +func (wrk *Worker) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { if message == nil || message.IsInterfaceNil() { return ErrNilMessage } diff --git a/consensus/spos/worker_test.go b/consensus/spos/worker_test.go index a16b91fa017..ba65d8a2a85 100644 --- a/consensus/spos/worker_test.go +++ b/consensus/spos/worker_test.go @@ -665,7 +665,7 @@ func TestWorker_ProcessReceivedMessageTxBlockBodyShouldRetNil(t *testing.T) { ) buff, _ := wrk.Marshalizer().Marshal(cnsMsg) time.Sleep(time.Second) - err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, fromConnectedPeerId, nil) + err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, fromConnectedPeerId) assert.Nil(t, err) } @@ -689,7 +689,7 @@ func TestWorker_ProcessReceivedMessageHeaderShouldRetNil(t *testing.T) { ) buff, _ := wrk.Marshalizer().Marshal(cnsMsg) time.Sleep(time.Second) - err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, fromConnectedPeerId, nil) + err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, fromConnectedPeerId) assert.Nil(t, err) } @@ -697,7 +697,7 @@ func TestWorker_ProcessReceivedMessageHeaderShouldRetNil(t *testing.T) { func TestWorker_ProcessReceivedMessageNilMessageShouldErr(t *testing.T) { t.Parallel() wrk := *initWorker() - err := wrk.ProcessReceivedMessage(nil, fromConnectedPeerId, nil) + err := wrk.ProcessReceivedMessage(nil, fromConnectedPeerId) time.Sleep(time.Second) assert.Equal(t, 0, len(wrk.ReceivedMessages()[bn.MtBlockBody])) @@ -707,7 +707,7 @@ func TestWorker_ProcessReceivedMessageNilMessageShouldErr(t *testing.T) { func TestWorker_ProcessReceivedMessageNilMessageDataFieldShouldErr(t *testing.T) { t.Parallel() wrk := *initWorker() - err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{}, fromConnectedPeerId, nil) 
+ err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{}, fromConnectedPeerId) time.Sleep(time.Second) assert.Equal(t, 0, len(wrk.ReceivedMessages()[bn.MtBlockBody])) @@ -729,7 +729,7 @@ func TestWorker_ProcessReceivedMessageNodeNotInEligibleListShouldErr(t *testing. 0, ) buff, _ := wrk.Marshalizer().Marshal(cnsMsg) - err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, fromConnectedPeerId, nil) + err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, fromConnectedPeerId) time.Sleep(time.Second) assert.Equal(t, 0, len(wrk.ReceivedMessages()[bn.MtBlockBody])) @@ -751,7 +751,7 @@ func TestWorker_ProcessReceivedMessageMessageIsForPastRoundShouldErr(t *testing. -1, ) buff, _ := wrk.Marshalizer().Marshal(cnsMsg) - err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, fromConnectedPeerId, nil) + err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, fromConnectedPeerId) time.Sleep(time.Second) assert.Equal(t, 0, len(wrk.ReceivedMessages()[bn.MtBlockBody])) @@ -773,7 +773,7 @@ func TestWorker_ProcessReceivedMessageInvalidSignatureShouldErr(t *testing.T) { 0, ) buff, _ := wrk.Marshalizer().Marshal(cnsMsg) - err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, fromConnectedPeerId, nil) + err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, fromConnectedPeerId) time.Sleep(time.Second) assert.Equal(t, 0, len(wrk.ReceivedMessages()[bn.MtBlockBody])) @@ -795,7 +795,7 @@ func TestWorker_ProcessReceivedMessageReceivedMessageIsFromSelfShouldRetNilAndNo 0, ) buff, _ := wrk.Marshalizer().Marshal(cnsMsg) - err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, fromConnectedPeerId, nil) + err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, fromConnectedPeerId) time.Sleep(time.Second) assert.Equal(t, 0, len(wrk.ReceivedMessages()[bn.MtBlockBody])) @@ -818,7 +818,7 @@ func 
TestWorker_ProcessReceivedMessageWhenRoundIsCanceledShouldRetNilAndNotProce 0, ) buff, _ := wrk.Marshalizer().Marshal(cnsMsg) - err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, fromConnectedPeerId, nil) + err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, fromConnectedPeerId) time.Sleep(time.Second) assert.Equal(t, 0, len(wrk.ReceivedMessages()[bn.MtBlockBody])) @@ -840,7 +840,7 @@ func TestWorker_ProcessReceivedMessageOkValsShouldWork(t *testing.T) { 0, ) buff, _ := wrk.Marshalizer().Marshal(cnsMsg) - err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, fromConnectedPeerId, nil) + err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, fromConnectedPeerId) time.Sleep(time.Second) assert.Equal(t, 1, len(wrk.ReceivedMessages()[bn.MtBlockHeader])) diff --git a/dataRetriever/interface.go b/dataRetriever/interface.go index b2b1761115b..b4773edb3bd 100644 --- a/dataRetriever/interface.go +++ b/dataRetriever/interface.go @@ -49,7 +49,7 @@ const ( // Resolver defines what a data resolver should do type Resolver interface { RequestDataFromHash(hash []byte) error - ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID, broadcastHandler func(buffToSend []byte)) error + ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error IsInterfaceNil() bool } diff --git a/dataRetriever/mock/hashSliceResolverStub.go b/dataRetriever/mock/hashSliceResolverStub.go index 9467f4be070..9f39a4b5b59 100644 --- a/dataRetriever/mock/hashSliceResolverStub.go +++ b/dataRetriever/mock/hashSliceResolverStub.go @@ -16,7 +16,7 @@ func (hsrs *HashSliceResolverStub) RequestDataFromHash(hash []byte) error { return errNotImplemented } -func (hsrs *HashSliceResolverStub) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID, _ func(buffToSend []byte)) error { +func (hsrs *HashSliceResolverStub) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID) error { if 
hsrs.ProcessReceivedMessageCalled != nil { return hsrs.ProcessReceivedMessageCalled(message) } diff --git a/dataRetriever/mock/headerResolverStub.go b/dataRetriever/mock/headerResolverStub.go index 91a1e48f9ca..e43e303a91a 100644 --- a/dataRetriever/mock/headerResolverStub.go +++ b/dataRetriever/mock/headerResolverStub.go @@ -21,7 +21,7 @@ func (hrs *HeaderResolverStub) RequestDataFromHash(hash []byte) error { return errNotImplemented } -func (hrs *HeaderResolverStub) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID, _ func(buffToSend []byte)) error { +func (hrs *HeaderResolverStub) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID) error { if hrs.ProcessReceivedMessageCalled != nil { return hrs.ProcessReceivedMessageCalled(message) } diff --git a/dataRetriever/mock/resolverStub.go b/dataRetriever/mock/resolverStub.go index 588215cc51b..0a33666afed 100644 --- a/dataRetriever/mock/resolverStub.go +++ b/dataRetriever/mock/resolverStub.go @@ -6,15 +6,15 @@ import ( type ResolverStub struct { RequestDataFromHashCalled func(hash []byte) error - ProcessReceivedMessageCalled func(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error + ProcessReceivedMessageCalled func(message p2p.MessageP2P) error } func (rs *ResolverStub) RequestDataFromHash(hash []byte) error { return rs.RequestDataFromHashCalled(hash) } -func (rs *ResolverStub) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID, broadcastHandler func(buffToSend []byte)) error { - return rs.ProcessReceivedMessageCalled(message, broadcastHandler) +func (rs *ResolverStub) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID) error { + return rs.ProcessReceivedMessageCalled(message) } // IsInterfaceNil returns true if there is no value under the interface diff --git a/dataRetriever/resolvers/genericBlockBodyResolver.go b/dataRetriever/resolvers/genericBlockBodyResolver.go index ad7976e71e0..de55c75ea79 100644 --- a/dataRetriever/resolvers/genericBlockBodyResolver.go 
+++ b/dataRetriever/resolvers/genericBlockBodyResolver.go @@ -52,7 +52,7 @@ func NewGenericBlockBodyResolver( // ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to, usually a request topic) -func (gbbRes *genericBlockBodyResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID, _ func(buffToSend []byte)) error { +func (gbbRes *genericBlockBodyResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { rd := &dataRetriever.RequestData{} err := rd.Unmarshal(gbbRes.marshalizer, message) if err != nil { diff --git a/dataRetriever/resolvers/genericBlockBodyResolver_test.go b/dataRetriever/resolvers/genericBlockBodyResolver_test.go index 5f4d8706e4b..691eadb701e 100644 --- a/dataRetriever/resolvers/genericBlockBodyResolver_test.go +++ b/dataRetriever/resolvers/genericBlockBodyResolver_test.go @@ -99,7 +99,7 @@ func TestNewGenericBlockBodyResolver_ProcessReceivedMessageNilValueShouldErr(t * &mock.MarshalizerMock{}, ) - err := gbbRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, nil), fromConnectedPeerId, nil) + err := gbbRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, nil), fromConnectedPeerId) assert.Equal(t, dataRetriever.ErrNilValue, err) } @@ -113,7 +113,7 @@ func TestGenericBlockBodyResolver_ProcessReceivedMessageWrongTypeShouldErr(t *te &mock.MarshalizerMock{}, ) - err := gbbRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, make([]byte, 0)), fromConnectedPeerId, nil) + err := gbbRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, make([]byte, 0)), fromConnectedPeerId) assert.Equal(t, dataRetriever.ErrInvalidRequestType, err) } @@ -158,7 +158,6 @@ func TestGenericBlockBodyResolver_ProcessReceivedMessageFoundInPoolShouldRetValA err := gbbRes.ProcessReceivedMessage( 
createRequestMsg(dataRetriever.HashArrayType, requestedBuff), fromConnectedPeerId, - nil, ) assert.Nil(t, err) @@ -210,7 +209,6 @@ func TestGenericBlockBodyResolver_ProcessReceivedMessageFoundInPoolMarshalizerFa err := gbbRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.HashArrayType, requestedBuff), fromConnectedPeerId, - nil, ) assert.Equal(t, errExpected, err) @@ -256,7 +254,6 @@ func TestGenericBlockBodyResolver_ProcessReceivedMessageNotFoundInPoolShouldRetF err := gbbRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.HashType, requestedBuff), fromConnectedPeerId, - nil, ) assert.Nil(t, err) @@ -300,7 +297,6 @@ func TestGenericBlockBodyResolver_ProcessReceivedMessageMissingDataShouldNotSend _ = gbbRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.HashType, requestedBuff), fromConnectedPeerId, - nil, ) assert.False(t, wasSent) diff --git a/dataRetriever/resolvers/headerResolver.go b/dataRetriever/resolvers/headerResolver.go index 6f24c85ec1f..995daf6ea03 100644 --- a/dataRetriever/resolvers/headerResolver.go +++ b/dataRetriever/resolvers/headerResolver.go @@ -70,7 +70,7 @@ func NewHeaderResolver( // ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to, usually a request topic) -func (hdrRes *HeaderResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID, _ func(buffToSend []byte)) error { +func (hdrRes *HeaderResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { rd, err := hdrRes.parseReceivedMessage(message) if err != nil { return err diff --git a/dataRetriever/resolvers/headerResolver_test.go b/dataRetriever/resolvers/headerResolver_test.go index 8e1f0c92106..3865cbf4ee4 100644 --- a/dataRetriever/resolvers/headerResolver_test.go +++ b/dataRetriever/resolvers/headerResolver_test.go @@ -166,7 +166,7 @@ func 
TestHeaderResolver_ProcessReceivedMessageNilValueShouldErr(t *testing.T) { mock.NewNonceHashConverterMock(), ) - err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, nil), fromConnectedPeerId, nil) + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, nil), fromConnectedPeerId) assert.Equal(t, dataRetriever.ErrNilValue, err) } @@ -183,7 +183,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestUnknownTypeShouldErr(t *tes mock.NewNonceHashConverterMock(), ) - err := hdrRes.ProcessReceivedMessage(createRequestMsg(254, make([]byte, 0)), fromConnectedPeerId, nil) + err := hdrRes.ProcessReceivedMessage(createRequestMsg(254, make([]byte, 0)), fromConnectedPeerId) assert.Equal(t, dataRetriever.ErrResolveTypeUnknown, err) } @@ -223,7 +223,7 @@ func TestHeaderResolver_ValidateRequestHashTypeFoundInHdrPoolShouldSearchAndSend mock.NewNonceHashConverterMock(), ) - err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedData), fromConnectedPeerId, nil) + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedData), fromConnectedPeerId) assert.Nil(t, err) assert.True(t, searchWasCalled) assert.True(t, sendWasCalled) @@ -270,7 +270,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestHashTypeFoundInHdrPoolMarsh mock.NewNonceHashConverterMock(), ) - err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedData), fromConnectedPeerId, nil) + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedData), fromConnectedPeerId) assert.Equal(t, errExpected, err) } @@ -315,7 +315,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestRetFromStorageShouldRetValA mock.NewNonceHashConverterMock(), ) - err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedData), fromConnectedPeerId, nil) + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, 
requestedData), fromConnectedPeerId) assert.Nil(t, err) assert.True(t, wasGotFromStorage) assert.True(t, wasSent) @@ -338,7 +338,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeInvalidSliceShould mock.NewNonceHashConverterMock(), ) - err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, []byte("aaa")), fromConnectedPeerId, nil) + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, []byte("aaa")), fromConnectedPeerId) assert.Equal(t, dataRetriever.ErrInvalidNonceByteSlice, err) } @@ -378,7 +378,6 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeNotFoundInHdrNonce err := hdrRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.NonceType, nonceConverter.ToByteSlice(requestedNonce)), fromConnectedPeerId, - nil, ) assert.Nil(t, err) assert.False(t, wasSent) @@ -443,7 +442,6 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo err := hdrRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.NonceType, nonceConverter.ToByteSlice(requestedNonce)), fromConnectedPeerId, - nil, ) assert.Nil(t, err) @@ -515,7 +513,6 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo err := hdrRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.NonceType, nonceConverter.ToByteSlice(requestedNonce)), fromConnectedPeerId, - nil, ) assert.Nil(t, err) @@ -584,7 +581,6 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo err := hdrRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.NonceType, nonceConverter.ToByteSlice(requestedNonce)), fromConnectedPeerId, - nil, ) assert.Equal(t, errExpected, err) diff --git a/dataRetriever/resolvers/transactionResolver.go b/dataRetriever/resolvers/transactionResolver.go index a3178fabf29..5b0e5652d69 100644 --- a/dataRetriever/resolvers/transactionResolver.go +++ b/dataRetriever/resolvers/transactionResolver.go @@ -57,7 +57,7 @@ func NewTxResolver( // 
ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to, usually a request topic) -func (txRes *TxResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID, _ func(buffToSend []byte)) error { +func (txRes *TxResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { rd := &dataRetriever.RequestData{} err := rd.Unmarshal(txRes.marshalizer, message) if err != nil { diff --git a/dataRetriever/resolvers/transactionResolver_test.go b/dataRetriever/resolvers/transactionResolver_test.go index cfd5ac0f67a..0b0d71eaac5 100644 --- a/dataRetriever/resolvers/transactionResolver_test.go +++ b/dataRetriever/resolvers/transactionResolver_test.go @@ -123,7 +123,7 @@ func TestTxResolver_ProcessReceivedMessageNilMessageShouldErr(t *testing.T) { &mock.DataPackerStub{}, ) - err := txRes.ProcessReceivedMessage(nil, connectedPeerId, nil) + err := txRes.ProcessReceivedMessage(nil, connectedPeerId) assert.Equal(t, dataRetriever.ErrNilMessage, err) } @@ -145,7 +145,7 @@ func TestTxResolver_ProcessReceivedMessageWrongTypeShouldErr(t *testing.T) { msg := &mock.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, connectedPeerId, nil) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId) assert.Equal(t, dataRetriever.ErrRequestTypeNotImplemented, err) } @@ -167,7 +167,7 @@ func TestTxResolver_ProcessReceivedMessageNilValueShouldErr(t *testing.T) { msg := &mock.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, connectedPeerId, nil) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId) assert.Equal(t, dataRetriever.ErrNilValue, err) } @@ -208,7 +208,7 @@ func TestTxResolver_ProcessReceivedMessageFoundInTxPoolShouldSearchAndSend(t *te msg := &mock.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, connectedPeerId, nil) + err := 
txRes.ProcessReceivedMessage(msg, connectedPeerId) assert.Nil(t, err) assert.True(t, searchWasCalled) @@ -253,7 +253,7 @@ func TestTxResolver_ProcessReceivedMessageFoundInTxPoolMarshalizerFailShouldRetN msg := &mock.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, connectedPeerId, nil) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId) assert.Equal(t, errExpected, err) } @@ -301,7 +301,7 @@ func TestTxResolver_ProcessReceivedMessageFoundInTxStorageShouldRetValAndSend(t msg := &mock.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, connectedPeerId, nil) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId) assert.Nil(t, err) assert.True(t, searchWasCalled) @@ -342,7 +342,7 @@ func TestTxResolver_ProcessReceivedMessageFoundInTxStorageCheckRetError(t *testi msg := &mock.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, connectedPeerId, nil) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId) assert.Equal(t, errExpected, err) @@ -401,7 +401,7 @@ func TestTxResolver_ProcessReceivedMessageRequestedTwoSmallTransactionsShouldCal msg := &mock.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, connectedPeerId, nil) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId) assert.Nil(t, err) assert.True(t, sendSliceWasCalled) diff --git a/integrationTests/mock/headerResolverMock.go b/integrationTests/mock/headerResolverMock.go index c9d751a1a30..964fe3dac5c 100644 --- a/integrationTests/mock/headerResolverMock.go +++ b/integrationTests/mock/headerResolverMock.go @@ -17,7 +17,7 @@ func (hrm *HeaderResolverMock) RequestDataFromHash(hash []byte) error { return hrm.RequestDataFromHashCalled(hash) } -func (hrm *HeaderResolverMock) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID, _ func(buffToSend []byte)) error { +func (hrm *HeaderResolverMock) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID) error { if 
hrm.ProcessReceivedMessageCalled == nil { return nil } diff --git a/integrationTests/mock/miniBlocksResolverMock.go b/integrationTests/mock/miniBlocksResolverMock.go index 6c30bc4a27b..55ae0d352a9 100644 --- a/integrationTests/mock/miniBlocksResolverMock.go +++ b/integrationTests/mock/miniBlocksResolverMock.go @@ -21,7 +21,7 @@ func (hrm *MiniBlocksResolverMock) RequestDataFromHashArray(hashes [][]byte) err return hrm.RequestDataFromHashArrayCalled(hashes) } -func (hrm *MiniBlocksResolverMock) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID, _ func(buffToSend []byte)) error { +func (hrm *MiniBlocksResolverMock) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID) error { return hrm.ProcessReceivedMessageCalled(message) } diff --git a/integrationTests/p2p/antiflood/antiflooding_test.go b/integrationTests/p2p/antiflood/antiflooding_test.go index 17b2b999886..863e4bae4a5 100644 --- a/integrationTests/p2p/antiflood/antiflooding_test.go +++ b/integrationTests/p2p/antiflood/antiflooding_test.go @@ -24,7 +24,7 @@ func TestAntifloodWithNumMessagesFromTheSamePeer(t *testing.T) { t.Skip("this is not a short test") } - peers, err := integrationTests.CreateFixedNetworkOf7Peers() + peers, err := integrationTests.CreateFixedNetworkOf8Peers() assert.Nil(t, err) defer func() { @@ -49,7 +49,7 @@ func TestAntifloodWithNumMessagesFromTheSamePeer(t *testing.T) { protectedIdexes := []int{5, 7} //flooder will deactivate its flooding mechanism as to be able to flood the network - interceptors[flooderIdx].FloodPreventer = nil + interceptors[flooderIdx].floodPreventer = nil fmt.Println("flooding the network") isFlooding := atomic.Value{} @@ -104,7 +104,7 @@ func TestAntifloodWithNumMessagesFromOtherPeers(t *testing.T) { //flooders will deactivate their flooding mechanism as to be able to flood the network for _, idx := range flooderIdxes { - interceptors[idx].FloodPreventer = nil + interceptors[idx].floodPreventer = nil } //generate a message from connected peers of the 
main flooder (peer 2) @@ -125,7 +125,7 @@ func TestAntifloodWithLargeSizeMessagesFromTheSamePeer(t *testing.T) { t.Skip("this is not a short test") } - peers, err := integrationTests.CreateFixedNetworkOf7Peers() + peers, err := integrationTests.CreateFixedNetworkOf8Peers() assert.Nil(t, err) defer func() { @@ -150,7 +150,7 @@ func TestAntifloodWithLargeSizeMessagesFromTheSamePeer(t *testing.T) { protectedIdexes := []int{5, 7} //flooder will deactivate its flooding mechanism as to be able to flood the network - interceptors[flooderIdx].FloodPreventer = nil + interceptors[flooderIdx].floodPreventer = nil fmt.Println("flooding the network") isFlooding := atomic.Value{} @@ -208,7 +208,7 @@ func createTopicsAndMockInterceptors(peers []p2p.Messenger, topic string, maxNum antifloodPool, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) interceptors[idx] = newMessageProcessor() - interceptors[idx].FloodPreventer, _ = antiflood.NewQuotaFloodPreventer(antifloodPool, maxNumMessages, maxSize) + interceptors[idx].floodPreventer, _ = antiflood.NewQuotaFloodPreventer(antifloodPool, maxNumMessages, maxSize) err = p.RegisterMessageProcessor(topic, interceptors[idx]) if err != nil { return nil, fmt.Errorf("%w, pid: %s", err, p.ID()) diff --git a/integrationTests/p2p/antiflood/messageProcessor.go b/integrationTests/p2p/antiflood/messageProcessor.go index 65316e82335..310b759ea81 100644 --- a/integrationTests/p2p/antiflood/messageProcessor.go +++ b/integrationTests/p2p/antiflood/messageProcessor.go @@ -18,7 +18,7 @@ type messageProcessor struct { mutMessages sync.Mutex messages map[p2p.PeerID][]p2p.MessageP2P - FloodPreventer process.FloodPreventer + floodPreventer process.FloodPreventer } func newMessageProcessor() *messageProcessor { @@ -27,20 +27,21 @@ func newMessageProcessor() *messageProcessor { } } -func (mp *messageProcessor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID, _ func(buffToSend []byte)) error { +// 
ProcessReceivedMessage is the callback function from the p2p side whenever a new message is received +func (mp *messageProcessor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { atomic.AddUint32(&mp.numMessagesReceived, 1) atomic.AddUint64(&mp.sizeMessagesReceived, uint64(len(message.Data()))) - if mp.FloodPreventer != nil { + if mp.floodPreventer != nil { //protect from directly connected peer - ok := mp.FloodPreventer.TryIncrement(string(fromConnectedPeer), uint64(len(message.Data()))) + ok := mp.floodPreventer.Increment(string(fromConnectedPeer), uint64(len(message.Data()))) if !ok { return fmt.Errorf("system flooded") } if fromConnectedPeer != message.Peer() { //protect from the flooding messages that originate from the same source but come from different peers - ok = mp.FloodPreventer.TryIncrement(string(message.Peer()), uint64(len(message.Data()))) + ok = mp.floodPreventer.Increment(string(message.Peer()), uint64(len(message.Data()))) if !ok { return fmt.Errorf("system flooded") } @@ -58,22 +59,27 @@ func (mp *messageProcessor) ProcessReceivedMessage(message p2p.MessageP2P, fromC return nil } +// NumMessagesProcessed returns the number of processed messages func (mp *messageProcessor) NumMessagesProcessed() uint32 { return atomic.LoadUint32(&mp.numMessagesProcessed) } +// SizeMessagesProcessed returns the total size of the processed messages func (mp *messageProcessor) SizeMessagesProcessed() uint64 { return atomic.LoadUint64(&mp.sizeMessagesProcessed) } +// NumMessagesReceived returns the number of received messages func (mp *messageProcessor) NumMessagesReceived() uint32 { return atomic.LoadUint32(&mp.numMessagesReceived) } +// SizeMessagesReceived returns the total size of the received messages func (mp *messageProcessor) SizeMessagesReceived() uint64 { return atomic.LoadUint64(&mp.sizeMessagesReceived) } +// IsInterfaceNil returns true if there is no value under the interface func (mp *messageProcessor) 
IsInterfaceNil() bool { return mp == nil } diff --git a/integrationTests/p2p/peerDiscovery/messageProcessor.go b/integrationTests/p2p/peerDiscovery/messageProcessor.go index 1c1e52085d7..5575a1b5cd5 100644 --- a/integrationTests/p2p/peerDiscovery/messageProcessor.go +++ b/integrationTests/p2p/peerDiscovery/messageProcessor.go @@ -21,7 +21,7 @@ func NewMessageProcessor(chanDone chan struct{}, requiredVal []byte) *MessagePro } } -func (mp *MessageProcesssor) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID, _ func(buffToSend []byte)) error { +func (mp *MessageProcesssor) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID) error { if bytes.Equal(mp.RequiredValue, message.Data()) { mp.mutDataReceived.Lock() mp.wasDataReceived = true diff --git a/integrationTests/p2p/pubsub/messageProcessor.go b/integrationTests/p2p/pubsub/messageProcessor.go index 16980d6d8e8..1eb96018ac9 100644 --- a/integrationTests/p2p/pubsub/messageProcessor.go +++ b/integrationTests/p2p/pubsub/messageProcessor.go @@ -17,7 +17,7 @@ func newMessageProcessor() *messageProcessor { } } -func (mp *messageProcessor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID, _ func(buffToSend []byte)) error { +func (mp *messageProcessor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { mp.mutMessages.Lock() defer mp.mutMessages.Unlock() diff --git a/integrationTests/p2p/pubsub/peerReceivingMessages_test.go b/integrationTests/p2p/pubsub/peerReceivingMessages_test.go index b959ff3c8ec..7460d28ddb1 100644 --- a/integrationTests/p2p/pubsub/peerReceivingMessages_test.go +++ b/integrationTests/p2p/pubsub/peerReceivingMessages_test.go @@ -20,7 +20,7 @@ type messageProcessorStub struct { ProcessReceivedMessageCalled func(message p2p.MessageP2P) error } -func (mps *messageProcessorStub) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID, _ func(buffToSend []byte)) error { +func (mps *messageProcessorStub) 
ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID) error { return mps.ProcessReceivedMessageCalled(message) } @@ -136,7 +136,7 @@ func TestBroadcastMessageComesFormTheConnectedPeers(t *testing.T) { topic := "test_topic" broadcastMessageDuration := time.Second * 2 - peers, err := integrationTests.CreateFixedNetworkOf7Peers() + peers, err := integrationTests.CreateFixedNetworkOf8Peers() assert.Nil(t, err) defer func() { diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 2b572e23876..eade59c3646 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -108,14 +108,14 @@ func CreateMessengerWithNoDiscovery(ctx context.Context) p2p.Messenger { return libP2PMes } -// CreateFixedNetworkOf7Peers assembles a network as following: +// CreateFixedNetworkOf8Peers assembles a network as following: // // 0------------------- 1 // | | // 2 ------------------ 3 ------------------ 4 // | | | // 5 6 7 -func CreateFixedNetworkOf7Peers() ([]p2p.Messenger, error) { +func CreateFixedNetworkOf8Peers() ([]p2p.Messenger, error) { numPeers := 7 peers := make([]p2p.Messenger, numPeers+1) diff --git a/node/heartbeat/monitor.go b/node/heartbeat/monitor.go index 97cb9c4a88b..b8cff09afbf 100644 --- a/node/heartbeat/monitor.go +++ b/node/heartbeat/monitor.go @@ -201,7 +201,7 @@ func (m *Monitor) SetAppStatusHandler(ash core.AppStatusHandler) error { // ProcessReceivedMessage satisfies the p2p.MessageProcessor interface so it can be called // by the p2p subsystem each time a new heartbeat message arrives -func (m *Monitor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID, _ func(buffToSend []byte)) error { +func (m *Monitor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { hbRecv, err := m.messageHandler.CreateHeartbeatFromP2pMessage(message) if err != nil { return err diff --git a/node/heartbeat/monitor_test.go b/node/heartbeat/monitor_test.go 
index 573c28b88e3..a824a7cad06 100644 --- a/node/heartbeat/monitor_test.go +++ b/node/heartbeat/monitor_test.go @@ -230,7 +230,7 @@ func TestMonitor_ProcessReceivedMessageShouldWork(t *testing.T) { Pubkey: []byte(pubKey), } hbBytes, _ := json.Marshal(hb) - err := mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: hbBytes}, fromConnectedPeerId, nil) + err := mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: hbBytes}, fromConnectedPeerId) assert.Nil(t, err) //a delay is mandatory for the go routine to finish its job @@ -288,7 +288,7 @@ func TestMonitor_ProcessReceivedMessageWithNewPublicKey(t *testing.T) { Pubkey: []byte(pubKey), } hbBytes, _ := json.Marshal(hb) - err := mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: hbBytes}, fromConnectedPeerId, nil) + err := mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: hbBytes}, fromConnectedPeerId) assert.Nil(t, err) //a delay is mandatory for the go routine to finish its job @@ -355,7 +355,7 @@ func TestMonitor_ProcessReceivedMessageWithNewShardID(t *testing.T) { buffToSend, err := json.Marshal(hb) assert.Nil(t, err) - err = mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: buffToSend}, fromConnectedPeerId, nil) + err = mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: buffToSend}, fromConnectedPeerId) assert.Nil(t, err) //a delay is mandatory for the go routine to finish its job @@ -375,7 +375,7 @@ func TestMonitor_ProcessReceivedMessageWithNewShardID(t *testing.T) { assert.Nil(t, err) - err = mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: buffToSend}, fromConnectedPeerId, nil) + err = mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: buffToSend}, fromConnectedPeerId) time.Sleep(1 * time.Second) @@ -454,6 +454,6 @@ func sendHbMessageFromPubKey(pubKey string, mon *heartbeat.Monitor) error { Pubkey: []byte(pubKey), } buffToSend, _ := json.Marshal(hb) - err := mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: buffToSend}, 
fromConnectedPeerId, nil) + err := mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: buffToSend}, fromConnectedPeerId) return err } diff --git a/node/node_test.go b/node/node_test.go index 814c7f0fc89..125071d5479 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -1437,7 +1437,7 @@ func TestNode_StartHeartbeatShouldWorkAndCanCallProcessMessage(t *testing.T) { assert.Nil(t, err) assert.NotNil(t, registeredHandler) - err = registeredHandler.ProcessReceivedMessage(nil, fromConnectedPeerId, nil) + err = registeredHandler.ProcessReceivedMessage(nil, fromConnectedPeerId) assert.NotNil(t, err) assert.Contains(t, "nil message", err.Error()) } diff --git a/p2p/example/libp2p/internalBroadcastSpeedMeasure/main.go b/p2p/example/libp2p/internalBroadcastSpeedMeasure/main.go index 1eeb7e4dc4d..0ff4d5eea15 100644 --- a/p2p/example/libp2p/internalBroadcastSpeedMeasure/main.go +++ b/p2p/example/libp2p/internalBroadcastSpeedMeasure/main.go @@ -41,7 +41,7 @@ func main() { _ = mes1.RegisterMessageProcessor("test1", &mock.MessageProcessorStub{ - ProcessMessageCalled: func(message p2p.MessageP2P, _ func(buffToSend []byte)) error { + ProcessMessageCalled: func(message p2p.MessageP2P) error { atomic.AddInt64(&bytesReceived1, int64(len(message.Data()))) return nil @@ -49,7 +49,7 @@ func main() { }) _ = mes1.RegisterMessageProcessor("test2", &mock.MessageProcessorStub{ - ProcessMessageCalled: func(message p2p.MessageP2P, _ func(buffToSend []byte)) error { + ProcessMessageCalled: func(message p2p.MessageP2P) error { atomic.AddInt64(&bytesReceived2, int64(len(message.Data()))) return nil @@ -57,7 +57,7 @@ func main() { }) _ = mes1.RegisterMessageProcessor("test3", &mock.MessageProcessorStub{ - ProcessMessageCalled: func(message p2p.MessageP2P, _ func(buffToSend []byte)) error { + ProcessMessageCalled: func(message p2p.MessageP2P) error { atomic.AddInt64(&bytesReceived3, int64(len(message.Data()))) return nil diff --git a/p2p/libp2p/issues_test.go b/p2p/libp2p/issues_test.go 
index 7ed9b1c0817..10dc89eaf5f 100644 --- a/p2p/libp2p/issues_test.go +++ b/p2p/libp2p/issues_test.go @@ -81,7 +81,7 @@ func TestIssueEN898_StreamResetError(t *testing.T) { _ = mes2.CreateTopic(topic, false) _ = mes2.RegisterMessageProcessor(topic, &mock.MessageProcessorStub{ - ProcessMessageCalled: func(message p2p.MessageP2P, _ func(buffToSend []byte)) error { + ProcessMessageCalled: func(message p2p.MessageP2P) error { if bytes.Equal(message.Data(), largePacket) { largePacketReceived.Store(true) } diff --git a/p2p/libp2p/netMessenger.go b/p2p/libp2p/netMessenger.go index 7ee78de8514..df0a783cb98 100644 --- a/p2p/libp2p/netMessenger.go +++ b/p2p/libp2p/netMessenger.go @@ -451,12 +451,8 @@ func (netMes *networkMessenger) RegisterMessageProcessor(topic string, handler p return p2p.ErrTopicValidatorOperationNotSupported } - broadcastHandler := func(buffToSend []byte) { - netMes.Broadcast(topic, buffToSend) - } - err := netMes.pb.RegisterTopicValidator(topic, func(ctx context.Context, pid peer.ID, message *pubsub.Message) bool { - err := handler.ProcessReceivedMessage(NewMessage(message), p2p.PeerID(pid), broadcastHandler) + err := handler.ProcessReceivedMessage(NewMessage(message), p2p.PeerID(pid)) if err != nil { log.Trace("p2p validator", "error", err.Error(), "topics", message.TopicIDs) } @@ -511,7 +507,7 @@ func (netMes *networkMessenger) directMessageHandler(message p2p.MessageP2P, fro } go func(msg p2p.MessageP2P) { - err := processor.ProcessReceivedMessage(msg, fromConnectedPeer, nil) + err := processor.ProcessReceivedMessage(msg, fromConnectedPeer) if err != nil { log.Trace("p2p validator", "error", err.Error(), "topics", msg.TopicIDs()) } diff --git a/p2p/libp2p/netMessenger_test.go b/p2p/libp2p/netMessenger_test.go index 39cfec686b8..cb8dc5dbedf 100644 --- a/p2p/libp2p/netMessenger_test.go +++ b/p2p/libp2p/netMessenger_test.go @@ -9,7 +9,6 @@ import ( "fmt" "strings" "sync" - "sync/atomic" "testing" "time" @@ -44,7 +43,7 @@ func 
prepareMessengerForMatchDataReceive(mes p2p.Messenger, matchData []byte, wg _ = mes.RegisterMessageProcessor("test", &mock.MessageProcessorStub{ - ProcessMessageCalled: func(message p2p.MessageP2P, _ func(buffToSend []byte)) error { + ProcessMessageCalled: func(message p2p.MessageP2P) error { if bytes.Equal(matchData, message.Data()) { fmt.Printf("%s got the message\n", mes.ID().Pretty()) wg.Done() @@ -78,30 +77,6 @@ func createMockNetworkOf2() (mocknet.Mocknet, p2p.Messenger, p2p.Messenger) { return netw, mes1, mes2 } -func createMockNetwork(numOfPeers int) (mocknet.Mocknet, []p2p.Messenger) { - netw := mocknet.New(context.Background()) - peers := make([]p2p.Messenger, numOfPeers) - - for i := 0; i < numOfPeers; i++ { - peers[i], _ = libp2p.NewMemoryMessenger(context.Background(), netw, discovery.NewNullDiscoverer()) - } - - _ = netw.LinkAll() - - return netw, peers -} - -func connectPeersFullMesh(peers []p2p.Messenger) { - for i := 0; i < len(peers); i++ { - for j := i + 1; j < len(peers); j++ { - err := peers[i].ConnectToPeer(peers[j].Addresses()[0]) - if err != nil { - fmt.Printf("error connecting: %s\n", err.Error()) - } - } - } -} - func createMockMessenger() p2p.Messenger { netw := mocknet.New(context.Background()) @@ -1334,72 +1309,6 @@ func TestLibp2pMessenger_SendDataThrottlerShouldReturnCorrectObject(t *testing.T _ = mes.Close() } -func TestLibp2pMessenger_SendDirectShouldNotBroadcastIfMessageIsPartiallyInvalid(t *testing.T) { - if testing.Short() { - t.Skip("this is not a short test") - } - - numOfPeers := 4 - _, peers := createMockNetwork(numOfPeers) - connectPeersFullMesh(peers) - - broadcastMsgResolver := []byte("broadcast resolver msg") - directMsgResolver := []byte("resolver msg") - msgRequester := []byte("resolver msg is partially valid, mine is ok") - numResolverMessagesReceived := int32(0) - mesProcessorRequester := &mock.MessageProcessorStub{ - ProcessMessageCalled: func(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error { 
- if !bytes.Equal(message.Data(), directMsgResolver) { - // pass through all other messages - return nil - } - - atomic.AddInt32(&numResolverMessagesReceived, 1) - if broadcastHandler != nil { - broadcastHandler(msgRequester) - } - - return errors.New("resolver msg is partially valid") - }, - } - - mesProcessorResolverAndOtherPeers := &mock.MessageProcessorStub{ - ProcessMessageCalled: func(message p2p.MessageP2P, _ func(buffToSend []byte)) error { - if bytes.Equal(message.Data(), msgRequester) { - assert.Fail(t, "other peers should have not received filtered out requester's message") - } - return nil - }, - } - - idxRequester := 0 - - topic := "testTopic" - for i := 0; i < numOfPeers; i++ { - _ = peers[i].CreateTopic(topic, true) - if i == idxRequester { - _ = peers[i].RegisterMessageProcessor(topic, mesProcessorRequester) - } else { - _ = peers[i].RegisterMessageProcessor(topic, mesProcessorResolverAndOtherPeers) - } - } - - fmt.Println("Delaying for peer connections and topic broadcast...") - time.Sleep(time.Second * 5) - - idxResolver := 1 - fmt.Println("broadcasting a message") - peers[idxResolver].Broadcast(topic, broadcastMsgResolver) - - time.Sleep(time.Second) - - fmt.Println("sending a direct message") - _ = peers[idxResolver].SendToConnectedPeer(topic, directMsgResolver, peers[idxRequester].ID()) - - time.Sleep(time.Second * 2) - assert.Equal(t, int32(1), atomic.LoadInt32(&numResolverMessagesReceived)) -} - func TestLibp2pMessenger_SendDirectWithMockNetToConnectedPeerShouldWork(t *testing.T) { msg := []byte("test message") diff --git a/p2p/memp2p/memp2p.go b/p2p/memp2p/memp2p.go index 47e2641855e..23a85011a4e 100644 --- a/p2p/memp2p/memp2p.go +++ b/p2p/memp2p/memp2p.go @@ -292,11 +292,11 @@ func (messenger *Messenger) parametricBroadcast(topic string, data []byte, async for _, peer := range messenger.Network.Peers() { if async { go func(receivingPeer *Messenger) { - err := receivingPeer.ReceiveMessage(topic, message, messenger.P2PID, true) + err := 
receivingPeer.ReceiveMessage(topic, message, messenger.P2PID) log.LogIfError(err) }(peer) } else { - err = peer.ReceiveMessage(topic, message, messenger.P2PID, true) + err = peer.ReceiveMessage(topic, message, messenger.P2PID) } if err != nil { break @@ -322,7 +322,7 @@ func (messenger *Messenger) SendToConnectedPeer(topic string, buff []byte, peerI return ErrReceivingPeerNotConnected } - return receivingPeer.ReceiveMessage(topic, message, messenger.P2PID, false) + return receivingPeer.ReceiveMessage(topic, message, messenger.P2PID) } return ErrNotConnectedToNetwork @@ -333,7 +333,7 @@ func (messenger *Messenger) SendToConnectedPeer(topic string, buff []byte, peerI // previously registered a message processor for that topic. The Network will // log the message only if the Network.LogMessages flag is set and only if the // Messenger has the requested topic and MessageProcessor. -func (messenger *Messenger) ReceiveMessage(topic string, message p2p.MessageP2P, fromConnectedPeer p2p.PeerID, allowBroadcast bool) error { +func (messenger *Messenger) ReceiveMessage(topic string, message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { messenger.TopicsMutex.Lock() validator, found := messenger.Topics[topic] messenger.TopicsMutex.Unlock() @@ -350,14 +350,7 @@ func (messenger *Messenger) ReceiveMessage(topic string, message p2p.MessageP2P, messenger.Network.LogMessage(message) } - var handler func(buffToSend []byte) - if allowBroadcast { - handler = func(buffToSend []byte) { - messenger.Broadcast(topic, buffToSend) - } - } - - return validator.ProcessReceivedMessage(message, fromConnectedPeer, handler) + return validator.ProcessReceivedMessage(message, fromConnectedPeer) } // IsConnectedToTheNetwork returns true as this implementation is always connected to its network diff --git a/p2p/mock/messageProcessorStub.go b/p2p/mock/messageProcessorStub.go index 9313598b6e8..ce6d2bc2eda 100644 --- a/p2p/mock/messageProcessorStub.go +++ b/p2p/mock/messageProcessorStub.go @@ 
-5,11 +5,11 @@ import ( ) type MessageProcessorStub struct { - ProcessMessageCalled func(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error + ProcessMessageCalled func(message p2p.MessageP2P) error } -func (mps *MessageProcessorStub) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID, broadcastHandler func(buffToSend []byte)) error { - return mps.ProcessMessageCalled(message, broadcastHandler) +func (mps *MessageProcessorStub) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { + return mps.ProcessMessageCalled(message) } // IsInterfaceNil returns true if there is no value under the interface diff --git a/p2p/mock/mockMessageProcessor.go b/p2p/mock/mockMessageProcessor.go index 015ab0aaaa8..f2e6a9f119e 100644 --- a/p2p/mock/mockMessageProcessor.go +++ b/p2p/mock/mockMessageProcessor.go @@ -16,7 +16,7 @@ func NewMockMessageProcessor(peer p2p.PeerID) *MockMessageProcessor { return &processor } -func (processor *MockMessageProcessor) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID, _ func(buffToSend []byte)) error { +func (processor *MockMessageProcessor) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID) error { fmt.Printf("Message received by %s from %s: %s\n", string(processor.Peer), string(message.Peer()), string(message.Data())) return nil } diff --git a/p2p/p2p.go b/p2p/p2p.go index 1c968d7eab7..a2a2ef6a756 100644 --- a/p2p/p2p.go +++ b/p2p/p2p.go @@ -11,14 +11,7 @@ import ( // All implementations that will be called from Messenger implementation will need to satisfy this interface // If the function returns a non nil value, the received message will not be propagated to its connected peers type MessageProcessor interface { - ProcessReceivedMessage(message MessageP2P, fromConnectedPeer PeerID, broadcastHandler func(buffToSend []byte)) error - IsInterfaceNil() bool -} - -// BroadcastCallbackHandler will be implemented by those message processor instances that need 
to send back -// a subset of received message (after filtering occurs) -type BroadcastCallbackHandler interface { - SetBroadcastCallback(callback func(buffToSend []byte)) + ProcessReceivedMessage(message MessageP2P, fromConnectedPeer PeerID) error IsInterfaceNil() bool } diff --git a/process/interceptors/multiDataInterceptor.go b/process/interceptors/multiDataInterceptor.go index ba9777e932c..2d0cbf5e558 100644 --- a/process/interceptors/multiDataInterceptor.go +++ b/process/interceptors/multiDataInterceptor.go @@ -53,7 +53,7 @@ func NewMultiDataInterceptor( // ProcessReceivedMessage is the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to) -func (mdi *MultiDataInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID, broadcastHandler func(buffToSend []byte)) error { +func (mdi *MultiDataInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { err := preProcessMesage(mdi.throttler, message) if err != nil { return err @@ -70,7 +70,6 @@ func (mdi *MultiDataInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, return process.ErrNoDataInMessage } - filteredMultiDataBuff := make([][]byte, 0) lastErrEncountered := error(nil) wgProcess := &sync.WaitGroup{} wgProcess.Add(len(multiDataBuff)) @@ -94,8 +93,6 @@ func (mdi *MultiDataInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, continue } - //data is validated, add it to filtered out buff - filteredMultiDataBuff = append(filteredMultiDataBuff, dataBuff) if !interceptedData.IsForCurrentShard() { log.Trace("intercepted data is for other shards") wgProcess.Done() @@ -105,19 +102,6 @@ func (mdi *MultiDataInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, go processInterceptedData(mdi.processor, interceptedData, wgProcess) } - var buffToSend []byte - haveDataForBroadcast := len(filteredMultiDataBuff) > 0 && lastErrEncountered != nil - if 
haveDataForBroadcast { - buffToSend, err = mdi.marshalizer.Marshal(filteredMultiDataBuff) - if err != nil { - return err - } - - if broadcastHandler != nil { - broadcastHandler(buffToSend) - } - } - return lastErrEncountered } diff --git a/process/interceptors/multiDataInterceptor_test.go b/process/interceptors/multiDataInterceptor_test.go index d53d1b7bd87..2219f559d0b 100644 --- a/process/interceptors/multiDataInterceptor_test.go +++ b/process/interceptors/multiDataInterceptor_test.go @@ -99,7 +99,7 @@ func TestMultiDataInterceptor_ProcessReceivedMessageNilMessageShouldErr(t *testi &mock.InterceptorThrottlerStub{}, ) - err := mdi.ProcessReceivedMessage(nil, fromConnectedPeerId, nil) + err := mdi.ProcessReceivedMessage(nil, fromConnectedPeerId) assert.Equal(t, process.ErrNilMessage, err) } @@ -122,7 +122,7 @@ func TestMultiDataInterceptor_ProcessReceivedMessageUnmarshalFailsShouldErr(t *t msg := &mock.P2PMessageMock{ DataField: []byte("data to be processed"), } - err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, nil) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId) assert.Equal(t, errExpeced, err) } @@ -144,12 +144,12 @@ func TestMultiDataInterceptor_ProcessReceivedMessageUnmarshalReturnsEmptySliceSh msg := &mock.P2PMessageMock{ DataField: []byte("data to be processed"), } - err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, nil) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId) assert.Equal(t, process.ErrNoDataInMessage, err) } -func TestMultiDataInterceptor_ProcessReceivedCreateFailsShouldNotResend(t *testing.T) { +func TestMultiDataInterceptor_ProcessReceivedCreateFailsShouldErr(t *testing.T) { t.Parallel() buffData := [][]byte{[]byte("buff1"), []byte("buff2")} @@ -158,7 +158,6 @@ func TestMultiDataInterceptor_ProcessReceivedCreateFailsShouldNotResend(t *testi checkCalledNum := int32(0) processCalledNum := int32(0) throttler := createMockThrottler() - broadcastNum := int32(0) errExpected := errors.New("expected err") 
mdi, _ := interceptors.NewMultiDataInterceptor( marshalizer, @@ -170,15 +169,12 @@ func TestMultiDataInterceptor_ProcessReceivedCreateFailsShouldNotResend(t *testi createMockInterceptorStub(&checkCalledNum, &processCalledNum), throttler, ) - bradcastCallback := func(buffToSend []byte) { - atomic.AddInt32(&broadcastNum, 1) - } dataField, _ := marshalizer.Marshal(buffData) msg := &mock.P2PMessageMock{ DataField: dataField, } - err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, bradcastCallback) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId) time.Sleep(time.Second) @@ -187,10 +183,9 @@ func TestMultiDataInterceptor_ProcessReceivedCreateFailsShouldNotResend(t *testi assert.Equal(t, int32(0), atomic.LoadInt32(&processCalledNum)) assert.Equal(t, int32(1), throttler.StartProcessingCount()) assert.Equal(t, int32(1), throttler.EndProcessingCount()) - assert.Equal(t, int32(0), atomic.LoadInt32(&broadcastNum)) } -func TestMultiDataInterceptor_ProcessReceivedPartiallyCorrectDataShouldSendOnlyCorrectPart(t *testing.T) { +func TestMultiDataInterceptor_ProcessReceivedPartiallyCorrectDataShouldErr(t *testing.T) { t.Parallel() correctData := []byte("buff1") @@ -201,7 +196,6 @@ func TestMultiDataInterceptor_ProcessReceivedPartiallyCorrectDataShouldSendOnlyC checkCalledNum := int32(0) processCalledNum := int32(0) throttler := createMockThrottler() - broadcastNum := int32(0) errExpected := errors.New("expected err") interceptedData := &mock.InterceptedDataStub{ CheckValidityCalled: func() error { @@ -225,27 +219,12 @@ func TestMultiDataInterceptor_ProcessReceivedPartiallyCorrectDataShouldSendOnlyC createMockInterceptorStub(&checkCalledNum, &processCalledNum), throttler, ) - bradcastCallback := func(buffToSend []byte) { - unmarshalledBuffs := make([][]byte, 0) - err := marshalizer.Unmarshal(&unmarshalledBuffs, buffToSend) - if err != nil { - return - } - if len(unmarshalledBuffs) == 0 { - return - } - if !bytes.Equal(unmarshalledBuffs[0], correctData) { - return 
- } - - atomic.AddInt32(&broadcastNum, 1) - } dataField, _ := marshalizer.Marshal(buffData) msg := &mock.P2PMessageMock{ DataField: dataField, } - err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, bradcastCallback) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId) time.Sleep(time.Second) @@ -254,7 +233,6 @@ func TestMultiDataInterceptor_ProcessReceivedPartiallyCorrectDataShouldSendOnlyC assert.Equal(t, int32(1), atomic.LoadInt32(&processCalledNum)) assert.Equal(t, int32(1), throttler.StartProcessingCount()) assert.Equal(t, int32(1), throttler.EndProcessingCount()) - assert.Equal(t, int32(1), atomic.LoadInt32(&broadcastNum)) } func TestMultiDataInterceptor_ProcessReceivedMessageNotValidShouldErrAndNotProcess(t *testing.T) { @@ -290,7 +268,7 @@ func TestMultiDataInterceptor_ProcessReceivedMessageNotValidShouldErrAndNotProce msg := &mock.P2PMessageMock{ DataField: dataField, } - err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, nil) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId) time.Sleep(time.Second) @@ -333,7 +311,7 @@ func TestMultiDataInterceptor_ProcessReceivedMessageIsAddressedToOtherShardShoul msg := &mock.P2PMessageMock{ DataField: dataField, } - err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, nil) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId) time.Sleep(time.Second) @@ -376,7 +354,7 @@ func TestMultiDataInterceptor_ProcessReceivedMessageOkMessageShouldRetNil(t *tes msg := &mock.P2PMessageMock{ DataField: dataField, } - err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, nil) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId) time.Sleep(time.Second) diff --git a/process/interceptors/singleDataInterceptor.go b/process/interceptors/singleDataInterceptor.go index 18e5b6abdd7..ef662b59c71 100644 --- a/process/interceptors/singleDataInterceptor.go +++ b/process/interceptors/singleDataInterceptor.go @@ -43,7 +43,7 @@ func NewSingleDataInterceptor( // ProcessReceivedMessage 
is the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to) -func (sdi *SingleDataInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID, _ func(buffToSend []byte)) error { +func (sdi *SingleDataInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { err := preProcessMesage(sdi.throttler, message) if err != nil { return err diff --git a/process/interceptors/singleDataInterceptor_test.go b/process/interceptors/singleDataInterceptor_test.go index 07ba7b4e184..65fd57273e8 100644 --- a/process/interceptors/singleDataInterceptor_test.go +++ b/process/interceptors/singleDataInterceptor_test.go @@ -103,7 +103,7 @@ func TestSingleDataInterceptor_ProcessReceivedMessageNilMessageShouldErr(t *test &mock.InterceptorThrottlerStub{}, ) - err := sdi.ProcessReceivedMessage(nil, fromConnectedPeerId, nil) + err := sdi.ProcessReceivedMessage(nil, fromConnectedPeerId) assert.Equal(t, process.ErrNilMessage, err) } @@ -129,7 +129,7 @@ func TestSingleDataInterceptor_ProcessReceivedMessageFactoryCreationErrorShouldE msg := &mock.P2PMessageMock{ DataField: []byte("data to be processed"), } - err := sdi.ProcessReceivedMessage(msg, fromConnectedPeerId, nil) + err := sdi.ProcessReceivedMessage(msg, fromConnectedPeerId) assert.Equal(t, errExpected, err) } @@ -163,7 +163,7 @@ func TestSingleDataInterceptor_ProcessReceivedMessageIsNotValidShouldNotCallProc msg := &mock.P2PMessageMock{ DataField: []byte("data to be processed"), } - err := sdi.ProcessReceivedMessage(msg, fromConnectedPeerId, nil) + err := sdi.ProcessReceivedMessage(msg, fromConnectedPeerId) time.Sleep(time.Second) @@ -202,7 +202,7 @@ func TestSingleDataInterceptor_ProcessReceivedMessageIsNotForCurrentShardShouldN msg := &mock.P2PMessageMock{ DataField: []byte("data to be processed"), } - err := sdi.ProcessReceivedMessage(msg, fromConnectedPeerId, nil) + err := 
sdi.ProcessReceivedMessage(msg, fromConnectedPeerId) time.Sleep(time.Second) @@ -241,7 +241,7 @@ func TestSingleDataInterceptor_ProcessReceivedMessageShouldWork(t *testing.T) { msg := &mock.P2PMessageMock{ DataField: []byte("data to be processed"), } - err := sdi.ProcessReceivedMessage(msg, fromConnectedPeerId, nil) + err := sdi.ProcessReceivedMessage(msg, fromConnectedPeerId) time.Sleep(time.Second) diff --git a/process/interface.go b/process/interface.go index 6bc98dff504..d0997d28f11 100644 --- a/process/interface.go +++ b/process/interface.go @@ -346,7 +346,7 @@ type BlockChainHookHandler interface { // Interceptor defines what a data interceptor should do // It should also adhere to the p2p.MessageProcessor interface so it can wire to a p2p.Messenger type Interceptor interface { - ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID, broadcastHandler func(buffToSend []byte)) error + ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error IsInterfaceNil() bool } @@ -534,7 +534,7 @@ type RequestBlockBodyHandler interface { // FloodPreventer defines the behavior of a component that is able to signal that too many events occurred // on a provided identifier between Reset calls type FloodPreventer interface { - TryIncrement(identifier string, size uint64) bool + Increment(identifier string, size uint64) bool Reset() IsInterfaceNil() bool } diff --git a/process/mock/headerResolverMock.go b/process/mock/headerResolverMock.go index 8c858f74096..2a36776cebd 100644 --- a/process/mock/headerResolverMock.go +++ b/process/mock/headerResolverMock.go @@ -14,7 +14,7 @@ func (hrm *HeaderResolverMock) RequestDataFromHash(hash []byte) error { return hrm.RequestDataFromHashCalled(hash) } -func (hrm *HeaderResolverMock) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID, _ func(buffToSend []byte)) error { +func (hrm *HeaderResolverMock) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID) error { return 
hrm.ProcessReceivedMessageCalled(message) } diff --git a/process/mock/interceptorStub.go b/process/mock/interceptorStub.go index 279f042dade..8e1cc547fc9 100644 --- a/process/mock/interceptorStub.go +++ b/process/mock/interceptorStub.go @@ -5,11 +5,11 @@ import ( ) type InterceptorStub struct { - ProcessReceivedMessageCalled func(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error + ProcessReceivedMessageCalled func(message p2p.MessageP2P) error } -func (is *InterceptorStub) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID, broadcastHandler func(buffToSend []byte)) error { - return is.ProcessReceivedMessageCalled(message, broadcastHandler) +func (is *InterceptorStub) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID) error { + return is.ProcessReceivedMessageCalled(message) } // IsInterfaceNil returns true if there is no value under the interface diff --git a/process/mock/miniBlocksResolverMock.go b/process/mock/miniBlocksResolverMock.go index 6c30bc4a27b..55ae0d352a9 100644 --- a/process/mock/miniBlocksResolverMock.go +++ b/process/mock/miniBlocksResolverMock.go @@ -21,7 +21,7 @@ func (hrm *MiniBlocksResolverMock) RequestDataFromHashArray(hashes [][]byte) err return hrm.RequestDataFromHashArrayCalled(hashes) } -func (hrm *MiniBlocksResolverMock) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID, _ func(buffToSend []byte)) error { +func (hrm *MiniBlocksResolverMock) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID) error { return hrm.ProcessReceivedMessageCalled(message) } diff --git a/process/mock/resolverStub.go b/process/mock/resolverStub.go index 588215cc51b..0a33666afed 100644 --- a/process/mock/resolverStub.go +++ b/process/mock/resolverStub.go @@ -6,15 +6,15 @@ import ( type ResolverStub struct { RequestDataFromHashCalled func(hash []byte) error - ProcessReceivedMessageCalled func(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error + ProcessReceivedMessageCalled func(message 
p2p.MessageP2P) error } func (rs *ResolverStub) RequestDataFromHash(hash []byte) error { return rs.RequestDataFromHashCalled(hash) } -func (rs *ResolverStub) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID, broadcastHandler func(buffToSend []byte)) error { - return rs.ProcessReceivedMessageCalled(message, broadcastHandler) +func (rs *ResolverStub) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID) error { + return rs.ProcessReceivedMessageCalled(message) } // IsInterfaceNil returns true if there is no value under the interface diff --git a/process/throttle/antiflood/quotaFloodPreventer.go b/process/throttle/antiflood/quotaFloodPreventer.go index 216ae69b763..ad3a753af8a 100644 --- a/process/throttle/antiflood/quotaFloodPreventer.go +++ b/process/throttle/antiflood/quotaFloodPreventer.go @@ -4,12 +4,14 @@ import ( "fmt" "sync" + "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/storage" ) const minMessages = 1 const minTotalSize = 1 //1Byte +const initNumMessages = 1 type quota struct { numMessages uint32 @@ -31,7 +33,7 @@ func NewQuotaFloodPreventer( maxTotalSizePerPeer uint64, ) (*quotaFloodPreventer, error) { - if cacher == nil { + if check.IfNil(cacher) { return nil, process.ErrNilCacher } if maxMessagesPerPeer < minMessages { @@ -56,11 +58,11 @@ func NewQuotaFloodPreventer( }, nil } -// TryIncrement tries to increment the counter values held at "identifier" position +// Increment tries to increment the counter values held at "identifier" position // It returns true if it had succeeded incrementing (existing counter value is lower or equal with provided maxOperations) -func (qfp *quotaFloodPreventer) TryIncrement(identifier string, size uint64) bool { - //we need the mutOperation here as the get and put should be done atomically. 
- // Otherwise we might yield a slightly higher number of false valid increments +// We need the mutOperation here as the get and put should be done atomically. +// Otherwise we might yield a slightly higher number of false valid increments +func (qfp *quotaFloodPreventer) Increment(identifier string, size uint64) bool { qfp.mutOperation.Lock() defer qfp.mutOperation.Unlock() @@ -92,7 +94,7 @@ func (qfp *quotaFloodPreventer) TryIncrement(identifier string, size uint64) boo func (qfp *quotaFloodPreventer) putDefaultQuota(cacher storage.Cacher, identifier string, size uint64) { q := &quota{ - numMessages: 1, + numMessages: initNumMessages, totalSize: size, } qfp.cacher.Put([]byte(identifier), q) diff --git a/process/throttle/antiflood/quotaFloodPreventer_test.go b/process/throttle/antiflood/quotaFloodPreventer_test.go index 289500d11af..2c7aa5f6a85 100644 --- a/process/throttle/antiflood/quotaFloodPreventer_test.go +++ b/process/throttle/antiflood/quotaFloodPreventer_test.go @@ -50,9 +50,9 @@ func TestNewQuotaFloodPreventer_ShouldWork(t *testing.T) { assert.Nil(t, err) } -//------- TryIncrement +//------- Increment -func TestNewQuotaFloodPreventer_TryIncrementIdentifierNotPresentPutQuotaAndReturnTrue(t *testing.T) { +func TestNewQuotaFloodPreventer_IncrementIdentifierNotPresentPutQuotaAndReturnTrue(t *testing.T) { t.Parallel() putWasCalled := false @@ -78,13 +78,13 @@ func TestNewQuotaFloodPreventer_TryIncrementIdentifierNotPresentPutQuotaAndRetur minTotalSize*10, ) - ok := qfp.TryIncrement("identifier", size) + ok := qfp.Increment("identifier", size) assert.True(t, ok) assert.True(t, putWasCalled) } -func TestNewQuotaFloodPreventer_TryIncrementNotQuotaSavedInCacheShouldPutQuotaAndReturnTrue(t *testing.T) { +func TestNewQuotaFloodPreventer_IncrementNotQuotaSavedInCacheShouldPutQuotaAndReturnTrue(t *testing.T) { t.Parallel() putWasCalled := false @@ -110,13 +110,13 @@ func TestNewQuotaFloodPreventer_TryIncrementNotQuotaSavedInCacheShouldPutQuotaAn minTotalSize*10, ) - ok 
:= qfp.TryIncrement("identifier", size) + ok := qfp.Increment("identifier", size) assert.True(t, ok) assert.True(t, putWasCalled) } -func TestNewQuotaFloodPreventer_TryIncrementUnderMaxValuesShouldIncrementAndReturnTrue(t *testing.T) { +func TestNewQuotaFloodPreventer_IncrementUnderMaxValuesShouldIncrementAndReturnTrue(t *testing.T) { t.Parallel() putWasCalled := false @@ -148,13 +148,13 @@ func TestNewQuotaFloodPreventer_TryIncrementUnderMaxValuesShouldIncrementAndRetu minTotalSize*10, ) - ok := qfp.TryIncrement("identifier", size) + ok := qfp.Increment("identifier", size) assert.True(t, ok) assert.True(t, putWasCalled) } -func TestNewQuotaFloodPreventer_TryIncrementOverMaxNumMessagesShouldNotPutAndReturnFalse(t *testing.T) { +func TestNewQuotaFloodPreventer_IncrementOverMaxNumMessagesShouldNotPutAndReturnFalse(t *testing.T) { t.Parallel() existingMessages := uint32(minMessages + 11) @@ -178,12 +178,12 @@ func TestNewQuotaFloodPreventer_TryIncrementOverMaxNumMessagesShouldNotPutAndRet minTotalSize*10, ) - ok := qfp.TryIncrement("identifier", minTotalSize) + ok := qfp.Increment("identifier", minTotalSize) assert.False(t, ok) } -func TestNewQuotaFloodPreventer_TryIncrementOverMaxSizeShouldNotPutAndReturnFalse(t *testing.T) { +func TestNewQuotaFloodPreventer_IncrementOverMaxSizeShouldNotPutAndReturnFalse(t *testing.T) { t.Parallel() existingMessages := uint32(minMessages) @@ -207,12 +207,12 @@ func TestNewQuotaFloodPreventer_TryIncrementOverMaxSizeShouldNotPutAndReturnFals minTotalSize*10, ) - ok := qfp.TryIncrement("identifier", minTotalSize) + ok := qfp.Increment("identifier", minTotalSize) assert.False(t, ok) } -func TestCountersMap_TryIncrementShouldWorkConcurrently(t *testing.T) { +func TestCountersMap_IncrementShouldWorkConcurrently(t *testing.T) { t.Parallel() qfp, _ := NewQuotaFloodPreventer( @@ -224,7 +224,7 @@ func TestCountersMap_TryIncrementShouldWorkConcurrently(t *testing.T) { wg.Add(numIterations) for i := 0; i < numIterations; i++ { go func(idx int) { 
- ok := qfp.TryIncrement(fmt.Sprintf("%d", idx), minTotalSize) + ok := qfp.Increment(fmt.Sprintf("%d", idx), minTotalSize) assert.True(t, ok) wg.Done() }(i) @@ -254,7 +254,7 @@ func TestCountersMap_ResetShouldCallCacherClear(t *testing.T) { assert.True(t, clearCalled) } -func TestCountersMap_TryIncrementAndResetShouldWorkConcurrently(t *testing.T) { +func TestCountersMap_IncrementAndResetShouldWorkConcurrently(t *testing.T) { t.Parallel() qfp, _ := NewQuotaFloodPreventer( @@ -267,7 +267,7 @@ func TestCountersMap_TryIncrementAndResetShouldWorkConcurrently(t *testing.T) { wg.Add(numIterations + numIterations/10) for i := 0; i < numIterations; i++ { go func(idx int) { - ok := qfp.TryIncrement(fmt.Sprintf("%d", idx), minTotalSize) + ok := qfp.Increment(fmt.Sprintf("%d", idx), minTotalSize) assert.True(t, ok) wg.Done() }(i) From e9f25c2ef40433049e41494bb1d7a8347ebfe4be Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Mon, 9 Dec 2019 17:40:05 +0200 Subject: [PATCH 09/35] moved an assert.Nil in the right place --- node/heartbeat/monitor_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/node/heartbeat/monitor_test.go b/node/heartbeat/monitor_test.go index a824a7cad06..2048f3545cb 100644 --- a/node/heartbeat/monitor_test.go +++ b/node/heartbeat/monitor_test.go @@ -373,9 +373,8 @@ func TestMonitor_ProcessReceivedMessageWithNewShardID(t *testing.T) { buffToSend, err = json.Marshal(hb) - assert.Nil(t, err) - err = mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: buffToSend}, fromConnectedPeerId) + assert.Nil(t, err) time.Sleep(1 * time.Second) From 1624429616a7f39ad9021a60cd9e539e6c205e1d Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Mon, 9 Dec 2019 22:15:12 +0200 Subject: [PATCH 10/35] added the possibility to gather metrics from antiflood component added an p2p specific antiflood component integrated in process subpackage work in progress --- consensus/mock/p2pMessageMock.go | 2 +- consensus/spos/worker.go | 2 +- 
.../resolvers/genericBlockBodyResolver.go | 2 +- dataRetriever/resolvers/headerResolver.go | 2 +- .../resolvers/transactionResolver.go | 2 +- .../p2p/antiflood/antiflooding_test.go | 7 +- .../p2p/antiflood/messageProcessor.go | 18 +-- .../p2p/antiflood/nilQuotaStatusHandler.go | 18 +++ node/heartbeat/monitor.go | 2 +- p2p/antiflood/p2pAntiflood.go | 56 ++++++++ p2p/antiflood/p2pAntiflood_test.go | 134 ++++++++++++++++++ p2p/errors.go | 6 + p2p/mock/floodPreventerStub.go | 18 +++ p2p/mock/p2pMessageMock.go | 51 +++++++ p2p/p2p.go | 8 ++ process/block/argProcessor.go | 3 +- process/block/metablock.go | 3 +- .../block/preprocess/gasConsumption_test.go | 2 +- process/errors.go | 6 + .../metachain/interceptorsContainerFactory.go | 16 ++- .../interceptorsContainerFactory_test.go | 49 +++++++ .../shard/interceptorsContainerFactory.go | 17 ++- .../interceptorsContainerFactory_test.go | 53 +++++++ process/interceptors/common.go | 13 +- process/interceptors/common_test.go | 47 +++++- process/interceptors/multiDataInterceptor.go | 29 ++-- .../interceptors/multiDataInterceptor_test.go | 36 +++++ process/interceptors/singleDataInterceptor.go | 25 ++-- .../singleDataInterceptor_test.go | 23 +++ process/interface.go | 13 ++ process/mock/cacherMock.go | 9 +- process/mock/p2pAntifloodHandlerStub.go | 15 ++ process/mock/quotaStatusHandlerStub.go | 21 +++ process/scToProtocol/stakingToPeer.go | 5 +- process/throttle/antiflood/interface.go | 10 ++ .../throttle/antiflood/quotaFloodPreventer.go | 85 +++++++++-- .../antiflood/quotaFloodPreventer_test.go | 119 ++++++++++++++-- 37 files changed, 831 insertions(+), 96 deletions(-) create mode 100644 integrationTests/p2p/antiflood/nilQuotaStatusHandler.go create mode 100644 p2p/antiflood/p2pAntiflood.go create mode 100644 p2p/antiflood/p2pAntiflood_test.go create mode 100644 p2p/mock/floodPreventerStub.go create mode 100644 p2p/mock/p2pMessageMock.go create mode 100644 process/mock/p2pAntifloodHandlerStub.go create mode 100644 
process/mock/quotaStatusHandlerStub.go create mode 100644 process/throttle/antiflood/interface.go diff --git a/consensus/mock/p2pMessageMock.go b/consensus/mock/p2pMessageMock.go index ea0d00b555e..9c9cfe3ef9a 100644 --- a/consensus/mock/p2pMessageMock.go +++ b/consensus/mock/p2pMessageMock.go @@ -23,7 +23,7 @@ func (msg *P2PMessageMock) Data() []byte { } func (msg *P2PMessageMock) SeqNo() []byte { - return msg.SeqNo() + return msg.SeqNoField } func (msg *P2PMessageMock) TopicIDs() []string { diff --git a/consensus/spos/worker.go b/consensus/spos/worker.go index 9070d49c5fa..0178128165c 100644 --- a/consensus/spos/worker.go +++ b/consensus/spos/worker.go @@ -216,7 +216,7 @@ func (wrk *Worker) getCleanedList(cnsDataList []*consensus.Message) []*consensus } // ProcessReceivedMessage method redirects the received message to the channel which should handle it -func (wrk *Worker) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { +func (wrk *Worker) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { if message == nil || message.IsInterfaceNil() { return ErrNilMessage } diff --git a/dataRetriever/resolvers/genericBlockBodyResolver.go b/dataRetriever/resolvers/genericBlockBodyResolver.go index de55c75ea79..f45052f2062 100644 --- a/dataRetriever/resolvers/genericBlockBodyResolver.go +++ b/dataRetriever/resolvers/genericBlockBodyResolver.go @@ -52,7 +52,7 @@ func NewGenericBlockBodyResolver( // ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to, usually a request topic) -func (gbbRes *genericBlockBodyResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { +func (gbbRes *genericBlockBodyResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { rd := &dataRetriever.RequestData{} err := 
rd.Unmarshal(gbbRes.marshalizer, message) if err != nil { diff --git a/dataRetriever/resolvers/headerResolver.go b/dataRetriever/resolvers/headerResolver.go index 995daf6ea03..a3fc7e7d9c0 100644 --- a/dataRetriever/resolvers/headerResolver.go +++ b/dataRetriever/resolvers/headerResolver.go @@ -70,7 +70,7 @@ func NewHeaderResolver( // ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to, usually a request topic) -func (hdrRes *HeaderResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { +func (hdrRes *HeaderResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { rd, err := hdrRes.parseReceivedMessage(message) if err != nil { return err diff --git a/dataRetriever/resolvers/transactionResolver.go b/dataRetriever/resolvers/transactionResolver.go index 5b0e5652d69..4a6b75e425d 100644 --- a/dataRetriever/resolvers/transactionResolver.go +++ b/dataRetriever/resolvers/transactionResolver.go @@ -57,7 +57,7 @@ func NewTxResolver( // ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to, usually a request topic) -func (txRes *TxResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { +func (txRes *TxResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { rd := &dataRetriever.RequestData{} err := rd.Unmarshal(txRes.marshalizer, message) if err != nil { diff --git a/integrationTests/p2p/antiflood/antiflooding_test.go b/integrationTests/p2p/antiflood/antiflooding_test.go index 863e4bae4a5..bc41aaa51eb 100644 --- a/integrationTests/p2p/antiflood/antiflooding_test.go +++ b/integrationTests/p2p/antiflood/antiflooding_test.go @@ -208,7 +208,12 @@ func 
createTopicsAndMockInterceptors(peers []p2p.Messenger, topic string, maxNum antifloodPool, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) interceptors[idx] = newMessageProcessor() - interceptors[idx].floodPreventer, _ = antiflood.NewQuotaFloodPreventer(antifloodPool, maxNumMessages, maxSize) + interceptors[idx].floodPreventer, _ = antiflood.NewQuotaFloodPreventer( + antifloodPool, + &nilQuotaStatusHandler{}, + maxNumMessages, + maxSize, + ) err = p.RegisterMessageProcessor(topic, interceptors[idx]) if err != nil { return nil, fmt.Errorf("%w, pid: %s", err, p.ID()) diff --git a/integrationTests/p2p/antiflood/messageProcessor.go b/integrationTests/p2p/antiflood/messageProcessor.go index 310b759ea81..1c170e484ff 100644 --- a/integrationTests/p2p/antiflood/messageProcessor.go +++ b/integrationTests/p2p/antiflood/messageProcessor.go @@ -1,11 +1,11 @@ package antiflood import ( - "fmt" "sync" "sync/atomic" "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/p2p/antiflood" "github.com/ElrondNetwork/elrond-go/process" ) @@ -33,18 +33,10 @@ func (mp *messageProcessor) ProcessReceivedMessage(message p2p.MessageP2P, fromC atomic.AddUint64(&mp.sizeMessagesReceived, uint64(len(message.Data()))) if mp.floodPreventer != nil { - //protect from directly connected peer - ok := mp.floodPreventer.Increment(string(fromConnectedPeer), uint64(len(message.Data()))) - if !ok { - return fmt.Errorf("system flooded") - } - - if fromConnectedPeer != message.Peer() { - //protect from the flooding messages that originate from the same source but come from different peers - ok = mp.floodPreventer.Increment(string(message.Peer()), uint64(len(message.Data()))) - if !ok { - return fmt.Errorf("system flooded") - } + af, _ := antiflood.NewP2pAntiflood(mp.floodPreventer) + err := af.CanProcessMessage(message, fromConnectedPeer) + if err != nil { + return err } } diff --git a/integrationTests/p2p/antiflood/nilQuotaStatusHandler.go 
b/integrationTests/p2p/antiflood/nilQuotaStatusHandler.go new file mode 100644 index 00000000000..1987df1c4b9 --- /dev/null +++ b/integrationTests/p2p/antiflood/nilQuotaStatusHandler.go @@ -0,0 +1,18 @@ +package antiflood + +type nilQuotaStatusHandler struct { +} + +// ResetStatistics is not implemented +func (nqsh *nilQuotaStatusHandler) ResetStatistics() { +} + +// AddQuota is not implemented +func (nqsh *nilQuotaStatusHandler) AddQuota(identifier string, numReceivedMessages uint32, sizeReceivedMessages uint64, + numProcessedMessages uint32, sizeProcessedMessages uint64) { +} + +// IsInterfaceNil returns true if there is no value under the interface +func (nqsh *nilQuotaStatusHandler) IsInterfaceNil() bool { + return nqsh == nil +} diff --git a/node/heartbeat/monitor.go b/node/heartbeat/monitor.go index b8cff09afbf..096948e4f31 100644 --- a/node/heartbeat/monitor.go +++ b/node/heartbeat/monitor.go @@ -201,7 +201,7 @@ func (m *Monitor) SetAppStatusHandler(ash core.AppStatusHandler) error { // ProcessReceivedMessage satisfies the p2p.MessageProcessor interface so it can be called // by the p2p subsystem each time a new heartbeat message arrives -func (m *Monitor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { +func (m *Monitor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { hbRecv, err := m.messageHandler.CreateHeartbeatFromP2pMessage(message) if err != nil { return err diff --git a/p2p/antiflood/p2pAntiflood.go b/p2p/antiflood/p2pAntiflood.go new file mode 100644 index 00000000000..dc61c8ca315 --- /dev/null +++ b/p2p/antiflood/p2pAntiflood.go @@ -0,0 +1,56 @@ +package antiflood + +import ( + "fmt" + + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/p2p" +) + +type p2pAntiflood struct { + p2p.FloodPreventer +} + +// NewP2pAntiflood creates a new p2p anti flood protection mechanism built on top of a flood preventer implementation.
+// It contains only the p2p anti flood logic that should be applied +func NewP2pAntiflood(floodPreventer p2p.FloodPreventer) (*p2pAntiflood, error) { + if check.IfNil(floodPreventer) { + return nil, p2p.ErrNilFloodPreventer + } + + return &p2pAntiflood{ + FloodPreventer: floodPreventer, + }, nil +} + +// CanProcessMessage signals if a p2p message can or not be processed +func (af *p2pAntiflood) CanProcessMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { + floodPreventer := af.FloodPreventer + if check.IfNil(floodPreventer) { + return p2p.ErrNilFloodPreventer + } + if message == nil { + return p2p.ErrNilMessage + } + + //protect from directly connected peer + ok := floodPreventer.Increment(fromConnectedPeer.Pretty(), uint64(len(message.Data()))) + if !ok { + return fmt.Errorf("%w in p2pAntiflood for connected peer", p2p.ErrSystemBusy) + } + + if fromConnectedPeer != message.Peer() { + //protect from the flooding messages that originate from the same source but come from different peers + ok = floodPreventer.Increment(message.Peer().Pretty(), uint64(len(message.Data()))) + if !ok { + return fmt.Errorf("%w in p2pAntiflood for originator", p2p.ErrSystemBusy) + } + } + + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (af *p2pAntiflood) IsInterfaceNil() bool { + return af == nil || check.IfNil(af.FloodPreventer) +} diff --git a/p2p/antiflood/p2pAntiflood_test.go b/p2p/antiflood/p2pAntiflood_test.go new file mode 100644 index 00000000000..9b420b78972 --- /dev/null +++ b/p2p/antiflood/p2pAntiflood_test.go @@ -0,0 +1,134 @@ +package antiflood_test + +import ( + "errors" + "testing" + + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/p2p/antiflood" + "github.com/ElrondNetwork/elrond-go/p2p/mock" + "github.com/stretchr/testify/assert" +) + +func TestNewP2pAntiflood_NilFloodPreventerShouldErr(t *testing.T) { + t.Parallel() + + 
afm, err := antiflood.NewP2pAntiflood(nil) + + assert.True(t, check.IfNil(afm)) + assert.True(t, errors.Is(err, p2p.ErrNilFloodPreventer)) +} + +func TestNewP2pAntiflood_ShouldWork(t *testing.T) { + t.Parallel() + + afm, err := antiflood.NewP2pAntiflood(&mock.FloodPreventerStub{}) + + assert.False(t, check.IfNil(afm)) + assert.Nil(t, err) +} + +func TestP2pAntiflood_SettingInnerFloodPreventerToNil(t *testing.T) { + t.Parallel() + + afm, _ := antiflood.NewP2pAntiflood(&mock.FloodPreventerStub{}) + afm.FloodPreventer = nil + + assert.True(t, check.IfNil(afm)) +} + +//------- CanProcessMessage + +func TestP2pAntiflood_CanProcessMessageNilFloodPreventerShouldError(t *testing.T) { + t.Parallel() + + afm, _ := antiflood.NewP2pAntiflood(&mock.FloodPreventerStub{}) + afm.FloodPreventer = nil + + err := afm.CanProcessMessage(&mock.P2PMessageMock{}, "connected peer") + + assert.Equal(t, p2p.ErrNilFloodPreventer, err) +} + +func TestP2pAntiflood_CanProcessMessageNilMessageShouldError(t *testing.T) { + t.Parallel() + + afm, _ := antiflood.NewP2pAntiflood(&mock.FloodPreventerStub{}) + + err := afm.CanProcessMessage(nil, "connected peer") + + assert.Equal(t, p2p.ErrNilMessage, err) +} + +func TestP2pAntiflood_CanNotIncrementFromConnectedPeerShouldError(t *testing.T) { + t.Parallel() + + messageOriginator := []byte("originator") + fromConnectedPeer := p2p.PeerID("from connected peer") + message := &mock.P2PMessageMock{ + DataField: []byte("data"), + FromField: messageOriginator, + } + afm, _ := antiflood.NewP2pAntiflood(&mock.FloodPreventerStub{ + IncrementCalled: func(identifier string, size uint64) bool { + if identifier != fromConnectedPeer.Pretty() { + assert.Fail(t, "should have been the connected peer") + } + + return false + }, + }) + + err := afm.CanProcessMessage(message, fromConnectedPeer) + + assert.True(t, errors.Is(err, p2p.ErrSystemBusy)) +} + +func TestP2pAntiflood_CanNotIncrementMessageOriginatorShouldError(t *testing.T) { + t.Parallel() + + messageOriginator := 
[]byte("originator") + fromConnectedPeer := p2p.PeerID("from connected peer") + message := &mock.P2PMessageMock{ + DataField: []byte("data"), + FromField: messageOriginator, + PeerField: p2p.PeerID(messageOriginator), + } + afm, _ := antiflood.NewP2pAntiflood(&mock.FloodPreventerStub{ + IncrementCalled: func(identifier string, size uint64) bool { + if identifier == fromConnectedPeer.Pretty() { + return true + } + if identifier != message.PeerField.Pretty() { + assert.Fail(t, "should have been the originator") + } + + return false + }, + }) + + err := afm.CanProcessMessage(message, fromConnectedPeer) + + assert.True(t, errors.Is(err, p2p.ErrSystemBusy)) +} + +func TestP2pAntiflood_ShouldWork(t *testing.T) { + t.Parallel() + + messageOriginator := []byte("originator") + fromConnectedPeer := p2p.PeerID("from connected peer") + message := &mock.P2PMessageMock{ + DataField: []byte("data"), + PeerField: p2p.PeerID(messageOriginator), + } + afm, _ := antiflood.NewP2pAntiflood(&mock.FloodPreventerStub{ + IncrementCalled: func(identifier string, size uint64) bool { + return true + }, + }) + + err := afm.CanProcessMessage(message, fromConnectedPeer) + + assert.Nil(t, err) +} diff --git a/p2p/errors.go b/p2p/errors.go index b132d08020a..02f05980c57 100644 --- a/p2p/errors.go +++ b/p2p/errors.go @@ -87,3 +87,9 @@ var ErrTooManyGoroutines = errors.New(" number of goroutines exceeded") // ErrInvalidValue signals that an invalid value has been provided var ErrInvalidValue = errors.New("invalid value") + +// ErrNilFloodPreventer signals that a nil flood preventer has been provided +var ErrNilFloodPreventer = errors.New("nil flood preventer") + +// ErrSystemBusy signals that the system is busy +var ErrSystemBusy = errors.New("system busy") diff --git a/p2p/mock/floodPreventerStub.go b/p2p/mock/floodPreventerStub.go new file mode 100644 index 00000000000..6783c4afec7 --- /dev/null +++ b/p2p/mock/floodPreventerStub.go @@ -0,0 +1,18 @@ +package mock + +type FloodPreventerStub struct { 
+ IncrementCalled func(identifier string, size uint64) bool + ResetCalled func() +} + +func (fps *FloodPreventerStub) Increment(identifier string, size uint64) bool { + return fps.IncrementCalled(identifier, size) +} + +func (fps *FloodPreventerStub) Reset() { + fps.ResetCalled() +} + +func (fps *FloodPreventerStub) IsInterfaceNil() bool { + return fps == nil +} diff --git a/p2p/mock/p2pMessageMock.go b/p2p/mock/p2pMessageMock.go new file mode 100644 index 00000000000..9c9cfe3ef9a --- /dev/null +++ b/p2p/mock/p2pMessageMock.go @@ -0,0 +1,51 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/p2p" +) + +type P2PMessageMock struct { + FromField []byte + DataField []byte + SeqNoField []byte + TopicIDsField []string + SignatureField []byte + KeyField []byte + PeerField p2p.PeerID +} + +func (msg *P2PMessageMock) From() []byte { + return msg.FromField +} + +func (msg *P2PMessageMock) Data() []byte { + return msg.DataField +} + +func (msg *P2PMessageMock) SeqNo() []byte { + return msg.SeqNoField +} + +func (msg *P2PMessageMock) TopicIDs() []string { + return msg.TopicIDsField +} + +func (msg *P2PMessageMock) Signature() []byte { + return msg.SignatureField +} + +func (msg *P2PMessageMock) Key() []byte { + return msg.KeyField +} + +func (msg *P2PMessageMock) Peer() p2p.PeerID { + return msg.PeerField +} + +// IsInterfaceNil returns true if there is no value under the interface +func (msg *P2PMessageMock) IsInterfaceNil() bool { + if msg == nil { + return true + } + return false +} diff --git a/p2p/p2p.go b/p2p/p2p.go index a2a2ef6a756..22766175f15 100644 --- a/p2p/p2p.go +++ b/p2p/p2p.go @@ -191,3 +191,11 @@ type PeerDiscoveryFactory interface { CreatePeerDiscoverer() (PeerDiscoverer, error) IsInterfaceNil() bool } + +// FloodPreventer defines the behavior of a component that is able to signal that too many events occurred +// on a provided identifier between Reset calls +type FloodPreventer interface { + Increment(identifier string, size uint64) bool + 
Reset() + IsInterfaceNil() bool +} diff --git a/process/block/argProcessor.go b/process/block/argProcessor.go index 6dde2f0b05c..90b12a4b274 100644 --- a/process/block/argProcessor.go +++ b/process/block/argProcessor.go @@ -9,7 +9,6 @@ import ( "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/node/external" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" ) @@ -49,7 +48,7 @@ type ArgShardProcessor struct { type ArgMetaProcessor struct { ArgBaseProcessor DataPool dataRetriever.MetaPoolsHolder - SCDataGetter external.SCQueryService + SCDataGetter process.SCQueryService PeerChangesHandler process.PeerChangesHandler SCToProtocol process.SmartContractToProtocolHandler } diff --git a/process/block/metablock.go b/process/block/metablock.go index 00743f7feb8..22300a335b0 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -14,7 +14,6 @@ import ( "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" - "github.com/ElrondNetwork/elrond-go/node/external" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" "github.com/ElrondNetwork/elrond-go/process/throttle" @@ -27,7 +26,7 @@ type metaProcessor struct { *baseProcessor core serviceContainer.Core dataPool dataRetriever.MetaPoolsHolder - scDataGetter external.SCQueryService + scDataGetter process.SCQueryService scToProtocol process.SmartContractToProtocolHandler peerChanges process.PeerChangesHandler diff --git a/process/block/preprocess/gasConsumption_test.go b/process/block/preprocess/gasConsumption_test.go index 73a801c0d02..03147310c2b 100644 --- a/process/block/preprocess/gasConsumption_test.go +++ b/process/block/preprocess/gasConsumption_test.go @@ -7,9 +7,9 @@ import 
( "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/transaction" - "github.com/ElrondNetwork/elrond-go/node/mock" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/block/preprocess" + "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/stretchr/testify/assert" ) diff --git a/process/errors.go b/process/errors.go index a28f7eda58c..e1a76ca5782 100644 --- a/process/errors.go +++ b/process/errors.go @@ -577,3 +577,9 @@ var ErrNotEnoughGas = errors.New("not enough gas was sent in the transaction") // ErrInvalidValue signals that an invalid value was provided var ErrInvalidValue = errors.New("invalid value provided") + +// ErrNilQuotaStatusHandler signals that a nil quota status handler has been provided +var ErrNilQuotaStatusHandler = errors.New("nil quota status handler") + +// ErrNilAntifloodHandler signals that a nil antiflood handler has been provided +var ErrNilAntifloodHandler = errors.New("nil antiflood handler") diff --git a/process/factory/metachain/interceptorsContainerFactory.go b/process/factory/metachain/interceptorsContainerFactory.go index 0a823e76e1e..61292a251e8 100644 --- a/process/factory/metachain/interceptorsContainerFactory.go +++ b/process/factory/metachain/interceptorsContainerFactory.go @@ -43,6 +43,7 @@ type interceptorsContainerFactory struct { tpsBenchmark *statistics.TpsBenchmark argInterceptorFactory *interceptorFactory.ArgInterceptedDataFactory globalThrottler process.InterceptorThrottler + antifloodHandler process.P2PAntifloodHandler } // NewInterceptorsContainerFactory is responsible for creating a new interceptors factory object @@ -64,6 +65,7 @@ func NewInterceptorsContainerFactory( maxTxNonceDeltaAllowed int, txFeeHandler process.FeeHandler, blackList process.BlackListHandler, + antifloodHandler process.P2PAntifloodHandler, ) (*interceptorsContainerFactory, error) { if check.IfNil(shardCoordinator) { @@ 
-114,6 +116,9 @@ func NewInterceptorsContainerFactory( if check.IfNil(blockSingleSigner) { return nil, process.ErrNilSingleSigner } + if check.IfNil(antifloodHandler) { + return nil, process.ErrNilAntifloodHandler + } argInterceptorFactory := &interceptorFactory.ArgInterceptedDataFactory{ Marshalizer: marshalizer, @@ -142,6 +147,7 @@ func NewInterceptorsContainerFactory( argInterceptorFactory: argInterceptorFactory, maxTxNonceDeltaAllowed: maxTxNonceDeltaAllowed, accounts: accounts, + antifloodHandler: antifloodHandler, } var err error @@ -253,6 +259,7 @@ func (icf *interceptorsContainerFactory) generateMetablockInterceptor() ([]strin hdrFactory, hdrProcessor, icf.globalThrottler, + icf.antifloodHandler, ) if err != nil { return nil, nil, err @@ -317,6 +324,7 @@ func (icf *interceptorsContainerFactory) createOneShardHeaderInterceptor(topic s hdrFactory, hdrProcessor, icf.globalThrottler, + icf.antifloodHandler, ) if err != nil { return nil, err @@ -385,6 +393,7 @@ func (icf *interceptorsContainerFactory) createOneTxInterceptor(topic string) (p txFactory, txProcessor, icf.globalThrottler, + icf.antifloodHandler, ) if err != nil { return nil, err @@ -444,6 +453,7 @@ func (icf *interceptorsContainerFactory) createOneUnsignedTxInterceptor(topic st txFactory, txProcessor, icf.globalThrottler, + icf.antifloodHandler, ) if err != nil { return nil, err @@ -506,6 +516,7 @@ func (icf *interceptorsContainerFactory) createOneMiniBlocksInterceptor(topic st txFactory, txBlockBodyProcessor, icf.globalThrottler, + icf.antifloodHandler, ) if err != nil { return nil, err @@ -516,8 +527,5 @@ func (icf *interceptorsContainerFactory) createOneMiniBlocksInterceptor(topic st // IsInterfaceNil returns true if there is no value under the interface func (icf *interceptorsContainerFactory) IsInterfaceNil() bool { - if icf == nil { - return true - } - return false + return icf == nil } diff --git a/process/factory/metachain/interceptorsContainerFactory_test.go 
b/process/factory/metachain/interceptorsContainerFactory_test.go index 23e5509c870..28230303583 100644 --- a/process/factory/metachain/interceptorsContainerFactory_test.go +++ b/process/factory/metachain/interceptorsContainerFactory_test.go @@ -102,6 +102,7 @@ func TestNewInterceptorsContainerFactory_NilShardCoordinatorShouldErr(t *testing maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, icf) @@ -129,6 +130,7 @@ func TestNewInterceptorsContainerFactory_NilNodesCoordinatorShouldErr(t *testing maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, icf) @@ -156,6 +158,7 @@ func TestNewInterceptorsContainerFactory_NilTopicHandlerShouldErr(t *testing.T) maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, icf) @@ -183,6 +186,7 @@ func TestNewInterceptorsContainerFactory_NilBlockchainShouldErr(t *testing.T) { maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, icf) @@ -210,6 +214,7 @@ func TestNewInterceptorsContainerFactory_NilMarshalizerShouldErr(t *testing.T) { maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, icf) @@ -237,6 +242,7 @@ func TestNewInterceptorsContainerFactory_NilHasherShouldErr(t *testing.T) { maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, icf) @@ -264,6 +270,7 @@ func TestNewInterceptorsContainerFactory_NilMultiSignerShouldErr(t *testing.T) { maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, icf) @@ -291,6 +298,7 @@ func TestNewInterceptorsContainerFactory_NilDataPoolShouldErr(t *testing.T) { 
maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, icf) @@ -318,6 +326,7 @@ func TestNewInterceptorsContainerFactory_NilAccountsShouldErr(t *testing.T) { maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, icf) @@ -345,6 +354,7 @@ func TestNewInterceptorsContainerFactory_NilAddrConvShouldErr(t *testing.T) { maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, icf) @@ -372,6 +382,7 @@ func TestNewInterceptorsContainerFactory_NilSingleSignerShouldErr(t *testing.T) maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, icf) @@ -399,6 +410,7 @@ func TestNewInterceptorsContainerFactory_NilKeyGenShouldErr(t *testing.T) { maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, icf) @@ -426,6 +438,7 @@ func TestNewInterceptorsContainerFactory_NilFeeHandlerShouldErr(t *testing.T) { maxTxNonceDeltaAllowed, nil, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, icf) @@ -453,12 +466,41 @@ func TestNewInterceptorsContainerFactory_NilBlackListHandlerShouldErr(t *testing maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, nil, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, icf) assert.Equal(t, process.ErrNilBlackListHandler, err) } +func TestNewInterceptorsContainerFactory_NilAntifloodHandlerShouldErr(t *testing.T) { + t.Parallel() + + icf, err := metachain.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.TopicHandlerStub{}, + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AccountsStub{}, + &mock.AddressConverterMock{}, + &mock.SignerMock{}, + 
&mock.SignerMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SingleSignKeyGenMock{}, + maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, + &mock.BlackListHandlerStub{}, + nil, + ) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilAntifloodHandler, err) +} + func TestNewInterceptorsContainerFactory_ShouldWork(t *testing.T) { t.Parallel() @@ -480,6 +522,7 @@ func TestNewInterceptorsContainerFactory_ShouldWork(t *testing.T) { maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.NotNil(t, icf) @@ -509,6 +552,7 @@ func TestInterceptorsContainerFactory_CreateTopicMetablocksFailsShouldErr(t *tes maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) container, err := icf.Create() @@ -538,6 +582,7 @@ func TestInterceptorsContainerFactory_CreateTopicShardHeadersForMetachainFailsSh maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) container, err := icf.Create() @@ -567,6 +612,7 @@ func TestInterceptorsContainerFactory_CreateRegisterForMetablocksFailsShouldErr( maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) container, err := icf.Create() @@ -596,6 +642,7 @@ func TestInterceptorsContainerFactory_CreateRegisterShardHeadersForMetachainFail maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) container, err := icf.Create() @@ -632,6 +679,7 @@ func TestInterceptorsContainerFactory_CreateShouldWork(t *testing.T) { maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) container, err := icf.Create() @@ -681,6 +729,7 @@ func TestInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + 
&mock.P2PAntifloodHandlerStub{}, ) container, err := icf.Create() diff --git a/process/factory/shard/interceptorsContainerFactory.go b/process/factory/shard/interceptorsContainerFactory.go index d9b3ca9b019..0fe81467f9e 100644 --- a/process/factory/shard/interceptorsContainerFactory.go +++ b/process/factory/shard/interceptorsContainerFactory.go @@ -38,6 +38,7 @@ type interceptorsContainerFactory struct { argInterceptorFactory *interceptorFactory.ArgInterceptedDataFactory globalTxThrottler process.InterceptorThrottler maxTxNonceDeltaAllowed int + antifloodHandler process.P2PAntifloodHandler } // NewInterceptorsContainerFactory is responsible for creating a new interceptors factory object @@ -59,6 +60,7 @@ func NewInterceptorsContainerFactory( maxTxNonceDeltaAllowed int, txFeeHandler process.FeeHandler, blackList process.BlackListHandler, + antifloodHandler process.P2PAntifloodHandler, ) (*interceptorsContainerFactory, error) { if check.IfNil(accounts) { return nil, process.ErrNilAccountsAdapter @@ -108,6 +110,9 @@ func NewInterceptorsContainerFactory( if check.IfNil(blockSingleSigner) { return nil, process.ErrNilSingleSigner } + if check.IfNil(antifloodHandler) { + return nil, process.ErrNilAntifloodHandler + } argInterceptorFactory := &interceptorFactory.ArgInterceptedDataFactory{ Marshalizer: marshalizer, @@ -139,6 +144,7 @@ func NewInterceptorsContainerFactory( argInterceptorFactory: argInterceptorFactory, blackList: blackList, maxTxNonceDeltaAllowed: maxTxNonceDeltaAllowed, + antifloodHandler: antifloodHandler, } var err error @@ -291,6 +297,7 @@ func (icf *interceptorsContainerFactory) createOneTxInterceptor(topic string) (p txFactory, txProcessor, icf.globalTxThrottler, + icf.antifloodHandler, ) if err != nil { return nil, err @@ -359,6 +366,7 @@ func (icf *interceptorsContainerFactory) createOneUnsignedTxInterceptor(topic st txFactory, txProcessor, icf.globalTxThrottler, + icf.antifloodHandler, ) if err != nil { return nil, err @@ -428,6 +436,7 @@ func (icf 
*interceptorsContainerFactory) createOneRewardTxInterceptor(topic stri txFactory, txProcessor, icf.globalTxThrottler, + icf.antifloodHandler, ) if err != nil { return nil, err @@ -468,6 +477,7 @@ func (icf *interceptorsContainerFactory) generateHdrInterceptor() ([]string, []p hdrFactory, hdrProcessor, icf.globalTxThrottler, + icf.antifloodHandler, ) if err != nil { return nil, nil, err @@ -536,6 +546,7 @@ func (icf *interceptorsContainerFactory) createOneMiniBlocksInterceptor(topic st txFactory, txBlockBodyProcessor, icf.globalTxThrottler, + icf.antifloodHandler, ) if err != nil { return nil, err @@ -576,6 +587,7 @@ func (icf *interceptorsContainerFactory) generateMetachainHeaderInterceptor() ([ hdrFactory, hdrProcessor, icf.globalTxThrottler, + icf.antifloodHandler, ) if err != nil { return nil, nil, err @@ -591,8 +603,5 @@ func (icf *interceptorsContainerFactory) generateMetachainHeaderInterceptor() ([ // IsInterfaceNil returns true if there is no value under the interface func (icf *interceptorsContainerFactory) IsInterfaceNil() bool { - if icf == nil { - return true - } - return false + return icf == nil } diff --git a/process/factory/shard/interceptorsContainerFactory_test.go b/process/factory/shard/interceptorsContainerFactory_test.go index da3152c16d7..eec8c170917 100644 --- a/process/factory/shard/interceptorsContainerFactory_test.go +++ b/process/factory/shard/interceptorsContainerFactory_test.go @@ -106,6 +106,7 @@ func TestNewInterceptorsContainerFactory_NilAccountsAdapter(t *testing.T) { maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, icf) @@ -133,6 +134,7 @@ func TestNewInterceptorsContainerFactory_NilShardCoordinatorShouldErr(t *testing maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, icf) @@ -160,6 +162,7 @@ func TestNewInterceptorsContainerFactory_NilNodesCoordinatorShouldErr(t *testing 
maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, icf) @@ -187,6 +190,7 @@ func TestNewInterceptorsContainerFactory_NilTopicHandlerShouldErr(t *testing.T) maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, icf) @@ -214,6 +218,7 @@ func TestNewInterceptorsContainerFactory_NilBlockchainShouldErr(t *testing.T) { maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, icf) @@ -241,6 +246,7 @@ func TestNewInterceptorsContainerFactory_NilMarshalizerShouldErr(t *testing.T) { maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, icf) @@ -268,6 +274,7 @@ func TestNewInterceptorsContainerFactory_NilHasherShouldErr(t *testing.T) { maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, icf) @@ -295,6 +302,7 @@ func TestNewInterceptorsContainerFactory_NilKeyGenShouldErr(t *testing.T) { maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, icf) @@ -322,6 +330,7 @@ func TestNewInterceptorsContainerFactory_NilSingleSignerShouldErr(t *testing.T) maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, icf) @@ -349,6 +358,7 @@ func TestNewInterceptorsContainerFactory_NilMultiSignerShouldErr(t *testing.T) { maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, icf) @@ -376,6 +386,7 @@ func TestNewInterceptorsContainerFactory_NilDataPoolShouldErr(t *testing.T) { maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, icf) 
@@ -403,6 +414,7 @@ func TestNewInterceptorsContainerFactory_NilAddrConverterShouldErr(t *testing.T) maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, icf) @@ -430,6 +442,7 @@ func TestNewInterceptorsContainerFactory_NilTxFeeHandlerShouldErr(t *testing.T) maxTxNonceDeltaAllowed, nil, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, icf) @@ -457,12 +470,41 @@ func TestNewInterceptorsContainerFactory_NilBlackListHandlerShouldErr(t *testing maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, nil, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, icf) assert.Equal(t, process.ErrNilBlackListHandler, err) } +func TestNewInterceptorsContainerFactory_NilAntifloodHandlerShouldErr(t *testing.T) { + t.Parallel() + + icf, err := shard.NewInterceptorsContainerFactory( + &mock.AccountsStub{}, + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.TopicHandlerStub{}, + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, + &mock.BlackListHandlerStub{}, + nil, + ) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilAntifloodHandler, err) +} + func TestNewInterceptorsContainerFactory_ShouldWork(t *testing.T) { t.Parallel() @@ -484,6 +526,7 @@ func TestNewInterceptorsContainerFactory_ShouldWork(t *testing.T) { maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.NotNil(t, icf) @@ -513,6 +556,7 @@ func TestInterceptorsContainerFactory_CreateTopicCreationTxFailsShouldErr(t *tes maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) container, err := icf.Create() @@ -542,6 +586,7 @@ 
func TestInterceptorsContainerFactory_CreateTopicCreationHdrFailsShouldErr(t *te maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) container, err := icf.Create() @@ -571,6 +616,7 @@ func TestInterceptorsContainerFactory_CreateTopicCreationMiniBlocksFailsShouldEr maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) container, err := icf.Create() @@ -600,6 +646,7 @@ func TestInterceptorsContainerFactory_CreateTopicCreationMetachainHeadersFailsSh maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) container, err := icf.Create() @@ -629,6 +676,7 @@ func TestInterceptorsContainerFactory_CreateRegisterTxFailsShouldErr(t *testing. maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) container, err := icf.Create() @@ -658,6 +706,7 @@ func TestInterceptorsContainerFactory_CreateRegisterHdrFailsShouldErr(t *testing maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) container, err := icf.Create() @@ -687,6 +736,7 @@ func TestInterceptorsContainerFactory_CreateRegisterMiniBlocksFailsShouldErr(t * maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) container, err := icf.Create() @@ -716,6 +766,7 @@ func TestInterceptorsContainerFactory_CreateRegisterMetachainHeadersShouldErr(t maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) container, err := icf.Create() @@ -752,6 +803,7 @@ func TestInterceptorsContainerFactory_CreateShouldWork(t *testing.T) { maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) container, err := icf.Create() @@ -801,6 +853,7 @@ func 
TestInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) container, err := icf.Create() diff --git a/process/interceptors/common.go b/process/interceptors/common.go index 61f6e51bdd9..dde757ba3e4 100644 --- a/process/interceptors/common.go +++ b/process/interceptors/common.go @@ -7,14 +7,23 @@ import ( "github.com/ElrondNetwork/elrond-go/process" ) -func preProcessMesage(throttler process.InterceptorThrottler, message p2p.MessageP2P) error { +func preProcessMesage( + throttler process.InterceptorThrottler, + antifloodHandler process.P2PAntifloodHandler, + message p2p.MessageP2P, + fromConnectedPeer p2p.PeerID, +) error { + if message == nil { return process.ErrNilMessage } if message.Data() == nil { return process.ErrNilDataToProcess } - + err := antifloodHandler.CanProcessMessage(message, fromConnectedPeer) + if err != nil { + return err + } if !throttler.CanProcess() { return process.ErrSystemBusy } diff --git a/process/interceptors/common_test.go b/process/interceptors/common_test.go index d65f02af44c..40119c619cd 100644 --- a/process/interceptors/common_test.go +++ b/process/interceptors/common_test.go @@ -6,16 +6,20 @@ import ( "testing" "time" + "github.com/ElrondNetwork/elrond-go/p2p" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/stretchr/testify/assert" ) +const fromConnectedPeer = "from connected peer" + //------- preProcessMessage + func TestPreProcessMessage_NilMessageShouldErr(t *testing.T) { t.Parallel() - err := preProcessMesage(&mock.InterceptorThrottlerStub{}, nil) + err := preProcessMesage(&mock.InterceptorThrottlerStub{}, &mock.P2PAntifloodHandlerStub{}, nil, fromConnectedPeer) assert.Equal(t, process.ErrNilMessage, err) } @@ -24,12 +28,35 @@ func TestPreProcessMessage_NilDataShouldErr(t *testing.T) { t.Parallel() msg := &mock.P2PMessageMock{} - err := 
preProcessMesage(&mock.InterceptorThrottlerStub{}, msg) + err := preProcessMesage(&mock.InterceptorThrottlerStub{}, &mock.P2PAntifloodHandlerStub{}, msg, fromConnectedPeer) assert.Equal(t, process.ErrNilDataToProcess, err) } -func TestPreProcessMessage_CanNotProcessShouldErr(t *testing.T) { +func TestPreProcessMessage_AntifloodCanNotProcessShouldErr(t *testing.T) { + t.Parallel() + + msg := &mock.P2PMessageMock{ + DataField: []byte("data to process"), + } + throttler := &mock.InterceptorThrottlerStub{ + CanProcessCalled: func() bool { + return false + }, + } + errExpected := errors.New("expected error") + antifloodHandler := &mock.P2PAntifloodHandlerStub{ + CanProcessMessageCalled: func(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { + return errExpected + }, + } + + err := preProcessMesage(throttler, antifloodHandler, msg, fromConnectedPeer) + + assert.Equal(t, errExpected, err) +} + +func TestPreProcessMessage_ThrottlerCanNotProcessShouldErr(t *testing.T) { t.Parallel() msg := &mock.P2PMessageMock{ @@ -40,8 +67,13 @@ func TestPreProcessMessage_CanNotProcessShouldErr(t *testing.T) { return false }, } + antifloodHandler := &mock.P2PAntifloodHandlerStub{ + CanProcessMessageCalled: func(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { + return nil + }, + } - err := preProcessMesage(throttler, msg) + err := preProcessMesage(throttler, antifloodHandler, msg, fromConnectedPeer) assert.Equal(t, process.ErrSystemBusy, err) } @@ -57,7 +89,12 @@ func TestPreProcessMessage_CanProcessReturnsNilAndCallsStartProcessing(t *testin return true }, } - err := preProcessMesage(throttler, msg) + antifloodHandler := &mock.P2PAntifloodHandlerStub{ + CanProcessMessageCalled: func(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { + return nil + }, + } + err := preProcessMesage(throttler, antifloodHandler, msg, fromConnectedPeer) assert.Nil(t, err) assert.Equal(t, int32(1), throttler.StartProcessingCount()) diff --git 
a/process/interceptors/multiDataInterceptor.go b/process/interceptors/multiDataInterceptor.go index 2d0cbf5e558..9601e6bf4ad 100644 --- a/process/interceptors/multiDataInterceptor.go +++ b/process/interceptors/multiDataInterceptor.go @@ -14,10 +14,11 @@ var log = logger.GetOrCreate("process/interceptors") // MultiDataInterceptor is used for intercepting packed multi data type MultiDataInterceptor struct { - marshalizer marshal.Marshalizer - factory process.InterceptedDataFactory - processor process.InterceptorProcessor - throttler process.InterceptorThrottler + marshalizer marshal.Marshalizer + factory process.InterceptedDataFactory + processor process.InterceptorProcessor + throttler process.InterceptorThrottler + antifloodHandler process.P2PAntifloodHandler } // NewMultiDataInterceptor hooks a new interceptor for packed multi data @@ -26,6 +27,7 @@ func NewMultiDataInterceptor( factory process.InterceptedDataFactory, processor process.InterceptorProcessor, throttler process.InterceptorThrottler, + antifloodHandler process.P2PAntifloodHandler, ) (*MultiDataInterceptor, error) { if check.IfNil(marshalizer) { @@ -40,12 +42,16 @@ func NewMultiDataInterceptor( if check.IfNil(throttler) { return nil, process.ErrNilInterceptorThrottler } + if check.IfNil(antifloodHandler) { + return nil, process.ErrNilAntifloodHandler + } multiDataIntercept := &MultiDataInterceptor{ - marshalizer: marshalizer, - factory: factory, - processor: processor, - throttler: throttler, + marshalizer: marshalizer, + factory: factory, + processor: processor, + throttler: throttler, + antifloodHandler: antifloodHandler, } return multiDataIntercept, nil @@ -54,7 +60,7 @@ func NewMultiDataInterceptor( // ProcessReceivedMessage is the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to) func (mdi *MultiDataInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { - err := 
preProcessMesage(mdi.throttler, message) + err := preProcessMesage(mdi.throttler, mdi.antifloodHandler, message, fromConnectedPeer) if err != nil { return err } @@ -107,8 +113,5 @@ func (mdi *MultiDataInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, // IsInterfaceNil returns true if there is no value under the interface func (mdi *MultiDataInterceptor) IsInterfaceNil() bool { - if mdi == nil { - return true - } - return false + return mdi == nil } diff --git a/process/interceptors/multiDataInterceptor_test.go b/process/interceptors/multiDataInterceptor_test.go index 2219f559d0b..65019d9f874 100644 --- a/process/interceptors/multiDataInterceptor_test.go +++ b/process/interceptors/multiDataInterceptor_test.go @@ -17,6 +17,14 @@ import ( var fromConnectedPeerId = p2p.PeerID("from connected peer Id") +func createMockAntifloodHandler() *mock.P2PAntifloodHandlerStub { + return &mock.P2PAntifloodHandlerStub{ + CanProcessMessageCalled: func(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { + return nil + }, + } +} + func TestNewMultiDataInterceptor_NilMarshalizerShouldErr(t *testing.T) { t.Parallel() @@ -25,6 +33,7 @@ func TestNewMultiDataInterceptor_NilMarshalizerShouldErr(t *testing.T) { &mock.InterceptedDataFactoryStub{}, &mock.InterceptorProcessorStub{}, &mock.InterceptorThrottlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, mdi) @@ -39,6 +48,7 @@ func TestNewMultiDataInterceptor_NilInterceptedDataFactoryShouldErr(t *testing.T nil, &mock.InterceptorProcessorStub{}, &mock.InterceptorThrottlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, mdi) @@ -53,6 +63,7 @@ func TestNewMultiDataInterceptor_NilInterceptedDataProcessorShouldErr(t *testing &mock.InterceptedDataFactoryStub{}, nil, &mock.InterceptorThrottlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, mdi) @@ -67,12 +78,28 @@ func TestNewMultiDataInterceptor_NilInterceptorThrottlerShouldErr(t *testing.T) &mock.InterceptedDataFactoryStub{}, 
&mock.InterceptorProcessorStub{}, nil, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, mdi) assert.Equal(t, process.ErrNilInterceptorThrottler, err) } +func TestNewMultiDataInterceptor_NilAntifloodHandlerShouldErr(t *testing.T) { + t.Parallel() + + mdi, err := interceptors.NewMultiDataInterceptor( + &mock.MarshalizerMock{}, + &mock.InterceptedDataFactoryStub{}, + &mock.InterceptorProcessorStub{}, + &mock.InterceptorThrottlerStub{}, + nil, + ) + + assert.Nil(t, mdi) + assert.Equal(t, process.ErrNilAntifloodHandler, err) +} + func TestNewMultiDataInterceptor(t *testing.T) { t.Parallel() @@ -81,6 +108,7 @@ func TestNewMultiDataInterceptor(t *testing.T) { &mock.InterceptedDataFactoryStub{}, &mock.InterceptorProcessorStub{}, &mock.InterceptorThrottlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.False(t, check.IfNil(mdi)) @@ -97,6 +125,7 @@ func TestMultiDataInterceptor_ProcessReceivedMessageNilMessageShouldErr(t *testi &mock.InterceptedDataFactoryStub{}, &mock.InterceptorProcessorStub{}, &mock.InterceptorThrottlerStub{}, + createMockAntifloodHandler(), ) err := mdi.ProcessReceivedMessage(nil, fromConnectedPeerId) @@ -117,6 +146,7 @@ func TestMultiDataInterceptor_ProcessReceivedMessageUnmarshalFailsShouldErr(t *t &mock.InterceptedDataFactoryStub{}, &mock.InterceptorProcessorStub{}, createMockThrottler(), + createMockAntifloodHandler(), ) msg := &mock.P2PMessageMock{ @@ -139,6 +169,7 @@ func TestMultiDataInterceptor_ProcessReceivedMessageUnmarshalReturnsEmptySliceSh &mock.InterceptedDataFactoryStub{}, &mock.InterceptorProcessorStub{}, createMockThrottler(), + createMockAntifloodHandler(), ) msg := &mock.P2PMessageMock{ @@ -168,6 +199,7 @@ func TestMultiDataInterceptor_ProcessReceivedCreateFailsShouldErr(t *testing.T) }, createMockInterceptorStub(&checkCalledNum, &processCalledNum), throttler, + createMockAntifloodHandler(), ) dataField, _ := marshalizer.Marshal(buffData) @@ -218,6 +250,7 @@ func 
TestMultiDataInterceptor_ProcessReceivedPartiallyCorrectDataShouldErr(t *te }, createMockInterceptorStub(&checkCalledNum, &processCalledNum), throttler, + createMockAntifloodHandler(), ) dataField, _ := marshalizer.Marshal(buffData) @@ -262,6 +295,7 @@ func TestMultiDataInterceptor_ProcessReceivedMessageNotValidShouldErrAndNotProce }, createMockInterceptorStub(&checkCalledNum, &processCalledNum), throttler, + createMockAntifloodHandler(), ) dataField, _ := marshalizer.Marshal(buffData) @@ -305,6 +339,7 @@ func TestMultiDataInterceptor_ProcessReceivedMessageIsAddressedToOtherShardShoul }, createMockInterceptorStub(&checkCalledNum, &processCalledNum), throttler, + createMockAntifloodHandler(), ) dataField, _ := marshalizer.Marshal(buffData) @@ -348,6 +383,7 @@ func TestMultiDataInterceptor_ProcessReceivedMessageOkMessageShouldRetNil(t *tes }, createMockInterceptorStub(&checkCalledNum, &processCalledNum), throttler, + createMockAntifloodHandler(), ) dataField, _ := marshalizer.Marshal(buffData) diff --git a/process/interceptors/singleDataInterceptor.go b/process/interceptors/singleDataInterceptor.go index ef662b59c71..8a6b6eb614e 100644 --- a/process/interceptors/singleDataInterceptor.go +++ b/process/interceptors/singleDataInterceptor.go @@ -10,9 +10,10 @@ import ( // SingleDataInterceptor is used for intercepting packed multi data type SingleDataInterceptor struct { - factory process.InterceptedDataFactory - processor process.InterceptorProcessor - throttler process.InterceptorThrottler + factory process.InterceptedDataFactory + processor process.InterceptorProcessor + throttler process.InterceptorThrottler + antifloodHandler process.P2PAntifloodHandler } // NewSingleDataInterceptor hooks a new interceptor for single data @@ -20,6 +21,7 @@ func NewSingleDataInterceptor( factory process.InterceptedDataFactory, processor process.InterceptorProcessor, throttler process.InterceptorThrottler, + antifloodHandler process.P2PAntifloodHandler, ) (*SingleDataInterceptor, 
error) { if check.IfNil(factory) { @@ -31,11 +33,15 @@ func NewSingleDataInterceptor( if check.IfNil(throttler) { return nil, process.ErrNilInterceptorThrottler } + if check.IfNil(antifloodHandler) { + return nil, process.ErrNilAntifloodHandler + } singleDataIntercept := &SingleDataInterceptor{ - factory: factory, - processor: processor, - throttler: throttler, + factory: factory, + processor: processor, + throttler: throttler, + antifloodHandler: antifloodHandler, } return singleDataIntercept, nil @@ -44,7 +50,7 @@ func NewSingleDataInterceptor( // ProcessReceivedMessage is the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to) func (sdi *SingleDataInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { - err := preProcessMesage(sdi.throttler, message) + err := preProcessMesage(sdi.throttler, sdi.antifloodHandler, message, fromConnectedPeer) if err != nil { return err } @@ -81,8 +87,5 @@ func (sdi *SingleDataInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, // IsInterfaceNil returns true if there is no value under the interface func (sdi *SingleDataInterceptor) IsInterfaceNil() bool { - if sdi == nil { - return true - } - return false + return sdi == nil } diff --git a/process/interceptors/singleDataInterceptor_test.go b/process/interceptors/singleDataInterceptor_test.go index 65fd57273e8..ed8b274e0c1 100644 --- a/process/interceptors/singleDataInterceptor_test.go +++ b/process/interceptors/singleDataInterceptor_test.go @@ -47,6 +47,7 @@ func TestNewSingleDataInterceptor_NilInterceptedDataFactoryShouldErr(t *testing. 
nil, &mock.InterceptorProcessorStub{}, &mock.InterceptorThrottlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, sdi) @@ -60,6 +61,7 @@ func TestNewSingleDataInterceptor_NilInterceptedDataProcessorShouldErr(t *testin &mock.InterceptedDataFactoryStub{}, nil, &mock.InterceptorThrottlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, sdi) @@ -73,12 +75,27 @@ func TestNewSingleDataInterceptor_NilInterceptorThrottlerShouldErr(t *testing.T) &mock.InterceptedDataFactoryStub{}, &mock.InterceptorProcessorStub{}, nil, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, sdi) assert.Equal(t, process.ErrNilInterceptorThrottler, err) } +func TestNewSingleDataInterceptor_NilP2PAntifloodHandlerShouldErr(t *testing.T) { + t.Parallel() + + sdi, err := interceptors.NewSingleDataInterceptor( + &mock.InterceptedDataFactoryStub{}, + &mock.InterceptorProcessorStub{}, + &mock.InterceptorThrottlerStub{}, + nil, + ) + + assert.Nil(t, sdi) + assert.Equal(t, process.ErrNilAntifloodHandler, err) +} + func TestNewSingleDataInterceptor(t *testing.T) { t.Parallel() @@ -86,6 +103,7 @@ func TestNewSingleDataInterceptor(t *testing.T) { &mock.InterceptedDataFactoryStub{}, &mock.InterceptorProcessorStub{}, &mock.InterceptorThrottlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.NotNil(t, sdi) @@ -101,6 +119,7 @@ func TestSingleDataInterceptor_ProcessReceivedMessageNilMessageShouldErr(t *test &mock.InterceptedDataFactoryStub{}, &mock.InterceptorProcessorStub{}, &mock.InterceptorThrottlerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) err := sdi.ProcessReceivedMessage(nil, fromConnectedPeerId) @@ -124,6 +143,7 @@ func TestSingleDataInterceptor_ProcessReceivedMessageFactoryCreationErrorShouldE return true }, }, + createMockAntifloodHandler(), ) msg := &mock.P2PMessageMock{ @@ -158,6 +178,7 @@ func TestSingleDataInterceptor_ProcessReceivedMessageIsNotValidShouldNotCallProc }, createMockInterceptorStub(&checkCalledNum, &processCalledNum), throttler, + createMockAntifloodHandler(), ) 
msg := &mock.P2PMessageMock{ @@ -197,6 +218,7 @@ func TestSingleDataInterceptor_ProcessReceivedMessageIsNotForCurrentShardShouldN }, createMockInterceptorStub(&checkCalledNum, &processCalledNum), throttler, + createMockAntifloodHandler(), ) msg := &mock.P2PMessageMock{ @@ -236,6 +258,7 @@ func TestSingleDataInterceptor_ProcessReceivedMessageShouldWork(t *testing.T) { }, createMockInterceptorStub(&checkCalledNum, &processCalledNum), throttler, + createMockAntifloodHandler(), ) msg := &mock.P2PMessageMock{ diff --git a/process/interface.go b/process/interface.go index d0997d28f11..f0665e0f9b9 100644 --- a/process/interface.go +++ b/process/interface.go @@ -538,3 +538,16 @@ type FloodPreventer interface { Reset() IsInterfaceNil() bool } + +// P2PAntifloodHandler defines the behavior of a component able to signal that the system is too busy (or flooded) processing +// p2p messages +type P2PAntifloodHandler interface { + CanProcessMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error + IsInterfaceNil() bool +} + +// SCQueryService defines how data should be get from a SC account +type SCQueryService interface { + ExecuteQuery(query *SCQuery) (*vmcommon.VMOutput, error) + IsInterfaceNil() bool +} diff --git a/process/mock/cacherMock.go b/process/mock/cacherMock.go index c6aaf7ba7ef..0e4795fdd37 100644 --- a/process/mock/cacherMock.go +++ b/process/mock/cacherMock.go @@ -80,7 +80,14 @@ func (cm *CacherMock) RemoveOldest() { } func (cm *CacherMock) Keys() [][]byte { - panic("implement me") + keys := make([][]byte, len(cm.dataMap)) + idx := 0 + for k := range cm.dataMap { + keys[idx] = []byte(k) + idx++ + } + + return keys } func (cm *CacherMock) Len() int { diff --git a/process/mock/p2pAntifloodHandlerStub.go b/process/mock/p2pAntifloodHandlerStub.go new file mode 100644 index 00000000000..e8236ed9167 --- /dev/null +++ b/process/mock/p2pAntifloodHandlerStub.go @@ -0,0 +1,15 @@ +package mock + +import "github.com/ElrondNetwork/elrond-go/p2p" + +type 
P2PAntifloodHandlerStub struct { + CanProcessMessageCalled func(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error +} + +func (p2pahs *P2PAntifloodHandlerStub) CanProcessMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { + return p2pahs.CanProcessMessageCalled(message, fromConnectedPeer) +} + +func (p2pahs *P2PAntifloodHandlerStub) IsInterfaceNil() bool { + return p2pahs == nil +} diff --git a/process/mock/quotaStatusHandlerStub.go b/process/mock/quotaStatusHandlerStub.go new file mode 100644 index 00000000000..5b731068ca3 --- /dev/null +++ b/process/mock/quotaStatusHandlerStub.go @@ -0,0 +1,21 @@ +package mock + +type QuotaStatusHandlerStub struct { + ResetStatisticsCalled func() + AddQuotaCalled func(identifier string, numReceivedMessages uint32, sizeReceivedMessages uint64, + numProcessedMessages uint32, sizeProcessedMessages uint64) +} + +func (qshs *QuotaStatusHandlerStub) ResetStatistics() { + qshs.ResetStatisticsCalled() +} + +func (qshs *QuotaStatusHandlerStub) AddQuota(identifier string, numReceivedMessages uint32, sizeReceivedMessages uint64, + numProcessedMessages uint32, sizeProcessedMessages uint64) { + + qshs.AddQuotaCalled(identifier, numReceivedMessages, sizeReceivedMessages, numProcessedMessages, sizeProcessedMessages) +} + +func (qshs *QuotaStatusHandlerStub) IsInterfaceNil() bool { + return qshs == nil +} diff --git a/process/scToProtocol/stakingToPeer.go b/process/scToProtocol/stakingToPeer.go index d55074ec1f7..60509c7390a 100644 --- a/process/scToProtocol/stakingToPeer.go +++ b/process/scToProtocol/stakingToPeer.go @@ -13,7 +13,6 @@ import ( "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/node/external" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/vm/factory" @@ -30,7 +29,7 @@ type ArgStakingToPeer struct { 
ArgParser process.ArgumentsParser CurrTxs dataRetriever.TransactionCacher - ScQuery external.SCQueryService + ScQuery process.SCQueryService } // stakingToPeer defines the component which will translate changes from staking SC state @@ -44,7 +43,7 @@ type stakingToPeer struct { argParser process.ArgumentsParser currTxs dataRetriever.TransactionCacher - scQuery external.SCQueryService + scQuery process.SCQueryService mutPeerChanges sync.Mutex peerChanges map[string]block.PeerData diff --git a/process/throttle/antiflood/interface.go b/process/throttle/antiflood/interface.go new file mode 100644 index 00000000000..1860945e53e --- /dev/null +++ b/process/throttle/antiflood/interface.go @@ -0,0 +1,10 @@ +package antiflood + +// QuotaStatusHandler defines the behavior of a quota handler able to process periodic updates of peers quota measured +// by the system +type QuotaStatusHandler interface { + ResetStatistics() + AddQuota(identifier string, numReceivedMessages uint32, sizeReceivedMessages uint64, + numProcessedMessages uint32, sizeProcessedMessages uint64) + IsInterfaceNil() bool +} diff --git a/process/throttle/antiflood/quotaFloodPreventer.go b/process/throttle/antiflood/quotaFloodPreventer.go index ad3a753af8a..702bf08fbe4 100644 --- a/process/throttle/antiflood/quotaFloodPreventer.go +++ b/process/throttle/antiflood/quotaFloodPreventer.go @@ -12,23 +12,28 @@ import ( const minMessages = 1 const minTotalSize = 1 //1Byte const initNumMessages = 1 +const totalIdentifier = "total" type quota struct { - numMessages uint32 - totalSize uint64 + numReceivedMessages uint32 + sizeReceivedMessages uint64 + numProcessedMessages uint32 + sizeProcessedMessages uint64 } -// qoutaFloodPreventer represents a cache of quotas per peer used in antiflooding mechanism +// quotaFloodPreventer represents a cache of quotas per peer used in antiflooding mechanism type quotaFloodPreventer struct { - mutOperation sync.RWMutex - cacher storage.Cacher - maxMessages uint32 - maxSize uint64 + 
mutOperation sync.RWMutex + cacher storage.Cacher + statusHandler QuotaStatusHandler + maxMessages uint32 + maxSize uint64 } // NewQuotaFloodPreventer creates a new flood preventer based on quota / peer func NewQuotaFloodPreventer( cacher storage.Cacher, + statusHandler QuotaStatusHandler, maxMessagesPerPeer uint32, maxTotalSizePerPeer uint64, ) (*quotaFloodPreventer, error) { @@ -36,6 +41,9 @@ func NewQuotaFloodPreventer( if check.IfNil(cacher) { return nil, process.ErrNilCacher } + if check.IfNil(statusHandler) { + return nil, process.ErrNilQuotaStatusHandler + } if maxMessagesPerPeer < minMessages { return nil, fmt.Errorf("%w raised in NewCountersMap, maxMessages: provided %d, minimum %d", process.ErrInvalidValue, @@ -52,9 +60,10 @@ func NewQuotaFloodPreventer( } return "aFloodPreventer{ - cacher: cacher, - maxMessages: maxMessagesPerPeer, - maxSize: maxTotalSizePerPeer, + cacher: cacher, + statusHandler: statusHandler, + maxMessages: maxMessagesPerPeer, + maxSize: maxTotalSizePerPeer, }, nil } @@ -80,11 +89,13 @@ func (qfp *quotaFloodPreventer) Increment(identifier string, size uint64) bool { return true } - q.numMessages++ - q.totalSize += size - isQuotaReached := q.numMessages > qfp.maxMessages || q.totalSize > qfp.maxSize + q.numReceivedMessages++ + q.sizeReceivedMessages += size + isQuotaReached := q.numReceivedMessages > qfp.maxMessages || q.sizeReceivedMessages > qfp.maxSize if !isQuotaReached { qfp.cacher.Put([]byte(identifier), q) + q.numProcessedMessages++ + q.sizeProcessedMessages += size return true } @@ -94,8 +105,10 @@ func (qfp *quotaFloodPreventer) Increment(identifier string, size uint64) bool { func (qfp *quotaFloodPreventer) putDefaultQuota(cacher storage.Cacher, identifier string, size uint64) { q := "a{ - numMessages: initNumMessages, - totalSize: size, + numReceivedMessages: initNumMessages, + sizeReceivedMessages: size, + numProcessedMessages: initNumMessages, + sizeProcessedMessages: size, } qfp.cacher.Put([]byte(identifier), q) } @@ 
-105,10 +118,52 @@ func (qfp *quotaFloodPreventer) Reset() { qfp.mutOperation.Lock() defer qfp.mutOperation.Unlock() + qfp.createStatistics() + //TODO change this if cacher.Clear() is time consuming qfp.cacher.Clear() } +// createStatistics is useful to benchmark the system when running +func (qfp quotaFloodPreventer) createStatistics() { + qfp.statusHandler.ResetStatistics() + + keys := qfp.cacher.Keys() + totalQuota := "a{} + for _, k := range keys { + val, ok := qfp.cacher.Get(k) + if !ok { + continue + } + + q, isQuota := val.(*quota) + if !isQuota { + continue + } + + totalQuota.numReceivedMessages += q.numReceivedMessages + totalQuota.sizeReceivedMessages += q.sizeReceivedMessages + totalQuota.numProcessedMessages += q.numProcessedMessages + totalQuota.sizeProcessedMessages += q.sizeProcessedMessages + + qfp.statusHandler.AddQuota( + string(k), + q.numReceivedMessages, + q.sizeReceivedMessages, + q.numProcessedMessages, + q.sizeProcessedMessages, + ) + } + + qfp.statusHandler.AddQuota( + totalIdentifier, + totalQuota.numReceivedMessages, + totalQuota.sizeReceivedMessages, + totalQuota.numProcessedMessages, + totalQuota.sizeProcessedMessages, + ) +} + // IsInterfaceNil returns true if there is no value under the interface func (qfp *quotaFloodPreventer) IsInterfaceNil() bool { return qfp == nil diff --git a/process/throttle/antiflood/quotaFloodPreventer_test.go b/process/throttle/antiflood/quotaFloodPreventer_test.go index 2c7aa5f6a85..c074cc49c9c 100644 --- a/process/throttle/antiflood/quotaFloodPreventer_test.go +++ b/process/throttle/antiflood/quotaFloodPreventer_test.go @@ -12,12 +12,19 @@ import ( "github.com/stretchr/testify/assert" ) +func createMockQuotaStatusHandler() *mock.QuotaStatusHandlerStub { + return &mock.QuotaStatusHandlerStub{ + ResetStatisticsCalled: func() {}, + AddQuotaCalled: func(_ string, _ uint32, _ uint64, _ uint32, _ uint64) {}, + } +} + //------- NewQuotaFloodPreventer func TestNewQuotaFloodPreventer_NilCacherShouldErr(t 
*testing.T) { t.Parallel() - qfp, err := NewQuotaFloodPreventer(nil, minMessages, minTotalSize) + qfp, err := NewQuotaFloodPreventer(nil, &mock.QuotaStatusHandlerStub{}, minMessages, minTotalSize) assert.True(t, check.IfNil(qfp)) assert.Equal(t, process.ErrNilCacher, err) @@ -26,7 +33,7 @@ func TestNewQuotaFloodPreventer_NilCacherShouldErr(t *testing.T) { func TestNewQuotaFloodPreventer_LowerMinMessagesShouldErr(t *testing.T) { t.Parallel() - qfp, err := NewQuotaFloodPreventer(&mock.CacherStub{}, minMessages-1, minTotalSize) + qfp, err := NewQuotaFloodPreventer(&mock.CacherStub{}, &mock.QuotaStatusHandlerStub{}, minMessages-1, minTotalSize) assert.True(t, check.IfNil(qfp)) assert.True(t, errors.Is(err, process.ErrInvalidValue)) @@ -35,7 +42,7 @@ func TestNewQuotaFloodPreventer_LowerMinMessagesShouldErr(t *testing.T) { func TestNewQuotaFloodPreventer_LowerMinSizeShouldErr(t *testing.T) { t.Parallel() - qfp, err := NewQuotaFloodPreventer(&mock.CacherStub{}, minMessages, minTotalSize-1) + qfp, err := NewQuotaFloodPreventer(&mock.CacherStub{}, &mock.QuotaStatusHandlerStub{}, minMessages, minTotalSize-1) assert.True(t, check.IfNil(qfp)) assert.True(t, errors.Is(err, process.ErrInvalidValue)) @@ -44,7 +51,7 @@ func TestNewQuotaFloodPreventer_LowerMinSizeShouldErr(t *testing.T) { func TestNewQuotaFloodPreventer_ShouldWork(t *testing.T) { t.Parallel() - qfp, err := NewQuotaFloodPreventer(&mock.CacherStub{}, minMessages, minTotalSize) + qfp, err := NewQuotaFloodPreventer(&mock.CacherStub{}, &mock.QuotaStatusHandlerStub{}, minMessages, minTotalSize) assert.False(t, check.IfNil(qfp)) assert.Nil(t, err) @@ -67,13 +74,14 @@ func TestNewQuotaFloodPreventer_IncrementIdentifierNotPresentPutQuotaAndReturnTr if !isQuota { return } - if q.numMessages == 1 && q.totalSize == size { + if q.numReceivedMessages == 1 && q.sizeReceivedMessages == size { putWasCalled = true } return }, }, + createMockQuotaStatusHandler(), minMessages*4, minTotalSize*10, ) @@ -99,13 +107,14 @@ func 
TestNewQuotaFloodPreventer_IncrementNotQuotaSavedInCacheShouldPutQuotaAndRe if !isQuota { return } - if q.numMessages == 1 && q.totalSize == size { + if q.numReceivedMessages == 1 && q.sizeReceivedMessages == size { putWasCalled = true } return }, }, + createMockQuotaStatusHandler(), minMessages*4, minTotalSize*10, ) @@ -123,8 +132,8 @@ func TestNewQuotaFloodPreventer_IncrementUnderMaxValuesShouldIncrementAndReturnT existingSize := uint64(minTotalSize * 5) existingMessages := uint32(minMessages * 2) existingQuota := "a{ - numMessages: existingMessages, - totalSize: existingSize, + numReceivedMessages: existingMessages, + sizeReceivedMessages: existingSize, } size := uint64(minTotalSize * 2) qfp, _ := NewQuotaFloodPreventer( @@ -137,13 +146,14 @@ func TestNewQuotaFloodPreventer_IncrementUnderMaxValuesShouldIncrementAndReturnT if !isQuota { return } - if q.numMessages == existingMessages+1 && q.totalSize == existingSize+size { + if q.numReceivedMessages == existingMessages+1 && q.sizeReceivedMessages == existingSize+size { putWasCalled = true } return }, }, + createMockQuotaStatusHandler(), minMessages*4, minTotalSize*10, ) @@ -160,8 +170,8 @@ func TestNewQuotaFloodPreventer_IncrementOverMaxNumMessagesShouldNotPutAndReturn existingMessages := uint32(minMessages + 11) existingSize := uint64(minTotalSize * 3) existingQuota := "a{ - numMessages: existingMessages, - totalSize: existingSize, + numReceivedMessages: existingMessages, + sizeReceivedMessages: existingSize, } qfp, _ := NewQuotaFloodPreventer( &mock.CacherStub{ @@ -174,6 +184,7 @@ func TestNewQuotaFloodPreventer_IncrementOverMaxNumMessagesShouldNotPutAndReturn return false }, }, + createMockQuotaStatusHandler(), minMessages*4, minTotalSize*10, ) @@ -189,8 +200,8 @@ func TestNewQuotaFloodPreventer_IncrementOverMaxSizeShouldNotPutAndReturnFalse(t existingMessages := uint32(minMessages) existingSize := uint64(minTotalSize * 11) existingQuota := "a{ - numMessages: existingMessages, - totalSize: existingSize, + 
numReceivedMessages: existingMessages, + sizeReceivedMessages: existingSize, } qfp, _ := NewQuotaFloodPreventer( &mock.CacherStub{ @@ -203,6 +214,7 @@ func TestNewQuotaFloodPreventer_IncrementOverMaxSizeShouldNotPutAndReturnFalse(t return false }, }, + createMockQuotaStatusHandler(), minMessages*4, minTotalSize*10, ) @@ -217,6 +229,7 @@ func TestCountersMap_IncrementShouldWorkConcurrently(t *testing.T) { qfp, _ := NewQuotaFloodPreventer( mock.NewCacherMock(), + createMockQuotaStatusHandler(), minMessages, minTotalSize) numIterations := 1000 @@ -244,7 +257,11 @@ func TestCountersMap_ResetShouldCallCacherClear(t *testing.T) { ClearCalled: func() { clearCalled = true }, + KeysCalled: func() [][]byte { + return make([][]byte, 0) + }, }, + createMockQuotaStatusHandler(), minTotalSize, minMessages, ) @@ -254,11 +271,87 @@ func TestCountersMap_ResetShouldCallCacherClear(t *testing.T) { assert.True(t, clearCalled) } +func TestCountersMap_ResetShouldCallQuotaStatus(t *testing.T) { + t.Parallel() + + cacher := mock.NewCacherMock() + key1 := []byte("key1") + quota1 := "a{ + numReceivedMessages: 1, + sizeReceivedMessages: 2, + numProcessedMessages: 3, + sizeProcessedMessages: 4, + } + key2 := []byte("key2") + quota2 := "a{ + numReceivedMessages: 5, + sizeReceivedMessages: 6, + numProcessedMessages: 7, + sizeProcessedMessages: 8, + } + + cacher.HasOrAdd(key1, quota1) + cacher.HasOrAdd(key2, quota2) + + resetStatisticsCalled := false + quota1Compared := false + quota2Compared := false + totalCompared := false + qfp, _ := NewQuotaFloodPreventer( + cacher, + &mock.QuotaStatusHandlerStub{ + ResetStatisticsCalled: func() { + resetStatisticsCalled = true + }, + AddQuotaCalled: func(identifier string, numReceivedMessages uint32, sizeReceivedMessages uint64, numProcessedMessages uint32, sizeProcessedMessages uint64) { + quotaProvided := quota{ + numReceivedMessages: numReceivedMessages, + sizeReceivedMessages: sizeReceivedMessages, + numProcessedMessages: numProcessedMessages, + 
sizeProcessedMessages: sizeProcessedMessages, + } + quotaToCompare := quota{} + + switch identifier { + case string(key1): + quotaToCompare = *quota1 + quota1Compared = true + case string(key2): + quotaToCompare = *quota2 + quota2Compared = true + case totalIdentifier: + quotaToCompare = quota{ + numReceivedMessages: quota1.numReceivedMessages + quota2.numReceivedMessages, + sizeReceivedMessages: quota1.sizeReceivedMessages + quota2.sizeReceivedMessages, + numProcessedMessages: quota1.numProcessedMessages + quota2.numProcessedMessages, + sizeProcessedMessages: quota1.sizeProcessedMessages + quota2.sizeProcessedMessages, + } + totalCompared = true + default: + assert.Fail(t, fmt.Sprintf("unknown identifier %s", identifier)) + } + + assert.Equal(t, quotaToCompare, quotaProvided) + }, + }, + minTotalSize, + minMessages, + ) + + qfp.Reset() + + assert.True(t, resetStatisticsCalled) + assert.True(t, quota1Compared) + assert.True(t, quota2Compared) + assert.True(t, totalCompared) +} + func TestCountersMap_IncrementAndResetShouldWorkConcurrently(t *testing.T) { t.Parallel() qfp, _ := NewQuotaFloodPreventer( mock.NewCacherMock(), + createMockQuotaStatusHandler(), minMessages, minTotalSize, ) From e43c15bc27c2c493002370ec0651ad0ccf115c83 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Tue, 10 Dec 2019 09:23:24 +0200 Subject: [PATCH 11/35] work in progress integration antiflood component --- consensus/interface.go | 8 ++ consensus/mock/p2pAntifloodHandlerStub.go | 15 +++ consensus/spos/errors.go | 3 + consensus/spos/worker.go | 45 +++++--- consensus/spos/worker_test.go | 105 +++++++++++++++--- dataRetriever/errors.go | 3 + .../metachain/resolversContainerFactory.go | 30 +++-- .../resolversContainerFactory_test.go | 30 +++++ .../shard/resolversContainerFactory.go | 32 ++++-- .../shard/resolversContainerFactory_test.go | 36 ++++++ dataRetriever/interface.go | 7 ++ dataRetriever/mock/p2pAntifloodHandlerStub.go | 15 +++ .../resolvers/genericBlockBodyResolver.go | 32 +++--- 
.../genericBlockBodyResolver_test.go | 55 +++++++++ dataRetriever/resolvers/headerResolver.go | 31 ++++-- .../resolvers/headerResolver_test.go | 61 ++++++++++ .../resolvers/transactionResolver.go | 39 ++++--- .../resolvers/transactionResolver_test.go | 91 ++++++++++++--- node/heartbeat/errors.go | 3 + node/heartbeat/interface.go | 7 ++ node/heartbeat/monitor.go | 22 +++- node/heartbeat/monitorEdgeCases_test.go | 1 + node/heartbeat/monitor_test.go | 38 +++++++ node/interface.go | 7 ++ node/mock/p2pAntifloodHandlerStub.go | 15 +++ 25 files changed, 617 insertions(+), 114 deletions(-) create mode 100644 consensus/mock/p2pAntifloodHandlerStub.go create mode 100644 dataRetriever/mock/p2pAntifloodHandlerStub.go create mode 100644 node/mock/p2pAntifloodHandlerStub.go diff --git a/consensus/interface.go b/consensus/interface.go index 512e817d0a6..7d010aef89b 100644 --- a/consensus/interface.go +++ b/consensus/interface.go @@ -4,6 +4,7 @@ import ( "time" "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/p2p" ) // Rounder defines the actions which should be handled by a round implementation @@ -68,3 +69,10 @@ type P2PMessenger interface { Broadcast(topic string, buff []byte) IsInterfaceNil() bool } + +// P2PAntifloodHandler defines the behavior of a component able to signal that the system is too busy (or flooded) processing +// p2p messages +type P2PAntifloodHandler interface { + CanProcessMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error + IsInterfaceNil() bool +} diff --git a/consensus/mock/p2pAntifloodHandlerStub.go b/consensus/mock/p2pAntifloodHandlerStub.go new file mode 100644 index 00000000000..e8236ed9167 --- /dev/null +++ b/consensus/mock/p2pAntifloodHandlerStub.go @@ -0,0 +1,15 @@ +package mock + +import "github.com/ElrondNetwork/elrond-go/p2p" + +type P2PAntifloodHandlerStub struct { + CanProcessMessageCalled func(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error +} + +func (p2pahs 
*P2PAntifloodHandlerStub) CanProcessMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { + return p2pahs.CanProcessMessageCalled(message, fromConnectedPeer) +} + +func (p2pahs *P2PAntifloodHandlerStub) IsInterfaceNil() bool { + return p2pahs == nil +} diff --git a/consensus/spos/errors.go b/consensus/spos/errors.go index 566c09fb385..e600f8d13dd 100644 --- a/consensus/spos/errors.go +++ b/consensus/spos/errors.go @@ -147,3 +147,6 @@ var ErrNilExecuteStoredMessages = errors.New("executeStoredMessages is nil") // ErrNilAppStatusHandler defines the error for setting a nil AppStatusHandler var ErrNilAppStatusHandler = errors.New("nil AppStatusHandler") + +// ErrNilAntifloodHandler signals that a nil antiflood handler has been provided +var ErrNilAntifloodHandler = errors.New("nil antiflood handler") diff --git a/consensus/spos/worker.go b/consensus/spos/worker.go index 0178128165c..715475b1344 100644 --- a/consensus/spos/worker.go +++ b/consensus/spos/worker.go @@ -45,6 +45,8 @@ type Worker struct { mapHashConsensusMessage map[string][]*consensus.Message mutHashConsensusMessage sync.RWMutex + + antifloodHandler consensus.P2PAntifloodHandler } // NewWorker creates a new Worker object @@ -62,6 +64,7 @@ func NewWorker( shardCoordinator sharding.Coordinator, singleSigner crypto.SingleSigner, syncTimer ntp.SyncTimer, + antifloodHandler consensus.P2PAntifloodHandler, ) (*Worker, error) { err := checkNewWorkerParams( consensusService, @@ -77,6 +80,7 @@ func NewWorker( shardCoordinator, singleSigner, syncTimer, + antifloodHandler, ) if err != nil { return nil, err @@ -96,6 +100,7 @@ func NewWorker( shardCoordinator: shardCoordinator, singleSigner: singleSigner, syncTimer: syncTimer, + antifloodHandler: antifloodHandler, } wrk.executeMessageChannel = make(chan *consensus.Message) @@ -125,46 +130,50 @@ func checkNewWorkerParams( shardCoordinator sharding.Coordinator, singleSigner crypto.SingleSigner, syncTimer ntp.SyncTimer, + antifloodHandler 
consensus.P2PAntifloodHandler, ) error { - if consensusService == nil || consensusService.IsInterfaceNil() { + if check.IfNil(consensusService) { return ErrNilConsensusService } - if blockChain == nil || blockChain.IsInterfaceNil() { + if check.IfNil(blockChain) { return ErrNilBlockChain } - if blockProcessor == nil || blockProcessor.IsInterfaceNil() { + if check.IfNil(blockProcessor) { return ErrNilBlockProcessor } - if bootstrapper == nil || bootstrapper.IsInterfaceNil() { + if check.IfNil(bootstrapper) { return ErrNilBootstrapper } - if broadcastMessenger == nil || broadcastMessenger.IsInterfaceNil() { + if check.IfNil(broadcastMessenger) { return ErrNilBroadcastMessenger } if consensusState == nil { return ErrNilConsensusState } - if forkDetector == nil || forkDetector.IsInterfaceNil() { + if check.IfNil(forkDetector) { return ErrNilForkDetector } - if keyGenerator == nil || keyGenerator.IsInterfaceNil() { + if check.IfNil(keyGenerator) { return ErrNilKeyGenerator } - if marshalizer == nil || marshalizer.IsInterfaceNil() { + if check.IfNil(marshalizer) { return ErrNilMarshalizer } - if rounder == nil || rounder.IsInterfaceNil() { + if check.IfNil(rounder) { return ErrNilRounder } - if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + if check.IfNil(shardCoordinator) { return ErrNilShardCoordinator } - if singleSigner == nil || singleSigner.IsInterfaceNil() { + if check.IfNil(singleSigner) { return ErrNilSingleSigner } - if syncTimer == nil || syncTimer.IsInterfaceNil() { + if check.IfNil(syncTimer) { return ErrNilSyncTimer } + if check.IfNil(antifloodHandler) { + return ErrNilAntifloodHandler + } return nil } @@ -216,17 +225,21 @@ func (wrk *Worker) getCleanedList(cnsDataList []*consensus.Message) []*consensus } // ProcessReceivedMessage method redirects the received message to the channel which should handle it -func (wrk *Worker) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerIDjjj) error { - if message == nil || 
message.IsInterfaceNil() { +func (wrk *Worker) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { + if check.IfNil(message) { return ErrNilMessage } - if message.Data() == nil { return ErrNilDataToProcess } + err := wrk.antifloodHandler.CanProcessMessage(message, fromConnectedPeer) + if err != nil { + return err + } + cnsDta := &consensus.Message{} - err := wrk.marshalizer.Unmarshal(cnsDta, message.Data()) + err = wrk.marshalizer.Unmarshal(cnsDta, message.Data()) if err != nil { return err } diff --git a/consensus/spos/worker_test.go b/consensus/spos/worker_test.go index ba65d8a2a85..d7d8085b994 100644 --- a/consensus/spos/worker_test.go +++ b/consensus/spos/worker_test.go @@ -22,6 +22,14 @@ const roundTimeDuration = 100 * time.Millisecond var fromConnectedPeerId = p2p.PeerID("connected peer id") +func createMockP2pAntifloodHandler() *mock.P2PAntifloodHandlerStub { + return &mock.P2PAntifloodHandlerStub{ + CanProcessMessageCalled: func(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { + return nil + }, + } +} + func initWorker() *spos.Worker { blockchainMock := &mock.BlockChainMock{} blockProcessor := &mock.BlockProcessorMock{ @@ -67,7 +75,9 @@ func initWorker() *spos.Worker { rounderMock, shardCoordinatorMock, singleSignerMock, - syncTimerMock) + syncTimerMock, + createMockP2pAntifloodHandler(), + ) return sposWorker } @@ -113,7 +123,9 @@ func TestWorker_NewWorkerConsensusServiceNilShouldFail(t *testing.T) { rounderMock, shardCoordinatorMock, singleSignerMock, - syncTimerMock) + syncTimerMock, + createMockP2pAntifloodHandler(), + ) assert.Nil(t, wrk) assert.Equal(t, spos.ErrNilConsensusService, err) @@ -148,7 +160,9 @@ func TestWorker_NewWorkerBlockChainNilShouldFail(t *testing.T) { rounderMock, shardCoordinatorMock, singleSignerMock, - syncTimerMock) + syncTimerMock, + createMockP2pAntifloodHandler(), + ) assert.Nil(t, wrk) assert.Equal(t, spos.ErrNilBlockChain, err) @@ -183,7 +197,9 @@ func 
TestWorker_NewWorkerBlockProcessorNilShouldFail(t *testing.T) { rounderMock, shardCoordinatorMock, singleSignerMock, - syncTimerMock) + syncTimerMock, + createMockP2pAntifloodHandler(), + ) assert.Nil(t, wrk) assert.Equal(t, spos.ErrNilBlockProcessor, err) @@ -218,7 +234,9 @@ func TestWorker_NewWorkerBootstrapperNilShouldFail(t *testing.T) { rounderMock, shardCoordinatorMock, singleSignerMock, - syncTimerMock) + syncTimerMock, + createMockP2pAntifloodHandler(), + ) assert.Nil(t, wrk) assert.Equal(t, spos.ErrNilBootstrapper, err) @@ -253,7 +271,9 @@ func TestWorker_NewWorkerBroadcastMessengerNilShouldFail(t *testing.T) { rounderMock, shardCoordinatorMock, singleSignerMock, - syncTimerMock) + syncTimerMock, + createMockP2pAntifloodHandler(), + ) assert.Nil(t, wrk) assert.Equal(t, spos.ErrNilBroadcastMessenger, err) @@ -287,7 +307,9 @@ func TestWorker_NewWorkerConsensusStateNilShouldFail(t *testing.T) { rounderMock, shardCoordinatorMock, singleSignerMock, - syncTimerMock) + syncTimerMock, + createMockP2pAntifloodHandler(), + ) assert.Nil(t, wrk) assert.Equal(t, spos.ErrNilConsensusState, err) @@ -321,7 +343,9 @@ func TestWorker_NewWorkerForkDetectorNilShouldFail(t *testing.T) { rounderMock, shardCoordinatorMock, singleSignerMock, - syncTimerMock) + syncTimerMock, + createMockP2pAntifloodHandler(), + ) assert.Nil(t, wrk) assert.Equal(t, spos.ErrNilForkDetector, err) @@ -355,7 +379,9 @@ func TestWorker_NewWorkerKeyGeneratorNilShouldFail(t *testing.T) { rounderMock, shardCoordinatorMock, singleSignerMock, - syncTimerMock) + syncTimerMock, + createMockP2pAntifloodHandler(), + ) assert.Nil(t, wrk) assert.Equal(t, spos.ErrNilKeyGenerator, err) @@ -389,7 +415,9 @@ func TestWorker_NewWorkerMarshalizerNilShouldFail(t *testing.T) { rounderMock, shardCoordinatorMock, singleSignerMock, - syncTimerMock) + syncTimerMock, + createMockP2pAntifloodHandler(), + ) assert.Nil(t, wrk) assert.Equal(t, spos.ErrNilMarshalizer, err) @@ -423,7 +451,9 @@ func 
TestWorker_NewWorkerRounderNilShouldFail(t *testing.T) { nil, shardCoordinatorMock, singleSignerMock, - syncTimerMock) + syncTimerMock, + createMockP2pAntifloodHandler(), + ) assert.Nil(t, wrk) assert.Equal(t, spos.ErrNilRounder, err) @@ -457,7 +487,9 @@ func TestWorker_NewWorkerShardCoordinatorNilShouldFail(t *testing.T) { rounderMock, nil, singleSignerMock, - syncTimerMock) + syncTimerMock, + createMockP2pAntifloodHandler(), + ) assert.Nil(t, wrk) assert.Equal(t, spos.ErrNilShardCoordinator, err) @@ -491,7 +523,9 @@ func TestWorker_NewWorkerSingleSignerNilShouldFail(t *testing.T) { rounderMock, shardCoordinatorMock, nil, - syncTimerMock) + syncTimerMock, + createMockP2pAntifloodHandler(), + ) assert.Nil(t, wrk) assert.Equal(t, spos.ErrNilSingleSigner, err) @@ -525,12 +559,51 @@ func TestWorker_NewWorkerSyncTimerNilShouldFail(t *testing.T) { rounderMock, shardCoordinatorMock, singleSignerMock, - nil) + nil, + createMockP2pAntifloodHandler(), + ) assert.Nil(t, wrk) assert.Equal(t, spos.ErrNilSyncTimer, err) } +func TestWorker_NewWorkerNilAntifloodHandlerShouldFail(t *testing.T) { + t.Parallel() + blockchainMock := &mock.BlockChainMock{} + blockProcessor := &mock.BlockProcessorMock{} + bootstrapperMock := &mock.BootstrapperMock{} + broadcastMessengerMock := &mock.BroadcastMessengerMock{} + consensusState := initConsensusState() + forkDetectorMock := &mock.ForkDetectorMock{} + keyGeneratorMock := &mock.KeyGenMock{} + marshalizerMock := mock.MarshalizerMock{} + rounderMock := initRounderMock() + shardCoordinatorMock := mock.ShardCoordinatorMock{} + singleSignerMock := &mock.SingleSignerMock{} + syncTimerMock := &mock.SyncTimerMock{} + bnService, _ := bn.NewConsensusService() + + wrk, err := spos.NewWorker( + bnService, + blockchainMock, + blockProcessor, + bootstrapperMock, + broadcastMessengerMock, + consensusState, + forkDetectorMock, + keyGeneratorMock, + marshalizerMock, + rounderMock, + shardCoordinatorMock, + singleSignerMock, + syncTimerMock, + nil, + ) + + 
assert.Nil(t, wrk) + assert.Equal(t, spos.ErrNilAntifloodHandler, err) +} + func TestWorker_NewWorkerShouldWork(t *testing.T) { t.Parallel() blockchainMock := &mock.BlockChainMock{} @@ -560,7 +633,9 @@ func TestWorker_NewWorkerShouldWork(t *testing.T) { rounderMock, shardCoordinatorMock, singleSignerMock, - syncTimerMock) + syncTimerMock, + createMockP2pAntifloodHandler(), + ) assert.NotNil(t, wrk) assert.Nil(t, err) diff --git a/dataRetriever/errors.go b/dataRetriever/errors.go index 7b7b9ba42cf..cbaa022b6b5 100644 --- a/dataRetriever/errors.go +++ b/dataRetriever/errors.go @@ -202,3 +202,6 @@ var ErrNilCurrBlockTxs = errors.New("nil current block txs holder") // ErrNilRequestedItemsHandler signals that a nil requested items handler was provided var ErrNilRequestedItemsHandler = errors.New("nil requested items handler") + +// ErrNilAntifloodHandler signals that a nil antiflood handler has been provided +var ErrNilAntifloodHandler = errors.New("nil antiflood handler") diff --git a/dataRetriever/factory/metachain/resolversContainerFactory.go b/dataRetriever/factory/metachain/resolversContainerFactory.go index 7e324031afc..4abf433eb0a 100644 --- a/dataRetriever/factory/metachain/resolversContainerFactory.go +++ b/dataRetriever/factory/metachain/resolversContainerFactory.go @@ -1,6 +1,7 @@ package metachain import ( + "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/core/random" "github.com/ElrondNetwork/elrond-go/data/typeConverters" "github.com/ElrondNetwork/elrond-go/dataRetriever" @@ -23,6 +24,7 @@ type resolversContainerFactory struct { uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter intRandomizer dataRetriever.IntRandomizer dataPacker dataRetriever.DataPacker + antifloodHandler dataRetriever.P2PAntifloodHandler } // NewResolversContainerFactory creates a new container filled with topic resolvers @@ -34,29 +36,33 @@ func NewResolversContainerFactory( dataPools dataRetriever.MetaPoolsHolder, 
uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter, dataPacker dataRetriever.DataPacker, + antifloodHandler dataRetriever.P2PAntifloodHandler, ) (*resolversContainerFactory, error) { - if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + if check.IfNil(shardCoordinator) { return nil, dataRetriever.ErrNilShardCoordinator } - if messenger == nil || messenger.IsInterfaceNil() { + if check.IfNil(messenger) { return nil, dataRetriever.ErrNilMessenger } - if store == nil || store.IsInterfaceNil() { + if check.IfNil(store) { return nil, dataRetriever.ErrNilStore } - if marshalizer == nil || marshalizer.IsInterfaceNil() { + if check.IfNil(marshalizer) { return nil, dataRetriever.ErrNilMarshalizer } - if dataPools == nil || dataPools.IsInterfaceNil() { + if check.IfNil(dataPools) { return nil, dataRetriever.ErrNilDataPoolHolder } - if uint64ByteSliceConverter == nil || uint64ByteSliceConverter.IsInterfaceNil() { + if check.IfNil(uint64ByteSliceConverter) { return nil, dataRetriever.ErrNilUint64ByteSliceConverter } - if dataPacker == nil || dataPacker.IsInterfaceNil() { + if check.IfNil(dataPacker) { return nil, dataRetriever.ErrNilDataPacker } + if check.IfNil(antifloodHandler) { + return nil, dataRetriever.ErrNilAntifloodHandler + } return &resolversContainerFactory{ shardCoordinator: shardCoordinator, @@ -67,6 +73,7 @@ func NewResolversContainerFactory( uint64ByteSliceConverter: uint64ByteSliceConverter, intRandomizer: &random.ConcurrentSafeIntRandomizer{}, dataPacker: dataPacker, + antifloodHandler: antifloodHandler, }, nil } @@ -201,6 +208,7 @@ func (rcf *resolversContainerFactory) createShardHeaderResolver(topic string, ex hdrNonceStore, rcf.marshalizer, rcf.uint64ByteSliceConverter, + rcf.antifloodHandler, ) if err != nil { return nil, err @@ -254,6 +262,7 @@ func (rcf *resolversContainerFactory) createMetaChainHeaderResolver(identifier s hdrNonceStore, rcf.marshalizer, rcf.uint64ByteSliceConverter, + rcf.antifloodHandler, ) if err != nil 
{ return nil, err @@ -327,6 +336,7 @@ func (rcf *resolversContainerFactory) createTxResolver( txStorer, rcf.marshalizer, rcf.dataPacker, + rcf.antifloodHandler, ) if err != nil { return nil, err @@ -387,6 +397,7 @@ func (rcf *resolversContainerFactory) createMiniBlocksResolver(topic string, exc rcf.dataPools.MiniBlocks(), miniBlocksStorer, rcf.marshalizer, + rcf.antifloodHandler, ) if err != nil { return nil, err @@ -428,8 +439,5 @@ func (rcf *resolversContainerFactory) createOneResolverSender( // IsInterfaceNil returns true if there is no value under the interface func (rcf *resolversContainerFactory) IsInterfaceNil() bool { - if rcf == nil { - return true - } - return false + return rcf == nil } diff --git a/dataRetriever/factory/metachain/resolversContainerFactory_test.go b/dataRetriever/factory/metachain/resolversContainerFactory_test.go index 34d4a98d079..af532b00656 100644 --- a/dataRetriever/factory/metachain/resolversContainerFactory_test.go +++ b/dataRetriever/factory/metachain/resolversContainerFactory_test.go @@ -90,6 +90,7 @@ func TestNewResolversContainerFactory_NilShardCoordinatorShouldErr(t *testing.T) createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, rcf) @@ -107,6 +108,7 @@ func TestNewResolversContainerFactory_NilMessengerShouldErr(t *testing.T) { createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, rcf) @@ -124,6 +126,7 @@ func TestNewResolversContainerFactory_NilStoreShouldErr(t *testing.T) { createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, rcf) @@ -141,6 +144,7 @@ func TestNewResolversContainerFactory_NilMarshalizerShouldErr(t *testing.T) { createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, rcf) @@ -158,6 +162,7 @@ func 
TestNewResolversContainerFactory_NilDataPoolShouldErr(t *testing.T) { nil, &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, rcf) @@ -175,6 +180,7 @@ func TestNewResolversContainerFactory_NilUint64SliceConverterShouldErr(t *testin createDataPools(), nil, &mock.DataPackerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, rcf) @@ -192,12 +198,31 @@ func TestNewResolversContainerFactory_NilDataPackerShouldErr(t *testing.T) { createDataPools(), &mock.Uint64ByteSliceConverterMock{}, nil, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, rcf) assert.Equal(t, dataRetriever.ErrNilDataPacker, err) } +func TestNewResolversContainerFactory_NilAntifloodHandlerShouldErr(t *testing.T) { + t.Parallel() + + rcf, err := metachain.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler("", ""), + createStore(), + &mock.MarshalizerMock{}, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + nil, + ) + + assert.Nil(t, rcf) + assert.Equal(t, dataRetriever.ErrNilAntifloodHandler, err) +} + func TestNewResolversContainerFactory_ShouldWork(t *testing.T) { t.Parallel() @@ -209,6 +234,7 @@ func TestNewResolversContainerFactory_ShouldWork(t *testing.T) { createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.NotNil(t, rcf) @@ -228,6 +254,7 @@ func TestResolversContainerFactory_CreateTopicShardHeadersForMetachainFailsShoul createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) container, err := rcf.Create() @@ -247,6 +274,7 @@ func TestResolversContainerFactory_CreateRegisterShardHeadersForMetachainFailsSh createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) container, err := rcf.Create() @@ -266,6 +294,7 @@ func 
TestResolversContainerFactory_CreateShouldWork(t *testing.T) { createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) container, err := rcf.Create() @@ -290,6 +319,7 @@ func TestResolversContainerFactory_With4ShardsShouldWork(t *testing.T) { createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) container, _ := rcf.Create() diff --git a/dataRetriever/factory/shard/resolversContainerFactory.go b/dataRetriever/factory/shard/resolversContainerFactory.go index a421530178d..65edab79da0 100644 --- a/dataRetriever/factory/shard/resolversContainerFactory.go +++ b/dataRetriever/factory/shard/resolversContainerFactory.go @@ -1,6 +1,7 @@ package shard import ( + "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/core/random" "github.com/ElrondNetwork/elrond-go/data/typeConverters" "github.com/ElrondNetwork/elrond-go/dataRetriever" @@ -23,6 +24,7 @@ type resolversContainerFactory struct { uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter intRandomizer dataRetriever.IntRandomizer dataPacker dataRetriever.DataPacker + antifloodHandler dataRetriever.P2PAntifloodHandler } // NewResolversContainerFactory creates a new container filled with topic resolvers @@ -34,29 +36,33 @@ func NewResolversContainerFactory( dataPools dataRetriever.PoolsHolder, uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter, dataPacker dataRetriever.DataPacker, + antifloodHandler dataRetriever.P2PAntifloodHandler, ) (*resolversContainerFactory, error) { - if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + if check.IfNil(shardCoordinator) { return nil, dataRetriever.ErrNilShardCoordinator } - if messenger == nil || messenger.IsInterfaceNil() { + if check.IfNil(messenger) { return nil, dataRetriever.ErrNilMessenger } - if store == nil || store.IsInterfaceNil() { + if check.IfNil(store) { return nil, 
dataRetriever.ErrNilTxStorage } - if marshalizer == nil || marshalizer.IsInterfaceNil() { + if check.IfNil(marshalizer) { return nil, dataRetriever.ErrNilMarshalizer } - if dataPools == nil || dataPools.IsInterfaceNil() { + if check.IfNil(dataPools) { return nil, dataRetriever.ErrNilDataPoolHolder } - if uint64ByteSliceConverter == nil || uint64ByteSliceConverter.IsInterfaceNil() { + if check.IfNil(uint64ByteSliceConverter) { return nil, dataRetriever.ErrNilUint64ByteSliceConverter } - if dataPacker == nil || dataPacker.IsInterfaceNil() { + if check.IfNil(dataPacker) { return nil, dataRetriever.ErrNilDataPacker } + if check.IfNil(antifloodHandler) { + return nil, dataRetriever.ErrNilAntifloodHandler + } return &resolversContainerFactory{ shardCoordinator: shardCoordinator, @@ -67,6 +73,7 @@ func NewResolversContainerFactory( uint64ByteSliceConverter: uint64ByteSliceConverter, intRandomizer: &random.ConcurrentSafeIntRandomizer{}, dataPacker: dataPacker, + antifloodHandler: antifloodHandler, }, nil } @@ -238,6 +245,7 @@ func (rcf *resolversContainerFactory) createTxResolver( txStorer, rcf.marshalizer, rcf.dataPacker, + rcf.antifloodHandler, ) if err != nil { return nil, err @@ -286,6 +294,7 @@ func (rcf *resolversContainerFactory) generateHdrResolver() ([]string, []dataRet hdrNonceStore, rcf.marshalizer, rcf.uint64ByteSliceConverter, + rcf.antifloodHandler, ) if err != nil { return nil, nil, err @@ -362,6 +371,7 @@ func (rcf *resolversContainerFactory) createMiniBlocksResolver(topic string, exc rcf.dataPools.MiniBlocks(), miniBlocksStorer, rcf.marshalizer, + rcf.antifloodHandler, ) if err != nil { return nil, err @@ -405,6 +415,7 @@ func (rcf *resolversContainerFactory) generatePeerChBlockBodyResolver() ([]strin rcf.dataPools.MiniBlocks(), peerBlockBodyStorer, rcf.marshalizer, + rcf.antifloodHandler, ) if err != nil { return nil, nil, err @@ -457,6 +468,7 @@ func (rcf *resolversContainerFactory) generateMetachainShardHeaderResolver() ([] hdrNonceStore, 
rcf.marshalizer, rcf.uint64ByteSliceConverter, + rcf.antifloodHandler, ) if err != nil { return nil, nil, err @@ -513,6 +525,7 @@ func (rcf *resolversContainerFactory) generateMetablockHeaderResolver() ([]strin hdrNonceStore, rcf.marshalizer, rcf.uint64ByteSliceConverter, + rcf.antifloodHandler, ) if err != nil { return nil, nil, err @@ -559,8 +572,5 @@ func (rcf *resolversContainerFactory) createOneResolverSender( // IsInterfaceNil returns true if there is no value under the interface func (rcf *resolversContainerFactory) IsInterfaceNil() bool { - if rcf == nil { - return true - } - return false + return rcf == nil } diff --git a/dataRetriever/factory/shard/resolversContainerFactory_test.go b/dataRetriever/factory/shard/resolversContainerFactory_test.go index d8de13689f3..ef391437113 100644 --- a/dataRetriever/factory/shard/resolversContainerFactory_test.go +++ b/dataRetriever/factory/shard/resolversContainerFactory_test.go @@ -97,6 +97,7 @@ func TestNewResolversContainerFactory_NilShardCoordinatorShouldErr(t *testing.T) createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, rcf) @@ -114,6 +115,7 @@ func TestNewResolversContainerFactory_NilMessengerShouldErr(t *testing.T) { createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, rcf) @@ -131,6 +133,7 @@ func TestNewResolversContainerFactory_NilBlockchainShouldErr(t *testing.T) { createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, rcf) @@ -148,6 +151,7 @@ func TestNewResolversContainerFactory_NilMarshalizerShouldErr(t *testing.T) { createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, rcf) @@ -165,6 +169,7 @@ func TestNewResolversContainerFactory_NilDataPoolShouldErr(t *testing.T) { nil, 
&mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, rcf) @@ -182,6 +187,7 @@ func TestNewResolversContainerFactory_NilUint64SliceConverterShouldErr(t *testin createDataPools(), nil, &mock.DataPackerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, rcf) @@ -199,12 +205,31 @@ func TestNewResolversContainerFactory_NilSliceSplitterShouldErr(t *testing.T) { createDataPools(), &mock.Uint64ByteSliceConverterMock{}, nil, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, rcf) assert.Equal(t, dataRetriever.ErrNilDataPacker, err) } +func TestNewResolversContainerFactory_NilAntifloodHandlerShouldErr(t *testing.T) { + t.Parallel() + + rcf, err := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler("", ""), + createStore(), + &mock.MarshalizerMock{}, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + nil, + ) + + assert.Nil(t, rcf) + assert.Equal(t, dataRetriever.ErrNilAntifloodHandler, err) +} + func TestNewResolversContainerFactory_ShouldWork(t *testing.T) { t.Parallel() @@ -216,6 +241,7 @@ func TestNewResolversContainerFactory_ShouldWork(t *testing.T) { createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) assert.NotNil(t, rcf) @@ -235,6 +261,7 @@ func TestResolversContainerFactory_CreateTopicCreationTxFailsShouldErr(t *testin createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) container, err := rcf.Create() @@ -254,6 +281,7 @@ func TestResolversContainerFactory_CreateTopicCreationHdrFailsShouldErr(t *testi createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) container, err := rcf.Create() @@ -273,6 +301,7 @@ func TestResolversContainerFactory_CreateTopicCreationMiniBlocksFailsShouldErr(t createDataPools(), 
&mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) container, err := rcf.Create() @@ -292,6 +321,7 @@ func TestResolversContainerFactory_CreateTopicCreationPeerChBlocksFailsShouldErr createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) container, err := rcf.Create() @@ -311,6 +341,7 @@ func TestResolversContainerFactory_CreateRegisterTxFailsShouldErr(t *testing.T) createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) container, err := rcf.Create() @@ -330,6 +361,7 @@ func TestResolversContainerFactory_CreateRegisterHdrFailsShouldErr(t *testing.T) createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) container, err := rcf.Create() @@ -349,6 +381,7 @@ func TestResolversContainerFactory_CreateRegisterMiniBlocksFailsShouldErr(t *tes createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) container, err := rcf.Create() @@ -368,6 +401,7 @@ func TestResolversContainerFactory_CreateRegisterPeerChBlocksFailsShouldErr(t *t createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) container, err := rcf.Create() @@ -387,6 +421,7 @@ func TestResolversContainerFactory_CreateShouldWork(t *testing.T) { createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) container, err := rcf.Create() @@ -412,6 +447,7 @@ func TestResolversContainerFactory_With4ShardsShouldWork(t *testing.T) { createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.P2PAntifloodHandlerStub{}, ) container, _ := rcf.Create() diff --git a/dataRetriever/interface.go b/dataRetriever/interface.go index b4773edb3bd..403e6443a54 100644 --- a/dataRetriever/interface.go +++ 
b/dataRetriever/interface.go @@ -268,3 +268,10 @@ type RequestedItemsHandler interface { Sweep() IsInterfaceNil() bool } + +// P2PAntifloodHandler defines the behavior of a component able to signal that the system is too busy (or flooded) processing +// p2p messages +type P2PAntifloodHandler interface { + CanProcessMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error + IsInterfaceNil() bool +} diff --git a/dataRetriever/mock/p2pAntifloodHandlerStub.go b/dataRetriever/mock/p2pAntifloodHandlerStub.go new file mode 100644 index 00000000000..e8236ed9167 --- /dev/null +++ b/dataRetriever/mock/p2pAntifloodHandlerStub.go @@ -0,0 +1,15 @@ +package mock + +import "github.com/ElrondNetwork/elrond-go/p2p" + +type P2PAntifloodHandlerStub struct { + CanProcessMessageCalled func(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error +} + +func (p2pahs *P2PAntifloodHandlerStub) CanProcessMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { + return p2pahs.CanProcessMessageCalled(message, fromConnectedPeer) +} + +func (p2pahs *P2PAntifloodHandlerStub) IsInterfaceNil() bool { + return p2pahs == nil +} diff --git a/dataRetriever/resolvers/genericBlockBodyResolver.go b/dataRetriever/resolvers/genericBlockBodyResolver.go index f45052f2062..4f1d6d96bab 100644 --- a/dataRetriever/resolvers/genericBlockBodyResolver.go +++ b/dataRetriever/resolvers/genericBlockBodyResolver.go @@ -1,6 +1,7 @@ package resolvers import ( + "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/marshal" @@ -14,6 +15,7 @@ type genericBlockBodyResolver struct { miniBlockPool storage.Cacher miniBlockStorage storage.Storer marshalizer marshal.Marshalizer + antifloodHandler dataRetriever.P2PAntifloodHandler } // NewGenericBlockBodyResolver creates a new block body resolver @@ -22,29 +24,31 @@ func NewGenericBlockBodyResolver( miniBlockPool 
storage.Cacher, miniBlockStorage storage.Storer, marshalizer marshal.Marshalizer, + antifloodHandler dataRetriever.P2PAntifloodHandler, ) (*genericBlockBodyResolver, error) { - if senderResolver == nil || senderResolver.IsInterfaceNil() { + if check.IfNil(senderResolver) { return nil, dataRetriever.ErrNilResolverSender } - - if miniBlockPool == nil || miniBlockPool.IsInterfaceNil() { + if check.IfNil(miniBlockPool) { return nil, dataRetriever.ErrNilBlockBodyPool } - - if miniBlockStorage == nil || miniBlockStorage.IsInterfaceNil() { + if check.IfNil(miniBlockStorage) { return nil, dataRetriever.ErrNilBlockBodyStorage } - - if marshalizer == nil || marshalizer.IsInterfaceNil() { + if check.IfNil(marshalizer) { return nil, dataRetriever.ErrNilMarshalizer } + if check.IfNil(antifloodHandler) { + return nil, dataRetriever.ErrNilAntifloodHandler + } bbResolver := &genericBlockBodyResolver{ TopicResolverSender: senderResolver, miniBlockPool: miniBlockPool, miniBlockStorage: miniBlockStorage, marshalizer: marshalizer, + antifloodHandler: antifloodHandler, } return bbResolver, nil @@ -52,9 +56,14 @@ func NewGenericBlockBodyResolver( // ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to, usually a request topic) -func (gbbRes *genericBlockBodyResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { +func (gbbRes *genericBlockBodyResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { + err := gbbRes.antifloodHandler.CanProcessMessage(message, fromConnectedPeer) + if err != nil { + return err + } + rd := &dataRetriever.RequestData{} - err := rd.Unmarshal(gbbRes.marshalizer, message) + err = rd.Unmarshal(gbbRes.marshalizer, message) if err != nil { return err } @@ -212,8 +221,5 @@ func (gbbRes *genericBlockBodyResolver) getMiniBlocksFromStorer(hashes [][]byte) //
IsInterfaceNil returns true if there is no value under the interface func (gbbRes *genericBlockBodyResolver) IsInterfaceNil() bool { - if gbbRes == nil { - return true - } - return false + return gbbRes == nil } diff --git a/dataRetriever/resolvers/genericBlockBodyResolver_test.go b/dataRetriever/resolvers/genericBlockBodyResolver_test.go index 691eadb701e..9d992826c73 100644 --- a/dataRetriever/resolvers/genericBlockBodyResolver_test.go +++ b/dataRetriever/resolvers/genericBlockBodyResolver_test.go @@ -15,6 +15,14 @@ import ( var fromConnectedPeerId = p2p.PeerID("from connected peer Id") +func createMockP2pAntifloodHandler() *mock.P2PAntifloodHandlerStub { + return &mock.P2PAntifloodHandlerStub{ + CanProcessMessageCalled: func(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { + return nil + }, + } +} + //------- NewBlockBodyResolver func TestNewGenericBlockBodyResolver_NilSenderResolverShouldErr(t *testing.T) { @@ -25,6 +33,7 @@ func TestNewGenericBlockBodyResolver_NilSenderResolverShouldErr(t *testing.T) { &mock.CacherStub{}, &mock.StorerStub{}, &mock.MarshalizerMock{}, + createMockP2pAntifloodHandler(), ) assert.Equal(t, dataRetriever.ErrNilResolverSender, err) @@ -39,6 +48,7 @@ func TestNewGenericBlockBodyResolver_NilBlockBodyPoolShouldErr(t *testing.T) { nil, &mock.StorerStub{}, &mock.MarshalizerMock{}, + createMockP2pAntifloodHandler(), ) assert.Equal(t, dataRetriever.ErrNilBlockBodyPool, err) @@ -53,6 +63,7 @@ func TestNewGenericBlockBodyResolver_NilBlockBodyStorageShouldErr(t *testing.T) &mock.CacherStub{}, nil, &mock.MarshalizerMock{}, + createMockP2pAntifloodHandler(), ) assert.Equal(t, dataRetriever.ErrNilBlockBodyStorage, err) @@ -67,12 +78,28 @@ func TestNewGenericBlockBodyResolver_NilBlockMArshalizerShouldErr(t *testing.T) &mock.CacherStub{}, &mock.StorerStub{}, nil, + createMockP2pAntifloodHandler(), ) assert.Equal(t, dataRetriever.ErrNilMarshalizer, err) assert.Nil(t, gbbRes) } +func 
TestNewGenericBlockBodyResolver_NilAntifloodHandlerShouldErr(t *testing.T) { + t.Parallel() + + gbbRes, err := resolvers.NewGenericBlockBodyResolver( + &mock.TopicResolverSenderStub{}, + &mock.CacherStub{}, + &mock.StorerStub{}, + &mock.MarshalizerMock{}, + nil, + ) + + assert.Equal(t, dataRetriever.ErrNilAntifloodHandler, err) + assert.Nil(t, gbbRes) +} + func TestNewGenericBlockBodyResolver_OkValsShouldWork(t *testing.T) { t.Parallel() @@ -81,6 +108,7 @@ func TestNewGenericBlockBodyResolver_OkValsShouldWork(t *testing.T) { &mock.CacherStub{}, &mock.StorerStub{}, &mock.MarshalizerMock{}, + createMockP2pAntifloodHandler(), ) assert.Nil(t, err) @@ -89,6 +117,26 @@ func TestNewGenericBlockBodyResolver_OkValsShouldWork(t *testing.T) { //------- ProcessReceivedMessage +func TestNewGenericBlockBodyResolver_ProcessReceivedAntifloodErrorsShouldErr(t *testing.T) { + t.Parallel() + + errExpected := errors.New("expected error") + gbbRes, _ := resolvers.NewGenericBlockBodyResolver( + &mock.TopicResolverSenderStub{}, + &mock.CacherStub{}, + &mock.StorerStub{}, + &mock.MarshalizerMock{}, + &mock.P2PAntifloodHandlerStub{ + CanProcessMessageCalled: func(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { + return errExpected + }, + }, + ) + + err := gbbRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, nil), fromConnectedPeerId) + assert.Equal(t, errExpected, err) +} + func TestNewGenericBlockBodyResolver_ProcessReceivedMessageNilValueShouldErr(t *testing.T) { t.Parallel() @@ -97,6 +145,7 @@ func TestNewGenericBlockBodyResolver_ProcessReceivedMessageNilValueShouldErr(t * &mock.CacherStub{}, &mock.StorerStub{}, &mock.MarshalizerMock{}, + createMockP2pAntifloodHandler(), ) err := gbbRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, nil), fromConnectedPeerId) @@ -111,6 +160,7 @@ func TestGenericBlockBodyResolver_ProcessReceivedMessageWrongTypeShouldErr(t *te &mock.CacherStub{}, &mock.StorerStub{}, &mock.MarshalizerMock{}, + 
createMockP2pAntifloodHandler(), ) err := gbbRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, make([]byte, 0)), fromConnectedPeerId) @@ -153,6 +203,7 @@ func TestGenericBlockBodyResolver_ProcessReceivedMessageFoundInPoolShouldRetValA }, }, marshalizer, + createMockP2pAntifloodHandler(), ) err := gbbRes.ProcessReceivedMessage( @@ -204,6 +255,7 @@ func TestGenericBlockBodyResolver_ProcessReceivedMessageFoundInPoolMarshalizerFa }, }, marshalizer, + createMockP2pAntifloodHandler(), ) err := gbbRes.ProcessReceivedMessage( @@ -249,6 +301,7 @@ func TestGenericBlockBodyResolver_ProcessReceivedMessageNotFoundInPoolShouldRetF cache, store, marshalizer, + createMockP2pAntifloodHandler(), ) err := gbbRes.ProcessReceivedMessage( @@ -292,6 +345,7 @@ func TestGenericBlockBodyResolver_ProcessReceivedMessageMissingDataShouldNotSend cache, store, marshalizer, + createMockP2pAntifloodHandler(), ) _ = gbbRes.ProcessReceivedMessage( @@ -321,6 +375,7 @@ func TestBlockBodyResolver_RequestDataFromHashShouldWork(t *testing.T) { &mock.CacherStub{}, &mock.StorerStub{}, &mock.MarshalizerMock{}, + createMockP2pAntifloodHandler(), ) assert.Nil(t, gbbRes.RequestDataFromHash(buffRequested)) diff --git a/dataRetriever/resolvers/headerResolver.go b/dataRetriever/resolvers/headerResolver.go index a3fc7e7d9c0..78ac10b1f3a 100644 --- a/dataRetriever/resolvers/headerResolver.go +++ b/dataRetriever/resolvers/headerResolver.go @@ -1,6 +1,7 @@ package resolvers import ( + "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/data/typeConverters" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/logger" @@ -20,6 +21,7 @@ type HeaderResolver struct { hdrNoncesStorage storage.Storer marshalizer marshal.Marshalizer nonceConverter typeConverters.Uint64ByteSliceConverter + antifloodHandler dataRetriever.P2PAntifloodHandler } // NewHeaderResolver creates a new header resolver @@ -31,29 +33,33 @@ func NewHeaderResolver( 
headersNoncesStorage storage.Storer, marshalizer marshal.Marshalizer, nonceConverter typeConverters.Uint64ByteSliceConverter, + antifloodHandler dataRetriever.P2PAntifloodHandler, ) (*HeaderResolver, error) { - if senderResolver == nil || senderResolver.IsInterfaceNil() { + if check.IfNil(senderResolver) { return nil, dataRetriever.ErrNilResolverSender } - if headers == nil || headers.IsInterfaceNil() { + if check.IfNil(headers) { return nil, dataRetriever.ErrNilHeadersDataPool } - if headersNonces == nil || headersNonces.IsInterfaceNil() { + if check.IfNil(headersNonces) { return nil, dataRetriever.ErrNilHeadersNoncesDataPool } if hdrStorage == nil || hdrStorage.IsInterfaceNil() { return nil, dataRetriever.ErrNilHeadersStorage } - if headersNoncesStorage == nil || headersNoncesStorage.IsInterfaceNil() { + if check.IfNil(headersNoncesStorage) { return nil, dataRetriever.ErrNilHeadersNoncesStorage } - if marshalizer == nil || marshalizer.IsInterfaceNil() { + if check.IfNil(marshalizer) { return nil, dataRetriever.ErrNilMarshalizer } - if nonceConverter == nil || nonceConverter.IsInterfaceNil() { + if check.IfNil(nonceConverter) { return nil, dataRetriever.ErrNilUint64ByteSliceConverter } + if check.IfNil(antifloodHandler) { + return nil, dataRetriever.ErrNilAntifloodHandler + } hdrResolver := &HeaderResolver{ TopicResolverSender: senderResolver, @@ -63,6 +69,7 @@ func NewHeaderResolver( hdrNoncesStorage: headersNoncesStorage, marshalizer: marshalizer, nonceConverter: nonceConverter, + antifloodHandler: antifloodHandler, } return hdrResolver, nil @@ -70,7 +77,12 @@ func NewHeaderResolver( // ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to, usually a request topic) -func (hdrRes *HeaderResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { +func (hdrRes *HeaderResolver)
ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { + err := hdrRes.antifloodHandler.CanProcessMessage(message, fromConnectedPeer) + if err != nil { + return err + } + rd, err := hdrRes.parseReceivedMessage(message) if err != nil { return err @@ -180,8 +192,5 @@ func (hdrRes *HeaderResolver) RequestDataFromNonce(nonce uint64) error { // IsInterfaceNil returns true if there is no value under the interface func (hdrRes *HeaderResolver) IsInterfaceNil() bool { - if hdrRes == nil { - return true - } - return false + return hdrRes == nil } diff --git a/dataRetriever/resolvers/headerResolver_test.go b/dataRetriever/resolvers/headerResolver_test.go index 3865cbf4ee4..3ce1c19048a 100644 --- a/dataRetriever/resolvers/headerResolver_test.go +++ b/dataRetriever/resolvers/headerResolver_test.go @@ -26,6 +26,7 @@ func TestNewHeaderResolver_NilSenderResolverShouldErr(t *testing.T) { &mock.StorerStub{}, &mock.MarshalizerMock{}, mock.NewNonceHashConverterMock(), + createMockP2pAntifloodHandler(), ) assert.Equal(t, dataRetriever.ErrNilResolverSender, err) @@ -43,6 +44,7 @@ func TestNewHeaderResolver_NilHeadersPoolShouldErr(t *testing.T) { &mock.StorerStub{}, &mock.MarshalizerMock{}, mock.NewNonceHashConverterMock(), + createMockP2pAntifloodHandler(), ) assert.Equal(t, dataRetriever.ErrNilHeadersDataPool, err) @@ -60,6 +62,7 @@ func TestNewHeaderResolver_NilHeadersNoncesPoolShouldErr(t *testing.T) { &mock.StorerStub{}, &mock.MarshalizerMock{}, mock.NewNonceHashConverterMock(), + createMockP2pAntifloodHandler(), ) assert.Equal(t, dataRetriever.ErrNilHeadersNoncesDataPool, err) @@ -77,6 +80,7 @@ func TestNewHeaderResolver_NilHeadersStorageShouldErr(t *testing.T) { &mock.StorerStub{}, &mock.MarshalizerMock{}, mock.NewNonceHashConverterMock(), + createMockP2pAntifloodHandler(), ) assert.Equal(t, dataRetriever.ErrNilHeadersStorage, err) @@ -94,6 +98,7 @@ func TestNewHeaderResolver_NilHeadersNoncesStorageShouldErr(t *testing.T) { nil, 
&mock.MarshalizerMock{}, mock.NewNonceHashConverterMock(), + createMockP2pAntifloodHandler(), ) assert.Equal(t, dataRetriever.ErrNilHeadersNoncesStorage, err) @@ -111,6 +116,7 @@ func TestNewHeaderResolver_NilMarshalizerShouldErr(t *testing.T) { &mock.StorerStub{}, nil, mock.NewNonceHashConverterMock(), + createMockP2pAntifloodHandler(), ) assert.Equal(t, dataRetriever.ErrNilMarshalizer, err) @@ -128,12 +134,31 @@ func TestNewHeaderResolver_NilNonceConverterShouldErr(t *testing.T) { &mock.StorerStub{}, &mock.MarshalizerMock{}, nil, + createMockP2pAntifloodHandler(), ) assert.Equal(t, dataRetriever.ErrNilUint64ByteSliceConverter, err) assert.Nil(t, hdrRes) } +func TestNewHeaderResolver_NilAntifloodHandlerShouldErr(t *testing.T) { + t.Parallel() + + hdrRes, err := resolvers.NewHeaderResolver( + &mock.TopicResolverSenderStub{}, + &mock.CacherStub{}, + &mock.Uint64SyncMapCacherStub{}, + &mock.StorerStub{}, + &mock.StorerStub{}, + &mock.MarshalizerMock{}, + mock.NewNonceHashConverterMock(), + nil, + ) + + assert.Equal(t, dataRetriever.ErrNilAntifloodHandler, err) + assert.Nil(t, hdrRes) +} + func TestNewHeaderResolver_OkValsShouldWork(t *testing.T) { t.Parallel() @@ -145,6 +170,7 @@ func TestNewHeaderResolver_OkValsShouldWork(t *testing.T) { &mock.StorerStub{}, &mock.MarshalizerMock{}, mock.NewNonceHashConverterMock(), + createMockP2pAntifloodHandler(), ) assert.NotNil(t, hdrRes) @@ -153,6 +179,29 @@ func TestNewHeaderResolver_OkValsShouldWork(t *testing.T) { //------- ProcessReceivedMessage +func TestHeaderResolver_ProcessReceivedAntifloodErrorsShouldErr(t *testing.T) { + t.Parallel() + + errExpected := errors.New("expected error") + hdrRes, _ := resolvers.NewHeaderResolver( + &mock.TopicResolverSenderStub{}, + &mock.CacherStub{}, + &mock.Uint64SyncMapCacherStub{}, + &mock.StorerStub{}, + &mock.StorerStub{}, + &mock.MarshalizerMock{}, + mock.NewNonceHashConverterMock(), + &mock.P2PAntifloodHandlerStub{ + CanProcessMessageCalled: func(message p2p.MessageP2P, 
fromConnectedPeer p2p.PeerID) error { + return errExpected + }, + }, + ) + + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, nil), fromConnectedPeerId) + assert.Equal(t, errExpected, err) +} + func TestHeaderResolver_ProcessReceivedMessageNilValueShouldErr(t *testing.T) { t.Parallel() @@ -164,6 +213,7 @@ func TestHeaderResolver_ProcessReceivedMessageNilValueShouldErr(t *testing.T) { &mock.StorerStub{}, &mock.MarshalizerMock{}, mock.NewNonceHashConverterMock(), + createMockP2pAntifloodHandler(), ) err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, nil), fromConnectedPeerId) @@ -181,6 +231,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestUnknownTypeShouldErr(t *tes &mock.StorerStub{}, &mock.MarshalizerMock{}, mock.NewNonceHashConverterMock(), + createMockP2pAntifloodHandler(), ) err := hdrRes.ProcessReceivedMessage(createRequestMsg(254, make([]byte, 0)), fromConnectedPeerId) @@ -221,6 +272,7 @@ func TestHeaderResolver_ValidateRequestHashTypeFoundInHdrPoolShouldSearchAndSend &mock.StorerStub{}, marshalizer, mock.NewNonceHashConverterMock(), + createMockP2pAntifloodHandler(), ) err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedData), fromConnectedPeerId) @@ -268,6 +320,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestHashTypeFoundInHdrPoolMarsh &mock.StorerStub{}, marshalizerStub, mock.NewNonceHashConverterMock(), + createMockP2pAntifloodHandler(), ) err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedData), fromConnectedPeerId) @@ -313,6 +366,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestRetFromStorageShouldRetValA &mock.StorerStub{}, marshalizer, mock.NewNonceHashConverterMock(), + createMockP2pAntifloodHandler(), ) err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedData), fromConnectedPeerId) @@ -336,6 +390,7 @@ func 
TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeInvalidSliceShould }, &mock.MarshalizerMock{}, mock.NewNonceHashConverterMock(), + createMockP2pAntifloodHandler(), ) err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, []byte("aaa")), fromConnectedPeerId) @@ -373,6 +428,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeNotFoundInHdrNonce }, &mock.MarshalizerMock{}, nonceConverter, + createMockP2pAntifloodHandler(), ) err := hdrRes.ProcessReceivedMessage( @@ -437,6 +493,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo }, marshalizer, nonceConverter, + createMockP2pAntifloodHandler(), ) err := hdrRes.ProcessReceivedMessage( @@ -508,6 +565,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo }, marshalizer, nonceConverter, + createMockP2pAntifloodHandler(), ) err := hdrRes.ProcessReceivedMessage( @@ -576,6 +634,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo }, marshalizer, nonceConverter, + createMockP2pAntifloodHandler(), ) err := hdrRes.ProcessReceivedMessage( @@ -613,6 +672,7 @@ func TestHeaderResolver_RequestDataFromNonceShouldWork(t *testing.T) { &mock.StorerStub{}, &mock.MarshalizerMock{}, nonceConverter, + createMockP2pAntifloodHandler(), ) assert.Nil(t, hdrRes.RequestDataFromNonce(nonceRequested)) @@ -641,6 +701,7 @@ func TestHeaderResolverBase_RequestDataFromHashShouldWork(t *testing.T) { &mock.StorerStub{}, &mock.MarshalizerMock{}, nonceConverter, + createMockP2pAntifloodHandler(), ) assert.Nil(t, hdrResBase.RequestDataFromHash(buffRequested)) diff --git a/dataRetriever/resolvers/transactionResolver.go b/dataRetriever/resolvers/transactionResolver.go index 4a6b75e425d..06be4f91ce2 100644 --- a/dataRetriever/resolvers/transactionResolver.go +++ b/dataRetriever/resolvers/transactionResolver.go @@ -1,6 +1,7 @@ package resolvers import ( + "github.com/ElrondNetwork/elrond-go/core/check" 
"github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/p2p" @@ -13,10 +14,11 @@ var maxBuffToSendBulkTransactions = 2 << 17 //128KB // TxResolver is a wrapper over Resolver that is specialized in resolving transaction requests type TxResolver struct { dataRetriever.TopicResolverSender - txPool dataRetriever.ShardedDataCacherNotifier - txStorage storage.Storer - marshalizer marshal.Marshalizer - dataPacker dataRetriever.DataPacker + txPool dataRetriever.ShardedDataCacherNotifier + txStorage storage.Storer + marshalizer marshal.Marshalizer + dataPacker dataRetriever.DataPacker + antifloodHandler dataRetriever.P2PAntifloodHandler } // NewTxResolver creates a new transaction resolver @@ -26,23 +28,27 @@ func NewTxResolver( txStorage storage.Storer, marshalizer marshal.Marshalizer, dataPacker dataRetriever.DataPacker, + antifloodHandler dataRetriever.P2PAntifloodHandler, ) (*TxResolver, error) { - if senderResolver == nil || senderResolver.IsInterfaceNil() { + if check.IfNil(senderResolver) { return nil, dataRetriever.ErrNilResolverSender } - if txPool == nil || txPool.IsInterfaceNil() { + if check.IfNil(txPool) { return nil, dataRetriever.ErrNilTxDataPool } - if txStorage == nil || txStorage.IsInterfaceNil() { + if check.IfNil(txStorage) { return nil, dataRetriever.ErrNilTxStorage } - if marshalizer == nil || marshalizer.IsInterfaceNil() { + if check.IfNil(marshalizer) { return nil, dataRetriever.ErrNilMarshalizer } - if dataPacker == nil || dataPacker.IsInterfaceNil() { + if check.IfNil(dataPacker) { return nil, dataRetriever.ErrNilDataPacker } + if check.IfNil(antifloodHandler) { + return nil, dataRetriever.ErrNilAntifloodHandler + } txResolver := &TxResolver{ TopicResolverSender: senderResolver, @@ -50,6 +56,7 @@ func NewTxResolver( txStorage: txStorage, marshalizer: marshalizer, dataPacker: dataPacker, + antifloodHandler: antifloodHandler, } return txResolver, nil @@ -57,9 +64,14 @@ func 
NewTxResolver( // ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to, usually a request topic) -func (txRes *TxResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { +func (txRes *TxResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { + err := txRes.antifloodHandler.CanProcessMessage(message, fromConnectedPeer) + if err != nil { + return err + } + rd := &dataRetriever.RequestData{} - err := rd.Unmarshal(txRes.marshalizer, message) + err = rd.Unmarshal(txRes.marshalizer, message) if err != nil { return err } @@ -170,8 +182,5 @@ func (txRes *TxResolver) RequestDataFromHashArray(hashes [][]byte) error { // IsInterfaceNil returns true if there is no value under the interface func (txRes *TxResolver) IsInterfaceNil() bool { - if txRes == nil { - return true - } - return false + return txRes == nil } diff --git a/dataRetriever/resolvers/transactionResolver_test.go b/dataRetriever/resolvers/transactionResolver_test.go index 0b0d71eaac5..36b6f23a1a4 100644 --- a/dataRetriever/resolvers/transactionResolver_test.go +++ b/dataRetriever/resolvers/transactionResolver_test.go @@ -1,4 +1,4 @@ -package resolvers +package resolvers_test import ( "bytes" @@ -8,6 +8,7 @@ import ( "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/dataRetriever/mock" + "github.com/ElrondNetwork/elrond-go/dataRetriever/resolvers" "github.com/ElrondNetwork/elrond-go/p2p" "github.com/stretchr/testify/assert" ) @@ -19,12 +20,13 @@ var connectedPeerId = p2p.PeerID("connected peer id") func TestNewTxResolver_NilResolverShouldErr(t *testing.T) { t.Parallel() - txRes, err := NewTxResolver( + txRes, err := resolvers.NewTxResolver( nil, &mock.ShardedDataStub{}, &mock.StorerStub{}, &mock.MarshalizerMock{},
&mock.DataPackerStub{}, + createMockP2pAntifloodHandler(), ) assert.Equal(t, dataRetriever.ErrNilResolverSender, err) @@ -34,12 +36,13 @@ func TestNewTxResolver_NilResolverShouldErr(t *testing.T) { func TestNewTxResolver_NilTxPoolShouldErr(t *testing.T) { t.Parallel() - txRes, err := NewTxResolver( + txRes, err := resolvers.NewTxResolver( &mock.TopicResolverSenderStub{}, nil, &mock.StorerStub{}, &mock.MarshalizerMock{}, &mock.DataPackerStub{}, + createMockP2pAntifloodHandler(), ) assert.Equal(t, dataRetriever.ErrNilTxDataPool, err) @@ -49,12 +52,13 @@ func TestNewTxResolver_NilTxPoolShouldErr(t *testing.T) { func TestNewTxResolver_NilTxStorageShouldErr(t *testing.T) { t.Parallel() - txRes, err := NewTxResolver( + txRes, err := resolvers.NewTxResolver( &mock.TopicResolverSenderStub{}, &mock.ShardedDataStub{}, nil, &mock.MarshalizerMock{}, &mock.DataPackerStub{}, + createMockP2pAntifloodHandler(), ) assert.Equal(t, dataRetriever.ErrNilTxStorage, err) @@ -64,12 +68,13 @@ func TestNewTxResolver_NilTxStorageShouldErr(t *testing.T) { func TestNewTxResolver_NilMarshalizerShouldErr(t *testing.T) { t.Parallel() - txRes, err := NewTxResolver( + txRes, err := resolvers.NewTxResolver( &mock.TopicResolverSenderStub{}, &mock.ShardedDataStub{}, &mock.StorerStub{}, nil, &mock.DataPackerStub{}, + createMockP2pAntifloodHandler(), ) assert.Equal(t, dataRetriever.ErrNilMarshalizer, err) @@ -81,29 +86,49 @@ func TestNewTxResolver_NilDataPackerShouldErr(t *testing.T) { res := &mock.TopicResolverSenderStub{} - txRes, err := NewTxResolver( + txRes, err := resolvers.NewTxResolver( res, &mock.ShardedDataStub{}, &mock.StorerStub{}, &mock.MarshalizerMock{}, nil, + createMockP2pAntifloodHandler(), ) assert.Equal(t, dataRetriever.ErrNilDataPacker, err) assert.Nil(t, txRes) } +func TestNewTxResolver_NilAntifloodHandlerShouldErr(t *testing.T) { + t.Parallel() + + res := &mock.TopicResolverSenderStub{} + + txRes, err := resolvers.NewTxResolver( + res, + &mock.ShardedDataStub{}, + 
&mock.StorerStub{}, + &mock.MarshalizerMock{}, + &mock.DataPackerStub{}, + nil, + ) + + assert.Equal(t, dataRetriever.ErrNilAntifloodHandler, err) + assert.Nil(t, txRes) +} + func TestNewTxResolver_OkValsShouldWork(t *testing.T) { t.Parallel() res := &mock.TopicResolverSenderStub{} - txRes, err := NewTxResolver( + txRes, err := resolvers.NewTxResolver( res, &mock.ShardedDataStub{}, &mock.StorerStub{}, &mock.MarshalizerMock{}, &mock.DataPackerStub{}, + createMockP2pAntifloodHandler(), ) assert.Nil(t, err) @@ -112,15 +137,38 @@ func TestNewTxResolver_OkValsShouldWork(t *testing.T) { //------- ProcessReceivedMessage +func TestTxResolver_ProcessReceivedMessageAntifloodHandlerErrorsShouldErr(t *testing.T) { + t.Parallel() + + errExpected := errors.New("expected error") + txRes, _ := resolvers.NewTxResolver( + &mock.TopicResolverSenderStub{}, + &mock.ShardedDataStub{}, + &mock.StorerStub{}, + &mock.MarshalizerMock{}, + &mock.DataPackerStub{}, + &mock.P2PAntifloodHandlerStub{ + CanProcessMessageCalled: func(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { + return errExpected + }, + }, + ) + + err := txRes.ProcessReceivedMessage(nil, connectedPeerId) + + assert.Equal(t, errExpected, err) +} + func TestTxResolver_ProcessReceivedMessageNilMessageShouldErr(t *testing.T) { t.Parallel() - txRes, _ := NewTxResolver( + txRes, _ := resolvers.NewTxResolver( &mock.TopicResolverSenderStub{}, &mock.ShardedDataStub{}, &mock.StorerStub{}, &mock.MarshalizerMock{}, &mock.DataPackerStub{}, + createMockP2pAntifloodHandler(), ) err := txRes.ProcessReceivedMessage(nil, connectedPeerId) @@ -133,12 +181,13 @@ func TestTxResolver_ProcessReceivedMessageWrongTypeShouldErr(t *testing.T) { marshalizer := &mock.MarshalizerMock{} - txRes, _ := NewTxResolver( + txRes, _ := resolvers.NewTxResolver( &mock.TopicResolverSenderStub{}, &mock.ShardedDataStub{}, &mock.StorerStub{}, marshalizer, &mock.DataPackerStub{}, + createMockP2pAntifloodHandler(), ) data, _ := 
marshalizer.Marshal(&dataRetriever.RequestData{Type: dataRetriever.NonceType, Value: []byte("aaa")}) @@ -155,12 +204,13 @@ func TestTxResolver_ProcessReceivedMessageNilValueShouldErr(t *testing.T) { marshalizer := &mock.MarshalizerMock{} - txRes, _ := NewTxResolver( + txRes, _ := resolvers.NewTxResolver( &mock.TopicResolverSenderStub{}, &mock.ShardedDataStub{}, &mock.StorerStub{}, marshalizer, &mock.DataPackerStub{}, + createMockP2pAntifloodHandler(), ) data, _ := marshalizer.Marshal(&dataRetriever.RequestData{Type: dataRetriever.HashType, Value: nil}) @@ -191,7 +241,7 @@ func TestTxResolver_ProcessReceivedMessageFoundInTxPoolShouldSearchAndSend(t *te return nil, false } - txRes, _ := NewTxResolver( + txRes, _ := resolvers.NewTxResolver( &mock.TopicResolverSenderStub{ SendCalled: func(buff []byte, peer p2p.PeerID) error { sendWasCalled = true @@ -202,6 +252,7 @@ func TestTxResolver_ProcessReceivedMessageFoundInTxPoolShouldSearchAndSend(t *te &mock.StorerStub{}, marshalizer, &mock.DataPackerStub{}, + createMockP2pAntifloodHandler(), ) data, _ := marshalizer.Marshal(&dataRetriever.RequestData{Type: dataRetriever.HashType, Value: []byte("aaa")}) @@ -241,12 +292,13 @@ func TestTxResolver_ProcessReceivedMessageFoundInTxPoolMarshalizerFailShouldRetN return nil, false } - txRes, _ := NewTxResolver( + txRes, _ := resolvers.NewTxResolver( &mock.TopicResolverSenderStub{}, txPool, &mock.StorerStub{}, marshalizerStub, &mock.DataPackerStub{}, + createMockP2pAntifloodHandler(), ) data, _ := marshalizerMock.Marshal(&dataRetriever.RequestData{Type: dataRetriever.HashType, Value: []byte("aaa")}) @@ -284,7 +336,7 @@ func TestTxResolver_ProcessReceivedMessageFoundInTxStorageShouldRetValAndSend(t return nil, nil } - txRes, _ := NewTxResolver( + txRes, _ := resolvers.NewTxResolver( &mock.TopicResolverSenderStub{ SendCalled: func(buff []byte, peer p2p.PeerID) error { sendWasCalled = true @@ -295,6 +347,7 @@ func TestTxResolver_ProcessReceivedMessageFoundInTxStorageShouldRetValAndSend(t 
txStorage, marshalizer, &mock.DataPackerStub{}, + createMockP2pAntifloodHandler(), ) data, _ := marshalizer.Marshal(&dataRetriever.RequestData{Type: dataRetriever.HashType, Value: []byte("aaa")}) @@ -330,12 +383,13 @@ func TestTxResolver_ProcessReceivedMessageFoundInTxStorageCheckRetError(t *testi return nil, nil } - txRes, _ := NewTxResolver( + txRes, _ := resolvers.NewTxResolver( &mock.TopicResolverSenderStub{}, txPool, txStorage, marshalizer, &mock.DataPackerStub{}, + createMockP2pAntifloodHandler(), ) data, _ := marshalizer.Marshal(&dataRetriever.RequestData{Type: dataRetriever.HashType, Value: []byte("aaa")}) @@ -375,7 +429,7 @@ func TestTxResolver_ProcessReceivedMessageRequestedTwoSmallTransactionsShouldCal } sendSliceWasCalled := false - txRes, _ := NewTxResolver( + txRes, _ := resolvers.NewTxResolver( &mock.TopicResolverSenderStub{ SendCalled: func(buff []byte, peer p2p.PeerID) error { return nil @@ -394,6 +448,7 @@ func TestTxResolver_ProcessReceivedMessageRequestedTwoSmallTransactionsShouldCal return make([][]byte, 0), nil }, }, + createMockP2pAntifloodHandler(), ) buff, _ := marshalizer.Marshal([][]byte{txHash1, txHash2}) @@ -422,12 +477,13 @@ func TestTxResolver_RequestDataFromHashShouldWork(t *testing.T) { buffRequested := []byte("aaaa") - txRes, _ := NewTxResolver( + txRes, _ := resolvers.NewTxResolver( res, &mock.ShardedDataStub{}, &mock.StorerStub{}, &mock.MarshalizerMock{}, &mock.DataPackerStub{}, + createMockP2pAntifloodHandler(), ) assert.Nil(t, txRes.RequestDataFromHash(buffRequested)) @@ -454,12 +510,13 @@ func TestTxResolver_RequestDataFromHashArrayShouldWork(t *testing.T) { buffRequested := [][]byte{[]byte("aaaa"), []byte("bbbb")} marshalizer := &mock.MarshalizerMock{} - txRes, _ := NewTxResolver( + txRes, _ := resolvers.NewTxResolver( res, &mock.ShardedDataStub{}, &mock.StorerStub{}, marshalizer, &mock.DataPackerStub{}, + createMockP2pAntifloodHandler(), ) buff, _ := marshalizer.Marshal(buffRequested) diff --git a/node/heartbeat/errors.go 
b/node/heartbeat/errors.go index c041744030b..9345dd97774 100644 --- a/node/heartbeat/errors.go +++ b/node/heartbeat/errors.go @@ -61,3 +61,6 @@ var ErrMarshalGenesisTime = errors.New("monitor: can't marshal genesis time") // ErrPropertyTooLong signals that one of the properties is too long var ErrPropertyTooLong = errors.New("property too long in Heartbeat") + +// ErrNilAntifloodHandler signals that a nil antiflood handler has been provided +var ErrNilAntifloodHandler = errors.New("nil antiflood handler") diff --git a/node/heartbeat/interface.go b/node/heartbeat/interface.go index 550e6914965..61251a4cb0d 100644 --- a/node/heartbeat/interface.go +++ b/node/heartbeat/interface.go @@ -34,3 +34,10 @@ type HeartbeatStorageHandler interface { SaveKeys(peersSlice [][]byte) error IsInterfaceNil() bool } + +// P2PAntifloodHandler defines the behavior of a component able to signal that the system is too busy (or flooded) processing +// p2p messages +type P2PAntifloodHandler interface { + CanProcessMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error + IsInterfaceNil() bool +} diff --git a/node/heartbeat/monitor.go b/node/heartbeat/monitor.go index 096948e4f31..28cc0bbf4c9 100644 --- a/node/heartbeat/monitor.go +++ b/node/heartbeat/monitor.go @@ -9,6 +9,7 @@ import ( "time" "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/logger" "github.com/ElrondNetwork/elrond-go/marshal" @@ -33,6 +34,7 @@ type Monitor struct { messageHandler MessageHandler storer HeartbeatStorageHandler timer Timer + antifloodHandler P2PAntifloodHandler } // NewMonitor returns a new monitor instance @@ -44,23 +46,27 @@ func NewMonitor( messageHandler MessageHandler, storer HeartbeatStorageHandler, timer Timer, + antifloodHandler P2PAntifloodHandler, ) (*Monitor, error) { - if marshalizer == nil || marshalizer.IsInterfaceNil() { + if check.IfNil(marshalizer) { return nil, ErrNilMarshalizer } if len(pubKeysMap) == 
0 { return nil, ErrEmptyPublicKeysMap } - if messageHandler == nil || messageHandler.IsInterfaceNil() { + if check.IfNil(messageHandler) { return nil, ErrNilMessageHandler } - if storer == nil || storer.IsInterfaceNil() { + if check.IfNil(storer) { return nil, ErrNilHeartbeatStorer } - if timer == nil || timer.IsInterfaceNil() { + if check.IfNil(timer) { return nil, ErrNilTimer } + if check.IfNil(antifloodHandler) { + return nil, ErrNilAntifloodHandler + } mon := &Monitor{ marshalizer: marshalizer, @@ -71,6 +77,7 @@ func NewMonitor( messageHandler: messageHandler, storer: storer, timer: timer, + antifloodHandler: antifloodHandler, } err := mon.storer.UpdateGenesisTime(genesisTime) @@ -201,7 +208,12 @@ func (m *Monitor) SetAppStatusHandler(ash core.AppStatusHandler) error { // ProcessReceivedMessage satisfies the p2p.MessageProcessor interface so it can be called // by the p2p subsystem each time a new heartbeat message arrives -func (m *Monitor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { +func (m *Monitor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { + err := m.antifloodHandler.CanProcessMessage(message, fromConnectedPeer) + if err != nil { + return err + } + hbRecv, err := m.messageHandler.CreateHeartbeatFromP2pMessage(message) if err != nil { return err diff --git a/node/heartbeat/monitorEdgeCases_test.go b/node/heartbeat/monitorEdgeCases_test.go index 095acbe4911..08cd8e0f368 100644 --- a/node/heartbeat/monitorEdgeCases_test.go +++ b/node/heartbeat/monitorEdgeCases_test.go @@ -26,6 +26,7 @@ func createMonitor( &mock.MessageHandlerStub{}, storer, timer, + createMockP2pAntifloodHandler(), ) return mon diff --git a/node/heartbeat/monitor_test.go b/node/heartbeat/monitor_test.go index 2048f3545cb..01b32d1255f 100644 --- a/node/heartbeat/monitor_test.go +++ b/node/heartbeat/monitor_test.go @@ -16,6 +16,14 @@ import ( var fromConnectedPeerId = p2p.PeerID("from connected peer Id")
+func createMockP2pAntifloodHandler() *mock.P2PAntifloodHandlerStub { + return &mock.P2PAntifloodHandlerStub{ + CanProcessMessageCalled: func(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { + return nil + }, + } +} + //------- NewMonitor func TestNewMonitor_NilMarshalizerShouldErr(t *testing.T) { @@ -30,6 +38,7 @@ func TestNewMonitor_NilMarshalizerShouldErr(t *testing.T) { &mock.MessageHandlerStub{}, &mock.HeartbeatStorerStub{}, th, + createMockP2pAntifloodHandler(), ) assert.Nil(t, mon) @@ -48,6 +57,7 @@ func TestNewMonitor_EmptyPublicKeyListShouldErr(t *testing.T) { &mock.MessageHandlerStub{}, &mock.HeartbeatStorerStub{}, th, + createMockP2pAntifloodHandler(), ) assert.Nil(t, mon) @@ -66,6 +76,7 @@ func TestNewMonitor_NilMessageHandlerShouldErr(t *testing.T) { nil, &mock.HeartbeatStorerStub{}, th, + createMockP2pAntifloodHandler(), ) assert.Nil(t, mon) @@ -84,6 +95,7 @@ func TestNewMonitor_NilHeartbeatStorerShouldErr(t *testing.T) { &mock.MessageHandlerStub{}, nil, th, + createMockP2pAntifloodHandler(), ) assert.Nil(t, mon) @@ -101,12 +113,32 @@ func TestNewMonitor_NilTimeHandlerShouldErr(t *testing.T) { &mock.MessageHandlerStub{}, &mock.HeartbeatStorerStub{}, nil, + createMockP2pAntifloodHandler(), ) assert.Nil(t, mon) assert.Equal(t, heartbeat.ErrNilTimer, err) } +func TestNewMonitor_NilAntifloodHandlerShouldErr(t *testing.T) { + t.Parallel() + + th := mock.NewMockTimer() + mon, err := heartbeat.NewMonitor( + &mock.MarshalizerMock{}, + 0, + map[uint32][]string{0: {""}}, + time.Now(), + &mock.MessageHandlerStub{}, + &mock.HeartbeatStorerStub{}, + th, + nil, + ) + + assert.Nil(t, mon) + assert.Equal(t, heartbeat.ErrNilAntifloodHandler, err) +} + func TestNewMonitor_OkValsShouldCreatePubkeyMap(t *testing.T) { t.Parallel() @@ -132,6 +164,7 @@ func TestNewMonitor_OkValsShouldCreatePubkeyMap(t *testing.T) { }, }, th, + createMockP2pAntifloodHandler(), ) assert.NotNil(t, mon) @@ -171,6 +204,7 @@ func TestNewMonitor_ShouldComputeShardId(t *testing.T) { }, 
}, th, + createMockP2pAntifloodHandler(), ) assert.NotNil(t, mon) @@ -224,6 +258,7 @@ func TestMonitor_ProcessReceivedMessageShouldWork(t *testing.T) { }, }, th, + createMockP2pAntifloodHandler(), ) hb := heartbeat.Heartbeat{ @@ -282,6 +317,7 @@ func TestMonitor_ProcessReceivedMessageWithNewPublicKey(t *testing.T) { }, }, th, + createMockP2pAntifloodHandler(), ) hb := heartbeat.Heartbeat{ @@ -344,6 +380,7 @@ func TestMonitor_ProcessReceivedMessageWithNewShardID(t *testing.T) { }, }, th, + createMockP2pAntifloodHandler(), ) // First send from pk1 from shard 0 @@ -414,6 +451,7 @@ func TestMonitor_ProcessReceivedMessageShouldSetPeerInactive(t *testing.T) { }, storer, th, + createMockP2pAntifloodHandler(), ) // First send from pk1 diff --git a/node/interface.go b/node/interface.go index 76cd8010390..b5b7dac0da2 100644 --- a/node/interface.go +++ b/node/interface.go @@ -21,3 +21,10 @@ type P2PMessenger interface { IsConnectedToTheNetwork() bool IsInterfaceNil() bool } + +// P2PAntifloodHandler defines the behavior of a component able to signal that the system is too busy (or flooded) processing +// p2p messages +type P2PAntifloodHandler interface { + CanProcessMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error + IsInterfaceNil() bool +} diff --git a/node/mock/p2pAntifloodHandlerStub.go b/node/mock/p2pAntifloodHandlerStub.go new file mode 100644 index 00000000000..e8236ed9167 --- /dev/null +++ b/node/mock/p2pAntifloodHandlerStub.go @@ -0,0 +1,15 @@ +package mock + +import "github.com/ElrondNetwork/elrond-go/p2p" + +type P2PAntifloodHandlerStub struct { + CanProcessMessageCalled func(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error +} + +func (p2pahs *P2PAntifloodHandlerStub) CanProcessMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { + return p2pahs.CanProcessMessageCalled(message, fromConnectedPeer) +} + +func (p2pahs *P2PAntifloodHandlerStub) IsInterfaceNil() bool { + return p2pahs == nil +} From 
da7234bece4c102cccabc713957322193bb6466e Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Tue, 10 Dec 2019 13:41:20 +0200 Subject: [PATCH 12/35] minor code changes when creating fix p2p networks --- integrationTests/testInitializer.go | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 466099f4613..465441bcdb9 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -116,12 +116,7 @@ func CreateMessengerWithNoDiscovery(ctx context.Context) p2p.Messenger { // | | | // 5 6 7 func CreateFixedNetworkOf8Peers() ([]p2p.Messenger, error) { - numPeers := 7 - peers := make([]p2p.Messenger, numPeers+1) - - for i := 0; i <= numPeers; i++ { - peers[i] = CreateMessengerWithNoDiscovery(context.Background()) - } + peers := createMessengersWithNoDiscovery(8) connections := map[int][]int{ 0: {1, 3}, @@ -149,12 +144,7 @@ func CreateFixedNetworkOf8Peers() ([]p2p.Messenger, error) { // | | | | | | | | | | | // 3 4 5 6 7 8 9 10 11 12 13 func CreateFixedNetworkOf14Peers() ([]p2p.Messenger, error) { - numPeers := 13 - peers := make([]p2p.Messenger, numPeers+1) - - for i := 0; i <= numPeers; i++ { - peers[i] = CreateMessengerWithNoDiscovery(context.Background()) - } + peers := createMessengersWithNoDiscovery(14) connections := map[int][]int{ 0: {1}, @@ -170,6 +160,16 @@ func CreateFixedNetworkOf14Peers() ([]p2p.Messenger, error) { return peers, nil } +func createMessengersWithNoDiscovery(numPeers int) []p2p.Messenger { + peers := make([]p2p.Messenger, numPeers) + + for i := 0; i < numPeers; i++ { + peers[i] = CreateMessengerWithNoDiscovery(context.Background()) + } + + return peers +} + func createConnections(peers []p2p.Messenger, connections map[int][]int) error { for pid, connectTo := range connections { err := connectPeerToOthers(peers, pid, connectTo) From dee3527546a0830a2e95852c406f1f68516bbc96 Mon Sep 17 00:00:00 2001 
From: iulianpascalau Date: Tue, 10 Dec 2019 15:17:22 +0200 Subject: [PATCH 13/35] fixed a test in worker_test.go --- consensus/spos/worker_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/spos/worker_test.go b/consensus/spos/worker_test.go index be97c7c032d..b6e659251a8 100644 --- a/consensus/spos/worker_test.go +++ b/consensus/spos/worker_test.go @@ -872,7 +872,7 @@ func TestWorker_ProcessReceivedMessageOkValsShouldWork(t *testing.T) { 0, ) buff, _ := wrk.Marshalizer().Marshal(cnsMsg) - err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, nil) + err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, fromConnectedPeerId) time.Sleep(time.Second) assert.Equal(t, 1, len(wrk.ReceivedMessages()[bn.MtBlockHeader])) From 2ad8457d17e8f5bce43255b9a6693d9a345c3713 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Tue, 10 Dec 2019 15:51:41 +0200 Subject: [PATCH 14/35] resolved merge conflicts --- consensus/spos/worker_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/spos/worker_test.go b/consensus/spos/worker_test.go index 03df09592ba..037c98a6656 100644 --- a/consensus/spos/worker_test.go +++ b/consensus/spos/worker_test.go @@ -650,7 +650,7 @@ func TestWorker_ProcessReceivedMessageWrongHeaderShouldErr(t *testing.T) { ) buff, _ := wrk.Marshalizer().Marshal(cnsMsg) time.Sleep(time.Second) - err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, nil) + err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, fromConnectedPeerId) assert.Equal(t, process.ErrRandSeedDoesNotMatch, err) } From d57b4d0da07c365b08dc94e3fee9ee91cbf52af3 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Tue, 10 Dec 2019 20:00:55 +0200 Subject: [PATCH 15/35] finished antiflooding integration --- cmd/node/config/config.toml | 7 + cmd/node/factory/structs.go | 54 +++++- cmd/node/main.go | 3 +- config/config.go | 8 + consensus/spos/worker_test.go | 5 +- 
integrationTests/consensus/testInitializer.go | 2 +- integrationTests/mock/nilAntifloodHandler.go | 18 ++ integrationTests/testProcessorNode.go | 4 + node/defineOptions.go | 11 ++ node/defineOptions_test.go | 24 +++ node/errors.go | 3 + node/node.go | 5 + .../throttle/antiflood/quotaFloodPreventer.go | 15 -- .../antiflood/quotaFloodPreventer_test.go | 10 -- .../printQuotaStatusHandler.go | 158 ++++++++++++++++++ 15 files changed, 296 insertions(+), 31 deletions(-) create mode 100644 integrationTests/mock/nilAntifloodHandler.go create mode 100644 statusHandler/quotaStatusHandler/printQuotaStatusHandler.go diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml index 149bb8f0d4a..492972ef3da 100644 --- a/cmd/node/config/config.toml +++ b/cmd/node/config/config.toml @@ -232,6 +232,13 @@ Size = 1000 Type = "LRU" +[Antiflood] + MaxMessagesPerPeerPerSecond = 10 + MaxTotalSizePerPeerPerSecond = 10000 + [Antiflood.Cache] + Size = 5000 + Type = "LRU" + [Logger] Path = "logs" StackTraceDepth = 2 diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index 0e5413f2001..7927b229874 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -50,6 +50,7 @@ import ( "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/ntp" "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/p2p/antiflood" "github.com/ElrondNetwork/elrond-go/p2p/libp2p" factoryP2P "github.com/ElrondNetwork/elrond-go/p2p/libp2p/factory" "github.com/ElrondNetwork/elrond-go/p2p/loadBalancer" @@ -70,9 +71,11 @@ import ( "github.com/ElrondNetwork/elrond-go/process/smartContract" "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" processSync "github.com/ElrondNetwork/elrond-go/process/sync" + antiflood2 "github.com/ElrondNetwork/elrond-go/process/throttle/antiflood" "github.com/ElrondNetwork/elrond-go/process/transaction" "github.com/ElrondNetwork/elrond-go/sharding" 
"github.com/ElrondNetwork/elrond-go/statusHandler" + "github.com/ElrondNetwork/elrond-go/statusHandler/quotaStatusHandler" "github.com/ElrondNetwork/elrond-go/storage" "github.com/ElrondNetwork/elrond-go/storage/memorydb" "github.com/ElrondNetwork/elrond-go/storage/storageUnit" @@ -112,7 +115,8 @@ var timeSpanForBadHeaders = time.Minute * 2 // Network struct holds the network components of the Elrond protocol type Network struct { - NetMessenger p2p.Messenger + NetMessenger p2p.Messenger + AntifloodHandler consensus.P2PAntifloodHandler } // Core struct holds the core components of the Elrond protocol @@ -432,7 +436,7 @@ func CryptoComponentsFactory(args *cryptoComponentsFactoryArgs) (*Crypto, error) } // NetworkComponentsFactory creates the network components -func NetworkComponentsFactory(p2pConfig *config.P2PConfig, log logger.Logger, core *Core) (*Network, error) { +func NetworkComponentsFactory(p2pConfig *config.P2PConfig, config *config.Config, log logger.Logger, core *Core) (*Network, error) { var randReader io.Reader if p2pConfig.Node.Seed != "" { randReader = NewSeedRandReader(core.Hasher.Compute(p2pConfig.Node.Seed)) @@ -445,11 +449,51 @@ func NetworkComponentsFactory(p2pConfig *config.P2PConfig, log logger.Logger, co return nil, err } + antifloodHandler, err := createAntifloodComponent(config) + if err != nil { + return nil, err + } + return &Network{ - NetMessenger: netMessenger, + NetMessenger: netMessenger, + AntifloodHandler: antifloodHandler, }, nil } +func createAntifloodComponent(config *config.Config) (consensus.P2PAntifloodHandler, error) { + cacheConfig := getCacherFromConfig(config.Antiflood.Cache) + antifloodCache, err := storageUnit.NewCache(cacheConfig.Type, cacheConfig.Size, cacheConfig.Shards) + if err != nil { + return nil, err + } + + maxMessagesPerPeer := config.Antiflood.MaxMessagesPerPeerPerSecond + maxTotalSizePerPeer := config.Antiflood.MaxTotalSizePerPeerPerSecond + + floodPreventer, err := antiflood2.NewQuotaFloodPreventer( + 
antifloodCache, + quotaStatusHandler.NewPrintQuotaStatusHandler(), + maxMessagesPerPeer, + maxTotalSizePerPeer, + ) + if err != nil { + return nil, err + } + + startResetingFloodPreventer(floodPreventer) + + return antiflood.NewP2pAntiflood(floodPreventer) +} + +func startResetingFloodPreventer(floodPreventer p2p.FloodPreventer) { + go func() { + for { + time.Sleep(time.Second) + floodPreventer.Reset() + } + }() +} + type processComponentsFactoryArgs struct { coreComponents *coreComponentsFactoryArgs genesisConfig *sharding.Genesis @@ -1474,6 +1518,7 @@ func newShardInterceptorAndResolverContainerFactory( economics, headerBlackList, headerSigVerifier, + network.AntifloodHandler, ) if err != nil { return nil, nil, nil, err @@ -1492,6 +1537,7 @@ func newShardInterceptorAndResolverContainerFactory( data.Datapool, core.Uint64ByteSliceConverter, dataPacker, + network.AntifloodHandler, ) if err != nil { return nil, nil, nil, err @@ -1531,6 +1577,7 @@ func newMetaInterceptorAndResolverContainerFactory( economics, headerBlackList, headerSigVerifier, + network.AntifloodHandler, ) if err != nil { return nil, nil, nil, err @@ -1549,6 +1596,7 @@ func newMetaInterceptorAndResolverContainerFactory( data.MetaDatapool, core.Uint64ByteSliceConverter, dataPacker, + network.AntifloodHandler, ) if err != nil { return nil, nil, nil, err diff --git a/cmd/node/main.go b/cmd/node/main.go index ab8925e590c..95f68e5d6d8 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -630,7 +630,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { log.LogIfError(err) log.Trace("creating network components") - networkComponents, err := factory.NetworkComponentsFactory(p2pConfig, log, coreComponents) + networkComponents, err := factory.NetworkComponentsFactory(p2pConfig, generalConfig, log, coreComponents) if err != nil { return err } @@ -1110,6 +1110,7 @@ func createNode( node.WithBootStorer(process.BootStorer), node.WithRequestedItemsHandler(requestedItemsHandler), 
node.WithHeaderSigVerifier(process.HeaderSigVerifier), + node.WithAntifloodHandler(network.AntifloodHandler), ) if err != nil { return nil, errors.New("error creating node: " + err.Error()) diff --git a/config/config.go b/config/config.go index 61173ccbbf4..40a172084b2 100644 --- a/config/config.go +++ b/config/config.go @@ -89,6 +89,7 @@ type Config struct { ShardHeadersDataPool CacheConfig MetaHeaderNoncesDataPool CacheConfig + Antiflood AntifloodConfig Logger LoggerConfig Address AddressConfig Hasher TypeConfig @@ -180,3 +181,10 @@ type FacadeConfig struct { PrometheusJoinURL string PrometheusJobName string } + +// AntifloodConfig will hold all p2p antiflood parameters +type AntifloodConfig struct { + Cache CacheConfig + MaxMessagesPerPeerPerSecond uint32 + MaxTotalSizePerPeerPerSecond uint64 +} diff --git a/consensus/spos/worker_test.go b/consensus/spos/worker_test.go index 107fbe9cefc..1ff79a0e699 100644 --- a/consensus/spos/worker_test.go +++ b/consensus/spos/worker_test.go @@ -611,6 +611,7 @@ func TestWorker_NewWorkerNilAntifloodHandlerShouldFail(t *testing.T) { shardCoordinatorMock, singleSignerMock, syncTimerMock, + &mock.HeaderSigVerifierStub{}, nil, ) @@ -708,7 +709,9 @@ func TestWorker_ProcessReceivedMessageWrongHeaderShouldErr(t *testing.T) { shardCoordinatorMock, singleSignerMock, syncTimerMock, - headerSigVerifier) + headerSigVerifier, + createMockP2pAntifloodHandler(), + ) hdr := &block.Header{} hdr.Nonce = 1 diff --git a/integrationTests/consensus/testInitializer.go b/integrationTests/consensus/testInitializer.go index 5798854f71a..2b849f19a3c 100644 --- a/integrationTests/consensus/testInitializer.go +++ b/integrationTests/consensus/testInitializer.go @@ -428,6 +428,7 @@ func createConsensusOnlyNode( node.WithBootStorer(&mock.BoostrapStorerMock{}), node.WithRequestedItemsHandler(&mock.RequestedItemsHandlerStub{}), node.WithHeaderSigVerifier(&mock.HeaderSigVerifierStub{}), + node.WithAntifloodHandler(&mock.NilAntifloodHandler{}), ) if err != nil { 
@@ -488,7 +489,6 @@ func createNodes( consensusType, ) - testNode.node = n testNode.node = n testNode.sk = kp.sk testNode.mesenger = mes diff --git a/integrationTests/mock/nilAntifloodHandler.go b/integrationTests/mock/nilAntifloodHandler.go new file mode 100644 index 00000000000..88d42a018a2 --- /dev/null +++ b/integrationTests/mock/nilAntifloodHandler.go @@ -0,0 +1,18 @@ +package mock + +import "github.com/ElrondNetwork/elrond-go/p2p" + +// NilAntifloodHandler is an empty implementation of P2PAntifloodHandler +// it does nothing +type NilAntifloodHandler struct { +} + +// CanProcessMessage will always return nil, allowing messages to go to interceptors +func (nah *NilAntifloodHandler) CanProcessMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (nah *NilAntifloodHandler) IsInterfaceNil() bool { + return nah == nil +} diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 3857f123210..921d54ad665 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -380,6 +380,7 @@ func (tpn *TestProcessorNode) initInterceptors() { tpn.EconomicsData, tpn.BlackListHandler, tpn.HeaderSigVerifier, + &mock.NilAntifloodHandler{}, ) tpn.InterceptorsContainer, err = interceptorContainerFactory.Create() @@ -406,6 +407,7 @@ func (tpn *TestProcessorNode) initInterceptors() { tpn.EconomicsData, tpn.BlackListHandler, tpn.HeaderSigVerifier, + &mock.NilAntifloodHandler{}, ) tpn.InterceptorsContainer, err = interceptorContainerFactory.Create() @@ -427,6 +429,7 @@ func (tpn *TestProcessorNode) initResolvers() { tpn.MetaDataPool, TestUint64Converter, dataPacker, + &mock.NilAntifloodHandler{}, ) tpn.ResolversContainer, _ = resolversContainerFactory.Create() @@ -450,6 +453,7 @@ func (tpn *TestProcessorNode) initResolvers() { tpn.ShardDataPool, TestUint64Converter, dataPacker, + 
&mock.NilAntifloodHandler{}, ) tpn.ResolversContainer, _ = resolversContainerFactory.Create() diff --git a/node/defineOptions.go b/node/defineOptions.go index b74cea74bfc..1460b7d27a4 100644 --- a/node/defineOptions.go +++ b/node/defineOptions.go @@ -466,3 +466,14 @@ func WithHeaderSigVerifier(headerSigVerifier spos.RandSeedVerifier) Option { return nil } } + +// WithAntifloodHandler sets up an antiflood handler for the Node +func WithAntifloodHandler(antifloodHandler P2PAntifloodHandler) Option { + return func(n *Node) error { + if check.IfNil(antifloodHandler) { + return ErrNilAntifloodHandler + } + n.antifloodHandler = antifloodHandler + return nil + } +} diff --git a/node/defineOptions_test.go b/node/defineOptions_test.go index 147da60818e..e1bfa196857 100644 --- a/node/defineOptions_test.go +++ b/node/defineOptions_test.go @@ -919,3 +919,27 @@ func TestWithRequestedItemsHandler_OkRequestedItemsHandlerShouldWork(t *testing. assert.True(t, node.requestedItemsHandler == requestedItemsHeanlder) assert.Nil(t, err) } + +func TestWithAntifloodHandler_NilAntifloodHandlerShouldErr(t *testing.T) { + t.Parallel() + + node, _ := NewNode() + + opt := WithAntifloodHandler(nil) + err := opt(node) + + assert.Equal(t, ErrNilAntifloodHandler, err) +} + +func TestWithAntifloodHandler_OkAntifloodHandlerShouldWork(t *testing.T) { + t.Parallel() + + node, _ := NewNode() + + antifloodHandler := &mock.P2PAntifloodHandlerStub{} + opt := WithAntifloodHandler(antifloodHandler) + err := opt(node) + + assert.True(t, node.antifloodHandler == antifloodHandler) + assert.Nil(t, err) +} diff --git a/node/errors.go b/node/errors.go index 802bd81024c..6168deead1d 100644 --- a/node/errors.go +++ b/node/errors.go @@ -138,3 +138,6 @@ var ErrNilBootStorer = errors.New("nil boot storer") // ErrNilHeaderSigVerifier signals that a nil header sig verifier has been provided var ErrNilHeaderSigVerifier = errors.New("nil header sig verifier") + +// ErrNilAntifloodHandler signals that a nil antiflood 
handler has been provided +var ErrNilAntifloodHandler = errors.New("nil antiflood handler") diff --git a/node/node.go b/node/node.go index 5edfc2a29a6..4db0d7695fc 100644 --- a/node/node.go +++ b/node/node.go @@ -110,6 +110,8 @@ type Node struct { bootStorer process.BootStorer requestedItemsHandler dataRetriever.RequestedItemsHandler headerSigVerifier spos.RandSeedVerifier + + antifloodHandler P2PAntifloodHandler } // ApplyOptions can set up different configurable options of a Node instance @@ -281,6 +283,7 @@ func (n *Node) StartConsensus() error { n.singleSigner, n.syncTimer, n.headerSigVerifier, + n.antifloodHandler, ) if err != nil { return err @@ -642,6 +645,7 @@ func (n *Node) SendBulkTransactions(txs []*transaction.Transaction) (uint64, err } func (n *Node) validateTx(tx *transaction.Transaction) error { + //TODO remove the dependency with /cmd/node txValidator, err := dataValidators.NewTxValidator(n.accounts, n.shardCoordinator, nodeCmdFactory.MaxTxNonceDeltaAllowed) if err != nil { return nil } @@ -870,6 +874,7 @@ func (n *Node) StartHeartbeat(hbConfig config.HeartbeatConfig, versionNumber str heartBeatMsgProcessor, heartbeatStorer, timer, + n.antifloodHandler, ) if err != nil { return err diff --git a/process/throttle/antiflood/quotaFloodPreventer.go b/process/throttle/antiflood/quotaFloodPreventer.go index 702bf08fbe4..c7d53e95024 100644 --- a/process/throttle/antiflood/quotaFloodPreventer.go +++ b/process/throttle/antiflood/quotaFloodPreventer.go @@ -12,7 +12,6 @@ import ( const minMessages = 1 const minTotalSize = 1 //1Byte const initNumMessages = 1 -const totalIdentifier = "total" type quota struct { numReceivedMessages uint32 @@ -129,7 +128,6 @@ func (qfp quotaFloodPreventer) createStatistics() { qfp.statusHandler.ResetStatistics() keys := qfp.cacher.Keys() - totalQuota := &quota{} for _, k := range keys { val, ok := qfp.cacher.Get(k) if !ok { @@ -141,11 +139,6 @@ func (qfp quotaFloodPreventer) createStatistics() { continue } -
totalQuota.numReceivedMessages += q.numReceivedMessages - totalQuota.sizeReceivedMessages += q.sizeReceivedMessages - totalQuota.numProcessedMessages += q.numProcessedMessages - totalQuota.sizeProcessedMessages += q.sizeProcessedMessages - qfp.statusHandler.AddQuota( string(k), q.numReceivedMessages, @@ -154,14 +147,6 @@ func (qfp quotaFloodPreventer) createStatistics() { q.sizeProcessedMessages, ) } - - qfp.statusHandler.AddQuota( - totalIdentifier, - totalQuota.numReceivedMessages, - totalQuota.sizeReceivedMessages, - totalQuota.numProcessedMessages, - totalQuota.sizeProcessedMessages, - ) } // IsInterfaceNil returns true if there is no value under the interface diff --git a/process/throttle/antiflood/quotaFloodPreventer_test.go b/process/throttle/antiflood/quotaFloodPreventer_test.go index c074cc49c9c..9a1784d1949 100644 --- a/process/throttle/antiflood/quotaFloodPreventer_test.go +++ b/process/throttle/antiflood/quotaFloodPreventer_test.go @@ -296,7 +296,6 @@ func TestCountersMap_ResetShouldCallQuotaStatus(t *testing.T) { resetStatisticsCalled := false quota1Compared := false quota2Compared := false - totalCompared := false qfp, _ := NewQuotaFloodPreventer( cacher, &mock.QuotaStatusHandlerStub{ @@ -319,14 +318,6 @@ func TestCountersMap_ResetShouldCallQuotaStatus(t *testing.T) { case string(key2): quotaToCompare = *quota2 quota2Compared = true - case totalIdentifier: - quotaToCompare = quota{ - numReceivedMessages: quota1.numReceivedMessages + quota2.numReceivedMessages, - sizeReceivedMessages: quota1.sizeReceivedMessages + quota2.sizeReceivedMessages, - numProcessedMessages: quota1.numProcessedMessages + quota2.numProcessedMessages, - sizeProcessedMessages: quota1.sizeProcessedMessages + quota2.sizeProcessedMessages, - } - totalCompared = true default: assert.Fail(t, fmt.Sprintf("unknown identifier %s", identifier)) } @@ -343,7 +334,6 @@ func TestCountersMap_ResetShouldCallQuotaStatus(t *testing.T) { assert.True(t, resetStatisticsCalled) assert.True(t, 
quota1Compared) assert.True(t, quota2Compared) - assert.True(t, totalCompared) } func TestCountersMap_IncrementAndResetShouldWorkConcurrently(t *testing.T) { diff --git a/statusHandler/quotaStatusHandler/printQuotaStatusHandler.go b/statusHandler/quotaStatusHandler/printQuotaStatusHandler.go new file mode 100644 index 00000000000..05b692f2040 --- /dev/null +++ b/statusHandler/quotaStatusHandler/printQuotaStatusHandler.go @@ -0,0 +1,158 @@ +package quotaStatusHandler + +import ( + "math" + "sync" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/logger" +) + +var log = logger.GetOrCreate("statushandler/quotastatushandler") + +type quota struct { + numReceivedMessages uint32 + sizeReceivedMessages uint64 + numProcessedMessages uint32 + sizeProcessedMessages uint64 +} + +//TODO replace this structure with a new one that can output on a route the statistics measured +//TODO add tests +// printQuotaStatusHandler implements process.QuotaStatusHandler and is able to periodically print peer connection statistics +type printQuotaStatusHandler struct { + mutStatistics sync.Mutex + statistics map[string]*quota +} + +// NewPrintQuotaStatusHandler creates a new NewPrintQuotaStatusHandler instance +func NewPrintQuotaStatusHandler() *printQuotaStatusHandler { + return &printQuotaStatusHandler{ + statistics: make(map[string]*quota), + } +} + +// ResetStatistics output gathered statistics, process and prints them. 
After that it empties the statistics map +func (pqsh *printQuotaStatusHandler) ResetStatistics() { + minQuota := &quota{ + numReceivedMessages: math.MaxUint32, + sizeReceivedMessages: math.MaxUint64, + numProcessedMessages: math.MaxUint32, + sizeProcessedMessages: math.MaxUint64, + } + avgQuota := &quota{} + maxQuota := &quota{} + + pqsh.mutStatistics.Lock() + defer pqsh.mutStatistics.Unlock() + + numStatistics := len(pqsh.statistics) + + if numStatistics == 0 { + log.Trace("empty quota statistics") + return + } + + for name, q := range pqsh.statistics { + avgQuota.numReceivedMessages += q.numReceivedMessages + avgQuota.sizeReceivedMessages += q.sizeReceivedMessages + avgQuota.numProcessedMessages += q.numProcessedMessages + avgQuota.sizeProcessedMessages += q.sizeProcessedMessages + + minQuota.numReceivedMessages = pickMinUint32(minQuota.numReceivedMessages, q.numReceivedMessages) + minQuota.sizeReceivedMessages = pickMinUint64(minQuota.sizeReceivedMessages, q.sizeReceivedMessages) + minQuota.numProcessedMessages = pickMinUint32(minQuota.numProcessedMessages, q.numProcessedMessages) + minQuota.sizeProcessedMessages = pickMinUint64(minQuota.sizeProcessedMessages, q.sizeProcessedMessages) + + maxQuota.numReceivedMessages = pickMaxUint32(maxQuota.numReceivedMessages, q.numReceivedMessages) + maxQuota.sizeReceivedMessages = pickMaxUint64(maxQuota.sizeReceivedMessages, q.sizeReceivedMessages) + maxQuota.numProcessedMessages = pickMaxUint32(maxQuota.numProcessedMessages, q.numProcessedMessages) + maxQuota.sizeProcessedMessages = pickMaxUint64(maxQuota.sizeProcessedMessages, q.sizeProcessedMessages) + log.Trace("peer quota statistics", + "peer", name, + "num received msg", q.numReceivedMessages, + "size received", core.ConvertBytes(q.sizeReceivedMessages), + "num processed msg", q.numProcessedMessages, + "size processed", core.ConvertBytes(q.sizeProcessedMessages), + ) + } + + log.Trace("quota statistics", "num peers", numStatistics) + log.Trace("min quota statistics / peer", + "num
received msg", minQuota.numReceivedMessages, + "size received", core.ConvertBytes(minQuota.sizeReceivedMessages), + "num processed msg", minQuota.numProcessedMessages, + "size processed", core.ConvertBytes(minQuota.sizeProcessedMessages), + ) + log.Trace("avg quota statistics / peer", + "num received msg", avgQuota.numReceivedMessages/uint32(numStatistics), + "size received", core.ConvertBytes(avgQuota.sizeReceivedMessages/uint64(numStatistics)), + "num processed msg", avgQuota.numProcessedMessages/uint32(numStatistics), + "size processed", core.ConvertBytes(avgQuota.sizeProcessedMessages/uint64(numStatistics)), + ) + log.Trace("max quota statistics / peer", + "num received msg", maxQuota.numReceivedMessages, + "size received", core.ConvertBytes(maxQuota.sizeReceivedMessages), + "num processed msg", maxQuota.numProcessedMessages, + "size processed", core.ConvertBytes(maxQuota.sizeProcessedMessages), + ) + log.Trace("total quota statistics / network", + "num received msg", avgQuota.numReceivedMessages, + "size received", core.ConvertBytes(avgQuota.sizeReceivedMessages), + "num processed msg", avgQuota.numProcessedMessages, + "size processed", core.ConvertBytes(avgQuota.sizeProcessedMessages), + ) +} + +func pickMinUint32(val1 uint32, val2 uint32) uint32 { + if val1 > val2 { + return val2 + } + + return val1 +} + +func pickMinUint64(val1 uint64, val2 uint64) uint64 { + if val1 > val2 { + return val2 + } + + return val1 +} + +func pickMaxUint32(val1 uint32, val2 uint32) uint32 { + if val1 < val2 { + return val2 + } + + return val1 +} + +func pickMaxUint64(val1 uint64, val2 uint64) uint64 { + if val1 < val2 { + return val2 + } + + return val1 +} + +// AddQuota adds a quota statistics +func (pqsh *printQuotaStatusHandler) AddQuota(identifier string, numReceivedMessages uint32, sizeReceivedMessages uint64, + numProcessedMessages uint32, sizeProcessedMessages uint64) { + + q := &quota{ + numReceivedMessages: numReceivedMessages, + sizeReceivedMessages: sizeReceivedMessages, +
numProcessedMessages: numProcessedMessages, + sizeProcessedMessages: sizeProcessedMessages, + } + + pqsh.mutStatistics.Lock() + pqsh.statistics[identifier] = q + pqsh.mutStatistics.Unlock() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (pqsh *printQuotaStatusHandler) IsInterfaceNil() bool { + return pqsh == nil +} From f0cbfccb2ea1c77dca5e408957b19429b5bad6c1 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Fri, 13 Dec 2019 20:36:57 +0200 Subject: [PATCH 16/35] minor code changes, fix after review --- cmd/node/config/config.toml | 4 +- cmd/node/factory/structs.go | 16 ++-- config/config.go | 6 +- .../genericBlockBodyResolver_test.go | 6 +- dataRetriever/resolvers/headerResolver.go | 2 +- .../resolvers/headerResolver_test.go | 6 +- .../resolvers/transactionResolver_test.go | 6 +- .../mock/p2pAntifloodHandlerStub.go | 15 ++++ integrationTests/node/heartbeat_test.go | 5 ++ .../p2p/antiflood/nilQuotaStatusHandler.go | 3 +- node/node_test.go | 13 ++++ p2p/antiflood/p2pAntiflood_test.go | 8 +- p2p/mock/p2pMessageMock.go | 5 +- .../interceptorsContainerFactory_test.go | 1 + .../interceptorsContainerFactory_test.go | 1 + process/interceptors/common_test.go | 6 +- process/mock/quotaStatusHandlerStub.go | 12 ++- process/throttle/antiflood/interface.go | 4 +- .../printQuotaStatusHandler.go | 78 ++++++------------- 19 files changed, 97 insertions(+), 100 deletions(-) create mode 100644 integrationTests/mock/p2pAntifloodHandlerStub.go diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml index 492972ef3da..94699559f46 100644 --- a/cmd/node/config/config.toml +++ b/cmd/node/config/config.toml @@ -233,8 +233,8 @@ Type = "LRU" [Antiflood] - MaxMessagesPerPeerPerSecond = 10 - MaxTotalSizePerPeerPerSecond = 10000 + PeerMaxMessagesPerSecond = 10 + PeerMaxTotalSizePerSecond = 10000 [Antiflood.Cache] Size = 5000 Type = "LRU" diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index 7927b229874..2d7e38a5c8e 
100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -71,7 +71,7 @@ import ( "github.com/ElrondNetwork/elrond-go/process/smartContract" "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" processSync "github.com/ElrondNetwork/elrond-go/process/sync" - antiflood2 "github.com/ElrondNetwork/elrond-go/process/throttle/antiflood" + antifloodThrottle "github.com/ElrondNetwork/elrond-go/process/throttle/antiflood" "github.com/ElrondNetwork/elrond-go/process/transaction" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/statusHandler" @@ -436,7 +436,7 @@ func CryptoComponentsFactory(args *cryptoComponentsFactoryArgs) (*Crypto, error) } // NetworkComponentsFactory creates the network components -func NetworkComponentsFactory(p2pConfig *config.P2PConfig, config *config.Config, log logger.Logger, core *Core) (*Network, error) { +func NetworkComponentsFactory(p2pConfig *config.P2PConfig, mainConfig *config.Config, log logger.Logger, core *Core) (*Network, error) { var randReader io.Reader if p2pConfig.Node.Seed != "" { randReader = NewSeedRandReader(core.Hasher.Compute(p2pConfig.Node.Seed)) @@ -449,7 +449,7 @@ func NetworkComponentsFactory(p2pConfig *config.P2PConfig, config *config.Config return nil, err } - antifloodHandler, err := createAntifloodComponent(config) + antifloodHandler, err := createAntifloodComponent(mainConfig) if err != nil { return nil, err } @@ -460,17 +460,17 @@ func NetworkComponentsFactory(p2pConfig *config.P2PConfig, config *config.Config }, nil } -func createAntifloodComponent(config *config.Config) (consensus.P2PAntifloodHandler, error) { - cacheConfig := getCacherFromConfig(config.Antiflood.Cache) +func createAntifloodComponent(mainConfig *config.Config) (consensus.P2PAntifloodHandler, error) { + cacheConfig := getCacherFromConfig(mainConfig.Antiflood.Cache) antifloodCache, err := storageUnit.NewCache(cacheConfig.Type, cacheConfig.Size, cacheConfig.Shards) if err != nil { return 
nil, err } - maxMessagesPerPeer := config.Antiflood.MaxMessagesPerPeerPerSecond - maxTotalSizePerPeer := config.Antiflood.MaxTotalSizePerPeerPerSecond + maxMessagesPerPeer := mainConfig.Antiflood.PeerMaxMessagesPerSecond + maxTotalSizePerPeer := mainConfig.Antiflood.PeerMaxTotalSizePerSecond - floodPreventer, err := antiflood2.NewQuotaFloodPreventer( + floodPreventer, err := antifloodThrottle.NewQuotaFloodPreventer( antifloodCache, quotaStatusHandler.NewPrintQuotaStatusHandler(), maxMessagesPerPeer, diff --git a/config/config.go b/config/config.go index 40a172084b2..0a9bb73981e 100644 --- a/config/config.go +++ b/config/config.go @@ -184,7 +184,7 @@ type FacadeConfig struct { // AntifloodConfig will hold all p2p antiflood parameters type AntifloodConfig struct { - Cache CacheConfig - MaxMessagesPerPeerPerSecond uint32 - MaxTotalSizePerPeerPerSecond uint64 + Cache CacheConfig + PeerMaxMessagesPerSecond uint32 + PeerMaxTotalSizePerSecond uint64 } diff --git a/dataRetriever/resolvers/genericBlockBodyResolver_test.go b/dataRetriever/resolvers/genericBlockBodyResolver_test.go index 9d992826c73..f174185875d 100644 --- a/dataRetriever/resolvers/genericBlockBodyResolver_test.go +++ b/dataRetriever/resolvers/genericBlockBodyResolver_test.go @@ -120,7 +120,7 @@ func TestNewGenericBlockBodyResolver_OkValsShouldWork(t *testing.T) { func TestNewGenericBlockBodyResolver_ProcessReceivedAntifloodErrorsShouldErr(t *testing.T) { t.Parallel() - errExpected := errors.New("expected error") + expectedErr := errors.New("expected error") gbbRes, _ := resolvers.NewGenericBlockBodyResolver( &mock.TopicResolverSenderStub{}, &mock.CacherStub{}, @@ -128,13 +128,13 @@ func TestNewGenericBlockBodyResolver_ProcessReceivedAntifloodErrorsShouldErr(t * &mock.MarshalizerMock{}, &mock.P2PAntifloodHandlerStub{ CanProcessMessageCalled: func(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { - return errExpected + return expectedErr }, }, ) err := 
gbbRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, nil), fromConnectedPeerId) - assert.Equal(t, errExpected, err) + assert.Equal(t, expectedErr, err) } func TestNewGenericBlockBodyResolver_ProcessReceivedMessageNilValueShouldErr(t *testing.T) { diff --git a/dataRetriever/resolvers/headerResolver.go b/dataRetriever/resolvers/headerResolver.go index 78ac10b1f3a..f126b4bb3cc 100644 --- a/dataRetriever/resolvers/headerResolver.go +++ b/dataRetriever/resolvers/headerResolver.go @@ -45,7 +45,7 @@ func NewHeaderResolver( if check.IfNil(headersNonces) { return nil, dataRetriever.ErrNilHeadersNoncesDataPool } - if hdrStorage == nil || hdrStorage.IsInterfaceNil() { + if check.IfNil(hdrStorage) { return nil, dataRetriever.ErrNilHeadersStorage } if check.IfNil(headersNoncesStorage) { diff --git a/dataRetriever/resolvers/headerResolver_test.go b/dataRetriever/resolvers/headerResolver_test.go index 3ce1c19048a..f7412959f57 100644 --- a/dataRetriever/resolvers/headerResolver_test.go +++ b/dataRetriever/resolvers/headerResolver_test.go @@ -182,7 +182,7 @@ func TestNewHeaderResolver_OkValsShouldWork(t *testing.T) { func TestHeaderResolver_ProcessReceivedAntifloodErrorsShouldErr(t *testing.T) { t.Parallel() - errExpected := errors.New("expected error") + expectedErr := errors.New("expected error") hdrRes, _ := resolvers.NewHeaderResolver( &mock.TopicResolverSenderStub{}, &mock.CacherStub{}, @@ -193,13 +193,13 @@ func TestHeaderResolver_ProcessReceivedAntifloodErrorsShouldErr(t *testing.T) { mock.NewNonceHashConverterMock(), &mock.P2PAntifloodHandlerStub{ CanProcessMessageCalled: func(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { - return errExpected + return expectedErr }, }, ) err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, nil), fromConnectedPeerId) - assert.Equal(t, errExpected, err) + assert.Equal(t, expectedErr, err) } func TestHeaderResolver_ProcessReceivedMessageNilValueShouldErr(t *testing.T) { diff --git 
a/dataRetriever/resolvers/transactionResolver_test.go b/dataRetriever/resolvers/transactionResolver_test.go index 36b6f23a1a4..b3c0f09bf21 100644 --- a/dataRetriever/resolvers/transactionResolver_test.go +++ b/dataRetriever/resolvers/transactionResolver_test.go @@ -140,7 +140,7 @@ func TestNewTxResolver_OkValsShouldWork(t *testing.T) { func TestTxResolver_ProcessReceivedMessageAntifloodHandlerErrorsShouldErr(t *testing.T) { t.Parallel() - errExpected := errors.New("expected error") + expectedErr := errors.New("expected error") txRes, _ := resolvers.NewTxResolver( &mock.TopicResolverSenderStub{}, &mock.ShardedDataStub{}, @@ -149,14 +149,14 @@ func TestTxResolver_ProcessReceivedMessageAntifloodHandlerErrorsShouldErr(t *tes &mock.DataPackerStub{}, &mock.P2PAntifloodHandlerStub{ CanProcessMessageCalled: func(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { - return errExpected + return expectedErr }, }, ) err := txRes.ProcessReceivedMessage(nil, connectedPeerId) - assert.Equal(t, errExpected, err) + assert.Equal(t, expectedErr, err) } func TestTxResolver_ProcessReceivedMessageNilMessageShouldErr(t *testing.T) { diff --git a/integrationTests/mock/p2pAntifloodHandlerStub.go b/integrationTests/mock/p2pAntifloodHandlerStub.go new file mode 100644 index 00000000000..e8236ed9167 --- /dev/null +++ b/integrationTests/mock/p2pAntifloodHandlerStub.go @@ -0,0 +1,15 @@ +package mock + +import "github.com/ElrondNetwork/elrond-go/p2p" + +type P2PAntifloodHandlerStub struct { + CanProcessMessageCalled func(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error +} + +func (p2pahs *P2PAntifloodHandlerStub) CanProcessMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { + return p2pahs.CanProcessMessageCalled(message, fromConnectedPeer) +} + +func (p2pahs *P2PAntifloodHandlerStub) IsInterfaceNil() bool { + return p2pahs == nil +} diff --git a/integrationTests/node/heartbeat_test.go b/integrationTests/node/heartbeat_test.go index 78a487502fc..9dfbf2f942a 
100644 --- a/integrationTests/node/heartbeat_test.go +++ b/integrationTests/node/heartbeat_test.go @@ -238,6 +238,11 @@ func createMonitor(maxDurationPeerUnresponsive time.Duration) *heartbeat.Monitor }, }, &heartbeat.RealTimer{}, + &mock.P2PAntifloodHandlerStub{ + CanProcessMessageCalled: func(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { + return nil + }, + }, ) return monitor diff --git a/integrationTests/p2p/antiflood/nilQuotaStatusHandler.go b/integrationTests/p2p/antiflood/nilQuotaStatusHandler.go index 1987df1c4b9..9e68cd794b7 100644 --- a/integrationTests/p2p/antiflood/nilQuotaStatusHandler.go +++ b/integrationTests/p2p/antiflood/nilQuotaStatusHandler.go @@ -8,8 +8,7 @@ func (nqsh *nilQuotaStatusHandler) ResetStatistics() { } // AddQuota is not implemented -func (nqsh *nilQuotaStatusHandler) AddQuota(identifier string, numReceivedMessages uint32, sizeReceivedMessages uint64, - numProcessedMessages uint32, sizeProcessedMessages uint64) { +func (nqsh *nilQuotaStatusHandler) AddQuota(_ string, _ uint32, _ uint64, _ uint32, _ uint64) { } // IsInterfaceNil returns true if there is no value under the interface diff --git a/node/node_test.go b/node/node_test.go index 125071d5479..a09004a3bbb 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -1181,6 +1181,7 @@ func TestNode_StartHeartbeatRegisterMessageProcessorFailsShouldErr(t *testing.T) return mock.NewStorerMock() }, }), + node.WithAntifloodHandler(&mock.P2PAntifloodHandlerStub{}), ) err := n.StartHeartbeat(config.HeartbeatConfig{ MinTimeToWaitBetweenBroadcastsInSec: 1, @@ -1243,6 +1244,11 @@ func TestNode_StartHeartbeatShouldWorkAndCallSendHeartbeat(t *testing.T) { return mock.NewStorerMock() }, }), + node.WithAntifloodHandler(&mock.P2PAntifloodHandlerStub{ + CanProcessMessageCalled: func(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { + return nil + }, + }), ) err := n.StartHeartbeat(config.HeartbeatConfig{ MinTimeToWaitBetweenBroadcastsInSec: 1, @@ -1301,6 +1307,7 @@ func 
TestNode_StartHeartbeatShouldWorkAndHaveAllPublicKeys(t *testing.T) { return mock.NewStorerMock() }, }), + node.WithAntifloodHandler(&mock.P2PAntifloodHandlerStub{}), ) err := n.StartHeartbeat(config.HeartbeatConfig{ @@ -1360,6 +1367,7 @@ func TestNode_StartHeartbeatShouldSetNodesFromInitialPubKeysAsValidators(t *test return mock.NewStorerMock() }, }), + node.WithAntifloodHandler(&mock.P2PAntifloodHandlerStub{}), ) err := n.StartHeartbeat(config.HeartbeatConfig{ @@ -1424,6 +1432,11 @@ func TestNode_StartHeartbeatShouldWorkAndCanCallProcessMessage(t *testing.T) { return mock.NewStorerMock() }, }), + node.WithAntifloodHandler(&mock.P2PAntifloodHandlerStub{ + CanProcessMessageCalled: func(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { + return nil + }, + }), ) err := n.StartHeartbeat(config.HeartbeatConfig{ diff --git a/p2p/antiflood/p2pAntiflood_test.go b/p2p/antiflood/p2pAntiflood_test.go index 9b420b78972..a59def23e02 100644 --- a/p2p/antiflood/p2pAntiflood_test.go +++ b/p2p/antiflood/p2pAntiflood_test.go @@ -15,7 +15,6 @@ func TestNewP2pAntiflood_NilFloodPreventerShouldErr(t *testing.T) { t.Parallel() afm, err := antiflood.NewP2pAntiflood(nil) - assert.True(t, check.IfNil(afm)) assert.True(t, errors.Is(err, p2p.ErrNilFloodPreventer)) } @@ -33,8 +32,8 @@ func TestP2pAntiflood_SettingInnerFloodPreventerToNil(t *testing.T) { t.Parallel() afm, _ := antiflood.NewP2pAntiflood(&mock.FloodPreventerStub{}) - afm.FloodPreventer = nil + afm.FloodPreventer = nil assert.True(t, check.IfNil(afm)) } @@ -47,7 +46,6 @@ func TestP2pAntiflood_CanProcessMessageNilFloodPreventerShouldError(t *testing.T afm.FloodPreventer = nil err := afm.CanProcessMessage(&mock.P2PMessageMock{}, "connected peer") - assert.Equal(t, p2p.ErrNilFloodPreventer, err) } @@ -57,7 +55,6 @@ func TestP2pAntiflood_CanProcessMessageNilMessageShouldError(t *testing.T) { afm, _ := antiflood.NewP2pAntiflood(&mock.FloodPreventerStub{}) err := afm.CanProcessMessage(nil, "connected peer") - 
assert.Equal(t, p2p.ErrNilMessage, err) } @@ -81,7 +78,6 @@ func TestP2pAntiflood_CanNotIncrementFromConnectedPeerShouldError(t *testing.T) }) err := afm.CanProcessMessage(message, fromConnectedPeer) - assert.True(t, errors.Is(err, p2p.ErrSystemBusy)) } @@ -109,7 +105,6 @@ func TestP2pAntiflood_CanNotIncrementMessageOriginatorShouldError(t *testing.T) }) err := afm.CanProcessMessage(message, fromConnectedPeer) - assert.True(t, errors.Is(err, p2p.ErrSystemBusy)) } @@ -129,6 +124,5 @@ func TestP2pAntiflood_ShouldWork(t *testing.T) { }) err := afm.CanProcessMessage(message, fromConnectedPeer) - assert.Nil(t, err) } diff --git a/p2p/mock/p2pMessageMock.go b/p2p/mock/p2pMessageMock.go index 9c9cfe3ef9a..e859878e6fb 100644 --- a/p2p/mock/p2pMessageMock.go +++ b/p2p/mock/p2pMessageMock.go @@ -44,8 +44,5 @@ func (msg *P2PMessageMock) Peer() p2p.PeerID { // IsInterfaceNil returns true if there is no value under the interface func (msg *P2PMessageMock) IsInterfaceNil() bool { - if msg == nil { - return true - } - return false + return msg == nil } diff --git a/process/factory/metachain/interceptorsContainerFactory_test.go b/process/factory/metachain/interceptorsContainerFactory_test.go index 9a6f3ab4b7b..6d74cc62eed 100644 --- a/process/factory/metachain/interceptorsContainerFactory_test.go +++ b/process/factory/metachain/interceptorsContainerFactory_test.go @@ -508,6 +508,7 @@ func TestNewInterceptorsContainerFactory_NilAntifloodHandlerShouldErr(t *testing maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.HeaderSigVerifierStub{}, nil, ) diff --git a/process/factory/shard/interceptorsContainerFactory_test.go b/process/factory/shard/interceptorsContainerFactory_test.go index a61e700d915..7796049b1c9 100644 --- a/process/factory/shard/interceptorsContainerFactory_test.go +++ b/process/factory/shard/interceptorsContainerFactory_test.go @@ -512,6 +512,7 @@ func TestNewInterceptorsContainerFactory_NilAntifloodHandlerShouldErr(t *testing 
maxTxNonceDeltaAllowed, &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, + &mock.HeaderSigVerifierStub{}, nil, ) diff --git a/process/interceptors/common_test.go b/process/interceptors/common_test.go index 40119c619cd..40cbd97ec20 100644 --- a/process/interceptors/common_test.go +++ b/process/interceptors/common_test.go @@ -44,16 +44,16 @@ func TestPreProcessMessage_AntifloodCanNotProcessShouldErr(t *testing.T) { return false }, } - errExpected := errors.New("expected error") + expectedErr := errors.New("expected error") antifloodHandler := &mock.P2PAntifloodHandlerStub{ CanProcessMessageCalled: func(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { - return errExpected + return expectedErr }, } err := preProcessMesage(throttler, antifloodHandler, msg, fromConnectedPeer) - assert.Equal(t, errExpected, err) + assert.Equal(t, expectedErr, err) } func TestPreProcessMessage_ThrottlerCanNotProcessShouldErr(t *testing.T) { diff --git a/process/mock/quotaStatusHandlerStub.go b/process/mock/quotaStatusHandlerStub.go index 5b731068ca3..55c4bdbae75 100644 --- a/process/mock/quotaStatusHandlerStub.go +++ b/process/mock/quotaStatusHandlerStub.go @@ -10,10 +10,14 @@ func (qshs *QuotaStatusHandlerStub) ResetStatistics() { qshs.ResetStatisticsCalled() } -func (qshs *QuotaStatusHandlerStub) AddQuota(identifier string, numReceivedMessages uint32, sizeReceivedMessages uint64, - numProcessedMessages uint32, sizeProcessedMessages uint64) { - - qshs.AddQuotaCalled(identifier, numReceivedMessages, sizeReceivedMessages, numProcessedMessages, sizeProcessedMessages) +func (qshs *QuotaStatusHandlerStub) AddQuota( + identifier string, + numReceived uint32, + sizeReceived uint64, + numProcessed uint32, + sizeProcessed uint64, +) { + qshs.AddQuotaCalled(identifier, numReceived, sizeReceived, numProcessed, sizeProcessed) } func (qshs *QuotaStatusHandlerStub) IsInterfaceNil() bool { diff --git a/process/throttle/antiflood/interface.go b/process/throttle/antiflood/interface.go 
index 1860945e53e..9e7b14355d9 100644 --- a/process/throttle/antiflood/interface.go +++ b/process/throttle/antiflood/interface.go @@ -4,7 +4,7 @@ package antiflood // by the system type QuotaStatusHandler interface { ResetStatistics() - AddQuota(identifier string, numReceivedMessages uint32, sizeReceivedMessages uint64, - numProcessedMessages uint32, sizeProcessedMessages uint64) + AddQuota(identifier string, numReceived uint32, sizeReceived uint64, + numProcessed uint32, sizeProcessed uint64) IsInterfaceNil() bool } diff --git a/statusHandler/quotaStatusHandler/printQuotaStatusHandler.go b/statusHandler/quotaStatusHandler/printQuotaStatusHandler.go index 05b692f2040..155c7cc1d03 100644 --- a/statusHandler/quotaStatusHandler/printQuotaStatusHandler.go +++ b/statusHandler/quotaStatusHandler/printQuotaStatusHandler.go @@ -40,7 +40,7 @@ func (pqsh *printQuotaStatusHandler) ResetStatistics() { numProcessedMessages: math.MaxUint32, sizeProcessedMessages: math.MaxUint64, } - avgQuota := "a{} + sumQuota := "a{} maxQuota := "a{} pqsh.mutStatistics.Lock() @@ -54,20 +54,20 @@ func (pqsh *printQuotaStatusHandler) ResetStatistics() { } for name, q := range pqsh.statistics { - avgQuota.numReceivedMessages += q.numReceivedMessages - avgQuota.sizeReceivedMessages += q.sizeReceivedMessages - avgQuota.numProcessedMessages += q.numProcessedMessages - avgQuota.sizeProcessedMessages += q.sizeProcessedMessages - - minQuota.numReceivedMessages = pickMinUint32(minQuota.numReceivedMessages, q.numReceivedMessages) - minQuota.sizeReceivedMessages = pickMinUint64(minQuota.sizeReceivedMessages, q.sizeReceivedMessages) - minQuota.numProcessedMessages = pickMinUint32(minQuota.numProcessedMessages, q.numProcessedMessages) - minQuota.sizeProcessedMessages = pickMinUint64(minQuota.sizeProcessedMessages, q.sizeProcessedMessages) - - maxQuota.numReceivedMessages = pickMaxUint32(maxQuota.numReceivedMessages, q.numReceivedMessages) - maxQuota.sizeReceivedMessages = 
pickMaxUint64(maxQuota.sizeReceivedMessages, q.sizeReceivedMessages) - maxQuota.numProcessedMessages = pickMaxUint32(maxQuota.numProcessedMessages, q.numProcessedMessages) - maxQuota.sizeProcessedMessages = pickMaxUint64(maxQuota.sizeProcessedMessages, q.sizeProcessedMessages) + sumQuota.numReceivedMessages += q.numReceivedMessages + sumQuota.sizeReceivedMessages += q.sizeReceivedMessages + sumQuota.numProcessedMessages += q.numProcessedMessages + sumQuota.sizeProcessedMessages += q.sizeProcessedMessages + + minQuota.numReceivedMessages = core.MinUint32(minQuota.numReceivedMessages, q.numReceivedMessages) + minQuota.sizeReceivedMessages = core.MinUint64(minQuota.sizeReceivedMessages, q.sizeReceivedMessages) + minQuota.numProcessedMessages = core.MinUint32(minQuota.numProcessedMessages, q.numProcessedMessages) + minQuota.sizeProcessedMessages = core.MinUint64(minQuota.sizeProcessedMessages, q.sizeProcessedMessages) + + maxQuota.numReceivedMessages = core.MaxUint32(maxQuota.numReceivedMessages, q.numReceivedMessages) + maxQuota.sizeReceivedMessages = core.MaxUint64(maxQuota.sizeReceivedMessages, q.sizeReceivedMessages) + maxQuota.numProcessedMessages = core.MaxUint32(maxQuota.numProcessedMessages, q.numProcessedMessages) + maxQuota.sizeProcessedMessages = core.MaxUint64(maxQuota.sizeProcessedMessages, q.sizeProcessedMessages) log.Trace("peer quota statistics", "peer", name, "num received msg", q.numReceivedMessages, @@ -85,10 +85,10 @@ func (pqsh *printQuotaStatusHandler) ResetStatistics() { "size processed", core.ConvertBytes(minQuota.sizeProcessedMessages), ) log.Trace("avg quota statistics / peer", - "num received msg", avgQuota.numReceivedMessages/uint32(numStatistics), - "size received", core.ConvertBytes(avgQuota.sizeReceivedMessages/uint64(numStatistics)), - "num processed msg", avgQuota.numProcessedMessages/uint32(numStatistics), - "size processed", core.ConvertBytes(avgQuota.sizeProcessedMessages/uint64(numStatistics)), + "num received msg", 
sumQuota.numReceivedMessages/uint32(numStatistics), + "size received", core.ConvertBytes(sumQuota.sizeReceivedMessages/uint64(numStatistics)), + "num processed msg", sumQuota.numProcessedMessages/uint32(numStatistics), + "size processed", core.ConvertBytes(sumQuota.sizeProcessedMessages/uint64(numStatistics)), ) log.Trace("max quota statistics / peer", "num received msg", maxQuota.numReceivedMessages, @@ -97,45 +97,13 @@ func (pqsh *printQuotaStatusHandler) ResetStatistics() { "size processed", core.ConvertBytes(maxQuota.sizeProcessedMessages), ) log.Trace("total quota statistics / network", - "num received msg", avgQuota.numReceivedMessages, - "size received", core.ConvertBytes(avgQuota.sizeReceivedMessages), - "num processed msg", avgQuota.numProcessedMessages, - "size processed", core.ConvertBytes(avgQuota.sizeProcessedMessages), + "num received msg", sumQuota.numReceivedMessages, + "size received", core.ConvertBytes(sumQuota.sizeReceivedMessages), + "num processed msg", sumQuota.numProcessedMessages, + "size processed", core.ConvertBytes(sumQuota.sizeProcessedMessages), ) } -func pickMinUint32(val1 uint32, val2 uint32) uint32 { - if val1 > val2 { - return val2 - } - - return val1 -} - -func pickMinUint64(val1 uint64, val2 uint64) uint64 { - if val1 > val2 { - return val2 - } - - return val1 -} - -func pickMaxUint32(val1 uint32, val2 uint32) uint32 { - if val1 < val2 { - return val2 - } - - return val1 -} - -func pickMaxUint64(val1 uint64, val2 uint64) uint64 { - if val1 < val2 { - return val2 - } - - return val1 -} - // AddQuota adds a quota statistics func (pqsh *printQuotaStatusHandler) AddQuota(identifier string, numReceivedMessages uint32, sizeReceivedMessages uint64, numProcessedMessages uint32, sizeProcessedMessages uint64) { From 545cf7bbe4bf35ed0b7f91b7870b7e30fc5258d5 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Fri, 13 Dec 2019 21:20:23 +0200 Subject: [PATCH 17/35] - fix golangbot issue: copylocks: createStatistics passes lock by value: 
quotaFloodPreventer contains sync.RWMutex func (qfp quotaFloodPreventer) createStatistics() - removed unnecessary parameter in qfp.putDefaultQuota --- process/throttle/antiflood/quotaFloodPreventer.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/process/throttle/antiflood/quotaFloodPreventer.go b/process/throttle/antiflood/quotaFloodPreventer.go index c7d53e95024..70fa69f5fad 100644 --- a/process/throttle/antiflood/quotaFloodPreventer.go +++ b/process/throttle/antiflood/quotaFloodPreventer.go @@ -22,7 +22,7 @@ type quota struct { // quotaFloodPreventer represents a cache of quotas per peer used in antiflooding mechanism type quotaFloodPreventer struct { - mutOperation sync.RWMutex + mutOperation *sync.RWMutex cacher storage.Cacher statusHandler QuotaStatusHandler maxMessages uint32 @@ -59,6 +59,7 @@ func NewQuotaFloodPreventer( } return "aFloodPreventer{ + mutOperation: &sync.RWMutex{}, cacher: cacher, statusHandler: statusHandler, maxMessages: maxMessagesPerPeer, @@ -76,14 +77,14 @@ func (qfp *quotaFloodPreventer) Increment(identifier string, size uint64) bool { valueQuota, ok := qfp.cacher.Get([]byte(identifier)) if !ok { - qfp.putDefaultQuota(qfp.cacher, identifier, size) + qfp.putDefaultQuota(identifier, size) return true } q, isQuota := valueQuota.(*quota) if !isQuota { - qfp.putDefaultQuota(qfp.cacher, identifier, size) + qfp.putDefaultQuota(identifier, size) return true } @@ -102,7 +103,7 @@ func (qfp *quotaFloodPreventer) Increment(identifier string, size uint64) bool { return false } -func (qfp *quotaFloodPreventer) putDefaultQuota(cacher storage.Cacher, identifier string, size uint64) { +func (qfp *quotaFloodPreventer) putDefaultQuota(identifier string, size uint64) { q := "a{ numReceivedMessages: initNumMessages, sizeReceivedMessages: size, From 63165c0bc690773e5ba0ccff6eaf95a844641eed Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Mon, 16 Dec 2019 14:52:00 +0200 Subject: [PATCH 18/35] created p2p quota processor for 
antiflooding metrics --- cmd/node/factory/structs.go | 13 +- core/errors.go | 3 - core/p2pConstants.go | 26 +++ integrationTests/testProcessorNode.go | 2 +- statusHandler/errors.go | 3 + statusHandler/mock/appStatusHandlerMock.go | 99 ++++++++ statusHandler/p2pQuota/export_test.go | 33 +++ statusHandler/p2pQuota/p2pQuotaProcessor.go | 135 +++++++++++ .../p2pQuota/p2pQuotaProcessor_test.go | 212 ++++++++++++++++++ .../printQuotaStatusHandler.go | 126 ----------- 10 files changed, 518 insertions(+), 134 deletions(-) create mode 100644 core/p2pConstants.go create mode 100644 statusHandler/mock/appStatusHandlerMock.go create mode 100644 statusHandler/p2pQuota/export_test.go create mode 100644 statusHandler/p2pQuota/p2pQuotaProcessor.go create mode 100644 statusHandler/p2pQuota/p2pQuotaProcessor_test.go delete mode 100644 statusHandler/quotaStatusHandler/printQuotaStatusHandler.go diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index 2d7e38a5c8e..5aac372bed6 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -75,7 +75,7 @@ import ( "github.com/ElrondNetwork/elrond-go/process/transaction" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/statusHandler" - "github.com/ElrondNetwork/elrond-go/statusHandler/quotaStatusHandler" + "github.com/ElrondNetwork/elrond-go/statusHandler/p2pQuota" "github.com/ElrondNetwork/elrond-go/storage" "github.com/ElrondNetwork/elrond-go/storage/memorydb" "github.com/ElrondNetwork/elrond-go/storage/storageUnit" @@ -449,7 +449,7 @@ func NetworkComponentsFactory(p2pConfig *config.P2PConfig, mainConfig *config.Co return nil, err } - antifloodHandler, err := createAntifloodComponent(mainConfig) + antifloodHandler, err := createAntifloodComponent(mainConfig, core.StatusHandler) if err != nil { return nil, err } @@ -460,7 +460,7 @@ func NetworkComponentsFactory(p2pConfig *config.P2PConfig, mainConfig *config.Co }, nil } -func createAntifloodComponent(mainConfig 
*config.Config) (consensus.P2PAntifloodHandler, error) { +func createAntifloodComponent(mainConfig *config.Config, status core.AppStatusHandler) (consensus.P2PAntifloodHandler, error) { cacheConfig := getCacherFromConfig(mainConfig.Antiflood.Cache) antifloodCache, err := storageUnit.NewCache(cacheConfig.Type, cacheConfig.Size, cacheConfig.Shards) if err != nil { @@ -470,9 +470,14 @@ func createAntifloodComponent(mainConfig *config.Config) (consensus.P2PAntiflood maxMessagesPerPeer := mainConfig.Antiflood.PeerMaxMessagesPerSecond maxTotalSizePerPeer := mainConfig.Antiflood.PeerMaxTotalSizePerSecond + quotaProcessor, err := p2pQuota.NewP2pQuotaProcessor(status) + if err != nil { + return nil, err + } + floodPreventer, err := antifloodThrottle.NewQuotaFloodPreventer( antifloodCache, - quotaStatusHandler.NewPrintQuotaStatusHandler(), + quotaProcessor, maxMessagesPerPeer, maxTotalSizePerPeer, ) diff --git a/core/errors.go b/core/errors.go index 808cd09988e..9d1feb17ddf 100644 --- a/core/errors.go +++ b/core/errors.go @@ -13,9 +13,6 @@ var ErrNilHasher = errors.New("nil hasher provided") // ErrNilCoordinator signals that a nil shardCoordinator has been provided var ErrNilCoordinator = errors.New("nil coordinator provided") -// ErrNilLogger signals that a nil logger has been provided -var ErrNilLogger = errors.New("nil logger provided") - // ErrInvalidValue signals that a nil value has been provided var ErrInvalidValue = errors.New("invalid value provided") diff --git a/core/p2pConstants.go b/core/p2pConstants.go new file mode 100644 index 00000000000..afe4088525b --- /dev/null +++ b/core/p2pConstants.go @@ -0,0 +1,26 @@ +package core + +//TODO comment these + +const MetricP2pSumNumReceivedMessages = "erd_p2p_sum_num_received_messages" +const MetricP2pSumSizeReceivedMessages = "erd_p2p_sum_size_received_messages" +const MetricP2pSumNumProcessedMessages = "erd_p2p_sum_num_processed_messages" +const MetricP2pSumSizeProcessedMessages = "erd_p2p_sum_size_processed_messages" + 
+const MetricP2pTopSumNumReceivedMessages = "erd_p2p_top_sum_num_received_messages" +const MetricP2pTopSumSizeReceivedMessages = "erd_p2p_top_sum_size_received_messages" +const MetricP2pTopSumNumProcessedMessages = "erd_p2p_top_sum_num_processed_messages" +const MetricP2pTopSumSizeProcessedMessages = "erd_p2p_top_sum_size_processed_messages" + +const MetricP2pMaxNumReceivedMessages = "erd_p2p_max_num_received_messages" +const MetricP2pMaxSizeReceivedMessages = "erd_p2p_max_size_received_messages" +const MetricP2pMaxNumProcessedMessages = "erd_p2p_max_num_processed_messages" +const MetricP2pMaxSizeProcessedMessages = "erd_p2p_max_size_processed_messages" + +const MetricP2pTopMaxNumReceivedMessages = "erd_p2p_top_max_num_received_messages" +const MetricP2pTopMaxSizeReceivedMessages = "erd_p2p_top_max_size_received_messages" +const MetricP2pTopMaxNumProcessedMessages = "erd_p2p_top_max_num_processed_messages" +const MetricP2pTopMaxSizeProcessedMessages = "erd_p2p_top_max_size_processed_messages" + +const MetricP2pNumReceiverPeers = "erd_p2p_num_receiver_peers" +const MetricP2pTopNumReceiverPeers = "erd_p2p_top_num_receiver_peers" diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 921d54ad665..b4aa4b03776 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -97,7 +97,7 @@ const OpGasValueForMockVm = uint64(50) var TimeSpanForBadHeaders = time.Second * 30 // roundDuration defines the duration of the round -const roundDuration = time.Duration(5 * time.Second) +const roundDuration = 5 * time.Second // TestKeyPair holds a pair of private/public Keys type TestKeyPair struct { diff --git a/statusHandler/errors.go b/statusHandler/errors.go index e7a0feb1be5..49c4cddb694 100644 --- a/statusHandler/errors.go +++ b/statusHandler/errors.go @@ -13,3 +13,6 @@ var ErrNilPresenterInterface = errors.New("nil presenter interface") // ErrNilGrid will be returned when a nil grid is returned var 
ErrNilGrid = errors.New("nil grid") + +// ErrNilAppStatusHandler signals that a nil status handler has been provided +var ErrNilAppStatusHandler = errors.New("appStatusHandler is nil") diff --git a/statusHandler/mock/appStatusHandlerMock.go b/statusHandler/mock/appStatusHandlerMock.go new file mode 100644 index 00000000000..41aa121e52d --- /dev/null +++ b/statusHandler/mock/appStatusHandlerMock.go @@ -0,0 +1,99 @@ +package mock + +import "sync" + +type AppStatusHandlerMock struct { + mut sync.Mutex + data map[string]interface{} +} + +func NewAppStatusHandlerMock() *AppStatusHandlerMock { + return &AppStatusHandlerMock{ + data: make(map[string]interface{}), + } +} + +func (ashm *AppStatusHandlerMock) Increment(key string) { + ashm.mut.Lock() + defer ashm.mut.Unlock() + + v, ok := ashm.data[key] + if !ok { + return + } + + vUint64, ok := v.(uint64) + if !ok { + return + } + + ashm.data[key] = vUint64 + 1 +} + +func (ashm *AppStatusHandlerMock) AddUint64(key string, val uint64) { + ashm.mut.Lock() + defer ashm.mut.Unlock() + + v, ok := ashm.data[key] + if !ok { + return + } + + vUint64, ok := v.(uint64) + if !ok { + return + } + + ashm.data[key] = vUint64 + val +} + +func (ashm *AppStatusHandlerMock) Decrement(key string) { + ashm.mut.Lock() + defer ashm.mut.Unlock() + + v, ok := ashm.data[key] + if !ok { + return + } + + vUint64, ok := v.(uint64) + if !ok { + return + } + + ashm.data[key] = vUint64 - 1 +} + +func (ashm *AppStatusHandlerMock) SetInt64Value(key string, value int64) { + ashm.mut.Lock() + defer ashm.mut.Unlock() + + ashm.data[key] = value +} + +func (ashm *AppStatusHandlerMock) SetUInt64Value(key string, value uint64) { + ashm.mut.Lock() + defer ashm.mut.Unlock() + + ashm.data[key] = value +} + +func (ashm *AppStatusHandlerMock) SetStringValue(key string, value string) { + ashm.mut.Lock() + defer ashm.mut.Unlock() + + ashm.data[key] = value +} + +func (ashm *AppStatusHandlerMock) GetUint64(key string) uint64 { + ashm.mut.Lock() + defer ashm.mut.Unlock() 
+ + return ashm.data[key].(uint64) +} + +func (ashm *AppStatusHandlerMock) Close() {} + +func (ashm *AppStatusHandlerMock) IsInterfaceNil() bool { + return ashm == nil +} diff --git a/statusHandler/p2pQuota/export_test.go b/statusHandler/p2pQuota/export_test.go new file mode 100644 index 00000000000..c0877cfa294 --- /dev/null +++ b/statusHandler/p2pQuota/export_test.go @@ -0,0 +1,33 @@ +package p2pQuota + +type Quota struct { + *quota +} + +func (q *Quota) NumReceived() uint32 { + return q.numReceivedMessages +} + +func (q *Quota) SizeReceived() uint64 { + return q.sizeReceivedMessages +} + +func (q *Quota) NumProcessed() uint32 { + return q.numProcessedMessages +} + +func (q *Quota) SizeProcessed() uint64 { + return q.sizeProcessedMessages +} + +func (pqp *p2pQuotaProcessor) GetQuota(identifier string) *Quota { + pqp.mutStatistics.Lock() + q := pqp.statistics[identifier] + pqp.mutStatistics.Unlock() + + if q == nil { + return nil + } + + return &Quota{quota: q} +} diff --git a/statusHandler/p2pQuota/p2pQuotaProcessor.go b/statusHandler/p2pQuota/p2pQuotaProcessor.go new file mode 100644 index 00000000000..a1bc50f9db2 --- /dev/null +++ b/statusHandler/p2pQuota/p2pQuotaProcessor.go @@ -0,0 +1,135 @@ +package p2pQuota + +import ( + "sync" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/statusHandler" +) + +type quota struct { + numReceivedMessages uint32 + sizeReceivedMessages uint64 + numProcessedMessages uint32 + sizeProcessedMessages uint64 +} + +// p2pQuotaProcessor implements process.QuotaStatusHandler and is able to periodically sends to a +// statusHandler the processed p2p quota information +type p2pQuotaProcessor struct { + mutStatistics sync.Mutex + statistics map[string]*quota + topSumQuota *quota + topMaxQuota *quota + topNumReceivers uint64 + handler core.AppStatusHandler +} + +// NewP2pQuotaProcessor creates a new p2pQuotaProcessor instance +func 
NewP2pQuotaProcessor(handler core.AppStatusHandler) (*p2pQuotaProcessor, error) { + if check.IfNil(handler) { + return nil, statusHandler.ErrNilAppStatusHandler + } + + return &p2pQuotaProcessor{ + statistics: make(map[string]*quota), + topSumQuota: "a{}, + topMaxQuota: "a{}, + handler: handler, + }, nil +} + +// ResetStatistics output gathered statistics, process and prints them. After that it empties the statistics map +func (pqp *p2pQuotaProcessor) ResetStatistics() { + sumQuota := "a{} + maxQuota := "a{} + + pqp.mutStatistics.Lock() + defer pqp.mutStatistics.Unlock() + + for _, q := range pqp.statistics { + sumQuota.numReceivedMessages += q.numReceivedMessages + sumQuota.sizeReceivedMessages += q.sizeReceivedMessages + sumQuota.numProcessedMessages += q.numProcessedMessages + sumQuota.sizeProcessedMessages += q.sizeProcessedMessages + + maxQuota.numReceivedMessages = core.MaxUint32(maxQuota.numReceivedMessages, q.numReceivedMessages) + maxQuota.sizeReceivedMessages = core.MaxUint64(maxQuota.sizeReceivedMessages, q.sizeReceivedMessages) + maxQuota.numProcessedMessages = core.MaxUint32(maxQuota.numProcessedMessages, q.numProcessedMessages) + maxQuota.sizeProcessedMessages = core.MaxUint64(maxQuota.sizeProcessedMessages, q.sizeProcessedMessages) + } + + pqp.topMaxQuota.numReceivedMessages = core.MaxUint32(maxQuota.numReceivedMessages, pqp.topMaxQuota.numReceivedMessages) + pqp.topMaxQuota.sizeReceivedMessages = core.MaxUint64(maxQuota.sizeReceivedMessages, pqp.topMaxQuota.sizeReceivedMessages) + pqp.topMaxQuota.numProcessedMessages = core.MaxUint32(maxQuota.numProcessedMessages, pqp.topMaxQuota.numProcessedMessages) + pqp.topMaxQuota.sizeProcessedMessages = core.MaxUint64(maxQuota.sizeProcessedMessages, pqp.topMaxQuota.sizeProcessedMessages) + + pqp.topSumQuota.numReceivedMessages = core.MaxUint32(sumQuota.numReceivedMessages, pqp.topSumQuota.numReceivedMessages) + pqp.topSumQuota.sizeReceivedMessages = core.MaxUint64(sumQuota.sizeReceivedMessages, 
pqp.topSumQuota.sizeReceivedMessages) + pqp.topSumQuota.numProcessedMessages = core.MaxUint32(sumQuota.numProcessedMessages, pqp.topSumQuota.numProcessedMessages) + pqp.topSumQuota.sizeProcessedMessages = core.MaxUint64(sumQuota.sizeProcessedMessages, pqp.topSumQuota.sizeProcessedMessages) + + numPeers := uint64(len(pqp.statistics)) + pqp.topNumReceivers = core.MaxUint64(numPeers, pqp.topNumReceivers) + + pqp.moveStatisticsInAppStatusHandler(maxQuota, sumQuota, numPeers, pqp.topNumReceivers) + + pqp.statistics = make(map[string]*quota) +} + +func (pqp *p2pQuotaProcessor) moveStatisticsInAppStatusHandler( + maxQuota *quota, + sumQuota *quota, + numReceiverPeers uint64, + topNumReceiverPeers uint64, +) { + + pqp.handler.SetUInt64Value(core.MetricP2pSumNumReceivedMessages, uint64(sumQuota.numReceivedMessages)) + pqp.handler.SetUInt64Value(core.MetricP2pSumSizeReceivedMessages, sumQuota.sizeReceivedMessages) + pqp.handler.SetUInt64Value(core.MetricP2pSumNumProcessedMessages, uint64(sumQuota.numProcessedMessages)) + pqp.handler.SetUInt64Value(core.MetricP2pSumSizeProcessedMessages, sumQuota.sizeProcessedMessages) + + pqp.handler.SetUInt64Value(core.MetricP2pTopSumNumReceivedMessages, uint64(pqp.topSumQuota.numReceivedMessages)) + pqp.handler.SetUInt64Value(core.MetricP2pTopSumSizeReceivedMessages, pqp.topSumQuota.sizeReceivedMessages) + pqp.handler.SetUInt64Value(core.MetricP2pTopSumNumProcessedMessages, uint64(pqp.topSumQuota.numProcessedMessages)) + pqp.handler.SetUInt64Value(core.MetricP2pTopSumSizeProcessedMessages, pqp.topSumQuota.sizeProcessedMessages) + + pqp.handler.SetUInt64Value(core.MetricP2pMaxNumReceivedMessages, uint64(maxQuota.numReceivedMessages)) + pqp.handler.SetUInt64Value(core.MetricP2pMaxSizeReceivedMessages, maxQuota.sizeReceivedMessages) + pqp.handler.SetUInt64Value(core.MetricP2pMaxNumProcessedMessages, uint64(maxQuota.numProcessedMessages)) + pqp.handler.SetUInt64Value(core.MetricP2pMaxSizeProcessedMessages, maxQuota.sizeProcessedMessages) + + 
pqp.handler.SetUInt64Value(core.MetricP2pTopMaxNumReceivedMessages, uint64(pqp.topMaxQuota.numReceivedMessages)) + pqp.handler.SetUInt64Value(core.MetricP2pTopMaxSizeReceivedMessages, pqp.topMaxQuota.sizeReceivedMessages) + pqp.handler.SetUInt64Value(core.MetricP2pTopMaxNumProcessedMessages, uint64(pqp.topMaxQuota.numProcessedMessages)) + pqp.handler.SetUInt64Value(core.MetricP2pTopMaxSizeProcessedMessages, pqp.topMaxQuota.sizeProcessedMessages) + + pqp.handler.SetUInt64Value(core.MetricP2pNumReceiverPeers, numReceiverPeers) + pqp.handler.SetUInt64Value(core.MetricP2pTopNumReceiverPeers, topNumReceiverPeers) +} + +// AddQuota adds a quota statistics +func (pqp *p2pQuotaProcessor) AddQuota( + identifier string, + numReceived uint32, + sizeReceived uint64, + numProcessed uint32, + sizeProcessed uint64, +) { + q := &quota{ + numReceivedMessages: numReceived, + sizeReceivedMessages: sizeReceived, + numProcessedMessages: numProcessed, + sizeProcessedMessages: sizeProcessed, + } + + pqp.mutStatistics.Lock() + pqp.statistics[identifier] = q + pqp.mutStatistics.Unlock() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (pqp *p2pQuotaProcessor) IsInterfaceNil() bool { + return pqp == nil +} diff --git a/statusHandler/p2pQuota/p2pQuotaProcessor_test.go b/statusHandler/p2pQuota/p2pQuotaProcessor_test.go new file mode 100644 index 00000000000..ad16cf8dd55 --- /dev/null +++ b/statusHandler/p2pQuota/p2pQuotaProcessor_test.go @@ -0,0 +1,212 @@ +package p2pQuota_test + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/statusHandler" + "github.com/ElrondNetwork/elrond-go/statusHandler/mock" + "github.com/ElrondNetwork/elrond-go/statusHandler/p2pQuota" + "github.com/stretchr/testify/assert" +) + +func TestNewP2pQuotaProcessor_NilStatusHandlerShouldErr(t *testing.T) { + t.Parallel() + + pqp, err := p2pQuota.NewP2pQuotaProcessor(nil) + assert.True(t, 
check.IfNil(pqp)) + assert.Equal(t, statusHandler.ErrNilAppStatusHandler, err) +} + +func TestNewP2pQuotaProcessor_ShouldWork(t *testing.T) { + t.Parallel() + + pqp, err := p2pQuota.NewP2pQuotaProcessor(&mock.AppStatusHandlerStub{}) + assert.False(t, check.IfNil(pqp)) + assert.Nil(t, err) +} + +//------- AddQuota + +func TestP2pQuotaProcessor_AddQuotaShouldWork(t *testing.T) { + t.Parallel() + + pqp, _ := p2pQuota.NewP2pQuotaProcessor(&mock.AppStatusHandlerStub{}) + nonExistingIdentifier := "non existing identifier" + identifier := "identifier" + numReceived := uint32(1) + sizeReceived := uint64(2) + numProcessed := uint32(3) + sizeProcessed := uint64(4) + + pqp.AddQuota(identifier, numReceived, sizeReceived, numProcessed, sizeProcessed) + + nonExistentQuota := pqp.GetQuota(nonExistingIdentifier) + assert.Nil(t, nonExistentQuota) + + quota := pqp.GetQuota(identifier) + assert.Equal(t, numReceived, quota.NumReceived()) + assert.Equal(t, sizeReceived, quota.SizeReceived()) + assert.Equal(t, numProcessed, quota.NumProcessed()) + assert.Equal(t, sizeProcessed, quota.SizeProcessed()) +} + +//------- ResetStatistics + +func TestP2pQuotaProcessor_ResetStatisticsShouldEmptyStatsAndCallSetOnAllMetrics(t *testing.T) { + t.Parallel() + + identifier1 := "identifier" + numReceived1 := uint64(1) + sizeReceived1 := uint64(2) + numProcessed1 := uint64(3) + sizeProcessed1 := uint64(4) + + status := mock.NewAppStatusHandlerMock() + pqp, _ := p2pQuota.NewP2pQuotaProcessor(status) + pqp.AddQuota(identifier1, uint32(numReceived1), sizeReceived1, uint32(numProcessed1), sizeProcessed1) + + pqp.ResetStatistics() + + assert.Nil(t, pqp.GetQuota(identifier1)) + + numReceivers := uint64(1) + checkSumMetrics(t, status, numReceived1, sizeReceived1, numProcessed1, sizeProcessed1) + checkTopSumMetrics(t, status, numReceived1, sizeReceived1, numProcessed1, sizeProcessed1) + checkMaxMetrics(t, status, numReceived1, sizeReceived1, numProcessed1, sizeProcessed1) + checkTopMaxMetrics(t, status, 
numReceived1, sizeReceived1, numProcessed1, sizeProcessed1) + checkNumReceivers(t, status, numReceivers, numReceivers) +} + +func TestP2pQuotaProcessor_ResetStatisticsShouldSetTops(t *testing.T) { + t.Parallel() + + identifier1 := "identifier" + numReceived1 := uint64(1) + sizeReceived1 := uint64(2) + numProcessed1 := uint64(3) + sizeProcessed1 := uint64(4) + + identifier2 := "identifier" + numReceived2 := uint64(10) + sizeReceived2 := uint64(20) + numProcessed2 := uint64(30) + sizeProcessed2 := uint64(40) + + status := mock.NewAppStatusHandlerMock() + pqp, _ := p2pQuota.NewP2pQuotaProcessor(status) + pqp.AddQuota(identifier1, uint32(numReceived1), sizeReceived1, uint32(numProcessed1), sizeProcessed1) + pqp.ResetStatistics() + pqp.AddQuota(identifier2, uint32(numReceived2), sizeReceived2, uint32(numProcessed2), sizeProcessed2) + + pqp.ResetStatistics() + + numReceivers := uint64(1) + checkSumMetrics(t, status, numReceived2, sizeReceived2, numProcessed2, sizeProcessed2) + checkTopSumMetrics(t, status, numReceived2, sizeReceived2, numProcessed2, sizeProcessed2) + checkMaxMetrics(t, status, numReceived2, sizeReceived2, numProcessed2, sizeProcessed2) + checkTopMaxMetrics(t, status, numReceived2, sizeReceived2, numProcessed2, sizeProcessed2) + checkNumReceivers(t, status, numReceivers, numReceivers) +} + +func checkSumMetrics( + t *testing.T, + status *mock.AppStatusHandlerMock, + numReceived uint64, + sizeReceived uint64, + numProcessed uint64, + sizeProcessed uint64, +) { + + value := status.GetUint64(core.MetricP2pSumNumReceivedMessages) + assert.Equal(t, value, numReceived) + + value = status.GetUint64(core.MetricP2pSumSizeReceivedMessages) + assert.Equal(t, value, sizeReceived) + + value = status.GetUint64(core.MetricP2pSumNumProcessedMessages) + assert.Equal(t, value, numProcessed) + + value = status.GetUint64(core.MetricP2pSumSizeProcessedMessages) + assert.Equal(t, value, sizeProcessed) +} + +func checkTopSumMetrics( + t *testing.T, + status 
*mock.AppStatusHandlerMock, + numReceived uint64, + sizeReceived uint64, + numProcessed uint64, + sizeProcessed uint64, +) { + + value := status.GetUint64(core.MetricP2pTopSumNumReceivedMessages) + assert.Equal(t, value, numReceived) + + value = status.GetUint64(core.MetricP2pTopSumSizeReceivedMessages) + assert.Equal(t, value, sizeReceived) + + value = status.GetUint64(core.MetricP2pTopSumNumProcessedMessages) + assert.Equal(t, value, numProcessed) + + value = status.GetUint64(core.MetricP2pTopSumSizeProcessedMessages) + assert.Equal(t, value, sizeProcessed) +} + +func checkMaxMetrics( + t *testing.T, + status *mock.AppStatusHandlerMock, + numReceived uint64, + sizeReceived uint64, + numProcessed uint64, + sizeProcessed uint64, +) { + + value := status.GetUint64(core.MetricP2pMaxNumReceivedMessages) + assert.Equal(t, value, numReceived) + + value = status.GetUint64(core.MetricP2pMaxSizeReceivedMessages) + assert.Equal(t, value, sizeReceived) + + value = status.GetUint64(core.MetricP2pMaxNumProcessedMessages) + assert.Equal(t, value, numProcessed) + + value = status.GetUint64(core.MetricP2pMaxSizeProcessedMessages) + assert.Equal(t, value, sizeProcessed) +} + +func checkTopMaxMetrics( + t *testing.T, + status *mock.AppStatusHandlerMock, + numReceived uint64, + sizeReceived uint64, + numProcessed uint64, + sizeProcessed uint64, +) { + + value := status.GetUint64(core.MetricP2pTopMaxNumReceivedMessages) + assert.Equal(t, value, numReceived) + + value = status.GetUint64(core.MetricP2pTopMaxSizeReceivedMessages) + assert.Equal(t, value, sizeReceived) + + value = status.GetUint64(core.MetricP2pTopMaxNumProcessedMessages) + assert.Equal(t, value, numProcessed) + + value = status.GetUint64(core.MetricP2pTopMaxSizeProcessedMessages) + assert.Equal(t, value, sizeProcessed) +} + +func checkNumReceivers( + t *testing.T, + status *mock.AppStatusHandlerMock, + numReceiverPeers uint64, + topNumReceiverPeers uint64, +) { + value := status.GetUint64(core.MetricP2pNumReceiverPeers) 
+ assert.Equal(t, value, numReceiverPeers) + + value = status.GetUint64(core.MetricP2pTopNumReceiverPeers) + assert.Equal(t, value, topNumReceiverPeers) +} diff --git a/statusHandler/quotaStatusHandler/printQuotaStatusHandler.go b/statusHandler/quotaStatusHandler/printQuotaStatusHandler.go deleted file mode 100644 index 155c7cc1d03..00000000000 --- a/statusHandler/quotaStatusHandler/printQuotaStatusHandler.go +++ /dev/null @@ -1,126 +0,0 @@ -package quotaStatusHandler - -import ( - "math" - "sync" - - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/logger" -) - -var log = logger.GetOrCreate("statushandler/quotastatushandler") - -type quota struct { - numReceivedMessages uint32 - sizeReceivedMessages uint64 - numProcessedMessages uint32 - sizeProcessedMessages uint64 -} - -//TODO replace this structure with a new one that can output on a route the statistics measured -//TODO add tests -// printQuotaStatusHandler implements process.QuotaStatusHandler and is able to periodically print peer connection statistics -type printQuotaStatusHandler struct { - mutStatistics sync.Mutex - statistics map[string]*quota -} - -// NewPrintQuotaStatusHandler creates a new NewPrintQuotaStatusHandler instance -func NewPrintQuotaStatusHandler() *printQuotaStatusHandler { - return &printQuotaStatusHandler{ - statistics: make(map[string]*quota), - } -} - -// ResetStatistics output gathered statistics, process and prints them. 
After that it empties the statistics map -func (pqsh *printQuotaStatusHandler) ResetStatistics() { - minQuota := &quota{ - numReceivedMessages: math.MaxUint32, - sizeReceivedMessages: math.MaxUint64, - numProcessedMessages: math.MaxUint32, - sizeProcessedMessages: math.MaxUint64, - } - sumQuota := &quota{} - maxQuota := &quota{} - - pqsh.mutStatistics.Lock() - defer pqsh.mutStatistics.Unlock() - - numStatistics := len(pqsh.statistics) - - if numStatistics == 0 { - log.Trace("empty quota statistics") - return - } - - for name, q := range pqsh.statistics { - sumQuota.numReceivedMessages += q.numReceivedMessages - sumQuota.sizeReceivedMessages += q.sizeReceivedMessages - sumQuota.numProcessedMessages += q.numProcessedMessages - sumQuota.sizeProcessedMessages += q.sizeProcessedMessages - - minQuota.numReceivedMessages = core.MinUint32(minQuota.numReceivedMessages, q.numReceivedMessages) - minQuota.sizeReceivedMessages = core.MinUint64(minQuota.sizeReceivedMessages, q.sizeReceivedMessages) - minQuota.numProcessedMessages = core.MinUint32(minQuota.numProcessedMessages, q.numProcessedMessages) - minQuota.sizeProcessedMessages = core.MinUint64(minQuota.sizeProcessedMessages, q.sizeProcessedMessages) - - maxQuota.numReceivedMessages = core.MaxUint32(maxQuota.numReceivedMessages, q.numReceivedMessages) - maxQuota.sizeReceivedMessages = core.MaxUint64(maxQuota.sizeReceivedMessages, q.sizeReceivedMessages) - maxQuota.numProcessedMessages = core.MaxUint32(maxQuota.numProcessedMessages, q.numProcessedMessages) - maxQuota.sizeProcessedMessages = core.MaxUint64(maxQuota.sizeProcessedMessages, q.sizeProcessedMessages) - log.Trace("peer quota statistics", - "peer", name, - "num received msg", q.numReceivedMessages, - "size received", core.ConvertBytes(q.sizeReceivedMessages), - "num processed msg", q.numProcessedMessages, - "size processed", core.ConvertBytes(q.sizeProcessedMessages), - ) - } - - log.Trace("quota statistics", "num peers", numStatistics) - log.Trace("min quota statistics / 
peer", - "num received msg", minQuota.numReceivedMessages, - "size received", core.ConvertBytes(minQuota.sizeReceivedMessages), - "num processed msg", minQuota.numProcessedMessages, - "size processed", core.ConvertBytes(minQuota.sizeProcessedMessages), - ) - log.Trace("avg quota statistics / peer", - "num received msg", sumQuota.numReceivedMessages/uint32(numStatistics), - "size received", core.ConvertBytes(sumQuota.sizeReceivedMessages/uint64(numStatistics)), - "num processed msg", sumQuota.numProcessedMessages/uint32(numStatistics), - "size processed", core.ConvertBytes(sumQuota.sizeProcessedMessages/uint64(numStatistics)), - ) - log.Trace("max quota statistics / peer", - "num received msg", maxQuota.numReceivedMessages, - "size received", core.ConvertBytes(maxQuota.sizeReceivedMessages), - "num processed msg", maxQuota.numProcessedMessages, - "size processed", core.ConvertBytes(maxQuota.sizeProcessedMessages), - ) - log.Trace("total quota statistics / network", - "num received msg", sumQuota.numReceivedMessages, - "size received", core.ConvertBytes(sumQuota.sizeReceivedMessages), - "num processed msg", sumQuota.numProcessedMessages, - "size processed", core.ConvertBytes(sumQuota.sizeProcessedMessages), - ) -} - -// AddQuota adds a quota statistics -func (pqsh *printQuotaStatusHandler) AddQuota(identifier string, numReceivedMessages uint32, sizeReceivedMessages uint64, - numProcessedMessages uint32, sizeProcessedMessages uint64) { - - q := "a{ - numReceivedMessages: numReceivedMessages, - sizeReceivedMessages: sizeReceivedMessages, - numProcessedMessages: numProcessedMessages, - sizeProcessedMessages: sizeProcessedMessages, - } - - pqsh.mutStatistics.Lock() - pqsh.statistics[identifier] = q - pqsh.mutStatistics.Unlock() -} - -// IsInterfaceNil returns true if there is no value under the interface -func (pqsh *printQuotaStatusHandler) IsInterfaceNil() bool { - return pqsh == nil -} From 958dd3cc60c88ff9fd94949c88c0bc352de1148b Mon Sep 17 00:00:00 2001 From: 
iulianpascalau Date: Mon, 16 Dec 2019 15:04:03 +0200 Subject: [PATCH 19/35] changed antiflood parameters to high values to permit observations --- cmd/node/config/config.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml index 94699559f46..67d4cca98f5 100644 --- a/cmd/node/config/config.toml +++ b/cmd/node/config/config.toml @@ -233,8 +233,8 @@ Type = "LRU" [Antiflood] - PeerMaxMessagesPerSecond = 10 - PeerMaxTotalSizePerSecond = 10000 + PeerMaxMessagesPerSecond = 1000000 + PeerMaxTotalSizePerSecond = 100000000 [Antiflood.Cache] Size = 5000 Type = "LRU" From fdf33d2a35d9b82ad2d2a281561ed6feffa78b20 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Mon, 16 Dec 2019 18:53:44 +0200 Subject: [PATCH 20/35] added 2 more parameters on quotaFloodPreventer.go, tuned the antiflood parameters --- cmd/node/config/config.toml | 6 +- cmd/node/factory/structs.go | 4 + config/config.go | 2 + process/throttle/antiflood/export_test.go | 8 + .../throttle/antiflood/quotaFloodPreventer.go | 72 +++++-- .../antiflood/quotaFloodPreventer_test.go | 175 +++++++++++++++++- 6 files changed, 237 insertions(+), 30 deletions(-) create mode 100644 process/throttle/antiflood/export_test.go diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml index 67d4cca98f5..f2afb22fe37 100644 --- a/cmd/node/config/config.toml +++ b/cmd/node/config/config.toml @@ -233,8 +233,10 @@ Type = "LRU" [Antiflood] - PeerMaxMessagesPerSecond = 1000000 - PeerMaxTotalSizePerSecond = 100000000 + PeerMaxMessagesPerSecond = 68 + PeerMaxTotalSizePerSecond = 2000000 + MaxMessagesPerSecond = 400 + MaxTotalSizePerSecond = 9000000 [Antiflood.Cache] Size = 5000 Type = "LRU" diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index 5aac372bed6..8fb73395bf3 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -469,6 +469,8 @@ func createAntifloodComponent(mainConfig *config.Config, status 
core.AppStatusHa maxMessagesPerPeer := mainConfig.Antiflood.PeerMaxMessagesPerSecond maxTotalSizePerPeer := mainConfig.Antiflood.PeerMaxTotalSizePerSecond + maxMessages := mainConfig.Antiflood.MaxMessagesPerSecond + maxTotalSize := mainConfig.Antiflood.MaxTotalSizePerSecond quotaProcessor, err := p2pQuota.NewP2pQuotaProcessor(status) if err != nil { @@ -480,6 +482,8 @@ func createAntifloodComponent(mainConfig *config.Config, status core.AppStatusHa quotaProcessor, maxMessagesPerPeer, maxTotalSizePerPeer, + maxMessages, + maxTotalSize, ) if err != nil { return nil, err diff --git a/config/config.go b/config/config.go index 0a9bb73981e..3083eb0b67e 100644 --- a/config/config.go +++ b/config/config.go @@ -187,4 +187,6 @@ type AntifloodConfig struct { Cache CacheConfig PeerMaxMessagesPerSecond uint32 PeerMaxTotalSizePerSecond uint64 + MaxMessagesPerSecond uint32 + MaxTotalSizePerSecond uint64 } diff --git a/process/throttle/antiflood/export_test.go b/process/throttle/antiflood/export_test.go new file mode 100644 index 00000000000..914f925da36 --- /dev/null +++ b/process/throttle/antiflood/export_test.go @@ -0,0 +1,8 @@ +package antiflood + +func (qfp *quotaFloodPreventer) SetGlobalQuotaValues(maxMessages uint32, size uint64) { + qfp.mutOperation.Lock() + qfp.globalQuota.numReceivedMessages = maxMessages + qfp.globalQuota.sizeReceivedMessages = size + qfp.mutOperation.Unlock() +} diff --git a/process/throttle/antiflood/quotaFloodPreventer.go b/process/throttle/antiflood/quotaFloodPreventer.go index 70fa69f5fad..03c90ff1e6b 100644 --- a/process/throttle/antiflood/quotaFloodPreventer.go +++ b/process/throttle/antiflood/quotaFloodPreventer.go @@ -22,11 +22,14 @@ type quota struct { // quotaFloodPreventer represents a cache of quotas per peer used in antiflooding mechanism type quotaFloodPreventer struct { - mutOperation *sync.RWMutex - cacher storage.Cacher - statusHandler QuotaStatusHandler - maxMessages uint32 - maxSize uint64 + mutOperation *sync.RWMutex + cacher 
storage.Cacher + statusHandler QuotaStatusHandler + maxMessagesPerPeer uint32 + maxSizePerPeer uint64 + maxMessages uint32 + maxSize uint64 + globalQuota *quota } // NewQuotaFloodPreventer creates a new flood preventer based on quota / peer @@ -35,6 +38,8 @@ func NewQuotaFloodPreventer( statusHandler QuotaStatusHandler, maxMessagesPerPeer uint32, maxTotalSizePerPeer uint64, + maxMessages uint32, + maxTotalSize uint64, ) (*quotaFloodPreventer, error) { if check.IfNil(cacher) { @@ -44,26 +49,43 @@ func NewQuotaFloodPreventer( return nil, process.ErrNilQuotaStatusHandler } if maxMessagesPerPeer < minMessages { - return nil, fmt.Errorf("%w raised in NewCountersMap, maxMessages: provided %d, minimum %d", + return nil, fmt.Errorf("%w raised in NewCountersMap, maxMessagesPerPeer: provided %d, minimum %d", process.ErrInvalidValue, maxMessagesPerPeer, minMessages, ) } if maxTotalSizePerPeer < minTotalSize { + return nil, fmt.Errorf("%w raised in NewCountersMap, maxTotalSizePerPeer: provided %d, minimum %d", + process.ErrInvalidValue, + maxTotalSize, + minTotalSize, + ) + } + if maxMessages < minMessages { + return nil, fmt.Errorf("%w raised in NewCountersMap, maxMessages: provided %d, minimum %d", + process.ErrInvalidValue, + maxMessagesPerPeer, + minMessages, + ) + } + if maxTotalSize < minTotalSize { return nil, fmt.Errorf("%w raised in NewCountersMap, maxTotalSize: provided %d, minimum %d", process.ErrInvalidValue, - maxTotalSizePerPeer, + maxTotalSize, minTotalSize, ) } return &quotaFloodPreventer{ - mutOperation: &sync.RWMutex{}, - cacher: cacher, - statusHandler: statusHandler, - maxMessages: maxMessagesPerPeer, - maxSize: maxTotalSizePerPeer, + mutOperation: &sync.RWMutex{}, + cacher: cacher, + statusHandler: statusHandler, + maxMessagesPerPeer: maxMessagesPerPeer, + maxSizePerPeer: maxTotalSizePerPeer, + maxMessages: maxMessages, + maxSize: maxTotalSize, + globalQuota: &quota{}, }, nil } @@ -75,6 +97,15 @@ func (qfp *quotaFloodPreventer) Increment(identifier string, size 
uint64) bool { qfp.mutOperation.Lock() defer qfp.mutOperation.Unlock() + qfp.globalQuota.numReceivedMessages++ + qfp.globalQuota.sizeReceivedMessages += size + + isGlobalQuotaReached := qfp.globalQuota.numReceivedMessages > qfp.maxMessages || + qfp.globalQuota.sizeReceivedMessages > qfp.maxSize + if isGlobalQuotaReached { + return false + } + valueQuota, ok := qfp.cacher.Get([]byte(identifier)) if !ok { qfp.putDefaultQuota(identifier, size) @@ -91,16 +122,18 @@ func (qfp *quotaFloodPreventer) Increment(identifier string, size uint64) bool { q.numReceivedMessages++ q.sizeReceivedMessages += size - isQuotaReached := q.numReceivedMessages > qfp.maxMessages || q.sizeReceivedMessages > qfp.maxSize - if !isQuotaReached { - qfp.cacher.Put([]byte(identifier), q) - q.numProcessedMessages++ - q.sizeProcessedMessages += size - return true + isPeerQuotaReached := q.numReceivedMessages > qfp.maxMessagesPerPeer || + q.sizeReceivedMessages > qfp.maxSizePerPeer + if isPeerQuotaReached { + return false } - return false + qfp.cacher.Put([]byte(identifier), q) + q.numProcessedMessages++ + q.sizeProcessedMessages += size + + return true } func (qfp *quotaFloodPreventer) putDefaultQuota(identifier string, size uint64) { @@ -122,6 +155,7 @@ func (qfp *quotaFloodPreventer) Reset() { //TODO change this if cacher.Clear() is time consuming qfp.cacher.Clear() + qfp.globalQuota = &quota{} } // createStatistics is useful to benchmark the system when running diff --git a/process/throttle/antiflood/quotaFloodPreventer_test.go b/process/throttle/antiflood/quotaFloodPreventer_test.go index 9a1784d1949..d2a66cce3dd 100644 --- a/process/throttle/antiflood/quotaFloodPreventer_test.go +++ b/process/throttle/antiflood/quotaFloodPreventer_test.go @@ -24,16 +24,78 @@ func createMockQuotaStatusHandler() *mock.QuotaStatusHandlerStub { func TestNewQuotaFloodPreventer_NilCacherShouldErr(t *testing.T) { t.Parallel() - qfp, err := NewQuotaFloodPreventer(nil, &mock.QuotaStatusHandlerStub{}, minMessages, 
minTotalSize) + qfp, err := NewQuotaFloodPreventer( + nil, + &mock.QuotaStatusHandlerStub{}, + minMessages, + minTotalSize, + minMessages, + minTotalSize, + ) assert.True(t, check.IfNil(qfp)) assert.Equal(t, process.ErrNilCacher, err) } +func TestNewQuotaFloodPreventer_NilStatusHandlerShouldErr(t *testing.T) { + t.Parallel() + + qfp, err := NewQuotaFloodPreventer( + &mock.CacherStub{}, + nil, + minMessages, + minTotalSize, + minMessages, + minTotalSize, + ) + + assert.True(t, check.IfNil(qfp)) + assert.Equal(t, process.ErrNilQuotaStatusHandler, err) +} + +func TestNewQuotaFloodPreventer_LowerMinMessagesPerPeerShouldErr(t *testing.T) { + t.Parallel() + + qfp, err := NewQuotaFloodPreventer( + &mock.CacherStub{}, + &mock.QuotaStatusHandlerStub{}, + minMessages-1, + minTotalSize, + minMessages, + minTotalSize, + ) + + assert.True(t, check.IfNil(qfp)) + assert.True(t, errors.Is(err, process.ErrInvalidValue)) +} + +func TestNewQuotaFloodPreventer_LowerMinSizePerPeerShouldErr(t *testing.T) { + t.Parallel() + + qfp, err := NewQuotaFloodPreventer( + &mock.CacherStub{}, + &mock.QuotaStatusHandlerStub{}, + minMessages, + minTotalSize-1, + minMessages, + minTotalSize, + ) + + assert.True(t, check.IfNil(qfp)) + assert.True(t, errors.Is(err, process.ErrInvalidValue)) +} + func TestNewQuotaFloodPreventer_LowerMinMessagesShouldErr(t *testing.T) { t.Parallel() - qfp, err := NewQuotaFloodPreventer(&mock.CacherStub{}, &mock.QuotaStatusHandlerStub{}, minMessages-1, minTotalSize) + qfp, err := NewQuotaFloodPreventer( + &mock.CacherStub{}, + &mock.QuotaStatusHandlerStub{}, + minMessages, + minTotalSize, + minMessages-1, + minTotalSize, + ) assert.True(t, check.IfNil(qfp)) assert.True(t, errors.Is(err, process.ErrInvalidValue)) @@ -42,7 +104,14 @@ func TestNewQuotaFloodPreventer_LowerMinMessagesShouldErr(t *testing.T) { func TestNewQuotaFloodPreventer_LowerMinSizeShouldErr(t *testing.T) { t.Parallel() - qfp, err := NewQuotaFloodPreventer(&mock.CacherStub{}, 
&mock.QuotaStatusHandlerStub{}, minMessages, minTotalSize-1) + qfp, err := NewQuotaFloodPreventer( + &mock.CacherStub{}, + &mock.QuotaStatusHandlerStub{}, + minMessages, + minTotalSize, + minMessages, + minTotalSize-1, + ) assert.True(t, check.IfNil(qfp)) assert.True(t, errors.Is(err, process.ErrInvalidValue)) @@ -51,7 +120,14 @@ func TestNewQuotaFloodPreventer_LowerMinSizeShouldErr(t *testing.T) { func TestNewQuotaFloodPreventer_ShouldWork(t *testing.T) { t.Parallel() - qfp, err := NewQuotaFloodPreventer(&mock.CacherStub{}, &mock.QuotaStatusHandlerStub{}, minMessages, minTotalSize) + qfp, err := NewQuotaFloodPreventer( + &mock.CacherStub{}, + &mock.QuotaStatusHandlerStub{}, + minMessages, + minTotalSize, + minMessages, + minTotalSize, + ) assert.False(t, check.IfNil(qfp)) assert.Nil(t, err) @@ -84,6 +160,8 @@ func TestNewQuotaFloodPreventer_IncrementIdentifierNotPresentPutQuotaAndReturnTr createMockQuotaStatusHandler(), minMessages*4, minTotalSize*10, + minMessages*4, + minTotalSize*10, ) ok := qfp.Increment("identifier", size) @@ -117,6 +195,8 @@ func TestNewQuotaFloodPreventer_IncrementNotQuotaSavedInCacheShouldPutQuotaAndRe createMockQuotaStatusHandler(), minMessages*4, minTotalSize*10, + minMessages*4, + minTotalSize*10, ) ok := qfp.Increment("identifier", size) @@ -156,6 +236,8 @@ func TestNewQuotaFloodPreventer_IncrementUnderMaxValuesShouldIncrementAndReturnT createMockQuotaStatusHandler(), minMessages*4, minTotalSize*10, + minMessages*4, + minTotalSize*10, ) ok := qfp.Increment("identifier", size) @@ -164,7 +246,9 @@ func TestNewQuotaFloodPreventer_IncrementUnderMaxValuesShouldIncrementAndReturnT assert.True(t, putWasCalled) } -func TestNewQuotaFloodPreventer_IncrementOverMaxNumMessagesShouldNotPutAndReturnFalse(t *testing.T) { +//------- Increment per peer + +func TestNewQuotaFloodPreventer_IncrementOverMaxPeerNumMessagesShouldNotPutAndReturnFalse(t *testing.T) { t.Parallel() existingMessages := uint32(minMessages + 11) @@ -187,6 +271,8 @@ func 
TestNewQuotaFloodPreventer_IncrementOverMaxNumMessagesShouldNotPutAndReturn createMockQuotaStatusHandler(), minMessages*4, minTotalSize*10, + minMessages*4, + minTotalSize*10, ) ok := qfp.Increment("identifier", minTotalSize) @@ -194,7 +280,7 @@ func TestNewQuotaFloodPreventer_IncrementOverMaxNumMessagesShouldNotPutAndReturn assert.False(t, ok) } -func TestNewQuotaFloodPreventer_IncrementOverMaxSizeShouldNotPutAndReturnFalse(t *testing.T) { +func TestNewQuotaFloodPreventer_IncrementOverMaxPeerSizeShouldNotPutAndReturnFalse(t *testing.T) { t.Parallel() existingMessages := uint32(minMessages) @@ -217,6 +303,8 @@ func TestNewQuotaFloodPreventer_IncrementOverMaxSizeShouldNotPutAndReturnFalse(t createMockQuotaStatusHandler(), minMessages*4, minTotalSize*10, + minMessages*4, + minTotalSize*10, ) ok := qfp.Increment("identifier", minTotalSize) @@ -224,15 +312,78 @@ func TestNewQuotaFloodPreventer_IncrementOverMaxSizeShouldNotPutAndReturnFalse(t assert.False(t, ok) } +//------- Increment globally + +func TestNewQuotaFloodPreventer_IncrementOverMaxNumMessagesShouldNotPutAndReturnFalse(t *testing.T) { + t.Parallel() + + globalMessages := uint32(minMessages + 11) + globalSize := uint64(minTotalSize * 3) + qfp, _ := NewQuotaFloodPreventer( + &mock.CacherStub{ + GetCalled: func(key []byte) (value interface{}, ok bool) { + return nil, false + }, + PutCalled: func(key []byte, value interface{}) (evicted bool) { + assert.Fail(t, "should have not called put") + + return false + }, + }, + createMockQuotaStatusHandler(), + minMessages*4, + minTotalSize*10, + minMessages*4, + minTotalSize*10, + ) + qfp.SetGlobalQuotaValues(globalMessages, globalSize) + + ok := qfp.Increment("identifier", minTotalSize) + + assert.False(t, ok) +} + +func TestNewQuotaFloodPreventer_IncrementOverMaxSizeShouldNotPutAndReturnFalse(t *testing.T) { + t.Parallel() + + globalMessages := uint32(minMessages) + globalSize := uint64(minTotalSize * 11) + qfp, _ := NewQuotaFloodPreventer( + &mock.CacherStub{ + 
GetCalled: func(key []byte) (value interface{}, ok bool) { + return nil, false + }, + PutCalled: func(key []byte, value interface{}) (evicted bool) { + assert.Fail(t, "should have not called put") + + return false + }, + }, + createMockQuotaStatusHandler(), + minMessages*4, + minTotalSize*10, + minMessages*4, + minTotalSize*10, + ) + qfp.SetGlobalQuotaValues(globalMessages, globalSize) + + ok := qfp.Increment("identifier", minTotalSize) + + assert.False(t, ok) +} + func TestCountersMap_IncrementShouldWorkConcurrently(t *testing.T) { t.Parallel() + numIterations := 1000 qfp, _ := NewQuotaFloodPreventer( mock.NewCacherMock(), createMockQuotaStatusHandler(), minMessages, - minTotalSize) - numIterations := 1000 + minTotalSize, + minMessages*uint32(numIterations), + minTotalSize*uint64(numIterations), + ) wg := sync.WaitGroup{} wg.Add(numIterations) for i := 0; i < numIterations; i++ { @@ -264,6 +415,8 @@ func TestCountersMap_ResetShouldCallCacherClear(t *testing.T) { createMockQuotaStatusHandler(), minTotalSize, minMessages, + minTotalSize, + minMessages, ) qfp.Reset() @@ -327,6 +480,8 @@ func TestCountersMap_ResetShouldCallQuotaStatus(t *testing.T) { }, minTotalSize, minMessages, + minTotalSize, + minMessages, ) qfp.Reset() @@ -339,13 +494,15 @@ func TestCountersMap_ResetShouldCallQuotaStatus(t *testing.T) { func TestCountersMap_IncrementAndResetShouldWorkConcurrently(t *testing.T) { t.Parallel() + numIterations := 1000 qfp, _ := NewQuotaFloodPreventer( mock.NewCacherMock(), createMockQuotaStatusHandler(), minMessages, minTotalSize, + minTotalSize*uint32(numIterations), + minMessages*uint64(numIterations), ) - numIterations := 1000 wg := sync.WaitGroup{} wg.Add(numIterations + numIterations/10) for i := 0; i < numIterations; i++ { From 948bcaaf308e0443e741fafc51cdb2608027a6e2 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Mon, 16 Dec 2019 18:59:16 +0200 Subject: [PATCH 21/35] added log print for antiflood parameters rounded antiflood parameters --- 
cmd/node/config/config.toml | 4 ++-- cmd/node/factory/structs.go | 7 +++++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml index f2afb22fe37..4d4b9e26875 100644 --- a/cmd/node/config/config.toml +++ b/cmd/node/config/config.toml @@ -234,9 +234,9 @@ [Antiflood] PeerMaxMessagesPerSecond = 68 - PeerMaxTotalSizePerSecond = 2000000 + PeerMaxTotalSizePerSecond = 2097152 MaxMessagesPerSecond = 400 - MaxTotalSizePerSecond = 9000000 + MaxTotalSizePerSecond = 9437184 [Antiflood.Cache] Size = 5000 Type = "LRU" diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index 8fb73395bf3..04f609eac29 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -472,6 +472,13 @@ func createAntifloodComponent(mainConfig *config.Config, status core.AppStatusHa maxMessages := mainConfig.Antiflood.MaxMessagesPerSecond maxTotalSize := mainConfig.Antiflood.MaxTotalSizePerSecond + log.Debug("started antiflood component", + "maxMessagesPerPeer", maxMessagesPerPeer, + "maxTotalSizePerPeer", core.ConvertBytes(maxTotalSizePerPeer), + "maxMessages", maxMessages, + "maxTotalSize", core.ConvertBytes(maxTotalSize), + ) + quotaProcessor, err := p2pQuota.NewP2pQuotaProcessor(status) if err != nil { return nil, err From 8b4f21da0e5f31d159f72037a5693b11c004ba7c Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Tue, 17 Dec 2019 17:57:03 +0200 Subject: [PATCH 22/35] renamed core.p2pConstants, added comments. 
fixed antiflooding integration tests --- core/p2pConstants.go | 83 ++++++++++---- .../p2p/antiflood/antiflooding_test.go | 57 ++++++++-- statusHandler/p2pQuota/p2pQuotaProcessor.go | 102 +++++++++--------- .../p2pQuota/p2pQuotaProcessor_test.go | 34 +++--- 4 files changed, 179 insertions(+), 97 deletions(-) diff --git a/core/p2pConstants.go b/core/p2pConstants.go index afe4088525b..a5c5ff46be7 100644 --- a/core/p2pConstants.go +++ b/core/p2pConstants.go @@ -1,26 +1,73 @@ package core -//TODO comment these +// MetricP2pNetworkNumReceivedMessages represents the current total network number of received messages in the amount +// of time. It represents the sum of all received messages from all connected peers +const MetricP2pNetworkNumReceivedMessages = "erd_p2p_network_num_received_messages" -const MetricP2pSumNumReceivedMessages = "erd_p2p_sum_num_received_messages" -const MetricP2pSumSizeReceivedMessages = "erd_p2p_sum_size_received_messages" -const MetricP2pSumNumProcessedMessages = "erd_p2p_sum_num_processed_messages" -const MetricP2pSumSizeProcessedMessages = "erd_p2p_sum_size_processed_messages" +// MetricP2pNetworkSizeReceivedMessages represents the current total network size of received messages in the amount +// of time. It represents the sum of all received messages from all connected peers +const MetricP2pNetworkSizeReceivedMessages = "erd_p2p_network_size_received_messages" -const MetricP2pTopSumNumReceivedMessages = "erd_p2p_top_sum_num_received_messages" -const MetricP2pTopSumSizeReceivedMessages = "erd_p2p_top_sum_size_received_messages" -const MetricP2pTopSumNumProcessedMessages = "erd_p2p_top_sum_num_processed_messages" -const MetricP2pTopSumSizeProcessedMessages = "erd_p2p_top_sum_size_processed_messages" +// MetricP2pNetworkNumProcessedMessages represents the current total network number of processed messages in the amount +// of time. 
It represents the sum of all processed messages from all connected peers +const MetricP2pNetworkNumProcessedMessages = "erd_p2p_network_num_processed_messages" -const MetricP2pMaxNumReceivedMessages = "erd_p2p_max_num_received_messages" -const MetricP2pMaxSizeReceivedMessages = "erd_p2p_max_size_received_messages" -const MetricP2pMaxNumProcessedMessages = "erd_p2p_max_num_processed_messages" -const MetricP2pMaxSizeProcessedMessages = "erd_p2p_max_size_processed_messages" +// MetricP2pNetworkSizeProcessedMessages represents the current total network size of processed messages in the amount +// of time. It represents the sum of all processed messages from all connected peers +const MetricP2pNetworkSizeProcessedMessages = "erd_p2p_network_size_processed_messages" -const MetricP2pTopMaxNumReceivedMessages = "erd_p2p_top_max_num_received_messages" -const MetricP2pTopMaxSizeReceivedMessages = "erd_p2p_top_max_size_received_messages" -const MetricP2pTopMaxNumProcessedMessages = "erd_p2p_top_max_num_processed_messages" -const MetricP2pTopMaxSizeProcessedMessages = "erd_p2p_top_max_size_processed_messages" +// MetricP2pPeakNetworkNumReceivedMessages represents the peak network number of received messages in the amount of time +// It represents the peak sum of all received messages from all connected peers +const MetricP2pPeakNetworkNumReceivedMessages = "erd_p2p_peak_network_num_received_messages" +// MetricP2pPeakNetworkSizeReceivedMessages represents the peak network size of received messages in the amount of time +// It represents the peak sum of all received messages from all connected peers +const MetricP2pPeakNetworkSizeReceivedMessages = "erd_p2p_peak_network_size_received_messages" + +// MetricP2pPeakNetworkNumProcessedMessages represents the peak network number of processed messages in the amount of time +// It represents the peak sum of all processed messages from all connected peers +const MetricP2pPeakNetworkNumProcessedMessages = 
"erd_p2p_peak_network_num_processed_messages" + +// MetricP2pPeakNetworkSizeProcessedMessages represents the peak network size of processed messages in the amount of time +// It represents the peak sum of all processed messages from all connected peers +const MetricP2pPeakNetworkSizeProcessedMessages = "erd_p2p_peak_network_size_processed_messages" + +// MetricP2pPeerNumReceivedMessages represents the current maximum number of received messages in the amount of time +// counted on a connected peer +const MetricP2pPeerNumReceivedMessages = "erd_p2p_peer_num_received_messages" + +// MetricP2pPeerSizeReceivedMessages represents the current maximum size of received data (sum of all messages) in +// the amount of time counted on a connected peer +const MetricP2pPeerSizeReceivedMessages = "erd_p2p_peer_size_received_messages" + +// MetricP2pPeerNumProcessedMessages represents the current maximum number of processed messages in the amount of time +// counted on a connected peer +const MetricP2pPeerNumProcessedMessages = "erd_p2p_peer_num_processed_messages" + +// MetricP2pPeerSizeProcessedMessages represents the current maximum size of processed data (sum of all messages) in +// the amount of time counted on a connected peer +const MetricP2pPeerSizeProcessedMessages = "erd_p2p_peer_size_processed_messages" + +// MetricP2pPeakPeerNumReceivedMessages represents the peak maximum number of received messages in the amount of time +// counted on a connected peer +const MetricP2pPeakPeerNumReceivedMessages = "erd_p2p_peak_peer_num_received_messages" + +// MetricP2pPeakPeerSizeReceivedMessages represents the peak maximum size of received data (sum of all messages) in +// the amount of time counted on a connected peer +const MetricP2pPeakPeerSizeReceivedMessages = "erd_p2p_peak_peer_size_received_messages" + +// MetricP2pPeakPeerxNumProcessedMessages represents the peak maximum number of processed messages in the amount of time +// counted on a connected peer +const 
MetricP2pPeakPeerxNumProcessedMessages = "erd_p2p_peak_peer_num_processed_messages" + +// MetricP2pPeakPeerSizeProcessedMessages represents the peak maximum size of processed data (sum of all messages) in +// the amount of time counted on a connected peer +const MetricP2pPeakPeerSizeProcessedMessages = "erd_p2p_peak_peer_size_processed_messages" + +// MetricP2pNumReceiverPeers represents the number of connected peer sent messages to the current peer (and have been +// received by the current peer) in the amount of time const MetricP2pNumReceiverPeers = "erd_p2p_num_receiver_peers" -const MetricP2pTopNumReceiverPeers = "erd_p2p_top_num_receiver_peers" + +// MetricP2pPeakNumReceiverPeers represents the peak number of connected peer sent messages to the current peer +// (and have been received by the current peer) in the amount of time +const MetricP2pPeakNumReceiverPeers = "erd_p2p_peak_num_receiver_peers" diff --git a/integrationTests/p2p/antiflood/antiflooding_test.go b/integrationTests/p2p/antiflood/antiflooding_test.go index bc41aaa51eb..07eaa281818 100644 --- a/integrationTests/p2p/antiflood/antiflooding_test.go +++ b/integrationTests/p2p/antiflood/antiflooding_test.go @@ -2,6 +2,7 @@ package antiflood import ( "fmt" + "math" "sync/atomic" "testing" "time" @@ -36,9 +37,17 @@ func TestAntifloodWithNumMessagesFromTheSamePeer(t *testing.T) { topic := "test_topic" broadcastMessageDuration := time.Second * 2 - maxMumProcessMessages := uint32(5) + peerMaxMumProcessMessages := uint32(5) + maxMumProcessMessages := uint32(math.MaxUint32) maxMessageSize := uint64(1 << 20) //1MB - interceptors, err := createTopicsAndMockInterceptors(peers, topic, maxMumProcessMessages, maxMessageSize) + interceptors, err := createTopicsAndMockInterceptors( + peers, + topic, + peerMaxMumProcessMessages, + maxMessageSize, + maxMumProcessMessages, + maxMessageSize, + ) assert.Nil(t, err) fmt.Println("bootstrapping nodes") @@ -67,7 +76,7 @@ func TestAntifloodWithNumMessagesFromTheSamePeer(t 
*testing.T) { isFlooding.Store(false) - checkMessagesOnPeers(t, peers, interceptors, maxMumProcessMessages, floodedIdxes, protectedIdexes) + checkMessagesOnPeers(t, peers, interceptors, peerMaxMumProcessMessages, floodedIdxes, protectedIdexes) } // TestAntifloodWithMessagesFromOtherPeers tests what happens if a peer decide to send a number of messages @@ -90,9 +99,17 @@ func TestAntifloodWithNumMessagesFromOtherPeers(t *testing.T) { // (check integrationTests.CreateFixedNetworkOf14Peers function) topic := "test_topic" broadcastMessageDuration := time.Second * 2 - maxMumProcessMessages := uint32(5) + peerMaxMumProcessMessages := uint32(5) + maxMumProcessMessages := uint32(math.MaxUint32) maxMessageSize := uint64(1 << 20) //1MB - interceptors, err := createTopicsAndMockInterceptors(peers, topic, maxMumProcessMessages, maxMessageSize) + interceptors, err := createTopicsAndMockInterceptors( + peers, + topic, + peerMaxMumProcessMessages, + maxMessageSize, + maxMumProcessMessages, + maxMessageSize, + ) assert.Nil(t, err) fmt.Println("bootstrapping nodes") @@ -114,7 +131,7 @@ func TestAntifloodWithNumMessagesFromOtherPeers(t *testing.T) { } time.Sleep(broadcastMessageDuration) - checkMessagesOnPeers(t, peers, interceptors, maxMumProcessMessages, floodedIdxes, protectedIdexes) + checkMessagesOnPeers(t, peers, interceptors, peerMaxMumProcessMessages, floodedIdxes, protectedIdexes) } // TestAntifloodWithMessagesFromTheSamePeer tests what happens if a peer decide to send large messages @@ -137,9 +154,17 @@ func TestAntifloodWithLargeSizeMessagesFromTheSamePeer(t *testing.T) { topic := "test_topic" broadcastMessageDuration := time.Second * 2 - maxMumProcessMessages := uint32(100000) - maxMessageSize := uint64(1 << 10) //1KB - interceptors, err := createTopicsAndMockInterceptors(peers, topic, maxMumProcessMessages, maxMessageSize) + maxMumProcessMessages := uint32(math.MaxUint32) + maxMessageSize := uint64(math.MaxUint64) + peerMaxMessageSize := uint64(1 << 10) //1KB + 
interceptors, err := createTopicsAndMockInterceptors( + peers, + topic, + maxMumProcessMessages, + peerMaxMessageSize, + maxMumProcessMessages, + maxMessageSize, + ) assert.Nil(t, err) fmt.Println("bootstrapping nodes") @@ -157,7 +182,7 @@ func TestAntifloodWithLargeSizeMessagesFromTheSamePeer(t *testing.T) { isFlooding.Store(true) go func() { for { - peers[flooderIdx].Broadcast(topic, make([]byte, maxMessageSize+1)) + peers[flooderIdx].Broadcast(topic, make([]byte, peerMaxMessageSize+1)) if !isFlooding.Load().(bool) { return @@ -195,7 +220,15 @@ func checkMessagesOnPeers( checkPeers(peers, interceptors, protectedIdexes, checkFunctionForProtectedPeers) } -func createTopicsAndMockInterceptors(peers []p2p.Messenger, topic string, maxNumMessages uint32, maxSize uint64) ([]*messageProcessor, error) { +func createTopicsAndMockInterceptors( + peers []p2p.Messenger, + topic string, + peerMaxNumMessages uint32, + peerMaxSize uint64, + maxNumMessages uint32, + maxSize uint64, +) ([]*messageProcessor, error) { + interceptors := make([]*messageProcessor, len(peers)) for idx, p := range peers { @@ -211,6 +244,8 @@ func createTopicsAndMockInterceptors(peers []p2p.Messenger, topic string, maxNum interceptors[idx].floodPreventer, _ = antiflood.NewQuotaFloodPreventer( antifloodPool, &nilQuotaStatusHandler{}, + peerMaxNumMessages, + peerMaxSize, maxNumMessages, maxSize, ) diff --git a/statusHandler/p2pQuota/p2pQuotaProcessor.go b/statusHandler/p2pQuota/p2pQuotaProcessor.go index a1bc50f9db2..0ed39b48814 100644 --- a/statusHandler/p2pQuota/p2pQuotaProcessor.go +++ b/statusHandler/p2pQuota/p2pQuotaProcessor.go @@ -18,12 +18,12 @@ type quota struct { // p2pQuotaProcessor implements process.QuotaStatusHandler and is able to periodically sends to a // statusHandler the processed p2p quota information type p2pQuotaProcessor struct { - mutStatistics sync.Mutex - statistics map[string]*quota - topSumQuota *quota - topMaxQuota *quota - topNumReceivers uint64 - handler core.AppStatusHandler 
+ mutStatistics sync.Mutex + statistics map[string]*quota + peakNetworkQuota *quota + peakPeerQuota *quota + peakNumReceivers uint64 + handler core.AppStatusHandler } // NewP2pQuotaProcessor creates a new p2pQuotaProcessor instance @@ -33,80 +33,80 @@ func NewP2pQuotaProcessor(handler core.AppStatusHandler) (*p2pQuotaProcessor, er } return &p2pQuotaProcessor{ - statistics: make(map[string]*quota), - topSumQuota: "a{}, - topMaxQuota: "a{}, - handler: handler, + statistics: make(map[string]*quota), + peakNetworkQuota: "a{}, + peakPeerQuota: "a{}, + handler: handler, }, nil } // ResetStatistics output gathered statistics, process and prints them. After that it empties the statistics map func (pqp *p2pQuotaProcessor) ResetStatistics() { - sumQuota := "a{} - maxQuota := "a{} + networkQuota := "a{} + peakPeerQuota := "a{} pqp.mutStatistics.Lock() defer pqp.mutStatistics.Unlock() for _, q := range pqp.statistics { - sumQuota.numReceivedMessages += q.numReceivedMessages - sumQuota.sizeReceivedMessages += q.sizeReceivedMessages - sumQuota.numProcessedMessages += q.numProcessedMessages - sumQuota.sizeProcessedMessages += q.sizeProcessedMessages - - maxQuota.numReceivedMessages = core.MaxUint32(maxQuota.numReceivedMessages, q.numReceivedMessages) - maxQuota.sizeReceivedMessages = core.MaxUint64(maxQuota.sizeReceivedMessages, q.sizeReceivedMessages) - maxQuota.numProcessedMessages = core.MaxUint32(maxQuota.numProcessedMessages, q.numProcessedMessages) - maxQuota.sizeProcessedMessages = core.MaxUint64(maxQuota.sizeProcessedMessages, q.sizeProcessedMessages) + networkQuota.numReceivedMessages += q.numReceivedMessages + networkQuota.sizeReceivedMessages += q.sizeReceivedMessages + networkQuota.numProcessedMessages += q.numProcessedMessages + networkQuota.sizeProcessedMessages += q.sizeProcessedMessages + + peakPeerQuota.numReceivedMessages = core.MaxUint32(peakPeerQuota.numReceivedMessages, q.numReceivedMessages) + peakPeerQuota.sizeReceivedMessages = 
core.MaxUint64(peakPeerQuota.sizeReceivedMessages, q.sizeReceivedMessages) + peakPeerQuota.numProcessedMessages = core.MaxUint32(peakPeerQuota.numProcessedMessages, q.numProcessedMessages) + peakPeerQuota.sizeProcessedMessages = core.MaxUint64(peakPeerQuota.sizeProcessedMessages, q.sizeProcessedMessages) } - pqp.topMaxQuota.numReceivedMessages = core.MaxUint32(maxQuota.numReceivedMessages, pqp.topMaxQuota.numReceivedMessages) - pqp.topMaxQuota.sizeReceivedMessages = core.MaxUint64(maxQuota.sizeReceivedMessages, pqp.topMaxQuota.sizeReceivedMessages) - pqp.topMaxQuota.numProcessedMessages = core.MaxUint32(maxQuota.numProcessedMessages, pqp.topMaxQuota.numProcessedMessages) - pqp.topMaxQuota.sizeProcessedMessages = core.MaxUint64(maxQuota.sizeProcessedMessages, pqp.topMaxQuota.sizeProcessedMessages) + pqp.peakPeerQuota.numReceivedMessages = core.MaxUint32(peakPeerQuota.numReceivedMessages, pqp.peakPeerQuota.numReceivedMessages) + pqp.peakPeerQuota.sizeReceivedMessages = core.MaxUint64(peakPeerQuota.sizeReceivedMessages, pqp.peakPeerQuota.sizeReceivedMessages) + pqp.peakPeerQuota.numProcessedMessages = core.MaxUint32(peakPeerQuota.numProcessedMessages, pqp.peakPeerQuota.numProcessedMessages) + pqp.peakPeerQuota.sizeProcessedMessages = core.MaxUint64(peakPeerQuota.sizeProcessedMessages, pqp.peakPeerQuota.sizeProcessedMessages) - pqp.topSumQuota.numReceivedMessages = core.MaxUint32(sumQuota.numReceivedMessages, pqp.topSumQuota.numReceivedMessages) - pqp.topSumQuota.sizeReceivedMessages = core.MaxUint64(sumQuota.sizeReceivedMessages, pqp.topSumQuota.sizeReceivedMessages) - pqp.topSumQuota.numProcessedMessages = core.MaxUint32(sumQuota.numProcessedMessages, pqp.topSumQuota.numProcessedMessages) - pqp.topSumQuota.sizeProcessedMessages = core.MaxUint64(sumQuota.sizeProcessedMessages, pqp.topSumQuota.sizeProcessedMessages) + pqp.peakNetworkQuota.numReceivedMessages = core.MaxUint32(networkQuota.numReceivedMessages, pqp.peakNetworkQuota.numReceivedMessages) + 
pqp.peakNetworkQuota.sizeReceivedMessages = core.MaxUint64(networkQuota.sizeReceivedMessages, pqp.peakNetworkQuota.sizeReceivedMessages) + pqp.peakNetworkQuota.numProcessedMessages = core.MaxUint32(networkQuota.numProcessedMessages, pqp.peakNetworkQuota.numProcessedMessages) + pqp.peakNetworkQuota.sizeProcessedMessages = core.MaxUint64(networkQuota.sizeProcessedMessages, pqp.peakNetworkQuota.sizeProcessedMessages) numPeers := uint64(len(pqp.statistics)) - pqp.topNumReceivers = core.MaxUint64(numPeers, pqp.topNumReceivers) + pqp.peakNumReceivers = core.MaxUint64(numPeers, pqp.peakNumReceivers) - pqp.moveStatisticsInAppStatusHandler(maxQuota, sumQuota, numPeers, pqp.topNumReceivers) + pqp.moveStatisticsInAppStatusHandler(peakPeerQuota, networkQuota, numPeers, pqp.peakNumReceivers) pqp.statistics = make(map[string]*quota) } func (pqp *p2pQuotaProcessor) moveStatisticsInAppStatusHandler( - maxQuota *quota, - sumQuota *quota, + peerQuota *quota, + networkQuota *quota, numReceiverPeers uint64, - topNumReceiverPeers uint64, + peakNumReceiverPeers uint64, ) { - pqp.handler.SetUInt64Value(core.MetricP2pSumNumReceivedMessages, uint64(sumQuota.numReceivedMessages)) - pqp.handler.SetUInt64Value(core.MetricP2pSumSizeReceivedMessages, sumQuota.sizeReceivedMessages) - pqp.handler.SetUInt64Value(core.MetricP2pSumNumProcessedMessages, uint64(sumQuota.numProcessedMessages)) - pqp.handler.SetUInt64Value(core.MetricP2pSumSizeProcessedMessages, sumQuota.sizeProcessedMessages) + pqp.handler.SetUInt64Value(core.MetricP2pNetworkNumReceivedMessages, uint64(networkQuota.numReceivedMessages)) + pqp.handler.SetUInt64Value(core.MetricP2pNetworkSizeReceivedMessages, networkQuota.sizeReceivedMessages) + pqp.handler.SetUInt64Value(core.MetricP2pNetworkNumProcessedMessages, uint64(networkQuota.numProcessedMessages)) + pqp.handler.SetUInt64Value(core.MetricP2pNetworkSizeProcessedMessages, networkQuota.sizeProcessedMessages) - pqp.handler.SetUInt64Value(core.MetricP2pTopSumNumReceivedMessages, 
uint64(pqp.topSumQuota.numReceivedMessages)) - pqp.handler.SetUInt64Value(core.MetricP2pTopSumSizeReceivedMessages, pqp.topSumQuota.sizeReceivedMessages) - pqp.handler.SetUInt64Value(core.MetricP2pTopSumNumProcessedMessages, uint64(pqp.topSumQuota.numProcessedMessages)) - pqp.handler.SetUInt64Value(core.MetricP2pTopSumSizeProcessedMessages, pqp.topSumQuota.sizeProcessedMessages) + pqp.handler.SetUInt64Value(core.MetricP2pPeakNetworkNumReceivedMessages, uint64(pqp.peakNetworkQuota.numReceivedMessages)) + pqp.handler.SetUInt64Value(core.MetricP2pPeakNetworkSizeReceivedMessages, pqp.peakNetworkQuota.sizeReceivedMessages) + pqp.handler.SetUInt64Value(core.MetricP2pPeakNetworkNumProcessedMessages, uint64(pqp.peakNetworkQuota.numProcessedMessages)) + pqp.handler.SetUInt64Value(core.MetricP2pPeakNetworkSizeProcessedMessages, pqp.peakNetworkQuota.sizeProcessedMessages) - pqp.handler.SetUInt64Value(core.MetricP2pMaxNumReceivedMessages, uint64(maxQuota.numReceivedMessages)) - pqp.handler.SetUInt64Value(core.MetricP2pMaxSizeReceivedMessages, maxQuota.sizeReceivedMessages) - pqp.handler.SetUInt64Value(core.MetricP2pMaxNumProcessedMessages, uint64(maxQuota.numProcessedMessages)) - pqp.handler.SetUInt64Value(core.MetricP2pMaxSizeProcessedMessages, maxQuota.sizeProcessedMessages) + pqp.handler.SetUInt64Value(core.MetricP2pPeerNumReceivedMessages, uint64(peerQuota.numReceivedMessages)) + pqp.handler.SetUInt64Value(core.MetricP2pPeerSizeReceivedMessages, peerQuota.sizeReceivedMessages) + pqp.handler.SetUInt64Value(core.MetricP2pPeerNumProcessedMessages, uint64(peerQuota.numProcessedMessages)) + pqp.handler.SetUInt64Value(core.MetricP2pPeerSizeProcessedMessages, peerQuota.sizeProcessedMessages) - pqp.handler.SetUInt64Value(core.MetricP2pTopMaxNumReceivedMessages, uint64(pqp.topMaxQuota.numReceivedMessages)) - pqp.handler.SetUInt64Value(core.MetricP2pTopMaxSizeReceivedMessages, pqp.topMaxQuota.sizeReceivedMessages) - 
pqp.handler.SetUInt64Value(core.MetricP2pTopMaxNumProcessedMessages, uint64(pqp.topMaxQuota.numProcessedMessages)) - pqp.handler.SetUInt64Value(core.MetricP2pTopMaxSizeProcessedMessages, pqp.topMaxQuota.sizeProcessedMessages) + pqp.handler.SetUInt64Value(core.MetricP2pPeakPeerNumReceivedMessages, uint64(pqp.peakPeerQuota.numReceivedMessages)) + pqp.handler.SetUInt64Value(core.MetricP2pPeakPeerSizeReceivedMessages, pqp.peakPeerQuota.sizeReceivedMessages) + pqp.handler.SetUInt64Value(core.MetricP2pPeakPeerxNumProcessedMessages, uint64(pqp.peakPeerQuota.numProcessedMessages)) + pqp.handler.SetUInt64Value(core.MetricP2pPeakPeerSizeProcessedMessages, pqp.peakPeerQuota.sizeProcessedMessages) pqp.handler.SetUInt64Value(core.MetricP2pNumReceiverPeers, numReceiverPeers) - pqp.handler.SetUInt64Value(core.MetricP2pTopNumReceiverPeers, topNumReceiverPeers) + pqp.handler.SetUInt64Value(core.MetricP2pPeakNumReceiverPeers, peakNumReceiverPeers) } // AddQuota adds a quota statistics diff --git a/statusHandler/p2pQuota/p2pQuotaProcessor_test.go b/statusHandler/p2pQuota/p2pQuotaProcessor_test.go index ad16cf8dd55..f9229c79515 100644 --- a/statusHandler/p2pQuota/p2pQuotaProcessor_test.go +++ b/statusHandler/p2pQuota/p2pQuotaProcessor_test.go @@ -119,16 +119,16 @@ func checkSumMetrics( sizeProcessed uint64, ) { - value := status.GetUint64(core.MetricP2pSumNumReceivedMessages) + value := status.GetUint64(core.MetricP2pNetworkNumReceivedMessages) assert.Equal(t, value, numReceived) - value = status.GetUint64(core.MetricP2pSumSizeReceivedMessages) + value = status.GetUint64(core.MetricP2pNetworkSizeReceivedMessages) assert.Equal(t, value, sizeReceived) - value = status.GetUint64(core.MetricP2pSumNumProcessedMessages) + value = status.GetUint64(core.MetricP2pNetworkNumProcessedMessages) assert.Equal(t, value, numProcessed) - value = status.GetUint64(core.MetricP2pSumSizeProcessedMessages) + value = status.GetUint64(core.MetricP2pNetworkSizeProcessedMessages) assert.Equal(t, value, 
sizeProcessed) } @@ -141,16 +141,16 @@ func checkTopSumMetrics( sizeProcessed uint64, ) { - value := status.GetUint64(core.MetricP2pTopSumNumReceivedMessages) + value := status.GetUint64(core.MetricP2pPeakNetworkNumReceivedMessages) assert.Equal(t, value, numReceived) - value = status.GetUint64(core.MetricP2pTopSumSizeReceivedMessages) + value = status.GetUint64(core.MetricP2pPeakNetworkSizeReceivedMessages) assert.Equal(t, value, sizeReceived) - value = status.GetUint64(core.MetricP2pTopSumNumProcessedMessages) + value = status.GetUint64(core.MetricP2pPeakNetworkNumProcessedMessages) assert.Equal(t, value, numProcessed) - value = status.GetUint64(core.MetricP2pTopSumSizeProcessedMessages) + value = status.GetUint64(core.MetricP2pPeakNetworkSizeProcessedMessages) assert.Equal(t, value, sizeProcessed) } @@ -163,16 +163,16 @@ func checkMaxMetrics( sizeProcessed uint64, ) { - value := status.GetUint64(core.MetricP2pMaxNumReceivedMessages) + value := status.GetUint64(core.MetricP2pPeerNumReceivedMessages) assert.Equal(t, value, numReceived) - value = status.GetUint64(core.MetricP2pMaxSizeReceivedMessages) + value = status.GetUint64(core.MetricP2pPeerSizeReceivedMessages) assert.Equal(t, value, sizeReceived) - value = status.GetUint64(core.MetricP2pMaxNumProcessedMessages) + value = status.GetUint64(core.MetricP2pPeerNumProcessedMessages) assert.Equal(t, value, numProcessed) - value = status.GetUint64(core.MetricP2pMaxSizeProcessedMessages) + value = status.GetUint64(core.MetricP2pPeerSizeProcessedMessages) assert.Equal(t, value, sizeProcessed) } @@ -185,16 +185,16 @@ func checkTopMaxMetrics( sizeProcessed uint64, ) { - value := status.GetUint64(core.MetricP2pTopMaxNumReceivedMessages) + value := status.GetUint64(core.MetricP2pPeakPeerNumReceivedMessages) assert.Equal(t, value, numReceived) - value = status.GetUint64(core.MetricP2pTopMaxSizeReceivedMessages) + value = status.GetUint64(core.MetricP2pPeakPeerSizeReceivedMessages) assert.Equal(t, value, sizeReceived) - 
value = status.GetUint64(core.MetricP2pTopMaxNumProcessedMessages) + value = status.GetUint64(core.MetricP2pPeakPeerxNumProcessedMessages) assert.Equal(t, value, numProcessed) - value = status.GetUint64(core.MetricP2pTopMaxSizeProcessedMessages) + value = status.GetUint64(core.MetricP2pPeakPeerSizeProcessedMessages) assert.Equal(t, value, sizeProcessed) } @@ -207,6 +207,6 @@ func checkNumReceivers( value := status.GetUint64(core.MetricP2pNumReceiverPeers) assert.Equal(t, value, numReceiverPeers) - value = status.GetUint64(core.MetricP2pTopNumReceiverPeers) + value = status.GetUint64(core.MetricP2pPeakNumReceiverPeers) assert.Equal(t, value, topNumReceiverPeers) } From b6213c70f4c6f1c3900c7ec83c588c3076e3cc3a Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Tue, 17 Dec 2019 19:33:24 +0200 Subject: [PATCH 23/35] fixes after merge --- consensus/spos/worker.go | 2 +- consensus/spos/worker_test.go | 4 +++- .../factory/metachain/interceptorsContainerFactory_test.go | 2 ++ process/factory/shard/interceptorsContainerFactory_test.go | 2 ++ 4 files changed, 8 insertions(+), 2 deletions(-) diff --git a/consensus/spos/worker.go b/consensus/spos/worker.go index c3ecb80ad13..3c0b713634c 100644 --- a/consensus/spos/worker.go +++ b/consensus/spos/worker.go @@ -242,7 +242,7 @@ func (wrk *Worker) getCleanedList(cnsDataList []*consensus.Message) []*consensus } // ProcessReceivedMessage method redirects the received message to the channel which should handle it -func (wrk *Worker) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error { +func (wrk *Worker) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer p2p.PeerID) error { if check.IfNil(message) { return ErrNilMessage } diff --git a/consensus/spos/worker_test.go b/consensus/spos/worker_test.go index 7cbe9aef527..639cd3d3f03 100644 --- a/consensus/spos/worker_test.go +++ b/consensus/spos/worker_test.go @@ -627,6 +627,7 @@ func TestWorker_NewWorkerEmptyChainIDShouldFail(t *testing.T) { 
syncTimerMock, &mock.HeaderSigVerifierStub{}, nil, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, wrk) @@ -664,6 +665,7 @@ func TestWorker_NewWorkerNilAntifloodHandlerShouldFail(t *testing.T) { singleSignerMock, syncTimerMock, &mock.HeaderSigVerifierStub{}, + chainID, nil, ) @@ -977,7 +979,7 @@ func TestWorker_ProcessReceivedMessageInconsistentChainIDInConsensusMessageShoul []byte("inconsistent chain ID"), ) buff, _ := wrk.Marshalizer().Marshal(cnsMsg) - err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, nil) + err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, fromConnectedPeerId) assert.True(t, errors.Is(err, spos.ErrInvalidChainID)) } diff --git a/process/factory/metachain/interceptorsContainerFactory_test.go b/process/factory/metachain/interceptorsContainerFactory_test.go index 5dc5b1b9c00..7d5cfc005e6 100644 --- a/process/factory/metachain/interceptorsContainerFactory_test.go +++ b/process/factory/metachain/interceptorsContainerFactory_test.go @@ -525,6 +525,7 @@ func TestNewInterceptorsContainerFactory_EmptyCahinIDShouldErr(t *testing.T) { &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, nil, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, icf) @@ -553,6 +554,7 @@ func TestNewInterceptorsContainerFactory_NilAntifloodHandlerShouldErr(t *testing &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, + chainID, nil, ) diff --git a/process/factory/shard/interceptorsContainerFactory_test.go b/process/factory/shard/interceptorsContainerFactory_test.go index 0dd4500ed83..2d663e368e9 100644 --- a/process/factory/shard/interceptorsContainerFactory_test.go +++ b/process/factory/shard/interceptorsContainerFactory_test.go @@ -529,6 +529,7 @@ func TestNewInterceptorsContainerFactory_EmptyChainIDShouldErr(t *testing.T) { &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, nil, + &mock.P2PAntifloodHandlerStub{}, ) assert.Nil(t, icf) @@ -557,6 +558,7 @@ func 
TestNewInterceptorsContainerFactory_NilAntifloodHandlerShouldErr(t *testing &mock.FeeHandlerStub{}, &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, + chainID, nil, ) From c2846819a72a5a67e3719fc3f40c36388806f60d Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Wed, 18 Dec 2019 18:50:00 +0200 Subject: [PATCH 24/35] fixed network statistics computing on antiflood component cleaned code --- .../p2p/antiflood/nilQuotaStatusHandler.go | 4 ++ p2p/antiflood/p2pAntiflood.go | 2 +- p2p/antiflood/p2pAntiflood_test.go | 17 +++-- p2p/mock/floodPreventerStub.go | 9 ++- p2p/p2p.go | 1 + process/interface.go | 1 + process/mock/quotaStatusHandlerStub.go | 10 +++ process/throttle/antiflood/interface.go | 4 +- .../throttle/antiflood/quotaFloodPreventer.go | 34 ++++++++- .../antiflood/quotaFloodPreventer_test.go | 61 ++++++++++++++++ statusHandler/p2pQuota/p2pQuotaProcessor.go | 56 ++++++++++----- .../p2pQuota/p2pQuotaProcessor_test.go | 71 +++++++++++++------ 12 files changed, 220 insertions(+), 50 deletions(-) diff --git a/integrationTests/p2p/antiflood/nilQuotaStatusHandler.go b/integrationTests/p2p/antiflood/nilQuotaStatusHandler.go index 9e68cd794b7..af66bf30f54 100644 --- a/integrationTests/p2p/antiflood/nilQuotaStatusHandler.go +++ b/integrationTests/p2p/antiflood/nilQuotaStatusHandler.go @@ -11,6 +11,10 @@ func (nqsh *nilQuotaStatusHandler) ResetStatistics() { func (nqsh *nilQuotaStatusHandler) AddQuota(_ string, _ uint32, _ uint64, _ uint32, _ uint64) { } +// SetGlobalQuota is not implemented +func (nqsh *nilQuotaStatusHandler) SetGlobalQuota(_ uint32, _ uint64, _ uint32, _ uint64) { +} + // IsInterfaceNil returns true if there is no value under the interface func (nqsh *nilQuotaStatusHandler) IsInterfaceNil() bool { return nqsh == nil diff --git a/p2p/antiflood/p2pAntiflood.go b/p2p/antiflood/p2pAntiflood.go index dc61c8ca315..dd8bc75412e 100644 --- a/p2p/antiflood/p2pAntiflood.go +++ b/p2p/antiflood/p2pAntiflood.go @@ -34,7 +34,7 @@ func (af *p2pAntiflood) 
CanProcessMessage(message p2p.MessageP2P, fromConnectedP } //protect from directly connected peer - ok := floodPreventer.Increment(fromConnectedPeer.Pretty(), uint64(len(message.Data()))) + ok := floodPreventer.IncrementAddingToSum(fromConnectedPeer.Pretty(), uint64(len(message.Data()))) if !ok { return fmt.Errorf("%w in p2pAntiflood for connected peer", p2p.ErrSystemBusy) } diff --git a/p2p/antiflood/p2pAntiflood_test.go b/p2p/antiflood/p2pAntiflood_test.go index a59def23e02..6d1de450cf2 100644 --- a/p2p/antiflood/p2pAntiflood_test.go +++ b/p2p/antiflood/p2pAntiflood_test.go @@ -68,7 +68,7 @@ func TestP2pAntiflood_CanNotIncrementFromConnectedPeerShouldError(t *testing.T) FromField: messageOriginator, } afm, _ := antiflood.NewP2pAntiflood(&mock.FloodPreventerStub{ - IncrementCalled: func(identifier string, size uint64) bool { + IncrementAddingToSumCalled: func(identifier string, size uint64) bool { if identifier != fromConnectedPeer.Pretty() { assert.Fail(t, "should have been the connected peer") } @@ -92,16 +92,20 @@ func TestP2pAntiflood_CanNotIncrementMessageOriginatorShouldError(t *testing.T) PeerField: p2p.PeerID(messageOriginator), } afm, _ := antiflood.NewP2pAntiflood(&mock.FloodPreventerStub{ - IncrementCalled: func(identifier string, size uint64) bool { + IncrementAddingToSumCalled: func(identifier string, size uint64) bool { if identifier == fromConnectedPeer.Pretty() { return true } - if identifier != message.PeerField.Pretty() { - assert.Fail(t, "should have been the originator") - } return false }, + IncrementCalled: func(identifier string, size uint64) bool { + if identifier == message.PeerField.Pretty() { + return false + } + + return true + }, }) err := afm.CanProcessMessage(message, fromConnectedPeer) @@ -118,6 +122,9 @@ func TestP2pAntiflood_ShouldWork(t *testing.T) { PeerField: p2p.PeerID(messageOriginator), } afm, _ := antiflood.NewP2pAntiflood(&mock.FloodPreventerStub{ + IncrementAddingToSumCalled: func(identifier string, size uint64) bool { + 
return true + }, IncrementCalled: func(identifier string, size uint64) bool { return true }, diff --git a/p2p/mock/floodPreventerStub.go b/p2p/mock/floodPreventerStub.go index 6783c4afec7..27278e1aa96 100644 --- a/p2p/mock/floodPreventerStub.go +++ b/p2p/mock/floodPreventerStub.go @@ -1,8 +1,13 @@ package mock type FloodPreventerStub struct { - IncrementCalled func(identifier string, size uint64) bool - ResetCalled func() + IncrementAddingToSumCalled func(identifier string, size uint64) bool + IncrementCalled func(identifier string, size uint64) bool + ResetCalled func() +} + +func (fps *FloodPreventerStub) IncrementAddingToSum(identifier string, size uint64) bool { + return fps.IncrementAddingToSumCalled(identifier, size) } func (fps *FloodPreventerStub) Increment(identifier string, size uint64) bool { diff --git a/p2p/p2p.go b/p2p/p2p.go index 22766175f15..70ee40d007b 100644 --- a/p2p/p2p.go +++ b/p2p/p2p.go @@ -195,6 +195,7 @@ type PeerDiscoveryFactory interface { // FloodPreventer defines the behavior of a component that is able to signal that too many events occurred // on a provided identifier between Reset calls type FloodPreventer interface { + IncrementAddingToSum(identifier string, size uint64) bool Increment(identifier string, size uint64) bool Reset() IsInterfaceNil() bool diff --git a/process/interface.go b/process/interface.go index bf141d880f8..120267bc0a5 100644 --- a/process/interface.go +++ b/process/interface.go @@ -543,6 +543,7 @@ type InterceptedHeaderSigVerifier interface { // FloodPreventer defines the behavior of a component that is able to signal that too many events occurred // on a provided identifier between Reset calls type FloodPreventer interface { + IncrementAddingToSum(identifier string, size uint64) bool Increment(identifier string, size uint64) bool Reset() IsInterfaceNil() bool diff --git a/process/mock/quotaStatusHandlerStub.go b/process/mock/quotaStatusHandlerStub.go index 55c4bdbae75..ecdc10ba553 100644 --- 
a/process/mock/quotaStatusHandlerStub.go +++ b/process/mock/quotaStatusHandlerStub.go @@ -4,6 +4,7 @@ type QuotaStatusHandlerStub struct { ResetStatisticsCalled func() AddQuotaCalled func(identifier string, numReceivedMessages uint32, sizeReceivedMessages uint64, numProcessedMessages uint32, sizeProcessedMessages uint64) + SetGlobalQuotaCalled func(numReceived uint32, sizeReceived uint64, numProcessed uint32, sizeProcessed uint64) } func (qshs *QuotaStatusHandlerStub) ResetStatistics() { @@ -20,6 +21,15 @@ func (qshs *QuotaStatusHandlerStub) AddQuota( qshs.AddQuotaCalled(identifier, numReceived, sizeReceived, numProcessed, sizeProcessed) } +func (qshs *QuotaStatusHandlerStub) SetGlobalQuota( + numReceived uint32, + sizeReceived uint64, + numProcessed uint32, + sizeProcessed uint64, +) { + qshs.SetGlobalQuotaCalled(numReceived, sizeReceived, numProcessed, sizeProcessed) +} + func (qshs *QuotaStatusHandlerStub) IsInterfaceNil() bool { return qshs == nil } diff --git a/process/throttle/antiflood/interface.go b/process/throttle/antiflood/interface.go index 9e7b14355d9..18c3a441166 100644 --- a/process/throttle/antiflood/interface.go +++ b/process/throttle/antiflood/interface.go @@ -4,7 +4,7 @@ package antiflood // by the system type QuotaStatusHandler interface { ResetStatistics() - AddQuota(identifier string, numReceived uint32, sizeReceived uint64, - numProcessed uint32, sizeProcessed uint64) + AddQuota(identifier string, numReceived uint32, sizeReceived uint64, numProcessed uint32, sizeProcessed uint64) + SetGlobalQuota(numReceived uint32, sizeReceived uint64, numProcessed uint32, sizeProcessed uint64) IsInterfaceNil() bool } diff --git a/process/throttle/antiflood/quotaFloodPreventer.go b/process/throttle/antiflood/quotaFloodPreventer.go index 03c90ff1e6b..15aee8e7e4a 100644 --- a/process/throttle/antiflood/quotaFloodPreventer.go +++ b/process/throttle/antiflood/quotaFloodPreventer.go @@ -89,17 +89,40 @@ func NewQuotaFloodPreventer( }, nil } -// Increment tries to 
increment the counter values held at "identifier" position +// IncrementAddingToSum tries to increment the counter values held at "identifier" position // It returns true if it had succeeded incrementing (existing counter value is lower or equal with provided maxOperations) // We need the mutOperation here as the get and put should be done atomically. // Otherwise we might yield a slightly higher number of false valid increments -func (qfp *quotaFloodPreventer) Increment(identifier string, size uint64) bool { +// This method also checks the global sum quota and increment its values +func (qfp *quotaFloodPreventer) IncrementAddingToSum(identifier string, size uint64) bool { qfp.mutOperation.Lock() defer qfp.mutOperation.Unlock() qfp.globalQuota.numReceivedMessages++ qfp.globalQuota.sizeReceivedMessages += size + result := qfp.increment(identifier, size) + if result { + qfp.globalQuota.numProcessedMessages++ + qfp.globalQuota.sizeProcessedMessages += size + } + + return result +} + +// Increment tries to increment the counter values held at "identifier" position +// It returns true if it had succeeded incrementing (existing counter value is lower or equal with provided maxOperations) +// We need the mutOperation here as the get and put should be done atomically. 
+// Otherwise we might yield a slightly higher number of false valid increments +// This method also checks the global sum quota but does not increment its values +func (qfp *quotaFloodPreventer) Increment(identifier string, size uint64) bool { + qfp.mutOperation.Lock() + defer qfp.mutOperation.Unlock() + + return qfp.increment(identifier, size) +} + +func (qfp *quotaFloodPreventer) increment(identifier string, size uint64) bool { isGlobalQuotaReached := qfp.globalQuota.numReceivedMessages > qfp.maxMessages || qfp.globalQuota.sizeReceivedMessages > qfp.maxSize if isGlobalQuotaReached { @@ -182,6 +205,13 @@ func (qfp quotaFloodPreventer) createStatistics() { q.sizeProcessedMessages, ) } + + qfp.statusHandler.SetGlobalQuota( + qfp.globalQuota.numReceivedMessages, + qfp.globalQuota.sizeReceivedMessages, + qfp.globalQuota.numProcessedMessages, + qfp.globalQuota.sizeProcessedMessages, + ) } // IsInterfaceNil returns true if there is no value under the interface diff --git a/process/throttle/antiflood/quotaFloodPreventer_test.go b/process/throttle/antiflood/quotaFloodPreventer_test.go index d2a66cce3dd..b7f5d31d652 100644 --- a/process/throttle/antiflood/quotaFloodPreventer_test.go +++ b/process/throttle/antiflood/quotaFloodPreventer_test.go @@ -16,6 +16,7 @@ func createMockQuotaStatusHandler() *mock.QuotaStatusHandlerStub { return &mock.QuotaStatusHandlerStub{ ResetStatisticsCalled: func() {}, AddQuotaCalled: func(_ string, _ uint32, _ uint64, _ uint32, _ uint64) {}, + SetGlobalQuotaCalled: func(_ uint32, _ uint64, _ uint32, _ uint64) {}, } } @@ -246,6 +247,65 @@ func TestNewQuotaFloodPreventer_IncrementUnderMaxValuesShouldIncrementAndReturnT assert.True(t, putWasCalled) } +func TestNewQuotaFloodPreventer_IncrementAddingSumWithResetShouldWork(t *testing.T) { + t.Parallel() + + putWasCalled := 0 + addedGlobalQuotaCalled := false + existingSize := uint64(0) + existingMessages := uint32(0) + existingQuota := "a{ + numReceivedMessages: existingMessages, + 
sizeReceivedMessages: existingSize, + } + identifier := "identifier" + size := uint64(minTotalSize * 2) + qfp, _ := NewQuotaFloodPreventer( + &mock.CacherStub{ + GetCalled: func(key []byte) (value interface{}, ok bool) { + return existingQuota, true + }, + PutCalled: func(key []byte, value interface{}) (evicted bool) { + if string(key) == identifier { + putWasCalled++ + } + + return + }, + KeysCalled: func() [][]byte { + return make([][]byte, 0) + }, + ClearCalled: func() {}, + }, + &mock.QuotaStatusHandlerStub{ + AddQuotaCalled: func(_ string, _ uint32, _ uint64, _ uint32, _ uint64) {}, + SetGlobalQuotaCalled: func(numReceived uint32, sizeReceived uint64, numProcessed uint32, sizeProcessed uint64) { + addedGlobalQuotaCalled = true + assert.Equal(t, uint32(2), numReceived) + assert.Equal(t, size+size+1, sizeReceived) + assert.Equal(t, uint32(2), numProcessed) + assert.Equal(t, size+size+1, sizeProcessed) + }, + ResetStatisticsCalled: func() {}, + }, + minMessages*4, + minTotalSize*10, + minMessages*4, + minTotalSize*10, + ) + + ok := qfp.IncrementAddingToSum(identifier, size) + assert.True(t, ok) + + ok = qfp.IncrementAddingToSum(identifier, size+1) + assert.True(t, ok) + + qfp.Reset() + + assert.Equal(t, 2, putWasCalled) + assert.True(t, addedGlobalQuotaCalled) +} + //------- Increment per peer func TestNewQuotaFloodPreventer_IncrementOverMaxPeerNumMessagesShouldNotPutAndReturnFalse(t *testing.T) { @@ -477,6 +537,7 @@ func TestCountersMap_ResetShouldCallQuotaStatus(t *testing.T) { assert.Equal(t, quotaToCompare, quotaProvided) }, + SetGlobalQuotaCalled: func(_ uint32, _ uint64, _ uint32, _ uint64) {}, }, minTotalSize, minMessages, diff --git a/statusHandler/p2pQuota/p2pQuotaProcessor.go b/statusHandler/p2pQuota/p2pQuotaProcessor.go index 0ed39b48814..a05a1e13e4e 100644 --- a/statusHandler/p2pQuota/p2pQuotaProcessor.go +++ b/statusHandler/p2pQuota/p2pQuotaProcessor.go @@ -21,6 +21,7 @@ type p2pQuotaProcessor struct { mutStatistics sync.Mutex statistics 
map[string]*quota peakNetworkQuota *quota + networkQuota *quota peakPeerQuota *quota peakNumReceivers uint64 handler core.AppStatusHandler @@ -35,6 +36,7 @@ func NewP2pQuotaProcessor(handler core.AppStatusHandler) (*p2pQuotaProcessor, er return &p2pQuotaProcessor{ statistics: make(map[string]*quota), peakNetworkQuota: "a{}, + networkQuota: "a{}, peakPeerQuota: "a{}, handler: handler, }, nil @@ -42,40 +44,43 @@ func NewP2pQuotaProcessor(handler core.AppStatusHandler) (*p2pQuotaProcessor, er // ResetStatistics output gathered statistics, process and prints them. After that it empties the statistics map func (pqp *p2pQuotaProcessor) ResetStatistics() { - networkQuota := "a{} - peakPeerQuota := "a{} - pqp.mutStatistics.Lock() defer pqp.mutStatistics.Unlock() - for _, q := range pqp.statistics { - networkQuota.numReceivedMessages += q.numReceivedMessages - networkQuota.sizeReceivedMessages += q.sizeReceivedMessages - networkQuota.numProcessedMessages += q.numProcessedMessages - networkQuota.sizeProcessedMessages += q.sizeProcessedMessages + peakPeerQuota := pqp.computePeerStatistics() + numPeers := uint64(len(pqp.statistics)) + pqp.setPeakStatistics(peakPeerQuota, numPeers) + + pqp.moveStatisticsInAppStatusHandler(peakPeerQuota, pqp.networkQuota, numPeers, pqp.peakNumReceivers) + + pqp.statistics = make(map[string]*quota) +} +func (pqp *p2pQuotaProcessor) computePeerStatistics() *quota { + peakPeerQuota := "a{} + + for _, q := range pqp.statistics { peakPeerQuota.numReceivedMessages = core.MaxUint32(peakPeerQuota.numReceivedMessages, q.numReceivedMessages) peakPeerQuota.sizeReceivedMessages = core.MaxUint64(peakPeerQuota.sizeReceivedMessages, q.sizeReceivedMessages) peakPeerQuota.numProcessedMessages = core.MaxUint32(peakPeerQuota.numProcessedMessages, q.numProcessedMessages) peakPeerQuota.sizeProcessedMessages = core.MaxUint64(peakPeerQuota.sizeProcessedMessages, q.sizeProcessedMessages) } + return peakPeerQuota +} + +func (pqp *p2pQuotaProcessor) 
setPeakStatistics(peakPeerQuota *quota, numPeers uint64) { pqp.peakPeerQuota.numReceivedMessages = core.MaxUint32(peakPeerQuota.numReceivedMessages, pqp.peakPeerQuota.numReceivedMessages) pqp.peakPeerQuota.sizeReceivedMessages = core.MaxUint64(peakPeerQuota.sizeReceivedMessages, pqp.peakPeerQuota.sizeReceivedMessages) pqp.peakPeerQuota.numProcessedMessages = core.MaxUint32(peakPeerQuota.numProcessedMessages, pqp.peakPeerQuota.numProcessedMessages) pqp.peakPeerQuota.sizeProcessedMessages = core.MaxUint64(peakPeerQuota.sizeProcessedMessages, pqp.peakPeerQuota.sizeProcessedMessages) - pqp.peakNetworkQuota.numReceivedMessages = core.MaxUint32(networkQuota.numReceivedMessages, pqp.peakNetworkQuota.numReceivedMessages) - pqp.peakNetworkQuota.sizeReceivedMessages = core.MaxUint64(networkQuota.sizeReceivedMessages, pqp.peakNetworkQuota.sizeReceivedMessages) - pqp.peakNetworkQuota.numProcessedMessages = core.MaxUint32(networkQuota.numProcessedMessages, pqp.peakNetworkQuota.numProcessedMessages) - pqp.peakNetworkQuota.sizeProcessedMessages = core.MaxUint64(networkQuota.sizeProcessedMessages, pqp.peakNetworkQuota.sizeProcessedMessages) + pqp.peakNetworkQuota.numReceivedMessages = core.MaxUint32(pqp.networkQuota.numReceivedMessages, pqp.peakNetworkQuota.numReceivedMessages) + pqp.peakNetworkQuota.sizeReceivedMessages = core.MaxUint64(pqp.networkQuota.sizeReceivedMessages, pqp.peakNetworkQuota.sizeReceivedMessages) + pqp.peakNetworkQuota.numProcessedMessages = core.MaxUint32(pqp.networkQuota.numProcessedMessages, pqp.peakNetworkQuota.numProcessedMessages) + pqp.peakNetworkQuota.sizeProcessedMessages = core.MaxUint64(pqp.networkQuota.sizeProcessedMessages, pqp.peakNetworkQuota.sizeProcessedMessages) - numPeers := uint64(len(pqp.statistics)) pqp.peakNumReceivers = core.MaxUint64(numPeers, pqp.peakNumReceivers) - - pqp.moveStatisticsInAppStatusHandler(peakPeerQuota, networkQuota, numPeers, pqp.peakNumReceivers) - - pqp.statistics = make(map[string]*quota) } func (pqp 
*p2pQuotaProcessor) moveStatisticsInAppStatusHandler( @@ -129,6 +134,23 @@ func (pqp *p2pQuotaProcessor) AddQuota( pqp.mutStatistics.Unlock() } +// SetGlobalQuota sets the global quota statistics +func (pqp *p2pQuotaProcessor) SetGlobalQuota( + numReceived uint32, + sizeReceived uint64, + numProcessed uint32, + sizeProcessed uint64, +) { + pqp.mutStatistics.Lock() + pqp.networkQuota = "a{ + numReceivedMessages: numReceived, + sizeReceivedMessages: sizeReceived, + numProcessedMessages: numProcessed, + sizeProcessedMessages: sizeProcessed, + } + pqp.mutStatistics.Unlock() +} + // IsInterfaceNil returns true if there is no value under the interface func (pqp *p2pQuotaProcessor) IsInterfaceNil() bool { return pqp == nil diff --git a/statusHandler/p2pQuota/p2pQuotaProcessor_test.go b/statusHandler/p2pQuota/p2pQuotaProcessor_test.go index f9229c79515..ce7f2551867 100644 --- a/statusHandler/p2pQuota/p2pQuotaProcessor_test.go +++ b/statusHandler/p2pQuota/p2pQuotaProcessor_test.go @@ -63,36 +63,42 @@ func TestP2pQuotaProcessor_ResetStatisticsShouldEmptyStatsAndCallSetOnAllMetrics numProcessed1 := uint64(3) sizeProcessed1 := uint64(4) + numReceivedNetwork := uint64(5) + sizeReceivedNetwork := uint64(6) + numProcessedNetwork := uint64(7) + sizeProcessedNetwork := uint64(8) + status := mock.NewAppStatusHandlerMock() pqp, _ := p2pQuota.NewP2pQuotaProcessor(status) pqp.AddQuota(identifier1, uint32(numReceived1), sizeReceived1, uint32(numProcessed1), sizeProcessed1) + pqp.SetGlobalQuota(uint32(numReceivedNetwork), sizeReceivedNetwork, uint32(numProcessedNetwork), sizeProcessedNetwork) pqp.ResetStatistics() assert.Nil(t, pqp.GetQuota(identifier1)) numReceivers := uint64(1) - checkSumMetrics(t, status, numReceived1, sizeReceived1, numProcessed1, sizeProcessed1) - checkTopSumMetrics(t, status, numReceived1, sizeReceived1, numProcessed1, sizeProcessed1) - checkMaxMetrics(t, status, numReceived1, sizeReceived1, numProcessed1, sizeProcessed1) - checkTopMaxMetrics(t, status, 
numReceived1, sizeReceived1, numProcessed1, sizeProcessed1) + checkNetworkMetrics(t, status, numReceivedNetwork, sizeReceivedNetwork, numProcessedNetwork, sizeProcessedNetwork) + checkPeakNetworkMetrics(t, status, numReceivedNetwork, sizeReceivedNetwork, numProcessedNetwork, sizeProcessedNetwork) + checkPeerMetrics(t, status, numReceived1, sizeReceived1, numProcessed1, sizeProcessed1) + checkPeakPeerMetrics(t, status, numReceived1, sizeReceived1, numProcessed1, sizeProcessed1) checkNumReceivers(t, status, numReceivers, numReceivers) } -func TestP2pQuotaProcessor_ResetStatisticsShouldSetTops(t *testing.T) { +func TestP2pQuotaProcessor_ResetStatisticsShouldSetPeerStatisticsTops(t *testing.T) { t.Parallel() identifier1 := "identifier" - numReceived1 := uint64(1) - sizeReceived1 := uint64(2) - numProcessed1 := uint64(3) - sizeProcessed1 := uint64(4) + numReceived1 := uint64(10) + sizeReceived1 := uint64(20) + numProcessed1 := uint64(30) + sizeProcessed1 := uint64(40) identifier2 := "identifier" - numReceived2 := uint64(10) - sizeReceived2 := uint64(20) - numProcessed2 := uint64(30) - sizeProcessed2 := uint64(40) + numReceived2 := uint64(1) + sizeReceived2 := uint64(2) + numProcessed2 := uint64(3) + sizeProcessed2 := uint64(4) status := mock.NewAppStatusHandlerMock() pqp, _ := p2pQuota.NewP2pQuotaProcessor(status) @@ -103,14 +109,37 @@ func TestP2pQuotaProcessor_ResetStatisticsShouldSetTops(t *testing.T) { pqp.ResetStatistics() numReceivers := uint64(1) - checkSumMetrics(t, status, numReceived2, sizeReceived2, numProcessed2, sizeProcessed2) - checkTopSumMetrics(t, status, numReceived2, sizeReceived2, numProcessed2, sizeProcessed2) - checkMaxMetrics(t, status, numReceived2, sizeReceived2, numProcessed2, sizeProcessed2) - checkTopMaxMetrics(t, status, numReceived2, sizeReceived2, numProcessed2, sizeProcessed2) + checkPeerMetrics(t, status, numReceived2, sizeReceived2, numProcessed2, sizeProcessed2) + checkPeakPeerMetrics(t, status, numReceived1, sizeReceived1, 
numProcessed1, sizeProcessed1) checkNumReceivers(t, status, numReceivers, numReceivers) } -func checkSumMetrics( +func TestP2pQuotaProcessor_ResetStatisticsShouldSetNetworkStatisticsTops(t *testing.T) { + t.Parallel() + + numReceivedNetwork1 := uint64(10) + sizeReceivedNetwork1 := uint64(20) + numProcessedNetwork1 := uint64(30) + sizeProcessedNetwork1 := uint64(40) + + numReceivedNetwork2 := uint64(1) + sizeReceivedNetwork2 := uint64(2) + numProcessedNetwork2 := uint64(3) + sizeProcessedNetwork2 := uint64(4) + + status := mock.NewAppStatusHandlerMock() + pqp, _ := p2pQuota.NewP2pQuotaProcessor(status) + pqp.SetGlobalQuota(uint32(numReceivedNetwork1), sizeReceivedNetwork1, uint32(numProcessedNetwork1), sizeProcessedNetwork1) + pqp.ResetStatistics() + pqp.SetGlobalQuota(uint32(numReceivedNetwork2), sizeReceivedNetwork2, uint32(numProcessedNetwork2), sizeProcessedNetwork2) + + pqp.ResetStatistics() + + checkNetworkMetrics(t, status, numReceivedNetwork2, sizeReceivedNetwork2, numProcessedNetwork2, sizeProcessedNetwork2) + checkPeakNetworkMetrics(t, status, numReceivedNetwork1, sizeReceivedNetwork1, numProcessedNetwork1, sizeProcessedNetwork1) +} + +func checkNetworkMetrics( t *testing.T, status *mock.AppStatusHandlerMock, numReceived uint64, @@ -132,7 +161,7 @@ func checkSumMetrics( assert.Equal(t, value, sizeProcessed) } -func checkTopSumMetrics( +func checkPeakNetworkMetrics( t *testing.T, status *mock.AppStatusHandlerMock, numReceived uint64, @@ -154,7 +183,7 @@ func checkTopSumMetrics( assert.Equal(t, value, sizeProcessed) } -func checkMaxMetrics( +func checkPeerMetrics( t *testing.T, status *mock.AppStatusHandlerMock, numReceived uint64, @@ -176,7 +205,7 @@ func checkMaxMetrics( assert.Equal(t, value, sizeProcessed) } -func checkTopMaxMetrics( +func checkPeakPeerMetrics( t *testing.T, status *mock.AppStatusHandlerMock, numReceived uint64, From 4628de36e554b12d2c3006a7d22a85e06bbc8ff3 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Wed, 18 Dec 2019 19:40:49 
+0200 Subject: [PATCH 25/35] fixed bot comment in an unit test --- p2p/antiflood/p2pAntiflood_test.go | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/p2p/antiflood/p2pAntiflood_test.go b/p2p/antiflood/p2pAntiflood_test.go index 6d1de450cf2..93564438201 100644 --- a/p2p/antiflood/p2pAntiflood_test.go +++ b/p2p/antiflood/p2pAntiflood_test.go @@ -93,18 +93,10 @@ func TestP2pAntiflood_CanNotIncrementMessageOriginatorShouldError(t *testing.T) } afm, _ := antiflood.NewP2pAntiflood(&mock.FloodPreventerStub{ IncrementAddingToSumCalled: func(identifier string, size uint64) bool { - if identifier == fromConnectedPeer.Pretty() { - return true - } - - return false + return identifier == fromConnectedPeer.Pretty() }, IncrementCalled: func(identifier string, size uint64) bool { - if identifier == message.PeerField.Pretty() { - return false - } - - return true + return identifier != message.PeerField.Pretty() }, }) From 7382b7aec0f14e8b98c95605e2d1e538148a00f5 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Thu, 26 Dec 2019 10:20:03 +0200 Subject: [PATCH 26/35] fixes after review: method and variables renaming --- cmd/node/factory/structs.go | 24 ++++----- .../p2p/antiflood/antiflooding_test.go | 26 +++++----- p2p/antiflood/p2pAntiflood.go | 4 +- p2p/antiflood/p2pAntiflood_test.go | 10 ++-- p2p/mock/floodPreventerStub.go | 14 ++--- p2p/p2p.go | 4 +- process/interface.go | 4 +- .../throttle/antiflood/quotaFloodPreventer.go | 23 ++++---- .../antiflood/quotaFloodPreventer_test.go | 52 +++++++++---------- statusHandler/p2pQuota/p2pQuotaProcessor.go | 2 +- .../p2pQuota/p2pQuotaProcessor_test.go | 18 +++---- 11 files changed, 90 insertions(+), 91 deletions(-) diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index d7957b27284..7b12db736b2 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -482,16 +482,16 @@ func createAntifloodComponent(mainConfig *config.Config, status core.AppStatusHa return nil, err 
} - maxMessagesPerPeer := mainConfig.Antiflood.PeerMaxMessagesPerSecond - maxTotalSizePerPeer := mainConfig.Antiflood.PeerMaxTotalSizePerSecond - maxMessages := mainConfig.Antiflood.MaxMessagesPerSecond - maxTotalSize := mainConfig.Antiflood.MaxTotalSizePerSecond + peerMaxMessagesPerSecond := mainConfig.Antiflood.PeerMaxMessagesPerSecond + peerMaxTotalSizePerSecond := mainConfig.Antiflood.PeerMaxTotalSizePerSecond + maxMessagesPerSecond := mainConfig.Antiflood.MaxMessagesPerSecond + maxTotalSizePerSecond := mainConfig.Antiflood.MaxTotalSizePerSecond log.Debug("started antiflood component", - "maxMessagesPerPeer", maxMessagesPerPeer, - "maxTotalSizePerPeer", core.ConvertBytes(maxTotalSizePerPeer), - "maxMessages", maxMessages, - "maxTotalSize", core.ConvertBytes(maxTotalSize), + "peerMaxMessagesPerSecond", peerMaxMessagesPerSecond, + "peerMaxTotalSizePerSecond", core.ConvertBytes(peerMaxTotalSizePerSecond), + "maxMessagesPerSecond", maxMessagesPerSecond, + "maxTotalSizePerSecond", core.ConvertBytes(maxTotalSizePerSecond), ) quotaProcessor, err := p2pQuota.NewP2pQuotaProcessor(status) @@ -502,10 +502,10 @@ func createAntifloodComponent(mainConfig *config.Config, status core.AppStatusHa floodPreventer, err := antifloodThrottle.NewQuotaFloodPreventer( antifloodCache, quotaProcessor, - maxMessagesPerPeer, - maxTotalSizePerPeer, - maxMessages, - maxTotalSize, + peerMaxMessagesPerSecond, + peerMaxTotalSizePerSecond, + maxMessagesPerSecond, + maxTotalSizePerSecond, ) if err != nil { return nil, err diff --git a/integrationTests/p2p/antiflood/antiflooding_test.go b/integrationTests/p2p/antiflood/antiflooding_test.go index 07eaa281818..46aac60a95f 100644 --- a/integrationTests/p2p/antiflood/antiflooding_test.go +++ b/integrationTests/p2p/antiflood/antiflooding_test.go @@ -37,15 +37,15 @@ func TestAntifloodWithNumMessagesFromTheSamePeer(t *testing.T) { topic := "test_topic" broadcastMessageDuration := time.Second * 2 - peerMaxMumProcessMessages := uint32(5) - 
maxMumProcessMessages := uint32(math.MaxUint32) + peerMaxNumProcessMessages := uint32(5) + maxNumProcessMessages := uint32(math.MaxUint32) maxMessageSize := uint64(1 << 20) //1MB interceptors, err := createTopicsAndMockInterceptors( peers, topic, - peerMaxMumProcessMessages, + peerMaxNumProcessMessages, maxMessageSize, - maxMumProcessMessages, + maxNumProcessMessages, maxMessageSize, ) assert.Nil(t, err) @@ -76,7 +76,7 @@ func TestAntifloodWithNumMessagesFromTheSamePeer(t *testing.T) { isFlooding.Store(false) - checkMessagesOnPeers(t, peers, interceptors, peerMaxMumProcessMessages, floodedIdxes, protectedIdexes) + checkMessagesOnPeers(t, peers, interceptors, peerMaxNumProcessMessages, floodedIdxes, protectedIdexes) } // TestAntifloodWithMessagesFromOtherPeers tests what happens if a peer decide to send a number of messages @@ -99,15 +99,15 @@ func TestAntifloodWithNumMessagesFromOtherPeers(t *testing.T) { // (check integrationTests.CreateFixedNetworkOf14Peers function) topic := "test_topic" broadcastMessageDuration := time.Second * 2 - peerMaxMumProcessMessages := uint32(5) - maxMumProcessMessages := uint32(math.MaxUint32) + peerMaxNumProcessMessages := uint32(5) + maxNumProcessMessages := uint32(math.MaxUint32) maxMessageSize := uint64(1 << 20) //1MB interceptors, err := createTopicsAndMockInterceptors( peers, topic, - peerMaxMumProcessMessages, + peerMaxNumProcessMessages, maxMessageSize, - maxMumProcessMessages, + maxNumProcessMessages, maxMessageSize, ) assert.Nil(t, err) @@ -131,7 +131,7 @@ func TestAntifloodWithNumMessagesFromOtherPeers(t *testing.T) { } time.Sleep(broadcastMessageDuration) - checkMessagesOnPeers(t, peers, interceptors, peerMaxMumProcessMessages, floodedIdxes, protectedIdexes) + checkMessagesOnPeers(t, peers, interceptors, peerMaxNumProcessMessages, floodedIdxes, protectedIdexes) } // TestAntifloodWithMessagesFromTheSamePeer tests what happens if a peer decide to send large messages @@ -154,15 +154,15 @@ func 
TestAntifloodWithLargeSizeMessagesFromTheSamePeer(t *testing.T) { topic := "test_topic" broadcastMessageDuration := time.Second * 2 - maxMumProcessMessages := uint32(math.MaxUint32) + maxNumProcessMessages := uint32(math.MaxUint32) maxMessageSize := uint64(math.MaxUint64) peerMaxMessageSize := uint64(1 << 10) //1KB interceptors, err := createTopicsAndMockInterceptors( peers, topic, - maxMumProcessMessages, + maxNumProcessMessages, peerMaxMessageSize, - maxMumProcessMessages, + maxNumProcessMessages, maxMessageSize, ) assert.Nil(t, err) diff --git a/p2p/antiflood/p2pAntiflood.go b/p2p/antiflood/p2pAntiflood.go index dd8bc75412e..4eabaabbd96 100644 --- a/p2p/antiflood/p2pAntiflood.go +++ b/p2p/antiflood/p2pAntiflood.go @@ -34,14 +34,14 @@ func (af *p2pAntiflood) CanProcessMessage(message p2p.MessageP2P, fromConnectedP } //protect from directly connected peer - ok := floodPreventer.IncrementAddingToSum(fromConnectedPeer.Pretty(), uint64(len(message.Data()))) + ok := floodPreventer.AccumulateGlobal(fromConnectedPeer.Pretty(), uint64(len(message.Data()))) if !ok { return fmt.Errorf("%w in p2pAntiflood for connected peer", p2p.ErrSystemBusy) } if fromConnectedPeer != message.Peer() { //protect from the flooding messages that originate from the same source but come from different peers - ok = floodPreventer.Increment(message.Peer().Pretty(), uint64(len(message.Data()))) + ok = floodPreventer.Accumulate(message.Peer().Pretty(), uint64(len(message.Data()))) if !ok { return fmt.Errorf("%w in p2pAntiflood for originator", p2p.ErrSystemBusy) } diff --git a/p2p/antiflood/p2pAntiflood_test.go b/p2p/antiflood/p2pAntiflood_test.go index 93564438201..40b04b07319 100644 --- a/p2p/antiflood/p2pAntiflood_test.go +++ b/p2p/antiflood/p2pAntiflood_test.go @@ -68,7 +68,7 @@ func TestP2pAntiflood_CanNotIncrementFromConnectedPeerShouldError(t *testing.T) FromField: messageOriginator, } afm, _ := antiflood.NewP2pAntiflood(&mock.FloodPreventerStub{ - IncrementAddingToSumCalled: 
func(identifier string, size uint64) bool { + AccumulateGlobalCalled: func(identifier string, size uint64) bool { if identifier != fromConnectedPeer.Pretty() { assert.Fail(t, "should have been the connected peer") } @@ -92,10 +92,10 @@ func TestP2pAntiflood_CanNotIncrementMessageOriginatorShouldError(t *testing.T) PeerField: p2p.PeerID(messageOriginator), } afm, _ := antiflood.NewP2pAntiflood(&mock.FloodPreventerStub{ - IncrementAddingToSumCalled: func(identifier string, size uint64) bool { + AccumulateGlobalCalled: func(identifier string, size uint64) bool { return identifier == fromConnectedPeer.Pretty() }, - IncrementCalled: func(identifier string, size uint64) bool { + AccumulateCalled: func(identifier string, size uint64) bool { return identifier != message.PeerField.Pretty() }, }) @@ -114,10 +114,10 @@ func TestP2pAntiflood_ShouldWork(t *testing.T) { PeerField: p2p.PeerID(messageOriginator), } afm, _ := antiflood.NewP2pAntiflood(&mock.FloodPreventerStub{ - IncrementAddingToSumCalled: func(identifier string, size uint64) bool { + AccumulateGlobalCalled: func(identifier string, size uint64) bool { return true }, - IncrementCalled: func(identifier string, size uint64) bool { + AccumulateCalled: func(identifier string, size uint64) bool { return true }, }) diff --git a/p2p/mock/floodPreventerStub.go b/p2p/mock/floodPreventerStub.go index 27278e1aa96..022f863927b 100644 --- a/p2p/mock/floodPreventerStub.go +++ b/p2p/mock/floodPreventerStub.go @@ -1,17 +1,17 @@ package mock type FloodPreventerStub struct { - IncrementAddingToSumCalled func(identifier string, size uint64) bool - IncrementCalled func(identifier string, size uint64) bool - ResetCalled func() + AccumulateGlobalCalled func(identifier string, size uint64) bool + AccumulateCalled func(identifier string, size uint64) bool + ResetCalled func() } -func (fps *FloodPreventerStub) IncrementAddingToSum(identifier string, size uint64) bool { - return fps.IncrementAddingToSumCalled(identifier, size) +func (fps 
*FloodPreventerStub) AccumulateGlobal(identifier string, size uint64) bool { + return fps.AccumulateGlobalCalled(identifier, size) } -func (fps *FloodPreventerStub) Increment(identifier string, size uint64) bool { - return fps.IncrementCalled(identifier, size) +func (fps *FloodPreventerStub) Accumulate(identifier string, size uint64) bool { + return fps.AccumulateCalled(identifier, size) } func (fps *FloodPreventerStub) Reset() { diff --git a/p2p/p2p.go b/p2p/p2p.go index 70ee40d007b..aba43118e9c 100644 --- a/p2p/p2p.go +++ b/p2p/p2p.go @@ -195,8 +195,8 @@ type PeerDiscoveryFactory interface { // FloodPreventer defines the behavior of a component that is able to signal that too many events occurred // on a provided identifier between Reset calls type FloodPreventer interface { - IncrementAddingToSum(identifier string, size uint64) bool - Increment(identifier string, size uint64) bool + AccumulateGlobal(identifier string, size uint64) bool + Accumulate(identifier string, size uint64) bool Reset() IsInterfaceNil() bool } diff --git a/process/interface.go b/process/interface.go index 120267bc0a5..681b098eb78 100644 --- a/process/interface.go +++ b/process/interface.go @@ -543,8 +543,8 @@ type InterceptedHeaderSigVerifier interface { // FloodPreventer defines the behavior of a component that is able to signal that too many events occurred // on a provided identifier between Reset calls type FloodPreventer interface { - IncrementAddingToSum(identifier string, size uint64) bool - Increment(identifier string, size uint64) bool + AccumulateGlobal(identifier string, size uint64) bool + Accumulate(identifier string, size uint64) bool Reset() IsInterfaceNil() bool } diff --git a/process/throttle/antiflood/quotaFloodPreventer.go b/process/throttle/antiflood/quotaFloodPreventer.go index 15aee8e7e4a..9c86260c318 100644 --- a/process/throttle/antiflood/quotaFloodPreventer.go +++ b/process/throttle/antiflood/quotaFloodPreventer.go @@ -89,40 +89,40 @@ func NewQuotaFloodPreventer( 
}, nil } -// IncrementAddingToSum tries to increment the counter values held at "identifier" position +// AccumulateGlobal tries to increment the counter values held at "identifier" position // It returns true if it had succeeded incrementing (existing counter value is lower or equal with provided maxOperations) // We need the mutOperation here as the get and put should be done atomically. // Otherwise we might yield a slightly higher number of false valid increments // This method also checks the global sum quota and increment its values -func (qfp *quotaFloodPreventer) IncrementAddingToSum(identifier string, size uint64) bool { +func (qfp *quotaFloodPreventer) AccumulateGlobal(identifier string, size uint64) bool { qfp.mutOperation.Lock() - defer qfp.mutOperation.Unlock() qfp.globalQuota.numReceivedMessages++ qfp.globalQuota.sizeReceivedMessages += size - result := qfp.increment(identifier, size) - if result { + isQuotaNotReached := qfp.accumulate(identifier, size) + if isQuotaNotReached { qfp.globalQuota.numProcessedMessages++ qfp.globalQuota.sizeProcessedMessages += size } + qfp.mutOperation.Unlock() - return result + return isQuotaNotReached } -// Increment tries to increment the counter values held at "identifier" position +// Accumulate tries to increment the counter values held at "identifier" position // It returns true if it had succeeded incrementing (existing counter value is lower or equal with provided maxOperations) // We need the mutOperation here as the get and put should be done atomically. 
// Otherwise we might yield a slightly higher number of false valid increments // This method also checks the global sum quota but does not increment its values -func (qfp *quotaFloodPreventer) Increment(identifier string, size uint64) bool { +func (qfp *quotaFloodPreventer) Accumulate(identifier string, size uint64) bool { qfp.mutOperation.Lock() defer qfp.mutOperation.Unlock() - return qfp.increment(identifier, size) + return qfp.accumulate(identifier, size) } -func (qfp *quotaFloodPreventer) increment(identifier string, size uint64) bool { +func (qfp *quotaFloodPreventer) accumulate(identifier string, size uint64) bool { isGlobalQuotaReached := qfp.globalQuota.numReceivedMessages > qfp.maxMessages || qfp.globalQuota.sizeReceivedMessages > qfp.maxSize if isGlobalQuotaReached { @@ -174,6 +174,7 @@ func (qfp *quotaFloodPreventer) Reset() { qfp.mutOperation.Lock() defer qfp.mutOperation.Unlock() + qfp.statusHandler.ResetStatistics() qfp.createStatistics() //TODO change this if cacher.Clear() is time consuming @@ -183,8 +184,6 @@ func (qfp *quotaFloodPreventer) Reset() { // createStatistics is useful to benchmark the system when running func (qfp quotaFloodPreventer) createStatistics() { - qfp.statusHandler.ResetStatistics() - keys := qfp.cacher.Keys() for _, k := range keys { val, ok := qfp.cacher.Get(k) diff --git a/process/throttle/antiflood/quotaFloodPreventer_test.go b/process/throttle/antiflood/quotaFloodPreventer_test.go index b7f5d31d652..8c6153c689a 100644 --- a/process/throttle/antiflood/quotaFloodPreventer_test.go +++ b/process/throttle/antiflood/quotaFloodPreventer_test.go @@ -134,9 +134,9 @@ func TestNewQuotaFloodPreventer_ShouldWork(t *testing.T) { assert.Nil(t, err) } -//------- Increment +//------- Accumulate -func TestNewQuotaFloodPreventer_IncrementIdentifierNotPresentPutQuotaAndReturnTrue(t *testing.T) { +func TestNewQuotaFloodPreventer_AccumulateIdentifierNotPresentPutQuotaAndReturnTrue(t *testing.T) { t.Parallel() putWasCalled := false @@ -165,13 
+165,13 @@ func TestNewQuotaFloodPreventer_IncrementIdentifierNotPresentPutQuotaAndReturnTr minTotalSize*10, ) - ok := qfp.Increment("identifier", size) + ok := qfp.Accumulate("identifier", size) assert.True(t, ok) assert.True(t, putWasCalled) } -func TestNewQuotaFloodPreventer_IncrementNotQuotaSavedInCacheShouldPutQuotaAndReturnTrue(t *testing.T) { +func TestNewQuotaFloodPreventer_AccumulateNotQuotaSavedInCacheShouldPutQuotaAndReturnTrue(t *testing.T) { t.Parallel() putWasCalled := false @@ -200,13 +200,13 @@ func TestNewQuotaFloodPreventer_IncrementNotQuotaSavedInCacheShouldPutQuotaAndRe minTotalSize*10, ) - ok := qfp.Increment("identifier", size) + ok := qfp.Accumulate("identifier", size) assert.True(t, ok) assert.True(t, putWasCalled) } -func TestNewQuotaFloodPreventer_IncrementUnderMaxValuesShouldIncrementAndReturnTrue(t *testing.T) { +func TestNewQuotaFloodPreventer_AccumulateUnderMaxValuesShouldIncrementAndReturnTrue(t *testing.T) { t.Parallel() putWasCalled := false @@ -241,16 +241,16 @@ func TestNewQuotaFloodPreventer_IncrementUnderMaxValuesShouldIncrementAndReturnT minTotalSize*10, ) - ok := qfp.Increment("identifier", size) + ok := qfp.Accumulate("identifier", size) assert.True(t, ok) assert.True(t, putWasCalled) } -func TestNewQuotaFloodPreventer_IncrementAddingSumWithResetShouldWork(t *testing.T) { +func TestNewQuotaFloodPreventer_AccumulateGlobalWithResetShouldWork(t *testing.T) { t.Parallel() - putWasCalled := 0 + numPutOperations := 0 addedGlobalQuotaCalled := false existingSize := uint64(0) existingMessages := uint32(0) @@ -267,7 +267,7 @@ func TestNewQuotaFloodPreventer_IncrementAddingSumWithResetShouldWork(t *testing }, PutCalled: func(key []byte, value interface{}) (evicted bool) { if string(key) == identifier { - putWasCalled++ + numPutOperations++ } return @@ -294,21 +294,21 @@ func TestNewQuotaFloodPreventer_IncrementAddingSumWithResetShouldWork(t *testing minTotalSize*10, ) - ok := qfp.IncrementAddingToSum(identifier, size) + ok := 
qfp.AccumulateGlobal(identifier, size) assert.True(t, ok) - ok = qfp.IncrementAddingToSum(identifier, size+1) + ok = qfp.AccumulateGlobal(identifier, size+1) assert.True(t, ok) qfp.Reset() - assert.Equal(t, 2, putWasCalled) + assert.Equal(t, 2, numPutOperations) assert.True(t, addedGlobalQuotaCalled) } -//------- Increment per peer +//------- Accumulate per peer -func TestNewQuotaFloodPreventer_IncrementOverMaxPeerNumMessagesShouldNotPutAndReturnFalse(t *testing.T) { +func TestNewQuotaFloodPreventer_AccumulateOverMaxPeerNumMessagesShouldNotPutAndReturnFalse(t *testing.T) { t.Parallel() existingMessages := uint32(minMessages + 11) @@ -335,12 +335,12 @@ func TestNewQuotaFloodPreventer_IncrementOverMaxPeerNumMessagesShouldNotPutAndRe minTotalSize*10, ) - ok := qfp.Increment("identifier", minTotalSize) + ok := qfp.Accumulate("identifier", minTotalSize) assert.False(t, ok) } -func TestNewQuotaFloodPreventer_IncrementOverMaxPeerSizeShouldNotPutAndReturnFalse(t *testing.T) { +func TestNewQuotaFloodPreventer_AccumulateOverMaxPeerSizeShouldNotPutAndReturnFalse(t *testing.T) { t.Parallel() existingMessages := uint32(minMessages) @@ -367,14 +367,14 @@ func TestNewQuotaFloodPreventer_IncrementOverMaxPeerSizeShouldNotPutAndReturnFal minTotalSize*10, ) - ok := qfp.Increment("identifier", minTotalSize) + ok := qfp.Accumulate("identifier", minTotalSize) assert.False(t, ok) } -//------- Increment globally +//------- Accumulate globally -func TestNewQuotaFloodPreventer_IncrementOverMaxNumMessagesShouldNotPutAndReturnFalse(t *testing.T) { +func TestNewQuotaFloodPreventer_AccumulateOverMaxNumMessagesShouldNotPutAndReturnFalse(t *testing.T) { t.Parallel() globalMessages := uint32(minMessages + 11) @@ -398,12 +398,12 @@ func TestNewQuotaFloodPreventer_IncrementOverMaxNumMessagesShouldNotPutAndReturn ) qfp.SetGlobalQuotaValues(globalMessages, globalSize) - ok := qfp.Increment("identifier", minTotalSize) + ok := qfp.Accumulate("identifier", minTotalSize) assert.False(t, ok) } -func 
TestNewQuotaFloodPreventer_IncrementOverMaxSizeShouldNotPutAndReturnFalse(t *testing.T) { +func TestNewQuotaFloodPreventer_AccumulateOverMaxSizeShouldNotPutAndReturnFalse(t *testing.T) { t.Parallel() globalMessages := uint32(minMessages) @@ -427,12 +427,12 @@ func TestNewQuotaFloodPreventer_IncrementOverMaxSizeShouldNotPutAndReturnFalse(t ) qfp.SetGlobalQuotaValues(globalMessages, globalSize) - ok := qfp.Increment("identifier", minTotalSize) + ok := qfp.Accumulate("identifier", minTotalSize) assert.False(t, ok) } -func TestCountersMap_IncrementShouldWorkConcurrently(t *testing.T) { +func TestCountersMap_AccumulateShouldWorkConcurrently(t *testing.T) { t.Parallel() numIterations := 1000 @@ -448,7 +448,7 @@ func TestCountersMap_IncrementShouldWorkConcurrently(t *testing.T) { wg.Add(numIterations) for i := 0; i < numIterations; i++ { go func(idx int) { - ok := qfp.Increment(fmt.Sprintf("%d", idx), minTotalSize) + ok := qfp.Accumulate(fmt.Sprintf("%d", idx), minTotalSize) assert.True(t, ok) wg.Done() }(i) @@ -568,7 +568,7 @@ func TestCountersMap_IncrementAndResetShouldWorkConcurrently(t *testing.T) { wg.Add(numIterations + numIterations/10) for i := 0; i < numIterations; i++ { go func(idx int) { - ok := qfp.Increment(fmt.Sprintf("%d", idx), minTotalSize) + ok := qfp.Accumulate(fmt.Sprintf("%d", idx), minTotalSize) assert.True(t, ok) wg.Done() }(i) diff --git a/statusHandler/p2pQuota/p2pQuotaProcessor.go b/statusHandler/p2pQuota/p2pQuotaProcessor.go index a05a1e13e4e..a5c6ba326e9 100644 --- a/statusHandler/p2pQuota/p2pQuotaProcessor.go +++ b/statusHandler/p2pQuota/p2pQuotaProcessor.go @@ -15,7 +15,7 @@ type quota struct { sizeProcessedMessages uint64 } -// p2pQuotaProcessor implements process.QuotaStatusHandler and is able to periodically sends to a +// p2pQuotaProcessor implements process.QuotaStatusHandler and is able to periodically send to a // statusHandler the processed p2p quota information type p2pQuotaProcessor struct { mutStatistics sync.Mutex diff --git 
a/statusHandler/p2pQuota/p2pQuotaProcessor_test.go b/statusHandler/p2pQuota/p2pQuotaProcessor_test.go index ce7f2551867..309dcb06f30 100644 --- a/statusHandler/p2pQuota/p2pQuotaProcessor_test.go +++ b/statusHandler/p2pQuota/p2pQuotaProcessor_test.go @@ -57,11 +57,11 @@ func TestP2pQuotaProcessor_AddQuotaShouldWork(t *testing.T) { func TestP2pQuotaProcessor_ResetStatisticsShouldEmptyStatsAndCallSetOnAllMetrics(t *testing.T) { t.Parallel() - identifier1 := "identifier" - numReceived1 := uint64(1) - sizeReceived1 := uint64(2) - numProcessed1 := uint64(3) - sizeProcessed1 := uint64(4) + identifier := "identifier" + numReceived := uint64(1) + sizeReceived := uint64(2) + numProcessed := uint64(3) + sizeProcessed := uint64(4) numReceivedNetwork := uint64(5) sizeReceivedNetwork := uint64(6) @@ -70,18 +70,18 @@ func TestP2pQuotaProcessor_ResetStatisticsShouldEmptyStatsAndCallSetOnAllMetrics status := mock.NewAppStatusHandlerMock() pqp, _ := p2pQuota.NewP2pQuotaProcessor(status) - pqp.AddQuota(identifier1, uint32(numReceived1), sizeReceived1, uint32(numProcessed1), sizeProcessed1) + pqp.AddQuota(identifier, uint32(numReceived), sizeReceived, uint32(numProcessed), sizeProcessed) pqp.SetGlobalQuota(uint32(numReceivedNetwork), sizeReceivedNetwork, uint32(numProcessedNetwork), sizeProcessedNetwork) pqp.ResetStatistics() - assert.Nil(t, pqp.GetQuota(identifier1)) + assert.Nil(t, pqp.GetQuota(identifier)) numReceivers := uint64(1) checkNetworkMetrics(t, status, numReceivedNetwork, sizeReceivedNetwork, numProcessedNetwork, sizeProcessedNetwork) checkPeakNetworkMetrics(t, status, numReceivedNetwork, sizeReceivedNetwork, numProcessedNetwork, sizeProcessedNetwork) - checkPeerMetrics(t, status, numReceived1, sizeReceived1, numProcessed1, sizeProcessed1) - checkPeakPeerMetrics(t, status, numReceived1, sizeReceived1, numProcessed1, sizeProcessed1) + checkPeerMetrics(t, status, numReceived, sizeReceived, numProcessed, sizeProcessed) + checkPeakPeerMetrics(t, status, numReceived, 
sizeReceived, numProcessed, sizeProcessed) checkNumReceivers(t, status, numReceivers, numReceivers) } From 6d2144628a4fdfb45d9c3f93b8e8aada980915e5 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Sun, 29 Dec 2019 12:16:10 +0200 Subject: [PATCH 27/35] fixes after merge --- cmd/node/factory/structs.go | 2 +- consensus/spos/worker_test.go | 2 +- p2p/libp2p/netMessenger_test.go | 1 + p2p/memp2p/messenger.go | 25 ++----------------------- process/block/argProcessor.go | 2 +- process/block/metablock.go | 2 +- process/scToProtocol/stakingToPeer.go | 5 +++-- process/sync/metablock_test.go | 1 - 8 files changed, 10 insertions(+), 30 deletions(-) diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index d99a44867f3..79f8075c057 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -502,7 +502,7 @@ func NetworkComponentsFactory(p2pConfig *config.P2PConfig, mainConfig *config.Co } func createAntifloodComponent(mainConfig *config.Config, status core.AppStatusHandler) (consensus.P2PAntifloodHandler, error) { - cacheConfig := getCacherFromConfig(mainConfig.Antiflood.Cache) + cacheConfig := storageFactory.GetCacherFromConfig(mainConfig.Antiflood.Cache) antifloodCache, err := storageUnit.NewCache(cacheConfig.Type, cacheConfig.Size, cacheConfig.Shards) if err != nil { return nil, err diff --git a/consensus/spos/worker_test.go b/consensus/spos/worker_test.go index 5d850c23e9d..af700d95060 100644 --- a/consensus/spos/worker_test.go +++ b/consensus/spos/worker_test.go @@ -784,7 +784,7 @@ func TestWorker_ProcessReceivedMessageWrongHeaderShouldErr(t *testing.T) { ) buff, _ := wrk.Marshalizer().Marshal(cnsMsg) time.Sleep(time.Second) - err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, nil) + err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, fromConnectedPeerId) assert.Equal(t, spos.ErrInvalidHeader, err) } diff --git a/p2p/libp2p/netMessenger_test.go b/p2p/libp2p/netMessenger_test.go index 
35d4ded45c5..92ef4721086 100644 --- a/p2p/libp2p/netMessenger_test.go +++ b/p2p/libp2p/netMessenger_test.go @@ -9,6 +9,7 @@ import ( "fmt" "strings" "sync" + "sync/atomic" "testing" "time" diff --git a/p2p/memp2p/messenger.go b/p2p/memp2p/messenger.go index b842cf3a1ff..aaa499ab767 100644 --- a/p2p/memp2p/messenger.go +++ b/p2p/memp2p/messenger.go @@ -314,27 +314,6 @@ func (messenger *Messenger) processFromQueue() { continue } - //TODO(jls) - /* - for _, peer := range messenger.Network.Peers() { - if async { - go func(receivingPeer *Messenger) { - err := receivingPeer.ReceiveMessage(topic, message, messenger.P2PID) - log.LogIfError(err) - }(peer) - } else { - err = peer.ReceiveMessage(topic, message, messenger.P2PID) - } - if err != nil { - break - } - } - - - */ - - - topic := message.TopicIDs()[0] if topic == "" { continue @@ -356,7 +335,7 @@ func (messenger *Messenger) processFromQueue() { } messenger.topicsMutex.Unlock() - _ = validator.ProcessReceivedMessage(message, nil) + _ = validator.ProcessReceivedMessage(message, messenger.p2pID) } } @@ -370,10 +349,10 @@ func (messenger *Messenger) SendToConnectedPeer(topic string, buff []byte, peerI if !peerFound { return ErrReceivingPeerNotConnected } + receivingPeer.receiveMessage(message) return nil - return receivingPeer.ReceiveMessage(topic, message, messenger.P2PID) } return ErrNotConnectedToNetwork diff --git a/process/block/argProcessor.go b/process/block/argProcessor.go index c05b338b811..05e49dfcdc6 100644 --- a/process/block/argProcessor.go +++ b/process/block/argProcessor.go @@ -51,7 +51,7 @@ type ArgMetaProcessor struct { ArgBaseProcessor DataPool dataRetriever.MetaPoolsHolder PendingMiniBlocks process.PendingMiniBlocksHandler - SCDataGetter external.SCQueryService + SCDataGetter process.SCQueryService PeerChangesHandler process.PeerChangesHandler SCToProtocol process.SmartContractToProtocolHandler } diff --git a/process/block/metablock.go b/process/block/metablock.go index 9d9f2a2f2ff..74ed00a1404 100644 
--- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -26,7 +26,7 @@ type metaProcessor struct { *baseProcessor core serviceContainer.Core dataPool dataRetriever.MetaPoolsHolder - scDataGetter external.SCQueryService + scDataGetter process.SCQueryService scToProtocol process.SmartContractToProtocolHandler peerChanges process.PeerChangesHandler pendingMiniBlocks process.PendingMiniBlocksHandler diff --git a/process/scToProtocol/stakingToPeer.go b/process/scToProtocol/stakingToPeer.go index ec32e2dec91..f205ef294a9 100644 --- a/process/scToProtocol/stakingToPeer.go +++ b/process/scToProtocol/stakingToPeer.go @@ -13,6 +13,7 @@ import ( "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/node/external" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/vm/factory" @@ -29,7 +30,7 @@ type ArgStakingToPeer struct { ArgParser process.ArgumentsParser CurrTxs dataRetriever.TransactionCacher - ScQuery process.SCQueryService + ScQuery external.SCQueryService } // stakingToPeer defines the component which will translate changes from staking SC state @@ -43,7 +44,7 @@ type stakingToPeer struct { argParser process.ArgumentsParser currTxs dataRetriever.TransactionCacher - scQuery process.SCQueryService + scQuery external.SCQueryService mutPeerChanges sync.Mutex peerChanges map[string]block.PeerData diff --git a/process/sync/metablock_test.go b/process/sync/metablock_test.go index d07f2178bbc..e6fca781ce3 100644 --- a/process/sync/metablock_test.go +++ b/process/sync/metablock_test.go @@ -802,7 +802,6 @@ func TestMetaBootstrap_SyncBlockShouldCallRollBack(t *testing.T) { Hash: []byte("hash"), } } - forkDetector.RemoveHeadersCalled = func(nonce uint64, hash []byte) { } forkDetector.GetHighestFinalBlockNonceCalled = func() uint64 { From 
2f57831e9ea08d46e50175952e21f1c0eb966770 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Sun, 29 Dec 2019 12:56:21 +0200 Subject: [PATCH 28/35] fix golang bot issues --- epochStart/metachain/pendingMiniBlocks_test.go | 3 +++ epochStart/mock/cacherStub.go | 5 +---- epochStart/mock/marshalizerMock.go | 5 +---- epochStart/mock/poolsHolderStub.go | 5 +---- epochStart/shardchain/trigger.go | 2 +- p2p/memp2p/export_test.go | 2 +- p2p/memp2p/messenger.go | 8 ++++---- process/block/metablock_test.go | 1 + 8 files changed, 13 insertions(+), 18 deletions(-) diff --git a/epochStart/metachain/pendingMiniBlocks_test.go b/epochStart/metachain/pendingMiniBlocks_test.go index 26e735ab2a5..df0c68c2560 100644 --- a/epochStart/metachain/pendingMiniBlocks_test.go +++ b/epochStart/metachain/pendingMiniBlocks_test.go @@ -131,6 +131,8 @@ func TestPendingMiniBlockHeaders_AddProcessedHeader(t *testing.T) { //Check miniblocks headers are returned shdMbHdrs, err := pmb.PendingMiniBlockHeaders([]data.HeaderHandler{shardHeader}) + assert.Nil(t, err) + assert.True(t, isMbInSlice(hash1, shdMbHdrs)) assert.True(t, isMbInSlice(hash2, shdMbHdrs)) @@ -139,6 +141,7 @@ func TestPendingMiniBlockHeaders_AddProcessedHeader(t *testing.T) { //Check miniblocks headers are removed from pending list shdMbHdrs, err = pmb.PendingMiniBlockHeaders([]data.HeaderHandler{shardHeader}) + assert.Nil(t, err) assert.False(t, isMbInSlice(hash1, shdMbHdrs)) assert.False(t, isMbInSlice(hash2, shdMbHdrs)) } diff --git a/epochStart/mock/cacherStub.go b/epochStart/mock/cacherStub.go index a35caeff349..f86d6be6bf1 100644 --- a/epochStart/mock/cacherStub.go +++ b/epochStart/mock/cacherStub.go @@ -65,8 +65,5 @@ func (cs *CacherStub) RegisterHandler(handler func(key []byte)) { // IsInterfaceNil returns true if there is no value under the interface func (cs *CacherStub) IsInterfaceNil() bool { - if cs == nil { - return true - } - return false + return cs == nil } diff --git a/epochStart/mock/marshalizerMock.go 
b/epochStart/mock/marshalizerMock.go index deebc6139bc..5299a5bb257 100644 --- a/epochStart/mock/marshalizerMock.go +++ b/epochStart/mock/marshalizerMock.go @@ -48,8 +48,5 @@ func (mm *MarshalizerMock) Unmarshal(obj interface{}, buff []byte) error { // IsInterfaceNil returns true if there is no value under the interface func (mm *MarshalizerMock) IsInterfaceNil() bool { - if mm == nil { - return true - } - return false + return mm == nil } diff --git a/epochStart/mock/poolsHolderStub.go b/epochStart/mock/poolsHolderStub.go index 35a1d5e92db..6e711f37af8 100644 --- a/epochStart/mock/poolsHolderStub.go +++ b/epochStart/mock/poolsHolderStub.go @@ -55,8 +55,5 @@ func (phs *PoolsHolderStub) RewardTransactions() dataRetriever.ShardedDataCacher // IsInterfaceNil returns true if there is no value under the interface func (phs *PoolsHolderStub) IsInterfaceNil() bool { - if phs == nil { - return true - } - return false + return phs == nil } diff --git a/epochStart/shardchain/trigger.go b/epochStart/shardchain/trigger.go index 54da7f0d7c0..c7764258c31 100644 --- a/epochStart/shardchain/trigger.go +++ b/epochStart/shardchain/trigger.go @@ -197,7 +197,7 @@ func (t *trigger) ReceivedHeader(header data.HeaderHandler) { return } - if t.newEpochHdrReceived == false && !metaHdr.IsStartOfEpochBlock() { + if !t.newEpochHdrReceived && !metaHdr.IsStartOfEpochBlock() { return } diff --git a/p2p/memp2p/export_test.go b/p2p/memp2p/export_test.go index f22dbff0976..d779ae8e0ee 100644 --- a/p2p/memp2p/export_test.go +++ b/p2p/memp2p/export_test.go @@ -4,7 +4,7 @@ import "github.com/ElrondNetwork/elrond-go/p2p" func (messenger *Messenger) TopicValidator(name string) p2p.MessageProcessor { messenger.topicsMutex.RLock() - processor, _ := messenger.topicValidators[name] + processor := messenger.topicValidators[name] messenger.topicsMutex.RUnlock() return processor diff --git a/p2p/memp2p/messenger.go b/p2p/memp2p/messenger.go index aaa499ab767..0aee10b9cf5 100644 --- a/p2p/memp2p/messenger.go 
+++ b/p2p/memp2p/messenger.go @@ -207,7 +207,7 @@ func (messenger *Messenger) HasTopic(name string) bool { // Returns false otherwise. func (messenger *Messenger) HasTopicValidator(name string) bool { messenger.topicsMutex.RLock() - validator, _ := messenger.topicValidators[name] + validator := messenger.topicValidators[name] messenger.topicsMutex.RUnlock() return check.IfNil(validator) @@ -228,7 +228,7 @@ func (messenger *Messenger) RegisterMessageProcessor(topic string, handler p2p.M return fmt.Errorf("%w RegisterMessageProcessor, topic: %s", p2p.ErrNilTopic, topic) } - validator, _ := messenger.topicValidators[topic] + validator := messenger.topicValidators[topic] if !check.IfNil(validator) { return p2p.ErrTopicValidatorOperationNotSupported } @@ -248,7 +248,7 @@ func (messenger *Messenger) UnregisterMessageProcessor(topic string) error { return fmt.Errorf("%w UnregisterMessageProcessor, topic: %s", p2p.ErrNilTopic, topic) } - validator, _ := messenger.topicValidators[topic] + validator := messenger.topicValidators[topic] if check.IfNil(validator) { return p2p.ErrTopicValidatorOperationNotSupported } @@ -328,7 +328,7 @@ func (messenger *Messenger) processFromQueue() { // numReceived gets incremented because the message arrived on a registered topic atomic.AddUint64(&messenger.numReceived, 1) - validator, _ := messenger.topicValidators[topic] + validator := messenger.topicValidators[topic] if check.IfNil(validator) { messenger.topicsMutex.Unlock() continue diff --git a/process/block/metablock_test.go b/process/block/metablock_test.go index 1f6b9e050af..80980bbb222 100644 --- a/process/block/metablock_test.go +++ b/process/block/metablock_test.go @@ -1762,6 +1762,7 @@ func TestMetaProcessor_CheckShardHeadersValidityRoundZeroLastNoted(t *testing.T) metaHdr.ShardInfo = append(metaHdr.ShardInfo, shDataCurr) highestNonceHdrs, err := mp.CheckShardHeadersValidity(metaHdr) + assert.Nil(t, err) assert.Equal(t, 0, len(highestNonceHdrs)) pool.ShardHeaders().Put(currHash, 
currHdr) From 2fbe8a75aa34af9612bc37c153c66a1cdab15f68 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Sun, 29 Dec 2019 13:28:58 +0200 Subject: [PATCH 29/35] fix more golang bot issues --- epochStart/mock/rounderStub.go | 5 +---- epochStart/mock/shardIdHashMapStub.go | 5 +---- epochStart/mock/storerStub.go | 5 +---- 3 files changed, 3 insertions(+), 12 deletions(-) diff --git a/epochStart/mock/rounderStub.go b/epochStart/mock/rounderStub.go index b3dde0616a6..73252b7d5cf 100644 --- a/epochStart/mock/rounderStub.go +++ b/epochStart/mock/rounderStub.go @@ -57,8 +57,5 @@ func (rndm *RounderStub) RemainingTime(startTime time.Time, maxTime time.Duratio // IsInterfaceNil returns true if there is no value under the interface func (rndm *RounderStub) IsInterfaceNil() bool { - if rndm == nil { - return true - } - return false + return rndm == nil } diff --git a/epochStart/mock/shardIdHashMapStub.go b/epochStart/mock/shardIdHashMapStub.go index 1d2f3463ece..f87fc07de94 100644 --- a/epochStart/mock/shardIdHashMapStub.go +++ b/epochStart/mock/shardIdHashMapStub.go @@ -25,8 +25,5 @@ func (sihsm *ShardIdHasMapStub) Delete(shardId uint32) { // IsInterfaceNil returns true if there is no value under the interface func (sihsm *ShardIdHasMapStub) IsInterfaceNil() bool { - if sihsm == nil { - return true - } - return false + return sihsm == nil } diff --git a/epochStart/mock/storerStub.go b/epochStart/mock/storerStub.go index af7d1b3ee16..96425489da6 100644 --- a/epochStart/mock/storerStub.go +++ b/epochStart/mock/storerStub.go @@ -35,8 +35,5 @@ func (ss *StorerStub) DestroyUnit() error { // IsInterfaceNil returns true if there is no value under the interface func (ss *StorerStub) IsInterfaceNil() bool { - if ss == nil { - return true - } - return false + return ss == nil } From d1e90ea93aabbe8e281bfc58cf5601ea9bc57c1e Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Sun, 29 Dec 2019 13:36:03 +0200 Subject: [PATCH 30/35] fix more golang bot issues --- 
epochStart/mock/syncTimerStub.go | 5 +---- epochStart/mock/uint64ByteSliceConverterMock.go | 5 +---- epochStart/mock/uint64SyncMapCacherStub.go | 5 +---- 3 files changed, 3 insertions(+), 12 deletions(-) diff --git a/epochStart/mock/syncTimerStub.go b/epochStart/mock/syncTimerStub.go index 7ee040f18dc..f86ddbdb6a7 100644 --- a/epochStart/mock/syncTimerStub.go +++ b/epochStart/mock/syncTimerStub.go @@ -34,8 +34,5 @@ func (s *SyncTimerStub) CurrentTime() time.Time { // IsInterfaceNil returns true if there is no value under the interface func (stm *SyncTimerStub) IsInterfaceNil() bool { - if stm == nil { - return true - } - return false + return stm == nil } diff --git a/epochStart/mock/uint64ByteSliceConverterMock.go b/epochStart/mock/uint64ByteSliceConverterMock.go index b9afe5ecab8..68c1ec89397 100644 --- a/epochStart/mock/uint64ByteSliceConverterMock.go +++ b/epochStart/mock/uint64ByteSliceConverterMock.go @@ -24,8 +24,5 @@ func (u *Uint64ByteSliceConverterMock) ToUint64(p []byte) (uint64, error) { // IsInterfaceNil returns true if there is no value under the interface func (u *Uint64ByteSliceConverterMock) IsInterfaceNil() bool { - if u == nil { - return true - } - return false + return u == nil } diff --git a/epochStart/mock/uint64SyncMapCacherStub.go b/epochStart/mock/uint64SyncMapCacherStub.go index c48e1deee2c..7c58567fe34 100644 --- a/epochStart/mock/uint64SyncMapCacherStub.go +++ b/epochStart/mock/uint64SyncMapCacherStub.go @@ -39,8 +39,5 @@ func (usmcs *Uint64SyncMapCacherStub) Remove(nonce uint64, shardId uint32) { // IsInterfaceNil returns true if there is no value under the interface func (usmcs *Uint64SyncMapCacherStub) IsInterfaceNil() bool { - if usmcs == nil { - return true - } - return false + return usmcs == nil } From 5b21aebed744dde1627c4f77d09e65bccfb80f73 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Mon, 30 Dec 2019 16:57:53 +0200 Subject: [PATCH 31/35] impl + refactor --- cmd/node/factory/structs.go | 2 +- 
.../p2p/antiflood/antiflooding_test.go | 2 +- p2p/errors.go | 6 + p2p/libp2p/connectionMonitor.go | 148 ++++++++++ p2p/libp2p/connectionMonitorNotifiee.go | 55 ++++ p2p/libp2p/connectionMonitorNotifiee_test.go | 143 +++++++++ p2p/libp2p/connectionMonitor_test.go | 277 ++++++++++++++++++ p2p/libp2p/discovery/kadDhtDiscoverer.go | 12 +- p2p/libp2p/issues_test.go | 2 +- p2p/libp2p/libp2pConnectionMonitor.go | 116 -------- p2p/libp2p/libp2pConnectionMonitor_test.go | 155 ---------- p2p/libp2p/libp2pContext.go | 42 ++- p2p/libp2p/libp2pContext_test.go | 41 ++- p2p/libp2p/memMessenger.go | 9 +- p2p/libp2p/netMessenger.go | 65 +++- p2p/libp2p/netMessenger_test.go | 30 +- p2p/libp2p/options.go | 16 + p2p/mock/blacklistHandlerStub.go | 13 + p2p/mock/connectionMonitorStub.go | 30 ++ p2p/mock/nilBlacklistHandler.go | 16 + p2p/mock/reconnecterStub.go | 6 +- p2p/p2p.go | 17 +- process/errors.go | 6 - .../throttle/antiflood/quotaFloodPreventer.go | 47 ++- .../antiflood/quotaFloodPreventer_test.go | 132 +++++---- statusHandler/p2pQuota/interface.go | 7 + .../p2pQuota/p2pQuotaBlacklistProcessor.go | 20 ++ 27 files changed, 1016 insertions(+), 399 deletions(-) create mode 100644 p2p/libp2p/connectionMonitor.go create mode 100644 p2p/libp2p/connectionMonitorNotifiee.go create mode 100644 p2p/libp2p/connectionMonitorNotifiee_test.go create mode 100644 p2p/libp2p/connectionMonitor_test.go delete mode 100644 p2p/libp2p/libp2pConnectionMonitor.go delete mode 100644 p2p/libp2p/libp2pConnectionMonitor_test.go create mode 100644 p2p/libp2p/options.go create mode 100644 p2p/mock/blacklistHandlerStub.go create mode 100644 p2p/mock/connectionMonitorStub.go create mode 100644 p2p/mock/nilBlacklistHandler.go create mode 100644 statusHandler/p2pQuota/interface.go create mode 100644 statusHandler/p2pQuota/p2pQuotaBlacklistProcessor.go diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index 7b12db736b2..3f54c722a34 100644 --- a/cmd/node/factory/structs.go +++ 
b/cmd/node/factory/structs.go @@ -501,7 +501,7 @@ func createAntifloodComponent(mainConfig *config.Config, status core.AppStatusHa floodPreventer, err := antifloodThrottle.NewQuotaFloodPreventer( antifloodCache, - quotaProcessor, + []antifloodThrottle.QuotaStatusHandler{quotaProcessor}, peerMaxMessagesPerSecond, peerMaxTotalSizePerSecond, maxMessagesPerSecond, diff --git a/integrationTests/p2p/antiflood/antiflooding_test.go b/integrationTests/p2p/antiflood/antiflooding_test.go index 46aac60a95f..81c013c2a2e 100644 --- a/integrationTests/p2p/antiflood/antiflooding_test.go +++ b/integrationTests/p2p/antiflood/antiflooding_test.go @@ -243,7 +243,7 @@ func createTopicsAndMockInterceptors( interceptors[idx] = newMessageProcessor() interceptors[idx].floodPreventer, _ = antiflood.NewQuotaFloodPreventer( antifloodPool, - &nilQuotaStatusHandler{}, + []antiflood.QuotaStatusHandler{&nilQuotaStatusHandler{}}, peerMaxNumMessages, peerMaxSize, maxNumMessages, diff --git a/p2p/errors.go b/p2p/errors.go index 02f05980c57..ff4eade3f18 100644 --- a/p2p/errors.go +++ b/p2p/errors.go @@ -93,3 +93,9 @@ var ErrNilFloodPreventer = errors.New("nil flood preventer") // ErrSystemBusy signals that the system is busy var ErrSystemBusy = errors.New("system busy") + +// ErrNilPeerBlacklistHandler signals that a nil peer black list handler was provided +var ErrNilPeerBlacklistHandler = errors.New("nil peer black list handler") + +// ErrPeerBlacklisted signals that a peer is blacklisted +var ErrPeerBlacklisted = errors.New("peer is blacklisted") diff --git a/p2p/libp2p/connectionMonitor.go b/p2p/libp2p/connectionMonitor.go new file mode 100644 index 00000000000..61be3f10ae7 --- /dev/null +++ b/p2p/libp2p/connectionMonitor.go @@ -0,0 +1,148 @@ +package libp2p + +import ( + "fmt" + "math" + + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/p2p/libp2p/networksharding" + "github.com/libp2p/go-libp2p-core/network" + 
"github.com/libp2p/go-libp2p-core/peer" +) + +type connectionMonitor struct { + chDoReconnect chan struct{} + reconnecter p2p.Reconnecter + libp2pContext *Libp2pContext + netw network.Network + thresholdMinConnectedPeers int + thresholdDiscoveryResume int // if the number of connections drops under this value, the discovery is restarted + thresholdDiscoveryPause int // if the number of connections is over this value, the discovery is stopped + thresholdConnTrim int // if the number of connections is over this value, we start trimming +} + +func newConnectionMonitor( + reconnecter p2p.Reconnecter, + libp2pContext *Libp2pContext, + thresholdMinConnectedPeers int, + targetConnCount int, +) (*connectionMonitor, error) { + + if thresholdMinConnectedPeers <= 0 { + return nil, fmt.Errorf("%w, thresholdMinConnectedPeers expected to be higher than 0", + p2p.ErrInvalidValue) + } + if targetConnCount <= 0 { + return nil, fmt.Errorf("%w, targetConnCount expected to be higher than 0", p2p.ErrInvalidValue) + } + if check.IfNil(libp2pContext) { + return nil, p2p.ErrNilContextProvider + } + + cm := &connectionMonitor{ + reconnecter: reconnecter, + chDoReconnect: make(chan struct{}, 0), + libp2pContext: libp2pContext, + netw: libp2pContext.connHost.Network(), + thresholdMinConnectedPeers: thresholdMinConnectedPeers, + thresholdDiscoveryResume: 0, + thresholdDiscoveryPause: math.MaxInt32, + thresholdConnTrim: math.MaxInt32, + } + + if targetConnCount > 0 { + cm.thresholdDiscoveryResume = targetConnCount * 4 / 5 + cm.thresholdDiscoveryPause = targetConnCount + cm.thresholdConnTrim = targetConnCount * 6 / 5 + } + + return cm, nil +} + +// HandleConnectedPeer is called whenever a new peer is connected to the current host +func (cm *connectionMonitor) HandleConnectedPeer(pid p2p.PeerID) error { + blacklistHandler := cm.libp2pContext.PeerBlacklist() + if blacklistHandler.Has(string(pid)) { + return fmt.Errorf("%w, pid: %s", p2p.ErrPeerBlacklisted, pid.Pretty()) + } + + if 
len(cm.netw.Conns()) > cm.thresholdDiscoveryPause && cm.reconnecter != nil { + cm.reconnecter.Pause() + } + if len(cm.netw.Conns()) > cm.thresholdConnTrim { + sorted := networksharding.Get().SortList(cm.netw.Peers(), cm.netw.LocalPeer()) + for i := cm.thresholdDiscoveryPause; i < len(sorted); i++ { + cm.closePeer(sorted[i]) + } + cm.doReconn() + } + + return nil +} + +// HandleDisconnectedPeer is called whenever a new peer is disconnected from the current host +func (cm *connectionMonitor) HandleDisconnectedPeer(_ p2p.PeerID) error { + cm.doReconnectionIfNeeded() + + if len(cm.netw.Conns()) < cm.thresholdDiscoveryResume && cm.reconnecter != nil { + cm.reconnecter.Resume() + cm.doReconn() + } + + return nil +} + +// DoReconnectionBlocking will try to reconnect to the initial addresses (seeders) +func (cm *connectionMonitor) DoReconnectionBlocking() { + select { + case <-cm.chDoReconnect: + if cm.reconnecter != nil { + cm.reconnecter.ReconnectToNetwork() + } + } +} + +// CheckConnectionsBlocking will sweep all available connections checking if the connection has or not been blacklisted +func (cm *connectionMonitor) CheckConnectionsBlocking() { + peers := cm.netw.Peers() + blacklistHandler := cm.libp2pContext.PeerBlacklist() + for _, pid := range peers { + if blacklistHandler.Has(string(pid)) { + cm.closePeer(pid) + } + } +} + +func (cm *connectionMonitor) closePeer(pid peer.ID) { + err := cm.netw.ClosePeer(pid) + if err != nil { + log.Trace("error closing peer connection HandleConnectedPeer", + "pid", pid.Pretty(), + "error", err.Error(), + ) + } +} + +// Request a reconnect to initial list +func (cm *connectionMonitor) doReconn() { + select { + case cm.chDoReconnect <- struct{}{}: + default: + } +} + +func (cm *connectionMonitor) doReconnectionIfNeeded() { + if !cm.isConnectedToTheNetwork() { + cm.doReconn() + } +} + +func (cm *connectionMonitor) isConnectedToTheNetwork() bool { + return len(cm.netw.Conns()) >= cm.thresholdMinConnectedPeers +} + +// IsInterfaceNil 
returns true if there is no value under the interface +func (cm *connectionMonitor) IsInterfaceNil() bool { + return cm == nil +} diff --git a/p2p/libp2p/connectionMonitorNotifiee.go b/p2p/libp2p/connectionMonitorNotifiee.go new file mode 100644 index 00000000000..8c526e15f93 --- /dev/null +++ b/p2p/libp2p/connectionMonitorNotifiee.go @@ -0,0 +1,55 @@ +package libp2p + +import ( + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/libp2p/go-libp2p-core/network" + "github.com/multiformats/go-multiaddr" +) + +// connectionMonitorNotifiee is a wrapper over p2p.ConnectionMonitor that satisfies the Notifiee interface +// and is able to be notified by the current running host (connection status changes) +type connectionMonitorNotifiee struct { + p2p.ConnectionMonitor +} + +// Listen is called when network starts listening on an addr +func (cmn *connectionMonitorNotifiee) Listen(network.Network, multiaddr.Multiaddr) {} + +// ListenClose is called when network stops listening on an addr +func (cmn *connectionMonitorNotifiee) ListenClose(network.Network, multiaddr.Multiaddr) {} + +// Connected is called when a connection opened +func (cmn *connectionMonitorNotifiee) Connected(netw network.Network, conn network.Conn) { + err := cmn.HandleConnectedPeer(p2p.PeerID(conn.RemotePeer())) + if err != nil { + log.Trace("connection error", + "pid", conn.RemotePeer().Pretty(), + "error", err.Error(), + ) + + err = netw.ClosePeer(conn.RemotePeer()) + if err != nil { + log.Trace("connection close error", + "pid", conn.RemotePeer().Pretty(), + "error", err.Error(), + ) + } + } +} + +// Disconnected is called when a connection closed +func (cmn *connectionMonitorNotifiee) Disconnected(_ network.Network, conn network.Conn) { + err := cmn.HandleDisconnectedPeer(p2p.PeerID(conn.RemotePeer())) + if err != nil { + log.Trace("disconnection error", + "pid", conn.RemotePeer().Pretty(), + "error", err.Error(), + ) + } +} + +// OpenedStream is called when a stream opened +func (cmn 
*connectionMonitorNotifiee) OpenedStream(network.Network, network.Stream) {} + +// ClosedStream is called when a stream closed +func (cmn *connectionMonitorNotifiee) ClosedStream(network.Network, network.Stream) {} diff --git a/p2p/libp2p/connectionMonitorNotifiee_test.go b/p2p/libp2p/connectionMonitorNotifiee_test.go new file mode 100644 index 00000000000..7c6304ff620 --- /dev/null +++ b/p2p/libp2p/connectionMonitorNotifiee_test.go @@ -0,0 +1,143 @@ +package libp2p + +import ( + "errors" + "testing" + + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/p2p/mock" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/stretchr/testify/assert" +) + +func createStubConn() *mock.ConnStub { + return &mock.ConnStub{ + RemotePeerCalled: func() peer.ID { + return "remote peer" + }, + } +} + +//------- Connected + +func TestConnectionMonitorNotifiee_ConnectedHandledWithErrShouldCloseConnection(t *testing.T) { + t.Parallel() + + cms := &mock.ConnectionMonitorStub{ + HandleConnectedPeerCalled: func(pid p2p.PeerID) error { + return errors.New("expected error") + }, + } + cmn := &connectionMonitorNotifiee{ + ConnectionMonitor: cms, + } + peerCloseCalled := false + conn := createStubConn() + ns := &mock.NetworkStub{ + ClosePeerCall: func(id peer.ID) error { + if id == conn.RemotePeer() { + peerCloseCalled = true + } + return nil + }, + } + + cmn.Connected(ns, conn) + + assert.True(t, peerCloseCalled) +} + +func TestConnectionMonitorNotifiee_ConnectedHandledWithErrShouldNotPanic(t *testing.T) { + t.Parallel() + + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should not have paniced") + } + }() + + expectedErr := errors.New("expected error") + cms := &mock.ConnectionMonitorStub{ + HandleConnectedPeerCalled: func(pid p2p.PeerID) error { + return expectedErr + }, + } + cmn := &connectionMonitorNotifiee{ + ConnectionMonitor: cms, + } + conn := createStubConn() + ns := &mock.NetworkStub{ + ClosePeerCall: func(id peer.ID) error { + 
return expectedErr + }, + } + + cmn.Connected(ns, conn) +} + +//------- Disconnected + +func TestConnectionMonitorNotifiee_DisconnectedShouldCallHandler(t *testing.T) { + t.Parallel() + + handlerCalled := false + cms := &mock.ConnectionMonitorStub{ + HandleDisconnectedPeerCalled: func(pid p2p.PeerID) error { + handlerCalled = true + + return nil + }, + } + cmn := &connectionMonitorNotifiee{ + ConnectionMonitor: cms, + } + conn := createStubConn() + + cmn.Disconnected(nil, conn) + + assert.True(t, handlerCalled) +} + +func TestConnectionMonitorNotifiee_DisconnectedHandledWithErrShouldNotPanic(t *testing.T) { + t.Parallel() + + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should not have paniced") + } + }() + + cms := &mock.ConnectionMonitorStub{ + HandleDisconnectedPeerCalled: func(pid p2p.PeerID) error { + return errors.New("expected error") + }, + } + cmn := &connectionMonitorNotifiee{ + ConnectionMonitor: cms, + } + conn := createStubConn() + + cmn.Disconnected(nil, conn) +} + +//------- handlers + +func TestConnectionMonitorNotifiee_CallingHandlersShouldNotPanic(t *testing.T) { + t.Parallel() + + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should not have paniced") + } + }() + + cmn := &connectionMonitorNotifiee{} + + cmn.Listen(nil, nil) + cmn.ListenClose(nil, nil) + cmn.OpenedStream(nil, nil) + cmn.ClosedStream(nil, nil) +} diff --git a/p2p/libp2p/connectionMonitor_test.go b/p2p/libp2p/connectionMonitor_test.go new file mode 100644 index 00000000000..a390a0dfc83 --- /dev/null +++ b/p2p/libp2p/connectionMonitor_test.go @@ -0,0 +1,277 @@ +package libp2p + +import ( + "errors" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/p2p/mock" + "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/stretchr/testify/assert" +) + +var durTimeoutWaiting = time.Second * 2 + +func 
createNetwork(numPeers int) network.Network { + numPeersCopy := numPeers + currentCount := &numPeersCopy + return &mock.NetworkStub{ + ConnsCalled: func() []network.Conn { + return make([]network.Conn, *currentCount) + }, + PeersCall: func() []peer.ID { + return make([]peer.ID, *currentCount) + }, + ClosePeerCall: func(peer.ID) error { + *currentCount-- + return nil + }, + } +} + +func createStubConnectableHost() *mock.ConnectableHostStub { + return &mock.ConnectableHostStub{ + NetworkCalled: func() network.Network { + return &mock.NetworkStub{} + }, + } +} + +func TestNewConnectionMonitor_InvalidMinConnectedPeersShouldErr(t *testing.T) { + t.Parallel() + + libp2pContext := &Libp2pContext{ + connHost: createStubConnectableHost(), + } + cm, err := newConnectionMonitor(nil, libp2pContext, -1, 1) + + assert.True(t, errors.Is(err, p2p.ErrInvalidValue)) + assert.True(t, check.IfNil(cm)) +} + +func TestNewConnectionMonitor_InvalidTargetCountShouldErr(t *testing.T) { + t.Parallel() + + libp2pContext := &Libp2pContext{ + connHost: createStubConnectableHost(), + } + cm, err := newConnectionMonitor(nil, libp2pContext, 1, -1) + + assert.True(t, errors.Is(err, p2p.ErrInvalidValue)) + assert.True(t, check.IfNil(cm)) +} + +func TestNewConnectionMonitor_NilLibP2pContextShouldErr(t *testing.T) { + t.Parallel() + + cm, err := newConnectionMonitor(&mock.ReconnecterStub{}, nil, 3, 1) + + assert.True(t, errors.Is(err, p2p.ErrNilContextProvider)) + assert.True(t, check.IfNil(cm)) +} + +func TestNewConnectionMonitor_WithNilReconnecterShouldWork(t *testing.T) { + t.Parallel() + + libp2pContext := &Libp2pContext{ + connHost: createStubConnectableHost(), + } + cm, err := newConnectionMonitor(nil, libp2pContext, 3, 1) + + assert.Nil(t, err) + assert.False(t, check.IfNil(cm)) +} + +func TestConnectionMonitor_OnDisconnectedUnderThresholdShouldCallReconnect(t *testing.T) { + t.Parallel() + + chReconnectCalled := make(chan struct{}, 1) + + rs := mock.ReconnecterStub{ + ReconnectToNetworkCalled: 
func() { + chReconnectCalled <- struct{}{} + }, + } + + ns := &mock.NetworkStub{ + ConnsCalled: func() []network.Conn { + //only one connection which is under the threshold + return []network.Conn{ + &mock.ConnStub{}, + } + }, + PeersCall: func() []peer.ID { return nil }, + } + + libp2pContext := &Libp2pContext{ + connHost: &mock.ConnectableHostStub{ + NetworkCalled: func() network.Network { + return ns + }, + }, + blacklistHandler: &mock.NilBlacklistHandler{}, + } + cm, _ := newConnectionMonitor(&rs, libp2pContext, 3, 1) + + go func() { + for { + cm.DoReconnectionBlocking() + } + }() + + //wait for the reconnection blocking go routine to start + time.Sleep(time.Second) + + err := cm.HandleDisconnectedPeer("") + assert.Nil(t, err) + + select { + case <-chReconnectCalled: + case <-time.After(durTimeoutWaiting): + assert.Fail(t, "timeout waiting to call reconnect") + } +} + +func TestConnectionMonitor_HandleConnectedPeerShouldTrim(t *testing.T) { + t.Parallel() + + pauseCallCount := 0 + resumeCallCount := 0 + + rc := mock.ReconnecterStub{ + ReconnectToNetworkCalled: func() {}, + PauseCall: func() { pauseCallCount++ }, + ResumeCall: func() { resumeCallCount++ }, + } + + libp2pContext := &Libp2pContext{ + blacklistHandler: &mock.NilBlacklistHandler{}, + connHost: createStubConnectableHost(), + } + cm, _ := newConnectionMonitor(&rc, libp2pContext, 3, 10) + + assert.NotNil(t, cm) + assert.Equal(t, 8, cm.thresholdDiscoveryResume) + assert.Equal(t, 10, cm.thresholdDiscoveryPause) + assert.Equal(t, 12, cm.thresholdConnTrim) + + pid := p2p.PeerID("remote peer") + + assert.Equal(t, 0, pauseCallCount) + assert.Equal(t, 0, resumeCallCount) + + cm.netw = createNetwork(5) + _ = cm.HandleConnectedPeer(pid) + assert.Equal(t, 0, pauseCallCount) + assert.Equal(t, 0, resumeCallCount) + + cm.netw = createNetwork(9) + _ = cm.HandleConnectedPeer(pid) + assert.Equal(t, 0, pauseCallCount) + assert.Equal(t, 0, resumeCallCount) + + cm.netw = createNetwork(11) + _ = 
cm.HandleConnectedPeer(pid) + assert.Equal(t, 1, pauseCallCount) + assert.Equal(t, 0, resumeCallCount) + + cm.netw = createNetwork(5) + _ = cm.HandleDisconnectedPeer(pid) + assert.Equal(t, 1, pauseCallCount) + assert.Equal(t, 1, resumeCallCount) + + cm.netw = createNetwork(13) + _ = cm.HandleConnectedPeer(pid) + assert.Equal(t, 2, pauseCallCount) + assert.Equal(t, 1, resumeCallCount) + + cm.netw = createNetwork(5) + _ = cm.HandleDisconnectedPeer(pid) + assert.Equal(t, 2, pauseCallCount) + assert.Equal(t, 2, resumeCallCount) +} + +func TestConnectionMonitor_BlackListedPeerShouldErr(t *testing.T) { + t.Parallel() + + pid := p2p.PeerID("remote peer") + libp2pContext := &Libp2pContext{ + blacklistHandler: &mock.BlacklistHandlerStub{ + HasCalled: func(key string) bool { + return key == string(pid) + }, + }, + connHost: createStubConnectableHost(), + } + cm, _ := newConnectionMonitor(nil, libp2pContext, 1, 1) + + err := cm.HandleConnectedPeer(pid) + + assert.True(t, errors.Is(err, p2p.ErrPeerBlacklisted)) +} + +func TestConnectionMonitor_RemoveAnAlreadyConnectedBlacklistedPeer(t *testing.T) { + t.Parallel() + + pid := peer.ID("remote peer") + numCloseCalled := 0 + + ns := &mock.NetworkStub{ + ClosePeerCall: func(id peer.ID) error { + if id == pid { + numCloseCalled++ + } + return nil + }, + PeersCall: func() []peer.ID { + return []peer.ID{pid} + }, + } + + libp2pContext := &Libp2pContext{ + blacklistHandler: &mock.BlacklistHandlerStub{ + HasCalled: func(key string) bool { + return true + }, + }, + connHost: &mock.ConnectableHostStub{ + NetworkCalled: func() network.Network { + return ns + }, + }, + } + cm, _ := newConnectionMonitor(nil, libp2pContext, 1, 1) + + cm.CheckConnectionsBlocking() + + assert.Equal(t, 1, numCloseCalled) +} + +func TestConnectionMonitor_ClosePeerErrorsShouldNotPanic(t *testing.T) { + t.Parallel() + + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should not have paniced") + } + }() + + closePeerCalled := false + expectedErr := 
errors.New("expected error") + cm := &connectionMonitor{ + netw: &mock.NetworkStub{ + ClosePeerCall: func(id peer.ID) error { + closePeerCalled = true + return expectedErr + }, + }, + } + + cm.closePeer("pid") + + assert.True(t, closePeerCalled) +} diff --git a/p2p/libp2p/discovery/kadDhtDiscoverer.go b/p2p/libp2p/discovery/kadDhtDiscoverer.go index 419534bcb12..fc965533b1f 100644 --- a/p2p/libp2p/discovery/kadDhtDiscoverer.go +++ b/p2p/libp2p/discovery/kadDhtDiscoverer.go @@ -4,6 +4,7 @@ import ( "sync" "time" + "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/logger" "github.com/ElrondNetwork/elrond-go/p2p" "github.com/ElrondNetwork/elrond-go/p2p/libp2p" @@ -111,13 +112,13 @@ func (kdd *KadDhtDiscoverer) connectToInitialAndBootstrap() { if kdd.initConns { err := kdd.kadDHT.BootstrapOnce(ctx, cfg) if err == kbucket.ErrLookupFailure { - <-kdd.ReconnectToNetwork() + kdd.ReconnectToNetwork() } i = 1 } else { i++ if (i % initReconnectMul) == 0 { - <-kdd.ReconnectToNetwork() + kdd.ReconnectToNetwork() i = 1 } } @@ -182,7 +183,7 @@ func (kdd *KadDhtDiscoverer) Name() string { // ApplyContext sets the context in which this discoverer is to be run func (kdd *KadDhtDiscoverer) ApplyContext(ctxProvider p2p.ContextProvider) error { - if ctxProvider == nil || ctxProvider.IsInterfaceNil() { + if check.IfNil(ctxProvider) { return p2p.ErrNilContextProvider } @@ -197,8 +198,9 @@ func (kdd *KadDhtDiscoverer) ApplyContext(ctxProvider p2p.ContextProvider) error } // ReconnectToNetwork will try to connect to one peer from the initial peer list -func (kdd *KadDhtDiscoverer) ReconnectToNetwork() <-chan struct{} { - return kdd.connectToOnePeerFromInitialPeersList(kdd.refreshInterval, kdd.initialPeersList) +func (kdd *KadDhtDiscoverer) ReconnectToNetwork() { + chDone := kdd.connectToOnePeerFromInitialPeersList(kdd.refreshInterval, kdd.initialPeersList) + <-chDone } // Pause will suspend the discovery process diff --git a/p2p/libp2p/issues_test.go 
b/p2p/libp2p/issues_test.go index 10dc89eaf5f..121345b85ed 100644 --- a/p2p/libp2p/issues_test.go +++ b/p2p/libp2p/issues_test.go @@ -33,7 +33,7 @@ func createMessenger(port int) p2p.Messenger { loadBalancer.NewOutgoingChannelLoadBalancer(), discovery.NewNullDiscoverer(), libp2p.ListenLocalhostAddrWithIp4AndTcp, - 0, + 100, ) if err != nil { diff --git a/p2p/libp2p/libp2pConnectionMonitor.go b/p2p/libp2p/libp2pConnectionMonitor.go deleted file mode 100644 index a37d203b819..00000000000 --- a/p2p/libp2p/libp2pConnectionMonitor.go +++ /dev/null @@ -1,116 +0,0 @@ -package libp2p - -import ( - "math" - "time" - - "github.com/ElrondNetwork/elrond-go/p2p" - ns "github.com/ElrondNetwork/elrond-go/p2p/libp2p/networksharding" - "github.com/libp2p/go-libp2p-core/network" - "github.com/multiformats/go-multiaddr" -) - -// DurationBetweenReconnectAttempts is used as to not call reconnecter.ReconnectToNetwork() to often -// when there are a lot of peers disconnecting and reconnection to initial nodes succeed -var DurationBetweenReconnectAttempts = time.Second * 5 - -type libp2pConnectionMonitor struct { - chDoReconnect chan struct{} - reconnecter p2p.Reconnecter - thresholdMinConnectedPeers int - thresholdDiscoveryResume int // if the number of connections drops under this value, the discovery is restarted - thresholdDiscoveryPause int // if the number of connections is over this value, the discovery is stopped - thresholdConnTrim int // if the number of connections is over this value, we start trimming -} - -func newLibp2pConnectionMonitor(reconnecter p2p.Reconnecter, thresholdMinConnectedPeers int, targetConnCount int) (*libp2pConnectionMonitor, error) { - if thresholdMinConnectedPeers < 0 { - return nil, p2p.ErrInvalidValue - } - - cm := &libp2pConnectionMonitor{ - reconnecter: reconnecter, - chDoReconnect: make(chan struct{}, 0), - thresholdMinConnectedPeers: thresholdMinConnectedPeers, - thresholdDiscoveryResume: 0, - thresholdDiscoveryPause: math.MaxInt32, - 
thresholdConnTrim: math.MaxInt32, - } - - if targetConnCount > 0 { - cm.thresholdDiscoveryResume = targetConnCount * 4 / 5 - cm.thresholdDiscoveryPause = targetConnCount - cm.thresholdConnTrim = targetConnCount * 6 / 5 - } - - if reconnecter != nil { - go cm.doReconnection() - } - - return cm, nil -} - -// Listen is called when network starts listening on an addr -func (lcm *libp2pConnectionMonitor) Listen(network.Network, multiaddr.Multiaddr) {} - -// ListenClose is called when network stops listening on an addr -func (lcm *libp2pConnectionMonitor) ListenClose(network.Network, multiaddr.Multiaddr) {} - -// Request a reconnect to initial list -func (lcm *libp2pConnectionMonitor) doReconn() { - select { - case lcm.chDoReconnect <- struct{}{}: - default: - } -} - -// Connected is called when a connection opened -func (lcm *libp2pConnectionMonitor) Connected(netw network.Network, conn network.Conn) { - if len(netw.Conns()) > lcm.thresholdDiscoveryPause { - lcm.reconnecter.Pause() - } - if len(netw.Conns()) > lcm.thresholdConnTrim { - sorted := ns.Get().SortList(netw.Peers(), netw.LocalPeer()) - for i := lcm.thresholdDiscoveryPause; i < len(sorted); i++ { - _ = netw.ClosePeer(sorted[i]) - } - lcm.doReconn() - } -} - -// Disconnected is called when a connection closed -func (lcm *libp2pConnectionMonitor) Disconnected(netw network.Network, conn network.Conn) { - lcm.doReconnectionIfNeeded(netw) - - if len(netw.Conns()) < lcm.thresholdDiscoveryResume && lcm.reconnecter != nil { - lcm.reconnecter.Resume() - lcm.doReconn() - } -} - -func (lcm *libp2pConnectionMonitor) doReconnectionIfNeeded(netw network.Network) { - if !lcm.isConnectedToTheNetwork(netw) { - lcm.doReconn() - } -} - -// OpenedStream is called when a stream opened -func (lcm *libp2pConnectionMonitor) OpenedStream(network.Network, network.Stream) {} - -// ClosedStream is called when a stream closed -func (lcm *libp2pConnectionMonitor) ClosedStream(network.Network, network.Stream) {} - -func (lcm 
*libp2pConnectionMonitor) doReconnection() { - for { - select { - case <-lcm.chDoReconnect: - <-lcm.reconnecter.ReconnectToNetwork() - } - - time.Sleep(DurationBetweenReconnectAttempts) - } -} - -func (lcm *libp2pConnectionMonitor) isConnectedToTheNetwork(netw network.Network) bool { - return len(netw.Conns()) >= lcm.thresholdMinConnectedPeers -} diff --git a/p2p/libp2p/libp2pConnectionMonitor_test.go b/p2p/libp2p/libp2pConnectionMonitor_test.go deleted file mode 100644 index cb242d5b39e..00000000000 --- a/p2p/libp2p/libp2pConnectionMonitor_test.go +++ /dev/null @@ -1,155 +0,0 @@ -package libp2p - -import ( - "math" - "testing" - "time" - - "github.com/ElrondNetwork/elrond-go/p2p" - "github.com/ElrondNetwork/elrond-go/p2p/mock" - "github.com/libp2p/go-libp2p-core/network" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/stretchr/testify/assert" -) - -func init() { - DurationBetweenReconnectAttempts = time.Millisecond -} - -var durTimeoutWaiting = time.Second * 2 -var durStartGoRoutine = time.Second - -func TestNewLibp2pConnectionMonitor_WithNegativeThresholdShouldErr(t *testing.T) { - t.Parallel() - - cm, err := newLibp2pConnectionMonitor(nil, -1, 0) - - assert.Equal(t, p2p.ErrInvalidValue, err) - assert.Nil(t, cm) -} - -func TestNewLibp2pConnectionMonitor_WithNilReconnecterShouldWork(t *testing.T) { - t.Parallel() - - cm, err := newLibp2pConnectionMonitor(nil, 3, 0) - - assert.Nil(t, err) - assert.NotNil(t, cm) -} - -func TestNewLibp2pConnectionMonitor_OnDisconnectedUnderThresholdShouldCallReconnect(t *testing.T) { - t.Parallel() - - chReconnectCalled := make(chan struct{}, 1) - - rs := mock.ReconnecterStub{ - ReconnectToNetworkCalled: func() <-chan struct{} { - ch := make(chan struct{}, 1) - ch <- struct{}{} - - chReconnectCalled <- struct{}{} - - return ch - }, - } - - ns := mock.NetworkStub{ - ConnsCalled: func() []network.Conn { - //only one connection which is under the threshold - return []network.Conn{ - &mock.ConnStub{}, - } - }, - } - - cm, _ := 
newLibp2pConnectionMonitor(&rs, 3, 0) - time.Sleep(durStartGoRoutine) - cm.Disconnected(&ns, nil) - - select { - case <-chReconnectCalled: - case <-time.After(durTimeoutWaiting): - assert.Fail(t, "timeout waiting to call reconnect") - } -} - -func TestNewLibp2pConnectionMonitor_DefaultTriming(t *testing.T) { - t.Parallel() - - cm, _ := newLibp2pConnectionMonitor(nil, 3, 0) - - assert.NotNil(t, cm) - assert.Equal(t, 0, cm.thresholdDiscoveryResume) - assert.Equal(t, math.MaxInt32, cm.thresholdDiscoveryPause) - assert.Equal(t, math.MaxInt32, cm.thresholdConnTrim) -} - -func TestNewLibp2pConnectionMonitor_Triming(t *testing.T) { - t.Parallel() - - pauseCallCount := 0 - resumeCallCount := 0 - - rc := mock.ReconnecterStub{ - ReconnectToNetworkCalled: func() <-chan struct{} { - ch := make(chan struct{}) - defer func() { ch <- struct{}{} }() - return ch - }, - PauseCall: func() { pauseCallCount++ }, - ResumeCall: func() { resumeCallCount++ }, - } - - cm, _ := newLibp2pConnectionMonitor(&rc, 3, 10) - - assert.NotNil(t, cm) - assert.Equal(t, 8, cm.thresholdDiscoveryResume) - assert.Equal(t, 10, cm.thresholdDiscoveryPause) - assert.Equal(t, 12, cm.thresholdConnTrim) - - netFact := func(cnt int) network.Network { - cntr := cnt - currentCount := &cntr - return &mock.NetworkStub{ - ConnsCalled: func() []network.Conn { - return make([]network.Conn, *currentCount) - }, - - PeersCall: func() []peer.ID { - return make([]peer.ID, *currentCount) - }, - - ClosePeerCall: func(peer.ID) error { - *currentCount-- - return nil - }, - } - } - - assert.Equal(t, 0, pauseCallCount) - assert.Equal(t, 0, resumeCallCount) - - cm.Connected(netFact(5), nil) - assert.Equal(t, 0, pauseCallCount) - assert.Equal(t, 0, resumeCallCount) - - cm.Connected(netFact(9), nil) - assert.Equal(t, 0, pauseCallCount) - assert.Equal(t, 0, resumeCallCount) - - cm.Connected(netFact(11), nil) - assert.Equal(t, 1, pauseCallCount) - assert.Equal(t, 0, resumeCallCount) - - cm.Disconnected(netFact(5), nil) - assert.Equal(t, 
1, pauseCallCount) - assert.Equal(t, 1, resumeCallCount) - - cm.Connected(netFact(13), nil) - assert.Equal(t, 2, pauseCallCount) - assert.Equal(t, 1, resumeCallCount) - - cm.Disconnected(netFact(5), nil) - assert.Equal(t, 2, pauseCallCount) - assert.Equal(t, 2, resumeCallCount) -} diff --git a/p2p/libp2p/libp2pContext.go b/p2p/libp2p/libp2pContext.go index fa0badd58d1..a5900a4bed1 100644 --- a/p2p/libp2p/libp2pContext.go +++ b/p2p/libp2p/libp2pContext.go @@ -2,14 +2,19 @@ package libp2p import ( "context" + "sync" + "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/p2p/mock" ) // Libp2pContext holds the context for the wrappers over libp2p implementation type Libp2pContext struct { - ctx context.Context - connHost ConnectableHost + ctx context.Context + connHost ConnectableHost + mutChangeableComponents sync.RWMutex + blacklistHandler p2p.BlacklistHandler } // NewLibp2pContext constructs a new Libp2pContext object @@ -17,14 +22,14 @@ func NewLibp2pContext(ctx context.Context, connHost ConnectableHost) (*Libp2pCon if ctx == nil { return nil, p2p.ErrNilContext } - - if connHost == nil || connHost.IsInterfaceNil() { + if check.IfNil(connHost) { return nil, p2p.ErrNilHost } return &Libp2pContext{ - ctx: ctx, - connHost: connHost, + ctx: ctx, + connHost: connHost, + blacklistHandler: &mock.NilBlacklistHandler{}, }, nil } @@ -38,10 +43,27 @@ func (lctx *Libp2pContext) Host() ConnectableHost { return lctx.connHost } +// SetPeerBlacklist sets the peer black list handler +func (lctx *Libp2pContext) SetPeerBlacklist(blacklistHandler p2p.BlacklistHandler) error { + if check.IfNil(blacklistHandler) { + return p2p.ErrNilPeerBlacklistHandler + } + + lctx.mutChangeableComponents.Lock() + lctx.blacklistHandler = blacklistHandler + lctx.mutChangeableComponents.Unlock() + + return nil +} + +func (lctx *Libp2pContext) PeerBlacklist() p2p.BlacklistHandler { + lctx.mutChangeableComponents.RLock() + defer 
lctx.mutChangeableComponents.RUnlock() + + return lctx.blacklistHandler +} + // IsInterfaceNil returns true if there is no value under the interface func (lctx *Libp2pContext) IsInterfaceNil() bool { - if lctx == nil { - return true - } - return false + return lctx == nil } diff --git a/p2p/libp2p/libp2pContext_test.go b/p2p/libp2p/libp2pContext_test.go index b0493a20191..6879395eef8 100644 --- a/p2p/libp2p/libp2pContext_test.go +++ b/p2p/libp2p/libp2pContext_test.go @@ -4,6 +4,7 @@ import ( "context" "testing" + "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/p2p" "github.com/ElrondNetwork/elrond-go/p2p/libp2p" "github.com/ElrondNetwork/elrond-go/p2p/mock" @@ -11,38 +12,68 @@ import ( ) func TestNewLibp2pContext_NilContextShoulsErr(t *testing.T) { + t.Parallel() + lctx, err := libp2p.NewLibp2pContext(nil, &mock.ConnectableHostStub{}) - assert.Nil(t, lctx) + assert.True(t, check.IfNil(lctx)) assert.Equal(t, p2p.ErrNilContext, err) } func TestNewLibp2pContext_NilHostShoulsErr(t *testing.T) { + t.Parallel() + lctx, err := libp2p.NewLibp2pContext(context.Background(), nil) - assert.Nil(t, lctx) + assert.True(t, check.IfNil(lctx)) assert.Equal(t, p2p.ErrNilHost, err) } func TestNewLibp2pContext_OkValsShouldWork(t *testing.T) { + t.Parallel() + lctx, err := libp2p.NewLibp2pContext(context.Background(), &mock.ConnectableHostStub{}) - assert.NotNil(t, lctx) + assert.False(t, check.IfNil(lctx)) assert.Nil(t, err) } func TestLibp2pContext_Context(t *testing.T) { - ctx := context.Background() + t.Parallel() + ctx := context.Background() lctx, _ := libp2p.NewLibp2pContext(ctx, &mock.ConnectableHostStub{}) assert.True(t, ctx == lctx.Context()) } func TestLibp2pContext_Host(t *testing.T) { - h := &mock.ConnectableHostStub{} + t.Parallel() + h := &mock.ConnectableHostStub{} lctx, _ := libp2p.NewLibp2pContext(context.Background(), h) assert.True(t, h == lctx.Host()) } + +func TestLibp2pContext_SetPeerBlacklistNilPeerBlacklistShouldErr(t 
*testing.T) { + t.Parallel() + + lctx, _ := libp2p.NewLibp2pContext(context.Background(), &mock.ConnectableHostStub{}) + err := lctx.SetPeerBlacklist(nil) + + assert.Equal(t, p2p.ErrNilPeerBlacklistHandler, err) +} + +func TestLibp2pContext_GetSetBlacklistHandlerShouldWork(t *testing.T) { + t.Parallel() + + lctx, _ := libp2p.NewLibp2pContext(context.Background(), &mock.ConnectableHostStub{}) + npbh := &mock.NilBlacklistHandler{} + + err := lctx.SetPeerBlacklist(npbh) + assert.Nil(t, err) + + recoveredNpbh := lctx.PeerBlacklist() + assert.True(t, npbh == recoveredNpbh) +} diff --git a/p2p/libp2p/memMessenger.go b/p2p/libp2p/memMessenger.go index 2335707cdfe..5af2ed7ad3f 100644 --- a/p2p/libp2p/memMessenger.go +++ b/p2p/libp2p/memMessenger.go @@ -3,6 +3,7 @@ package libp2p import ( "context" + "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/core/throttler" "github.com/ElrondNetwork/elrond-go/p2p" "github.com/ElrondNetwork/elrond-go/p2p/loadBalancer" @@ -11,6 +12,8 @@ import ( "github.com/libp2p/go-libp2p/p2p/net/mock" ) +const targetPeerCount = 100 + // NewMemoryMessenger creates a new sandbox testable instance of libP2P messenger // It should not open ports on current machine // Should be used only in testing! 
@@ -25,7 +28,7 @@ func NewMemoryMessenger( if mockNet == nil { return nil, p2p.ErrNilMockNet } - if peerDiscoverer == nil || peerDiscoverer.IsInterfaceNil() { + if check.IfNil(peerDiscoverer) { return nil, p2p.ErrNilPeerDiscoverer } @@ -45,7 +48,7 @@ func NewMemoryMessenger( false, loadBalancer.NewOutgoingChannelLoadBalancer(), peerDiscoverer, - 0, + targetPeerCount, ) if err != nil { return nil, err @@ -79,6 +82,6 @@ func NewNetworkMessengerOnFreePort( outgoingPLB, peerDiscoverer, ListenLocalhostAddrWithIp4AndTcp, - 0, + targetPeerCount, ) } diff --git a/p2p/libp2p/netMessenger.go b/p2p/libp2p/netMessenger.go index df0a783cb98..56d27397466 100644 --- a/p2p/libp2p/netMessenger.go +++ b/p2p/libp2p/netMessenger.go @@ -20,6 +20,9 @@ import ( const durationBetweenSends = time.Microsecond * 10 +const durationBetweenReconnectionAttempts = time.Second * 5 +const durationCheckConnections = time.Second + // ListenAddrWithIp4AndTcp defines the listening address with ip v.4 and TCP const ListenAddrWithIp4AndTcp = "/ip4/0.0.0.0/tcp/" @@ -50,7 +53,7 @@ type networkMessenger struct { ctxProvider *Libp2pContext pb *pubsub.PubSub ds p2p.DirectSender - connMonitor *libp2pConnectionMonitor + connMonitor *connectionMonitor peerDiscoverer p2p.PeerDiscoverer mutTopics sync.RWMutex topics map[string]p2p.MessageProcessor @@ -147,8 +150,6 @@ func createMessenger( return nil, err } - reconnecter, _ := peerDiscoverer.(p2p.Reconnecter) - netMes := networkMessenger{ ctxProvider: lctx, pb: pb, @@ -156,12 +157,11 @@ func createMessenger( outgoingPLB: outgoingPLB, peerDiscoverer: peerDiscoverer, } - netMes.connMonitor, err = newLibp2pConnectionMonitor(reconnecter, defaultThresholdMinConnectedPeers, targetConnCount) + + err = netMes.createConnectionMonitor(targetConnCount) if err != nil { return nil, err - } - lctx.connHost.Network().Notify(netMes.connMonitor) netMes.ds, err = NewDirectSender(lctx.Context(), lctx.Host(), netMes.directMessageHandler) if err != nil { @@ -214,6 +214,53 @@ func 
createPubSub(ctxProvider *Libp2pContext, withSigning bool) (*pubsub.PubSub, return ps, nil } +func (netMes *networkMessenger) createConnectionMonitor(targetConnCount int) error { + var err error + + reconnecter, _ := netMes.peerDiscoverer.(p2p.Reconnecter) + netMes.connMonitor, err = newConnectionMonitor( + reconnecter, + netMes.ctxProvider, + defaultThresholdMinConnectedPeers, + targetConnCount, + ) + if err != nil { + return err + + } + notifee := &connectionMonitorNotifiee{ + ConnectionMonitor: netMes.connMonitor, + } + netMes.ctxProvider.connHost.Network().Notify(notifee) + + go func() { + for { + netMes.connMonitor.DoReconnectionBlocking() + time.Sleep(durationBetweenReconnectionAttempts) + } + }() + + go func() { + for { + netMes.connMonitor.CheckConnectionsBlocking() + time.Sleep(durationCheckConnections) + } + }() + + return nil +} + +// ApplyOptions can set up different configurable options of a networkMessenger instance +func (netMes *networkMessenger) ApplyOptions(opts ...Option) error { + for _, opt := range opts { + err := opt(netMes) + if err != nil { + return err + } + } + return nil +} + // Close closes the host, connections and streams func (netMes *networkMessenger) Close() error { return netMes.ctxProvider.Host().Close() @@ -518,8 +565,7 @@ func (netMes *networkMessenger) directMessageHandler(message p2p.MessageP2P, fro // IsConnectedToTheNetwork returns true if the current node is connected to the network func (netMes *networkMessenger) IsConnectedToTheNetwork() bool { - netw := netMes.ctxProvider.connHost.Network() - return netMes.connMonitor.isConnectedToTheNetwork(netw) + return netMes.connMonitor.isConnectedToTheNetwork() } // SetThresholdMinConnectedPeers sets the minimum connected peers before triggering a new reconnection @@ -528,9 +574,8 @@ func (netMes *networkMessenger) SetThresholdMinConnectedPeers(minConnectedPeers return p2p.ErrInvalidValue } - netw := netMes.ctxProvider.connHost.Network() 
netMes.connMonitor.thresholdMinConnectedPeers = minConnectedPeers - netMes.connMonitor.doReconnectionIfNeeded(netw) + netMes.connMonitor.doReconnectionIfNeeded() return nil } diff --git a/p2p/libp2p/netMessenger_test.go b/p2p/libp2p/netMessenger_test.go index cb8dc5dbedf..0f592f6d637 100644 --- a/p2p/libp2p/netMessenger_test.go +++ b/p2p/libp2p/netMessenger_test.go @@ -27,6 +27,8 @@ import ( "github.com/stretchr/testify/assert" ) +const targetPeerCount = 100 + var timeoutWaitResponses = time.Second * 2 func waitDoneWithTimeout(t *testing.T, chanDone chan bool, timeout time.Duration) { @@ -174,7 +176,7 @@ func TestNewNetworkMessenger_NilContextShouldErr(t *testing.T) { &mock.ChannelLoadBalancerStub{}, discovery.NewNullDiscoverer(), libp2p.ListenLocalhostAddrWithIp4AndTcp, - 0, + targetPeerCount, ) assert.Nil(t, mes) @@ -194,7 +196,7 @@ func TestNewNetworkMessenger_InvalidPortShouldErr(t *testing.T) { &mock.ChannelLoadBalancerStub{}, discovery.NewNullDiscoverer(), libp2p.ListenLocalhostAddrWithIp4AndTcp, - 0, + targetPeerCount, ) assert.Nil(t, mes) @@ -212,7 +214,7 @@ func TestNewNetworkMessenger_NilP2PprivateKeyShouldErr(t *testing.T) { &mock.ChannelLoadBalancerStub{}, discovery.NewNullDiscoverer(), libp2p.ListenLocalhostAddrWithIp4AndTcp, - 0, + targetPeerCount, ) assert.Nil(t, mes) @@ -232,7 +234,7 @@ func TestNewNetworkMessenger_NilPipeLoadBalancerShouldErr(t *testing.T) { nil, discovery.NewNullDiscoverer(), libp2p.ListenLocalhostAddrWithIp4AndTcp, - 0, + targetPeerCount, ) assert.Nil(t, mes) @@ -257,7 +259,7 @@ func TestNewNetworkMessenger_NoConnMgrShouldWork(t *testing.T) { }, discovery.NewNullDiscoverer(), libp2p.ListenLocalhostAddrWithIp4AndTcp, - 0, + targetPeerCount, ) assert.NotNil(t, mes) @@ -295,7 +297,7 @@ func TestNewNetworkMessenger_WithConnMgrShouldWork(t *testing.T) { }, discovery.NewNullDiscoverer(), libp2p.ListenLocalhostAddrWithIp4AndTcp, - 0, + targetPeerCount, ) assert.NotNil(t, mes) @@ -328,7 +330,7 @@ func 
TestNewNetworkMessenger_WithNullPeerDiscoveryShouldWork(t *testing.T) { }, discovery.NewNullDiscoverer(), libp2p.ListenLocalhostAddrWithIp4AndTcp, - 0, + targetPeerCount, ) assert.NotNil(t, mes) @@ -355,7 +357,7 @@ func TestNewNetworkMessenger_NilPeerDiscoveryShouldErr(t *testing.T) { }, nil, libp2p.ListenLocalhostAddrWithIp4AndTcp, - 0, + targetPeerCount, ) assert.Nil(t, mes) @@ -391,7 +393,7 @@ func TestNewNetworkMessenger_PeerDiscovererFailsWhenApplyingContextShouldErr(t * }, }, libp2p.ListenLocalhostAddrWithIp4AndTcp, - 0, + targetPeerCount, ) assert.Nil(t, mes) @@ -727,7 +729,7 @@ func TestLibp2pMessenger_BroadcastOnChannelBlockingShouldLimitNumberOfGoRoutines }, discovery.NewNullDiscoverer(), libp2p.ListenLocalhostAddrWithIp4AndTcp, - 0, + targetPeerCount, ) wg := sync.WaitGroup{} @@ -1261,7 +1263,7 @@ func TestLibp2pMessenger_TrimConnectionsCallsConnManagerTrimConnections(t *testi }, discovery.NewNullDiscoverer(), libp2p.ListenLocalhostAddrWithIp4AndTcp, - 0, + targetPeerCount, ) mes.TrimConnections() @@ -1299,7 +1301,7 @@ func TestLibp2pMessenger_SendDataThrottlerShouldReturnCorrectObject(t *testing.T sdt, discovery.NewNullDiscoverer(), libp2p.ListenLocalhostAddrWithIp4AndTcp, - 0, + targetPeerCount, ) sdtReturned := mes.OutgoingChannelLoadBalancer() @@ -1366,7 +1368,7 @@ func TestLibp2pMessenger_SendDirectWithRealNetToConnectedPeerShouldWork(t *testi loadBalancer.NewOutgoingChannelLoadBalancer(), discovery.NewNullDiscoverer(), libp2p.ListenLocalhostAddrWithIp4AndTcp, - 0, + targetPeerCount, ) fmt.Println("Messenger 2:") @@ -1378,7 +1380,7 @@ func TestLibp2pMessenger_SendDirectWithRealNetToConnectedPeerShouldWork(t *testi loadBalancer.NewOutgoingChannelLoadBalancer(), discovery.NewNullDiscoverer(), libp2p.ListenLocalhostAddrWithIp4AndTcp, - 0, + targetPeerCount, ) err := mes1.ConnectToPeer(getConnectableAddress(mes2)) diff --git a/p2p/libp2p/options.go b/p2p/libp2p/options.go new file mode 100644 index 00000000000..bb0a9545cfd --- /dev/null +++ 
b/p2p/libp2p/options.go @@ -0,0 +1,16 @@ +package libp2p + +import ( + "github.com/ElrondNetwork/elrond-go/p2p" +) + +// Option represents a functional configuration parameter that can operate +// over the networkMessenger struct. +type Option func(*networkMessenger) error + +// WithPeerBlackList defines the option of setting a peer black list handler +func WithPeerBlackList(blacklistHandler p2p.BlacklistHandler) Option { + return func(mes *networkMessenger) error { + return mes.ctxProvider.SetPeerBlacklist(blacklistHandler) + } +} diff --git a/p2p/mock/blacklistHandlerStub.go b/p2p/mock/blacklistHandlerStub.go new file mode 100644 index 00000000000..679ef4cc0a7 --- /dev/null +++ b/p2p/mock/blacklistHandlerStub.go @@ -0,0 +1,13 @@ +package mock + +type BlacklistHandlerStub struct { + HasCalled func(key string) bool +} + +func (bhs *BlacklistHandlerStub) Has(key string) bool { + return bhs.HasCalled(key) +} + +func (bhs *BlacklistHandlerStub) IsInterfaceNil() bool { + return bhs == nil +} diff --git a/p2p/mock/connectionMonitorStub.go b/p2p/mock/connectionMonitorStub.go new file mode 100644 index 00000000000..1ed68367928 --- /dev/null +++ b/p2p/mock/connectionMonitorStub.go @@ -0,0 +1,30 @@ +package mock + +import "github.com/ElrondNetwork/elrond-go/p2p" + +type ConnectionMonitorStub struct { + HandleConnectedPeerCalled func(pid p2p.PeerID) error + HandleDisconnectedPeerCalled func(pid p2p.PeerID) error + DoReconnectionBlockingCalled func() + CheckConnectionsBlockingCalled func() +} + +func (cms *ConnectionMonitorStub) HandleConnectedPeer(pid p2p.PeerID) error { + return cms.HandleConnectedPeerCalled(pid) +} + +func (cms *ConnectionMonitorStub) HandleDisconnectedPeer(pid p2p.PeerID) error { + return cms.HandleDisconnectedPeerCalled(pid) +} + +func (cms *ConnectionMonitorStub) DoReconnectionBlocking() { + cms.DoReconnectionBlockingCalled() +} + +func (cms *ConnectionMonitorStub) CheckConnectionsBlocking() { + cms.CheckConnectionsBlockingCalled() +} + +func (cms 
*ConnectionMonitorStub) IsInterfaceNil() bool { + return cms == nil +} diff --git a/p2p/mock/nilBlacklistHandler.go b/p2p/mock/nilBlacklistHandler.go new file mode 100644 index 00000000000..e8012825006 --- /dev/null +++ b/p2p/mock/nilBlacklistHandler.go @@ -0,0 +1,16 @@ +package mock + +// NilBlacklistHandler is a mock implementation of BlacklistHandler that does not manage black listed keys +// (all keys [peers] are whitelisted) +type NilBlacklistHandler struct { +} + +// IsBlacklisted outputs false (all peers are white listed) +func (nbh *NilBlacklistHandler) Has(_ string) bool { + return false +} + +// IsInterfaceNil returns true if there is no value under the interface +func (nbh *NilBlacklistHandler) IsInterfaceNil() bool { + return nbh == nil +} diff --git a/p2p/mock/reconnecterStub.go b/p2p/mock/reconnecterStub.go index 853f1fad55e..fba03953cc1 100644 --- a/p2p/mock/reconnecterStub.go +++ b/p2p/mock/reconnecterStub.go @@ -1,13 +1,13 @@ package mock type ReconnecterStub struct { - ReconnectToNetworkCalled func() <-chan struct{} + ReconnectToNetworkCalled func() PauseCall func() ResumeCall func() } -func (rs *ReconnecterStub) ReconnectToNetwork() <-chan struct{} { - return rs.ReconnectToNetworkCalled() +func (rs *ReconnecterStub) ReconnectToNetwork() { + rs.ReconnectToNetworkCalled() } // IsInterfaceNil returns true if there is no value under the interface diff --git a/p2p/p2p.go b/p2p/p2p.go index aba43118e9c..58192f53fa8 100644 --- a/p2p/p2p.go +++ b/p2p/p2p.go @@ -51,7 +51,7 @@ type PeerDiscoverer interface { // Reconnecter defines the behaviour of a network reconnection mechanism type Reconnecter interface { - ReconnectToNetwork() <-chan struct{} + ReconnectToNetwork() Pause() // pause the peer discovery Resume() // resume the peer discovery IsInterfaceNil() bool @@ -200,3 +200,18 @@ type FloodPreventer interface { Reset() IsInterfaceNil() bool } + +// BlacklistHandler defines the behavior of a component that is able to decide if a key (peer ID) is black 
listed or not +type BlacklistHandler interface { + Has(key string) bool + IsInterfaceNil() bool +} + +// ConnectionMonitor defines what a peer-management component should do +type ConnectionMonitor interface { + HandleConnectedPeer(pid PeerID) error + HandleDisconnectedPeer(pid PeerID) error + DoReconnectionBlocking() + CheckConnectionsBlocking() + IsInterfaceNil() bool +} diff --git a/process/errors.go b/process/errors.go index 479812f425f..39d1a63d861 100644 --- a/process/errors.go +++ b/process/errors.go @@ -64,9 +64,6 @@ var ErrNilBlockBody = errors.New("nil block body") // ErrNilTxHash signals that an operation has been attempted with a nil hash var ErrNilTxHash = errors.New("nil transaction hash") -// ErrNilPublicKey signals that a operation has been attempted with a nil public key -var ErrNilPublicKey = errors.New("nil public key") - // ErrNilPubKeysBitmap signals that a operation has been attempted with a nil public keys bitmap var ErrNilPubKeysBitmap = errors.New("nil public keys bitmap") @@ -343,9 +340,6 @@ var ErrNilRewardTransaction = errors.New("reward transaction is nil") // ErrRewardTransactionNotFound is raised when reward transaction should be present but was not found var ErrRewardTransactionNotFound = errors.New("reward transaction not found") -// ErrInvalidDataInput signals that the data input is invalid for parsing -var ErrInvalidDataInput = errors.New("data input is invalid to create key, value storage output") - // ErrNilUTxDataPool signals that unsigned transaction pool is nil var ErrNilUTxDataPool = errors.New("unsigned transactions pool is nil") diff --git a/process/throttle/antiflood/quotaFloodPreventer.go b/process/throttle/antiflood/quotaFloodPreventer.go index 9c86260c318..904b4693779 100644 --- a/process/throttle/antiflood/quotaFloodPreventer.go +++ b/process/throttle/antiflood/quotaFloodPreventer.go @@ -24,7 +24,7 @@ type quota struct { type quotaFloodPreventer struct { mutOperation *sync.RWMutex cacher storage.Cacher - statusHandler 
QuotaStatusHandler + statusHandlers []QuotaStatusHandler maxMessagesPerPeer uint32 maxSizePerPeer uint64 maxMessages uint32 @@ -35,7 +35,7 @@ type quotaFloodPreventer struct { // NewQuotaFloodPreventer creates a new flood preventer based on quota / peer func NewQuotaFloodPreventer( cacher storage.Cacher, - statusHandler QuotaStatusHandler, + statusHandlers []QuotaStatusHandler, maxMessagesPerPeer uint32, maxTotalSizePerPeer uint64, maxMessages uint32, @@ -45,8 +45,10 @@ func NewQuotaFloodPreventer( if check.IfNil(cacher) { return nil, process.ErrNilCacher } - if check.IfNil(statusHandler) { - return nil, process.ErrNilQuotaStatusHandler + for _, statusHandler := range statusHandlers { + if check.IfNil(statusHandler) { + return nil, process.ErrNilQuotaStatusHandler + } } if maxMessagesPerPeer < minMessages { return nil, fmt.Errorf("%w raised in NewCountersMap, maxMessagesPerPeer: provided %d, minimum %d", @@ -80,7 +82,7 @@ func NewQuotaFloodPreventer( return "aFloodPreventer{ mutOperation: &sync.RWMutex{}, cacher: cacher, - statusHandler: statusHandler, + statusHandlers: statusHandlers, maxMessagesPerPeer: maxMessagesPerPeer, maxSizePerPeer: maxTotalSizePerPeer, maxMessages: maxMessages, @@ -174,7 +176,7 @@ func (qfp *quotaFloodPreventer) Reset() { qfp.mutOperation.Lock() defer qfp.mutOperation.Unlock() - qfp.statusHandler.ResetStatistics() + qfp.resetStatusHandlers() qfp.createStatistics() //TODO change this if cacher.Clear() is time consuming @@ -182,6 +184,12 @@ func (qfp *quotaFloodPreventer) Reset() { qfp.globalQuota = "a{} } +func (qfp *quotaFloodPreventer) resetStatusHandlers() { + for _, statusHandler := range qfp.statusHandlers { + statusHandler.ResetStatistics() + } +} + // createStatistics is useful to benchmark the system when running func (qfp quotaFloodPreventer) createStatistics() { keys := qfp.cacher.Keys() @@ -196,7 +204,7 @@ func (qfp quotaFloodPreventer) createStatistics() { continue } - qfp.statusHandler.AddQuota( + qfp.addQuota( string(k), 
q.numReceivedMessages, q.sizeReceivedMessages, @@ -205,7 +213,7 @@ func (qfp quotaFloodPreventer) createStatistics() { ) } - qfp.statusHandler.SetGlobalQuota( + qfp.setGlobalQuota( qfp.globalQuota.numReceivedMessages, qfp.globalQuota.sizeReceivedMessages, qfp.globalQuota.numProcessedMessages, @@ -213,6 +221,29 @@ func (qfp quotaFloodPreventer) createStatistics() { ) } +func (qfp *quotaFloodPreventer) addQuota( + identifier string, + numReceived uint32, + sizeReceived uint64, + numProcessed uint32, + sizeProcessed uint64, +) { + for _, statusHandler := range qfp.statusHandlers { + statusHandler.AddQuota(identifier, numReceived, sizeReceived, numProcessed, sizeProcessed) + } +} + +func (qfp *quotaFloodPreventer) setGlobalQuota( + numReceived uint32, + sizeReceived uint64, + numProcessed uint32, + sizeProcessed uint64, +) { + for _, statusHandler := range qfp.statusHandlers { + statusHandler.SetGlobalQuota(numReceived, sizeReceived, numProcessed, sizeProcessed) + } +} + // IsInterfaceNil returns true if there is no value under the interface func (qfp *quotaFloodPreventer) IsInterfaceNil() bool { return qfp == nil diff --git a/process/throttle/antiflood/quotaFloodPreventer_test.go b/process/throttle/antiflood/quotaFloodPreventer_test.go index 8c6153c689a..5d220c5e6e6 100644 --- a/process/throttle/antiflood/quotaFloodPreventer_test.go +++ b/process/throttle/antiflood/quotaFloodPreventer_test.go @@ -12,14 +12,6 @@ import ( "github.com/stretchr/testify/assert" ) -func createMockQuotaStatusHandler() *mock.QuotaStatusHandlerStub { - return &mock.QuotaStatusHandlerStub{ - ResetStatisticsCalled: func() {}, - AddQuotaCalled: func(_ string, _ uint32, _ uint64, _ uint32, _ uint64) {}, - SetGlobalQuotaCalled: func(_ uint32, _ uint64, _ uint32, _ uint64) {}, - } -} - //------- NewQuotaFloodPreventer func TestNewQuotaFloodPreventer_NilCacherShouldErr(t *testing.T) { @@ -27,7 +19,7 @@ func TestNewQuotaFloodPreventer_NilCacherShouldErr(t *testing.T) { qfp, err := 
NewQuotaFloodPreventer( nil, - &mock.QuotaStatusHandlerStub{}, + []QuotaStatusHandler{&mock.QuotaStatusHandlerStub{}}, minMessages, minTotalSize, minMessages, @@ -43,7 +35,7 @@ func TestNewQuotaFloodPreventer_NilStatusHandlerShouldErr(t *testing.T) { qfp, err := NewQuotaFloodPreventer( &mock.CacherStub{}, - nil, + []QuotaStatusHandler{nil}, minMessages, minTotalSize, minMessages, @@ -59,7 +51,7 @@ func TestNewQuotaFloodPreventer_LowerMinMessagesPerPeerShouldErr(t *testing.T) { qfp, err := NewQuotaFloodPreventer( &mock.CacherStub{}, - &mock.QuotaStatusHandlerStub{}, + []QuotaStatusHandler{&mock.QuotaStatusHandlerStub{}}, minMessages-1, minTotalSize, minMessages, @@ -75,7 +67,7 @@ func TestNewQuotaFloodPreventer_LowerMinSizePerPeerShouldErr(t *testing.T) { qfp, err := NewQuotaFloodPreventer( &mock.CacherStub{}, - &mock.QuotaStatusHandlerStub{}, + []QuotaStatusHandler{&mock.QuotaStatusHandlerStub{}}, minMessages, minTotalSize-1, minMessages, @@ -91,7 +83,7 @@ func TestNewQuotaFloodPreventer_LowerMinMessagesShouldErr(t *testing.T) { qfp, err := NewQuotaFloodPreventer( &mock.CacherStub{}, - &mock.QuotaStatusHandlerStub{}, + []QuotaStatusHandler{&mock.QuotaStatusHandlerStub{}}, minMessages, minTotalSize, minMessages-1, @@ -107,7 +99,7 @@ func TestNewQuotaFloodPreventer_LowerMinSizeShouldErr(t *testing.T) { qfp, err := NewQuotaFloodPreventer( &mock.CacherStub{}, - &mock.QuotaStatusHandlerStub{}, + []QuotaStatusHandler{&mock.QuotaStatusHandlerStub{}}, minMessages, minTotalSize, minMessages, @@ -123,7 +115,23 @@ func TestNewQuotaFloodPreventer_ShouldWork(t *testing.T) { qfp, err := NewQuotaFloodPreventer( &mock.CacherStub{}, - &mock.QuotaStatusHandlerStub{}, + []QuotaStatusHandler{&mock.QuotaStatusHandlerStub{}}, + minMessages, + minTotalSize, + minMessages, + minTotalSize, + ) + + assert.False(t, check.IfNil(qfp)) + assert.Nil(t, err) +} + +func TestNewQuotaFloodPreventer_NilListShouldWork(t *testing.T) { + t.Parallel() + + qfp, err := NewQuotaFloodPreventer( + 
&mock.CacherStub{}, + nil, minMessages, minTotalSize, minMessages, @@ -158,7 +166,7 @@ func TestNewQuotaFloodPreventer_AccumulateIdentifierNotPresentPutQuotaAndReturnT return }, }, - createMockQuotaStatusHandler(), + nil, minMessages*4, minTotalSize*10, minMessages*4, @@ -193,7 +201,7 @@ func TestNewQuotaFloodPreventer_AccumulateNotQuotaSavedInCacheShouldPutQuotaAndR return }, }, - createMockQuotaStatusHandler(), + nil, minMessages*4, minTotalSize*10, minMessages*4, @@ -234,7 +242,7 @@ func TestNewQuotaFloodPreventer_AccumulateUnderMaxValuesShouldIncrementAndReturn return }, }, - createMockQuotaStatusHandler(), + nil, minMessages*4, minTotalSize*10, minMessages*4, @@ -277,16 +285,18 @@ func TestNewQuotaFloodPreventer_AccumulateGlobalWithResetShouldWork(t *testing.T }, ClearCalled: func() {}, }, - &mock.QuotaStatusHandlerStub{ - AddQuotaCalled: func(_ string, _ uint32, _ uint64, _ uint32, _ uint64) {}, - SetGlobalQuotaCalled: func(numReceived uint32, sizeReceived uint64, numProcessed uint32, sizeProcessed uint64) { - addedGlobalQuotaCalled = true - assert.Equal(t, uint32(2), numReceived) - assert.Equal(t, size+size+1, sizeReceived) - assert.Equal(t, uint32(2), numProcessed) - assert.Equal(t, size+size+1, sizeProcessed) + []QuotaStatusHandler{ + &mock.QuotaStatusHandlerStub{ + AddQuotaCalled: func(_ string, _ uint32, _ uint64, _ uint32, _ uint64) {}, + SetGlobalQuotaCalled: func(numReceived uint32, sizeReceived uint64, numProcessed uint32, sizeProcessed uint64) { + addedGlobalQuotaCalled = true + assert.Equal(t, uint32(2), numReceived) + assert.Equal(t, size+size+1, sizeReceived) + assert.Equal(t, uint32(2), numProcessed) + assert.Equal(t, size+size+1, sizeProcessed) + }, + ResetStatisticsCalled: func() {}, }, - ResetStatisticsCalled: func() {}, }, minMessages*4, minTotalSize*10, @@ -328,7 +338,7 @@ func TestNewQuotaFloodPreventer_AccumulateOverMaxPeerNumMessagesShouldNotPutAndR return false }, }, - createMockQuotaStatusHandler(), + nil, minMessages*4, 
minTotalSize*10, minMessages*4, @@ -360,7 +370,7 @@ func TestNewQuotaFloodPreventer_AccumulateOverMaxPeerSizeShouldNotPutAndReturnFa return false }, }, - createMockQuotaStatusHandler(), + nil, minMessages*4, minTotalSize*10, minMessages*4, @@ -390,7 +400,7 @@ func TestNewQuotaFloodPreventer_AccumulateOverMaxNumMessagesShouldNotPutAndRetur return false }, }, - createMockQuotaStatusHandler(), + nil, minMessages*4, minTotalSize*10, minMessages*4, @@ -419,7 +429,7 @@ func TestNewQuotaFloodPreventer_AccumulateOverMaxSizeShouldNotPutAndReturnFalse( return false }, }, - createMockQuotaStatusHandler(), + nil, minMessages*4, minTotalSize*10, minMessages*4, @@ -438,7 +448,7 @@ func TestCountersMap_AccumulateShouldWorkConcurrently(t *testing.T) { numIterations := 1000 qfp, _ := NewQuotaFloodPreventer( mock.NewCacherMock(), - createMockQuotaStatusHandler(), + nil, minMessages, minTotalSize, minMessages*uint32(numIterations), @@ -472,7 +482,7 @@ func TestCountersMap_ResetShouldCallCacherClear(t *testing.T) { return make([][]byte, 0) }, }, - createMockQuotaStatusHandler(), + nil, minTotalSize, minMessages, minTotalSize, @@ -511,33 +521,35 @@ func TestCountersMap_ResetShouldCallQuotaStatus(t *testing.T) { quota2Compared := false qfp, _ := NewQuotaFloodPreventer( cacher, - &mock.QuotaStatusHandlerStub{ - ResetStatisticsCalled: func() { - resetStatisticsCalled = true - }, - AddQuotaCalled: func(identifier string, numReceivedMessages uint32, sizeReceivedMessages uint64, numProcessedMessages uint32, sizeProcessedMessages uint64) { - quotaProvided := quota{ - numReceivedMessages: numReceivedMessages, - sizeReceivedMessages: sizeReceivedMessages, - numProcessedMessages: numProcessedMessages, - sizeProcessedMessages: sizeProcessedMessages, - } - quotaToCompare := quota{} - - switch identifier { - case string(key1): - quotaToCompare = *quota1 - quota1Compared = true - case string(key2): - quotaToCompare = *quota2 - quota2Compared = true - default: - assert.Fail(t, fmt.Sprintf("unknown 
identifier %s", identifier)) - } - - assert.Equal(t, quotaToCompare, quotaProvided) + []QuotaStatusHandler{ + &mock.QuotaStatusHandlerStub{ + ResetStatisticsCalled: func() { + resetStatisticsCalled = true + }, + AddQuotaCalled: func(identifier string, numReceivedMessages uint32, sizeReceivedMessages uint64, numProcessedMessages uint32, sizeProcessedMessages uint64) { + quotaProvided := quota{ + numReceivedMessages: numReceivedMessages, + sizeReceivedMessages: sizeReceivedMessages, + numProcessedMessages: numProcessedMessages, + sizeProcessedMessages: sizeProcessedMessages, + } + quotaToCompare := quota{} + + switch identifier { + case string(key1): + quotaToCompare = *quota1 + quota1Compared = true + case string(key2): + quotaToCompare = *quota2 + quota2Compared = true + default: + assert.Fail(t, fmt.Sprintf("unknown identifier %s", identifier)) + } + + assert.Equal(t, quotaToCompare, quotaProvided) + }, + SetGlobalQuotaCalled: func(_ uint32, _ uint64, _ uint32, _ uint64) {}, }, - SetGlobalQuotaCalled: func(_ uint32, _ uint64, _ uint32, _ uint64) {}, }, minTotalSize, minMessages, @@ -558,7 +570,7 @@ func TestCountersMap_IncrementAndResetShouldWorkConcurrently(t *testing.T) { numIterations := 1000 qfp, _ := NewQuotaFloodPreventer( mock.NewCacherMock(), - createMockQuotaStatusHandler(), + nil, minMessages, minTotalSize, minTotalSize*uint32(numIterations), diff --git a/statusHandler/p2pQuota/interface.go b/statusHandler/p2pQuota/interface.go new file mode 100644 index 00000000000..92e9f27c07e --- /dev/null +++ b/statusHandler/p2pQuota/interface.go @@ -0,0 +1,7 @@ +package p2pQuota + +// BlacklistHandler defines the behavior of a component that is able to add a blacklisted key (peer) +type BlacklistHandler interface { + Add(key string) error + IsInterfaceNil() bool +} diff --git a/statusHandler/p2pQuota/p2pQuotaBlacklistProcessor.go b/statusHandler/p2pQuota/p2pQuotaBlacklistProcessor.go new file mode 100644 index 00000000000..e2c5868cd1f --- /dev/null +++ 
b/statusHandler/p2pQuota/p2pQuotaBlacklistProcessor.go @@ -0,0 +1,20 @@ +package p2pQuota + +type p2pQuotaBlacklistProcessor struct { +} + +func (pqbp *p2pQuotaBlacklistProcessor) ResetStatistics() { + panic("implement me") +} + +func (pqbp *p2pQuotaBlacklistProcessor) AddQuota(identifier string, numReceived uint32, sizeReceived uint64, numProcessed uint32, sizeProcessed uint64) { + panic("implement me") +} + +// SetGlobalQuota does nothing (here to comply with QuotaStatusHandler interface) +func (pqbp *p2pQuotaBlacklistProcessor) SetGlobalQuota(_ uint32, _ uint64, _ uint32, _ uint64) {} + +// IsInterfaceNil returns true if there is no value under the interface +func (pqbp *p2pQuotaBlacklistProcessor) IsInterfaceNil() bool { + return pqbp == nil +} From 17e1a5a4f85618f8f6505ddece4c9575b2e04cc7 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Tue, 31 Dec 2019 12:03:55 +0200 Subject: [PATCH 32/35] implemented p2pBlackListProcessor added integration test for black list added unit test for p2p/options --- .../p2p/antiflood/antiflooding_test.go | 10 +- .../p2p/antiflood/blacklist_test.go | 182 +++++++++ p2p/errors.go | 3 + p2p/libp2p/connectionMonitor.go | 4 +- p2p/libp2p/netMessenger.go | 12 +- p2p/libp2p/options.go | 19 +- p2p/libp2p/options_test.go | 42 ++ p2p/memp2p/errors.go | 3 - p2p/memp2p/messenger.go | 5 + p2p/p2p.go | 11 + .../antiflood/p2pBlackListProcessor.go | 108 +++++ .../antiflood/p2pBlackListProcessor_test.go | 377 ++++++++++++++++++ .../p2pQuota/p2pQuotaBlacklistProcessor.go | 20 - 13 files changed, 761 insertions(+), 35 deletions(-) create mode 100644 integrationTests/p2p/antiflood/blacklist_test.go create mode 100644 p2p/libp2p/options_test.go create mode 100644 process/throttle/antiflood/p2pBlackListProcessor.go create mode 100644 process/throttle/antiflood/p2pBlackListProcessor_test.go delete mode 100644 statusHandler/p2pQuota/p2pQuotaBlacklistProcessor.go diff --git a/integrationTests/p2p/antiflood/antiflooding_test.go 
b/integrationTests/p2p/antiflood/antiflooding_test.go index 81c013c2a2e..1a7dccd7020 100644 --- a/integrationTests/p2p/antiflood/antiflooding_test.go +++ b/integrationTests/p2p/antiflood/antiflooding_test.go @@ -42,6 +42,7 @@ func TestAntifloodWithNumMessagesFromTheSamePeer(t *testing.T) { maxMessageSize := uint64(1 << 20) //1MB interceptors, err := createTopicsAndMockInterceptors( peers, + nil, topic, peerMaxNumProcessMessages, maxMessageSize, @@ -104,6 +105,7 @@ func TestAntifloodWithNumMessagesFromOtherPeers(t *testing.T) { maxMessageSize := uint64(1 << 20) //1MB interceptors, err := createTopicsAndMockInterceptors( peers, + nil, topic, peerMaxNumProcessMessages, maxMessageSize, @@ -159,6 +161,7 @@ func TestAntifloodWithLargeSizeMessagesFromTheSamePeer(t *testing.T) { peerMaxMessageSize := uint64(1 << 10) //1KB interceptors, err := createTopicsAndMockInterceptors( peers, + nil, topic, maxNumProcessMessages, peerMaxMessageSize, @@ -222,6 +225,7 @@ func checkMessagesOnPeers( func createTopicsAndMockInterceptors( peers []p2p.Messenger, + blacklistHandlers []antiflood.QuotaStatusHandler, topic string, peerMaxNumMessages uint32, peerMaxSize uint64, @@ -241,9 +245,13 @@ func createTopicsAndMockInterceptors( antifloodPool, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) interceptors[idx] = newMessageProcessor() + statusHandlers := []antiflood.QuotaStatusHandler{&nilQuotaStatusHandler{}} + if len(blacklistHandlers) == len(peers) { + statusHandlers = append(statusHandlers, blacklistHandlers[idx]) + } interceptors[idx].floodPreventer, _ = antiflood.NewQuotaFloodPreventer( antifloodPool, - []antiflood.QuotaStatusHandler{&nilQuotaStatusHandler{}}, + statusHandlers, peerMaxNumMessages, peerMaxSize, maxNumMessages, diff --git a/integrationTests/p2p/antiflood/blacklist_test.go b/integrationTests/p2p/antiflood/blacklist_test.go new file mode 100644 index 00000000000..d291cd0b3da --- /dev/null +++ b/integrationTests/p2p/antiflood/blacklist_test.go @@ 
-0,0 +1,182 @@ +package antiflood + +import ( + "fmt" + "math" + "sync/atomic" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/integrationTests" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/p2p/libp2p" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/throttle/antiflood" + "github.com/ElrondNetwork/elrond-go/storage/lrucache" + "github.com/ElrondNetwork/elrond-go/storage/timecache" + "github.com/stretchr/testify/assert" +) + +// TestAntifloodAndBlacklistWithNumMessages tests what happens if a peer decide to send a large number of messages +// all originating from its peer ID +// All directed peers should add the flooder peer to their black lists and disconnect from it. Further connections +// of the flooder to the flooded peers are no longer possible. +func TestAntifloodAndBlacklistWithNumMessages(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + peers, err := integrationTests.CreateFixedNetworkOf8Peers() + assert.Nil(t, err) + + defer func() { + integrationTests.ClosePeers(peers) + }() + + //node 3 is connected to 0, 2, 4 and 6 (check integrationTests.CreateFixedNetworkOf7Peers function) + //large number of broadcast messages from 3 might flood above mentioned peers but should not flood 5 and 7 + + topic := "test_topic" + broadcastMessageDuration := time.Second * 2 + peerMaxNumProcessMessages := uint32(5) + maxNumProcessMessages := uint32(math.MaxUint32) + maxMessageSize := uint64(1 << 20) //1MB + + blacklistProcessors, blacklistHandlers := createBlacklistHandlersAndProcessors( + peers, + peerMaxNumProcessMessages*2, + maxMessageSize*2, + 1, + ) + interceptors, err := createTopicsAndMockInterceptors( + peers, + blacklistProcessors, + topic, + peerMaxNumProcessMessages, + maxMessageSize, + maxNumProcessMessages, + maxMessageSize, + ) + applyBlacklistComponents(peers, blacklistHandlers) + assert.Nil(t, err) + + fmt.Println("bootstrapping 
nodes") + time.Sleep(durationBootstrapingTime) + + flooderIdx := 3 + floodedIdxes := []int{0, 2, 4, 6} + floodedIdxesConnections := make([]int, len(floodedIdxes)) + for i, idx := range floodedIdxes { + floodedIdxesConnections[i] = len(peers[idx].ConnectedPeers()) + } + + //flooder will deactivate its flooding mechanism as to be able to flood the network + interceptors[flooderIdx].floodPreventer = nil + + go func() { + for { + time.Sleep(time.Second) + + for _, interceptor := range interceptors { + if interceptor.floodPreventer == nil { + continue + } + interceptor.floodPreventer.Reset() + } + } + }() + + fmt.Println("flooding the network") + isFlooding := atomic.Value{} + isFlooding.Store(true) + go func() { + for { + peers[flooderIdx].Broadcast(topic, []byte("floodMessage")) + + if !isFlooding.Load().(bool) { + return + } + } + }() + time.Sleep(broadcastMessageDuration) + + isFlooding.Store(false) + fmt.Println("flooding the network stopped") + printConnected(peers) + + fmt.Println("waiting for peers to eliminate the flooding peer") + time.Sleep(time.Second * 10) + + printConnected(peers) + testConnections(t, peers, flooderIdx, floodedIdxes, floodedIdxesConnections) + fmt.Println("flooding peer wants to reconnect to the flooded peers (will fail)") + + reConnectFloodingPeer(peers, flooderIdx, floodedIdxes) + time.Sleep(time.Second * 5) + printConnected(peers) + testConnections(t, peers, flooderIdx, floodedIdxes, floodedIdxesConnections) +} + +func testConnections( + t *testing.T, + peers []p2p.Messenger, + flooderIdx int, + floodedIdxes []int, + floodedIdxesConnections []int, +) { + //flooder has 0 connections + assert.Equal(t, 0, len(peers[flooderIdx].ConnectedPeers())) + + //flooded peers have initial connection - 1 (they eliminated the flooder) + for i, idx := range floodedIdxes { + assert.Equal(t, floodedIdxesConnections[i]-1, len(peers[idx].ConnectedPeers())) + } +} + +func reConnectFloodingPeer(peers []p2p.Messenger, flooderIdx int, floodedIdxes []int) { + 
flooder := peers[flooderIdx] + for _, idx := range floodedIdxes { + _ = flooder.ConnectToPeer(peers[idx].Addresses()[0]) + } +} + +func applyBlacklistComponents(peers []p2p.Messenger, blacklistHandler []process.BlackListHandler) { + for idx, peer := range peers { + _ = peer.ApplyOptions( + libp2p.WithPeerBlackList(blacklistHandler[idx]), + ) + } +} + +func createBlacklistHandlersAndProcessors( + peers []p2p.Messenger, + thresholdNumReceived uint32, + thresholdSizeReceived uint64, + maxFloodingRounds uint32, +) ([]antiflood.QuotaStatusHandler, []process.BlackListHandler) { + + blacklistProcessors := make([]antiflood.QuotaStatusHandler, len(peers)) + blacklistHandlers := make([]process.BlackListHandler, len(peers)) + for i := range peers { + blacklistCache, _ := lrucache.NewCache(5000) + blacklistHandlers[i] = timecache.NewTimeCache(time.Minute * 5) + + blacklistProcessors[i], _ = antiflood.NewP2pBlackListProcessor( + blacklistCache, + blacklistHandlers[i], + thresholdNumReceived, + thresholdSizeReceived, + maxFloodingRounds, + ) + } + return blacklistProcessors, blacklistHandlers +} + +func printConnected(peers []p2p.Messenger) { + fmt.Println("Connected peers:") + for idx, peer := range peers { + fmt.Printf("%s, index %d has %d connections\n", + peer.ID().Pretty(), idx, len(peer.ConnectedPeers())) + } + fmt.Println() +} diff --git a/p2p/errors.go b/p2p/errors.go index ff4eade3f18..b45e086a3f0 100644 --- a/p2p/errors.go +++ b/p2p/errors.go @@ -99,3 +99,6 @@ var ErrNilPeerBlacklistHandler = errors.New("nil peer black list handler") // ErrPeerBlacklisted signals that a peer is blacklisted var ErrPeerBlacklisted = errors.New("peer is blacklisted") + +// ErrNilConfigVariable signals that a nil config variable has been provided +var ErrNilConfigVariable = errors.New("nil config variable") diff --git a/p2p/libp2p/connectionMonitor.go b/p2p/libp2p/connectionMonitor.go index 61be3f10ae7..d022b5311c4 100644 --- a/p2p/libp2p/connectionMonitor.go +++ 
b/p2p/libp2p/connectionMonitor.go @@ -63,7 +63,7 @@ func newConnectionMonitor( // HandleConnectedPeer is called whenever a new peer is connected to the current host func (cm *connectionMonitor) HandleConnectedPeer(pid p2p.PeerID) error { blacklistHandler := cm.libp2pContext.PeerBlacklist() - if blacklistHandler.Has(string(pid)) { + if blacklistHandler.Has(pid.Pretty()) { return fmt.Errorf("%w, pid: %s", p2p.ErrPeerBlacklisted, pid.Pretty()) } @@ -108,7 +108,7 @@ func (cm *connectionMonitor) CheckConnectionsBlocking() { peers := cm.netw.Peers() blacklistHandler := cm.libp2pContext.PeerBlacklist() for _, pid := range peers { - if blacklistHandler.Has(string(pid)) { + if blacklistHandler.Has(pid.Pretty()) { cm.closePeer(pid) } } diff --git a/p2p/libp2p/netMessenger.go b/p2p/libp2p/netMessenger.go index 07bb84d9f43..0a65c678c77 100644 --- a/p2p/libp2p/netMessenger.go +++ b/p2p/libp2p/netMessenger.go @@ -273,13 +273,21 @@ func (netMes *networkMessenger) createConnectionMonitor(targetConnCount int) err } // ApplyOptions can set up different configurable options of a networkMessenger instance -func (netMes *networkMessenger) ApplyOptions(opts ...Option) error { +func (netMes *networkMessenger) ApplyOptions(opts ...p2p.Option) error { + cfg := &p2p.Config{} + for _, opt := range opts { - err := opt(netMes) + err := opt(cfg) if err != nil { return err } } + + err := netMes.ctxProvider.SetPeerBlacklist(cfg.BlacklistHandler) + if err != nil { + return err + } + return nil } diff --git a/p2p/libp2p/options.go b/p2p/libp2p/options.go index bb0a9545cfd..4a2d001ae36 100644 --- a/p2p/libp2p/options.go +++ b/p2p/libp2p/options.go @@ -4,13 +4,18 @@ import ( "github.com/ElrondNetwork/elrond-go/p2p" ) -// Option represents a functional configuration parameter that can operate -// over the networkMessenger struct. 
-type Option func(*networkMessenger) error - // WithPeerBlackList defines the option of setting a peer black list handler -func WithPeerBlackList(blacklistHandler p2p.BlacklistHandler) Option { - return func(mes *networkMessenger) error { - return mes.ctxProvider.SetPeerBlacklist(blacklistHandler) +func WithPeerBlackList(blacklistHandler p2p.BlacklistHandler) p2p.Option { + return func(cfg *p2p.Config) error { + if cfg == nil { + return p2p.ErrNilConfigVariable + } + if blacklistHandler == nil { + return p2p.ErrNilPeerBlacklistHandler + } + + cfg.BlacklistHandler = blacklistHandler + + return nil } } diff --git a/p2p/libp2p/options_test.go b/p2p/libp2p/options_test.go new file mode 100644 index 00000000000..516bbf55b05 --- /dev/null +++ b/p2p/libp2p/options_test.go @@ -0,0 +1,42 @@ +package libp2p + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/p2p/mock" + "github.com/stretchr/testify/assert" +) + +//------- WithPeerBlackList + +func TestWithPeerBlackList_NilConfigShouldErr(t *testing.T) { + t.Parallel() + + opt := WithPeerBlackList(&mock.NilBlacklistHandler{}) + err := opt(nil) + + assert.Equal(t, p2p.ErrNilConfigVariable, err) +} + +func TestWithPeerBlackList_NilBlackListHadlerShouldErr(t *testing.T) { + t.Parallel() + + opt := WithPeerBlackList(nil) + cfg := &p2p.Config{} + err := opt(cfg) + + assert.Equal(t, p2p.ErrNilPeerBlacklistHandler, err) +} + +func TestWithPeerBlackList_ShouldWork(t *testing.T) { + t.Parallel() + + nblh := &mock.NilBlacklistHandler{} + opt := WithPeerBlackList(nblh) + cfg := &p2p.Config{} + err := opt(cfg) + + assert.Nil(t, err) + assert.True(t, cfg.BlacklistHandler == nblh) +} diff --git a/p2p/memp2p/errors.go b/p2p/memp2p/errors.go index 3dde767ab1a..17e87241034 100644 --- a/p2p/memp2p/errors.go +++ b/p2p/memp2p/errors.go @@ -10,6 +10,3 @@ var ErrNotConnectedToNetwork = errors.New("not connected to network") // ErrReceivingPeerNotConnected signals that the receiving peer of a 
sending operation is not connected to the network var ErrReceivingPeerNotConnected = errors.New("receiving peer not connected to network") - -// ErrCannotSendToSelf signals that a peer tried to send a message to itself -var ErrCannotSendToSelf = errors.New("cannot send message to itself") diff --git a/p2p/memp2p/messenger.go b/p2p/memp2p/messenger.go index 0aee10b9cf5..d642b90a75c 100644 --- a/p2p/memp2p/messenger.go +++ b/p2p/memp2p/messenger.go @@ -68,6 +68,11 @@ func NewMessenger(network *Network) (*Messenger, error) { return messenger, nil } +// ApplyOptions does not apply any option provided in this implementation +func (messenger *Messenger) ApplyOptions(_ ...p2p.Option) error { + return nil +} + // ID returns the P2P ID of the messenger func (messenger *Messenger) ID() p2p.PeerID { return messenger.p2pID diff --git a/p2p/p2p.go b/p2p/p2p.go index 58192f53fa8..a95b9780d2d 100644 --- a/p2p/p2p.go +++ b/p2p/p2p.go @@ -34,6 +34,15 @@ func (pid PeerID) Pretty() string { return base58.Encode(pid.Bytes()) } +// Config is a DTO used for config passing variables +type Config struct { + BlacklistHandler BlacklistHandler +} + +// Option represents a functional configuration parameter that can operate +// over the networkMessenger struct. +type Option func(*Config) error + // ContextProvider defines an interface for providing context to various messenger components type ContextProvider interface { Context() context.Context @@ -61,6 +70,8 @@ type Reconnecter interface { type Messenger interface { io.Closer + ApplyOptions(opts ...Option) error + // ID is the Messenger's unique peer identifier across the network (a // string). It is derived from the public key of the P2P credentials. 
ID() PeerID diff --git a/process/throttle/antiflood/p2pBlackListProcessor.go b/process/throttle/antiflood/p2pBlackListProcessor.go new file mode 100644 index 00000000000..0f2510d046f --- /dev/null +++ b/process/throttle/antiflood/p2pBlackListProcessor.go @@ -0,0 +1,108 @@ +package antiflood + +import ( + "fmt" + + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/storage" +) + +type p2pBlackListProcessor struct { + cacher storage.Cacher + blacklistHandler process.BlackListHandler + thresholdNumReceivedFlood uint32 + thresholdSizeReceivedFlood uint64 + numFloodingRounds uint32 +} + +// NewP2pBlackListProcessor creates a new instance of p2pQuotaBlacklistProcessor able to determine +// a flooding peer and mark it accordingly +func NewP2pBlackListProcessor( + cacher storage.Cacher, + blacklistHandler process.BlackListHandler, + thresholdNumReceivedFlood uint32, + thresholdSizeReceivedFlood uint64, + numFloodingRounds uint32, +) (*p2pBlackListProcessor, error) { + + if check.IfNil(cacher) { + return nil, fmt.Errorf("%w, NewP2pBlackListProcessor", process.ErrNilCacher) + } + if check.IfNil(blacklistHandler) { + return nil, fmt.Errorf("%w, NewP2pBlackListProcessor", process.ErrNilBlackListHandler) + } + if thresholdNumReceivedFlood == 0 { + return nil, fmt.Errorf("%w, thresholdNumReceivedFlood == 0", process.ErrInvalidValue) + } + if thresholdSizeReceivedFlood == 0 { + return nil, fmt.Errorf("%w, thresholdSizeReceivedFlood == 0", process.ErrInvalidValue) + } + if numFloodingRounds == 0 { + return nil, fmt.Errorf("%w, numFloodingRounds == 0", process.ErrInvalidValue) + } + + return &p2pBlackListProcessor{ + cacher: cacher, + blacklistHandler: blacklistHandler, + thresholdNumReceivedFlood: thresholdNumReceivedFlood, + thresholdSizeReceivedFlood: thresholdSizeReceivedFlood, + numFloodingRounds: numFloodingRounds, + }, nil +} + +// ResetStatistics checks if an identifier reached its maximum 
flooding rounds. If it did, it will remove its +// cached information and adds it to the black list handler +func (pbp *p2pBlackListProcessor) ResetStatistics() { + keys := pbp.cacher.Keys() + for _, key := range keys { + obj, ok := pbp.cacher.Peek(key) + if !ok { + pbp.cacher.Remove(key) + continue + } + + val, ok := obj.(uint32) + if !ok { + pbp.cacher.Remove(key) + continue + } + + if val >= pbp.numFloodingRounds { + pbp.cacher.Remove(key) + _ = pbp.blacklistHandler.Add(string(key)) + } + } +} + +// AddQuota checks if the received quota for an identifier has exceeded the set thresholds +func (pbp *p2pBlackListProcessor) AddQuota(identifier string, numReceived uint32, sizeReceived uint64, _ uint32, _ uint64) { + isFloodingPeer := numReceived >= pbp.thresholdNumReceivedFlood || sizeReceived >= pbp.thresholdSizeReceivedFlood + if isFloodingPeer { + pbp.incrementStatsFloodingPeer(identifier) + } +} + +func (pbp *p2pBlackListProcessor) incrementStatsFloodingPeer(identifier string) { + obj, ok := pbp.cacher.Get([]byte(identifier)) + if !ok { + pbp.cacher.Put([]byte(identifier), uint32(1)) + return + } + + val, ok := obj.(uint32) + if !ok { + pbp.cacher.Put([]byte(identifier), uint32(1)) + return + } + + pbp.cacher.Put([]byte(identifier), val+1) +} + +// SetGlobalQuota does nothing (here to comply with QuotaStatusHandler interface) +func (pbp *p2pBlackListProcessor) SetGlobalQuota(_ uint32, _ uint64, _ uint32, _ uint64) {} + +// IsInterfaceNil returns true if there is no value under the interface +func (pbp *p2pBlackListProcessor) IsInterfaceNil() bool { + return pbp == nil +} diff --git a/process/throttle/antiflood/p2pBlackListProcessor_test.go b/process/throttle/antiflood/p2pBlackListProcessor_test.go new file mode 100644 index 00000000000..b82c5c43968 --- /dev/null +++ b/process/throttle/antiflood/p2pBlackListProcessor_test.go @@ -0,0 +1,377 @@ +package antiflood_test + +import ( + "errors" + "testing" + + "github.com/ElrondNetwork/elrond-go/core/check" + 
"github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/ElrondNetwork/elrond-go/process/throttle/antiflood" + "github.com/stretchr/testify/assert" +) + +//-------- NewP2pQuotaBlacklistProcessor + +func TestNewP2pQuotaBlacklistProcessor_NilCacherShouldErr(t *testing.T) { + t.Parallel() + + pbp, err := antiflood.NewP2pBlackListProcessor( + nil, + &mock.BlackListHandlerStub{}, + 1, + 1, + 1, + ) + + assert.True(t, check.IfNil(pbp)) + assert.True(t, errors.Is(err, process.ErrNilCacher)) +} + +func TestNewP2pQuotaBlacklistProcessor_NilBlackListHandlerShouldErr(t *testing.T) { + t.Parallel() + + pbp, err := antiflood.NewP2pBlackListProcessor( + &mock.CacherStub{}, + nil, + 1, + 1, + 1, + ) + + assert.True(t, check.IfNil(pbp)) + assert.True(t, errors.Is(err, process.ErrNilBlackListHandler)) +} + +func TestNewP2pQuotaBlacklistProcessor_InvalidThresholdNumReceivedFloodShouldErr(t *testing.T) { + t.Parallel() + + pbp, err := antiflood.NewP2pBlackListProcessor( + &mock.CacherStub{}, + &mock.BlackListHandlerStub{}, + 0, + 1, + 1, + ) + + assert.True(t, check.IfNil(pbp)) + assert.True(t, errors.Is(err, process.ErrInvalidValue)) +} + +func TestNewP2pQuotaBlacklistProcessor_InvalidThresholdSizeReceivedFloodShouldErr(t *testing.T) { + t.Parallel() + + pbp, err := antiflood.NewP2pBlackListProcessor( + &mock.CacherStub{}, + &mock.BlackListHandlerStub{}, + 1, + 0, + 1, + ) + + assert.True(t, check.IfNil(pbp)) + assert.True(t, errors.Is(err, process.ErrInvalidValue)) +} + +func TestNewP2pQuotaBlacklistProcessor_InvalidNumFloodingRoundsShouldErr(t *testing.T) { + t.Parallel() + + pbp, err := antiflood.NewP2pBlackListProcessor( + &mock.CacherStub{}, + &mock.BlackListHandlerStub{}, + 1, + 1, + 0, + ) + + assert.True(t, check.IfNil(pbp)) + assert.True(t, errors.Is(err, process.ErrInvalidValue)) +} + +func TestNewP2pQuotaBlacklistProcessor_ShouldWork(t *testing.T) { + t.Parallel() + + pbp, err := antiflood.NewP2pBlackListProcessor( + 
&mock.CacherStub{}, + &mock.BlackListHandlerStub{}, + 1, + 1, + 1, + ) + + assert.False(t, check.IfNil(pbp)) + assert.Nil(t, err) +} + +//------- AddQuota + +func TestP2pQuotaBlacklistProcessor_AddQuotaUnderThresholdShouldNotCallGetOrPut(t *testing.T) { + t.Parallel() + + thresholdNum := uint32(10) + thresholdSize := uint64(20) + + pbp, _ := antiflood.NewP2pBlackListProcessor( + &mock.CacherStub{ + GetCalled: func(key []byte) (interface{}, bool) { + assert.Fail(t, "should not have called get") + return nil, false + }, + PutCalled: func(key []byte, value interface{}) (evicted bool) { + assert.Fail(t, "should not have called put") + return false + }, + }, + &mock.BlackListHandlerStub{}, + thresholdNum, + thresholdSize, + 1, + ) + + pbp.AddQuota("identifier", thresholdNum-1, thresholdSize-1, 1, 1) +} + +func TestP2pQuotaBlacklistProcessor_AddQuotaOverThresholdInexistentDataOnGetShouldPutOne(t *testing.T) { + t.Parallel() + + thresholdNum := uint32(10) + thresholdSize := uint64(20) + + putCalled := false + identifier := "identifier" + pbp, _ := antiflood.NewP2pBlackListProcessor( + &mock.CacherStub{ + GetCalled: func(key []byte) (interface{}, bool) { + return nil, false + }, + PutCalled: func(key []byte, value interface{}) (evicted bool) { + putCalled = true + assert.Equal(t, uint32(1), value) + assert.Equal(t, identifier, string(key)) + + return false + }, + }, + &mock.BlackListHandlerStub{}, + thresholdNum, + thresholdSize, + 1, + ) + + pbp.AddQuota(identifier, thresholdNum, thresholdSize, 1, 1) + + assert.True(t, putCalled) +} + +func TestP2pQuotaBlacklistProcessor_AddQuotaOverThresholdDataNotValidOnGetShouldPutOne(t *testing.T) { + t.Parallel() + + thresholdNum := uint32(10) + thresholdSize := uint64(20) + + putCalled := false + identifier := "identifier" + pbp, _ := antiflood.NewP2pBlackListProcessor( + &mock.CacherStub{ + GetCalled: func(key []byte) (interface{}, bool) { + return "invalid data", true + }, + PutCalled: func(key []byte, value interface{}) (evicted 
bool) { + putCalled = true + assert.Equal(t, uint32(1), value) + assert.Equal(t, identifier, string(key)) + + return false + }, + }, + &mock.BlackListHandlerStub{}, + thresholdNum, + thresholdSize, + 1, + ) + + pbp.AddQuota(identifier, thresholdNum, thresholdSize, 1, 1) + + assert.True(t, putCalled) +} + +func TestP2pQuotaBlacklistProcessor_AddQuotaShouldIncrement(t *testing.T) { + t.Parallel() + + thresholdNum := uint32(10) + thresholdSize := uint64(20) + + putCalled := false + identifier := "identifier" + existentValue := uint32(445) + pbp, _ := antiflood.NewP2pBlackListProcessor( + &mock.CacherStub{ + GetCalled: func(key []byte) (interface{}, bool) { + return existentValue, true + }, + PutCalled: func(key []byte, value interface{}) (evicted bool) { + putCalled = true + assert.Equal(t, existentValue+1, value) + assert.Equal(t, identifier, string(key)) + + return false + }, + }, + &mock.BlackListHandlerStub{}, + thresholdNum, + thresholdSize, + 1, + ) + + pbp.AddQuota(identifier, thresholdNum, thresholdSize, 1, 1) + + assert.True(t, putCalled) +} + +//------- ResetStatistics + +func TestP2pQuotaBlacklistProcessor_ResetStatisticsRemoveNilValueKey(t *testing.T) { + t.Parallel() + + thresholdNum := uint32(10) + thresholdSize := uint64(20) + + nilValKey := "nil val key" + removedCalled := false + pbp, _ := antiflood.NewP2pBlackListProcessor( + &mock.CacherStub{ + KeysCalled: func() [][]byte { + return [][]byte{[]byte(nilValKey)} + }, + PeekCalled: func(key []byte) (value interface{}, ok bool) { + return nil, false + }, + RemoveCalled: func(key []byte) { + if string(key) == nilValKey { + removedCalled = true + } + }, + }, + &mock.BlackListHandlerStub{}, + thresholdNum, + thresholdSize, + 1, + ) + + pbp.ResetStatistics() + + assert.True(t, removedCalled) +} + +func TestP2pQuotaBlacklistProcessor_ResetStatisticsRemoveInvalidValueValueKey(t *testing.T) { + t.Parallel() + + thresholdNum := uint32(10) + thresholdSize := uint64(20) + + invalidValKey := "invalid val key" + 
removedCalled := false + pbp, _ := antiflood.NewP2pBlackListProcessor( + &mock.CacherStub{ + KeysCalled: func() [][]byte { + return [][]byte{[]byte(invalidValKey)} + }, + PeekCalled: func(key []byte) (value interface{}, ok bool) { + return "invalid value", true + }, + RemoveCalled: func(key []byte) { + if string(key) == invalidValKey { + removedCalled = true + } + }, + }, + &mock.BlackListHandlerStub{}, + thresholdNum, + thresholdSize, + 1, + ) + + pbp.ResetStatistics() + + assert.True(t, removedCalled) +} + +func TestP2pQuotaBlacklistProcessor_ResetStatisticsUnderNumFloodingRoundsShouldNotBlackList(t *testing.T) { + t.Parallel() + + thresholdNum := uint32(10) + thresholdSize := uint64(20) + numFloodingRounds := uint32(30) + + key := "key" + removedCalled := false + addToBlacklistCalled := false + pbp, _ := antiflood.NewP2pBlackListProcessor( + &mock.CacherStub{ + KeysCalled: func() [][]byte { + return [][]byte{[]byte(key)} + }, + PeekCalled: func(key []byte) (value interface{}, ok bool) { + return numFloodingRounds - 1, true + }, + RemoveCalled: func(key []byte) { + removedCalled = true + }, + }, + &mock.BlackListHandlerStub{ + AddCalled: func(key string) error { + addToBlacklistCalled = true + + return nil + }, + }, + thresholdNum, + thresholdSize, + numFloodingRounds, + ) + + pbp.ResetStatistics() + + assert.False(t, removedCalled) + assert.False(t, addToBlacklistCalled) +} + +func TestP2pQuotaBlacklistProcessor_ResetStatisticsOverNumFloodingRoundsShouldBlackList(t *testing.T) { + t.Parallel() + + thresholdNum := uint32(10) + thresholdSize := uint64(20) + numFloodingRounds := uint32(30) + + key := "key" + removedCalled := false + addToBlacklistCalled := false + pbp, _ := antiflood.NewP2pBlackListProcessor( + &mock.CacherStub{ + KeysCalled: func() [][]byte { + return [][]byte{[]byte(key)} + }, + PeekCalled: func(key []byte) (value interface{}, ok bool) { + return numFloodingRounds, true + }, + RemoveCalled: func(key []byte) { + removedCalled = true + }, + }, + 
&mock.BlackListHandlerStub{ + AddCalled: func(key string) error { + addToBlacklistCalled = true + + return nil + }, + }, + thresholdNum, + thresholdSize, + numFloodingRounds, + ) + + pbp.ResetStatistics() + + assert.True(t, removedCalled) + assert.True(t, addToBlacklistCalled) +} diff --git a/statusHandler/p2pQuota/p2pQuotaBlacklistProcessor.go b/statusHandler/p2pQuota/p2pQuotaBlacklistProcessor.go deleted file mode 100644 index e2c5868cd1f..00000000000 --- a/statusHandler/p2pQuota/p2pQuotaBlacklistProcessor.go +++ /dev/null @@ -1,20 +0,0 @@ -package p2pQuota - -type p2pQuotaBlacklistProcessor struct { -} - -func (pqbp *p2pQuotaBlacklistProcessor) ResetStatistics() { - panic("implement me") -} - -func (pqbp *p2pQuotaBlacklistProcessor) AddQuota(identifier string, numReceived uint32, sizeReceived uint64, numProcessed uint32, sizeProcessed uint64) { - panic("implement me") -} - -// SetGlobalQuota does nothing (here to comply with QuotaStatusHandler interface) -func (pqbp *p2pQuotaBlacklistProcessor) SetGlobalQuota(_ uint32, _ uint64, _ uint32, _ uint64) {} - -// IsInterfaceNil returns true if there is no value under the interface -func (pqbp *p2pQuotaBlacklistProcessor) IsInterfaceNil() bool { - return pqbp == nil -} From e86ef89b2d526af9da0ab8e8b43cf7ef039df71f Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Tue, 31 Dec 2019 12:23:58 +0200 Subject: [PATCH 33/35] fixed failing test in p2p package --- p2p/libp2p/connectionMonitor_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/p2p/libp2p/connectionMonitor_test.go b/p2p/libp2p/connectionMonitor_test.go index a390a0dfc83..d3513a9010c 100644 --- a/p2p/libp2p/connectionMonitor_test.go +++ b/p2p/libp2p/connectionMonitor_test.go @@ -201,7 +201,7 @@ func TestConnectionMonitor_BlackListedPeerShouldErr(t *testing.T) { libp2pContext := &Libp2pContext{ blacklistHandler: &mock.BlacklistHandlerStub{ HasCalled: func(key string) bool { - return key == string(pid) + return key == pid.Pretty() }, }, 
connHost: createStubConnectableHost(), From 067cccdb9af2de0fe4360b9ad1f28316f3e9d0b3 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Tue, 31 Dec 2019 12:27:15 +0200 Subject: [PATCH 34/35] fixed bot comments --- p2p/libp2p/connectionMonitor.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/p2p/libp2p/connectionMonitor.go b/p2p/libp2p/connectionMonitor.go index d022b5311c4..06680cae07f 100644 --- a/p2p/libp2p/connectionMonitor.go +++ b/p2p/libp2p/connectionMonitor.go @@ -42,7 +42,7 @@ func newConnectionMonitor( cm := &connectionMonitor{ reconnecter: reconnecter, - chDoReconnect: make(chan struct{}, 0), + chDoReconnect: make(chan struct{}), libp2pContext: libp2pContext, netw: libp2pContext.connHost.Network(), thresholdMinConnectedPeers: thresholdMinConnectedPeers, @@ -95,11 +95,9 @@ func (cm *connectionMonitor) HandleDisconnectedPeer(_ p2p.PeerID) error { // DoReconnectionBlocking will try to reconnect to the initial addresses (seeders) func (cm *connectionMonitor) DoReconnectionBlocking() { - select { - case <-cm.chDoReconnect: - if cm.reconnecter != nil { - cm.reconnecter.ReconnectToNetwork() - } + <-cm.chDoReconnect + if cm.reconnecter != nil { + cm.reconnecter.ReconnectToNetwork() } } From aadfc74dfeb964b67c7ab23c1ceaad53b23c5cf6 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Thu, 2 Jan 2020 16:16:12 +0200 Subject: [PATCH 35/35] finished implementation and integration --- cmd/node/config/config.toml | 9 +++- cmd/node/factory/structs.go | 86 +++++++++++++++++++++++++++++-------- config/config.go | 11 ++++- 3 files changed, 86 insertions(+), 20 deletions(-) diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml index f9327a064dd..9df1796c77f 100644 --- a/cmd/node/config/config.toml +++ b/cmd/node/config/config.toml @@ -257,13 +257,18 @@ Type = "LRU" [Antiflood] - PeerMaxMessagesPerSecond = 68 + PeerMaxMessagesPerSecond = 75 PeerMaxTotalSizePerSecond = 2097152 MaxMessagesPerSecond = 400 - MaxTotalSizePerSecond = 
9437184 + MaxTotalSizePerSecond = 4194304 [Antiflood.Cache] Size = 5000 Type = "LRU" + [Antiflood.BlackList] + ThresholdNumMessagesPerSecond = 150 + ThresholdSizePerSecond = 4194304 + NumFloodingRounds = 10 + PeerBanDurationInSeconds = 300 [Logger] Path = "logs" diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index b410cbb79ca..58fb0e8147f 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -74,7 +74,7 @@ import ( "github.com/ElrondNetwork/elrond-go/process/smartContract" "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" processSync "github.com/ElrondNetwork/elrond-go/process/sync" - antifloodThrottle "github.com/ElrondNetwork/elrond-go/process/throttle/antiflood" + processAntiflood "github.com/ElrondNetwork/elrond-go/process/throttle/antiflood" "github.com/ElrondNetwork/elrond-go/process/transaction" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/statusHandler" @@ -84,7 +84,7 @@ import ( "github.com/ElrondNetwork/elrond-go/storage/memorydb" "github.com/ElrondNetwork/elrond-go/storage/storageUnit" "github.com/ElrondNetwork/elrond-go/storage/timecache" - "github.com/ElrondNetwork/elrond-vm-common" + vmcommon "github.com/ElrondNetwork/elrond-vm-common" "github.com/btcsuite/btcd/btcec" libp2pCrypto "github.com/libp2p/go-libp2p-core/crypto" "github.com/urfave/cli" @@ -490,7 +490,14 @@ func NetworkComponentsFactory(p2pConfig *config.P2PConfig, mainConfig *config.Co return nil, err } - antifloodHandler, err := createAntifloodComponent(mainConfig, core.StatusHandler) + antifloodHandler, p2pPeerBlackList, err := createAntifloodAndBlackListComponents(mainConfig, core.StatusHandler) + if err != nil { + return nil, err + } + + err = netMessenger.ApplyOptions( + libp2p.WithPeerBlackList(p2pPeerBlackList), + ) if err != nil { return nil, err } @@ -501,11 +508,20 @@ func NetworkComponentsFactory(p2pConfig *config.P2PConfig, mainConfig *config.Co }, nil } -func 
createAntifloodComponent(mainConfig *config.Config, status core.AppStatusHandler) (consensus.P2PAntifloodHandler, error) { +func createAntifloodAndBlackListComponents( + mainConfig *config.Config, + status core.AppStatusHandler, +) (consensus.P2PAntifloodHandler, p2p.BlacklistHandler, error) { + cacheConfig := storageFactory.GetCacherFromConfig(mainConfig.Antiflood.Cache) antifloodCache, err := storageUnit.NewCache(cacheConfig.Type, cacheConfig.Size, cacheConfig.Shards) if err != nil { - return nil, err + return nil, nil, err + } + + blackListCache, err := storageUnit.NewCache(cacheConfig.Type, cacheConfig.Size, cacheConfig.Shards) + if err != nil { + return nil, nil, err } peerMaxMessagesPerSecond := mainConfig.Antiflood.PeerMaxMessagesPerSecond @@ -513,33 +529,60 @@ func createAntifloodComponent(mainConfig *config.Config, status core.AppStatusHa maxMessagesPerSecond := mainConfig.Antiflood.MaxMessagesPerSecond maxTotalSizePerSecond := mainConfig.Antiflood.MaxTotalSizePerSecond - log.Debug("started antiflood component", - "peerMaxMessagesPerSecond", peerMaxMessagesPerSecond, - "peerMaxTotalSizePerSecond", core.ConvertBytes(peerMaxTotalSizePerSecond), - "maxMessagesPerSecond", maxMessagesPerSecond, - "maxTotalSizePerSecond", core.ConvertBytes(maxTotalSizePerSecond), - ) - quotaProcessor, err := p2pQuota.NewP2pQuotaProcessor(status) if err != nil { - return nil, err + return nil, nil, err } - floodPreventer, err := antifloodThrottle.NewQuotaFloodPreventer( + peerBanInSeconds := mainConfig.Antiflood.BlackList.PeerBanDurationInSeconds + if peerBanInSeconds == 0 { + return nil, nil, fmt.Errorf("Antiflood.BlackList.PeerBanDurationInSeconds should be greater than 0") + } + + p2pPeerBlackList := timecache.NewTimeCache(time.Second * time.Duration(peerBanInSeconds)) + blackListProcessor, err := processAntiflood.NewP2pBlackListProcessor( + blackListCache, + p2pPeerBlackList, + mainConfig.Antiflood.BlackList.ThresholdNumMessagesPerSecond, + 
mainConfig.Antiflood.BlackList.ThresholdSizePerSecond, + mainConfig.Antiflood.BlackList.NumFloodingRounds, + ) + if err != nil { + return nil, nil, err + } + + floodPreventer, err := processAntiflood.NewQuotaFloodPreventer( antifloodCache, - []antifloodThrottle.QuotaStatusHandler{quotaProcessor}, + []processAntiflood.QuotaStatusHandler{quotaProcessor, blackListProcessor}, peerMaxMessagesPerSecond, peerMaxTotalSizePerSecond, maxMessagesPerSecond, maxTotalSizePerSecond, ) if err != nil { - return nil, err + return nil, nil, err } + log.Debug("started antiflood & blacklist components", + "peerMaxMessagesPerSecond", peerMaxMessagesPerSecond, + "peerMaxTotalSizePerSecond", core.ConvertBytes(peerMaxTotalSizePerSecond), + "maxMessagesPerSecond", maxMessagesPerSecond, + "maxTotalSizePerSecond", core.ConvertBytes(maxTotalSizePerSecond), + "peerBanDurationInSeconds", peerBanInSeconds, + "thresholdNumMessagesPerSecond", mainConfig.Antiflood.BlackList.ThresholdNumMessagesPerSecond, + "thresholdSizePerSecond", mainConfig.Antiflood.BlackList.ThresholdSizePerSecond, + "numFloodingRounds", mainConfig.Antiflood.BlackList.NumFloodingRounds, + ) + startResetingFloodPreventer(floodPreventer) + startSweepingP2pPeerBlackList(p2pPeerBlackList) + + p2pAntiflood, err := antiflood.NewP2pAntiflood(floodPreventer) + if err != nil { + return nil, nil, err + } - return antiflood.NewP2pAntiflood(floodPreventer) + return p2pAntiflood, p2pPeerBlackList, nil } func startResetingFloodPreventer(floodPreventer p2p.FloodPreventer) { @@ -551,6 +594,15 @@ func startResetingFloodPreventer(floodPreventer p2p.FloodPreventer) { }() } +func startSweepingP2pPeerBlackList(p2pPeerBlackList process.BlackListHandler) { + go func() { + for { + time.Sleep(time.Second * 5) + p2pPeerBlackList.Sweep() + } + }() +} + type processComponentsFactoryArgs struct { coreComponents *coreComponentsFactoryArgs genesisConfig *sharding.Genesis diff --git a/config/config.go b/config/config.go index 66835d041a3..aa27a6d0c7b 100644 
--- a/config/config.go +++ b/config/config.go @@ -102,7 +102,7 @@ type Config struct { ShardHeadersDataPool CacheConfig MetaHeaderNoncesDataPool CacheConfig - Antiflood AntifloodConfig + Antiflood AntifloodConfig EpochStartConfig EpochStartConfig Logger LoggerConfig Address AddressConfig @@ -194,9 +194,18 @@ type FacadeConfig struct { PprofEnabled bool } +// BlackListConfig will hold the p2p peer black list threshold values +type BlackListConfig struct { + ThresholdNumMessagesPerSecond uint32 + ThresholdSizePerSecond uint64 + NumFloodingRounds uint32 + PeerBanDurationInSeconds uint32 +} + // AntifloodConfig will hold all p2p antiflood parameters type AntifloodConfig struct { Cache CacheConfig + BlackList BlackListConfig PeerMaxMessagesPerSecond uint32 PeerMaxTotalSizePerSecond uint64 MaxMessagesPerSecond uint32