From 79b8e23088f2f0949f1b02c42377256865380536 Mon Sep 17 00:00:00 2001
From: Konstantin Akimov
Date: Tue, 17 Sep 2024 11:49:36 +0700
Subject: [PATCH] feat: new single node quorum and functional test
 feature_llmq_singlenode.py

---
 src/llmq/commitment.cpp                    |  31 ++--
 src/llmq/dkgsession.cpp                    |  58 +++++++
 src/llmq/dkgsession.h                      |   3 +
 src/llmq/dkgsessionhandler.cpp             |  11 ++
 src/llmq/params.h                          |   2 +-
 src/llmq/quorums.cpp                       |   6 +
 src/llmq/signing_shares.cpp                |  45 +++++
 src/rpc/quorums.cpp                        |  17 +-
 test/functional/feature_llmq_singlenode.py | 187 +++++++++++++++++++++
 test/functional/test_runner.py             |   1 +
 10 files changed, 342 insertions(+), 19 deletions(-)
 create mode 100755 test/functional/feature_llmq_singlenode.py

diff --git a/src/llmq/commitment.cpp b/src/llmq/commitment.cpp
index 4727fa2533..d49678fae6 100644
--- a/src/llmq/commitment.cpp
+++ b/src/llmq/commitment.cpp
@@ -70,7 +70,7 @@ bool CFinalCommitment::Verify(CDeterministicMNManager& dmnman, CQuorumSnapshotMa
         LogPrint(BCLog::LLMQ, "CFinalCommitment -- q[%s] invalid quorumPublicKey\n", quorumHash.ToString());
         return false;
     }
-    if (quorumVvecHash.IsNull()) {
+    if (llmq_params.size != 1 && quorumVvecHash.IsNull()) {
         LogPrint(BCLog::LLMQ, "CFinalCommitment -- q[%s] invalid quorumVvecHash\n", quorumHash.ToString());
         return false;
     }
@@ -115,19 +115,26 @@ bool CFinalCommitment::Verify(CDeterministicMNManager& dmnman, CQuorumSnapshotMa
         LogPrint(BCLog::LLMQ, "CFinalCommitment::%s members[%s] quorumPublicKey[%s] commitmentHash[%s]\n", __func__, ss3.str(), quorumPublicKey.ToString(), commitmentHash.ToString());
     }
 
-    std::vector<CBLSPublicKey> memberPubKeys;
-    for (const auto i : irange::range(members.size())) {
-        if (!signers[i]) {
-            continue;
+    if (llmq_params.size == 1) {
+        LogPrintf("pubkey operator: %s\n", members[0]->pdmnState->pubKeyOperator.Get().ToString());
+        if (!membersSig.VerifyInsecure(members[0]->pdmnState->pubKeyOperator.Get(), commitmentHash)) {
+            LogPrint(BCLog::LLMQ, "CFinalCommitment -- q[%s] invalid member signature\n", quorumHash.ToString());
+            return false;
+        }
+    } else {
+        std::vector<CBLSPublicKey> memberPubKeys;
+        for (const auto i : irange::range(members.size())) {
+            if (!signers[i]) {
+                continue;
+            }
+            memberPubKeys.emplace_back(members[i]->pdmnState->pubKeyOperator.Get());
         }
-        memberPubKeys.emplace_back(members[i]->pdmnState->pubKeyOperator.Get());
-    }
 
-    if (!membersSig.VerifySecureAggregated(memberPubKeys, commitmentHash)) {
-        LogPrint(BCLog::LLMQ, "CFinalCommitment -- q[%s] invalid aggregated members signature\n", quorumHash.ToString());
-        return false;
+        if (!membersSig.VerifySecureAggregated(memberPubKeys, commitmentHash)) {
+            LogPrint(BCLog::LLMQ, "CFinalCommitment -- q[%s] invalid aggregated members signature\n", quorumHash.ToString());
+            return false;
+        }
     }
-
     if (!quorumSig.VerifyInsecure(quorumPublicKey, commitmentHash)) {
         LogPrint(BCLog::LLMQ, "CFinalCommitment -- q[%s] invalid quorum signature\n", quorumHash.ToString());
         return false;
@@ -161,7 +168,7 @@ bool CFinalCommitment::VerifySizes(const Consensus::LLMQParams& params) const
         return false;
     }
     if (validMembers.size() != size_t(params.size)) {
-        LogPrint(BCLog::LLMQ, "CFinalCommitment -- q[%s] invalid signers.size=%d\n", quorumHash.ToString(), signers.size());
+        LogPrint(BCLog::LLMQ, "CFinalCommitment -- q[%s] invalid validMembers.size=%d\n", quorumHash.ToString(), validMembers.size());
         return false;
     }
     return true;
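Both membersSig and quorumSig above are checked against the same commitmentHash. For reference, a minimal sketch of how that hash is assembled, mirroring the existing BuildCommitmentHash helper that FinalizeSingleCommitment calls further down (the CHashWriter flags and the DYNBITSET wrapper are assumptions based on the current llmq code, not part of this patch):

    // Sketch: the commitment hash that both signature checks commit to.
    uint256 BuildCommitmentHashSketch(Consensus::LLMQType llmqType, const uint256& blockHash,
                                      const std::vector<bool>& validMembers,
                                      const CBLSPublicKey& pubKey, const uint256& vvecHash)
    {
        CHashWriter hw(SER_NETWORK, 0);
        hw << llmqType;                // which LLMQ type the commitment is for
        hw << blockHash;               // quorum base block hash
        hw << DYNBITSET(validMembers); // compact bitset of valid members
        hw << pubKey;                  // quorum key (the operator key for size-1 quorums)
        hw << vvecHash;                // verification vector hash (null for size-1 quorums)
        return hw.GetHash();
    }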
diff --git a/src/llmq/dkgsession.cpp b/src/llmq/dkgsession.cpp
index 263f383a07..0dd6621d9f 100644
--- a/src/llmq/dkgsession.cpp
+++ b/src/llmq/dkgsession.cpp
@@ -166,6 +166,8 @@ void CDKGSession::Contribute(CDKGPendingMessages& pendingMessages, PeerManager&
         return;
     }
 
+    assert(params.threshold > 1); // we should never get here with single-node quorums
+
     cxxtimer::Timer t1(true);
     logger.Batch("generating contributions");
     if (!blsWorker.GenerateContributions(params.threshold, memberIds, vvecContribution, m_sk_contributions)) {
@@ -1236,6 +1238,7 @@ std::vector<CFinalCommitment> CDKGSession::FinalizeCommitments()
         fqc.quorumVvecHash = first.quorumVvecHash;
 
         const bool isQuorumRotationEnabled{IsQuorumRotationEnabled(params, m_quorum_base_block_index)};
+        // TODO: always pass `true` here: now that v19 is active, we always write BASIC
         fqc.nVersion = CFinalCommitment::GetVersion(isQuorumRotationEnabled,
                                                     DeploymentActiveAfter(m_quorum_base_block_index, Params().GetConsensus(), Consensus::DEPLOYMENT_V19));
         fqc.quorumIndex = isQuorumRotationEnabled ? quorumIndex : 0;
@@ -1293,6 +1296,61 @@ std::vector<CFinalCommitment> CDKGSession::FinalizeCommitments()
     return finalCommitments;
 }
 
+CFinalCommitment CDKGSession::FinalizeSingleCommitment()
+{
+    if (!AreWeMember()) {
+        return {};
+    }
+
+    CDKGLogger logger(*this, __func__, __LINE__);
+
+    std::vector<CBLSId> signerIds;
+    std::vector<CBLSSignature> thresholdSigs;
+
+    CFinalCommitment fqc(params, m_quorum_base_block_index->GetBlockHash());
+
+    fqc.signers = {true};
+    fqc.validMembers = {true};
+
+    CBLSSecretKey sk1;
+    sk1.MakeNewKey();
+
+    fqc.quorumPublicKey = sk1.GetPublicKey();
+    fqc.quorumVvecHash = {};
+
+    // use just the MN's operator public key as the quorum pubkey.
+    // TODO: use sk1 here instead and use the recovery mechanism from shares, but that's not trivial to do
+    const bool workaround_qpublic_key = true;
+    if (workaround_qpublic_key) {
+        fqc.quorumPublicKey = m_mn_activeman->GetPubKey();
+    }
+    const bool isQuorumRotationEnabled{false};
+    fqc.nVersion = CFinalCommitment::GetVersion(isQuorumRotationEnabled,
+                                                DeploymentActiveAfter(m_quorum_base_block_index, Params().GetConsensus(), Consensus::DEPLOYMENT_V19));
+    fqc.quorumIndex = 0;
+
+    uint256 commitmentHash = BuildCommitmentHash(fqc.llmqType, fqc.quorumHash, fqc.validMembers, fqc.quorumPublicKey, fqc.quorumVvecHash);
+    fqc.quorumSig = sk1.Sign(commitmentHash, m_use_legacy_bls);
+
+    fqc.membersSig = m_mn_activeman->Sign(commitmentHash, m_use_legacy_bls);
+
+    if (workaround_qpublic_key) {
+        fqc.quorumSig = fqc.membersSig;
+    }
+
+    if (!fqc.Verify(m_dmnman, m_qsnapman, m_quorum_base_block_index, true)) {
+        logger.Batch("failed to verify final commitment");
+        assert(false);
+    }
+
+    logger.Batch("final commitment: validMembers=%d, signers=%d, quorumPublicKey=%s",
+                 fqc.CountValidMembers(), fqc.CountSigners(), fqc.quorumPublicKey.ToString());
+
+    logger.Flush();
+
+    return fqc;
+}
+
 CDKGMember* CDKGSession::GetMember(const uint256& proTxHash) const
 {
     auto it = membersMap.find(proTxHash);
diff --git a/src/llmq/dkgsession.h b/src/llmq/dkgsession.h
index af52b81f8d..afcbb2af3f 100644
--- a/src/llmq/dkgsession.h
+++ b/src/llmq/dkgsession.h
@@ -382,6 +382,9 @@ class CDKGSession
     // Phase 5: aggregate/finalize
     std::vector<CFinalCommitment> FinalizeCommitments();
 
+    // All phases 5-in-1 for single-node quorums
+    CFinalCommitment FinalizeSingleCommitment();
+
     [[nodiscard]] bool AreWeMember() const { return !myProTxHash.IsNull(); }
     void MarkBadMember(size_t idx);
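A note on the workaround above: because workaround_qpublic_key overwrites both quorumPublicKey (with the operator key) and quorumSig (with membersSig), the two signature checks in CFinalCommitment::Verify collapse into the same check. A minimal sketch of the resulting invariant; the helper name and the use of operator== on CBLSPublicKey are assumptions for illustration, not part of the patch:

    // Hypothetical helper: what must hold for a single-node final commitment
    // produced with workaround_qpublic_key == true.
    static void CheckSingleNodeCommitmentInvariant(const CFinalCommitment& fqc,
                                                   const CBLSPublicKey& operatorPubKey,
                                                   const uint256& commitmentHash)
    {
        // quorumPublicKey was replaced with the member's operator key...
        assert(fqc.quorumPublicKey == operatorPubKey);
        // ...and quorumSig was replaced with membersSig, so the member-signature
        // check and the quorum-signature check verify the same signature
        // against the same key:
        assert(fqc.membersSig.VerifyInsecure(operatorPubKey, commitmentHash));
        assert(fqc.quorumSig.VerifyInsecure(fqc.quorumPublicKey, commitmentHash));
    }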
diff --git a/src/llmq/dkgsessionhandler.cpp b/src/llmq/dkgsessionhandler.cpp
index 95b1c0fd33..eb7970990c 100644
--- a/src/llmq/dkgsessionhandler.cpp
+++ b/src/llmq/dkgsessionhandler.cpp
@@ -549,6 +549,17 @@ void CDKGSessionHandler::HandleDKGRound(CConnman& connman, PeerManager& peerman)
         return changed;
     });
 
+    if (params.size == 1) {
+        auto finalCommitment = curSession->FinalizeSingleCommitment();
+        if (!finalCommitment.IsNull()) { // it can be null only if we are not a member
+            if (auto inv_opt = quorumBlockProcessor.AddMineableCommitment(finalCommitment); inv_opt.has_value()) {
+                peerman.RelayInv(inv_opt.value());
+            }
+        }
+        WaitForNextPhase(QuorumPhase::Initialized, QuorumPhase::Contribute, curQuorumHash);
+        return;
+    }
+
     const auto tip_mn_list = m_dmnman.GetListAtChainTip();
     utils::EnsureQuorumConnections(params, connman, m_dmnman, m_sporkman, m_qsnapman, tip_mn_list, pQuorumBaseBlockIndex,
                                    curSession->myProTxHash, /* is_masternode = */ m_mn_activeman != nullptr);
diff --git a/src/llmq/params.h b/src/llmq/params.h
index 107b54487b..d52a9db1d4 100644
--- a/src/llmq/params.h
+++ b/src/llmq/params.h
@@ -37,7 +37,7 @@ enum class LLMQType : uint8_t {
     LLMQ_TEST_PLATFORM = 106, // 3 members, 2 (66%) threshold, one per hour.
 
     // for devnets only. rotated version (v2) for devnets
-    LLMQ_DEVNET_DIP0024 = 105 // 8 members, 4 (50%) threshold, one per hour. Params might differ when -llmqdevnetparams is used
+    LLMQ_DEVNET_DIP0024 = 105, // 8 members, 4 (50%) threshold, one per hour. Params might differ when -llmqdevnetparams is used
 };
 
 // Configures a LLMQ and its DKG
diff --git a/src/llmq/quorums.cpp b/src/llmq/quorums.cpp
index 062bde54f1..1e45183e0e 100644
--- a/src/llmq/quorums.cpp
+++ b/src/llmq/quorums.cpp
@@ -414,6 +414,12 @@ CQuorumPtr CQuorumManager::BuildQuorumFromCommitment(const Consensus::LLMQType l
     quorum->Init(std::move(qc), pQuorumBaseBlockIndex, minedBlockHash, members);
 
+    if (populate_cache && llmq_params_opt->size == 1) {
+        WITH_LOCK(cs_map_quorums, mapQuorumsCache[llmqType].insert(quorumHash, quorum));
+
+        return quorum;
+    }
+
     bool hasValidVvec = false;
     if (WITH_LOCK(cs_db, return quorum->ReadContributions(*db))) {
         hasValidVvec = true;
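For context on what the params.size == 1 branch in HandleDKGRound skips: a multi-member DKG walks the full phase ladder below, while the single-node path jumps straight from Initialized to a mineable commitment and waits for the next round. The early return in BuildQuorumFromCommitment follows the same logic: a size-1 quorum has no DKG contributions to read back, so it can be cached immediately. The enum is reproduced approximately from llmq/dkgsessionhandler.h; treat exact names and values as an assumption:

    // Approximate phase ladder from llmq/dkgsessionhandler.h. The size == 1
    // branch never enters Contribute..Finalize.
    enum class QuorumPhase {
        Initialized = 1,
        Contribute, // exchange encrypted secret-key contributions
        Complain,   // complain about members whose contributions failed
        Justify,    // justifications for complaints
        Commit,     // premature commitments
        Finalize,   // aggregate into the final commitment
        Idle,
    };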
diff --git a/src/llmq/signing_shares.cpp b/src/llmq/signing_shares.cpp
index ed5105651f..bf4d940056 100644
--- a/src/llmq/signing_shares.cpp
+++ b/src/llmq/signing_shares.cpp
@@ -778,6 +778,22 @@ void CSigSharesManager::TryRecoverSig(PeerManager& peerman, const CQuorumCPtr& q
         return;
     }
 
+    if (quorum->params.size == 1) {
+        if (sigSharesForSignHash->empty()) {
+            LogPrint(BCLog::LLMQ_SIGS, "CSigSharesManager::%s -- impossible to recover single-node signature - no shares yet. id=%s, msgHash=%s\n", __func__,
+                     id.ToString(), msgHash.ToString());
+            return;
+        }
+        const auto& sigShare = sigSharesForSignHash->begin()->second;
+        CBLSSignature recoveredSig = sigShare.sigShare.Get();
+        LogPrint(BCLog::LLMQ_SIGS, "CSigSharesManager::%s -- recover single-node signature. id=%s, msgHash=%s\n", __func__,
+                 id.ToString(), msgHash.ToString());
+
+        auto rs = std::make_shared<CRecoveredSig>(quorum->params.type, quorum->qc->quorumHash, id, msgHash, recoveredSig);
+        sigman.ProcessRecoveredSig(rs, peerman);
+        return; // end of single-node quorum processing
+    }
+
     sigSharesForRecovery.reserve((size_t) quorum->params.threshold);
     idsForRecovery.reserve((size_t) quorum->params.threshold);
     for (auto it = sigSharesForSignHash->begin(); it != sigSharesForSignHash->end() && sigSharesForRecovery.size() < size_t(quorum->params.threshold); ++it) {
@@ -1524,6 +1540,35 @@ std::optional<CSigShare> CSigSharesManager::CreateSigShare(const CQuorumCPtr& qu
         return std::nullopt;
     }
 
+    if (quorum->params.size == 1) {
+        int memberIdx = quorum->GetMemberIndex(activeMasterNodeProTxHash);
+        if (memberIdx == -1) {
+            // this should really not happen (IsValidMember returned true)
+            return std::nullopt;
+        }
+
+        CSigShare sigShare(quorum->params.type, quorum->qc->quorumHash, id, msgHash, uint16_t(memberIdx), {});
+        uint256 signHash = sigShare.buildSignHash();
+
+        // TODO: this should be signed with the QUORUM key, not the OPERATOR key;
+        // see the TODO in CDKGSession::FinalizeSingleCommitment for details
+        sigShare.sigShare.Set(m_mn_activeman->Sign(signHash, bls::bls_legacy_scheme.load()), bls::bls_legacy_scheme.load());
+
+        if (!sigShare.sigShare.Get().IsValid()) {
+            LogPrintf("CSigSharesManager::%s -- failed to sign sigShare. signHash=%s, id=%s, msgHash=%s, time=%s\n", __func__,
+                      signHash.ToString(), sigShare.getId().ToString(), sigShare.getMsgHash().ToString(), t.count());
+            return std::nullopt;
+        }
+
+        sigShare.UpdateKey();
+
+        LogPrint(BCLog::LLMQ_SIGS, "CSigSharesManager::%s -- created sigShare. signHash=%s, id=%s, msgHash=%s, llmqType=%d, quorum=%s, time=%s\n", __func__,
+                 signHash.ToString(), sigShare.getId().ToString(), sigShare.getMsgHash().ToString(), ToUnderlying(quorum->params.type), quorum->qc->quorumHash.ToString(), t.count());
+
+        return sigShare;
+    }
+
     const CBLSSecretKey& skShare = quorum->GetSkShare();
     if (!skShare.IsValid()) {
         LogPrint(BCLog::LLMQ_SIGS, "CSigSharesManager::%s -- we don't have our skShare for quorum %s\n", __func__, quorum->qc->quorumHash.ToString());
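Why promoting the lone share in TryRecoverSig is sound: with threshold > 1 the loop below the new branch interpolates threshold shares via CBLSSignature::Recover, and Lagrange interpolation over a single point is the identity, so for a 1-of-1 quorum the recovered signature equals the share itself. A sketch of that equivalence, assuming Dash's CBLSSignature/CBLSId wrappers and an operator== on signatures; the helper itself is illustrative:

    // Sketch: for a 1-of-1 quorum, threshold "recovery" of one share returns
    // the share unchanged, which is what the size == 1 fast path relies on.
    bool SingleShareRecoverySketch(const CBLSSignature& share, const uint256& proTxHash)
    {
        CBLSSignature recovered;
        if (!recovered.Recover({share}, {CBLSId(proTxHash)})) return false;
        return recovered == share; // single-point Lagrange interpolation is the identity
    }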
diff --git a/src/rpc/quorums.cpp b/src/rpc/quorums.cpp
index bd2a4c82ab..bb12b8edf7 100644
--- a/src/rpc/quorums.cpp
+++ b/src/rpc/quorums.cpp
@@ -179,7 +179,7 @@ static RPCHelpMan quorum_list_extended()
     };
 }
 
-static UniValue BuildQuorumInfo(const llmq::CQuorumBlockProcessor& quorum_block_processor, const llmq::CQuorumCPtr& quorum, bool includeMembers, bool includeSkShare)
+static UniValue BuildQuorumInfo(const llmq::CQuorumBlockProcessor& quorum_block_processor, const llmq::CQuorumCPtr& quorum, bool includeMembers, bool includeSkShare, bool single_node_quorum = false)
 {
     UniValue ret(UniValue::VOBJ);
 
@@ -210,9 +210,13 @@ static UniValue BuildQuorumInfo(const llmq::CQuorumBlockProcessor& quorum_block_
             mo.pushKV("pubKeyOperator", dmn->pdmnState->pubKeyOperator.ToString());
             mo.pushKV("valid", quorum->qc->validMembers[i]);
             if (quorum->qc->validMembers[i]) {
-                CBLSPublicKey pubKey = quorum->GetPubKeyShare(i);
-                if (pubKey.IsValid()) {
-                    mo.pushKV("pubKeyShare", pubKey.ToString());
+                if (single_node_quorum) {
+                    mo.pushKV("pubKeyShare", dmn->pdmnState->pubKeyOperator.ToString());
+                } else {
+                    CBLSPublicKey pubKey = quorum->GetPubKeyShare(i);
+                    if (pubKey.IsValid()) {
+                        mo.pushKV("pubKeyShare", pubKey.ToString());
+                    }
                 }
             }
             membersArr.push_back(mo);
@@ -245,7 +249,8 @@ static RPCHelpMan quorum_info()
     const LLMQContext& llmq_ctx = EnsureLLMQContext(node);
 
     const Consensus::LLMQType llmqType{static_cast<Consensus::LLMQType>(ParseInt32V(request.params[0], "llmqType"))};
-    if (!Params().GetLLMQ(llmqType).has_value()) {
+    auto llmq_opt = Params().GetLLMQ(llmqType);
+    if (!llmq_opt.has_value()) {
         throw JSONRPCError(RPC_INVALID_PARAMETER, "invalid LLMQ type");
     }
 
@@ -260,7 +265,7 @@ static RPCHelpMan quorum_info()
         throw JSONRPCError(RPC_INVALID_PARAMETER, "quorum not found");
     }
 
-    return BuildQuorumInfo(*llmq_ctx.quorum_block_processor, quorum, true, includeSkShare);
+    return BuildQuorumInfo(*llmq_ctx.quorum_block_processor, quorum, true, includeSkShare, llmq_opt->size == 1);
     },
 };
 }
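How the signatures reported here get verified downstream: `quorum verify` checks a recovered signature against the quorum public key over the standard sign hash. Since the single-node commitment published the operator key as quorumPublicKey, verification works with no further RPC changes. A sketch of the core check, assuming the existing sign-hash helper (named BuildSignHash in llmq/signing; exact name and namespace may vary by version):

    // Sketch of the check behind `quorum verify`.
    bool VerifyRecoveredSigSketch(Consensus::LLMQType llmqType, const CQuorumCPtr& quorum,
                                  const uint256& id, const uint256& msgHash, const CBLSSignature& sig)
    {
        // The sign hash commits to the LLMQ type, quorum, request id and message.
        const uint256 signHash = llmq::BuildSignHash(llmqType, quorum->qc->quorumHash, id, msgHash);
        // For size-1 quorums, qc->quorumPublicKey is the member's operator key,
        // so the share created in CreateSigShare verifies here directly.
        return sig.VerifyInsecure(quorum->qc->quorumPublicKey, signHash);
    }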
diff --git a/test/functional/feature_llmq_singlenode.py b/test/functional/feature_llmq_singlenode.py
new file mode 100755
index 0000000000..d9a7950c9c
--- /dev/null
+++ b/test/functional/feature_llmq_singlenode.py
@@ -0,0 +1,187 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2024 The Dash Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+'''
+feature_llmq_singlenode.py
+
+Checks single-node LLMQ quorum creation and signing.
+This functional test is similar to feature_llmq_signing.py, but the differences
+are big enough that keeping a common implementation is impractical.
+'''
+
+import time
+
+from test_framework.authproxy import JSONRPCException
+from test_framework.test_framework import DashTestFramework
+from test_framework.util import (
+    assert_raises_rpc_error,
+    assert_greater_than,
+    force_finish_mnsync,
+    wait_until_helper,
+)
+
+
+id = "0000000000000000000000000000000000000000000000000000000000000001"
+msgHash = "0000000000000000000000000000000000000000000000000000000000000002"
+msgHashConflict = "0000000000000000000000000000000000000000000000000000000000000003"
+
+q_type = 100
+
+
+class LLMQSigningTest(DashTestFramework):
+    def set_test_params(self):
+        self.set_dash_test_params(1, 0, [["-llmqtestinstantsenddip0024=llmq_test_instantsend", "-peertimeout=300000000"]],
+                                  evo_count=2)
+        self.set_dash_llmq_test_params(1, 1)
+
+    def mine_single_node_quorum(self):
+        node = self.nodes[0]
+        quorums = node.quorum('list')['llmq_test']
+
+        skip_count = 24 - (self.nodes[0].getblockcount() % 24)
+        if skip_count != 0:
+            self.bump_mocktime(1)
+            self.generate(self.nodes[0], skip_count)
+        time.sleep(1)
+        self.generate(self.nodes[0], 30)
+        new_quorums_list = node.quorum('list')['llmq_test']
+
+        self.log.info(f"Test quorums at height={node.getblockcount()}: {new_quorums_list}")
+        assert new_quorums_list != quorums
+
+    def check_sigs(self, hasrecsigs, isconflicting1, isconflicting2):
+        has_sig = False
+        conflicting_1 = False
+        conflicting_2 = False
+
+        for mn in self.mninfo:
+            if mn.node.quorum("hasrecsig", q_type, id, msgHash):
+                has_sig = True
+            if mn.node.quorum("isconflicting", q_type, id, msgHash):
+                conflicting_1 = True
+            if mn.node.quorum("isconflicting", q_type, id, msgHashConflict):
+                conflicting_2 = True
+        if has_sig != hasrecsigs:
+            return False
+        if conflicting_1 != isconflicting1:
+            return False
+        if conflicting_2 != isconflicting2:
+            return False
+
+        return True
+
+    def wait_for_sigs(self, hasrecsigs, isconflicting1, isconflicting2, timeout):
+        self.wait_until(lambda: self.check_sigs(hasrecsigs, isconflicting1, isconflicting2), timeout=timeout)
+
+    def assert_sigs_nochange(self, hasrecsigs, isconflicting1, isconflicting2, timeout):
+        assert not wait_until_helper(lambda: not self.check_sigs(hasrecsigs, isconflicting1, isconflicting2), timeout=timeout, do_assert=False)
+
+    def log_connections(self):
+        connections = []
+        for idx in range(len(self.nodes)):
+            connections.append(self.nodes[idx].getconnectioncount())
+        self.log.info(f"nodes connection count: {connections}")
+
+    def run_test(self):
+        self.nodes[0].sporkupdate("SPORK_17_QUORUM_DKG_ENABLED", 0)
+        self.wait_for_sporks_same()
+
+        self.dynamically_add_masternode(evo=False)
+        self.dynamically_add_masternode(evo=True)
+        self.connect_nodes(1, 2)
+
+        self.mine_single_node_quorum()
+
+        self.log_connections()
+        assert_greater_than(len(self.nodes[0].quorum('list')['llmq_test']), 0)
+        self.log.info("We have a quorum, waiting for a ChainLock")
+        self.wait_for_chainlocked_block(self.nodes[0], self.nodes[0].getbestblockhash())
+
+        self.log.info("Send funds and wait for an InstantSend lock")
+        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
+        self.wait_for_instantlock(txid, self.nodes[0])
+
+        self.log.info("Test various options to sign messages with nodes")
+        recsig_time = self.mocktime
+
+        # Initial state
+        self.wait_for_sigs(False, False, False, 1)
+
+        # Sign a share and test the optional quorumHash parameter, no recovered sig expected yet
+        # 1. Providing an invalid quorum hash should fail and cause no changes for sigs
+        assert not self.mninfo[1].node.quorum("sign", q_type, id, msgHash, msgHash)
+        self.assert_sigs_nochange(False, False, False, 3)
+        # 2. Providing a valid quorum hash should succeed and cause no changes for sigs
+        quorumHash = self.mninfo[1].node.quorum("selectquorum", q_type, id)["quorumHash"]
+
+        # Only the quorum's single member can create a share; fall back to the
+        # other masternode if the first one is not that member
+        qnode = self.mninfo[0].node
+        try:
+            qnode.quorum("sign", q_type, id, msgHash, quorumHash, False)
+        except JSONRPCException as e:
+            if e.error['code'] == -8:  # failed to create sigShare
+                qnode = self.mninfo[1].node
+                qnode.quorum("sign", q_type, id, msgHash, quorumHash, False)
+            else:
+                raise e
+
+        self.mninfo[0].node.quorum("sign", q_type, id, msgHash)
+        self.mninfo[1].node.quorum("sign", q_type, id, msgHash)
+
+        self.wait_for_sigs(True, False, True, 15)
+        has0 = self.nodes[0].quorum("hasrecsig", q_type, id, msgHash)
+        has1 = self.nodes[1].quorum("hasrecsig", q_type, id, msgHash)
+        has2 = self.nodes[2].quorum("hasrecsig", q_type, id, msgHash)
+        assert has0 or has1 or has2
+
+        self.log.info("Test `quorum verify` rpc")
+        node = self.mninfo[0].node
+        recsig = qnode.quorum("getrecsig", q_type, id, msgHash)
+        self.log.info("Find quorum automatically")
+        height = node.getblockcount()
+        height_bad = node.getblockheader(recsig["quorumHash"])["height"]
+        hash_bad = node.getblockhash(0)
+        assert node.quorum("verify", q_type, id, msgHash, recsig["sig"])
+        assert node.quorum("verify", q_type, id, msgHash, recsig["sig"], "", height)
+        assert not node.quorum("verify", q_type, id, msgHashConflict, recsig["sig"])
+        assert not node.quorum("verify", q_type, id, msgHash, recsig["sig"], "", height_bad)
+        self.log.info("Use specific quorum")
+        assert node.quorum("verify", q_type, id, msgHash, recsig["sig"], recsig["quorumHash"])
+        assert not node.quorum("verify", q_type, id, msgHashConflict, recsig["sig"], recsig["quorumHash"])
+        assert_raises_rpc_error(-8, "quorum not found", node.quorum, "verify", q_type, id, msgHash, recsig["sig"], hash_bad)
+
+        self.log.info("Mine one more quorum, so that we have 2 active ones, nothing should change")
+        self.mine_single_node_quorum()
+        self.assert_sigs_nochange(True, False, True, 3)
+
+        self.log.info("Mine 2 more quorums, so that the one used for the recovered sig becomes inactive, nothing should change")
+        self.mine_single_node_quorum()
+        self.mine_single_node_quorum()
+        self.assert_sigs_nochange(True, False, True, 3)
+
+        self.log_connections()
+        self.log.info("Fast forward until 0.5 days before cleanup is expected, recovered sig should still be valid")
+        self.bump_mocktime(recsig_time + int(60 * 60 * 24 * 6.5) - self.mocktime, update_schedulers=False)
+        self.log.info("Cleanup starts every 5 seconds")
+        self.wait_for_sigs(True, False, True, 15)
+        self.log.info("Fast forward 1 day, recovered sig should not be valid anymore")
+        self.bump_mocktime(int(60 * 60 * 24 * 1), update_schedulers=False)
+        self.log.info("Cleanup starts every 5 seconds")
+        self.wait_for_sigs(False, False, False, 15)
+
+        self.log_connections()
+        self.log.info("Test ChainLocks and InstantSend with the new quorums")
+        block_hash = self.nodes[0].getbestblockhash()
+        self.log.info(f"Expecting ChainLock on block: {block_hash}")
+        self.wait_for_best_chainlock(self.nodes[0], block_hash)
+        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
+        self.log.info(f"Expecting InstantSend lock on tx: {txid}")
+        self.wait_for_instantlock(txid, self.nodes[0])
+
+
+if __name__ == '__main__':
+    LLMQSigningTest().main()
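The mocktime jumps near the end of the test target the recovered-sig cleanup window. A sketch of the arithmetic, assuming recovered signatures are kept for one week by default (the -maxrecsigsage default in llmq/signing.cpp; the constant name below is illustrative):

    // Assumed default from llmq/signing.cpp: recovered sigs are kept ~1 week.
    static constexpr int64_t DEFAULT_MAX_RECOVERED_SIGS_AGE = 60 * 60 * 24 * 7; // 604800 s

    // recsig_time + 6.5 days -> age 561600 s, below the limit: sig still found
    // one more day (7.5 days) -> age 648000 s, above the limit: sig cleaned up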
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index b989558443..3b675bc844 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -134,6 +134,7 @@
     'feature_llmq_evo.py', # NOTE: needs dash_hash to pass
     'feature_llmq_is_cl_conflicts.py', # NOTE: needs dash_hash to pass
     'feature_llmq_dkgerrors.py', # NOTE: needs dash_hash to pass
+    'feature_llmq_singlenode.py', # NOTE: needs dash_hash to pass
     'feature_dip4_coinbasemerkleroots.py', # NOTE: needs dash_hash to pass
     'feature_mnehf.py', # NOTE: needs dash_hash to pass
     'feature_governance.py --legacy-wallet',