diff --git a/pkg/chain/ethereum/beacon.go b/pkg/chain/ethereum/beacon.go
index 2597500659..6f37143e62 100644
--- a/pkg/chain/ethereum/beacon.go
+++ b/pkg/chain/ethereum/beacon.go
@@ -1,15 +1,8 @@
 package ethereum
 
 import (
-	"context"
-	"encoding/binary"
-	"encoding/hex"
 	"fmt"
 	"math/big"
-	"math/rand"
-	"sync"
-
-	bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare"
 
 	"github.com/ethereum/go-ethereum/crypto"
 	beaconchain "github.com/keep-network/keep-core/pkg/beacon/chain"
@@ -28,14 +21,14 @@ const (
 	RandomBeaconContractName = "RandomBeacon"
 )
 
+var errNotImplemented = fmt.Errorf("not implemented")
+
 // BeaconChain represents a beacon-specific chain handle.
 type BeaconChain struct {
 	*baseChain
 
 	randomBeacon  *contract.RandomBeacon
 	sortitionPool *contract.BeaconSortitionPool
-
-	mockRandomBeacon *mockRandomBeacon
 }
 
 // newBeaconChain constructs a new instance of the beacon-specific Ethereum
@@ -98,10 +91,9 @@ func newBeaconChain(
 	}
 
 	return &BeaconChain{
-		baseChain:        baseChain,
-		randomBeacon:     randomBeacon,
-		sortitionPool:    sortitionPool,
-		mockRandomBeacon: newMockRandomBeacon(baseChain.blockCounter),
+		baseChain:     baseChain,
+		randomBeacon:  randomBeacon,
+		sortitionPool: sortitionPool,
 	}, nil
 }
 
@@ -241,6 +233,16 @@ func (bc *BeaconChain) IsBetaOperator() (bool, error) {
 	return bc.sortitionPool.IsBetaOperator(bc.key.Address)
 }
 
+// GetOperatorID returns the ID number of the given operator address. An ID
+// number of 0 means the operator has not been allocated an ID number yet.
+func (bc *BeaconChain) GetOperatorID(
+	operatorAddress chain.Address,
+) (chain.OperatorID, error) {
+	return bc.sortitionPool.GetOperatorID(
+		common.HexToAddress(operatorAddress.String()),
+	)
+}
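The zero-ID convention described in the comment above lets callers detect operators that have never entered the sortition pool. A minimal sketch of such a check, assuming an already-constructed `*BeaconChain` handle named `bc` and a hypothetical helper name:

```go
// Sketch only: isInSortitionPool is a hypothetical helper illustrating the
// zero-ID convention; bc is assumed to be an already-constructed *BeaconChain.
func isInSortitionPool(bc *BeaconChain, operator chain.Address) (bool, error) {
	id, err := bc.GetOperatorID(operator)
	if err != nil {
		return false, fmt.Errorf("could not get operator ID: [%v]", err)
	}
	// An ID of 0 means the operator has not been allocated an ID yet,
	// i.e. it has never entered the sortition pool.
	return id != 0, nil
}
```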
+
 // SelectGroup returns the group members for the group generated by
 // the given seed. This function can return an error if the beacon chain's
 // state does not allow for group selection at the moment.
@@ -283,7 +285,7 @@ func (bc *BeaconChain) OnGroupRegistered(
 
 // TODO: Implement a real IsGroupRegistered function.
 func (bc *BeaconChain) IsGroupRegistered(groupPublicKey []byte) (bool, error) {
-	return bc.mockRandomBeacon.IsGroupRegistered(groupPublicKey)
+	return false, errNotImplemented
 }
 
 // TODO: Implement a real IsStaleGroup function.
@@ -297,7 +299,7 @@ func (bc *BeaconChain) IsStaleGroup(groupPublicKey []byte) (bool, error) {
 func (bc *BeaconChain) OnDKGStarted(
 	handler func(event *event.DKGStarted),
 ) subscription.EventSubscription {
-	return bc.mockRandomBeacon.OnDKGStarted(handler)
+	return subscription.NewEventSubscription(func() {})
 }
 
 // TODO: Implement a real SubmitDKGResult action. The current implementation
@@ -308,11 +310,7 @@ func (bc *BeaconChain) SubmitDKGResult(
 	dkgResult *beaconchain.DKGResult,
 	signatures map[beaconchain.GroupMemberIndex][]byte,
 ) error {
-	return bc.mockRandomBeacon.SubmitDKGResult(
-		participantIndex,
-		dkgResult,
-		signatures,
-	)
+	return errNotImplemented
 }
 
 // TODO: Implement a real OnDKGResultSubmitted event subscription. The current
@@ -322,7 +320,7 @@ func (bc *BeaconChain) SubmitDKGResult(
 func (bc *BeaconChain) OnDKGResultSubmitted(
 	handler func(event *event.DKGResultSubmission),
 ) subscription.EventSubscription {
-	return bc.mockRandomBeacon.OnDKGResultSubmitted(handler)
+	return subscription.NewEventSubscription(func() {})
 }
 
 // CalculateDKGResultHash calculates Keccak-256 hash of the DKG result. Operation
@@ -390,338 +388,44 @@ func (bc *BeaconChain) IsRecognized(operatorPublicKey *operator.PublicKey) (bool
 func (bc *BeaconChain) SubmitRelayEntry(
 	entry []byte,
 ) error {
-	return bc.mockRandomBeacon.SubmitRelayEntry(entry)
+	return errNotImplemented
 }
 
 // TODO: Implement a real OnRelayEntrySubmitted function.
 func (bc *BeaconChain) OnRelayEntrySubmitted(
 	handler func(entry *event.RelayEntrySubmitted),
 ) subscription.EventSubscription {
-	return bc.mockRandomBeacon.OnRelayEntrySubmitted(handler)
+	return subscription.NewEventSubscription(func() {})
 }
 
 // TODO: Implement a real OnRelayEntryRequested function.
 func (bc *BeaconChain) OnRelayEntryRequested(
 	handler func(request *event.RelayEntryRequested),
 ) subscription.EventSubscription {
-	return bc.mockRandomBeacon.OnRelayEntryRequested(handler)
+	return subscription.NewEventSubscription(func() {})
 }
 
 // TODO: Implement a real ReportRelayEntryTimeout function.
 func (bc *BeaconChain) ReportRelayEntryTimeout() error {
-	return bc.mockRandomBeacon.ReportRelayEntryTimeout()
+	return errNotImplemented
 }
 
 // TODO: Implement a real IsEntryInProgress function.
 func (bc *BeaconChain) IsEntryInProgress() (bool, error) {
-	return bc.mockRandomBeacon.IsEntryInProgress()
+	return false, nil // no chain integration yet, so no entry is in progress
 }
 
 // TODO: Implement a real CurrentRequestStartBlock function.
 func (bc *BeaconChain) CurrentRequestStartBlock() (*big.Int, error) {
-	return bc.mockRandomBeacon.CurrentRequestStartBlock()
+	return nil, errNotImplemented
 }
 
 // TODO: Implement a real CurrentRequestPreviousEntry function.
 func (bc *BeaconChain) CurrentRequestPreviousEntry() ([]byte, error) {
-	return bc.mockRandomBeacon.CurrentRequestPreviousEntry()
+	return nil, errNotImplemented
 }
 
 // TODO: Implement a real CurrentRequestGroupPublicKey function.
 func (bc *BeaconChain) CurrentRequestGroupPublicKey() ([]byte, error) {
-	return bc.mockRandomBeacon.CurrentRequestGroupPublicKey()
-}
-
-// TODO: Temporary mock that simulates the behavior of the RandomBeacon
-// contract. Should be removed eventually.
-type mockRandomBeacon struct {
-	blockCounter chain.BlockCounter
-
-	dkgResultSubmissionHandlersMutex sync.Mutex
-	dkgResultSubmissionHandlers      map[int]func(submission *event.DKGResultSubmission)
-
-	currentDkgMutex      sync.RWMutex
-	currentDkgStartBlock *big.Int
-
-	activeGroupMutex         sync.RWMutex
-	activeGroup              []byte
-	activeGroupOperableBlock *big.Int
-
-	groupsMutex sync.RWMutex
-	groups      map[string]bool
-
-	currentRequestMutex         sync.RWMutex
-	currentRequestStartBlock    *big.Int
-	currentRequestPreviousEntry []byte
-	currentRequestGroup         []byte
-
-	relayEntrySubmissionHandlersMutex sync.Mutex
-	relayEntrySubmissionHandlers      map[int]func(submission *event.RelayEntrySubmitted)
-}
-
-func newMockRandomBeacon(blockCounter chain.BlockCounter) *mockRandomBeacon {
-	return &mockRandomBeacon{
-		blockCounter:                 blockCounter,
-		dkgResultSubmissionHandlers:  make(map[int]func(submission *event.DKGResultSubmission)),
-		groups:                       make(map[string]bool),
-		relayEntrySubmissionHandlers: make(map[int]func(submission *event.RelayEntrySubmitted)),
-	}
-}
-
-func (mrb *mockRandomBeacon) OnDKGStarted(
-	handler func(event *event.DKGStarted),
-) subscription.EventSubscription {
-	ctx, cancelCtx := context.WithCancel(context.Background())
-	blocksChan := mrb.blockCounter.WatchBlocks(ctx)
-
-	go func() {
-		for {
-			select {
-			case block := <-blocksChan:
-				// Generate an event every 500th block.
-				if block%500 == 0 {
-					mrb.currentDkgMutex.Lock()
-					// The seed is keccak256(block).
-					blockBytes := make([]byte, 8)
-					binary.BigEndian.PutUint64(blockBytes, block)
-					seedBytes := crypto.Keccak256(blockBytes)
-					seed := new(big.Int).SetBytes(seedBytes)
-
-					mrb.currentDkgStartBlock = big.NewInt(int64(block))
-
-					go handler(&event.DKGStarted{
-						Seed:        seed,
-						BlockNumber: block,
-					})
-					mrb.currentDkgMutex.Unlock()
-				}
-			case <-ctx.Done():
-				return
-			}
-		}
-	}()
-
-	return subscription.NewEventSubscription(func() {
-		cancelCtx()
-	})
-}
-
-func (mrb *mockRandomBeacon) OnDKGResultSubmitted(
-	handler func(event *event.DKGResultSubmission),
-) subscription.EventSubscription {
-	mrb.dkgResultSubmissionHandlersMutex.Lock()
-	defer mrb.dkgResultSubmissionHandlersMutex.Unlock()
-
-	// #nosec G404 (insecure random number source (rand))
-	// Temporary test implementation doesn't require secure randomness.
-	handlerID := rand.Int()
-
-	mrb.dkgResultSubmissionHandlers[handlerID] = handler
-
-	return subscription.NewEventSubscription(func() {
-		mrb.dkgResultSubmissionHandlersMutex.Lock()
-		defer mrb.dkgResultSubmissionHandlersMutex.Unlock()
-
-		delete(mrb.dkgResultSubmissionHandlers, handlerID)
-	})
-}
-
-func (mrb *mockRandomBeacon) SubmitDKGResult(
-	participantIndex beaconchain.GroupMemberIndex,
-	dkgResult *beaconchain.DKGResult,
-	signatures map[beaconchain.GroupMemberIndex][]byte,
-) error {
-	mrb.dkgResultSubmissionHandlersMutex.Lock()
-	defer mrb.dkgResultSubmissionHandlersMutex.Unlock()
-
-	mrb.currentDkgMutex.Lock()
-	defer mrb.currentDkgMutex.Unlock()
-
-	mrb.activeGroupMutex.Lock()
-	defer mrb.activeGroupMutex.Unlock()
-
-	mrb.groupsMutex.Lock()
-	defer mrb.groupsMutex.Unlock()
-
-	// Abort if there is no DKG in progress. This check is needed to handle a
-	// situation in which two operators of the same client attempt to submit
-	// the DKG result.
-	if mrb.currentDkgStartBlock == nil {
-		return nil
-	}
-
-	blockNumber, err := mrb.blockCounter.CurrentBlock()
-	if err != nil {
-		return fmt.Errorf("failed to get the current block")
-	}
-
-	for _, handler := range mrb.dkgResultSubmissionHandlers {
-		go func(handler func(*event.DKGResultSubmission)) {
-			handler(&event.DKGResultSubmission{
-				MemberIndex:    uint32(participantIndex),
-				GroupPublicKey: dkgResult.GroupPublicKey,
-				Misbehaved:     dkgResult.Misbehaved,
-				BlockNumber:    blockNumber,
-			})
-		}(handler)
-	}
-
-	mrb.activeGroup = dkgResult.GroupPublicKey
-	mrb.activeGroupOperableBlock = new(big.Int).Add(
-		mrb.currentDkgStartBlock,
-		big.NewInt(150),
-	)
-	mrb.currentDkgStartBlock = nil
-	mrb.groups[hex.EncodeToString(dkgResult.GroupPublicKey)] = true
-
-	return nil
-}
-
-func (mrb *mockRandomBeacon) OnRelayEntryRequested(
-	handler func(request *event.RelayEntryRequested),
-) subscription.EventSubscription {
-	ctx, cancelCtx := context.WithCancel(context.Background())
-	blocksChan := mrb.blockCounter.WatchBlocks(ctx)
-
-	go func() {
-		for {
-			select {
-			case block := <-blocksChan:
-				// Generate an event every 50 block, if there is no other
-				// request in progress.
-				if block%50 == 0 {
-					mrb.activeGroupMutex.RLock()
-					mrb.currentRequestMutex.Lock()
-
-					if mrb.currentRequestStartBlock == nil && len(mrb.activeGroup) > 0 {
-						// If the active group is ready to receive the request.
-						if big.NewInt(int64(block)).Cmp(mrb.activeGroupOperableBlock) >= 0 {
-							blockBytes := make([]byte, 8)
-							binary.BigEndian.PutUint64(blockBytes, block)
-							blockHashBytes := crypto.Keccak256(blockBytes)
-							blockHash := new(big.Int).SetBytes(blockHashBytes)
-							previousEntry := new(bn256.G1).ScalarBaseMult(blockHash)
-
-							mrb.currentRequestStartBlock = big.NewInt(int64(block))
-							mrb.currentRequestPreviousEntry = previousEntry.Marshal()
-							mrb.currentRequestGroup = mrb.activeGroup
-
-							go handler(&event.RelayEntryRequested{
-								PreviousEntry:  mrb.currentRequestPreviousEntry,
-								GroupPublicKey: mrb.currentRequestGroup,
-								BlockNumber:    mrb.currentRequestStartBlock.Uint64(),
-							})
-						}
-					}
-
-					mrb.currentRequestMutex.Unlock()
-					mrb.activeGroupMutex.RUnlock()
-				}
-			case <-ctx.Done():
-				return
-			}
-		}
-	}()
-
-	return subscription.NewEventSubscription(func() {
-		cancelCtx()
-	})
-}
-
-func (mrb *mockRandomBeacon) OnRelayEntrySubmitted(
-	handler func(entry *event.RelayEntrySubmitted),
-) subscription.EventSubscription {
-	mrb.relayEntrySubmissionHandlersMutex.Lock()
-	defer mrb.relayEntrySubmissionHandlersMutex.Unlock()
-
-	// #nosec G404 (insecure random number source (rand))
-	// Temporary test implementation doesn't require secure randomness.
-	handlerID := rand.Int()
-
-	mrb.relayEntrySubmissionHandlers[handlerID] = handler
-
-	return subscription.NewEventSubscription(func() {
-		mrb.relayEntrySubmissionHandlersMutex.Lock()
-		defer mrb.relayEntrySubmissionHandlersMutex.Unlock()
-
-		delete(mrb.relayEntrySubmissionHandlers, handlerID)
-	})
-}
-
-func (mrb *mockRandomBeacon) SubmitRelayEntry(
-	entry []byte,
-) error {
-	mrb.relayEntrySubmissionHandlersMutex.Lock()
-	defer mrb.relayEntrySubmissionHandlersMutex.Unlock()
-
-	mrb.currentRequestMutex.Lock()
-	defer mrb.currentRequestMutex.Unlock()
-
-	// Abort if there is no request in progress.
-	if mrb.currentRequestStartBlock == nil {
-		return nil
-	}
-
-	blockNumber, err := mrb.blockCounter.CurrentBlock()
-	if err != nil {
-		return fmt.Errorf("failed to get block counter: [%v]", err)
-	}
-
-	for _, handler := range mrb.relayEntrySubmissionHandlers {
-		go func(handler func(submitted *event.RelayEntrySubmitted)) {
-			handler(&event.RelayEntrySubmitted{BlockNumber: blockNumber})
-		}(handler)
-	}
-
-	mrb.currentRequestStartBlock = nil
-	mrb.currentRequestPreviousEntry = nil
-	mrb.currentRequestGroup = nil
-
-	return nil
-}
-
-func (mrb *mockRandomBeacon) ReportRelayEntryTimeout() error {
-	mrb.currentRequestMutex.Lock()
-	defer mrb.currentRequestMutex.Unlock()
-
-	// Set the current request start block to nil, so that a new relay entry
-	// request can begin.
-	mrb.currentRequestStartBlock = nil
-
-	return nil
-}
-
-func (mrb *mockRandomBeacon) IsEntryInProgress() (bool, error) {
-	mrb.currentRequestMutex.RLock()
-	defer mrb.currentRequestMutex.RUnlock()
-
-	return mrb.currentRequestStartBlock != nil, nil
-}
-
-func (mrb *mockRandomBeacon) CurrentRequestStartBlock() (*big.Int, error) {
-	mrb.currentRequestMutex.RLock()
-	defer mrb.currentRequestMutex.RUnlock()
-
-	return mrb.currentRequestStartBlock, nil
-}
-
-func (mrb *mockRandomBeacon) CurrentRequestPreviousEntry() ([]byte, error) {
-	mrb.currentRequestMutex.RLock()
-	defer mrb.currentRequestMutex.RUnlock()
-
-	return mrb.currentRequestPreviousEntry, nil
-}
-
-func (mrb *mockRandomBeacon) CurrentRequestGroupPublicKey() ([]byte, error) {
-	mrb.currentRequestMutex.RLock()
-	defer mrb.currentRequestMutex.RUnlock()
-
-	return mrb.currentRequestGroup, nil
-}
-
-func (mrb *mockRandomBeacon) IsGroupRegistered(groupPublicKey []byte) (bool, error) {
-	mrb.groupsMutex.RLock()
-	defer mrb.groupsMutex.RUnlock()
-
-	return mrb.groups[hex.EncodeToString(groupPublicKey)], nil
+	return nil, errNotImplemented
 }
diff --git a/pkg/chain/ethereum/tbtc.go b/pkg/chain/ethereum/tbtc.go
index 686ff411ab..9f99230656 100644
--- a/pkg/chain/ethereum/tbtc.go
+++ b/pkg/chain/ethereum/tbtc.go
@@ -2,18 +2,25 @@ package ethereum
 
 import (
 	"context"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/sha256"
 	"encoding/binary"
 	"fmt"
 	"math/big"
-	"math/rand"
-	"sync"
+	"reflect"
+	"sort"
 
+	"github.com/ethereum/go-ethereum/accounts/abi"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
 
 	"github.com/keep-network/keep-common/pkg/chain/ethereum"
 	"github.com/keep-network/keep-core/pkg/chain"
-	"github.com/keep-network/keep-core/pkg/chain/ethereum/ecdsa/gen/contract"
+	ecdsaabi "github.com/keep-network/keep-core/pkg/chain/ethereum/ecdsa/gen/abi"
+	ecdsacontract "github.com/keep-network/keep-core/pkg/chain/ethereum/ecdsa/gen/contract"
+	tbtccontract "github.com/keep-network/keep-core/pkg/chain/ethereum/tbtc/gen/contract"
+	"github.com/keep-network/keep-core/pkg/internal/byteutils"
 	"github.com/keep-network/keep-core/pkg/operator"
 	"github.com/keep-network/keep-core/pkg/protocol/group"
 	"github.com/keep-network/keep-core/pkg/subscription"
@@ -23,6 +30,8 @@ import (
 
 // Definitions of contract names.
 const (
+	// TODO: The WalletRegistry address is taken from the Bridge contract.
+	//       Remove the possibility of passing it through the config.
 	WalletRegistryContractName = "WalletRegistry"
 	BridgeContractName         = "Bridge"
 )
@@ -31,10 +40,9 @@ const (
 type TbtcChain struct {
 	*baseChain
 
-	walletRegistry *contract.WalletRegistry
-
-	mockWalletRegistry *mockWalletRegistry
-	sortitionPool      *contract.EcdsaSortitionPool
+	bridge         *tbtccontract.Bridge
+	walletRegistry *ecdsacontract.WalletRegistry
+	sortitionPool  *ecdsacontract.EcdsaSortitionPool
 }
 
 // NewTbtcChain constructs a new instance of the TBTC-specific Ethereum
@@ -43,24 +51,45 @@ func newTbtcChain(
 	config ethereum.Config,
 	baseChain *baseChain,
 ) (*TbtcChain, error) {
-	// FIXME: Use `WalletRegistryContractName` instead of `RandomBeaconContractName`.
-	// DKG for the WalletRegistry depends on the RandomBeacon group creation.
-	// Currently the client doesn't publish a generated group to the chain
-	// as it works against a mocked chain implementation. Without a Beacon group
-	// published to the chain, the WalletRegistry's DKG cannot start. As a workaround
-	// for the first stage of the Chaosnet we use the RandomBeacon's address,
-	// as the client only wants to get to the sortition pool to select a group.
-	walletRegistryAddress, err := config.ContractAddress(RandomBeaconContractName)
+	bridgeAddress, err := config.ContractAddress(BridgeContractName)
 	if err != nil {
 		return nil, fmt.Errorf(
 			"failed to resolve %s contract address: [%v]",
-			WalletRegistryContractName,
+			BridgeContractName,
+			err,
+		)
+	}
+
+	bridge, err :=
+		tbtccontract.NewBridge(
+			bridgeAddress,
+			baseChain.chainID,
+			baseChain.key,
+			baseChain.client,
+			baseChain.nonceManager,
+			baseChain.miningWaiter,
+			baseChain.blockCounter,
+			baseChain.transactionMutex,
+		)
+	if err != nil {
+		return nil, fmt.Errorf(
+			"failed to attach to Bridge contract: [%v]",
 			err,
 		)
 	}
 
+	references, err := bridge.ContractReferences()
+	if err != nil {
+		return nil, fmt.Errorf(
+			"failed to get contract references from Bridge: [%v]",
+			err,
+		)
+	}
+
+	walletRegistryAddress := references.EcdsaWalletRegistry
+
 	walletRegistry, err :=
-		contract.NewWalletRegistry(
+		ecdsacontract.NewWalletRegistry(
 			walletRegistryAddress,
 			baseChain.chainID,
 			baseChain.key,
@@ -86,7 +115,7 @@ func newTbtcChain(
 	}
 
 	sortitionPool, err :=
-		contract.NewEcdsaSortitionPool(
+		ecdsacontract.NewEcdsaSortitionPool(
 			sortitionPoolAddress,
 			baseChain.chainID,
 			baseChain.key,
@@ -104,26 +133,13 @@ func newTbtcChain(
 	}
 
 	return &TbtcChain{
-		baseChain:          baseChain,
-		walletRegistry:     walletRegistry,
-		mockWalletRegistry: newMockWalletRegistry(baseChain.blockCounter),
-		sortitionPool:      sortitionPool,
+		baseChain:      baseChain,
+		bridge:         bridge,
+		walletRegistry: walletRegistry,
+		sortitionPool:  sortitionPool,
 	}, nil
 }
 
-// GetConfig returns the expected configuration of the TBTC module.
-func (tc *TbtcChain) GetConfig() *tbtc.ChainConfig {
-	groupSize := 100
-	groupQuorum := 90
-	honestThreshold := 51
-
-	return &tbtc.ChainConfig{
-		GroupSize:       groupSize,
-		GroupQuorum:     groupQuorum,
-		HonestThreshold: honestThreshold,
-	}
-}
-
 // Staking returns address of the TokenStaking contract the WalletRegistry is
 // connected to.
 func (tc *TbtcChain) Staking() (chain.Address, error) {
@@ -293,16 +309,21 @@ func (tc *TbtcChain) IsBetaOperator() (bool, error) {
 	return tc.sortitionPool.IsBetaOperator(tc.key.Address)
 }
 
-// SelectGroup returns the group members for the group generated by
-// the given seed. This function can return an error if the beacon chain's
-// state does not allow for group selection at the moment.
-func (tc *TbtcChain) SelectGroup(seed *big.Int) (chain.Addresses, error) {
-	groupSize := big.NewInt(int64(tc.GetConfig().GroupSize))
-	seedBytes := [32]byte{}
-	seed.FillBytes(seedBytes[:])
+// GetOperatorID returns the ID number of the given operator address. An ID
+// number of 0 means the operator has not been allocated an ID number yet.
+func (tc *TbtcChain) GetOperatorID(
+	operatorAddress chain.Address,
+) (chain.OperatorID, error) {
+	return tc.sortitionPool.GetOperatorID(
+		common.HexToAddress(operatorAddress.String()),
+	)
+}
 
-	// TODO: Replace with a call to the WalletRegistry.selectGroup function.
-	operatorsIDs, err := tc.sortitionPool.SelectGroup(groupSize, seedBytes)
+// SelectGroup returns the group members selected for the current group
+// selection. The function returns an error if the chain's state does not allow
+// for group selection at the moment.
+func (tc *TbtcChain) SelectGroup() (*tbtc.GroupSelectionResult, error) {
+	operatorsIDs, err := tc.walletRegistry.SelectGroup()
 	if err != nil {
 		return nil, fmt.Errorf(
 			"cannot select group in the sortition pool: [%v]",
@@ -318,266 +339,564 @@ func (tc *TbtcChain) SelectGroup(seed *big.Int) (chain.Addresses, error) {
 		)
 	}
 
-	result := make([]chain.Address, len(operatorsAddresses))
-	for i := range result {
-		result[i] = chain.Address(operatorsAddresses[i].String())
+	// This should never happen as the contract guarantees equal lengths, but
+	// check just in case.
+	if len(operatorsIDs) != len(operatorsAddresses) {
+		return nil, fmt.Errorf("operators IDs and addresses mismatch")
+	}
+
+	ids := make([]chain.OperatorID, len(operatorsIDs))
+	addresses := make([]chain.Address, len(operatorsIDs))
+	for i := range ids {
+		ids[i] = operatorsIDs[i]
+		addresses[i] = chain.Address(operatorsAddresses[i].String())
 	}
 
-	return result, nil
+	return &tbtc.GroupSelectionResult{
+		OperatorsIDs:       ids,
+		OperatorsAddresses: addresses,
+	}, nil
 }
 
-// TODO: Implement a real OnDKGStarted function.
 func (tc *TbtcChain) OnDKGStarted(
 	handler func(event *tbtc.DKGStartedEvent),
 ) subscription.EventSubscription {
-	return tc.mockWalletRegistry.OnDKGStarted(handler)
+	onEvent := func(
+		seed *big.Int,
+		blockNumber uint64,
+	) {
+		handler(&tbtc.DKGStartedEvent{
+			Seed:        seed,
+			BlockNumber: blockNumber,
+		})
+	}
+
+	return tc.walletRegistry.DkgStartedEvent(nil, nil).OnEvent(onEvent)
 }
 
-// TODO: Implement a real OnDKGResultSubmitted event subscription. The current
-// implementation just pipes the DKG submission event generated within
-// SubmitDKGResult to the handlers registered in the
-// dkgResultSubmissionHandlers map.
 func (tc *TbtcChain) OnDKGResultSubmitted(
 	handler func(event *tbtc.DKGResultSubmittedEvent),
 ) subscription.EventSubscription {
-	return tc.mockWalletRegistry.OnDKGResultSubmitted(handler)
+	onEvent := func(
+		resultHash [32]byte,
+		seed *big.Int,
+		result ecdsaabi.EcdsaDkgResult,
+		blockNumber uint64,
+	) {
+		tbtcResult, err := convertDkgResultFromAbiType(result)
+		if err != nil {
+			logger.Errorf(
+				"unexpected DKG result in DKGResultSubmitted event: [%v]",
+				err,
+			)
+			return
+		}
+
+		handler(&tbtc.DKGResultSubmittedEvent{
+			Seed:        seed,
+			ResultHash:  resultHash,
+			Result:      tbtcResult,
+			BlockNumber: blockNumber,
+		})
+	}
+
+	return tc.walletRegistry.
+		DkgResultSubmittedEvent(nil, nil, nil).
+		OnEvent(onEvent)
 }
 
-// TODO: Implement a real SubmitDKGResult action. The current implementation
-// just creates and pipes the DKG submission event to the handlers
-// registered in the dkgResultSubmissionHandlers map.
-func (tc *TbtcChain) SubmitDKGResult(
-	memberIndex group.MemberIndex,
-	result *dkg.Result,
-	signatures map[group.MemberIndex][]byte,
-) error {
-	return tc.mockWalletRegistry.SubmitDKGResult(
-		memberIndex,
-		result,
-		signatures,
+// convertDkgResultFromAbiType converts the WalletRegistry-specific DKG
+// result to the format applicable for the TBTC application.
+func convertDkgResultFromAbiType(
+	result ecdsaabi.EcdsaDkgResult,
+) (*tbtc.DKGChainResult, error) {
+	if err := validateMemberIndex(result.SubmitterMemberIndex); err != nil {
+		return nil, fmt.Errorf(
+			"unexpected submitter member index: [%v]",
+			err,
+		)
+	}
+
+	signingMembersIndexes := make(
+		[]group.MemberIndex,
+		len(result.SigningMembersIndices),
 	)
+	for i, memberIndex := range result.SigningMembersIndices {
+		if err := validateMemberIndex(memberIndex); err != nil {
+			return nil, fmt.Errorf(
+				"unexpected signing member index: [%v]",
+				err,
+			)
+		}
+
+		signingMembersIndexes[i] = group.MemberIndex(memberIndex.Uint64())
+	}
+
+	return &tbtc.DKGChainResult{
+		SubmitterMemberIndex:     group.MemberIndex(result.SubmitterMemberIndex.Uint64()),
+		GroupPublicKey:           result.GroupPubKey,
+		MisbehavedMembersIndexes: result.MisbehavedMembersIndices,
+		Signatures:               result.Signatures,
+		SigningMembersIndexes:    signingMembersIndexes,
+		Members:                  result.Members,
+		MembersHash:              result.MembersHash,
+	}, nil
 }
 
-// TODO: Implement a real GetDKGState function.
-func (tc *TbtcChain) GetDKGState() (tbtc.DKGState, error) {
-	return tc.mockWalletRegistry.GetDKGState()
+// convertDkgResultToAbiType converts the TBTC-specific DKG result to
+// the format applicable for the WalletRegistry ABI.
+func convertDkgResultToAbiType(
+	result *tbtc.DKGChainResult,
+) ecdsaabi.EcdsaDkgResult {
+	signingMembersIndices := make([]*big.Int, len(result.SigningMembersIndexes))
+	for i, memberIndex := range result.SigningMembersIndexes {
+		signingMembersIndices[i] = big.NewInt(int64(memberIndex))
+	}
+
+	return ecdsaabi.EcdsaDkgResult{
+		SubmitterMemberIndex:     big.NewInt(int64(result.SubmitterMemberIndex)),
+		GroupPubKey:              result.GroupPublicKey,
+		MisbehavedMembersIndices: result.MisbehavedMembersIndexes,
+		Signatures:               result.Signatures,
+		SigningMembersIndices:    signingMembersIndices,
+		Members:                  result.Members,
+		MembersHash:              result.MembersHash,
+	}
 }
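Taken together, the two converters above are inverses for results with valid member indexes, so a round trip through the ABI representation reproduces the original chain result. A minimal sketch, assuming `dkgResult` is an already-assembled `*tbtc.DKGChainResult`:

```go
// Sketch only: round-trip a DKG result through the ABI representation.
// dkgResult is an assumed, already-assembled *tbtc.DKGChainResult.
abiResult := convertDkgResultToAbiType(dkgResult)
roundTripped, err := convertDkgResultFromAbiType(abiResult)
if err != nil {
	// Handle the error; it can only occur for out-of-range member indexes.
}
_ = roundTripped // expected to be field-by-field equal to dkgResult
```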
 
-// CalculateDKGResultHash calculates Keccak-256 hash of the DKG result. Operation
-// is performed off-chain.
-//
-// It first encodes the result using solidity ABI and then calculates Keccak-256
-// hash over it. This corresponds to the DKG result hash calculation on-chain.
-// Hashes calculated off-chain and on-chain must always match.
-func (tc *TbtcChain) CalculateDKGResultHash(
-	result *dkg.Result,
-) (dkg.ResultHash, error) {
-	groupPublicKeyBytes, err := result.GroupPublicKeyBytes()
-	if err != nil {
-		return dkg.ResultHash{}, err
+func validateMemberIndex(chainMemberIndex *big.Int) error {
+	maxMemberIndex := big.NewInt(group.MaxMemberIndex)
+	if chainMemberIndex.Cmp(maxMemberIndex) > 0 {
+		return fmt.Errorf("invalid member index value: [%v]", chainMemberIndex)
 	}
 
-	// Encode DKG result to the format matched with Solidity keccak256(abi.encodePacked(...))
-	// TODO: Adjust the message structure to the format needed by the wallet
-	//       registry contract:
-	//       \x19Ethereum signed message:\n${keccak256(groupPubKey,misbehavedIndices,startBlock)}
-	hash := crypto.Keccak256(groupPublicKeyBytes, result.MisbehavedMembersIndexes())
-	return dkg.ResultHashFromBytes(hash)
+	return nil
 }
 
-// TODO: This is a temporary function that should be removed once the client
-// is integrated with real on-chain contracts.
-func (tc *TbtcChain) OnSignatureRequested(
-	handler func(event *tbtc.SignatureRequestedEvent),
+func (tc *TbtcChain) OnDKGResultChallenged(
+	handler func(event *tbtc.DKGResultChallengedEvent),
 ) subscription.EventSubscription {
-	return tc.mockWalletRegistry.OnSignatureRequested(handler)
+	onEvent := func(
+		resultHash [32]byte,
+		challenger common.Address,
+		reason string,
+		blockNumber uint64,
+	) {
+		handler(&tbtc.DKGResultChallengedEvent{
+			ResultHash:  resultHash,
+			Challenger:  chain.Address(challenger.Hex()),
+			Reason:      reason,
+			BlockNumber: blockNumber,
+		})
+	}
+
+	return tc.walletRegistry.
+		DkgResultChallengedEvent(nil, nil, nil).
+		OnEvent(onEvent)
 }
 
-// TODO: Temporary mock that simulates the behavior of the WalletRegistry
-//
-//	contract. Should be removed eventually.
-type mockWalletRegistry struct {
-	blockCounter chain.BlockCounter
+func (tc *TbtcChain) OnDKGResultApproved(
+	handler func(event *tbtc.DKGResultApprovedEvent),
+) subscription.EventSubscription {
+	onEvent := func(
+		resultHash [32]byte,
+		approver common.Address,
+		blockNumber uint64,
+	) {
+		handler(&tbtc.DKGResultApprovedEvent{
+			ResultHash:  resultHash,
+			Approver:    chain.Address(approver.Hex()),
+			BlockNumber: blockNumber,
+		})
+	}
+
+	return tc.walletRegistry.
+		DkgResultApprovedEvent(nil, nil, nil).
+		OnEvent(onEvent)
+}
 
-	dkgResultSubmissionHandlersMutex sync.Mutex
-	dkgResultSubmissionHandlers      map[int]func(submission *tbtc.DKGResultSubmittedEvent)
+// AssembleDKGResult assembles the DKG chain result according to the rules
+// expected by the given chain.
+func (tc *TbtcChain) AssembleDKGResult(
+	submitterMemberIndex group.MemberIndex,
+	groupPublicKey *ecdsa.PublicKey,
+	operatingMembersIndexes []group.MemberIndex,
+	misbehavedMembersIndexes []group.MemberIndex,
+	signatures map[group.MemberIndex][]byte,
+	groupSelectionResult *tbtc.GroupSelectionResult,
+) (*tbtc.DKGChainResult, error) {
+	serializedGroupPublicKey, err := convertPubKeyToChainFormat(groupPublicKey)
+	if err != nil {
+		return nil, fmt.Errorf(
+			"could not convert group public key to chain format: [%v]",
+			err,
+		)
+	}
 
-	currentDkgMutex      sync.RWMutex
-	currentDkgStartBlock *big.Int
+	// Sort misbehavedMembersIndexes slice in ascending order as expected
+	// by the on-chain contract.
+	sort.Slice(misbehavedMembersIndexes[:], func(i, j int) bool {
+		return misbehavedMembersIndexes[i] < misbehavedMembersIndexes[j]
+	})
 
-	activeWalletMutex         sync.RWMutex
-	activeWallet              []byte
-	activeWalletOperableBlock *big.Int
+	signingMemberIndices, signatureBytes, err := convertSignaturesToChainFormat(
+		signatures,
+	)
+	if err != nil {
+		return nil, fmt.Errorf(
+			"could not convert signatures to chain format: [%v]",
+			err,
+		)
+	}
+
+	// Sort the operatingMembersIndexes slice in ascending order as the slice
+	// of operators IDs used to compute the members hash is expected to be
+	// ordered in the same way.
+	sort.Slice(operatingMembersIndexes[:], func(i, j int) bool {
+		return operatingMembersIndexes[i] < operatingMembersIndexes[j]
+	})
+
+	operatingOperatorsIDs := make([]chain.OperatorID, len(operatingMembersIndexes))
+	for i, operatingMemberIndex := range operatingMembersIndexes {
+		operatingOperatorsIDs[i] =
+			groupSelectionResult.OperatorsIDs[operatingMemberIndex-1]
+	}
+
+	membersHash, err := computeOperatorsIDsHash(operatingOperatorsIDs)
+	if err != nil {
+		return nil, fmt.Errorf("could not compute members hash: [%v]", err)
+	}
+
+	return &tbtc.DKGChainResult{
+		SubmitterMemberIndex:     submitterMemberIndex,
+		GroupPublicKey:           serializedGroupPublicKey[:],
+		MisbehavedMembersIndexes: misbehavedMembersIndexes,
+		Signatures:               signatureBytes,
+		SigningMembersIndexes:    signingMemberIndices,
+		Members:                  groupSelectionResult.OperatorsIDs,
+		MembersHash:              membersHash,
+	}, nil
 }
 
-func newMockWalletRegistry(blockCounter chain.BlockCounter) *mockWalletRegistry {
-	return &mockWalletRegistry{
-		blockCounter: blockCounter,
-		dkgResultSubmissionHandlers: make(
-			map[int]func(submission *tbtc.DKGResultSubmittedEvent),
-		),
+func (tc *TbtcChain) SubmitDKGResult(
+	dkgResult *tbtc.DKGChainResult,
+) error {
+	_, err := tc.walletRegistry.SubmitDkgResult(
+		convertDkgResultToAbiType(dkgResult),
+	)
+
+	return err
+}
+
+// computeOperatorsIDsHash computes the keccak256 hash for the given list
+// of operators IDs.
+func computeOperatorsIDsHash(operatorsIDs chain.OperatorIDs) ([32]byte, error) {
+	uint32SliceType, err := abi.NewType("uint32[]", "uint32[]", nil)
+	if err != nil {
+		return [32]byte{}, err
 	}
+
+	bytes, err := abi.Arguments{{Type: uint32SliceType}}.Pack(operatorsIDs)
+	if err != nil {
+		return [32]byte{}, err
+	}
+
+	return crypto.Keccak256Hash(bytes), nil
 }
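The members hash is simply keccak256 over the ABI encoding of the operator IDs as `uint32[]`. A usage sketch mirroring the vector pinned in `TestComputeOperatorsIDsHash` further down in this diff:

```go
// Sketch only: keccak256 over abi.encode(uint32[] operatorsIDs).
operatorsIDs := chain.OperatorIDs{5, 1, 55, 45435534, 33, 345, 23, 235, 3333, 2}
membersHash, err := computeOperatorsIDsHash(operatorsIDs)
if err != nil {
	// handle error
}
// hex.EncodeToString(membersHash[:]) is expected to be
// "8cd41effd4ee91b56d6b2f836efdcac11ab1ef2ae228e348814d0e6c2966d01e".
_ = membersHash
```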
 
-func (mwr *mockWalletRegistry) OnDKGStarted(
-	handler func(event *tbtc.DKGStartedEvent),
-) subscription.EventSubscription {
-	ctx, cancelCtx := context.WithCancel(context.Background())
-	blocksChan := mwr.blockCounter.WatchBlocks(ctx)
+// convertSignaturesToChainFormat converts the signatures map to two slices.
+// The first slice contains the members' indices from the map, sorted in
+// ascending order as required by the contract. The second slice holds the
+// concatenated signatures. Signatures and member indices are returned in
+// matching order. Each signature must be exactly 65 bytes long.
+func convertSignaturesToChainFormat(
+	signatures map[group.MemberIndex][]byte,
+) ([]group.MemberIndex, []byte, error) {
+	membersIndexes := make([]group.MemberIndex, 0)
+	for memberIndex := range signatures {
+		membersIndexes = append(membersIndexes, memberIndex)
+	}
 
-	go func() {
-		for {
-			select {
-			case block := <-blocksChan:
-				// Generate an event every 1000th block starting from block 250.
-				// The shift is done in order to avoid overlapping with beacon
-				// DKG test loop.
-				shift := uint64(250)
-				if block >= shift && (block-shift)%1000 == 0 {
-					// The seed is keccak256(block).
-					blockBytes := make([]byte, 8)
-					binary.BigEndian.PutUint64(blockBytes, block)
-					seedBytes := crypto.Keccak256(blockBytes)
-					seed := new(big.Int).SetBytes(seedBytes)
-
-					mwr.currentDkgStartBlock = big.NewInt(int64(block))
-
-					go handler(&tbtc.DKGStartedEvent{
-						Seed:        seed,
-						BlockNumber: block,
-					})
-				}
-			case <-ctx.Done():
-				return
-			}
+	sort.Slice(membersIndexes, func(i, j int) bool {
+		return membersIndexes[i] < membersIndexes[j]
+	})
+
+	signatureSize := 65
+
+	var signaturesSlice []byte
+
+	for _, memberIndex := range membersIndexes {
+		signature := signatures[memberIndex]
+
+		if len(signature) != signatureSize {
+			return nil, nil, fmt.Errorf(
+				"invalid signature size for member [%v] got [%d] bytes but [%d] bytes required",
+				memberIndex,
+				len(signature),
+				signatureSize,
+			)
 		}
-	}()
 
-	return subscription.NewEventSubscription(func() {
-		cancelCtx()
-	})
+		signaturesSlice = append(signaturesSlice, signature...)
+	}
+
+	return membersIndexes, signaturesSlice, nil
 }
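For illustration, a small sketch of the conversion: indices come back sorted in ascending order, and the second slice is the corresponding 65-byte signatures concatenated in the same order. The signatures below are dummy placeholders and `bytes.Repeat` from the standard library is assumed to be imported:

```go
// Sketch only: dummy 65-byte signatures keyed by member index.
signatures := map[group.MemberIndex][]byte{
	3: bytes.Repeat([]byte{0x03}, 65),
	1: bytes.Repeat([]byte{0x01}, 65),
	2: bytes.Repeat([]byte{0x02}, 65),
}

indices, packed, err := convertSignaturesToChainFormat(signatures)
if err != nil {
	// handle error
}
// indices == []group.MemberIndex{1, 2, 3}
// len(packed) == 3*65; packed[0:65] is member 1's signature, and so on.
_, _ = indices, packed
```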
 
-func (mwr *mockWalletRegistry) OnDKGResultSubmitted(
-	handler func(event *tbtc.DKGResultSubmittedEvent),
-) subscription.EventSubscription {
-	mwr.dkgResultSubmissionHandlersMutex.Lock()
-	defer mwr.dkgResultSubmissionHandlersMutex.Unlock()
+// convertPubKeyToChainFormat takes the X and Y coordinates of a signer's
+// public key and concatenates them into a 64-byte array. If either coordinate
+// is shorter than 32 bytes, it is left-padded with zeros.
+func convertPubKeyToChainFormat(publicKey *ecdsa.PublicKey) ([64]byte, error) {
+	var serialized [64]byte
 
-	// #nosec G404 (insecure random number source (rand))
-	// Temporary test implementation doesn't require secure randomness.
-	handlerID := rand.Int()
+	x, err := byteutils.LeftPadTo32Bytes(publicKey.X.Bytes())
+	if err != nil {
+		return serialized, err
+	}
 
-	mwr.dkgResultSubmissionHandlers[handlerID] = handler
+	y, err := byteutils.LeftPadTo32Bytes(publicKey.Y.Bytes())
+	if err != nil {
+		return serialized, err
+	}
 
-	return subscription.NewEventSubscription(func() {
-		mwr.dkgResultSubmissionHandlersMutex.Lock()
-		defer mwr.dkgResultSubmissionHandlersMutex.Unlock()
+	serializedBytes := append(x, y...)
 
-		delete(mwr.dkgResultSubmissionHandlers, handlerID)
-	})
+	copy(serialized[:], serializedBytes)
+
+	return serialized, nil
 }
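A short sketch of the padding behavior, using artificially small coordinates; the result is always 32 bytes of X followed by 32 bytes of Y, with no 0x04 prefix:

```go
// Sketch only: short coordinates are left-padded with zeros.
pubKey := &ecdsa.PublicKey{
	X: big.NewInt(0xabcdef), // 3-byte X coordinate
	Y: big.NewInt(0x123456), // 3-byte Y coordinate
}

serialized, err := convertPubKeyToChainFormat(pubKey)
if err != nil {
	// handle error
}
// serialized[29:32] == {0xab, 0xcd, 0xef}, preceded by 29 zero bytes;
// serialized[61:64] == {0x12, 0x34, 0x56}, preceded by 29 zero bytes.
_ = serialized
```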
 
-func (mwr *mockWalletRegistry) SubmitDKGResult(
-	memberIndex group.MemberIndex,
-	result *dkg.Result,
-	signatures map[group.MemberIndex][]byte,
-) error {
-	mwr.dkgResultSubmissionHandlersMutex.Lock()
-	defer mwr.dkgResultSubmissionHandlersMutex.Unlock()
+func (tc *TbtcChain) GetDKGState() (tbtc.DKGState, error) {
+	walletCreationState, err := tc.walletRegistry.GetWalletCreationState()
+	if err != nil {
+		return 0, err
+	}
 
-	mwr.currentDkgMutex.Lock()
-	defer mwr.currentDkgMutex.Unlock()
+	var state tbtc.DKGState
+
+	switch walletCreationState {
+	case 0:
+		state = tbtc.Idle
+	case 1:
+		state = tbtc.AwaitingSeed
+	case 2:
+		state = tbtc.AwaitingResult
+	case 3:
+		state = tbtc.Challenge
+	default:
+		err = fmt.Errorf(
+			"unexpected wallet creation state: [%v]",
+			walletCreationState,
+		)
+	}
 
-	mwr.activeWalletMutex.Lock()
-	defer mwr.activeWalletMutex.Unlock()
+	return state, err
+}
+
+// CalculateDKGResultSignatureHash calculates a 32-byte hash that is used
+// to produce a signature supporting the given groupPublicKey computed
+// as result of the given DKG process. The misbehavedMembersIndexes parameter
+// should contain indexes of members that were considered as misbehaved
+// during the DKG process. The startBlock argument is the block at which
+// the given DKG process started.
+func (tc *TbtcChain) CalculateDKGResultSignatureHash(
+	groupPublicKey *ecdsa.PublicKey,
+	misbehavedMembersIndexes []group.MemberIndex,
+	startBlock uint64,
+) (dkg.ResultSignatureHash, error) {
+	groupPublicKeyBytes := elliptic.Marshal(
+		groupPublicKey.Curve,
+		groupPublicKey.X,
+		groupPublicKey.Y,
+	)
+	// Crop the 04 prefix as the calculateDKGResultSignatureHash function
+	// expects an unprefixed 64-byte public key.
+	unprefixedGroupPublicKeyBytes := groupPublicKeyBytes[1:]
+
+	// Sort misbehavedMembersIndexes slice in ascending order as expected
+	// by the calculateDKGResultSignatureHash function.
+	sort.Slice(misbehavedMembersIndexes[:], func(i, j int) bool {
+		return misbehavedMembersIndexes[i] < misbehavedMembersIndexes[j]
+	})
+
+	return calculateDKGResultSignatureHash(
+		tc.chainID,
+		unprefixedGroupPublicKeyBytes,
+		misbehavedMembersIndexes,
+		big.NewInt(int64(startBlock)),
+	)
+}
 
-	// Abort if there is no DKG in progress. This check is needed to handle a
-	// situation in which two operators of the same client attempt to submit
-	// the DKG result.
-	if mwr.currentDkgStartBlock == nil {
-		return nil
+// calculateDKGResultSignatureHash computes the keccak256 hash for the given DKG
+// result parameters. It expects that the groupPublicKey is a 64-byte uncompressed
+// public key without the 04 prefix and that the misbehavedMembersIndexes
+// slice is sorted in ascending order. Those expectations are enforced by the
+// contract.
+func calculateDKGResultSignatureHash(
+	chainID *big.Int,
+	groupPublicKey []byte,
+	misbehavedMembersIndexes []group.MemberIndex,
+	startBlock *big.Int,
+) (dkg.ResultSignatureHash, error) {
+	publicKeySize := 64
+
+	if len(groupPublicKey) != publicKeySize {
+		return dkg.ResultSignatureHash{}, fmt.Errorf(
+			"wrong group public key length",
+		)
 	}
 
-	blockNumber, err := mwr.blockCounter.CurrentBlock()
+	uint256Type, err := abi.NewType("uint256", "uint256", nil)
+	if err != nil {
+		return dkg.ResultSignatureHash{}, err
+	}
+	bytesType, err := abi.NewType("bytes", "bytes", nil)
+	if err != nil {
+		return dkg.ResultSignatureHash{}, err
+	}
+	uint8SliceType, err := abi.NewType("uint8[]", "uint8[]", nil)
 	if err != nil {
-		return fmt.Errorf("failed to get the current block")
+		return dkg.ResultSignatureHash{}, err
 	}
 
-	groupPublicKeyBytes, err := result.GroupPublicKeyBytes()
+	bytes, err := abi.Arguments{
+		{Type: uint256Type},
+		{Type: bytesType},
+		{Type: uint8SliceType},
+		{Type: uint256Type},
+	}.Pack(
+		chainID,
+		groupPublicKey,
+		misbehavedMembersIndexes,
+		startBlock,
+	)
 	if err != nil {
-		return fmt.Errorf(
-			"failed to extract group public key bytes from the result [%v]",
-			err,
-		)
+		return dkg.ResultSignatureHash{}, err
 	}
 
-	for _, handler := range mwr.dkgResultSubmissionHandlers {
-		go func(handler func(*tbtc.DKGResultSubmittedEvent)) {
-			handler(&tbtc.DKGResultSubmittedEvent{
-				MemberIndex:         uint32(memberIndex),
-				GroupPublicKeyBytes: groupPublicKeyBytes,
-				Misbehaved:          result.MisbehavedMembersIndexes(),
-				BlockNumber:         blockNumber,
-			})
-		}(handler)
-	}
-
-	mwr.activeWallet = groupPublicKeyBytes
-	mwr.activeWalletOperableBlock = new(big.Int).Add(
-		mwr.currentDkgStartBlock,
-		// We add an arbitrary value that must cover the protocol duration
-		// and some additional time for all clients to submit the DKG
-		// result to their own internal mocked chain. This value is bigger than
-		// the value used in beacon as the tECDSA DKG takes more blocks.
-		big.NewInt(200),
+	return dkg.ResultSignatureHash(crypto.Keccak256Hash(bytes)), nil
+}
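A usage sketch mirroring the vector pinned in `TestCalculateDKGResultSignatureHash` below: the hash is keccak256 over `abi.encode(uint256 chainID, bytes groupPubKey, uint8[] misbehavedIndexes, uint256 startBlock)`. The `encoding/hex` package is assumed to be imported:

```go
// Sketch only: reproduce the test vector for the signature hash.
groupPublicKey, _ := hex.DecodeString(
	"989d253b17a6a0f41838b84ff0d20e8898f9d7b1a98f2564da4cc29dcf8581d9d" +
		"218b65e7d91c752f7b22eaceb771a9af3a6f3d3f010a5d471a1aeef7d7713af",
)

hash, err := calculateDKGResultSignatureHash(
	big.NewInt(1),              // chain ID
	groupPublicKey,             // 64-byte unprefixed group public key
	[]group.MemberIndex{2, 55}, // misbehaved members, sorted ascending
	big.NewInt(2000),           // DKG start block
)
if err != nil {
	// handle error
}
// hex.EncodeToString(hash[:]) is expected to be
// "25f917154586c2be0b6364f5c4758580e535bc01ed4881211000c9267aef3a3b".
_ = hash
```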
+
+func (tc *TbtcChain) IsDKGResultValid(
+	dkgResult *tbtc.DKGChainResult,
+) (bool, error) {
+	outcome, err := tc.walletRegistry.IsDkgResultValid(
+		convertDkgResultToAbiType(dkgResult),
 	)
-	mwr.currentDkgStartBlock = nil
+	if err != nil {
+		return false, fmt.Errorf("cannot check result validity: [%v]", err)
+	}
 
-	return nil
+	return parseDkgResultValidationOutcome(&outcome)
+}
+
+// parseDkgResultValidationOutcome parses the DKG validation outcome and returns
+// a boolean indicating whether the result is valid or not. The outcome parameter
+// must be a pointer to a struct containing a boolean flag as the first field.
+//
+// TODO: Find a better way to get the validity flag. This would require
+//       changes in the contracts binding generator.
+func parseDkgResultValidationOutcome(
+	outcome interface{},
+) (bool, error) {
+	value := reflect.ValueOf(outcome)
+	switch value.Kind() {
+	case reflect.Pointer:
+	default:
+		return false, fmt.Errorf("result validation outcome is not a pointer")
+	}
+
+	field := value.Elem().Field(0)
+	switch field.Kind() {
+	case reflect.Bool:
+		return field.Bool(), nil
+	default:
+		return false, fmt.Errorf("cannot parse result validation outcome")
+	}
+}
+
+func (tc *TbtcChain) ChallengeDKGResult(dkgResult *tbtc.DKGChainResult) error {
+	_, err := tc.walletRegistry.ChallengeDkgResult(
+		convertDkgResultToAbiType(dkgResult),
+	)
+
+	return err
+}
+
+func (tc *TbtcChain) ApproveDKGResult(dkgResult *tbtc.DKGChainResult) error {
+	_, err := tc.walletRegistry.ApproveDkgResult(
+		convertDkgResultToAbiType(dkgResult),
+	)
+
+	return err
+}
+
+func (tc *TbtcChain) DKGParameters() (*tbtc.DKGParameters, error) {
+	parameters, err := tc.walletRegistry.DkgParameters()
+	if err != nil {
+		return nil, err
+	}
+
+	return &tbtc.DKGParameters{
+		SubmissionTimeoutBlocks:       parameters.ResultSubmissionTimeout.Uint64(),
+		ChallengePeriodBlocks:         parameters.ResultChallengePeriodLength.Uint64(),
+		ApprovePrecedencePeriodBlocks: parameters.SubmitterPrecedencePeriodLength.Uint64(),
+	}, nil
 }
 
-func (mwr *mockWalletRegistry) OnSignatureRequested(
-	handler func(event *tbtc.SignatureRequestedEvent),
+// OnHeartbeatRequested runs a heartbeat loop that produces a heartbeat
+// request every ~8 hours. A single heartbeat request consists of 5 messages
+// that must be signed sequentially.
+func (tc *TbtcChain) OnHeartbeatRequested(
+	handler func(event *tbtc.HeartbeatRequestedEvent),
 ) subscription.EventSubscription {
 	ctx, cancelCtx := context.WithCancel(context.Background())
-	blocksChan := mwr.blockCounter.WatchBlocks(ctx)
+	blocksChan := tc.blockCounter.WatchBlocks(ctx)
 
 	go func() {
 		for {
 			select {
 			case block := <-blocksChan:
-				// Generate an event every 500 block.
-				if block%500 == 0 {
-					mwr.activeWalletMutex.RLock()
-
-					if len(mwr.activeWallet) > 0 {
-						// If the active wallet is ready to receive the request.
-						if big.NewInt(int64(block)).Cmp(
-							mwr.activeWalletOperableBlock,
-						) >= 0 {
-							blockBytes := make([]byte, 8)
-							binary.BigEndian.PutUint64(blockBytes, block)
-							blockHashBytes := crypto.Keccak256(blockBytes)
-							blockHash := new(big.Int).SetBytes(blockHashBytes)
-
-							messages := make([]*big.Int, 10)
-							for i := range messages {
-								messages[i] = new(big.Int).Add(
-									blockHash,
-									big.NewInt(int64(i)),
-								)
-							}
-
-							go handler(&tbtc.SignatureRequestedEvent{
-								WalletPublicKey: mwr.activeWallet,
-								Messages:        messages,
-								BlockNumber:     block,
-							})
-						}
+				// Generate a heartbeat every 2400 blocks, i.e. ~8 hours.
+				if block%2400 == 0 {
+					walletPublicKey, ok, err := tc.activeWalletPublicKey()
+					if err != nil {
+						logger.Errorf(
+							"cannot get active wallet for heartbeat request: [%v]",
+							err,
+						)
+						continue
+					}
+
+					if !ok {
+						logger.Infof("there is no active wallet for heartbeat at the moment")
+						continue
 					}
 
-					mwr.activeWalletMutex.RUnlock()
+					prefixBytes := make([]byte, 8)
+					binary.BigEndian.PutUint64(
+						prefixBytes,
+						0xffffffffffffffff,
+					)
+
+					messages := make([]*big.Int, 5)
+					for i := range messages {
+						suffixBytes := make([]byte, 8)
+						binary.BigEndian.PutUint64(
+							suffixBytes,
+							block+uint64(i),
+						)
+
+						preimage := append(prefixBytes, suffixBytes...)
+						preimageSha256 := sha256.Sum256(preimage)
+						message := sha256.Sum256(preimageSha256[:])
+
+						messages[i] = new(big.Int).SetBytes(message[:])
+					}
+
+					go handler(&tbtc.HeartbeatRequestedEvent{
+						WalletPublicKey: walletPublicKey,
+						Messages:        messages,
+						BlockNumber:     block,
+					})
 				}
 			case <-ctx.Done():
 				return
@@ -590,13 +909,38 @@ func (mwr *mockWalletRegistry) OnSignatureRequested(
 	})
 }
 
-func (mwr *mockWalletRegistry) GetDKGState() (tbtc.DKGState, error) {
-	mwr.currentDkgMutex.RLock()
-	defer mwr.currentDkgMutex.RUnlock()
+func (tc *TbtcChain) activeWalletPublicKey() ([]byte, bool, error) {
+	walletPublicKeyHash, err := tc.bridge.ActiveWalletPubKeyHash()
+	if err != nil {
+		return nil, false, fmt.Errorf(
+			"cannot get active wallet public key hash: [%v]",
+			err,
+		)
+	}
+
+	if walletPublicKeyHash == [20]byte{} {
+		return nil, false, nil
+	}
+
+	bridgeWalletData, err := tc.bridge.Wallets(walletPublicKeyHash)
+	if err != nil {
+		return nil, false, fmt.Errorf(
+			"cannot get active wallet data from Bridge: [%v]",
+			err,
+		)
+	}
 
-	if mwr.currentDkgStartBlock != nil {
-		return tbtc.AwaitingResult, nil
+	registryWalletData, err := tc.walletRegistry.GetWallet(bridgeWalletData.EcdsaWalletID)
+	if err != nil {
+		return nil, false, fmt.Errorf(
+			"cannot get active wallet data from WalletRegistry: [%v]",
+			err,
+		)
 	}
 
-	return tbtc.Idle, nil
+	publicKeyBytes := []byte{0x04} // pre-fill with uncompressed ECDSA public key prefix
+	publicKeyBytes = append(publicKeyBytes, registryWalletData.PublicKeyX[:]...)
+	publicKeyBytes = append(publicKeyBytes, registryWalletData.PublicKeyY[:]...)
+
+	return publicKeyBytes, true, nil
 }
diff --git a/pkg/chain/ethereum/tbtc_test.go b/pkg/chain/ethereum/tbtc_test.go
new file mode 100644
index 0000000000..8be8e82edb
--- /dev/null
+++ b/pkg/chain/ethereum/tbtc_test.go
@@ -0,0 +1,308 @@
+package ethereum
+
+import (
+	"bytes"
+	"crypto/ecdsa"
+	"encoding/hex"
+	"fmt"
+	"math/big"
+	"reflect"
+	"testing"
+
+	"github.com/keep-network/keep-core/pkg/chain"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/keep-network/keep-core/pkg/internal/testutils"
+	"github.com/keep-network/keep-core/pkg/protocol/group"
+)
+
+func TestComputeOperatorsIDsHash(t *testing.T) {
+	operatorIDs := []chain.OperatorID{
+		5, 1, 55, 45435534, 33, 345, 23, 235, 3333, 2,
+	}
+
+	hash, err := computeOperatorsIDsHash(operatorIDs)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expectedHash := "8cd41effd4ee91b56d6b2f836efdcac11ab1ef2ae228e348814d0e6c2966d01e"
+
+	testutils.AssertStringsEqual(
+		t,
+		"hash",
+		expectedHash,
+		hex.EncodeToString(hash[:]),
+	)
+}
+
+func TestConvertSignaturesToChainFormat(t *testing.T) {
+	signatureSize := 65
+
+	signature1 := common.LeftPadBytes([]byte{1, 2, 3}, signatureSize)
+	signature2 := common.LeftPadBytes([]byte{4, 5, 6}, signatureSize)
+	signature3 := common.LeftPadBytes([]byte{7}, signatureSize)
+	signature4 := common.LeftPadBytes([]byte{8, 9, 10}, signatureSize)
+	signature5 := common.LeftPadBytes([]byte{11, 12, 13}, signatureSize)
+
+	invalidSignature := common.LeftPadBytes([]byte("invalid"), signatureSize-1)
+
+	var tests = map[string]struct {
+		signaturesMap   map[group.MemberIndex][]byte
+		expectedIndices []group.MemberIndex
+		expectedError   error
+	}{
+		"one valid signature": {
+			signaturesMap: map[uint8][]byte{
+				1: signature1,
+			},
+			expectedIndices: []group.MemberIndex{1},
+		},
+		"five valid signatures": {
+			signaturesMap: map[group.MemberIndex][]byte{
+				3: signature3,
+				1: signature1,
+				4: signature4,
+				5: signature5,
+				2: signature2,
+			},
+			expectedIndices: []group.MemberIndex{1, 2, 3, 4, 5},
+		},
+		"invalid signature": {
+			signaturesMap: map[group.MemberIndex][]byte{
+				1: signature1,
+				2: invalidSignature,
+			},
+			expectedError: fmt.Errorf("invalid signature size for member [2] got [64] bytes but [65] bytes required"),
+		},
+	}
+	for testName, test := range tests {
+		t.Run(testName, func(t *testing.T) {
+			indicesSlice, signaturesSlice, err :=
+				convertSignaturesToChainFormat(test.signaturesMap)
+
+			if !reflect.DeepEqual(err, test.expectedError) {
+				t.Errorf(
+					"unexpected error\nexpected: [%v]\nactual:   [%v]\n",
+					test.expectedError,
+					err,
+				)
+			}
+
+			if test.expectedError == nil {
+				if !reflect.DeepEqual(test.expectedIndices, indicesSlice) {
+					t.Errorf(
+						"unexpected indices\n"+
+							"expected: [%v]\n"+
+							"actual:   [%v]\n",
+						test.expectedIndices,
+						indicesSlice,
+					)
+				}
+
+				testutils.AssertIntsEqual(
+					t,
+					"signatures slice length",
+					signatureSize*len(test.signaturesMap),
+					len(signaturesSlice),
+				)
+			}
+
+			for i, memberIndex := range indicesSlice {
+				actualSignature := signaturesSlice[signatureSize*i : signatureSize*(i+1)]
+				if !bytes.Equal(
+					test.signaturesMap[memberIndex],
+					actualSignature,
+				) {
+					t.Errorf(
+						"invalid signatures for member %v\nexpected: %v\nactual:   %v\n",
+						memberIndex,
+						test.signaturesMap[memberIndex],
+						actualSignature,
+					)
+				}
+			}
+		})
+	}
+}
+
+func TestConvertPubKeyToChainFormat(t *testing.T) {
+	bytes30 := []byte{229, 19, 136, 216, 125, 157, 135, 142, 67, 130,
+		136, 13, 76, 188, 32, 218, 243, 134, 95, 73, 155, 24, 38, 73, 117, 90,
+		215, 95, 216, 19}
+	bytes31 := []byte{182, 142, 176, 51, 131, 130, 111, 197, 191, 103, 180, 137,
+		171, 101, 34, 78, 251, 234, 118, 184, 16, 116, 238, 82, 131, 153, 134,
+		17, 46, 158, 94}
+
+	expectedResult := [64]byte{
+		// padding
+		00, 00,
+		// bytes30
+		229, 19, 136, 216, 125, 157, 135, 142, 67, 130, 136, 13, 76, 188, 32,
+		218, 243, 134, 95, 73, 155, 24, 38, 73, 117, 90, 215, 95, 216, 19,
+		// padding
+		00,
+		// bytes31
+		182, 142, 176, 51, 131, 130, 111, 197, 191, 103, 180, 137, 171, 101, 34,
+		78, 251, 234, 118, 184, 16, 116, 238, 82, 131, 153, 134, 17, 46, 158, 94,
+	}
+
+	actualResult, err := convertPubKeyToChainFormat(
+		&ecdsa.PublicKey{
+			X: new(big.Int).SetBytes(bytes30),
+			Y: new(big.Int).SetBytes(bytes31),
+		},
+	)
+
+	if err != nil {
+		t.Errorf("unexpected error [%v]", err)
+	}
+
+	testutils.AssertBytesEqual(
+		t,
+		expectedResult[:],
+		actualResult[:],
+	)
+}
+
+func TestValidateMemberIndex(t *testing.T) {
+	one := big.NewInt(1)
+	maxMemberIndex := big.NewInt(255)
+
+	var tests = map[string]struct {
+		chainMemberIndex *big.Int
+		expectedError    error
+	}{
+		"less than max member index": {
+			chainMemberIndex: new(big.Int).Sub(maxMemberIndex, one),
+			expectedError:    nil,
+		},
+		"max member index": {
+			chainMemberIndex: maxMemberIndex,
+			expectedError:    nil,
+		},
+		"greater than max member index": {
+			chainMemberIndex: new(big.Int).Add(maxMemberIndex, one),
+			expectedError:    fmt.Errorf("invalid member index value: [256]"),
+		},
+	}
+
+	for testName, test := range tests {
+		t.Run(testName, func(t *testing.T) {
+			err := validateMemberIndex(test.chainMemberIndex)
+
+			if !reflect.DeepEqual(err, test.expectedError) {
+				t.Errorf(
+					"unexpected error\nexpected: [%v]\nactual:   [%v]\n",
+					test.expectedError,
+					err,
+				)
+			}
+		})
+	}
+}
+
+func TestCalculateDKGResultSignatureHash(t *testing.T) {
+	chainID := big.NewInt(1)
+
+	groupPublicKey, err := hex.DecodeString(
+		"989d253b17a6a0f41838b84ff0d20e8898f9d7b1a98f2564da4cc29dcf8581d9d" +
+			"218b65e7d91c752f7b22eaceb771a9af3a6f3d3f010a5d471a1aeef7d7713af",
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	misbehavedMembersIndexes := []group.MemberIndex{2, 55}
+
+	startBlock := big.NewInt(2000)
+
+	hash, err := calculateDKGResultSignatureHash(
+		chainID,
+		groupPublicKey,
+		misbehavedMembersIndexes,
+		startBlock,
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expectedHash := "25f917154586c2be0b6364f5c4758580e535bc01ed4881211000c9267aef3a3b"
+
+	testutils.AssertStringsEqual(
+		t,
+		"hash",
+		expectedHash,
+		hex.EncodeToString(hash[:]),
+	)
+}
+
+func TestParseDkgResultValidationOutcome(t *testing.T) {
+	isValid, err := parseDkgResultValidationOutcome(
+		&struct {
+			bool
+			string
+		}{
+			true,
+			"",
+		},
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+	testutils.AssertBoolsEqual(t, "validation outcome", true, isValid)
+
+	isValid, err = parseDkgResultValidationOutcome(
+		&struct {
+			bool
+			string
+		}{
+			false,
+			"",
+		},
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+	testutils.AssertBoolsEqual(t, "validation outcome", false, isValid)
+
+	_, err = parseDkgResultValidationOutcome(
+		struct {
+			bool
+			string
+		}{
+			true,
+			"",
+		},
+	)
+	expectedErr := fmt.Errorf("result validation outcome is not a pointer")
+	if !reflect.DeepEqual(expectedErr, err) {
+		t.Errorf(
+			"unexpected error\n"+
+				"expected: [%v]\n"+
+				"actual:   [%v]",
+			expectedErr,
+			err,
+		)
+	}
+
+	_, err = parseDkgResultValidationOutcome(
+		&struct {
+			string
+			bool
+		}{
+			"",
+			true,
+		},
+	)
+	expectedErr = fmt.Errorf("cannot parse result validation outcome")
+	if !reflect.DeepEqual(expectedErr, err) {
+		t.Errorf(
+			"unexpected error\n"+
+				"expected: [%v]\n"+
+				"actual:   [%v]",
+			expectedErr,
+			err,
+		)
+	}
+}
diff --git a/pkg/chain/local_v1/local.go b/pkg/chain/local_v1/local.go
index ac5c651f28..6715634ab2 100644
--- a/pkg/chain/local_v1/local.go
+++ b/pkg/chain/local_v1/local.go
@@ -434,6 +434,12 @@ func (c *localChain) IsBetaOperator() (bool, error) {
 	panic("unsupported")
 }
 
+func (c *localChain) GetOperatorID(
+	operatorAddress chain.Address,
+) (chain.OperatorID, error) {
+	panic("unsupported")
+}
+
 func GenerateHandlerID() int {
 	// #nosec G404 (insecure random number source (rand))
 	// Local chain implementation doesn't require secure randomness.
diff --git a/pkg/chain/operator_id.go b/pkg/chain/operator_id.go
new file mode 100644
index 0000000000..1c88ac9334
--- /dev/null
+++ b/pkg/chain/operator_id.go
@@ -0,0 +1,15 @@
+package chain
+
+import "math"
+
+// OperatorID is a unique identifier of an operator assigned by the Sortition
+// Pool when the operator enters the pool for the first time. The ID never
+// changes for a given operator address.
+type OperatorID = uint32
+
+// MaxOperatorID is the maximum allowed value for OperatorID supported by
+// the Sortition Pool contract.
+const MaxOperatorID = math.MaxUint32
+
+// OperatorIDs is a list of OperatorID values.
+type OperatorIDs []OperatorID
diff --git a/pkg/sortition/chain.go b/pkg/sortition/chain.go
index da15f0682d..cf53a6ff92 100644
--- a/pkg/sortition/chain.go
+++ b/pkg/sortition/chain.go
@@ -64,4 +64,7 @@ type Chain interface {
 	// Returns true if operator is a beta operator, false otherwise.
 	// Chaosnet status does not matter.
 	IsBetaOperator() (bool, error)
+
+	// GetOperatorID returns the operator ID for the given operator address.
+	GetOperatorID(operatorAddress chain.Address) (chain.OperatorID, error)
 }
diff --git a/pkg/sortition/internal/local/chain.go b/pkg/sortition/internal/local/chain.go
index 641a5a7387..1c85c4e519 100644
--- a/pkg/sortition/internal/local/chain.go
+++ b/pkg/sortition/internal/local/chain.go
@@ -215,6 +215,12 @@ func (c *Chain) IsBetaOperator() (bool, error) {
 	return c.isBetaOperator, nil
 }
 
+func (c *Chain) GetOperatorID(
+	operatorAddress chain.Address,
+) (chain.OperatorID, error) {
+	panic("unsupported")
+}
+
 func (c *Chain) SetCurrentTimestamp(currentTimestamp *big.Int) {
 	c.currentTimestamp = currentTimestamp
 }
diff --git a/pkg/tbtc/chain.go b/pkg/tbtc/chain.go
index dc13d664f8..2cebe013c9 100644
--- a/pkg/tbtc/chain.go
+++ b/pkg/tbtc/chain.go
@@ -1,6 +1,7 @@
 package tbtc
 
 import (
+	"crypto/ecdsa"
 	"math/big"
 
 	"github.com/keep-network/keep-core/pkg/chain"
@@ -23,10 +24,19 @@ const (
 // GroupSelectionChain defines the subset of the TBTC chain interface that
 // pertains to the group selection activities.
 type GroupSelectionChain interface {
-	// SelectGroup returns the group members for the group generated by
-	// the given seed. This function can return an error if the TBTC chain's
-	// state does not allow for group selection at the moment.
-	SelectGroup(seed *big.Int) (chain.Addresses, error)
+	// SelectGroup returns the group members selected for the current group
+	// selection. The function returns an error if the chain's state does not
+	// allow for group selection at the moment.
+	SelectGroup() (*GroupSelectionResult, error)
+}
+
+// GroupSelectionResult represents a group selection result, i.e. operators
+// selected to perform the DKG protocol. The result consists of two slices
+// of equal length holding the chain.OperatorID and chain.Address for each
+// selected operator.
+type GroupSelectionResult struct {
+	OperatorsIDs       chain.OperatorIDs
+	OperatorsAddresses chain.Addresses
 }
 
 // DistributedKeyGenerationChain defines the subset of the TBTC chain
@@ -45,20 +55,74 @@ type DistributedKeyGenerationChain interface {
 		func(event *DKGResultSubmittedEvent),
 	) subscription.EventSubscription
 
-	// SubmitDKGResult submits the DKG result to the chain, along with signatures
-	// over result hash from group participants supporting the result.
-	SubmitDKGResult(
-		memberIndex group.MemberIndex,
-		result *dkg.Result,
+	// OnDKGResultChallenged registers a callback that is invoked when an
+	// on-chain notification of the DKG result challenge is seen.
+	OnDKGResultChallenged(
+		func(event *DKGResultChallengedEvent),
+	) subscription.EventSubscription
+
+	// OnDKGResultApproved registers a callback that is invoked when an on-chain
+	// notification of the DKG result approval is seen.
+	OnDKGResultApproved(
+		func(event *DKGResultApprovedEvent),
+	) subscription.EventSubscription
+
+	// AssembleDKGResult assembles the DKG chain result according to the rules
+	// expected by the given chain.
+	AssembleDKGResult(
+		submitterMemberIndex group.MemberIndex,
+		groupPublicKey *ecdsa.PublicKey,
+		operatingMembersIndexes []group.MemberIndex,
+		misbehavedMembersIndexes []group.MemberIndex,
 		signatures map[group.MemberIndex][]byte,
-	) error
+		groupSelectionResult *GroupSelectionResult,
+	) (*DKGChainResult, error)
+
+	// SubmitDKGResult submits the DKG result to the chain.
+	SubmitDKGResult(dkgResult *DKGChainResult) error
 
 	// GetDKGState returns the current state of the DKG procedure.
 	GetDKGState() (DKGState, error)
 
-	// CalculateDKGResultHash calculates 256-bit hash of DKG result in standard
-	// specific for the chain. Operation is performed off-chain.
-	CalculateDKGResultHash(result *dkg.Result) (dkg.ResultHash, error)
+	// CalculateDKGResultSignatureHash calculates a 32-byte hash that is used
+	// to produce a signature supporting the given groupPublicKey computed
+	// as a result of the given DKG process. The misbehavedMembersIndexes
+	// parameter should contain indexes of members considered misbehaved
+	// during the DKG process. The startBlock argument is the block at which
+	// the given DKG process started.
+	CalculateDKGResultSignatureHash(
+		groupPublicKey *ecdsa.PublicKey,
+		misbehavedMembersIndexes []group.MemberIndex,
+		startBlock uint64,
+	) (dkg.ResultSignatureHash, error)
+
+	// IsDKGResultValid checks whether the submitted DKG result is valid from
+	// the on-chain contract standpoint.
+	IsDKGResultValid(dkgResult *DKGChainResult) (bool, error)
+
+	// ChallengeDKGResult challenges the submitted DKG result.
+	ChallengeDKGResult(dkgResult *DKGChainResult) error
+
+	// ApproveDKGResult approves the submitted DKG result.
+	ApproveDKGResult(dkgResult *DKGChainResult) error
+
+	// DKGParameters gets the current value of DKG-specific control parameters.
+	DKGParameters() (*DKGParameters, error)
+}
+
+// DKGChainResultHash represents a hash of the DKGChainResult. The algorithm
+// used is specific to the chain.
+type DKGChainResultHash [32]byte
+
+// DKGChainResult represents a DKG result submitted to the chain.
+type DKGChainResult struct {
+	SubmitterMemberIndex     group.MemberIndex
+	GroupPublicKey           []byte
+	MisbehavedMembersIndexes []group.MemberIndex
+	Signatures               []byte
+	SigningMembersIndexes    []group.MemberIndex
+	Members                  chain.OperatorIDs
+	MembersHash              [32]byte
 }
 
 // DKGStartedEvent represents a DKG start event.
@@ -67,42 +131,51 @@ type DKGStartedEvent struct {
 	BlockNumber uint64
 }
 
-// DKGResultSubmittedEvent represents a DKG result submission event. It is emitted
-// after a submitted DKG result is positively validated on the chain. It contains
-// the index of the member who submitted the result and a final public key of
-// the group.
+// DKGResultSubmittedEvent represents a DKG result submission event. It is
+// emitted after a submitted DKG result lands on the chain.
 type DKGResultSubmittedEvent struct {
-	MemberIndex         uint32
-	GroupPublicKeyBytes []byte
-	Misbehaved          []uint8
+	Seed        *big.Int
+	ResultHash  DKGChainResultHash
+	Result      *DKGChainResult
+	BlockNumber uint64
+}
 
+// DKGResultChallengedEvent represents a DKG result challenge event. It is
+// emitted after a submitted DKG result is challenged as an invalid result.
+type DKGResultChallengedEvent struct {
+	ResultHash  DKGChainResultHash
+	Challenger  chain.Address
+	Reason      string
 	BlockNumber uint64
 }
 
+// DKGResultApprovedEvent represents a DKG result approval event. It is
+// emitted after a submitted DKG result is approved as a valid result.
+type DKGResultApprovedEvent struct {
+	ResultHash  DKGChainResultHash
+	Approver    chain.Address
+	BlockNumber uint64
+}
+
+// DKGParameters contains values of DKG-specific control parameters.
+type DKGParameters struct {
+	SubmissionTimeoutBlocks       uint64
+	ChallengePeriodBlocks         uint64
+	ApprovePrecedencePeriodBlocks uint64
+}
+
 // BridgeChain defines the subset of the TBTC chain interface that pertains
 // specifically to the tBTC Bridge operations.
 type BridgeChain interface {
-	// OnSignatureRequested registers a callback that is invoked when an
-	// on-chain notification of the Bridge signature request is seen.
-	//
-	// TODO: This is a temporary function that should be removed once the client
-	//       is integrated with real on-chain contracts. This is because the
-	//       Bridge contract will not request wallets' signatures in such an
-	//       explicit way. It will emit some domain-specific events and specific
-	//       wallets will respond in an appropriate way, often by building and
-	//       signing BTC transactions. However, for this iteration, we want to
-	//       have a test signing loop so we are simulating that signatures are
-	//       requested by the Bridge explicitly.
-	OnSignatureRequested(
-		func(event *SignatureRequestedEvent),
+	// OnHeartbeatRequested registers a callback that is invoked when an
+	// on-chain notification of the Bridge heartbeat request is seen.
+	OnHeartbeatRequested(
+		func(event *HeartbeatRequestedEvent),
 	) subscription.EventSubscription
 }
 
-// SignatureRequestedEvent represents a Bridge signature request event.
-//
-// TODO: This is a temporary structure that should be removed once the client
-//       is integrated with real on-chain contracts.
-type SignatureRequestedEvent struct {
+// HeartbeatRequestedEvent represents a Bridge heartbeat request event.
+type HeartbeatRequestedEvent struct {
 	WalletPublicKey []byte
 	Messages        []*big.Int
 	BlockNumber     uint64
@@ -111,8 +184,6 @@ type SignatureRequestedEvent struct {
 // Chain represents the interface that the TBTC module expects to interact
 // with the anchoring blockchain on.
 type Chain interface {
-	// GetConfig returns the expected configuration of the TBTC module.
-	GetConfig() *ChainConfig
 	// BlockCounter returns the chain's block counter.
 	BlockCounter() (chain.BlockCounter, error)
 	// Signing returns the chain's signer.
@@ -126,23 +197,3 @@ type Chain interface {
 	DistributedKeyGenerationChain
 	BridgeChain
 }
-
-// ChainConfig contains the config data needed for the TBTC to operate.
-type ChainConfig struct {
-	// GroupSize is the target size of a group in TBTC.
-	GroupSize int
-	// GroupQuorum is the minimum number of active participants behaving
-	// according to the protocol needed to generate a group in TBTC. This value
-	// is smaller than the GroupSize and bigger than the HonestThreshold.
-	GroupQuorum int
-	// HonestThreshold is the minimum number of active participants behaving
-	// according to the protocol needed to generate a signature.
-	HonestThreshold int
-}
-
-// DishonestThreshold is the maximum number of misbehaving participants for
-// which it is still possible to generate a signature.
-// Misbehaviour is any misconduct to the protocol, including inactivity.
-func (cc *ChainConfig) DishonestThreshold() int {
-	return cc.GroupSize - cc.HonestThreshold
-}
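The two GroupSelectionResult slices are parallel and indexed by member position, while group.MemberIndex values used throughout this package are 1-based, so translating a member index into its operator requires subtracting one. A minimal sketch of that convention, assuming the chain, group, and fmt imports used elsewhere in pkg/tbtc; the operatorForMember helper is illustrative only:

```go
// operatorForMember shows how a 1-based member index maps onto the parallel
// slices of a GroupSelectionResult.
func operatorForMember(
	result *GroupSelectionResult,
	memberIndex group.MemberIndex,
) (chain.OperatorID, chain.Address, error) {
	position := int(memberIndex) - 1 // member indexes start at 1
	if position < 0 || position >= len(result.OperatorsIDs) {
		return 0, "", fmt.Errorf("member index [%v] is out of range", memberIndex)
	}

	return result.OperatorsIDs[position], result.OperatorsAddresses[position], nil
}
```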
diff --git a/pkg/tbtc/chain_test.go b/pkg/tbtc/chain_test.go
index 802845939c..60fbc3799d 100644
--- a/pkg/tbtc/chain_test.go
+++ b/pkg/tbtc/chain_test.go
@@ -1,8 +1,13 @@
 package tbtc
 
 import (
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"encoding/binary"
 	"fmt"
 	"math/big"
+	"math/rand"
+	"reflect"
 	"sync"
 
 	"github.com/keep-network/keep-core/pkg/chain"
@@ -14,32 +19,33 @@ import (
 	"golang.org/x/crypto/sha3"
 )
 
-var errNilDKGResult = fmt.Errorf("nil DKG result")
+const localChainOperatorID = chain.OperatorID(1)
 
 type localChain struct {
 	dkgResultSubmissionHandlersMutex sync.Mutex
 	dkgResultSubmissionHandlers      map[int]func(submission *DKGResultSubmittedEvent)
 
-	resultSubmissionMutex sync.Mutex
-	activeWallet          []byte
-	resultSubmitterIndex  group.MemberIndex
+	dkgResultApprovalHandlersMutex sync.Mutex
+	dkgResultApprovalHandlers      map[int]func(submission *DKGResultApprovedEvent)
+
+	dkgResultApprovalGuard func() bool
+
+	dkgResultChallengeHandlersMutex sync.Mutex
+	dkgResultChallengeHandlers      map[int]func(submission *DKGResultChallengedEvent)
+
+	dkgMutex       sync.Mutex
+	dkgState       DKGState
+	dkgResult      *DKGChainResult
+	dkgResultValid bool
 
 	blockCounter       chain.BlockCounter
-	chainConfig        *ChainConfig
 	operatorPrivateKey *operator.PrivateKey
 }
 
-// GetConfig returns the chain configuration.
-func (lc *localChain) GetConfig() *ChainConfig {
-	return lc.chainConfig
-}
-
-// BlockCounter returns the block counter associated with the chain.
 func (lc *localChain) BlockCounter() (chain.BlockCounter, error) {
 	return lc.blockCounter, nil
 }
 
-// Signing returns the signing associated with the chain.
 func (lc *localChain) Signing() chain.Signing {
 	return local_v1.NewSigner(lc.operatorPrivateKey)
 }
@@ -49,7 +55,7 @@ func (lc *localChain) OperatorKeyPair() (
 	*operator.PublicKey,
 	error,
 ) {
-	panic("unsupported")
+	return lc.operatorPrivateKey, &lc.operatorPrivateKey.PublicKey, nil
 }
 
 func (lc *localChain) OperatorToStakingProvider() (chain.Address, bool, error) {
@@ -100,7 +106,22 @@ func (lc *localChain) IsBetaOperator() (bool, error) {
 	panic("unsupported")
 }
 
-func (lc *localChain) SelectGroup(seed *big.Int) (chain.Addresses, error) {
+func (lc *localChain) GetOperatorID(
+	operatorAddress chain.Address,
+) (chain.OperatorID, error) {
+	thisOperatorAddress, err := lc.operatorAddress()
+	if err != nil {
+		return 0, err
+	}
+
+	if thisOperatorAddress != operatorAddress {
+		return 0, fmt.Errorf("local chain allows for one operator only")
+	}
+
+	return localChainOperatorID, nil
+}
+
+func (lc *localChain) SelectGroup() (*GroupSelectionResult, error) {
 	panic("not implemented")
 }
 
@@ -110,15 +131,13 @@ func (lc *localChain) OnDKGStarted(
 	panic("unsupported")
 }
 
-// OnDKGResultSubmitted registers a callback that is invoked when an on-chain
-// notification of the DKG result submission is seen.
 func (lc *localChain) OnDKGResultSubmitted(
 	handler func(event *DKGResultSubmittedEvent),
 ) subscription.EventSubscription {
 	lc.dkgResultSubmissionHandlersMutex.Lock()
 	defer lc.dkgResultSubmissionHandlersMutex.Unlock()
 
-	handlerID := local_v1.GenerateHandlerID()
+	handlerID := generateHandlerID()
 	lc.dkgResultSubmissionHandlers[handlerID] = handler
 
 	return subscription.NewEventSubscription(func() {
@@ -129,115 +148,334 @@ func (lc *localChain) OnDKGResultSubmitted(
 	})
 }
 
-// SubmitDKGResult submits the DKG result to the chain, along with signatures
-// over result hash from group participants supporting the result.
-func (lc *localChain) SubmitDKGResult(
-	memberIndex group.MemberIndex,
-	result *dkg.Result,
+func (lc *localChain) OnDKGResultChallenged(
+	handler func(event *DKGResultChallengedEvent),
+) subscription.EventSubscription {
+	lc.dkgResultChallengeHandlersMutex.Lock()
+	defer lc.dkgResultChallengeHandlersMutex.Unlock()
+
+	handlerID := generateHandlerID()
+	lc.dkgResultChallengeHandlers[handlerID] = handler
+
+	return subscription.NewEventSubscription(func() {
+		lc.dkgResultChallengeHandlersMutex.Lock()
+		defer lc.dkgResultChallengeHandlersMutex.Unlock()
+
+		delete(lc.dkgResultChallengeHandlers, handlerID)
+	})
+}
+
+func (lc *localChain) OnDKGResultApproved(
+	handler func(event *DKGResultApprovedEvent),
+) subscription.EventSubscription {
+	lc.dkgResultApprovalHandlersMutex.Lock()
+	defer lc.dkgResultApprovalHandlersMutex.Unlock()
+
+	handlerID := generateHandlerID()
+	lc.dkgResultApprovalHandlers[handlerID] = handler
+
+	return subscription.NewEventSubscription(func() {
+		lc.dkgResultApprovalHandlersMutex.Lock()
+		defer lc.dkgResultApprovalHandlersMutex.Unlock()
+
+		delete(lc.dkgResultApprovalHandlers, handlerID)
+	})
+}
+
+func (lc *localChain) startDKG() error {
+	lc.dkgMutex.Lock()
+	defer lc.dkgMutex.Unlock()
+
+	if lc.dkgState != Idle {
+		return fmt.Errorf("DKG not idle")
+	}
+
+	lc.dkgState = AwaitingResult
+
+	return nil
+}
+
+func (lc *localChain) AssembleDKGResult(
+	submitterMemberIndex group.MemberIndex,
+	groupPublicKey *ecdsa.PublicKey,
+	operatingMembersIndexes []group.MemberIndex,
+	misbehavedMembersIndexes []group.MemberIndex,
 	signatures map[group.MemberIndex][]byte,
+	groupSelectionResult *GroupSelectionResult,
+) (*DKGChainResult, error) {
+	groupPublicKeyBytes := elliptic.Marshal(
+		groupPublicKey.Curve,
+		groupPublicKey.X,
+		groupPublicKey.Y,
+	)
+
+	signingMembersIndexes := make([]group.MemberIndex, 0)
+	signaturesConcatenation := make([]byte, 0)
+	for memberIndex, signature := range signatures {
+		signingMembersIndexes = append(signingMembersIndexes, memberIndex)
+		signaturesConcatenation = append(signaturesConcatenation, signature...)
+	}
+
+	operatingOperatorsIDsBytes := make([]byte, 0)
+	for _, operatingMemberID := range operatingMembersIndexes {
+		operatorIDBytes := make([]byte, 4)
+		operatorID := groupSelectionResult.OperatorsIDs[operatingMemberID-1]
+		binary.BigEndian.PutUint32(operatorIDBytes, operatorID)
+
+		operatingOperatorsIDsBytes = append(
+			operatingOperatorsIDsBytes,
+			operatorIDBytes...,
+		)
+	}
+
+	return &DKGChainResult{
+		SubmitterMemberIndex:     submitterMemberIndex,
+		GroupPublicKey:           groupPublicKeyBytes,
+		MisbehavedMembersIndexes: misbehavedMembersIndexes,
+		Signatures:               signaturesConcatenation,
+		SigningMembersIndexes:    signingMembersIndexes,
+		Members:                  groupSelectionResult.OperatorsIDs,
+		MembersHash:              sha3.Sum256(operatingOperatorsIDsBytes),
+	}, nil
+}
+
+func (lc *localChain) SubmitDKGResult(
+	dkgResult *DKGChainResult,
 ) error {
 	lc.dkgResultSubmissionHandlersMutex.Lock()
 	defer lc.dkgResultSubmissionHandlersMutex.Unlock()
 
-	lc.resultSubmissionMutex.Lock()
-	defer lc.resultSubmissionMutex.Unlock()
+	lc.dkgMutex.Lock()
+	defer lc.dkgMutex.Unlock()
+
+	if lc.dkgState != AwaitingResult {
+		return fmt.Errorf("not awaiting DKG result")
+	}
 
 	blockNumber, err := lc.blockCounter.CurrentBlock()
 	if err != nil {
 		return fmt.Errorf("failed to get the current block")
 	}
 
-	groupPublicKeyBytes, err := result.GroupPublicKeyBytes()
-	if err != nil {
-		return fmt.Errorf(
-			"failed to extract group public key bytes from the result [%v]",
-			err,
-		)
-	}
+	resultHash := computeDkgChainResultHash(dkgResult)
 
 	for _, handler := range lc.dkgResultSubmissionHandlers {
 		handler(&DKGResultSubmittedEvent{
-			MemberIndex:         uint32(memberIndex),
-			GroupPublicKeyBytes: groupPublicKeyBytes,
-			Misbehaved:          result.MisbehavedMembersIndexes(),
-			BlockNumber:         blockNumber,
+			Seed:        nil,
+			ResultHash:  resultHash,
+			Result:      dkgResult,
+			BlockNumber: blockNumber,
 		})
 	}
 
-	lc.activeWallet = groupPublicKeyBytes
-	lc.resultSubmitterIndex = memberIndex
+	lc.dkgState = Challenge
+	lc.dkgResult = dkgResult
+	lc.dkgResultValid = true
 
 	return nil
 }
 
-// GetDKGState returns the current state of the DKG procedure.
 func (lc *localChain) GetDKGState() (DKGState, error) {
-	return AwaitingResult, nil
+	lc.dkgMutex.Lock()
+	defer lc.dkgMutex.Unlock()
+
+	return lc.dkgState, nil
 }
 
-// CalculateDKGResultHash calculates 256-bit hash of DKG result using SHA3-256
-// hashing algorithm.
-func (lc *localChain) CalculateDKGResultHash(
-	result *dkg.Result,
-) (dkg.ResultHash, error) {
-	if result == nil {
-		return dkg.ResultHash{}, errNilDKGResult
+func (lc *localChain) CalculateDKGResultSignatureHash(
+	groupPublicKey *ecdsa.PublicKey,
+	misbehavedMembersIndexes []group.MemberIndex,
+	startBlock uint64,
+) (dkg.ResultSignatureHash, error) {
+	if groupPublicKey == nil {
+		return dkg.ResultSignatureHash{}, fmt.Errorf("group public key is nil")
 	}
 
-	encodedDKGResult := fmt.Sprint(result)
-	dkgResultHash := dkg.ResultHash(
-		sha3.Sum256([]byte(encodedDKGResult)),
+	encoded := fmt.Sprint(
+		groupPublicKey,
+		misbehavedMembersIndexes,
+		startBlock,
 	)
-	return dkgResultHash, nil
+
+	return sha3.Sum256([]byte(encoded)), nil
+}
+
+func (lc *localChain) IsDKGResultValid(dkgResult *DKGChainResult) (bool, error) {
+	lc.dkgMutex.Lock()
+	defer lc.dkgMutex.Unlock()
+
+	if lc.dkgState != Challenge {
+		return false, fmt.Errorf("not in DKG result challenge period")
+	}
+
+	if !reflect.DeepEqual(dkgResult, lc.dkgResult) {
+		return false, fmt.Errorf("result does not match the submitted one")
+	}
+
+	return lc.dkgResultValid, nil
+}
+
+func (lc *localChain) invalidateDKGResult(dkgResult *DKGChainResult) error {
+	lc.dkgMutex.Lock()
+	defer lc.dkgMutex.Unlock()
+
+	if lc.dkgState != Challenge {
+		return fmt.Errorf("not in DKG result challenge period")
+	}
+
+	if !reflect.DeepEqual(dkgResult, lc.dkgResult) {
+		return fmt.Errorf("result does not match the submitted one")
+	}
+
+	lc.dkgResultValid = false
+
+	return nil
 }
 
-func (lc *localChain) OnSignatureRequested(
-	handler func(event *SignatureRequestedEvent),
+func (lc *localChain) ChallengeDKGResult(dkgResult *DKGChainResult) error {
+	lc.dkgResultChallengeHandlersMutex.Lock()
+	defer lc.dkgResultChallengeHandlersMutex.Unlock()
+
+	lc.dkgMutex.Lock()
+	defer lc.dkgMutex.Unlock()
+
+	if lc.dkgState != Challenge {
+		return fmt.Errorf("not in DKG result challenge period")
+	}
+
+	if !reflect.DeepEqual(dkgResult, lc.dkgResult) {
+		return fmt.Errorf("result does not match the submitted one")
+	}
+
+	if lc.dkgResultValid {
+		return fmt.Errorf("submitted result is valid")
+	}
+
+	blockNumber, err := lc.blockCounter.CurrentBlock()
+	if err != nil {
+		return fmt.Errorf("failed to get the current block")
+	}
+
+	for _, handler := range lc.dkgResultChallengeHandlers {
+		handler(&DKGResultChallengedEvent{
+			ResultHash:  computeDkgChainResultHash(dkgResult),
+			Challenger:  "",
+			Reason:      "",
+			BlockNumber: blockNumber,
+		})
+	}
+
+	lc.dkgState = AwaitingResult
+	lc.dkgResult = nil
+	lc.dkgResultValid = false
+
+	return nil
+}
+
+func (lc *localChain) ApproveDKGResult(dkgResult *DKGChainResult) error {
+	lc.dkgResultApprovalHandlersMutex.Lock()
+	defer lc.dkgResultApprovalHandlersMutex.Unlock()
+
+	lc.dkgMutex.Lock()
+	defer lc.dkgMutex.Unlock()
+
+	if lc.dkgState != Challenge {
+		return fmt.Errorf("not in DKG result challenge period")
+	}
+
+	if !reflect.DeepEqual(dkgResult, lc.dkgResult) {
+		return fmt.Errorf("result does not match the submitted one")
+	}
+
+	if !lc.dkgResultValid {
+		return fmt.Errorf("submitted result is invalid")
+	}
+
+	if lc.dkgResultApprovalGuard != nil && !lc.dkgResultApprovalGuard() {
+		return fmt.Errorf("rejected by guard")
+	}
+
+	blockNumber, err := lc.blockCounter.CurrentBlock()
+	if err != nil {
+		return fmt.Errorf("failed to get the current block")
+	}
+
+	for _, handler := range lc.dkgResultApprovalHandlers {
+		handler(&DKGResultApprovedEvent{
+			ResultHash:  computeDkgChainResultHash(dkgResult),
+			Approver:    "",
+			BlockNumber: blockNumber,
+		})
+	}
+
+	lc.dkgState = Idle
+	lc.dkgResult = nil
+	lc.dkgResultValid = false
+
+	return nil
+}
+
+func (lc *localChain) DKGParameters() (*DKGParameters, error) {
+	return &DKGParameters{
+		SubmissionTimeoutBlocks:       10,
+		ChallengePeriodBlocks:         15,
+		ApprovePrecedencePeriodBlocks: 5,
+	}, nil
+}
+
+func (lc *localChain) OnHeartbeatRequested(
+	handler func(event *HeartbeatRequestedEvent),
 ) subscription.EventSubscription {
 	panic("unsupported")
 }
 
+func (lc *localChain) operatorAddress() (chain.Address, error) {
+	_, operatorPublicKey, err := lc.OperatorKeyPair()
+	if err != nil {
+		return "", err
+	}
+
+	return lc.Signing().PublicKeyToAddress(operatorPublicKey)
+}
+
 // Connect sets up the local chain.
-func Connect(
-	groupSize int,
-	groupQuorum int,
-	honestThreshold int,
-) *localChain {
+func Connect() *localChain {
 	operatorPrivateKey, _, err := operator.GenerateKeyPair(local_v1.DefaultCurve)
 	if err != nil {
 		panic(err)
 	}
 
-	return ConnectWithKey(
-		groupSize,
-		groupQuorum,
-		honestThreshold,
-		operatorPrivateKey,
-	)
+	return ConnectWithKey(operatorPrivateKey)
 }
 
 // ConnectWithKey sets up the local chain using the provided operator private
 // key.
-func ConnectWithKey(
-	groupSize int,
-	groupQuorum int,
-	honestThreshold int,
-	operatorPrivateKey *operator.PrivateKey,
-) *localChain {
+func ConnectWithKey(operatorPrivateKey *operator.PrivateKey) *localChain {
 	blockCounter, _ := local_v1.BlockCounter()
 
-	chainConfig := &ChainConfig{
-		GroupSize:       groupSize,
-		GroupQuorum:     groupQuorum,
-		HonestThreshold: honestThreshold,
-	}
-
-	return &localChain{
+	localChain := &localChain{
 		dkgResultSubmissionHandlers: make(
 			map[int]func(submission *DKGResultSubmittedEvent),
 		),
+		dkgResultApprovalHandlers: make(
+			map[int]func(submission *DKGResultApprovedEvent),
+		),
+		dkgResultChallengeHandlers: make(
+			map[int]func(submission *DKGResultChallengedEvent),
+		),
 		blockCounter:       blockCounter,
-		chainConfig:        chainConfig,
 		operatorPrivateKey: operatorPrivateKey,
 	}
+
+	return localChain
+}
+
+func computeDkgChainResultHash(result *DKGChainResult) DKGChainResultHash {
+	return sha3.Sum256(result.GroupPublicKey)
+}
+
+func generateHandlerID() int {
+	// #nosec G404 (insecure random number source (rand))
+	// Local chain implementation doesn't require secure randomness.
+	return rand.Int()
 }
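The local test chain above models the on-chain DKG state machine as Idle -> AwaitingResult -> Challenge -> Idle. A hedged usage sketch for package-internal tests, assuming a prepared *DKGChainResult value named result and that a freshly connected chain starts in the Idle state:

```go
lc := Connect()

_ = lc.startDKG()               // Idle -> AwaitingResult
_ = lc.SubmitDKGResult(result)  // AwaitingResult -> Challenge; notifies submission handlers
_ = lc.ApproveDKGResult(result) // Challenge -> Idle, allowed only while the stored result is valid
```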
diff --git a/pkg/tbtc/deduplicator.go b/pkg/tbtc/deduplicator.go
index 6338306ac3..d0d3faa94c 100644
--- a/pkg/tbtc/deduplicator.go
+++ b/pkg/tbtc/deduplicator.go
@@ -1,15 +1,21 @@
 package tbtc
 
 import (
-	"github.com/keep-network/keep-common/pkg/cache"
+	"encoding/hex"
 	"math/big"
+	"strconv"
 	"time"
+
+	"github.com/keep-network/keep-common/pkg/cache"
 )
 
 const (
 	// DKGSeedCachePeriod is the time period the cache maintains
 	// the DKG seed corresponding to a DKG instance.
 	DKGSeedCachePeriod = 7 * 24 * time.Hour
+	// DKGResultHashCachePeriod is the time period the cache maintains
+	// a given DKG result hash.
+	DKGResultHashCachePeriod = 7 * 24 * time.Hour
 )
 
 // deduplicator decides whether the given event should be handled by the
@@ -24,13 +30,16 @@ const (
 //
 // Those events are supported:
 // - DKG started
+// - DKG result submitted
 type deduplicator struct {
-	dkgSeedCache *cache.TimeCache
+	dkgSeedCache       *cache.TimeCache
+	dkgResultHashCache *cache.TimeCache
 }
 
 func newDeduplicator() *deduplicator {
 	return &deduplicator{
-		dkgSeedCache: cache.NewTimeCache(DKGSeedCachePeriod),
+		dkgSeedCache:       cache.NewTimeCache(DKGSeedCachePeriod),
+		dkgResultHashCache: cache.NewTimeCache(DKGResultHashCachePeriod),
 	}
 }
 
@@ -55,3 +64,29 @@ func (d *deduplicator) notifyDKGStarted(
 	// with the execution.
 	return false
 }
+
+// notifyDKGResultSubmitted notifies that the client wants to start some
+// actions upon the DKG result submission. It returns a boolean indicating
+// whether the client should proceed with the actions or ignore the event
+// as a duplicate.
+func (d *deduplicator) notifyDKGResultSubmitted(
+	newDKGResultSeed *big.Int,
+	newDKGResultHash DKGChainResultHash,
+	newDKGResultBlock uint64,
+) bool {
+	d.dkgResultHashCache.Sweep()
+
+	cacheKey := newDKGResultSeed.Text(16) +
+		hex.EncodeToString(newDKGResultHash[:]) +
+		strconv.Itoa(int(newDKGResultBlock))
+
+	// If the key is not in the cache, that means the result was not handled
+	// yet and the client should proceed with the execution.
+	if !d.dkgResultHashCache.Has(cacheKey) {
+		d.dkgResultHashCache.Add(cacheKey)
+		return true
+	}
+
+	// Otherwise, the DKG result is a duplicate and the client should not
+	// proceed with the execution.
+	return false
+}
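For illustration, a submission with seed 100, a 32-byte result hash starting with 0x9232…, and block 500 yields the cache key "64" + "9232…" + "500": the seed is hex-encoded via Text(16), the hash is hex-encoded, and the block number is rendered in decimal. Changing any of the three parts produces a distinct key, which is exactly what the test below exercises.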
diff --git a/pkg/tbtc/deduplicator_test.go b/pkg/tbtc/deduplicator_test.go
index 0fe3ec74bc..4fdeb5ec7c 100644
--- a/pkg/tbtc/deduplicator_test.go
+++ b/pkg/tbtc/deduplicator_test.go
@@ -1,13 +1,16 @@
 package tbtc
 
 import (
-	"github.com/keep-network/keep-common/pkg/cache"
+	"encoding/hex"
 	"math/big"
 	"testing"
 	"time"
+
+	"github.com/keep-network/keep-common/pkg/cache"
 )
 
 const testDKGSeedCachePeriod = 1 * time.Second
+const testDKGResultHashCachePeriod = 1 * time.Second
 
 func TestNotifyDKGStarted(t *testing.T) {
 	deduplicator := deduplicator{
@@ -44,3 +47,68 @@ func TestNotifyDKGStarted(t *testing.T) {
 		t.Fatal("should be allowed to join DKG")
 	}
 }
+
+func TestNotifyDKGResultSubmitted(t *testing.T) {
+	deduplicator := deduplicator{
+		dkgResultHashCache: cache.NewTimeCache(testDKGResultHashCachePeriod),
+	}
+
+	hash1Bytes, err := hex.DecodeString("92327ddff69a2b8c7ae787c5d590a2f14586089e6339e942d56e82aa42052cd9")
+	if err != nil {
+		t.Fatal(err)
+	}
+	var hash1 [32]byte
+	copy(hash1[:], hash1Bytes)
+
+	hash2Bytes, err := hex.DecodeString("23c0062913c4614bdff07f94475ceb4c585df53f71611776c3521ed8f8785913")
+	if err != nil {
+		t.Fatal(err)
+	}
+	var hash2 [32]byte
+	copy(hash2[:], hash2Bytes)
+
+	// Add the original parameters.
+	canProcess := deduplicator.notifyDKGResultSubmitted(big.NewInt(100), hash1, 500)
+	if !canProcess {
+		t.Fatal("should be allowed to process")
+	}
+
+	// Add with different seed.
+	canProcess = deduplicator.notifyDKGResultSubmitted(big.NewInt(101), hash1, 500)
+	if !canProcess {
+		t.Fatal("should be allowed to process")
+	}
+
+	// Add with different result hash.
+	canProcess = deduplicator.notifyDKGResultSubmitted(big.NewInt(100), hash2, 500)
+	if !canProcess {
+		t.Fatal("should be allowed to process")
+	}
+
+	// Add with different result block.
+	canProcess = deduplicator.notifyDKGResultSubmitted(big.NewInt(100), hash1, 501)
+	if !canProcess {
+		t.Fatal("should be allowed to process")
+	}
+
+	// Add with all different parameters.
+	canProcess = deduplicator.notifyDKGResultSubmitted(big.NewInt(101), hash2, 501)
+	if !canProcess {
+		t.Fatal("should be allowed to process")
+	}
+
+	// Add the original parameters before caching period elapses.
+	canProcess = deduplicator.notifyDKGResultSubmitted(big.NewInt(100), hash1, 500)
+	if canProcess {
+		t.Fatal("should not be allowed to process")
+	}
+
+	// Wait until caching period elapses.
+	time.Sleep(testDKGResultHashCachePeriod)
+
+	// Add the original parameters again.
+	canProcess = deduplicator.notifyDKGResultSubmitted(big.NewInt(100), hash1, 500)
+	if !canProcess {
+		t.Fatal("should be allowed to process")
+	}
+}
diff --git a/pkg/tbtc/dkg.go b/pkg/tbtc/dkg.go
index ce8384a072..e1e066cd40 100644
--- a/pkg/tbtc/dkg.go
+++ b/pkg/tbtc/dkg.go
@@ -2,10 +2,10 @@ package tbtc
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"math/big"
 	"sort"
-	"time"
 
 	"go.uber.org/zap"
 
@@ -19,23 +19,33 @@ import (
 	"github.com/keep-network/keep-core/pkg/tecdsa/dkg"
 )
 
-// TODO: Revisit those constants, especially dkgResultSubmissionDelayStep
-// which should be bigger once the contract integration is ready.
 const (
-	// dkgAttemptMaxBlockDuration determines the maximum block duration of a
-	// single DKG attempt.
-	dkgAttemptMaxBlockDuration = 150
-	// dkgResultSubmissionDelayStep determines the delay step that is used to
-	// calculate the submission delay time that should be respected by the
-	// given member to avoid all members submitting the same DKG result at the
-	// same time.
-	dkgResultSubmissionDelayStep = 10 * time.Second
+	// dkgResultSubmissionDelayStep determines the delay step in blocks that
+	// is used to calculate the submission delay period that should be respected
+	// by the given member to avoid all members submitting the same DKG result
+	// at the same time.
+	dkgResultSubmissionDelayStepBlocks = 15
+	// dkgResultApprovalDelayStepBlocks determines the delay step in blocks
+	// that is used to calculate the approval delay period that should be
+	// respected by the given member to avoid all members approving the same
+	// DKG result at the same time.
+	dkgResultApprovalDelayStepBlocks = 15
+	// dkgResultChallengeConfirmationBlocks determines the block length of
+	// the confirmation period that is preserved after a DKG result challenge
+	// submission. Once the period elapses, the DKG state is checked to confirm
+	// the challenge was accepted successfully.
+	dkgResultChallengeConfirmationBlocks = 20
 )
 
 // dkgExecutor is a component responsible for the full execution of ECDSA
 // Distributed Key Generation: determining members selected to the signing
 // group, executing off-chain protocol, and publishing the result to the chain.
 type dkgExecutor struct {
+	groupParameters *GroupParameters
+
+	operatorIDFn    func() (chain.OperatorID, error)
+	operatorAddress chain.Address
+
 	chain          Chain
 	netProvider    net.Provider
 	walletRegistry *walletRegistry
@@ -50,6 +60,9 @@ type dkgExecutor struct {
 // newDkgExecutor creates a new instance of dkgExecutor struct. There should
 // be only one instance of dkgExecutor.
 func newDkgExecutor(
+	groupParameters *GroupParameters,
+	operatorIDFn func() (chain.OperatorID, error),
+	operatorAddress chain.Address,
 	chain Chain,
 	netProvider net.Provider,
 	walletRegistry *walletRegistry,
@@ -71,12 +84,15 @@ func newDkgExecutor(
 	)
 
 	return &dkgExecutor{
-		chain:          chain,
-		netProvider:    netProvider,
-		walletRegistry: walletRegistry,
-		protocolLatch:  protocolLatch,
-		tecdsaExecutor: tecdsaExecutor,
-		waitForBlockFn: waitForBlockFn,
+		groupParameters: groupParameters,
+		operatorIDFn:    operatorIDFn,
+		operatorAddress: operatorAddress,
+		chain:           chain,
+		netProvider:     netProvider,
+		walletRegistry:  walletRegistry,
+		protocolLatch:   protocolLatch,
+		tecdsaExecutor:  tecdsaExecutor,
+		waitForBlockFn:  waitForBlockFn,
 	}
 }
 
@@ -91,14 +107,16 @@ func (de *dkgExecutor) preParamsCount() int {
 // the result to the chain.
 func (de *dkgExecutor) executeDkgIfEligible(
 	seed *big.Int,
-	startBlockNumber uint64,
+	startBlock uint64,
 ) {
 	dkgLogger := logger.With(
 		zap.String("seed", fmt.Sprintf("0x%x", seed)),
 	)
 
 	dkgLogger.Info("checking eligibility for DKG")
-	memberIndexes, selectedSigningGroupOperators, err := de.checkEligibility(dkgLogger, seed)
+	memberIndexes, groupSelectionResult, err := de.checkEligibility(
+		dkgLogger,
+	)
 	if err != nil {
 		dkgLogger.Errorf("could not check eligibility for DKG: [%v]", err)
 		return
@@ -125,8 +143,8 @@ func (de *dkgExecutor) executeDkgIfEligible(
 			dkgLogger,
 			seed,
 			memberIndexes,
-			selectedSigningGroupOperators,
-			startBlockNumber,
+			groupSelectionResult,
+			startBlock,
 		)
 	} else {
 		dkgLogger.Infof("not eligible for DKG")
@@ -138,47 +156,40 @@ func (de *dkgExecutor) executeDkgIfEligible(
 // - Indexes of members selected to the signing group and controlled by this
 //   operator. The indexes are in range [1, `groupSize`]. The slice is nil if
 //   none of the selected signing group members is controlled by this operator.
-// - Addresses of all signing group members. There are always `groupSize`
-//   elements in this slice.
+// - Group selection result holding chain.OperatorID and chain.Address for
+//   operators selected to the signing group. There are always `groupSize`
+//   selected operators.
 func (de *dkgExecutor) checkEligibility(
 	dkgLogger log.StandardLogger,
-	seed *big.Int,
-) ([]uint8, chain.Addresses, error) {
-	selectedSigningGroupOperators, err := de.chain.SelectGroup(seed)
+) ([]uint8, *GroupSelectionResult, error) {
+	groupSelectionResult, err := de.chain.SelectGroup()
 	if err != nil {
 		return nil, nil, fmt.Errorf("selecting group not possible: [%v]", err)
 	}
 
-	dkgLogger.Infof("selected group members for DKG = %s", selectedSigningGroupOperators)
+	dkgLogger.Infof(
+		"selected group members for DKG = %s",
+		groupSelectionResult.OperatorsAddresses,
+	)
 
-	if len(selectedSigningGroupOperators) > de.chain.GetConfig().GroupSize {
+	if len(groupSelectionResult.OperatorsAddresses) > de.groupParameters.GroupSize {
 		return nil, nil, fmt.Errorf(
 			"group size larger than supported: [%v]",
-			len(selectedSigningGroupOperators),
+			len(groupSelectionResult.OperatorsAddresses),
 		)
 	}
 
-	_, operatorPublicKey, err := de.chain.OperatorKeyPair()
-	if err != nil {
-		return nil, nil, fmt.Errorf("failed to get operator public key: [%v]", err)
-	}
-
-	operatorAddress, err := de.chain.Signing().PublicKeyToAddress(operatorPublicKey)
-	if err != nil {
-		return nil, nil, fmt.Errorf("failed to get operator address: [%v]", err)
-	}
-
 	indexes := make([]uint8, 0)
-	for index, operator := range selectedSigningGroupOperators {
+	for index, operator := range groupSelectionResult.OperatorsAddresses {
 		// See if we are amongst those chosen
-		if operator == operatorAddress {
+		if operator == de.operatorAddress {
 			// The group member index should be in range [1, groupSize] so we
 			// need to add 1.
 			indexes = append(indexes, uint8(index)+1)
 		}
 	}
 
-	return indexes, selectedSigningGroupOperators, nil
+	return indexes, groupSelectionResult, nil
 }
 
 // setupBroadcastChannel creates and initializes broadcast channel for the
@@ -198,7 +209,6 @@ func (de *dkgExecutor) setupBroadcastChannel(
 	}
 
 	dkg.RegisterUnmarshallers(broadcastChannel)
-	registerStopPillUnmarshaller(broadcastChannel)
 
 	err = broadcastChannel.SetFilter(membershipValidator.IsInGroup)
 	if err != nil {
@@ -219,12 +229,12 @@ func (de *dkgExecutor) generateSigningGroup(
 	dkgLogger *zap.SugaredLogger,
 	seed *big.Int,
 	memberIndexes []uint8,
-	selectedSigningGroupOperators chain.Addresses,
-	startBlockNumber uint64,
+	groupSelectionResult *GroupSelectionResult,
+	startBlock uint64,
 ) {
 	membershipValidator := group.NewMembershipValidator(
 		dkgLogger,
-		selectedSigningGroupOperators,
+		groupSelectionResult.OperatorsAddresses,
 		de.chain.Signing(),
 	)
 
@@ -234,7 +244,13 @@ func (de *dkgExecutor) generateSigningGroup(
 		return
 	}
 
-	chainConfig := de.chain.GetConfig()
+	dkgParameters, err := de.chain.DKGParameters()
+	if err != nil {
+		dkgLogger.Errorf("cannot get DKG parameters: [%v]", err)
+		return
+	}
+
+	dkgTimeoutBlock := startBlock + dkgParameters.SubmissionTimeoutBlocks
 
 	for _, index := range memberIndexes {
 		// Capture the member index for the goroutine.
@@ -244,6 +260,33 @@ func (de *dkgExecutor) generateSigningGroup(
 			de.protocolLatch.Lock()
 			defer de.protocolLatch.Unlock()
 
+			ctx, cancelCtx := withCancelOnBlock(
+				context.Background(),
+				dkgTimeoutBlock,
+				de.waitForBlockFn,
+			)
+			defer cancelCtx()
+
+			// TODO: This subscription has to be updated once we implement
+			//       re-submitting DKG result to the chain after a challenge.
+			//       See https://github.com/keep-network/keep-core/issues/3450
+			subscription := de.chain.OnDKGResultSubmitted(
+				func(event *DKGResultSubmittedEvent) {
+					defer cancelCtx()
+
+					dkgLogger.Infof(
+						"[member:%v] DKG result with group public "+
+							"key [0x%x] and result hash [0x%x] submitted "+
+							"at block [%v] by member [%v]",
+						memberIndex,
+						event.Result.GroupPublicKey,
+						event.ResultHash,
+						event.BlockNumber,
+						event.Result.SubmitterMemberIndex,
+					)
+				})
+			defer subscription.Unsubscribe()
+
 			announcer := announcer.New(
 				fmt.Sprintf("%v-%v", ProtocolName, "dkg"),
 				broadcastChannel,
@@ -253,67 +296,37 @@ func (de *dkgExecutor) generateSigningGroup(
 			retryLoop := newDkgRetryLoop(
 				dkgLogger,
 				seed,
-				startBlockNumber,
+				startBlock,
 				memberIndex,
-				selectedSigningGroupOperators,
-				chainConfig,
+				groupSelectionResult.OperatorsAddresses,
+				de.groupParameters,
 				announcer,
 			)
 
-			// TODO: For this client iteration, the retry loop is started
-			//       with a 168h timeout and a stop pill sent by any group
-			//       member. Once the WalletRegistry is integrated, the stop
-			//       signal should be generated by observing the DKG result
-			//       submission or timeout.
-			loopCtx, cancelLoopCtx := context.WithTimeout(
-				context.Background(),
-				7*24*time.Hour,
-			)
-			defer cancelLoopCtx()
-			cancelDkgContextOnStopSignal(
-				loopCtx,
-				cancelLoopCtx,
-				broadcastChannel,
-				seed.Text(16),
-			)
-
 			result, err := retryLoop.start(
-				loopCtx,
+				ctx,
 				de.waitForBlockFn,
 				func(attempt *dkgAttemptParams) (*dkg.Result, error) {
 					dkgAttemptLogger := dkgLogger.With(
 						zap.Uint("attempt", attempt.number),
 						zap.Uint64("attemptStartBlock", attempt.startBlock),
+						zap.Uint64("attemptTimeoutBlock", attempt.timeoutBlock),
 					)
 
 					dkgAttemptLogger.Infof(
 						"[member:%v] scheduled dkg attempt "+
 							"with [%v] group members (excluded: [%v])",
 						memberIndex,
-						chainConfig.GroupSize-len(attempt.excludedMembersIndexes),
+						de.groupParameters.GroupSize-len(attempt.excludedMembersIndexes),
 						attempt.excludedMembersIndexes,
 					)
 
 					// Set up the attempt timeout signal.
-					attemptCtx, cancelAttemptCtx := context.WithCancel(
-						loopCtx,
+					attemptCtx, _ := withCancelOnBlock(
+						ctx,
+						attempt.timeoutBlock,
+						de.waitForBlockFn,
 					)
-					go func() {
-						defer cancelAttemptCtx()
-
-						err := de.waitForBlockFn(
-							loopCtx,
-							attempt.startBlock+dkgAttemptMaxBlockDuration,
-						)
-						if err != nil {
-							dkgAttemptLogger.Warnf(
-								"[member:%v] failed waiting for "+
-									"attempt stop signal: [%v]",
-								memberIndex,
-								err,
-							)
-						}
-					}()
 
 					// sessionID must be different for each attempt.
 					sessionID := fmt.Sprintf(
@@ -328,8 +341,8 @@ func (de *dkgExecutor) generateSigningGroup(
 						seed,
 						sessionID,
 						memberIndex,
-						chainConfig.GroupSize,
-						chainConfig.DishonestThreshold(),
+						de.groupParameters.GroupSize,
+						de.groupParameters.DishonestThreshold(),
 						attempt.excludedMembersIndexes,
 						broadcastChannel,
 						membershipValidator,
@@ -344,56 +357,31 @@ func (de *dkgExecutor) generateSigningGroup(
 						return nil, err
 					}
 
-					// Schedule the stop pill to be sent a fixed amount of
-					// time after the result is returned. Do not do it
-					// immediately as other members can be very close
-					// to produce the result as well. This mechanism should
-					// be more sophisticated but since it is temporary, we
-					// can live with it for now.
-					go func() {
-						time.Sleep(1 * time.Minute)
-						if err := sendDkgStopPill(
-							loopCtx,
-							broadcastChannel,
-							seed.Text(16),
-							attempt.number,
-						); err != nil {
-							dkgLogger.Errorf(
-								"[member:%v] could not send the stop pill: [%v]",
-								memberIndex,
-								err,
-							)
-						}
-					}()
-
 					return result, nil
 				},
 			)
 			if err != nil {
+				if errors.Is(err, context.Canceled) {
+					dkgLogger.Infof(
+						"[member:%v] DKG is no longer awaiting the result; "+
+							"aborting DKG protocol execution",
+						memberIndex,
+					)
+					return
+				}
+
 				dkgLogger.Errorf(
-					"[member:%v] failed to execute dkg: [%v]",
+					"[member:%v] failed to execute DKG: [%v]",
 					memberIndex,
 					err,
 				)
 				return
 			}
-			// TODO: This condition should go away once we integrate
-			// WalletRegistry contract. In this scenario, member received
-			// a StopPill from some other group member and it means that
-			// the result has been produced but the current member did not
-			// participate in the work so they do not know the result.
-			if result == nil {
-				dkgLogger.Infof(
-					"[member:%v] dkg retry loop received stop signal",
-					memberIndex,
-				)
-				return
-			}
 
 			signer, err := de.registerSigner(
 				result,
 				memberIndex,
-				selectedSigningGroupOperators,
+				groupSelectionResult.OperatorsAddresses,
 			)
 			if err != nil {
 				dkgLogger.Errorf(
@@ -405,20 +393,33 @@ func (de *dkgExecutor) generateSigningGroup(
 
 			dkgLogger.Infof("registered %s", signer)
 
-			err = de.submitDkgResult(
+			err = de.publishDkgResult(
+				ctx,
 				dkgLogger,
 				seed,
 				memberIndex,
 				broadcastChannel,
 				membershipValidator,
 				result,
+				groupSelectionResult,
+				startBlock,
 			)
 			if err != nil {
+				if errors.Is(err, context.Canceled) {
+					dkgLogger.Infof(
+						"[member:%v] DKG is no longer awaiting the result; "+
+							"aborting DKG result publication",
+						memberIndex,
+					)
+					return
+				}
+
 				dkgLogger.Errorf(
-					"[member:%v] DKG result publication process failed [%v]",
+					"[member:%v] DKG result publication failed [%v]",
 					memberIndex,
 					err,
 				)
+				return
 			}
 		}()
 	}
@@ -433,7 +434,6 @@ func (de *dkgExecutor) registerSigner(
 	memberIndex group.MemberIndex,
 	selectedSigningGroupOperators chain.Addresses,
 ) (*signer, error) {
-	chainConfig := de.chain.GetConfig()
 	// Final signing group may differ from the original DKG
 	// group outputted by the sortition protocol. One need to
 	// determine the final signing group based on the selected
@@ -443,7 +443,7 @@ func (de *dkgExecutor) registerSigner(
 		finalSigningGroup(
 			selectedSigningGroupOperators,
 			operatingMemberIndexes,
-			chainConfig,
+			de.groupParameters,
 		)
 	if err != nil {
 		return nil, fmt.Errorf("failed to resolve final signing group members")
@@ -480,49 +480,236 @@ func (de *dkgExecutor) registerSigner(
 	return signer, nil
 }
 
-// submitDkgResult submits the DKG result to the chain.
-func (de *dkgExecutor) submitDkgResult(
+// publishDkgResult performs the DKG result publication process.
+func (de *dkgExecutor) publishDkgResult(
+	ctx context.Context,
 	dkgLogger log.StandardLogger,
 	seed *big.Int,
 	memberIndex group.MemberIndex,
 	broadcastChannel net.BroadcastChannel,
 	membershipValidator *group.MembershipValidator,
-	result *dkg.Result,
+	dkgResult *dkg.Result,
+	groupSelectionResult *GroupSelectionResult,
+	startBlock uint64,
 ) error {
-	// Set up the publication stop signal that should allow to
-	// perform all the result-signing-related actions and
-	// handle the worst case when the result is submitted by the
-	// last group member.
-	chainConfig := de.chain.GetConfig()
-	publicationTimeout := time.Duration(chainConfig.GroupSize) *
-		dkgResultSubmissionDelayStep
-	publicationCtx, cancelPublicationCtx := context.WithTimeout(
-		context.Background(),
-		publicationTimeout,
-	)
-	// TODO: Call cancelPublicationCtx() when the result is
-	//       available and published and remove this goroutine.
-	//       This goroutine is duplicating context.WithTimeout work
-	//       right now but is here to emphasize the need of manual
-	//       context cancellation.
-	go func() {
-		defer cancelPublicationCtx()
-		time.Sleep(publicationTimeout)
-	}()
-
 	return dkg.Publish(
-		publicationCtx,
+		ctx,
 		dkgLogger,
 		seed.Text(16),
 		memberIndex,
 		broadcastChannel,
 		membershipValidator,
-		newDkgResultSigner(de.chain),
-		newDkgResultSubmitter(dkgLogger, de.chain),
-		result,
+		newDkgResultSigner(de.chain, startBlock),
+		newDkgResultSubmitter(
+			dkgLogger,
+			de.chain,
+			de.groupParameters,
+			groupSelectionResult,
+			de.waitForBlockFn,
+		),
+		dkgResult,
 	)
 }
 
+// executeDkgValidation performs the submitted DKG result validation process.
+// If the result is not valid, this function submits an on-chain result
+// challenge. If the result is valid and the given node was involved in the DKG,
+// this function schedules an on-chain approval that is submitted once the
+// challenge period elapses.
+func (de *dkgExecutor) executeDkgValidation(
+	seed *big.Int,
+	submissionBlock uint64,
+	result *DKGChainResult,
+	resultHash [32]byte,
+) {
+	dkgLogger := logger.With(
+		zap.String("seed", fmt.Sprintf("0x%x", seed)),
+		zap.String("groupPublicKey", fmt.Sprintf("0x%x", result.GroupPublicKey)),
+		zap.String("resultHash", fmt.Sprintf("0x%x", resultHash)),
+	)
+
+	dkgLogger.Infof("starting DKG result validation")
+
+	isValid, err := de.chain.IsDKGResultValid(result)
+	if err != nil {
+		dkgLogger.Errorf("cannot validate DKG result: [%v]", err)
+		return
+	}
+
+	if !isValid {
+		dkgLogger.Infof("DKG result is invalid")
+
+		i := uint64(0)
+
+		// Challenges are done along with DKG state confirmations. This is
+		// needed to handle chain reorgs that may wipe out the block holding
+		// the challenge transaction. The state check done upon the confirmation
+		// block makes sure the submitted challenge changed the DKG state
+		// as expected. If the DKG state was not changed, the challenge is
+		// re-submitted.
+		for {
+			i++
+
+			err = de.chain.ChallengeDKGResult(result)
+			if err != nil {
+				dkgLogger.Errorf(
+					"cannot challenge invalid DKG result: [%v]",
+					err,
+				)
+				return
+			}
+
+			confirmationBlock := submissionBlock +
+				(i * dkgResultChallengeConfirmationBlocks)
+
+			dkgLogger.Infof(
+				"challenging invalid DKG result; waiting for "+
+					"block [%v] to confirm DKG state",
+				confirmationBlock,
+			)
+
+			err := de.waitForBlockFn(context.Background(), confirmationBlock)
+			if err != nil {
+				dkgLogger.Errorf(
+					"error while waiting for challenge confirmation: [%v]",
+					err,
+				)
+				return
+			}
+
+			state, err := de.chain.GetDKGState()
+			if err != nil {
+				dkgLogger.Errorf("cannot check DKG state: [%v]", err)
+				return
+			}
+
+			if state != Challenge {
+				dkgLogger.Infof(
+					"invalid DKG result challenged successfully",
+				)
+				return
+			}
+
+			dkgLogger.Infof(
+				"invalid DKG result still not challenged; retrying",
+			)
+		}
+	}
+
+	dkgLogger.Infof("DKG result is valid")
+
+	operatorID, err := de.operatorIDFn()
+	if err != nil {
+		dkgLogger.Errorf("cannot get node's operator ID: [%v]", err)
+		return
+	}
+
+	// Determine the member indexes controlled by this node's operator.
+	memberIndexes := make([]group.MemberIndex, 0)
+	for index, memberOperatorID := range result.Members {
+		if memberOperatorID == operatorID {
+			// The group member index should be in range [1, groupSize] so we
+			// need to add 1.
+			memberIndexes = append(memberIndexes, group.MemberIndex(index+1))
+		}
+	}
+
+	if len(memberIndexes) == 0 {
+		dkgLogger.Infof(
+			"not eligible for DKG result approval; my operator "+
+				"ID [%v] is not among DKG participants [%v]",
+			operatorID,
+			result.Members,
+		)
+		return
+	}
+
+	dkgLogger.Infof("scheduling DKG result approval")
+
+	parameters, err := de.chain.DKGParameters()
+	if err != nil {
+		dkgLogger.Errorf("cannot get current DKG parameters: [%v]", err)
+		return
+	}
+
+	// The challenge period starts at the result submission block and lasts
+	// for challengePeriodBlocks.
+	challengePeriodEndBlock := submissionBlock + parameters.ChallengePeriodBlocks
+	// The approval is possible one block after the challenge period end.
+	// The result submitter has precedence for approvePrecedencePeriodBlocks.
+	approvePrecedencePeriodStartBlock := challengePeriodEndBlock + 1
+	// Everyone else can approve once the precedence period ends.
+	approvePeriodStartBlock := approvePrecedencePeriodStartBlock +
+		parameters.ApprovePrecedencePeriodBlocks
+
+	for _, currentMemberIndex := range memberIndexes {
+		go func(memberIndex group.MemberIndex) {
+			var approveBlock uint64
+
+			if memberIndex == result.SubmitterMemberIndex {
+				// The submitter can approve earlier, during the precedence
+				// period.
+				approveBlock = approvePrecedencePeriodStartBlock
+			} else {
+				// Everyone else must approve after the precedence period ends.
+				// Each member applies a delay proportional to its index
+				// to avoid simultaneous approvals.
+				delayBlocks := uint64(memberIndex-1) * dkgResultApprovalDelayStepBlocks
+				approveBlock = approvePeriodStartBlock + delayBlocks
+			}
+
+			dkgLogger.Infof(
+				"[member:%v] waiting for block [%v] to approve DKG result",
+				memberIndex,
+				approveBlock,
+			)
+
+			ctx, cancelCtx := context.WithCancel(context.Background())
+			defer cancelCtx()
+
+			subscription := de.chain.OnDKGResultApproved(
+				func(event *DKGResultApprovedEvent) {
+					cancelCtx()
+				},
+			)
+			defer subscription.Unsubscribe()
+
+			err := de.waitForBlockFn(ctx, approveBlock)
+			if err != nil {
+				dkgLogger.Errorf(
+					"[member:%v] error while waiting for DKG result "+
+						"approve block: [%v]",
+					memberIndex,
+					err,
+				)
+				return
+			}
+
+			// If the context got cancelled that means the result was approved
+			// by someone else.
+			if ctx.Err() != nil {
+				dkgLogger.Infof(
+					"[member:%v] DKG result approved by someone else",
+					memberIndex,
+				)
+				return
+			}
+
+			err = de.chain.ApproveDKGResult(result)
+			if err != nil {
+				dkgLogger.Errorf(
+					"[member:%v] cannot approve DKG result: [%v]",
+					memberIndex,
+					err,
+				)
+				return
+			}
+
+			dkgLogger.Infof("[member:%v] approving DKG result", memberIndex)
+		}(currentMemberIndex)
+	}
+}
+
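The approval schedule above is plain block arithmetic. As a worked example, using the local test chain's parameters from this diff (ChallengePeriodBlocks = 15, ApprovePrecedencePeriodBlocks = 5) and dkgResultApprovalDelayStepBlocks = 15, a result submitted at block 1000 gives:

    challengePeriodEndBlock           = 1000 + 15       = 1015
    approvePrecedencePeriodStartBlock = 1015 + 1        = 1016   (submitter may approve)
    approvePeriodStartBlock           = 1016 + 5        = 1021   (other members may approve)
    approveBlock for member index 3   = 1021 + (3-1)*15 = 1051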
 // finalSigningGroup takes three parameters:
 //   - selectedOperators: Contains addresses of all selected operators. Slice
 //     length equals to the groupSize. Each element with index N corresponds
@@ -549,17 +736,20 @@ func (de *dkgExecutor) submitDkgResult(
 // operatingMembersIndexes: [5, 1, 3]
 // finalOperators: [0xAA, 0xCC, 0xEE]
 // finalMembersIndexes: [1:1, 3:2, 5:3]
+//
+// Please see docs of IdentityConverter from pkg/tecdsa/common for more
+// information about shifting indexes.
 func finalSigningGroup(
 	selectedOperators []chain.Address,
 	operatingMembersIndexes []group.MemberIndex,
-	chainConfig *ChainConfig,
+	groupParameters *GroupParameters,
 ) (
 	[]chain.Address,
 	map[group.MemberIndex]group.MemberIndex,
 	error,
 ) {
-	if len(selectedOperators) != chainConfig.GroupSize ||
-		len(operatingMembersIndexes) < chainConfig.GroupQuorum {
+	if len(selectedOperators) != groupParameters.GroupSize ||
+		len(operatingMembersIndexes) < groupParameters.GroupQuorum {
 		return nil, nil, fmt.Errorf("invalid input parameters")
 	}
 
diff --git a/pkg/tbtc/dkg_loop.go b/pkg/tbtc/dkg_loop.go
index 56d5af861b..bb8ce7392c 100644
--- a/pkg/tbtc/dkg_loop.go
+++ b/pkg/tbtc/dkg_loop.go
@@ -15,6 +15,32 @@ import (
 	"golang.org/x/exp/slices"
 )
 
+const (
+	// dkgAttemptAnnouncementDelayBlocks determines the duration of the
+	// announcement phase delay that is preserved before starting the
+	// announcement phase.
+	dkgAttemptAnnouncementDelayBlocks = 1
+	// dkgAttemptAnnouncementActiveBlocks determines the duration of the
+	// announcement phase that is performed at the beginning of each DKG
+	// attempt.
+	dkgAttemptAnnouncementActiveBlocks = 5
+	// dkgAttemptMaximumProtocolBlocks determines the maximum block duration
+	// of the actual protocol computations.
+	dkgAttemptMaximumProtocolBlocks = 150
+	// dkgAttemptCoolDownBlocks determines the duration of the cool down
+	// period that is preserved between subsequent DKG attempts.
+	dkgAttemptCoolDownBlocks = 5
+)
+
+// dkgAttemptMaximumBlocks returns the maximum block duration of a single
+// DKG attempt.
+func dkgAttemptMaximumBlocks() uint {
+	return dkgAttemptAnnouncementDelayBlocks +
+		dkgAttemptAnnouncementActiveBlocks +
+		dkgAttemptMaximumProtocolBlocks +
+		dkgAttemptCoolDownBlocks
+}
+
 // dkgAnnouncer represents a component responsible for exchanging readiness
 // announcements for the given DKG attempt for the given seed.
 type dkgAnnouncer interface {
@@ -36,11 +62,9 @@ type dkgRetryLoop struct {
 	memberIndex       group.MemberIndex
 	selectedOperators chain.Addresses
 
-	chainConfig *ChainConfig
+	groupParameters *GroupParameters
 
-	announcer                dkgAnnouncer
-	announcementDelayBlocks  uint64
-	announcementActiveBlocks uint64
+	announcer dkgAnnouncer
 
 	attemptCounter    uint
 	attemptStartBlock uint64
@@ -56,7 +80,7 @@ func newDkgRetryLoop(
 	initialStartBlock uint64,
 	memberIndex group.MemberIndex,
 	selectedOperators chain.Addresses,
-	chainConfig *ChainConfig,
+	groupParameters *GroupParameters,
 	announcer dkgAnnouncer,
 ) *dkgRetryLoop {
 	// Compute the 8-byte seed needed for the random retry algorithm. We take
@@ -67,18 +91,16 @@ func newDkgRetryLoop(
 	attemptSeed := int64(binary.BigEndian.Uint64(seedSha256[:8]))
 
 	return &dkgRetryLoop{
-		logger:                   logger,
-		seed:                     seed,
-		memberIndex:              memberIndex,
-		selectedOperators:        selectedOperators,
-		chainConfig:              chainConfig,
-		announcer:                announcer,
-		announcementDelayBlocks:  1,
-		announcementActiveBlocks: 5,
-		attemptCounter:           0,
-		attemptStartBlock:        initialStartBlock,
-		attemptSeed:              attemptSeed,
-		attemptDelayBlocks:       5,
+		logger:             logger,
+		seed:               seed,
+		memberIndex:        memberIndex,
+		selectedOperators:  selectedOperators,
+		groupParameters:    groupParameters,
+		announcer:          announcer,
+		attemptCounter:     0,
+		attemptStartBlock:  initialStartBlock,
+		attemptSeed:        attemptSeed,
+		attemptDelayBlocks: 5,
 	}
 }
 
@@ -86,6 +108,7 @@ func newDkgRetryLoop(
 type dkgAttemptParams struct {
 	number                 uint
 	startBlock             uint64
+	timeoutBlock           uint64
 	excludedMembersIndexes []group.MemberIndex
 }
 
@@ -110,23 +133,20 @@ func (drl *dkgRetryLoop) start(
 		//
 		// That said, we need to increment the previous attempt start
 		// block by the number of blocks equal to the protocol duration and
-		// by some additional delay blocks. We need a small fixed delay in
+		// by some additional delay blocks. We need a small cool down in
 		// order to mitigate all corner cases where the actual attempt duration
 		// was slightly longer than the expected duration determined by the
-		// dkg.ProtocolBlocks function.
+		// dkgAttemptMaximumProtocolBlocks constant.
 		//
 		// For example, the attempt may fail at the end of the protocol but the
 		// error is returned after some time and more blocks than expected are
 		// mined in the meantime.
 		if drl.attemptCounter > 1 {
 			drl.attemptStartBlock = drl.attemptStartBlock +
-				drl.announcementDelayBlocks +
-				drl.announcementActiveBlocks +
-				dkgAttemptMaxBlockDuration +
-				drl.attemptDelayBlocks
+				uint64(dkgAttemptMaximumBlocks())
 		}
 
-		announcementStartBlock := drl.attemptStartBlock + drl.announcementDelayBlocks
+		announcementStartBlock := drl.attemptStartBlock + dkgAttemptAnnouncementDelayBlocks
 		err := waitForBlockFn(ctx, announcementStartBlock)
 		if err != nil {
 			return nil, fmt.Errorf(
@@ -140,7 +160,7 @@ func (drl *dkgRetryLoop) start(
 
 		// Set up the announcement phase stop signal.
 		announceCtx, cancelAnnounceCtx := context.WithCancel(ctx)
-		announcementEndBlock := announcementStartBlock + drl.announcementActiveBlocks
+		announcementEndBlock := announcementStartBlock + dkgAttemptAnnouncementActiveBlocks
 		go func() {
 			defer cancelAnnounceCtx()
 
@@ -180,10 +200,10 @@ func (drl *dkgRetryLoop) start(
 
 		// Check the loop stop signal.
 		if ctx.Err() != nil {
-			return nil, nil
+			return nil, ctx.Err()
 		}
 
-		if len(readyMembersIndexes) >= drl.chainConfig.GroupQuorum {
+		if len(readyMembersIndexes) >= drl.groupParameters.GroupQuorum {
 			drl.logger.Infof(
 				"[member:%v] completed announcement phase for attempt [%v] "+
 					"with quorum of [%v] members ready to perform DKG",
@@ -219,6 +239,8 @@ func (drl *dkgRetryLoop) start(
 			drl.memberIndex,
 		)
 
+		timeoutBlock := announcementEndBlock + dkgAttemptMaximumProtocolBlocks
+
 		var result *dkg.Result
 		var attemptErr error
 
@@ -226,6 +248,7 @@ func (drl *dkgRetryLoop) start(
 			result, attemptErr = dkgAttemptFn(&dkgAttemptParams{
 				number:                 drl.attemptCounter,
 				startBlock:             announcementEndBlock,
+				timeoutBlock:           timeoutBlock,
 				excludedMembersIndexes: excludedMembersIndexes,
 			})
 		} else {
@@ -322,7 +345,7 @@ func (drl *dkgRetryLoop) qualifiedOperatorsSet(
 		readyOperators,
 		drl.attemptSeed,
 		retryCount,
-		uint(drl.chainConfig.GroupQuorum),
+		uint(drl.groupParameters.GroupQuorum),
 	)
 	if err != nil {
 		return nil, fmt.Errorf(
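
Note on the block arithmetic introduced above: the retry loop now derives both the attempt spacing and the new timeoutBlock from the announcement and protocol constants. The sketch below is illustrative only and not part of the patch; the constant values are assumptions inferred from the "206 + n * (6 + 150 + 5)" comments in the updated tests, not verified against the package.

// Illustrative sketch of the retry-loop schedule under the assumed constants.
package main

import "fmt"

const (
	announcementDelayBlocks  = 1   // assumed dkgAttemptAnnouncementDelayBlocks
	announcementActiveBlocks = 5   // assumed dkgAttemptAnnouncementActiveBlocks
	maximumProtocolBlocks    = 150 // assumed dkgAttemptMaximumProtocolBlocks
	attemptDelayBlocks       = 5   // cool-down between attempts

)

// attemptMaximumBlocks mirrors what dkgAttemptMaximumBlocks() is expected to
// return: the total number of blocks a single attempt can occupy.
func attemptMaximumBlocks() uint64 {
	return announcementDelayBlocks + announcementActiveBlocks +
		maximumProtocolBlocks + attemptDelayBlocks
}

func main() {
	attemptStartBlock := uint64(200) // DKG start block used in the tests

	for attempt := 1; attempt <= 3; attempt++ {
		if attempt > 1 {
			attemptStartBlock += attemptMaximumBlocks()
		}

		announcementStartBlock := attemptStartBlock + announcementDelayBlocks
		announcementEndBlock := announcementStartBlock + announcementActiveBlocks
		timeoutBlock := announcementEndBlock + maximumProtocolBlocks

		// attempt 1: start=206 timeout=356
		// attempt 2: start=367 timeout=517
		// attempt 3: start=528 timeout=678
		fmt.Printf(
			"attempt %d: start=%d timeout=%d\n",
			attempt, announcementEndBlock, timeoutBlock,
		)
	}
}
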
diff --git a/pkg/tbtc/dkg_loop_test.go b/pkg/tbtc/dkg_loop_test.go
index 9b6599c705..ff6866e350 100644
--- a/pkg/tbtc/dkg_loop_test.go
+++ b/pkg/tbtc/dkg_loop_test.go
@@ -19,7 +19,7 @@ import (
 func TestDkgRetryLoop(t *testing.T) {
 	seed := big.NewInt(100)
 
-	chainConfig := &ChainConfig{
+	groupParameters := &GroupParameters{
 		GroupSize:       10,
 		GroupQuorum:     8,
 		HonestThreshold: 6,
@@ -80,6 +80,7 @@ func TestDkgRetryLoop(t *testing.T) {
 			expectedLastAttempt: &dkgAttemptParams{
 				number:                 1,
 				startBlock:             206,
+				timeoutBlock:           356, // start block + 150
 				excludedMembersIndexes: []group.MemberIndex{},
 			},
 		},
@@ -103,6 +104,7 @@ func TestDkgRetryLoop(t *testing.T) {
 			expectedLastAttempt: &dkgAttemptParams{
 				number:                 1,
 				startBlock:             206,
+				timeoutBlock:           356, // start block + 150
 				excludedMembersIndexes: []group.MemberIndex{9, 10},
 			},
 		},
@@ -129,6 +131,7 @@ func TestDkgRetryLoop(t *testing.T) {
 			expectedLastAttempt: &dkgAttemptParams{
 				number:                 2,
 				startBlock:             367, // 206 + 1 * (6 + 150 + 5)
+				timeoutBlock:           517, // start block + 150
 				excludedMembersIndexes: []group.MemberIndex{2, 5},
 			},
 		},
@@ -153,6 +156,7 @@ func TestDkgRetryLoop(t *testing.T) {
 			expectedLastAttempt: &dkgAttemptParams{
 				number:                 2,
 				startBlock:             367, // 206 + 1 * (6 + 150 + 5)
+				timeoutBlock:           517, // start block + 150
 				excludedMembersIndexes: []group.MemberIndex{2, 5},
 			},
 		},
@@ -180,6 +184,7 @@ func TestDkgRetryLoop(t *testing.T) {
 			expectedLastAttempt: &dkgAttemptParams{
 				number:                 2,
 				startBlock:             367, // 206 + 1 * (6 + 150 + 5)
+				timeoutBlock:           517, // start block + 150
 				excludedMembersIndexes: []group.MemberIndex{2, 5},
 			},
 		},
@@ -207,6 +212,7 @@ func TestDkgRetryLoop(t *testing.T) {
 			expectedLastAttempt: &dkgAttemptParams{
 				number:                 3,
 				startBlock:             528, // 206 + 2 * (6 + 150 + 5)
+				timeoutBlock:           678, // start block + 150
 				excludedMembersIndexes: []group.MemberIndex{9},
 			},
 		},
@@ -224,7 +230,7 @@ func TestDkgRetryLoop(t *testing.T) {
 			dkgAttemptFn: func(attempt *dkgAttemptParams) (*dkg.Result, error) {
 				return nil, fmt.Errorf("invalid data")
 			},
-			expectedErr:         nil,
+			expectedErr:         context.Canceled,
 			expectedResult:      nil,
 			expectedLastAttempt: nil,
 		},
@@ -243,7 +249,7 @@ func TestDkgRetryLoop(t *testing.T) {
 				200,
 				test.memberIndex,
 				selectedOperators,
-				chainConfig,
+				groupParameters,
 				announcer,
 			)
 
diff --git a/pkg/tbtc/dkg_submit.go b/pkg/tbtc/dkg_submit.go
index c77f7a9ef7..12aff54587 100644
--- a/pkg/tbtc/dkg_submit.go
+++ b/pkg/tbtc/dkg_submit.go
@@ -3,7 +3,6 @@ package tbtc
 import (
 	"context"
 	"fmt"
-	"time"
 
 	"github.com/ipfs/go-log/v2"
 	"github.com/keep-network/keep-core/pkg/protocol/group"
@@ -13,19 +12,34 @@ import (
 // dkgResultSigner is responsible for signing the DKG result and verification of
 // signatures generated by other group members.
 type dkgResultSigner struct {
-	chain Chain
+	chain         Chain
+	dkgStartBlock uint64
 }
 
-func newDkgResultSigner(chain Chain) *dkgResultSigner {
+func newDkgResultSigner(chain Chain, dkgStartBlock uint64) *dkgResultSigner {
 	return &dkgResultSigner{
-		chain: chain,
+		chain:         chain,
+		dkgStartBlock: dkgStartBlock,
 	}
 }
 
 // SignResult signs the provided DKG result. It returns the information
 // pertaining to the signing process: public key, signature, result hash.
 func (drs *dkgResultSigner) SignResult(result *dkg.Result) (*dkg.SignedResult, error) {
-	resultHash, err := drs.chain.CalculateDKGResultHash(result)
+	if result == nil {
+		return nil, fmt.Errorf("result is nil")
+	}
+
+	groupPublicKey, err := result.GroupPublicKey()
+	if err != nil {
+		return nil, fmt.Errorf("cannot get group public key: [%v]", err)
+	}
+
+	resultHash, err := drs.chain.CalculateDKGResultSignatureHash(
+		groupPublicKey,
+		result.MisbehavedMembersIndexes(),
+		drs.dkgStartBlock,
+	)
 	if err != nil {
 		return nil, fmt.Errorf(
 			"dkg result hash calculation failed [%w]",
@@ -63,48 +77,49 @@ func (drs *dkgResultSigner) VerifySignature(signedResult *dkg.SignedResult) (boo
 // dkgResultSubmitter is responsible for submitting the DKG result to the chain.
 type dkgResultSubmitter struct {
 	dkgLogger log.StandardLogger
-	chain     Chain
+
+	chain                Chain
+	groupParameters      *GroupParameters
+	groupSelectionResult *GroupSelectionResult
+
+	waitForBlockFn waitForBlockFn
 }
 
 func newDkgResultSubmitter(
 	dkgLogger log.StandardLogger,
 	chain Chain,
+	groupParameters *GroupParameters,
+	groupSelectionResult *GroupSelectionResult,
+	waitForBlockFn waitForBlockFn,
 ) *dkgResultSubmitter {
 	return &dkgResultSubmitter{
-		dkgLogger: dkgLogger,
-		chain:     chain,
+		dkgLogger:            dkgLogger,
+		chain:                chain,
+		groupSelectionResult: groupSelectionResult,
+		groupParameters:      groupParameters,
+		waitForBlockFn:       waitForBlockFn,
 	}
 }
 
 // SubmitResult submits the DKG result along with submitting signatures to the
 // chain. In the process, it checks if the number of signatures is above
 // the required threshold, whether the result was already submitted and waits
-// until the member is eligible for DKG result submission.
+// until the member is eligible for DKG result submission or the given context
+// is done, whichever comes first.
 func (drs *dkgResultSubmitter) SubmitResult(
 	ctx context.Context,
 	memberIndex group.MemberIndex,
 	result *dkg.Result,
 	signatures map[group.MemberIndex][]byte,
 ) error {
-	config := drs.chain.GetConfig()
-
-	if len(signatures) < config.GroupQuorum {
+	if len(signatures) < drs.groupParameters.GroupQuorum {
 		return fmt.Errorf(
 			"could not submit result with [%v] signatures for group quorum [%v]",
 			len(signatures),
-			config.GroupQuorum,
+			drs.groupParameters.GroupQuorum,
 		)
 	}
 
-	resultSubmittedChan := make(chan uint64)
-
-	subscription := drs.chain.OnDKGResultSubmitted(
-		func(event *DKGResultSubmittedEvent) {
-			resultSubmittedChan <- event.BlockNumber
-		},
-	)
-	defer subscription.Unsubscribe()
-
 	dkgState, err := drs.chain.GetDKGState()
 	if err != nil {
 		return fmt.Errorf("could not check DKG state: [%w]", err)
@@ -114,66 +129,77 @@ func (drs *dkgResultSubmitter) SubmitResult(
 		// Someone who was ahead of us in the queue submitted the result. Giving up.
 		drs.dkgLogger.Infof(
 			"[member:%v] DKG is no longer awaiting the result; "+
-				"aborting DKG result submission",
+				"aborting DKG result on-chain submission",
 			memberIndex,
 		)
 		return nil
 	}
 
-	submissionDelay := time.Duration(memberIndex-1) * dkgResultSubmissionDelayStep
+	blockCounter, err := drs.chain.BlockCounter()
+	if err != nil {
+		return err
+	}
+
+	// We can't determine a common block at which the publication starts.
+	// However, all we want here is to ensure the members do not submit
+	// at the same time. This can be achieved by simply using the index-based
+	// delay starting from the current block.
+	currentBlock, err := blockCounter.CurrentBlock()
+	if err != nil {
+		return fmt.Errorf("cannot get current block: [%v]", err)
+	}
+	delayBlocks := uint64(memberIndex-1) * dkgResultSubmissionDelayStepBlocks
+	submissionBlock := currentBlock + delayBlocks
 
 	drs.dkgLogger.Infof(
-		"[member:%v] waiting [%v] to submit",
+		"[member:%v] waiting for block [%v] to submit DKG result",
 		memberIndex,
-		submissionDelay,
+		submissionBlock,
 	)
 
-	submissionTimer := time.NewTimer(submissionDelay)
-	defer submissionTimer.Stop()
-
-	for {
-		select {
-		case <-submissionTimer.C:
-			// Member becomes eligible to submit the result. Result submission
-			// would trigger the sender side of the result submission event
-			// listener but also cause the receiver side (this select)
-			// termination that will result with a dangling goroutine blocked
-			// forever on the `onSubmittedResultChan` channel. This would
-			// cause a resource leak. In order to avoid that, we should
-			// unsubscribe from the result submission event listener before
-			// submitting the result.
-			subscription.Unsubscribe()
-
-			publicKeyBytes, err := result.GroupPublicKeyBytes()
-			if err != nil {
-				return fmt.Errorf("cannot get public key bytes [%w]", err)
-			}
-
-			drs.dkgLogger.Infof(
-				"[member:%v] submitting DKG result with public key [0x%x] and "+
-					"[%v] supporting member signatures",
-				memberIndex,
-				publicKeyBytes,
-				len(signatures),
-			)
-
-			return drs.chain.SubmitDKGResult(
-				memberIndex,
-				result,
-				signatures,
-			)
-		case blockNumber := <-resultSubmittedChan:
-			drs.dkgLogger.Infof(
-				"[member:%v] leaving; DKG result submitted by other member "+
-					"at block [%v]",
-				memberIndex,
-				blockNumber,
-			)
-			// A result has been submitted by other member. Leave without
-			// publishing the result.
-			return nil
-		case <-ctx.Done():
-			return fmt.Errorf("result publication timed out")
-		}
+	err = drs.waitForBlockFn(ctx, submissionBlock)
+	if err != nil {
+		return fmt.Errorf(
+			"error while waiting for DKG result submission block: [%v]",
+			err,
+		)
 	}
+
+	if ctx.Err() != nil {
+		// The context was cancelled by the upstream. Regardless of the cause,
+		// that means the DKG is no longer awaiting the result, and we can
+		// safely return.
+		drs.dkgLogger.Infof(
+			"[member:%v] DKG is no longer awaiting the result; "+
+				"aborting DKG result on-chain submission",
+			memberIndex,
+		)
+		return nil
+	}
+
+	drs.dkgLogger.Infof(
+		"[member:%v] submitting DKG result with [%v] supporting "+
+			"member signatures",
+		memberIndex,
+		len(signatures),
+	)
+
+	groupPublicKey, err := result.GroupPublicKey()
+	if err != nil {
+		return fmt.Errorf("cannot get group public key [%w]", err)
+	}
+
+	dkgResult, err := drs.chain.AssembleDKGResult(
+		memberIndex,
+		groupPublicKey,
+		result.Group.OperatingMemberIndexes(),
+		result.MisbehavedMembersIndexes(),
+		signatures,
+		drs.groupSelectionResult,
+	)
+	if err != nil {
+		return fmt.Errorf("cannot assemble DKG chain result [%w]", err)
+	}
+
+	return drs.chain.SubmitDKGResult(dkgResult)
 }
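
Since the submitter no longer relies on wall-clock timers or a dedicated result-submission subscription, the spread between members is purely block-based. Below is a minimal sketch of that spread, not part of the patch, using a hypothetical value in place of dkgResultSubmissionDelayStepBlocks (the real constant is defined elsewhere in the package and does not appear in this diff).

// Illustrative sketch of the index-based submission spread.
package main

import "fmt"

const assumedDelayStepBlocks = 10 // hypothetical stand-in, for illustration only

// submissionBlockFor computes the first block at which the given member
// submits, counted from the block current at the time of the check.
func submissionBlockFor(memberIndex uint8, currentBlock uint64) uint64 {
	delayBlocks := uint64(memberIndex-1) * assumedDelayStepBlocks
	return currentBlock + delayBlocks
}

func main() {
	currentBlock := uint64(1000)
	for memberIndex := uint8(1); memberIndex <= 3; memberIndex++ {
		fmt.Printf(
			"member %d becomes eligible at block %d\n",
			memberIndex,
			submissionBlockFor(memberIndex, currentBlock),
		)
	}
}
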
diff --git a/pkg/tbtc/dkg_submit_test.go b/pkg/tbtc/dkg_submit_test.go
index e9d1c27285..9d4c124e8c 100644
--- a/pkg/tbtc/dkg_submit_test.go
+++ b/pkg/tbtc/dkg_submit_test.go
@@ -7,6 +7,8 @@ import (
 	"testing"
 	"time"
 
+	"github.com/keep-network/keep-core/pkg/chain"
+
 	"github.com/keep-network/keep-core/pkg/internal/tecdsatest"
 	"github.com/keep-network/keep-core/pkg/internal/testutils"
 	"github.com/keep-network/keep-core/pkg/protocol/group"
@@ -16,15 +18,16 @@ import (
 )
 
 func TestSignResult_SigningSuccessful(t *testing.T) {
-	chain := Connect(5, 4, 3)
-	dkgResultSigner := newDkgResultSigner(chain)
+	chain := Connect()
+	dkgStartBlock := uint64(2000)
+	dkgResultSigner := newDkgResultSigner(chain, dkgStartBlock)
 
 	testData, err := tecdsatest.LoadPrivateKeyShareTestFixtures(1)
 	if err != nil {
 		t.Fatalf("failed to load test data: [%v]", err)
 	}
 	result := &dkg.Result{
-		Group:           group.NewGroup(32, 64),
+		Group:           group.NewGroup(2, 5),
 		PrivateKeyShare: tecdsa.NewPrivateKeyShare(testData[0]),
 	}
 
@@ -47,8 +50,19 @@ func TestSignResult_SigningSuccessful(t *testing.T) {
 		)
 	}
 
-	expectedDKGResultHash := dkg.ResultHash(
-		sha3.Sum256([]byte(fmt.Sprint(result))),
+	groupPublicKey, err := result.GroupPublicKey()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expectedDKGResultHash := dkg.ResultSignatureHash(
+		sha3.Sum256(
+			[]byte(fmt.Sprint(
+				groupPublicKey,
+				result.MisbehavedMembersIndexes(),
+				dkgStartBlock,
+			)),
+		),
 	)
 	if expectedDKGResultHash != signedResult.ResultHash {
 		t.Errorf(
@@ -81,16 +95,14 @@ func TestSignResult_SigningSuccessful(t *testing.T) {
 }
 
 func TestSignResult_ErrorDuringDkgResultHashCalculation(t *testing.T) {
-	chain := Connect(5, 4, 3)
-	dkgResultSigner := newDkgResultSigner(chain)
+	chain := Connect()
+	dkgStartBlock := uint64(2000)
+	dkgResultSigner := newDkgResultSigner(chain, dkgStartBlock)
 
 	// Use nil as the DKG result to cause hash calculation error
 	_, err := dkgResultSigner.SignResult(nil)
 
-	expectedError := fmt.Errorf(
-		"dkg result hash calculation failed [%w]",
-		errNilDKGResult,
-	)
+	expectedError := fmt.Errorf("result is nil")
 	if !reflect.DeepEqual(expectedError, err) {
 		t.Errorf(
 			"unexpected error\nexpected: %v\nactual:   %v\n",
@@ -101,15 +113,16 @@ func TestSignResult_ErrorDuringDkgResultHashCalculation(t *testing.T) {
 }
 
 func TestVerifySignature_VerificationSuccessful(t *testing.T) {
-	chain := Connect(5, 4, 3)
-	dkgResultSigner := newDkgResultSigner(chain)
+	chain := Connect()
+	dkgStartBlock := uint64(2000)
+	dkgResultSigner := newDkgResultSigner(chain, dkgStartBlock)
 
 	testData, err := tecdsatest.LoadPrivateKeyShareTestFixtures(1)
 	if err != nil {
 		t.Fatalf("failed to load test data: [%v]", err)
 	}
 	result := &dkg.Result{
-		Group:           group.NewGroup(32, 64),
+		Group:           group.NewGroup(2, 5),
 		PrivateKeyShare: tecdsa.NewPrivateKeyShare(testData[0]),
 	}
 
@@ -132,8 +145,9 @@ func TestVerifySignature_VerificationSuccessful(t *testing.T) {
 }
 
 func TestVerifySignature_VerificationFailure(t *testing.T) {
-	chain := Connect(5, 4, 3)
-	dkgResultSigner := newDkgResultSigner(chain)
+	chain := Connect()
+	dkgStartBlock := uint64(2000)
+	dkgResultSigner := newDkgResultSigner(chain, dkgStartBlock)
 
 	testData, err := tecdsatest.LoadPrivateKeyShareTestFixtures(1)
 	if err != nil {
@@ -141,7 +155,7 @@ func TestVerifySignature_VerificationFailure(t *testing.T) {
 	}
 
 	result := &dkg.Result{
-		Group:           group.NewGroup(32, 64),
+		Group:           group.NewGroup(2, 5),
 		PrivateKeyShare: tecdsa.NewPrivateKeyShare(testData[0]),
 	}
 	signedResult, err := dkgResultSigner.SignResult(result)
@@ -150,9 +164,10 @@ func TestVerifySignature_VerificationFailure(t *testing.T) {
 	}
 
 	anotherResult := &dkg.Result{
-		Group:           group.NewGroup(30, 64),
+		Group:           group.NewGroup(2, 5),
 		PrivateKeyShare: tecdsa.NewPrivateKeyShare(testData[0]),
 	}
+	anotherResult.Group.MarkMemberAsInactive(3)
 	anotherSignedResult, err := dkgResultSigner.SignResult(anotherResult)
 	if err != nil {
 		t.Fatal(err)
@@ -168,16 +183,17 @@ func TestVerifySignature_VerificationFailure(t *testing.T) {
 	}
 
 	if verificationSuccessful {
-		t.Fatal(
-			"Expected unsuccessful verification of signature, but it was " +
-				"successful",
+		t.Errorf(
+			"expected unsuccessful verification of signature, " +
+				"but it was successful",
 		)
 	}
 }
 
 func TestVerifySignature_VerificationError(t *testing.T) {
-	chain := Connect(5, 4, 3)
-	dkgResultSigner := newDkgResultSigner(chain)
+	chain := Connect()
+	dkgStartBlock := uint64(2000)
+	dkgResultSigner := newDkgResultSigner(chain, dkgStartBlock)
 
 	testData, err := tecdsatest.LoadPrivateKeyShareTestFixtures(1)
 	if err != nil {
@@ -185,7 +201,7 @@ func TestVerifySignature_VerificationError(t *testing.T) {
 	}
 
 	result := &dkg.Result{
-		Group:           group.NewGroup(32, 64),
+		Group:           group.NewGroup(2, 5),
 		PrivateKeyShare: tecdsa.NewPrivateKeyShare(testData[0]),
 	}
 	signedResult, err := dkgResultSigner.SignResult(result)
@@ -214,15 +230,56 @@ func TestVerifySignature_VerificationError(t *testing.T) {
 }
 
 func TestSubmitResult_MemberSubmitsResult(t *testing.T) {
-	chain := Connect(5, 4, 3)
-	dkgResultSubmitter := newDkgResultSubmitter(&testutils.MockLogger{}, chain)
+	groupParameters := &GroupParameters{
+		GroupSize:       5,
+		GroupQuorum:     4,
+		HonestThreshold: 3,
+	}
+
+	localChain := Connect()
+
+	err := localChain.startDKG()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	operatorAddress, err := localChain.operatorAddress()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	operatorID, err := localChain.GetOperatorID(operatorAddress)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var operatorsIDs chain.OperatorIDs
+	var operatorsAddresses chain.Addresses
+
+	for memberIndex := uint8(1); int(memberIndex) <= groupParameters.GroupSize; memberIndex++ {
+		operatorsIDs = append(operatorsIDs, operatorID)
+		operatorsAddresses = append(operatorsAddresses, operatorAddress)
+	}
+
+	groupSelectionResult := &GroupSelectionResult{
+		OperatorsIDs:       operatorsIDs,
+		OperatorsAddresses: operatorsAddresses,
+	}
+
+	dkgResultSubmitter := newDkgResultSubmitter(
+		&testutils.MockLogger{},
+		localChain,
+		groupParameters,
+		groupSelectionResult,
+		testWaitForBlockFn(localChain),
+	)
 
 	testData, err := tecdsatest.LoadPrivateKeyShareTestFixtures(1)
 	if err != nil {
 		t.Fatalf("failed to load test data: [%v]", err)
 	}
 	result := &dkg.Result{
-		Group:           group.NewGroup(32, 64),
+		Group:           group.NewGroup(groupParameters.DishonestThreshold(), groupParameters.GroupSize),
 		PrivateKeyShare: tecdsa.NewPrivateKeyShare(testData[0]),
 	}
 
@@ -247,30 +304,71 @@ func TestSubmitResult_MemberSubmitsResult(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	expectedActiveWallet, err := result.GroupPublicKeyBytes()
+	expectedGroupPublicKey, err := result.GroupPublicKeyBytes()
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	if !reflect.DeepEqual(expectedActiveWallet, chain.activeWallet) {
+	if !reflect.DeepEqual(expectedGroupPublicKey, localChain.dkgResult.GroupPublicKey) {
 		t.Errorf(
-			"unexpected active wallet bytes \nexpected: [0x%x]\nactual:   [0x%x]\n",
-			expectedActiveWallet,
-			chain.activeWallet,
+			"unexpected group public key \nexpected: [0x%x]\nactual:   [0x%x]\n",
+			expectedGroupPublicKey,
+			localChain.dkgResult.GroupPublicKey,
 		)
 	}
 }
 
-func TestSubmitResult_MemberDoesNotSubmitsResult(t *testing.T) {
-	chain := Connect(5, 4, 3)
-	dkgResultSubmitter := newDkgResultSubmitter(&testutils.MockLogger{}, chain)
+func TestSubmitResult_AnotherMemberSubmitsResult(t *testing.T) {
+	groupParameters := &GroupParameters{
+		GroupSize:       5,
+		GroupQuorum:     4,
+		HonestThreshold: 3,
+	}
+
+	localChain := Connect()
+
+	err := localChain.startDKG()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	operatorAddress, err := localChain.operatorAddress()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	operatorID, err := localChain.GetOperatorID(operatorAddress)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var operatorsIDs chain.OperatorIDs
+	var operatorsAddresses chain.Addresses
+
+	for memberIndex := uint8(1); int(memberIndex) <= groupParameters.GroupSize; memberIndex++ {
+		operatorsIDs = append(operatorsIDs, operatorID)
+		operatorsAddresses = append(operatorsAddresses, operatorAddress)
+	}
+
+	groupSelectionResult := &GroupSelectionResult{
+		OperatorsIDs:       operatorsIDs,
+		OperatorsAddresses: operatorsAddresses,
+	}
+
+	dkgResultSubmitter := newDkgResultSubmitter(
+		&testutils.MockLogger{},
+		localChain,
+		groupParameters,
+		groupSelectionResult,
+		testWaitForBlockFn(localChain),
+	)
 
 	testData, err := tecdsatest.LoadPrivateKeyShareTestFixtures(1)
 	if err != nil {
 		t.Fatalf("failed to load test data: [%v]", err)
 	}
 	result := &dkg.Result{
-		Group:           group.NewGroup(32, 64),
+		Group:           group.NewGroup(groupParameters.DishonestThreshold(), groupParameters.GroupSize),
 		PrivateKeyShare: tecdsa.NewPrivateKeyShare(testData[0]),
 	}
 	signatures := map[group.MemberIndex][]byte{
@@ -283,6 +381,13 @@ func TestSubmitResult_MemberDoesNotSubmitsResult(t *testing.T) {
 	ctx, cancelCtx := context.WithCancel(context.Background())
 	defer cancelCtx()
 
+	// Set up a global listener that will cancel the common context upon result
+	// submission. That mimics the real-world scenario.
+	localChain.OnDKGResultSubmitted(
+		func(event *DKGResultSubmittedEvent) {
+			cancelCtx()
+		})
+
 	secondMemberSubmissionChannel := make(chan error)
 
 	// Attempt to submit result for the second member on a separate goroutine.
@@ -317,42 +422,161 @@ func TestSubmitResult_MemberDoesNotSubmitsResult(t *testing.T) {
 
 	// Check that the second member returned without errors
 	secondMemberErr := <-secondMemberSubmissionChannel
-	if err != nil {
+	if secondMemberErr != nil {
 		t.Fatal(secondMemberErr)
 	}
 
-	if chain.resultSubmitterIndex != firstMemberIndex {
+	if localChain.dkgResult.SubmitterMemberIndex != firstMemberIndex {
 		t.Errorf(
 			"unexpected result submitter index \nexpected: %v\nactual:   %v\n",
 			firstMemberIndex,
-			chain.resultSubmitterIndex,
+			localChain.dkgResult.SubmitterMemberIndex,
 		)
 	}
 
-	expectedActiveWallet, err := result.GroupPublicKeyBytes()
+	expectedGroupPublicKey, err := result.GroupPublicKeyBytes()
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	if !reflect.DeepEqual(expectedActiveWallet, chain.activeWallet) {
+	if !reflect.DeepEqual(expectedGroupPublicKey, localChain.dkgResult.GroupPublicKey) {
 		t.Errorf(
-			"unexpected active wallet bytes \nexpected: [0x%x]\nactual:   [0x%x]\n",
-			expectedActiveWallet,
-			chain.activeWallet,
+			"unexpected group public key \nexpected: [0x%x]\nactual:   [0x%x]\n",
+			expectedGroupPublicKey,
+			localChain.dkgResult.GroupPublicKey,
 		)
 	}
 }
 
+func TestSubmitResult_ContextCancelled(t *testing.T) {
+	groupParameters := &GroupParameters{
+		GroupSize:       5,
+		GroupQuorum:     4,
+		HonestThreshold: 3,
+	}
+
+	localChain := Connect()
+
+	err := localChain.startDKG()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	operatorAddress, err := localChain.operatorAddress()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	operatorID, err := localChain.GetOperatorID(operatorAddress)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var operatorsIDs chain.OperatorIDs
+	var operatorsAddresses chain.Addresses
+
+	for memberIndex := uint8(1); int(memberIndex) <= groupParameters.GroupSize; memberIndex++ {
+		operatorsIDs = append(operatorsIDs, operatorID)
+		operatorsAddresses = append(operatorsAddresses, operatorAddress)
+	}
+
+	groupSelectionResult := &GroupSelectionResult{
+		OperatorsIDs:       operatorsIDs,
+		OperatorsAddresses: operatorsAddresses,
+	}
+
+	dkgResultSubmitter := newDkgResultSubmitter(
+		&testutils.MockLogger{},
+		localChain,
+		groupParameters,
+		groupSelectionResult,
+		testWaitForBlockFn(localChain),
+	)
+
+	testData, err := tecdsatest.LoadPrivateKeyShareTestFixtures(1)
+	if err != nil {
+		t.Fatalf("failed to load test data: [%v]", err)
+	}
+	result := &dkg.Result{
+		Group:           group.NewGroup(groupParameters.DishonestThreshold(), groupParameters.GroupSize),
+		PrivateKeyShare: tecdsa.NewPrivateKeyShare(testData[0]),
+	}
+
+	memberIndex := group.MemberIndex(1)
+	signatures := map[group.MemberIndex][]byte{
+		1: []byte("signature 1"),
+		2: []byte("signature 2"),
+		3: []byte("signature 3"),
+		4: []byte("signature 4"),
+	}
+
+	ctx, cancelCtx := context.WithCancel(context.Background())
+
+	// Simulate the case when a timeout occurs and the context gets cancelled.
+	cancelCtx()
+
+	err = dkgResultSubmitter.SubmitResult(
+		ctx,
+		memberIndex,
+		result,
+		signatures,
+	)
+	if err != nil {
+		t.Errorf("unexpected error [%v]", err)
+	}
+}
+
 func TestSubmitResult_TooFewSignatures(t *testing.T) {
-	chain := Connect(5, 4, 3)
-	dkgResultSubmitter := newDkgResultSubmitter(&testutils.MockLogger{}, chain)
+	groupParameters := &GroupParameters{
+		GroupSize:       5,
+		GroupQuorum:     4,
+		HonestThreshold: 3,
+	}
+
+	localChain := Connect()
+
+	err := localChain.startDKG()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	operatorAddress, err := localChain.operatorAddress()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	operatorID, err := localChain.GetOperatorID(operatorAddress)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var operatorsIDs chain.OperatorIDs
+	var operatorsAddresses chain.Addresses
+
+	for memberIndex := uint8(1); int(memberIndex) <= groupParameters.GroupSize; memberIndex++ {
+		operatorsIDs = append(operatorsIDs, operatorID)
+		operatorsAddresses = append(operatorsAddresses, operatorAddress)
+	}
+
+	groupSelectionResult := &GroupSelectionResult{
+		OperatorsIDs:       operatorsIDs,
+		OperatorsAddresses: operatorsAddresses,
+	}
+
+	dkgResultSubmitter := newDkgResultSubmitter(
+		&testutils.MockLogger{},
+		localChain,
+		groupParameters,
+		groupSelectionResult,
+		testWaitForBlockFn(localChain),
+	)
 
 	testData, err := tecdsatest.LoadPrivateKeyShareTestFixtures(1)
 	if err != nil {
 		t.Fatalf("failed to load test data: [%v]", err)
 	}
 	result := &dkg.Result{
-		Group:           group.NewGroup(32, 64),
+		Group:           group.NewGroup(groupParameters.DishonestThreshold(), groupParameters.GroupSize),
 		PrivateKeyShare: tecdsa.NewPrivateKeyShare(testData[0]),
 	}
 
diff --git a/pkg/tbtc/dkg_test.go b/pkg/tbtc/dkg_test.go
index 7927874a9a..237059df8c 100644
--- a/pkg/tbtc/dkg_test.go
+++ b/pkg/tbtc/dkg_test.go
@@ -1,9 +1,13 @@
 package tbtc
 
 import (
+	"context"
 	"fmt"
 	"reflect"
 	"testing"
+	"time"
+
+	"golang.org/x/exp/slices"
 
 	"github.com/keep-network/keep-core/pkg/chain"
 	"github.com/keep-network/keep-core/pkg/internal/tecdsatest"
@@ -11,22 +15,22 @@ import (
 	"github.com/keep-network/keep-core/pkg/protocol/group"
 	"github.com/keep-network/keep-core/pkg/tecdsa"
 	"github.com/keep-network/keep-core/pkg/tecdsa/dkg"
+	"golang.org/x/crypto/sha3"
 )
 
-func TestRegisterSigner(t *testing.T) {
+func TestDkgExecutor_RegisterSigner(t *testing.T) {
 	testData, err := tecdsatest.LoadPrivateKeyShareTestFixtures(1)
 	if err != nil {
 		t.Fatalf("failed to load test data: [%v]", err)
 	}
 
-	const (
-		groupSize          = 5
-		groupQuorum        = 3
-		honestThreshold    = 2
-		dishonestThreshold = 3
-	)
+	groupParameters := &GroupParameters{
+		GroupSize:       5,
+		GroupQuorum:     3,
+		HonestThreshold: 2,
+	}
 
-	localChain := Connect(groupSize, groupQuorum, honestThreshold)
+	localChain := Connect()
 
 	selectedOperators := []chain.Address{
 		"0xAA",
@@ -88,11 +92,12 @@ func TestRegisterSigner(t *testing.T) {
 
 			dkgExecutor := &dkgExecutor{
 				// setting only the fields really needed for this test
-				chain:          localChain,
-				walletRegistry: walletRegistry,
+				groupParameters: groupParameters,
+				chain:           localChain,
+				walletRegistry:  walletRegistry,
 			}
 
-			group := group.NewGroup(dishonestThreshold, groupSize)
+			group := group.NewGroup(groupParameters.DishonestThreshold(), groupParameters.GroupSize)
 			for _, disqualifiedMember := range test.disqualifiedMemberIndexes {
 				group.MarkMemberAsDisqualified(disqualifiedMember)
 			}
@@ -160,8 +165,222 @@ func TestRegisterSigner(t *testing.T) {
 	}
 }
 
+func TestDkgExecutor_ExecuteDkgValidation(t *testing.T) {
+	testData, err := tecdsatest.LoadPrivateKeyShareTestFixtures(1)
+	if err != nil {
+		t.Fatalf("failed to load test data: [%v]", err)
+	}
+
+	groupParameters := &GroupParameters{
+		GroupSize:       5,
+		GroupQuorum:     3,
+		HonestThreshold: 2,
+	}
+
+	tecdsaDkgResult := &dkg.Result{
+		Group:           group.NewGroup(groupParameters.DishonestThreshold(), groupParameters.GroupSize),
+		PrivateKeyShare: tecdsa.NewPrivateKeyShare(testData[0]),
+	}
+
+	groupPublicKey, err := tecdsaDkgResult.GroupPublicKeyBytes()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var tests = map[string]struct {
+		submitterMemberIndex     group.MemberIndex
+		resultValid              bool
+		rejectedApprovalsIndexes []int
+		expectedEvent            interface{}
+		expectedDkgState         DKGState
+	}{
+		"result approved by the submitter": {
+			submitterMemberIndex: group.MemberIndex(1),
+			resultValid:          true,
+			expectedEvent: &DKGResultApprovedEvent{
+				ResultHash: sha3.Sum256(groupPublicKey),
+				Approver:   "",
+				// 16 is the next block after 15 blocks of the challenge period
+				BlockNumber: 16,
+			},
+			expectedDkgState: Idle,
+		},
+		"result approved by a non-submitter": {
+			submitterMemberIndex: group.MemberIndex(1),
+			resultValid:          true,
+			// Reject the first approval (with index 0) that will be made by
+			// member 1 (the submitter) in order to force member 2 to
+			// approve after the precedence period.
+			rejectedApprovalsIndexes: []int{0},
+			expectedEvent: &DKGResultApprovedEvent{
+				ResultHash: sha3.Sum256(groupPublicKey),
+				Approver:   "",
+				// 36 is the next block after 15 blocks of the challenge period,
+				// 5 blocks of the precedence period, and 15 blocks of the delay
+				// for member 2
+				BlockNumber: 36,
+			},
+			expectedDkgState: Idle,
+		},
+		"result challenged": {
+			submitterMemberIndex: group.MemberIndex(1),
+			resultValid:          false,
+			expectedEvent: &DKGResultChallengedEvent{
+				ResultHash:  sha3.Sum256(groupPublicKey),
+				Challenger:  "",
+				Reason:      "",
+				BlockNumber: 0, // challenge is submitted immediately
+			},
+			expectedDkgState: AwaitingResult,
+		},
+	}
+
+	for testName, test := range tests {
+		t.Run(testName, func(t *testing.T) {
+			localChain := Connect()
+
+			approvalIndex := 0
+			localChain.dkgResultApprovalGuard = func() bool {
+				rejectedApproval := slices.Contains(
+					test.rejectedApprovalsIndexes,
+					approvalIndex,
+				)
+				approvalIndex++
+				return !rejectedApproval
+			}
+
+			operatorAddress, err := localChain.operatorAddress()
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			operatorID, err := localChain.GetOperatorID(operatorAddress)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			signatures := make(map[group.MemberIndex][]byte)
+			operatorsIDs := make(chain.OperatorIDs, groupParameters.GroupSize)
+			operatorsAddresses := make(chain.Addresses, groupParameters.GroupSize)
+
+			for memberIndex := uint8(1); int(memberIndex) <= groupParameters.GroupSize; memberIndex++ {
+				signatures[memberIndex] = []byte{memberIndex}
+				operatorsIDs[memberIndex-1] = operatorID
+				operatorsAddresses[memberIndex-1] = operatorAddress
+			}
+
+			groupSelectionResult := &GroupSelectionResult{
+				OperatorsIDs:       operatorsIDs,
+				OperatorsAddresses: operatorsAddresses,
+			}
+
+			dkgResultSubmittedEventChan := make(chan *DKGResultSubmittedEvent, 1)
+			_ = localChain.OnDKGResultSubmitted(
+				func(event *DKGResultSubmittedEvent) {
+					dkgResultSubmittedEventChan <- event
+				},
+			)
+
+			err = localChain.startDKG()
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			groupPublicKey, err := tecdsaDkgResult.GroupPublicKey()
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			dkgResult, err := localChain.AssembleDKGResult(
+				test.submitterMemberIndex,
+				groupPublicKey,
+				tecdsaDkgResult.Group.OperatingMemberIndexes(),
+				tecdsaDkgResult.MisbehavedMembersIndexes(),
+				signatures,
+				groupSelectionResult,
+			)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			err = localChain.SubmitDKGResult(dkgResult)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			dkgResultSubmittedEvent := <-dkgResultSubmittedEventChan
+
+			if !test.resultValid {
+				err = localChain.invalidateDKGResult(dkgResultSubmittedEvent.Result)
+				if err != nil {
+					t.Fatal(err)
+				}
+			}
+
+			// Setting only the fields really needed for this test.
+			dkgExecutor := &dkgExecutor{
+				groupParameters: groupParameters,
+				operatorIDFn: func() (chain.OperatorID, error) {
+					return operatorID, nil
+				},
+				operatorAddress: operatorAddress,
+				chain:           localChain,
+				waitForBlockFn:  testWaitForBlockFn(localChain),
+			}
+
+			eventChan := make(chan interface{}, 1)
+
+			_ = localChain.OnDKGResultChallenged(
+				func(event *DKGResultChallengedEvent) {
+					eventChan <- event
+				},
+			)
+			_ = localChain.OnDKGResultApproved(
+				func(event *DKGResultApprovedEvent) {
+					eventChan <- event
+				},
+			)
+
+			dkgExecutor.executeDkgValidation(
+				dkgResultSubmittedEvent.Seed,
+				dkgResultSubmittedEvent.BlockNumber,
+				dkgResultSubmittedEvent.Result,
+				dkgResultSubmittedEvent.ResultHash,
+			)
+
+			var event interface{}
+			select {
+			case event = <-eventChan:
+			case <-time.After(1 * time.Minute):
+			}
+
+			if !reflect.DeepEqual(test.expectedEvent, event) {
+				t.Errorf(
+					"unexpected event\n"+
+						"expected: [%+v]\n"+
+						"actual:   [%+v]",
+					test.expectedEvent,
+					event,
+				)
+			}
+
+			dkgState, err := localChain.GetDKGState()
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			testutils.AssertIntsEqual(
+				t,
+				"DKG state",
+				int(test.expectedDkgState),
+				int(dkgState),
+			)
+		})
+	}
+}
+
 func TestFinalSigningGroup(t *testing.T) {
-	chainConfig := &ChainConfig{
+	groupParameters := &GroupParameters{
 		GroupSize:       5,
 		GroupQuorum:     3,
 		HonestThreshold: 2,
@@ -212,7 +431,7 @@ func TestFinalSigningGroup(t *testing.T) {
 				finalSigningGroup(
 					test.selectedOperators,
 					test.operatingMembersIndexes,
-					chainConfig,
+					groupParameters,
 				)
 
 			if !reflect.DeepEqual(test.expectedError, err) {
@@ -253,3 +472,24 @@ func TestFinalSigningGroup(t *testing.T) {
 		})
 	}
 }
+
+func testWaitForBlockFn(localChain *localChain) waitForBlockFn {
+	return func(ctx context.Context, block uint64) error {
+		blockCounter, err := localChain.BlockCounter()
+		if err != nil {
+			return err
+		}
+
+		wait, err := blockCounter.BlockHeightWaiter(block)
+		if err != nil {
+			return err
+		}
+
+		select {
+		case <-wait:
+		case <-ctx.Done():
+		}
+
+		return nil
+	}
+}
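
The block numbers expected by TestDkgExecutor_ExecuteDkgValidation follow from the challenge, precedence, and per-member delay periods described in the test comments. The sketch below is not part of the patch; the period lengths are taken from those comments and apply to the local test chain only.

// Illustrative sketch of the approval schedule assumed by the validation test.
package main

import "fmt"

const (
	challengePeriodBlocks  = 15 // challenge period on the local test chain
	precedencePeriodBlocks = 5  // submitter precedence period
	memberDelayStepBlocks  = 15 // per-member approval delay step
)

// approvalBlock returns the first block at which the given member may approve
// a result submitted at submissionBlock by submitterIndex.
func approvalBlock(
	submissionBlock uint64,
	memberIndex uint8,
	submitterIndex uint8,
) uint64 {
	block := submissionBlock + challengePeriodBlocks + 1
	if memberIndex != submitterIndex {
		block += precedencePeriodBlocks +
			uint64(memberIndex-1)*memberDelayStepBlocks
	}
	return block
}

func main() {
	// Result submitted at block 0 by member 1, as in the test cases.
	fmt.Println(approvalBlock(0, 1, 1)) // 16: the submitter approves first
	fmt.Println(approvalBlock(0, 2, 1)) // 36: member 2 approves if member 1 does not
}
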
diff --git a/pkg/tbtc/gen/pb/message.pb.go b/pkg/tbtc/gen/pb/message.pb.go
index d6e556b066..b84c30ba61 100644
--- a/pkg/tbtc/gen/pb/message.pb.go
+++ b/pkg/tbtc/gen/pb/message.pb.go
@@ -20,69 +20,6 @@ const (
 	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
 )
 
-type StopPill struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	AttemptNumber uint64 `protobuf:"varint,1,opt,name=attemptNumber,proto3" json:"attemptNumber,omitempty"`
-	DkgSeed       string `protobuf:"bytes,2,opt,name=dkgSeed,proto3" json:"dkgSeed,omitempty"`
-	MessageToSign string `protobuf:"bytes,3,opt,name=messageToSign,proto3" json:"messageToSign,omitempty"`
-}
-
-func (x *StopPill) Reset() {
-	*x = StopPill{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_tbtc_gen_pb_message_proto_msgTypes[0]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *StopPill) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StopPill) ProtoMessage() {}
-
-func (x *StopPill) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_tbtc_gen_pb_message_proto_msgTypes[0]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use StopPill.ProtoReflect.Descriptor instead.
-func (*StopPill) Descriptor() ([]byte, []int) {
-	return file_pkg_tbtc_gen_pb_message_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *StopPill) GetAttemptNumber() uint64 {
-	if x != nil {
-		return x.AttemptNumber
-	}
-	return 0
-}
-
-func (x *StopPill) GetDkgSeed() string {
-	if x != nil {
-		return x.DkgSeed
-	}
-	return ""
-}
-
-func (x *StopPill) GetMessageToSign() string {
-	if x != nil {
-		return x.MessageToSign
-	}
-	return ""
-}
-
 type SigningDoneMessage struct {
 	state         protoimpl.MessageState
 	sizeCache     protoimpl.SizeCache
@@ -98,7 +35,7 @@ type SigningDoneMessage struct {
 func (x *SigningDoneMessage) Reset() {
 	*x = SigningDoneMessage{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_tbtc_gen_pb_message_proto_msgTypes[1]
+		mi := &file_pkg_tbtc_gen_pb_message_proto_msgTypes[0]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
@@ -111,7 +48,7 @@ func (x *SigningDoneMessage) String() string {
 func (*SigningDoneMessage) ProtoMessage() {}
 
 func (x *SigningDoneMessage) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_tbtc_gen_pb_message_proto_msgTypes[1]
+	mi := &file_pkg_tbtc_gen_pb_message_proto_msgTypes[0]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -124,7 +61,7 @@ func (x *SigningDoneMessage) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use SigningDoneMessage.ProtoReflect.Descriptor instead.
 func (*SigningDoneMessage) Descriptor() ([]byte, []int) {
-	return file_pkg_tbtc_gen_pb_message_proto_rawDescGZIP(), []int{1}
+	return file_pkg_tbtc_gen_pb_message_proto_rawDescGZIP(), []int{0}
 }
 
 func (x *SigningDoneMessage) GetSenderID() uint32 {
@@ -167,26 +104,19 @@ var File_pkg_tbtc_gen_pb_message_proto protoreflect.FileDescriptor
 var file_pkg_tbtc_gen_pb_message_proto_rawDesc = []byte{
 	0x0a, 0x1d, 0x70, 0x6b, 0x67, 0x2f, 0x74, 0x62, 0x74, 0x63, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70,
 	0x62, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
-	0x04, 0x74, 0x62, 0x74, 0x63, 0x22, 0x70, 0x0a, 0x08, 0x53, 0x74, 0x6f, 0x70, 0x50, 0x69, 0x6c,
-	0x6c, 0x12, 0x24, 0x0a, 0x0d, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x4e, 0x75, 0x6d, 0x62,
-	0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70,
-	0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x6b, 0x67, 0x53, 0x65,
-	0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x6b, 0x67, 0x53, 0x65, 0x65,
-	0x64, 0x12, 0x24, 0x0a, 0x0d, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x53, 0x69,
-	0x67, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
-	0x65, 0x54, 0x6f, 0x53, 0x69, 0x67, 0x6e, 0x22, 0xaa, 0x01, 0x0a, 0x12, 0x53, 0x69, 0x67, 0x6e,
-	0x69, 0x6e, 0x67, 0x44, 0x6f, 0x6e, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1a,
-	0x0a, 0x08, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d,
-	0x52, 0x08, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65,
-	0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x65, 0x73,
-	0x73, 0x61, 0x67, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x4e,
-	0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x61, 0x74, 0x74,
-	0x65, 0x6d, 0x70, 0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69,
-	0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73,
-	0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x42,
-	0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x65, 0x6e, 0x64, 0x42,
-	0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x06, 0x5a, 0x04, 0x2e, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x33,
+	0x04, 0x74, 0x62, 0x74, 0x63, 0x22, 0xaa, 0x01, 0x0a, 0x12, 0x53, 0x69, 0x67, 0x6e, 0x69, 0x6e,
+	0x67, 0x44, 0x6f, 0x6e, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1a, 0x0a, 0x08,
+	0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08,
+	0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73,
+	0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61,
+	0x67, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x4e, 0x75, 0x6d,
+	0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x61, 0x74, 0x74, 0x65, 0x6d,
+	0x70, 0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e,
+	0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67,
+	0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x42, 0x6c, 0x6f,
+	0x63, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x65, 0x6e, 0x64, 0x42, 0x6c, 0x6f,
+	0x63, 0x6b, 0x42, 0x06, 0x5a, 0x04, 0x2e, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x33,
 }
 
 var (
@@ -201,10 +131,9 @@ func file_pkg_tbtc_gen_pb_message_proto_rawDescGZIP() []byte {
 	return file_pkg_tbtc_gen_pb_message_proto_rawDescData
 }
 
-var file_pkg_tbtc_gen_pb_message_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_pkg_tbtc_gen_pb_message_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
 var file_pkg_tbtc_gen_pb_message_proto_goTypes = []interface{}{
-	(*StopPill)(nil),           // 0: tbtc.StopPill
-	(*SigningDoneMessage)(nil), // 1: tbtc.SigningDoneMessage
+	(*SigningDoneMessage)(nil), // 0: tbtc.SigningDoneMessage
 }
 var file_pkg_tbtc_gen_pb_message_proto_depIdxs = []int32{
 	0, // [0:0] is the sub-list for method output_type
@@ -221,18 +150,6 @@ func file_pkg_tbtc_gen_pb_message_proto_init() {
 	}
 	if !protoimpl.UnsafeEnabled {
 		file_pkg_tbtc_gen_pb_message_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*StopPill); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_pkg_tbtc_gen_pb_message_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
 			switch v := v.(*SigningDoneMessage); i {
 			case 0:
 				return &v.state
@@ -251,7 +168,7 @@ func file_pkg_tbtc_gen_pb_message_proto_init() {
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
 			RawDescriptor: file_pkg_tbtc_gen_pb_message_proto_rawDesc,
 			NumEnums:      0,
-			NumMessages:   2,
+			NumMessages:   1,
 			NumExtensions: 0,
 			NumServices:   0,
 		},
diff --git a/pkg/tbtc/gen/pb/message.proto b/pkg/tbtc/gen/pb/message.proto
index 6ab2e07980..5bab60171d 100644
--- a/pkg/tbtc/gen/pb/message.proto
+++ b/pkg/tbtc/gen/pb/message.proto
@@ -3,12 +3,6 @@ syntax = "proto3";
 option go_package = "./pb";
 package tbtc;
 
-message StopPill {
-    uint64 attemptNumber = 1;
-    string dkgSeed = 2;
-    string messageToSign = 3;
-}
-
 message SigningDoneMessage {
     uint32 senderID = 1;
     bytes message = 2;
diff --git a/pkg/tbtc/node.go b/pkg/tbtc/node.go
index 9812c78f83..4338c5970f 100644
--- a/pkg/tbtc/node.go
+++ b/pkg/tbtc/node.go
@@ -8,6 +8,8 @@ import (
 	"math/big"
 	"sync"
 
+	"github.com/keep-network/keep-core/pkg/chain"
+
 	"go.uber.org/zap"
 
 	"github.com/keep-network/keep-common/pkg/persistence"
@@ -39,6 +41,8 @@ const (
 
 // node represents the current state of an ECDSA node.
 type node struct {
+	groupParameters *GroupParameters
+
 	chain          Chain
 	netProvider    net.Provider
 	walletRegistry *walletRegistry
@@ -53,19 +57,21 @@ type node struct {
 }
 
 func newNode(
+	groupParameters *GroupParameters,
 	chain Chain,
 	netProvider net.Provider,
 	keyStorePersistance persistence.ProtectedHandle,
 	workPersistence persistence.BasicHandle,
 	scheduler *generator.Scheduler,
 	config Config,
-) *node {
+) (*node, error) {
 	walletRegistry := newWalletRegistry(keyStorePersistance)
 
 	latch := generator.NewProtocolLatch()
 	scheduler.RegisterProtocol(latch)
 
 	node := &node{
+		groupParameters:  groupParameters,
 		chain:            chain,
 		netProvider:      netProvider,
 		walletRegistry:   walletRegistry,
@@ -73,9 +79,20 @@ func newNode(
 		signingExecutors: make(map[string]*signingExecutor),
 	}
 
+	// Only the operator address is known at this point and can be pre-fetched.
+	// The operator ID must be determined later as the operator may not be in
+	// the sortition pool yet.
+	operatorAddress, err := node.operatorAddress()
+	if err != nil {
+		return nil, fmt.Errorf("cannot get node's operator adress: [%v]", err)
+	}
+
 	// TODO: This chicken and egg problem should be solved when
 	// waitForBlockHeight becomes a part of BlockHeightWaiter interface.
 	node.dkgExecutor = newDkgExecutor(
+		node.groupParameters,
+		node.operatorID,
+		operatorAddress,
 		chain,
 		netProvider,
 		walletRegistry,
@@ -86,7 +103,40 @@ func newNode(
 		node.waitForBlockHeight,
 	)
 
-	return node
+	return node, nil
+}
+
+// operatorAddress returns the node's operator address.
+func (n *node) operatorAddress() (chain.Address, error) {
+	_, operatorPublicKey, err := n.chain.OperatorKeyPair()
+	if err != nil {
+		return "", fmt.Errorf("failed to get operator public key: [%v]", err)
+	}
+
+	operatorAddress, err := n.chain.Signing().PublicKeyToAddress(operatorPublicKey)
+	if err != nil {
+		return "", fmt.Errorf(
+			"failed to convert operator public key to address: [%v]",
+			err,
+		)
+	}
+
+	return operatorAddress, nil
+}
+
+// operatorID returns the node's operator ID.
+func (n *node) operatorID() (chain.OperatorID, error) {
+	operatorAddress, err := n.operatorAddress()
+	if err != nil {
+		return 0, fmt.Errorf("failed to get operator address: [%v]", err)
+	}
+
+	operatorID, err := n.chain.GetOperatorID(operatorAddress)
+	if err != nil {
+		return 0, fmt.Errorf("failed to get operator ID: [%v]", err)
+	}
+
+	return operatorID, nil
 }
 
 // joinDKGIfEligible takes a seed value and undergoes the process of the
@@ -94,8 +144,22 @@ func newNode(
 // the group generated by that seed. This is an interactive on-chain process,
 // and joinDKGIfEligible can block for an extended period of time while it
 // completes the on-chain operation.
-func (n *node) joinDKGIfEligible(seed *big.Int, startBlockNumber uint64) {
-	n.dkgExecutor.executeDkgIfEligible(seed, startBlockNumber)
+func (n *node) joinDKGIfEligible(seed *big.Int, startBlock uint64) {
+	n.dkgExecutor.executeDkgIfEligible(seed, startBlock)
+}
+
+// validateDKG performs the submitted DKG result validation process.
+// If the result is not valid, this function submits an on-chain result
+// challenge. If the result is valid and the given node was involved in the DKG,
+// this function schedules an on-chain approval that is submitted once the
+// challenge period elapses.
+func (n *node) validateDKG(
+	seed *big.Int,
+	submissionBlock uint64,
+	result *DKGChainResult,
+	resultHash [32]byte,
+) {
+	n.dkgExecutor.executeDkgValidation(seed, submissionBlock, result, resultHash)
 }
 
 // getSigningExecutor gets the signing executor responsible for executing
@@ -179,7 +243,7 @@ func (n *node) getSigningExecutor(
 		signers,
 		broadcastChannel,
 		membershipValidator,
-		n.chain.GetConfig(),
+		n.groupParameters,
 		n.protocolLatch,
 		blockCounter.CurrentBlock,
 		n.waitForBlockHeight,
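
The constructor wires the executor with the operator address resolved eagerly and the operator ID resolved lazily, because the ID may not exist until the operator joins the sortition pool. A minimal sketch of that pattern follows; it is not part of the patch and uses simplified stand-ins for the real operatorIDFn plumbing.

// Illustrative sketch of lazy operator ID resolution.
package main

import "fmt"

type OperatorID = uint32

// operatorIDFn defers the chain lookup to the moment the ID is actually
// needed by the DKG executor.
type operatorIDFn func() (OperatorID, error)

func useOperatorID(resolveID operatorIDFn) error {
	id, err := resolveID()
	if err != nil {
		return fmt.Errorf("failed to get operator ID: [%v]", err)
	}

	fmt.Printf("operating as operator [%d]\n", id)
	return nil
}

func main() {
	// Stub lookup standing in for chain.GetOperatorID(operatorAddress).
	resolveID := func() (OperatorID, error) { return 42, nil }

	if err := useOperatorID(resolveID); err != nil {
		fmt.Println(err)
	}
}
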
diff --git a/pkg/tbtc/node_test.go b/pkg/tbtc/node_test.go
index 5c79f575a8..200c250084 100644
--- a/pkg/tbtc/node_test.go
+++ b/pkg/tbtc/node_test.go
@@ -17,17 +17,13 @@ import (
 )
 
 func TestNode_GetSigningExecutor(t *testing.T) {
-	chainConfig := &ChainConfig{
+	groupParameters := &GroupParameters{
 		GroupSize:       5,
 		GroupQuorum:     4,
 		HonestThreshold: 3,
 	}
 
-	localChain := Connect(
-		chainConfig.GroupSize,
-		chainConfig.GroupQuorum,
-		chainConfig.HonestThreshold,
-	)
+	localChain := Connect()
 	localProvider := local.Connect()
 
 	signer := createMockSigner(t)
@@ -36,7 +32,8 @@ func TestNode_GetSigningExecutor(t *testing.T) {
 	// required to make the node controlling the signer's wallet.
 	keyStorePersistence := createMockKeyStorePersistence(t, signer)
 
-	node := newNode(
+	node, err := newNode(
+		groupParameters,
 		localChain,
 		localProvider,
 		keyStorePersistence,
@@ -44,6 +41,9 @@ func TestNode_GetSigningExecutor(t *testing.T) {
 		generator.StartScheduler(),
 		Config{},
 	)
+	if err != nil {
+		t.Fatal(err)
+	}
 
 	walletPublicKey := signer.wallet.publicKey
 	walletPublicKeyBytes, err := marshalPublicKey(walletPublicKey)
diff --git a/pkg/tbtc/signing.go b/pkg/tbtc/signing.go
index a67cd6d997..3c832075be 100644
--- a/pkg/tbtc/signing.go
+++ b/pkg/tbtc/signing.go
@@ -46,7 +46,7 @@ type signingExecutor struct {
 	signers             []*signer
 	broadcastChannel    net.BroadcastChannel
 	membershipValidator *group.MembershipValidator
-	chainConfig         *ChainConfig
+	groupParameters     *GroupParameters
 	protocolLatch       *generator.ProtocolLatch
 
 	// currentBlockFn is a function used to get the current block.
@@ -64,7 +64,7 @@ func newSigningExecutor(
 	signers []*signer,
 	broadcastChannel net.BroadcastChannel,
 	membershipValidator *group.MembershipValidator,
-	chainConfig *ChainConfig,
+	groupParameters *GroupParameters,
 	protocolLatch *generator.ProtocolLatch,
 	currentBlockFn func() (uint64, error),
 	waitForBlockFn waitForBlockFn,
@@ -75,7 +75,7 @@ func newSigningExecutor(
 		signers:              signers,
 		broadcastChannel:     broadcastChannel,
 		membershipValidator:  membershipValidator,
-		chainConfig:          chainConfig,
+		groupParameters:      groupParameters,
 		protocolLatch:        protocolLatch,
 		currentBlockFn:       currentBlockFn,
 		waitForBlockFn:       waitForBlockFn,
@@ -219,7 +219,7 @@ func (se *signingExecutor) sign(
 			)
 
 			doneCheck := newSigningDoneCheck(
-				se.chainConfig.GroupSize,
+				se.groupParameters.GroupSize,
 				se.broadcastChannel,
 				se.membershipValidator,
 			)
@@ -230,7 +230,7 @@ func (se *signingExecutor) sign(
 				startBlock,
 				signer.signingGroupMemberIndex,
 				wallet.signingGroupOperators,
-				se.chainConfig,
+				se.groupParameters,
 				announcer,
 				doneCheck,
 			)
@@ -300,7 +300,7 @@ func (se *signingExecutor) sign(
 						signer.privateKeyShare,
 						wallet.groupSize(),
 						wallet.groupDishonestThreshold(
-							se.chainConfig.HonestThreshold,
+							se.groupParameters.HonestThreshold,
 						),
 						attempt.excludedMembersIndexes,
 						se.broadcastChannel,
diff --git a/pkg/tbtc/signing_done_test.go b/pkg/tbtc/signing_done_test.go
index 827b549c20..e421eb39fc 100644
--- a/pkg/tbtc/signing_done_test.go
+++ b/pkg/tbtc/signing_done_test.go
@@ -22,13 +22,13 @@ import (
 
 // TestSigningDoneCheck is a happy path test.
 func TestSigningDoneCheck(t *testing.T) {
-	chainConfig := &ChainConfig{
+	groupParameters := &GroupParameters{
 		GroupSize:       5,
 		GroupQuorum:     4,
 		HonestThreshold: 3,
 	}
 
-	doneCheck := setupSigningDoneCheck(t, chainConfig)
+	doneCheck := setupSigningDoneCheck(t, groupParameters)
 
 	memberIndexes := make([]group.MemberIndex, doneCheck.groupSize)
 	for i := range memberIndexes {
@@ -42,7 +42,7 @@ func TestSigningDoneCheck(t *testing.T) {
 	message := big.NewInt(100)
 	attemptNumber := uint64(2)
 	attemptTimeoutBlock := uint64(1000)
-	attemptMemberIndexes := memberIndexes[:chainConfig.HonestThreshold]
+	attemptMemberIndexes := memberIndexes[:groupParameters.HonestThreshold]
 	result := &signing.Result{
 		Signature: &tecdsa.Signature{
 			R:          big.NewInt(200),
@@ -142,13 +142,13 @@ func TestSigningDoneCheck(t *testing.T) {
 // TestSigningDoneCheck_MissingConfirmation covers scenario when one member
 // did not provide a done check on time.
 func TestSigningDoneCheck_MissingConfirmation(t *testing.T) {
-	chainConfig := &ChainConfig{
+	groupParameters := &GroupParameters{
 		GroupSize:       5,
 		GroupQuorum:     4,
 		HonestThreshold: 3,
 	}
 
-	doneCheck := setupSigningDoneCheck(t, chainConfig)
+	doneCheck := setupSigningDoneCheck(t, groupParameters)
 
 	memberIndexes := make([]group.MemberIndex, doneCheck.groupSize)
 	for i := range memberIndexes {
@@ -162,7 +162,7 @@ func TestSigningDoneCheck_MissingConfirmation(t *testing.T) {
 	message := big.NewInt(100)
 	attemptNumber := uint64(1)
 	attemptTimeoutBlock := uint64(1000)
-	attemptMemberIndexes := memberIndexes[:chainConfig.HonestThreshold]
+	attemptMemberIndexes := memberIndexes[:groupParameters.HonestThreshold]
 	result := &signing.Result{
 		Signature: &tecdsa.Signature{
 			R:          big.NewInt(200),
@@ -179,7 +179,7 @@ func TestSigningDoneCheck_MissingConfirmation(t *testing.T) {
 		attemptMemberIndexes,
 	)
 
-	for i := 1; i < chainConfig.HonestThreshold; i++ {
+	for i := 1; i < groupParameters.HonestThreshold; i++ {
 		err := doneCheck.signalDone(
 			ctx,
 			uint8(i),
@@ -205,13 +205,13 @@ func TestSigningDoneCheck_MissingConfirmation(t *testing.T) {
 // TestSigningDoneCheck_AnotherSignature covers scenario when one member
 // did provide signature other than other members.
 func TestSigningDoneCheck_AnotherSignature(t *testing.T) {
-	chainConfig := &ChainConfig{
+	groupParameters := &GroupParameters{
 		GroupSize:       5,
 		GroupQuorum:     4,
 		HonestThreshold: 3,
 	}
 
-	doneCheck := setupSigningDoneCheck(t, chainConfig)
+	doneCheck := setupSigningDoneCheck(t, groupParameters)
 
 	memberIndexes := make([]group.MemberIndex, doneCheck.groupSize)
 	for i := range memberIndexes {
@@ -225,7 +225,7 @@ func TestSigningDoneCheck_AnotherSignature(t *testing.T) {
 	message := big.NewInt(100)
 	attemptNumber := uint64(1)
 	attemptTimeoutBlock := uint64(1000)
-	attemptMemberIndexes := memberIndexes[:chainConfig.HonestThreshold]
+	attemptMemberIndexes := memberIndexes[:groupParameters.HonestThreshold]
 	correctResult := &signing.Result{
 		Signature: &tecdsa.Signature{
 			R:          big.NewInt(200),
@@ -249,8 +249,8 @@ func TestSigningDoneCheck_AnotherSignature(t *testing.T) {
 		attemptMemberIndexes,
 	)
 
-	// chainConfig.HonestThreshold members provide correct signature
-	for i := 1; i < chainConfig.HonestThreshold; i++ {
+	// groupParameters.HonestThreshold members provide correct signature
+	for i := 1; i < groupParameters.HonestThreshold; i++ {
 		err := doneCheck.signalDone(
 			ctx,
 			uint8(i),
@@ -267,7 +267,7 @@ func TestSigningDoneCheck_AnotherSignature(t *testing.T) {
 	// one member provides incorrect signature
 	err := doneCheck.signalDone(
 		ctx,
-		uint8(chainConfig.HonestThreshold),
+		uint8(groupParameters.HonestThreshold),
 		message,
 		attemptNumber,
 		incorrectResult,
@@ -295,7 +295,7 @@ func TestSigningDoneCheck_AnotherSignature(t *testing.T) {
 // to perform test checks.
 func setupSigningDoneCheck(
 	t *testing.T,
-	chainConfig *ChainConfig,
+	groupParameters *GroupParameters,
 ) *signingDoneCheck {
 	operatorPrivateKey, operatorPublicKey, err := operator.GenerateKeyPair(
 		local_v1.DefaultCurve,
@@ -304,12 +304,7 @@ func setupSigningDoneCheck(
 		t.Fatal(err)
 	}
 
-	localChain := ConnectWithKey(
-		chainConfig.GroupSize,
-		chainConfig.GroupQuorum,
-		chainConfig.HonestThreshold,
-		operatorPrivateKey,
-	)
+	localChain := ConnectWithKey(operatorPrivateKey)
 
 	localProvider := local.ConnectWithKey(operatorPublicKey)
 
@@ -321,7 +316,7 @@ func setupSigningDoneCheck(
 	}
 
 	var operators []chain.Address
-	for i := 0; i < chainConfig.GroupSize; i++ {
+	for i := 0; i < groupParameters.GroupSize; i++ {
 		operators = append(operators, operatorAddress)
 	}
 
@@ -337,7 +332,7 @@ func setupSigningDoneCheck(
 	)
 
 	return newSigningDoneCheck(
-		chainConfig.GroupSize,
+		groupParameters.GroupSize,
 		broadcastChannel,
 		membershipValidator,
 	)
diff --git a/pkg/tbtc/signing_loop.go b/pkg/tbtc/signing_loop.go
index 537e46bf5e..6262f3fc19 100644
--- a/pkg/tbtc/signing_loop.go
+++ b/pkg/tbtc/signing_loop.go
@@ -85,7 +85,7 @@ type signingRetryLoop struct {
 	signingGroupMemberIndex group.MemberIndex
 	signingGroupOperators   chain.Addresses
 
-	chainConfig *ChainConfig
+	groupParameters *GroupParameters
 
 	announcer signingAnnouncer
 
@@ -102,7 +102,7 @@ func newSigningRetryLoop(
 	initialStartBlock uint64,
 	signingGroupMemberIndex group.MemberIndex,
 	signingGroupOperators chain.Addresses,
-	chainConfig *ChainConfig,
+	groupParameters *GroupParameters,
 	announcer signingAnnouncer,
 	doneCheck signingDoneCheckStrategy,
 ) *signingRetryLoop {
@@ -118,7 +118,7 @@ func newSigningRetryLoop(
 		message:                 message,
 		signingGroupMemberIndex: signingGroupMemberIndex,
 		signingGroupOperators:   signingGroupOperators,
-		chainConfig:             chainConfig,
+		groupParameters:         groupParameters,
 		announcer:               announcer,
 		attemptCounter:          0,
 		attemptStartBlock:       initialStartBlock,
@@ -235,7 +235,7 @@ func (srl *signingRetryLoop) start(
 			return nil, ctx.Err()
 		}
 
-		if len(readyMembersIndexes) >= srl.chainConfig.HonestThreshold {
+		if len(readyMembersIndexes) >= srl.groupParameters.HonestThreshold {
 			srl.logger.Infof(
 				"[member:%v] completed announcement phase for attempt [%v] "+
 					"with honest majority of [%v] members ready to sign",
@@ -433,7 +433,7 @@ func (srl *signingRetryLoop) qualifiedOperatorsSet(
 		readySigningGroupOperators,
 		srl.attemptSeed,
 		retryCount,
-		uint(srl.chainConfig.HonestThreshold),
+		uint(srl.groupParameters.HonestThreshold),
 	)
 	if err != nil {
 		return nil, fmt.Errorf(
@@ -472,7 +472,7 @@ func (srl *signingRetryLoop) excludedMembersIndexes(
 
 	// Make sure we always use just the smallest required count of
 	// signing members for performance reasons
-	if len(includedMembersIndexes) > srl.chainConfig.HonestThreshold {
+	if len(includedMembersIndexes) > srl.groupParameters.HonestThreshold {
 		// #nosec G404 (insecure random number source (rand))
 		// Shuffling does not require secure randomness.
 		rng := rand.New(rand.NewSource(
@@ -492,7 +492,7 @@ func (srl *signingRetryLoop) excludedMembersIndexes(
 		// the excluded members list.
 		excludedMembersIndexes = append(
 			excludedMembersIndexes,
-			includedMembersIndexes[srl.chainConfig.HonestThreshold:]...,
+			includedMembersIndexes[srl.groupParameters.HonestThreshold:]...,
 		)
 		// Sort the resulting excluded members list in ascending order.
 		sort.Slice(excludedMembersIndexes, func(i, j int) bool {
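
The retry loop above keeps only the smallest required number of signers per attempt: once more than groupParameters.HonestThreshold members are ready, the surplus is shuffled out deterministically so every member computes the same exclusion set without extra communication. The following is a minimal, self-contained sketch of that trimming step; trimToThreshold, the plain int indexes, and the attemptSeed argument are illustrative stand-ins for the loop's actual fields and the group.MemberIndex type, not the real implementation.

package main

import (
	"fmt"
	"math/rand"
	"sort"
)

// trimToThreshold shuffles the included member indexes with a seed shared by
// all members and marks everything beyond honestThreshold as excluded for
// the current attempt.
func trimToThreshold(
	includedMembersIndexes []int,
	honestThreshold int,
	attemptSeed int64,
) []int {
	if len(includedMembersIndexes) <= honestThreshold {
		return nil
	}

	// Shuffling does not require secure randomness; the seed only has to be
	// identical for every member of the signing group.
	rng := rand.New(rand.NewSource(attemptSeed))
	rng.Shuffle(len(includedMembersIndexes), func(i, j int) {
		includedMembersIndexes[i], includedMembersIndexes[j] =
			includedMembersIndexes[j], includedMembersIndexes[i]
	})

	excluded := append(
		[]int{},
		includedMembersIndexes[honestThreshold:]...,
	)
	// Sort the resulting excluded members list in ascending order.
	sort.Ints(excluded)
	return excluded
}

func main() {
	// With HonestThreshold = 6, four of the ten ready members are excluded.
	fmt.Println(trimToThreshold([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, 6, 42))
}

Because the seed is derived from data every member already holds, each member can compute the same exclusion set locally, which is what makes the retry attempts consistent across the group.
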
diff --git a/pkg/tbtc/signing_loop_test.go b/pkg/tbtc/signing_loop_test.go
index bc7ec0f10a..fdf9baca26 100644
--- a/pkg/tbtc/signing_loop_test.go
+++ b/pkg/tbtc/signing_loop_test.go
@@ -18,7 +18,7 @@ import (
 func TestSigningRetryLoop(t *testing.T) {
 	message := big.NewInt(100)
 
-	chainConfig := &ChainConfig{
+	groupParameters := &GroupParameters{
 		GroupSize:       10,
 		HonestThreshold: 6,
 	}
@@ -471,7 +471,7 @@ func TestSigningRetryLoop(t *testing.T) {
 				200,
 				test.signingGroupMemberIndex,
 				signingGroupOperators,
-				chainConfig,
+				groupParameters,
 				announcer,
 				doneCheck,
 			)
diff --git a/pkg/tbtc/signing_test.go b/pkg/tbtc/signing_test.go
index ebe46c7ccf..46d4234121 100644
--- a/pkg/tbtc/signing_test.go
+++ b/pkg/tbtc/signing_test.go
@@ -108,7 +108,7 @@ func TestSigningExecutor_SignBatch(t *testing.T) {
 // setupSigningExecutor sets up an instance of the signing executor ready
 // to perform test signing.
 func setupSigningExecutor(t *testing.T) *signingExecutor {
-	chainConfig := &ChainConfig{
+	groupParameters := &GroupParameters{
 		GroupSize:       5,
 		GroupQuorum:     4,
 		HonestThreshold: 3,
@@ -121,12 +121,7 @@ func setupSigningExecutor(t *testing.T) *signingExecutor {
 		t.Fatal(err)
 	}
 
-	localChain := ConnectWithKey(
-		chainConfig.GroupSize,
-		chainConfig.GroupQuorum,
-		chainConfig.HonestThreshold,
-		operatorPrivateKey,
-	)
+	localChain := ConnectWithKey(operatorPrivateKey)
 
 	localProvider := local.ConnectWithKey(operatorPublicKey)
 
@@ -138,12 +133,12 @@ func setupSigningExecutor(t *testing.T) *signingExecutor {
 	}
 
 	var operators []chain.Address
-	for i := 0; i < chainConfig.GroupSize; i++ {
+	for i := 0; i < groupParameters.GroupSize; i++ {
 		operators = append(operators, operatorAddress)
 	}
 
 	testData, err := tecdsatest.LoadPrivateKeyShareTestFixtures(
-		chainConfig.GroupSize,
+		groupParameters.GroupSize,
 	)
 	if err != nil {
 		t.Fatalf("failed to load test data: [%v]", err)
@@ -165,7 +160,8 @@ func setupSigningExecutor(t *testing.T) *signingExecutor {
 
 	keyStorePersistence := createMockKeyStorePersistence(t, signers...)
 
-	node := newNode(
+	node, err := newNode(
+		groupParameters,
 		localChain,
 		localProvider,
 		keyStorePersistence,
@@ -173,6 +169,9 @@ func setupSigningExecutor(t *testing.T) *signingExecutor {
 		generator.StartScheduler(),
 		Config{},
 	)
+	if err != nil {
+		t.Fatal(err)
+	}
 
 	executor, ok, err := node.getSigningExecutor(signers[0].wallet.publicKey)
 	if err != nil {
diff --git a/pkg/tbtc/stop.go b/pkg/tbtc/stop.go
deleted file mode 100644
index 918445233f..0000000000
--- a/pkg/tbtc/stop.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package tbtc
-
-import (
-	"context"
-	"fmt"
-
-	"github.com/keep-network/keep-core/pkg/net"
-	"github.com/keep-network/keep-core/pkg/tbtc/gen/pb"
-	"google.golang.org/protobuf/proto"
-)
-
-// TODO: This file should be gone once the contract integration is implemented.
-
-// StopPill is a temporary workaround for a missing chain integration. When
-// a group member is not selected for the current attempt of key generation or
-// signing and there is no other member from the same client selected for the
-// protocol execution, the member does not know what was the result of the
-// protocol execution and if it completed or not. In other words, the member
-// will stay hung on the block waiter, waiting for their turn. The StopPill is
-// sent via broadcast channel on a successful protocol execution and tells all
-// members waiting for their turn in the retry loop to stop because the result
-// was produced.
-type StopPill struct {
-	attemptNumber uint64
-	dkgSeed       string // empty if the stop pill is sent for signing
-	messageToSign string // empty if the stop pill is sent for DKG
-}
-
-func (sp *StopPill) Type() string {
-	return "tecdsa/stop_pill"
-}
-
-func (sp *StopPill) Marshal() ([]byte, error) {
-	return proto.Marshal(&pb.StopPill{
-		AttemptNumber: sp.attemptNumber,
-		DkgSeed:       sp.dkgSeed,
-		MessageToSign: sp.messageToSign,
-	})
-}
-
-func (sp *StopPill) Unmarshal(bytes []byte) error {
-	pbStopPill := pb.StopPill{}
-	if err := proto.Unmarshal(bytes, &pbStopPill); err != nil {
-		return fmt.Errorf("failed to unmarshal StopPill: [%v]", err)
-	}
-
-	sp.attemptNumber = pbStopPill.AttemptNumber
-	sp.dkgSeed = pbStopPill.DkgSeed
-	sp.messageToSign = pbStopPill.MessageToSign
-
-	return nil
-}
-
-func registerStopPillUnmarshaller(channel net.BroadcastChannel) {
-	channel.SetUnmarshaler(func() net.TaggedUnmarshaler {
-		return &StopPill{}
-	})
-}
-
-func sendDkgStopPill(
-	ctx context.Context,
-	broadcastChannel net.BroadcastChannel,
-	dkgSeed string,
-	attemptNumber uint,
-) error {
-	stopPill := &StopPill{
-		attemptNumber: uint64(attemptNumber),
-		dkgSeed:       dkgSeed,
-	}
-	return broadcastChannel.Send(ctx, stopPill)
-}
-
-func cancelDkgContextOnStopSignal(
-	ctx context.Context,
-	cancelFn func(),
-	broadcastChannel net.BroadcastChannel,
-	dkgSeed string,
-) {
-	broadcastChannel.Recv(ctx, func(msg net.Message) {
-		switch stopPill := msg.Payload().(type) {
-		case *StopPill:
-			if stopPill.dkgSeed == dkgSeed {
-				cancelFn()
-			}
-		}
-	})
-}
diff --git a/pkg/tbtc/tbtc.go b/pkg/tbtc/tbtc.go
index e8ea83ab70..324d94af54 100644
--- a/pkg/tbtc/tbtc.go
+++ b/pkg/tbtc/tbtc.go
@@ -23,6 +23,26 @@ var logger = log.Logger("keep-tbtc")
 // ProtocolName denotes the name of the protocol defined by this package.
 const ProtocolName = "tbtc"
 
+// GroupParameters is a structure grouping TBTC group parameters.
+type GroupParameters struct {
+	// GroupSize is the target size of a group in TBTC.
+	GroupSize int
+	// GroupQuorum is the minimum number of active participants behaving
+	// according to the protocol needed to generate a group in TBTC. This value
+	// is smaller than the GroupSize and bigger than the HonestThreshold.
+	GroupQuorum int
+	// HonestThreshold is the minimum number of active participants behaving
+	// according to the protocol needed to generate a signature.
+	HonestThreshold int
+}
+
+// DishonestThreshold is the maximum number of misbehaving participants for
+// which it is still possible to generate a signature. Misbehaviour is any
+// deviation from the protocol, including inactivity.
+func (gp *GroupParameters) DishonestThreshold() int {
+	return gp.GroupSize - gp.HonestThreshold
+}
+
 const (
 	DefaultPreParamsPoolSize              = 1000
 	DefaultPreParamsGenerationTimeout     = 2 * time.Minute
@@ -59,7 +79,25 @@ func Initialize(
 	config Config,
 	clientInfo *clientinfo.Registry,
 ) error {
-	node := newNode(chain, netProvider, keyStorePersistence, workPersistence, scheduler, config)
+	groupParameters := &GroupParameters{
+		GroupSize:       100,
+		GroupQuorum:     90,
+		HonestThreshold: 51,
+	}
+
+	node, err := newNode(
+		groupParameters,
+		chain,
+		netProvider,
+		keyStorePersistence,
+		workPersistence,
+		scheduler,
+		config,
+	)
+	if err != nil {
+		return fmt.Errorf("cannot set up TBTC node: [%v]", err)
+	}
+
 	deduplicator := newDeduplicator()
 
 	if clientInfo != nil {
@@ -74,7 +112,7 @@ func Initialize(
 		)
 	}
 
-	err := sortition.MonitorPool(
+	err = sortition.MonitorPool(
 		ctx,
 		logger,
 		chain,
@@ -121,9 +159,41 @@ func Initialize(
 		}()
 	})
 
-	// TODO: This is a temporary signing loop trigger that should be removed
-	//       once the client is integrated with real on-chain contracts.
-	_ = chain.OnSignatureRequested(func(event *SignatureRequestedEvent) {
+	_ = chain.OnDKGResultSubmitted(func(event *DKGResultSubmittedEvent) {
+		go func() {
+			if ok := deduplicator.notifyDKGResultSubmitted(
+				event.Seed,
+				event.ResultHash,
+				event.BlockNumber,
+			); !ok {
+				logger.Warnf(
+					"Result with hash [0x%x] for DKG with seed [0x%x] "+
+						"and starting block [%v] has already been processed",
+					event.ResultHash,
+					event.Seed,
+					event.BlockNumber,
+				)
+				return
+			}
+
+			logger.Infof(
+				"Result with hash [0x%x] for DKG with seed [0x%x] "+
+					"submitted at block [%v]",
+				event.ResultHash,
+				event.Seed,
+				event.BlockNumber,
+			)
+
+			node.validateDKG(
+				event.Seed,
+				event.BlockNumber,
+				event.Result,
+				event.ResultHash,
+			)
+		}()
+	})
+
+	_ = chain.OnHeartbeatRequested(func(event *HeartbeatRequestedEvent) {
 		go func() {
 			// There is no need to deduplicate. Test loop events are unique.
 			messagesDigests := make([]string, len(event.Messages))
@@ -137,7 +207,7 @@ func Initialize(
 			}
 
 			logger.Infof(
-				"signature of messages [%s] requested from "+
+				"heartbeat [%s] requested from "+
 					"wallet [0x%x] at block [%v]",
 				strings.Join(messagesDigests, ", "),
 				event.WalletPublicKey,
@@ -171,7 +241,7 @@ func Initialize(
 			}
 
 			logger.Infof(
-				"generated [%v] signatures for messages [%s] as "+
+				"generated [%v] signatures for heartbeat [%s] as "+
 					"requested from wallet [0x%x] at block [%v]",
 				len(signatures),
 				strings.Join(messagesDigests, ", "),
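
GroupParameters now flows through newNode and the retry loops in place of the chain-level ChainConfig, and the dishonest threshold is derived rather than stored. Below is a small sketch using the values hardcoded in Initialize (GroupSize 100, GroupQuorum 90, HonestThreshold 51); the local struct simply mirrors the one added to pkg/tbtc and is not an import of it.

package main

import "fmt"

// GroupParameters mirrors the structure introduced in pkg/tbtc.
type GroupParameters struct {
	GroupSize       int
	GroupQuorum     int
	HonestThreshold int
}

// DishonestThreshold is derived, not stored: it is the number of members
// that can misbehave or stay inactive while a signature is still possible.
func (gp *GroupParameters) DishonestThreshold() int {
	return gp.GroupSize - gp.HonestThreshold
}

func main() {
	// Values used by tbtc.Initialize in this change.
	gp := &GroupParameters{
		GroupSize:       100,
		GroupQuorum:     90,
		HonestThreshold: 51,
	}
	fmt.Println(gp.DishonestThreshold()) // prints 49
}

With these values a signature can still be produced when up to 49 members misbehave or remain inactive, while generating a new group requires at least 90 active candidates.
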
diff --git a/pkg/tbtc/wallet.go b/pkg/tbtc/wallet.go
index 46fa00973c..182f72aa78 100644
--- a/pkg/tbtc/wallet.go
+++ b/pkg/tbtc/wallet.go
@@ -33,8 +33,8 @@ type wallet struct {
 }
 
 // groupSize returns the actual size of the wallet's signing group. This
-// value may be different from the `GroupSize` parameter of the chain config
-// as some candidates may be excluded during distributed key generation.
+// value may be different from the GroupParameters.GroupSize parameter as some
+// candidates may be excluded during distributed key generation.
 func (w *wallet) groupSize() int {
 	return len(w.signingGroupOperators)
 }
diff --git a/pkg/tecdsa/dkg/dkg.go b/pkg/tecdsa/dkg/dkg.go
index 3d77a60efa..a2acfef876 100644
--- a/pkg/tecdsa/dkg/dkg.go
+++ b/pkg/tecdsa/dkg/dkg.go
@@ -124,7 +124,7 @@ func (e *Executor) PreParamsCount() int {
 type SignedResult struct {
 	PublicKey  []byte
 	Signature  []byte
-	ResultHash ResultHash
+	ResultHash ResultSignatureHash
 }
 
 // ResultSigner is the interface that provides ability to sign the DKG result
diff --git a/pkg/tecdsa/dkg/marshaling.go b/pkg/tecdsa/dkg/marshaling.go
index 00e136c506..4e2815d62e 100644
--- a/pkg/tecdsa/dkg/marshaling.go
+++ b/pkg/tecdsa/dkg/marshaling.go
@@ -249,7 +249,7 @@ func (rsm *resultSignatureMessage) Unmarshal(bytes []byte) error {
 	}
 	rsm.senderID = group.MemberIndex(pbMsg.SenderID)
 
-	resultHash, err := ResultHashFromBytes(pbMsg.ResultHash)
+	resultHash, err := ResultSignatureHashFromBytes(pbMsg.ResultHash)
 	if err != nil {
 		return err
 	}
diff --git a/pkg/tecdsa/dkg/marshaling_test.go b/pkg/tecdsa/dkg/marshaling_test.go
index a1af8f7437..314f19376c 100644
--- a/pkg/tecdsa/dkg/marshaling_test.go
+++ b/pkg/tecdsa/dkg/marshaling_test.go
@@ -293,7 +293,7 @@ func TestFuzzResultSignatureMessage_MarshalingRoundtrip(t *testing.T) {
 	for i := 0; i < 10; i++ {
 		var (
 			senderID   group.MemberIndex
-			resultHash ResultHash
+			resultHash ResultSignatureHash
 			signature  []byte
 			publicKey  []byte
 			sessionID  string
diff --git a/pkg/tecdsa/dkg/member.go b/pkg/tecdsa/dkg/member.go
index a6a79f71de..0418d03bb2 100644
--- a/pkg/tecdsa/dkg/member.go
+++ b/pkg/tecdsa/dkg/member.go
@@ -242,7 +242,7 @@ type signingMember struct {
 	// Identifier of the particular DKG session this member is part of.
 	sessionID string
 	// Hash of DKG result preferred by the current participant.
-	preferredDKGResultHash ResultHash
+	preferredDKGResultHash ResultSignatureHash
 	// Signature over preferredDKGResultHash calculated by the member.
 	selfDKGResultSignature []byte
 }
diff --git a/pkg/tecdsa/dkg/message.go b/pkg/tecdsa/dkg/message.go
index fb4754d917..ca9364ac57 100644
--- a/pkg/tecdsa/dkg/message.go
+++ b/pkg/tecdsa/dkg/message.go
@@ -153,7 +153,7 @@ func (tfm *tssFinalizationMessage) Type() string {
 type resultSignatureMessage struct {
 	senderID group.MemberIndex
 
-	resultHash ResultHash
+	resultHash ResultSignatureHash
 	signature  []byte
 	publicKey  []byte
 	sessionID  string
diff --git a/pkg/tecdsa/dkg/protocol_test.go b/pkg/tecdsa/dkg/protocol_test.go
index aaf534b677..e1dd4ff1dd 100644
--- a/pkg/tecdsa/dkg/protocol_test.go
+++ b/pkg/tecdsa/dkg/protocol_test.go
@@ -1185,7 +1185,7 @@ func TestSignDKGResult(t *testing.T) {
 
 	publicKey := []byte("publicKey")
 	signature := []byte("signature")
-	resultHash := ResultHash{0: 11, 6: 22, 31: 33}
+	resultHash := ResultSignatureHash{0: 11, 6: 22, 31: 33}
 
 	resultSigner := newMockResultSigner(publicKey)
 	resultSigner.setSigningOutcome(result, &signingOutcome{
@@ -1245,7 +1245,7 @@ func TestSignDKGResult_ErrorDuringSigning(t *testing.T) {
 	resultSigner := newMockResultSigner([]byte("publicKey"))
 	resultSigner.setSigningOutcome(result, &signingOutcome{
 		signature:  []byte("signature"),
-		resultHash: ResultHash{0: 11, 6: 22, 31: 33},
+		resultHash: ResultSignatureHash{0: 11, 6: 22, 31: 33},
 		err:        fmt.Errorf("dummy error"),
 	})
 
@@ -1266,7 +1266,7 @@ func TestSignDKGResult_ErrorDuringSigning(t *testing.T) {
 
 func TestVerifyDKGResultSignatures(t *testing.T) {
 	signingMember := initializeSigningMember()
-	signingMember.preferredDKGResultHash = ResultHash{11: 11}
+	signingMember.preferredDKGResultHash = ResultSignatureHash{11: 11}
 	signingMember.selfDKGResultSignature = []byte("sign 1")
 
 	type messageWithOutcome struct {
@@ -1283,7 +1283,7 @@ func TestVerifyDKGResultSignatures(t *testing.T) {
 				{
 					&resultSignatureMessage{
 						senderID:   2,
-						resultHash: ResultHash{11: 11},
+						resultHash: ResultSignatureHash{11: 11},
 						signature:  []byte("sign 2"),
 						publicKey:  []byte("pubKey 2"),
 						sessionID:  "session-1",
@@ -1296,7 +1296,7 @@ func TestVerifyDKGResultSignatures(t *testing.T) {
 				{
 					&resultSignatureMessage{
 						senderID:   3,
-						resultHash: ResultHash{11: 11},
+						resultHash: ResultSignatureHash{11: 11},
 						signature:  []byte("sign 3"),
 						publicKey:  []byte("pubKey 3"),
 						sessionID:  "session-1",
@@ -1319,7 +1319,7 @@ func TestVerifyDKGResultSignatures(t *testing.T) {
 				{
 					&resultSignatureMessage{
 						senderID:   2,
-						resultHash: ResultHash{12: 12},
+						resultHash: ResultSignatureHash{12: 12},
 						signature:  []byte("sign 2"),
 						publicKey:  []byte("pubKey 2"),
 						sessionID:  "session-1",
@@ -1340,7 +1340,7 @@ func TestVerifyDKGResultSignatures(t *testing.T) {
 				{
 					&resultSignatureMessage{
 						senderID:   2,
-						resultHash: ResultHash{11: 11},
+						resultHash: ResultSignatureHash{11: 11},
 						signature:  []byte("sign 2"),
 						publicKey:  []byte("pubKey 2"),
 						sessionID:  "session-1",
@@ -1360,7 +1360,7 @@ func TestVerifyDKGResultSignatures(t *testing.T) {
 				{
 					&resultSignatureMessage{
 						senderID:   2,
-						resultHash: ResultHash{11: 11},
+						resultHash: ResultSignatureHash{11: 11},
 						signature:  []byte("bad sign"),
 						publicKey:  []byte("pubKey 2"),
 						sessionID:  "session-1",
@@ -1900,7 +1900,7 @@ func newTssPreParams(
 
 type signingOutcome struct {
 	signature  []byte
-	resultHash ResultHash
+	resultHash ResultSignatureHash
 	err        error
 }
 
@@ -1974,7 +1974,7 @@ func (mrs *mockResultSigner) VerifySignature(signedResult *SignedResult) (bool,
 func signatureVerificationKey(
 	publicKey []byte,
 	signature []byte,
-	resultHash ResultHash,
+	resultHash ResultSignatureHash,
 ) string {
 	return fmt.Sprintf("%s-%s-%s", publicKey, signature, resultHash[:])
 }
diff --git a/pkg/tecdsa/dkg/result.go b/pkg/tecdsa/dkg/result.go
index 151bd0fd2b..83fe5311d7 100644
--- a/pkg/tecdsa/dkg/result.go
+++ b/pkg/tecdsa/dkg/result.go
@@ -1,6 +1,7 @@
 package dkg
 
 import (
+	"crypto/ecdsa"
 	"crypto/elliptic"
 	"fmt"
 	"sort"
@@ -19,16 +20,27 @@ type Result struct {
 	PrivateKeyShare *tecdsa.PrivateKeyShare
 }
 
-// GroupPublicKeyBytes returns the public key corresponding to the private
+// GroupPublicKey returns the public key corresponding to the private
 // key share generated during the DKG protocol execution.
-func (r *Result) GroupPublicKeyBytes() ([]byte, error) {
+func (r *Result) GroupPublicKey() (*ecdsa.PublicKey, error) {
 	if r.PrivateKeyShare == nil {
 		return nil, fmt.Errorf(
 			"cannot retrieve group public key as private key share is nil",
 		)
 	}
 
-	publicKey := r.PrivateKeyShare.PublicKey()
+	return r.PrivateKeyShare.PublicKey(), nil
+}
+
+// GroupPublicKeyBytes returns the public key corresponding to the private
+// key share generated during the DKG protocol execution. The resulting
+// slice is 65 bytes long and starts with the 0x04 prefix denoting an
+// uncompressed key.
+func (r *Result) GroupPublicKeyBytes() ([]byte, error) {
+	publicKey, err := r.GroupPublicKey()
+	if err != nil {
+		return nil, err
+	}
 
 	return elliptic.Marshal(
 		publicKey.Curve,
@@ -61,20 +73,21 @@ func (r *Result) MisbehavedMembersIndexes() []group.MemberIndex {
 	return sorted
 }
 
-const ResultHashByteSize = 32
+const ResultSignatureHashByteSize = 32
 
-// ResultHash is a 256-bit hash of DKG Result. The hashing algorithm used
-// depends on the client code.
-type ResultHash [ResultHashByteSize]byte
+// ResultSignatureHash is the hash of the DKG result that is signed by group
+// members. The hashing algorithm used depends on the client code.
+type ResultSignatureHash [ResultSignatureHashByteSize]byte
 
-// ResultHashFromBytes converts bytes slice to DKG Result Hash. It requires
-// provided bytes slice size to be exactly 32 bytes.
-func ResultHashFromBytes(bytes []byte) (ResultHash, error) {
-	var hash ResultHash
+// ResultSignatureHashFromBytes converts a byte slice to a ResultSignatureHash.
+// It requires the provided byte slice to be exactly
+// ResultSignatureHashByteSize bytes long.
+func ResultSignatureHashFromBytes(bytes []byte) (ResultSignatureHash, error) {
+	var hash ResultSignatureHash
 
-	if len(bytes) != ResultHashByteSize {
+	if len(bytes) != ResultSignatureHashByteSize {
 		return hash, fmt.Errorf(
-			"bytes length is not equal %v", ResultHashByteSize,
+			"bytes length is not equal to %v", ResultSignatureHashByteSize,
 		)
 	}
 	copy(hash[:], bytes[:])
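
The reworked result.go splits the group public key accessors and renames the hash type used for result signatures. The sketch below exercises both behaviours under stated assumptions: P-256 and SHA-256 stand in for whatever curve and hashing algorithm the client code actually uses, and resultSignatureHashFromBytes is a local copy of the exported ResultSignatureHashFromBytes so the example stays self-contained.

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
)

const resultSignatureHashByteSize = 32

type resultSignatureHash [resultSignatureHashByteSize]byte

// resultSignatureHashFromBytes mirrors ResultSignatureHashFromBytes: it only
// accepts byte slices of exactly resultSignatureHashByteSize bytes.
func resultSignatureHashFromBytes(bytes []byte) (resultSignatureHash, error) {
	var hash resultSignatureHash
	if len(bytes) != resultSignatureHashByteSize {
		return hash, fmt.Errorf(
			"bytes length is not equal to %v", resultSignatureHashByteSize,
		)
	}
	copy(hash[:], bytes)
	return hash, nil
}

func main() {
	// P-256 stands in for the curve actually used by the DKG result; any
	// 256-bit curve marshals to 65 bytes: the 0x04 prefix plus 32-byte X
	// and Y coordinates.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	uncompressed := elliptic.Marshal(key.Curve, key.PublicKey.X, key.PublicKey.Y)
	fmt.Println(len(uncompressed), uncompressed[0]) // 65 4

	// SHA-256 stands in for whatever hashing the client code performs on
	// the serialized DKG result before signing it.
	digest := sha256.Sum256(uncompressed)
	hash, err := resultSignatureHashFromBytes(digest[:])
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", hash[:4])
}

Any 256-bit curve marshals to 65 bytes under elliptic.Marshal, which matches the 0x04-prefixed, 65-byte layout documented on GroupPublicKeyBytes.
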