diff --git a/physical/raft/raft.go b/physical/raft/raft.go
index 9010270c3f55..a448c8ebad93 100644
--- a/physical/raft/raft.go
+++ b/physical/raft/raft.go
@@ -1461,6 +1461,19 @@ func (b *RaftBackend) StartRemovedChecker(ctx context.Context) {
 		for {
 			select {
 			case <-ticker.C:
+				// If the raft cluster has been torn down (which will happen on
+				// seal) the raft backend will be uninitialized. We want to exit
+				// the loop in that case. If the cluster unseals, we'll get a
+				// new backend setup and that will have its own removed checker.
+
+				// There is a ctx.Done() check below that will also exit, but
+				// in most (if not all) places we pass in context.Background()
+				// to this function. Checking initialization will prevent this
+				// loop from continuing to run after the raft backend is stopped
+				// regardless of the context.
+				if !b.Initialized() {
+					return
+				}
 				removed, err := b.IsNodeRemoved(ctx, b.localID)
 				if err != nil {
 					logger.Error("failed to check if node is removed", "node ID", b.localID, "error", err)
diff --git a/vault/external_tests/raftha/raft_ha_test.go b/vault/external_tests/raftha/raft_ha_test.go
index bd163566157a..705712d9adea 100644
--- a/vault/external_tests/raftha/raft_ha_test.go
+++ b/vault/external_tests/raftha/raft_ha_test.go
@@ -364,6 +364,9 @@ func TestRaftHACluster_Removed_ReAdd(t *testing.T) {
 			if !server.Healthy {
 				return fmt.Errorf("server %s is unhealthy", serverID)
 			}
+			if server.NodeType != "voter" {
+				return fmt.Errorf("server %s has type %s", serverID, server.NodeType)
+			}
 		}
 		return nil
 	})
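
For context, a minimal self-contained sketch of the pattern the first hunk relies on. This is not the Vault implementation; the backend type, startRemovedChecker helper, and interval below are made up for illustration. It shows why the added Initialized() check matters: callers generally pass context.Background(), so the ctx.Done() case alone would never fire, and the goroutine can only stop by observing that the backend has been torn down.

// removedchecker_sketch.go (hypothetical, illustrative only)
package main

import (
	"context"
	"fmt"
	"sync/atomic"
	"time"
)

// backend is a stand-in for the raft backend; initialized mirrors the
// state that Initialized() exposes in the real code.
type backend struct {
	initialized atomic.Bool
}

func (b *backend) Initialized() bool { return b.initialized.Load() }

// startRemovedChecker mirrors the shape of StartRemovedChecker: the
// initialization check runs on each tick so the goroutine exits once the
// backend is torn down (e.g. on seal), regardless of the context.
func (b *backend) startRemovedChecker(ctx context.Context, interval time.Duration) {
	go func() {
		ticker := time.NewTicker(interval)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				if !b.Initialized() {
					fmt.Println("backend uninitialized, checker exiting")
					return
				}
				fmt.Println("checking whether this node was removed")
			case <-ctx.Done():
				return
			}
		}
	}()
}

func main() {
	b := &backend{}
	b.initialized.Store(true)

	// Background context: cancellation never happens, so only the
	// Initialized() check can end the loop.
	b.startRemovedChecker(context.Background(), 100*time.Millisecond)

	time.Sleep(250 * time.Millisecond)
	b.initialized.Store(false) // simulate sealing / tearing down the backend
	time.Sleep(250 * time.Millisecond)
}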