diff --git a/CHANGELOG.md b/CHANGELOG.md
index c46a66af9..960a19c17 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,6 +8,7 @@ adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 ### Added
 * `MysqlDatabase` `MysqlUser` Add delete policy
 * Add `PtHeartbeatResources` in `.Spec.PodSpec` to allow the user specifying resources for pt-heartbeat.
+* Add `CandidateNode` field in `MysqlBackup.Spec` to allow the user to specify the candidate node for the backup.
 * Set `MysqlCluter.Spec.BackupSchedule` to empty string to disable recurrent backups
 ### Changed
 * Set default MySQL server version to `5.7.35`
diff --git a/config/crd/bases/mysql.presslabs.org_mysqlbackups.yaml b/config/crd/bases/mysql.presslabs.org_mysqlbackups.yaml
index 20a793935..0a7e86069 100644
--- a/config/crd/bases/mysql.presslabs.org_mysqlbackups.yaml
+++ b/config/crd/bases/mysql.presslabs.org_mysqlbackups.yaml
@@ -36,6 +36,9 @@ spec:
             backupURL:
               description: BackupURL represents the URL to the backup location, this can be partially specifyied. Default is used the one specified in the cluster.
               type: string
+            candidateNode:
+              description: CandidateNode is the node host that will be used to take the backup. If not set, or set to an invalid node, the operator will choose the candidate node itself.
+              type: string
             clusterName:
               description: ClustterName represents the cluster for which to take backup
               type: string
diff --git a/deploy/charts/mysql-operator/crds/mysql.presslabs.org_mysqlbackups.yaml b/deploy/charts/mysql-operator/crds/mysql.presslabs.org_mysqlbackups.yaml
index 0b5cc05f6..5e3a08209 100644
--- a/deploy/charts/mysql-operator/crds/mysql.presslabs.org_mysqlbackups.yaml
+++ b/deploy/charts/mysql-operator/crds/mysql.presslabs.org_mysqlbackups.yaml
@@ -37,6 +37,9 @@ spec:
             backupURL:
               description: BackupURL represents the URL to the backup location, this can be partially specifyied. Default is used the one specified in the cluster.
               type: string
+            candidateNode:
+              description: CandidateNode is the node host that will be used to take the backup. If not set, or set to an invalid node, the operator will choose the candidate node itself.
+              type: string
             clusterName:
               description: ClustterName represents the cluster for which to take backup
               type: string
diff --git a/pkg/apis/mysql/v1alpha1/mysqlbackup_types.go b/pkg/apis/mysql/v1alpha1/mysqlbackup_types.go
index 330eed777..61bffe799 100644
--- a/pkg/apis/mysql/v1alpha1/mysqlbackup_types.go
+++ b/pkg/apis/mysql/v1alpha1/mysqlbackup_types.go
@@ -44,6 +44,11 @@ type MysqlBackupSpec struct {
 	// default it's used softDelete.
 	// +optional
 	RemoteDeletePolicy DeletePolicy `json:"remoteDeletePolicy,omitempty"`
+
+	// CandidateNode is the node host that will be used to take the backup.
+	// If not set, or set to an invalid node, the operator will choose the candidate node itself.
+	// +optional
+	CandidateNode string `json:"candidateNode,omitempty"`
 }

 // BackupCondition defines condition struct for backup resource
@@ -94,7 +99,6 @@ type MysqlBackupStatus struct {

 // MysqlBackup is the Schema for the mysqlbackups API
 // +kubebuilder:object:root=true
-//
 type MysqlBackup struct {
 	metav1.TypeMeta   `json:",inline"`
 	metav1.ObjectMeta `json:"metadata,omitempty"`
@@ -105,7 +109,6 @@ type MysqlBackup struct {

 // MysqlBackupList contains a list of MysqlBackup
 // +kubebuilder:object:root=true
-//
 type MysqlBackupList struct {
 	metav1.TypeMeta `json:",inline"`
 	metav1.ListMeta `json:"metadata,omitempty"`
diff --git a/pkg/controller/mysqlbackup/internal/syncer/job.go b/pkg/controller/mysqlbackup/internal/syncer/job.go
index af13de3dc..33ae9a15f 100644
--- a/pkg/controller/mysqlbackup/internal/syncer/job.go
+++ b/pkg/controller/mysqlbackup/internal/syncer/job.go
@@ -88,9 +88,17 @@ func (s *jobSyncer) SyncFn() error {
 	return nil
 }

-// getBackupCandidate returns the hostname of the first not-lagged and
+// getBackupCandidate returns the candidate node from the backup spec, if it
+// is set and valid; otherwise it returns the hostname of the first not-lagged and
 // replicating slave node, else returns the master node.
 func (s *jobSyncer) getBackupCandidate() string {
+	if s.backup.Spec.CandidateNode != "" {
+		if err := s.backup.Validate(s.cluster); err == nil {
+			return s.backup.Spec.CandidateNode
+		}
+		log.Info("the backup's candidate node is not valid, falling back to automatic candidate selection")
+	}
+
 	for _, node := range s.cluster.Status.Nodes {
 		master := s.cluster.GetNodeCondition(node.Name, api.NodeConditionMaster)
 		replicating := s.cluster.GetNodeCondition(node.Name, api.NodeConditionReplicating)
diff --git a/pkg/controller/mysqlbackup/mysqlbackup_controller_test.go b/pkg/controller/mysqlbackup/mysqlbackup_controller_test.go
index 8744cd8b6..6e1cbaa31 100644
--- a/pkg/controller/mysqlbackup/mysqlbackup_controller_test.go
+++ b/pkg/controller/mysqlbackup/mysqlbackup_controller_test.go
@@ -119,16 +119,7 @@ var _ = Describe("MysqlBackup controller", func() {
 		BeforeEach(func() {
 			// create a cluster with 2 nodes
 			Expect(c.Create(context.TODO(), cluster.Unwrap())).To(Succeed())
-			cluster.Status.Nodes = []api.NodeStatus{
-				{
-					Name:       cluster.GetPodHostname(0),
-					Conditions: testutil.NodeConditions(true, false, false, false),
-				},
-				{
-					Name:       cluster.GetPodHostname(1),
-					Conditions: testutil.NodeConditions(false, true, false, true),
-				},
-			}
+			cluster.Status.Nodes = getHealthyNodeStatus(cluster, 2)
 			Expect(c.Status().Update(context.TODO(), cluster.Unwrap())).To(Succeed())
 			// create the backup
 			Expect(c.Create(context.TODO(), backup.Unwrap())).To(Succeed())
@@ -316,8 +307,74 @@ var _ = Describe("MysqlBackup controller", func() {
 			Expect(c.Delete(context.TODO(), cluster.Unwrap())).To(Succeed())
 		})
 	})
+
+	When("candidate node is set to a wrong node", func() {
+		BeforeEach(func() {
+			backup.Spec.CandidateNode = cluster.GetPodHostname(3)
+			Expect(c.Create(context.TODO(), backup.Unwrap())).To(Succeed())
+
+			Expect(c.Create(context.TODO(), cluster.Unwrap())).To(Succeed())
+			cluster.Status.Nodes = getHealthyNodeStatus(cluster, 2)
+			Expect(c.Status().Update(context.TODO(), cluster.Unwrap())).To(Succeed())
+
+			Eventually(requests, timeout).Should(Receive(Equal(expectedRequest)))
+			Eventually(requests, timeout).Should(Receive(Equal(expectedRequest)))
+			testutil.DrainChan(requests)
+		})
+		AfterEach(func() {
+			Expect(c.Delete(context.TODO(), backup.Unwrap())).To(Succeed())
+			Expect(c.Delete(context.TODO(), cluster.Unwrap())).To(Succeed())
+		})
+
+		It("should take backup from replica 1", func() {
+			job := &batch.Job{}
+			Expect(c.Get(context.TODO(), jobKey, job)).To(Succeed())
+			Expect(job.Spec.Template.Spec.Containers[0].Args).To(ContainElement(Equal(cluster.GetPodHostname(1))))
+		})
+	})
+
+	When("candidate node is set to the master", func() {
+		BeforeEach(func() {
+			backup.Spec.CandidateNode = cluster.GetPodHostname(0)
+			Expect(c.Create(context.TODO(), backup.Unwrap())).To(Succeed())
+
+			Expect(c.Create(context.TODO(), cluster.Unwrap())).To(Succeed())
+			cluster.Status.Nodes = getHealthyNodeStatus(cluster, 2)
+			Expect(c.Status().Update(context.TODO(), cluster.Unwrap())).To(Succeed())
+
+			Eventually(requests, timeout).Should(Receive(Equal(expectedRequest)))
+			Eventually(requests, timeout).Should(Receive(Equal(expectedRequest)))
+			testutil.DrainChan(requests)
+		})
+		AfterEach(func() {
+			Expect(c.Delete(context.TODO(), backup.Unwrap())).To(Succeed())
+			Expect(c.Delete(context.TODO(), cluster.Unwrap())).To(Succeed())
+		})
+
+		It("should take backup from master", func() {
+			job := &batch.Job{}
+			Expect(c.Get(context.TODO(), jobKey, job)).To(Succeed())
+			Expect(job.Spec.Template.Spec.Containers[0].Args).To(ContainElement(Equal(cluster.GetPodHostname(0))))
+		})
+	})
 })

+// getHealthyNodeStatus returns a node status list with node 0 as master and
+// the remaining count-1 nodes as healthy replicating slaves.
+func getHealthyNodeStatus(cluster *mysqlcluster.MysqlCluster, count int) []api.NodeStatus {
+	status := []api.NodeStatus{
+		{
+			Name:       cluster.GetPodHostname(0),
+			Conditions: testutil.NodeConditions(true, false, false, false),
+		},
+	}
+	for i := 1; i < count; i++ {
+		status = append(status, api.NodeStatus{
+			Name:       cluster.GetPodHostname(i),
+			Conditions: testutil.NodeConditions(false, true, false, false),
+		})
+	}
+	return status
+}
+
 func refreshFn(c client.Client, backupKey types.NamespacedName) func() *api.MysqlBackup {
 	return func() *api.MysqlBackup {
 		backup := &api.MysqlBackup{}
diff --git a/pkg/internal/mysqlbackup/validation.go b/pkg/internal/mysqlbackup/validation.go
new file mode 100644
index 000000000..a51a28fbf
--- /dev/null
+++ b/pkg/internal/mysqlbackup/validation.go
@@ -0,0 +1,42 @@
+/*
+Copyright 2018 Pressinfra SRL
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package mysqlbackup
+
+import (
+	"fmt"
+
+	"github.com/bitpoke/mysql-operator/pkg/internal/mysqlcluster"
+)
+
+// Validate checks if the backup spec is valid
+func (c *MysqlBackup) Validate(cluster *mysqlcluster.MysqlCluster) error {
+	// TODO: this validation should be done in an admission web-hook
+
+	if c.Spec.CandidateNode != "" {
+		// the candidate node must be one of the cluster's pods
+		valid := false
+		for i := 0; i < int(*cluster.Spec.Replicas); i++ {
+			if c.Spec.CandidateNode == cluster.GetPodHostname(i) {
+				valid = true
+				break
+			}
+		}
+		if !valid {
+			return fmt.Errorf("spec.candidateNode %q is not a valid node", c.Spec.CandidateNode)
+		}
+	}
+	return nil
+}
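
For reference, a minimal `MysqlBackup` manifest using the new field might look like the sketch below. The values are illustrative: the cluster name `my-cluster`, the `default` namespace, and the bucket URL are made up, and the hostname assumes the pod naming produced by `GetPodHostname` (`<cluster>-mysql-<index>.<cluster>-mysql-nodes.<namespace>`).

apiVersion: mysql.presslabs.org/v1alpha1
kind: MysqlBackup
metadata:
  name: my-cluster-backup
spec:
  clusterName: my-cluster
  backupURL: s3://my-bucket/my-cluster-backup.xbackup.gz
  candidateNode: my-cluster-mysql-1.my-cluster-mysql-nodes.default

If `candidateNode` does not name a pod of the cluster, `Validate` returns an error and `getBackupCandidate` falls back to the existing behavior: the first not-lagged replicating slave, else the master.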