diff --git a/Dockerfile.sidecar b/Dockerfile.sidecar index 4eac67be..39339044 100644 --- a/Dockerfile.sidecar +++ b/Dockerfile.sidecar @@ -43,13 +43,20 @@ RUN set -ex; \ ARG XTRABACKUP_PKG=percona-xtrabackup-24 RUN set -ex; \ apt-get update; \ - apt-get install -y --no-install-recommends gnupg2 wget lsb-release curl bc; \ + apt-get install -y --no-install-recommends gnupg2 wget lsb-release curl bc fuse jq openssh-server; \ wget -P /tmp --no-check-certificate https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb; \ dpkg -i /tmp/percona-release_latest.$(lsb_release -sc)_all.deb; \ apt-get update; \ apt-get install -y --no-install-recommends ${XTRABACKUP_PKG}; \ rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* - +#ADD http://mirrors.woqutech.com/download/qfusion/files/bin/juicefs-1.0.0-rc1-linux-amd64 /usr/local/bin/juicefs +# COPY juicefs/juicefs /usr/local/bin/juicefs +RUN wget --no-check-certificate "https://d.juicefs.com/juicefs/releases/download/v1.0.2/juicefs-1.0.2-linux-amd64.tar.gz" && tar -zxf "juicefs-1.0.2-linux-amd64.tar.gz" ;\ + mv juicefs /usr/local/bin/juicefs; \ + chmod +x /usr/local/bin/juicefs ; mkdir -p /run/sshd; \ + mkdir -p /root/.ssh; \ + chmod 700 /root/.ssh WORKDIR / COPY --from=builder /workspace/bin/sidecar /usr/local/bin/sidecar -ENTRYPOINT ["sidecar"] +COPY script/*.sh / +CMD [ "sidecar" ] diff --git a/api/v1alpha1/backup_types.go b/api/v1alpha1/backup_types.go index 3fc018a2..d177fbde 100644 --- a/api/v1alpha1/backup_types.go +++ b/api/v1alpha1/backup_types.go @@ -21,6 +21,14 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +type JuiceOpt struct { + // JuiceMeta is the juicefs metadata backend: sqlite or redis + JuiceMeta string `json:"juiceMeta"` + // BackupSecretName is the name of the Secret holding S3 credentials + BackupSecretName string `json:"backupSecretName"` + JuiceName string `json:"juiceName"` +} + // This is the backup Job CRD. 
// BackupSpec defines the desired state of Backup type BackupSpec struct { @@ -40,6 +48,9 @@ type BackupSpec struct { // +optional NFSServerAddress string `json:"nfsServerAddress,omitempty"` + // Represents the juicefs parameters which are needed. + // +optional + JuiceOpt *JuiceOpt `json:"juiceOpt,omitempty"` // ClusterName represents the cluster name to backup ClusterName string `json:"clusterName"` diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index db850e7c..444caadf 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -104,6 +104,11 @@ func (in *BackupList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BackupSpec) DeepCopyInto(out *BackupSpec) { *out = *in + if in.JuiceOpt != nil { + in, out := &in.JuiceOpt, &out.JuiceOpt + *out = new(JuiceOpt) + **out = **in + } if in.HistoryLimit != nil { in, out := &in.HistoryLimit, &out.HistoryLimit *out = new(int32) @@ -174,6 +179,21 @@ func (in *ClusterCondition) DeepCopy() *ClusterCondition { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JuiceOpt) DeepCopyInto(out *JuiceOpt) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JuiceOpt. +func (in *JuiceOpt) DeepCopy() *JuiceOpt { + if in == nil { + return nil + } + out := new(JuiceOpt) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MetricsOpts) DeepCopyInto(out *MetricsOpts) { *out = *in diff --git a/backup/syncer/job.go b/backup/syncer/job.go index b1e7e8a2..96b97972 100644 --- a/backup/syncer/job.go +++ b/backup/syncer/job.go @@ -17,13 +17,16 @@ limitations under the License. 
package syncer import ( + "context" "fmt" + "strings" "github.com/presslabs/controller-util/pkg/syncer" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" v1alpha1 "github.com/radondb/radondb-mysql-kubernetes/api/v1alpha1" @@ -33,6 +36,7 @@ import ( ) type jobSyncer struct { + client client.Client job *batchv1.Job backup *backup.Backup } @@ -50,6 +54,7 @@ func NewJobSyncer(c client.Client, backup *backup.Backup) syncer.Interface { } sync := &jobSyncer{ + client: c, job: obj, backup: backup, } @@ -174,6 +179,10 @@ func (s *jobSyncer) ensurePodSpec(in corev1.PodSpec) corev1.PodSpec { MountPath: utils.XtrabckupLocal, }, } + } else if s.backup.Spec.JuiceOpt != nil { + // Deal it for juiceOpt + s.buildJuicefsBackPod(&in) + } else { // in.Containers[0].ImagePullPolicy = s.opt.ImagePullPolicy in.Containers[0].Args = []string{ @@ -238,3 +247,88 @@ func (s *jobSyncer) ensurePodSpec(in corev1.PodSpec) corev1.PodSpec { } return in } + +func (s *jobSyncer) buildJuicefsBackPod(in *corev1.PodSpec) error { + // add volumn about pvc + var defMode int32 = 0600 + var err error + var cmdstr string + in.Volumes = []corev1.Volume{ + { + Name: utils.SShVolumnName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: fmt.Sprintf("%s-ssh-key", s.backup.Spec.ClusterName), + DefaultMode: &defMode, + }, + }, + }, + } + + in.Containers[0].VolumeMounts = []corev1.VolumeMount{ + { + Name: utils.SShVolumnName, + MountPath: utils.SshVolumnPath, + }, + } + + // PodName.clusterName-mysql.Namespace + // sample-mysql-0.sample-mysql.default + hostname := fmt.Sprintf("%s.%s-mysql.%s", s.backup.Spec.HostName, s.backup.Spec.ClusterName, s.backup.Namespace) + if cmdstr, err = s.buildJuicefsCmd(s.backup.Spec.JuiceOpt.BackupSecretName); err != nil { + return err + } + + in.Containers[0].Command = 
[]string{"bash", "-c", "--", `cp /etc/secret-ssh/* /root/.ssh +chmod 600 /root/.ssh/authorized_keys ;` + + strings.Join([]string{ + "ssh", "-o", "UserKnownHostsFile=/dev/null", "-o", "StrictHostKeyChecking=no", hostname, cmdstr, + }, " ")} + + return nil +} + +func (s *jobSyncer) buildJuicefsCmd(secName string) (string, error) { + juiceopt := s.backup.Spec.JuiceOpt + secret := &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Secret", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: secName, + Namespace: s.backup.Namespace, + }, + } + err := s.client.Get(context.TODO(), + types.NamespacedName{Namespace: s.backup.Namespace, + Name: secName}, secret) + + if err != nil { + return "", err + } + url, bucket := secret.Data["s3-endpoint"], secret.Data["s3-bucket"] + accesskey, secretkey := secret.Data["s3-access-key"], secret.Data["s3-secret-key"] + juicebucket := installBucket(string(url), string(bucket)) + cmdstr := fmt.Sprintf(`<$JSONFILE + else + echo exist the json file + fi +} +function read() { + max=0 + IFS_OLD=$IFS + IFS=$(echo -en "\n\b") + for i in $(jq -c '.backup_chains[]' $JSONFILE); + do + #echo $i | jq '.type' + val=$(echo $i | jq '."target-dir"|match("\\d+")|.string|tonumber') + #echo $val + if [[ $max < $val ]] ; then + max=$val + fi + done + IFS=$IFS_OLD + echo $max +} + +function getDate() { + date '+%Y-%m-%d %H:%M:%S' +} + +function parseDateToUnix() { + local t=$1 + + echo date -d $t '+%s'|sh +} +function checkTime() { + local time=$1 # get the parameter + val=0 + IFS_OLD=$IFS + IFS=$(echo -en "\n\b") + for i in $(jq -c '.backup_chains[]' $JSONFILE); + do + traw=$(echo $i|jq '."time"') + val=$(echo $i | jq '."target-dir"|match("\\d+")|.string|tonumber') + t=$(echo date -d $traw '+%s'|sh) + cmptime=$(echo date -d "\"$time\"" '+%s'|sh) + if [ $t -ge $cmptime ]; then + break + fi + done + IFS=$IFS_OLD + echo $val + +} + +function appendinc() { + num=$1 + incbase="$BASE/backups/base" + #echo $BASE/backups/inc$(echo $num + 1|bc) + if 
! [ $num -eq 0 ]; then + incbase=$BASE/backups/inc$num + fi + jq ".backup_chains += [{\"type\": \"incr-backup\", \"time\": \"$(getDate)\", \"target-dir\": \"$BASE/backups/inc$(echo $num + 1|bc)\", + \"incremental-basedir\": \"$incbase\" }]" $JSONFILE >"tmp.json" && mv ./tmp.json $JSONFILE +} + +function appendbase() { + jq ".backup_chains += [{\"type\": \"full-backup\", \"time\": \"$(getDate)\", \"target-dir\": \"$BASE/backups/base\"}]" $JSONFILE >"tmp.json" && mv ./tmp.json $JSONFILE + sleep 2 +} + +function fullbackup() { + mkdir -p /$BASE/backups/base + xtrabackup --backup --host=127.0.0.1 --user=root --password='' --datadir=/var/lib/mysql/ --target-dir=/$BASE/backups/base + success=$? + if [ $success ]; then + appendbase + fi +} + +function incrbackup() { + num=$1 + incbase="$BASE/backups/base" + #echo $BASE/backups/inc$(echo $num + 1|bc) + if ! [ $num -eq 0 ]; then + incbase=$BASE/backups/inc$num + fi + xtrabackup --backup --host=127.0.0.1 --user=root --password='' --datadir=/var/lib/mysql/ --target-dir=$BASE/backups/inc$(echo $num + 1|bc) \ + --incremental-basedir=$incbase + success=$? + if [ $success ]; then + appendinc $num + fi +} + +function backup() { + if ! 
[ -r $JSONFILE ] ; then + jq -n --arg cluster $CLUSTER_NAME --arg namespace $NAMESPACE '{"cluster_name": $cluster, "namespace": $namespace,"backup_chains": []}' >$JSONFILE + sleep 3 + echo now do the fullbackup + fullbackup + else + num=$(read) + incrbackup $num + fi + +} + + +function restore() { + local restorTime=$1 + local from=$2 + jsonfile=$BASE/$from-backup.json + if [ $# -ne 2 ] ; then + echo you can use it as restore date cluster-from + fi + local total=$(checkTime $restorTime) + for index in $(seq 0 $total); do + # at restore, base always use /backups/base + base=$(jq -c ".backup_chains[0][\"target-dir\"]" $jsonfile) + type=$(jq -c ".backup_chains[$index][\"type\"]" $jsonfile) + inc=$(jq -c ".backup_chains[$index][\"target-dir\"]" $jsonfile) + cmd="" + # echo $i, $base, $type,$inc + case $type in + "\"full-backup\"") + cmd=$(echo xtrabackup --prepare --apply-log-only --target-dir=$base) + echo $cmd + echo $cmd|sh + ;; + "\"incr-backup\"") + if [ $index -eq $total ]; then + cmd=$(echo xtrabackup --prepare --target-dir=$base --incremental-dir=$inc) + else + cmd=$(echo xtrabackup --prepare --apply-log-only --target-dir=$base --incremental-dir=$inc) + fi + echo $cmd + echo $cmd|sh + ;; + *) + echo nothing + ;; + esac + done + # check /var/lib/mysql is emtpty + if ! [ -d "/var/lib/mysql/mysql" ]; then + base=$(jq -c ".backup_chains[0][\"target-dir\"]" $JSONFILE) + cmd=$(echo xtrabackup --copy-back --target-dir=$base --datadir=/var/lib/mysql) + echo $cmd + echo $cmd|sh + chown -R mysql.mysql /var/lib/mysql + else + echo the dir is not empty, cannot copy back + fi +} + diff --git a/script/sshd.sh b/script/sshd.sh new file mode 100755 index 00000000..b0a49bbc --- /dev/null +++ b/script/sshd.sh @@ -0,0 +1,4 @@ +cp /etc/secret-ssh/* /root/.ssh +chmod 600 /root/.ssh/authorized_keys +/usr/sbin/sshd -D -e -f /etc/ssh/sshd_config & +echo "start..." 
diff --git a/utils/constants.go b/utils/constants.go index 8cab16bf..fa203a6c 100644 --- a/utils/constants.go +++ b/utils/constants.go @@ -88,6 +88,12 @@ const ( LogsVolumeName = "logs" DataVolumeName = "data" SysVolumeName = "host-sys" + + // constants used only for JuiceFS-based backups + SysFuseVolume = "host-fuse" + SshPortName = "ssh" + SshPort = 22 + ScriptsVolumeName = "scripts" XenonConfVolumeName = "xenon-conf" InitFileVolumeName = "init-mysql" @@ -100,6 +106,8 @@ const ( LogsVolumeMountPath = "/var/log/mysql" DataVolumeMountPath = "/var/lib/mysql" SysVolumeMountPath = "/host-sys" + + SysFuseVolumnMountPath = "/dev/fuse" ScriptsVolumeMountPath = "/scripts" XenonConfVolumeMountPath = "/etc/xenon" InitFileVolumeMountPath = "/docker-entrypoint-initdb.d" @@ -129,6 +137,10 @@ const ( TlsVolumeName = "tls" // TlsMountPath is the volume mount path for tls TlsMountPath = "/etc/mysql-ssl" + + // SSH key secret volume name and its mount path + SShVolumnName = "ssh-key" + SshVolumnPath = "/etc/secret-ssh" ) // ResourceName is the type for aliasing resources that will be created. @@ -165,6 +177,8 @@ const ( JobAnonationDate = "backupDate" // Job Annonations type JobAnonationType = "backupType" + // SSH key + SShKey = "ssh" ) // JobType