diff --git a/.github/workflows/objectstorage.yaml b/.github/workflows/objectstorage.yaml
new file mode 100644
index 00000000000..13998423e39
--- /dev/null
+++ b/.github/workflows/objectstorage.yaml
@@ -0,0 +1,147 @@
+name: Build Object Storage Cluster image
+
+on:
+  workflow_call:
+    inputs:
+      push_image:
+        description: 'Push image'
+        required: false
+        type: boolean
+        default: false
+      push_image_tag:
+        description: 'Push all-in-one image tag, default is latest'
+        default: 'latest'
+        required: false
+        type: string
+      build_from:
+        description: 'Build all-in-one image from components image tag, default is latest'
+        default: 'latest'
+        required: false
+        type: string
+  workflow_dispatch:
+    inputs:
+      push_image:
+        description: 'Push image'
+        required: false
+        type: boolean
+        default: false
+      push_image_tag:
+        description: 'Push all-in-one image tag, default is latest'
+        default: 'latest'
+        required: false
+        type: string
+      build_from:
+        description: 'Build all-in-one image from components image tag, default is latest'
+        default: 'latest'
+        required: false
+        type: string
+  push:
+    branches: [ "main" ]
+    paths:
+      - "deploy/objectstorage/**"
+      - "!**/*.md"
+      - "!**/*.yaml"
+      - ".github/workflows/objectstorage.yaml"
+  pull_request:
+    branches: [ "*" ]
+    paths:
+      - "deploy/objectstorage/**"
+      - "!**/*.md"
+      - "!**/*.yaml"
+      - ".github/workflows/objectstorage.yaml"
+
+env:
+  # Common versions
+  GO_VERSION: "1.20"
+  DEFAULT_OWNER: "labring"
+
+jobs:
+  save-sealos:
+    uses: ./.github/workflows/import-save-sealos.yml
+
+  build-cluster-image:
+    if: ${{ (github.event_name == 'release') || (github.event_name == 'push') || (inputs.push_image == true) }}
+    needs:
+      - save-sealos
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+      - name: Expose git commit data
+        uses: rlespinasse/git-commit-data-action@v1
+      - name: Check if tag
+        id: check_tag
+        run: |
+          if [[ "${{ github.ref }}" == refs/tags/* ]]; then
+            echo "isTag=true" >> "$GITHUB_OUTPUT"
+          else
+            echo "isTag=false" >> "$GITHUB_OUTPUT"
+          fi
+      - name: Prepare
+        id: prepare
+        run: |
+          bash ./scripts/resolve-tag-image.sh "${{ inputs.push_image }}" "${{ steps.check_tag.outputs.isTag }}" "${{ inputs.push_image_tag }}"
+          echo repo=ghcr.io/${{ github.repository_owner }}/sealos-cloud-objectstorage >> "$GITHUB_OUTPUT"
+      - name: Download sealos
+        uses: actions/download-artifact@v3
+        with:
+          name: sealos
+          path: /tmp/
+      - name: Verify sealos
+        run: |
+          sudo chmod a+x /tmp/sealos
+          sudo mv /tmp/sealos /usr/bin/sealos
+          sudo sealos version
+
+      # todo: mutate image tag in images/shim and scripts or change scripts to use changeable tags
+
+      - name: Sealos login to ghcr.io
+        # if push to main, then login to ghcr.io
+        run: |
+          sudo sealos login -u ${{ github.repository_owner }} -p ${{ secrets.GH_PAT }} --debug ghcr.io
+
+      - name: Build sealos cloud cluster image
+        working-directory: deploy/objectstorage
+        run: |
+          [ -z "${{ inputs.build_from }}" ] && BuildFromTag="latest" || BuildFromTag="${{ inputs.build_from }}"; echo "BuildFromTag=${BuildFromTag}"
+          sed -i "s#labring#${{ github.repository_owner }}#g" init.sh
+          sed -i "s#latest#${BuildFromTag}#g" init.sh
+
+          sudo bash init.sh amd64
+          sudo sealos build -t ${{ steps.prepare.outputs.repo }}:${{ steps.prepare.outputs.tag_name }}-amd64 --platform linux/amd64 -f Kubefile
+          sudo sealos build -t ${{ steps.prepare.outputs.repo }}:latest-amd64 --platform linux/amd64 -f Kubefile
+
+          # delete old registry cache
+          sudo rm -rf registry
+          sudo rm -rf tars
+
+          sudo bash init.sh arm64
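+          # second pass: init.sh has re-pulled the component images for arm64,
+          # so the builds below produce the arm64-tagged variants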
+          sudo sealos build -t ${{ steps.prepare.outputs.repo }}:${{ steps.prepare.outputs.tag_name }}-arm64 --platform linux/arm64 -f Kubefile
+          sudo sealos build -t ${{ steps.prepare.outputs.repo }}:latest-arm64 --platform linux/arm64 -f Kubefile
+
+      - name: Manifest Cluster Images
+        # if push to main, then patch images to ghcr.io
+        run: |
+          sudo sealos images
+          bash docker/patch/manifest-cluster-images.sh ${{ steps.prepare.outputs.repo }}:${{ steps.prepare.outputs.tag_name }}
+          bash docker/patch/manifest-cluster-images.sh ${{ steps.prepare.outputs.repo }}:latest
+        env:
+          OWNER: ${{ github.repository_owner }}
+
+      - name: Renew issue and Sync Images
+        uses: labring/gh-rebot@v0.0.6
+        if: ${{ github.repository_owner == env.DEFAULT_OWNER }}
+        with:
+          version: v0.0.8-rc1
+        env:
+          GH_TOKEN: "${{ secrets.GH_PAT }}"
+          SEALOS_TYPE: "issue_renew"
+          SEALOS_ISSUE_TITLE: "[DaylyReport] Auto build for sealos"
+          SEALOS_ISSUE_BODYFILE: "scripts/ISSUE_RENEW.md"
+          SEALOS_ISSUE_LABEL: "dayly-report"
+          SEALOS_ISSUE_TYPE: "day"
+          SEALOS_ISSUE_REPO: "labring-actions/cluster-image"
+          SEALOS_COMMENT_BODY: "/imagesync ghcr.io/${{ github.repository_owner }}/sealos-cloud-objectstorage:${{ steps.prepare.outputs.tag_name }}"
diff --git a/deploy/objectstorage/Kubefile b/deploy/objectstorage/Kubefile
new file mode 100644
index 00000000000..5e8ef94547a
--- /dev/null
+++ b/deploy/objectstorage/Kubefile
@@ -0,0 +1,14 @@
+FROM scratch
+COPY tars tars
+COPY etc etc
+COPY scripts scripts
+COPY manifests manifests
+
+ENV cloudDomain=${cloudDomain:-"127.0.0.1.nip.io"}
+ENV cloudPort=""
+ENV minioStorageSize=${minioStorageSize:-1Gi}
+ENV promStorageSize=${promStorageSize:-1Gi}
+ENV minioAdminUser=${minioAdminUser:-"admin"}
+ENV minioAdminPassword=${minioAdminPassword:-"passw0rd"}
+
+CMD ["bash scripts/init.sh"]
diff --git a/deploy/objectstorage/README.md b/deploy/objectstorage/README.md
new file mode 100644
index 00000000000..a4a7eb4e12e
--- /dev/null
+++ b/deploy/objectstorage/README.md
@@ -0,0 +1,6 @@
+# sealos cloud object storage cluster image
+
+## prepare
+
+1. install the minio operator
+2. install the prometheus operator
+3. run the object storage cluster image, e.g. the sketch below
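+
+## run
+
+a minimal sketch, assuming the image built and pushed by the workflow above;
+the `-e` values are the Kubefile defaults and can be overridden:
+
+```shell
+sealos run ghcr.io/labring/sealos-cloud-objectstorage:latest \
+  -e cloudDomain="127.0.0.1.nip.io" \
+  -e minioStorageSize="1Gi" \
+  -e minioAdminUser="admin" \
+  -e minioAdminPassword="passw0rd"
+```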
\ No newline at end of file
diff --git a/deploy/objectstorage/etc/minio/policy/kubeblocks.json b/deploy/objectstorage/etc/minio/policy/kubeblocks.json
new file mode 100644
index 00000000000..ad32526b1ca
--- /dev/null
+++ b/deploy/objectstorage/etc/minio/policy/kubeblocks.json
@@ -0,0 +1,44 @@
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:AbortMultipartUpload",
+        "s3:GetBucketTagging",
+        "s3:ListBucket",
+        "s3:ListMultipartUploadParts",
+        "s3:PutBucketTagging",
+        "s3:PutObject",
+        "s3:CreateBucket",
+        "s3:DeleteObject",
+        "s3:GetBucketLocation",
+        "s3:GetBucketPolicy",
+        "s3:GetObject"
+      ],
+      "Resource": [
+        "arn:aws:s3:::file-migration/*"
+      ]
+    },
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:AbortMultipartUpload",
+        "s3:CreateBucket",
+        "s3:DeleteObject",
+        "s3:GetBucketLocation",
+        "s3:PutObject",
+        "s3:GetBucketPolicy",
+        "s3:GetBucketTagging",
+        "s3:GetObject",
+        "s3:ListBucket",
+        "s3:ListBucketMultipartUploads",
+        "s3:ListMultipartUploadParts",
+        "s3:PutBucketTagging"
+      ],
+      "Resource": [
+        "arn:aws:s3:::file-backup/*"
+      ]
+    }
+  ]
+}
\ No newline at end of file
diff --git a/deploy/objectstorage/etc/minio/policy/user_deny_write.json b/deploy/objectstorage/etc/minio/policy/user_deny_write.json
new file mode 100644
index 00000000000..07d0eb87c18
--- /dev/null
+++ b/deploy/objectstorage/etc/minio/policy/user_deny_write.json
@@ -0,0 +1,14 @@
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Deny",
+      "Action": [
+        "s3:PutObject"
+      ],
+      "Resource": [
+        "arn:aws:s3:::${aws:username}-*"
+      ]
+    }
+  ]
+}
\ No newline at end of file
diff --git a/deploy/objectstorage/etc/minio/policy/user_normal.json b/deploy/objectstorage/etc/minio/policy/user_normal.json
new file mode 100644
index 00000000000..35c98adf9c8
--- /dev/null
+++ b/deploy/objectstorage/etc/minio/policy/user_normal.json
@@ -0,0 +1,23 @@
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:ListBucket",
+        "s3:ListBucketMultipartUploads",
+        "s3:ListMultipartUploadParts",
+        "s3:GetBucketPolicy",
+        "s3:GetBucketLocation",
+        "s3:GetBucketTagging",
+        "s3:PutBucketTagging",
+        "s3:GetObject",
+        "s3:PutObject",
+        "s3:DeleteObject"
+      ],
+      "Resource": [
+        "arn:aws:s3:::${aws:username}-*"
+      ]
+    }
+  ]
+}
\ No newline at end of file
diff --git a/deploy/objectstorage/images/shim/imageList b/deploy/objectstorage/images/shim/imageList
new file mode 100644
index 00000000000..3ee611a7073
--- /dev/null
+++ b/deploy/objectstorage/images/shim/imageList
@@ -0,0 +1,2 @@
+quay.io/prometheus/prometheus:v2.45.0
+minio/minio:RELEASE.2023-11-11T08-14-41Z
\ No newline at end of file
diff --git a/deploy/objectstorage/init.sh b/deploy/objectstorage/init.sh
new file mode 100644
index 00000000000..d0b27b91c54
--- /dev/null
+++ b/deploy/objectstorage/init.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+set -e
+readonly ARCH=${1:-amd64}
+mkdir -p tars
+
+RetryMax=3
+RetrySleepSeconds=3
+
+# pull an image for ${ARCH}, retrying up to ${RetryMax} times
+retryPullImage() {
+  local image=$1
+  local retry=0
+  set +e
+  while [ $retry -lt $RetryMax ]; do
+    sealos pull --policy=always --platform=linux/"${ARCH}" $image >/dev/null && break
+    retry=$(($retry + 1))
+    echo "retry pull image $image, retry times: $retry"
+    sleep $RetrySleepSeconds
+  done
+  set -e
+  if [ $retry -eq $RetryMax ]; then
+    echo "pull image $image failed"
+    exit 1
+  fi
+}
+
+retryPullImage ghcr.io/labring/sealos-cloud-objectstorage-controller:latest
+retryPullImage ghcr.io/labring/sealos-cloud-objectstorage-frontend:latest
+retryPullImage ghcr.io/labring/sealos-cloud-minio-service:latest
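+
+# package the pulled images under tars/ so the Kubefile can COPY them
+# into the cluster image and scripts/init.sh can run them offline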
+sealos save -o tars/objectstorage-controller.tar ghcr.io/labring/sealos-cloud-objectstorage-controller:latest
+sealos save -o tars/objectstorage-frontend.tar ghcr.io/labring/sealos-cloud-objectstorage-frontend:latest
+sealos save -o tars/objectstorage-service.tar ghcr.io/labring/sealos-cloud-minio-service:latest
diff --git a/deploy/objectstorage/manifests/minio/deploy.yaml.tmpl b/deploy/objectstorage/manifests/minio/deploy.yaml.tmpl
new file mode 100644
index 00000000000..b98eb33c778
--- /dev/null
+++ b/deploy/objectstorage/manifests/minio/deploy.yaml.tmpl
@@ -0,0 +1,185 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: objectstorage-system
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: object-storage-env-configuration
+  namespace: objectstorage-system
+  labels:
+    v1.min.io/tenant: object-storage
+data:
+  # config.env is injected by scripts/minio.sh (base64-encoded MinIO root config)
+  config.env: >-
+    {ENCODED_CONFIG_ENV}
+type: Opaque
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: object-storage-secret
+  namespace: objectstorage-system
+  labels:
+    v1.min.io/tenant: object-storage
+data:
+  accesskey: ''
+  secretkey: ''
+type: Opaque
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: object-storage-user-0
+  namespace: objectstorage-system
+  labels:
+    v1.min.io/tenant: object-storage
+immutable: true
+data:
+  CONSOLE_ACCESS_KEY: {CONSOLE_ACCESS_KEY}
+  CONSOLE_SECRET_KEY: {CONSOLE_SECRET_KEY}
+type: Opaque
+---
+apiVersion: minio.min.io/v2
+kind: Tenant
+metadata:
+  name: object-storage
+  namespace: objectstorage-system
+spec:
+  configuration:
+    name: object-storage-env-configuration
+  credsSecret:
+    name: object-storage-secret
+  exposeServices:
+    console: true
+    minio: true
+  features: {}
+  image: minio/minio:RELEASE.2023-11-11T08-14-41Z
+  imagePullSecret: {}
+  mountPath: /export
+  pools:
+    - name: pool-0
+      resources:
+        limits:
+          cpu: 1000m
+          memory: 2Gi
+        requests:
+          cpu: 100m
+          memory: 256Mi
+      runtimeClassName: ''
+      servers: 4
+      volumeClaimTemplate:
+        metadata:
+          name: data
+        spec:
+          accessModes:
+            - ReadWriteOnce
+          resources:
+            requests:
+              storage: {{ .minioStorageSize }}
+        status: {}
+      volumesPerServer: 1
+  requestAutoCert: false
+  users:
+    - name: object-storage-user-0
+  scheduler:
+    name: ''
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: object-storage
+  namespace: objectstorage-system
+  labels:
+    v1.min.io/tenant: object-storage
+spec:
+  ports:
+    - name: http-minio
+      protocol: TCP
+      port: 80
+      targetPort: 9000
+  selector:
+    v1.min.io/tenant: object-storage
+  type: LoadBalancer
+  sessionAffinity: None
+  externalTrafficPolicy: Cluster
+  ipFamilies:
+    - IPv4
+  ipFamilyPolicy: SingleStack
+  allocateLoadBalancerNodePorts: true
+  internalTrafficPolicy: Cluster
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: object-storage-api
+  namespace: objectstorage-system
+  annotations:
+    kubernetes.io/ingress.class: nginx
+    nginx.ingress.kubernetes.io/proxy-body-size: 3g
+    nginx.ingress.kubernetes.io/server-snippet: |
+      client_header_buffer_size 64k;
+      large_client_header_buffers 4 128k;
+    nginx.ingress.kubernetes.io/ssl-redirect: 'false'
+    nginx.ingress.kubernetes.io/backend-protocol: HTTP
+    nginx.ingress.kubernetes.io/rewrite-target: /$2
+    nginx.ingress.kubernetes.io/client-body-buffer-size: 64k
+    nginx.ingress.kubernetes.io/proxy-buffer-size: 64k
+    nginx.ingress.kubernetes.io/configuration-snippet: |
+      if ($request_uri ~* \.(js|css|gif|jpe?g|png)) {
+        expires 30d;
+        add_header Cache-Control "public";
+      }
+spec:
+  rules:
+    - host: objectstorageapi.{{ .cloudDomain }}
+      http:
+        paths:
+          - pathType: Prefix
+            path: /()(.*)
+            backend:
+              service:
+                name: object-storage
+                port:
+                  number: 80
+  tls:
+    - hosts:
+        - objectstorageapi.{{ .cloudDomain }}
+      secretName: wildcard-cert
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: object-storage-console
+  namespace: objectstorage-system
+  annotations:
+    kubernetes.io/ingress.class: nginx
+    nginx.ingress.kubernetes.io/proxy-body-size: 3g
+    nginx.ingress.kubernetes.io/server-snippet: |
+      client_header_buffer_size 64k;
+      large_client_header_buffers 4 128k;
+    nginx.ingress.kubernetes.io/ssl-redirect: 'false'
+    nginx.ingress.kubernetes.io/backend-protocol: HTTP
+    nginx.ingress.kubernetes.io/client-body-buffer-size: 64k
+    nginx.ingress.kubernetes.io/proxy-buffer-size: 64k
+    nginx.ingress.kubernetes.io/configuration-snippet: |
+      if ($request_uri ~* \.(js|css|gif|jpe?g|png)) {
+        expires 30d;
+        add_header Cache-Control "public";
+      }
+spec:
+  rules:
+    - host: osconsole.{{ .cloudDomain }}
+      http:
+        paths:
+          - pathType: Prefix
+            path: /
+            backend:
+              service:
+                name: object-storage-console
+                port:
+                  number: 9090
+  tls:
+    - hosts:
+        - osconsole.{{ .cloudDomain }}
+      secretName: wildcard-cert
\ No newline at end of file
diff --git a/deploy/objectstorage/manifests/prometheus/deploy.yaml.tmpl b/deploy/objectstorage/manifests/prometheus/deploy.yaml.tmpl
new file mode 100644
index 00000000000..6cb6e0db89e
--- /dev/null
+++ b/deploy/objectstorage/manifests/prometheus/deploy.yaml.tmpl
@@ -0,0 +1,97 @@
+apiVersion: monitoring.coreos.com/v1
+kind: Prometheus
+metadata:
+  labels:
+    app: prometheus-object-storage
+  name: object-storage
+  namespace: objectstorage-system
+spec:
+  podMetadata:
+    labels:
+      app: prometheus-object-storage
+  resources:
+    limits:
+      cpu: 200m
+      memory: 256Mi
+    requests:
+      cpu: 50m
+      memory: 128Mi
+  securityContext:
+    fsGroup: 2000
+    runAsGroup: 2000
+    runAsNonRoot: true
+    runAsUser: 1000
+    seccompProfile:
+      type: RuntimeDefault
+  evaluationInterval: 60s
+  image: quay.io/prometheus/prometheus:v2.45.0
+  serviceMonitorSelector: {}
+  probeSelector: {}
+  ruleSelector: {}
+  portName: http-web
+  retention: 10d
+  scrapeInterval: 60s
+  serviceAccountName: object-storage-sa
+  replicas: 1
+  shards: 1
+  storage:
+    volumeClaimTemplate:
+      metadata:
+        annotations:
+          path: /prometheus
+          value: {{ .promStorageSize }}
+      spec:
+        accessModes:
+          - ReadWriteOnce
+        resources:
+          requests:
+            storage: {{ .promStorageSize }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: prometheus-object-storage
+  namespace: objectstorage-system
+spec:
+  ports:
+    - port: 9090
+      targetPort: http-web
+      protocol: TCP
+      name: http-web
+  selector:
+    app: prometheus-object-storage
+  type: ClusterIP
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: object-storage-sa
+  namespace: objectstorage-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: object-storage-role
+  namespace: objectstorage-system
+rules:
+  - apiGroups:
+      - "*"
+    resources:
+      - "*"
+    verbs:
+      - "*"
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: object-storage-rolebind
+  namespace: objectstorage-system
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: object-storage-role
+subjects:
+  - kind: ServiceAccount
+    name: object-storage-sa
+    namespace: objectstorage-system
+
diff --git a/deploy/objectstorage/scripts/init.sh b/deploy/objectstorage/scripts/init.sh
new file mode 100644
index 00000000000..30a751dc66f
--- /dev/null
+++ b/deploy/objectstorage/scripts/init.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+# 1. create minio instance
+bash scripts/minio.sh
+# 2. create prometheus instance
+bash scripts/prometheus.sh
+# 3. run objectstorage controller
+sealos run tars/objectstorage-controller.tar -e cloudDomain=${cloudDomain}
+# 4. run objectstorage frontend
+sealos run tars/objectstorage-frontend.tar -e cloudDomain=${cloudDomain}
+# 5. run objectstorage monitor service
+sealos run tars/objectstorage-service.tar -e cloudDomain=${cloudDomain}
diff --git a/deploy/objectstorage/scripts/minio.sh b/deploy/objectstorage/scripts/minio.sh
new file mode 100644
index 00000000000..be8e07932cf
--- /dev/null
+++ b/deploy/objectstorage/scripts/minio.sh
@@ -0,0 +1,75 @@
+#!/usr/bin/env bash
+set -e
+
+function deploy_minio() {
+  MINIO_EXTERNAL_ENDPOINT="https://objectstorageapi.${cloudDomain}"
+  CONSOLE_ACCESS_KEY=$(echo -n "${minioAdminUser}" | base64 -w 0)
+  CONSOLE_SECRET_KEY=$(echo -n "${minioAdminPassword}" | base64 -w 0)
+
+  MINIO_ROOT_USER=$(openssl rand -hex 12 | head -c 16)
+  MINIO_ROOT_PASSWORD=$(openssl rand -hex 24 | head -c 32)
+
+  CONFIG_ENV="export MINIO_STORAGE_CLASS_STANDARD=\"EC:2\"
+export MINIO_BROWSER=\"on\"
+export MINIO_ROOT_USER=\"${MINIO_ROOT_USER}\"
+export MINIO_ROOT_PASSWORD=\"${MINIO_ROOT_PASSWORD}\""
+
+  ENCODED_CONFIG_ENV=$(echo -n "$CONFIG_ENV" | base64 -w 0)
+
+  # reuse the existing root credentials if the tenant was deployed before
+  if kubectl get secret object-storage-env-configuration -n objectstorage-system >/dev/null 2>&1; then
+    ENCODED_CONFIG_ENV=$(kubectl get secret object-storage-env-configuration -n objectstorage-system -o jsonpath='{.data.config\.env}')
+  fi
+
+  # use '#' as the sed delimiter: base64 output may contain '/'
+  sed -i "s#{ENCODED_CONFIG_ENV}#${ENCODED_CONFIG_ENV}#g" manifests/minio/deploy.yaml
+  sed -i "s#{CONSOLE_ACCESS_KEY}#${CONSOLE_ACCESS_KEY}#g" manifests/minio/deploy.yaml
+  sed -i "s#{CONSOLE_SECRET_KEY}#${CONSOLE_SECRET_KEY}#g" manifests/minio/deploy.yaml
+
+  kubectl apply -f manifests/minio/deploy.yaml
+}
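+
+# init_minio waits for the tenant pods, then provisions the policies, groups
+# and users through the mc client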
-f "$HOME/minio-binaries/mc" ]; then + curl https://dl.min.io/client/mc/release/linux-amd64/mc --create-dirs -o $HOME/minio-binaries/mc + fi + + chmod +x $HOME/minio-binaries/mc + export PATH=$PATH:$HOME/minio-binaries/ + + while kubectl wait -l statefulset.kubernetes.io/pod-name=object-storage-pool-0-0 --for=condition=ready pod -n objectstorage-system --timeout=-1s 2>&1 | grep -q "error: no matching resources found"; do + sleep 1 + done + + kubectl wait -l statefulset.kubernetes.io/pod-name=object-storage-pool-0-0 --for=condition=ready pod -n objectstorage-system --timeout=-1s + kubectl wait -l statefulset.kubernetes.io/pod-name=object-storage-pool-0-1 --for=condition=ready pod -n objectstorage-system --timeout=-1s + kubectl wait -l statefulset.kubernetes.io/pod-name=object-storage-pool-0-2 --for=condition=ready pod -n objectstorage-system --timeout=-1s + kubectl wait -l statefulset.kubernetes.io/pod-name=object-storage-pool-0-3 --for=condition=ready pod -n objectstorage-system --timeout=-1s + + while mc alias set objectstorage ${MINIO_EXTERNAL_ENDPOINT} ${minioAdminUser} ${minioAdminPassword} 2>&1 | grep -q "Unable to initialize new alias from the provided credentials."; do + sleep 1 + done + + mc admin policy create objectstorage userNormal etc/minio/policy/user_normal.json + mc admin policy create objectstorage userDenyWrite etc/minio/policy/user_deny_write.json + mc admin policy create objectstorage kubeblocks etc/minio/policy/kubeblocks.json + + mc admin user add objectstorage kubeblocks sealos.12345 + mc admin user add objectstorage testuser sealos2023 + mc admin group add objectstorage userNormal testuser + mc admin group add objectstorage userDenyWrite testuser + + mc admin user remove objectstorage testuser + + mc admin policy attach objectstorage userNormal --group userNormal + mc admin policy attach objectstorage userDenyWrite --group userDenyWrite + mc admin policy attach objectstorage kubeblocks --user kubeblocks +} + +function install() { + deploy_minio + + init_minio +} + +install + diff --git a/deploy/objectstorage/scripts/prometheus.sh b/deploy/objectstorage/scripts/prometheus.sh new file mode 100644 index 00000000000..dbe78193419 --- /dev/null +++ b/deploy/objectstorage/scripts/prometheus.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash +set -e + +function deploy_prometheus() { + kubectl apply -f manifests/prometheus/deploy.yaml +} + +function install() { + deploy_prometheus +} + +install \ No newline at end of file