Merge pull request #120 from scality/bugfix/COSI-79-fix-role-permissions
COSI-79: Allow end-users to create buckets and access from any namespace
anurag4DSB authored Jan 23, 2025

2 parents 1ba6548 + 908c8ce commit 30daef1
Showing 14 changed files with 199 additions and 56 deletions.
2 changes: 1 addition & 1 deletion .github/scripts/capture_k8s_logs.sh
@@ -4,7 +4,7 @@ set -e
# Create a directory to store the logs
mkdir -p logs/kind_cluster_logs
LOG_FILE_PATH=".github/e2e_tests/artifacts/logs/kind_cluster_logs"
mkdir -p "$(dirname "$LOG_FILE_PATH")" # Ensure the log directory exists
mkdir -p "$LOG_FILE_PATH" # Ensure the log directory exists
# Define namespaces to capture logs from
namespaces=("default" "container-object-storage-system")
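The one-line fix above matters because `LOG_FILE_PATH` names the directory the logs are written into, not a file: `dirname` created only its parent. A minimal sketch of the difference, using the same path as the script:

```bash
LOG_FILE_PATH=".github/e2e_tests/artifacts/logs/kind_cluster_logs"

# Old line: creates ".github/e2e_tests/artifacts/logs" but not the
# "kind_cluster_logs" leaf, so writes into "$LOG_FILE_PATH/" could fail.
mkdir -p "$(dirname "$LOG_FILE_PATH")"

# New line: creates the full tree, including the leaf directory.
mkdir -p "$LOG_FILE_PATH"
```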

2 changes: 1 addition & 1 deletion .github/scripts/e2e_tests_brownfield_use_case.sh
@@ -9,7 +9,7 @@ SECRET_NAME="brownfield-bucket-secret"
IAM_ENDPOINT="http://$HOST_IP:8600"
S3_ENDPOINT="http://$HOST_IP:8000"
BUCKET_NAME="brownfield-bucket"
NAMESPACE="container-object-storage-system"
NAMESPACE="default"
REGION="us-west-1"

# Error handling function
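Switching the test namespace to `default` exercises exactly what COSI-79 enables: COSI objects created outside the driver's own namespace. A hedged spot-check once the script has run, with resource names taken from the script's variables and the brownfield examples further down:

```bash
# Assumes the brownfield e2e flow completed successfully.
kubectl get bucketclaim brownfield-bucket-claim -n default
kubectl get secret brownfield-bucket-secret -n default
```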
110 changes: 102 additions & 8 deletions .github/workflows/helm-validation.yml
@@ -18,13 +18,29 @@ on:
default: 5

jobs:
- smoke-test-installation-with-helm:
+ e2e-tests-with-helm:
runs-on: ubuntu-latest

steps:
- name: Check out repository
uses: actions/checkout@v4

+ - name: Login to Registry
+ uses: docker/login-action@v3
+ with:
+ registry: ghcr.io
+ username: "${{ github.repository_owner }}"
+ password: "${{ github.token }}"

+ - name: Restore Cached Docker Images
+ id: cache_docker_images
+ uses: actions/cache@v4
+ with:
+ path: /tmp/.docker_cache
+ key: docker-${{ runner.os }}-${{ hashFiles('.github/s3_and_iam_deployment/.env') }}
+ restore-keys: |
+ docker-${{ runner.os }}-
- name: Set up Helm
uses: azure/setup-helm@<version> # exact pin obscured by email obfuscation in this view
with:
@@ -37,6 +53,35 @@ jobs:
wait: 90s
cluster_name: helm-test-cluster

+ - name: Verify KIND cluster is running
+ run: |
+ kubectl cluster-info
+ kubectl get nodes
+ - name: Setup COSI, S3 and IAM environments
+ run: |
+ set -e -o pipefail
+ (
+ echo "=== Setup COSI Controller, CRDs and Driver ==="
+ kubectl create -k github.com/kubernetes-sigs/container-object-storage-interface
+ make container
+ kind load docker-image ghcr.io/scality/cosi-driver:latest --name helm-test-cluster
+ ) &
+ (
+ echo "=== Loading cached S3 and IAM Docker images ==="
+ if [ -d /tmp/.docker_cache ] && [ "$(ls -A /tmp/.docker_cache 2>/dev/null)" ]; then
+ for image in /tmp/.docker_cache/*.tar; do
+ docker load -i "$image" || true # continue on failure
+ done
+ else
+ echo "No cached images found. Skipping load."
+ fi
+ ) &
+ # Wait for both background processes
+ wait
- name: "Debug: SSH to runner"
uses: scality/actions/action-ssh-to-runner@v1
with:
@@ -49,15 +94,29 @@ jobs:
timeout-minutes: 10
continue-on-error: true

- - name: Build COSI Driver Docker Image
- run: |
- make container
+ - name: Setup IAM and S3 Services
+ run: |-
+ set -e -o pipefail;
+ mkdir -p logs/s3 logs/iam logs/cosi_driver data/vaultdb
+ chown -R runner:docker logs data
+ chmod -R ugo+rwx logs data
+ docker compose --profile iam_s3 up -d --quiet-pull
+ bash ../scripts/wait_for_local_port.bash 8600 30
+ bash ../scripts/wait_for_local_port.bash 8000 30
+ working-directory: .github/s3_and_iam_deployment

- - name: Load Docker Image into Kind Cluster
+ - name: Save Images to Cache if not present
+ if: steps.cache_docker_images.outputs.cache-hit != 'true'
run: |
- kind load docker-image ghcr.io/scality/cosi-driver:latest --name helm-test-cluster
+ source .github/s3_and_iam_deployment/.env
+ echo "Vault Image: $VAULT_IMAGE"
+ echo "CloudServer Image: $CLOUDSERVER_IMAGE"
+ mkdir -p /tmp/.docker_cache
+ docker save "$VAULT_IMAGE" -o /tmp/.docker_cache/vault_image.tar
+ docker save "$CLOUDSERVER_IMAGE" -o /tmp/.docker_cache/cloudserver_image.tar
+ shell: bash

- - name: Install Scality COSI Helm Chart
+ - name: Install Scality COSI Driver using Helm Chart
run: |
helm install scality-cosi-driver ./helm/scality-cosi-driver \
--namespace container-object-storage-system \
@@ -73,6 +132,14 @@ jobs:
run: |
.github/scripts/verify_helm_install.sh
+ - name: E2E tests for greenfield use case using kustomize
+ run: |
+ .github/scripts/e2e_tests_greenfield_use_case.sh
+ - name: E2E tests for brownfield use case using kustomize
+ run: |
+ .github/scripts/e2e_tests_brownfield_use_case.sh
# the script accepts number of requests for APIs: CREATE_BUCKET, DELETE_BUCKET, GET_INFO
# GRANT_ACCESS and REVOKE_ACCESS in order
# Example below we are testing for those API counts:
@@ -83,7 +150,7 @@ jobs:
# - 0 REVOKE_ACCESS
- name: Verify metrics for healthcheck route
run: |
- .github/scripts/e2e_tests_metrics.sh 0 0 1 0 0
+ .github/scripts/e2e_tests_metrics.sh 2 1 1 2 2
- name: "Delay completion"
if: ${{ github.event_name == 'workflow_dispatch' && inputs.debug_enabled }}
@@ -92,8 +159,35 @@ jobs:
completion_delay_m: ${{ inputs.debug_delay_duration_minutes }}
continue-on-error: true

+ - name: Cleaup IAM and S3 Services
+ run: docker compose --profile iam_s3 down
+ working-directory: .github/s3_and_iam_deployment

+ - name: Move S3 and IAM logs and data to artifacts directory
+ if: always()
+ run: |-
+ set -e -o pipefail;
+ mkdir -p .github/e2e_tests/artifacts/logs .github/e2e_tests/artifacts/data
+ cp -r .github/s3_and_iam_deployment/logs/* .github/e2e_tests/artifacts/logs/
+ cp -r .github/s3_and_iam_deployment/data/* .github/e2e_tests/artifacts/data/
+ - name: Capture Kubernetes Logs in artifacts directory
+ if: always()
+ run: |
+ .github/scripts/capture_k8s_logs.sh
+ - name: Cleanup Helm Release and Namespace
+ run: |
+ helm uninstall scality-cosi-driver -n container-object-storage-system
+ kubectl delete namespace container-object-storage-system
+ if: always()

+ - name: Upload logs and data to Scality artifacts
+ if: always()
+ uses: scality/action-artifacts@v4
+ with:
+ method: upload
+ url: https://artifacts.scality.net
+ user: ${{ secrets.ARTIFACTS_USER }}
+ password: ${{ secrets.ARTIFACTS_PASSWORD }}
+ source: .github/e2e_tests/artifacts
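The caching steps above rely on `docker save`/`docker load` round-tripping images through tarballs that `actions/cache` keys on a hash of the `.env` file. A standalone sketch of that round-trip; the image name is illustrative (the workflow actually caches `$VAULT_IMAGE` and `$CLOUDSERVER_IMAGE`):

```bash
IMAGE="ghcr.io/scality/cosi-driver:latest"  # illustrative only
CACHE_DIR=/tmp/.docker_cache
mkdir -p "$CACHE_DIR"

# Cache miss: serialize the pulled image into the cached directory.
docker save "$IMAGE" -o "$CACHE_DIR/image.tar"

# Cache hit: restore the image without touching the registry.
docker load -i "$CACHE_DIR/image.tar"
```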
@@ -1,4 +1,4 @@
- name: End to End Feature Tests
+ name: Kustomize Validation

on:
push:
@@ -18,7 +18,7 @@ on:
default: 5

jobs:
- e2e-tests-with-kind:
+ e2e-tests-with-kustomize:
runs-on: ubuntu-latest

steps:
1 change: 0 additions & 1 deletion cosi-examples/brownfield/bucket.yaml
@@ -2,7 +2,6 @@ apiVersion: objectstorage.k8s.io/v1alpha1
kind: Bucket
metadata:
name: brownfield-bucket # should be same as bucket name
- namespace: container-object-storage-system
spec:
bucketClaim: {}
bucketClassName: brownfield-bucket-class
1 change: 0 additions & 1 deletion cosi-examples/brownfield/bucketaccess.yaml
@@ -2,7 +2,6 @@ apiVersion: objectstorage.k8s.io/v1alpha1
kind: BucketAccess
metadata:
name: brownfield-bucket-access
- namespace: container-object-storage-system
spec:
bucketAccessClassName: brownfield-bucket-access-class
bucketClaimName: brownfield-bucket-claim
1 change: 0 additions & 1 deletion cosi-examples/brownfield/bucketaccessclass.yaml
@@ -2,7 +2,6 @@ kind: BucketAccessClass
apiVersion: objectstorage.k8s.io/v1alpha1
metadata:
name: brownfield-bucket-access-class
- namespace: container-object-storage-system
driverName: cosi.scality.com
authenticationType: KEY
parameters:
1 change: 0 additions & 1 deletion cosi-examples/brownfield/bucketclaim.yaml
@@ -2,7 +2,6 @@ apiVersion: objectstorage.k8s.io/v1alpha1
kind: BucketClaim
metadata:
name: brownfield-bucket-claim
- namespace: container-object-storage-system
spec:
bucketClassName: brownfield-bucket-class
existingBucketName: brownfield-bucket # name of Bucket object
1 change: 0 additions & 1 deletion cosi-examples/brownfield/bucketclass.yaml
@@ -2,7 +2,6 @@ apiVersion: objectstorage.k8s.io/v1alpha1
kind: BucketClass
metadata:
name: brownfield-bucket-class
- namespace: container-object-storage-system
driverName: cosi.scality.com
deletionPolicy: Delete
parameters:
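The five deletions above are one change repeated: dropping `namespace:` from the brownfield example manifests. In COSI v1alpha1, `Bucket`, `BucketClass`, and `BucketAccessClass` are cluster-scoped, so the field was misleading there, while the namespaced `BucketClaim` and `BucketAccess` can now be created in any namespace. One way to confirm the scoping (output shape may vary by kubectl version):

```bash
kubectl api-resources --api-group=objectstorage.k8s.io
# Expect NAMESPACED=false for buckets, bucketclasses, bucketaccessclasses,
# and NAMESPACED=true for bucketclaims and bucketaccesses.
```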
29 changes: 9 additions & 20 deletions docs/Usage.md
@@ -77,7 +77,6 @@ In the **Scality COSI Driver**, both **Greenfield** and **Brownfield** provision
> Note:
> For **fully working** examples, see the YAMLs in the [cosi-examples/brownfield](../cosi-examples/brownfield/) and [cosi-examples/greenfield](../cosi-examples/greenfield/) directories.
- > For brownfield scenario it is madatory to create COSI CRs in the same namespace as COSI driver and controller.
### 1.1 Greenfield: Creating a New Bucket
@@ -92,7 +91,6 @@ Greenfield provisioning will create a brand-new S3 bucket in your object store,
kind: BucketClass
metadata:
name: greenfield-bucketclass
- namespace: container-object-storage-system
driverName: cosi.scality.com
deletionPolicy: Delete
parameters:
@@ -115,7 +113,6 @@ Greenfield provisioning will create a brand-new S3 bucket in your object store,
kind: BucketClaim
metadata:
name: my-greenfield-bucketclaim
- namespace: container-object-storage-system
spec:
bucketClassName: greenfield-bucketclass
protocols:
@@ -131,8 +128,6 @@ Greenfield provisioning will create a brand-new S3 bucket in your object store,
Brownfield provisioning allows you to manage an **already-existing** S3 bucket in Kubernetes.
- > Note: For brownfield scenario, COSI CRs for Bucket and Access provisioning should be created in the same namespace as COSI driver and controller.
1. **Verify Existing Bucket**
Ensure the bucket already exists in S3 either through Storage Administrator or by running the following AWS CLI command:
@@ -151,7 +146,6 @@ Brownfield provisioning allows you to manage an **already-existing** S3 bucket i
kind: BucketClass
metadata:
name: brownfield-bucketclass
- namespace: container-object-storage-system
driverName: cosi.scality.com
deletionPolicy: Delete
parameters:
@@ -172,7 +166,6 @@ Brownfield provisioning allows you to manage an **already-existing** S3 bucket i
kind: Bucket
metadata:
name: "<EXISTING_BUCKET_NAME>"
- namespace: container-object-storage-system
spec:
bucketClaim: {}
driverName: cosi.scality.com
@@ -199,9 +192,8 @@ Brownfield provisioning allows you to manage an **already-existing** S3 bucket i
kind: BucketClaim
metadata:
name: my-brownfield-bucketclaim
- namespace: container-object-storage-system
spec:
- bucketClassName: brownfield-bucket-class
+ bucketClassName: brownfield-bucketclass
existingBucketName: "<EXISTING_BUCKET_NAME>"
protocols:
- S3
@@ -244,17 +236,15 @@ A `BucketAccessClass` defines how access (IAM policy or S3 keys) is granted:
```bash
cat <<EOF | kubectl apply -f -
+ apiVersion: objectstorage.k8s.io/v1alpha1
kind: BucketAccessClass
- apiVersion: objectstorage.k8s.io/v1alpha1
metadata:
- name: bucketaccessclass
- namespace: container-object-storage-system
- spec:
- driverName: cosi.scality.com
- authenticationType: KEY
- parameters:
- objectStorageSecretName: s3-secret-for-cosi
- objectStorageSecretNamespace: default
+ name: bucket-access-class
+ driverName: cosi.scality.com
+ authenticationType: KEY
+ parameters:
+ objectStorageSecretName: s3-secret-for-cosi
+ objectStorageSecretNamespace: default
EOF
```
@@ -273,10 +263,9 @@ apiVersion: objectstorage.k8s.io/v1alpha1
kind: BucketAccess
metadata:
name: my-bucketaccess
- namespace: container-object-storage-system
spec:
bucketClaimName: my-greenfield-bucketclaim # or my-brownfield-bucketclaim
- bucketAccessClassName: bucketaccessclass
+ bucketAccessClassName: bucket-access-class
credentialsSecretName: my-s3-credentials
protocol: S3
EOF
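With the `namespace:` lines gone from the docs and the RBAC widened (see the files below), namespaced COSI objects can live next to the workload that uses them. A sketch in the docs' own `kubectl apply` style; the `my-app` namespace is illustrative:

```bash
cat <<EOF | kubectl apply -f -
apiVersion: objectstorage.k8s.io/v1alpha1
kind: BucketClaim
metadata:
  name: my-greenfield-bucketclaim
  namespace: my-app   # any end-user namespace now works
spec:
  bucketClassName: greenfield-bucketclass
  protocols:
    - S3
EOF
```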
4 changes: 2 additions & 2 deletions helm/scality-cosi-driver/Chart.yaml
@@ -1,5 +1,5 @@
apiVersion: v2
name: scality-cosi-driver
description: A Helm chart for deploying the Scality COSI Driver
- version: 1.0.0
- appVersion: "1.0"
+ version: 1.0.1
+ appVersion: "1.0.0"
45 changes: 39 additions & 6 deletions helm/scality-cosi-driver/templates/rbac.yaml
@@ -2,16 +2,49 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: scality-cosi-driver-provisioner-role
+ annotations:
+ description: Role for Scality COSI Driver Provisioner with permissions for managing COSI resources and related objects.
rules:
- apiGroups: ["objectstorage.k8s.io"]
resources: ["buckets", "bucketaccesses", "bucketclaims", "bucketaccessclasses"]
verbs: ["get", "list", "watch", "update", "create", "delete"]
resources:
- buckets
- bucketaccesses
- bucketclaims
- bucketaccessclasses
- buckets/status
- bucketaccesses/status
- bucketclaims/status
- bucketaccessclasses/status
verbs:
- create
- get
- update
- delete
- list
- watch
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
resources:
- leases
verbs:
- create
- get
- update
- delete
- list
- watch
- apiGroups: [""]
resources: ["secrets", "events"]
verbs: ["get", "delete", "update", "create"]
resources:
- secrets
- events
- services
- endpoints
verbs:
- create
- get
- update
- delete
- list
- watch

---
apiVersion: rbac.authorization.k8s.io/v1
50 changes: 41 additions & 9 deletions kustomize/base/rbac.yaml
@@ -2,25 +2,57 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: scality-cosi-driver-provisioner-role
+ annotations:
+ description: Role for Scality COSI Driver Provisioner with permissions for managing COSI resources and related objects.
rules:
- apiGroups: ["objectstorage.k8s.io"]
resources: ["buckets", "bucketaccesses", "bucketclaims", "bucketaccessclasses", "buckets/status", "bucketaccesses/status", "bucketclaims/status", "bucketaccessclasses/status"]
verbs: ["get", "list", "watch", "update", "create", "delete"]
resources:
- buckets
- bucketaccesses
- bucketclaims
- bucketaccessclasses
- buckets/status
- bucketaccesses/status
- bucketclaims/status
- bucketaccessclasses/status
verbs:
- create
- get
- update
- delete
- list
- watch
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
resources:
- leases # Leader election leases
verbs:
- create
- get
- update
- delete
- list
- watch
- apiGroups: [""]
resources: ["secrets", "events"]
verbs: ["get", "delete", "update", "create"]
- apiGroups: [""]
resources: ["services", "endpoints"]
verbs: ["get", "list", "watch"]
resources:
- events # Emitted from COSI Provisioner Sidecar
- secrets # Created during access granting
- services # Ensure proper service resource permissions
- endpoints
verbs:
- create
- get
- update
- delete
- list
- watch

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: scality-cosi-driver-provisioner-role-binding
+ annotations:
+ description: Binds the Scality COSI Driver ServiceAccount to the ClusterRole for cluster-wide permissions.
subjects:
- kind: ServiceAccount
name: scality-cosi-driver-provisioner
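Both RBAC files gain the same three things: the `/status` subresources, `list`/`watch` on secrets and events, and full verbs on services and endpoints. A hedged spot-check of the effective permissions once the chart is installed (the service-account namespace is assumed from the Helm install step above):

```bash
SA="system:serviceaccount:container-object-storage-system:scality-cosi-driver-provisioner"

kubectl auth can-i list buckets.objectstorage.k8s.io --as="$SA"  # expect: yes
kubectl auth can-i watch secrets --as="$SA" -n default           # expect: yes (newly granted verb)
kubectl auth can-i create endpoints --as="$SA" -n default        # expect: yes (newly granted)
```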
4 changes: 2 additions & 2 deletions pkg/driver/provisioner_server_impl.go
@@ -324,10 +324,10 @@ func initializeObjectStorageClient(ctx context.Context, clientset kubernetes.Int
case "IAM":
client, err = iamclient.InitIAMClient(ctx, *storageClientParameters)
if err != nil {
- klog.ErrorS(err, "Failed to initialize IAM client", "endpoint", storageClientParameters.Endpoint)
+ klog.ErrorS(err, "Failed to initialize IAM client", "endpoint", storageClientParameters.IAMEndpoint)
return nil, nil, status.Error(codes.Internal, "failed to initialize IAM client")
}
- klog.V(c.LvlDebug).InfoS("Successfully initialized IAM client", "endpoint", storageClientParameters.Endpoint)
+ klog.V(c.LvlDebug).InfoS("Successfully initialized IAM client", "endpoint", storageClientParameters.IAMEndpoint)
default:
klog.ErrorS(nil, "Unsupported object storage provider service", "service", service)
return nil, nil, status.Error(codes.Internal, "unsupported object storage provider service")
