feat(vd): add info represents quota exceed state #586

Open · wants to merge 23 commits into base: main
2 changes: 2 additions & 0 deletions api/core/v1alpha2/vdcondition/condition.go
@@ -105,6 +105,8 @@ const (
Ready ReadyReason = "Ready"
// Lost indicates that the underlying PersistentVolumeClaim has been lost and the `VirtualDisk` can no longer be used.
Lost ReadyReason = "PVCLost"
// QuotaExceeded indicates that the VirtualDisk has exceeded the project quota and cannot be provisioned.
QuotaExceeded ReadyReason = "QuotaExceeded"

// ResizingNotRequested indicates that the resize operation has not been requested yet.
ResizingNotRequested ResizedReason = "NotRequested"
247 changes: 247 additions & 0 deletions images/cdi-artifact/patches/022-add-datavolume-quouta-not-exceeded-condition.patch
@@ -0,0 +1,247 @@
diff --git a/pkg/controller/datavolume/controller-base.go b/pkg/controller/datavolume/controller-base.go
index acd09cb94..2fb859150 100644
--- a/pkg/controller/datavolume/controller-base.go
+++ b/pkg/controller/datavolume/controller-base.go
@@ -55,6 +55,7 @@ import (
cloneMetrics "kubevirt.io/containerized-data-importer/pkg/monitoring/metrics/cdi-cloner"
metrics "kubevirt.io/containerized-data-importer/pkg/monitoring/metrics/cdi-controller"
importMetrics "kubevirt.io/containerized-data-importer/pkg/monitoring/metrics/cdi-importer"
+ patchedDV "kubevirt.io/containerized-data-importer/pkg/patcheddatavolume"
"kubevirt.io/containerized-data-importer/pkg/token"
"kubevirt.io/containerized-data-importer/pkg/util"
)
@@ -1035,6 +1036,7 @@ func (r *ReconcilerBase) updateConditions(dataVolume *cdiv1.DataVolume, pvc *cor
dataVolume.Status.Conditions = updateBoundCondition(dataVolume.Status.Conditions, pvc, message, reason)
dataVolume.Status.Conditions = UpdateReadyCondition(dataVolume.Status.Conditions, readyStatus, message, reason)
dataVolume.Status.Conditions = updateRunningCondition(dataVolume.Status.Conditions, anno)
+ dataVolume.Status.Conditions = patchedDV.UpdateDVQuotaNotExceededCondition(dataVolume.Status.Conditions)
}

func (r *ReconcilerBase) emitConditionEvent(dataVolume *cdiv1.DataVolume, originalCond []cdiv1.DataVolumeCondition) {
diff --git a/pkg/controller/import-controller.go b/pkg/controller/import-controller.go
index 49f1ff898..972f8ab5f 100644
--- a/pkg/controller/import-controller.go
+++ b/pkg/controller/import-controller.go
@@ -34,6 +34,7 @@ import (
"kubevirt.io/containerized-data-importer/pkg/common"
cc "kubevirt.io/containerized-data-importer/pkg/controller/common"
featuregates "kubevirt.io/containerized-data-importer/pkg/feature-gates"
+ patchedDV "kubevirt.io/containerized-data-importer/pkg/patcheddatavolume"
"kubevirt.io/containerized-data-importer/pkg/util"
"kubevirt.io/containerized-data-importer/pkg/util/naming"
sdkapi "kubevirt.io/controller-lifecycle-operator-sdk/api"
@@ -753,6 +754,12 @@ func (r *ImportReconciler) createScratchPvcForPod(pvc *corev1.PersistentVolumeCl
// Scratch PVC doesn't exist yet, create it. Determine which storage class to use.
_, err = createScratchPersistentVolumeClaim(r.client, pvc, pod, scratchPVCName, storageClassName, r.installerLabels, r.recorder)
if err != nil {
+ if strings.Contains(err.Error(), "exceeded quota") {
+ innerErr := patchedDV.UpdateDVQuotaNotExceededConditionByPVC(r.client, pvc, corev1.ConditionFalse, fmt.Sprintf("Exceeded quota: %q", err.Error()), patchedDV.QuotaExceededReason)
+ if innerErr != nil {
+ return innerErr
+ }
+ }
return err
}
anno[cc.AnnBoundCondition] = "false"
diff --git a/pkg/controller/populators/populator-base.go b/pkg/controller/populators/populator-base.go
index 6c6fd8f8a..8fcda592c 100644
--- a/pkg/controller/populators/populator-base.go
+++ b/pkg/controller/populators/populator-base.go
@@ -18,7 +18,9 @@ package populators

import (
"context"
+ "fmt"
"reflect"
+ "strings"

"github.com/go-logr/logr"

@@ -40,6 +42,7 @@ import (
"kubevirt.io/containerized-data-importer/pkg/common"
cc "kubevirt.io/containerized-data-importer/pkg/controller/common"
featuregates "kubevirt.io/containerized-data-importer/pkg/feature-gates"
+ patchedDV "kubevirt.io/containerized-data-importer/pkg/patcheddatavolume"
"kubevirt.io/containerized-data-importer/pkg/util"
)

@@ -182,6 +185,11 @@ func (r *ReconcilerBase) createPVCPrime(pvc *corev1.PersistentVolumeClaim, sourc
annotations[cc.AnnPodRetainAfterCompletion] = pvc.Annotations[cc.AnnPodRetainAfterCompletion]
}

+ dvUid, ok := pvc.Annotations[cc.AnnCreatedForDataVolume]
+ if ok {
+ annotations[cc.AnnCreatedForDataVolume] = dvUid
+ }
+
// Assemble PVC' spec
pvcPrime := &corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
@@ -213,6 +221,12 @@ func (r *ReconcilerBase) createPVCPrime(pvc *corev1.PersistentVolumeClaim, sourc
}

if err := r.client.Create(context.TODO(), pvcPrime); err != nil {
+ if strings.Contains(err.Error(), "exceeded quota") {
+ innerErr := patchedDV.UpdateDVQuotaNotExceededConditionByPVC(r.client, pvc, corev1.ConditionFalse, fmt.Sprintf("Exceeded quota: %q", err.Error()), patchedDV.QuotaExceededReason)
+ if innerErr != nil {
+ return nil, innerErr
+ }
+ }
return nil, err
}
r.recorder.Eventf(pvc, corev1.EventTypeNormal, createdPVCPrimeSuccessfully, messageCreatedPVCPrimeSuccessfully)
diff --git a/pkg/controller/upload-controller.go b/pkg/controller/upload-controller.go
index 4c153257a..e21e40312 100644
--- a/pkg/controller/upload-controller.go
+++ b/pkg/controller/upload-controller.go
@@ -51,6 +51,7 @@ import (
cc "kubevirt.io/containerized-data-importer/pkg/controller/common"
featuregates "kubevirt.io/containerized-data-importer/pkg/feature-gates"
"kubevirt.io/containerized-data-importer/pkg/operator"
+ patchedDV "kubevirt.io/containerized-data-importer/pkg/patcheddatavolume"
"kubevirt.io/containerized-data-importer/pkg/util"
"kubevirt.io/containerized-data-importer/pkg/util/cert/fetcher"
"kubevirt.io/containerized-data-importer/pkg/util/cert/generator"
@@ -473,6 +474,12 @@ func (r *UploadReconciler) getOrCreateScratchPvc(pvc *corev1.PersistentVolumeCla
// Scratch PVC doesn't exist yet, create it.
scratchPvc, err = createScratchPersistentVolumeClaim(r.client, pvc, pod, name, storageClassName, map[string]string{}, r.recorder)
if err != nil {
+ if strings.Contains(err.Error(), "exceeded quota") {
+ innerErr := patchedDV.UpdateDVQuotaNotExceededConditionByPVC(r.client, pvc, corev1.ConditionFalse, fmt.Sprintf("Exceeded quota: %q", err.Error()), patchedDV.QuotaExceededReason)
+ if innerErr != nil {
+ return nil, innerErr
+ }
+ }
return nil, err
}
} else {
diff --git a/pkg/patcheddatavolume/patched_datavolume.go b/pkg/patcheddatavolume/patched_datavolume.go
new file mode 100644
index 000000000..29fc38c97
--- /dev/null
+++ b/pkg/patcheddatavolume/patched_datavolume.go
@@ -0,0 +1,124 @@
+package patcheddatavolume
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
+ "kubevirt.io/containerized-data-importer/pkg/controller/common"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+const (
+ QoutaNotExceededConditionType cdiv1.DataVolumeConditionType = "QuotaNotExceeded"
+
+ QuotaNotExceededReason string = "QuotaNotExceeded"
+ QuotaExceededReason string = "QuotaExceeded"
+
+ RunningConditionErrorReason string = "Error"
+)
+
+func FindConditionByType(conditionType cdiv1.DataVolumeConditionType, conditions []cdiv1.DataVolumeCondition) *cdiv1.DataVolumeCondition {
+ for i, condition := range conditions {
+ if condition.Type == conditionType {
+ return &conditions[i]
+ }
+ }
+ return nil
+}
+
+func UpdateDVQuotaNotExceededCondition(conditions []cdiv1.DataVolumeCondition) []cdiv1.DataVolumeCondition {
+ CreateDVQuotaIsNotExceededConditionIfNotExists(&conditions)
+ readyCondition := FindConditionByType(cdiv1.DataVolumeReady, conditions)
+ boundCondition := FindConditionByType(cdiv1.DataVolumeBound, conditions)
+ runningCondition := FindConditionByType(cdiv1.DataVolumeRunning, conditions)
+
+ switch {
+ case readyCondition != nil && readyCondition.Reason == common.ErrExceededQuota:
+ conditions = updateCondition(conditions, QoutaNotExceededConditionType, corev1.ConditionFalse, fmt.Sprintf("Exceeded quota: %q", readyCondition.Message), QuotaExceededReason)
+ case boundCondition != nil && boundCondition.Reason == common.ErrExceededQuota:
+ conditions = updateCondition(conditions, QoutaNotExceededConditionType, corev1.ConditionFalse, fmt.Sprintf("Exceeded quota: %q", boundCondition.Message), QuotaExceededReason)
+ case runningCondition != nil:
+ if runningCondition.Reason == common.ErrExceededQuota ||
+ runningCondition.Reason == RunningConditionErrorReason && strings.Contains(runningCondition.Message, "exceeded quota") {
+ conditions = updateCondition(conditions, QoutaNotExceededConditionType, corev1.ConditionFalse, fmt.Sprintf("Exceeded quota: %q", runningCondition.Message), QuotaExceededReason)
+ } else if runningCondition.Status == corev1.ConditionTrue {
+ conditions = updateCondition(conditions, QoutaNotExceededConditionType, corev1.ConditionTrue, "", QuotaNotExceededReason)
+ }
+ }
+
+ return conditions
+}
+
+func UpdateDVQuotaNotExceededConditionByPVC(clientObject client.Client, pvc *corev1.PersistentVolumeClaim, status corev1.ConditionStatus, message, reason string) error {
+ dv := getDVByPVC(clientObject, pvc, common.AnnCreatedForDataVolume)
+ if dv == nil {
+ return nil
+ }
+
+ dv.Status.Conditions = updateCondition(dv.Status.Conditions, QoutaNotExceededConditionType, status, message, reason)
+ return clientObject.Status().Update(context.TODO(), dv)
+}
+
+func CreateDVQuotaIsNotExceededConditionIfNotExists(conditions *[]cdiv1.DataVolumeCondition) {
+ if conditions == nil {
+ return
+ }
+
+ condition := FindConditionByType(QoutaNotExceededConditionType, *conditions)
+ if condition == nil {
+ *conditions = append(*conditions, cdiv1.DataVolumeCondition{
+ Type: QoutaNotExceededConditionType,
+ Status: corev1.ConditionTrue,
+ Reason: QuotaNotExceededReason,
+ Message: "",
+ })
+ }
+}
+
+func updateCondition(conditions []cdiv1.DataVolumeCondition, conditionType cdiv1.DataVolumeConditionType, status corev1.ConditionStatus, message, reason string) []cdiv1.DataVolumeCondition {
+ condition := FindConditionByType(conditionType, conditions)
+ if condition == nil {
+ conditions = append(conditions, cdiv1.DataVolumeCondition{
+ Type: conditionType,
+ })
+ condition = &conditions[len(conditions)-1]
+ }
+ if condition.Status != status {
+ condition.LastTransitionTime = metav1.Now()
+ condition.Message = message
+ condition.Reason = reason
+ condition.LastHeartbeatTime = condition.LastTransitionTime
+ } else if condition.Message != message || condition.Reason != reason {
+ condition.Message = message
+ condition.Reason = reason
+ condition.LastHeartbeatTime = metav1.Now()
+ }
+ condition.Status = status
+ return conditions
+}
+
+func getDVByPVC(clientObject client.Client, pvc *corev1.PersistentVolumeClaim, ann string) *cdiv1.DataVolume {
+ uid, ok := pvc.Annotations[ann]
+ if !ok {
+ return nil
+ }
+
+ var dvList cdiv1.DataVolumeList
+
+ err := clientObject.List(context.TODO(), &dvList, client.InNamespace(pvc.Namespace))
+ if err != nil {
+ return nil
+ }
+
+ for _, dv := range dvList.Items {
+ if string(dv.UID) == uid {
+ return &dv
+ }
+ }
+
+ return nil
+}
6 changes: 6 additions & 0 deletions images/cdi-artifact/patches/README.md
@@ -91,3 +91,9 @@ Add annotation to manage provisioner tolerations to avoid unschedulable error.
When cloning from PVC to PVC, it's necessary to select a cloning strategy. By default, the cloning strategy `snapshot` is selected.
However, `replicated.csi.storage.deckhouse.io` and `local.csi.storage.deckhouse.io` can create snapshots only when using LVM Thin.
To avoid errors, for LVM Thick, it's necessary to use `copy` cloning strategy (`csi-clone` is also unavailable since the CSI itself creates a snapshot when using `csi-clone`).

#### `022-add-datavolume-quouta-not-exceeded-condition.patch`

A new condition, QuotaNotExceeded, has been added to the DataVolume resource to indicate that the project's quotas have not been exceeded.

This patch relies on an architectural assumption: the condition on the DataVolume resource is modified by an external controller. Since CDI usage is planned to be discontinued in the future, this assumption is non-disruptive.
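
For illustration, a minimal sketch (not part of the patch) of how a consumer such as the virtualization controller could read this condition from a patched DataVolume. The helper name `quotaExceededMessage` is hypothetical; the `QuotaNotExceeded` condition type and the `QuotaExceeded` reason come from the patch above.

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
)

// quotaExceededMessage is a hypothetical helper: it reports whether the patched
// DataVolume carries the QuotaNotExceeded condition in the False state (i.e.
// provisioning is currently blocked by a project quota) and returns its message.
func quotaExceededMessage(dv *cdiv1.DataVolume) (bool, string) {
	for _, cond := range dv.Status.Conditions {
		if cond.Type == cdiv1.DataVolumeConditionType("QuotaNotExceeded") &&
			cond.Status == corev1.ConditionFalse {
			return true, cond.Message
		}
	}
	return false, ""
}
```

The VirtualDisk controller maps this state to the new `QuotaExceeded` ready reason, as the `BlankDataSource` and `HTTPDataSource` hunks below show.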
@@ -21,6 +21,7 @@ import (
"errors"
"fmt"

corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
@@ -84,6 +85,11 @@ func (ds BlankDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (rec
return reconcile.Result{}, err
}

var quotaNotExceededCondition *cdiv1.DataVolumeCondition
if dv != nil {
quotaNotExceededCondition = service.GetDataVolumeCondition(DVQoutaNotExceededConditionType, dv.Status.Conditions)
}

switch {
case isDiskProvisioningFinished(condition):
log.Debug("Disk provisioning finished: clean up")
@@ -139,6 +145,13 @@ func (ds BlankDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (rec
Message("PVC Provisioner not found: create the new one.")

return reconcile.Result{Requeue: true}, nil
case quotaNotExceededCondition != nil && quotaNotExceededCondition.Status == corev1.ConditionFalse:
vd.Status.Phase = virtv2.DiskPending
cb.
Status(metav1.ConditionFalse).
Reason(vdcondition.QuotaExceeded).
Message(quotaNotExceededCondition.Message)
return reconcile.Result{}, nil
case pvc == nil:
vd.Status.Phase = virtv2.DiskProvisioning
cb.
@@ -42,7 +42,6 @@ import (
"github.com/deckhouse/virtualization-controller/pkg/dvcr"
"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
"github.com/deckhouse/virtualization-controller/pkg/logger"
"github.com/deckhouse/virtualization/api/core/v1alpha2"
virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
"github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
)
@@ -106,6 +105,11 @@ func (ds HTTPDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reco
return reconcile.Result{}, err
}

var quotaNotExceededCondition *cdiv1.DataVolumeCondition
if dv != nil {
quotaNotExceededCondition = service.GetDataVolumeCondition(DVQoutaNotExceededConditionType, dv.Status.Conditions)
}

switch {
case isDiskProvisioningFinished(condition):
log.Debug("Disk provisioning finished: clean up")
@@ -136,7 +140,7 @@ func (ds HTTPDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reco
ds.recorder.Event(
vd,
corev1.EventTypeNormal,
v1alpha2.ReasonDataSourceSyncStarted,
virtv2.ReasonDataSourceSyncStarted,
"The HTTP DataSource import to DVCR has started",
)

@@ -195,7 +199,7 @@ func (ds HTTPDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reco
ds.recorder.Event(
vd,
corev1.EventTypeNormal,
v1alpha2.ReasonDataSourceSyncStarted,
virtv2.ReasonDataSourceSyncStarted,
"The HTTP DataSource import to PVC has started",
)

@@ -257,6 +261,13 @@ func (ds HTTPDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reco
Message("PVC Provisioner not found: create the new one.")

return reconcile.Result{Requeue: true}, nil
case quotaNotExceededCondition != nil && quotaNotExceededCondition.Status == corev1.ConditionFalse:
vd.Status.Phase = virtv2.DiskPending
cb.
Status(metav1.ConditionFalse).
Reason(vdcondition.QuotaExceeded).
Message(quotaNotExceededCondition.Message)
return reconcile.Result{}, nil
case pvc == nil:
vd.Status.Phase = virtv2.DiskProvisioning
cb.
@@ -270,7 +281,7 @@ func (ds HTTPDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reco
ds.recorder.Event(
vd,
corev1.EventTypeNormal,
v1alpha2.ReasonDataSourceSyncCompleted,
virtv2.ReasonDataSourceSyncCompleted,
"The HTTP DataSource import has completed",
)
