From 17a4d9aa6ccd6a1a0ece4e88cbd9cb8a58f27e22 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Moritz=20R=C3=B6hrich?=
Date: Mon, 26 Aug 2024 15:02:37 +0200
Subject: [PATCH] Tests: Enable Acceptance Tests
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Enable acceptance tests and fix existing tests.

1) Run the acceptance tests as part of `make test` when the user places
a kubeconfig file in the repository. The kubeconfig is necessary
because the acceptance tests run against a real Harvester cluster. If
the kubeconfig file is missing, only the unit tests are run, which do
not require a Harvester cluster.

2) Fix existing tests. Some acceptance tests were using invalid
Terraform resource definitions; these are fixed.

3) Fix the volume and network resource providers. Running the
acceptance tests revealed bugs in both: they used an incorrect method
to determine whether a resource had been deleted in Harvester, waiting
for a deletion event even though the respective controllers never emit
one. The fix is to observe the resource through periodic reads until it
is gone.

4) Fix propagating the cluster network name of a network resource into
the Terraform state. The state needs to track this property to
determine whether the resource definition has changed; otherwise the
resource is always considered modified, even when nothing changed. This
bug was also revealed by the acceptance tests.

Signed-off-by: Moritz Röhrich
---
 Dockerfile.dapper                             | 13 +++--
 internal/provider/network/resource_network.go | 48 ++++++++++++++-----
 .../network/resource_network_constructor.go   | 18 ++++---
 internal/provider/volume/resource_volume.go   | 42 +++++++++++++---
 internal/tests/resource_network_test.go       | 21 ++++++--
 internal/tests/resource_storageclass_test.go  |  2 +
 scripts/ci                                    |  2 +-
 scripts/default                               |  5 +-
 scripts/test                                  |  9 +++-
 9 files changed, 122 insertions(+), 38 deletions(-)

diff --git a/Dockerfile.dapper b/Dockerfile.dapper
index 8a4e6648..f329839e 100644
--- a/Dockerfile.dapper
+++ b/Dockerfile.dapper
@@ -4,7 +4,7 @@ ARG DAPPER_HOST_ARCH
 ENV ARCH $DAPPER_HOST_ARCH
 
 RUN zypper -n rm container-suseconnect && \
-    zypper -n install curl docker gzip tar wget awk zip
+    zypper -n install curl docker gzip tar wget awk zip unzip
 
 # install goimports
 RUN GO111MODULE=on go install golang.org/x/tools/cmd/goimports@v0.1.11
@@ -13,9 +13,12 @@ RUN GO111MODULE=on go install golang.org/x/tools/cmd/goimports@v0.1.11
 RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.57.1
 
 # The docker version in dapper is too old to have buildx. Install it manually.
-RUN wget https://github.com/docker/buildx/releases/download/v0.13.1/buildx-v0.13.1.linux-${ARCH} && \ +RUN wget --quiet https://github.com/docker/buildx/releases/download/v0.13.1/buildx-v0.13.1.linux-${ARCH} && \ + wget --quiet https://releases.hashicorp.com/terraform/0.13.4/terraform_0.13.4_linux_${ARCH}.zip && \ chmod +x buildx-v0.13.1.linux-${ARCH} && \ - mv buildx-v0.13.1.linux-${ARCH} /usr/local/bin/buildx + unzip terraform_0.13.4_linux_${ARCH}.zip && \ + mv buildx-v0.13.1.linux-${ARCH} /usr/local/bin/buildx && \ + mv terraform /usr/local/bin/terraform ENV DAPPER_ENV REPO TAG DRONE_TAG ENV DAPPER_SOURCE /go/src/github.com/harvester/terraform-provider-harvester @@ -23,6 +26,10 @@ ENV DAPPER_OUTPUT ./bin ./dist ENV DAPPER_DOCKER_SOCKET true ENV HOME ${DAPPER_SOURCE} +COPY go.mod ${DAPPER_SOURCE}/go.mod +COPY go.sum ${DAPPER_SOURCE}/go.sum WORKDIR ${DAPPER_SOURCE} +RUN go mod download + ENTRYPOINT ["./scripts/entry"] CMD ["ci"] diff --git a/internal/provider/network/resource_network.go b/internal/provider/network/resource_network.go index b7834e30..e71fb05f 100644 --- a/internal/provider/network/resource_network.go +++ b/internal/provider/network/resource_network.go @@ -2,10 +2,10 @@ package network import ( "context" - "fmt" "time" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" nadv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -42,7 +42,8 @@ func resourceNetworkCreate(ctx context.Context, d *schema.ResourceData, meta int c := meta.(*client.Client) namespace := d.Get(constants.FieldCommonNamespace).(string) name := d.Get(constants.FieldCommonName).(string) - toCreate, err := util.ResourceConstruct(d, Creator(c, ctx, namespace, name)) + clusterNetworkName := d.Get(constants.FieldNetworkClusterNetworkName).(string) + toCreate, err := util.ResourceConstruct(d, Creator(c, ctx, namespace, name, clusterNetworkName)) if err != nil { return diag.FromErr(err) } @@ -106,16 +107,16 @@ func resourceNetworkDelete(ctx context.Context, d *schema.ResourceData, meta int return diag.FromErr(err) } - ctxDeadline, _ := ctx.Deadline() - events, err := c.HarvesterClient. - K8sCniCncfIoV1(). - NetworkAttachmentDefinitions(namespace). - Watch(ctx, util.WatchOptions(name, time.Until(ctxDeadline))) - if err != nil { - return diag.FromErr(err) + stateConf := &resource.StateChangeConf{ + Pending: []string{constants.StateCommonActive}, + Target: []string{constants.StateCommonRemoved}, + Refresh: resourceNetworkRefresh(ctx, d, meta), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 1 * time.Second, + MinTimeout: 3 * time.Second, } - if !util.HasDeleted(events) { - return diag.FromErr(fmt.Errorf("timeout waiting for network %s to be deleted", d.Id())) + if _, err = stateConf.WaitForStateContext(ctx); err != nil { + return diag.FromErr(err) } d.SetId("") @@ -129,3 +130,28 @@ func resourceNetworkImport(d *schema.ResourceData, obj *nadv1.NetworkAttachmentD } return util.ResourceStatesSet(d, stateGetter) } + +func resourceNetworkRefresh(ctx context.Context, d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + c := meta.(*client.Client) + namespace, name, err := helper.IDParts(d.Id()) + if err != nil { + return nil, constants.StateCommonError, err + } + + obj, err := c.HarvesterClient. + K8sCniCncfIoV1(). 
+ NetworkAttachmentDefinitions(namespace). + Get(ctx, name, metav1.GetOptions{}) + if err != nil { + if apierrors.IsNotFound(err) { + return obj, constants.StateCommonRemoved, nil + } + return obj, constants.StateCommonError, err + } + if err = resourceNetworkImport(d, obj); err != nil { + return obj, constants.StateCommonError, err + } + return obj, constants.StateCommonActive, nil + } +} diff --git a/internal/provider/network/resource_network_constructor.go b/internal/provider/network/resource_network_constructor.go index 79ce84be..b702efe7 100644 --- a/internal/provider/network/resource_network_constructor.go +++ b/internal/provider/network/resource_network_constructor.go @@ -35,8 +35,10 @@ func (c *Constructor) Setup() util.Processors { Field: constants.FieldNetworkClusterNetworkName, Parser: func(i interface{}) error { c.ClusterNetworkName = i.(string) + c.Network.Labels[networkutils.KeyClusterNetworkLabel] = c.ClusterNetworkName return nil }, + Required: true, }, { Field: constants.FieldNetworkVlanID, @@ -118,18 +120,20 @@ func (c *Constructor) Result() (interface{}, error) { func newNetworkConstructor(c *client.Client, ctx context.Context, network *nadv1.NetworkAttachmentDefinition) util.Constructor { return &Constructor{ - Client: c, - Context: ctx, - Network: network, - Layer3NetworkConf: &networkutils.Layer3NetworkConf{}, + Client: c, + Context: ctx, + ClusterNetworkName: network.Labels[networkutils.KeyClusterNetworkLabel], + Network: network, + Layer3NetworkConf: &networkutils.Layer3NetworkConf{}, } } -func Creator(c *client.Client, ctx context.Context, namespace, name string) util.Constructor { - Network := &nadv1.NetworkAttachmentDefinition{ +func Creator(c *client.Client, ctx context.Context, namespace, name, clusterNetworkName string) util.Constructor { + network := &nadv1.NetworkAttachmentDefinition{ ObjectMeta: util.NewObjectMeta(namespace, name), } - return newNetworkConstructor(c, ctx, Network) + network.Labels[networkutils.KeyClusterNetworkLabel] = clusterNetworkName + return newNetworkConstructor(c, ctx, network) } func Updater(c *client.Client, ctx context.Context, network *nadv1.NetworkAttachmentDefinition) util.Constructor { diff --git a/internal/provider/volume/resource_volume.go b/internal/provider/volume/resource_volume.go index 7d9d8ef2..05cc7bb3 100644 --- a/internal/provider/volume/resource_volume.go +++ b/internal/provider/volume/resource_volume.go @@ -5,6 +5,7 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -104,14 +105,18 @@ func resourceVolumeDelete(ctx context.Context, d *schema.ResourceData, meta inte return diag.FromErr(err) } - ctxDeadline, _ := ctx.Deadline() - events, err := c.KubeClient.CoreV1().PersistentVolumeClaims(namespace).Watch(ctx, util.WatchOptions(name, time.Until(ctxDeadline))) - if err != nil { - return diag.FromErr(err) + stateConf := &resource.StateChangeConf{ + Pending: []string{constants.StateCommonActive}, + Target: []string{constants.StateCommonRemoved}, + Refresh: resourceVolumeRefresh(ctx, d, meta), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 1 * time.Second, + MinTimeout: 3 * time.Second, } - if !util.HasDeleted(events) { - return diag.Errorf("timeout waiting for volume %s to be deleted", d.Id()) + if _, err = stateConf.WaitForStateContext(ctx); err != nil { + return diag.FromErr(err) } + 
d.SetId("") return nil } @@ -123,3 +128,28 @@ func resourceVolumeImport(d *schema.ResourceData, obj *corev1.PersistentVolumeCl } return util.ResourceStatesSet(d, stateGetter) } + +func resourceVolumeRefresh(ctx context.Context, d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + c := meta.(*client.Client) + namespace, name, err := helper.IDParts(d.Id()) + if err != nil { + return nil, constants.StateCommonError, err + } + + obj, err := c.KubeClient. + CoreV1(). + PersistentVolumeClaims(namespace). + Get(ctx, name, metav1.GetOptions{}) + if err != nil { + if apierrors.IsNotFound(err) { + return obj, constants.StateCommonRemoved, nil + } + return obj, constants.StateCommonError, err + } + if err = resourceVolumeImport(d, obj); err != nil { + return obj, constants.StateCommonError, err + } + return obj, constants.StateCommonActive, nil + } +} diff --git a/internal/tests/resource_network_test.go b/internal/tests/resource_network_test.go index b736bf38..4d080341 100644 --- a/internal/tests/resource_network_test.go +++ b/internal/tests/resource_network_test.go @@ -21,10 +21,12 @@ const ( testAccNetworkResourceName = constants.ResourceTypeNetwork + "." + testAccNetworkName testAccNetworkDescription = "Terraform Harvester Network acceptance test" - testAccNetworkVlanID = "10" + testAccNetworkClusterNetworkName = "mgmt" + testAccNetworkVlanID = "0" testAccNetworkConfigTemplate = ` resource %s "%s" { + %s = "%s" %s = "%s" %s = "%s" %s = %s @@ -32,10 +34,11 @@ resource %s "%s" { ` ) -func buildNetworkConfig(name, description, vlanID string) string { +func buildNetworkConfig(name, description, clusterNetworkName, vlanID string) string { return fmt.Sprintf(testAccNetworkConfigTemplate, constants.ResourceTypeNetwork, name, constants.FieldCommonName, name, constants.FieldCommonDescription, description, + constants.FieldNetworkClusterNetworkName, clusterNetworkName, constants.FieldNetworkVlanID, vlanID) } @@ -50,11 +53,21 @@ func TestAccNetwork_basic(t *testing.T) { CheckDestroy: testAccCheckNetworkDestroy(ctx), Steps: []resource.TestStep{ { - Config: buildNetworkConfig(testAccNetworkName, testAccNetworkDescription, "4045"), + Config: buildNetworkConfig( + testAccNetworkName, + testAccNetworkDescription, + testAccNetworkClusterNetworkName, + "4095", + ), ExpectError: regexp.MustCompile(fmt.Sprintf(`expected %s to be in the range \(0 - 4094\)`, constants.FieldNetworkVlanID)), }, { - Config: buildNetworkConfig(testAccNetworkName, testAccNetworkDescription, testAccNetworkVlanID), + Config: buildNetworkConfig( + testAccNetworkName, + testAccNetworkDescription, + testAccNetworkClusterNetworkName, + testAccNetworkVlanID, + ), Check: resource.ComposeTestCheckFunc( testAccNetworkExists(ctx, testAccNetworkResourceName, network), resource.TestCheckResourceAttr(testAccNetworkResourceName, constants.FieldCommonName, testAccNetworkName), diff --git a/internal/tests/resource_storageclass_test.go b/internal/tests/resource_storageclass_test.go index 76195c70..ac2cc12e 100644 --- a/internal/tests/resource_storageclass_test.go +++ b/internal/tests/resource_storageclass_test.go @@ -24,6 +24,8 @@ const ( resource %s "%s" { %s = "%s" %s = "%s" + parameters = { + } } ` ) diff --git a/scripts/ci b/scripts/ci index e6fae796..b35955a7 100755 --- a/scripts/ci +++ b/scripts/ci @@ -3,7 +3,7 @@ set -e cd $(dirname $0) +./validate ./build ./test -./validate ./package diff --git a/scripts/default b/scripts/default index e6fae796..ba2afe8b 100755 --- a/scripts/default 
+++ b/scripts/default @@ -3,7 +3,4 @@ set -e cd $(dirname $0) -./build -./test -./validate -./package +./ci diff --git a/scripts/test b/scripts/test index e6474efd..f8bee313 100755 --- a/scripts/test +++ b/scripts/test @@ -3,5 +3,10 @@ set -e cd $(dirname $0)/.. -echo Running tests -go test -cover -tags=test . +if [ -f ./kubeconfig_test.yaml ] ; then + export KUBECONFIG="$(pwd)/kubeconfig_test.yaml" + export TF_ACC=1 +fi + +echo Running tests: +go test -v -cover -tags=test . ./...
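
The TF_ACC export in scripts/test works because the Terraform plugin
SDK's test harness skips acceptance tests unless that environment
variable is set. A minimal sketch of the gate, under the assumption of
an illustrative test in this repository's test package (the test name
and skip message are made up for the example):

    package tests

    import (
        "os"
        "testing"
    )

    // TestAccExample mirrors the check that resource.Test applies
    // internally: without TF_ACC, the acceptance test is skipped, so a
    // plain `go test` run without kubeconfig_test.yaml exercises only
    // the unit tests.
    func TestAccExample(t *testing.T) {
        if os.Getenv("TF_ACC") == "" {
            t.Skip("set TF_ACC=1 and place kubeconfig_test.yaml in the repository root")
        }
        // A real acceptance test would call resource.Test(t, resource.TestCase{...}).
    }

In practice this means `make test` behaves as before on machines
without a Harvester cluster, and runs the full suite when the
kubeconfig is present.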
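
The deletion fix in both resource providers replaces the watch with
polling via resource.StateChangeConf. Stripped of provider specifics,
the pattern looks like the sketch below; the `get` parameter is a
hypothetical stand-in for the typed client's Get call, and the state
names are shortened:

    package main

    import (
        "context"
        "time"

        "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
        apierrors "k8s.io/apimachinery/pkg/api/errors"
    )

    // waitForDeletion polls a resource until a NotFound read confirms
    // it is gone. The periodic read is the reliable signal, since the
    // Harvester controllers do not emit a deletion event that a watch
    // could observe.
    func waitForDeletion(ctx context.Context, timeout time.Duration,
        get func(ctx context.Context) (interface{}, error)) error {
        stateConf := &resource.StateChangeConf{
            Pending:    []string{"Active"},
            Target:     []string{"Removed"},
            Timeout:    timeout,
            Delay:      1 * time.Second,
            MinTimeout: 3 * time.Second,
            Refresh: func() (interface{}, string, error) {
                obj, err := get(ctx)
                if err != nil {
                    if apierrors.IsNotFound(err) {
                        // Return a non-nil value: StateChangeConf treats
                        // a nil result as "not found yet" and keeps waiting.
                        return struct{}{}, "Removed", nil
                    }
                    return nil, "Error", err
                }
                return obj, "Active", nil
            },
        }
        _, err := stateConf.WaitForStateContext(ctx)
        return err
    }

The Delay and MinTimeout values match the ones used in the patch; they
keep the poll loop from hammering the API server while still detecting
deletion within a few seconds.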
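
The cluster network name fix round-trips the value through the object's
labels: the constructor writes it when the NetworkAttachmentDefinition
is created, and the importer reads it back into the Terraform state so
a plan can tell a real change from a no-op. A sketch of the read-back
half, assuming the field name cluster_network_name and a stand-in label
key (the patch uses networkutils.KeyClusterNetworkLabel):

    package main

    import (
        "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
        nadv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
    )

    // keyClusterNetworkLabel is an illustrative stand-in for
    // networkutils.KeyClusterNetworkLabel.
    const keyClusterNetworkLabel = "network.harvesterhci.io/clusternetwork"

    // syncClusterNetworkName copies the label back into the Terraform
    // state. Without this step the state never records the value, so
    // every plan reports the network resource as modified.
    func syncClusterNetworkName(d *schema.ResourceData, obj *nadv1.NetworkAttachmentDefinition) error {
        return d.Set("cluster_network_name", obj.Labels[keyClusterNetworkLabel])
    }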