Skip to content

Commit

Permalink
Update
Browse files Browse the repository at this point in the history
Signed-off-by: Yi Chen <[email protected]>
  • Loading branch information
ChenYi015 committed Jun 27, 2024
1 parent 38476e7 commit 3bd806b
Show file tree
Hide file tree
Showing 64 changed files with 2,541 additions and 16,590 deletions.
28 changes: 28 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,9 @@ CONTAINER_TOOL ?= docker
IMAGE_REPOSITORY ?= docker.io/kubeflow/spark-operator
IMAGE_TAG ?= latest

# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
ENVTEST_K8S_VERSION = 1.30.0

##@ General

# The help target prints out all targets with their descriptions organized
Expand Down Expand Up @@ -65,6 +68,31 @@ clean: ## Clean up caches and output
go clean -cache -testcache -r -x 2>&1 >/dev/null
-rm -rf _output

.PHONY: go-fmt
go-fmt: ## Run go fmt against code.
	go fmt ./...

# Static analysis: vet reports suspicious constructs not caught by the compiler.
.PHONY: go-vet
go-vet: ## Run go vet against code.
	go vet ./...

.PHONY: test
test: manifests generate go-fmt go-vet envtest ## Run go tests.
	# KUBEBUILDER_ASSETS points envtest at locally downloaded kube-apiserver/etcd
	# binaries for ENVTEST_K8S_VERSION; e2e packages are excluded (run via test-e2e).
	KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test $$(go list ./... | grep -v /e2e) -coverprofile cover.out

# Utilize Kind or modify the e2e tests to load the image locally, enabling compatibility with other vendors.
# NOTE: the help description must live on the rule line as a `##` comment
# (not on the .PHONY line) for the conventional `make help` awk/grep to find it.
.PHONY: test-e2e
test-e2e: ## Run the e2e tests against a Kind k8s instance that is spun up.
	go test ./test/e2e/ -v -ginkgo.v

.PHONY: lint
lint: golangci-lint ## Run golangci-lint linter.
	$(GOLANGCI_LINT) run

.PHONY: lint-fix
lint-fix: golangci-lint ## Run golangci-lint linter and perform fixes.
	$(GOLANGCI_LINT) run --fix

.PHONY: fmt-check
fmt-check: clean ## Run go fmt against code
@echo "running fmt check"; cd "$(dirname $0)"; \
Expand Down
2 changes: 2 additions & 0 deletions api/v1beta1/scheduledsparkapplication_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,8 @@ import (
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.

// +kubebuilder:skip

// init registers ScheduledSparkApplication and its list type with the
// package's SchemeBuilder so they are added to any scheme built from it.
func init() {
	SchemeBuilder.Register(&ScheduledSparkApplication{}, &ScheduledSparkApplicationList{})
}
Expand Down
18 changes: 10 additions & 8 deletions api/v1beta1/sparkapplication_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,13 +17,15 @@ limitations under the License.
package v1beta1

import (
apiv1 "k8s.io/api/core/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.

// +kubebuilder:skip

// init registers SparkApplication and its list type with the package's
// SchemeBuilder so they are added to any scheme built from it.
func init() {
	SchemeBuilder.Register(&SparkApplication{}, &SparkApplicationList{})
}
Expand Down Expand Up @@ -81,7 +83,7 @@ type SparkApplicationSpec struct {
HadoopConfigMap *string `json:"hadoopConfigMap,omitempty"`
// Volumes is the list of Kubernetes volumes that can be mounted by the driver and/or executors.
// Optional.
Volumes []apiv1.Volume `json:"volumes,omitempty"`
Volumes []corev1.Volume `json:"volumes,omitempty"`
// Driver is the driver specification.
Driver DriverSpec `json:"driver"`
// Executor is the executor specification.
Expand Down Expand Up @@ -324,22 +326,22 @@ type SparkPodSpec struct {
Annotations map[string]string `json:"annotations,omitempty"`
// VolumeMounts specifies the volumes listed in ".spec.volumes" to mount into the main container's filesystem.
// Optional.
VolumeMounts []apiv1.VolumeMount `json:"volumeMounts,omitempty"`
VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"`
// Affinity specifies the affinity/anti-affinity settings for the pod.
// Optional.
Affinity *apiv1.Affinity `json:"affinity,omitempty"`
Affinity *corev1.Affinity `json:"affinity,omitempty"`
// Tolerations specifies the tolerations listed in ".spec.tolerations" to be applied to the pod.
// Optional.
Tolerations []apiv1.Toleration `json:"tolerations,omitempty"`
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
// SecurityContext specifies the PodSecurityContext to apply.
// Optional.
SecurityContext *apiv1.PodSecurityContext `json:"securityContext,omitempty"`
SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"`
// SchedulerName specifies the scheduler that will be used for scheduling
// Optional.
SchedulerName *string `json:"schedulerName,omitempty"`
// Sidecars is a list of sidecar containers that run along side the main Spark container.
// Optional.
Sidecars []apiv1.Container `json:"sidecars,omitempty"`
Sidecars []corev1.Container `json:"sidecars,omitempty"`
// HostNetwork indicates whether to request host networking for the pod or not.
// Optional.
HostNetwork *bool `json:"hostNetwork,omitempty"`
Expand All @@ -349,7 +351,7 @@ type SparkPodSpec struct {
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// DnsConfig dns settings for the pod, following the Kubernetes specifications.
// Optional.
DNSConfig *apiv1.PodDNSConfig `json:"dnsConfig,omitempty"`
DNSConfig *corev1.PodDNSConfig `json:"dnsConfig,omitempty"`
}

// DriverSpec is specification of the driver.
Expand Down
14 changes: 0 additions & 14 deletions api/v1beta2/register.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,6 @@ limitations under the License.
package v1beta2

import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)

Expand All @@ -34,15 +32,3 @@ var SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version}
// Resource qualifies an unqualified resource name with this package's API
// group, yielding the GroupResource used for discovery and error reporting.
func Resource(resource string) schema.GroupResource {
	gvr := SchemeGroupVersion.WithResource(resource)
	return gvr.GroupResource()
}

// addKnownTypes adds the set of types defined in this package to the supplied scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
	// Register both application kinds and their list counterparts so the
	// scheme can encode/decode them under SchemeGroupVersion.
	scheme.AddKnownTypes(SchemeGroupVersion,
		&SparkApplication{},
		&SparkApplicationList{},
		&ScheduledSparkApplication{},
		&ScheduledSparkApplicationList{},
	)
	// Also register the shared meta types (ListOptions, etc.) for this
	// GroupVersion; required for list/watch machinery to work.
	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
}
Loading

0 comments on commit 3bd806b

Please sign in to comment.