
provider: enable k3d cluster log export helper #485

Draft · wants to merge 1 commit into main
go.mod: 2 changes (1 addition & 1 deletion)

@@ -5,6 +5,7 @@ go 1.23.0
 toolchain go1.23.4

 require (
+	github.com/blang/semver/v4 v4.0.0
 	github.com/stretchr/testify v1.10.0
 	github.com/vladimirvivien/gexe v0.4.1
 	k8s.io/api v0.32.1
@@ -18,7 +19,6 @@ require (

 require (
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/blang/semver/v4 v4.0.0 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
third_party/k3d/k3d.go: 27 changes (23 additions & 4 deletions)

@@ -25,6 +25,7 @@ import (
 	"os"
 	"strings"

+	"github.com/blang/semver/v4"
 	"k8s.io/apimachinery/pkg/util/json"

 	"k8s.io/client-go/rest"
@@ -37,7 +38,8 @@ import (
 	log "k8s.io/klog/v2"
 )

-var k3dVersion = "v5.7.2"
+var k3dVersion = "v5.8.0"
+var k3dWithExportLogSupport = "v5.8.0"

 type Cluster struct {
 	path string
@@ -88,7 +90,7 @@ func WithImage(image string) support.ClusterOpts {
 }

 func NewCluster(name string) *Cluster {
-	return &Cluster{name: name}
+	return &Cluster{name: name, version: k3dVersion}
 }

 func NewProvider() support.E2EClusterProvider {
@@ -251,8 +253,25 @@ func (c *Cluster) GetKubectlContext() string {
 }

 func (c *Cluster) ExportLogs(ctx context.Context, dest string) error {
-	log.Warning("ExportLogs not implemented for k3d. Please use regular kubectl like commands to extract the logs from the cluster")
-	return nil
+	// ParseTolerant accepts a leading "v" (as in "v5.8.0"), which the strict
+	// semver.Parse would reject.
+	reqVersion, err := semver.ParseTolerant(c.version)
+	supVersion, _ := semver.ParseTolerant(k3dWithExportLogSupport)
+	if err != nil {
+		log.ErrorS(err, "failed to determine the k3d version to decide if the current version supports the log export helpers. Please use regular kubectl-like commands to extract the logs from the cluster")
+		return nil
+	}
+	var stdout, stderr bytes.Buffer
+	if reqVersion.GE(supVersion) {
+		p := utils.RunCommandWithCustomWriter(fmt.Sprintf("%s debug export-logs %s --path %s", c.path, c.name, dest), &stdout, &stderr)
+		if err = p.Err(); err != nil {
+			log.ErrorS(err, "failed to export cluster logs due to an error", "stdout", stdout.String(), "stderr", stderr.String(), "result", p.Result())
+			return err
+		}
+		return nil
+	} else {
+		log.Warning("ExportLogs is not supported by k3d versions older than v5.8.0. Please use regular kubectl-like commands to extract the logs from the cluster")
Contributor:
Do we know what that kubectl command would be to extract logs? Could we just use it as a fallback instead of just reporting a warning?

Contributor Author:
@vladimirvivien Actually, that "kubectl like" wording is probably a mistake. kubectl alone won't be enough; we would also have to collect a few more logs, and the problem is that we have to find a way to group them in a proper format. It can be tricky. (It would also be duplicate work to replicate that logic.)

If we want to add an export mechanism, we might be better off writing something generic that can do this for any provider that doesn't have log export support.

vcluster doesn't support it, and the same is true of kwok.

For kwok there is an ad hoc kubectl logs equivalent of export, but it only covers kwok's own components.

How do we go about doing this?

Contributor Author:
We also have to worry about fetching the journalctl logs, dmesg, and any other applicable logs. It might not be a bad idea to implement something generic that can be reused by any future provider that lacks log export capabilities.

Contributor Author:
package utils

import (
	"bytes"
	"context"
	"fmt"
	"os"
	"path/filepath"

	v1 "k8s.io/api/core/v1"
	log "k8s.io/klog/v2"
	"sigs.k8s.io/e2e-framework/klient/k8s/resources"
	"sigs.k8s.io/e2e-framework/pkg/types"
)

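// LogCollector collects container logs from every pod in a cluster into a
// local directory tree rooted at baseDir.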
type LogCollector struct {
	resourceFetcher *resources.Resources
	baseDir         string
}

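// NewLogCollector creates the destination directory and a resource client
// for the cluster behind the given provider.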
func NewLogCollector(provider types.E2EClusterProvider, clusterName, destination string) (*LogCollector, error) {
	baseDir := filepath.Join(destination, fmt.Sprintf("debug-logs-%s", clusterName))
	if err := os.MkdirAll(baseDir, os.FileMode(0755)); err != nil {
		log.ErrorS(err, "failed to create base dir required to collect the logs", "dir", destination)
		return nil, err
	}

	resourceFetcher, err := resources.New(provider.KubernetesRestConfig())
	if err != nil {
		log.ErrorS(err, "failed to create resource fetcher")
		return nil, err
	}

	return &LogCollector{
		resourceFetcher: resourceFetcher,
		baseDir:         baseDir,
	}, nil
}

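// CollectLogs walks every namespace in the cluster and collects the logs of
// all pods found there.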
func (lc *LogCollector) CollectLogs(ctx context.Context) error {
	var namespaces v1.NamespaceList
	if err := lc.resourceFetcher.List(ctx, &namespaces); err != nil {
		log.ErrorS(err, "failed to list namespaces in the cluster")
		return err
	}

	for _, ns := range namespaces.Items {
		if err := lc.collectNamespaceLogs(ctx, ns.Name); err != nil {
			return err
		}
	}
	return nil
}

func (lc *LogCollector) collectNamespaceLogs(ctx context.Context, namespace string) error {
	log.V(3).InfoS("Collecting POD information for namespace", "namespace", namespace)
	var pods v1.PodList
	if err := lc.resourceFetcher.WithNamespace(namespace).List(ctx, &pods); err != nil {
		log.ErrorS(err, "failed to list pods in the namespace", "namespace", namespace)
		return err
	}

	for _, pod := range pods.Items {
		if err := lc.collectPodLogs(ctx, namespace, pod); err != nil {
			return err
		}
	}
	return nil
}

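// collectPodLogs gathers logs for all of a pod's containers (init containers
// included) under a <namespace>_<pod>_<uid> directory; static pods are keyed
// by their kubernetes.io/config.hash annotation instead of the UID.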
func (lc *LogCollector) collectPodLogs(ctx context.Context, namespace string, pod v1.Pod) error {
	uid := string(pod.GetUID())
	if hash, ok := pod.GetAnnotations()["kubernetes.io/config.hash"]; ok {
		uid = hash
	}
	podBaseDir := filepath.Join(lc.baseDir, fmt.Sprintf("%s_%s_%s", namespace, pod.Name, uid))
	if err := os.MkdirAll(podBaseDir, os.FileMode(0755)); err != nil {
		return err
	}

	containers := append(pod.Spec.Containers, pod.Spec.InitContainers...)
	containerStatus := append(pod.Status.ContainerStatuses, pod.Status.InitContainerStatuses...)

	for _, container := range containers {
		if err := lc.collectContainerLogs(ctx, namespace, pod.Name, container, containerStatus, podBaseDir); err != nil {
			return err
		}
	}
	return nil
}

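// collectContainerLogs fetches the current log stream of a single container
// and writes it as <restartCount>.log, mirroring the kubelet's
// /var/log/pods layout.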
func (lc *LogCollector) collectContainerLogs(ctx context.Context, namespace, podName string, container v1.Container, containerStatus []v1.ContainerStatus, podBaseDir string) error {
	containerBaseDir := filepath.Join(podBaseDir, container.Name)
	if err := os.MkdirAll(containerBaseDir, os.FileMode(0755)); err != nil {
		return err
	}
	log.V(3).InfoS("Collecting logs for pod", "namespace", namespace, "pod", podName, "container", container.Name)

	var podLog bytes.Buffer
	if err := lc.resourceFetcher.GetPodLog(ctx, namespace, podName, container.Name, &podLog); err != nil {
		return err
	}

	restartCount := 0
	for _, cs := range containerStatus {
		if cs.Name == container.Name {
			restartCount = int(cs.RestartCount)
			break
		}
	}

	if err := os.WriteFile(filepath.Join(containerBaseDir, fmt.Sprintf("%d.log", restartCount)), podLog.Bytes(), os.FileMode(0644)); err != nil {
		return err
	}
	return nil
}

@cpanato @vladimirvivien Would something like this help? We can also use the NodeLogQuery feature to fetch most of the node logs that we might want to collect.
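
For reference, a rough sketch of what reading node logs through the NodeLogQuery kubelet endpoint could look like, assuming a client-go clientset against a cluster whose kubelets run with the NodeLogQuery feature gate enabled (the helper name and the "kubelet" query value are illustrative):

package utils

import (
	"context"

	"k8s.io/client-go/kubernetes"
)

// fetchNodeLogs reads service logs for one node through the node proxy
// "logs" endpoint, which is backed by the alpha NodeLogQuery feature.
func fetchNodeLogs(ctx context.Context, clientset kubernetes.Interface, nodeName string) ([]byte, error) {
	return clientset.CoreV1().RESTClient().
		Get().
		Resource("nodes").
		Name(nodeName).
		SubResource("proxy").
		Suffix("logs").
		Param("query", "kubelet"). // illustrative: the service whose logs to fetch
		DoRaw(ctx)
}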

+		return nil
+	}
 }

 func (c *Cluster) Destroy(ctx context.Context) error {
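
To show how this lands end to end, here is a minimal sketch of a test suite wiring the new log export into teardown, assuming the e2e-framework env/envfuncs API (the cluster name, the ./logs destination, and the exact envfuncs wiring are illustrative):

package e2e

import (
	"os"
	"testing"

	"sigs.k8s.io/e2e-framework/pkg/env"
	"sigs.k8s.io/e2e-framework/pkg/envconf"
	"sigs.k8s.io/e2e-framework/pkg/envfuncs"
	"sigs.k8s.io/e2e-framework/third_party/k3d"
)

var testEnv env.Environment

func TestMain(m *testing.M) {
	testEnv = env.New()
	clusterName := envconf.RandomName("k3d-logs", 16)
	testEnv.Setup(
		// Create a k3d cluster; the provider now defaults the version to k3dVersion (v5.8.0).
		envfuncs.CreateCluster(k3d.NewProvider(), clusterName),
	)
	testEnv.Finish(
		// With k3d >= v5.8.0 this delegates to `k3d debug export-logs`.
		envfuncs.ExportClusterLogs(clusterName, "./logs"),
		envfuncs.DestroyCluster(clusterName),
	)
	os.Exit(testEnv.Run(m))
}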