Skip to content

Commit

Permalink
refactor(logging): use slog instead of logrus (#501)
Browse files Browse the repository at this point in the history
Everything uses slog now and the logger is part of every struct
  • Loading branch information
acouvreur authored Feb 3, 2025
1 parent f29b13a commit 8844a36
Show file tree
Hide file tree
Showing 32 changed files with 440 additions and 448 deletions.
2 changes: 1 addition & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ $(PLATFORMS):
CGO_ENABLED=0 GOOS=$(os) GOARCH=$(arch) go build -trimpath -tags=nomsgpack -v -ldflags="${GO_LDFLAGS}" -o 'sablier_$(VERSION)_$(os)-$(arch)' .

run:
go run main.go start
go run main.go start --storage.file=state.json --logging.level=debug

generate:
go generate ./...
Expand Down
22 changes: 7 additions & 15 deletions app/discovery/autostop.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,18 +5,15 @@ import (
"errors"
"github.com/sablierapp/sablier/app/providers"
"github.com/sablierapp/sablier/pkg/store"
log "github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
"log/slog"
)

// StopAllUnregisteredInstances stops all auto-discovered running instances that are not yet registered
// as running instances by Sablier.
// By default, Sablier does not stop all already running instances. Meaning that you need to make an
// initial request in order to trigger the scaling to zero.
func StopAllUnregisteredInstances(ctx context.Context, provider providers.Provider, s store.Store) error {
log.Info("Stopping all unregistered running instances")

log.Tracef("Retrieving all instances with label [%v=true]", LabelEnable)
func StopAllUnregisteredInstances(ctx context.Context, provider providers.Provider, s store.Store, logger *slog.Logger) error {
instances, err := provider.InstanceList(ctx, providers.InstanceListOptions{
All: false, // Only running containers
Labels: []string{LabelEnable},
Expand All @@ -25,7 +22,6 @@ func StopAllUnregisteredInstances(ctx context.Context, provider providers.Provid
return err
}

log.Tracef("Found %v instances with label [%v=true]", len(instances), LabelEnable)
unregistered := make([]string, 0)
for _, instance := range instances {
_, err = s.Get(ctx, instance.Name)
Expand All @@ -34,29 +30,25 @@ func StopAllUnregisteredInstances(ctx context.Context, provider providers.Provid
}
}

log.Tracef("Found %v unregistered instances ", len(instances))
logger.DebugContext(ctx, "found instances to stop", slog.Any("instances", unregistered))

waitGroup := errgroup.Group{}

// Previously, the variables declared by a “for” loop were created once and updated by each iteration.
// In Go 1.22, each iteration of the loop creates new variables, to avoid accidental sharing bugs.
// The transition support tooling described in the proposal continues to work in the same way it did in Go 1.21.
for _, name := range unregistered {
waitGroup.Go(stopFunc(ctx, name, provider))
waitGroup.Go(stopFunc(ctx, name, provider, logger))
}

return waitGroup.Wait()
}

func stopFunc(ctx context.Context, name string, provider providers.Provider) func() error {
func stopFunc(ctx context.Context, name string, provider providers.Provider, logger *slog.Logger) func() error {
return func() error {
log.Tracef("Stopping %v...", name)
err := provider.Stop(ctx, name)
if err != nil {
log.Errorf("Could not stop %v: %v", name, err)
logger.ErrorContext(ctx, "failed to stop instance", slog.String("instance", name), slog.Any("error", err))
return err
}
log.Tracef("Successfully stopped %v", name)
logger.InfoContext(ctx, "stopped unregistered instance", slog.String("instance", name), slog.String("reason", "instance is enabled but not started by Sablier"))
return nil
}
}
5 changes: 3 additions & 2 deletions app/discovery/autostop_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ package discovery_test
import (
"context"
"errors"
"github.com/neilotoole/slogt"
"github.com/sablierapp/sablier/app/discovery"
"github.com/sablierapp/sablier/app/instance"
"github.com/sablierapp/sablier/app/providers"
Expand Down Expand Up @@ -39,7 +40,7 @@ func TestStopAllUnregisteredInstances(t *testing.T) {
mockProvider.On("Stop", ctx, "instance3").Return(nil)

// Call the function under test
err = discovery.StopAllUnregisteredInstances(ctx, mockProvider, store)
err = discovery.StopAllUnregisteredInstances(ctx, mockProvider, store, slogt.New(t))
assert.NilError(t, err)

// Check expectations
Expand Down Expand Up @@ -71,7 +72,7 @@ func TestStopAllUnregisteredInstances_WithError(t *testing.T) {
mockProvider.On("Stop", ctx, "instance3").Return(nil)

// Call the function under test
err = discovery.StopAllUnregisteredInstances(ctx, mockProvider, store)
err = discovery.StopAllUnregisteredInstances(ctx, mockProvider, store, slogt.New(t))
assert.Error(t, err, "stop error")

// Check expectations
Expand Down
2 changes: 1 addition & 1 deletion app/http/routes/version_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ import (
)

func TestGetVersion(t *testing.T) {

gin.SetMode(gin.TestMode)
version.Branch = "testing"
version.Revision = "8ffebca"

Expand Down
4 changes: 0 additions & 4 deletions app/instance/instance.go
Original file line number Diff line number Diff line change
@@ -1,7 +1,5 @@
package instance

import log "github.com/sirupsen/logrus"

var Ready = "ready"
var NotReady = "not-ready"
var Unrecoverable = "unrecoverable"
Expand All @@ -19,7 +17,6 @@ func (instance State) IsReady() bool {
}

func ErrorInstanceState(name string, err error, desiredReplicas int32) (State, error) {
log.Error(err.Error())
return State{
Name: name,
CurrentReplicas: 0,
Expand All @@ -30,7 +27,6 @@ func ErrorInstanceState(name string, err error, desiredReplicas int32) (State, e
}

func UnrecoverableInstanceState(name string, message string, desiredReplicas int32) State {
log.Warn(message)
return State{
Name: name,
CurrentReplicas: 0,
Expand Down
39 changes: 39 additions & 0 deletions app/logger.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
package app

import (
"github.com/lmittmann/tint"
"github.com/sablierapp/sablier/config"
"log/slog"
"os"
"strings"
"time"
)

// setupLogger constructs the application-wide *slog.Logger.
// Records are written to stderr through a tint handler configured with
// kitchen-clock timestamps, source-location annotations, and the minimum
// level parsed from the logging configuration.
func setupLogger(cfg config.Logging) *slog.Logger {
	handler := tint.NewHandler(os.Stderr, &tint.Options{
		Level:      parseLogLevel(cfg.Level),
		TimeFormat: time.Kitchen,
		AddSource:  true,
	})
	return slog.New(handler)
}

// parseLogLevel converts a textual log level ("debug", "info", "warn",
// "error", in any case) into a slog.Level. Parsing is delegated to
// slog.Level.UnmarshalText, which also accepts numeric offsets such as
// "WARN+2". Unrecognized input logs a warning and falls back to
// slog.LevelInfo.
func parseLogLevel(level string) slog.Level {
	var l slog.Level
	// ToUpper is not strictly required (UnmarshalText is case-insensitive)
	// but normalizes the value and keeps behavior identical to the
	// previous switch-based implementation.
	if err := l.UnmarshalText([]byte(strings.ToUpper(level))); err != nil {
		slog.Warn("invalid log level, defaulting to info", slog.String("level", level))
		return slog.LevelInfo
	}
	return l
}
65 changes: 34 additions & 31 deletions app/providers/docker/docker.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ import (
"github.com/sablierapp/sablier/app/discovery"
"github.com/sablierapp/sablier/app/providers"
"io"
"log/slog"
"strings"

"github.com/docker/docker/api/types/container"
Expand All @@ -16,7 +17,6 @@ import (
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
"github.com/sablierapp/sablier/app/instance"
log "github.com/sirupsen/logrus"
)

// Interface guard
Expand All @@ -25,32 +25,37 @@ var _ providers.Provider = (*DockerClassicProvider)(nil)
type DockerClassicProvider struct {
Client client.APIClient
desiredReplicas int32
l *slog.Logger
}

func NewDockerClassicProvider() (*DockerClassicProvider, error) {
func NewDockerClassicProvider(ctx context.Context, logger *slog.Logger) (*DockerClassicProvider, error) {
logger = logger.With(slog.String("provider", "docker"))
cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
return nil, fmt.Errorf("cannot create docker client: %v", err)
}

serverVersion, err := cli.ServerVersion(context.Background())
serverVersion, err := cli.ServerVersion(ctx)
if err != nil {
return nil, fmt.Errorf("cannot connect to docker host: %v", err)
}

log.Tracef("connection established with docker %s (API %s)", serverVersion.Version, serverVersion.APIVersion)

logger.InfoContext(ctx, "connection established with docker",
slog.String("version", serverVersion.Version),
slog.String("api_version", serverVersion.APIVersion),
)
return &DockerClassicProvider{
Client: cli,
desiredReplicas: 1,
l: logger,
}, nil
}

func (provider *DockerClassicProvider) GetGroups(ctx context.Context) (map[string][]string, error) {
func (p *DockerClassicProvider) GetGroups(ctx context.Context) (map[string][]string, error) {
args := filters.NewArgs()
args.Add("label", fmt.Sprintf("%s=true", discovery.LabelEnable))

containers, err := provider.Client.ContainerList(ctx, container.ListOptions{
containers, err := p.Client.ContainerList(ctx, container.ListOptions{
All: true,
Filters: args,
})
Expand All @@ -70,60 +75,58 @@ func (provider *DockerClassicProvider) GetGroups(ctx context.Context) (map[strin
groups[groupName] = group
}

log.Debug(fmt.Sprintf("%v", groups))

return groups, nil
}

func (provider *DockerClassicProvider) Start(ctx context.Context, name string) error {
return provider.Client.ContainerStart(ctx, name, container.StartOptions{})
func (p *DockerClassicProvider) Start(ctx context.Context, name string) error {
return p.Client.ContainerStart(ctx, name, container.StartOptions{})
}

func (provider *DockerClassicProvider) Stop(ctx context.Context, name string) error {
return provider.Client.ContainerStop(ctx, name, container.StopOptions{})
func (p *DockerClassicProvider) Stop(ctx context.Context, name string) error {
return p.Client.ContainerStop(ctx, name, container.StopOptions{})
}

func (provider *DockerClassicProvider) GetState(ctx context.Context, name string) (instance.State, error) {
spec, err := provider.Client.ContainerInspect(ctx, name)
func (p *DockerClassicProvider) GetState(ctx context.Context, name string) (instance.State, error) {
spec, err := p.Client.ContainerInspect(ctx, name)
if err != nil {
return instance.State{}, err
}

// "created", "running", "paused", "restarting", "removing", "exited", or "dead"
switch spec.State.Status {
case "created", "paused", "restarting", "removing":
return instance.NotReadyInstanceState(name, 0, provider.desiredReplicas), nil
return instance.NotReadyInstanceState(name, 0, p.desiredReplicas), nil
case "running":
if spec.State.Health != nil {
// "starting", "healthy" or "unhealthy"
if spec.State.Health.Status == "healthy" {
return instance.ReadyInstanceState(name, provider.desiredReplicas), nil
return instance.ReadyInstanceState(name, p.desiredReplicas), nil
} else if spec.State.Health.Status == "unhealthy" {
if len(spec.State.Health.Log) >= 1 {
lastLog := spec.State.Health.Log[len(spec.State.Health.Log)-1]
return instance.UnrecoverableInstanceState(name, fmt.Sprintf("container is unhealthy: %s (%d)", lastLog.Output, lastLog.ExitCode), provider.desiredReplicas), nil
return instance.UnrecoverableInstanceState(name, fmt.Sprintf("container is unhealthy: %s (%d)", lastLog.Output, lastLog.ExitCode), p.desiredReplicas), nil
} else {
return instance.UnrecoverableInstanceState(name, "container is unhealthy: no log available", provider.desiredReplicas), nil
return instance.UnrecoverableInstanceState(name, "container is unhealthy: no log available", p.desiredReplicas), nil
}
} else {
return instance.NotReadyInstanceState(name, 0, provider.desiredReplicas), nil
return instance.NotReadyInstanceState(name, 0, p.desiredReplicas), nil
}
}
return instance.ReadyInstanceState(name, provider.desiredReplicas), nil
return instance.ReadyInstanceState(name, p.desiredReplicas), nil
case "exited":
if spec.State.ExitCode != 0 {
return instance.UnrecoverableInstanceState(name, fmt.Sprintf("container exited with code \"%d\"", spec.State.ExitCode), provider.desiredReplicas), nil
return instance.UnrecoverableInstanceState(name, fmt.Sprintf("container exited with code \"%d\"", spec.State.ExitCode), p.desiredReplicas), nil
}
return instance.NotReadyInstanceState(name, 0, provider.desiredReplicas), nil
return instance.NotReadyInstanceState(name, 0, p.desiredReplicas), nil
case "dead":
return instance.UnrecoverableInstanceState(name, "container in \"dead\" state cannot be restarted", provider.desiredReplicas), nil
return instance.UnrecoverableInstanceState(name, "container in \"dead\" state cannot be restarted", p.desiredReplicas), nil
default:
return instance.UnrecoverableInstanceState(name, fmt.Sprintf("container status \"%s\" not handled", spec.State.Status), provider.desiredReplicas), nil
return instance.UnrecoverableInstanceState(name, fmt.Sprintf("container status \"%s\" not handled", spec.State.Status), p.desiredReplicas), nil
}
}

func (provider *DockerClassicProvider) NotifyInstanceStopped(ctx context.Context, instance chan<- string) {
msgs, errs := provider.Client.Events(ctx, types.EventsOptions{
func (p *DockerClassicProvider) NotifyInstanceStopped(ctx context.Context, instance chan<- string) {
msgs, errs := p.Client.Events(ctx, types.EventsOptions{
Filters: filters.NewArgs(
filters.Arg("scope", "local"),
filters.Arg("type", string(events.ContainerEventType)),
Expand All @@ -134,21 +137,21 @@ func (provider *DockerClassicProvider) NotifyInstanceStopped(ctx context.Context
select {
case msg, ok := <-msgs:
if !ok {
log.Error("provider event stream is closed")
p.l.ErrorContext(ctx, "event stream closed")
return
}
// Send the container that has died to the channel
instance <- strings.TrimPrefix(msg.Actor.Attributes["name"], "/")
case err, ok := <-errs:
if !ok {
log.Error("provider event stream is closed", err)
p.l.ErrorContext(ctx, "event stream closed")
return
}
if errors.Is(err, io.EOF) {
log.Debug("provider event stream closed")
p.l.ErrorContext(ctx, "event stream closed")
return
}
log.Error("provider event stream error", err)
p.l.ErrorContext(ctx, "event stream error", slog.Any("error", err))
case <-ctx.Done():
return
}
Expand Down
Loading

0 comments on commit 8844a36

Please sign in to comment.