Introduce metric.Collector interface #362

Closed · wants to merge 3 commits
26 changes: 14 additions & 12 deletions cache/disk/disk.go
@@ -17,25 +17,18 @@ import (
"syscall"

"github.com/buchgr/bazel-remote/cache"
"github.com/buchgr/bazel-remote/metric"
"github.com/buchgr/bazel-remote/utils/tempfile"

"github.com/djherbis/atime"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"

pb "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2"
"github.com/golang/protobuf/proto"
)

var (
cacheHits = promauto.NewCounter(prometheus.CounterOpts{
Name: "bazel_remote_disk_cache_hits",
Help: "The total number of disk backend cache hits",
})
cacheMisses = promauto.NewCounter(prometheus.CounterOpts{
Name: "bazel_remote_disk_cache_misses",
Help: "The total number of disk backend cache misses",
})
cacheHits metric.Counter
cacheMisses metric.Counter
)

var tfc = tempfile.NewCreator()
@@ -70,7 +63,7 @@ const emptySha256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b785
// New returns a new instance of a filesystem-based cache rooted at `dir`,
// with a maximum size of `maxSizeBytes` bytes and an optional backend `proxy`.
// Cache is safe for concurrent use.
func New(dir string, maxSizeBytes int64, proxy cache.Proxy) *Cache {
func New(dir string, maxSizeBytes int64, proxy cache.Proxy, collector metric.Collector) *Cache {
// Create the directory structure.
hexLetters := []byte("0123456789abcdef")
for _, c1 := range hexLetters {
@@ -105,7 +98,7 @@ func New(dir string, maxSizeBytes int64, proxy cache.Proxy) *Cache {
c := &Cache{
dir: filepath.Clean(dir),
proxy: proxy,
lru: NewSizedLRU(maxSizeBytes, onEvict),
lru: NewSizedLRU(maxSizeBytes, onEvict, collector),
}

err := c.migrateDirectories()
@@ -118,6 +111,15 @@ func New(dir string, maxSizeBytes int64, proxy cache.Proxy) *Cache {
log.Fatalf("Loading of existing cache entries failed due to error: %v", err)
}

// Setup metrics
if collector != nil {
cacheHits = collector.NewCounter("bazel_remote_disk_cache_hits")
cacheMisses = collector.NewCounter("bazel_remote_disk_cache_misses")
} else {
cacheHits = metric.NoOpCounter()
cacheMisses = metric.NoOpCounter()
}

return c
}

32 changes: 15 additions & 17 deletions cache/disk/lru.go
@@ -5,25 +5,13 @@ import (
"errors"
"fmt"

"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/buchgr/bazel-remote/metric"
)

var (
gaugeCacheSizeBytes = promauto.NewGauge(prometheus.GaugeOpts{
Name: "bazel_remote_disk_cache_size_bytes",
Help: "The current number of bytes in the disk backend",
})

counterEvictedBytes = promauto.NewCounter(prometheus.CounterOpts{
Name: "bazel_remote_disk_cache_evicted_bytes_total",
Help: "The total number of bytes evicted from disk backend, due to full cache",
})

counterOverwrittenBytes = promauto.NewCounter(prometheus.CounterOpts{
Name: "bazel_remote_disk_cache_overwritten_bytes_total",
Help: "The total number of bytes removed from disk backend, due to put of already existing key",
})
gaugeCacheSizeBytes metric.Gauge
counterEvictedBytes metric.Counter
counterOverwrittenBytes metric.Counter
)

type sizedItem interface {
@@ -84,7 +72,17 @@ type entry struct {
}

// NewSizedLRU returns a new sizedLRU cache
func NewSizedLRU(maxSize int64, onEvict EvictCallback) SizedLRU {
func NewSizedLRU(maxSize int64, onEvict EvictCallback, collector metric.Collector) SizedLRU {
if collector != nil {
gaugeCacheSizeBytes = collector.NewGauge("bazel_remote_disk_cache_size_bytes")
counterEvictedBytes = collector.NewCounter("bazel_remote_disk_cache_evicted_bytes_total")
counterOverwrittenBytes = collector.NewCounter("bazel_remote_disk_cache_overwritten_bytes_total")
} else {
gaugeCacheSizeBytes = metric.NoOpGauge()
counterEvictedBytes = metric.NoOpCounter()
counterOverwrittenBytes = metric.NoOpCounter()
}

return &sizedLRU{
maxSize: maxSize,
ll: list.New(),
5 changes: 3 additions & 2 deletions cache/gcsproxy/gcsproxy.go
@@ -10,14 +10,15 @@ import (

"github.com/buchgr/bazel-remote/cache"
"github.com/buchgr/bazel-remote/cache/httpproxy"
"github.com/buchgr/bazel-remote/metric"

"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
)

// New creates a cache that proxies requests to Google Cloud Storage.
func New(bucket string, useDefaultCredentials bool, jsonCredentialsFile string,
accessLogger cache.Logger, errorLogger cache.Logger, numUploaders, maxQueuedUploads int) (cache.Proxy, error) {
accessLogger cache.Logger, errorLogger cache.Logger, numUploaders, maxQueuedUploads int, collector metric.Collector) (cache.Proxy, error) {
var remoteClient *http.Client
var err error

@@ -53,5 +54,5 @@ func New(bucket string, useDefaultCredentials bool, jsonCredentialsFile string,
Path: bucket,
}

return httpproxy.New(&baseURL, remoteClient, accessLogger, errorLogger, numUploaders, maxQueuedUploads), nil
return httpproxy.New(&baseURL, remoteClient, accessLogger, errorLogger, numUploaders, maxQueuedUploads, collector), nil
}
24 changes: 12 additions & 12 deletions cache/httpproxy/httpproxy.go
@@ -12,9 +12,7 @@ import (
"strconv"

"github.com/buchgr/bazel-remote/cache"

"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/buchgr/bazel-remote/metric"
)

type uploadReq struct {
@@ -33,14 +31,8 @@ type remoteHTTPProxyCache struct {
}

var (
cacheHits = promauto.NewCounter(prometheus.CounterOpts{
Name: "bazel_remote_http_cache_hits",
Help: "The total number of HTTP backend cache hits",
})
cacheMisses = promauto.NewCounter(prometheus.CounterOpts{
Name: "bazel_remote_http_cache_misses",
Help: "The total number of HTTP backend cache misses",
})
cacheHits metric.Counter
cacheMisses metric.Counter
)

func uploadFile(remote *http.Client, baseURL *url.URL, accessLogger cache.Logger,
@@ -84,7 +76,15 @@ func uploadFile(remote *http.Client, baseURL *url.URL, accessLogger cache.Logger,

// New creates a cache that proxies requests to a HTTP remote cache.
func New(baseURL *url.URL, remote *http.Client, accessLogger cache.Logger,
errorLogger cache.Logger, numUploaders, maxQueuedUploads int) cache.Proxy {
errorLogger cache.Logger, numUploaders, maxQueuedUploads int, collector metric.Collector) cache.Proxy {

if collector != nil {
cacheHits = collector.NewCounter("bazel_remote_http_cache_hits")
cacheMisses = collector.NewCounter("bazel_remote_http_cache_misses")
} else {
cacheHits = metric.NoOpCounter()
cacheMisses = metric.NoOpCounter()
}

proxy := &remoteHTTPProxyCache{
remote: remote,
23 changes: 12 additions & 11 deletions cache/s3proxy/s3proxy.go
@@ -9,10 +9,9 @@ import (

"github.com/buchgr/bazel-remote/cache"
"github.com/buchgr/bazel-remote/config"
"github.com/buchgr/bazel-remote/metric"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)

type uploadReq struct {
@@ -33,25 +32,27 @@ type s3Cache struct {
}

var (
cacheHits = promauto.NewCounter(prometheus.CounterOpts{
Name: "bazel_remote_s3_cache_hits",
Help: "The total number of s3 backend cache hits",
})
cacheMisses = promauto.NewCounter(prometheus.CounterOpts{
Name: "bazel_remote_s3_cache_misses",
Help: "The total number of s3 backend cache misses",
})
cacheHits metric.Counter
cacheMisses metric.Counter
)

// Used in place of minio's verbose "NoSuchKey" error.
var errNotFound = errors.New("NOT FOUND")

// New returns a new instance of the S3-API based cache
func New(s3Config *config.S3CloudStorageConfig, accessLogger cache.Logger,
errorLogger cache.Logger, numUploaders, maxQueuedUploads int) cache.Proxy {
errorLogger cache.Logger, numUploaders, maxQueuedUploads int, collector metric.Collector) cache.Proxy {

fmt.Println("Using S3 backend.")

if collector != nil {
cacheHits = collector.NewCounter("bazel_remote_http_cache_hits")
cacheMisses = collector.NewCounter("bazel_remote_http_cache_misses")
} else {
cacheHits = metric.NoOpCounter()
cacheMisses = metric.NoOpCounter()
}

var minioCore *minio.Core
var err error

37 changes: 14 additions & 23 deletions main.go
@@ -16,6 +16,8 @@ import (
"github.com/buchgr/bazel-remote/cache/disk"
"github.com/buchgr/bazel-remote/cache/gcsproxy"
"github.com/buchgr/bazel-remote/cache/s3proxy"
"github.com/buchgr/bazel-remote/metric"
"github.com/buchgr/bazel-remote/metric/prometheus"

"github.com/buchgr/bazel-remote/cache/httpproxy"

@@ -25,10 +27,6 @@ import (
"github.com/buchgr/bazel-remote/utils/rlimit"

grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
httpmetrics "github.com/slok/go-http-metrics/metrics/prometheus"
middleware "github.com/slok/go-http-metrics/middleware"
middlewarestd "github.com/slok/go-http-metrics/middleware/std"
"github.com/urfave/cli/v2"

"google.golang.org/grpc"
@@ -43,9 +41,6 @@ const (
// is set through linker options.
var gitCommit string

// durationBuckets is the buckets used for Prometheus histograms in seconds.
var durationBuckets = []float64{.5, 1, 2.5, 5, 10, 20, 40, 80, 160, 320}

func main() {

log.SetFlags(logFlags)
@@ -312,11 +307,16 @@ func main() {
accessLogger := log.New(os.Stdout, "", logFlags)
errorLogger := log.New(os.Stderr, "", logFlags)

var metricCollector metric.Collector
if c.EnableEndpointMetrics {
metricCollector = prometheus.NewCollector()
}

var proxyCache cache.Proxy
if c.GoogleCloudStorage != nil {
proxyCache, err = gcsproxy.New(c.GoogleCloudStorage.Bucket,
c.GoogleCloudStorage.UseDefaultCredentials, c.GoogleCloudStorage.JSONCredentialsFile,
accessLogger, errorLogger, c.NumUploaders, c.MaxQueuedUploads)
accessLogger, errorLogger, c.NumUploaders, c.MaxQueuedUploads, metricCollector)
if err != nil {
log.Fatal(err)
}
@@ -328,12 +328,12 @@
log.Fatal(err)
}
proxyCache = httpproxy.New(baseURL,
httpClient, accessLogger, errorLogger, c.NumUploaders, c.MaxQueuedUploads)
httpClient, accessLogger, errorLogger, c.NumUploaders, c.MaxQueuedUploads, metricCollector)
} else if c.S3CloudStorage != nil {
proxyCache = s3proxy.New(c.S3CloudStorage, accessLogger, errorLogger, c.NumUploaders, c.MaxQueuedUploads)
proxyCache = s3proxy.New(c.S3CloudStorage, accessLogger, errorLogger, c.NumUploaders, c.MaxQueuedUploads, metricCollector)
}

diskCache := disk.New(c.Dir, int64(c.MaxSize)*1024*1024*1024, proxyCache)
diskCache := disk.New(c.Dir, int64(c.MaxSize)*1024*1024*1024, proxyCache, metricCollector)

mux := http.NewServeMux()
httpServer := &http.Server{
@@ -365,17 +365,8 @@ func main() {
}
log.Println("Mangling non-empty instance names with AC keys:", acKeyManglingStatus)

if c.EnableEndpointMetrics {
metricsMdlw := middleware.New(middleware.Config{
Recorder: httpmetrics.NewRecorder(httpmetrics.Config{
DurationBuckets: durationBuckets,
}),
})
mux.Handle("/metrics", middlewarestd.Handler("metrics", metricsMdlw, promhttp.Handler()))
mux.Handle("/status", middlewarestd.Handler("status", metricsMdlw, http.HandlerFunc(h.StatusPageHandler)))
mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
middlewarestd.Handler(r.Method, metricsMdlw, http.HandlerFunc(cacheHandler)).ServeHTTP(w, r)
})
if metricCollector != nil {
prometheus.WrapEndpoints(mux, cacheHandler, h.StatusPageHandler)
} else {
mux.HandleFunc("/status", h.StatusPageHandler)
mux.HandleFunc("/", cacheHandler)
@@ -397,7 +388,7 @@
if c.EnableEndpointMetrics {
streamInterceptors = append(streamInterceptors, grpc_prometheus.StreamServerInterceptor)
unaryInterceptors = append(unaryInterceptors, grpc_prometheus.UnaryServerInterceptor)
grpc_prometheus.EnableHandlingTimeHistogram(grpc_prometheus.WithHistogramBuckets(durationBuckets))
grpc_prometheus.EnableHandlingTimeHistogram(grpc_prometheus.WithHistogramBuckets([]float64{.5, 1, 2.5, 5, 10, 20, 40, 80, 160, 320}))
}

if len(c.TLSCertFile) > 0 && len(c.TLSKeyFile) > 0 {
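Note: the new metric/prometheus package referenced from main.go (prometheus.NewCollector and prometheus.WrapEndpoints) is not included in this diff. A plausible sketch of WrapEndpoints, reconstructed from the middleware code it replaces above; the signature is inferred from the call site and the parameter names are assumptions:

package prometheus

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
	httpmetrics "github.com/slok/go-http-metrics/metrics/prometheus"
	middleware "github.com/slok/go-http-metrics/middleware"
	middlewarestd "github.com/slok/go-http-metrics/middleware/std"
)

// durationBuckets mirrors the histogram buckets (in seconds) that this
// change removes from main.go.
var durationBuckets = []float64{.5, 1, 2.5, 5, 10, 20, 40, 80, 160, 320}

// WrapEndpoints registers instrumented /metrics, /status and catch-all
// handlers on mux, preserving the behaviour of the removed main.go code.
func WrapEndpoints(mux *http.ServeMux, cacheHandler, statusHandler http.HandlerFunc) {
	metricsMdlw := middleware.New(middleware.Config{
		Recorder: httpmetrics.NewRecorder(httpmetrics.Config{
			DurationBuckets: durationBuckets,
		}),
	})
	mux.Handle("/metrics", middlewarestd.Handler("metrics", metricsMdlw, promhttp.Handler()))
	mux.Handle("/status", middlewarestd.Handler("status", metricsMdlw, statusHandler))
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		middlewarestd.Handler(r.Method, metricsMdlw, cacheHandler).ServeHTTP(w, r)
	})
}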
34 changes: 34 additions & 0 deletions metric/collector.go
@@ -0,0 +1,34 @@
package metric

// Counter is a standard metric counter
type Counter interface {
Inc()
Add(value float64)
}

type noop struct{}

func (c *noop) Inc() {}
func (c *noop) Set(v float64) {}
func (c *noop) Add(value float64) {}

// NoOpCounter is a Counter that does nothing
func NoOpCounter() Counter {
return &noop{}
}

// Gauge is a standard metric gauge
type Gauge interface {
Set(value float64)
}

// NoOpGauge is a Gauge that does nothing
func NoOpGauge() Gauge {
return &noop{}
}

// Collector is an interface for creating metrics
type Collector interface {
NewCounter(name string) Counter
NewGauge(name string) Gauge
}
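The Collector implementation behind prometheus.NewCollector is likewise not shown in this excerpt. A minimal sketch, assuming it delegates straight to promauto; note that Collector only carries a metric name, so the Help strings from the deleted metric definitions are dropped unless the interface later grows a help parameter:

package prometheus

import (
	"github.com/buchgr/bazel-remote/metric"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

type collector struct{}

// NewCollector returns a metric.Collector backed by the default
// Prometheus registry.
func NewCollector() metric.Collector {
	return &collector{}
}

func (c *collector) NewCounter(name string) metric.Counter {
	return promauto.NewCounter(prometheus.CounterOpts{Name: name})
}

func (c *collector) NewGauge(name string) metric.Gauge {
	return promauto.NewGauge(prometheus.GaugeOpts{Name: name})
}

With this wiring, components handed a nil Collector (i.e. when c.EnableEndpointMetrics is false) fall back to the NoOpCounter/NoOpGauge implementations above, so the instrumented code paths can call Inc() and Set() unconditionally without nil checks.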