diff --git a/CHANGELOG.md b/CHANGELOG.md index 0ab86e9eba..a6751e6e81 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,8 @@ Main (unreleased) - Fixed an issue in the `prometheus.exporter.postgres` component that would leak goroutines when the target was not reachable (@dehaansa) +- Fixed issue with reloading configuration and prometheus metrics duplication in `prometheus.write.queue`. (@mattdurham) + ### Other changes - Change the stability of the `livedebugging` feature from "experimental" to "generally available". (@wildum) diff --git a/Makefile b/Makefile index 579eea9a7d..b2a6146b55 100644 --- a/Makefile +++ b/Makefile @@ -141,7 +141,7 @@ lint: alloylint # final command runs tests for all other submodules. test: $(GO_ENV) go test $(GO_FLAGS) -race $(shell go list ./... | grep -v /integration-tests/) - $(GO_ENV) go test $(GO_FLAGS) ./internal/static/integrations/node_exporter ./internal/static/logs ./internal/component/otelcol/processor/tail_sampling ./internal/component/loki/source/file ./internal/component/loki/source/docker ./internal/component/prometheus/write/queue/serialization ./internal/component/prometheus/write/queue/network + $(GO_ENV) go test $(GO_FLAGS) ./internal/static/integrations/node_exporter ./internal/static/logs ./internal/component/otelcol/processor/tail_sampling ./internal/component/loki/source/file ./internal/component/loki/source/docker $(GO_ENV) find . -name go.mod -not -path "./go.mod" -execdir go test -race ./... 
\; test-packages: diff --git a/go.mod b/go.mod index 41fa2a8f76..5ab95d477b 100644 --- a/go.mod +++ b/go.mod @@ -72,6 +72,7 @@ require ( github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc github.com/grafana/tail v0.0.0-20230510142333-77b18831edf0 github.com/grafana/vmware_exporter v0.0.5-beta + github.com/grafana/walqueue v0.0.0-20241114193920-da8174120940 github.com/hashicorp/consul/api v1.29.5 github.com/hashicorp/go-discover v0.0.0-20230724184603-e89ebd1b2f65 github.com/hashicorp/go-multierror v1.1.1 @@ -165,7 +166,7 @@ require ( github.com/prometheus/mysqld_exporter v0.14.0 github.com/prometheus/node_exporter v1.6.0 github.com/prometheus/procfs v0.15.1 - github.com/prometheus/prometheus v0.54.1 // a.k.a. v2.51.2 + github.com/prometheus/prometheus v0.55.1 // a.k.a. v2.55.1 github.com/prometheus/snmp_exporter v0.26.0 // if you update the snmp_exporter version, make sure to update the SNMP_VERSION in _index github.com/prometheus/statsd_exporter v0.22.8 github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052 @@ -468,7 +469,7 @@ require ( github.com/dustin/go-humanize v1.0.1 // indirect github.com/dvsekhvalnov/jose2go v1.6.0 // indirect github.com/eapache/go-resiliency v1.7.0 // indirect - github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 + github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect github.com/eapache/queue v1.1.0 // indirect github.com/edsrzf/mmap-go v1.1.0 // indirect github.com/efficientgo/core v1.0.0-rc.2 // indirect @@ -744,7 +745,7 @@ require ( github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect github.com/tencentcloud/tencentcloud-sdk-go v1.0.162 // indirect github.com/tg123/go-htpasswd v1.2.2 // indirect - github.com/tinylib/msgp v1.2.2 + github.com/tinylib/msgp v1.2.4 // indirect github.com/tklauser/go-sysconf v0.3.13 // indirect github.com/tklauser/numcpus v0.7.0 // indirect github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80 // 
indirect @@ -752,7 +753,7 @@ require ( github.com/vertica/vertica-sql-go v1.3.3 // indirect github.com/vishvananda/netlink v1.2.1-beta.2 // indirect github.com/vishvananda/netns v0.0.4 // indirect - github.com/vladopajic/go-actor v0.9.0 + github.com/vladopajic/go-actor v0.9.0 // indirect github.com/vmware/govmomi v0.44.1 // indirect github.com/vultr/govultr/v2 v2.17.2 // indirect github.com/willf/bitset v1.1.11 // indirect @@ -789,7 +790,7 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.7.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.31.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 // indirect go.opentelemetry.io/otel/log v0.7.0 // indirect @@ -942,3 +943,6 @@ exclude ( ) replace github.com/prometheus/procfs => github.com/prometheus/procfs v0.12.0 + +// This is to handle issues with synchronous mailbox and closing channels. 
+replace github.com/vladopajic/go-actor => github.com/grafana/go-actor v0.0.0-20241113133736-e18c4a5c12f4 diff --git a/go.sum b/go.sum index 6a139e700d..409391ecb1 100644 --- a/go.sum +++ b/go.sum @@ -1223,6 +1223,8 @@ github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2 h1:qhugDMdQ4 github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2/go.mod h1:w/aiO1POVIeXUQyl0VQSZjl5OAGDTL5aX+4v0RA1tcw= github.com/grafana/dskit v0.0.0-20240104111617-ea101a3b86eb h1:AWE6+kvtE18HP+lRWNUCyvymyrFSXs6TcS2vXIXGIuw= github.com/grafana/dskit v0.0.0-20240104111617-ea101a3b86eb/go.mod h1:kkWM4WUV230bNG3urVRWPBnSJHs64y/0RmWjftnnn0c= +github.com/grafana/go-actor v0.0.0-20241113133736-e18c4a5c12f4 h1:jid0h8vbKxOfHbVu/5exi6fz2y9/vKmtcKtTfuXElMY= +github.com/grafana/go-actor v0.0.0-20241113133736-e18c4a5c12f4/go.mod h1:b4thGZ60fnjC3TaJ4XeCN+uZXM+ec27t3ibqFfd8iAk= github.com/grafana/go-gelf/v2 v2.0.1 h1:BOChP0h/jLeD+7F9mL7tq10xVkDG15he3T1zHuQaWak= github.com/grafana/go-gelf/v2 v2.0.1/go.mod h1:lexHie0xzYGwCgiRGcvZ723bSNyNI8ZRD4s0CLobh90= github.com/grafana/go-offsets-tracker v0.1.7 h1:2zBQ7iiGzvyXY7LA8kaaSiEqH/Yx82UcfRabbY5aOG4= @@ -1276,6 +1278,8 @@ github.com/grafana/tail v0.0.0-20230510142333-77b18831edf0 h1:bjh0PVYSVVFxzINqPF github.com/grafana/tail v0.0.0-20230510142333-77b18831edf0/go.mod h1:7t5XR+2IA8P2qggOAHTj/GCZfoLBle3OvNSYh1VkRBU= github.com/grafana/vmware_exporter v0.0.5-beta h1:2JCqzIWJzns8FN78wPsueC9rT3e3kZo2OUoL5kGMjdM= github.com/grafana/vmware_exporter v0.0.5-beta/go.mod h1:1CecUZII0zVsVcHtNfNeTTcxK7EksqAsAn/TCCB0Mh4= +github.com/grafana/walqueue v0.0.0-20241114193920-da8174120940 h1:g086EMuMz94kliAaT5RanZ+R/wp5JdD4MZdoCWg0oDQ= +github.com/grafana/walqueue v0.0.0-20241114193920-da8174120940/go.mod h1:vaxO1V0q1dptHEiTIMW1krRy+aehkYyC2YGrKPyGxHY= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grobie/gomemcache v0.0.0-20230213081705-239240bbc445 
h1:FlKQKUYPZ5yDCN248M3R7x8yu2E3yEZ0H7aLomE4EoE= github.com/grobie/gomemcache v0.0.0-20230213081705-239240bbc445/go.mod h1:L69/dBlPQlWkcnU76WgcppK5e4rrxzQdi6LhLnK/ytA= @@ -2446,8 +2450,8 @@ github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhV github.com/tilinna/clock v1.1.0 h1:6IQQQCo6KoBxVudv6gwtY8o4eDfhHo8ojA5dP0MfhSs= github.com/tilinna/clock v1.1.0/go.mod h1:ZsP7BcY7sEEz7ktc0IVy8Us6boDrK8VradlKRUGfOao= github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg= -github.com/tinylib/msgp v1.2.2 h1:iHiBE1tJQwFI740SPEPkGE8cfhNfrqOYRlH450BnC/4= -github.com/tinylib/msgp v1.2.2/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= +github.com/tinylib/msgp v1.2.4 h1:yLFeUGostXXSGW5vxfT5dXG/qzkn4schv2I7at5+hVU= +github.com/tinylib/msgp v1.2.4/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= github.com/tklauser/go-sysconf v0.3.13 h1:GBUpcahXSpR2xN01jhkNAbTLRk2Yzgggk8IM08lq3r4= github.com/tklauser/go-sysconf v0.3.13/go.mod h1:zwleP4Q4OehZHGn4CYZDipCgg9usW5IJePewFCGVEa0= @@ -2488,8 +2492,6 @@ github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1 github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/vjeantet/grok v1.0.0/go.mod h1:/FWYEVYekkm+2VjcFmO9PufDU5FgXHUz9oy2EGqmQBo= -github.com/vladopajic/go-actor v0.9.0 h1:fFj5RDGo4YZ6XCx2BWCThx/efOGRwokTpsc3CWHVEIU= -github.com/vladopajic/go-actor v0.9.0/go.mod h1:CKVRXStfjEIi7K74SyFQv/KfM8a/Po57bxmbBGv9YwE= github.com/vmihailenco/msgpack/v4 v4.3.13 h1:A2wsiTbvp63ilDaWmsk2wjx6xZdxQOvpiNlKBGKKXKI= github.com/vmihailenco/msgpack/v4 v4.3.13/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8= diff --git 
a/internal/component/otelcol/exporter/datadog/config/config_datadog_test.go b/internal/component/otelcol/exporter/datadog/config/config_datadog_test.go index bb4c654bb1..a6b640a351 100644 --- a/internal/component/otelcol/exporter/datadog/config/config_datadog_test.go +++ b/internal/component/otelcol/exporter/datadog/config/config_datadog_test.go @@ -221,4 +221,3 @@ func TestUnmarshalDatadogLogsConfig(t *testing.T) { }) } } - diff --git a/internal/component/prometheus/write/queue/component.go b/internal/component/prometheus/write/queue/component.go index 29c22253b4..9518389f2c 100644 --- a/internal/component/prometheus/write/queue/component.go +++ b/internal/component/prometheus/write/queue/component.go @@ -8,12 +8,8 @@ import ( "github.com/go-kit/log" "github.com/grafana/alloy/internal/component" - "github.com/grafana/alloy/internal/component/prometheus/write/queue/filequeue" - "github.com/grafana/alloy/internal/component/prometheus/write/queue/network" - "github.com/grafana/alloy/internal/component/prometheus/write/queue/serialization" - "github.com/grafana/alloy/internal/component/prometheus/write/queue/types" "github.com/grafana/alloy/internal/featuregate" - "github.com/prometheus/client_golang/prometheus" + promqueue "github.com/grafana/walqueue/implementations/prometheus" "github.com/prometheus/prometheus/storage" ) @@ -34,7 +30,7 @@ func NewComponent(opts component.Options, args Arguments) (*Queue, error) { opts: opts, args: args, log: opts.Logger, - endpoints: map[string]*endpoint{}, + endpoints: map[string]promqueue.Queue{}, } err := s.createEndpoints() @@ -58,7 +54,7 @@ type Queue struct { args Arguments opts component.Options log log.Logger - endpoints map[string]*endpoint + endpoints map[string]promqueue.Queue } // Run starts the component, blocking until ctx is canceled or the component @@ -90,60 +86,52 @@ func (s *Queue) Update(args component.Arguments) error { defer s.mut.Unlock() newArgs := args.(Arguments) - sync.OnceFunc(func() { - 
s.opts.OnStateChange(Exports{Receiver: s}) - }) // If they are the same do nothing. if reflect.DeepEqual(newArgs, s.args) { return nil } s.args = newArgs - // TODO @mattdurham need to cycle through the endpoints figuring out what changed instead of this global stop and start. - // This will cause data in the endpoints and their children to be lost. - if len(s.endpoints) > 0 { - for _, ep := range s.endpoints { + // Figure out which endpoint is new, which is updated, and which needs to be gone. + // So add all the endpoints and then if they are in the new config then remove them from deletable. + deletableEndpoints := make(map[string]struct{}) + for k := range s.endpoints { + deletableEndpoints[k] = struct{}{} + } + + for _, epCfg := range s.args.Endpoints { + delete(deletableEndpoints, epCfg.Name) + ep, found := s.endpoints[epCfg.Name] + // If found stop and recreate. + if found { + // Stop and lose all the signals in the queue. + // TODO drain the signals and re-add them ep.Stop() } - s.endpoints = map[string]*endpoint{} - } - err := s.createEndpoints() - if err != nil { - return err + nativeCfg := epCfg.ToNativeType() + // Create + end, err := promqueue.NewQueue(epCfg.Name, nativeCfg, filepath.Join(s.opts.DataPath, epCfg.Name, "wal"), uint32(s.args.Persistence.MaxSignalsToBatch), s.args.Persistence.BatchInterval, s.args.TTL, s.opts.Registerer, "alloy", s.opts.Logger) + if err != nil { + return err + } + end.Start() + s.endpoints[epCfg.Name] = end + } - for _, ep := range s.endpoints { - ep.Start() + // Now we need to figure out the endpoints that were not touched and able to be deleted. + for name := range deletableEndpoints { + s.endpoints[name].Stop() + delete(s.endpoints, name) } return nil } func (s *Queue) createEndpoints() error { - // @mattdurham not in love with this code. 
for _, ep := range s.args.Endpoints { - reg := prometheus.WrapRegistererWith(prometheus.Labels{"endpoint": ep.Name}, s.opts.Registerer) - stats := types.NewStats("alloy", "queue_series", reg) - stats.SeriesBackwardsCompatibility(reg) - meta := types.NewStats("alloy", "queue_metadata", reg) - meta.MetaBackwardsCompatibility(reg) - cfg := ep.ToNativeType() - client, err := network.New(cfg, s.log, stats.UpdateNetwork, meta.UpdateNetwork) - if err != nil { - return err - } - end := NewEndpoint(client, nil, s.args.TTL, s.opts.Logger) - fq, err := filequeue.NewQueue(filepath.Join(s.opts.DataPath, ep.Name, "wal"), func(ctx context.Context, dh types.DataHandle) { - _ = end.incoming.Send(ctx, dh) - }, s.opts.Logger) - if err != nil { - return err - } - serial, err := serialization.NewSerializer(types.SerializerConfig{ - MaxSignalsInBatch: uint32(s.args.Persistence.MaxSignalsToBatch), - FlushFrequency: s.args.Persistence.BatchInterval, - }, fq, stats.UpdateSerializer, s.opts.Logger) + nativeCfg := ep.ToNativeType() + end, err := promqueue.NewQueue(ep.Name, nativeCfg, filepath.Join(s.opts.DataPath, ep.Name, "wal"), uint32(s.args.Persistence.MaxSignalsToBatch), s.args.Persistence.BatchInterval, s.args.TTL, s.opts.Registerer, "alloy", s.opts.Logger) if err != nil { return err } - end.serializer = serial s.endpoints[ep.Name] = end } return nil @@ -158,7 +146,7 @@ func (c *Queue) Appender(ctx context.Context) storage.Appender { children := make([]storage.Appender, 0) for _, ep := range c.endpoints { - children = append(children, serialization.NewAppender(ctx, c.args.TTL, ep.serializer, c.opts.Logger)) + children = append(children, ep.Appender(ctx)) } return &fanout{children: children} } diff --git a/internal/component/prometheus/write/queue/e2e_stats_test.go b/internal/component/prometheus/write/queue/e2e_stats_test.go index 16c1e33fe0..bb4c059d1c 100644 --- a/internal/component/prometheus/write/queue/e2e_stats_test.go +++ 
b/internal/component/prometheus/write/queue/e2e_stats_test.go @@ -615,10 +615,7 @@ func runE2eStats(t *testing.T, test statsTest) { } require.NoError(t, app.Commit()) }() - tm := time.NewTimer(8 * time.Second) - <-tm.C - cancel() - + time.Sleep(5 * time.Second) require.Eventually(t, func() bool { dtos, gatherErr := reg.Gather() require.NoError(t, gatherErr) @@ -632,9 +629,13 @@ func runE2eStats(t *testing.T, test statsTest) { // Make sure we have a few metrics. return found > 1 }, 10*time.Second, 1*time.Second) + metrics := make(map[string]float64) dtos, err := reg.Gather() require.NoError(t, err) + // Cancel needs to be here since it will unregister the metrics. + cancel() + // Get the value of metrics. for _, d := range dtos { metrics[*d.Name] = getValue(d) diff --git a/internal/component/prometheus/write/queue/e2e_test.go b/internal/component/prometheus/write/queue/e2e_test.go index 5593a6cbdc..dfd3963d47 100644 --- a/internal/component/prometheus/write/queue/e2e_test.go +++ b/internal/component/prometheus/write/queue/e2e_test.go @@ -14,9 +14,9 @@ import ( "github.com/golang/snappy" "github.com/grafana/alloy/internal/component" - "github.com/grafana/alloy/internal/component/prometheus/write/queue/types" "github.com/grafana/alloy/internal/runtime/logging" "github.com/grafana/alloy/internal/util" + "github.com/grafana/walqueue/types" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" @@ -155,6 +155,7 @@ func runTest(t *testing.T, add func(index int, appendable storage.Appender) (flo require.NoError(t, err) ctx := context.Background() ctx, cancel := context.WithCancel(ctx) + go func() { runErr := c.Run(ctx) require.NoError(t, runErr) @@ -178,6 +179,7 @@ func runTest(t *testing.T, add func(index int, appendable storage.Appender) (flo require.NoError(t, app.Commit()) }() } + // This is a weird use case to handle eventually. 
// With race turned on this can take a long time. tm := time.NewTimer(20 * time.Second) @@ -186,6 +188,7 @@ func runTest(t *testing.T, add func(index int, appendable storage.Appender) (flo case <-tm.C: require.Truef(t, false, "failed to collect signals in the appropriate time") } + cancel() for i := 0; i < samples.Len(); i++ { @@ -213,7 +216,7 @@ func runTest(t *testing.T, add func(index int, appendable storage.Appender) (flo } require.Eventuallyf(t, func() bool { return types.OutStandingTimeSeriesBinary.Load() == 0 - }, 2*time.Second, 100*time.Millisecond, "there are %d time series not collected", types.OutStandingTimeSeriesBinary.Load()) + }, 20*time.Second, 1*time.Second, "there are %d time series not collected", types.OutStandingTimeSeriesBinary.Load()) } func handlePost(t *testing.T, _ http.ResponseWriter, r *http.Request) ([]prompb.TimeSeries, []prompb.MetricMetadata) { diff --git a/internal/component/prometheus/write/queue/endpoint.go b/internal/component/prometheus/write/queue/endpoint.go deleted file mode 100644 index de446ae808..0000000000 --- a/internal/component/prometheus/write/queue/endpoint.go +++ /dev/null @@ -1,133 +0,0 @@ -package queue - -import ( - "context" - "strconv" - "time" - - snappy "github.com/eapache/go-xerial-snappy" - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/grafana/alloy/internal/component/prometheus/write/queue/types" - "github.com/vladopajic/go-actor/actor" -) - -var _ actor.Worker = (*endpoint)(nil) - -// endpoint handles communication between the serializer, filequeue and network. 
-type endpoint struct { - network types.NetworkClient - serializer types.Serializer - log log.Logger - ttl time.Duration - incoming actor.Mailbox[types.DataHandle] - buf []byte - self actor.Actor -} - -func NewEndpoint(client types.NetworkClient, serializer types.Serializer, ttl time.Duration, logger log.Logger) *endpoint { - return &endpoint{ - network: client, - serializer: serializer, - log: logger, - ttl: ttl, - incoming: actor.NewMailbox[types.DataHandle](), - buf: make([]byte, 0, 1024), - } -} - -func (ep *endpoint) Start() { - ep.self = actor.Combine(actor.New(ep), ep.incoming).Build() - ep.self.Start() - ep.serializer.Start() - ep.network.Start() -} - -func (ep *endpoint) Stop() { - // Stop in order of data flow. This prevents errors around stopped mailboxes that can pop up. - ep.serializer.Stop() - ep.network.Stop() - ep.self.Stop() -} - -func (ep *endpoint) DoWork(ctx actor.Context) actor.WorkerStatus { - select { - case <-ctx.Done(): - return actor.WorkerEnd - case file, ok := <-ep.incoming.ReceiveC(): - if !ok { - return actor.WorkerEnd - } - meta, buf, err := file.Pop() - if err != nil { - level.Error(ep.log).Log("msg", "unable to get file contents", "name", file.Name, "err", err) - return actor.WorkerContinue - } - ep.deserializeAndSend(ctx, meta, buf) - return actor.WorkerContinue - } -} - -func (ep *endpoint) deserializeAndSend(ctx context.Context, meta map[string]string, buf []byte) { - var err error - ep.buf, err = snappy.DecodeInto(ep.buf, buf) - if err != nil { - level.Debug(ep.log).Log("msg", "error snappy decoding", "err", err) - return - } - // The version of each file is in the metadata. Right now there is only one version - // supported but in the future the ability to support more. Along with different - // compression. 
- version, ok := meta["version"] - if !ok { - level.Error(ep.log).Log("msg", "version not found for deserialization") - return - } - if version != types.AlloyFileVersion { - level.Error(ep.log).Log("msg", "invalid version found for deserialization", "version", version) - return - } - // Grab the amounts of each type and we can go ahead and alloc the space. - seriesCount, _ := strconv.Atoi(meta["series_count"]) - metaCount, _ := strconv.Atoi(meta["meta_count"]) - stringsCount, _ := strconv.Atoi(meta["strings_count"]) - sg := &types.SeriesGroup{ - Series: make([]*types.TimeSeriesBinary, seriesCount), - Metadata: make([]*types.TimeSeriesBinary, metaCount), - Strings: make([]string, stringsCount), - } - // Prefill our series with items from the pool to limit allocs. - for i := 0; i < seriesCount; i++ { - sg.Series[i] = types.GetTimeSeriesFromPool() - } - for i := 0; i < metaCount; i++ { - sg.Metadata[i] = types.GetTimeSeriesFromPool() - } - sg, ep.buf, err = types.DeserializeToSeriesGroup(sg, ep.buf) - if err != nil { - level.Debug(ep.log).Log("msg", "error deserializing", "err", err) - return - } - - for _, series := range sg.Series { - // One last chance to check the TTL. Writing to the filequeue will check it but - // in a situation where the network is down and writing backs up we dont want to send - // data that will get rejected. - seriesAge := time.Since(time.Unix(series.TS, 0)) - if seriesAge > ep.ttl { - // TODO @mattdurham add metric here for ttl expired. 
- continue - } - sendErr := ep.network.SendSeries(ctx, series) - if sendErr != nil { - level.Error(ep.log).Log("msg", "error sending to write client", "err", sendErr) - } - } - - for _, md := range sg.Metadata { - sendErr := ep.network.SendMetadata(ctx, md) - if sendErr != nil { - level.Error(ep.log).Log("msg", "error sending metadata to write client", "err", sendErr) - } - } -} diff --git a/internal/component/prometheus/write/queue/filequeue/filequeue.go b/internal/component/prometheus/write/queue/filequeue/filequeue.go deleted file mode 100644 index 7088d3df4b..0000000000 --- a/internal/component/prometheus/write/queue/filequeue/filequeue.go +++ /dev/null @@ -1,191 +0,0 @@ -package filequeue - -import ( - "context" - "fmt" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - - "github.com/go-kit/log" - "github.com/vladopajic/go-actor/actor" - - "github.com/grafana/alloy/internal/component/prometheus/write/queue/types" - "github.com/grafana/alloy/internal/runtime/logging/level" -) - -var _ actor.Worker = (*queue)(nil) -var _ types.FileStorage = (*queue)(nil) - -// queue represents an on-disk queue. This is a list implemented as files ordered by id with a name pattern: .committed -// Each file contains a byte buffer and an optional metatdata map. -type queue struct { - self actor.Actor - directory string - maxID int - logger log.Logger - dataQueue actor.Mailbox[types.Data] - // Out is where to send data when pulled from queue, it is assumed that it will - // block until ready for another record. - out func(ctx context.Context, dh types.DataHandle) - // existingFiles is the list of files found initially. - existingFiles []string -} - -// NewQueue returns a implementation of FileStorage. 
-func NewQueue(directory string, out func(ctx context.Context, dh types.DataHandle), logger log.Logger) (types.FileStorage, error) { - err := os.MkdirAll(directory, 0777) - if err != nil { - return nil, err - } - - // We dont actually support uncommitted but I think its good to at least have some naming to avoid parsing random files - // that get installed into the system. - matches, _ := filepath.Glob(filepath.Join(directory, "*.committed")) - ids := make([]int, len(matches)) - - // Try and grab the id from each file. - // e.g. grab 1 from `1.committed` - for i, fileName := range matches { - id, err := strconv.Atoi(strings.ReplaceAll(filepath.Base(fileName), ".committed", "")) - if err != nil { - level.Error(logger).Log("msg", "unable to convert numeric prefix for committed file", "err", err, "file", fileName) - continue - } - ids[i] = id - } - sort.Ints(ids) - var currentMaxID int - if len(ids) > 0 { - currentMaxID = ids[len(ids)-1] - } - q := &queue{ - directory: directory, - maxID: currentMaxID, - logger: logger, - out: out, - dataQueue: actor.NewMailbox[types.Data](), - existingFiles: make([]string, 0), - } - - // Save the existing files in `q.existingFiles`, which will have their data pushed to `out` when actor starts. - for _, id := range ids { - name := filepath.Join(directory, fmt.Sprintf("%d.committed", id)) - q.existingFiles = append(q.existingFiles, name) - } - return q, nil -} - -func (q *queue) Start() { - // Actors and mailboxes have to be started. It makes sense to combine them into one unit since they - // have the same lifespan. - q.self = actor.Combine(actor.New(q), q.dataQueue).Build() - q.self.Start() -} - -func (q *queue) Stop() { - q.self.Stop() -} - -// Store will add records to the dataQueue that will add the data to the filesystem. This is an unbuffered channel. -// Its possible in the future we would want to make it a buffer of 1, but so far it hasnt been an issue in testing. 
-func (q *queue) Store(ctx context.Context, meta map[string]string, data []byte) error { - return q.dataQueue.Send(ctx, types.Data{ - Meta: meta, - Data: data, - }) -} - -// get returns the data of the file or an error if something wrong went on. -func get(logger log.Logger, name string) (map[string]string, []byte, error) { - defer deleteFile(logger, name) - buf, err := readFile(name) - if err != nil { - return nil, nil, err - } - r := &Record{} - _, err = r.UnmarshalMsg(buf) - if err != nil { - return nil, nil, err - } - return r.Meta, r.Data, nil -} - -// DoWork allows most of the queue to be single threaded with work only coming in and going out via mailboxes(channels). -func (q *queue) DoWork(ctx actor.Context) actor.WorkerStatus { - // Queue up our existing items. - for _, name := range q.existingFiles { - q.out(ctx, types.DataHandle{ - Name: name, - Pop: func() (map[string]string, []byte, error) { - return get(q.logger, name) - }, - }) - } - // We only want to process existing files once. - q.existingFiles = nil - select { - case <-ctx.Done(): - return actor.WorkerEnd - case item, ok := <-q.dataQueue.ReceiveC(): - if !ok { - return actor.WorkerEnd - } - name, err := q.add(item.Meta, item.Data) - if err != nil { - level.Error(q.logger).Log("msg", "error adding item - dropping data", "err", err) - return actor.WorkerContinue - } - // The idea is that this will callee will block/process until the callee is ready for another file. - q.out(ctx, types.DataHandle{ - Name: name, - Pop: func() (map[string]string, []byte, error) { - return get(q.logger, name) - }, - }) - return actor.WorkerContinue - } -} - -// Add a file to the queue (as committed). 
-func (q *queue) add(meta map[string]string, data []byte) (string, error) { - if meta == nil { - meta = make(map[string]string) - } - q.maxID++ - name := filepath.Join(q.directory, fmt.Sprintf("%d.committed", q.maxID)) - r := &Record{ - Meta: meta, - Data: data, - } - // Not reusing a buffer here since allocs are not bad here and we are trying to reduce memory. - rBuf, err := r.MarshalMsg(nil) - if err != nil { - return "", err - } - err = q.writeFile(name, rBuf) - if err != nil { - return "", err - } - return name, nil -} - -func (q *queue) writeFile(name string, data []byte) error { - return os.WriteFile(name, data, 0644) -} - -func deleteFile(logger log.Logger, name string) { - err := os.Remove(name) - if err != nil { - level.Error(logger).Log("msg", "unable to delete file", "err", err, "file", name) - } -} -func readFile(name string) ([]byte, error) { - bb, err := os.ReadFile(name) - if err != nil { - return nil, err - } - return bb, err -} diff --git a/internal/component/prometheus/write/queue/filequeue/filequeue_test.go b/internal/component/prometheus/write/queue/filequeue/filequeue_test.go deleted file mode 100644 index d36e0b71ed..0000000000 --- a/internal/component/prometheus/write/queue/filequeue/filequeue_test.go +++ /dev/null @@ -1,253 +0,0 @@ -package filequeue - -import ( - "context" - "os" - "path/filepath" - "runtime" - "strconv" - "testing" - "time" - - "github.com/vladopajic/go-actor/actor" - "go.uber.org/goleak" - - "github.com/grafana/alloy/internal/component/prometheus/write/queue/types" - - "github.com/go-kit/log" - "github.com/stretchr/testify/require" -) - -func TestFileQueue(t *testing.T) { - defer goleak.VerifyNone(t) - dir := t.TempDir() - log := log.NewNopLogger() - mbx := actor.NewMailbox[types.DataHandle]() - mbx.Start() - defer mbx.Stop() - q, err := NewQueue(dir, func(ctx context.Context, dh types.DataHandle) { - _ = mbx.Send(ctx, dh) - }, log) - require.NoError(t, err) - q.Start() - defer q.Stop() - err = 
q.Store(context.Background(), nil, []byte("test")) - - require.NoError(t, err) - - meta, buf, err := getHandle(t, mbx) - require.NoError(t, err) - require.True(t, string(buf) == "test") - require.Len(t, meta, 0) - - // Ensure nothing new comes through. - timer := time.NewTicker(100 * time.Millisecond) - select { - case <-timer.C: - return - case <-mbx.ReceiveC(): - require.True(t, false) - } -} - -func TestMetaFileQueue(t *testing.T) { - defer goleak.VerifyNone(t) - - dir := t.TempDir() - log := log.NewNopLogger() - mbx := actor.NewMailbox[types.DataHandle]() - mbx.Start() - defer mbx.Stop() - q, err := NewQueue(dir, func(ctx context.Context, dh types.DataHandle) { - _ = mbx.Send(ctx, dh) - }, log) - q.Start() - defer q.Stop() - require.NoError(t, err) - err = q.Store(context.Background(), map[string]string{"name": "bob"}, []byte("test")) - require.NoError(t, err) - - meta, buf, err := getHandle(t, mbx) - require.NoError(t, err) - require.True(t, string(buf) == "test") - require.Len(t, meta, 1) - require.True(t, meta["name"] == "bob") -} - -func TestCorruption(t *testing.T) { - defer goleak.VerifyNone(t) - - dir := t.TempDir() - log := log.NewNopLogger() - mbx := actor.NewMailbox[types.DataHandle]() - mbx.Start() - defer mbx.Stop() - q, err := NewQueue(dir, func(ctx context.Context, dh types.DataHandle) { - _ = mbx.Send(ctx, dh) - }, log) - q.Start() - defer q.Stop() - require.NoError(t, err) - - err = q.Store(context.Background(), map[string]string{"name": "bob"}, []byte("first")) - require.NoError(t, err) - err = q.Store(context.Background(), map[string]string{"name": "bob"}, []byte("second")) - - require.NoError(t, err) - - // Send is async so may need to wait a bit for it happen. 
- require.Eventually(t, func() bool { - // First should be 1.committed - _, errStat := os.Stat(filepath.Join(dir, "1.committed")) - return errStat == nil - }, 2*time.Second, 100*time.Millisecond) - - fi, err := os.Stat(filepath.Join(dir, "1.committed")) - - require.NoError(t, err) - err = os.WriteFile(filepath.Join(dir, fi.Name()), []byte("bad"), 0644) - require.NoError(t, err) - - _, _, err = getHandle(t, mbx) - require.Error(t, err) - - meta, buf, err := getHandle(t, mbx) - require.NoError(t, err) - require.True(t, string(buf) == "second") - require.Len(t, meta, 1) -} - -func TestFileDeleted(t *testing.T) { - defer goleak.VerifyNone(t) - - dir := t.TempDir() - log := log.NewNopLogger() - mbx := actor.NewMailbox[types.DataHandle]() - mbx.Start() - defer mbx.Stop() - q, err := NewQueue(dir, func(ctx context.Context, dh types.DataHandle) { - _ = mbx.Send(ctx, dh) - }, log) - q.Start() - defer q.Stop() - require.NoError(t, err) - - evenHandles := make([]string, 0) - for i := 0; i < 10; i++ { - err = q.Store(context.Background(), map[string]string{"name": "bob"}, []byte(strconv.Itoa(i))) - - require.NoError(t, err) - if i%2 == 0 { - evenHandles = append(evenHandles, filepath.Join(dir, strconv.Itoa(i+1)+".committed")) - } - } - - // Send is async so may need to wait a bit for it happen, check for the last file written. - require.Eventually(t, func() bool { - _, errStat := os.Stat(filepath.Join(dir, "10.committed")) - return errStat == nil - }, 2*time.Second, 100*time.Millisecond) - - for _, h := range evenHandles { - _ = os.Remove(h) - } - // Every even file was deleted and should have an error. - for i := 0; i < 10; i++ { - _, buf2, err := getHandle(t, mbx) - if i%2 == 0 { - require.Error(t, err) - } else { - require.NoError(t, err) - require.True(t, string(buf2) == strconv.Itoa(i)) - } - } -} - -func TestOtherFiles(t *testing.T) { - if runtime.GOOS == "windows" { - // TODO: Fix this test as we mature the file queue - t.Skip("This test is very flaky on Windows. 
Will need to fix it as we mature the filequeue.") - } - defer goleak.VerifyNone(t) - - dir := t.TempDir() - log := log.NewNopLogger() - mbx := actor.NewMailbox[types.DataHandle]() - mbx.Start() - defer mbx.Stop() - q, err := NewQueue(dir, func(ctx context.Context, dh types.DataHandle) { - _ = mbx.Send(ctx, dh) - }, log) - q.Start() - defer q.Stop() - require.NoError(t, err) - - err = q.Store(context.Background(), nil, []byte("first")) - require.NoError(t, err) - os.Create(filepath.Join(dir, "otherfile")) - _, buf, err := getHandle(t, mbx) - require.NoError(t, err) - require.True(t, string(buf) == "first") -} - -func TestResuming(t *testing.T) { - defer goleak.VerifyNone(t) - - dir := t.TempDir() - log := log.NewNopLogger() - mbx := actor.NewMailbox[types.DataHandle]() - mbx.Start() - q, err := NewQueue(dir, func(ctx context.Context, dh types.DataHandle) { - _ = mbx.Send(ctx, dh) - }, log) - q.Start() - require.NoError(t, err) - - err = q.Store(context.Background(), nil, []byte("first")) - - require.NoError(t, err) - - err = q.Store(context.Background(), nil, []byte("second")) - - require.NoError(t, err) - time.Sleep(1 * time.Second) - mbx.Stop() - q.Stop() - - mbx2 := actor.NewMailbox[types.DataHandle]() - mbx2.Start() - defer mbx2.Stop() - q2, err := NewQueue(dir, func(ctx context.Context, dh types.DataHandle) { - _ = mbx2.Send(ctx, dh) - }, log) - require.NoError(t, err) - q2.Start() - defer q2.Stop() - err = q2.Store(context.Background(), nil, []byte("third")) - - require.NoError(t, err) - _, buf, err := getHandle(t, mbx2) - require.NoError(t, err) - require.True(t, string(buf) == "first") - - _, buf, err = getHandle(t, mbx2) - require.NoError(t, err) - require.True(t, string(buf) == "second") - - _, buf, err = getHandle(t, mbx2) - require.NoError(t, err) - require.True(t, string(buf) == "third") -} - -func getHandle(t *testing.T, mbx actor.MailboxReceiver[types.DataHandle]) (map[string]string, []byte, error) { - timer := time.NewTicker(5 * time.Second) - select 
{ - case <-timer.C: - require.True(t, false) - // This is only here to satisfy the linting. - return nil, nil, nil - case item, ok := <-mbx.ReceiveC(): - require.True(t, ok) - return item.Pop() - } -} diff --git a/internal/component/prometheus/write/queue/filequeue/record.go b/internal/component/prometheus/write/queue/filequeue/record.go deleted file mode 100644 index 2d6b12a034..0000000000 --- a/internal/component/prometheus/write/queue/filequeue/record.go +++ /dev/null @@ -1,11 +0,0 @@ -package filequeue - -// Record wraps the input data and combines it with the metadata. -// -//go:generate msgp -type Record struct { - // Meta holds a key value pair that can include information about the data. - // Such as compression used, file format version and other important bits of data. - Meta map[string]string - Data []byte -} diff --git a/internal/component/prometheus/write/queue/filequeue/record_gen.go b/internal/component/prometheus/write/queue/filequeue/record_gen.go deleted file mode 100644 index 285940eb88..0000000000 --- a/internal/component/prometheus/write/queue/filequeue/record_gen.go +++ /dev/null @@ -1,206 +0,0 @@ -package filequeue - -// Code generated by github.com/tinylib/msgp DO NOT EDIT. 
- -import ( - "github.com/tinylib/msgp/msgp" -) - -// DecodeMsg implements msgp.Decodable -func (z *Record) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "Meta": - var zb0002 uint32 - zb0002, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err, "Meta") - return - } - if z.Meta == nil { - z.Meta = make(map[string]string, zb0002) - } else if len(z.Meta) > 0 { - for key := range z.Meta { - delete(z.Meta, key) - } - } - for zb0002 > 0 { - zb0002-- - var za0001 string - var za0002 string - za0001, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Meta") - return - } - za0002, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Meta", za0001) - return - } - z.Meta[za0001] = za0002 - } - case "Data": - z.Data, err = dc.ReadBytes(z.Data) - if err != nil { - err = msgp.WrapError(err, "Data") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z *Record) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 2 - // write "Meta" - err = en.Append(0x82, 0xa4, 0x4d, 0x65, 0x74, 0x61) - if err != nil { - return - } - err = en.WriteMapHeader(uint32(len(z.Meta))) - if err != nil { - err = msgp.WrapError(err, "Meta") - return - } - for za0001, za0002 := range z.Meta { - err = en.WriteString(za0001) - if err != nil { - err = msgp.WrapError(err, "Meta") - return - } - err = en.WriteString(za0002) - if err != nil { - err = msgp.WrapError(err, "Meta", za0001) - return - } - } - // write "Data" - err = en.Append(0xa4, 0x44, 0x61, 0x74, 0x61) - if err != nil { - return - } - err = en.WriteBytes(z.Data) - if err 
!= nil { - err = msgp.WrapError(err, "Data") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z *Record) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 2 - // string "Meta" - o = append(o, 0x82, 0xa4, 0x4d, 0x65, 0x74, 0x61) - o = msgp.AppendMapHeader(o, uint32(len(z.Meta))) - for za0001, za0002 := range z.Meta { - o = msgp.AppendString(o, za0001) - o = msgp.AppendString(o, za0002) - } - // string "Data" - o = append(o, 0xa4, 0x44, 0x61, 0x74, 0x61) - o = msgp.AppendBytes(o, z.Data) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *Record) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "Meta": - var zb0002 uint32 - zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Meta") - return - } - if z.Meta == nil { - z.Meta = make(map[string]string, zb0002) - } else if len(z.Meta) > 0 { - for key := range z.Meta { - delete(z.Meta, key) - } - } - for zb0002 > 0 { - var za0001 string - var za0002 string - zb0002-- - za0001, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Meta") - return - } - za0002, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Meta", za0001) - return - } - z.Meta[za0001] = za0002 - } - case "Data": - z.Data, bts, err = msgp.ReadBytesBytes(bts, z.Data) - if err != nil { - err = msgp.WrapError(err, "Data") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes 
occupied by the serialized message -func (z *Record) Msgsize() (s int) { - s = 1 + 5 + msgp.MapHeaderSize - if z.Meta != nil { - for za0001, za0002 := range z.Meta { - _ = za0002 - s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002) - } - } - s += 5 + msgp.BytesPrefixSize + len(z.Data) - return -} diff --git a/internal/component/prometheus/write/queue/filequeue/record_gen_test.go b/internal/component/prometheus/write/queue/filequeue/record_gen_test.go deleted file mode 100644 index 6206b5f93c..0000000000 --- a/internal/component/prometheus/write/queue/filequeue/record_gen_test.go +++ /dev/null @@ -1,123 +0,0 @@ -package filequeue - -// Code generated by github.com/tinylib/msgp DO NOT EDIT. - -import ( - "bytes" - "testing" - - "github.com/tinylib/msgp/msgp" -) - -func TestMarshalUnmarshalRecord(t *testing.T) { - v := Record{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgRecord(b *testing.B) { - v := Record{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgRecord(b *testing.B) { - v := Record{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalRecord(b *testing.B) { - v := Record{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeRecord(t 
*testing.T) { - v := Record{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeRecord Msgsize() is inaccurate") - } - - vn := Record{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeRecord(b *testing.B) { - v := Record{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeRecord(b *testing.B) { - v := Record{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} diff --git a/internal/component/prometheus/write/queue/network/benchmark_test.go b/internal/component/prometheus/write/queue/network/benchmark_test.go deleted file mode 100644 index f8bc6a3f5f..0000000000 --- a/internal/component/prometheus/write/queue/network/benchmark_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package network - -import ( - "context" - "testing" - - "github.com/vladopajic/go-actor/actor" -) - -func BenchmarkMailbox(b *testing.B) { - // This should be 260 ns roughly or 3m messages a second. 
- mbx := actor.NewMailbox[struct{}]() - mbx.Start() - defer mbx.Stop() - - doneC := make(chan any) - - go func() { - for range b.N { - <-mbx.ReceiveC() - } - - close(doneC) - }() - - ctx := context.Background() - for range b.N { - mbx.Send(ctx, struct{}{}) - } - - <-doneC -} diff --git a/internal/component/prometheus/write/queue/network/loop.go b/internal/component/prometheus/write/queue/network/loop.go deleted file mode 100644 index 4654b190e3..0000000000 --- a/internal/component/prometheus/write/queue/network/loop.go +++ /dev/null @@ -1,371 +0,0 @@ -package network - -import ( - "bufio" - "bytes" - "context" - "fmt" - "io" - "net/http" - "strconv" - "strings" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/golang/protobuf/proto" - "github.com/golang/snappy" - "github.com/grafana/alloy/internal/component/prometheus/write/queue/types" - "github.com/prometheus/prometheus/prompb" - "github.com/vladopajic/go-actor/actor" - "go.uber.org/atomic" -) - -var _ actor.Worker = (*loop)(nil) - -// loop handles the low level sending of data. It's conceptually a queue. -// loop makes no attempt to save or restore signals in the queue. -// loop config cannot be updated, it is easier to recreate. This does mean we lose any signals in the queue. -type loop struct { - isMeta bool - seriesMbx actor.Mailbox[*types.TimeSeriesBinary] - client *http.Client - cfg types.ConnectionConfig - log log.Logger - lastSend time.Time - statsFunc func(s types.NetworkStats) - stopCalled atomic.Bool - externalLabels map[string]string - series []*types.TimeSeriesBinary - self actor.Actor - ticker *time.Ticker - req *prompb.WriteRequest - buf *proto.Buffer - sendBuffer []byte -} - -func newLoop(cc types.ConnectionConfig, isMetaData bool, l log.Logger, stats func(s types.NetworkStats)) *loop { - // TODO @mattdurham add TLS support afer the initial push. 
- return &loop{ - isMeta: isMetaData, - // In general we want a healthy queue of items, in this case we want to have 2x our maximum send sized ready. - seriesMbx: actor.NewMailbox[*types.TimeSeriesBinary](actor.OptCapacity(2*cc.BatchCount), actor.OptAsChan()), - client: &http.Client{}, - cfg: cc, - log: log.With(l, "name", "loop", "url", cc.URL), - statsFunc: stats, - externalLabels: cc.ExternalLabels, - ticker: time.NewTicker(1 * time.Second), - buf: proto.NewBuffer(nil), - sendBuffer: make([]byte, 0), - req: &prompb.WriteRequest{ - // We know BatchCount is the most we will ever send. - Timeseries: make([]prompb.TimeSeries, 0, cc.BatchCount), - }, - } -} - -func (l *loop) Start() { - l.self = actor.Combine(l.actors()...).Build() - l.self.Start() -} - -func (l *loop) Stop() { - l.stopCalled.Store(true) - l.self.Stop() -} - -func (l *loop) actors() []actor.Actor { - return []actor.Actor{ - actor.New(l), - l.seriesMbx, - } -} - -func (l *loop) DoWork(ctx actor.Context) actor.WorkerStatus { - // Main select loop - select { - case <-ctx.Done(): - return actor.WorkerEnd - // Ticker is to ensure the flush timer is called. - case <-l.ticker.C: - if len(l.series) == 0 { - return actor.WorkerContinue - } - if time.Since(l.lastSend) > l.cfg.FlushInterval { - l.trySend(ctx) - } - return actor.WorkerContinue - case series, ok := <-l.seriesMbx.ReceiveC(): - if !ok { - return actor.WorkerEnd - } - l.series = append(l.series, series) - if len(l.series) >= l.cfg.BatchCount { - l.trySend(ctx) - } - return actor.WorkerContinue - } -} - -// trySend is the core functionality for sending data to a endpoint. It will attempt retries as defined in MaxRetryAttempts. 
-func (l *loop) trySend(ctx context.Context) { - attempts := 0 - for { - start := time.Now() - result := l.send(ctx, attempts) - duration := time.Since(start) - l.statsFunc(types.NetworkStats{ - SendDuration: duration, - }) - if result.err != nil { - level.Error(l.log).Log("msg", "error in sending telemetry", "err", result.err.Error()) - } - if result.successful { - l.sendingCleanup() - return - } - if !result.recoverableError { - l.sendingCleanup() - return - } - attempts++ - if attempts > int(l.cfg.MaxRetryAttempts) && l.cfg.MaxRetryAttempts > 0 { - level.Debug(l.log).Log("msg", "max retry attempts reached", "attempts", attempts) - l.sendingCleanup() - return - } - // This helps us short circuit the loop if we are stopping. - if l.stopCalled.Load() { - return - } - // Sleep between attempts. - time.Sleep(result.retryAfter) - } -} - -type sendResult struct { - err error - successful bool - recoverableError bool - retryAfter time.Duration - statusCode int - networkError bool -} - -func (l *loop) sendingCleanup() { - types.PutTimeSeriesSliceIntoPool(l.series) - l.sendBuffer = l.sendBuffer[:0] - l.series = make([]*types.TimeSeriesBinary, 0, l.cfg.BatchCount) - l.lastSend = time.Now() -} - -// send is the main work loop of the loop. -func (l *loop) send(ctx context.Context, retryCount int) sendResult { - result := sendResult{} - defer func() { - recordStats(l.series, l.isMeta, l.statsFunc, result, len(l.sendBuffer)) - }() - // Check to see if this is a retry and we can reuse the buffer. - // I wonder if we should do this, its possible we are sending things that have exceeded the TTL. 
- if len(l.sendBuffer) == 0 { - var data []byte - var wrErr error - if l.isMeta { - data, wrErr = createWriteRequestMetadata(l.log, l.req, l.series, l.buf) - } else { - data, wrErr = createWriteRequest(l.req, l.series, l.externalLabels, l.buf) - } - if wrErr != nil { - result.err = wrErr - result.recoverableError = false - return result - } - l.sendBuffer = snappy.Encode(l.sendBuffer, data) - } - - httpReq, err := http.NewRequest("POST", l.cfg.URL, bytes.NewReader(l.sendBuffer)) - if err != nil { - result.err = err - result.recoverableError = true - result.networkError = true - return result - } - httpReq.Header.Add("Content-Encoding", "snappy") - httpReq.Header.Set("Content-Type", "application/x-protobuf") - httpReq.Header.Set("User-Agent", l.cfg.UserAgent) - httpReq.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0") - if l.cfg.BasicAuth != nil { - httpReq.SetBasicAuth(l.cfg.BasicAuth.Username, l.cfg.BasicAuth.Password) - } else if l.cfg.BearerToken != "" { - httpReq.Header.Set("Authorization", "Bearer "+string(l.cfg.BearerToken)) - } - - if retryCount > 0 { - httpReq.Header.Set("Retry-Attempt", strconv.Itoa(retryCount)) - } - ctx, cncl := context.WithTimeout(ctx, l.cfg.Timeout) - defer cncl() - resp, err := l.client.Do(httpReq.WithContext(ctx)) - // Network errors are recoverable. - if err != nil { - result.err = err - result.networkError = true - result.recoverableError = true - result.retryAfter = l.cfg.RetryBackoff - return result - } - result.statusCode = resp.StatusCode - defer resp.Body.Close() - // 500 errors are considered recoverable. - if resp.StatusCode/100 == 5 || resp.StatusCode == http.StatusTooManyRequests { - result.err = fmt.Errorf("server responded with status code %d", resp.StatusCode) - result.retryAfter = retryAfterDuration(l.cfg.RetryBackoff, resp.Header.Get("Retry-After")) - result.recoverableError = true - return result - } - // Status Codes that are not 500 or 200 are not recoverable and dropped. 
- if resp.StatusCode/100 != 2 { - scanner := bufio.NewScanner(io.LimitReader(resp.Body, 1_000)) - line := "" - if scanner.Scan() { - line = scanner.Text() - } - result.err = fmt.Errorf("server returned HTTP status %s: %s", resp.Status, line) - return result - } - - result.successful = true - return result -} - -func createWriteRequest(wr *prompb.WriteRequest, series []*types.TimeSeriesBinary, externalLabels map[string]string, data *proto.Buffer) ([]byte, error) { - if cap(wr.Timeseries) < len(series) { - wr.Timeseries = make([]prompb.TimeSeries, len(series)) - } - wr.Timeseries = wr.Timeseries[:len(series)] - - for i, tsBuf := range series { - ts := wr.Timeseries[i] - if cap(ts.Labels) < len(tsBuf.Labels) { - ts.Labels = make([]prompb.Label, 0, len(tsBuf.Labels)) - } - ts.Labels = ts.Labels[:len(tsBuf.Labels)] - for k, v := range tsBuf.Labels { - ts.Labels[k].Name = v.Name - ts.Labels[k].Value = v.Value - } - - // By default each sample only has a histogram, float histogram or sample. - if cap(ts.Histograms) == 0 { - ts.Histograms = make([]prompb.Histogram, 1) - } else { - ts.Histograms = ts.Histograms[:0] - } - if tsBuf.Histograms.Histogram != nil { - ts.Histograms = ts.Histograms[:1] - ts.Histograms[0] = tsBuf.Histograms.Histogram.ToPromHistogram() - } - if tsBuf.Histograms.FloatHistogram != nil { - ts.Histograms = ts.Histograms[:1] - ts.Histograms[0] = tsBuf.Histograms.FloatHistogram.ToPromFloatHistogram() - } - - if tsBuf.Histograms.Histogram == nil && tsBuf.Histograms.FloatHistogram == nil { - ts.Histograms = ts.Histograms[:0] - } - - // Encode the external labels inside if needed. - for k, v := range externalLabels { - found := false - for j, lbl := range ts.Labels { - if lbl.Name == k { - ts.Labels[j].Value = v - found = true - break - } - } - if !found { - ts.Labels = append(ts.Labels, prompb.Label{ - Name: k, - Value: v, - }) - } - } - // By default each TimeSeries only has one sample. 
- if len(ts.Samples) == 0 { - ts.Samples = make([]prompb.Sample, 1) - } - ts.Samples[0].Value = tsBuf.Value - ts.Samples[0].Timestamp = tsBuf.TS - wr.Timeseries[i] = ts - } - defer func() { - for i := 0; i < len(wr.Timeseries); i++ { - wr.Timeseries[i].Histograms = wr.Timeseries[i].Histograms[:0] - wr.Timeseries[i].Labels = wr.Timeseries[i].Labels[:0] - wr.Timeseries[i].Exemplars = wr.Timeseries[i].Exemplars[:0] - } - }() - // Reset the buffer for reuse. - data.Reset() - err := data.Marshal(wr) - return data.Bytes(), err -} - -func createWriteRequestMetadata(l log.Logger, wr *prompb.WriteRequest, series []*types.TimeSeriesBinary, data *proto.Buffer) ([]byte, error) { - // Metadata is rarely sent so having this being less than optimal is fine. - wr.Metadata = make([]prompb.MetricMetadata, 0) - for _, ts := range series { - mt, valid := toMetadata(ts) - // TODO @mattdurham somewhere there is a bug where metadata with no labels are being passed through. - if !valid { - level.Error(l).Log("msg", "invalid metadata was found", "labels", ts.Labels.String()) - continue - } - wr.Metadata = append(wr.Metadata, mt) - } - data.Reset() - err := data.Marshal(wr) - return data.Bytes(), err -} - -func getMetadataCount(tss []*types.TimeSeriesBinary) int { - var cnt int - for _, ts := range tss { - if isMetadata(ts) { - cnt++ - } - } - return cnt -} - -func isMetadata(ts *types.TimeSeriesBinary) bool { - return ts.Labels.Has(types.MetaType) && - ts.Labels.Has(types.MetaUnit) && - ts.Labels.Has(types.MetaHelp) -} - -func toMetadata(ts *types.TimeSeriesBinary) (prompb.MetricMetadata, bool) { - if !isMetadata(ts) { - return prompb.MetricMetadata{}, false - } - return prompb.MetricMetadata{ - Type: prompb.MetricMetadata_MetricType(prompb.MetricMetadata_MetricType_value[strings.ToUpper(ts.Labels.Get(types.MetaType))]), - Help: ts.Labels.Get(types.MetaHelp), - Unit: ts.Labels.Get(types.MetaUnit), - MetricFamilyName: ts.Labels.Get("__name__"), - }, true -} - -func 
retryAfterDuration(defaultDuration time.Duration, t string) time.Duration { - if parsedTime, err := time.Parse(http.TimeFormat, t); err == nil { - return time.Until(parsedTime) - } - // The duration can be in seconds. - d, err := strconv.Atoi(t) - if err != nil { - return defaultDuration - } - return time.Duration(d) * time.Second -} diff --git a/internal/component/prometheus/write/queue/network/manager.go b/internal/component/prometheus/write/queue/network/manager.go deleted file mode 100644 index 0244569403..0000000000 --- a/internal/component/prometheus/write/queue/network/manager.go +++ /dev/null @@ -1,199 +0,0 @@ -package network - -import ( - "context" - "github.com/go-kit/log" - "github.com/grafana/alloy/internal/component/prometheus/write/queue/types" - "github.com/grafana/alloy/internal/runtime/logging/level" - "github.com/vladopajic/go-actor/actor" -) - -// manager manages loops. Mostly it exists to control their lifecycle and send work to them. -type manager struct { - loops []*loop - metadata *loop - logger log.Logger - inbox actor.Mailbox[*types.TimeSeriesBinary] - metaInbox actor.Mailbox[*types.TimeSeriesBinary] - configInbox actor.Mailbox[configCallback] - self actor.Actor - cfg types.ConnectionConfig - stats func(types.NetworkStats) - metaStats func(types.NetworkStats) -} - -// configCallback allows actors to notify via `done` channel when they're done processing the config `cc`. Useful when synchronous processing is required. -type configCallback struct { - cc types.ConnectionConfig - done chan struct{} -} - -var _ types.NetworkClient = (*manager)(nil) - -var _ actor.Worker = (*manager)(nil) - -func New(cc types.ConnectionConfig, logger log.Logger, seriesStats, metadataStats func(types.NetworkStats)) (types.NetworkClient, error) { - s := &manager{ - loops: make([]*loop, 0, cc.Connections), - logger: logger, - // This provides blocking to only handle one at a time, so that if a queue blocks - // it will stop the filequeue from feeding more. 
Without OptAsChan the minimum capacity is actually a 64-item buffer. - inbox: actor.NewMailbox[*types.TimeSeriesBinary](actor.OptCapacity(1), actor.OptAsChan()), - metaInbox: actor.NewMailbox[*types.TimeSeriesBinary](actor.OptCapacity(1), actor.OptAsChan()), - configInbox: actor.NewMailbox[configCallback](), - stats: seriesStats, - metaStats: metadataStats, - cfg: cc, - } - - // start kicks off a number of concurrent connections. - for i := uint(0); i < s.cfg.Connections; i++ { - l := newLoop(cc, false, logger, seriesStats) - l.self = actor.New(l) - s.loops = append(s.loops, l) - } - - s.metadata = newLoop(cc, true, logger, metadataStats) - s.metadata.self = actor.New(s.metadata) - return s, nil -} - -func (s *manager) Start() { - s.startLoops() - s.configInbox.Start() - s.metaInbox.Start() - s.inbox.Start() - s.self = actor.New(s) - s.self.Start() -} - -func (s *manager) SendSeries(ctx context.Context, data *types.TimeSeriesBinary) error { - return s.inbox.Send(ctx, data) -} - -func (s *manager) SendMetadata(ctx context.Context, data *types.TimeSeriesBinary) error { - return s.metaInbox.Send(ctx, data) -} - -func (s *manager) UpdateConfig(ctx context.Context, cc types.ConnectionConfig) error { - done := make(chan struct{}) - defer close(done) - err := s.configInbox.Send(ctx, configCallback{ - cc: cc, - done: done, - }) - if err != nil { - return err - } - <-done - return nil -} - -func (s *manager) DoWork(ctx actor.Context) actor.WorkerStatus { - // This acts as a priority queue, always check for configuration changes first. - select { - case cfg, ok := <-s.configInbox.ReceiveC(): - if !ok { - level.Debug(s.logger).Log("msg", "config inbox closed") - return actor.WorkerEnd - } - s.updateConfig(cfg.cc) - // Notify the caller we have applied the config. - cfg.done <- struct{}{} - return actor.WorkerContinue - default: - } - - // main work queue. 
- select { - case <-ctx.Done(): - s.Stop() - return actor.WorkerEnd - case ts, ok := <-s.inbox.ReceiveC(): - if !ok { - level.Debug(s.logger).Log("msg", "series inbox closed") - return actor.WorkerEnd - } - s.queue(ctx, ts) - return actor.WorkerContinue - case ts, ok := <-s.metaInbox.ReceiveC(): - if !ok { - level.Debug(s.logger).Log("msg", "meta inbox closed") - return actor.WorkerEnd - } - err := s.metadata.seriesMbx.Send(ctx, ts) - if err != nil { - level.Error(s.logger).Log("msg", "failed to send to metadata loop", "err", err) - } - return actor.WorkerContinue - case cfg, ok := <-s.configInbox.ReceiveC(): - // We need to also check the config here, else its possible this will deadlock. - if !ok { - level.Debug(s.logger).Log("msg", "config inbox closed") - return actor.WorkerEnd - } - s.updateConfig(cfg.cc) - // Notify the caller we have applied the config. - cfg.done <- struct{}{} - return actor.WorkerContinue - } -} - -func (s *manager) updateConfig(cc types.ConnectionConfig) { - // No need to do anything if the configuration is the same. - if s.cfg.Equals(cc) { - return - } - s.cfg = cc - // TODO @mattdurham make this smarter, at the moment any samples in the loops are lost. - // Ideally we would drain the queues and re add them but that is a future need. - // In practice this shouldn't change often so data loss should be minimal. - // For the moment we will stop all the items and recreate them. 
- level.Debug(s.logger).Log("msg", "dropping all series in loops and creating queue due to config change") - s.stopLoops() - s.loops = make([]*loop, 0, s.cfg.Connections) - for i := uint(0); i < s.cfg.Connections; i++ { - l := newLoop(cc, false, s.logger, s.stats) - l.self = actor.New(l) - s.loops = append(s.loops, l) - } - - s.metadata = newLoop(cc, true, s.logger, s.metaStats) - s.metadata.self = actor.New(s.metadata) - level.Debug(s.logger).Log("msg", "starting loops") - s.startLoops() - level.Debug(s.logger).Log("msg", "loops started") -} - -func (s *manager) Stop() { - s.stopLoops() - s.configInbox.Stop() - s.metaInbox.Stop() - s.inbox.Stop() - s.self.Stop() -} - -func (s *manager) stopLoops() { - for _, l := range s.loops { - l.Stop() - } - s.metadata.Stop() -} - -func (s *manager) startLoops() { - for _, l := range s.loops { - l.Start() - } - s.metadata.Start() -} - -// Queue adds anything thats not metadata to the queue. -func (s *manager) queue(ctx context.Context, ts *types.TimeSeriesBinary) { - // Based on a hash which is the label hash add to the queue. - queueNum := ts.Hash % uint64(s.cfg.Connections) - // This will block if the queue is full. 
- err := s.loops[queueNum].seriesMbx.Send(ctx, ts) - if err != nil { - level.Error(s.logger).Log("msg", "failed to send to loop", "err", err) - } -} diff --git a/internal/component/prometheus/write/queue/network/manager_test.go b/internal/component/prometheus/write/queue/network/manager_test.go deleted file mode 100644 index 947608021a..0000000000 --- a/internal/component/prometheus/write/queue/network/manager_test.go +++ /dev/null @@ -1,311 +0,0 @@ -package network - -import ( - "context" - "github.com/grafana/alloy/internal/util" - "io" - "math/rand" - "net/http" - "net/http/httptest" - "testing" - "time" - - "github.com/go-kit/log" - "github.com/golang/snappy" - "github.com/grafana/alloy/internal/component/prometheus/write/queue/types" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/prompb" - "github.com/stretchr/testify/require" - "go.uber.org/atomic" - "go.uber.org/goleak" -) - -func TestSending(t *testing.T) { - defer goleak.VerifyNone(t) - - recordsFound := atomic.Uint32{} - svr := httptest.NewServer(handler(t, http.StatusOK, func(wr *prompb.WriteRequest) { - recordsFound.Add(uint32(len(wr.Timeseries))) - })) - - defer svr.Close() - ctx := context.Background() - ctx, cncl := context.WithCancel(ctx) - defer cncl() - - cc := types.ConnectionConfig{ - URL: svr.URL, - Timeout: 1 * time.Second, - BatchCount: 10, - FlushInterval: 1 * time.Second, - Connections: 4, - } - - logger := log.NewNopLogger() - wr, err := New(cc, logger, func(s types.NetworkStats) {}, func(s types.NetworkStats) {}) - wr.Start() - defer wr.Stop() - require.NoError(t, err) - for i := 0; i < 1_000; i++ { - send(t, wr, ctx) - } - require.Eventually(t, func() bool { - return recordsFound.Load() == 1_000 - }, 10*time.Second, 100*time.Millisecond) -} - -func TestUpdatingConfig(t *testing.T) { - defer goleak.VerifyNone(t) - - recordsFound := atomic.Uint32{} - lastBatchSize := atomic.Uint32{} - svr := httptest.NewServer(handler(t, http.StatusOK, func(wr 
*prompb.WriteRequest) { - lastBatchSize.Store(uint32(len(wr.Timeseries))) - recordsFound.Add(uint32(len(wr.Timeseries))) - })) - - defer svr.Close() - - cc := types.ConnectionConfig{ - URL: svr.URL, - Timeout: 1 * time.Second, - BatchCount: 10, - FlushInterval: 5 * time.Second, - Connections: 1, - } - - logger := util.TestAlloyLogger(t) - - wr, err := New(cc, logger, func(s types.NetworkStats) {}, func(s types.NetworkStats) {}) - require.NoError(t, err) - wr.Start() - defer wr.Stop() - - cc2 := types.ConnectionConfig{ - URL: svr.URL, - Timeout: 1 * time.Second, - BatchCount: 20, - FlushInterval: 5 * time.Second, - Connections: 1, - } - ctx := context.Background() - err = wr.UpdateConfig(ctx, cc2) - require.NoError(t, err) - time.Sleep(1 * time.Second) - for i := 0; i < 100; i++ { - send(t, wr, ctx) - } - require.Eventuallyf(t, func() bool { - return recordsFound.Load() == 100 - }, 20*time.Second, 1*time.Second, "record count should be 100 but is %d", recordsFound.Load()) - - require.Truef(t, lastBatchSize.Load() == 20, "batch_count should be 20 but is %d", lastBatchSize.Load()) -} - -func TestRetry(t *testing.T) { - defer goleak.VerifyNone(t) - - retries := atomic.Uint32{} - var previous *prompb.WriteRequest - svr := httptest.NewServer(handler(t, http.StatusTooManyRequests, func(wr *prompb.WriteRequest) { - retries.Add(1) - // Check that we are getting the same sample back. 
- if previous == nil { - previous = wr - } else { - require.True(t, previous.Timeseries[0].Labels[0].Value == wr.Timeseries[0].Labels[0].Value) - } - })) - defer svr.Close() - ctx := context.Background() - ctx, cncl := context.WithCancel(ctx) - defer cncl() - - cc := types.ConnectionConfig{ - URL: svr.URL, - Timeout: 1 * time.Second, - BatchCount: 1, - FlushInterval: 1 * time.Second, - RetryBackoff: 100 * time.Millisecond, - Connections: 1, - } - - logger := log.NewNopLogger() - wr, err := New(cc, logger, func(s types.NetworkStats) {}, func(s types.NetworkStats) {}) - require.NoError(t, err) - wr.Start() - defer wr.Stop() - send(t, wr, ctx) - - require.Eventually(t, func() bool { - done := retries.Load() > 5 - return done - }, 10*time.Second, 1*time.Second) -} - -func TestRetryBounded(t *testing.T) { - defer goleak.VerifyNone(t) - - sends := atomic.Uint32{} - svr := httptest.NewServer(handler(t, http.StatusTooManyRequests, func(wr *prompb.WriteRequest) { - sends.Add(1) - })) - - defer svr.Close() - ctx := context.Background() - ctx, cncl := context.WithCancel(ctx) - defer cncl() - - cc := types.ConnectionConfig{ - URL: svr.URL, - Timeout: 1 * time.Second, - BatchCount: 1, - FlushInterval: 1 * time.Second, - RetryBackoff: 100 * time.Millisecond, - MaxRetryAttempts: 1, - Connections: 1, - } - - logger := log.NewNopLogger() - wr, err := New(cc, logger, func(s types.NetworkStats) {}, func(s types.NetworkStats) {}) - wr.Start() - defer wr.Stop() - require.NoError(t, err) - for i := 0; i < 10; i++ { - send(t, wr, ctx) - } - require.Eventually(t, func() bool { - // We send 10 but each one gets retried once so 20 total. - return sends.Load() == 10*2 - }, 2*time.Second, 100*time.Millisecond) - time.Sleep(2 * time.Second) - // Ensure we dont get any more. 
- require.True(t, sends.Load() == 10*2) -} - -func TestRecoverable(t *testing.T) { - defer goleak.VerifyNone(t) - - recoverable := atomic.Uint32{} - svr := httptest.NewServer(handler(t, http.StatusInternalServerError, func(wr *prompb.WriteRequest) { - })) - defer svr.Close() - ctx := context.Background() - ctx, cncl := context.WithCancel(ctx) - defer cncl() - - cc := types.ConnectionConfig{ - URL: svr.URL, - Timeout: 1 * time.Second, - BatchCount: 1, - FlushInterval: 1 * time.Second, - RetryBackoff: 100 * time.Millisecond, - MaxRetryAttempts: 1, - Connections: 1, - } - - logger := log.NewNopLogger() - wr, err := New(cc, logger, func(s types.NetworkStats) { - recoverable.Add(uint32(s.Total5XX())) - }, func(s types.NetworkStats) {}) - require.NoError(t, err) - wr.Start() - defer wr.Stop() - for i := 0; i < 10; i++ { - send(t, wr, ctx) - } - require.Eventually(t, func() bool { - // We send 10 but each one gets retried once so 20 total. - return recoverable.Load() == 10*2 - }, 2*time.Second, 100*time.Millisecond) - time.Sleep(2 * time.Second) - // Ensure we dont get any more. 
- require.True(t, recoverable.Load() == 10*2) -} - -func TestNonRecoverable(t *testing.T) { - defer goleak.VerifyNone(t) - - nonRecoverable := atomic.Uint32{} - svr := httptest.NewServer(handler(t, http.StatusBadRequest, func(wr *prompb.WriteRequest) { - })) - - defer svr.Close() - ctx := context.Background() - ctx, cncl := context.WithCancel(ctx) - defer cncl() - - cc := types.ConnectionConfig{ - URL: svr.URL, - Timeout: 1 * time.Second, - BatchCount: 1, - FlushInterval: 1 * time.Second, - RetryBackoff: 100 * time.Millisecond, - MaxRetryAttempts: 1, - Connections: 1, - } - - logger := log.NewNopLogger() - wr, err := New(cc, logger, func(s types.NetworkStats) { - nonRecoverable.Add(uint32(s.TotalFailed())) - }, func(s types.NetworkStats) {}) - wr.Start() - defer wr.Stop() - require.NoError(t, err) - for i := 0; i < 10; i++ { - send(t, wr, ctx) - } - require.Eventually(t, func() bool { - return nonRecoverable.Load() == 10 - }, 2*time.Second, 100*time.Millisecond) - time.Sleep(2 * time.Second) - // Ensure we dont get any more. - require.True(t, nonRecoverable.Load() == 10) -} - -func send(t *testing.T, wr types.NetworkClient, ctx context.Context) { - ts := createSeries(t) - // The actual hash is only used for queueing into different buckets. 
- err := wr.SendSeries(ctx, ts) - require.NoError(t, err) -} - -func handler(t *testing.T, code int, callback func(wr *prompb.WriteRequest)) http.HandlerFunc { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - buf, err := io.ReadAll(r.Body) - require.NoError(t, err) - defer r.Body.Close() - decoded, err := snappy.Decode(nil, buf) - require.NoError(t, err) - - wr := &prompb.WriteRequest{} - err = wr.Unmarshal(decoded) - require.NoError(t, err) - callback(wr) - w.WriteHeader(code) - }) -} - -func createSeries(_ *testing.T) *types.TimeSeriesBinary { - ts := &types.TimeSeriesBinary{ - TS: time.Now().Unix(), - Value: 1, - Labels: []labels.Label{ - { - Name: "__name__", - Value: randSeq(10), - }, - }, - } - return ts -} - -var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") - -func randSeq(n int) string { - b := make([]rune, n) - for i := range b { - b[i] = letters[rand.Intn(len(letters))] - } - return string(b) -} diff --git a/internal/component/prometheus/write/queue/network/stats.go b/internal/component/prometheus/write/queue/network/stats.go deleted file mode 100644 index 345069e1cb..0000000000 --- a/internal/component/prometheus/write/queue/network/stats.go +++ /dev/null @@ -1,126 +0,0 @@ -package network - -import ( - "net/http" - - "github.com/grafana/alloy/internal/component/prometheus/write/queue/types" -) - -// recordStats determines what values to send to the stats function. This allows for any -// number of metrics/signals libraries to be used. Prometheus, OTel, and any other. 
-func recordStats(series []*types.TimeSeriesBinary, isMeta bool, stats func(s types.NetworkStats), r sendResult, bytesSent int) { - seriesCount := getSeriesCount(series) - histogramCount := getHistogramCount(series) - metadataCount := getMetadataCount(series) - switch { - case r.networkError: - stats(types.NetworkStats{ - Series: types.CategoryStats{ - NetworkSamplesFailed: seriesCount, - }, - Histogram: types.CategoryStats{ - NetworkSamplesFailed: histogramCount, - }, - Metadata: types.CategoryStats{ - NetworkSamplesFailed: metadataCount, - }, - }) - case r.successful: - // Need to grab the newest series. - var newestTS int64 - for _, ts := range series { - if ts.TS > newestTS { - newestTS = ts.TS - } - } - var sampleBytesSent int - var metaBytesSent int - // Each loop is explicitly a normal signal or metadata sender. - if isMeta { - metaBytesSent = bytesSent - } else { - sampleBytesSent = bytesSent - } - stats(types.NetworkStats{ - Series: types.CategoryStats{ - SeriesSent: seriesCount, - }, - Histogram: types.CategoryStats{ - SeriesSent: histogramCount, - }, - Metadata: types.CategoryStats{ - SeriesSent: metadataCount, - }, - MetadataBytes: metaBytesSent, - SeriesBytes: sampleBytesSent, - NewestTimestamp: newestTS, - }) - case r.statusCode == http.StatusTooManyRequests: - stats(types.NetworkStats{ - Series: types.CategoryStats{ - RetriedSamples: seriesCount, - RetriedSamples429: seriesCount, - }, - Histogram: types.CategoryStats{ - RetriedSamples: histogramCount, - RetriedSamples429: histogramCount, - }, - Metadata: types.CategoryStats{ - RetriedSamples: metadataCount, - RetriedSamples429: metadataCount, - }, - }) - case r.statusCode/100 == 5: - stats(types.NetworkStats{ - Series: types.CategoryStats{ - RetriedSamples5XX: seriesCount, - }, - Histogram: types.CategoryStats{ - RetriedSamples5XX: histogramCount, - }, - Metadata: types.CategoryStats{ - RetriedSamples: metadataCount, - }, - }) - case r.statusCode != 200: - stats(types.NetworkStats{ - Series: 
types.CategoryStats{ - FailedSamples: seriesCount, - }, - Histogram: types.CategoryStats{ - FailedSamples: histogramCount, - }, - Metadata: types.CategoryStats{ - FailedSamples: metadataCount, - }, - }) - } - -} - -func getSeriesCount(tss []*types.TimeSeriesBinary) int { - cnt := 0 - for _, ts := range tss { - // This is metadata - if isMetadata(ts) { - continue - } - if ts.Histograms.Histogram == nil && ts.Histograms.FloatHistogram == nil { - cnt++ - } - } - return cnt -} - -func getHistogramCount(tss []*types.TimeSeriesBinary) int { - cnt := 0 - for _, ts := range tss { - if isMetadata(ts) { - continue - } - if ts.Histograms.Histogram != nil || ts.Histograms.FloatHistogram != nil { - cnt++ - } - } - return cnt -} diff --git a/internal/component/prometheus/write/queue/serialization/appender.go b/internal/component/prometheus/write/queue/serialization/appender.go deleted file mode 100644 index 3e8515a19b..0000000000 --- a/internal/component/prometheus/write/queue/serialization/appender.go +++ /dev/null @@ -1,130 +0,0 @@ -package serialization - -import ( - "context" - "fmt" - "time" - - "github.com/go-kit/log" - "github.com/grafana/alloy/internal/component/prometheus/write/queue/types" - "github.com/prometheus/prometheus/model/exemplar" - "github.com/prometheus/prometheus/model/histogram" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/model/metadata" - "github.com/prometheus/prometheus/storage" -) - -type appender struct { - ctx context.Context - ttl time.Duration - s types.Serializer - logger log.Logger -} - -func (a *appender) AppendCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64) (storage.SeriesRef, error) { - // TODO @mattdurham figure out what to do here later. This mirrors what we do elsewhere. - return ref, nil -} - -// NewAppender returns an Appender that writes to a given serializer. 
NOTE the returned Appender writes -// data immediately, discards data older than `ttl` and does not honor commit or rollback. -func NewAppender(ctx context.Context, ttl time.Duration, s types.Serializer, logger log.Logger) storage.Appender { - app := &appender{ - ttl: ttl, - s: s, - logger: logger, - ctx: ctx, - } - return app -} - -// Append metric -func (a *appender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) { - // Check to see if the TTL has expired for this record. - endTime := time.Now().Unix() - int64(a.ttl.Seconds()) - if t < endTime { - return ref, nil - } - ts := types.GetTimeSeriesFromPool() - ts.Labels = l - ts.TS = t - ts.Value = v - ts.Hash = l.Hash() - err := a.s.SendSeries(a.ctx, ts) - return ref, err -} - -// Commit is a no op since we always write. -func (a *appender) Commit() (_ error) { - return nil -} - -// Rollback is a no op since we write all the data. -func (a *appender) Rollback() error { - return nil -} - -// AppendExemplar appends exemplar to cache. The passed in labels is unused, instead use the labels on the exemplar. 
-func (a *appender) AppendExemplar(ref storage.SeriesRef, _ labels.Labels, e exemplar.Exemplar) (_ storage.SeriesRef, _ error) { - endTime := time.Now().Unix() - int64(a.ttl.Seconds()) - if e.HasTs && e.Ts < endTime { - return ref, nil - } - ts := types.GetTimeSeriesFromPool() - ts.Hash = e.Labels.Hash() - ts.TS = e.Ts - ts.Labels = e.Labels - ts.Hash = e.Labels.Hash() - err := a.s.SendSeries(a.ctx, ts) - return ref, err -} - -// AppendHistogram appends histogram -func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (_ storage.SeriesRef, _ error) { - endTime := time.Now().Unix() - int64(a.ttl.Seconds()) - if t < endTime { - return ref, nil - } - ts := types.GetTimeSeriesFromPool() - ts.Labels = l - ts.TS = t - if h != nil { - ts.FromHistogram(t, h) - } else { - ts.FromFloatHistogram(t, fh) - } - ts.Hash = l.Hash() - err := a.s.SendSeries(a.ctx, ts) - return ref, err -} - -// UpdateMetadata updates metadata. -func (a *appender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (_ storage.SeriesRef, _ error) { - if !l.Has("__name__") { - return ref, fmt.Errorf("missing __name__ label for metadata") - } - ts := types.GetTimeSeriesFromPool() - // We are going to handle converting some strings to hopefully not reused label names. TimeSeriesBinary has a lot of work - // to ensure its efficient it makes sense to encode metadata into it. 
- combinedLabels := labels.EmptyLabels() - combinedLabels = append(combinedLabels, labels.Label{ - Name: types.MetaType, - Value: string(m.Type), - }) - combinedLabels = append(combinedLabels, labels.Label{ - Name: types.MetaHelp, - Value: m.Help, - }) - combinedLabels = append(combinedLabels, labels.Label{ - Name: types.MetaUnit, - Value: m.Unit, - }) - // We ONLY want __name__ from labels - combinedLabels = append(combinedLabels, labels.Label{ - Name: "__name__", - Value: l.Get("__name__"), - }) - ts.Labels = combinedLabels - err := a.s.SendMetadata(a.ctx, ts) - return ref, err -} diff --git a/internal/component/prometheus/write/queue/serialization/appender_test.go b/internal/component/prometheus/write/queue/serialization/appender_test.go deleted file mode 100644 index 0215eeee6e..0000000000 --- a/internal/component/prometheus/write/queue/serialization/appender_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package serialization - -import ( - "context" - log2 "github.com/go-kit/log" - "github.com/grafana/alloy/internal/component/prometheus/write/queue/types" - "github.com/prometheus/prometheus/model/labels" - "github.com/stretchr/testify/require" - "testing" - "time" -) - -func TestAppenderTTL(t *testing.T) { - fake := &counterSerializer{} - l := log2.NewNopLogger() - - app := NewAppender(context.Background(), 1*time.Minute, fake, l) - _, err := app.Append(0, labels.FromStrings("one", "two"), time.Now().Unix(), 0) - require.NoError(t, err) - - for i := 0; i < 10; i++ { - _, err = app.Append(0, labels.FromStrings("one", "two"), time.Now().Add(-5*time.Minute).Unix(), 0) - require.NoError(t, err) - } - // Only one record should make it through. 
- require.True(t, fake.received == 1) -} - -var _ types.Serializer = (*fakeSerializer)(nil) - -type counterSerializer struct { - received int -} - -func (f *counterSerializer) Start() { - -} - -func (f *counterSerializer) Stop() { - -} - -func (f *counterSerializer) SendSeries(ctx context.Context, data *types.TimeSeriesBinary) error { - f.received++ - return nil - -} - -func (f *counterSerializer) SendMetadata(ctx context.Context, data *types.TimeSeriesBinary) error { - return nil -} - -func (f *counterSerializer) UpdateConfig(ctx context.Context, data types.SerializerConfig) error { - return nil -} diff --git a/internal/component/prometheus/write/queue/serialization/serializer.go b/internal/component/prometheus/write/queue/serialization/serializer.go deleted file mode 100644 index 71c96cb3f9..0000000000 --- a/internal/component/prometheus/write/queue/serialization/serializer.go +++ /dev/null @@ -1,222 +0,0 @@ -package serialization - -import ( - "context" - "fmt" - "strconv" - "time" - - snappy "github.com/eapache/go-xerial-snappy" - "github.com/go-kit/log" - "github.com/grafana/alloy/internal/component/prometheus/write/queue/types" - "github.com/grafana/alloy/internal/runtime/logging/level" - "github.com/vladopajic/go-actor/actor" - "go.uber.org/atomic" -) - -// serializer collects data from multiple appenders in-memory and will periodically flush the data to file.Storage. -// serializer will flush based on configured time duration OR if it hits a certain number of items. -type serializer struct { - inbox actor.Mailbox[*types.TimeSeriesBinary] - metaInbox actor.Mailbox[*types.TimeSeriesBinary] - cfgInbox actor.Mailbox[types.SerializerConfig] - maxItemsBeforeFlush int - flushFrequency time.Duration - queue types.FileStorage - lastFlush time.Time - logger log.Logger - self actor.Actor - // Every 1 second we should check if we need to flush. 
- flushTestTimer *time.Ticker - series []*types.TimeSeriesBinary - meta []*types.TimeSeriesBinary - msgpBuffer []byte - stats func(stats types.SerializerStats) - stopped *atomic.Bool -} - -func NewSerializer(cfg types.SerializerConfig, q types.FileStorage, stats func(stats types.SerializerStats), l log.Logger) (types.Serializer, error) { - s := &serializer{ - maxItemsBeforeFlush: int(cfg.MaxSignalsInBatch), - flushFrequency: cfg.FlushFrequency, - queue: q, - series: make([]*types.TimeSeriesBinary, 0), - logger: l, - inbox: actor.NewMailbox[*types.TimeSeriesBinary](), - metaInbox: actor.NewMailbox[*types.TimeSeriesBinary](), - cfgInbox: actor.NewMailbox[types.SerializerConfig](), - flushTestTimer: time.NewTicker(1 * time.Second), - msgpBuffer: make([]byte, 0), - lastFlush: time.Now(), - stats: stats, - stopped: atomic.NewBool(false), - } - - return s, nil -} -func (s *serializer) Start() { - // All the actors and mailboxes need to start. - s.queue.Start() - s.self = actor.Combine(actor.New(s), s.inbox, s.metaInbox, s.cfgInbox).Build() - s.self.Start() -} - -func (s *serializer) Stop() { - s.stopped.Store(true) - s.queue.Stop() - s.self.Stop() -} - -func (s *serializer) SendSeries(ctx context.Context, data *types.TimeSeriesBinary) error { - if s.stopped.Load() { - return fmt.Errorf("serializer is stopped") - } - return s.inbox.Send(ctx, data) -} - -func (s *serializer) SendMetadata(ctx context.Context, data *types.TimeSeriesBinary) error { - if s.stopped.Load() { - return fmt.Errorf("serializer is stopped") - } - return s.metaInbox.Send(ctx, data) -} - -func (s *serializer) UpdateConfig(ctx context.Context, cfg types.SerializerConfig) error { - if s.stopped.Load() { - return fmt.Errorf("serializer is stopped") - } - return s.cfgInbox.Send(ctx, cfg) -} - -func (s *serializer) DoWork(ctx actor.Context) actor.WorkerStatus { - // Check for config which should have priority. 
Selector is random but since incoming - // series will always have a queue by explicitly checking the config here we always give it a chance. - // By pulling the config from the mailbox we ensure it does NOT need a mutex around access. - select { - case <-ctx.Done(): - return actor.WorkerEnd - case cfg, ok := <-s.cfgInbox.ReceiveC(): - if !ok { - return actor.WorkerEnd - } - s.maxItemsBeforeFlush = int(cfg.MaxSignalsInBatch) - s.flushFrequency = cfg.FlushFrequency - return actor.WorkerContinue - default: - } - - select { - case <-ctx.Done(): - return actor.WorkerEnd - case item, ok := <-s.inbox.ReceiveC(): - if !ok { - return actor.WorkerEnd - } - s.series = append(s.series, item) - // If we would go over the max size then send, or if we have hit the flush duration then send. - if len(s.meta)+len(s.series) >= s.maxItemsBeforeFlush { - err := s.flushToDisk(ctx) - if err != nil { - level.Error(s.logger).Log("msg", "unable to append to serializer", "err", err) - } - } - - return actor.WorkerContinue - case item, ok := <-s.metaInbox.ReceiveC(): - if !ok { - return actor.WorkerEnd - } - s.meta = append(s.meta, item) - if len(s.meta)+len(s.series) >= s.maxItemsBeforeFlush { - err := s.flushToDisk(ctx) - if err != nil { - level.Error(s.logger).Log("msg", "unable to append metadata to serializer", "err", err) - } - } - return actor.WorkerContinue - case <-s.flushTestTimer.C: - if time.Since(s.lastFlush) > s.flushFrequency { - err := s.flushToDisk(ctx) - if err != nil { - level.Error(s.logger).Log("msg", "unable to store data", "err", err) - } - } - return actor.WorkerContinue - } -} - -func (s *serializer) flushToDisk(ctx actor.Context) error { - var err error - defer func() { - s.lastFlush = time.Now() - }() - // Do nothing if there is nothing. 
- if len(s.series) == 0 && len(s.meta) == 0 { - return nil - } - group := &types.SeriesGroup{ - Series: make([]*types.TimeSeriesBinary, len(s.series)), - Metadata: make([]*types.TimeSeriesBinary, len(s.meta)), - } - defer func() { - s.storeStats(err) - // Return series to the pool, this is key to reducing allocs. - types.PutTimeSeriesSliceIntoPool(s.series) - types.PutTimeSeriesSliceIntoPool(s.meta) - s.series = s.series[:0] - s.meta = s.meta[:0] - }() - - // This maps strings to index position in a slice. This is doing to reduce the file size of the data. - strMapToIndex := make(map[string]uint32) - for i, ts := range s.series { - ts.FillLabelMapping(strMapToIndex) - group.Series[i] = ts - } - for i, ts := range s.meta { - ts.FillLabelMapping(strMapToIndex) - group.Metadata[i] = ts - } - - stringsSlice := make([]string, len(strMapToIndex)) - for stringValue, index := range strMapToIndex { - stringsSlice[index] = stringValue - } - group.Strings = stringsSlice - - buf, err := group.MarshalMsg(s.msgpBuffer) - if err != nil { - return err - } - - out := snappy.Encode(buf) - meta := map[string]string{ - // product.signal_type.schema.version - "version": types.AlloyFileVersion, - "compression": "snappy", - "series_count": strconv.Itoa(len(group.Series)), - "meta_count": strconv.Itoa(len(group.Metadata)), - "strings_count": strconv.Itoa(len(group.Strings)), - } - err = s.queue.Store(ctx, meta, out) - return err -} - -func (s *serializer) storeStats(err error) { - hasError := 0 - if err != nil { - hasError = 1 - } - newestTS := int64(0) - for _, ts := range s.series { - if ts.TS > newestTS { - newestTS = ts.TS - } - } - s.stats(types.SerializerStats{ - SeriesStored: len(s.series), - MetadataStored: len(s.meta), - Errors: hasError, - NewestTimestamp: newestTS, - }) -} diff --git a/internal/component/prometheus/write/queue/serialization/serializer_bench_test.go b/internal/component/prometheus/write/queue/serialization/serializer_bench_test.go deleted file mode 100644 index 
8d30591159..0000000000 --- a/internal/component/prometheus/write/queue/serialization/serializer_bench_test.go +++ /dev/null @@ -1,117 +0,0 @@ -package serialization - -import ( - "context" - "fmt" - "math/rand" - "testing" - "time" - - "github.com/go-kit/log" - "github.com/grafana/alloy/internal/component/prometheus/write/queue/types" - "github.com/prometheus/prometheus/model/labels" -) - -var lbls = labels.FromStrings("one", "two", "three", "four") - -func BenchmarkAppender(b *testing.B) { - // This should be 0 allocs - b.ReportAllocs() - logger := log.NewNopLogger() - for i := 0; i < b.N; i++ { - app := NewAppender(context.Background(), 1*time.Hour, &fakeSerializer{}, logger) - for j := 0; j < 10_000; j++ { - _, _ = app.Append(0, lbls, time.Now().Unix(), 1.1) - } - _ = app.Commit() - } -} - -func BenchmarkSerializer(b *testing.B) { - b.ResetTimer() - b.ReportAllocs() - // This should be ~11 allocs and 1400-1800 ns/op. - logger := log.NewNopLogger() - serial, _ := NewSerializer(types.SerializerConfig{ - MaxSignalsInBatch: 1_000, - FlushFrequency: 1 * time.Second, - }, &fakeFileQueue{}, func(stats types.SerializerStats) {}, logger) - serial.Start() - for i := 0; i < b.N; i++ { - _ = serial.SendSeries(context.Background(), getSingleTimeSeries(b)) - } - serial.Stop() -} - -func getTimeSeries(b *testing.B) []*types.TimeSeriesBinary { - b.Helper() - series := make([]*types.TimeSeriesBinary, 0) - for j := 0; j < 10_000; j++ { - timeseries := types.GetTimeSeriesFromPool() - timeseries.TS = time.Now().Unix() - timeseries.Value = rand.Float64() - timeseries.Labels = getLabels() - series = append(series, timeseries) - } - return series -} - -func getSingleTimeSeries(b *testing.B) *types.TimeSeriesBinary { - b.Helper() - timeseries := types.GetTimeSeriesFromPool() - timeseries.TS = time.Now().Unix() - timeseries.Value = rand.Float64() - timeseries.Labels = getLabels() - return timeseries - -} - -func getLabels() labels.Labels { - retLbls := make(labels.Labels, 0) - for i := 
0; i < rand.Intn(20); i++ { - l := labels.Label{ - Name: fmt.Sprintf("label_%d", i), - Value: fmt.Sprintf("value_%d", i), - } - retLbls = append(retLbls, l) - } - return retLbls -} - -var _ types.Serializer = (*fakeSerializer)(nil) - -type fakeSerializer struct{} - -func (f *fakeSerializer) UpdateConfig(ctx context.Context, cfg types.SerializerConfig) error { - return nil -} - -func (f *fakeSerializer) Start() {} - -func (f *fakeSerializer) Stop() {} - -func (f *fakeSerializer) SendSeries(ctx context.Context, data *types.TimeSeriesBinary) error { - types.PutTimeSeriesIntoPool(data) - return nil -} - -func (f *fakeSerializer) SendMetadata(ctx context.Context, data *types.TimeSeriesBinary) error { - types.PutTimeSeriesIntoPool(data) - return nil -} - -var _ types.FileStorage = (*fakeFileQueue)(nil) - -type fakeFileQueue struct{} - -func (f fakeFileQueue) Start() { - -} - -func (f fakeFileQueue) Stop() { - -} - -func (f fakeFileQueue) Store(ctx context.Context, meta map[string]string, value []byte) error { - return nil -} diff --git a/internal/component/prometheus/write/queue/serialization/serializer_test.go b/internal/component/prometheus/write/queue/serialization/serializer_test.go deleted file mode 100644 index 80054a24a0..0000000000 --- a/internal/component/prometheus/write/queue/serialization/serializer_test.go +++ /dev/null @@ -1,113 +0,0 @@ -//go:build !race - -package serialization - -import ( - "context" - "fmt" - "sync/atomic" - "testing" - "time" - - "github.com/go-kit/log" - "github.com/golang/snappy" - "github.com/grafana/alloy/internal/component/prometheus/write/queue/types" - "github.com/prometheus/prometheus/model/labels" - "github.com/stretchr/testify/require" -) - -func TestRoundTripSerialization(t *testing.T) { - totalSeries := atomic.Int64{} - f := &fqq{t: t} - l := log.NewNopLogger() - start := time.Now().Add(-1 * time.Second).Unix() - - s, err := NewSerializer(types.SerializerConfig{ - MaxSignalsInBatch: 10, - FlushFrequency: 5 * time.Second, - 
}, f, func(stats types.SerializerStats) { - totalSeries.Add(int64(stats.SeriesStored)) - require.True(t, stats.SeriesStored == 10) - require.True(t, stats.Errors == 0) - require.True(t, stats.MetadataStored == 0) - require.True(t, stats.NewestTimestamp > start) - }, l) - require.NoError(t, err) - - s.Start() - defer s.Stop() - for i := 0; i < 100; i++ { - tss := types.GetTimeSeriesFromPool() - tss.Labels = make(labels.Labels, 10) - for j := 0; j < 10; j++ { - tss.Labels[j] = labels.Label{ - Name: fmt.Sprintf("name_%d_%d", i, j), - Value: fmt.Sprintf("value_%d_%d", i, j), - } - tss.Value = float64(i) - tss.TS = time.Now().Unix() - } - sendErr := s.SendSeries(context.Background(), tss) - require.NoError(t, sendErr) - } - require.Eventually(t, func() bool { - return f.total.Load() == 100 - }, 5*time.Second, 100*time.Millisecond) - // 100 series send from the above for loop - require.True(t, totalSeries.Load() == 100) -} - -func TestUpdateConfig(t *testing.T) { - f := &fqq{t: t} - l := log.NewNopLogger() - s, err := NewSerializer(types.SerializerConfig{ - MaxSignalsInBatch: 10, - FlushFrequency: 5 * time.Second, - }, f, func(stats types.SerializerStats) {}, l) - require.NoError(t, err) - s.Start() - defer s.Stop() - err = s.UpdateConfig(context.Background(), types.SerializerConfig{ - MaxSignalsInBatch: 1, - FlushFrequency: 1 * time.Second, - }) - require.NoError(t, err) - require.Eventually(t, func() bool { - return s.(*serializer).maxItemsBeforeFlush == 1 && s.(*serializer).flushFrequency == 1*time.Second - }, 5*time.Second, 100*time.Millisecond) -} - -var _ types.FileStorage = (*fqq)(nil) - -type fqq struct { - t *testing.T - buf []byte - total atomic.Int64 -} - -func (f *fqq) Start() { - -} - -func (f *fqq) Stop() { - -} - -func (f *fqq) Store(ctx context.Context, meta map[string]string, value []byte) error { - f.buf, _ = snappy.Decode(nil, value) - sg := &types.SeriesGroup{} - sg, _, err := types.DeserializeToSeriesGroup(sg, f.buf) - require.NoError(f.t, err) - 
require.Len(f.t, sg.Series, 10) - for _, series := range sg.Series { - require.Len(f.t, series.LabelsNames, 0) - require.Len(f.t, series.LabelsValues, 0) - require.Len(f.t, series.Labels, 10) - for j := 0; j < 10; j++ { - series.Labels[j].Name = fmt.Sprintf("name_%d_%d", int(series.Value), j) - series.Labels[j].Value = fmt.Sprintf("value_%d_%d", int(series.Value), j) - } - } - f.total.Add(int64(len(sg.Series))) - return nil -} diff --git a/internal/component/prometheus/write/queue/types.go b/internal/component/prometheus/write/queue/types.go index b56e391d3c..ffffe287ea 100644 --- a/internal/component/prometheus/write/queue/types.go +++ b/internal/component/prometheus/write/queue/types.go @@ -4,8 +4,8 @@ import ( "fmt" "time" - "github.com/grafana/alloy/internal/component/prometheus/write/queue/types" "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/walqueue/types" "github.com/prometheus/common/version" "github.com/prometheus/prometheus/storage" ) @@ -96,7 +96,7 @@ var UserAgent = fmt.Sprintf("Alloy/%s", version.Version) func (cc EndpointConfig) ToNativeType() types.ConnectionConfig { tcc := types.ConnectionConfig{ URL: cc.URL, - BearerToken: cc.BearerToken, + BearerToken: string(cc.BearerToken), UserAgent: UserAgent, Timeout: cc.Timeout, RetryBackoff: cc.RetryBackoff, diff --git a/internal/component/prometheus/write/queue/types/messages.go b/internal/component/prometheus/write/queue/types/messages.go deleted file mode 100644 index 30c37961c7..0000000000 --- a/internal/component/prometheus/write/queue/types/messages.go +++ /dev/null @@ -1,12 +0,0 @@ -package types - -type Data struct { - Meta map[string]string - Data []byte -} - -type DataHandle struct { - Name string - // Pop will get the data and delete the source of the data. 
- Pop func() (map[string]string, []byte, error) -} diff --git a/internal/component/prometheus/write/queue/types/network.go b/internal/component/prometheus/write/queue/types/network.go deleted file mode 100644 index 23bbe4d2e7..0000000000 --- a/internal/component/prometheus/write/queue/types/network.go +++ /dev/null @@ -1,42 +0,0 @@ -package types - -import ( - "context" - "github.com/grafana/alloy/syntax/alloytypes" - "reflect" - "time" -) - -type NetworkClient interface { - Start() - Stop() - // SendSeries will block if the network caches are full. - SendSeries(ctx context.Context, d *TimeSeriesBinary) error - // SendMetadata will block if the network caches are full. - SendMetadata(ctx context.Context, d *TimeSeriesBinary) error - // UpdateConfig is a synchronous call and will only return once the config - // is applied or an error occurs. - UpdateConfig(ctx context.Context, cfg ConnectionConfig) error -} -type ConnectionConfig struct { - URL string - BasicAuth *BasicAuth - BearerToken alloytypes.Secret - UserAgent string - Timeout time.Duration - RetryBackoff time.Duration - MaxRetryAttempts uint - BatchCount int - FlushInterval time.Duration - ExternalLabels map[string]string - Connections uint -} - -type BasicAuth struct { - Username string - Password string -} - -func (cc ConnectionConfig) Equals(bb ConnectionConfig) bool { - return reflect.DeepEqual(cc, bb) -} diff --git a/internal/component/prometheus/write/queue/types/serialization.go b/internal/component/prometheus/write/queue/types/serialization.go deleted file mode 100644 index 80b2282f7d..0000000000 --- a/internal/component/prometheus/write/queue/types/serialization.go +++ /dev/null @@ -1,296 +0,0 @@ -//go:generate msgp -package types - -import ( - "sync" - - "github.com/prometheus/prometheus/model/histogram" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/prompb" - "go.uber.org/atomic" -) - -const MetaType = "__alloy_metadata_type__" -const MetaUnit = 
"__alloy_metadata_unit__" -const MetaHelp = "__alloy_metadata_help__" - -// SeriesGroup is the holder for TimeSeries, Metadata, and the strings array. -// When serialized the Labels Key,Value array will be transformed into -// LabelNames and LabelsValues that point to the index in Strings. -// This deduplicates the strings and decreases the size on disk. -type SeriesGroup struct { - Strings []string - Series []*TimeSeriesBinary - Metadata []*TimeSeriesBinary -} - -// TimeSeriesBinary is an optimized format for handling metrics and metadata. It should never be instantiated directly -// but instead use GetTimeSeriesFromPool and PutTimeSeriesSliceIntoPool. This allows us to reuse these objects and avoid -// allocations. -type TimeSeriesBinary struct { - // Labels are not serialized to msgp, instead we store separately a dictionary of strings and use `LabelNames` and `LabelValues` to refer to the dictionary by ID. - Labels labels.Labels `msg:"-"` - LabelsNames []uint32 - LabelsValues []uint32 - TS int64 - Value float64 - Hash uint64 - Histograms Histograms -} - -type Histograms struct { - Histogram *Histogram - FloatHistogram *FloatHistogram -} - -type Histogram struct { - Count HistogramCount - Sum float64 - Schema int32 - ZeroThreshold float64 - ZeroCount HistogramZeroCount - NegativeSpans []BucketSpan - NegativeBuckets []int64 - NegativeCounts []float64 - PositiveSpans []BucketSpan - PositiveBuckets []int64 - PositiveCounts []float64 - ResetHint int32 - TimestampMillisecond int64 -} - -type FloatHistogram struct { - Count HistogramCount - Sum float64 - Schema int32 - ZeroThreshold float64 - ZeroCount HistogramZeroCount - NegativeSpans []BucketSpan - NegativeDeltas []int64 - NegativeCounts []float64 - PositiveSpans []BucketSpan - PositiveDeltas []int64 - PositiveCounts []float64 - ResetHint int32 - TimestampMillisecond int64 -} - -type HistogramCount struct { - IsInt bool - IntValue uint64 - FloatValue float64 -} - -type HistogramZeroCount struct { - IsInt bool - 
IntValue uint64 - FloatValue float64 -} - -type BucketSpan struct { - Offset int32 - Length uint32 -} - -// IsMetadata is used because it's easier to store metadata as a set of labels. -func (ts TimeSeriesBinary) IsMetadata() bool { - return ts.Labels.Has("__alloy_metadata_type__") -} - -func (h *Histogram) ToPromHistogram() prompb.Histogram { - return prompb.Histogram{ - Count: &prompb.Histogram_CountInt{CountInt: h.Count.IntValue}, - Sum: h.Sum, - Schema: h.Schema, - ZeroThreshold: h.ZeroThreshold, - ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount.IntValue}, - NegativeSpans: ToPromBucketSpans(h.NegativeSpans), - NegativeDeltas: h.NegativeBuckets, - PositiveSpans: ToPromBucketSpans(h.PositiveSpans), - PositiveDeltas: h.PositiveBuckets, - ResetHint: prompb.Histogram_ResetHint(h.ResetHint), - Timestamp: h.TimestampMillisecond, - } -} - -func (h *FloatHistogram) ToPromFloatHistogram() prompb.Histogram { - return prompb.Histogram{ - Count: &prompb.Histogram_CountFloat{CountFloat: h.Count.FloatValue}, - Sum: h.Sum, - Schema: h.Schema, - ZeroThreshold: h.ZeroThreshold, - ZeroCount: &prompb.Histogram_ZeroCountFloat{ZeroCountFloat: h.ZeroCount.FloatValue}, - NegativeSpans: ToPromBucketSpans(h.NegativeSpans), - NegativeCounts: h.NegativeCounts, - PositiveSpans: ToPromBucketSpans(h.PositiveSpans), - PositiveCounts: h.PositiveCounts, - ResetHint: prompb.Histogram_ResetHint(h.ResetHint), - Timestamp: h.TimestampMillisecond, - } -} -func ToPromBucketSpans(bss []BucketSpan) []prompb.BucketSpan { - spans := make([]prompb.BucketSpan, len(bss)) - for i, bs := range bss { - spans[i] = bs.ToPromBucketSpan() - } - return spans -} - -func (bs *BucketSpan) ToPromBucketSpan() prompb.BucketSpan { - return prompb.BucketSpan{ - Offset: bs.Offset, - Length: bs.Length, - } -} - -func (ts *TimeSeriesBinary) FromHistogram(timestamp int64, h *histogram.Histogram) { - ts.Histograms.Histogram = &Histogram{ - Count: HistogramCount{IsInt: true, IntValue: h.Count}, - Sum: h.Sum, 
- Schema: h.Schema, - ZeroThreshold: h.ZeroThreshold, - ZeroCount: HistogramZeroCount{IsInt: true, IntValue: h.ZeroCount}, - NegativeSpans: FromPromSpan(h.NegativeSpans), - NegativeBuckets: h.NegativeBuckets, - PositiveSpans: FromPromSpan(h.PositiveSpans), - PositiveBuckets: h.PositiveBuckets, - ResetHint: int32(h.CounterResetHint), - TimestampMillisecond: timestamp, - } -} -func (ts *TimeSeriesBinary) FromFloatHistogram(timestamp int64, h *histogram.FloatHistogram) { - ts.Histograms.FloatHistogram = &FloatHistogram{ - Count: HistogramCount{IsInt: false, FloatValue: h.Count}, - Sum: h.Sum, - Schema: h.Schema, - ZeroThreshold: h.ZeroThreshold, - ZeroCount: HistogramZeroCount{IsInt: false, FloatValue: h.ZeroCount}, - NegativeSpans: FromPromSpan(h.NegativeSpans), - NegativeCounts: h.NegativeBuckets, - PositiveSpans: FromPromSpan(h.PositiveSpans), - PositiveCounts: h.PositiveBuckets, - ResetHint: int32(h.CounterResetHint), - TimestampMillisecond: timestamp, - } -} -func FromPromSpan(spans []histogram.Span) []BucketSpan { - bs := make([]BucketSpan, len(spans)) - for i, s := range spans { - bs[i].Offset = s.Offset - bs[i].Length = s.Length - } - return bs -} - -// FillLabelMapping is what does the conversion from labels.Labels to LabelNames and -// LabelValues while filling in the string map, that is later converted to []string. -func (ts *TimeSeriesBinary) FillLabelMapping(strMapToInt map[string]uint32) { - ts.LabelsNames = setSliceLength(ts.LabelsNames, len(ts.Labels)) - ts.LabelsValues = setSliceLength(ts.LabelsValues, len(ts.Labels)) - - // This is where we deduplicate the ts.Labels into uint32 values - // that map to a string in the strings slice via the index. 
- for i, v := range ts.Labels { - val, found := strMapToInt[v.Name] - if !found { - val = uint32(len(strMapToInt)) - strMapToInt[v.Name] = val - } - ts.LabelsNames[i] = val - - val, found = strMapToInt[v.Value] - if !found { - val = uint32(len(strMapToInt)) - strMapToInt[v.Value] = val - } - ts.LabelsValues[i] = val - } - -} - -func setSliceLength(lbls []uint32, length int) []uint32 { - if cap(lbls) <= length { - lbls = make([]uint32, length) - } else { - lbls = lbls[:length] - } - return lbls -} - -var tsBinaryPool = sync.Pool{ - New: func() any { - return &TimeSeriesBinary{} - }, -} - -func GetTimeSeriesFromPool() *TimeSeriesBinary { - OutStandingTimeSeriesBinary.Inc() - return tsBinaryPool.Get().(*TimeSeriesBinary) -} - -var OutStandingTimeSeriesBinary = atomic.Int32{} - -func PutTimeSeriesSliceIntoPool(tss []*TimeSeriesBinary) { - for i := 0; i < len(tss); i++ { - PutTimeSeriesIntoPool(tss[i]) - } - -} - -func PutTimeSeriesIntoPool(ts *TimeSeriesBinary) { - OutStandingTimeSeriesBinary.Dec() - ts.LabelsNames = ts.LabelsNames[:0] - ts.LabelsValues = ts.LabelsValues[:0] - ts.Labels = nil - ts.TS = 0 - ts.Value = 0 - ts.Hash = 0 - ts.Histograms.Histogram = nil - ts.Histograms.FloatHistogram = nil - tsBinaryPool.Put(ts) -} - -// DeserializeToSeriesGroup transforms a buffer to a SeriesGroup and converts the stringmap + indexes into actual Labels. -func DeserializeToSeriesGroup(sg *SeriesGroup, buf []byte) (*SeriesGroup, []byte, error) { - buffer, err := sg.UnmarshalMsg(buf) - if err != nil { - return sg, nil, err - } - // Need to fill in the labels. - for _, series := range sg.Series { - if cap(series.Labels) < len(series.LabelsNames) { - series.Labels = make(labels.Labels, len(series.LabelsNames)) - } else { - series.Labels = series.Labels[:len(series.LabelsNames)] - } - // Since the LabelNames/LabelValues are indexes into the Strings slice we can access it like the below. - // 1 Label corresponds to two entries, one in LabelsNames and one in LabelsValues. 
- for i := range series.LabelsNames { - series.Labels[i] = labels.Label{ - Name: sg.Strings[series.LabelsNames[i]], - Value: sg.Strings[series.LabelsValues[i]], - } - } - series.LabelsNames = series.LabelsNames[:0] - series.LabelsValues = series.LabelsValues[:0] - } - for _, series := range sg.Metadata { - if cap(series.Labels) < len(series.LabelsNames) { - series.Labels = make(labels.Labels, len(series.LabelsNames)) - } else { - series.Labels = series.Labels[:len(series.LabelsNames)] - } - for i := range series.LabelsNames { - series.Labels[i] = labels.Label{ - Name: sg.Strings[series.LabelsNames[i]], - Value: sg.Strings[series.LabelsValues[i]], - } - } - // Finally ensure we reset the labelnames and labelvalues. - series.LabelsNames = series.LabelsNames[:0] - series.LabelsValues = series.LabelsValues[:0] - } - - sg.Strings = sg.Strings[:0] - return sg, buffer, err -} diff --git a/internal/component/prometheus/write/queue/types/serialization_gen.go b/internal/component/prometheus/write/queue/types/serialization_gen.go deleted file mode 100644 index c31dd8d6a4..0000000000 --- a/internal/component/prometheus/write/queue/types/serialization_gen.go +++ /dev/null @@ -1,3294 +0,0 @@ -package types - -// Code generated by github.com/tinylib/msgp DO NOT EDIT. 
- -import ( - "github.com/tinylib/msgp/msgp" -) - -// DecodeMsg implements msgp.Decodable -func (z *BucketSpan) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "Offset": - z.Offset, err = dc.ReadInt32() - if err != nil { - err = msgp.WrapError(err, "Offset") - return - } - case "Length": - z.Length, err = dc.ReadUint32() - if err != nil { - err = msgp.WrapError(err, "Length") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z BucketSpan) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 2 - // write "Offset" - err = en.Append(0x82, 0xa6, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74) - if err != nil { - return - } - err = en.WriteInt32(z.Offset) - if err != nil { - err = msgp.WrapError(err, "Offset") - return - } - // write "Length" - err = en.Append(0xa6, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68) - if err != nil { - return - } - err = en.WriteUint32(z.Length) - if err != nil { - err = msgp.WrapError(err, "Length") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z BucketSpan) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 2 - // string "Offset" - o = append(o, 0x82, 0xa6, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74) - o = msgp.AppendInt32(o, z.Offset) - // string "Length" - o = append(o, 0xa6, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68) - o = msgp.AppendUint32(o, z.Length) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *BucketSpan) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = 
msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "Offset": - z.Offset, bts, err = msgp.ReadInt32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Offset") - return - } - case "Length": - z.Length, bts, err = msgp.ReadUint32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Length") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z BucketSpan) Msgsize() (s int) { - s = 1 + 7 + msgp.Int32Size + 7 + msgp.Uint32Size - return -} - -// DecodeMsg implements msgp.Decodable -func (z *FloatHistogram) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "Count": - var zb0002 uint32 - zb0002, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err, "Count") - return - } - for zb0002 > 0 { - zb0002-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err, "Count") - return - } - switch msgp.UnsafeString(field) { - case "IsInt": - z.Count.IsInt, err = dc.ReadBool() - if err != nil { - err = msgp.WrapError(err, "Count", "IsInt") - return - } - case "IntValue": - z.Count.IntValue, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "Count", "IntValue") - return - } - case "FloatValue": - z.Count.FloatValue, err = dc.ReadFloat64() - if err != nil { - err = msgp.WrapError(err, "Count", "FloatValue") - return - } 
- default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err, "Count") - return - } - } - } - case "Sum": - z.Sum, err = dc.ReadFloat64() - if err != nil { - err = msgp.WrapError(err, "Sum") - return - } - case "Schema": - z.Schema, err = dc.ReadInt32() - if err != nil { - err = msgp.WrapError(err, "Schema") - return - } - case "ZeroThreshold": - z.ZeroThreshold, err = dc.ReadFloat64() - if err != nil { - err = msgp.WrapError(err, "ZeroThreshold") - return - } - case "ZeroCount": - var zb0003 uint32 - zb0003, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err, "ZeroCount") - return - } - for zb0003 > 0 { - zb0003-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err, "ZeroCount") - return - } - switch msgp.UnsafeString(field) { - case "IsInt": - z.ZeroCount.IsInt, err = dc.ReadBool() - if err != nil { - err = msgp.WrapError(err, "ZeroCount", "IsInt") - return - } - case "IntValue": - z.ZeroCount.IntValue, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "ZeroCount", "IntValue") - return - } - case "FloatValue": - z.ZeroCount.FloatValue, err = dc.ReadFloat64() - if err != nil { - err = msgp.WrapError(err, "ZeroCount", "FloatValue") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err, "ZeroCount") - return - } - } - } - case "NegativeSpans": - var zb0004 uint32 - zb0004, err = dc.ReadArrayHeader() - if err != nil { - err = msgp.WrapError(err, "NegativeSpans") - return - } - if cap(z.NegativeSpans) >= int(zb0004) { - z.NegativeSpans = (z.NegativeSpans)[:zb0004] - } else { - z.NegativeSpans = make([]BucketSpan, zb0004) - } - for za0001 := range z.NegativeSpans { - var zb0005 uint32 - zb0005, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err, "NegativeSpans", za0001) - return - } - for zb0005 > 0 { - zb0005-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err, "NegativeSpans", za0001) - return - } - switch 
msgp.UnsafeString(field) { - case "Offset": - z.NegativeSpans[za0001].Offset, err = dc.ReadInt32() - if err != nil { - err = msgp.WrapError(err, "NegativeSpans", za0001, "Offset") - return - } - case "Length": - z.NegativeSpans[za0001].Length, err = dc.ReadUint32() - if err != nil { - err = msgp.WrapError(err, "NegativeSpans", za0001, "Length") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err, "NegativeSpans", za0001) - return - } - } - } - } - case "NegativeDeltas": - var zb0006 uint32 - zb0006, err = dc.ReadArrayHeader() - if err != nil { - err = msgp.WrapError(err, "NegativeDeltas") - return - } - if cap(z.NegativeDeltas) >= int(zb0006) { - z.NegativeDeltas = (z.NegativeDeltas)[:zb0006] - } else { - z.NegativeDeltas = make([]int64, zb0006) - } - for za0002 := range z.NegativeDeltas { - z.NegativeDeltas[za0002], err = dc.ReadInt64() - if err != nil { - err = msgp.WrapError(err, "NegativeDeltas", za0002) - return - } - } - case "NegativeCounts": - var zb0007 uint32 - zb0007, err = dc.ReadArrayHeader() - if err != nil { - err = msgp.WrapError(err, "NegativeCounts") - return - } - if cap(z.NegativeCounts) >= int(zb0007) { - z.NegativeCounts = (z.NegativeCounts)[:zb0007] - } else { - z.NegativeCounts = make([]float64, zb0007) - } - for za0003 := range z.NegativeCounts { - z.NegativeCounts[za0003], err = dc.ReadFloat64() - if err != nil { - err = msgp.WrapError(err, "NegativeCounts", za0003) - return - } - } - case "PositiveSpans": - var zb0008 uint32 - zb0008, err = dc.ReadArrayHeader() - if err != nil { - err = msgp.WrapError(err, "PositiveSpans") - return - } - if cap(z.PositiveSpans) >= int(zb0008) { - z.PositiveSpans = (z.PositiveSpans)[:zb0008] - } else { - z.PositiveSpans = make([]BucketSpan, zb0008) - } - for za0004 := range z.PositiveSpans { - var zb0009 uint32 - zb0009, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err, "PositiveSpans", za0004) - return - } - for zb0009 > 0 { - zb0009-- - field, err = 
dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err, "PositiveSpans", za0004) - return - } - switch msgp.UnsafeString(field) { - case "Offset": - z.PositiveSpans[za0004].Offset, err = dc.ReadInt32() - if err != nil { - err = msgp.WrapError(err, "PositiveSpans", za0004, "Offset") - return - } - case "Length": - z.PositiveSpans[za0004].Length, err = dc.ReadUint32() - if err != nil { - err = msgp.WrapError(err, "PositiveSpans", za0004, "Length") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err, "PositiveSpans", za0004) - return - } - } - } - } - case "PositiveDeltas": - var zb0010 uint32 - zb0010, err = dc.ReadArrayHeader() - if err != nil { - err = msgp.WrapError(err, "PositiveDeltas") - return - } - if cap(z.PositiveDeltas) >= int(zb0010) { - z.PositiveDeltas = (z.PositiveDeltas)[:zb0010] - } else { - z.PositiveDeltas = make([]int64, zb0010) - } - for za0005 := range z.PositiveDeltas { - z.PositiveDeltas[za0005], err = dc.ReadInt64() - if err != nil { - err = msgp.WrapError(err, "PositiveDeltas", za0005) - return - } - } - case "PositiveCounts": - var zb0011 uint32 - zb0011, err = dc.ReadArrayHeader() - if err != nil { - err = msgp.WrapError(err, "PositiveCounts") - return - } - if cap(z.PositiveCounts) >= int(zb0011) { - z.PositiveCounts = (z.PositiveCounts)[:zb0011] - } else { - z.PositiveCounts = make([]float64, zb0011) - } - for za0006 := range z.PositiveCounts { - z.PositiveCounts[za0006], err = dc.ReadFloat64() - if err != nil { - err = msgp.WrapError(err, "PositiveCounts", za0006) - return - } - } - case "ResetHint": - z.ResetHint, err = dc.ReadInt32() - if err != nil { - err = msgp.WrapError(err, "ResetHint") - return - } - case "TimestampMillisecond": - z.TimestampMillisecond, err = dc.ReadInt64() - if err != nil { - err = msgp.WrapError(err, "TimestampMillisecond") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg 
implements msgp.Encodable -func (z *FloatHistogram) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 13 - // write "Count" - err = en.Append(0x8d, 0xa5, 0x43, 0x6f, 0x75, 0x6e, 0x74) - if err != nil { - return - } - // map header, size 3 - // write "IsInt" - err = en.Append(0x83, 0xa5, 0x49, 0x73, 0x49, 0x6e, 0x74) - if err != nil { - return - } - err = en.WriteBool(z.Count.IsInt) - if err != nil { - err = msgp.WrapError(err, "Count", "IsInt") - return - } - // write "IntValue" - err = en.Append(0xa8, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) - if err != nil { - return - } - err = en.WriteUint64(z.Count.IntValue) - if err != nil { - err = msgp.WrapError(err, "Count", "IntValue") - return - } - // write "FloatValue" - err = en.Append(0xaa, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) - if err != nil { - return - } - err = en.WriteFloat64(z.Count.FloatValue) - if err != nil { - err = msgp.WrapError(err, "Count", "FloatValue") - return - } - // write "Sum" - err = en.Append(0xa3, 0x53, 0x75, 0x6d) - if err != nil { - return - } - err = en.WriteFloat64(z.Sum) - if err != nil { - err = msgp.WrapError(err, "Sum") - return - } - // write "Schema" - err = en.Append(0xa6, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61) - if err != nil { - return - } - err = en.WriteInt32(z.Schema) - if err != nil { - err = msgp.WrapError(err, "Schema") - return - } - // write "ZeroThreshold" - err = en.Append(0xad, 0x5a, 0x65, 0x72, 0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64) - if err != nil { - return - } - err = en.WriteFloat64(z.ZeroThreshold) - if err != nil { - err = msgp.WrapError(err, "ZeroThreshold") - return - } - // write "ZeroCount" - err = en.Append(0xa9, 0x5a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74) - if err != nil { - return - } - // map header, size 3 - // write "IsInt" - err = en.Append(0x83, 0xa5, 0x49, 0x73, 0x49, 0x6e, 0x74) - if err != nil { - return - } - err = en.WriteBool(z.ZeroCount.IsInt) - if err != nil { - err = 
msgp.WrapError(err, "ZeroCount", "IsInt") - return - } - // write "IntValue" - err = en.Append(0xa8, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) - if err != nil { - return - } - err = en.WriteUint64(z.ZeroCount.IntValue) - if err != nil { - err = msgp.WrapError(err, "ZeroCount", "IntValue") - return - } - // write "FloatValue" - err = en.Append(0xaa, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) - if err != nil { - return - } - err = en.WriteFloat64(z.ZeroCount.FloatValue) - if err != nil { - err = msgp.WrapError(err, "ZeroCount", "FloatValue") - return - } - // write "NegativeSpans" - err = en.Append(0xad, 0x4e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73) - if err != nil { - return - } - err = en.WriteArrayHeader(uint32(len(z.NegativeSpans))) - if err != nil { - err = msgp.WrapError(err, "NegativeSpans") - return - } - for za0001 := range z.NegativeSpans { - // map header, size 2 - // write "Offset" - err = en.Append(0x82, 0xa6, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74) - if err != nil { - return - } - err = en.WriteInt32(z.NegativeSpans[za0001].Offset) - if err != nil { - err = msgp.WrapError(err, "NegativeSpans", za0001, "Offset") - return - } - // write "Length" - err = en.Append(0xa6, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68) - if err != nil { - return - } - err = en.WriteUint32(z.NegativeSpans[za0001].Length) - if err != nil { - err = msgp.WrapError(err, "NegativeSpans", za0001, "Length") - return - } - } - // write "NegativeDeltas" - err = en.Append(0xae, 0x4e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73) - if err != nil { - return - } - err = en.WriteArrayHeader(uint32(len(z.NegativeDeltas))) - if err != nil { - err = msgp.WrapError(err, "NegativeDeltas") - return - } - for za0002 := range z.NegativeDeltas { - err = en.WriteInt64(z.NegativeDeltas[za0002]) - if err != nil { - err = msgp.WrapError(err, "NegativeDeltas", za0002) - return - } - } - // write "NegativeCounts" - err = 
en.Append(0xae, 0x4e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73) - if err != nil { - return - } - err = en.WriteArrayHeader(uint32(len(z.NegativeCounts))) - if err != nil { - err = msgp.WrapError(err, "NegativeCounts") - return - } - for za0003 := range z.NegativeCounts { - err = en.WriteFloat64(z.NegativeCounts[za0003]) - if err != nil { - err = msgp.WrapError(err, "NegativeCounts", za0003) - return - } - } - // write "PositiveSpans" - err = en.Append(0xad, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73) - if err != nil { - return - } - err = en.WriteArrayHeader(uint32(len(z.PositiveSpans))) - if err != nil { - err = msgp.WrapError(err, "PositiveSpans") - return - } - for za0004 := range z.PositiveSpans { - // map header, size 2 - // write "Offset" - err = en.Append(0x82, 0xa6, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74) - if err != nil { - return - } - err = en.WriteInt32(z.PositiveSpans[za0004].Offset) - if err != nil { - err = msgp.WrapError(err, "PositiveSpans", za0004, "Offset") - return - } - // write "Length" - err = en.Append(0xa6, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68) - if err != nil { - return - } - err = en.WriteUint32(z.PositiveSpans[za0004].Length) - if err != nil { - err = msgp.WrapError(err, "PositiveSpans", za0004, "Length") - return - } - } - // write "PositiveDeltas" - err = en.Append(0xae, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73) - if err != nil { - return - } - err = en.WriteArrayHeader(uint32(len(z.PositiveDeltas))) - if err != nil { - err = msgp.WrapError(err, "PositiveDeltas") - return - } - for za0005 := range z.PositiveDeltas { - err = en.WriteInt64(z.PositiveDeltas[za0005]) - if err != nil { - err = msgp.WrapError(err, "PositiveDeltas", za0005) - return - } - } - // write "PositiveCounts" - err = en.Append(0xae, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73) - if err != nil { - return - } - err = 
en.WriteArrayHeader(uint32(len(z.PositiveCounts))) - if err != nil { - err = msgp.WrapError(err, "PositiveCounts") - return - } - for za0006 := range z.PositiveCounts { - err = en.WriteFloat64(z.PositiveCounts[za0006]) - if err != nil { - err = msgp.WrapError(err, "PositiveCounts", za0006) - return - } - } - // write "ResetHint" - err = en.Append(0xa9, 0x52, 0x65, 0x73, 0x65, 0x74, 0x48, 0x69, 0x6e, 0x74) - if err != nil { - return - } - err = en.WriteInt32(z.ResetHint) - if err != nil { - err = msgp.WrapError(err, "ResetHint") - return - } - // write "TimestampMillisecond" - err = en.Append(0xb4, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x69, 0x6c, 0x6c, 0x69, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64) - if err != nil { - return - } - err = en.WriteInt64(z.TimestampMillisecond) - if err != nil { - err = msgp.WrapError(err, "TimestampMillisecond") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z *FloatHistogram) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 13 - // string "Count" - o = append(o, 0x8d, 0xa5, 0x43, 0x6f, 0x75, 0x6e, 0x74) - // map header, size 3 - // string "IsInt" - o = append(o, 0x83, 0xa5, 0x49, 0x73, 0x49, 0x6e, 0x74) - o = msgp.AppendBool(o, z.Count.IsInt) - // string "IntValue" - o = append(o, 0xa8, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) - o = msgp.AppendUint64(o, z.Count.IntValue) - // string "FloatValue" - o = append(o, 0xaa, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) - o = msgp.AppendFloat64(o, z.Count.FloatValue) - // string "Sum" - o = append(o, 0xa3, 0x53, 0x75, 0x6d) - o = msgp.AppendFloat64(o, z.Sum) - // string "Schema" - o = append(o, 0xa6, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61) - o = msgp.AppendInt32(o, z.Schema) - // string "ZeroThreshold" - o = append(o, 0xad, 0x5a, 0x65, 0x72, 0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64) - o = msgp.AppendFloat64(o, z.ZeroThreshold) - // string "ZeroCount" - o = 
append(o, 0xa9, 0x5a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74) - // map header, size 3 - // string "IsInt" - o = append(o, 0x83, 0xa5, 0x49, 0x73, 0x49, 0x6e, 0x74) - o = msgp.AppendBool(o, z.ZeroCount.IsInt) - // string "IntValue" - o = append(o, 0xa8, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) - o = msgp.AppendUint64(o, z.ZeroCount.IntValue) - // string "FloatValue" - o = append(o, 0xaa, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) - o = msgp.AppendFloat64(o, z.ZeroCount.FloatValue) - // string "NegativeSpans" - o = append(o, 0xad, 0x4e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.NegativeSpans))) - for za0001 := range z.NegativeSpans { - // map header, size 2 - // string "Offset" - o = append(o, 0x82, 0xa6, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74) - o = msgp.AppendInt32(o, z.NegativeSpans[za0001].Offset) - // string "Length" - o = append(o, 0xa6, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68) - o = msgp.AppendUint32(o, z.NegativeSpans[za0001].Length) - } - // string "NegativeDeltas" - o = append(o, 0xae, 0x4e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.NegativeDeltas))) - for za0002 := range z.NegativeDeltas { - o = msgp.AppendInt64(o, z.NegativeDeltas[za0002]) - } - // string "NegativeCounts" - o = append(o, 0xae, 0x4e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.NegativeCounts))) - for za0003 := range z.NegativeCounts { - o = msgp.AppendFloat64(o, z.NegativeCounts[za0003]) - } - // string "PositiveSpans" - o = append(o, 0xad, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.PositiveSpans))) - for za0004 := range z.PositiveSpans { - // map header, size 2 - // string "Offset" - o = append(o, 0x82, 0xa6, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74) - o = 
msgp.AppendInt32(o, z.PositiveSpans[za0004].Offset) - // string "Length" - o = append(o, 0xa6, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68) - o = msgp.AppendUint32(o, z.PositiveSpans[za0004].Length) - } - // string "PositiveDeltas" - o = append(o, 0xae, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.PositiveDeltas))) - for za0005 := range z.PositiveDeltas { - o = msgp.AppendInt64(o, z.PositiveDeltas[za0005]) - } - // string "PositiveCounts" - o = append(o, 0xae, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.PositiveCounts))) - for za0006 := range z.PositiveCounts { - o = msgp.AppendFloat64(o, z.PositiveCounts[za0006]) - } - // string "ResetHint" - o = append(o, 0xa9, 0x52, 0x65, 0x73, 0x65, 0x74, 0x48, 0x69, 0x6e, 0x74) - o = msgp.AppendInt32(o, z.ResetHint) - // string "TimestampMillisecond" - o = append(o, 0xb4, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x69, 0x6c, 0x6c, 0x69, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64) - o = msgp.AppendInt64(o, z.TimestampMillisecond) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *FloatHistogram) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "Count": - var zb0002 uint32 - zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Count") - return - } - for zb0002 > 0 { - zb0002-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err, "Count") - return - } - switch msgp.UnsafeString(field) { - case "IsInt": - z.Count.IsInt, bts, err = 
msgp.ReadBoolBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Count", "IsInt") - return - } - case "IntValue": - z.Count.IntValue, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Count", "IntValue") - return - } - case "FloatValue": - z.Count.FloatValue, bts, err = msgp.ReadFloat64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Count", "FloatValue") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err, "Count") - return - } - } - } - case "Sum": - z.Sum, bts, err = msgp.ReadFloat64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Sum") - return - } - case "Schema": - z.Schema, bts, err = msgp.ReadInt32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Schema") - return - } - case "ZeroThreshold": - z.ZeroThreshold, bts, err = msgp.ReadFloat64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "ZeroThreshold") - return - } - case "ZeroCount": - var zb0003 uint32 - zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "ZeroCount") - return - } - for zb0003 > 0 { - zb0003-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err, "ZeroCount") - return - } - switch msgp.UnsafeString(field) { - case "IsInt": - z.ZeroCount.IsInt, bts, err = msgp.ReadBoolBytes(bts) - if err != nil { - err = msgp.WrapError(err, "ZeroCount", "IsInt") - return - } - case "IntValue": - z.ZeroCount.IntValue, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "ZeroCount", "IntValue") - return - } - case "FloatValue": - z.ZeroCount.FloatValue, bts, err = msgp.ReadFloat64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "ZeroCount", "FloatValue") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err, "ZeroCount") - return - } - } - } - case "NegativeSpans": - var zb0004 uint32 - zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts) 
- if err != nil { - err = msgp.WrapError(err, "NegativeSpans") - return - } - if cap(z.NegativeSpans) >= int(zb0004) { - z.NegativeSpans = (z.NegativeSpans)[:zb0004] - } else { - z.NegativeSpans = make([]BucketSpan, zb0004) - } - for za0001 := range z.NegativeSpans { - var zb0005 uint32 - zb0005, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "NegativeSpans", za0001) - return - } - for zb0005 > 0 { - zb0005-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err, "NegativeSpans", za0001) - return - } - switch msgp.UnsafeString(field) { - case "Offset": - z.NegativeSpans[za0001].Offset, bts, err = msgp.ReadInt32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "NegativeSpans", za0001, "Offset") - return - } - case "Length": - z.NegativeSpans[za0001].Length, bts, err = msgp.ReadUint32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "NegativeSpans", za0001, "Length") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err, "NegativeSpans", za0001) - return - } - } - } - } - case "NegativeDeltas": - var zb0006 uint32 - zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "NegativeDeltas") - return - } - if cap(z.NegativeDeltas) >= int(zb0006) { - z.NegativeDeltas = (z.NegativeDeltas)[:zb0006] - } else { - z.NegativeDeltas = make([]int64, zb0006) - } - for za0002 := range z.NegativeDeltas { - z.NegativeDeltas[za0002], bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "NegativeDeltas", za0002) - return - } - } - case "NegativeCounts": - var zb0007 uint32 - zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "NegativeCounts") - return - } - if cap(z.NegativeCounts) >= int(zb0007) { - z.NegativeCounts = (z.NegativeCounts)[:zb0007] - } else { - z.NegativeCounts = make([]float64, zb0007) - } - for za0003 := range z.NegativeCounts { 
- z.NegativeCounts[za0003], bts, err = msgp.ReadFloat64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "NegativeCounts", za0003) - return - } - } - case "PositiveSpans": - var zb0008 uint32 - zb0008, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "PositiveSpans") - return - } - if cap(z.PositiveSpans) >= int(zb0008) { - z.PositiveSpans = (z.PositiveSpans)[:zb0008] - } else { - z.PositiveSpans = make([]BucketSpan, zb0008) - } - for za0004 := range z.PositiveSpans { - var zb0009 uint32 - zb0009, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "PositiveSpans", za0004) - return - } - for zb0009 > 0 { - zb0009-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err, "PositiveSpans", za0004) - return - } - switch msgp.UnsafeString(field) { - case "Offset": - z.PositiveSpans[za0004].Offset, bts, err = msgp.ReadInt32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "PositiveSpans", za0004, "Offset") - return - } - case "Length": - z.PositiveSpans[za0004].Length, bts, err = msgp.ReadUint32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "PositiveSpans", za0004, "Length") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err, "PositiveSpans", za0004) - return - } - } - } - } - case "PositiveDeltas": - var zb0010 uint32 - zb0010, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "PositiveDeltas") - return - } - if cap(z.PositiveDeltas) >= int(zb0010) { - z.PositiveDeltas = (z.PositiveDeltas)[:zb0010] - } else { - z.PositiveDeltas = make([]int64, zb0010) - } - for za0005 := range z.PositiveDeltas { - z.PositiveDeltas[za0005], bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "PositiveDeltas", za0005) - return - } - } - case "PositiveCounts": - var zb0011 uint32 - zb0011, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - 
err = msgp.WrapError(err, "PositiveCounts") - return - } - if cap(z.PositiveCounts) >= int(zb0011) { - z.PositiveCounts = (z.PositiveCounts)[:zb0011] - } else { - z.PositiveCounts = make([]float64, zb0011) - } - for za0006 := range z.PositiveCounts { - z.PositiveCounts[za0006], bts, err = msgp.ReadFloat64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "PositiveCounts", za0006) - return - } - } - case "ResetHint": - z.ResetHint, bts, err = msgp.ReadInt32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "ResetHint") - return - } - case "TimestampMillisecond": - z.TimestampMillisecond, bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "TimestampMillisecond") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *FloatHistogram) Msgsize() (s int) { - s = 1 + 6 + 1 + 6 + msgp.BoolSize + 9 + msgp.Uint64Size + 11 + msgp.Float64Size + 4 + msgp.Float64Size + 7 + msgp.Int32Size + 14 + msgp.Float64Size + 10 + 1 + 6 + msgp.BoolSize + 9 + msgp.Uint64Size + 11 + msgp.Float64Size + 14 + msgp.ArrayHeaderSize + (len(z.NegativeSpans) * (15 + msgp.Int32Size + msgp.Uint32Size)) + 15 + msgp.ArrayHeaderSize + (len(z.NegativeDeltas) * (msgp.Int64Size)) + 15 + msgp.ArrayHeaderSize + (len(z.NegativeCounts) * (msgp.Float64Size)) + 14 + msgp.ArrayHeaderSize + (len(z.PositiveSpans) * (15 + msgp.Int32Size + msgp.Uint32Size)) + 15 + msgp.ArrayHeaderSize + (len(z.PositiveDeltas) * (msgp.Int64Size)) + 15 + msgp.ArrayHeaderSize + (len(z.PositiveCounts) * (msgp.Float64Size)) + 10 + msgp.Int32Size + 21 + msgp.Int64Size - return -} - -// DecodeMsg implements msgp.Decodable -func (z *Histogram) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = 
msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "Count": - var zb0002 uint32 - zb0002, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err, "Count") - return - } - for zb0002 > 0 { - zb0002-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err, "Count") - return - } - switch msgp.UnsafeString(field) { - case "IsInt": - z.Count.IsInt, err = dc.ReadBool() - if err != nil { - err = msgp.WrapError(err, "Count", "IsInt") - return - } - case "IntValue": - z.Count.IntValue, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "Count", "IntValue") - return - } - case "FloatValue": - z.Count.FloatValue, err = dc.ReadFloat64() - if err != nil { - err = msgp.WrapError(err, "Count", "FloatValue") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err, "Count") - return - } - } - } - case "Sum": - z.Sum, err = dc.ReadFloat64() - if err != nil { - err = msgp.WrapError(err, "Sum") - return - } - case "Schema": - z.Schema, err = dc.ReadInt32() - if err != nil { - err = msgp.WrapError(err, "Schema") - return - } - case "ZeroThreshold": - z.ZeroThreshold, err = dc.ReadFloat64() - if err != nil { - err = msgp.WrapError(err, "ZeroThreshold") - return - } - case "ZeroCount": - var zb0003 uint32 - zb0003, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err, "ZeroCount") - return - } - for zb0003 > 0 { - zb0003-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err, "ZeroCount") - return - } - switch msgp.UnsafeString(field) { - case "IsInt": - z.ZeroCount.IsInt, err = dc.ReadBool() - if err != nil { - err = msgp.WrapError(err, "ZeroCount", "IsInt") - return - } - case "IntValue": - z.ZeroCount.IntValue, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "ZeroCount", "IntValue") - return - 
} - case "FloatValue": - z.ZeroCount.FloatValue, err = dc.ReadFloat64() - if err != nil { - err = msgp.WrapError(err, "ZeroCount", "FloatValue") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err, "ZeroCount") - return - } - } - } - case "NegativeSpans": - var zb0004 uint32 - zb0004, err = dc.ReadArrayHeader() - if err != nil { - err = msgp.WrapError(err, "NegativeSpans") - return - } - if cap(z.NegativeSpans) >= int(zb0004) { - z.NegativeSpans = (z.NegativeSpans)[:zb0004] - } else { - z.NegativeSpans = make([]BucketSpan, zb0004) - } - for za0001 := range z.NegativeSpans { - var zb0005 uint32 - zb0005, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err, "NegativeSpans", za0001) - return - } - for zb0005 > 0 { - zb0005-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err, "NegativeSpans", za0001) - return - } - switch msgp.UnsafeString(field) { - case "Offset": - z.NegativeSpans[za0001].Offset, err = dc.ReadInt32() - if err != nil { - err = msgp.WrapError(err, "NegativeSpans", za0001, "Offset") - return - } - case "Length": - z.NegativeSpans[za0001].Length, err = dc.ReadUint32() - if err != nil { - err = msgp.WrapError(err, "NegativeSpans", za0001, "Length") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err, "NegativeSpans", za0001) - return - } - } - } - } - case "NegativeBuckets": - var zb0006 uint32 - zb0006, err = dc.ReadArrayHeader() - if err != nil { - err = msgp.WrapError(err, "NegativeBuckets") - return - } - if cap(z.NegativeBuckets) >= int(zb0006) { - z.NegativeBuckets = (z.NegativeBuckets)[:zb0006] - } else { - z.NegativeBuckets = make([]int64, zb0006) - } - for za0002 := range z.NegativeBuckets { - z.NegativeBuckets[za0002], err = dc.ReadInt64() - if err != nil { - err = msgp.WrapError(err, "NegativeBuckets", za0002) - return - } - } - case "NegativeCounts": - var zb0007 uint32 - zb0007, err = dc.ReadArrayHeader() - if err != nil { - 
err = msgp.WrapError(err, "NegativeCounts") - return - } - if cap(z.NegativeCounts) >= int(zb0007) { - z.NegativeCounts = (z.NegativeCounts)[:zb0007] - } else { - z.NegativeCounts = make([]float64, zb0007) - } - for za0003 := range z.NegativeCounts { - z.NegativeCounts[za0003], err = dc.ReadFloat64() - if err != nil { - err = msgp.WrapError(err, "NegativeCounts", za0003) - return - } - } - case "PositiveSpans": - var zb0008 uint32 - zb0008, err = dc.ReadArrayHeader() - if err != nil { - err = msgp.WrapError(err, "PositiveSpans") - return - } - if cap(z.PositiveSpans) >= int(zb0008) { - z.PositiveSpans = (z.PositiveSpans)[:zb0008] - } else { - z.PositiveSpans = make([]BucketSpan, zb0008) - } - for za0004 := range z.PositiveSpans { - var zb0009 uint32 - zb0009, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err, "PositiveSpans", za0004) - return - } - for zb0009 > 0 { - zb0009-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err, "PositiveSpans", za0004) - return - } - switch msgp.UnsafeString(field) { - case "Offset": - z.PositiveSpans[za0004].Offset, err = dc.ReadInt32() - if err != nil { - err = msgp.WrapError(err, "PositiveSpans", za0004, "Offset") - return - } - case "Length": - z.PositiveSpans[za0004].Length, err = dc.ReadUint32() - if err != nil { - err = msgp.WrapError(err, "PositiveSpans", za0004, "Length") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err, "PositiveSpans", za0004) - return - } - } - } - } - case "PositiveBuckets": - var zb0010 uint32 - zb0010, err = dc.ReadArrayHeader() - if err != nil { - err = msgp.WrapError(err, "PositiveBuckets") - return - } - if cap(z.PositiveBuckets) >= int(zb0010) { - z.PositiveBuckets = (z.PositiveBuckets)[:zb0010] - } else { - z.PositiveBuckets = make([]int64, zb0010) - } - for za0005 := range z.PositiveBuckets { - z.PositiveBuckets[za0005], err = dc.ReadInt64() - if err != nil { - err = msgp.WrapError(err, "PositiveBuckets", 
za0005) - return - } - } - case "PositiveCounts": - var zb0011 uint32 - zb0011, err = dc.ReadArrayHeader() - if err != nil { - err = msgp.WrapError(err, "PositiveCounts") - return - } - if cap(z.PositiveCounts) >= int(zb0011) { - z.PositiveCounts = (z.PositiveCounts)[:zb0011] - } else { - z.PositiveCounts = make([]float64, zb0011) - } - for za0006 := range z.PositiveCounts { - z.PositiveCounts[za0006], err = dc.ReadFloat64() - if err != nil { - err = msgp.WrapError(err, "PositiveCounts", za0006) - return - } - } - case "ResetHint": - z.ResetHint, err = dc.ReadInt32() - if err != nil { - err = msgp.WrapError(err, "ResetHint") - return - } - case "TimestampMillisecond": - z.TimestampMillisecond, err = dc.ReadInt64() - if err != nil { - err = msgp.WrapError(err, "TimestampMillisecond") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z *Histogram) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 13 - // write "Count" - err = en.Append(0x8d, 0xa5, 0x43, 0x6f, 0x75, 0x6e, 0x74) - if err != nil { - return - } - // map header, size 3 - // write "IsInt" - err = en.Append(0x83, 0xa5, 0x49, 0x73, 0x49, 0x6e, 0x74) - if err != nil { - return - } - err = en.WriteBool(z.Count.IsInt) - if err != nil { - err = msgp.WrapError(err, "Count", "IsInt") - return - } - // write "IntValue" - err = en.Append(0xa8, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) - if err != nil { - return - } - err = en.WriteUint64(z.Count.IntValue) - if err != nil { - err = msgp.WrapError(err, "Count", "IntValue") - return - } - // write "FloatValue" - err = en.Append(0xaa, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) - if err != nil { - return - } - err = en.WriteFloat64(z.Count.FloatValue) - if err != nil { - err = msgp.WrapError(err, "Count", "FloatValue") - return - } - // write "Sum" - err = en.Append(0xa3, 0x53, 0x75, 0x6d) - if err != nil { - return - 
} - err = en.WriteFloat64(z.Sum) - if err != nil { - err = msgp.WrapError(err, "Sum") - return - } - // write "Schema" - err = en.Append(0xa6, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61) - if err != nil { - return - } - err = en.WriteInt32(z.Schema) - if err != nil { - err = msgp.WrapError(err, "Schema") - return - } - // write "ZeroThreshold" - err = en.Append(0xad, 0x5a, 0x65, 0x72, 0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64) - if err != nil { - return - } - err = en.WriteFloat64(z.ZeroThreshold) - if err != nil { - err = msgp.WrapError(err, "ZeroThreshold") - return - } - // write "ZeroCount" - err = en.Append(0xa9, 0x5a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74) - if err != nil { - return - } - // map header, size 3 - // write "IsInt" - err = en.Append(0x83, 0xa5, 0x49, 0x73, 0x49, 0x6e, 0x74) - if err != nil { - return - } - err = en.WriteBool(z.ZeroCount.IsInt) - if err != nil { - err = msgp.WrapError(err, "ZeroCount", "IsInt") - return - } - // write "IntValue" - err = en.Append(0xa8, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) - if err != nil { - return - } - err = en.WriteUint64(z.ZeroCount.IntValue) - if err != nil { - err = msgp.WrapError(err, "ZeroCount", "IntValue") - return - } - // write "FloatValue" - err = en.Append(0xaa, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) - if err != nil { - return - } - err = en.WriteFloat64(z.ZeroCount.FloatValue) - if err != nil { - err = msgp.WrapError(err, "ZeroCount", "FloatValue") - return - } - // write "NegativeSpans" - err = en.Append(0xad, 0x4e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73) - if err != nil { - return - } - err = en.WriteArrayHeader(uint32(len(z.NegativeSpans))) - if err != nil { - err = msgp.WrapError(err, "NegativeSpans") - return - } - for za0001 := range z.NegativeSpans { - // map header, size 2 - // write "Offset" - err = en.Append(0x82, 0xa6, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74) - if err != nil { - return - } - err = 
en.WriteInt32(z.NegativeSpans[za0001].Offset) - if err != nil { - err = msgp.WrapError(err, "NegativeSpans", za0001, "Offset") - return - } - // write "Length" - err = en.Append(0xa6, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68) - if err != nil { - return - } - err = en.WriteUint32(z.NegativeSpans[za0001].Length) - if err != nil { - err = msgp.WrapError(err, "NegativeSpans", za0001, "Length") - return - } - } - // write "NegativeBuckets" - err = en.Append(0xaf, 0x4e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73) - if err != nil { - return - } - err = en.WriteArrayHeader(uint32(len(z.NegativeBuckets))) - if err != nil { - err = msgp.WrapError(err, "NegativeBuckets") - return - } - for za0002 := range z.NegativeBuckets { - err = en.WriteInt64(z.NegativeBuckets[za0002]) - if err != nil { - err = msgp.WrapError(err, "NegativeBuckets", za0002) - return - } - } - // write "NegativeCounts" - err = en.Append(0xae, 0x4e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73) - if err != nil { - return - } - err = en.WriteArrayHeader(uint32(len(z.NegativeCounts))) - if err != nil { - err = msgp.WrapError(err, "NegativeCounts") - return - } - for za0003 := range z.NegativeCounts { - err = en.WriteFloat64(z.NegativeCounts[za0003]) - if err != nil { - err = msgp.WrapError(err, "NegativeCounts", za0003) - return - } - } - // write "PositiveSpans" - err = en.Append(0xad, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73) - if err != nil { - return - } - err = en.WriteArrayHeader(uint32(len(z.PositiveSpans))) - if err != nil { - err = msgp.WrapError(err, "PositiveSpans") - return - } - for za0004 := range z.PositiveSpans { - // map header, size 2 - // write "Offset" - err = en.Append(0x82, 0xa6, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74) - if err != nil { - return - } - err = en.WriteInt32(z.PositiveSpans[za0004].Offset) - if err != nil { - err = msgp.WrapError(err, "PositiveSpans", za0004, "Offset") - return 
- } - // write "Length" - err = en.Append(0xa6, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68) - if err != nil { - return - } - err = en.WriteUint32(z.PositiveSpans[za0004].Length) - if err != nil { - err = msgp.WrapError(err, "PositiveSpans", za0004, "Length") - return - } - } - // write "PositiveBuckets" - err = en.Append(0xaf, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73) - if err != nil { - return - } - err = en.WriteArrayHeader(uint32(len(z.PositiveBuckets))) - if err != nil { - err = msgp.WrapError(err, "PositiveBuckets") - return - } - for za0005 := range z.PositiveBuckets { - err = en.WriteInt64(z.PositiveBuckets[za0005]) - if err != nil { - err = msgp.WrapError(err, "PositiveBuckets", za0005) - return - } - } - // write "PositiveCounts" - err = en.Append(0xae, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73) - if err != nil { - return - } - err = en.WriteArrayHeader(uint32(len(z.PositiveCounts))) - if err != nil { - err = msgp.WrapError(err, "PositiveCounts") - return - } - for za0006 := range z.PositiveCounts { - err = en.WriteFloat64(z.PositiveCounts[za0006]) - if err != nil { - err = msgp.WrapError(err, "PositiveCounts", za0006) - return - } - } - // write "ResetHint" - err = en.Append(0xa9, 0x52, 0x65, 0x73, 0x65, 0x74, 0x48, 0x69, 0x6e, 0x74) - if err != nil { - return - } - err = en.WriteInt32(z.ResetHint) - if err != nil { - err = msgp.WrapError(err, "ResetHint") - return - } - // write "TimestampMillisecond" - err = en.Append(0xb4, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x69, 0x6c, 0x6c, 0x69, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64) - if err != nil { - return - } - err = en.WriteInt64(z.TimestampMillisecond) - if err != nil { - err = msgp.WrapError(err, "TimestampMillisecond") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z *Histogram) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 
13 - // string "Count" - o = append(o, 0x8d, 0xa5, 0x43, 0x6f, 0x75, 0x6e, 0x74) - // map header, size 3 - // string "IsInt" - o = append(o, 0x83, 0xa5, 0x49, 0x73, 0x49, 0x6e, 0x74) - o = msgp.AppendBool(o, z.Count.IsInt) - // string "IntValue" - o = append(o, 0xa8, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) - o = msgp.AppendUint64(o, z.Count.IntValue) - // string "FloatValue" - o = append(o, 0xaa, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) - o = msgp.AppendFloat64(o, z.Count.FloatValue) - // string "Sum" - o = append(o, 0xa3, 0x53, 0x75, 0x6d) - o = msgp.AppendFloat64(o, z.Sum) - // string "Schema" - o = append(o, 0xa6, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61) - o = msgp.AppendInt32(o, z.Schema) - // string "ZeroThreshold" - o = append(o, 0xad, 0x5a, 0x65, 0x72, 0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64) - o = msgp.AppendFloat64(o, z.ZeroThreshold) - // string "ZeroCount" - o = append(o, 0xa9, 0x5a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74) - // map header, size 3 - // string "IsInt" - o = append(o, 0x83, 0xa5, 0x49, 0x73, 0x49, 0x6e, 0x74) - o = msgp.AppendBool(o, z.ZeroCount.IsInt) - // string "IntValue" - o = append(o, 0xa8, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) - o = msgp.AppendUint64(o, z.ZeroCount.IntValue) - // string "FloatValue" - o = append(o, 0xaa, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) - o = msgp.AppendFloat64(o, z.ZeroCount.FloatValue) - // string "NegativeSpans" - o = append(o, 0xad, 0x4e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.NegativeSpans))) - for za0001 := range z.NegativeSpans { - // map header, size 2 - // string "Offset" - o = append(o, 0x82, 0xa6, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74) - o = msgp.AppendInt32(o, z.NegativeSpans[za0001].Offset) - // string "Length" - o = append(o, 0xa6, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68) - o = msgp.AppendUint32(o, z.NegativeSpans[za0001].Length) - } - // string 
"NegativeBuckets" - o = append(o, 0xaf, 0x4e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.NegativeBuckets))) - for za0002 := range z.NegativeBuckets { - o = msgp.AppendInt64(o, z.NegativeBuckets[za0002]) - } - // string "NegativeCounts" - o = append(o, 0xae, 0x4e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.NegativeCounts))) - for za0003 := range z.NegativeCounts { - o = msgp.AppendFloat64(o, z.NegativeCounts[za0003]) - } - // string "PositiveSpans" - o = append(o, 0xad, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.PositiveSpans))) - for za0004 := range z.PositiveSpans { - // map header, size 2 - // string "Offset" - o = append(o, 0x82, 0xa6, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74) - o = msgp.AppendInt32(o, z.PositiveSpans[za0004].Offset) - // string "Length" - o = append(o, 0xa6, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68) - o = msgp.AppendUint32(o, z.PositiveSpans[za0004].Length) - } - // string "PositiveBuckets" - o = append(o, 0xaf, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.PositiveBuckets))) - for za0005 := range z.PositiveBuckets { - o = msgp.AppendInt64(o, z.PositiveBuckets[za0005]) - } - // string "PositiveCounts" - o = append(o, 0xae, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.PositiveCounts))) - for za0006 := range z.PositiveCounts { - o = msgp.AppendFloat64(o, z.PositiveCounts[za0006]) - } - // string "ResetHint" - o = append(o, 0xa9, 0x52, 0x65, 0x73, 0x65, 0x74, 0x48, 0x69, 0x6e, 0x74) - o = msgp.AppendInt32(o, z.ResetHint) - // string "TimestampMillisecond" - o = append(o, 0xb4, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x69, 0x6c, 
0x6c, 0x69, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64) - o = msgp.AppendInt64(o, z.TimestampMillisecond) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *Histogram) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "Count": - var zb0002 uint32 - zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Count") - return - } - for zb0002 > 0 { - zb0002-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err, "Count") - return - } - switch msgp.UnsafeString(field) { - case "IsInt": - z.Count.IsInt, bts, err = msgp.ReadBoolBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Count", "IsInt") - return - } - case "IntValue": - z.Count.IntValue, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Count", "IntValue") - return - } - case "FloatValue": - z.Count.FloatValue, bts, err = msgp.ReadFloat64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Count", "FloatValue") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err, "Count") - return - } - } - } - case "Sum": - z.Sum, bts, err = msgp.ReadFloat64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Sum") - return - } - case "Schema": - z.Schema, bts, err = msgp.ReadInt32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Schema") - return - } - case "ZeroThreshold": - z.ZeroThreshold, bts, err = msgp.ReadFloat64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "ZeroThreshold") - return - } - case "ZeroCount": - var zb0003 uint32 - zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = 
msgp.WrapError(err, "ZeroCount") - return - } - for zb0003 > 0 { - zb0003-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err, "ZeroCount") - return - } - switch msgp.UnsafeString(field) { - case "IsInt": - z.ZeroCount.IsInt, bts, err = msgp.ReadBoolBytes(bts) - if err != nil { - err = msgp.WrapError(err, "ZeroCount", "IsInt") - return - } - case "IntValue": - z.ZeroCount.IntValue, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "ZeroCount", "IntValue") - return - } - case "FloatValue": - z.ZeroCount.FloatValue, bts, err = msgp.ReadFloat64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "ZeroCount", "FloatValue") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err, "ZeroCount") - return - } - } - } - case "NegativeSpans": - var zb0004 uint32 - zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "NegativeSpans") - return - } - if cap(z.NegativeSpans) >= int(zb0004) { - z.NegativeSpans = (z.NegativeSpans)[:zb0004] - } else { - z.NegativeSpans = make([]BucketSpan, zb0004) - } - for za0001 := range z.NegativeSpans { - var zb0005 uint32 - zb0005, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "NegativeSpans", za0001) - return - } - for zb0005 > 0 { - zb0005-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err, "NegativeSpans", za0001) - return - } - switch msgp.UnsafeString(field) { - case "Offset": - z.NegativeSpans[za0001].Offset, bts, err = msgp.ReadInt32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "NegativeSpans", za0001, "Offset") - return - } - case "Length": - z.NegativeSpans[za0001].Length, bts, err = msgp.ReadUint32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "NegativeSpans", za0001, "Length") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err, 
"NegativeSpans", za0001) - return - } - } - } - } - case "NegativeBuckets": - var zb0006 uint32 - zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "NegativeBuckets") - return - } - if cap(z.NegativeBuckets) >= int(zb0006) { - z.NegativeBuckets = (z.NegativeBuckets)[:zb0006] - } else { - z.NegativeBuckets = make([]int64, zb0006) - } - for za0002 := range z.NegativeBuckets { - z.NegativeBuckets[za0002], bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "NegativeBuckets", za0002) - return - } - } - case "NegativeCounts": - var zb0007 uint32 - zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "NegativeCounts") - return - } - if cap(z.NegativeCounts) >= int(zb0007) { - z.NegativeCounts = (z.NegativeCounts)[:zb0007] - } else { - z.NegativeCounts = make([]float64, zb0007) - } - for za0003 := range z.NegativeCounts { - z.NegativeCounts[za0003], bts, err = msgp.ReadFloat64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "NegativeCounts", za0003) - return - } - } - case "PositiveSpans": - var zb0008 uint32 - zb0008, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "PositiveSpans") - return - } - if cap(z.PositiveSpans) >= int(zb0008) { - z.PositiveSpans = (z.PositiveSpans)[:zb0008] - } else { - z.PositiveSpans = make([]BucketSpan, zb0008) - } - for za0004 := range z.PositiveSpans { - var zb0009 uint32 - zb0009, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "PositiveSpans", za0004) - return - } - for zb0009 > 0 { - zb0009-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err, "PositiveSpans", za0004) - return - } - switch msgp.UnsafeString(field) { - case "Offset": - z.PositiveSpans[za0004].Offset, bts, err = msgp.ReadInt32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "PositiveSpans", za0004, "Offset") - return - } - case 
"Length": - z.PositiveSpans[za0004].Length, bts, err = msgp.ReadUint32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "PositiveSpans", za0004, "Length") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err, "PositiveSpans", za0004) - return - } - } - } - } - case "PositiveBuckets": - var zb0010 uint32 - zb0010, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "PositiveBuckets") - return - } - if cap(z.PositiveBuckets) >= int(zb0010) { - z.PositiveBuckets = (z.PositiveBuckets)[:zb0010] - } else { - z.PositiveBuckets = make([]int64, zb0010) - } - for za0005 := range z.PositiveBuckets { - z.PositiveBuckets[za0005], bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "PositiveBuckets", za0005) - return - } - } - case "PositiveCounts": - var zb0011 uint32 - zb0011, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "PositiveCounts") - return - } - if cap(z.PositiveCounts) >= int(zb0011) { - z.PositiveCounts = (z.PositiveCounts)[:zb0011] - } else { - z.PositiveCounts = make([]float64, zb0011) - } - for za0006 := range z.PositiveCounts { - z.PositiveCounts[za0006], bts, err = msgp.ReadFloat64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "PositiveCounts", za0006) - return - } - } - case "ResetHint": - z.ResetHint, bts, err = msgp.ReadInt32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "ResetHint") - return - } - case "TimestampMillisecond": - z.TimestampMillisecond, bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "TimestampMillisecond") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *Histogram) Msgsize() (s int) { - s = 1 + 6 + 1 + 6 + msgp.BoolSize + 9 + 
msgp.Uint64Size + 11 + msgp.Float64Size + 4 + msgp.Float64Size + 7 + msgp.Int32Size + 14 + msgp.Float64Size + 10 + 1 + 6 + msgp.BoolSize + 9 + msgp.Uint64Size + 11 + msgp.Float64Size + 14 + msgp.ArrayHeaderSize + (len(z.NegativeSpans) * (15 + msgp.Int32Size + msgp.Uint32Size)) + 16 + msgp.ArrayHeaderSize + (len(z.NegativeBuckets) * (msgp.Int64Size)) + 15 + msgp.ArrayHeaderSize + (len(z.NegativeCounts) * (msgp.Float64Size)) + 14 + msgp.ArrayHeaderSize + (len(z.PositiveSpans) * (15 + msgp.Int32Size + msgp.Uint32Size)) + 16 + msgp.ArrayHeaderSize + (len(z.PositiveBuckets) * (msgp.Int64Size)) + 15 + msgp.ArrayHeaderSize + (len(z.PositiveCounts) * (msgp.Float64Size)) + 10 + msgp.Int32Size + 21 + msgp.Int64Size - return -} - -// DecodeMsg implements msgp.Decodable -func (z *HistogramCount) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "IsInt": - z.IsInt, err = dc.ReadBool() - if err != nil { - err = msgp.WrapError(err, "IsInt") - return - } - case "IntValue": - z.IntValue, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "IntValue") - return - } - case "FloatValue": - z.FloatValue, err = dc.ReadFloat64() - if err != nil { - err = msgp.WrapError(err, "FloatValue") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z HistogramCount) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 3 - // write "IsInt" - err = en.Append(0x83, 0xa5, 0x49, 0x73, 0x49, 0x6e, 0x74) - if err != nil { - return - } - err = en.WriteBool(z.IsInt) - if err != nil { - err = msgp.WrapError(err, "IsInt") - return - } - // write "IntValue" - err = 
en.Append(0xa8, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) - if err != nil { - return - } - err = en.WriteUint64(z.IntValue) - if err != nil { - err = msgp.WrapError(err, "IntValue") - return - } - // write "FloatValue" - err = en.Append(0xaa, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) - if err != nil { - return - } - err = en.WriteFloat64(z.FloatValue) - if err != nil { - err = msgp.WrapError(err, "FloatValue") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z HistogramCount) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 3 - // string "IsInt" - o = append(o, 0x83, 0xa5, 0x49, 0x73, 0x49, 0x6e, 0x74) - o = msgp.AppendBool(o, z.IsInt) - // string "IntValue" - o = append(o, 0xa8, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) - o = msgp.AppendUint64(o, z.IntValue) - // string "FloatValue" - o = append(o, 0xaa, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) - o = msgp.AppendFloat64(o, z.FloatValue) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *HistogramCount) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "IsInt": - z.IsInt, bts, err = msgp.ReadBoolBytes(bts) - if err != nil { - err = msgp.WrapError(err, "IsInt") - return - } - case "IntValue": - z.IntValue, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "IntValue") - return - } - case "FloatValue": - z.FloatValue, bts, err = msgp.ReadFloat64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "FloatValue") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - 
return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z HistogramCount) Msgsize() (s int) { - s = 1 + 6 + msgp.BoolSize + 9 + msgp.Uint64Size + 11 + msgp.Float64Size - return -} - -// DecodeMsg implements msgp.Decodable -func (z *HistogramZeroCount) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "IsInt": - z.IsInt, err = dc.ReadBool() - if err != nil { - err = msgp.WrapError(err, "IsInt") - return - } - case "IntValue": - z.IntValue, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "IntValue") - return - } - case "FloatValue": - z.FloatValue, err = dc.ReadFloat64() - if err != nil { - err = msgp.WrapError(err, "FloatValue") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z HistogramZeroCount) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 3 - // write "IsInt" - err = en.Append(0x83, 0xa5, 0x49, 0x73, 0x49, 0x6e, 0x74) - if err != nil { - return - } - err = en.WriteBool(z.IsInt) - if err != nil { - err = msgp.WrapError(err, "IsInt") - return - } - // write "IntValue" - err = en.Append(0xa8, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) - if err != nil { - return - } - err = en.WriteUint64(z.IntValue) - if err != nil { - err = msgp.WrapError(err, "IntValue") - return - } - // write "FloatValue" - err = en.Append(0xaa, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) - if err != nil { - return - } - err = en.WriteFloat64(z.FloatValue) - if err != nil { - err = msgp.WrapError(err, "FloatValue") - return - } 
- return -} - -// MarshalMsg implements msgp.Marshaler -func (z HistogramZeroCount) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 3 - // string "IsInt" - o = append(o, 0x83, 0xa5, 0x49, 0x73, 0x49, 0x6e, 0x74) - o = msgp.AppendBool(o, z.IsInt) - // string "IntValue" - o = append(o, 0xa8, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) - o = msgp.AppendUint64(o, z.IntValue) - // string "FloatValue" - o = append(o, 0xaa, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) - o = msgp.AppendFloat64(o, z.FloatValue) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *HistogramZeroCount) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "IsInt": - z.IsInt, bts, err = msgp.ReadBoolBytes(bts) - if err != nil { - err = msgp.WrapError(err, "IsInt") - return - } - case "IntValue": - z.IntValue, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "IntValue") - return - } - case "FloatValue": - z.FloatValue, bts, err = msgp.ReadFloat64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "FloatValue") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z HistogramZeroCount) Msgsize() (s int) { - s = 1 + 6 + msgp.BoolSize + 9 + msgp.Uint64Size + 11 + msgp.Float64Size - return -} - -// DecodeMsg implements msgp.Decodable -func (z *Histograms) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, 
err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "Histogram": - if dc.IsNil() { - err = dc.ReadNil() - if err != nil { - err = msgp.WrapError(err, "Histogram") - return - } - z.Histogram = nil - } else { - if z.Histogram == nil { - z.Histogram = new(Histogram) - } - err = z.Histogram.DecodeMsg(dc) - if err != nil { - err = msgp.WrapError(err, "Histogram") - return - } - } - case "FloatHistogram": - if dc.IsNil() { - err = dc.ReadNil() - if err != nil { - err = msgp.WrapError(err, "FloatHistogram") - return - } - z.FloatHistogram = nil - } else { - if z.FloatHistogram == nil { - z.FloatHistogram = new(FloatHistogram) - } - err = z.FloatHistogram.DecodeMsg(dc) - if err != nil { - err = msgp.WrapError(err, "FloatHistogram") - return - } - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z *Histograms) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 2 - // write "Histogram" - err = en.Append(0x82, 0xa9, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d) - if err != nil { - return - } - if z.Histogram == nil { - err = en.WriteNil() - if err != nil { - return - } - } else { - err = z.Histogram.EncodeMsg(en) - if err != nil { - err = msgp.WrapError(err, "Histogram") - return - } - } - // write "FloatHistogram" - err = en.Append(0xae, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d) - if err != nil { - return - } - if z.FloatHistogram == nil { - err = en.WriteNil() - if err != nil { - return - } - } else { - err = z.FloatHistogram.EncodeMsg(en) - if err != nil { - err = msgp.WrapError(err, "FloatHistogram") - return - } - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z *Histograms) MarshalMsg(b 
[]byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 2 - // string "Histogram" - o = append(o, 0x82, 0xa9, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d) - if z.Histogram == nil { - o = msgp.AppendNil(o) - } else { - o, err = z.Histogram.MarshalMsg(o) - if err != nil { - err = msgp.WrapError(err, "Histogram") - return - } - } - // string "FloatHistogram" - o = append(o, 0xae, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d) - if z.FloatHistogram == nil { - o = msgp.AppendNil(o) - } else { - o, err = z.FloatHistogram.MarshalMsg(o) - if err != nil { - err = msgp.WrapError(err, "FloatHistogram") - return - } - } - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *Histograms) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "Histogram": - if msgp.IsNil(bts) { - bts, err = msgp.ReadNilBytes(bts) - if err != nil { - return - } - z.Histogram = nil - } else { - if z.Histogram == nil { - z.Histogram = new(Histogram) - } - bts, err = z.Histogram.UnmarshalMsg(bts) - if err != nil { - err = msgp.WrapError(err, "Histogram") - return - } - } - case "FloatHistogram": - if msgp.IsNil(bts) { - bts, err = msgp.ReadNilBytes(bts) - if err != nil { - return - } - z.FloatHistogram = nil - } else { - if z.FloatHistogram == nil { - z.FloatHistogram = new(FloatHistogram) - } - bts, err = z.FloatHistogram.UnmarshalMsg(bts) - if err != nil { - err = msgp.WrapError(err, "FloatHistogram") - return - } - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound 
estimate of the number of bytes occupied by the serialized message -func (z *Histograms) Msgsize() (s int) { - s = 1 + 10 - if z.Histogram == nil { - s += msgp.NilSize - } else { - s += z.Histogram.Msgsize() - } - s += 15 - if z.FloatHistogram == nil { - s += msgp.NilSize - } else { - s += z.FloatHistogram.Msgsize() - } - return -} - -// DecodeMsg implements msgp.Decodable -func (z *SeriesGroup) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "Strings": - var zb0002 uint32 - zb0002, err = dc.ReadArrayHeader() - if err != nil { - err = msgp.WrapError(err, "Strings") - return - } - if cap(z.Strings) >= int(zb0002) { - z.Strings = (z.Strings)[:zb0002] - } else { - z.Strings = make([]string, zb0002) - } - for za0001 := range z.Strings { - z.Strings[za0001], err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Strings", za0001) - return - } - } - case "Series": - var zb0003 uint32 - zb0003, err = dc.ReadArrayHeader() - if err != nil { - err = msgp.WrapError(err, "Series") - return - } - if cap(z.Series) >= int(zb0003) { - z.Series = (z.Series)[:zb0003] - } else { - z.Series = make([]*TimeSeriesBinary, zb0003) - } - for za0002 := range z.Series { - if dc.IsNil() { - err = dc.ReadNil() - if err != nil { - err = msgp.WrapError(err, "Series", za0002) - return - } - z.Series[za0002] = nil - } else { - if z.Series[za0002] == nil { - z.Series[za0002] = new(TimeSeriesBinary) - } - err = z.Series[za0002].DecodeMsg(dc) - if err != nil { - err = msgp.WrapError(err, "Series", za0002) - return - } - } - } - case "Metadata": - var zb0004 uint32 - zb0004, err = dc.ReadArrayHeader() - if err != nil { - err = msgp.WrapError(err, "Metadata") - return - } - if 
cap(z.Metadata) >= int(zb0004) { - z.Metadata = (z.Metadata)[:zb0004] - } else { - z.Metadata = make([]*TimeSeriesBinary, zb0004) - } - for za0003 := range z.Metadata { - if dc.IsNil() { - err = dc.ReadNil() - if err != nil { - err = msgp.WrapError(err, "Metadata", za0003) - return - } - z.Metadata[za0003] = nil - } else { - if z.Metadata[za0003] == nil { - z.Metadata[za0003] = new(TimeSeriesBinary) - } - err = z.Metadata[za0003].DecodeMsg(dc) - if err != nil { - err = msgp.WrapError(err, "Metadata", za0003) - return - } - } - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z *SeriesGroup) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 3 - // write "Strings" - err = en.Append(0x83, 0xa7, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x73) - if err != nil { - return - } - err = en.WriteArrayHeader(uint32(len(z.Strings))) - if err != nil { - err = msgp.WrapError(err, "Strings") - return - } - for za0001 := range z.Strings { - err = en.WriteString(z.Strings[za0001]) - if err != nil { - err = msgp.WrapError(err, "Strings", za0001) - return - } - } - // write "Series" - err = en.Append(0xa6, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73) - if err != nil { - return - } - err = en.WriteArrayHeader(uint32(len(z.Series))) - if err != nil { - err = msgp.WrapError(err, "Series") - return - } - for za0002 := range z.Series { - if z.Series[za0002] == nil { - err = en.WriteNil() - if err != nil { - return - } - } else { - err = z.Series[za0002].EncodeMsg(en) - if err != nil { - err = msgp.WrapError(err, "Series", za0002) - return - } - } - } - // write "Metadata" - err = en.Append(0xa8, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61) - if err != nil { - return - } - err = en.WriteArrayHeader(uint32(len(z.Metadata))) - if err != nil { - err = msgp.WrapError(err, "Metadata") - return - } - for za0003 := range z.Metadata { - if z.Metadata[za0003] == nil { - err = en.WriteNil() 
- if err != nil { - return - } - } else { - err = z.Metadata[za0003].EncodeMsg(en) - if err != nil { - err = msgp.WrapError(err, "Metadata", za0003) - return - } - } - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z *SeriesGroup) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 3 - // string "Strings" - o = append(o, 0x83, 0xa7, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.Strings))) - for za0001 := range z.Strings { - o = msgp.AppendString(o, z.Strings[za0001]) - } - // string "Series" - o = append(o, 0xa6, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.Series))) - for za0002 := range z.Series { - if z.Series[za0002] == nil { - o = msgp.AppendNil(o) - } else { - o, err = z.Series[za0002].MarshalMsg(o) - if err != nil { - err = msgp.WrapError(err, "Series", za0002) - return - } - } - } - // string "Metadata" - o = append(o, 0xa8, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61) - o = msgp.AppendArrayHeader(o, uint32(len(z.Metadata))) - for za0003 := range z.Metadata { - if z.Metadata[za0003] == nil { - o = msgp.AppendNil(o) - } else { - o, err = z.Metadata[za0003].MarshalMsg(o) - if err != nil { - err = msgp.WrapError(err, "Metadata", za0003) - return - } - } - } - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *SeriesGroup) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "Strings": - var zb0002 uint32 - zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Strings") - return - } - if cap(z.Strings) >= int(zb0002) { - z.Strings = 
(z.Strings)[:zb0002] - } else { - z.Strings = make([]string, zb0002) - } - for za0001 := range z.Strings { - z.Strings[za0001], bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Strings", za0001) - return - } - } - case "Series": - var zb0003 uint32 - zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Series") - return - } - if cap(z.Series) >= int(zb0003) { - z.Series = (z.Series)[:zb0003] - } else { - z.Series = make([]*TimeSeriesBinary, zb0003) - } - for za0002 := range z.Series { - if msgp.IsNil(bts) { - bts, err = msgp.ReadNilBytes(bts) - if err != nil { - return - } - z.Series[za0002] = nil - } else { - if z.Series[za0002] == nil { - z.Series[za0002] = new(TimeSeriesBinary) - } - bts, err = z.Series[za0002].UnmarshalMsg(bts) - if err != nil { - err = msgp.WrapError(err, "Series", za0002) - return - } - } - } - case "Metadata": - var zb0004 uint32 - zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Metadata") - return - } - if cap(z.Metadata) >= int(zb0004) { - z.Metadata = (z.Metadata)[:zb0004] - } else { - z.Metadata = make([]*TimeSeriesBinary, zb0004) - } - for za0003 := range z.Metadata { - if msgp.IsNil(bts) { - bts, err = msgp.ReadNilBytes(bts) - if err != nil { - return - } - z.Metadata[za0003] = nil - } else { - if z.Metadata[za0003] == nil { - z.Metadata[za0003] = new(TimeSeriesBinary) - } - bts, err = z.Metadata[za0003].UnmarshalMsg(bts) - if err != nil { - err = msgp.WrapError(err, "Metadata", za0003) - return - } - } - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *SeriesGroup) Msgsize() (s int) { - s = 1 + 8 + msgp.ArrayHeaderSize - for za0001 := range z.Strings { - s += msgp.StringPrefixSize + len(z.Strings[za0001]) - } - s += 7 
+ msgp.ArrayHeaderSize - for za0002 := range z.Series { - if z.Series[za0002] == nil { - s += msgp.NilSize - } else { - s += z.Series[za0002].Msgsize() - } - } - s += 9 + msgp.ArrayHeaderSize - for za0003 := range z.Metadata { - if z.Metadata[za0003] == nil { - s += msgp.NilSize - } else { - s += z.Metadata[za0003].Msgsize() - } - } - return -} - -// DecodeMsg implements msgp.Decodable -func (z *TimeSeriesBinary) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "LabelsNames": - var zb0002 uint32 - zb0002, err = dc.ReadArrayHeader() - if err != nil { - err = msgp.WrapError(err, "LabelsNames") - return - } - if cap(z.LabelsNames) >= int(zb0002) { - z.LabelsNames = (z.LabelsNames)[:zb0002] - } else { - z.LabelsNames = make([]uint32, zb0002) - } - for za0001 := range z.LabelsNames { - z.LabelsNames[za0001], err = dc.ReadUint32() - if err != nil { - err = msgp.WrapError(err, "LabelsNames", za0001) - return - } - } - case "LabelsValues": - var zb0003 uint32 - zb0003, err = dc.ReadArrayHeader() - if err != nil { - err = msgp.WrapError(err, "LabelsValues") - return - } - if cap(z.LabelsValues) >= int(zb0003) { - z.LabelsValues = (z.LabelsValues)[:zb0003] - } else { - z.LabelsValues = make([]uint32, zb0003) - } - for za0002 := range z.LabelsValues { - z.LabelsValues[za0002], err = dc.ReadUint32() - if err != nil { - err = msgp.WrapError(err, "LabelsValues", za0002) - return - } - } - case "TS": - z.TS, err = dc.ReadInt64() - if err != nil { - err = msgp.WrapError(err, "TS") - return - } - case "Value": - z.Value, err = dc.ReadFloat64() - if err != nil { - err = msgp.WrapError(err, "Value") - return - } - case "Hash": - z.Hash, err = dc.ReadUint64() - if err != nil { - err = 
msgp.WrapError(err, "Hash") - return - } - case "Histograms": - err = z.Histograms.DecodeMsg(dc) - if err != nil { - err = msgp.WrapError(err, "Histograms") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z *TimeSeriesBinary) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 6 - // write "LabelsNames" - err = en.Append(0x86, 0xab, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73) - if err != nil { - return - } - err = en.WriteArrayHeader(uint32(len(z.LabelsNames))) - if err != nil { - err = msgp.WrapError(err, "LabelsNames") - return - } - for za0001 := range z.LabelsNames { - err = en.WriteUint32(z.LabelsNames[za0001]) - if err != nil { - err = msgp.WrapError(err, "LabelsNames", za0001) - return - } - } - // write "LabelsValues" - err = en.Append(0xac, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73) - if err != nil { - return - } - err = en.WriteArrayHeader(uint32(len(z.LabelsValues))) - if err != nil { - err = msgp.WrapError(err, "LabelsValues") - return - } - for za0002 := range z.LabelsValues { - err = en.WriteUint32(z.LabelsValues[za0002]) - if err != nil { - err = msgp.WrapError(err, "LabelsValues", za0002) - return - } - } - // write "TS" - err = en.Append(0xa2, 0x54, 0x53) - if err != nil { - return - } - err = en.WriteInt64(z.TS) - if err != nil { - err = msgp.WrapError(err, "TS") - return - } - // write "Value" - err = en.Append(0xa5, 0x56, 0x61, 0x6c, 0x75, 0x65) - if err != nil { - return - } - err = en.WriteFloat64(z.Value) - if err != nil { - err = msgp.WrapError(err, "Value") - return - } - // write "Hash" - err = en.Append(0xa4, 0x48, 0x61, 0x73, 0x68) - if err != nil { - return - } - err = en.WriteUint64(z.Hash) - if err != nil { - err = msgp.WrapError(err, "Hash") - return - } - // write "Histograms" - err = en.Append(0xaa, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 
0x6d, 0x73) - if err != nil { - return - } - err = z.Histograms.EncodeMsg(en) - if err != nil { - err = msgp.WrapError(err, "Histograms") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z *TimeSeriesBinary) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 6 - // string "LabelsNames" - o = append(o, 0x86, 0xab, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.LabelsNames))) - for za0001 := range z.LabelsNames { - o = msgp.AppendUint32(o, z.LabelsNames[za0001]) - } - // string "LabelsValues" - o = append(o, 0xac, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.LabelsValues))) - for za0002 := range z.LabelsValues { - o = msgp.AppendUint32(o, z.LabelsValues[za0002]) - } - // string "TS" - o = append(o, 0xa2, 0x54, 0x53) - o = msgp.AppendInt64(o, z.TS) - // string "Value" - o = append(o, 0xa5, 0x56, 0x61, 0x6c, 0x75, 0x65) - o = msgp.AppendFloat64(o, z.Value) - // string "Hash" - o = append(o, 0xa4, 0x48, 0x61, 0x73, 0x68) - o = msgp.AppendUint64(o, z.Hash) - // string "Histograms" - o = append(o, 0xaa, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x73) - o, err = z.Histograms.MarshalMsg(o) - if err != nil { - err = msgp.WrapError(err, "Histograms") - return - } - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *TimeSeriesBinary) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "LabelsNames": - var zb0002 uint32 - zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, 
"LabelsNames") - return - } - if cap(z.LabelsNames) >= int(zb0002) { - z.LabelsNames = (z.LabelsNames)[:zb0002] - } else { - z.LabelsNames = make([]uint32, zb0002) - } - for za0001 := range z.LabelsNames { - z.LabelsNames[za0001], bts, err = msgp.ReadUint32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "LabelsNames", za0001) - return - } - } - case "LabelsValues": - var zb0003 uint32 - zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "LabelsValues") - return - } - if cap(z.LabelsValues) >= int(zb0003) { - z.LabelsValues = (z.LabelsValues)[:zb0003] - } else { - z.LabelsValues = make([]uint32, zb0003) - } - for za0002 := range z.LabelsValues { - z.LabelsValues[za0002], bts, err = msgp.ReadUint32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "LabelsValues", za0002) - return - } - } - case "TS": - z.TS, bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "TS") - return - } - case "Value": - z.Value, bts, err = msgp.ReadFloat64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Value") - return - } - case "Hash": - z.Hash, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Hash") - return - } - case "Histograms": - bts, err = z.Histograms.UnmarshalMsg(bts) - if err != nil { - err = msgp.WrapError(err, "Histograms") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *TimeSeriesBinary) Msgsize() (s int) { - s = 1 + 12 + msgp.ArrayHeaderSize + (len(z.LabelsNames) * (msgp.Uint32Size)) + 13 + msgp.ArrayHeaderSize + (len(z.LabelsValues) * (msgp.Uint32Size)) + 3 + msgp.Int64Size + 6 + msgp.Float64Size + 5 + msgp.Uint64Size + 11 + z.Histograms.Msgsize() - return -} diff --git 
a/internal/component/prometheus/write/queue/types/serialization_gen_test.go b/internal/component/prometheus/write/queue/types/serialization_gen_test.go deleted file mode 100644 index e6e18c7901..0000000000 --- a/internal/component/prometheus/write/queue/types/serialization_gen_test.go +++ /dev/null @@ -1,914 +0,0 @@ -package types - -// Code generated by github.com/tinylib/msgp DO NOT EDIT. - -import ( - "bytes" - "testing" - - "github.com/tinylib/msgp/msgp" -) - -func TestMarshalUnmarshalBucketSpan(t *testing.T) { - v := BucketSpan{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgBucketSpan(b *testing.B) { - v := BucketSpan{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgBucketSpan(b *testing.B) { - v := BucketSpan{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalBucketSpan(b *testing.B) { - v := BucketSpan{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeBucketSpan(t *testing.T) { - v := BucketSpan{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeBucketSpan Msgsize() is inaccurate") - } - - vn := BucketSpan{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - 
buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeBucketSpan(b *testing.B) { - v := BucketSpan{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeBucketSpan(b *testing.B) { - v := BucketSpan{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} - -func TestMarshalUnmarshalFloatHistogram(t *testing.T) { - v := FloatHistogram{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgFloatHistogram(b *testing.B) { - v := FloatHistogram{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgFloatHistogram(b *testing.B) { - v := FloatHistogram{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalFloatHistogram(b *testing.B) { - v := FloatHistogram{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func 
TestEncodeDecodeFloatHistogram(t *testing.T) { - v := FloatHistogram{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeFloatHistogram Msgsize() is inaccurate") - } - - vn := FloatHistogram{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeFloatHistogram(b *testing.B) { - v := FloatHistogram{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeFloatHistogram(b *testing.B) { - v := FloatHistogram{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} - -func TestMarshalUnmarshalHistogram(t *testing.T) { - v := Histogram{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgHistogram(b *testing.B) { - v := Histogram{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgHistogram(b *testing.B) { - v := Histogram{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = 
v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalHistogram(b *testing.B) { - v := Histogram{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeHistogram(t *testing.T) { - v := Histogram{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeHistogram Msgsize() is inaccurate") - } - - vn := Histogram{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeHistogram(b *testing.B) { - v := Histogram{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeHistogram(b *testing.B) { - v := Histogram{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} - -func TestMarshalUnmarshalHistogramCount(t *testing.T) { - v := HistogramCount{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgHistogramCount(b *testing.B) { - v := HistogramCount{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; 
i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgHistogramCount(b *testing.B) { - v := HistogramCount{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalHistogramCount(b *testing.B) { - v := HistogramCount{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeHistogramCount(t *testing.T) { - v := HistogramCount{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeHistogramCount Msgsize() is inaccurate") - } - - vn := HistogramCount{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeHistogramCount(b *testing.B) { - v := HistogramCount{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeHistogramCount(b *testing.B) { - v := HistogramCount{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} - -func TestMarshalUnmarshalHistogramZeroCount(t *testing.T) { - v := HistogramZeroCount{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d 
bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgHistogramZeroCount(b *testing.B) { - v := HistogramZeroCount{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgHistogramZeroCount(b *testing.B) { - v := HistogramZeroCount{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalHistogramZeroCount(b *testing.B) { - v := HistogramZeroCount{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeHistogramZeroCount(t *testing.T) { - v := HistogramZeroCount{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeHistogramZeroCount Msgsize() is inaccurate") - } - - vn := HistogramZeroCount{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeHistogramZeroCount(b *testing.B) { - v := HistogramZeroCount{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeHistogramZeroCount(b *testing.B) { - v := HistogramZeroCount{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := 
msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} - -func TestMarshalUnmarshalHistograms(t *testing.T) { - v := Histograms{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgHistograms(b *testing.B) { - v := Histograms{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgHistograms(b *testing.B) { - v := Histograms{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalHistograms(b *testing.B) { - v := Histograms{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeHistograms(t *testing.T) { - v := Histograms{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeHistograms Msgsize() is inaccurate") - } - - vn := Histograms{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeHistograms(b *testing.B) { - v := Histograms{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() 
- b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeHistograms(b *testing.B) { - v := Histograms{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} - -func TestMarshalUnmarshalSeriesGroup(t *testing.T) { - v := SeriesGroup{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgSeriesGroup(b *testing.B) { - v := SeriesGroup{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgSeriesGroup(b *testing.B) { - v := SeriesGroup{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalSeriesGroup(b *testing.B) { - v := SeriesGroup{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeSeriesGroup(t *testing.T) { - v := SeriesGroup{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeSeriesGroup Msgsize() is inaccurate") - } - - vn := SeriesGroup{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - 
msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeSeriesGroup(b *testing.B) { - v := SeriesGroup{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeSeriesGroup(b *testing.B) { - v := SeriesGroup{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} - -func TestMarshalUnmarshalTimeSeriesBinary(t *testing.T) { - v := TimeSeriesBinary{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgTimeSeriesBinary(b *testing.B) { - v := TimeSeriesBinary{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgTimeSeriesBinary(b *testing.B) { - v := TimeSeriesBinary{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalTimeSeriesBinary(b *testing.B) { - v := TimeSeriesBinary{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - 
-func TestEncodeDecodeTimeSeriesBinary(t *testing.T) { - v := TimeSeriesBinary{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeTimeSeriesBinary Msgsize() is inaccurate") - } - - vn := TimeSeriesBinary{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeTimeSeriesBinary(b *testing.B) { - v := TimeSeriesBinary{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeTimeSeriesBinary(b *testing.B) { - v := TimeSeriesBinary{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} diff --git a/internal/component/prometheus/write/queue/types/serialization_test.go b/internal/component/prometheus/write/queue/types/serialization_test.go deleted file mode 100644 index 59f6d077ae..0000000000 --- a/internal/component/prometheus/write/queue/types/serialization_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package types - -import ( - "fmt" - "math/rand" - "testing" - - "github.com/prometheus/prometheus/model/labels" - "github.com/stretchr/testify/require" -) - -func TestLabels(t *testing.T) { - lblsMap := make(map[string]string) - unique := make(map[string]struct{}) - for i := 0; i < 1_000; i++ { - k := fmt.Sprintf("key_%d", i) - v := randString() - lblsMap[k] = v - unique[k] = struct{}{} - unique[v] = struct{}{} - } - sg := &SeriesGroup{ - Series: make([]*TimeSeriesBinary, 1), - } - sg.Series[0] = GetTimeSeriesFromPool() - defer 
PutTimeSeriesIntoPool(sg.Series[0]) - sg.Series[0].Labels = labels.FromMap(lblsMap) - strMap := make(map[string]uint32) - - sg.Series[0].FillLabelMapping(strMap) - stringsSlice := make([]string, len(strMap)) - for k, v := range strMap { - stringsSlice[v] = k - } - sg.Strings = stringsSlice - buf, err := sg.MarshalMsg(nil) - require.NoError(t, err) - newSg := &SeriesGroup{} - newSg, _, err = DeserializeToSeriesGroup(newSg, buf) - require.NoError(t, err) - series1 := newSg.Series[0] - series2 := sg.Series[0] - require.Len(t, series2.Labels, len(series1.Labels)) - // Ensure we were able to convert back and forth properly. - for i, lbl := range series2.Labels { - require.Equal(t, lbl.Name, series1.Labels[i].Name) - require.Equal(t, lbl.Value, series1.Labels[i].Value) - } -} - -var letterRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") - -func randString() string { - b := make([]rune, rand.Intn(20)) - for i := range b { - b[i] = letterRunes[rand.Intn(len(letterRunes))] - } - return string(b) -} diff --git a/internal/component/prometheus/write/queue/types/serializer.go b/internal/component/prometheus/write/queue/types/serializer.go deleted file mode 100644 index d0041242cc..0000000000 --- a/internal/component/prometheus/write/queue/types/serializer.go +++ /dev/null @@ -1,24 +0,0 @@ -package types - -import ( - "context" - "time" -) - -const AlloyFileVersion = "alloy.metrics.queue.v1" - -type SerializerConfig struct { - // MaxSignalsInBatch controls what the max batch size is. - MaxSignalsInBatch uint32 - // FlushFrequency controls how often to write to disk regardless of MaxSignalsInBatch. - FlushFrequency time.Duration -} - -// Serializer handles converting a set of signals into a binary representation to be written to storage. 
-type Serializer interface { - Start() - Stop() - SendSeries(ctx context.Context, data *TimeSeriesBinary) error - SendMetadata(ctx context.Context, data *TimeSeriesBinary) error - UpdateConfig(ctx context.Context, cfg SerializerConfig) error -} diff --git a/internal/component/prometheus/write/queue/types/stats.go b/internal/component/prometheus/write/queue/types/stats.go deleted file mode 100644 index 732b6255aa..0000000000 --- a/internal/component/prometheus/write/queue/types/stats.go +++ /dev/null @@ -1,289 +0,0 @@ -package types - -import ( - "time" - - "github.com/prometheus/client_golang/prometheus" -) - -// TODO @mattdurham separate this into more manageable chunks, and likely 3 stats series: series, metadata and new ones. - -type SerializerStats struct { - SeriesStored int - MetadataStored int - Errors int - NewestTimestamp int64 -} - -type PrometheusStats struct { - // Network Stats - NetworkSeriesSent prometheus.Counter - NetworkFailures prometheus.Counter - NetworkRetries prometheus.Counter - NetworkRetries429 prometheus.Counter - NetworkRetries5XX prometheus.Counter - NetworkSentDuration prometheus.Histogram - NetworkErrors prometheus.Counter - NetworkNewestOutTimeStampSeconds prometheus.Gauge - - // Serializer Stats - SerializerInSeries prometheus.Counter - SerializerNewestInTimeStampSeconds prometheus.Gauge - SerializerErrors prometheus.Counter - - // Backwards compatibility metrics - SamplesTotal prometheus.Counter - HistogramsTotal prometheus.Counter - MetadataTotal prometheus.Counter - - FailedSamplesTotal prometheus.Counter - FailedHistogramsTotal prometheus.Counter - FailedMetadataTotal prometheus.Counter - - RetriedSamplesTotal prometheus.Counter - RetriedHistogramsTotal prometheus.Counter - RetriedMetadataTotal prometheus.Counter - - EnqueueRetriesTotal prometheus.Counter - SentBatchDuration prometheus.Histogram - HighestSentTimestamp prometheus.Gauge - - SentBytesTotal prometheus.Counter - MetadataBytesTotal prometheus.Counter - 
RemoteStorageInTimestamp prometheus.Gauge - RemoteStorageOutTimestamp prometheus.Gauge - RemoteStorageDuration prometheus.Histogram -} - -func NewStats(namespace, subsystem string, registry prometheus.Registerer) *PrometheusStats { - s := &PrometheusStats{ - SerializerInSeries: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "serializer_incoming_signals", - }), - SerializerNewestInTimeStampSeconds: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "serializer_incoming_timestamp_seconds", - }), - SerializerErrors: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "serializer_errors", - }), - NetworkNewestOutTimeStampSeconds: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "network_timestamp_seconds", - }), - RemoteStorageDuration: prometheus.NewHistogram(prometheus.HistogramOpts{ - Name: "prometheus_remote_storage_queue_duration_seconds", - }), - NetworkSeriesSent: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "network_sent", - }), - NetworkFailures: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "network_failed", - }), - NetworkRetries: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "network_retried", - }), - NetworkRetries429: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "network_retried_429", - }), - NetworkRetries5XX: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "network_retried_5xx", - }), - NetworkSentDuration: prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "network_duration_seconds", - NativeHistogramBucketFactor: 1.1, - }), - 
NetworkErrors: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "network_errors", - }), - RemoteStorageOutTimestamp: prometheus.NewGauge(prometheus.GaugeOpts{ - Name: "prometheus_remote_storage_queue_highest_sent_timestamp_seconds", - }), - RemoteStorageInTimestamp: prometheus.NewGauge(prometheus.GaugeOpts{ - Name: "prometheus_remote_storage_highest_timestamp_in_seconds", - }), - SamplesTotal: prometheus.NewCounter(prometheus.CounterOpts{ - Name: "prometheus_remote_storage_samples_total", - Help: "Total number of samples sent to remote storage.", - }), - HistogramsTotal: prometheus.NewCounter(prometheus.CounterOpts{ - Name: "prometheus_remote_storage_histograms_total", - Help: "Total number of histograms sent to remote storage.", - }), - MetadataTotal: prometheus.NewCounter(prometheus.CounterOpts{ - Name: "prometheus_remote_storage_metadata_total", - Help: "Total number of metadata sent to remote storage.", - }), - FailedSamplesTotal: prometheus.NewCounter(prometheus.CounterOpts{ - Name: "prometheus_remote_storage_samples_failed_total", - Help: "Total number of samples which failed on send to remote storage, non-recoverable errors.", - }), - FailedHistogramsTotal: prometheus.NewCounter(prometheus.CounterOpts{ - Name: "prometheus_remote_storage_histograms_failed_total", - Help: "Total number of histograms which failed on send to remote storage, non-recoverable errors.", - }), - FailedMetadataTotal: prometheus.NewCounter(prometheus.CounterOpts{ - Name: "prometheus_remote_storage_metadata_failed_total", - Help: "Total number of metadata entries which failed on send to remote storage, non-recoverable errors.", - }), - - RetriedSamplesTotal: prometheus.NewCounter(prometheus.CounterOpts{ - Name: "prometheus_remote_storage_samples_retried_total", - Help: "Total number of samples which failed on send to remote storage but were retried because the send error was recoverable.", - }), - RetriedHistogramsTotal: 
prometheus.NewCounter(prometheus.CounterOpts{ - Name: "prometheus_remote_storage_histograms_retried_total", - Help: "Total number of histograms which failed on send to remote storage but were retried because the send error was recoverable.", - }), - RetriedMetadataTotal: prometheus.NewCounter(prometheus.CounterOpts{ - Name: "prometheus_remote_storage_metadata_retried_total", - Help: "Total number of metadata entries which failed on send to remote storage but were retried because the send error was recoverable.", - }), - SentBytesTotal: prometheus.NewCounter(prometheus.CounterOpts{ - Name: "prometheus_remote_storage_sent_bytes_total", - Help: "The total number of bytes of data (not metadata) sent by the queue after compression. Note that when exemplars over remote write is enabled the exemplars included in a remote write request count towards this metric.", - }), - MetadataBytesTotal: prometheus.NewCounter(prometheus.CounterOpts{ - Name: "prometheus_remote_storage_metadata_bytes_total", - Help: "The total number of bytes of metadata sent by the queue after compression.", - }), - } - registry.MustRegister( - s.NetworkSentDuration, - s.NetworkRetries5XX, - s.NetworkRetries429, - s.NetworkRetries, - s.NetworkFailures, - s.NetworkSeriesSent, - s.NetworkErrors, - s.NetworkNewestOutTimeStampSeconds, - s.SerializerInSeries, - s.SerializerErrors, - s.SerializerNewestInTimeStampSeconds, - ) - return s -} - -func (s *PrometheusStats) SeriesBackwardsCompatibility(registry prometheus.Registerer) { - registry.MustRegister( - s.RemoteStorageDuration, - s.RemoteStorageInTimestamp, - s.RemoteStorageOutTimestamp, - s.SamplesTotal, - s.HistogramsTotal, - s.FailedSamplesTotal, - s.FailedHistogramsTotal, - s.RetriedSamplesTotal, - s.RetriedHistogramsTotal, - s.SentBytesTotal, - ) -} - -func (s *PrometheusStats) MetaBackwardsCompatibility(registry prometheus.Registerer) { - registry.MustRegister( - s.MetadataTotal, - s.FailedMetadataTotal, - s.RetriedMetadataTotal, - 
s.MetadataBytesTotal, - ) -} - -func (s *PrometheusStats) UpdateNetwork(stats NetworkStats) { - s.NetworkSeriesSent.Add(float64(stats.TotalSent())) - s.NetworkRetries.Add(float64(stats.TotalRetried())) - s.NetworkFailures.Add(float64(stats.TotalFailed())) - s.NetworkRetries429.Add(float64(stats.Total429())) - s.NetworkRetries5XX.Add(float64(stats.Total5XX())) - s.NetworkSentDuration.Observe(stats.SendDuration.Seconds()) - s.RemoteStorageDuration.Observe(stats.SendDuration.Seconds()) - // The newest timestamp is no always sent. - if stats.NewestTimestamp != 0 { - s.RemoteStorageOutTimestamp.Set(float64(stats.NewestTimestamp)) - s.NetworkNewestOutTimeStampSeconds.Set(float64(stats.NewestTimestamp)) - } - - s.SamplesTotal.Add(float64(stats.Series.SeriesSent)) - s.MetadataTotal.Add(float64(stats.Metadata.SeriesSent)) - s.HistogramsTotal.Add(float64(stats.Histogram.SeriesSent)) - - s.FailedSamplesTotal.Add(float64(stats.Series.FailedSamples)) - s.FailedMetadataTotal.Add(float64(stats.Metadata.FailedSamples)) - s.FailedHistogramsTotal.Add(float64(stats.Histogram.FailedSamples)) - - s.RetriedSamplesTotal.Add(float64(stats.Series.RetriedSamples)) - s.RetriedHistogramsTotal.Add(float64(stats.Histogram.RetriedSamples)) - s.RetriedMetadataTotal.Add(float64(stats.Metadata.RetriedSamples)) - - s.MetadataBytesTotal.Add(float64(stats.MetadataBytes)) - s.SentBytesTotal.Add(float64(stats.SeriesBytes)) -} - -func (s *PrometheusStats) UpdateSerializer(stats SerializerStats) { - s.SerializerInSeries.Add(float64(stats.SeriesStored)) - s.SerializerInSeries.Add(float64(stats.MetadataStored)) - s.SerializerErrors.Add(float64(stats.Errors)) - if stats.NewestTimestamp != 0 { - s.SerializerNewestInTimeStampSeconds.Set(float64(stats.NewestTimestamp)) - s.RemoteStorageInTimestamp.Set(float64(stats.NewestTimestamp)) - } - -} - -type NetworkStats struct { - Series CategoryStats - Histogram CategoryStats - Metadata CategoryStats - SendDuration time.Duration - NewestTimestamp int64 - SeriesBytes 
int - MetadataBytes int -} - -func (ns NetworkStats) TotalSent() int { - return ns.Series.SeriesSent + ns.Histogram.SeriesSent + ns.Metadata.SeriesSent -} - -func (ns NetworkStats) TotalRetried() int { - return ns.Series.RetriedSamples + ns.Histogram.RetriedSamples + ns.Metadata.RetriedSamples -} - -func (ns NetworkStats) TotalFailed() int { - return ns.Series.FailedSamples + ns.Histogram.FailedSamples + ns.Metadata.FailedSamples -} - -func (ns NetworkStats) Total429() int { - return ns.Series.RetriedSamples429 + ns.Histogram.RetriedSamples429 + ns.Metadata.RetriedSamples429 -} - -func (ns NetworkStats) Total5XX() int { - return ns.Series.RetriedSamples5XX + ns.Histogram.RetriedSamples5XX + ns.Metadata.RetriedSamples5XX -} - -type CategoryStats struct { - RetriedSamples int - RetriedSamples429 int - RetriedSamples5XX int - SeriesSent int - FailedSamples int - NetworkSamplesFailed int -} diff --git a/internal/component/prometheus/write/queue/types/storage.go b/internal/component/prometheus/write/queue/types/storage.go deleted file mode 100644 index 6fe262ab46..0000000000 --- a/internal/component/prometheus/write/queue/types/storage.go +++ /dev/null @@ -1,11 +0,0 @@ -package types - -import ( - "context" -) - -type FileStorage interface { - Start() - Stop() - Store(ctx context.Context, meta map[string]string, value []byte) error -} diff --git a/internal/component/prometheus/write/queue/types/storage_test.go b/internal/component/prometheus/write/queue/types/storage_test.go deleted file mode 100644 index 4b58550601..0000000000 --- a/internal/component/prometheus/write/queue/types/storage_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package types - -import ( - "testing" - - "github.com/prometheus/prometheus/model/labels" - "github.com/stretchr/testify/require" -) - -func TestStorage(t *testing.T) { - ts := GetTimeSeriesFromPool() - ts.Labels = labels.FromStrings("one", "two") - ts.LabelsValues = make([]uint32, 1) - ts.LabelsNames = make([]uint32, 1) - ts.LabelsValues[0] = 1 
- ts.LabelsNames[0] = 2 - - PutTimeSeriesIntoPool(ts) - ts = GetTimeSeriesFromPool() - defer PutTimeSeriesIntoPool(ts) - require.Len(t, ts.Labels, 0) - require.Len(t, ts.LabelsValues, 0) - require.Len(t, ts.LabelsNames, 0) -}