diff --git a/docs/Makefile b/docs/Makefile
index ea9ddf0df3..e233b21be6 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -11,8 +11,7 @@ include docs.mk
 docs: check-cloudwatch-integration
 
 check-cloudwatch-integration:
-	$(PODMAN) run -v $(shell git rev-parse --show-toplevel):/repo -v $(shell pwd):/docs -w /repo golang:1.21-bullseye go run internal/static/integrations/cloudwatch_exporter/docs/doc.go check /docs/sources/static/configuration/integrations/cloudwatch-exporter-config.md
-	$(PODMAN) run -v $(shell git rev-parse --show-toplevel):/repo -v $(shell pwd):/docs -w /repo golang:1.21-bullseye go run internal/static/integrations/cloudwatch_exporter/docs/doc.go check /docs/sources/flow/reference/components/prometheus.exporter.cloudwatch.md
+	$(PODMAN) run -v $(shell git rev-parse --show-toplevel):/repo -v $(shell pwd):/docs -w /repo golang:1.21-bullseye go run internal/static/integrations/cloudwatch_exporter/docs/doc.go check /docs/sources/reference/components/prometheus.exporter.cloudwatch.md
 
 generate-cloudwatch-integration:
 	$(PODMAN) run -v $(shell git rev-parse --show-toplevel):/repo -v $(shell pwd):/docs -w /repo golang:1.21-bullseye go run internal/static/integrations/cloudwatch_exporter/docs/doc.go generate
diff --git a/docs/sources/about.md b/docs/sources/about.md
index a2df0de45c..a2dcb00b71 100644
--- a/docs/sources/about.md
+++ b/docs/sources/about.md
@@ -72,7 +72,7 @@ prometheus.remote_write "default" {
 
 ## {{% param "PRODUCT_NAME" %}} configuration generator
 
-The {{< param "PRODUCT_NAME" >}} [configuration generator][] helps you get a head start on creating flow code.
+The {{< param "PRODUCT_NAME" >}} [configuration generator][] helps you get a head start on creating {{< param "PRODUCT_NAME" >}} configurations.
 
 {{< admonition type="note" >}}
 This feature is experimental, and it doesn't support all River components.
diff --git a/docs/sources/reference/cli/convert.md b/docs/sources/reference/cli/convert.md
index 1a8ccfc7b2..d736fb7186 100644
--- a/docs/sources/reference/cli/convert.md
+++ b/docs/sources/reference/cli/convert.md
@@ -70,7 +70,7 @@ Using the `--source-format=promtail` will convert the source configuration from
 
 Nearly all [Promtail features][] are supported and can be converted to {{< param "PRODUCT_NAME" >}} configuration.
 
-If you have unsupported features in a source configuration, you will receive [errors][] when you convert to a flow configuration.
+If you have unsupported features in a source configuration, you will receive [errors][] when you convert to a {{< param "PRODUCT_NAME" >}} configuration.
 The converter will also raise warnings for configuration options that may require your attention.
 
 Refer to [Migrate from Promtail to {{< param "PRODUCT_NAME" >}}][migrate promtail] for a detailed migration guide.
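As context for the Promtail conversion hunk above, the following is a rough, hypothetical sketch of the kind of configuration the converter produces for a basic file-tailing setup. The component labels, the log path, and the Loki endpoint URL are illustrative assumptions, not literal converter output.

```river
// Hypothetical sketch of converted Promtail output; names and values are assumptions.
local.file_match "example" {
  // Discover files matching the former Promtail `__path__` glob.
  path_targets = [{"__path__" = "/var/log/*.log"}]
}

loki.source.file "example" {
  // Tail the discovered files and forward entries downstream.
  targets    = local.file_match.example.targets
  forward_to = [loki.write.default.receiver]
}

loki.write "default" {
  // Rough equivalent of a Promtail `clients` entry.
  endpoint {
    url = "http://localhost:3100/loki/api/v1/push"
  }
}
```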
diff --git a/docs/sources/reference/components/discovery.lightsail.md b/docs/sources/reference/components/discovery.lightsail.md
index c6f959ff54..fabb8a3825 100644
--- a/docs/sources/reference/components/discovery.lightsail.md
+++ b/docs/sources/reference/components/discovery.lightsail.md
@@ -1,9 +1,4 @@
 ---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/discovery.lightsail/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.lightsail/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.lightsail/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.lightsail/
 canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.lightsail/
 description: Learn about discovery.lightsail
 title: discovery.lightsail
diff --git a/docs/sources/reference/components/loki.process.md b/docs/sources/reference/components/loki.process.md
index 93f3455d0b..d432afd185 100644
--- a/docs/sources/reference/components/loki.process.md
+++ b/docs/sources/reference/components/loki.process.md
@@ -1,9 +1,4 @@
 ---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/loki.process/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.process/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.process/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.process/
 canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.process/
 description: Learn about loki.process
 title: loki.process
diff --git a/docs/sources/reference/components/otelcol.processor.tail_sampling.md b/docs/sources/reference/components/otelcol.processor.tail_sampling.md
index c27fae1098..e301d11816 100644
--- a/docs/sources/reference/components/otelcol.processor.tail_sampling.md
+++ b/docs/sources/reference/components/otelcol.processor.tail_sampling.md
@@ -1,9 +1,4 @@
 ---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/otelcol.processor.tail_sampling/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.tail_sampling/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.processor.tail_sampling/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.tail_sampling/
 canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.processor.tail_sampling/
 description: Learn about otelcol.processor.tail_sampling
 labels:
diff --git a/docs/sources/reference/components/prometheus.remote_write.md b/docs/sources/reference/components/prometheus.remote_write.md
index 480e6b7120..3084aae1d1 100644
--- a/docs/sources/reference/components/prometheus.remote_write.md
+++ b/docs/sources/reference/components/prometheus.remote_write.md
@@ -433,8 +433,7 @@ retention directly to the data age itself, as the truncation logic works on
 _segments_, not the samples themselves. This makes data retention less
 predictable when the component receives a non-consistent rate of data.
 
-The [WAL block][] in Flow mode, or the [metrics config][] in Static mode
-contain some configurable parameters that can be used to control the tradeoff
+The [WAL block][] contains some configurable parameters that can be used to control the tradeoff
 between memory usage, disk usage, and data retention.
 
 The `truncate_frequency` or `wal_truncate_frequency` parameter configures the
@@ -496,18 +495,14 @@ To delete the corrupted WAL:
 
 1. Find and delete the contents of the `wal` directory.
 
    By default the `wal` directory is a subdirectory
-   of the `data-agent` directory located in the Grafana Agent working directory. The WAL data directory
-   may be different than the default depending on the [wal_directory][] setting in your Static configuration
-   file or the path specified by the Flow [command line flag][run] `--storage-path`.
+   of the `data-agent` directory located in the {{< param "PRODUCT_NAME" >}} working directory. The WAL data directory
+   may be different than the default depending on the path specified by the [command line flag][run] `--storage-path`.
 
    {{< admonition type="note" >}}
-   There is one `wal` directory per:
-
-   * Metrics instance running in Static mode
-   * `prometheus.remote_write` component running in Flow mode
+   There is one `wal` directory per `prometheus.remote_write` component.
    {{< /admonition >}}
 
-1. [Start][Stop] Grafana Agent and verify that the WAL is working correctly.
+1. [Start][Stop] {{< param "PRODUCT_NAME" >}} and verify that the WAL is working correctly.
@@ -525,8 +520,6 @@ Refer to the linked documentation for more details.
 
 [snappy]: https://en.wikipedia.org/wiki/Snappy_(compression)
-[WAL block]: /docs/agent//flow/reference/components/prometheus.remote_write#wal-block
-[metrics config]: /docs/agent//static/configuration/metrics-config
-[Stop]: /docs/agent//flow/get-started/start-agent
-[wal_directory]: /docs/agent//static/configuration/metrics-config
-[run]: /docs/agent//flow/reference/cli/run
+[WAL block]: #wal-block
+[Stop]: ../../../get-started/start-agent/
+[run]: ../../../reference/cli/run/
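To make the WAL trade-offs discussed in the `prometheus.remote_write` hunks above concrete, here is a minimal sketch of the component with an explicit `wal` block. The endpoint URL is a placeholder, and the parameter values are illustrative assumptions that mirror the documented defaults as I understand them, not a recommendation.

```river
prometheus.remote_write "default" {
  endpoint {
    url = "https://prometheus.example.com/api/v1/write" // placeholder endpoint
  }

  wal {
    // How often the WAL is truncated. Lower values reduce disk usage but
    // shorten how far back data can be kept for resending after an outage.
    truncate_frequency = "2h"

    // Samples newer than this are never removed during truncation.
    min_keepalive_time = "5m"

    // Samples older than this are always removed during truncation.
    max_keepalive_time = "8h"
  }
}
```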
diff --git a/docs/sources/shared/deploy-alloy.md b/docs/sources/shared/deploy-alloy.md
index 6c86f737ba..0eafc10152 100644
--- a/docs/sources/shared/deploy-alloy.md
+++ b/docs/sources/shared/deploy-alloy.md
@@ -15,12 +15,12 @@ This page lists common topologies used for deployments of {{% param "PRODUCT_NAM
 ## As a centralized collection service
 
 Deploying {{< param "PRODUCT_NAME" >}} as a centralized service is recommended for collecting application telemetry.
-This topology allows you to use a smaller number of agents to coordinate service discovery, collection, and remote writing.
+This topology allows you to use a smaller number of collectors to coordinate service discovery, collection, and remote writing.
 
 ![centralized-collection](/media/docs/agent/agent-topologies/centralized-collection.png)
 
-Using this topology requires deploying the Agent on separate infrastructure, and making sure that agents can discover and reach these applications over the network.
-The main predictor for the size of the agent is the number of active metrics series it is scraping; a rule of thumb is approximately 10 KB of memory for each series.
+Using this topology requires deploying {{< param "PRODUCT_NAME" >}} on separate infrastructure, and making sure that they can discover and reach these applications over the network.
+The main predictor for the size of {{< param "PRODUCT_NAME" >}} is the number of active metrics series it's scraping. A rule of thumb is approximately 10 KB of memory for each series.
 We recommend you start looking towards horizontal scaling around the 1 million active series mark.
 
 ### Using Kubernetes StatefulSets
@@ -57,7 +57,7 @@ Deploying one {{< param "PRODUCT_NAME" >}} per machine is required for collectin
 Each {{< param "PRODUCT_NAME" >}} requires you to open an outgoing connection for each remote endpoint it’s shipping data to.
 This can lead to NAT port exhaustion on the egress infrastructure.
 Each egress IP can support up to (65535 - 1024 = 64511) outgoing connections on different ports.
-So, if all {{< param "PRODUCT_NAME" >}}s are shipping metrics and log data, an egress IP can support up to 32,255 agents.
+So, if all {{< param "PRODUCT_NAME" >}}s are shipping metrics and log data, an egress IP can support up to 32,255 collectors.
 
 ### Using Kubernetes DaemonSets
@@ -66,13 +66,13 @@ The simplest use case of the host daemon topology is a Kubernetes DaemonSet, and
 ### Pros
 
 * Doesn’t require running on separate infrastructure
-* Typically leads to smaller-sized agents
+* Typically leads to smaller-sized collectors
 * Lower network latency to instrumented applications
 
 ### Cons
 
-* Requires planning a process for provisioning Grafana Agent on new machines, as well as keeping configuration up to date to avoid configuration drift
-* Not possible to scale agents independently when using Kubernetes DaemonSets
+* Requires planning a process for provisioning {{< param "PRODUCT_NAME" >}} on new machines, as well as keeping configuration up to date to avoid configuration drift
+* Not possible to scale independently when using Kubernetes DaemonSets
 * Scaling the topology can strain external APIs (like service discovery) and network infrastructure (like firewalls, proxy servers, and egress points)
 
 ### Use for
@@ -81,19 +81,19 @@ The simplest use case of the host daemon topology is a Kubernetes DaemonSet, and
 ### Don’t use for
 
-* Scenarios where Grafana Agent grows so large it can become a noisy neighbor
+* Scenarios where {{< param "PRODUCT_NAME" >}} grows so large it can become a noisy neighbor
 * Collecting an unpredictable amount of telemetry
 
 ## As a container sidecar
 
-Deploying {{< param "PRODUCT_NAME" >}} as a container sidecar is only recommended for short-lived applications or specialized agent deployments.
+Deploying {{< param "PRODUCT_NAME" >}} as a container sidecar is only recommended for short-lived applications or specialized {{< param "PRODUCT_NAME" >}} deployments.
 
 ![daemonset](/media/docs/agent/agent-topologies/sidecar.png)
 
 ### Using Kubernetes Pod sidecars
 
 In a Kubernetes environment, the sidecar model consists of deploying {{< param "PRODUCT_NAME" >}} as an extra container on the Pod.
-The Pod’s controller, network configuration, enabled capabilities, and available resources are shared between the actual application and the sidecar agent.
+The Pod’s controller, network configuration, enabled capabilities, and available resources are shared between the actual application and the sidecar {{< param "PRODUCT_NAME" >}}.
 ### Pros
@@ -115,7 +115,7 @@ The Pod’s controller, network configuration, enabled capabilities, and availab
 ### Don’t use for
 
 * Long-lived applications
-* Scenarios where the agent size grows so large it can become a noisy neighbor
+* Scenarios where the {{< param "PRODUCT_NAME" >}} size grows so large it can become a noisy neighbor
 
 
 [hashmod sharding]: https://grafana.com/docs/agent/latest/static/operation-guide/
diff --git a/docs/sources/tasks/migrate/from-operator.md b/docs/sources/tasks/migrate/from-operator.md
index 58c62f792e..f266e1ce8d 100644
--- a/docs/sources/tasks/migrate/from-operator.md
+++ b/docs/sources/tasks/migrate/from-operator.md
@@ -11,7 +11,7 @@ weight: 320
 With the release of {{< param "PRODUCT_NAME" >}}, Grafana Agent Operator is no longer the recommended way to deploy {{< param "PRODUCT_ROOT_NAME" >}} in Kubernetes.
 Some of the Operator functionality has moved into {{< param "PRODUCT_NAME" >}} itself, and the Helm Chart has replaced the remaining functionality.
 
-- The Monitor types (`PodMonitor`, `ServiceMonitor`, `Probe`, and `LogsInstance`) are all supported natively by {{< param "PRODUCT_NAME" >}}.
+- The Monitor types (`PodMonitor`, `ServiceMonitor`, `Probe`, and `PodLogs`) are all supported natively by {{< param "PRODUCT_NAME" >}}.
   You are no longer required to use the Operator to consume those CRDs for dynamic monitoring in your cluster.
 - The parts of the Operator that deploy the {{< param "PRODUCT_ROOT_NAME" >}} itself (`GrafanaAgent`, `MetricsInstance`, and `LogsInstance` CRDs) are deprecated.
   Operator users should use the {{< param "PRODUCT_ROOT_NAME" >}} [Helm Chart][] to deploy {{< param "PRODUCT_ROOT_NAME" >}} directly to your clusters.
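To illustrate the native Monitor-type support mentioned in the final hunk, here is a minimal, hypothetical sketch of consuming `ServiceMonitor` resources directly and forwarding the scraped samples to a `prometheus.remote_write` component. The component labels and the endpoint URL are placeholder assumptions, and namespace or label selectors are omitted for brevity.

```river
// Hypothetical sketch: scrape targets described by ServiceMonitor CRDs without
// the Operator, then ship the samples with remote_write.
prometheus.operator.servicemonitors "example" {
  forward_to = [prometheus.remote_write.default.receiver]
}

prometheus.remote_write "default" {
  endpoint {
    url = "https://prometheus.example.com/api/v1/write" // placeholder endpoint
  }
}
```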