2023-04-27 10:34:00 +00:00
|
|
|
package prometheusmetrics
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/prometheus/client_golang/prometheus"
|
2023-05-25 10:52:36 +00:00
|
|
|
"golang.org/x/exp/slices"
|
2023-04-27 10:34:00 +00:00
|
|
|
"golang.org/x/xerrors"
|
|
|
|
|
|
|
|
"cdr.dev/slog"
|
|
|
|
|
2023-12-18 12:53:28 +00:00
|
|
|
agentproto "github.com/coder/coder/v2/agent/proto"
|
2023-04-27 10:34:00 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
const (
	// metricHelpForAgent is a help string that replaces all agent metric help
	// messages. This is because a registry cannot have conflicting
	// help messages for the same metric in a "gather". If our coder agents are
	// on different versions, this is a possible scenario.
	metricHelpForAgent = "Metrics are forwarded from workspace agents connected to this instance of coderd."
)
|
|
|
|
|
|
|
|
const (
	// loggerName names the sub-logger used by the aggregator (see NewMetricsAggregator).
	loggerName = "prometheusmetrics"

	// sizeCollectCh bounds the number of in-flight Collect requests.
	sizeCollectCh = 10
	// sizeUpdateCh bounds the number of queued agent metric updates; when it
	// is full, Update drops the batch rather than blocking.
	sizeUpdateCh = 1024

	// defaultMetricsCleanupInterval is the metric expiry/cleanup interval used
	// when no positive duration is passed to NewMetricsAggregator.
	defaultMetricsCleanupInterval = 2 * time.Minute
)
|
|
|
|
|
|
|
|
// MetricsAggregator receives metrics reported by workspace agents and
// re-exposes them through a single Prometheus collector. All mutable state
// is owned by the goroutine started in Run; Update and Collect communicate
// with it exclusively through channels, so no locking is needed.
type MetricsAggregator struct {
	// queue holds the latest value of every known agent metric, annotated
	// with agent identity and an expiry date. Accessed only by the Run
	// goroutine.
	queue []annotatedMetric

	log                    slog.Logger
	metricsCleanupInterval time.Duration

	// collectCh carries one response channel per Collect call; the Run
	// goroutine sends the metric snapshot on it and then closes it.
	collectCh chan (chan []prometheus.Metric)
	// updateCh carries batches of metrics reported via Update.
	updateCh chan updateRequest

	// Self-observability: durations of the update and cleanup phases.
	updateHistogram  prometheus.Histogram
	cleanupHistogram prometheus.Histogram
}
|
|
|
|
|
|
|
|
// updateRequest is a message sent to the Run goroutine carrying one batch of
// metrics reported by a single workspace agent.
type updateRequest struct {
	// Identity of the reporting agent; used as label values on the exported
	// metrics (see agentMetricsLabels).
	username      string
	workspaceName string
	agentName     string
	templateName  string

	metrics []*agentproto.Stats_Metric

	// timestamp is the receipt time; each metric's expiry date is computed
	// by adding the cleanup interval to it.
	timestamp time.Time
}
|
|
|
|
|
|
|
|
// annotatedMetric is a single agent metric decorated with the identity of
// the agent that reported it and a deadline after which it is discarded.
type annotatedMetric struct {
	*agentproto.Stats_Metric

	username      string
	workspaceName string
	agentName     string
	templateName  string

	// expiryDate is refreshed on every matching update; entries past it are
	// removed by the cleanup ticker in Run.
	expiryDate time.Time
}
|
|
|
|
|
|
|
|
// Compile-time assertion that MetricsAggregator satisfies prometheus.Collector.
var _ prometheus.Collector = new(MetricsAggregator)
|
|
|
|
|
2023-12-18 12:53:28 +00:00
|
|
|
func (am *annotatedMetric) is(req updateRequest, m *agentproto.Stats_Metric) bool {
|
2023-05-25 10:52:36 +00:00
|
|
|
return am.username == req.username && am.workspaceName == req.workspaceName && am.agentName == req.agentName && am.Name == m.Name && slices.Equal(am.Labels, m.Labels)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (am *annotatedMetric) asPrometheus() (prometheus.Metric, error) {
|
|
|
|
labels := make([]string, 0, len(agentMetricsLabels)+len(am.Labels))
|
|
|
|
labelValues := make([]string, 0, len(agentMetricsLabels)+len(am.Labels))
|
|
|
|
|
|
|
|
labels = append(labels, agentMetricsLabels...)
|
2023-12-13 17:45:43 +00:00
|
|
|
labelValues = append(labelValues, am.username, am.workspaceName, am.agentName, am.templateName)
|
2023-05-25 10:52:36 +00:00
|
|
|
|
|
|
|
for _, l := range am.Labels {
|
|
|
|
labels = append(labels, l.Name)
|
|
|
|
labelValues = append(labelValues, l.Value)
|
|
|
|
}
|
|
|
|
|
|
|
|
desc := prometheus.NewDesc(am.Name, metricHelpForAgent, labels, nil)
|
|
|
|
valueType, err := asPrometheusValueType(am.Type)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return prometheus.MustNewConstMetric(desc, valueType, am.Value, labelValues...), nil
|
|
|
|
}
|
|
|
|
|
2023-04-27 10:34:00 +00:00
|
|
|
func NewMetricsAggregator(logger slog.Logger, registerer prometheus.Registerer, duration time.Duration) (*MetricsAggregator, error) {
|
|
|
|
metricsCleanupInterval := defaultMetricsCleanupInterval
|
|
|
|
if duration > 0 {
|
|
|
|
metricsCleanupInterval = duration
|
|
|
|
}
|
|
|
|
|
|
|
|
updateHistogram := prometheus.NewHistogram(prometheus.HistogramOpts{
|
|
|
|
Namespace: "coderd",
|
|
|
|
Subsystem: "prometheusmetrics",
|
|
|
|
Name: "metrics_aggregator_execution_update_seconds",
|
|
|
|
Help: "Histogram for duration of metrics aggregator update in seconds.",
|
|
|
|
Buckets: []float64{0.001, 0.005, 0.010, 0.025, 0.050, 0.100, 0.500, 1, 5, 10, 30},
|
|
|
|
})
|
|
|
|
err := registerer.Register(updateHistogram)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
cleanupHistogram := prometheus.NewHistogram(prometheus.HistogramOpts{
|
|
|
|
Namespace: "coderd",
|
|
|
|
Subsystem: "prometheusmetrics",
|
|
|
|
Name: "metrics_aggregator_execution_cleanup_seconds",
|
|
|
|
Help: "Histogram for duration of metrics aggregator cleanup in seconds.",
|
|
|
|
Buckets: []float64{0.001, 0.005, 0.010, 0.025, 0.050, 0.100, 0.500, 1, 5, 10, 30},
|
|
|
|
})
|
|
|
|
err = registerer.Register(cleanupHistogram)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return &MetricsAggregator{
|
2023-06-22 18:09:33 +00:00
|
|
|
log: logger.Named(loggerName),
|
2023-04-27 10:34:00 +00:00
|
|
|
metricsCleanupInterval: metricsCleanupInterval,
|
|
|
|
|
|
|
|
collectCh: make(chan (chan []prometheus.Metric), sizeCollectCh),
|
|
|
|
updateCh: make(chan updateRequest, sizeUpdateCh),
|
|
|
|
|
|
|
|
updateHistogram: updateHistogram,
|
|
|
|
cleanupHistogram: cleanupHistogram,
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Run starts the aggregator's event loop in a new goroutine and returns a
// stop function that cancels the loop and blocks until it has fully exited.
//
// The loop exclusively owns ma.queue: metric updates, collection snapshots,
// and expiry cleanup are all serialized through this single goroutine, so no
// locking is required anywhere in the aggregator.
func (ma *MetricsAggregator) Run(ctx context.Context) func() {
	ctx, cancelFunc := context.WithCancel(ctx)
	done := make(chan struct{})

	cleanupTicker := time.NewTicker(ma.metricsCleanupInterval)
	go func() {
		defer close(done)
		defer cleanupTicker.Stop()

		for {
			select {
			case req := <-ma.updateCh:
				ma.log.Debug(ctx, "update metrics")

				timer := prometheus.NewTimer(ma.updateHistogram)
			UpdateLoop:
				for _, m := range req.metrics {
					for i, q := range ma.queue {
						if q.is(req, m) {
							// Known metric: refresh its value and push back
							// its expiry instead of appending a duplicate.
							ma.queue[i].Stats_Metric.Value = m.Value
							ma.queue[i].expiryDate = req.timestamp.Add(ma.metricsCleanupInterval)
							continue UpdateLoop
						}
					}

					// Previously unseen metric: annotate it with the agent's
					// identity and queue it.
					ma.queue = append(ma.queue, annotatedMetric{
						Stats_Metric:  m,
						username:      req.username,
						workspaceName: req.workspaceName,
						agentName:     req.agentName,
						templateName:  req.templateName,
						expiryDate:    req.timestamp.Add(ma.metricsCleanupInterval),
					})
				}

				timer.ObserveDuration()
			case outputCh := <-ma.collectCh:
				ma.log.Debug(ctx, "collect metrics")

				// Convert the whole queue to Prometheus metrics. Entries with
				// an unsupported value type are logged and skipped rather
				// than failing the entire gather.
				output := make([]prometheus.Metric, 0, len(ma.queue))
				for _, m := range ma.queue {
					promMetric, err := m.asPrometheus()
					if err != nil {
						ma.log.Error(ctx, "can't convert Prometheus value type", slog.F("name", m.Name), slog.F("type", m.Type), slog.F("value", m.Value), slog.Error(err))
						continue
					}
					output = append(output, promMetric)
				}
				// Closing the channel tells Collect the snapshot is complete.
				outputCh <- output
				close(outputCh)
			case <-cleanupTicker.C:
				ma.log.Debug(ctx, "clean expired metrics")

				timer := prometheus.NewTimer(ma.cleanupHistogram)

				now := time.Now()

				// First pass: detect whether anything expired at all, so the
				// common no-op case avoids reallocating the queue.
				var hasExpiredMetrics bool
				for _, m := range ma.queue {
					if now.After(m.expiryDate) {
						hasExpiredMetrics = true
						break
					}
				}

				if hasExpiredMetrics {
					// Second pass: rebuild the queue keeping only entries
					// that are still fresh.
					fresh := make([]annotatedMetric, 0, len(ma.queue))
					for _, m := range ma.queue {
						if m.expiryDate.After(now) {
							fresh = append(fresh, m)
						}
					}
					ma.queue = fresh
				}

				timer.ObserveDuration()
				cleanupTicker.Reset(ma.metricsCleanupInterval)

			case <-ctx.Done():
				ma.log.Debug(ctx, "metrics aggregator is stopped")
				return
			}
		}
	}()

	// The returned closure cancels the loop and waits for it to drain.
	return func() {
		cancelFunc()
		<-done
	}
}
|
|
|
|
|
|
|
|
// Describe function does not have any knowledge about the metrics schema,
// so it does not emit anything.
//
// Emitting no descriptors makes this an "unchecked" collector in Prometheus
// terms, which is required because the set of forwarded agent metrics is
// dynamic and unknown ahead of time.
func (*MetricsAggregator) Describe(_ chan<- *prometheus.Desc) {
}
|
|
|
|
|
2023-12-13 17:45:43 +00:00
|
|
|
// agentMetricsLabels is the ordered list of identity label names prepended
// to every forwarded agent metric; its order must match the label values
// appended in annotatedMetric.asPrometheus and the fields of AgentMetricLabels.
var agentMetricsLabels = []string{usernameLabel, workspaceNameLabel, agentNameLabel, templateNameLabel}
|
|
|
|
|
|
|
|
// AgentMetricLabels are the labels used to decorate an agent's metrics.
// This list should match the list of labels in agentMetricsLabels.
type AgentMetricLabels struct {
	Username      string
	WorkspaceName string
	AgentName     string
	TemplateName  string
}
|
2023-04-27 10:34:00 +00:00
|
|
|
|
|
|
|
func (ma *MetricsAggregator) Collect(ch chan<- prometheus.Metric) {
|
|
|
|
output := make(chan []prometheus.Metric, 1)
|
|
|
|
|
|
|
|
select {
|
|
|
|
case ma.collectCh <- output:
|
|
|
|
default:
|
2023-06-22 18:09:33 +00:00
|
|
|
ma.log.Error(context.Background(), "collect queue is full")
|
2023-04-27 10:34:00 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
for s := range output {
|
|
|
|
for _, m := range s {
|
|
|
|
ch <- m
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-12-18 12:53:28 +00:00
|
|
|
func (ma *MetricsAggregator) Update(ctx context.Context, labels AgentMetricLabels, metrics []*agentproto.Stats_Metric) {
|
2023-04-27 10:34:00 +00:00
|
|
|
select {
|
|
|
|
case ma.updateCh <- updateRequest{
|
2023-12-13 17:45:43 +00:00
|
|
|
username: labels.Username,
|
|
|
|
workspaceName: labels.WorkspaceName,
|
|
|
|
agentName: labels.AgentName,
|
|
|
|
templateName: labels.TemplateName,
|
2023-04-27 10:34:00 +00:00
|
|
|
metrics: metrics,
|
|
|
|
|
|
|
|
timestamp: time.Now(),
|
|
|
|
}:
|
|
|
|
case <-ctx.Done():
|
2023-06-22 18:09:33 +00:00
|
|
|
ma.log.Debug(ctx, "update request is canceled")
|
2023-04-27 10:34:00 +00:00
|
|
|
default:
|
2023-06-22 18:09:33 +00:00
|
|
|
ma.log.Error(ctx, "update queue is full")
|
2023-04-27 10:34:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-12-18 12:53:28 +00:00
|
|
|
func asPrometheusValueType(metricType agentproto.Stats_Metric_Type) (prometheus.ValueType, error) {
|
2023-04-27 10:34:00 +00:00
|
|
|
switch metricType {
|
2023-12-18 12:53:28 +00:00
|
|
|
case agentproto.Stats_Metric_GAUGE:
|
2023-04-27 10:34:00 +00:00
|
|
|
return prometheus.GaugeValue, nil
|
2023-12-18 12:53:28 +00:00
|
|
|
case agentproto.Stats_Metric_COUNTER:
|
2023-04-27 10:34:00 +00:00
|
|
|
return prometheus.CounterValue, nil
|
|
|
|
default:
|
|
|
|
return -1, xerrors.Errorf("unsupported value type: %s", metricType)
|
|
|
|
}
|
|
|
|
}
|