fix(coderd): use insights for DAUs, simplify metricscache (#12775)

Fixes #12134
Fixes https://github.com/coder/customers/issues/384
Refs #12122
This commit is contained in:
Mathias Fredriksson 2024-03-27 18:10:14 +02:00 committed by GitHub
parent 5d82a78d4c
commit 421bf7e785
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
7 changed files with 90 additions and 657 deletions

View File

@ -366,8 +366,8 @@ func New(options *Options) *API {
options.Database,
options.Logger.Named("metrics_cache"),
metricscache.Intervals{
TemplateDAUs: options.MetricsCacheRefreshInterval,
DeploymentStats: options.AgentStatsRefreshInterval,
TemplateBuildTimes: options.MetricsCacheRefreshInterval,
DeploymentStats: options.AgentStatsRefreshInterval,
},
)

View File

@ -32,14 +32,19 @@ const insightsTimeLayout = time.RFC3339
// @Success 200 {object} codersdk.DAUsResponse
// @Router /insights/daus [get]
func (api *API) deploymentDAUs(rw http.ResponseWriter, r *http.Request) {
ctx := r.Context()
if !api.Authorize(r, rbac.ActionRead, rbac.ResourceDeploymentValues) {
httpapi.Forbidden(rw)
return
}
vals := r.URL.Query()
api.returnDAUsInternal(rw, r, nil)
}
func (api *API) returnDAUsInternal(rw http.ResponseWriter, r *http.Request, templateIDs []uuid.UUID) {
ctx := r.Context()
p := httpapi.NewQueryParamParser()
vals := r.URL.Query()
tzOffset := p.Int(vals, 0, "tz_offset")
p.ErrorExcessParams(vals)
if len(p.Errors) > 0 {
@ -50,12 +55,41 @@ func (api *API) deploymentDAUs(rw http.ResponseWriter, r *http.Request) {
return
}
_, resp, _ := api.metricsCache.DeploymentDAUs(tzOffset)
if resp == nil || resp.Entries == nil {
httpapi.Write(ctx, rw, http.StatusOK, &codersdk.DAUsResponse{
Entries: []codersdk.DAUEntry{},
loc := time.FixedZone("", tzOffset*3600)
// If the time is 14:01 or 14:31, we still want to include all the
// data between 14:00 and 15:00. Our rollups buckets are 30 minutes
// so this works nicely. It works just as well for 23:59 as well.
nextHourInLoc := time.Now().In(loc).Truncate(time.Hour).Add(time.Hour)
// Always return 60 days of data (2 months).
sixtyDaysAgo := nextHourInLoc.In(loc).Truncate(24*time.Hour).AddDate(0, 0, -60)
rows, err := api.Database.GetTemplateInsightsByInterval(ctx, database.GetTemplateInsightsByIntervalParams{
StartTime: sixtyDaysAgo,
EndTime: nextHourInLoc,
IntervalDays: 1,
TemplateIDs: templateIDs,
})
if err != nil {
if httpapi.Is404Error(err) {
httpapi.ResourceNotFound(rw)
return
}
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
Message: "Internal error fetching DAUs.",
Detail: err.Error(),
})
}
resp := codersdk.DAUsResponse{
TZHourOffset: tzOffset,
Entries: make([]codersdk.DAUEntry, 0, len(rows)),
}
for _, row := range rows {
resp.Entries = append(resp.Entries, codersdk.DAUEntry{
Date: row.StartTime.Format(time.DateOnly),
Amount: int(row.ActiveUsers),
})
return
}
httpapi.Write(ctx, rw, http.StatusOK, resp)
}

View File

@ -39,25 +39,25 @@ import (
)
func TestDeploymentInsights(t *testing.T) {
t.Skipf("This test is flaky: https://github.com/coder/coder/issues/12509")
t.Parallel()
clientTz, err := time.LoadLocation("America/Chicago")
require.NoError(t, err)
db, ps := dbtestutil.NewDB(t)
db, ps := dbtestutil.NewDB(t, dbtestutil.WithDumpOnFailure())
logger := slogtest.Make(t, nil)
rollupEvents := make(chan dbrollup.Event)
client := coderdtest.New(t, &coderdtest.Options{
Database: db,
Pubsub: ps,
Logger: &logger,
IncludeProvisionerDaemon: true,
AgentStatsRefreshInterval: time.Millisecond * 50,
AgentStatsRefreshInterval: time.Millisecond * 100,
DatabaseRolluper: dbrollup.New(
logger.Named("dbrollup"),
logger.Named("dbrollup").Leveled(slog.LevelDebug),
db,
dbrollup.WithInterval(time.Millisecond*100),
dbrollup.WithEventChannel(rollupEvents),
),
})
@ -75,57 +75,51 @@ func TestDeploymentInsights(t *testing.T) {
workspace := coderdtest.CreateWorkspace(t, client, user.OrganizationID, template.ID)
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
ctx := testutil.Context(t, testutil.WaitLong)
// Pre-check, no permission issues.
daus, err := client.DeploymentDAUs(ctx, codersdk.TimezoneOffsetHour(clientTz))
require.NoError(t, err)
_ = agenttest.New(t, client.URL, authToken)
resources := coderdtest.AwaitWorkspaceAgents(t, client, workspace.ID)
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
defer cancel()
daus, err := client.DeploymentDAUs(context.Background(), codersdk.TimezoneOffsetHour(clientTz))
require.NoError(t, err)
res, err := client.Workspaces(ctx, codersdk.WorkspaceFilter{})
require.NoError(t, err)
assert.NotZero(t, res.Workspaces[0].LastUsedAt)
resources := coderdtest.NewWorkspaceAgentWaiter(t, client, workspace.ID).Wait()
conn, err := workspacesdk.New(client).
DialAgent(ctx, resources[0].Agents[0].ID, &workspacesdk.DialAgentOptions{
Logger: slogtest.Make(t, nil).Named("tailnet"),
Logger: slogtest.Make(t, nil).Named("dialagent"),
})
require.NoError(t, err)
defer func() {
_ = conn.Close()
}()
defer conn.Close()
sshConn, err := conn.SSHClient(ctx)
require.NoError(t, err)
_ = sshConn.Close()
defer sshConn.Close()
sess, err := sshConn.NewSession()
require.NoError(t, err)
defer sess.Close()
r, w := io.Pipe()
defer r.Close()
defer w.Close()
sess.Stdin = r
sess.Stdout = io.Discard
err = sess.Start("cat")
require.NoError(t, err)
for {
select {
case <-ctx.Done():
require.Fail(t, "timed out waiting for deployment daus to update", daus)
case <-rollupEvents:
}
wantDAUs := &codersdk.DAUsResponse{
TZHourOffset: codersdk.TimezoneOffsetHour(clientTz),
Entries: []codersdk.DAUEntry{
{
Date: time.Now().In(clientTz).Format("2006-01-02"),
Amount: 1,
},
},
}
require.Eventuallyf(t, func() bool {
daus, err = client.DeploymentDAUs(ctx, codersdk.TimezoneOffsetHour(clientTz))
require.NoError(t, err)
return len(daus.Entries) > 0
},
testutil.WaitShort, testutil.IntervalFast,
"deployment daus never loaded",
)
gotDAUs, err := client.DeploymentDAUs(ctx, codersdk.TimezoneOffsetHour(clientTz))
require.NoError(t, err)
require.Equal(t, gotDAUs, wantDAUs)
template, err = client.Template(ctx, template.ID)
require.NoError(t, err)
res, err = client.Workspaces(ctx, codersdk.WorkspaceFilter{})
require.NoError(t, err)
if len(daus.Entries) > 0 && daus.Entries[len(daus.Entries)-1].Amount > 0 {
break
}
}
}
func TestUserActivityInsights_SanityCheck(t *testing.T) {

View File

@ -1,93 +0,0 @@
package metricscache
import (
"testing"
"github.com/stretchr/testify/require"
)
// TestClosest exercises the closest() helper across empty, exact-match,
// tie-breaking, and out-of-range inputs to pin its deterministic behavior.
func TestClosest(t *testing.T) {
	t.Parallel()

	cases := []struct {
		Name     string
		Keys     []int
		Input    int
		Expected int
		NotFound bool
	}{
		{Name: "Empty", Input: 10, NotFound: true},
		{Name: "Equal", Keys: []int{1, 2, 3, 4, 5, 6, 10, 12, 15}, Input: 10, Expected: 10},
		{Name: "ZeroOnly", Keys: []int{0}, Input: 10, Expected: 0},
		{Name: "NegativeOnly", Keys: []int{-10, -5}, Input: 10, Expected: -5},
		{Name: "CloseBothSides", Keys: []int{-10, -5, 0, 5, 8, 12}, Input: 10, Expected: 8},
		{Name: "CloseNoZero", Keys: []int{-10, -5, 5, 8, 12}, Input: 0, Expected: -5},
		{Name: "CloseLeft", Keys: []int{-10, -5, 0, 5, 8, 12}, Input: 20, Expected: 12},
		{Name: "CloseRight", Keys: []int{-10, -5, 0, 5, 8, 12}, Input: -20, Expected: -10},
		{Name: "ChooseZero", Keys: []int{-10, -5, 0, 5, 8, 12}, Input: 2, Expected: 0},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.Name, func(t *testing.T) {
			t.Parallel()

			// Build a map whose values mirror the keys so we can verify
			// which entry was selected.
			values := make(map[int]int, len(tc.Keys))
			for _, key := range tc.Keys {
				values[key] = key
			}

			got, _, ok := closest(values, tc.Input)
			if tc.NotFound {
				require.False(t, ok, "should not be found")
				return
			}
			require.True(t, ok)
			require.Equal(t, tc.Expected, got, "closest")
		})
	}
}

View File

@ -3,15 +3,11 @@ package metricscache
import (
"context"
"database/sql"
"fmt"
"math"
"sync"
"sync/atomic"
"time"
"github.com/google/uuid"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
"golang.org/x/xerrors"
"cdr.dev/slog"
@ -22,33 +18,6 @@ import (
"github.com/coder/retry"
)
func OnlyDate(t time.Time) string {
return t.Format("2006-01-02")
}
// deploymentTimezoneOffsets are the timezones that are cached and supported.
// Any non-listed timezone offsets will need to use the closest supported one.
var deploymentTimezoneOffsets = []int{
	0, // UTC - is listed first intentionally.
	// Shortened list of 4 timezones that should encompass *most* users. Caching
	// all 25 timezones can be too computationally expensive for large
	// deployments. This is a stop-gap until more robust fixes can be made for
	// the deployment DAUs query.
	-6, 3, 6, 10,
	// -12, -11, -10, -9, -8, -7, -6, -5, -4, -3, -2, -1,
	// 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
}

// templateTimezoneOffsets are the timezones each template will use for its DAU
// calculations. This is expensive as each template needs to do each timezone, so keep this list
// very small.
var templateTimezoneOffsets = []int{
	// Only do one for now. If people request more accurate template DAU, we can
	// fix this. But it adds too much cost, so optimization is needed first.
	0, // UTC - is listed first intentionally.
}
// Cache holds the template metrics.
// The aggregation queries responsible for these values can take up to a minute
// on large deployments. Even in small deployments, aggregation queries can
@ -59,9 +28,6 @@ type Cache struct {
log slog.Logger
intervals Intervals
deploymentDAUResponses atomic.Pointer[map[int]codersdk.DAUsResponse]
templateDAUResponses atomic.Pointer[map[int]map[uuid.UUID]codersdk.DAUsResponse]
templateUniqueUsers atomic.Pointer[map[uuid.UUID]int]
templateWorkspaceOwners atomic.Pointer[map[uuid.UUID]int]
templateAverageBuildTime atomic.Pointer[map[uuid.UUID]database.GetTemplateAverageBuildTimeRow]
deploymentStatsResponse atomic.Pointer[codersdk.DeploymentStats]
@ -71,13 +37,13 @@ type Cache struct {
}
type Intervals struct {
TemplateDAUs time.Duration
DeploymentStats time.Duration
TemplateBuildTimes time.Duration
DeploymentStats time.Duration
}
func New(db database.Store, log slog.Logger, intervals Intervals) *Cache {
if intervals.TemplateDAUs <= 0 {
intervals.TemplateDAUs = time.Hour
if intervals.TemplateBuildTimes <= 0 {
intervals.TemplateBuildTimes = time.Hour
}
if intervals.DeploymentStats <= 0 {
intervals.DeploymentStats = time.Minute
@ -97,7 +63,7 @@ func New(db database.Store, log slog.Logger, intervals Intervals) *Cache {
wg.Add(1)
go func() {
defer wg.Done()
c.run(ctx, "template daus", intervals.TemplateDAUs, c.refreshTemplateDAUs)
c.run(ctx, "template build times", intervals.TemplateBuildTimes, c.refreshTemplateBuildTimes)
}()
wg.Add(1)
go func() {
@ -109,104 +75,7 @@ func New(db database.Store, log slog.Logger, intervals Intervals) *Cache {
return c
}
func fillEmptyDays(sortedDates []time.Time) []time.Time {
var newDates []time.Time
for i, ti := range sortedDates {
if i == 0 {
newDates = append(newDates, ti)
continue
}
last := sortedDates[i-1]
const day = time.Hour * 24
diff := ti.Sub(last)
for diff > day {
if diff <= day {
break
}
last = last.Add(day)
newDates = append(newDates, last)
diff -= day
}
newDates = append(newDates, ti)
continue
}
return newDates
}
// dauRow is the set of database row types that carry per-day active user
// information and can be converted into a codersdk.DAUsResponse.
type dauRow interface {
	database.GetTemplateDAUsRow |
		database.GetDeploymentDAUsRow
}

// convertDAUResponse groups rows by day, fills gaps between days with
// zero-user entries, and returns the DAU entries annotated with tzOffset.
func convertDAUResponse[T dauRow](rows []T, tzOffset int) codersdk.DAUsResponse {
	usersByDate := make(map[time.Time][]uuid.UUID)
	for _, row := range rows {
		switch r := any(row).(type) {
		case database.GetDeploymentDAUsRow:
			usersByDate[r.Date] = append(usersByDate[r.Date], r.UserID)
		case database.GetTemplateDAUsRow:
			usersByDate[r.Date] = append(usersByDate[r.Date], r.UserID)
		default:
			// This should never happen.
			panic(fmt.Sprintf("%T not acceptable, developer error", r))
		}
	}

	dates := maps.Keys(usersByDate)
	slices.SortFunc(dates, func(a, b time.Time) int {
		switch {
		case a.Before(b):
			return -1
		case a.Equal(b):
			return 0
		default:
			return 1
		}
	})

	resp := codersdk.DAUsResponse{TZHourOffset: tzOffset}
	for _, d := range fillEmptyDays(dates) {
		resp.Entries = append(resp.Entries, codersdk.DAUEntry{
			// This date is truncated to 00:00:00 of the given day, so only
			// return date information.
			Date:   OnlyDate(d),
			Amount: len(usersByDate[d]),
		})
	}
	return resp
}
// countUniqueUsers returns the number of distinct user IDs present in rows.
func countUniqueUsers(rows []database.GetTemplateDAUsRow) int {
	unique := make(map[uuid.UUID]struct{}, len(rows))
	for i := range rows {
		unique[rows[i].UserID] = struct{}{}
	}
	return len(unique)
}
// refreshDeploymentDAUs recomputes deployment-wide DAUs for every supported
// timezone offset and atomically swaps in the new cached responses. Returns
// the first database error encountered, leaving the old cache in place.
func (c *Cache) refreshDeploymentDAUs(ctx context.Context) error {
	//nolint:gocritic // This is a system service.
	ctx = dbauthz.AsSystemRestricted(ctx)

	responses := make(map[int]codersdk.DAUsResponse, len(deploymentTimezoneOffsets))
	for _, offset := range deploymentTimezoneOffsets {
		rows, err := c.database.GetDeploymentDAUs(ctx, int32(offset))
		if err != nil {
			return err
		}
		responses[offset] = convertDAUResponse(rows, offset)
	}
	c.deploymentDAUResponses.Store(&responses)
	return nil
}
func (c *Cache) refreshTemplateDAUs(ctx context.Context) error {
func (c *Cache) refreshTemplateBuildTimes(ctx context.Context) error {
//nolint:gocritic // This is a system service.
ctx = dbauthz.AsSystemRestricted(ctx)
@ -216,38 +85,13 @@ func (c *Cache) refreshTemplateDAUs(ctx context.Context) error {
}
var (
templateDAUs = make(map[int]map[uuid.UUID]codersdk.DAUsResponse, len(templates))
templateUniqueUsers = make(map[uuid.UUID]int)
templateWorkspaceOwners = make(map[uuid.UUID]int)
templateAverageBuildTimes = make(map[uuid.UUID]database.GetTemplateAverageBuildTimeRow)
)
err = c.refreshDeploymentDAUs(ctx)
if err != nil {
return xerrors.Errorf("deployment daus: %w", err)
}
ids := make([]uuid.UUID, 0, len(templates))
for _, template := range templates {
ids = append(ids, template.ID)
for _, tzOffset := range templateTimezoneOffsets {
rows, err := c.database.GetTemplateDAUs(ctx, database.GetTemplateDAUsParams{
TemplateID: template.ID,
TzOffset: int32(tzOffset),
})
if err != nil {
return err
}
if templateDAUs[tzOffset] == nil {
templateDAUs[tzOffset] = make(map[uuid.UUID]codersdk.DAUsResponse)
}
templateDAUs[tzOffset][template.ID] = convertDAUResponse(rows, tzOffset)
if _, set := templateUniqueUsers[template.ID]; !set {
// If the uniqueUsers has not been counted yet, set the unique count with the rows we have.
// We only need to calculate this once.
templateUniqueUsers[template.ID] = countUniqueUsers(rows)
}
}
templateAvgBuildTime, err := c.database.GetTemplateAverageBuildTime(ctx, database.GetTemplateAverageBuildTimeParams{
TemplateID: uuid.NullUUID{
@ -275,8 +119,6 @@ func (c *Cache) refreshTemplateDAUs(ctx context.Context) error {
}
c.templateWorkspaceOwners.Store(&templateWorkspaceOwners)
c.templateDAUResponses.Store(&templateDAUs)
c.templateUniqueUsers.Store(&templateUniqueUsers)
c.templateAverageBuildTime.Store(&templateAverageBuildTimes)
return nil
@ -359,99 +201,6 @@ func (c *Cache) Close() error {
return nil
}
// DeploymentDAUs returns the cached deployment-wide DAUs response whose
// timezone offset is closest to the requested offset, along with the offset
// actually used. The boolean is false while the cache is still loading or
// holds no data.
func (c *Cache) DeploymentDAUs(offset int) (int, *codersdk.DAUsResponse, bool) {
	cached := c.deploymentDAUResponses.Load()
	if cached == nil {
		return 0, nil, false
	}
	usedOffset, resp, ok := closest(*cached, offset)
	if !ok {
		return 0, nil, false
	}
	return usedOffset, &resp, true
}
// TemplateDAUs returns an empty response if the template doesn't have users
// or is loading for the first time.
// The cache will select the closest DAUs response to given timezone offset.
func (c *Cache) TemplateDAUs(id uuid.UUID, offset int) (int, *codersdk.DAUsResponse, bool) {
	cached := c.templateDAUResponses.Load()
	if cached == nil {
		// Data loading.
		return 0, nil, false
	}
	usedOffset, byTemplate, ok := closest(*cached, offset)
	if !ok {
		// Probably no data.
		return 0, nil, false
	}
	resp, found := byTemplate[id]
	if !found {
		// Probably no data.
		return 0, nil, false
	}
	return usedOffset, &resp, true
}
// closest returns the value in the values map that has a key with the value most
// close to the requested key. This is so if a user requests a timezone offset that
// we do not have, we return the closest one we do have to the user.
func closest[V any](values map[int]V, offset int) (int, V, bool) {
	if len(values) == 0 {
		var zero V
		return -1, zero, false
	}
	if v, ok := values[offset]; ok {
		// We have the exact offset, that was easy!
		return offset, v, true
	}

	bestKey := 0
	bestDiff := math.MaxInt
	var bestVal V
	for k, v := range values {
		d := abs(k - offset)
		// Take the closest value that is also the smallest value. We do this
		// to make the output deterministic
		if d < bestDiff || (d == bestDiff && k < bestKey) {
			bestKey, bestVal, bestDiff = k, v, d
		}
	}
	return bestKey, bestVal, true
}

// abs returns the absolute value of a.
func abs(a int) int {
	if a < 0 {
		return -a
	}
	return a
}
// TemplateUniqueUsers returns the number of unique Template users
// from all Cache data.
func (c *Cache) TemplateUniqueUsers(id uuid.UUID) (int, bool) {
	counts := c.templateUniqueUsers.Load()
	if counts == nil {
		// Data loading.
		return -1, false
	}
	count, ok := (*counts)[id]
	if !ok {
		// Probably no data.
		return -1, false
	}
	return count, true
}
func (c *Cache) TemplateBuildTimeStats(id uuid.UUID) codersdk.TemplateBuildTimeStats {
unknown := codersdk.TemplateBuildTimeStats{
codersdk.WorkspaceTransitionStart: {},

View File

@ -19,241 +19,10 @@ import (
"github.com/coder/coder/v2/testutil"
)
func dateH(year, month, day, hour int) time.Time {
return time.Date(year, time.Month(month), day, hour, 0, 0, 0, time.UTC)
}
func date(year, month, day int) time.Time {
return time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC)
}
// TestCache_TemplateUsers verifies the metricscache DAU aggregation:
// per-template DAU entries (with gap days filled as zero), the unique-user
// count, and — for cases that set dauWant — the deployment-wide DAUs at the
// requested timezone offset. Template DAUs are only computed for offset 0.
func TestCache_TemplateUsers(t *testing.T) {
	t.Parallel()
	// statRow builds a minimal agent-stat insert for the given user/time;
	// TemplateID and ConnectionCount are filled in by the runner below.
	statRow := func(user uuid.UUID, date time.Time) database.InsertWorkspaceAgentStatParams {
		return database.InsertWorkspaceAgentStatParams{
			CreatedAt: date,
			UserID:    user,
		}
	}
	// Two fixed user IDs so expected unique-user counts are deterministic.
	var (
		zebra = uuid.UUID{1}
		tiger = uuid.UUID{2}
	)
	type args struct {
		rows []database.InsertWorkspaceAgentStatParams
	}
	type want struct {
		entries     []codersdk.DAUEntry
		uniqueUsers int
	}
	tests := []struct {
		name    string
		args    args
		tplWant want
		// dauWant is optional
		dauWant  []codersdk.DAUEntry
		tzOffset int
	}{
		{name: "empty", args: args{}, tplWant: want{nil, 0}},
		{
			name: "one hole",
			args: args{
				rows: []database.InsertWorkspaceAgentStatParams{
					statRow(zebra, dateH(2022, 8, 27, 0)),
					statRow(zebra, dateH(2022, 8, 30, 0)),
				},
			},
			// Days 28-29 between the two stats must appear with Amount 0.
			tplWant: want{[]codersdk.DAUEntry{
				{
					Date:   metricscache.OnlyDate(date(2022, 8, 27)),
					Amount: 1,
				},
				{
					Date:   metricscache.OnlyDate(date(2022, 8, 28)),
					Amount: 0,
				},
				{
					Date:   metricscache.OnlyDate(date(2022, 8, 29)),
					Amount: 0,
				},
				{
					Date:   metricscache.OnlyDate(date(2022, 8, 30)),
					Amount: 1,
				},
			}, 1},
		},
		{
			name: "no holes",
			args: args{
				rows: []database.InsertWorkspaceAgentStatParams{
					statRow(zebra, dateH(2022, 8, 27, 0)),
					statRow(zebra, dateH(2022, 8, 28, 0)),
					statRow(zebra, dateH(2022, 8, 29, 0)),
				},
			},
			tplWant: want{[]codersdk.DAUEntry{
				{
					Date:   metricscache.OnlyDate(date(2022, 8, 27)),
					Amount: 1,
				},
				{
					Date:   metricscache.OnlyDate(date(2022, 8, 28)),
					Amount: 1,
				},
				{
					Date:   metricscache.OnlyDate(date(2022, 8, 29)),
					Amount: 1,
				},
			}, 1},
		},
		{
			name: "holes",
			args: args{
				rows: []database.InsertWorkspaceAgentStatParams{
					statRow(zebra, dateH(2022, 1, 1, 0)),
					statRow(tiger, dateH(2022, 1, 1, 0)),
					statRow(zebra, dateH(2022, 1, 4, 0)),
					statRow(zebra, dateH(2022, 1, 7, 0)),
					statRow(tiger, dateH(2022, 1, 7, 0)),
				},
			},
			tplWant: want{[]codersdk.DAUEntry{
				{
					Date:   metricscache.OnlyDate(date(2022, 1, 1)),
					Amount: 2,
				},
				{
					Date:   metricscache.OnlyDate(date(2022, 1, 2)),
					Amount: 0,
				},
				{
					Date:   metricscache.OnlyDate(date(2022, 1, 3)),
					Amount: 0,
				},
				{
					Date:   metricscache.OnlyDate(date(2022, 1, 4)),
					Amount: 1,
				},
				{
					Date:   metricscache.OnlyDate(date(2022, 1, 5)),
					Amount: 0,
				},
				{
					Date:   metricscache.OnlyDate(date(2022, 1, 6)),
					Amount: 0,
				},
				{
					Date:   metricscache.OnlyDate(date(2022, 1, 7)),
					Amount: 2,
				},
			}, 2},
		},
		{
			name:     "tzOffset",
			tzOffset: 3,
			args: args{
				rows: []database.InsertWorkspaceAgentStatParams{
					statRow(zebra, dateH(2022, 1, 2, 3)),
					statRow(tiger, dateH(2022, 1, 2, 3)),
					// With offset these should be in the previous day
					statRow(zebra, dateH(2022, 1, 2, 0)),
					statRow(tiger, dateH(2022, 1, 2, 0)),
				},
			},
			tplWant: want{[]codersdk.DAUEntry{
				{
					Date:   metricscache.OnlyDate(date(2022, 1, 2)),
					Amount: 2,
				},
			}, 2},
			dauWant: []codersdk.DAUEntry{
				{
					Date:   metricscache.OnlyDate(date(2022, 1, 1)),
					Amount: 2,
				},
				{
					Date:   metricscache.OnlyDate(date(2022, 1, 2)),
					Amount: 2,
				},
			},
		},
		{
			name:     "tzOffsetPreviousDay",
			tzOffset: 6,
			args: args{
				rows: []database.InsertWorkspaceAgentStatParams{
					statRow(zebra, dateH(2022, 1, 2, 1)),
					statRow(tiger, dateH(2022, 1, 2, 1)),
					statRow(zebra, dateH(2022, 1, 2, 0)),
					statRow(tiger, dateH(2022, 1, 2, 0)),
				},
			},
			dauWant: []codersdk.DAUEntry{
				{
					Date:   metricscache.OnlyDate(date(2022, 1, 1)),
					Amount: 2,
				},
			},
			tplWant: want{[]codersdk.DAUEntry{
				{
					Date:   metricscache.OnlyDate(date(2022, 1, 2)),
					Amount: 2,
				},
			}, 2},
		},
	}
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			var (
				db    = dbmem.New()
				cache = metricscache.New(db, slogtest.Make(t, nil), metricscache.Intervals{
					TemplateDAUs: testutil.IntervalFast,
				})
			)
			defer cache.Close()
			template := dbgen.Template(t, db, database.Template{
				Provisioner: database.ProvisionerTypeEcho,
			})
			for _, row := range tt.args.rows {
				row.TemplateID = template.ID
				row.ConnectionCount = 1
				// NOTE(review): the insert's error return is discarded here —
				// presumably fine for dbmem, but confirm this is intentional.
				db.InsertWorkspaceAgentStat(context.Background(), row)
			}
			// Wait for the cache's background refresh to pick up the rows.
			require.Eventuallyf(t, func() bool {
				_, _, ok := cache.TemplateDAUs(template.ID, tt.tzOffset)
				return ok
			}, testutil.WaitShort, testutil.IntervalMedium,
				"TemplateDAUs never populated",
			)
			gotUniqueUsers, ok := cache.TemplateUniqueUsers(template.ID)
			require.True(t, ok)
			if tt.dauWant != nil {
				_, dauResponse, ok := cache.DeploymentDAUs(tt.tzOffset)
				require.True(t, ok)
				require.Equal(t, tt.dauWant, dauResponse.Entries)
			}
			offset, gotEntries, ok := cache.TemplateDAUs(template.ID, tt.tzOffset)
			require.True(t, ok)
			// Template only supports 0 offset.
			require.Equal(t, 0, offset)
			require.Equal(t, tt.tplWant.entries, gotEntries.Entries)
			require.Equal(t, tt.tplWant.uniqueUsers, gotUniqueUsers)
		})
	}
}
func TestCache_TemplateWorkspaceOwners(t *testing.T) {
t.Parallel()
var ()
@ -261,7 +30,7 @@ func TestCache_TemplateWorkspaceOwners(t *testing.T) {
var (
db = dbmem.New()
cache = metricscache.New(db, slogtest.Make(t, nil), metricscache.Intervals{
TemplateDAUs: testutil.IntervalFast,
TemplateBuildTimes: testutil.IntervalFast,
})
)
@ -412,7 +181,7 @@ func TestCache_BuildTime(t *testing.T) {
var (
db = dbmem.New()
cache = metricscache.New(db, slogtest.Make(t, nil), metricscache.Intervals{
TemplateDAUs: testutil.IntervalFast,
TemplateBuildTimes: testutil.IntervalFast,
})
)

View File

@ -788,29 +788,9 @@ func (api *API) patchTemplateMeta(rw http.ResponseWriter, r *http.Request) {
// @Success 200 {object} codersdk.DAUsResponse
// @Router /templates/{template}/daus [get]
func (api *API) templateDAUs(rw http.ResponseWriter, r *http.Request) {
ctx := r.Context()
template := httpmw.TemplateParam(r)
vals := r.URL.Query()
p := httpapi.NewQueryParamParser()
tzOffset := p.Int(vals, 0, "tz_offset")
p.ErrorExcessParams(vals)
if len(p.Errors) > 0 {
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
Message: "Query parameters have invalid values.",
Validations: p.Errors,
})
return
}
_, resp, _ := api.metricsCache.TemplateDAUs(template.ID, tzOffset)
if resp == nil || resp.Entries == nil {
httpapi.Write(ctx, rw, http.StatusOK, &codersdk.DAUsResponse{
Entries: []codersdk.DAUEntry{},
})
return
}
httpapi.Write(ctx, rw, http.StatusOK, resp)
api.returnDAUsInternal(rw, r, []uuid.UUID{template.ID})
}
// @Summary Get template examples by organization