Add complex dbmem implementations

This commit is contained in:
Kyle Carberry 2024-05-01 13:51:57 +00:00
parent 19ad836e01
commit 2e30fa4e78
14 changed files with 469 additions and 386 deletions

View File

@ -1034,10 +1034,6 @@ func (q *querier) GetAuthorizationUserRoles(ctx context.Context, userID uuid.UUI
return q.db.GetAuthorizationUserRoles(ctx, userID)
}
// GetConsistencyByIntelCohort is not implemented on the authz wrapper yet;
// any caller will panic until an implementation (with an authz check) lands.
func (q *querier) GetConsistencyByIntelCohort(ctx context.Context) ([]database.GetConsistencyByIntelCohortRow, error) {
	panic("not implemented")
}
func (q *querier) GetDBCryptKeys(ctx context.Context) ([]database.DBCryptKey, error) {
if err := q.authorizeContext(ctx, rbac.ActionRead, rbac.ResourceSystem); err != nil {
return nil, err
@ -1181,11 +1177,6 @@ func (q *querier) GetIntelCohortsMatchedByMachineIDs(ctx context.Context, ids []
return q.db.GetIntelCohortsMatchedByMachineIDs(ctx, ids)
}
// GetIntelInvocationSummaries deliberately bypasses authorization and
// forwards straight to the underlying store.
func (q *querier) GetIntelInvocationSummaries(ctx context.Context) ([]database.IntelInvocationSummary, error) {
	// No authz checks - it'd be too slow
	return q.db.GetIntelInvocationSummaries(ctx)
}
func (q *querier) GetIntelMachinesMatchingFilters(ctx context.Context, arg database.GetIntelMachinesMatchingFiltersParams) ([]database.GetIntelMachinesMatchingFiltersRow, error) {
// No authz checks possible. It's too weird
return q.db.GetIntelMachinesMatchingFilters(ctx, arg)

View File

@ -582,6 +582,103 @@ func WorkspaceProxy(t testing.TB, db database.Store, orig database.WorkspaceProx
return proxy, secret
}
// IntelCohort upserts an intel cohort for tests, filling any zero-valued
// fields on orig with generated defaults, and returns the stored row.
// All regex filters default to a match-everything pattern.
func IntelCohort(t testing.TB, db database.Store, orig database.IntelCohort) database.IntelCohort {
	const matchAnything = ".*"
	params := database.UpsertIntelCohortParams{
		ID:                           takeFirst(orig.ID, uuid.New()),
		CreatedAt:                    takeFirst(orig.CreatedAt, dbtime.Now()),
		UpdatedAt:                    takeFirst(orig.UpdatedAt, dbtime.Now()),
		Name:                         takeFirst(orig.Name, namesgenerator.GetRandomName(1)),
		OrganizationID:               takeFirst(orig.OrganizationID, uuid.New()),
		CreatedBy:                    takeFirst(orig.CreatedBy, uuid.New()),
		DisplayName:                  takeFirst(orig.DisplayName, namesgenerator.GetRandomName(1)),
		Icon:                         takeFirst(orig.Icon, ""),
		Description:                  takeFirst(orig.Description, ""),
		RegexOperatingSystem:         takeFirst(orig.RegexOperatingSystem, matchAnything),
		RegexOperatingSystemVersion:  takeFirst(orig.RegexOperatingSystemVersion, matchAnything),
		RegexOperatingSystemPlatform: takeFirst(orig.RegexOperatingSystemPlatform, matchAnything),
		RegexArchitecture:            takeFirst(orig.RegexArchitecture, matchAnything),
		RegexInstanceID:              takeFirst(orig.RegexInstanceID, matchAnything),
		TrackedExecutables:           takeFirstSlice(orig.TrackedExecutables, []string{}),
	}
	cohort, err := db.UpsertIntelCohort(genCtx, params)
	require.NoError(t, err, "insert cohort")
	return cohort
}
// IntelMachine upserts an intel machine for tests, filling any zero-valued
// fields on orig with generated defaults, and returns the stored row.
// When orig carries no valid IP address, a loopback /32 is substituted.
func IntelMachine(t testing.TB, db database.Store, orig database.IntelMachine) database.IntelMachine {
	ipAddress := orig.IPAddress
	if !ipAddress.Valid {
		ipAddress = pqtype.Inet{
			IPNet: net.IPNet{
				IP:   net.IPv4(127, 0, 0, 1),
				Mask: net.IPv4Mask(255, 255, 255, 255),
			},
			Valid: true,
		}
	}
	params := database.UpsertIntelMachineParams{
		ID:                      takeFirst(orig.ID, uuid.New()),
		UserID:                  takeFirst(orig.UserID, uuid.New()),
		CreatedAt:               takeFirst(orig.CreatedAt, dbtime.Now()),
		UpdatedAt:               takeFirst(orig.UpdatedAt, dbtime.Now()),
		OrganizationID:          takeFirst(orig.OrganizationID, uuid.New()),
		OperatingSystem:         takeFirst(orig.OperatingSystem, "linux"),
		OperatingSystemVersion:  takeFirst(orig.OperatingSystemVersion, "1.0"),
		OperatingSystemPlatform: takeFirst(orig.OperatingSystemPlatform, "linux"),
		Architecture:            takeFirst(orig.Architecture, "amd64"),
		InstanceID:              takeFirst(orig.InstanceID, "i-123456"),
		Hostname:                takeFirst(orig.Hostname, "hostname"),
		CPUCores:                takeFirst(orig.CPUCores, 1),
		MemoryMBTotal:           takeFirst(orig.MemoryMBTotal, 1024),
		DaemonVersion:           takeFirst(orig.DaemonVersion, "1.0.0"),
		IPAddress:               ipAddress,
	}
	machine, err := db.UpsertIntelMachine(genCtx, params)
	require.NoError(t, err, "insert machine")
	return machine
}
// IntelInvocations batch-inserts count invocations derived from orig.
// Every inserted row shares orig's fields (with CreatedAt, MachineID, and
// UserID defaulted when zero-valued) but receives a fresh UUID.
//
// Fixes over the previous version: all column slices (including ids) are
// pre-sized to count, the json.Marshal error is checked instead of being
// silently discarded, and genCtx is used for consistency with the other
// dbgen helpers.
func IntelInvocations(t testing.TB, db database.Store, orig database.IntelInvocation, count int) {
	ids := make([]uuid.UUID, 0, count)
	binaryNames := make([]string, 0, count)
	binaryHashes := make([]string, 0, count)
	binaryPaths := make([]string, 0, count)
	binaryArgs := make([]json.RawMessage, 0, count)
	binaryVersions := make([]string, 0, count)
	workingDirs := make([]string, 0, count)
	gitRemoteURLs := make([]string, 0, count)
	exitCodes := make([]int32, 0, count)
	durationsMS := make([]float64, 0, count)
	for z := 0; z < count; z++ {
		ids = append(ids, uuid.New())
		binaryNames = append(binaryNames, orig.BinaryName)
		binaryHashes = append(binaryHashes, orig.BinaryHash)
		binaryPaths = append(binaryPaths, orig.BinaryPath)
		binaryArgs = append(binaryArgs, orig.BinaryArgs)
		binaryVersions = append(binaryVersions, orig.BinaryVersion)
		workingDirs = append(workingDirs, orig.WorkingDirectory)
		gitRemoteURLs = append(gitRemoteURLs, orig.GitRemoteUrl)
		exitCodes = append(exitCodes, orig.ExitCode)
		durationsMS = append(durationsMS, orig.DurationMs)
	}
	// The insert expects one JSON document containing the per-invocation
	// argument arrays rather than a slice column.
	binaryArgsData, err := json.Marshal(binaryArgs)
	require.NoError(t, err, "marshal binary args")
	err = db.InsertIntelInvocations(genCtx, database.InsertIntelInvocationsParams{
		CreatedAt:        takeFirst(orig.CreatedAt, dbtime.Now()),
		MachineID:        takeFirst(orig.MachineID, uuid.New()),
		UserID:           takeFirst(orig.UserID, uuid.New()),
		ID:               ids,
		BinaryName:       binaryNames,
		BinaryHash:       binaryHashes,
		BinaryPath:       binaryPaths,
		BinaryArgs:       binaryArgsData,
		BinaryVersion:    binaryVersions,
		WorkingDirectory: workingDirs,
		GitRemoteUrl:     gitRemoteURLs,
		ExitCode:         exitCodes,
		DurationMs:       durationsMS,
	})
	require.NoError(t, err, "insert invocations")
}
func File(t testing.TB, db database.Store, orig database.File) database.File {
file, err := db.InsertFile(genCtx, database.InsertFileParams{
ID: takeFirst(orig.ID, uuid.New()),

View File

@ -10,6 +10,7 @@ import (
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"time"
@ -151,6 +152,7 @@ type data struct {
intelCohorts []database.IntelCohort
intelMachines []database.IntelMachine
intelInvocations []database.IntelInvocation
intelInvocationSummaries []database.IntelInvocationSummary
jfrogXRayScans []database.JfrogXrayScan
licenses []database.License
oauth2ProviderApps []database.OAuth2ProviderApp
@ -897,6 +899,17 @@ func (q *FakeQuerier) getLatestWorkspaceAppByTemplateIDUserIDSlugNoLock(ctx cont
return database.WorkspaceApp{}, sql.ErrNoRows
}
// medianFloat64s returns the median of a. The slice is sorted in place as
// a side effect, so callers must not rely on its original ordering.
// An empty slice returns 0 instead of panicking on an out-of-range index
// (the previous version indexed into the slice unconditionally).
func medianFloat64s(a []float64) float64 {
	if len(a) == 0 {
		return 0
	}
	sort.Float64s(a)
	mid := len(a) / 2
	if len(a)%2 == 0 {
		// Even count: average the two middle elements.
		return (a[mid-1] + a[mid]) / 2
	}
	// Odd count (including a single element): the middle element.
	return a[mid]
}
// AcquireLock always fails on the in-memory store: advisory locks are only
// meaningful inside a real database transaction.
func (*FakeQuerier) AcquireLock(_ context.Context, _ int64) error {
	return xerrors.New("AcquireLock must only be called within a transaction")
}
@ -1302,7 +1315,7 @@ func (q *FakeQuerier) DeleteGroupMemberFromGroup(_ context.Context, arg database
return nil
}
func (q *FakeQuerier) DeleteIntelCohortsByIDs(ctx context.Context, dollar_1 []uuid.UUID) error {
// DeleteIntelCohortsByIDs is not implemented on the in-memory store yet;
// callers will panic until an implementation lands.
func (q *FakeQuerier) DeleteIntelCohortsByIDs(ctx context.Context, cohortIDs []uuid.UUID) error {
	panic("not implemented")
}
@ -1966,10 +1979,6 @@ func (q *FakeQuerier) GetAuthorizationUserRoles(_ context.Context, userID uuid.U
}, nil
}
// GetConsistencyByIntelCohort is not implemented on the in-memory store
// yet; callers will panic until an implementation lands.
func (q *FakeQuerier) GetConsistencyByIntelCohort(ctx context.Context) ([]database.GetConsistencyByIntelCohortRow, error) {
	panic("not implemented")
}
func (q *FakeQuerier) GetDBCryptKeys(_ context.Context) ([]database.DBCryptKey, error) {
q.mutex.RLock()
defer q.mutex.RUnlock()
@ -2442,10 +2451,6 @@ func (q *FakeQuerier) GetIntelCohortsMatchedByMachineIDs(_ context.Context, ids
return rows, nil
}
// GetIntelInvocationSummaries is not implemented on the in-memory store
// yet; callers will panic until an implementation lands.
func (q *FakeQuerier) GetIntelInvocationSummaries(ctx context.Context) ([]database.IntelInvocationSummary, error) {
	panic("not implemented")
}
func (q *FakeQuerier) GetIntelMachinesMatchingFilters(_ context.Context, arg database.GetIntelMachinesMatchingFiltersParams) ([]database.GetIntelMachinesMatchingFiltersRow, error) {
err := validateDatabaseType(arg)
if err != nil {
@ -2464,7 +2469,9 @@ func (q *FakeQuerier) GetIntelMachinesMatchingFilters(_ context.Context, arg dat
return nil, err
}
filterOSPlatform, err := regexp.CompilePOSIX(arg.RegexOperatingSystemPlatform)
if err != nil {
return nil, err
}
filterArch, err := regexp.CompilePOSIX(arg.RegexArchitecture)
if err != nil {
return nil, err
@ -2508,12 +2515,113 @@ func (q *FakeQuerier) GetIntelMachinesMatchingFilters(_ context.Context, arg dat
return machines, nil
}
func (q *FakeQuerier) GetIntelReportCommands(ctx context.Context, startsAt database.GetIntelReportCommandsParams) ([]database.GetIntelReportCommandsRow, error) {
panic("not implemented")
// GetIntelReportCommands aggregates stored invocation summaries into one
// report row per (window, cohort, binary name, binary args) group. The
// returned median is the median of each grouped summary's own median, an
// approximation of the true median over the raw invocations.
func (q *FakeQuerier) GetIntelReportCommands(ctx context.Context, arg database.GetIntelReportCommandsParams) ([]database.GetIntelReportCommandsRow, error) {
	err := validateDatabaseType(arg)
	if err != nil {
		return nil, err
	}
	q.mutex.RLock()
	defer q.mutex.RUnlock()
	// One report row per window/cohort/binary/args combination.
	commandKey := func(summary database.IntelInvocationSummary) string {
		return fmt.Sprintf("%s-%s-%s-%s-%s", summary.StartsAt, summary.EndsAt, summary.CohortID, summary.BinaryName, summary.BinaryArgs)
	}
	commands := map[string]database.GetIntelReportCommandsRow{}
	// Per-key medians collected for a final median-of-medians pass.
	commandMedianDurations := map[string][]float64{}
	for _, summary := range q.intelInvocationSummaries {
		// Skip summaries whose window starts before the requested time.
		if summary.StartsAt.Before(arg.StartsAt) {
			continue
		}
		// An empty cohort filter matches every cohort.
		if len(arg.CohortIds) > 0 && !slices.Contains(arg.CohortIds, summary.CohortID) {
			continue
		}
		key := commandKey(summary)
		command, ok := commands[key]
		if !ok {
			command = database.GetIntelReportCommandsRow{
				StartsAt:   summary.StartsAt,
				EndsAt:     summary.EndsAt,
				CohortID:   summary.CohortID,
				BinaryName: summary.BinaryName,
				BinaryArgs: summary.BinaryArgs,
			}
		}
		// Accumulate counts and the raw JSON aggregates from each summary.
		command.TotalInvocations += summary.TotalInvocations
		command.AggregatedBinaryPaths = append(command.AggregatedBinaryPaths, summary.BinaryPaths)
		command.AggregatedExitCodes = append(command.AggregatedExitCodes, summary.ExitCodes)
		command.AggregatedGitRemoteUrls = append(command.AggregatedGitRemoteUrls, summary.GitRemoteUrls)
		command.AggregatedWorkingDirectories = append(command.AggregatedWorkingDirectories, summary.WorkingDirectories)
		commandMedianDurations[key] = append(commandMedianDurations[key], summary.MedianDurationMs)
		commands[key] = command
	}
	rows := make([]database.GetIntelReportCommandsRow, 0, len(commands))
	for key, command := range commands {
		// Every key in commands also has durations; the ok check is
		// defensive only.
		durations, ok := commandMedianDurations[key]
		if !ok {
			continue
		}
		command.MedianDurationMs = medianFloat64s(durations)
		rows = append(rows, command)
	}
	return rows, nil
}
func (q *FakeQuerier) GetIntelReportGitRemotes(ctx context.Context, startsAt database.GetIntelReportGitRemotesParams) ([]database.GetIntelReportGitRemotesRow, error) {
panic("not implemented")
// GetIntelReportGitRemotes aggregates stored invocation summaries into one
// report row per (window, cohort, git remote URL) group. A single summary
// can fan out to multiple rows because its GitRemoteUrls JSON maps each
// remote URL to an invocation count.
func (q *FakeQuerier) GetIntelReportGitRemotes(_ context.Context, opts database.GetIntelReportGitRemotesParams) ([]database.GetIntelReportGitRemotesRow, error) {
	err := validateDatabaseType(opts)
	if err != nil {
		return nil, err
	}
	q.mutex.RLock()
	defer q.mutex.RUnlock()
	// One report row per window/cohort/remote-URL combination.
	remoteKey := func(summary database.IntelInvocationSummary, remoteURL string) string {
		return fmt.Sprintf("%s-%s-%s-%s", summary.StartsAt, summary.EndsAt, summary.CohortID, remoteURL)
	}
	remotes := map[string]database.GetIntelReportGitRemotesRow{}
	// Per-key medians collected for a final median-of-medians pass.
	remoteMedianDurations := map[string][]float64{}
	for _, summary := range q.intelInvocationSummaries {
		// Skip summaries whose window starts before the requested time.
		if summary.StartsAt.Before(opts.StartsAt) {
			continue
		}
		// An empty cohort filter matches every cohort.
		if len(opts.CohortIds) > 0 && !slices.Contains(opts.CohortIds, summary.CohortID) {
			continue
		}
		// Maps remote URLs to invocation counts
		gitRemoteURLs := map[string]int{}
		err = json.Unmarshal(summary.GitRemoteUrls, &gitRemoteURLs)
		if err != nil {
			return nil, err
		}
		for remoteURL, invocations := range gitRemoteURLs {
			key := remoteKey(summary, remoteURL)
			remote, ok := remotes[key]
			if !ok {
				remote = database.GetIntelReportGitRemotesRow{
					StartsAt:     summary.StartsAt,
					EndsAt:       summary.EndsAt,
					CohortID:     summary.CohortID,
					GitRemoteUrl: remoteURL,
				}
			}
			remote.TotalInvocations += int64(invocations)
			// NOTE(review): the float64 conversion is a no-op now that
			// MedianDurationMs is already a float64 on the model.
			remoteMedianDurations[key] = append(remoteMedianDurations[key], float64(summary.MedianDurationMs))
			remotes[key] = remote
		}
	}
	rows := make([]database.GetIntelReportGitRemotesRow, 0, len(remotes))
	for key, remote := range remotes {
		// Every key in remotes also has durations; the ok check is
		// defensive only.
		durations, ok := remoteMedianDurations[key]
		if !ok {
			continue
		}
		remote.MedianDurationMs = medianFloat64s(durations)
		rows = append(rows, remote)
	}
	return rows, nil
}
}
func (q *FakeQuerier) GetJFrogXrayScanByWorkspaceAndAgentID(_ context.Context, arg database.GetJFrogXrayScanByWorkspaceAndAgentIDParams) (database.JfrogXrayScan, error) {
@ -8429,8 +8537,145 @@ func (q *FakeQuerier) UpsertIntelCohort(_ context.Context, arg database.UpsertIn
return cohort, nil
}
func (q *FakeQuerier) UpsertIntelInvocationSummaries(ctx context.Context) error {
panic("not implemented")
// UpsertIntelInvocationSummaries rolls raw invocations up into summary
// rows, mimicking the PostgreSQL rollup query:
//
//  1. Match every machine against every cohort's POSIX regex filters.
//  2. Bucket invocations from matched machines into 15-minute windows
//     keyed by (window, cohort, binary name, binary args).
//  3. Store one summary per bucket and delete the raw invocations that
//     were rolled up.
//
// Improvement over the previous version: rolled-up invocations are removed
// with a single slices.DeleteFunc pass using a set lookup, instead of one
// full-slice scan per invocation ID (O(n*m) -> O(n) per summary).
func (q *FakeQuerier) UpsertIntelInvocationSummaries(_ context.Context) error {
	q.mutex.Lock()
	defer q.mutex.Unlock()

	type machineCohort struct {
		MachineID uuid.UUID
		CohortID  uuid.UUID
	}
	// Pair every machine with every cohort whose regex filters match it.
	machineCohorts := make([]machineCohort, 0)
	for _, cohort := range q.intelCohorts {
		filterOS, err := regexp.CompilePOSIX(cohort.RegexOperatingSystem)
		if err != nil {
			return err
		}
		filterOSPlatform, err := regexp.CompilePOSIX(cohort.RegexOperatingSystemPlatform)
		if err != nil {
			return err
		}
		filterOSVersion, err := regexp.CompilePOSIX(cohort.RegexOperatingSystemVersion)
		if err != nil {
			return err
		}
		filterArch, err := regexp.CompilePOSIX(cohort.RegexArchitecture)
		if err != nil {
			return err
		}
		filterInstanceID, err := regexp.CompilePOSIX(cohort.RegexInstanceID)
		if err != nil {
			return err
		}
		for _, machine := range q.intelMachines {
			if !filterOS.MatchString(machine.OperatingSystem) {
				continue
			}
			if !filterOSPlatform.MatchString(machine.OperatingSystemPlatform) {
				continue
			}
			if !filterOSVersion.MatchString(machine.OperatingSystemVersion) {
				continue
			}
			if !filterArch.MatchString(machine.Architecture) {
				continue
			}
			if !filterInstanceID.MatchString(machine.InstanceID) {
				continue
			}
			machineCohorts = append(machineCohorts, machineCohort{
				CohortID:  cohort.ID,
				MachineID: machine.ID,
			})
		}
	}

	// Invocations are bucketed into fixed windows of this length.
	truncateDuration := 15 * time.Minute
	invocationKey := func(invocation database.IntelInvocation, cohortID string) string {
		truncatedCreatedAt := invocation.CreatedAt.Truncate(truncateDuration)
		return fmt.Sprintf("%s-%s-%s-%s", truncatedCreatedAt.Format(time.RFC3339), cohortID, invocation.BinaryName, invocation.BinaryArgs)
	}
	// summaryWithTypes carries aggregation state alongside the summary row;
	// the maps are marshaled to JSON (or counted) before the row is stored.
	type summaryWithTypes struct {
		database.IntelInvocationSummary
		InvocationIDs      map[uuid.UUID]struct{}
		MachineIDs         map[uuid.UUID]struct{}
		BinaryPaths        map[string]int
		WorkingDirectories map[string]int
		GitRemoteUrls      map[string]int
		ExitCodes          map[string]int
		DurationMS         []float64
	}
	invocationSummaries := make(map[string]summaryWithTypes)
	for _, invocation := range q.intelInvocations {
		for _, mc := range machineCohorts {
			if mc.MachineID != invocation.MachineID {
				continue
			}
			key := invocationKey(invocation, mc.CohortID.String())
			summary, ok := invocationSummaries[key]
			if !ok {
				startsAt := invocation.CreatedAt.Truncate(truncateDuration)
				summary = summaryWithTypes{
					IntelInvocationSummary: database.IntelInvocationSummary{
						ID:         uuid.New(),
						CohortID:   mc.CohortID,
						StartsAt:   startsAt,
						EndsAt:     startsAt.Add(truncateDuration),
						BinaryName: invocation.BinaryName,
						BinaryArgs: invocation.BinaryArgs,
					},
					InvocationIDs:      make(map[uuid.UUID]struct{}),
					MachineIDs:         make(map[uuid.UUID]struct{}),
					BinaryPaths:        make(map[string]int),
					WorkingDirectories: make(map[string]int),
					GitRemoteUrls:      make(map[string]int),
					ExitCodes:          make(map[string]int),
				}
			}
			summary.BinaryPaths[invocation.BinaryPath]++
			summary.WorkingDirectories[invocation.WorkingDirectory]++
			summary.GitRemoteUrls[invocation.GitRemoteUrl]++
			summary.ExitCodes[strconv.Itoa(int(invocation.ExitCode))]++
			summary.InvocationIDs[invocation.ID] = struct{}{}
			summary.MachineIDs[invocation.MachineID] = struct{}{}
			summary.DurationMS = append(summary.DurationMS, invocation.DurationMs)
			invocationSummaries[key] = summary
		}
	}

	var err error
	for _, wrapperSummary := range invocationSummaries {
		summary := wrapperSummary.IntelInvocationSummary
		summary.UniqueMachines = int64(len(wrapperSummary.MachineIDs))
		summary.TotalInvocations = int64(len(wrapperSummary.InvocationIDs))
		summary.WorkingDirectories, err = json.Marshal(wrapperSummary.WorkingDirectories)
		if err != nil {
			return err
		}
		summary.BinaryPaths, err = json.Marshal(wrapperSummary.BinaryPaths)
		if err != nil {
			return err
		}
		summary.GitRemoteUrls, err = json.Marshal(wrapperSummary.GitRemoteUrls)
		if err != nil {
			return err
		}
		summary.ExitCodes, err = json.Marshal(wrapperSummary.ExitCodes)
		if err != nil {
			return err
		}
		summary.MedianDurationMs = medianFloat64s(wrapperSummary.DurationMS)
		q.intelInvocationSummaries = append(q.intelInvocationSummaries, summary)
		// Remove invocations that have been compressed into this summary.
		// One DeleteFunc pass with a set lookup, rather than rescanning the
		// whole slice for each individual invocation ID.
		q.intelInvocations = slices.DeleteFunc(q.intelInvocations, func(invocation database.IntelInvocation) bool {
			_, compressed := wrapperSummary.InvocationIDs[invocation.ID]
			return compressed
		})
	}
	return nil
}
func (q *FakeQuerier) UpsertIntelMachine(_ context.Context, arg database.UpsertIntelMachineParams) (database.IntelMachine, error) {
@ -9099,14 +9344,7 @@ func (q *FakeQuerier) UpsertTemplateUsageStats(ctx context.Context) error {
}
}
row.Latencies = append(row.Latencies, was.ConnectionMedianLatencyMS)
sort.Float64s(row.Latencies)
if len(row.Latencies) == 1 {
row.MedianLatencyMS = was.ConnectionMedianLatencyMS
} else if len(row.Latencies)%2 == 0 {
row.MedianLatencyMS = (row.Latencies[len(row.Latencies)/2-1] + row.Latencies[len(row.Latencies)/2]) / 2
} else {
row.MedianLatencyMS = row.Latencies[len(row.Latencies)/2]
}
row.MedianLatencyMS = medianFloat64s(row.Latencies)
latenciesRows[key] = row
}
}

View File

@ -450,13 +450,6 @@ func (m metricsStore) GetAuthorizationUserRoles(ctx context.Context, userID uuid
return row, err
}
// GetConsistencyByIntelCohort forwards to the wrapped store and records the
// call's latency in the query-latency histogram.
func (m metricsStore) GetConsistencyByIntelCohort(ctx context.Context) ([]database.GetConsistencyByIntelCohortRow, error) {
	start := time.Now()
	defer func() {
		m.queryLatencies.WithLabelValues("GetConsistencyByIntelCohort").Observe(time.Since(start).Seconds())
	}()
	return m.s.GetConsistencyByIntelCohort(ctx)
}
func (m metricsStore) GetDBCryptKeys(ctx context.Context) ([]database.DBCryptKey, error) {
start := time.Now()
r0, r1 := m.s.GetDBCryptKeys(ctx)
@ -618,13 +611,6 @@ func (m metricsStore) GetIntelCohortsMatchedByMachineIDs(ctx context.Context, id
return r0, r1
}
// GetIntelInvocationSummaries forwards to the wrapped store and records the
// call's latency in the query-latency histogram.
func (m metricsStore) GetIntelInvocationSummaries(ctx context.Context) ([]database.IntelInvocationSummary, error) {
	start := time.Now()
	defer func() {
		m.queryLatencies.WithLabelValues("GetIntelInvocationSummaries").Observe(time.Since(start).Seconds())
	}()
	return m.s.GetIntelInvocationSummaries(ctx)
}
func (m metricsStore) GetIntelMachinesMatchingFilters(ctx context.Context, arg database.GetIntelMachinesMatchingFiltersParams) ([]database.GetIntelMachinesMatchingFiltersRow, error) {
start := time.Now()
r0, r1 := m.s.GetIntelMachinesMatchingFilters(ctx, arg)

View File

@ -854,21 +854,6 @@ func (mr *MockStoreMockRecorder) GetAuthorizedWorkspaces(arg0, arg1, arg2 any) *
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAuthorizedWorkspaces", reflect.TypeOf((*MockStore)(nil).GetAuthorizedWorkspaces), arg0, arg1, arg2)
}
// GetConsistencyByIntelCohort mocks base method.
func (m *MockStore) GetConsistencyByIntelCohort(arg0 context.Context) ([]database.GetConsistencyByIntelCohortRow, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetConsistencyByIntelCohort", arg0)
	// Failed type assertions intentionally yield zero values, matching
	// gomock's generated-code convention.
	ret0, _ := ret[0].([]database.GetConsistencyByIntelCohortRow)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GetConsistencyByIntelCohort indicates an expected call of GetConsistencyByIntelCohort.
func (mr *MockStoreMockRecorder) GetConsistencyByIntelCohort(arg0 any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetConsistencyByIntelCohort", reflect.TypeOf((*MockStore)(nil).GetConsistencyByIntelCohort), arg0)
}
// GetDBCryptKeys mocks base method.
func (m *MockStore) GetDBCryptKeys(arg0 context.Context) ([]database.DBCryptKey, error) {
m.ctrl.T.Helper()
@ -1214,21 +1199,6 @@ func (mr *MockStoreMockRecorder) GetIntelCohortsMatchedByMachineIDs(arg0, arg1 a
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetIntelCohortsMatchedByMachineIDs", reflect.TypeOf((*MockStore)(nil).GetIntelCohortsMatchedByMachineIDs), arg0, arg1)
}
// GetIntelInvocationSummaries mocks base method.
func (m *MockStore) GetIntelInvocationSummaries(arg0 context.Context) ([]database.IntelInvocationSummary, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetIntelInvocationSummaries", arg0)
	// Failed type assertions intentionally yield zero values, matching
	// gomock's generated-code convention.
	ret0, _ := ret[0].([]database.IntelInvocationSummary)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GetIntelInvocationSummaries indicates an expected call of GetIntelInvocationSummaries.
func (mr *MockStoreMockRecorder) GetIntelInvocationSummaries(arg0 any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetIntelInvocationSummaries", reflect.TypeOf((*MockStore)(nil).GetIntelInvocationSummaries), arg0)
}
// GetIntelMachinesMatchingFilters mocks base method.
func (m *MockStore) GetIntelMachinesMatchingFilters(arg0 context.Context, arg1 database.GetIntelMachinesMatchingFiltersParams) ([]database.GetIntelMachinesMatchingFiltersRow, error) {
m.ctrl.T.Helper()

View File

@ -511,7 +511,7 @@ CREATE TABLE intel_invocation_summaries (
exit_codes jsonb NOT NULL,
unique_machines bigint NOT NULL,
total_invocations bigint NOT NULL,
median_duration_ms bigint NOT NULL
median_duration_ms double precision NOT NULL
);
CREATE UNLOGGED TABLE intel_invocations (
@ -527,7 +527,7 @@ CREATE UNLOGGED TABLE intel_invocations (
working_directory text NOT NULL,
git_remote_url text NOT NULL,
exit_code integer NOT NULL,
duration_ms integer NOT NULL
duration_ms double precision NOT NULL
);
CREATE TABLE intel_machines (

View File

@ -59,7 +59,7 @@ CREATE UNLOGGED TABLE intel_invocations (
working_directory TEXT NOT NULL,
git_remote_url TEXT NOT NULL,
exit_code INT NOT NULL,
duration_ms INT NOT NULL
duration_ms FLOAT NOT NULL
);
CREATE INDEX idx_intel_invocations_id ON intel_invocations (id);
@ -86,6 +86,6 @@ CREATE TABLE intel_invocation_summaries (
exit_codes jsonb NOT NULL,
unique_machines BIGINT NOT NULL,
total_invocations BIGINT NOT NULL,
median_duration_ms BIGINT NOT NULL
median_duration_ms FLOAT NOT NULL
);

View File

@ -1877,7 +1877,7 @@ type IntelInvocation struct {
WorkingDirectory string `db:"working_directory" json:"working_directory"`
GitRemoteUrl string `db:"git_remote_url" json:"git_remote_url"`
ExitCode int32 `db:"exit_code" json:"exit_code"`
DurationMs int32 `db:"duration_ms" json:"duration_ms"`
DurationMs float64 `db:"duration_ms" json:"duration_ms"`
}
type IntelInvocationSummary struct {
@ -1893,7 +1893,7 @@ type IntelInvocationSummary struct {
ExitCodes json.RawMessage `db:"exit_codes" json:"exit_codes"`
UniqueMachines int64 `db:"unique_machines" json:"unique_machines"`
TotalInvocations int64 `db:"total_invocations" json:"total_invocations"`
MedianDurationMs int64 `db:"median_duration_ms" json:"median_duration_ms"`
MedianDurationMs float64 `db:"median_duration_ms" json:"median_duration_ms"`
}
type IntelMachine struct {

View File

@ -58,7 +58,7 @@ type sqlcQuerier interface {
DeleteGitSSHKey(ctx context.Context, userID uuid.UUID) error
DeleteGroupByID(ctx context.Context, id uuid.UUID) error
DeleteGroupMemberFromGroup(ctx context.Context, arg DeleteGroupMemberFromGroupParams) error
DeleteIntelCohortsByIDs(ctx context.Context, dollar_1 []uuid.UUID) error
DeleteIntelCohortsByIDs(ctx context.Context, cohortIds []uuid.UUID) error
DeleteLicense(ctx context.Context, id int32) (int32, error)
DeleteOAuth2ProviderAppByID(ctx context.Context, id uuid.UUID) error
DeleteOAuth2ProviderAppCodeByID(ctx context.Context, id uuid.UUID) error
@ -104,7 +104,6 @@ type sqlcQuerier interface {
// This function returns roles for authorization purposes. Implied member roles
// are included.
GetAuthorizationUserRoles(ctx context.Context, userID uuid.UUID) (GetAuthorizationUserRolesRow, error)
GetConsistencyByIntelCohort(ctx context.Context) ([]GetConsistencyByIntelCohortRow, error)
GetDBCryptKeys(ctx context.Context) ([]DBCryptKey, error)
GetDERPMeshKey(ctx context.Context) (string, error)
GetDefaultOrganization(ctx context.Context) (Organization, error)
@ -132,7 +131,6 @@ type sqlcQuerier interface {
GetIntelCohortsByOrganizationID(ctx context.Context, organizationID uuid.UUID) ([]IntelCohort, error)
// Obtains a list of cohorts that a user can track invocations for.
GetIntelCohortsMatchedByMachineIDs(ctx context.Context, ids []uuid.UUID) ([]GetIntelCohortsMatchedByMachineIDsRow, error)
GetIntelInvocationSummaries(ctx context.Context) ([]IntelInvocationSummary, error)
GetIntelMachinesMatchingFilters(ctx context.Context, arg GetIntelMachinesMatchingFiltersParams) ([]GetIntelMachinesMatchingFiltersRow, error)
GetIntelReportCommands(ctx context.Context, arg GetIntelReportCommandsParams) ([]GetIntelReportCommandsRow, error)
// Get the total amount of time spent invoking commands

View File

@ -6,20 +6,16 @@ import (
"context"
"database/sql"
"encoding/json"
"errors"
"fmt"
"net"
"sort"
"testing"
"time"
"github.com/google/uuid"
"github.com/lib/pq"
"github.com/sqlc-dev/pqtype"
"github.com/stretchr/testify/require"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/dbgen"
"github.com/coder/coder/v2/coderd/database/dbtestutil"
"github.com/coder/coder/v2/coderd/database/dbtime"
"github.com/coder/coder/v2/coderd/database/migrations"
"github.com/coder/coder/v2/testutil"
@ -683,192 +679,97 @@ func requireUsersMatch(t testing.TB, expected []database.User, found []database.
require.ElementsMatch(t, expected, database.ConvertUserRows(found), msg)
}
func TestIntel(t *testing.T) {
// TestIntelReports ensures the in-memory database and PostgreSQL database
// return the same values for various test scenarios. The query is quite
// complex, so it's good to manually verify the outputs.
func TestIntelReports(t *testing.T) {
t.Parallel()
if testing.Short() {
return
}
sqlDB := testSQLDB(t)
err := migrations.Up(sqlDB)
require.NoError(t, err)
db := database.New(sqlDB)
ctx := context.Background()
cohort, err := db.UpsertIntelCohort(ctx, database.UpsertIntelCohortParams{
ID: uuid.New(),
CreatedAt: dbtime.Now(),
UpdatedAt: dbtime.Now(),
Name: "cohort",
OrganizationID: uuid.New(),
CreatedBy: uuid.New(),
DisplayName: "cohort",
TrackedExecutables: []string{"go"},
RegexOperatingSystem: ".*",
RegexOperatingSystemVersion: ".*",
RegexOperatingSystemPlatform: ".*",
RegexArchitecture: ".*",
RegexInstanceID: ".*",
})
require.NoError(t, err)
machine1, err := db.UpsertIntelMachine(ctx, database.UpsertIntelMachineParams{
ID: uuid.New(),
CreatedAt: dbtime.Now(),
UpdatedAt: dbtime.Now(),
InstanceID: "some-id",
OrganizationID: cohort.OrganizationID,
UserID: cohort.CreatedBy,
IPAddress: pqtype.Inet{
IPNet: net.IPNet{
IP: net.IPv4(127, 0, 0, 1),
Mask: net.IPv4Mask(255, 255, 255, 255),
},
Valid: true,
},
Hostname: "host",
OperatingSystem: "linux",
CPUCores: 4,
MemoryMBTotal: 16 * 1024,
Architecture: "amd64",
DaemonVersion: "1.0.0",
})
require.NoError(t, err)
machine2, err := db.UpsertIntelMachine(ctx, database.UpsertIntelMachineParams{
ID: uuid.New(),
CreatedAt: dbtime.Now(),
UpdatedAt: dbtime.Now(),
InstanceID: "some-id-2",
OrganizationID: cohort.OrganizationID,
UserID: cohort.CreatedBy,
IPAddress: pqtype.Inet{
IPNet: net.IPNet{
IP: net.IPv4(127, 0, 0, 1),
Mask: net.IPv4Mask(255, 255, 255, 255),
},
Valid: true,
},
Hostname: "host",
OperatingSystem: "linux",
CPUCores: 4,
MemoryMBTotal: 16 * 1024,
Architecture: "amd64",
DaemonVersion: "1.0.0",
})
require.NoError(t, err)
rows, err := db.GetIntelMachinesMatchingFilters(ctx, database.GetIntelMachinesMatchingFiltersParams{
OrganizationID: machine1.OrganizationID,
RegexOperatingSystem: ".*",
RegexOperatingSystemVersion: ".*",
RegexOperatingSystemPlatform: ".*",
RegexArchitecture: ".*",
RegexInstanceID: ".*",
LimitOpt: 999,
OffsetOpt: 0,
})
require.NoError(t, err)
require.Len(t, rows, 2)
chunkSize := 100
numberOfChunks := 100
for n := 0; n < numberOfChunks; n++ {
i := chunkSize
ids := make([]uuid.UUID, 0)
binaryNames := make([]string, 0, i)
binaryHashes := make([]string, 0, i)
binaryPaths := make([]string, 0, i)
binaryArgs := make([]json.RawMessage, 0, i)
binaryVersions := make([]string, 0, i)
workingDirs := make([]string, 0, i)
gitRemoteURLs := make([]string, 0, i)
exitCodes := make([]int32, 0, i)
durationsMS := make([]int32, 0, i)
for z := 0; z < i; z++ {
ids = append(ids, uuid.New())
binaryNames = append(binaryNames, "go")
binaryHashes = append(binaryHashes, "my-hash")
binaryPaths = append(binaryPaths, "/usr/bin/go")
args := []string{"test"}
workingDir := "/home/coder"
durationMS := int32(15)
if z%2 == 0 {
args = []string{"build"}
if z%3 == 0 {
workingDir = "/home/moo"
}
durationMS = 5
}
argsData, _ := json.Marshal(args)
binaryArgs = append(binaryArgs, argsData)
binaryVersions = append(binaryVersions, "version")
workingDirs = append(workingDirs, workingDir)
gitRemoteURLs = append(gitRemoteURLs, "remote")
exitCodes = append(exitCodes, 0)
durationsMS = append(durationsMS, durationMS)
}
machineID := machine1.ID
if n%2 == 0 {
machineID = machine2.ID
}
binaryArgsData, _ := json.Marshal(binaryArgs)
err = db.InsertIntelInvocations(ctx, database.InsertIntelInvocationsParams{
CreatedAt: dbtime.Now(),
MachineID: machineID,
UserID: machine1.UserID,
ID: ids,
BinaryName: binaryNames,
BinaryHash: binaryHashes,
BinaryPath: binaryPaths,
BinaryArgs: binaryArgsData,
BinaryVersion: binaryVersions,
WorkingDirectory: workingDirs,
GitRemoteUrl: gitRemoteURLs,
ExitCode: exitCodes,
DurationMs: durationsMS,
t.Run("ReportGitRemotes", func(t *testing.T) {
t.Parallel()
t.Run("MedianDurationMS", func(t *testing.T) {
t.Parallel()
db, _ := dbtestutil.NewDB(t)
// A cohort that matches all machines is necessary.
cohort := dbgen.IntelCohort(t, db, database.IntelCohort{})
machine := dbgen.IntelMachine(t, db, database.IntelMachine{})
dbgen.IntelInvocations(t, db, database.IntelInvocation{
MachineID: machine.ID,
GitRemoteUrl: "https://github.com/coder/coder",
DurationMs: 5,
}, 50)
dbgen.IntelInvocations(t, db, database.IntelInvocation{
MachineID: machine.ID,
GitRemoteUrl: "https://github.com/coder/coder",
DurationMs: 10,
}, 50)
err := db.UpsertIntelInvocationSummaries(context.Background())
require.NoError(t, err)
rows, err := db.GetIntelReportGitRemotes(context.Background(), database.GetIntelReportGitRemotesParams{
CohortIds: []uuid.UUID{cohort.ID},
})
require.NoError(t, err)
require.Len(t, rows, 1)
row := rows[0]
require.Equal(t, 7.5, row.MedianDurationMs)
})
require.NoError(t, err)
t.Logf("inserted %d (%d/%d) invocations", chunkSize, n, numberOfChunks)
}
t.Run("MultipleCohorts", func(t *testing.T) {
	// Ensures that multiple cohorts with a matching remote URL
	// are properly returned.
	t.Parallel()
	// Use a locally-scoped context: the previous version captured ctx
	// and err from the enclosing function, which races under t.Parallel().
	ctx := context.Background()
	db, _ := dbtestutil.NewDB(t)
	// Should catch everything.
	allCohort := dbgen.IntelCohort(t, db, database.IntelCohort{})
	// Create two cohorts to track. One for Linux machines
	// and one for Windows machines.
	linuxCohort := dbgen.IntelCohort(t, db, database.IntelCohort{
		RegexOperatingSystem: "linux",
	})
	windowsCohort := dbgen.IntelCohort(t, db, database.IntelCohort{
		RegexOperatingSystem: "windows",
	})
	// Create machines to match the cohorts!
	windows := dbgen.IntelMachine(t, db, database.IntelMachine{
		OperatingSystem: "windows",
	})
	linux := dbgen.IntelMachine(t, db, database.IntelMachine{
		OperatingSystem: "linux",
	})
	// Insert invocations for each machine.
	dbgen.IntelInvocations(t, db, database.IntelInvocation{
		MachineID:    windows.ID,
		GitRemoteUrl: "https://github.com/coder/coder",
	}, 50)
	dbgen.IntelInvocations(t, db, database.IntelInvocation{
		MachineID:    linux.ID,
		GitRemoteUrl: "https://github.com/coder/coder",
	}, 50)
	// Roll the raw invocations up into summaries. This must run AFTER
	// the invocations are inserted — the old code summarized first and
	// inspected empty data.
	err := db.UpsertIntelInvocationSummaries(ctx)
	if err != nil {
		// Surface the underlying Postgres message, which is far more
		// useful than the generic driver error string.
		var pqErr *pq.Error
		if errors.As(err, &pqErr) {
			t.Fatalf("failed: %+v", pqErr.Message)
		}
	}
	require.NoError(t, err)
	// Sanity-check that every summary's binary-path rollup is valid JSON.
	summaries, err := db.GetIntelInvocationSummaries(ctx)
	require.NoError(t, err)
	for _, summary := range summaries {
		binaryPaths := map[string]int{}
		err = json.Unmarshal(summary.BinaryPaths, &binaryPaths)
		require.NoError(t, err)
	}
	rows, err := db.GetIntelReportGitRemotes(ctx, database.GetIntelReportGitRemotesParams{
		CohortIds: []uuid.UUID{allCohort.ID, linuxCohort.ID, windowsCohort.ID},
	})
	require.NoError(t, err)
	require.Len(t, rows, 3)
	// Each cohort should see exactly the invocations from the machines
	// it matches: allCohort matches both machines (100), the OS-scoped
	// cohorts match one machine each (50).
	for _, row := range rows {
		switch row.CohortID {
		case allCohort.ID:
			require.Equal(t, int64(100), row.TotalInvocations)
		case linuxCohort.ID:
			require.Equal(t, int64(50), row.TotalInvocations)
		case windowsCohort.ID:
			require.Equal(t, int64(50), row.TotalInvocations)
		default:
			t.Fatalf("unexpected cohort ID: %s", row.CohortID)
		}
	}
})
})
t.Run("ReportCommands", func(t *testing.T) {
	t.Parallel()
	// TODO(review): unimplemented placeholder — this subtest currently
	// asserts nothing and always passes.
})
}

View File

@ -2920,53 +2920,11 @@ const deleteIntelCohortsByIDs = `-- name: DeleteIntelCohortsByIDs :exec
DELETE FROM intel_cohorts WHERE id = ANY($1::uuid[])
`
func (q *sqlQuerier) DeleteIntelCohortsByIDs(ctx context.Context, dollar_1 []uuid.UUID) error {
_, err := q.db.ExecContext(ctx, deleteIntelCohortsByIDs, pq.Array(dollar_1))
func (q *sqlQuerier) DeleteIntelCohortsByIDs(ctx context.Context, cohortIds []uuid.UUID) error {
_, err := q.db.ExecContext(ctx, deleteIntelCohortsByIDs, pq.Array(cohortIds))
return err
}
// getConsistencyByIntelCohort computes, per (binary_path, binary_args)
// pair, the median (50th percentile) duration_ms across ALL rows of
// intel_invocations, ordered slowest-first.
// NOTE(review): despite the name, the query does not filter or group by
// cohort — verify the name against the intended report.
const getConsistencyByIntelCohort = `-- name: GetConsistencyByIntelCohort :many
SELECT
binary_path,
binary_args,
PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY duration_ms) AS median_duration
FROM
intel_invocations
GROUP BY
binary_path, binary_args
ORDER BY
median_duration DESC
`
// GetConsistencyByIntelCohortRow is one result row: an invocation
// signature (path + raw JSON args) and its median duration in ms.
type GetConsistencyByIntelCohortRow struct {
BinaryPath string `db:"binary_path" json:"binary_path"`
BinaryArgs json.RawMessage `db:"binary_args" json:"binary_args"`
MedianDuration float64 `db:"median_duration" json:"median_duration"`
}
// GetConsistencyByIntelCohort executes the aggregate query above and
// scans every result row. Generated-style scanner: checks Close and Err
// after iteration so partial reads surface as errors.
func (q *sqlQuerier) GetConsistencyByIntelCohort(ctx context.Context) ([]GetConsistencyByIntelCohortRow, error) {
rows, err := q.db.QueryContext(ctx, getConsistencyByIntelCohort)
if err != nil {
return nil, err
}
defer rows.Close()
var items []GetConsistencyByIntelCohortRow
for rows.Next() {
var i GetConsistencyByIntelCohortRow
if err := rows.Scan(&i.BinaryPath, &i.BinaryArgs, &i.MedianDuration); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Close(); err != nil {
return nil, err
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
// getIntelCohortsByOrganizationID selects every intel cohort belonging
// to the organization passed as $1.
const getIntelCohortsByOrganizationID = `-- name: GetIntelCohortsByOrganizationID :many
SELECT id, organization_id, created_by, created_at, updated_at, name, display_name, icon, description, regex_operating_system, regex_operating_system_platform, regex_operating_system_version, regex_architecture, regex_instance_id, tracked_executables FROM intel_cohorts WHERE organization_id = $1
`
@ -3056,47 +3014,6 @@ func (q *sqlQuerier) GetIntelCohortsMatchedByMachineIDs(ctx context.Context, ids
return items, nil
}
// getIntelInvocationSummaries selects every row of
// intel_invocation_summaries with no filtering.
const getIntelInvocationSummaries = `-- name: GetIntelInvocationSummaries :many
SELECT id, cohort_id, starts_at, ends_at, binary_name, binary_args, binary_paths, working_directories, git_remote_urls, exit_codes, unique_machines, total_invocations, median_duration_ms FROM intel_invocation_summaries
`
// GetIntelInvocationSummaries returns all invocation summaries.
// Generated-style scanner: scans each column positionally, then checks
// Close and Err so iteration failures are not silently dropped.
func (q *sqlQuerier) GetIntelInvocationSummaries(ctx context.Context) ([]IntelInvocationSummary, error) {
rows, err := q.db.QueryContext(ctx, getIntelInvocationSummaries)
if err != nil {
return nil, err
}
defer rows.Close()
var items []IntelInvocationSummary
for rows.Next() {
var i IntelInvocationSummary
if err := rows.Scan(
&i.ID,
&i.CohortID,
&i.StartsAt,
&i.EndsAt,
&i.BinaryName,
&i.BinaryArgs,
&i.BinaryPaths,
&i.WorkingDirectories,
&i.GitRemoteUrls,
&i.ExitCodes,
&i.UniqueMachines,
&i.TotalInvocations,
&i.MedianDurationMs,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Close(); err != nil {
return nil, err
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const getIntelMachinesMatchingFilters = `-- name: GetIntelMachinesMatchingFilters :many
WITH filtered_machines AS (
SELECT
@ -3199,7 +3116,7 @@ FROM
WHERE
starts_at >= $1
AND
(CARDINALITY($2 :: uuid []) = 0 OR cohort_id = ANY($2 :: uuid []))
(cohort_id = ANY($2 :: uuid []))
GROUP BY
starts_at, ends_at, cohort_id, binary_name, binary_args
`
@ -3272,7 +3189,7 @@ FROM
WHERE
starts_at >= $1
AND
(CARDINALITY($2 :: uuid []) = 0 OR cohort_id = ANY($2 :: uuid []))
(cohort_id = ANY($2 :: uuid []))
GROUP BY
starts_at,
ends_at,
@ -3345,7 +3262,7 @@ SELECT
unnest($10 :: text[ ]) as working_directory,
unnest($11 :: text[ ]) as git_remote_url,
unnest($12 :: int [ ]) as exit_code,
unnest($13 :: int[ ]) as duration_ms
unnest($13 :: float[ ]) as duration_ms
`
type InsertIntelInvocationsParams struct {
@ -3361,7 +3278,7 @@ type InsertIntelInvocationsParams struct {
WorkingDirectory []string `db:"working_directory" json:"working_directory"`
GitRemoteUrl []string `db:"git_remote_url" json:"git_remote_url"`
ExitCode []int32 `db:"exit_code" json:"exit_code"`
DurationMs []int32 `db:"duration_ms" json:"duration_ms"`
DurationMs []float64 `db:"duration_ms" json:"duration_ms"`
}
// Insert many invocations using unnest
@ -3465,7 +3382,7 @@ WITH machine_cohorts AS (
m.id AS machine_id,
c.id AS cohort_id
FROM intel_machines m
JOIN intel_cohorts c ON
LEFT JOIN intel_cohorts c ON
m.operating_system ~ c.regex_operating_system AND
m.operating_system_platform ~ c.regex_operating_system_platform AND
m.operating_system_version ~ c.regex_operating_system_version AND

View File

@ -19,7 +19,7 @@ INSERT INTO intel_cohorts (id, organization_id, created_by, created_at, updated_
SELECT * FROM intel_cohorts WHERE organization_id = $1;
-- name: DeleteIntelCohortsByIDs :exec
DELETE FROM intel_cohorts WHERE id = ANY($1::uuid[]);
DELETE FROM intel_cohorts WHERE id = ANY(@cohort_ids::uuid[]);
-- name: UpsertIntelMachine :one
INSERT INTO intel_machines (id, created_at, updated_at, instance_id, organization_id, user_id, ip_address, hostname, operating_system, operating_system_platform, operating_system_version, cpu_cores, memory_mb_total, architecture, daemon_version)
@ -57,7 +57,7 @@ SELECT
unnest(@working_directory :: text[ ]) as working_directory,
unnest(@git_remote_url :: text[ ]) as git_remote_url,
unnest(@exit_code :: int [ ]) as exit_code,
unnest(@duration_ms :: int[ ]) as duration_ms;
unnest(@duration_ms :: float[ ]) as duration_ms;
-- name: GetIntelCohortsMatchedByMachineIDs :many
-- Obtains a list of cohorts that a user can track invocations for.
@ -92,25 +92,13 @@ WITH filtered_machines AS (
)
SELECT tm.count, sqlc.embed(intel_machines) FROM paginated_machines AS intel_machines CROSS JOIN total_machines as tm;
-- name: GetConsistencyByIntelCohort :many
SELECT
binary_path,
binary_args,
PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY duration_ms) AS median_duration
FROM
intel_invocations
GROUP BY
binary_path, binary_args
ORDER BY
median_duration DESC;
-- name: UpsertIntelInvocationSummaries :exec
WITH machine_cohorts AS (
SELECT
m.id AS machine_id,
c.id AS cohort_id
FROM intel_machines m
JOIN intel_cohorts c ON
LEFT JOIN intel_cohorts c ON
m.operating_system ~ c.regex_operating_system AND
m.operating_system_platform ~ c.regex_operating_system_platform AND
m.operating_system_version ~ c.regex_operating_system_version AND
@ -216,9 +204,6 @@ saved AS (
DELETE FROM intel_invocations
WHERE id IN (SELECT id FROM invocations_with_cohorts);
-- name: GetIntelInvocationSummaries :many
SELECT * FROM intel_invocation_summaries;
-- name: GetIntelReportGitRemotes :many
-- Get the total amount of time spent invoking commands
-- in the directories of a given git remote URL.
@ -235,7 +220,7 @@ FROM
WHERE
starts_at >= @starts_at
AND
(CARDINALITY(@cohort_ids :: uuid []) = 0 OR cohort_id = ANY(@cohort_ids :: uuid []))
(cohort_id = ANY(@cohort_ids :: uuid []))
GROUP BY
starts_at,
ends_at,
@ -261,6 +246,6 @@ FROM
WHERE
starts_at >= @starts_at
AND
(CARDINALITY(@cohort_ids :: uuid []) = 0 OR cohort_id = ANY(@cohort_ids :: uuid []))
(cohort_id = ANY(@cohort_ids :: uuid []))
GROUP BY
starts_at, ends_at, cohort_id, binary_name, binary_args;

View File

@ -100,7 +100,7 @@ func (s *server) invocationQueueLoop() {
workingDirs := make([]string, 0, len(i))
gitRemoteURLs := make([]string, 0, len(i))
exitCodes := make([]int32, 0, len(i))
durationsMS := make([]int32, 0, len(i))
durationsMS := make([]float64, 0, len(i))
for _, invocation := range i {
ids = append(ids, uuid.New())
@ -114,7 +114,7 @@ func (s *server) invocationQueueLoop() {
workingDirs = append(workingDirs, invocation.WorkingDirectory)
gitRemoteURLs = append(gitRemoteURLs, invocation.GitRemoteUrl)
exitCodes = append(exitCodes, invocation.ExitCode)
durationsMS = append(durationsMS, int32(invocation.DurationMs))
durationsMS = append(durationsMS, float64(invocation.DurationMs))
}
binaryArgsData, _ := json.Marshal(binaryArgs)

View File

@ -75,6 +75,6 @@ The directory where binaries are aliased to and overridden in the $PATH so they
| ----------- | --------------------------------------------- |
| Type | <code>string</code> |
| Environment | <code>$CODER_INTEL_DAEMON_INSTANCE_ID</code> |
| Default | <code>521a9a1a77a34604bbdece92a9ac815d</code> |
| Default | <code>a1acbdbfe2274834a19f3682db50dc2c</code> |
The instance ID of the machine running the intel daemon. This is used to identify the machine.