Finally 😢 fix intel insertion

Kyle Carberry 2024-04-24 01:23:34 +00:00
parent 9b7e1352e3
commit 8fc28a3e2f
26 changed files with 870 additions and 377 deletions

View File

@ -78,6 +78,7 @@ DOCKER_ARCHES := amd64 arm64 armv7
# Computed variables based on the above.
CODER_SLIM_BINARIES := $(addprefix build/coder-slim_$(VERSION)_,$(OS_ARCHES))
CODER_FAT_BINARIES := $(addprefix build/coder_$(VERSION)_,$(OS_ARCHES))
CODER_INTEL_BINARIES := $(addprefix build/coder-intel-invoke_$(VERSION)_,$(OS_ARCHES))
CODER_ALL_BINARIES := $(CODER_SLIM_BINARIES) $(CODER_FAT_BINARIES)
CODER_TAR_GZ_ARCHIVES := $(foreach os_arch, $(ARCHIVE_TAR_GZ), build/coder_$(VERSION)_$(os_arch).tar.gz)
CODER_ZIP_ARCHIVES := $(foreach os_arch, $(ARCHIVE_ZIP), build/coder_$(VERSION)_$(os_arch).zip)
@ -89,6 +90,7 @@ CODER_MAIN_IMAGE := build/coder_$(VERSION)_linux.tag
CODER_SLIM_NOVERSION_BINARIES := $(addprefix build/coder-slim_,$(OS_ARCHES))
CODER_FAT_NOVERSION_BINARIES := $(addprefix build/coder_,$(OS_ARCHES))
CODER_INTEL_NOVERSION_BINARIES := $(addprefix build/coder-intel-invoke_,$(OS_ARCHES))
CODER_ALL_NOVERSION_IMAGES := $(foreach arch, $(DOCKER_ARCHES), build/coder_linux_$(arch).tag) build/coder_linux.tag
CODER_ALL_NOVERSION_IMAGES_PUSHED := $(addprefix push/, $(CODER_ALL_NOVERSION_IMAGES))
@ -121,15 +123,15 @@ build-fat build-full build: $(CODER_FAT_BINARIES)
release: $(CODER_FAT_BINARIES) $(CODER_ALL_ARCHIVES) $(CODER_ALL_PACKAGES) $(CODER_ARCH_IMAGES) build/coder_helm_$(VERSION).tgz
.PHONY: release
build/coder-slim_$(VERSION)_checksums.sha1: site/out/bin/coder.sha1
build/coder-embed_$(VERSION)_checksums.sha1: site/out/bin/coder.sha1
cp "$<" "$@"
site/out/bin/coder.sha1: $(CODER_SLIM_BINARIES)
site/out/bin/coder.sha1: $(CODER_SLIM_BINARIES) $(CODER_INTEL_BINARIES)
pushd ./site/out/bin
openssl dgst -r -sha1 coder-* | tee coder.sha1
popd
build/coder-slim_$(VERSION).tar: build/coder-slim_$(VERSION)_checksums.sha1 $(CODER_SLIM_BINARIES)
build/coder-embed_$(VERSION).tar: build/coder-embed_$(VERSION)_checksums.sha1 $(CODER_SLIM_BINARIES) $(CODER_INTEL_BINARIES)
pushd ./site/out/bin
tar cf "../../../build/$(@F)" coder-*
popd
@ -137,16 +139,16 @@ build/coder-slim_$(VERSION).tar: build/coder-slim_$(VERSION)_checksums.sha1 $(CO
# delete the uncompressed binaries from the embedded dir
rm -f site/out/bin/coder-*
site/out/bin/coder.tar.zst: build/coder-slim_$(VERSION).tar.zst
site/out/bin/coder.tar.zst: build/coder-embed_$(VERSION).tar.zst
cp "$<" "$@"
build/coder-slim_$(VERSION).tar.zst: build/coder-slim_$(VERSION).tar
build/coder-embed_$(VERSION).tar.zst: build/coder-embed_$(VERSION).tar
zstd $(ZSTDFLAGS) \
--force \
--long \
--no-progress \
-o "build/coder-slim_$(VERSION).tar.zst" \
"build/coder-slim_$(VERSION).tar"
-o "build/coder-embed_$(VERSION).tar.zst" \
"build/coder-embed_$(VERSION).tar"
# Redirect from version-less targets to the versioned ones. There is a similar
# target for slim binaries below.
@ -167,6 +169,15 @@ $(CODER_SLIM_NOVERSION_BINARIES): build/coder-slim_%: build/coder-slim_$(VERSION
rm -f "$@"
ln "$<" "$@"
# Same as above, but for intel binaries.
#
# Called like this:
# make build/coder-intel-invoke_linux_amd64
# make build/coder-intel-invoke_windows_amd64.exe
$(CODER_INTEL_NOVERSION_BINARIES): build/coder-intel-invoke_%: build/coder-intel-invoke_$(VERSION)_%
rm -f "$@"
ln "$<" "$@"
# "fat" binaries always depend on the site and the compressed slim binaries.
$(CODER_FAT_BINARIES): \
site/out/index.html \
@ -191,6 +202,29 @@ define get-mode-os-arch-ext =
fi
endef
$(CODER_INTEL_BINARIES): go.mod go.sum cmd/coder-intel-invoke/main.go
$(get-mode-os-arch-ext)
if [[ "$$os" == "windows" ]] && [[ "$$ext" != exe ]]; then
echo "ERROR: Windows binaries must have an .exe extension." 1>&2
exit 1
fi
# Determine GOARM.
goarm=""
if [[ "$$arch" == "arm" ]]; then
goarm="7"
elif [[ "$$arch" == "armv"* ]] || [[ "$$arch" == "arm64v"* ]]; then
goarm="$${arch//*v/}"
arch="$${arch//v*/}"
fi
GOOS=$$os GOARCH=$$arch GOARM=$$goarm go build -o "$@" -ldflags "-s -w" ./cmd/coder-intel-invoke
dot_ext=""
if [[ "$$ext" != "" ]]; then
dot_ext=".$$ext"
fi
cp "$@" "./site/out/bin/coder-intel-invoke-$$os-$$arch$$dot_ext"
# This task handles all builds, for both "fat" and "slim" binaries. It parses
# the target name to get the metadata for the build, so it must be specified in
# this format:

View File

@ -3,8 +3,10 @@ package cli
import (
"context"
"fmt"
"net/http"
"os"
"path/filepath"
"runtime"
"github.com/spf13/afero"
"golang.org/x/xerrors"
@ -77,17 +79,40 @@ func (r *RootCmd) intelDaemonStart() *serpent.Command {
defer closeLogger()
}
logger.Info(ctx, "starting intel daemon")
logger.Info(ctx, "starting intel daemon", slog.F("invoke_directory", invokeDirectory))
srv := inteld.New(inteld.Options{
Dialer: func(ctx context.Context) (proto.DRPCIntelDaemonClient, error) {
return client.ServeIntelDaemon(ctx, codersdk.ServeIntelDaemonRequest{})
Dialer: func(ctx context.Context, hostInfo codersdk.IntelDaemonHostInfo) (proto.DRPCIntelDaemonClient, error) {
return client.ServeIntelDaemon(ctx, codersdk.ServeIntelDaemonRequest{
IntelDaemonHostInfo: hostInfo,
})
},
Logger: logger,
Filesystem: afero.NewOsFs(),
InvokeBinaryDownloader: func(ctx context.Context, etag string) (*http.Response, error) {
binPath := fmt.Sprintf("/bin/coder-intel-invoke-%s-%s", runtime.GOOS, runtime.GOARCH)
if runtime.GOOS == "windows" {
binPath += ".exe"
}
binURL, err := client.URL.Parse(binPath)
if err != nil {
return nil, err
}
req, err := http.NewRequestWithContext(ctx, http.MethodGet, binURL.String(), nil)
if err != nil {
return nil, err
}
return client.HTTPClient.Do(req)
},
Logger: logger,
Filesystem: afero.NewOsFs(),
InvokeBinary: "/tmp/bypass",
InvokeDirectory: invokeDirectory,
})
defer srv.Close()
closeListen, err := proto.ListenForInvocations(srv.ReportInvocation)
if err != nil {
return xerrors.Errorf("listen for invocations: %w", err)
}
defer closeListen.Close()
waitForReporting := false
var exitErr error
@ -110,6 +135,11 @@ func (r *RootCmd) intelDaemonStart() *serpent.Command {
// TODO: Make this work!
_ = waitForReporting
err = closeListen.Close()
if err != nil {
return xerrors.Errorf("close listen: %w", err)
}
err = srv.Close()
if err != nil {
return xerrors.Errorf("shutdown: %w", err)

View File

@ -2,7 +2,8 @@ package main
import (
"context"
"net"
"errors"
"fmt"
"os"
"os/exec"
"path/filepath"
@ -10,8 +11,7 @@ import (
"strings"
"time"
"storj.io/drpc/drpcconn"
"tailscale.com/safesocket"
"golang.org/x/xerrors"
"github.com/coder/coder/v2/inteld/proto"
)
@ -19,28 +19,17 @@ import (
func main() {
runtime.LockOSThread()
runtime.GOMAXPROCS(1)
_ = run(context.Background())
err := run(context.Background())
if err != nil && os.Getenv("CODER_INTEL_INVOKE_DEBUG") != "" {
_, _ = fmt.Printf("error: %v\n", err)
os.Exit(1)
}
}
func run(ctx context.Context) error {
// go = os.Args[0]
//
// start := time.Now()
// addr := os.Getenv("CODER_INTEL_DAEMON_ADDRESS")
// if addr == "" {
// addr = "localhost:13337"
// }
safesocket.Connect(safesocket.DefaultConnectionStrategy("asdasd"))
c, _ := net.Dial("tcp", "localhost:3000")
// ctx, cancelFunc := context.WithCancel(context.Background())
pathParts := filepath.SplitList(os.Getenv("PATH"))
currentPath, err := exec.LookPath(os.Args[0])
baseName := filepath.Base(os.Args[0])
currentPath, err := exec.LookPath(baseName)
if err != nil {
return err
}
@ -51,37 +40,50 @@ func run(ctx context.Context) error {
break
}
}
os.Setenv("PATH", strings.Join(pathParts, string(filepath.ListSeparator)))
currentPath, err = exec.LookPath(os.Args[0])
err = os.Setenv("PATH", strings.Join(pathParts, string(filepath.ListSeparator)))
if err != nil {
return err
}
currentPath, err = exec.LookPath(baseName)
if err != nil {
return err
}
currentPath, err = filepath.Abs(currentPath)
if err != nil {
return err
}
currentExec, err := os.Executable()
if err != nil {
return err
}
if currentPath == currentExec {
return xerrors.New("supposed to be linked")
}
wd, err := os.Getwd()
if err != nil {
return err
}
start := time.Now()
//nolint:gosec
cmd := exec.CommandContext(ctx, os.Args[0], os.Args[1:]...)
cmd.Stdout = os.Stdout
cmd.Stdin = os.Stdin
cmd.Stderr = os.Stderr
start := time.Now()
err = cmd.Run()
end := time.Now()
exitCode := 0
if err != nil {
if e, ok := err.(*exec.ExitError); ok {
exitCode = e.ExitCode()
var exitError *exec.ExitError
if errors.As(err, &exitError) {
exitCode = exitError.ExitCode()
}
}
client := proto.NewDRPCIntelClientClient(drpcconn.New(c))
_, err = client.ReportInvocation(ctx, &proto.ReportInvocationRequest{
return proto.ReportInvocation(&proto.ReportInvocationRequest{
ExecutablePath: currentPath,
Arguments: os.Args[1:],
DurationMs: time.Since(start).Milliseconds(),
DurationMs: end.Sub(start).Milliseconds(),
ExitCode: int32(exitCode),
WorkingDirectory: wd,
})
return nil
}
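The wrapper above times the real command with `time.Now()` around `cmd.Run()` and pulls the exit status out of `*exec.ExitError` via `errors.As` before reporting through `proto.ReportInvocation`. A self-contained sketch of that measure-and-report pattern, with a hypothetical `printReport` helper standing in for the real proto call:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"os"
	"os/exec"
	"time"
)

// printReport stands in for proto.ReportInvocation; it only shows
// which fields the wrapper would send for each invocation.
func printReport(path string, args []string, durationMs int64, exitCode int, wd string) {
	fmt.Printf("path=%s args=%v duration_ms=%d exit_code=%d wd=%s\n",
		path, args, durationMs, exitCode, wd)
}

func main() {
	ctx := context.Background()
	wd, _ := os.Getwd()

	// Run the real binary, timing the call.
	start := time.Now()
	cmd := exec.CommandContext(ctx, "go", "version")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	err := cmd.Run()
	end := time.Now()

	// Extract the exit code the same way the wrapper does.
	exitCode := 0
	var exitError *exec.ExitError
	if errors.As(err, &exitError) {
		exitCode = exitError.ExitCode()
	}
	printReport("go", []string{"version"}, end.Sub(start).Milliseconds(), exitCode, wd)
}
```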

View File

@ -148,6 +148,9 @@ type data struct {
gitSSHKey []database.GitSSHKey
groupMembers []database.GroupMember
groups []database.Group
intelCohorts []database.IntelCohort
intelMachines []database.IntelMachine
intelInvocations []database.IntelInvocation
jfrogXRayScans []database.JfrogXrayScan
licenses []database.License
oauth2ProviderApps []database.OAuth2ProviderApp
@ -2368,8 +2371,51 @@ func (q *FakeQuerier) GetHungProvisionerJobs(_ context.Context, hungSince time.T
return hungJobs, nil
}
func (q *FakeQuerier) GetIntelCohortsMatchedByMachineIDs(ctx context.Context, ids []uuid.UUID) ([]database.GetIntelCohortsMatchedByMachineIDsRow, error) {
panic("not implemented")
func (q *FakeQuerier) GetIntelCohortsMatchedByMachineIDs(_ context.Context, ids []uuid.UUID) ([]database.GetIntelCohortsMatchedByMachineIDsRow, error) {
q.mutex.RLock()
defer q.mutex.RUnlock()
machines := make([]database.IntelMachine, 0)
for _, m := range q.intelMachines {
if slices.Contains(ids, m.ID) {
machines = append(machines, m)
}
}
rows := make([]database.GetIntelCohortsMatchedByMachineIDsRow, 0)
for _, cohort := range q.intelCohorts {
filterOS, err := regexp.CompilePOSIX(cohort.FilterRegexOperatingSystem)
if err != nil {
return nil, err
}
filterOSVersion, err := regexp.CompilePOSIX(cohort.FilterRegexOperatingSystemVersion)
if err != nil {
return nil, err
}
filterArch, err := regexp.CompilePOSIX(cohort.FilterRegexArchitecture)
if err != nil {
return nil, err
}
filterInstanceID, err := regexp.CompilePOSIX(cohort.FilterRegexInstanceID)
if err != nil {
return nil, err
}
for _, machine := range machines {
row := database.GetIntelCohortsMatchedByMachineIDsRow{
ID: cohort.ID,
TrackedExecutables: cohort.TrackedExecutables,
MachineID: machine.ID,
OperatingSystemMatch: filterOS.MatchString(machine.OperatingSystem),
ArchitectureMatch: filterArch.MatchString(machine.Architecture),
InstanceIDMatch: filterInstanceID.MatchString(machine.InstanceID),
}
if machine.OperatingSystemVersion.Valid {
row.OperatingSystemVersionMatch = filterOSVersion.MatchString(machine.OperatingSystemVersion.String)
}
rows = append(rows, row)
}
}
return rows, nil
}
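PostgreSQL's `~` operator performs a POSIX regular-expression match, which is why the fake store compiles each cohort filter with `regexp.CompilePOSIX`; and because the filters now default to `.*` (see the schema changes below), a cohort with untouched filters matches every machine. A small sketch of that matching behaviour:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// ".*" is the new NOT NULL default for every cohort filter, so an
	// unfiltered cohort matches any operating system, arch, or instance ID.
	matchAll := regexp.MustCompilePOSIX(".*")
	onlyLinux := regexp.MustCompilePOSIX("^linux$")

	for _, goos := range []string{"linux", "darwin", "windows"} {
		fmt.Printf("%-7s match-all=%v only-linux=%v\n",
			goos, matchAll.MatchString(goos), onlyLinux.MatchString(goos))
	}
}
```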
func (q *FakeQuerier) GetJFrogXrayScanByWorkspaceAndAgentID(_ context.Context, arg database.GetJFrogXrayScanByWorkspaceAndAgentIDParams) (database.JfrogXrayScan, error) {
@ -5859,22 +5905,59 @@ func (q *FakeQuerier) InsertGroupMember(_ context.Context, arg database.InsertGr
return nil
}
func (q *FakeQuerier) InsertIntelCohort(ctx context.Context, arg database.InsertIntelCohortParams) (database.IntelCohort, error) {
func (q *FakeQuerier) InsertIntelCohort(_ context.Context, arg database.InsertIntelCohortParams) (database.IntelCohort, error) {
err := validateDatabaseType(arg)
if err != nil {
return err
return database.IntelCohort{}, err
}
panic("not implemented")
q.mutex.Lock()
defer q.mutex.Unlock()
cohort := database.IntelCohort{
ID: arg.ID,
OrganizationID: arg.OrganizationID,
CreatedBy: arg.CreatedBy,
CreatedAt: arg.CreatedAt,
UpdatedAt: arg.UpdatedAt,
DisplayName: arg.DisplayName,
Description: arg.Description,
FilterRegexOperatingSystem: arg.FilterRegexOperatingSystem,
FilterRegexOperatingSystemVersion: arg.FilterRegexOperatingSystemVersion,
FilterRegexArchitecture: arg.FilterRegexArchitecture,
FilterRegexInstanceID: arg.FilterRegexInstanceID,
TrackedExecutables: arg.TrackedExecutables,
}
q.intelCohorts = append(q.intelCohorts, cohort)
return cohort, nil
}
func (q *FakeQuerier) InsertIntelInvocations(ctx context.Context, arg database.InsertIntelInvocationsParams) error {
func (q *FakeQuerier) InsertIntelInvocations(_ context.Context, arg database.InsertIntelInvocationsParams) error {
err := validateDatabaseType(arg)
if err != nil {
return err
}
panic("not implemented")
q.mutex.Lock()
defer q.mutex.Unlock()
var binaryArgs []json.RawMessage
_ = json.Unmarshal(arg.BinaryArgs, &binaryArgs)
for i, id := range arg.ID {
q.intelInvocations = append(q.intelInvocations, database.IntelInvocation{
ID: id,
CreatedAt: arg.CreatedAt,
MachineID: arg.MachineID,
UserID: arg.UserID,
BinaryHash: arg.BinaryHash[i],
BinaryPath: arg.BinaryPath[i],
BinaryArgs: binaryArgs[i],
BinaryVersion: arg.BinaryVersion[i],
WorkingDirectory: arg.WorkingDirectory[i],
GitRemoteUrl: arg.GitRemoteUrl[i],
DurationMs: arg.DurationMs[i],
})
}
return nil
}
func (q *FakeQuerier) InsertLicense(
@ -8217,8 +8300,8 @@ func (q *FakeQuerier) UpsertDefaultProxy(_ context.Context, arg database.UpsertD
}
func (q *FakeQuerier) UpsertHealthSettings(_ context.Context, data string) error {
q.mutex.RLock()
defer q.mutex.RUnlock()
q.mutex.Lock()
defer q.mutex.Unlock()
q.healthSettings = []byte(data)
return nil
@ -8230,7 +8313,43 @@ func (q *FakeQuerier) UpsertIntelMachine(ctx context.Context, arg database.Upser
return database.IntelMachine{}, err
}
panic("not implemented")
q.mutex.Lock()
defer q.mutex.Unlock()
for i, machine := range q.intelMachines {
if machine.UserID == arg.UserID && machine.InstanceID == arg.InstanceID {
machine.UpdatedAt = arg.UpdatedAt
machine.IPAddress = arg.IPAddress
machine.Hostname = arg.Hostname
machine.OperatingSystem = arg.OperatingSystem
machine.OperatingSystemVersion = arg.OperatingSystemVersion
machine.CPUCores = arg.CPUCores
machine.MemoryMBTotal = arg.MemoryMBTotal
machine.Architecture = arg.Architecture
machine.DaemonVersion = arg.DaemonVersion
q.intelMachines[i] = machine
return machine, nil
}
}
machine := database.IntelMachine{
ID: arg.ID,
CreatedAt: arg.CreatedAt,
UpdatedAt: arg.UpdatedAt,
InstanceID: arg.InstanceID,
OrganizationID: arg.OrganizationID,
UserID: arg.UserID,
IPAddress: arg.IPAddress,
Hostname: arg.Hostname,
OperatingSystem: arg.OperatingSystem,
OperatingSystemVersion: arg.OperatingSystemVersion,
CPUCores: arg.CPUCores,
MemoryMBTotal: arg.MemoryMBTotal,
Architecture: arg.Architecture,
DaemonVersion: arg.DaemonVersion,
}
q.intelMachines = append(q.intelMachines, machine)
return machine, nil
}
func (q *FakeQuerier) UpsertJFrogXrayScanByWorkspaceAndAgentID(_ context.Context, arg database.UpsertJFrogXrayScanByWorkspaceAndAgentIDParams) error {

View File

@ -3131,11 +3131,12 @@ func (mr *MockStoreMockRecorder) InsertGroupMember(arg0, arg1 any) *gomock.Call
}
// InsertIntelCohort mocks base method.
func (m *MockStore) InsertIntelCohort(arg0 context.Context, arg1 database.InsertIntelCohortParams) error {
func (m *MockStore) InsertIntelCohort(arg0 context.Context, arg1 database.InsertIntelCohortParams) (database.IntelCohort, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "InsertIntelCohort", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
ret0, _ := ret[0].(database.IntelCohort)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// InsertIntelCohort indicates an expected call of InsertIntelCohort.
@ -3144,6 +3145,20 @@ func (mr *MockStoreMockRecorder) InsertIntelCohort(arg0, arg1 any) *gomock.Call
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertIntelCohort", reflect.TypeOf((*MockStore)(nil).InsertIntelCohort), arg0, arg1)
}
// InsertIntelInvocations mocks base method.
func (m *MockStore) InsertIntelInvocations(arg0 context.Context, arg1 database.InsertIntelInvocationsParams) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "InsertIntelInvocations", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// InsertIntelInvocations indicates an expected call of InsertIntelInvocations.
func (mr *MockStoreMockRecorder) InsertIntelInvocations(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertIntelInvocations", reflect.TypeOf((*MockStore)(nil).InsertIntelInvocations), arg0, arg1)
}
// InsertLicense mocks base method.
func (m *MockStore) InsertLicense(arg0 context.Context, arg1 database.InsertLicenseParams) (database.License, error) {
m.ctrl.T.Helper()

View File

@ -488,10 +488,11 @@ CREATE TABLE intel_cohorts (
updated_at timestamp with time zone NOT NULL,
display_name text NOT NULL,
description text NOT NULL,
filter_regex_operating_system character varying(255),
filter_regex_operating_system_version character varying(255),
filter_regex_architecture character varying(255),
filter_regex_git_remote_url character varying(255),
filter_regex_operating_system character varying(255) DEFAULT '.*'::character varying NOT NULL,
filter_regex_operating_system_version character varying(255) DEFAULT '.*'::character varying NOT NULL,
filter_regex_architecture character varying(255) DEFAULT '.*'::character varying NOT NULL,
filter_regex_git_remote_url character varying(255) DEFAULT '.*'::character varying NOT NULL,
filter_regex_instance_id character varying(255) DEFAULT '.*'::character varying NOT NULL,
tracked_executables text[] NOT NULL
);
@ -509,13 +510,13 @@ CREATE TABLE intel_git_commits (
);
CREATE TABLE intel_invocations (
id uuid DEFAULT gen_random_uuid() NOT NULL,
id uuid NOT NULL,
created_at timestamp with time zone NOT NULL,
machine_id uuid NOT NULL,
user_id uuid NOT NULL,
binary_hash text NOT NULL,
binary_path text NOT NULL,
binary_args text[] NOT NULL,
binary_args jsonb NOT NULL,
binary_version text NOT NULL,
working_directory text NOT NULL,
git_remote_url text NOT NULL,
@ -546,8 +547,7 @@ CREATE TABLE intel_machines (
architecture character varying(255) NOT NULL,
daemon_version character varying(255) NOT NULL,
git_config_email character varying(255),
git_config_name character varying(255),
tags character varying(64)[]
git_config_name character varying(255)
);
COMMENT ON COLUMN intel_machines.operating_system IS 'GOOS';
@ -562,8 +562,6 @@ COMMENT ON COLUMN intel_machines.git_config_email IS 'git config --get user.emai
COMMENT ON COLUMN intel_machines.git_config_name IS 'git config --get user.name';
COMMENT ON COLUMN intel_machines.tags IS 'Arbitrary user-defined tags. e.g. "coder-v1" or "coder-v2"';
CREATE TABLE jfrog_xray_scans (
agent_id uuid NOT NULL,
workspace_id uuid NOT NULL,
@ -1518,15 +1516,15 @@ ALTER TABLE ONLY intel_cohorts
ALTER TABLE ONLY intel_git_commits
ADD CONSTRAINT intel_git_commits_pkey PRIMARY KEY (id);
ALTER TABLE ONLY intel_invocations
ADD CONSTRAINT intel_invocations_pkey PRIMARY KEY (id);
ALTER TABLE ONLY intel_machine_executables
ADD CONSTRAINT intel_machine_executables_pkey PRIMARY KEY (machine_id, user_id, hash);
ALTER TABLE ONLY intel_machines
ADD CONSTRAINT intel_machines_pkey PRIMARY KEY (id);
ALTER TABLE ONLY intel_machines
ADD CONSTRAINT intel_machines_user_id_instance_id_key UNIQUE (user_id, instance_id);
ALTER TABLE ONLY jfrog_xray_scans
ADD CONSTRAINT jfrog_xray_scans_pkey PRIMARY KEY (agent_id, workspace_id);

View File

@ -7,10 +7,11 @@ CREATE TABLE intel_cohorts (
display_name TEXT NOT NULL,
description TEXT NOT NULL,
filter_regex_operating_system VARCHAR(255),
filter_regex_operating_system_version VARCHAR(255),
filter_regex_architecture VARCHAR(255),
filter_regex_git_remote_url VARCHAR(255),
filter_regex_operating_system VARCHAR(255) NOT NULL DEFAULT '.*',
filter_regex_operating_system_version VARCHAR(255) NOT NULL DEFAULT '.*',
filter_regex_architecture VARCHAR(255) NOT NULL DEFAULT '.*',
filter_regex_git_remote_url VARCHAR(255) NOT NULL DEFAULT '.*',
filter_regex_instance_id VARCHAR(255) NOT NULL DEFAULT '.*',
tracked_executables TEXT[] NOT NULL
);
@ -32,7 +33,7 @@ CREATE TABLE intel_machines (
daemon_version VARCHAR(255) NOT NULL,
git_config_email VARCHAR(255),
git_config_name VARCHAR(255),
tags VARCHAR(64)[]
UNIQUE (user_id, instance_id)
);
COMMENT ON COLUMN intel_machines.operating_system IS 'GOOS';
@ -41,16 +42,15 @@ COMMENT ON COLUMN intel_machines.architecture IS 'GOARCH. e.g. amd64';
COMMENT ON COLUMN intel_machines.daemon_version IS 'Version of the daemon running on the machine';
COMMENT ON COLUMN intel_machines.git_config_email IS 'git config --get user.email';
COMMENT ON COLUMN intel_machines.git_config_name IS 'git config --get user.name';
COMMENT ON COLUMN intel_machines.tags IS 'Arbitrary user-defined tags. e.g. "coder-v1" or "coder-v2"';
CREATE TABLE intel_invocations (
id uuid PRIMARY KEY DEFAULT gen_random_uuid(),
id uuid NOT NULL,
created_at TIMESTAMPTZ NOT NULL,
machine_id UUID NOT NULL REFERENCES intel_machines(id) ON DELETE CASCADE,
user_id UUID NOT NULL,
machine_id uuid NOT NULL REFERENCES intel_machines(id) ON DELETE CASCADE,
user_id uuid NOT NULL,
binary_hash TEXT NOT NULL,
binary_path TEXT NOT NULL,
binary_args TEXT[] NOT NULL,
binary_args jsonb NOT NULL,
binary_version TEXT NOT NULL,
working_directory TEXT NOT NULL,
git_remote_url TEXT NOT NULL,

View File

@ -1847,18 +1847,19 @@ type GroupMember struct {
}
type IntelCohort struct {
ID uuid.UUID `db:"id" json:"id"`
OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
CreatedBy uuid.UUID `db:"created_by" json:"created_by"`
CreatedAt time.Time `db:"created_at" json:"created_at"`
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
DisplayName string `db:"display_name" json:"display_name"`
Description string `db:"description" json:"description"`
FilterRegexOperatingSystem sql.NullString `db:"filter_regex_operating_system" json:"filter_regex_operating_system"`
FilterRegexOperatingSystemVersion sql.NullString `db:"filter_regex_operating_system_version" json:"filter_regex_operating_system_version"`
FilterRegexArchitecture sql.NullString `db:"filter_regex_architecture" json:"filter_regex_architecture"`
FilterRegexGitRemoteUrl sql.NullString `db:"filter_regex_git_remote_url" json:"filter_regex_git_remote_url"`
TrackedExecutables []string `db:"tracked_executables" json:"tracked_executables"`
ID uuid.UUID `db:"id" json:"id"`
OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
CreatedBy uuid.UUID `db:"created_by" json:"created_by"`
CreatedAt time.Time `db:"created_at" json:"created_at"`
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
DisplayName string `db:"display_name" json:"display_name"`
Description string `db:"description" json:"description"`
FilterRegexOperatingSystem string `db:"filter_regex_operating_system" json:"filter_regex_operating_system"`
FilterRegexOperatingSystemVersion string `db:"filter_regex_operating_system_version" json:"filter_regex_operating_system_version"`
FilterRegexArchitecture string `db:"filter_regex_architecture" json:"filter_regex_architecture"`
FilterRegexGitRemoteUrl string `db:"filter_regex_git_remote_url" json:"filter_regex_git_remote_url"`
FilterRegexInstanceID string `db:"filter_regex_instance_id" json:"filter_regex_instance_id"`
TrackedExecutables []string `db:"tracked_executables" json:"tracked_executables"`
}
type IntelGitCommit struct {
@ -1875,17 +1876,17 @@ type IntelGitCommit struct {
}
type IntelInvocation struct {
ID uuid.UUID `db:"id" json:"id"`
CreatedAt time.Time `db:"created_at" json:"created_at"`
MachineID uuid.UUID `db:"machine_id" json:"machine_id"`
UserID uuid.UUID `db:"user_id" json:"user_id"`
BinaryHash string `db:"binary_hash" json:"binary_hash"`
BinaryPath string `db:"binary_path" json:"binary_path"`
BinaryArgs []string `db:"binary_args" json:"binary_args"`
BinaryVersion string `db:"binary_version" json:"binary_version"`
WorkingDirectory string `db:"working_directory" json:"working_directory"`
GitRemoteUrl string `db:"git_remote_url" json:"git_remote_url"`
DurationMs int32 `db:"duration_ms" json:"duration_ms"`
ID uuid.UUID `db:"id" json:"id"`
CreatedAt time.Time `db:"created_at" json:"created_at"`
MachineID uuid.UUID `db:"machine_id" json:"machine_id"`
UserID uuid.UUID `db:"user_id" json:"user_id"`
BinaryHash string `db:"binary_hash" json:"binary_hash"`
BinaryPath string `db:"binary_path" json:"binary_path"`
BinaryArgs json.RawMessage `db:"binary_args" json:"binary_args"`
BinaryVersion string `db:"binary_version" json:"binary_version"`
WorkingDirectory string `db:"working_directory" json:"working_directory"`
GitRemoteUrl string `db:"git_remote_url" json:"git_remote_url"`
DurationMs int32 `db:"duration_ms" json:"duration_ms"`
}
type IntelMachine struct {
@ -1900,9 +1901,9 @@ type IntelMachine struct {
// GOOS
OperatingSystem string `db:"operating_system" json:"operating_system"`
OperatingSystemVersion sql.NullString `db:"operating_system_version" json:"operating_system_version"`
CpuCores int32 `db:"cpu_cores" json:"cpu_cores"`
CPUCores int32 `db:"cpu_cores" json:"cpu_cores"`
// in MB
MemoryMbTotal int32 `db:"memory_mb_total" json:"memory_mb_total"`
MemoryMBTotal int32 `db:"memory_mb_total" json:"memory_mb_total"`
// GOARCH. e.g. amd64
Architecture string `db:"architecture" json:"architecture"`
// Version of the daemon running on the machine
@ -1911,8 +1912,6 @@ type IntelMachine struct {
GitConfigEmail sql.NullString `db:"git_config_email" json:"git_config_email"`
// git config --get user.name
GitConfigName sql.NullString `db:"git_config_name" json:"git_config_name"`
// Arbitrary user-defined tags. e.g. "coder-v1" or "coder-v2"
Tags []string `db:"tags" json:"tags"`
}
type IntelMachineExecutable struct {

View File

@ -2918,47 +2918,38 @@ func (q *sqlQuerier) UpsertTemplateUsageStats(ctx context.Context) error {
const getIntelCohortsMatchedByMachineIDs = `-- name: GetIntelCohortsMatchedByMachineIDs :many
WITH machines AS (
SELECT id, created_at, updated_at, instance_id, organization_id, user_id, ip_address, hostname, operating_system, operating_system_version, cpu_cores, memory_mb_total, architecture, daemon_version, git_config_email, git_config_name, tags FROM intel_machines WHERE ids = ANY($1::uuid [])
SELECT id, created_at, updated_at, instance_id, organization_id, user_id, ip_address, hostname, operating_system, operating_system_version, cpu_cores, memory_mb_total, architecture, daemon_version, git_config_email, git_config_name FROM intel_machines WHERE id = ANY($1::uuid [])
),
matches AS (
SELECT
m.id machine_id,
c.id, c.organization_id, c.created_by, c.created_at, c.updated_at, c.display_name, c.description, c.filter_regex_operating_system, c.filter_regex_operating_system_version, c.filter_regex_architecture, c.filter_regex_git_remote_url, c.tracked_executables,
(c.filter_regex_operating_system IS NULL OR c.filter_regex_operating_system ~ m.operating_system) AS operating_system_match,
(c.filter_regex_operating_system_version IS NULL OR c.filter_regex_operating_system_version ~ m.operating_system_version) AS operating_system_version_match,
(c.filter_regex_architecture IS NULL OR c.filter_regex_architecture ~ m.architecture) AS architecture_match,
(c.filter_regex_git_remote_url IS NULL OR c.filter_regex_git_remote_url ~ i.git_remote_url) AS git_remote_url_match
c.id,
c.tracked_executables,
(c.filter_regex_operating_system ~ m.operating_system)::boolean AS operating_system_match,
(c.filter_regex_operating_system_version ~ m.operating_system_version)::boolean AS operating_system_version_match,
(c.filter_regex_architecture ~ m.architecture)::boolean AS architecture_match,
(c.filter_regex_instance_id ~ m.instance_id)::boolean AS instance_id_match
FROM intel_cohorts c
CROSS JOIN machines m
)
SELECT
machine_id, id, organization_id, created_by, created_at, updated_at, display_name, description, filter_regex_operating_system, filter_regex_operating_system_version, filter_regex_architecture, filter_regex_git_remote_url, tracked_executables, operating_system_match, operating_system_version_match, architecture_match, git_remote_url_match
machine_id, id, tracked_executables, operating_system_match, operating_system_version_match, architecture_match, instance_id_match
FROM matches
WHERE
operating_system_match AND
operating_system_version_match AND
architecture_match AND
git_remote_url_match
instance_id_match
`
type GetIntelCohortsMatchedByMachineIDsRow struct {
MachineID uuid.UUID `db:"machine_id" json:"machine_id"`
ID uuid.UUID `db:"id" json:"id"`
OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
CreatedBy uuid.UUID `db:"created_by" json:"created_by"`
CreatedAt time.Time `db:"created_at" json:"created_at"`
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
DisplayName string `db:"display_name" json:"display_name"`
Description string `db:"description" json:"description"`
FilterRegexOperatingSystem sql.NullString `db:"filter_regex_operating_system" json:"filter_regex_operating_system"`
FilterRegexOperatingSystemVersion sql.NullString `db:"filter_regex_operating_system_version" json:"filter_regex_operating_system_version"`
FilterRegexArchitecture sql.NullString `db:"filter_regex_architecture" json:"filter_regex_architecture"`
FilterRegexGitRemoteUrl sql.NullString `db:"filter_regex_git_remote_url" json:"filter_regex_git_remote_url"`
TrackedExecutables []string `db:"tracked_executables" json:"tracked_executables"`
OperatingSystemMatch sql.NullBool `db:"operating_system_match" json:"operating_system_match"`
OperatingSystemVersionMatch sql.NullBool `db:"operating_system_version_match" json:"operating_system_version_match"`
ArchitectureMatch sql.NullBool `db:"architecture_match" json:"architecture_match"`
GitRemoteUrlMatch sql.NullBool `db:"git_remote_url_match" json:"git_remote_url_match"`
MachineID uuid.UUID `db:"machine_id" json:"machine_id"`
ID uuid.UUID `db:"id" json:"id"`
TrackedExecutables []string `db:"tracked_executables" json:"tracked_executables"`
OperatingSystemMatch bool `db:"operating_system_match" json:"operating_system_match"`
OperatingSystemVersionMatch bool `db:"operating_system_version_match" json:"operating_system_version_match"`
ArchitectureMatch bool `db:"architecture_match" json:"architecture_match"`
InstanceIDMatch bool `db:"instance_id_match" json:"instance_id_match"`
}
// Obtains a list of cohorts that a user can track invocations for.
@ -2974,21 +2965,11 @@ func (q *sqlQuerier) GetIntelCohortsMatchedByMachineIDs(ctx context.Context, ids
if err := rows.Scan(
&i.MachineID,
&i.ID,
&i.OrganizationID,
&i.CreatedBy,
&i.CreatedAt,
&i.UpdatedAt,
&i.DisplayName,
&i.Description,
&i.FilterRegexOperatingSystem,
&i.FilterRegexOperatingSystemVersion,
&i.FilterRegexArchitecture,
&i.FilterRegexGitRemoteUrl,
pq.Array(&i.TrackedExecutables),
&i.OperatingSystemMatch,
&i.OperatingSystemVersionMatch,
&i.ArchitectureMatch,
&i.GitRemoteUrlMatch,
&i.InstanceIDMatch,
); err != nil {
return nil, err
}
@ -3004,23 +2985,23 @@ func (q *sqlQuerier) GetIntelCohortsMatchedByMachineIDs(ctx context.Context, ids
}
const insertIntelCohort = `-- name: InsertIntelCohort :one
INSERT INTO intel_cohorts (id, organization_id, created_by, created_at, updated_at, display_name, description, filter_regex_operating_system, filter_regex_operating_system_version, filter_regex_architecture, filter_regex_git_remote_url, tracked_executables)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12) RETURNING id, organization_id, created_by, created_at, updated_at, display_name, description, filter_regex_operating_system, filter_regex_operating_system_version, filter_regex_architecture, filter_regex_git_remote_url, tracked_executables
INSERT INTO intel_cohorts (id, organization_id, created_by, created_at, updated_at, display_name, description, filter_regex_operating_system, filter_regex_operating_system_version, filter_regex_architecture, filter_regex_instance_id, tracked_executables)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12) RETURNING id, organization_id, created_by, created_at, updated_at, display_name, description, filter_regex_operating_system, filter_regex_operating_system_version, filter_regex_architecture, filter_regex_git_remote_url, filter_regex_instance_id, tracked_executables
`
type InsertIntelCohortParams struct {
ID uuid.UUID `db:"id" json:"id"`
OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
CreatedBy uuid.UUID `db:"created_by" json:"created_by"`
CreatedAt time.Time `db:"created_at" json:"created_at"`
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
DisplayName string `db:"display_name" json:"display_name"`
Description string `db:"description" json:"description"`
FilterRegexOperatingSystem sql.NullString `db:"filter_regex_operating_system" json:"filter_regex_operating_system"`
FilterRegexOperatingSystemVersion sql.NullString `db:"filter_regex_operating_system_version" json:"filter_regex_operating_system_version"`
FilterRegexArchitecture sql.NullString `db:"filter_regex_architecture" json:"filter_regex_architecture"`
FilterRegexGitRemoteUrl sql.NullString `db:"filter_regex_git_remote_url" json:"filter_regex_git_remote_url"`
TrackedExecutables []string `db:"tracked_executables" json:"tracked_executables"`
ID uuid.UUID `db:"id" json:"id"`
OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
CreatedBy uuid.UUID `db:"created_by" json:"created_by"`
CreatedAt time.Time `db:"created_at" json:"created_at"`
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
DisplayName string `db:"display_name" json:"display_name"`
Description string `db:"description" json:"description"`
FilterRegexOperatingSystem string `db:"filter_regex_operating_system" json:"filter_regex_operating_system"`
FilterRegexOperatingSystemVersion string `db:"filter_regex_operating_system_version" json:"filter_regex_operating_system_version"`
FilterRegexArchitecture string `db:"filter_regex_architecture" json:"filter_regex_architecture"`
FilterRegexInstanceID string `db:"filter_regex_instance_id" json:"filter_regex_instance_id"`
TrackedExecutables []string `db:"tracked_executables" json:"tracked_executables"`
}
func (q *sqlQuerier) InsertIntelCohort(ctx context.Context, arg InsertIntelCohortParams) (IntelCohort, error) {
@ -3035,7 +3016,7 @@ func (q *sqlQuerier) InsertIntelCohort(ctx context.Context, arg InsertIntelCohor
arg.FilterRegexOperatingSystem,
arg.FilterRegexOperatingSystemVersion,
arg.FilterRegexArchitecture,
arg.FilterRegexGitRemoteUrl,
arg.FilterRegexInstanceID,
pq.Array(arg.TrackedExecutables),
)
var i IntelCohort
@ -3051,6 +3032,7 @@ func (q *sqlQuerier) InsertIntelCohort(ctx context.Context, arg InsertIntelCohor
&i.FilterRegexOperatingSystemVersion,
&i.FilterRegexArchitecture,
&i.FilterRegexGitRemoteUrl,
&i.FilterRegexInstanceID,
pq.Array(&i.TrackedExecutables),
)
return i, err
@ -3058,46 +3040,48 @@ func (q *sqlQuerier) InsertIntelCohort(ctx context.Context, arg InsertIntelCohor
const insertIntelInvocations = `-- name: InsertIntelInvocations :exec
INSERT INTO intel_invocations (
id, created_at, machine_id, user_id, binary_hash, binary_path, binary_args,
created_at, machine_id, user_id, id, binary_hash, binary_path, binary_args,
binary_version, working_directory, git_remote_url, duration_ms)
SELECT
unnest($1 :: uuid[]) as id,
$2 :: timestamptz as created_at,
$3 :: uuid as machine_id,
$4 :: uuid as user_id,
unnest($5 :: text[]) as binary_hash,
unnest($6 :: text[]) as binary_path,
unnest($7 :: text[][]) as binary_args,
unnest($8 :: text[]) as binary_version,
unnest($9 :: text[]) as working_directory,
unnest($10 :: text[]) as git_remote_url,
unnest($11 :: int[]) as duration_ms
$1 :: timestamptz as created_at,
$2 :: uuid as machine_id,
$3 :: uuid as user_id,
unnest($4 :: uuid[ ]) as id,
unnest($5 :: text[ ]) as binary_hash,
unnest($6 :: text[ ]) as binary_path,
-- This has to be jsonb because PostgreSQL cannot unnest
-- multi-dimensional arrays whose inner arrays have different lengths.
jsonb_array_elements($7 :: jsonb) as binary_args,
unnest($8 :: text[ ]) as binary_version,
unnest($9 :: text[ ]) as working_directory,
unnest($10 :: text[ ]) as git_remote_url,
unnest($11 :: int[ ]) as duration_ms
`
type InsertIntelInvocationsParams struct {
ID []uuid.UUID `db:"id" json:"id"`
CreatedAt time.Time `db:"created_at" json:"created_at"`
MachineID uuid.UUID `db:"machine_id" json:"machine_id"`
UserID uuid.UUID `db:"user_id" json:"user_id"`
BinaryHash []string `db:"binary_hash" json:"binary_hash"`
BinaryPath []string `db:"binary_path" json:"binary_path"`
BinaryArgs [][]string `db:"binary_args" json:"binary_args"`
BinaryVersion []string `db:"binary_version" json:"binary_version"`
WorkingDirectory []string `db:"working_directory" json:"working_directory"`
GitRemoteUrl []string `db:"git_remote_url" json:"git_remote_url"`
DurationMs []int32 `db:"duration_ms" json:"duration_ms"`
CreatedAt time.Time `db:"created_at" json:"created_at"`
MachineID uuid.UUID `db:"machine_id" json:"machine_id"`
UserID uuid.UUID `db:"user_id" json:"user_id"`
ID []uuid.UUID `db:"id" json:"id"`
BinaryHash []string `db:"binary_hash" json:"binary_hash"`
BinaryPath []string `db:"binary_path" json:"binary_path"`
BinaryArgs json.RawMessage `db:"binary_args" json:"binary_args"`
BinaryVersion []string `db:"binary_version" json:"binary_version"`
WorkingDirectory []string `db:"working_directory" json:"working_directory"`
GitRemoteUrl []string `db:"git_remote_url" json:"git_remote_url"`
DurationMs []int32 `db:"duration_ms" json:"duration_ms"`
}
// Insert many invocations using unnest
func (q *sqlQuerier) InsertIntelInvocations(ctx context.Context, arg InsertIntelInvocationsParams) error {
_, err := q.db.ExecContext(ctx, insertIntelInvocations,
pq.Array(arg.ID),
arg.CreatedAt,
arg.MachineID,
arg.UserID,
pq.Array(arg.ID),
pq.Array(arg.BinaryHash),
pq.Array(arg.BinaryPath),
pq.Array(arg.BinaryArgs),
arg.BinaryArgs,
pq.Array(arg.BinaryVersion),
pq.Array(arg.WorkingDirectory),
pq.Array(arg.GitRemoteUrl),
@ -3107,8 +3091,8 @@ func (q *sqlQuerier) InsertIntelInvocations(ctx context.Context, arg InsertIntel
}
const upsertIntelMachine = `-- name: UpsertIntelMachine :one
INSERT INTO intel_machines (id, created_at, updated_at, instance_id, organization_id, user_id, ip_address, hostname, operating_system, operating_system_version, cpu_cores, memory_mb_total, architecture, daemon_version, tags)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15)
INSERT INTO intel_machines (id, created_at, updated_at, instance_id, organization_id, user_id, ip_address, hostname, operating_system, operating_system_version, cpu_cores, memory_mb_total, architecture, daemon_version)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)
ON CONFLICT (user_id, instance_id) DO UPDATE SET
updated_at = $3,
ip_address = $7,
@ -3118,9 +3102,8 @@ INSERT INTO intel_machines (id, created_at, updated_at, instance_id, organizatio
cpu_cores = $11,
memory_mb_total = $12,
architecture = $13,
daemon_version = $14,
tags = $15
RETURNING id, created_at, updated_at, instance_id, organization_id, user_id, ip_address, hostname, operating_system, operating_system_version, cpu_cores, memory_mb_total, architecture, daemon_version, git_config_email, git_config_name, tags
daemon_version = $14
RETURNING id, created_at, updated_at, instance_id, organization_id, user_id, ip_address, hostname, operating_system, operating_system_version, cpu_cores, memory_mb_total, architecture, daemon_version, git_config_email, git_config_name
`
type UpsertIntelMachineParams struct {
@ -3134,11 +3117,10 @@ type UpsertIntelMachineParams struct {
Hostname string `db:"hostname" json:"hostname"`
OperatingSystem string `db:"operating_system" json:"operating_system"`
OperatingSystemVersion sql.NullString `db:"operating_system_version" json:"operating_system_version"`
CpuCores int32 `db:"cpu_cores" json:"cpu_cores"`
MemoryMbTotal int32 `db:"memory_mb_total" json:"memory_mb_total"`
CPUCores int32 `db:"cpu_cores" json:"cpu_cores"`
MemoryMBTotal int32 `db:"memory_mb_total" json:"memory_mb_total"`
Architecture string `db:"architecture" json:"architecture"`
DaemonVersion string `db:"daemon_version" json:"daemon_version"`
Tags []string `db:"tags" json:"tags"`
}
func (q *sqlQuerier) UpsertIntelMachine(ctx context.Context, arg UpsertIntelMachineParams) (IntelMachine, error) {
@ -3153,11 +3135,10 @@ func (q *sqlQuerier) UpsertIntelMachine(ctx context.Context, arg UpsertIntelMach
arg.Hostname,
arg.OperatingSystem,
arg.OperatingSystemVersion,
arg.CpuCores,
arg.MemoryMbTotal,
arg.CPUCores,
arg.MemoryMBTotal,
arg.Architecture,
arg.DaemonVersion,
pq.Array(arg.Tags),
)
var i IntelMachine
err := row.Scan(
@ -3171,13 +3152,12 @@ func (q *sqlQuerier) UpsertIntelMachine(ctx context.Context, arg UpsertIntelMach
&i.Hostname,
&i.OperatingSystem,
&i.OperatingSystemVersion,
&i.CpuCores,
&i.MemoryMbTotal,
&i.CPUCores,
&i.MemoryMBTotal,
&i.Architecture,
&i.DaemonVersion,
&i.GitConfigEmail,
&i.GitConfigName,
pq.Array(&i.Tags),
)
return i, err
}

View File

@ -1,10 +1,10 @@
-- name: InsertIntelCohort :one
INSERT INTO intel_cohorts (id, organization_id, created_by, created_at, updated_at, display_name, description, filter_regex_operating_system, filter_regex_operating_system_version, filter_regex_architecture, filter_regex_git_remote_url, tracked_executables)
INSERT INTO intel_cohorts (id, organization_id, created_by, created_at, updated_at, display_name, description, filter_regex_operating_system, filter_regex_operating_system_version, filter_regex_architecture, filter_regex_instance_id, tracked_executables)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12) RETURNING *;
-- name: UpsertIntelMachine :one
INSERT INTO intel_machines (id, created_at, updated_at, instance_id, organization_id, user_id, ip_address, hostname, operating_system, operating_system_version, cpu_cores, memory_mb_total, architecture, daemon_version, tags)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15)
INSERT INTO intel_machines (id, created_at, updated_at, instance_id, organization_id, user_id, ip_address, hostname, operating_system, operating_system_version, cpu_cores, memory_mb_total, architecture, daemon_version)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)
ON CONFLICT (user_id, instance_id) DO UPDATE SET
updated_at = $3,
ip_address = $7,
@ -14,41 +14,43 @@ INSERT INTO intel_machines (id, created_at, updated_at, instance_id, organizatio
cpu_cores = $11,
memory_mb_total = $12,
architecture = $13,
daemon_version = $14,
tags = $15
daemon_version = $14
RETURNING *;
-- name: InsertIntelInvocations :exec
-- Insert many invocations using unnest
INSERT INTO intel_invocations (
id, created_at, machine_id, user_id, binary_hash, binary_path, binary_args,
created_at, machine_id, user_id, id, binary_hash, binary_path, binary_args,
binary_version, working_directory, git_remote_url, duration_ms)
SELECT
unnest(@id :: uuid[]) as id,
@created_at :: timestamptz as created_at,
@machine_id :: uuid as machine_id,
@user_id :: uuid as user_id,
unnest(@binary_hash :: text[]) as binary_hash,
unnest(@binary_path :: text[]) as binary_path,
unnest(@binary_args :: text[][]) as binary_args,
unnest(@binary_version :: text[]) as binary_version,
unnest(@working_directory :: text[]) as working_directory,
unnest(@git_remote_url :: text[]) as git_remote_url,
unnest(@duration_ms :: int[]) as duration_ms;
unnest(@id :: uuid[ ]) as id,
unnest(@binary_hash :: text[ ]) as binary_hash,
unnest(@binary_path :: text[ ]) as binary_path,
-- This has to be jsonb because PostgreSQL cannot unnest
-- multi-dimensional arrays whose inner arrays have different lengths.
jsonb_array_elements(@binary_args :: jsonb) as binary_args,
unnest(@binary_version :: text[ ]) as binary_version,
unnest(@working_directory :: text[ ]) as working_directory,
unnest(@git_remote_url :: text[ ]) as git_remote_url,
unnest(@duration_ms :: int[ ]) as duration_ms;
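As the comment above notes, binary_args travels as a single jsonb value holding one argument array per invocation: `unnest()` flattens a `text[][]` into scalar elements and PostgreSQL arrays must be rectangular, while `jsonb_array_elements()` yields exactly one element per row, advancing in lockstep with the `unnest()` columns. A minimal sketch of the encoding the intel server performs before calling InsertIntelInvocations:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// One entry per invocation; the inner slices may have different lengths,
	// which is exactly what text[][] cannot express once unnest() flattens it.
	binaryArgs := [][]string{
		{"status"},
		{"build", "-o", "coder", "./cmd/coder"},
	}
	data, err := json.Marshal(binaryArgs)
	if err != nil {
		panic(err)
	}
	// Passed as the single jsonb parameter; jsonb_array_elements() then
	// yields one JSON array per row, pairing with the unnest()ed columns.
	fmt.Println(string(data)) // [["status"],["build","-o","coder","./cmd/coder"]]
}
```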
-- name: GetIntelCohortsMatchedByMachineIDs :many
-- Obtains a list of cohorts that a user can track invocations for.
WITH machines AS (
SELECT * FROM intel_machines WHERE ids = ANY(@ids::uuid [])
SELECT * FROM intel_machines WHERE id = ANY(@ids::uuid [])
),
matches AS (
SELECT
m.id machine_id,
c.*,
(c.filter_regex_operating_system IS NULL OR c.filter_regex_operating_system ~ m.operating_system) AS operating_system_match,
(c.filter_regex_operating_system_version IS NULL OR c.filter_regex_operating_system_version ~ m.operating_system_version) AS operating_system_version_match,
(c.filter_regex_architecture IS NULL OR c.filter_regex_architecture ~ m.architecture) AS architecture_match,
(c.filter_regex_git_remote_url IS NULL OR c.filter_regex_git_remote_url ~ i.git_remote_url) AS git_remote_url_match
c.id,
c.tracked_executables,
(c.filter_regex_operating_system ~ m.operating_system)::boolean AS operating_system_match,
(c.filter_regex_operating_system_version ~ m.operating_system_version)::boolean AS operating_system_version_match,
(c.filter_regex_architecture ~ m.architecture)::boolean AS architecture_match,
(c.filter_regex_instance_id ~ m.instance_id)::boolean AS instance_id_match
FROM intel_cohorts c
CROSS JOIN machines m
)
@ -59,4 +61,4 @@ WHERE
operating_system_match AND
operating_system_version_match AND
architecture_match AND
git_remote_url_match;
instance_id_match;

View File

@ -65,6 +65,8 @@ sql:
api_version: APIVersion
avatar_url: AvatarURL
created_by_avatar_url: CreatedByAvatarURL
cpu_cores: CPUCores
memory_mb_total: MemoryMBTotal
dbcrypt_key: DBCryptKey
session_count_vscode: SessionCountVSCode
session_count_jetbrains: SessionCountJetBrains

View File

@ -3,6 +3,8 @@ package database
import (
"database/sql/driver"
"encoding/json"
"fmt"
"strings"
"time"
"github.com/google/uuid"
@ -112,3 +114,48 @@ func (m *StringMapOfInt) Scan(src interface{}) error {
func (m StringMapOfInt) Value() (driver.Value, error) {
return json.Marshal(m)
}
type StringaArray [][]string
func (s *StringaArray) Scan(src interface{}) error {
if src == nil {
return nil
}
switch src := src.(type) {
case []byte:
err := json.Unmarshal(src, s)
if err != nil {
return err
}
default:
return xerrors.Errorf("unsupported Scan, storing driver.Value type %T into type %T", src, s)
}
return nil
}
func (s StringaArray) Value() (driver.Value, error) {
fmt.Printf("Calling custom!\n")
var b strings.Builder
b.WriteString("{")
for i, arr := range s {
if i > 0 {
b.WriteString(",")
}
b.WriteString("{")
for j, str := range arr {
if j > 0 {
b.WriteString(",")
}
// Properly escape string literals
escaped := strings.ReplaceAll(str, "\"", "\\\"")
escaped = strings.ReplaceAll(escaped, ",", "\\,")
b.WriteString(fmt.Sprintf("%q", escaped))
}
b.WriteString("}")
}
b.WriteString("}")
fmt.Printf("Writing %q\n", b.String())
return b.String(), nil
}

View File

@ -21,9 +21,9 @@ const (
UniqueGroupsPkey UniqueConstraint = "groups_pkey" // ALTER TABLE ONLY groups ADD CONSTRAINT groups_pkey PRIMARY KEY (id);
UniqueIntelCohortsPkey UniqueConstraint = "intel_cohorts_pkey" // ALTER TABLE ONLY intel_cohorts ADD CONSTRAINT intel_cohorts_pkey PRIMARY KEY (id);
UniqueIntelGitCommitsPkey UniqueConstraint = "intel_git_commits_pkey" // ALTER TABLE ONLY intel_git_commits ADD CONSTRAINT intel_git_commits_pkey PRIMARY KEY (id);
UniqueIntelInvocationsPkey UniqueConstraint = "intel_invocations_pkey" // ALTER TABLE ONLY intel_invocations ADD CONSTRAINT intel_invocations_pkey PRIMARY KEY (id);
UniqueIntelMachineExecutablesPkey UniqueConstraint = "intel_machine_executables_pkey" // ALTER TABLE ONLY intel_machine_executables ADD CONSTRAINT intel_machine_executables_pkey PRIMARY KEY (machine_id, user_id, hash);
UniqueIntelMachinesPkey UniqueConstraint = "intel_machines_pkey" // ALTER TABLE ONLY intel_machines ADD CONSTRAINT intel_machines_pkey PRIMARY KEY (id);
UniqueIntelMachinesUserIDInstanceIDKey UniqueConstraint = "intel_machines_user_id_instance_id_key" // ALTER TABLE ONLY intel_machines ADD CONSTRAINT intel_machines_user_id_instance_id_key UNIQUE (user_id, instance_id);
UniqueJfrogXrayScansPkey UniqueConstraint = "jfrog_xray_scans_pkey" // ALTER TABLE ONLY jfrog_xray_scans ADD CONSTRAINT jfrog_xray_scans_pkey PRIMARY KEY (agent_id, workspace_id);
UniqueLicensesJWTKey UniqueConstraint = "licenses_jwt_key" // ALTER TABLE ONLY licenses ADD CONSTRAINT licenses_jwt_key UNIQUE (jwt);
UniqueLicensesPkey UniqueConstraint = "licenses_pkey" // ALTER TABLE ONLY licenses ADD CONSTRAINT licenses_pkey PRIMARY KEY (id);

View File

@ -54,7 +54,6 @@ func (api *API) intelDaemonServe(rw http.ResponseWriter, r *http.Request) {
OperatingSystem: query.Get("operating_system"),
OperatingSystemVersion: query.Get("operating_system_version"),
Architecture: query.Get("architecture"),
Tags: query["tags"],
CPUCores: uint16(cpuCores),
MemoryTotalMB: memoryTotalMB,
}
@ -86,11 +85,10 @@ func (api *API) intelDaemonServe(rw http.ResponseWriter, r *http.Request) {
String: hostInfo.OperatingSystemVersion,
Valid: hostInfo.OperatingSystemVersion != "",
},
CpuCores: int32(hostInfo.CPUCores),
MemoryMbTotal: int32(hostInfo.MemoryTotalMB),
CPUCores: int32(hostInfo.CPUCores),
MemoryMBTotal: int32(hostInfo.MemoryTotalMB),
Architecture: hostInfo.Architecture,
DaemonVersion: "",
Tags: hostInfo.Tags,
})
if err != nil {
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
@ -168,7 +166,7 @@ func (api *API) intelDaemonServe(rw http.ResponseWriter, r *http.Request) {
})
err = server.Serve(ctx, session)
srvCancel()
logger.Info(ctx, "provisioner daemon disconnected", slog.Error(err))
logger.Info(ctx, "intel daemon disconnected", slog.Error(err))
if err != nil && !xerrors.Is(err, io.EOF) {
_ = conn.Close(websocket.StatusInternalError, httpapi.WebsocketCloseSprintf("serve: %s", err))
return

View File

@ -2,6 +2,7 @@ package inteldserver
import (
"context"
"encoding/json"
"errors"
"sync"
"time"
@ -89,24 +90,28 @@ func (s *server) invocationQueueLoop() {
defer s.closeWaitGroup.Done()
for {
err := s.invocationQueue.startFlushLoop(s.closeContext, func(i []*proto.Invocation) error {
ids := make([]uuid.UUID, 0, len(i))
ids := make([]uuid.UUID, 0)
binaryHashes := make([]string, 0, len(i))
binaryPaths := make([]string, 0, len(i))
binaryArgs := make([][]string, 0, len(i))
binaryArgs := make([]json.RawMessage, 0, len(i))
binaryVersions := make([]string, 0, len(i))
workingDirs := make([]string, 0, len(i))
gitRemoteURLs := make([]string, 0, len(i))
durationsMS := make([]int32, 0, len(i))
for _, invocation := range i {
ids = append(ids, uuid.New())
binaryHashes = append(binaryHashes, invocation.Executable.Hash)
binaryPaths = append(binaryPaths, invocation.Executable.Path)
binaryArgs = append(binaryArgs, invocation.Arguments)
argsData, _ := json.Marshal(invocation.Arguments)
binaryArgs = append(binaryArgs, argsData)
binaryVersions = append(binaryVersions, invocation.Executable.Version)
workingDirs = append(workingDirs, invocation.WorkingDirectory)
gitRemoteURLs = append(gitRemoteURLs, invocation.GitRemoteUrl)
durationsMS = append(durationsMS, int32(invocation.DurationMs))
}
binaryArgsData, _ := json.Marshal(binaryArgs)
err := s.Database.InsertIntelInvocations(s.closeContext, database.InsertIntelInvocationsParams{
ID: ids,
CreatedAt: dbtime.Now(),
@ -114,7 +119,7 @@ func (s *server) invocationQueueLoop() {
UserID: s.UserID,
BinaryHash: binaryHashes,
BinaryPath: binaryPaths,
BinaryArgs: binaryArgs,
BinaryArgs: binaryArgsData,
BinaryVersion: binaryVersions,
WorkingDirectory: workingDirs,
GitRemoteUrl: gitRemoteURLs,
@ -122,7 +127,10 @@ func (s *server) invocationQueueLoop() {
})
if err != nil {
s.Logger.Error(s.closeContext, "write invocations", slog.Error(err))
return err
// Drop this batch of invocations and carry on. Losing a batch
// is less harmful than letting the queue grow unbounded and
// failing later.
return nil
}
s.Logger.Info(s.closeContext, "invocations flushed", slog.F("count", len(i)))
return nil
@ -133,7 +141,6 @@ func (s *server) invocationQueueLoop() {
return
}
s.Logger.Warn(s.closeContext, "failed to write invocations", slog.Error(err))
return
}
}
}

View File

@ -6,6 +6,7 @@ import (
"io"
"net/http"
"net/http/cookiejar"
"strconv"
"github.com/google/uuid"
"github.com/hashicorp/yamux"
@ -21,14 +22,13 @@ type IntelDaemonHostInfo struct {
// InstanceID is a self-reported unique identifier for
// the machine. If one cannot be found, a random ID
// will be used.
InstanceID string `json:"instance_id"`
Hostname string `json:"hostname"`
OperatingSystem string `json:"operating_system"`
OperatingSystemVersion string `json:"operating_system_version"`
Architecture string `json:"architecture"`
CPUCores uint16 `json:"cpu_cores"`
MemoryTotalMB uint64 `json:"memory_total_mb"`
Tags []string `json:"tags"`
InstanceID string `json:"instance_id"`
Hostname string `json:"hostname"`
OperatingSystem string `json:"operating_system"`
OperatingSystemVersion string `json:"operating_system_version"`
Architecture string `json:"architecture"`
CPUCores uint16 `json:"cpu_cores"`
MemoryTotalMB uint64 `json:"memory_total_mb"`
}
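This diff does not show where the daemon collects IntelDaemonHostInfo, so the following is only a sketch under that assumption: a hypothetical `collectHostInfo` helper filling the fields the Go standard library can supply directly and leaving the platform-specific ones zero.

```go
package main

import (
	"fmt"
	"os"
	"runtime"

	"github.com/coder/coder/v2/codersdk"
)

// collectHostInfo is a hypothetical helper; the real daemon gathers this
// elsewhere (not shown in this diff).
func collectHostInfo(instanceID string) codersdk.IntelDaemonHostInfo {
	hostname, _ := os.Hostname()
	return codersdk.IntelDaemonHostInfo{
		InstanceID:      instanceID,
		Hostname:        hostname,
		OperatingSystem: runtime.GOOS,
		Architecture:    runtime.GOARCH,
		CPUCores:        uint16(runtime.NumCPU()),
		// OperatingSystemVersion and MemoryTotalMB need a platform-specific
		// lookup (e.g. a sysinfo library) and are left zero here.
	}
}

func main() {
	fmt.Printf("%+v\n", collectHostInfo("example-instance"))
}
```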
type ServeIntelDaemonRequest struct {
@ -53,10 +53,9 @@ func (c *Client) ServeIntelDaemon(ctx context.Context, req ServeIntelDaemonReque
query.Add("operating_system", req.OperatingSystem)
query.Add("operating_system_version", req.OperatingSystemVersion)
query.Add("architecture", req.Architecture)
query.Add("cpu_cores", fmt.Sprint(req.CPUCores))
query.Add("memory_total_mb", fmt.Sprint(req.MemoryTotalMB))
query["tags"] = req.Tags
query.Add("cpu_cores", strconv.Itoa(int(req.CPUCores)))
query.Add("memory_total_mb", strconv.Itoa(int(req.MemoryTotalMB)))
serverURL.RawQuery = query.Encode()
httpClient := &http.Client{
Transport: c.HTTPClient.Transport,
}

View File

@ -27,6 +27,7 @@ Coder — A tool for provisioning self-hosted development environments with Terr
| ------------------------------------------------------ | ----------------------------------------------------------------------------------------------------- |
| [<code>dotfiles</code>](./cli/dotfiles.md) | Personalize your workspace by applying a canonical dotfiles repository |
| [<code>external-auth</code>](./cli/external-auth.md) | Manage external authentication |
| [<code>inteld</code>](./cli/inteld.md) | Manage the Intel Daemon |
| [<code>login</code>](./cli/login.md) | Authenticate with Coder deployment |
| [<code>logout</code>](./cli/logout.md) | Unauthenticate your local session |
| [<code>netcheck</code>](./cli/netcheck.md) | Print network debug information for DERP and STUN |

docs/cli/inteld.md generated Normal file
View File

@ -0,0 +1,17 @@
<!-- DO NOT EDIT | GENERATED CONTENT -->
# inteld
Manage the Intel Daemon
## Usage
```console
coder inteld
```
## Subcommands
| Name | Purpose |
| --------------------------------------- | ---------------------- |
| [<code>start</code>](./inteld_start.md) | Start the Intel Daemon |

docs/cli/inteld_start.md generated Normal file
View File

@ -0,0 +1,70 @@
<!-- DO NOT EDIT | GENERATED CONTENT -->
# inteld start
Start the Intel Daemon
## Usage
```console
coder inteld start [flags]
```
## Options
### --verbose
| | |
| ----------- | ---------------------------------------- |
| Type | <code>bool</code> |
| Environment | <code>$CODER_INTEL_DAEMON_VERBOSE</code> |
| Default | <code>false</code> |
Output debug-level logs.
### --log-human
| | |
| ----------- | ---------------------------------------------- |
| Type | <code>string</code> |
| Environment | <code>$CODER_INTEL_DAEMON_LOGGING_HUMAN</code> |
| Default | <code>/dev/stderr</code> |
Output human-readable logs to a given file.
### --log-json
| | |
| ----------- | --------------------------------------------- |
| Type | <code>string</code> |
| Environment | <code>$CODER_INTEL_DAEMON_LOGGING_JSON</code> |
Output JSON logs to a given file.
### --log-stackdriver
| | |
| ----------- | ---------------------------------------------------- |
| Type | <code>string</code> |
| Environment | <code>$CODER_INTEL_DAEMON_LOGGING_STACKDRIVER</code> |
Output Stackdriver compatible logs to a given file.
### --log-filter
| | |
| ----------- | ------------------------------------------- |
| Type | <code>string-array</code> |
| Environment | <code>$CODER_INTEL_DAEMON_LOG_FILTER</code> |
Filter debug logs by matching against a given regex. Use .\* to match all debug logs.
### --invoke-directory
| | |
| ----------- | ------------------------------------------------- |
| Type | <code>string</code> |
| Environment | <code>$CODER_INTEL_DAEMON_INVOKE_DIRECTORY</code> |
| Default | <code>~/.coder-intel/bin</code> |
The directory where binaries are aliased to and overridden in the $PATH so they can be tracked.

View File

@ -690,6 +690,16 @@
"description": "List user groups",
"path": "cli/groups_list.md"
},
{
"title": "inteld",
"description": "Manage the Intel Daemon",
"path": "cli/inteld.md"
},
{
"title": "inteld start",
"description": "Start the Intel Daemon",
"path": "cli/inteld_start.md"
},
{
"title": "licenses",
"description": "Add, delete, and list licenses",

View File

@ -2,8 +2,10 @@ package inteld
import (
"context"
"crypto/sha1"
"errors"
"fmt"
"io"
"net/http"
"os"
"os/exec"
@ -30,20 +32,22 @@ import (
type Dialer func(ctx context.Context, hostInfo codersdk.IntelDaemonHostInfo) (proto.DRPCIntelDaemonClient, error)
type InvokeBinaryDownloader func(ctx context.Context, etag string) (*http.Response, error)
type Options struct {
// Dialer connects the daemon to a client.
Dialer Dialer
Filesystem afero.Fs
// InvokeBinary is the path to the binary that will be
// associated with aliased commands.
InvokeBinary string
	// InvokeDirectory is the directory binaries are aliased into and
	// that takes precedence in $PATH so invocations can be intercepted.
	InvokeDirectory string
// InvokeBinaryDownloader is a function that downloads the invoke binary.
// It will be downloaded into the invoke directory.
InvokeBinaryDownloader InvokeBinaryDownloader
// InvocationFlushInterval is the interval at which invocations
// are flushed to the server.
InvocationFlushInterval time.Duration
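
For reference, an embedder wires these options together roughly as in the sketch below and hands them to `inteld.New`. The import path, the dialer body, the flush interval, and the stubbed downloader are assumptions; the real wiring lives in the `coder inteld start` command, which this hunk does not show.

```go
package main

import (
	"context"
	"errors"
	"net/http"
	"os"
	"path/filepath"
	"time"

	"cdr.dev/slog"
	"cdr.dev/slog/sloggers/sloghuman"
	"github.com/spf13/afero"

	"github.com/coder/coder/v2/codersdk"
	"github.com/coder/coder/v2/inteld"
	"github.com/coder/coder/v2/inteld/proto"
)

func main() {
	logger := slog.Make(sloghuman.Sink(os.Stderr))
	home, _ := os.UserHomeDir()

	api := inteld.New(inteld.Options{
		Logger:     logger,
		Filesystem: afero.NewOsFs(),
		// Placeholder dialer: the real one dials coderd through codersdk.
		Dialer: func(ctx context.Context, hostInfo codersdk.IntelDaemonHostInfo) (proto.DRPCIntelDaemonClient, error) {
			return nil, errors.New("dialer not wired up in this sketch")
		},
		// Placeholder downloader: see the ETag-aware sketch after
		// downloadInvokeBinary below.
		InvokeBinaryDownloader: func(ctx context.Context, etag string) (*http.Response, error) {
			return nil, errors.New("downloader not wired up in this sketch")
		},
		InvokeDirectory:         filepath.Join(home, ".coder-intel", "bin"),
		InvocationFlushInterval: 5 * time.Second,
	})
	_ = api // the daemon's connect, listen, and flush loops now run in the background
}
```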
@ -62,17 +66,17 @@ func New(opts Options) *API {
opts.Filesystem = afero.NewOsFs()
}
closeContext, closeCancel := context.WithCancel(context.Background())
invocationQueue := newInvocationQueue(opts.InvocationFlushInterval)
invocationQueue := newInvocationQueue(opts.InvocationFlushInterval, opts.Logger)
api := &API{
clientDialer: opts.Dialer,
clientChan: make(chan proto.DRPCIntelDaemonClient),
closeContext: closeContext,
closeCancel: closeCancel,
filesystem: opts.Filesystem,
logger: opts.Logger,
invokeDirectory: opts.InvokeDirectory,
invokeBinary: opts.InvokeBinary,
invocationQueue: invocationQueue,
clientDialer: opts.Dialer,
clientChan: make(chan proto.DRPCIntelDaemonClient),
closeContext: closeContext,
closeCancel: closeCancel,
filesystem: opts.Filesystem,
logger: opts.Logger,
invokeDirectory: opts.InvokeDirectory,
invokeBinaryDownloader: opts.InvokeBinaryDownloader,
invocationQueue: invocationQueue,
}
api.closeWaitGroup.Add(3)
go api.invocationQueueLoop()
@ -83,10 +87,11 @@ func New(opts Options) *API {
// API serves an instance of the intel daemon.
type API struct {
filesystem afero.Fs
invokeBinary string
invokeDirectory string
invocationQueue *invocationQueue
filesystem afero.Fs
invokeDirectory string
invocationQueue *invocationQueue
invokeBinaryPathChan chan string
invokeBinaryDownloader InvokeBinaryDownloader
clientDialer Dialer
clientChan chan proto.DRPCIntelDaemonClient
@ -107,7 +112,8 @@ func (a *API) invocationQueueLoop() {
err := a.invocationQueue.startSendLoop(a.closeContext, func(i []*proto.Invocation) error {
client, ok := a.client()
if !ok {
return xerrors.New("no client available")
// If no client is available, we shouldn't try to retry. We're shutting down!
return nil
}
_, err := client.RecordInvocation(a.closeContext, &proto.RecordInvocationRequest{
Invocations: i,
@ -124,11 +130,65 @@ func (a *API) invocationQueueLoop() {
}
}
// downloadInvokeBinary downloads the invoke binary to the provided path.
// If a binary already exists at the path, its SHA-1 is computed and sent
// as the ETag so an unchanged binary is not downloaded again.
func (a *API) downloadInvokeBinary(invokeBinaryPath string) error {
_, err := os.Stat(invokeBinaryPath)
existingSha1 := ""
if err == nil {
file, err := a.filesystem.Open(invokeBinaryPath)
if err != nil {
return xerrors.Errorf("unable to open invoke binary: %w", err)
}
defer file.Close()
//nolint:gosec // this is what our etag uses
hash := sha1.New()
_, err = io.Copy(hash, file)
if err != nil {
return xerrors.Errorf("unable to hash invoke binary: %w", err)
}
existingSha1 = fmt.Sprintf("%x", hash.Sum(nil))
}
resp, err := a.invokeBinaryDownloader(a.closeContext, existingSha1)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusNotModified {
return nil
}
if resp.StatusCode != http.StatusOK {
return xerrors.Errorf("unexpected status code: %d", resp.StatusCode)
}
err = a.filesystem.MkdirAll(filepath.Dir(invokeBinaryPath), 0755)
if err != nil {
return xerrors.Errorf("unable to create invoke binary directory: %w", err)
}
_ = a.filesystem.Remove(invokeBinaryPath)
file, err := a.filesystem.Create(invokeBinaryPath)
if err != nil {
return xerrors.Errorf("unable to create invoke binary: %w", err)
}
defer file.Close()
_, err = io.Copy(file, resp.Body)
if err != nil {
return xerrors.Errorf("unable to write invoke binary: %w", err)
}
err = a.filesystem.Chmod(invokeBinaryPath, 0755)
if err != nil {
return xerrors.Errorf("unable to chmod invoke binary: %w", err)
}
return nil
}
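
The downloader that `downloadInvokeBinary` calls is expected to turn the SHA-1 computed above into a conditional request, so the server can answer `304 Not Modified` when nothing changed. A minimal sketch of such an `InvokeBinaryDownloader`, assuming the `inteld` import path and a hypothetical download endpoint (the real downloader is provided by the CLI via codersdk and is not shown here):

```go
package main

import (
	"context"
	"net/http"

	"github.com/coder/coder/v2/inteld"
)

// newInvokeBinaryDownloader returns a downloader that honors the SHA-1
// "ETag" contract downloadInvokeBinary relies on. The endpoint path is
// hypothetical.
func newInvokeBinaryDownloader(serverURL string) inteld.InvokeBinaryDownloader {
	return func(ctx context.Context, etag string) (*http.Response, error) {
		req, err := http.NewRequestWithContext(ctx, http.MethodGet, serverURL+"/api/v2/intel/invoke-binary", nil)
		if err != nil {
			return nil, err
		}
		if etag != "" {
			// The server is expected to answer 304 Not Modified when the
			// daemon's existing binary already matches this hash;
			// downloadInvokeBinary treats that as a no-op.
			req.Header.Set("If-None-Match", etag)
		}
		return http.DefaultClient.Do(req)
	}
}

func main() {
	// "https://coder.example.com" is a stand-in for the deployment URL.
	download := newInvokeBinaryDownloader("https://coder.example.com")
	_ = download // would be set as inteld.Options{InvokeBinaryDownloader: download}
}
```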
// listenLoop starts a loop that listens for messages from the system.
func (a *API) listenLoop() {
defer a.logger.Debug(a.closeContext, "system loop exited")
defer a.closeWaitGroup.Done()
for {
// Wrapped in a retry in case recv ends super quickly for any reason!
for retrier := retry.New(50*time.Millisecond, 10*time.Second); retrier.Wait(a.closeContext); {
client, ok := a.client()
if !ok {
a.logger.Debug(a.closeContext, "shut down before client (re) connected")
@ -154,10 +214,7 @@ func (a *API) listenLoop() {
},
})
if err != nil {
if errors.Is(err, context.Canceled) ||
errors.Is(err, yamux.ErrSessionShutdown) {
continue
}
continue
}
a.systemRecvLoop(system)
}
@ -204,11 +261,21 @@ func (a *API) trackExecutables(binaryNames []string) error {
if err != nil {
return err
}
invokeBinary, valid := a.invokeBinaryPath()
if !valid {
// If there isn't an invoke binary, we shouldn't set up any symlinks!
return nil
}
for _, file := range files {
// Clear out the directory to remove old filenames.
// Don't do this for the global dir because it makes
// debugging harder.
err = a.filesystem.Remove(filepath.Join(a.invokeDirectory, file.Name()))
filePath := filepath.Join(a.invokeDirectory, file.Name())
if filePath == invokeBinary {
			// Never remove the invoke binary itself.
continue
}
err = a.filesystem.Remove(filePath)
if err != nil {
return err
}
@ -222,7 +289,7 @@ func (a *API) trackExecutables(binaryNames []string) error {
return xerrors.New("filesystem does not support symlinks")
}
for _, binaryName := range binaryNames {
err = linker.SymlinkIfPossible(a.invokeBinary, filepath.Join(a.invokeDirectory, binaryName))
err = linker.SymlinkIfPossible(invokeBinary, filepath.Join(a.invokeDirectory, binaryName))
if err != nil {
return err
}
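
Because every tracked command ends up as a symlink to the single invoke binary, the invoke side can only tell commands apart by the name it was launched under. The dispatch sketch below (busybox-style, via `os.Args[0]`) is an assumption about how that works; the invoke binary's actual main is not part of this hunk.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Symlinks preserve the alias name in os.Args[0], so a single binary
	// can tell whether it was started as "go", "git", "node", etc.
	alias := filepath.Base(os.Args[0])
	fmt.Fprintf(os.Stderr, "invoked as %q with args %v\n", alias, os.Args[1:])
	// A real invoke binary would now locate and exec the original
	// executable, then report the invocation to the daemon (assumption).
}
```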
@ -263,6 +330,10 @@ func (a *API) connectLoop() {
CPUCores: uint16(runtime.NumCPU()),
MemoryTotalMB: memoryTotal,
}
invokeBinaryPath := filepath.Join(a.invokeDirectory, "coder-intel-invoke")
if runtime.GOOS == "windows" {
invokeBinaryPath += ".exe"
}
connectLoop:
for retrier := retry.New(50*time.Millisecond, 10*time.Second); retrier.Wait(a.closeContext); {
a.logger.Debug(a.closeContext, "dialing coderd")
@ -284,6 +355,12 @@ connectLoop:
continue
}
a.logger.Info(a.closeContext, "successfully connected to coderd")
err = a.downloadInvokeBinary(invokeBinaryPath)
if err != nil {
a.logger.Warn(a.closeContext, "unable to download invoke binary", slog.Error(err))
continue
}
a.logger.Info(a.closeContext, "successfully obtained invoke binary")
retrier.Reset()
// serve the client until we are closed or it disconnects
@ -297,6 +374,8 @@ connectLoop:
continue connectLoop
case a.clientChan <- client:
continue
case a.invokeBinaryPathChan <- invokeBinaryPath:
continue
}
}
}
@ -312,6 +391,15 @@ func (a *API) client() (proto.DRPCIntelDaemonClient, bool) {
}
}
func (a *API) invokeBinaryPath() (string, bool) {
select {
case <-a.closeContext.Done():
return "", false
case invokeBinary := <-a.invokeBinaryPathChan:
return invokeBinary, true
}
}
// isClosed returns whether the API is closed or not.
func (a *API) isClosed() bool {
select {
@ -350,20 +438,18 @@ func fetchFromGitConfig(property string) (string, error) {
return strings.TrimSpace(string(output)), nil
}
var _ proto.DRPCIntelClientServer = (*API)(nil)
// ReportInvocation is called by the client to report an invocation.
func (a *API) ReportInvocation(_ context.Context, req *proto.ReportInvocationRequest) (*proto.Empty, error) {
func (a *API) ReportInvocation(req *proto.ReportInvocationRequest) {
a.invocationQueue.enqueue(req)
return &proto.Empty{}, nil
}
func newInvocationQueue(flushInterval time.Duration) *invocationQueue {
func newInvocationQueue(flushInterval time.Duration, logger slog.Logger) *invocationQueue {
return &invocationQueue{
Cond: sync.NewCond(&sync.Mutex{}),
flushInterval: flushInterval,
binaryCache: tlru.New[string, *proto.Executable](tlru.ConstantCost, 1000),
gitRemoteCache: tlru.New[string, string](tlru.ConstantCost, 1000),
logger: logger,
}
}
@ -417,9 +503,20 @@ func (i *invocationQueue) enqueue(req *proto.ReportInvocationRequest) {
cmd.Dir = req.WorkingDirectory
out, err := cmd.Output()
if err != nil {
var exitError *exec.ExitError
if errors.As(err, &exitError) {
// We probably just weren't inside a git dir!
// This result should still be cached.
if exitError.ExitCode() == 128 {
return "", nil
}
}
return "", err
}
return strings.TrimSpace(string(out)), nil
url := strings.TrimSpace(string(out))
i.logger.Info(context.Background(),
"cached git remote", slog.F("url", url), slog.F("working_dir", req.WorkingDirectory))
return url, nil
}, time.Hour)
if err != nil {
// This isn't worth failing the execution on, but is an issue
@ -431,6 +528,9 @@ func (i *invocationQueue) enqueue(req *proto.ReportInvocationRequest) {
i.L.Lock()
defer i.L.Unlock()
i.queue = append(i.queue, inv)
if len(i.queue)%10 == 0 {
i.logger.Info(context.Background(), "invocation queue length", slog.F("count", len(i.queue)))
}
i.Broadcast()
}
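
The value being cached above comes from shelling out to git in the invocation's working directory; exit code 128 ("not a git repository") is cached as an empty remote instead of being treated as an error. A standalone sketch of that lookup, assuming `git remote get-url origin` (the exact arguments sit above this hunk and are not shown):

```go
package main

import (
	"errors"
	"fmt"
	"os/exec"
	"strings"
)

// gitRemote is a sketch of the lookup cached by the invocation queue.
// Exit code 128 is treated as a cacheable empty result rather than an error.
func gitRemote(workingDir string) (string, error) {
	cmd := exec.Command("git", "remote", "get-url", "origin")
	cmd.Dir = workingDir
	out, err := cmd.Output()
	if err != nil {
		var exitError *exec.ExitError
		if errors.As(err, &exitError) && exitError.ExitCode() == 128 {
			return "", nil
		}
		return "", err
	}
	return strings.TrimSpace(string(out)), nil
}

func main() {
	url, err := gitRemote(".")
	fmt.Println(url, err)
}
```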
@ -472,7 +572,9 @@ func (i *invocationQueue) startSendLoop(ctx context.Context, flush func([]*proto
queue := i.queue[:]
i.flushRequested = false
i.L.Unlock()
i.logger.Info(ctx, "flushing invocations", slog.F("count", len(queue)))
err := flush(queue)
i.logger.Info(ctx, "flushed invocations", slog.F("count", len(queue)), slog.Error(err))
i.L.Lock()
if err != nil {
return xerrors.Errorf("failed to flush invocations: %w", err)

View File

@ -581,6 +581,8 @@ func (x *TrackExecutables) GetBinaryName() []string {
return nil
}
// ReportInvocationRequest is sent as bytes for speed from
// the slim invoke binary to the daemon.
type ReportInvocationRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@ -760,15 +762,10 @@ var file_inteld_proto_inteld_proto_rawDesc = []byte{
0x74, 0x79, 0x12, 0x36, 0x0a, 0x0a, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x50, 0x61, 0x74, 0x68,
0x12, 0x19, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x6c, 0x64, 0x2e, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74,
0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0d, 0x2e, 0x69, 0x6e,
0x74, 0x65, 0x6c, 0x64, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x32, 0x51, 0x0a, 0x0b, 0x49, 0x6e,
0x74, 0x65, 0x6c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x42, 0x0a, 0x10, 0x52, 0x65, 0x70,
0x6f, 0x72, 0x74, 0x49, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e,
0x69, 0x6e, 0x74, 0x65, 0x6c, 0x64, 0x2e, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x76,
0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0d,
0x2e, 0x69, 0x6e, 0x74, 0x65, 0x6c, 0x64, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x28, 0x5a,
0x26, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x64, 0x65,
0x72, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x6c,
0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x74, 0x65, 0x6c, 0x64, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x28, 0x5a, 0x26, 0x67, 0x69,
0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2f, 0x63,
0x6f, 0x64, 0x65, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x6c, 0x64, 0x2f, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@ -805,13 +802,11 @@ var file_inteld_proto_inteld_proto_depIdxs = []int32{
6, // 5: inteld.IntelDaemon.Listen:input_type -> inteld.ListenRequest
4, // 6: inteld.IntelDaemon.RecordInvocation:input_type -> inteld.RecordInvocationRequest
3, // 7: inteld.IntelDaemon.ReportPath:input_type -> inteld.ReportPathRequest
9, // 8: inteld.IntelClient.ReportInvocation:input_type -> inteld.ReportInvocationRequest
7, // 9: inteld.IntelDaemon.Listen:output_type -> inteld.SystemResponse
0, // 10: inteld.IntelDaemon.RecordInvocation:output_type -> inteld.Empty
0, // 11: inteld.IntelDaemon.ReportPath:output_type -> inteld.Empty
0, // 12: inteld.IntelClient.ReportInvocation:output_type -> inteld.Empty
9, // [9:13] is the sub-list for method output_type
5, // [5:9] is the sub-list for method input_type
7, // 8: inteld.IntelDaemon.Listen:output_type -> inteld.SystemResponse
0, // 9: inteld.IntelDaemon.RecordInvocation:output_type -> inteld.Empty
0, // 10: inteld.IntelDaemon.ReportPath:output_type -> inteld.Empty
8, // [8:11] is the sub-list for method output_type
5, // [5:8] is the sub-list for method input_type
5, // [5:5] is the sub-list for extension type_name
5, // [5:5] is the sub-list for extension extendee
0, // [0:5] is the sub-list for field type_name
@ -955,7 +950,7 @@ func file_inteld_proto_inteld_proto_init() {
NumEnums: 0,
NumMessages: 10,
NumExtensions: 0,
NumServices: 2,
NumServices: 1,
},
GoTypes: file_inteld_proto_inteld_proto_goTypes,
DependencyIndexes: file_inteld_proto_inteld_proto_depIdxs,

View File

@ -73,8 +73,8 @@ service IntelDaemon {
rpc ReportPath(ReportPathRequest) returns (Empty);
}
// Client messages!
// ReportInvocationRequest is sent as bytes for speed from
// the slim invoke binary to the daemon.
message ReportInvocationRequest {
string executable_path = 1;
repeated string arguments = 2;
@ -82,11 +82,3 @@ message ReportInvocationRequest {
int32 exit_code = 4;
string working_directory = 5;
}
// IntelClient is provided by inteld to clients that want
// to report insights. Clients report through the daemon
// to ensure that the insights are stored and processed
// in a consistent and timely manner.
service IntelClient {
rpc ReportInvocation(ReportInvocationRequest) returns (Empty);
}

View File

@ -217,78 +217,3 @@ func (x *drpcIntelDaemon_ReportPathStream) SendAndClose(m *Empty) error {
}
return x.CloseSend()
}
type DRPCIntelClientClient interface {
DRPCConn() drpc.Conn
ReportInvocation(ctx context.Context, in *ReportInvocationRequest) (*Empty, error)
}
type drpcIntelClientClient struct {
cc drpc.Conn
}
func NewDRPCIntelClientClient(cc drpc.Conn) DRPCIntelClientClient {
return &drpcIntelClientClient{cc}
}
func (c *drpcIntelClientClient) DRPCConn() drpc.Conn { return c.cc }
func (c *drpcIntelClientClient) ReportInvocation(ctx context.Context, in *ReportInvocationRequest) (*Empty, error) {
out := new(Empty)
err := c.cc.Invoke(ctx, "/inteld.IntelClient/ReportInvocation", drpcEncoding_File_inteld_proto_inteld_proto{}, in, out)
if err != nil {
return nil, err
}
return out, nil
}
type DRPCIntelClientServer interface {
ReportInvocation(context.Context, *ReportInvocationRequest) (*Empty, error)
}
type DRPCIntelClientUnimplementedServer struct{}
func (s *DRPCIntelClientUnimplementedServer) ReportInvocation(context.Context, *ReportInvocationRequest) (*Empty, error) {
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
}
type DRPCIntelClientDescription struct{}
func (DRPCIntelClientDescription) NumMethods() int { return 1 }
func (DRPCIntelClientDescription) Method(n int) (string, drpc.Encoding, drpc.Receiver, interface{}, bool) {
switch n {
case 0:
return "/inteld.IntelClient/ReportInvocation", drpcEncoding_File_inteld_proto_inteld_proto{},
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
return srv.(DRPCIntelClientServer).
ReportInvocation(
ctx,
in1.(*ReportInvocationRequest),
)
}, DRPCIntelClientServer.ReportInvocation, true
default:
return "", nil, nil, nil, false
}
}
func DRPCRegisterIntelClient(mux drpc.Mux, impl DRPCIntelClientServer) error {
return mux.Register(impl, DRPCIntelClientDescription{})
}
type DRPCIntelClient_ReportInvocationStream interface {
drpc.Stream
SendAndClose(*Empty) error
}
type drpcIntelClient_ReportInvocationStream struct {
drpc.Stream
}
func (x *drpcIntelClient_ReportInvocationStream) SendAndClose(m *Empty) error {
if err := x.MsgSend(m, drpcEncoding_File_inteld_proto_inteld_proto{}); err != nil {
return err
}
return x.CloseSend()
}

inteld/proto/proto.go Normal file
View File

@ -0,0 +1,133 @@
package proto
import (
"io"
"net"
"os"
"path/filepath"
"runtime"
"time"
gproto "google.golang.org/protobuf/proto"
)
const (
DialTimeout = 100 * time.Millisecond
)
var (
daemonSocket = filepath.Join(os.TempDir(), ".coder-intel.sock")
)
// ReportInvocation reports an invocation to a daemon
// if it is running.
func ReportInvocation(inv *ReportInvocationRequest) error {
data, err := gproto.Marshal(inv)
if err != nil {
return err
}
var w io.Writer
	// Don't bother closing either of these.
	// It's a waste of a syscall, because we're exiting
	// immediately after reporting anyway.
if shouldUnixgram() {
w, err = net.DialUnix("unixgram", nil, &net.UnixAddr{
Name: daemonSocket,
Net: "unixgram",
})
} else {
daemonDialTimeoutRaw, exists := os.LookupEnv("CODER_INTEL_DAEMON_TIMEOUT")
timeout := DialTimeout
if exists {
dur, err := time.ParseDuration(daemonDialTimeoutRaw)
if err != nil {
return err
}
timeout = dur
}
w, err = net.DialTimeout("tcp", daemonAddress(), timeout)
}
if err != nil {
return err
}
_, err = w.Write(data)
return err
}
// ListenForInvocations starts a listener that receives invocation requests
// from the invoke binary over the most efficient transport available.
func ListenForInvocations(sendFunc func(inv *ReportInvocationRequest)) (io.Closer, error) {
overrideAddress := os.Getenv("CODER_INTEL_DAEMON_ADDRESS")
if overrideAddress != "" {
return net.Listen("tcp", overrideAddress)
}
unmarshalAndSend := func(data []byte, count int) {
var inv ReportInvocationRequest
err := gproto.Unmarshal(data[:count], &inv)
if err != nil {
return
}
go sendFunc(&inv)
}
var closer io.Closer
if shouldUnixgram() {
// Remove the socket first!
_ = os.Remove(daemonSocket)
unixConn, err := net.ListenUnixgram("unixgram", &net.UnixAddr{
Name: daemonSocket,
Net: "unixgram",
})
if err != nil {
return nil, err
}
go func() {
data := make([]byte, 1024)
for {
count, _, err := unixConn.ReadFromUnix(data)
if err != nil {
return
}
unmarshalAndSend(data, count)
}
}()
closer = unixConn
} else {
listener, err := net.Listen("tcp", daemonAddress())
if err != nil {
return nil, err
}
go func() {
for {
conn, err := listener.Accept()
if err != nil {
return
}
go func() {
data := make([]byte, 1024)
read, err := conn.Read(data)
if err != nil {
return
}
unmarshalAndSend(data, read)
_ = conn.Close()
}()
}
}()
closer = listener
}
return closer, nil
}
// shouldUnixgram reports whether the faster unixgram transport
// should be used.
func shouldUnixgram() bool {
return os.Getenv("CODER_INTEL_DAEMON_ADDRESS") == "" && runtime.GOOS != "windows"
}
func daemonAddress() string {
overrideAddress := os.Getenv("CODER_INTEL_DAEMON_ADDRESS")
if overrideAddress != "" {
return overrideAddress
}
return "127.0.0.1:13657"
}
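
Putting the two halves of proto.go together: the daemon calls `ListenForInvocations` once, and the short-lived invoke binary fires a single `ReportInvocation` and exits. A minimal round-trip sketch (field names follow the usual protoc-gen-go mapping of the proto definitions; only fields visible in this diff are set):

```go
package main

import (
	"fmt"
	"os"
	"time"

	"github.com/coder/coder/v2/inteld/proto"
)

func main() {
	// Daemon side: receive invocations over unixgram (or TCP on Windows /
	// when CODER_INTEL_DAEMON_ADDRESS is set).
	closer, err := proto.ListenForInvocations(func(inv *proto.ReportInvocationRequest) {
		fmt.Printf("saw %s exit with %d in %s\n",
			inv.ExecutablePath, inv.ExitCode, inv.WorkingDirectory)
	})
	if err != nil {
		panic(err)
	}
	defer closer.Close()

	// Invoke-binary side: fire-and-forget report of a finished command.
	wd, _ := os.Getwd()
	err = proto.ReportInvocation(&proto.ReportInvocationRequest{
		ExecutablePath:   "/usr/bin/git",
		Arguments:        []string{"status"},
		ExitCode:         0,
		WorkingDirectory: wd,
	})
	if err != nil {
		panic(err)
	}

	// Give the listener's goroutine a moment to run the callback.
	time.Sleep(100 * time.Millisecond)
}
```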

View File

@ -600,6 +600,17 @@ export interface HealthcheckConfig {
readonly threshold_database: number;
}
// From codersdk/intel.go
export interface IntelDaemonHostInfo {
readonly instance_id: string;
readonly hostname: string;
readonly operating_system: string;
readonly operating_system_version: string;
readonly architecture: string;
readonly cpu_cores: number;
readonly memory_total_mb: number;
}
// From codersdk/workspaceagents.go
export interface IssueReconnectingPTYSignedTokenRequest {
readonly url: string;
@ -975,6 +986,11 @@ export interface SSHConfigResponse {
readonly ssh_config_options: Record<string, string>;
}
// From codersdk/intel.go
export interface ServeIntelDaemonRequest extends IntelDaemonHostInfo {
readonly organization: string;
}
// From codersdk/serversentevents.go
export interface ServerSentEvent {
readonly type: ServerSentEventType;