feat: add AgentAPI using DRPC (#10811)

Co-authored-by: Spike Curtis <spike@coder.com>
Dean Sheather authored on 2023-12-18 04:53:28 -08:00; committed by GitHub
parent eb781751b8
commit e46431078c
42 changed files with 4413 additions and 1035 deletions


@@ -30,4 +30,5 @@ extend-exclude = [
"**/*_test.go",
"**/*.test.tsx",
"**/pnpm-lock.yaml",
"tailnet/testdata/**",
]

File diff suppressed because it is too large.


@@ -8,7 +8,7 @@ import "google/protobuf/timestamp.proto";
import "google/protobuf/duration.proto";
message WorkspaceApp {
- bytes uuid = 1;
+ bytes id = 1;
string url = 2;
bool external = 3;
string slug = 4;
@@ -26,12 +26,12 @@ message WorkspaceApp {
}
SharingLevel sharing_level = 10;
- message HealthCheck {
+ message Healthcheck {
string url = 1;
- int32 interval = 2;
+ google.protobuf.Duration interval = 2;
int32 threshold = 3;
}
- HealthCheck healthcheck = 11;
+ Healthcheck healthcheck = 11;
enum Health {
HEALTH_UNSPECIFIED = 0;
@@ -43,11 +43,52 @@ message WorkspaceApp {
Health health = 12;
}
+ message WorkspaceAgentScript {
+ bytes log_source_id = 1;
+ string log_path = 2;
+ string script = 3;
+ string cron = 4;
+ bool run_on_start = 5;
+ bool run_on_stop = 6;
+ bool start_blocks_login = 7;
+ google.protobuf.Duration timeout = 8;
+ }
+ message WorkspaceAgentMetadata {
+ message Result {
+ google.protobuf.Timestamp collected_at = 1;
+ int64 age = 2;
+ string value = 3;
+ string error = 4;
+ }
+ Result result = 1;
+ message Description {
+ string display_name = 1;
+ string key = 2;
+ string script = 3;
+ google.protobuf.Duration interval = 4;
+ google.protobuf.Duration timeout = 5;
+ }
+ Description description = 2;
+ }
message Manifest {
- uint32 git_auth_configs = 1;
- string vs_code_port_proxy_uri = 2;
- repeated WorkspaceApp apps = 3;
- coder.tailnet.v2.DERPMap derp_map = 4;
+ bytes agent_id = 1;
+ string owner_username = 13;
+ bytes workspace_id = 14;
+ uint32 git_auth_configs = 2;
+ map<string, string> environment_variables = 3;
+ string directory = 4;
+ string vs_code_port_proxy_uri = 5;
+ string motd_path = 6;
+ bool disable_direct_connections = 7;
+ bool derp_force_websockets = 8;
+ coder.tailnet.v2.DERPMap derp_map = 9;
+ repeated WorkspaceAgentScript scripts = 10;
+ repeated WorkspaceApp apps = 11;
+ repeated WorkspaceAgentMetadata.Description metadata = 12;
}
message GetManifestRequest {}
@@ -100,8 +141,14 @@ message Stats {
Type type = 2;
double value = 3;
- map<string, string> labels = 4;
+ message Label {
+ string name = 1;
+ string value = 2;
+ }
+ repeated Label labels = 4;
}
repeated Metric metrics = 12;
}
message UpdateStatsRequest{
@@ -109,14 +156,14 @@ message UpdateStatsRequest{
}
message UpdateStatsResponse {
- google.protobuf.Duration report_interval_nanoseconds = 1;
+ google.protobuf.Duration report_interval = 1;
}
message Lifecycle {
enum State {
STATE_UNSPECIFIED = 0;
CREATED = 1;
- STARTED = 2;
+ STARTING = 2;
START_TIMEOUT = 3;
START_ERROR = 4;
READY = 5;
@@ -126,6 +173,7 @@ message Lifecycle {
OFF = 9;
}
State state = 1;
+ google.protobuf.Timestamp changed_at = 2;
}
message UpdateLifecycleRequest {
@@ -142,7 +190,7 @@ enum AppHealth {
message BatchUpdateAppHealthRequest {
message HealthUpdate {
- bytes uuid = 1;
+ bytes id = 1;
AppHealth health = 2;
}
repeated HealthUpdate updates = 1;
@@ -153,7 +201,13 @@ message BatchUpdateAppHealthResponse {}
message Startup {
string version = 1;
string expanded_directory = 2;
- repeated string subsystems = 3;
+ enum Subsystem {
+ SUBSYSTEM_UNSPECIFIED = 0;
+ ENVBOX = 1;
+ ENVBUILDER = 2;
+ EXECTRACE = 3;
+ }
+ repeated Subsystem subsystems = 3;
}
message UpdateStartupRequest{
@@ -162,10 +216,7 @@ message UpdateStartupRequest{
message Metadata {
string key = 1;
- google.protobuf.Timestamp collected_at = 2;
- int64 age = 3;
- string value = 4;
- string error = 5;
+ WorkspaceAgentMetadata.Result result = 2;
}
message BatchUpdateMetadataRequest {
@@ -190,7 +241,7 @@ message Log {
}
message BatchCreateLogsRequest {
- bytes source_id = 1;
+ bytes log_source_id = 1;
repeated Log logs = 2;
}
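Throughout the new schema, UUIDs travel as raw 16-byte bytes fields (id, agent_id, workspace_id, log_source_id) rather than strings. A minimal standalone sketch of that round trip (illustrative, not part of the commit):

package main

import (
    "fmt"

    "github.com/google/uuid"
)

func main() {
    id := uuid.New()
    // Encode: slice the [16]byte array into the proto bytes field.
    wire := id[:]
    // Decode: uuid.FromBytes rejects anything that is not exactly 16 bytes.
    parsed, err := uuid.FromBytes(wire)
    if err != nil {
        panic(err)
    }
    fmt.Println(parsed == id) // true
}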

agent/proto/convert.go (new file, 192 lines)

@@ -0,0 +1,192 @@
package proto
import (
"strings"
"time"
"github.com/google/uuid"
"golang.org/x/xerrors"
"google.golang.org/protobuf/types/known/durationpb"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/db2sdk"
"github.com/coder/coder/v2/codersdk"
)
func DBAgentMetadataToProtoDescription(metadata []database.WorkspaceAgentMetadatum) []*WorkspaceAgentMetadata_Description {
ret := make([]*WorkspaceAgentMetadata_Description, len(metadata))
for i, metadatum := range metadata {
ret[i] = DBAgentMetadatumToProtoDescription(metadatum)
}
return ret
}
func DBAgentMetadatumToProtoDescription(metadatum database.WorkspaceAgentMetadatum) *WorkspaceAgentMetadata_Description {
return &WorkspaceAgentMetadata_Description{
DisplayName: metadatum.DisplayName,
Key: metadatum.Key,
Script: metadatum.Script,
Interval: durationpb.New(time.Duration(metadatum.Interval)),
Timeout: durationpb.New(time.Duration(metadatum.Timeout)),
}
}
func SDKAgentMetadataDescriptionsFromProto(descriptions []*WorkspaceAgentMetadata_Description) []codersdk.WorkspaceAgentMetadataDescription {
ret := make([]codersdk.WorkspaceAgentMetadataDescription, len(descriptions))
for i, description := range descriptions {
ret[i] = SDKAgentMetadataDescriptionFromProto(description)
}
return ret
}
func SDKAgentMetadataDescriptionFromProto(description *WorkspaceAgentMetadata_Description) codersdk.WorkspaceAgentMetadataDescription {
return codersdk.WorkspaceAgentMetadataDescription{
DisplayName: description.DisplayName,
Key: description.Key,
Script: description.Script,
Interval: int64(description.Interval.AsDuration()),
Timeout: int64(description.Timeout.AsDuration()),
}
}
func DBAgentScriptsToProto(scripts []database.WorkspaceAgentScript) []*WorkspaceAgentScript {
ret := make([]*WorkspaceAgentScript, len(scripts))
for i, script := range scripts {
ret[i] = DBAgentScriptToProto(script)
}
return ret
}
func DBAgentScriptToProto(script database.WorkspaceAgentScript) *WorkspaceAgentScript {
return &WorkspaceAgentScript{
LogSourceId: script.LogSourceID[:],
LogPath: script.LogPath,
Script: script.Script,
Cron: script.Cron,
RunOnStart: script.RunOnStart,
RunOnStop: script.RunOnStop,
StartBlocksLogin: script.StartBlocksLogin,
Timeout: durationpb.New(time.Duration(script.TimeoutSeconds) * time.Second),
}
}
func SDKAgentScriptsFromProto(protoScripts []*WorkspaceAgentScript) ([]codersdk.WorkspaceAgentScript, error) {
ret := make([]codersdk.WorkspaceAgentScript, len(protoScripts))
for i, protoScript := range protoScripts {
app, err := SDKAgentScriptFromProto(protoScript)
if err != nil {
return nil, xerrors.Errorf("parse script %v: %w", i, err)
}
ret[i] = app
}
return ret, nil
}
func SDKAgentScriptFromProto(protoScript *WorkspaceAgentScript) (codersdk.WorkspaceAgentScript, error) {
id, err := uuid.FromBytes(protoScript.LogSourceId)
if err != nil {
return codersdk.WorkspaceAgentScript{}, xerrors.Errorf("parse id: %w", err)
}
return codersdk.WorkspaceAgentScript{
LogSourceID: id,
LogPath: protoScript.LogPath,
Script: protoScript.Script,
Cron: protoScript.Cron,
RunOnStart: protoScript.RunOnStart,
RunOnStop: protoScript.RunOnStop,
StartBlocksLogin: protoScript.StartBlocksLogin,
Timeout: protoScript.Timeout.AsDuration(),
}, nil
}
func DBAppsToProto(dbApps []database.WorkspaceApp, agent database.WorkspaceAgent, ownerName string, workspace database.Workspace) ([]*WorkspaceApp, error) {
ret := make([]*WorkspaceApp, len(dbApps))
for i, dbApp := range dbApps {
var err error
ret[i], err = DBAppToProto(dbApp, agent, ownerName, workspace)
if err != nil {
return nil, xerrors.Errorf("parse app %v (%q): %w", i, dbApp.Slug, err)
}
}
return ret, nil
}
func DBAppToProto(dbApp database.WorkspaceApp, agent database.WorkspaceAgent, ownerName string, workspace database.Workspace) (*WorkspaceApp, error) {
sharingLevelRaw, ok := WorkspaceApp_SharingLevel_value[strings.ToUpper(string(dbApp.SharingLevel))]
if !ok {
return nil, xerrors.Errorf("unknown app sharing level: %q", dbApp.SharingLevel)
}
healthRaw, ok := WorkspaceApp_Health_value[strings.ToUpper(string(dbApp.Health))]
if !ok {
return nil, xerrors.Errorf("unknown app health: %q", dbApp.SharingLevel)
}
return &WorkspaceApp{
Id: dbApp.ID[:],
Url: dbApp.Url.String,
External: dbApp.External,
Slug: dbApp.Slug,
DisplayName: dbApp.DisplayName,
Command: dbApp.Command.String,
Icon: dbApp.Icon,
Subdomain: dbApp.Subdomain,
SubdomainName: db2sdk.AppSubdomain(dbApp, agent.Name, workspace.Name, ownerName),
SharingLevel: WorkspaceApp_SharingLevel(sharingLevelRaw),
Healthcheck: &WorkspaceApp_Healthcheck{
Url: dbApp.HealthcheckUrl,
Interval: durationpb.New(time.Duration(dbApp.HealthcheckInterval) * time.Second),
Threshold: dbApp.HealthcheckThreshold,
},
Health: WorkspaceApp_Health(healthRaw),
}, nil
}
func SDKAppsFromProto(protoApps []*WorkspaceApp) ([]codersdk.WorkspaceApp, error) {
ret := make([]codersdk.WorkspaceApp, len(protoApps))
for i, protoApp := range protoApps {
app, err := SDKAppFromProto(protoApp)
if err != nil {
return nil, xerrors.Errorf("parse app %v (%q): %w", i, protoApp.Slug, err)
}
ret[i] = app
}
return ret, nil
}
func SDKAppFromProto(protoApp *WorkspaceApp) (codersdk.WorkspaceApp, error) {
id, err := uuid.FromBytes(protoApp.Id)
if err != nil {
return codersdk.WorkspaceApp{}, xerrors.Errorf("parse id: %w", err)
}
var sharingLevel codersdk.WorkspaceAppSharingLevel = codersdk.WorkspaceAppSharingLevel(strings.ToLower(protoApp.SharingLevel.String()))
if _, ok := codersdk.MapWorkspaceAppSharingLevels[sharingLevel]; !ok {
return codersdk.WorkspaceApp{}, xerrors.Errorf("unknown app sharing level: %v (%q)", protoApp.SharingLevel, protoApp.SharingLevel.String())
}
var health codersdk.WorkspaceAppHealth = codersdk.WorkspaceAppHealth(strings.ToLower(protoApp.Health.String()))
if _, ok := codersdk.MapWorkspaceAppHealths[health]; !ok {
return codersdk.WorkspaceApp{}, xerrors.Errorf("unknown app health: %v (%q)", protoApp.Health, protoApp.Health.String())
}
return codersdk.WorkspaceApp{
ID: id,
URL: protoApp.Url,
External: protoApp.External,
Slug: protoApp.Slug,
DisplayName: protoApp.DisplayName,
Command: protoApp.Command,
Icon: protoApp.Icon,
Subdomain: protoApp.Subdomain,
SubdomainName: protoApp.SubdomainName,
SharingLevel: sharingLevel,
Healthcheck: codersdk.Healthcheck{
URL: protoApp.Healthcheck.Url,
Interval: int32(protoApp.Healthcheck.Interval.AsDuration().Seconds()),
Threshold: protoApp.Healthcheck.Threshold,
},
Health: health,
}, nil
}


@@ -1,4 +1,4 @@
- package coderd
+ package agentapi
import (
"context"
@@ -11,7 +11,7 @@ import (
"github.com/coder/coder/v2/coderd/database"
)
- // activityBumpWorkspace automatically bumps the workspace's auto-off timer
+ // ActivityBumpWorkspace automatically bumps the workspace's auto-off timer
// if it is set to expire soon. The deadline will be bumped by 1 hour*.
// If the bump crosses over an autostart time, the workspace will be
// bumped by the workspace ttl instead.
@@ -36,7 +36,7 @@ import (
// A way to avoid this is to configure the max deadline to something that will not
// span more than 1 day. This will force the workspace to restart and reset the deadline
// each morning when it autostarts.
- func activityBumpWorkspace(ctx context.Context, log slog.Logger, db database.Store, workspaceID uuid.UUID, nextAutostart time.Time) {
+ func ActivityBumpWorkspace(ctx context.Context, log slog.Logger, db database.Store, workspaceID uuid.UUID, nextAutostart time.Time) {
// We set a short timeout so if the app is under load, these
// low priority operations fail first.
ctx, cancel := context.WithTimeout(ctx, time.Second*15)


@@ -1,4 +1,4 @@
- package coderd
+ package agentapi_test
import (
"database/sql"
@@ -8,6 +8,7 @@ import (
"github.com/google/uuid"
"cdr.dev/slog/sloggers/slogtest"
"github.com/coder/coder/v2/coderd/agentapi"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/dbgen"
"github.com/coder/coder/v2/coderd/database/dbtestutil"
@@ -236,7 +237,7 @@ func Test_ActivityBumpWorkspace(t *testing.T) {
// Bump duration is measured from the time of the bump, so we measure from here.
start := dbtime.Now()
- activityBumpWorkspace(ctx, log, db, bld.WorkspaceID, tt.nextAutostart)
+ agentapi.ActivityBumpWorkspace(ctx, log, db, bld.WorkspaceID, tt.nextAutostart)
end := dbtime.Now()
// Validate our state after bump

coderd/agentapi/api.go (new file, 234 lines)

@@ -0,0 +1,234 @@
package agentapi
import (
"context"
"io"
"net"
"net/url"
"sync"
"sync/atomic"
"time"
"github.com/google/uuid"
"golang.org/x/xerrors"
"storj.io/drpc/drpcmux"
"storj.io/drpc/drpcserver"
"tailscale.com/tailcfg"
"cdr.dev/slog"
agentproto "github.com/coder/coder/v2/agent/proto"
"github.com/coder/coder/v2/coderd/batchstats"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/pubsub"
"github.com/coder/coder/v2/coderd/externalauth"
"github.com/coder/coder/v2/coderd/prometheusmetrics"
"github.com/coder/coder/v2/coderd/schedule"
"github.com/coder/coder/v2/coderd/tracing"
"github.com/coder/coder/v2/codersdk/agentsdk"
"github.com/coder/coder/v2/tailnet"
)
const AgentAPIVersionDRPC = "2.0"
// API implements the DRPC agent API interface from agent/proto. This struct is
// instantiated once per agent connection and kept alive for the duration of the
// session.
type API struct {
opts Options
*ManifestAPI
*ServiceBannerAPI
*StatsAPI
*LifecycleAPI
*AppsAPI
*MetadataAPI
*LogsAPI
*TailnetAPI
mu sync.Mutex
cachedWorkspaceID uuid.UUID
}
var _ agentproto.DRPCAgentServer = &API{}
type Options struct {
AgentID uuid.UUID
Ctx context.Context
Log slog.Logger
Database database.Store
Pubsub pubsub.Pubsub
DerpMapFn func() *tailcfg.DERPMap
TailnetCoordinator *atomic.Pointer[tailnet.Coordinator]
TemplateScheduleStore *atomic.Pointer[schedule.TemplateScheduleStore]
StatsBatcher *batchstats.Batcher
PublishWorkspaceUpdateFn func(ctx context.Context, workspaceID uuid.UUID)
PublishWorkspaceAgentLogsUpdateFn func(ctx context.Context, workspaceAgentID uuid.UUID, msg agentsdk.LogsNotifyMessage)
AccessURL *url.URL
AppHostname string
AgentInactiveDisconnectTimeout time.Duration
AgentFallbackTroubleshootingURL string
AgentStatsRefreshInterval time.Duration
DisableDirectConnections bool
DerpForceWebSockets bool
DerpMapUpdateFrequency time.Duration
ExternalAuthConfigs []*externalauth.Config
// Optional:
// WorkspaceID avoids a future lookup to find the workspace ID by setting
// the cache in advance.
WorkspaceID uuid.UUID
UpdateAgentMetricsFn func(ctx context.Context, labels prometheusmetrics.AgentMetricLabels, metrics []*agentproto.Stats_Metric)
}
func New(opts Options) *API {
api := &API{
opts: opts,
mu: sync.Mutex{},
cachedWorkspaceID: opts.WorkspaceID,
}
api.ManifestAPI = &ManifestAPI{
AccessURL: opts.AccessURL,
AppHostname: opts.AppHostname,
AgentInactiveDisconnectTimeout: opts.AgentInactiveDisconnectTimeout,
AgentFallbackTroubleshootingURL: opts.AgentFallbackTroubleshootingURL,
ExternalAuthConfigs: opts.ExternalAuthConfigs,
DisableDirectConnections: opts.DisableDirectConnections,
DerpForceWebSockets: opts.DerpForceWebSockets,
AgentFn: api.agent,
Database: opts.Database,
DerpMapFn: opts.DerpMapFn,
TailnetCoordinator: opts.TailnetCoordinator,
}
api.ServiceBannerAPI = &ServiceBannerAPI{
Database: opts.Database,
}
api.StatsAPI = &StatsAPI{
AgentFn: api.agent,
Database: opts.Database,
Log: opts.Log,
StatsBatcher: opts.StatsBatcher,
TemplateScheduleStore: opts.TemplateScheduleStore,
AgentStatsRefreshInterval: opts.AgentStatsRefreshInterval,
UpdateAgentMetricsFn: opts.UpdateAgentMetricsFn,
}
api.LifecycleAPI = &LifecycleAPI{
AgentFn: api.agent,
WorkspaceIDFn: api.workspaceID,
Database: opts.Database,
Log: opts.Log,
PublishWorkspaceUpdateFn: api.publishWorkspaceUpdate,
}
api.AppsAPI = &AppsAPI{
AgentFn: api.agent,
Database: opts.Database,
Log: opts.Log,
PublishWorkspaceUpdateFn: api.publishWorkspaceUpdate,
}
api.MetadataAPI = &MetadataAPI{
AgentFn: api.agent,
Database: opts.Database,
Pubsub: opts.Pubsub,
Log: opts.Log,
}
api.LogsAPI = &LogsAPI{
AgentFn: api.agent,
Database: opts.Database,
Log: opts.Log,
PublishWorkspaceUpdateFn: api.publishWorkspaceUpdate,
PublishWorkspaceAgentLogsUpdateFn: opts.PublishWorkspaceAgentLogsUpdateFn,
}
api.TailnetAPI = &TailnetAPI{
Ctx: opts.Ctx,
DerpMapFn: opts.DerpMapFn,
DerpMapUpdateFrequency: opts.DerpMapUpdateFrequency,
}
return api
}
func (a *API) Server(ctx context.Context) (*drpcserver.Server, error) {
mux := drpcmux.New()
err := agentproto.DRPCRegisterAgent(mux, a)
if err != nil {
return nil, xerrors.Errorf("register agent API protocol in DRPC mux: %w", err)
}
return drpcserver.NewWithOptions(&tracing.DRPCHandler{Handler: mux},
drpcserver.Options{
Log: func(err error) {
if xerrors.Is(err, io.EOF) {
return
}
a.opts.Log.Debug(ctx, "drpc server error", slog.Error(err))
},
},
), nil
}
func (a *API) Serve(ctx context.Context, l net.Listener) error {
server, err := a.Server(ctx)
if err != nil {
return xerrors.Errorf("create agent API server: %w", err)
}
return server.Serve(ctx, l)
}
func (a *API) agent(ctx context.Context) (database.WorkspaceAgent, error) {
agent, err := a.opts.Database.GetWorkspaceAgentByID(ctx, a.opts.AgentID)
if err != nil {
return database.WorkspaceAgent{}, xerrors.Errorf("get workspace agent by id %q: %w", a.opts.AgentID, err)
}
return agent, nil
}
func (a *API) workspaceID(ctx context.Context, agent *database.WorkspaceAgent) (uuid.UUID, error) {
a.mu.Lock()
if a.cachedWorkspaceID != uuid.Nil {
id := a.cachedWorkspaceID
a.mu.Unlock()
return id, nil
}
if agent == nil {
agnt, err := a.agent(ctx)
if err != nil {
return uuid.Nil, err
}
agent = &agnt
}
resource, err := a.opts.Database.GetWorkspaceResourceByID(ctx, agent.ResourceID)
if err != nil {
return uuid.Nil, xerrors.Errorf("get workspace agent resource by id %q: %w", agent.ResourceID, err)
}
build, err := a.opts.Database.GetWorkspaceBuildByJobID(ctx, resource.JobID)
if err != nil {
return uuid.Nil, xerrors.Errorf("get workspace build by job id %q: %w", resource.JobID, err)
}
a.mu.Lock()
a.cachedWorkspaceID = build.WorkspaceID
a.mu.Unlock()
return build.WorkspaceID, nil
}
func (a *API) publishWorkspaceUpdate(ctx context.Context, agent *database.WorkspaceAgent) error {
workspaceID, err := a.workspaceID(ctx, agent)
if err != nil {
return err
}
a.opts.PublishWorkspaceUpdateFn(ctx, workspaceID)
return nil
}
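For reference, a client for this server can be built from any established net.Conn using storj.io/drpc's drpcconn package; a minimal sketch, assuming the generated agentproto.NewDRPCAgentClient constructor (the real listener and handshake plumbing in coderd is more involved):

package example

import (
    "context"
    "net"

    "storj.io/drpc/drpcconn"

    agentproto "github.com/coder/coder/v2/agent/proto"
)

// dialAgentAPI wraps an established connection in a DRPC client and issues
// a GetManifest call as a smoke test.
func dialAgentAPI(ctx context.Context, conn net.Conn) (agentproto.DRPCAgentClient, error) {
    client := agentproto.NewDRPCAgentClient(drpcconn.New(conn))
    _, err := client.GetManifest(ctx, &agentproto.GetManifestRequest{})
    return client, err
}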

coderd/agentapi/apps.go (new file, 98 lines)

@@ -0,0 +1,98 @@
package agentapi
import (
"context"
"github.com/google/uuid"
"golang.org/x/xerrors"
"cdr.dev/slog"
agentproto "github.com/coder/coder/v2/agent/proto"
"github.com/coder/coder/v2/coderd/database"
)
type AppsAPI struct {
AgentFn func(context.Context) (database.WorkspaceAgent, error)
Database database.Store
Log slog.Logger
PublishWorkspaceUpdateFn func(context.Context, *database.WorkspaceAgent) error
}
func (a *AppsAPI) BatchUpdateAppHealths(ctx context.Context, req *agentproto.BatchUpdateAppHealthRequest) (*agentproto.BatchUpdateAppHealthResponse, error) {
workspaceAgent, err := a.AgentFn(ctx)
if err != nil {
return nil, err
}
if len(req.Updates) == 0 {
return &agentproto.BatchUpdateAppHealthResponse{}, nil
}
apps, err := a.Database.GetWorkspaceAppsByAgentID(ctx, workspaceAgent.ID)
if err != nil {
return nil, xerrors.Errorf("get workspace apps by agent ID %q: %w", workspaceAgent.ID, err)
}
var newApps []database.WorkspaceApp
for _, update := range req.Updates {
updateID, err := uuid.FromBytes(update.Id)
if err != nil {
return nil, xerrors.Errorf("parse workspace app ID %q: %w", update.Id, err)
}
old := func() *database.WorkspaceApp {
for _, app := range apps {
if app.ID == updateID {
return &app
}
}
return nil
}()
if old == nil {
return nil, xerrors.Errorf("workspace app ID %q not found", updateID)
}
if old.HealthcheckUrl == "" {
return nil, xerrors.Errorf("workspace app %q (%q) does not have healthchecks enabled", updateID, old.Slug)
}
var newHealth database.WorkspaceAppHealth
switch update.Health {
case agentproto.AppHealth_DISABLED:
newHealth = database.WorkspaceAppHealthDisabled
case agentproto.AppHealth_INITIALIZING:
newHealth = database.WorkspaceAppHealthInitializing
case agentproto.AppHealth_HEALTHY:
newHealth = database.WorkspaceAppHealthHealthy
case agentproto.AppHealth_UNHEALTHY:
newHealth = database.WorkspaceAppHealthUnhealthy
default:
return nil, xerrors.Errorf("unknown health status %q for app %q (%q)", update.Health, updateID, old.Slug)
}
// Don't bother updating if the value hasn't changed.
if old.Health == newHealth {
continue
}
old.Health = newHealth
newApps = append(newApps, *old)
}
for _, app := range newApps {
err = a.Database.UpdateWorkspaceAppHealthByID(ctx, database.UpdateWorkspaceAppHealthByIDParams{
ID: app.ID,
Health: app.Health,
})
if err != nil {
return nil, xerrors.Errorf("update workspace app health for app %q (%q): %w", err, app.ID, app.Slug)
}
}
err = a.PublishWorkspaceUpdateFn(ctx, &workspaceAgent)
if err != nil {
return nil, xerrors.Errorf("publish workspace update: %w", err)
}
return &agentproto.BatchUpdateAppHealthResponse{}, nil
}
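From the agent side, a health report against the new request shape looks roughly like this (a sketch reusing the hypothetical client from the api.go note; the nested Go type name follows standard protoc-gen-go naming):

package example

import (
    "context"

    "github.com/google/uuid"

    agentproto "github.com/coder/coder/v2/agent/proto"
)

func reportAppHealthy(ctx context.Context, client agentproto.DRPCAgentClient, appID uuid.UUID) error {
    _, err := client.BatchUpdateAppHealths(ctx, &agentproto.BatchUpdateAppHealthRequest{
        Updates: []*agentproto.BatchUpdateAppHealthRequest_HealthUpdate{{
            Id:     appID[:], // UUIDs are sent as 16 raw bytes, per the proto change
            Health: agentproto.AppHealth_HEALTHY,
        }},
    })
    return err
}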


@@ -0,0 +1,163 @@
package agentapi
import (
"context"
"database/sql"
"github.com/google/uuid"
"golang.org/x/mod/semver"
"golang.org/x/xerrors"
"google.golang.org/protobuf/types/known/timestamppb"
"cdr.dev/slog"
agentproto "github.com/coder/coder/v2/agent/proto"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/dbtime"
)
type LifecycleAPI struct {
AgentFn func(context.Context) (database.WorkspaceAgent, error)
WorkspaceIDFn func(context.Context, *database.WorkspaceAgent) (uuid.UUID, error)
Database database.Store
Log slog.Logger
PublishWorkspaceUpdateFn func(context.Context, *database.WorkspaceAgent) error
}
func (a *LifecycleAPI) UpdateLifecycle(ctx context.Context, req *agentproto.UpdateLifecycleRequest) (*agentproto.Lifecycle, error) {
workspaceAgent, err := a.AgentFn(ctx)
if err != nil {
return nil, err
}
workspaceID, err := a.WorkspaceIDFn(ctx, &workspaceAgent)
if err != nil {
return nil, err
}
logger := a.Log.With(
slog.F("workspace_id", workspaceID),
slog.F("payload", req),
)
logger.Debug(ctx, "workspace agent state report")
var lifecycleState database.WorkspaceAgentLifecycleState
switch req.Lifecycle.State {
case agentproto.Lifecycle_CREATED:
lifecycleState = database.WorkspaceAgentLifecycleStateCreated
case agentproto.Lifecycle_STARTING:
lifecycleState = database.WorkspaceAgentLifecycleStateStarting
case agentproto.Lifecycle_START_TIMEOUT:
lifecycleState = database.WorkspaceAgentLifecycleStateStartTimeout
case agentproto.Lifecycle_START_ERROR:
lifecycleState = database.WorkspaceAgentLifecycleStateStartError
case agentproto.Lifecycle_READY:
lifecycleState = database.WorkspaceAgentLifecycleStateReady
case agentproto.Lifecycle_SHUTTING_DOWN:
lifecycleState = database.WorkspaceAgentLifecycleStateShuttingDown
case agentproto.Lifecycle_SHUTDOWN_TIMEOUT:
lifecycleState = database.WorkspaceAgentLifecycleStateShutdownTimeout
case agentproto.Lifecycle_SHUTDOWN_ERROR:
lifecycleState = database.WorkspaceAgentLifecycleStateShutdownError
case agentproto.Lifecycle_OFF:
lifecycleState = database.WorkspaceAgentLifecycleStateOff
default:
return nil, xerrors.Errorf("unknown lifecycle state %q", req.Lifecycle.State)
}
if !lifecycleState.Valid() {
return nil, xerrors.Errorf("unknown lifecycle state %q", req.Lifecycle.State)
}
changedAt := req.Lifecycle.ChangedAt.AsTime()
if changedAt.IsZero() {
changedAt = dbtime.Now()
req.Lifecycle.ChangedAt = timestamppb.New(changedAt)
}
dbChangedAt := sql.NullTime{Time: changedAt, Valid: true}
startedAt := workspaceAgent.StartedAt
readyAt := workspaceAgent.ReadyAt
switch lifecycleState {
case database.WorkspaceAgentLifecycleStateStarting:
startedAt = dbChangedAt
readyAt.Valid = false // This agent is re-starting, so it's not ready yet.
case database.WorkspaceAgentLifecycleStateReady, database.WorkspaceAgentLifecycleStateStartError:
readyAt = dbChangedAt
}
err = a.Database.UpdateWorkspaceAgentLifecycleStateByID(ctx, database.UpdateWorkspaceAgentLifecycleStateByIDParams{
ID: workspaceAgent.ID,
LifecycleState: lifecycleState,
StartedAt: startedAt,
ReadyAt: readyAt,
})
if err != nil {
if !xerrors.Is(err, context.Canceled) {
// not an error if we are canceled
logger.Error(ctx, "failed to update lifecycle state", slog.Error(err))
}
return nil, xerrors.Errorf("update workspace agent lifecycle state: %w", err)
}
err = a.PublishWorkspaceUpdateFn(ctx, &workspaceAgent)
if err != nil {
return nil, xerrors.Errorf("publish workspace update: %w", err)
}
return req.Lifecycle, nil
}
func (a *LifecycleAPI) UpdateStartup(ctx context.Context, req *agentproto.UpdateStartupRequest) (*agentproto.Startup, error) {
workspaceAgent, err := a.AgentFn(ctx)
if err != nil {
return nil, err
}
workspaceID, err := a.WorkspaceIDFn(ctx, &workspaceAgent)
if err != nil {
return nil, err
}
a.Log.Debug(
ctx,
"post workspace agent version",
slog.F("workspace_id", workspaceID),
slog.F("agent_version", req.Startup.Version),
)
if !semver.IsValid(req.Startup.Version) {
return nil, xerrors.Errorf("invalid agent semver version %q", req.Startup.Version)
}
// Validate subsystems.
dbSubsystems := make([]database.WorkspaceAgentSubsystem, 0, len(req.Startup.Subsystems))
seenSubsystems := make(map[database.WorkspaceAgentSubsystem]struct{}, len(req.Startup.Subsystems))
for _, s := range req.Startup.Subsystems {
var dbSubsystem database.WorkspaceAgentSubsystem
switch s {
case agentproto.Startup_ENVBOX:
dbSubsystem = database.WorkspaceAgentSubsystemEnvbox
case agentproto.Startup_ENVBUILDER:
dbSubsystem = database.WorkspaceAgentSubsystemEnvbuilder
case agentproto.Startup_EXECTRACE:
dbSubsystem = database.WorkspaceAgentSubsystemExectrace
default:
return nil, xerrors.Errorf("invalid agent subsystem %q", s)
}
if _, ok := seenSubsystems[dbSubsystem]; !ok {
seenSubsystems[dbSubsystem] = struct{}{}
dbSubsystems = append(dbSubsystems, dbSubsystem)
}
}
err = a.Database.UpdateWorkspaceAgentStartupByID(ctx, database.UpdateWorkspaceAgentStartupByIDParams{
ID: workspaceAgent.ID,
Version: req.Startup.Version,
ExpandedDirectory: req.Startup.ExpandedDirectory,
Subsystems: dbSubsystems,
APIVersion: AgentAPIVersionDRPC,
})
if err != nil {
return nil, xerrors.Errorf("update workspace agent startup in database: %w", err)
}
return req.Startup, nil
}
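One subtlety in UpdateStartup: golang.org/x/mod/semver only accepts versions with a leading "v", so agents must report e.g. "v2.6.0". A quick standalone illustration:

package main

import (
    "fmt"

    "golang.org/x/mod/semver"
)

func main() {
    fmt.Println(semver.IsValid("v2.6.0")) // true
    fmt.Println(semver.IsValid("2.6.0"))  // false: the leading "v" is required
}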

coderd/agentapi/logs.go (new file, 139 lines)

@@ -0,0 +1,139 @@
package agentapi
import (
"context"
"github.com/google/uuid"
"golang.org/x/xerrors"
"cdr.dev/slog"
agentproto "github.com/coder/coder/v2/agent/proto"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/dbtime"
"github.com/coder/coder/v2/codersdk/agentsdk"
)
type LogsAPI struct {
AgentFn func(context.Context) (database.WorkspaceAgent, error)
Database database.Store
Log slog.Logger
PublishWorkspaceUpdateFn func(context.Context, *database.WorkspaceAgent) error
PublishWorkspaceAgentLogsUpdateFn func(ctx context.Context, workspaceAgentID uuid.UUID, msg agentsdk.LogsNotifyMessage)
}
func (a *LogsAPI) BatchCreateLogs(ctx context.Context, req *agentproto.BatchCreateLogsRequest) (*agentproto.BatchCreateLogsResponse, error) {
workspaceAgent, err := a.AgentFn(ctx)
if err != nil {
return nil, err
}
if len(req.Logs) == 0 {
return &agentproto.BatchCreateLogsResponse{}, nil
}
logSourceID, err := uuid.FromBytes(req.LogSourceId)
if err != nil {
return nil, xerrors.Errorf("parse log source ID %q: %w", req.LogSourceId, err)
}
// This is to support the legacy API where the log source ID was
// not provided in the request body. We default to the external
// log source in this case.
if logSourceID == uuid.Nil {
// Use the external log source
externalSources, err := a.Database.InsertWorkspaceAgentLogSources(ctx, database.InsertWorkspaceAgentLogSourcesParams{
WorkspaceAgentID: workspaceAgent.ID,
CreatedAt: dbtime.Now(),
ID: []uuid.UUID{agentsdk.ExternalLogSourceID},
DisplayName: []string{"External"},
Icon: []string{"/emojis/1f310.png"},
})
if database.IsUniqueViolation(err, database.UniqueWorkspaceAgentLogSourcesPkey) {
err = nil
logSourceID = agentsdk.ExternalLogSourceID
}
if err != nil {
return nil, xerrors.Errorf("insert external workspace agent log source: %w", err)
}
if len(externalSources) == 1 {
logSourceID = externalSources[0].ID
}
}
output := make([]string, 0)
level := make([]database.LogLevel, 0)
outputLength := 0
for _, logEntry := range req.Logs {
output = append(output, logEntry.Output)
outputLength += len(logEntry.Output)
var dbLevel database.LogLevel
switch logEntry.Level {
case agentproto.Log_TRACE:
dbLevel = database.LogLevelTrace
case agentproto.Log_DEBUG:
dbLevel = database.LogLevelDebug
case agentproto.Log_INFO:
dbLevel = database.LogLevelInfo
case agentproto.Log_WARN:
dbLevel = database.LogLevelWarn
case agentproto.Log_ERROR:
dbLevel = database.LogLevelError
default:
// Default to "info" to support older clients that didn't have the
// level field.
dbLevel = database.LogLevelInfo
}
level = append(level, dbLevel)
}
logs, err := a.Database.InsertWorkspaceAgentLogs(ctx, database.InsertWorkspaceAgentLogsParams{
AgentID: workspaceAgent.ID,
CreatedAt: dbtime.Now(),
Output: output,
Level: level,
LogSourceID: logSourceID,
OutputLength: int32(outputLength),
})
if err != nil {
if !database.IsWorkspaceAgentLogsLimitError(err) {
return nil, xerrors.Errorf("insert workspace agent logs: %w", err)
}
if workspaceAgent.LogsOverflowed {
return nil, xerrors.New("workspace agent logs overflowed")
}
err := a.Database.UpdateWorkspaceAgentLogOverflowByID(ctx, database.UpdateWorkspaceAgentLogOverflowByIDParams{
ID: workspaceAgent.ID,
LogsOverflowed: true,
})
if err != nil {
// We don't want to return here, because the agent will retry on
// failure and this isn't a huge deal. The overflow state is just a
// hint to the user that the logs are incomplete.
a.Log.Warn(ctx, "failed to update workspace agent log overflow", slog.Error(err))
}
err = a.PublishWorkspaceUpdateFn(ctx, &workspaceAgent)
if err != nil {
return nil, xerrors.Errorf("publish workspace update: %w", err)
}
return nil, xerrors.New("workspace agent log limit exceeded")
}
// Publish by the lowest log ID inserted so the log stream will fetch
// everything from that point.
lowestLogID := logs[0].ID
a.PublishWorkspaceAgentLogsUpdateFn(ctx, workspaceAgent.ID, agentsdk.LogsNotifyMessage{
CreatedAfter: lowestLogID - 1,
})
if workspaceAgent.LogsLength == 0 {
// If these are the first logs being appended, we publish a UI update
// to notify the UI that logs are now available.
err = a.PublishWorkspaceUpdateFn(ctx, &workspaceAgent)
if err != nil {
return nil, xerrors.Errorf("publish workspace update: %w", err)
}
}
return &agentproto.BatchCreateLogsResponse{}, nil
}
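An agent-side call into this endpoint, sketched against the request fields visible above (Output and Level match the switch; the client type is the same hypothetical one as in the earlier notes):

package example

import (
    "context"

    "github.com/google/uuid"

    agentproto "github.com/coder/coder/v2/agent/proto"
)

func pushLogs(ctx context.Context, client agentproto.DRPCAgentClient, sourceID uuid.UUID, lines []string) error {
    logs := make([]*agentproto.Log, 0, len(lines))
    for _, line := range lines {
        logs = append(logs, &agentproto.Log{Output: line, Level: agentproto.Log_INFO})
    }
    _, err := client.BatchCreateLogs(ctx, &agentproto.BatchCreateLogsRequest{
        LogSourceId: sourceID[:], // all-zero bytes fall back to the "External" source above
        Logs:        logs,
    })
    return err
}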

coderd/agentapi/manifest.go (new file, 153 lines)

@@ -0,0 +1,153 @@
package agentapi
import (
"context"
"database/sql"
"fmt"
"net/url"
"strings"
"sync/atomic"
"time"
"github.com/google/uuid"
"golang.org/x/sync/errgroup"
"golang.org/x/xerrors"
"tailscale.com/tailcfg"
agentproto "github.com/coder/coder/v2/agent/proto"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/db2sdk"
"github.com/coder/coder/v2/coderd/database/dbauthz"
"github.com/coder/coder/v2/coderd/externalauth"
"github.com/coder/coder/v2/coderd/httpapi"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/tailnet"
)
type ManifestAPI struct {
AccessURL *url.URL
AppHostname string
AgentInactiveDisconnectTimeout time.Duration
AgentFallbackTroubleshootingURL string
ExternalAuthConfigs []*externalauth.Config
DisableDirectConnections bool
DerpForceWebSockets bool
AgentFn func(context.Context) (database.WorkspaceAgent, error)
Database database.Store
DerpMapFn func() *tailcfg.DERPMap
TailnetCoordinator *atomic.Pointer[tailnet.Coordinator]
}
func (a *ManifestAPI) GetManifest(ctx context.Context, _ *agentproto.GetManifestRequest) (*agentproto.Manifest, error) {
workspaceAgent, err := a.AgentFn(ctx)
if err != nil {
return nil, err
}
apiAgent, err := db2sdk.WorkspaceAgent(
a.DerpMapFn(), *a.TailnetCoordinator.Load(), workspaceAgent, nil, nil, nil, a.AgentInactiveDisconnectTimeout,
a.AgentFallbackTroubleshootingURL,
)
if err != nil {
return nil, xerrors.Errorf("converting workspace agent: %w", err)
}
var (
dbApps []database.WorkspaceApp
scripts []database.WorkspaceAgentScript
metadata []database.WorkspaceAgentMetadatum
resource database.WorkspaceResource
build database.WorkspaceBuild
workspace database.Workspace
owner database.User
)
var eg errgroup.Group
eg.Go(func() (err error) {
dbApps, err = a.Database.GetWorkspaceAppsByAgentID(ctx, workspaceAgent.ID)
if err != nil && !xerrors.Is(err, sql.ErrNoRows) {
return err
}
return nil
})
eg.Go(func() (err error) {
// nolint:gocritic // This is necessary to fetch agent scripts!
scripts, err = a.Database.GetWorkspaceAgentScriptsByAgentIDs(dbauthz.AsSystemRestricted(ctx), []uuid.UUID{workspaceAgent.ID})
return err
})
eg.Go(func() (err error) {
metadata, err = a.Database.GetWorkspaceAgentMetadata(ctx, database.GetWorkspaceAgentMetadataParams{
WorkspaceAgentID: workspaceAgent.ID,
Keys: nil,
})
return err
})
eg.Go(func() (err error) {
resource, err = a.Database.GetWorkspaceResourceByID(ctx, workspaceAgent.ResourceID)
if err != nil {
return xerrors.Errorf("getting resource by id: %w", err)
}
build, err = a.Database.GetWorkspaceBuildByJobID(ctx, resource.JobID)
if err != nil {
return xerrors.Errorf("getting workspace build by job id: %w", err)
}
workspace, err = a.Database.GetWorkspaceByID(ctx, build.WorkspaceID)
if err != nil {
return xerrors.Errorf("getting workspace by id: %w", err)
}
owner, err = a.Database.GetUserByID(ctx, workspace.OwnerID)
if err != nil {
return xerrors.Errorf("getting workspace owner by id: %w", err)
}
return err
})
err = eg.Wait()
if err != nil {
return nil, xerrors.Errorf("fetching workspace agent data: %w", err)
}
appHost := httpapi.ApplicationURL{
AppSlugOrPort: "{{port}}",
AgentName: workspaceAgent.Name,
WorkspaceName: workspace.Name,
Username: owner.Username,
}
vscodeProxyURI := a.AccessURL.Scheme + "://" + strings.ReplaceAll(a.AppHostname, "*", appHost.String())
if a.AppHostname == "" {
vscodeProxyURI += a.AccessURL.Hostname()
}
if a.AccessURL.Port() != "" {
vscodeProxyURI += fmt.Sprintf(":%s", a.AccessURL.Port())
}
var gitAuthConfigs uint32
for _, cfg := range a.ExternalAuthConfigs {
if codersdk.EnhancedExternalAuthProvider(cfg.Type).Git() {
gitAuthConfigs++
}
}
apps, err := agentproto.DBAppsToProto(dbApps, workspaceAgent, owner.Username, workspace)
if err != nil {
return nil, xerrors.Errorf("converting workspace apps: %w", err)
}
return &agentproto.Manifest{
AgentId: workspaceAgent.ID[:],
OwnerUsername: owner.Username,
WorkspaceId: workspace.ID[:],
GitAuthConfigs: gitAuthConfigs,
EnvironmentVariables: apiAgent.EnvironmentVariables,
Directory: apiAgent.Directory,
VsCodePortProxyUri: vscodeProxyURI,
MotdPath: workspaceAgent.MOTDFile,
DisableDirectConnections: a.DisableDirectConnections,
DerpForceWebsockets: a.DerpForceWebSockets,
DerpMap: tailnet.DERPMapToProto(a.DerpMapFn()),
Scripts: agentproto.DBAgentScriptsToProto(scripts),
Apps: apps,
Metadata: agentproto.DBAgentMetadataToProtoDescription(metadata),
}, nil
}

coderd/agentapi/metadata.go (new file, 115 lines)

@@ -0,0 +1,115 @@
package agentapi
import (
"context"
"encoding/json"
"fmt"
"time"
"github.com/google/uuid"
"golang.org/x/xerrors"
"cdr.dev/slog"
agentproto "github.com/coder/coder/v2/agent/proto"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/pubsub"
)
type MetadataAPI struct {
AgentFn func(context.Context) (database.WorkspaceAgent, error)
Database database.Store
Pubsub pubsub.Pubsub
Log slog.Logger
}
func (a *MetadataAPI) BatchUpdateMetadata(ctx context.Context, req *agentproto.BatchUpdateMetadataRequest) (*agentproto.BatchUpdateMetadataResponse, error) {
const (
// maxValueLen is set to 2048 to stay under the 8000 byte Postgres
// NOTIFY limit. Since both value and error can be set, the real payload
// limit is 2 * 2048 * 4/3 <base64 expansion> = 5461 bytes + a few
// hundred bytes for JSON syntax, key names, and metadata.
maxValueLen = 2048
maxErrorLen = maxValueLen
)
workspaceAgent, err := a.AgentFn(ctx)
if err != nil {
return nil, err
}
collectedAt := time.Now()
dbUpdate := database.UpdateWorkspaceAgentMetadataParams{
WorkspaceAgentID: workspaceAgent.ID,
Key: make([]string, 0, len(req.Metadata)),
Value: make([]string, 0, len(req.Metadata)),
Error: make([]string, 0, len(req.Metadata)),
CollectedAt: make([]time.Time, 0, len(req.Metadata)),
}
for _, md := range req.Metadata {
metadataError := md.Result.Error
// We overwrite the error if the provided payload is too long.
if len(md.Result.Value) > maxValueLen {
metadataError = fmt.Sprintf("value of %d bytes exceeded %d bytes", len(md.Result.Value), maxValueLen)
md.Result.Value = md.Result.Value[:maxValueLen]
}
if len(md.Result.Error) > maxErrorLen {
metadataError = fmt.Sprintf("error of %d bytes exceeded %d bytes", len(md.Result.Error), maxErrorLen)
md.Result.Error = ""
}
// We don't want a misconfigured agent to fill the database.
dbUpdate.Key = append(dbUpdate.Key, md.Key)
dbUpdate.Value = append(dbUpdate.Value, md.Result.Value)
dbUpdate.Error = append(dbUpdate.Error, metadataError)
// We ignore the CollectedAt from the agent to avoid bugs caused by
// clock skew.
dbUpdate.CollectedAt = append(dbUpdate.CollectedAt, collectedAt)
a.Log.Debug(
ctx, "accepted metadata report",
slog.F("collected_at", collectedAt),
slog.F("original_collected_at", collectedAt),
slog.F("key", md.Key),
slog.F("value", ellipse(md.Result.Value, 16)),
)
}
payload, err := json.Marshal(WorkspaceAgentMetadataChannelPayload{
CollectedAt: collectedAt,
Keys: dbUpdate.Key,
})
if err != nil {
return nil, xerrors.Errorf("marshal workspace agent metadata channel payload: %w", err)
}
err = a.Database.UpdateWorkspaceAgentMetadata(ctx, dbUpdate)
if err != nil {
return nil, xerrors.Errorf("update workspace agent metadata in database: %w", err)
}
err = a.Pubsub.Publish(WatchWorkspaceAgentMetadataChannel(workspaceAgent.ID), payload)
if err != nil {
return nil, xerrors.Errorf("publish workspace agent metadata: %w", err)
}
return &agentproto.BatchUpdateMetadataResponse{}, nil
}
func ellipse(v string, n int) string {
if len(v) > n {
return v[:n] + "..."
}
return v
}
type WorkspaceAgentMetadataChannelPayload struct {
CollectedAt time.Time `json:"collected_at"`
Keys []string `json:"keys"`
}
func WatchWorkspaceAgentMetadataChannel(id uuid.UUID) string {
return "workspace_agent_metadata:" + id.String()
}
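Consumers decode that payload off the per-agent channel; a subscriber sketch, assuming pubsub.Pubsub exposes Subscribe(channel string, listener func(ctx context.Context, message []byte)) (cancel func(), err error):

package example

import (
    "context"
    "encoding/json"
    "fmt"

    "github.com/google/uuid"

    "github.com/coder/coder/v2/coderd/agentapi"
    "github.com/coder/coder/v2/coderd/database/pubsub"
)

func watchMetadata(ps pubsub.Pubsub, agentID uuid.UUID) (cancel func(), err error) {
    return ps.Subscribe(agentapi.WatchWorkspaceAgentMetadataChannel(agentID), func(_ context.Context, message []byte) {
        var payload agentapi.WorkspaceAgentMetadataChannelPayload
        if err := json.Unmarshal(message, &payload); err != nil {
            return // ignore malformed payloads in this sketch
        }
        fmt.Printf("metadata keys %v collected at %s\n", payload.Keys, payload.CollectedAt)
    })
}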


@@ -0,0 +1,38 @@
package agentapi
import (
"context"
"database/sql"
"encoding/json"
"golang.org/x/xerrors"
agentproto "github.com/coder/coder/v2/agent/proto"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/codersdk"
)
type ServiceBannerAPI struct {
Database database.Store
}
func (a *ServiceBannerAPI) GetServiceBanner(ctx context.Context, _ *agentproto.GetServiceBannerRequest) (*agentproto.ServiceBanner, error) {
serviceBannerJSON, err := a.Database.GetServiceBanner(ctx)
if err != nil && !xerrors.Is(err, sql.ErrNoRows) {
return nil, xerrors.Errorf("get service banner: %w", err)
}
var cfg codersdk.ServiceBannerConfig
if serviceBannerJSON != "" {
err = json.Unmarshal([]byte(serviceBannerJSON), &cfg)
if err != nil {
return nil, xerrors.Errorf("unmarshal json: %w, raw: %s", err, serviceBannerJSON)
}
}
return &agentproto.ServiceBanner{
Enabled: cfg.Enabled,
Message: cfg.Message,
BackgroundColor: cfg.BackgroundColor,
}, nil
}

coderd/agentapi/stats.go (new file, 121 lines)

@@ -0,0 +1,121 @@
package agentapi
import (
"context"
"sync/atomic"
"time"
"golang.org/x/sync/errgroup"
"golang.org/x/xerrors"
"google.golang.org/protobuf/types/known/durationpb"
"cdr.dev/slog"
agentproto "github.com/coder/coder/v2/agent/proto"
"github.com/coder/coder/v2/coderd/autobuild"
"github.com/coder/coder/v2/coderd/batchstats"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/dbtime"
"github.com/coder/coder/v2/coderd/prometheusmetrics"
"github.com/coder/coder/v2/coderd/schedule"
)
type StatsAPI struct {
AgentFn func(context.Context) (database.WorkspaceAgent, error)
Database database.Store
Log slog.Logger
StatsBatcher *batchstats.Batcher
TemplateScheduleStore *atomic.Pointer[schedule.TemplateScheduleStore]
AgentStatsRefreshInterval time.Duration
UpdateAgentMetricsFn func(ctx context.Context, labels prometheusmetrics.AgentMetricLabels, metrics []*agentproto.Stats_Metric)
}
func (a *StatsAPI) UpdateStats(ctx context.Context, req *agentproto.UpdateStatsRequest) (*agentproto.UpdateStatsResponse, error) {
workspaceAgent, err := a.AgentFn(ctx)
if err != nil {
return nil, err
}
row, err := a.Database.GetWorkspaceByAgentID(ctx, workspaceAgent.ID)
if err != nil {
return nil, xerrors.Errorf("get workspace by agent ID %q: %w", workspaceAgent.ID, err)
}
workspace := row.Workspace
res := &agentproto.UpdateStatsResponse{
ReportInterval: durationpb.New(a.AgentStatsRefreshInterval),
}
// An empty stats report means the agent is just asking for the report interval.
if len(req.Stats.ConnectionsByProto) == 0 {
return res, nil
}
a.Log.Debug(ctx, "read stats report",
slog.F("interval", a.AgentStatsRefreshInterval),
slog.F("workspace_id", workspace.ID),
slog.F("payload", req),
)
if req.Stats.ConnectionCount > 0 {
var nextAutostart time.Time
if workspace.AutostartSchedule.String != "" {
templateSchedule, err := (*(a.TemplateScheduleStore.Load())).Get(ctx, a.Database, workspace.TemplateID)
// If the template schedule fails to load, just default to bumping without the next transition and log it.
if err != nil {
a.Log.Warn(ctx, "failed to load template schedule bumping activity, defaulting to bumping by 60min",
slog.F("workspace_id", workspace.ID),
slog.F("template_id", workspace.TemplateID),
slog.Error(err),
)
} else {
next, allowed := autobuild.NextAutostartSchedule(time.Now(), workspace.AutostartSchedule.String, templateSchedule)
if allowed {
nextAutostart = next
}
}
}
ActivityBumpWorkspace(ctx, a.Log.Named("activity_bump"), a.Database, workspace.ID, nextAutostart)
}
now := dbtime.Now()
var errGroup errgroup.Group
errGroup.Go(func() error {
if err := a.StatsBatcher.Add(time.Now(), workspaceAgent.ID, workspace.TemplateID, workspace.OwnerID, workspace.ID, req.Stats); err != nil {
a.Log.Error(ctx, "failed to add stats to batcher", slog.Error(err))
return xerrors.Errorf("can't insert workspace agent stat: %w", err)
}
return nil
})
errGroup.Go(func() error {
err := a.Database.UpdateWorkspaceLastUsedAt(ctx, database.UpdateWorkspaceLastUsedAtParams{
ID: workspace.ID,
LastUsedAt: now,
})
if err != nil {
return xerrors.Errorf("can't update workspace LastUsedAt: %w", err)
}
return nil
})
if a.UpdateAgentMetricsFn != nil {
errGroup.Go(func() error {
user, err := a.Database.GetUserByID(ctx, workspace.OwnerID)
if err != nil {
return xerrors.Errorf("can't get user: %w", err)
}
a.UpdateAgentMetricsFn(ctx, prometheusmetrics.AgentMetricLabels{
Username: user.Username,
WorkspaceName: workspace.Name,
AgentName: workspaceAgent.Name,
TemplateName: row.TemplateName,
}, req.Stats.Metrics)
return nil
})
}
err = errGroup.Wait()
if err != nil {
return nil, xerrors.Errorf("update stats in database: %w", err)
}
return res, nil
}
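Because of the early return above, an agent can discover the report interval by sending an empty stats payload; a sketch using the same hypothetical client:

package example

import (
    "context"
    "time"

    agentproto "github.com/coder/coder/v2/agent/proto"
)

func reportInterval(ctx context.Context, client agentproto.DRPCAgentClient) (time.Duration, error) {
    resp, err := client.UpdateStats(ctx, &agentproto.UpdateStatsRequest{
        Stats: &agentproto.Stats{}, // no ConnectionsByProto: the server replies with just the interval
    })
    if err != nil {
        return 0, err
    }
    return resp.ReportInterval.AsDuration(), nil
}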


@@ -0,0 +1,53 @@
package agentapi
import (
"context"
"time"
"golang.org/x/xerrors"
"tailscale.com/tailcfg"
agentproto "github.com/coder/coder/v2/agent/proto"
"github.com/coder/coder/v2/tailnet"
tailnetproto "github.com/coder/coder/v2/tailnet/proto"
)
type TailnetAPI struct {
Ctx context.Context
DerpMapFn func() *tailcfg.DERPMap
DerpMapUpdateFrequency time.Duration
}
func (a *TailnetAPI) StreamDERPMaps(_ *tailnetproto.StreamDERPMapsRequest, stream agentproto.DRPCAgent_StreamDERPMapsStream) error {
defer stream.Close()
ticker := time.NewTicker(a.DerpMapUpdateFrequency)
defer ticker.Stop()
var lastDERPMap *tailcfg.DERPMap
for {
derpMap := a.DerpMapFn()
if lastDERPMap == nil || !tailnet.CompareDERPMaps(lastDERPMap, derpMap) {
protoDERPMap := tailnet.DERPMapToProto(derpMap)
err := stream.Send(protoDERPMap)
if err != nil {
return xerrors.Errorf("send derp map: %w", err)
}
lastDERPMap = derpMap
}
ticker.Reset(a.DerpMapUpdateFrequency)
select {
case <-stream.Context().Done():
return nil
case <-a.Ctx.Done():
return nil
case <-ticker.C:
}
}
}
func (*TailnetAPI) CoordinateTailnet(_ agentproto.DRPCAgent_CoordinateTailnetStream) error {
// TODO: implement this
return xerrors.New("CoordinateTailnet is unimplemented")
}
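The generated stream client pairs with this server loop via a blocking Recv; a consumption sketch (client and stream shapes assumed from the DRPC codegen):

package example

import (
    "context"

    agentproto "github.com/coder/coder/v2/agent/proto"
    tailnetproto "github.com/coder/coder/v2/tailnet/proto"
)

func watchDERPMaps(ctx context.Context, client agentproto.DRPCAgentClient) error {
    stream, err := client.StreamDERPMaps(ctx, &tailnetproto.StreamDERPMapsRequest{})
    if err != nil {
        return err
    }
    defer stream.Close()
    for {
        derpMap, err := stream.Recv() // blocks until the server pushes a changed map
        if err != nil {
            return err // includes stream closure when either side's context ends
        }
        _ = derpMap // apply the update to the local tailnet connection
    }
}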

coderd/apidoc/docs.go (generated, 22 lines changed)

@@ -5269,6 +5269,28 @@ const docTemplate = `{
}
}
},
"/workspaceagents/me/rpc": {
"get": {
"security": [
{
"CoderSessionToken": []
}
],
"tags": [
"Agents"
],
"summary": "Workspace agent RPC API",
"operationId": "workspace-agent-rpc-api",
"responses": {
"101": {
"description": "Switching Protocols"
}
},
"x-apidocgen": {
"skip": true
}
}
},
"/workspaceagents/me/startup": {
"post": {
"security": [


@@ -4633,6 +4633,26 @@
}
}
},
"/workspaceagents/me/rpc": {
"get": {
"security": [
{
"CoderSessionToken": []
}
],
"tags": ["Agents"],
"summary": "Workspace agent RPC API",
"operationId": "workspace-agent-rpc-api",
"responses": {
"101": {
"description": "Switching Protocols"
}
},
"x-apidocgen": {
"skip": true
}
}
},
"/workspaceagents/me/startup": {
"post": {
"security": [


@@ -13,10 +13,10 @@ import (
"cdr.dev/slog"
"cdr.dev/slog/sloggers/sloghuman"
agentproto "github.com/coder/coder/v2/agent/proto"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/dbauthz"
"github.com/coder/coder/v2/coderd/database/dbtime"
"github.com/coder/coder/v2/codersdk/agentsdk"
)
const (
@@ -133,7 +133,7 @@ func (b *Batcher) Add(
templateID uuid.UUID,
userID uuid.UUID,
workspaceID uuid.UUID,
- st agentsdk.Stats,
+ st *agentproto.Stats,
) error {
b.mu.Lock()
defer b.mu.Unlock()
@@ -156,11 +156,11 @@
b.buf.RxBytes = append(b.buf.RxBytes, st.RxBytes)
b.buf.TxPackets = append(b.buf.TxPackets, st.TxPackets)
b.buf.TxBytes = append(b.buf.TxBytes, st.TxBytes)
- b.buf.SessionCountVSCode = append(b.buf.SessionCountVSCode, st.SessionCountVSCode)
- b.buf.SessionCountJetBrains = append(b.buf.SessionCountJetBrains, st.SessionCountJetBrains)
- b.buf.SessionCountReconnectingPTY = append(b.buf.SessionCountReconnectingPTY, st.SessionCountReconnectingPTY)
- b.buf.SessionCountSSH = append(b.buf.SessionCountSSH, st.SessionCountSSH)
- b.buf.ConnectionMedianLatencyMS = append(b.buf.ConnectionMedianLatencyMS, st.ConnectionMedianLatencyMS)
+ b.buf.SessionCountVSCode = append(b.buf.SessionCountVSCode, st.SessionCountVscode)
+ b.buf.SessionCountJetBrains = append(b.buf.SessionCountJetBrains, st.SessionCountJetbrains)
+ b.buf.SessionCountReconnectingPTY = append(b.buf.SessionCountReconnectingPTY, st.SessionCountReconnectingPty)
+ b.buf.SessionCountSSH = append(b.buf.SessionCountSSH, st.SessionCountSsh)
+ b.buf.ConnectionMedianLatencyMS = append(b.buf.ConnectionMedianLatencyMS, st.ConnectionMedianLatencyMs)
// If the buffer is over 80% full, signal the flusher to flush immediately.
// We want to trigger flushes early to reduce the likelihood of


@@ -10,13 +10,13 @@ import (
"cdr.dev/slog"
"cdr.dev/slog/sloggers/slogtest"
agentproto "github.com/coder/coder/v2/agent/proto"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/dbgen"
"github.com/coder/coder/v2/coderd/database/dbtestutil"
"github.com/coder/coder/v2/coderd/database/dbtime"
"github.com/coder/coder/v2/coderd/database/pubsub"
"github.com/coder/coder/v2/coderd/rbac"
"github.com/coder/coder/v2/codersdk/agentsdk"
"github.com/coder/coder/v2/cryptorand"
)
@@ -63,7 +63,7 @@ func TestBatchStats(t *testing.T) {
// Given: a single data point is added for workspace
t2 := t1.Add(time.Second)
t.Logf("inserting 1 stat")
- require.NoError(t, b.Add(t2.Add(time.Millisecond), deps1.Agent.ID, deps1.User.ID, deps1.Template.ID, deps1.Workspace.ID, randAgentSDKStats(t)))
+ require.NoError(t, b.Add(t2.Add(time.Millisecond), deps1.Agent.ID, deps1.User.ID, deps1.Template.ID, deps1.Workspace.ID, randStats(t)))
// When: it becomes time to report stats
// Signal a tick and wait for a flush to complete.
@@ -87,9 +87,9 @@
t.Logf("inserting %d stats", defaultBufferSize)
for i := 0; i < defaultBufferSize; i++ {
if i%2 == 0 {
- require.NoError(t, b.Add(t3.Add(time.Millisecond), deps1.Agent.ID, deps1.User.ID, deps1.Template.ID, deps1.Workspace.ID, randAgentSDKStats(t)))
+ require.NoError(t, b.Add(t3.Add(time.Millisecond), deps1.Agent.ID, deps1.User.ID, deps1.Template.ID, deps1.Workspace.ID, randStats(t)))
} else {
- require.NoError(t, b.Add(t3.Add(time.Millisecond), deps2.Agent.ID, deps2.User.ID, deps2.Template.ID, deps2.Workspace.ID, randAgentSDKStats(t)))
+ require.NoError(t, b.Add(t3.Add(time.Millisecond), deps2.Agent.ID, deps2.User.ID, deps2.Template.ID, deps2.Workspace.ID, randStats(t)))
}
}
}()
@@ -129,10 +129,10 @@
require.Equal(t, defaultBufferSize, cap(b.buf.ID), "buffer grew beyond expected capacity")
}
- // randAgentSDKStats returns a random agentsdk.Stats
- func randAgentSDKStats(t *testing.T, opts ...func(*agentsdk.Stats)) agentsdk.Stats {
+ // randStats returns a random agentproto.Stats
+ func randStats(t *testing.T, opts ...func(*agentproto.Stats)) *agentproto.Stats {
t.Helper()
- s := agentsdk.Stats{
+ s := &agentproto.Stats{
ConnectionsByProto: map[string]int64{
"ssh": mustRandInt64n(t, 9) + 1,
"vscode": mustRandInt64n(t, 9) + 1,
@@ -140,19 +140,19 @@ "reconnecting_pty": mustRandInt64n(t, 9) + 1,
"reconnecting_pty": mustRandInt64n(t, 9) + 1,
},
ConnectionCount: mustRandInt64n(t, 99) + 1,
- ConnectionMedianLatencyMS: float64(mustRandInt64n(t, 99) + 1),
+ ConnectionMedianLatencyMs: float64(mustRandInt64n(t, 99) + 1),
RxPackets: mustRandInt64n(t, 99) + 1,
RxBytes: mustRandInt64n(t, 99) + 1,
TxPackets: mustRandInt64n(t, 99) + 1,
TxBytes: mustRandInt64n(t, 99) + 1,
- SessionCountVSCode: mustRandInt64n(t, 9) + 1,
- SessionCountJetBrains: mustRandInt64n(t, 9) + 1,
- SessionCountReconnectingPTY: mustRandInt64n(t, 9) + 1,
- SessionCountSSH: mustRandInt64n(t, 9) + 1,
- Metrics: []agentsdk.AgentMetric{},
+ SessionCountVscode: mustRandInt64n(t, 9) + 1,
+ SessionCountJetbrains: mustRandInt64n(t, 9) + 1,
+ SessionCountReconnectingPty: mustRandInt64n(t, 9) + 1,
+ SessionCountSsh: mustRandInt64n(t, 9) + 1,
+ Metrics: []*agentproto.Stats_Metric{},
}
for _, opt := range opts {
- opt(&s)
+ opt(s)
}
return s
}


@@ -16,6 +16,10 @@ import (
"sync/atomic"
"time"
"github.com/coder/coder/v2/coderd/prometheusmetrics"
agentproto "github.com/coder/coder/v2/agent/proto"
"github.com/andybalholm/brotli"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
@@ -34,24 +34,22 @@ import (
"tailscale.com/types/key"
"tailscale.com/util/singleflight"
- // Used for swagger docs.
- _ "github.com/coder/coder/v2/coderd/apidoc"
- "github.com/coder/coder/v2/coderd/externalauth"
- "github.com/coder/coder/v2/coderd/healthcheck/derphealth"
- "github.com/coder/coder/v2/coderd/prometheusmetrics"
"cdr.dev/slog"
"github.com/coder/coder/v2/buildinfo"
"github.com/coder/coder/v2/cli/clibase"
+ // Used for swagger docs.
+ _ "github.com/coder/coder/v2/coderd/apidoc"
"github.com/coder/coder/v2/coderd/audit"
"github.com/coder/coder/v2/coderd/awsidentity"
"github.com/coder/coder/v2/coderd/batchstats"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/dbauthz"
"github.com/coder/coder/v2/coderd/database/pubsub"
"github.com/coder/coder/v2/coderd/externalauth"
"github.com/coder/coder/v2/coderd/gitsshkey"
"github.com/coder/coder/v2/coderd/healthcheck"
"github.com/coder/coder/v2/coderd/healthcheck/derphealth"
"github.com/coder/coder/v2/coderd/httpapi"
"github.com/coder/coder/v2/coderd/httpmw"
"github.com/coder/coder/v2/coderd/metricscache"
@@ -65,7 +67,6 @@ import (
"github.com/coder/coder/v2/coderd/workspaceapps"
"github.com/coder/coder/v2/coderd/wsconncache"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/codersdk/agentsdk"
"github.com/coder/coder/v2/codersdk/drpc"
"github.com/coder/coder/v2/provisionerd/proto"
"github.com/coder/coder/v2/provisionersdk"
@@ -171,7 +172,7 @@ type Options struct {
HTTPClient *http.Client
- UpdateAgentMetrics func(ctx context.Context, labels prometheusmetrics.AgentMetricLabels, metrics []agentsdk.AgentMetric)
+ UpdateAgentMetrics func(ctx context.Context, labels prometheusmetrics.AgentMetricLabels, metrics []*agentproto.Stats_Metric)
StatsBatcher *batchstats.Batcher
WorkspaceAppsStatsCollectorOptions workspaceapps.StatsCollectorOptions
@@ -388,6 +389,7 @@ func New(options *Options) *API {
),
metricsCache: metricsCache,
Auditor: atomic.Pointer[audit.Auditor]{},
+ TailnetCoordinator: atomic.Pointer[tailnet.Coordinator]{},
TemplateScheduleStore: options.TemplateScheduleStore,
UserQuietHoursScheduleStore: options.UserQuietHoursScheduleStore,
AccessControlStore: options.AccessControlStore,
@@ -866,6 +868,7 @@ func New(options *Options) *API {
DB: options.Database,
Optional: false,
}))
r.Get("/rpc", api.workspaceAgentRPC)
r.Get("/manifest", api.workspaceAgentManifest)
// This route is deprecated and will be removed in a future release.
// New agents will use /me/manifest instead.


@@ -3,17 +3,23 @@ package db2sdk
import (
"encoding/json"
"fmt"
"strconv"
"strings"
"time"
"github.com/google/uuid"
"golang.org/x/exp/slices"
"golang.org/x/xerrors"
"tailscale.com/tailcfg"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/httpapi"
"github.com/coder/coder/v2/coderd/parameter"
"github.com/coder/coder/v2/coderd/rbac"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/provisionersdk/proto"
"github.com/coder/coder/v2/tailnet"
)
type ExternalAuthMeta struct {
@@ -218,3 +224,178 @@ func templateVersionParameterOptions(rawOptions json.RawMessage) ([]codersdk.Tem
}
return options, nil
}
func convertDisplayApps(apps []database.DisplayApp) []codersdk.DisplayApp {
dapps := make([]codersdk.DisplayApp, 0, len(apps))
for _, app := range apps {
switch codersdk.DisplayApp(app) {
case codersdk.DisplayAppVSCodeDesktop, codersdk.DisplayAppVSCodeInsiders, codersdk.DisplayAppPortForward, codersdk.DisplayAppWebTerminal, codersdk.DisplayAppSSH:
dapps = append(dapps, codersdk.DisplayApp(app))
}
}
return dapps
}
func WorkspaceAgent(derpMap *tailcfg.DERPMap, coordinator tailnet.Coordinator,
dbAgent database.WorkspaceAgent, apps []codersdk.WorkspaceApp, scripts []codersdk.WorkspaceAgentScript, logSources []codersdk.WorkspaceAgentLogSource,
agentInactiveDisconnectTimeout time.Duration, agentFallbackTroubleshootingURL string,
) (codersdk.WorkspaceAgent, error) {
var envs map[string]string
if dbAgent.EnvironmentVariables.Valid {
err := json.Unmarshal(dbAgent.EnvironmentVariables.RawMessage, &envs)
if err != nil {
return codersdk.WorkspaceAgent{}, xerrors.Errorf("unmarshal env vars: %w", err)
}
}
troubleshootingURL := agentFallbackTroubleshootingURL
if dbAgent.TroubleshootingURL != "" {
troubleshootingURL = dbAgent.TroubleshootingURL
}
subsystems := make([]codersdk.AgentSubsystem, len(dbAgent.Subsystems))
for i, subsystem := range dbAgent.Subsystems {
subsystems[i] = codersdk.AgentSubsystem(subsystem)
}
legacyStartupScriptBehavior := codersdk.WorkspaceAgentStartupScriptBehaviorNonBlocking
for _, script := range scripts {
if !script.RunOnStart {
continue
}
if !script.StartBlocksLogin {
continue
}
legacyStartupScriptBehavior = codersdk.WorkspaceAgentStartupScriptBehaviorBlocking
}
workspaceAgent := codersdk.WorkspaceAgent{
ID: dbAgent.ID,
CreatedAt: dbAgent.CreatedAt,
UpdatedAt: dbAgent.UpdatedAt,
ResourceID: dbAgent.ResourceID,
InstanceID: dbAgent.AuthInstanceID.String,
Name: dbAgent.Name,
Architecture: dbAgent.Architecture,
OperatingSystem: dbAgent.OperatingSystem,
Scripts: scripts,
StartupScriptBehavior: legacyStartupScriptBehavior,
LogsLength: dbAgent.LogsLength,
LogsOverflowed: dbAgent.LogsOverflowed,
LogSources: logSources,
Version: dbAgent.Version,
APIVersion: dbAgent.APIVersion,
EnvironmentVariables: envs,
Directory: dbAgent.Directory,
ExpandedDirectory: dbAgent.ExpandedDirectory,
Apps: apps,
ConnectionTimeoutSeconds: dbAgent.ConnectionTimeoutSeconds,
TroubleshootingURL: troubleshootingURL,
LifecycleState: codersdk.WorkspaceAgentLifecycle(dbAgent.LifecycleState),
Subsystems: subsystems,
DisplayApps: convertDisplayApps(dbAgent.DisplayApps),
}
node := coordinator.Node(dbAgent.ID)
if node != nil {
workspaceAgent.DERPLatency = map[string]codersdk.DERPRegion{}
for rawRegion, latency := range node.DERPLatency {
regionParts := strings.SplitN(rawRegion, "-", 2)
regionID, err := strconv.Atoi(regionParts[0])
if err != nil {
return codersdk.WorkspaceAgent{}, xerrors.Errorf("convert derp region id %q: %w", rawRegion, err)
}
region, found := derpMap.Regions[regionID]
if !found {
// It's possible that a workspace agent is using an old DERPMap
// and reports regions that do not exist. If that's the case,
// report the region as unknown!
region = &tailcfg.DERPRegion{
RegionID: regionID,
RegionName: fmt.Sprintf("Unnamed %d", regionID),
}
}
workspaceAgent.DERPLatency[region.RegionName] = codersdk.DERPRegion{
Preferred: node.PreferredDERP == regionID,
LatencyMilliseconds: latency * 1000,
}
}
}
status := dbAgent.Status(agentInactiveDisconnectTimeout)
workspaceAgent.Status = codersdk.WorkspaceAgentStatus(status.Status)
workspaceAgent.FirstConnectedAt = status.FirstConnectedAt
workspaceAgent.LastConnectedAt = status.LastConnectedAt
workspaceAgent.DisconnectedAt = status.DisconnectedAt
if dbAgent.StartedAt.Valid {
workspaceAgent.StartedAt = &dbAgent.StartedAt.Time
}
if dbAgent.ReadyAt.Valid {
workspaceAgent.ReadyAt = &dbAgent.ReadyAt.Time
}
switch {
case workspaceAgent.Status != codersdk.WorkspaceAgentConnected && workspaceAgent.LifecycleState == codersdk.WorkspaceAgentLifecycleOff:
workspaceAgent.Health.Reason = "agent is not running"
case workspaceAgent.Status == codersdk.WorkspaceAgentTimeout:
workspaceAgent.Health.Reason = "agent is taking too long to connect"
case workspaceAgent.Status == codersdk.WorkspaceAgentDisconnected:
workspaceAgent.Health.Reason = "agent has lost connection"
// Note: We could also handle codersdk.WorkspaceAgentLifecycleStartTimeout
// here, but it's more of a soft issue, so we don't want to mark the agent
// as unhealthy.
case workspaceAgent.LifecycleState == codersdk.WorkspaceAgentLifecycleStartError:
workspaceAgent.Health.Reason = "agent startup script exited with an error"
case workspaceAgent.LifecycleState.ShuttingDown():
workspaceAgent.Health.Reason = "agent is shutting down"
default:
workspaceAgent.Health.Healthy = true
}
return workspaceAgent, nil
}
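For context, the DERPLatency keys handled above combine a numeric region ID with a region code (for example "10-syd"), and only the numeric prefix is trusted. A minimal standalone sketch of that parsing step (the helper name is ours, not part of this change):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseDERPRegionID extracts the numeric region ID from a latency-map key
// such as "10-syd". The real code then looks the ID up in the DERPMap.
func parseDERPRegionID(rawRegion string) (int, error) {
	regionParts := strings.SplitN(rawRegion, "-", 2)
	return strconv.Atoi(regionParts[0])
}

func main() {
	id, err := parseDERPRegionID("10-syd")
	fmt.Println(id, err) // 10 <nil>
}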
func AppSubdomain(dbApp database.WorkspaceApp, agentName, workspaceName, ownerName string) string {
if !dbApp.Subdomain || agentName == "" || ownerName == "" || workspaceName == "" {
return ""
}
appSlug := dbApp.Slug
if appSlug == "" {
appSlug = dbApp.DisplayName
}
return httpapi.ApplicationURL{
// We never generate URLs with a prefix. We only allow prefixes when
// parsing URLs from the hostname. Users that want this feature can
// write out their own URLs.
Prefix: "",
AppSlugOrPort: appSlug,
AgentName: agentName,
WorkspaceName: workspaceName,
Username: ownerName,
}.String()
}
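AppSubdomain delegates the actual hostname formatting to httpapi.ApplicationURL. As a rough illustration only — the "--" separator is an assumption about that stringer, not something this diff shows — the resulting label has roughly this shape:

package main

import (
	"fmt"
	"strings"
)

// appHostnameLabel mimics the shape of the subdomain label AppSubdomain
// produces via httpapi.ApplicationURL; the separator is assumed here.
func appHostnameLabel(appSlug, agent, workspace, owner string) string {
	return strings.Join([]string{appSlug, agent, workspace, owner}, "--")
}

func main() {
	fmt.Println(appHostnameLabel("code-server", "main", "dev", "alice"))
	// code-server--main--dev--alice
}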
func Apps(dbApps []database.WorkspaceApp, agent database.WorkspaceAgent, ownerName string, workspace database.Workspace) []codersdk.WorkspaceApp {
apps := make([]codersdk.WorkspaceApp, 0)
for _, dbApp := range dbApps {
apps = append(apps, codersdk.WorkspaceApp{
ID: dbApp.ID,
URL: dbApp.Url.String,
External: dbApp.External,
Slug: dbApp.Slug,
DisplayName: dbApp.DisplayName,
Command: dbApp.Command.String,
Icon: dbApp.Icon,
Subdomain: dbApp.Subdomain,
SubdomainName: AppSubdomain(dbApp, agent.Name, workspace.Name, ownerName),
SharingLevel: codersdk.WorkspaceAppSharingLevel(dbApp.SharingLevel),
Healthcheck: codersdk.Healthcheck{
URL: dbApp.HealthcheckUrl,
Interval: dbApp.HealthcheckInterval,
Threshold: dbApp.HealthcheckThreshold,
},
Health: codersdk.WorkspaceAppHealth(dbApp.Health),
})
}
return apps
}


@ -20,6 +20,7 @@ import (
"cdr.dev/slog"
"cdr.dev/slog/sloggers/slogtest"
"github.com/coder/coder/v2/agent/agenttest"
agentproto "github.com/coder/coder/v2/agent/proto"
"github.com/coder/coder/v2/coderd/batchstats"
"github.com/coder/coder/v2/coderd/coderdtest"
"github.com/coder/coder/v2/coderd/database"
@ -658,12 +659,12 @@ func TestTemplateInsights_Golden(t *testing.T) {
connectionCount = 0
}
for createdAt.Before(stat.endedAt) {
err = batcher.Add(createdAt, workspace.agentID, workspace.template.id, workspace.user.(*testUser).sdk.ID, workspace.id, agentsdk.Stats{
err = batcher.Add(createdAt, workspace.agentID, workspace.template.id, workspace.user.(*testUser).sdk.ID, workspace.id, &agentproto.Stats{
ConnectionCount: connectionCount,
SessionCountVSCode: stat.sessionCountVSCode,
SessionCountJetBrains: stat.sessionCountJetBrains,
SessionCountReconnectingPTY: stat.sessionCountReconnectingPTY,
SessionCountSSH: stat.sessionCountSSH,
SessionCountVscode: stat.sessionCountVSCode,
SessionCountJetbrains: stat.sessionCountJetBrains,
SessionCountReconnectingPty: stat.sessionCountReconnectingPTY,
SessionCountSsh: stat.sessionCountSSH,
})
require.NoError(t, err, "want no error inserting agent stats")
createdAt = createdAt.Add(30 * time.Second)
@ -1545,12 +1546,12 @@ func TestUserActivityInsights_Golden(t *testing.T) {
connectionCount = 0
}
for createdAt.Before(stat.endedAt) {
err = batcher.Add(createdAt, workspace.agentID, workspace.template.id, workspace.user.(*testUser).sdk.ID, workspace.id, agentsdk.Stats{
err = batcher.Add(createdAt, workspace.agentID, workspace.template.id, workspace.user.(*testUser).sdk.ID, workspace.id, &agentproto.Stats{
ConnectionCount: connectionCount,
SessionCountVSCode: stat.sessionCountVSCode,
SessionCountJetBrains: stat.sessionCountJetBrains,
SessionCountReconnectingPTY: stat.sessionCountReconnectingPTY,
SessionCountSSH: stat.sessionCountSSH,
SessionCountVscode: stat.sessionCountVSCode,
SessionCountJetbrains: stat.sessionCountJetBrains,
SessionCountReconnectingPty: stat.sessionCountReconnectingPTY,
SessionCountSsh: stat.sessionCountSSH,
})
require.NoError(t, err, "want no error inserting agent stats")
createdAt = createdAt.Add(30 * time.Second)


@ -10,7 +10,7 @@ import (
"cdr.dev/slog"
"github.com/coder/coder/v2/codersdk/agentsdk"
agentproto "github.com/coder/coder/v2/agent/proto"
)
const (
@ -49,13 +49,13 @@ type updateRequest struct {
agentName string
templateName string
metrics []agentsdk.AgentMetric
metrics []*agentproto.Stats_Metric
timestamp time.Time
}
type annotatedMetric struct {
agentsdk.AgentMetric
*agentproto.Stats_Metric
username string
workspaceName string
@ -67,7 +67,7 @@ type annotatedMetric struct {
var _ prometheus.Collector = new(MetricsAggregator)
func (am *annotatedMetric) is(req updateRequest, m agentsdk.AgentMetric) bool {
func (am *annotatedMetric) is(req updateRequest, m *agentproto.Stats_Metric) bool {
return am.username == req.username && am.workspaceName == req.workspaceName && am.agentName == req.agentName && am.Name == m.Name && slices.Equal(am.Labels, m.Labels)
}
@ -152,21 +152,19 @@ func (ma *MetricsAggregator) Run(ctx context.Context) func() {
for _, m := range req.metrics {
for i, q := range ma.queue {
if q.is(req, m) {
ma.queue[i].AgentMetric.Value = m.Value
ma.queue[i].Stats_Metric.Value = m.Value
ma.queue[i].expiryDate = req.timestamp.Add(ma.metricsCleanupInterval)
continue UpdateLoop
}
}
ma.queue = append(ma.queue, annotatedMetric{
Stats_Metric: m,
username: req.username,
workspaceName: req.workspaceName,
agentName: req.agentName,
templateName: req.templateName,
AgentMetric: m,
expiryDate: req.timestamp.Add(ma.metricsCleanupInterval),
expiryDate: req.timestamp.Add(ma.metricsCleanupInterval),
})
}
@ -258,7 +256,7 @@ func (ma *MetricsAggregator) Collect(ch chan<- prometheus.Metric) {
}
}
func (ma *MetricsAggregator) Update(ctx context.Context, labels AgentMetricLabels, metrics []agentsdk.AgentMetric) {
func (ma *MetricsAggregator) Update(ctx context.Context, labels AgentMetricLabels, metrics []*agentproto.Stats_Metric) {
select {
case ma.updateCh <- updateRequest{
username: labels.Username,
@ -276,11 +274,11 @@ func (ma *MetricsAggregator) Update(ctx context.Context, labels AgentMetricLabel
}
}
func asPrometheusValueType(metricType agentsdk.AgentMetricType) (prometheus.ValueType, error) {
func asPrometheusValueType(metricType agentproto.Stats_Metric_Type) (prometheus.ValueType, error) {
switch metricType {
case agentsdk.AgentMetricTypeGauge:
case agentproto.Stats_Metric_GAUGE:
return prometheus.GaugeValue, nil
case agentsdk.AgentMetricTypeCounter:
case agentproto.Stats_Metric_COUNTER:
return prometheus.CounterValue, nil
default:
return -1, xerrors.Errorf("unsupported value type: %s", metricType)
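With metrics now carried as *agentproto.Stats_Metric, the aggregator maps the proto enum to a Prometheus value type before emitting. A hedged, self-contained sketch of that hand-off (not the aggregator's exact code; it assumes the coder repo module is available for the proto import):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"

	agentproto "github.com/coder/coder/v2/agent/proto"
)

// constMetricFor converts one proto metric into a Prometheus const metric,
// mirroring the COUNTER/GAUGE mapping in asPrometheusValueType.
func constMetricFor(m *agentproto.Stats_Metric) (prometheus.Metric, error) {
	vt := prometheus.GaugeValue
	if m.Type == agentproto.Stats_Metric_COUNTER {
		vt = prometheus.CounterValue
	}
	desc := prometheus.NewDesc(m.Name, "agent metric", nil, nil)
	return prometheus.NewConstMetric(desc, vt, m.Value)
}

func main() {
	m, err := constMetricFor(&agentproto.Stats_Metric{
		Name:  "a_counter_one",
		Type:  agentproto.Stats_Metric_COUNTER,
		Value: 1,
	})
	fmt.Println(m, err)
}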


@ -14,8 +14,8 @@ import (
"cdr.dev/slog/sloggers/slogtest"
agentproto "github.com/coder/coder/v2/agent/proto"
"github.com/coder/coder/v2/coderd/prometheusmetrics"
"github.com/coder/coder/v2/codersdk/agentsdk"
"github.com/coder/coder/v2/cryptorand"
"github.com/coder/coder/v2/testutil"
)
@ -48,33 +48,33 @@ func TestUpdateMetrics_MetricsDoNotExpire(t *testing.T) {
closeFunc := metricsAggregator.Run(ctx)
t.Cleanup(closeFunc)
given1 := []agentsdk.AgentMetric{
{Name: "a_counter_one", Type: agentsdk.AgentMetricTypeCounter, Value: 1},
{Name: "b_counter_two", Type: agentsdk.AgentMetricTypeCounter, Value: 2},
{Name: "c_gauge_three", Type: agentsdk.AgentMetricTypeGauge, Value: 3},
given1 := []*agentproto.Stats_Metric{
{Name: "a_counter_one", Type: agentproto.Stats_Metric_COUNTER, Value: 1},
{Name: "b_counter_two", Type: agentproto.Stats_Metric_COUNTER, Value: 2},
{Name: "c_gauge_three", Type: agentproto.Stats_Metric_GAUGE, Value: 3},
}
given2 := []agentsdk.AgentMetric{
{Name: "b_counter_two", Type: agentsdk.AgentMetricTypeCounter, Value: 4},
{Name: "c_gauge_three", Type: agentsdk.AgentMetricTypeGauge, Value: 5},
{Name: "c_gauge_three", Type: agentsdk.AgentMetricTypeGauge, Value: 2, Labels: []agentsdk.AgentMetricLabel{
given2 := []*agentproto.Stats_Metric{
{Name: "b_counter_two", Type: agentproto.Stats_Metric_COUNTER, Value: 4},
{Name: "c_gauge_three", Type: agentproto.Stats_Metric_GAUGE, Value: 5},
{Name: "c_gauge_three", Type: agentproto.Stats_Metric_GAUGE, Value: 2, Labels: []*agentproto.Stats_Metric_Label{
{Name: "foobar", Value: "Foobaz"},
{Name: "hello", Value: "world"},
}},
{Name: "d_gauge_four", Type: agentsdk.AgentMetricTypeGauge, Value: 6},
{Name: "d_gauge_four", Type: agentproto.Stats_Metric_GAUGE, Value: 6},
}
commonLabels := []agentsdk.AgentMetricLabel{
commonLabels := []*agentproto.Stats_Metric_Label{
{Name: "agent_name", Value: testAgentName},
{Name: "username", Value: testUsername},
{Name: "workspace_name", Value: testWorkspaceName},
{Name: "template_name", Value: testTemplateName},
}
expected := []agentsdk.AgentMetric{
{Name: "a_counter_one", Type: agentsdk.AgentMetricTypeCounter, Value: 1, Labels: commonLabels},
{Name: "b_counter_two", Type: agentsdk.AgentMetricTypeCounter, Value: 4, Labels: commonLabels},
{Name: "c_gauge_three", Type: agentsdk.AgentMetricTypeGauge, Value: 5, Labels: commonLabels},
{Name: "c_gauge_three", Type: agentsdk.AgentMetricTypeGauge, Value: 2, Labels: []agentsdk.AgentMetricLabel{
expected := []*agentproto.Stats_Metric{
{Name: "a_counter_one", Type: agentproto.Stats_Metric_COUNTER, Value: 1, Labels: commonLabels},
{Name: "b_counter_two", Type: agentproto.Stats_Metric_COUNTER, Value: 4, Labels: commonLabels},
{Name: "c_gauge_three", Type: agentproto.Stats_Metric_GAUGE, Value: 5, Labels: commonLabels},
{Name: "c_gauge_three", Type: agentproto.Stats_Metric_GAUGE, Value: 2, Labels: []*agentproto.Stats_Metric_Label{
{Name: "agent_name", Value: testAgentName},
{Name: "foobar", Value: "Foobaz"},
{Name: "hello", Value: "world"},
@ -82,7 +82,7 @@ func TestUpdateMetrics_MetricsDoNotExpire(t *testing.T) {
{Name: "workspace_name", Value: testWorkspaceName},
{Name: "template_name", Value: testTemplateName},
}},
{Name: "d_gauge_four", Type: agentsdk.AgentMetricTypeGauge, Value: 6, Labels: commonLabels},
{Name: "d_gauge_four", Type: agentproto.Stats_Metric_GAUGE, Value: 6, Labels: commonLabels},
}
// when
@ -109,7 +109,7 @@ func TestUpdateMetrics_MetricsDoNotExpire(t *testing.T) {
}, testutil.WaitMedium, testutil.IntervalSlow)
}
func verifyCollectedMetrics(t *testing.T, expected []agentsdk.AgentMetric, actual []prometheus.Metric) bool {
func verifyCollectedMetrics(t *testing.T, expected []*agentproto.Stats_Metric, actual []prometheus.Metric) bool {
if len(expected) != len(actual) {
return false
}
@ -122,9 +122,9 @@ func verifyCollectedMetrics(t *testing.T, expected []agentsdk.AgentMetric, actua
err := actual[i].Write(&d)
require.NoError(t, err)
if e.Type == agentsdk.AgentMetricTypeCounter {
if e.Type == agentproto.Stats_Metric_COUNTER {
require.Equal(t, e.Value, d.Counter.GetValue())
} else if e.Type == agentsdk.AgentMetricTypeGauge {
} else if e.Type == agentproto.Stats_Metric_GAUGE {
require.Equal(t, e.Value, d.Gauge.GetValue())
} else {
require.Failf(t, "unsupported type: %s", string(e.Type))
@ -140,10 +140,10 @@ func verifyCollectedMetrics(t *testing.T, expected []agentsdk.AgentMetric, actua
return true
}
func asMetricAgentLabels(dtoLabels []*dto.LabelPair) []agentsdk.AgentMetricLabel {
metricLabels := make([]agentsdk.AgentMetricLabel, 0, len(dtoLabels))
func asMetricAgentLabels(dtoLabels []*dto.LabelPair) []*agentproto.Stats_Metric_Label {
metricLabels := make([]*agentproto.Stats_Metric_Label, 0, len(dtoLabels))
for _, dtoLabel := range dtoLabels {
metricLabels = append(metricLabels, agentsdk.AgentMetricLabel{
metricLabels = append(metricLabels, &agentproto.Stats_Metric_Label{
Name: dtoLabel.GetName(),
Value: dtoLabel.GetValue(),
})
@ -165,8 +165,8 @@ func TestUpdateMetrics_MetricsExpire(t *testing.T) {
closeFunc := metricsAggregator.Run(ctx)
t.Cleanup(closeFunc)
given := []agentsdk.AgentMetric{
{Name: "a_counter_one", Type: agentsdk.AgentMetricTypeCounter, Value: 1},
given := []*agentproto.Stats_Metric{
{Name: "a_counter_one", Type: agentproto.Stats_Metric_COUNTER, Value: 1},
}
// when
@ -228,7 +228,7 @@ func Benchmark_MetricsAggregator_Run(b *testing.B) {
for i := 0; i < b.N; i++ {
b.StopTimer()
b.Logf("N=%d generating %d metrics", b.N, numMetrics)
metrics := make([]agentsdk.AgentMetric, 0, numMetrics)
metrics := make([]*agentproto.Stats_Metric, 0, numMetrics)
for i := 0; i < numMetrics; i++ {
metrics = append(metrics, genAgentMetric(b))
}
@ -250,14 +250,14 @@ func Benchmark_MetricsAggregator_Run(b *testing.B) {
}
}
func genAgentMetric(t testing.TB) agentsdk.AgentMetric {
func genAgentMetric(t testing.TB) *agentproto.Stats_Metric {
t.Helper()
var metricType agentsdk.AgentMetricType
var metricType agentproto.Stats_Metric_Type
if must(cryptorand.Float64()) >= 0.5 {
metricType = agentsdk.AgentMetricTypeCounter
metricType = agentproto.Stats_Metric_COUNTER
} else {
metricType = agentsdk.AgentMetricTypeGauge
metricType = agentproto.Stats_Metric_GAUGE
}
// Ensure that the metric name does not start or end with an underscore, as that is not allowed by Prometheus.
@ -265,8 +265,8 @@ func genAgentMetric(t testing.TB) agentsdk.AgentMetric {
// Generate random metric value between 0 and 1000.
metricValue := must(cryptorand.Float64()) * float64(must(cryptorand.Intn(1000)))
return agentsdk.AgentMetric{
Name: metricName, Type: metricType, Value: metricValue, Labels: []agentsdk.AgentMetricLabel{},
return &agentproto.Stats_Metric{
Name: metricName, Type: metricType, Value: metricValue, Labels: []*agentproto.Stats_Metric_Label{},
}
}


@ -17,6 +17,7 @@ import (
"cdr.dev/slog"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/db2sdk"
"github.com/coder/coder/v2/coderd/database/dbauthz"
"github.com/coder/coder/v2/coderd/database/pubsub"
"github.com/coder/coder/v2/coderd/httpapi"
@ -185,7 +186,7 @@ func (api *API) provisionerJobResources(rw http.ResponseWriter, r *http.Request,
}
}
apiAgent, err := convertWorkspaceAgent(
apiAgent, err := db2sdk.WorkspaceAgent(
api.DERPMap(), *api.TailnetCoordinator.Load(), agent, convertProvisionedApps(dbApps), convertScripts(dbScripts), convertLogSources(dbLogSources), api.AgentInactiveDisconnectTimeout,
api.DeploymentValues.AgentFallbackTroubleshootingURL.String(),
)


@ -30,8 +30,11 @@ import (
"tailscale.com/tailcfg"
"cdr.dev/slog"
agentproto "github.com/coder/coder/v2/agent/proto"
"github.com/coder/coder/v2/coderd/agentapi"
"github.com/coder/coder/v2/coderd/autobuild"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/db2sdk"
"github.com/coder/coder/v2/coderd/database/dbauthz"
"github.com/coder/coder/v2/coderd/database/dbtime"
"github.com/coder/coder/v2/coderd/externalauth"
@ -124,8 +127,8 @@ func (api *API) workspaceAgent(rw http.ResponseWriter, r *http.Request) {
return
}
apiAgent, err := convertWorkspaceAgent(
api.DERPMap(), *api.TailnetCoordinator.Load(), workspaceAgent, convertApps(dbApps, workspaceAgent, owner.Username, workspace), convertScripts(scripts), convertLogSources(logSources), api.AgentInactiveDisconnectTimeout,
apiAgent, err := db2sdk.WorkspaceAgent(
api.DERPMap(), *api.TailnetCoordinator.Load(), workspaceAgent, db2sdk.Apps(dbApps, workspaceAgent, owner.Username, workspace), convertScripts(scripts), convertLogSources(logSources), api.AgentInactiveDisconnectTimeout,
api.DeploymentValues.AgentFallbackTroubleshootingURL.String(),
)
if err != nil {
@ -149,68 +152,24 @@ func (api *API) workspaceAgent(rw http.ResponseWriter, r *http.Request) {
func (api *API) workspaceAgentManifest(rw http.ResponseWriter, r *http.Request) {
ctx := r.Context()
workspaceAgent := httpmw.WorkspaceAgent(r)
apiAgent, err := convertWorkspaceAgent(
api.DERPMap(), *api.TailnetCoordinator.Load(), workspaceAgent, nil, nil, nil, api.AgentInactiveDisconnectTimeout,
api.DeploymentValues.AgentFallbackTroubleshootingURL.String(),
)
if err != nil {
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
Message: "Internal error reading workspace agent.",
Detail: err.Error(),
})
return
// As this API becomes deprecated, use the new protobuf API and convert the
// types back to the SDK types.
manifestAPI := &agentapi.ManifestAPI{
AccessURL: api.AccessURL,
AppHostname: api.AppHostname,
AgentInactiveDisconnectTimeout: api.AgentInactiveDisconnectTimeout,
AgentFallbackTroubleshootingURL: api.DeploymentValues.AgentFallbackTroubleshootingURL.String(),
ExternalAuthConfigs: api.ExternalAuthConfigs,
DisableDirectConnections: api.DeploymentValues.DERP.Config.BlockDirect.Value(),
DerpForceWebSockets: api.DeploymentValues.DERP.Config.ForceWebSockets.Value(),
AgentFn: func(_ context.Context) (database.WorkspaceAgent, error) { return workspaceAgent, nil },
Database: api.Database,
DerpMapFn: api.DERPMap,
TailnetCoordinator: &api.TailnetCoordinator,
}
var (
dbApps []database.WorkspaceApp
scripts []database.WorkspaceAgentScript
metadata []database.WorkspaceAgentMetadatum
resource database.WorkspaceResource
build database.WorkspaceBuild
workspace database.Workspace
owner database.User
)
var eg errgroup.Group
eg.Go(func() (err error) {
dbApps, err = api.Database.GetWorkspaceAppsByAgentID(ctx, workspaceAgent.ID)
if err != nil && !xerrors.Is(err, sql.ErrNoRows) {
return err
}
return nil
})
eg.Go(func() (err error) {
// nolint:gocritic // This is necessary to fetch agent scripts!
scripts, err = api.Database.GetWorkspaceAgentScriptsByAgentIDs(dbauthz.AsSystemRestricted(ctx), []uuid.UUID{workspaceAgent.ID})
return err
})
eg.Go(func() (err error) {
metadata, err = api.Database.GetWorkspaceAgentMetadata(ctx, database.GetWorkspaceAgentMetadataParams{
WorkspaceAgentID: workspaceAgent.ID,
Keys: nil,
})
return err
})
eg.Go(func() (err error) {
resource, err = api.Database.GetWorkspaceResourceByID(ctx, workspaceAgent.ResourceID)
if err != nil {
return xerrors.Errorf("getting resource by id: %w", err)
}
build, err = api.Database.GetWorkspaceBuildByJobID(ctx, resource.JobID)
if err != nil {
return xerrors.Errorf("getting workspace build by job id: %w", err)
}
workspace, err = api.Database.GetWorkspaceByID(ctx, build.WorkspaceID)
if err != nil {
return xerrors.Errorf("getting workspace by id: %w", err)
}
owner, err = api.Database.GetUserByID(ctx, workspace.OwnerID)
if err != nil {
return xerrors.Errorf("getting workspace owner by id: %w", err)
}
return err
})
err = eg.Wait()
manifest, err := manifestAPI.GetManifest(ctx, &agentproto.GetManifestRequest{})
if err != nil {
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
Message: "Internal error fetching workspace agent manifest.",
@ -219,42 +178,56 @@ func (api *API) workspaceAgentManifest(rw http.ResponseWriter, r *http.Request)
return
}
appHost := httpapi.ApplicationURL{
AppSlugOrPort: "{{port}}",
AgentName: workspaceAgent.Name,
WorkspaceName: workspace.Name,
Username: owner.Username,
}
vscodeProxyURI := api.AccessURL.Scheme + "://" + strings.ReplaceAll(api.AppHostname, "*", appHost.String())
if api.AppHostname == "" {
vscodeProxyURI += api.AccessURL.Hostname()
}
if api.AccessURL.Port() != "" {
vscodeProxyURI += fmt.Sprintf(":%s", api.AccessURL.Port())
apps, err := agentproto.SDKAppsFromProto(manifest.Apps)
if err != nil {
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
Message: "Internal error converting workspace agent apps.",
Detail: err.Error(),
})
return
}
gitAuthConfigs := 0
for _, cfg := range api.ExternalAuthConfigs {
if codersdk.EnhancedExternalAuthProvider(cfg.Type).Git() {
gitAuthConfigs++
}
scripts, err := agentproto.SDKAgentScriptsFromProto(manifest.Scripts)
if err != nil {
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
Message: "Internal error converting workspace agent scripts.",
Detail: err.Error(),
})
return
}
agentID, err := uuid.FromBytes(manifest.AgentId)
if err != nil {
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
Message: "Internal error converting workspace agent ID.",
Detail: err.Error(),
})
return
}
workspaceID, err := uuid.FromBytes(manifest.WorkspaceId)
if err != nil {
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
Message: "Internal error converting workspace ID.",
Detail: err.Error(),
})
return
}
httpapi.Write(ctx, rw, http.StatusOK, agentsdk.Manifest{
AgentID: apiAgent.ID,
OwnerName: owner.Username,
WorkspaceID: workspace.ID,
Apps: convertApps(dbApps, workspaceAgent, owner.Username, workspace),
Scripts: convertScripts(scripts),
DERPMap: api.DERPMap(),
DERPForceWebSockets: api.DeploymentValues.DERP.Config.ForceWebSockets.Value(),
GitAuthConfigs: gitAuthConfigs,
EnvironmentVariables: apiAgent.EnvironmentVariables,
Directory: apiAgent.Directory,
VSCodePortProxyURI: vscodeProxyURI,
MOTDFile: workspaceAgent.MOTDFile,
DisableDirectConnections: api.DeploymentValues.DERP.Config.BlockDirect.Value(),
Metadata: convertWorkspaceAgentMetadataDesc(metadata),
AgentID: agentID,
OwnerName: manifest.OwnerUsername,
WorkspaceID: workspaceID,
Apps: apps,
Scripts: scripts,
DERPMap: tailnet.DERPMapFromProto(manifest.DerpMap),
DERPForceWebSockets: manifest.DerpForceWebsockets,
GitAuthConfigs: int(manifest.GitAuthConfigs),
EnvironmentVariables: manifest.EnvironmentVariables,
Directory: manifest.Directory,
VSCodePortProxyURI: manifest.VsCodePortProxyUri,
MOTDFile: manifest.MotdPath,
DisableDirectConnections: manifest.DisableDirectConnections,
Metadata: agentproto.SDKAgentMetadataDescriptionsFromProto(manifest.Metadata),
})
}
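The proto manifest carries IDs as raw 16-byte fields, which is why the handler above round-trips them through uuid.FromBytes. A standalone sketch of that round trip:

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	id := uuid.New()
	raw := id[:] // 16 bytes, as carried in fields like Manifest.AgentId

	back, err := uuid.FromBytes(raw)
	fmt.Println(back == id, err) // true <nil>
}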
@ -273,7 +246,7 @@ const AgentAPIVersionREST = "1.0"
func (api *API) postWorkspaceAgentStartup(rw http.ResponseWriter, r *http.Request) {
ctx := r.Context()
workspaceAgent := httpmw.WorkspaceAgent(r)
apiAgent, err := convertWorkspaceAgent(
apiAgent, err := db2sdk.WorkspaceAgent(
api.DERPMap(), *api.TailnetCoordinator.Load(), workspaceAgent, nil, nil, nil, api.AgentInactiveDisconnectTimeout,
api.DeploymentValues.AgentFallbackTroubleshootingURL.String(),
)
@ -789,7 +762,7 @@ func (api *API) workspaceAgentListeningPorts(rw http.ResponseWriter, r *http.Req
ctx := r.Context()
workspaceAgent := httpmw.WorkspaceAgentParam(r)
apiAgent, err := convertWorkspaceAgent(
apiAgent, err := db2sdk.WorkspaceAgent(
api.DERPMap(), *api.TailnetCoordinator.Load(), workspaceAgent, nil, nil, nil, api.AgentInactiveDisconnectTimeout,
api.DeploymentValues.AgentFallbackTroubleshootingURL.String(),
)
@ -1432,50 +1405,7 @@ func (api *API) workspaceAgentClientCoordinate(rw http.ResponseWriter, r *http.R
// convertProvisionedApps converts applications that are in the middle of the provisioning process.
// This means that they may not have an agent or workspace assigned (dry-run job).
func convertProvisionedApps(dbApps []database.WorkspaceApp) []codersdk.WorkspaceApp {
return convertApps(dbApps, database.WorkspaceAgent{}, "", database.Workspace{})
}
func convertApps(dbApps []database.WorkspaceApp, agent database.WorkspaceAgent, ownerName string, workspace database.Workspace) []codersdk.WorkspaceApp {
apps := make([]codersdk.WorkspaceApp, 0)
for _, dbApp := range dbApps {
var subdomainName string
if dbApp.Subdomain && agent.Name != "" && ownerName != "" && workspace.Name != "" {
appSlug := dbApp.Slug
if appSlug == "" {
appSlug = dbApp.DisplayName
}
subdomainName = httpapi.ApplicationURL{
// We never generate URLs with a prefix. We only allow prefixes
// when parsing URLs from the hostname. Users that want this
// feature can write out their own URLs.
Prefix: "",
AppSlugOrPort: appSlug,
AgentName: agent.Name,
WorkspaceName: workspace.Name,
Username: ownerName,
}.String()
}
apps = append(apps, codersdk.WorkspaceApp{
ID: dbApp.ID,
URL: dbApp.Url.String,
External: dbApp.External,
Slug: dbApp.Slug,
DisplayName: dbApp.DisplayName,
Command: dbApp.Command.String,
Icon: dbApp.Icon,
Subdomain: dbApp.Subdomain,
SubdomainName: subdomainName,
SharingLevel: codersdk.WorkspaceAppSharingLevel(dbApp.SharingLevel),
Healthcheck: codersdk.Healthcheck{
URL: dbApp.HealthcheckUrl,
Interval: dbApp.HealthcheckInterval,
Threshold: dbApp.HealthcheckThreshold,
},
Health: codersdk.WorkspaceAppHealth(dbApp.Health),
})
}
return apps
return db2sdk.Apps(dbApps, database.WorkspaceAgent{}, "", database.Workspace{})
}
func convertLogSources(dbLogSources []database.WorkspaceAgentLogSource) []codersdk.WorkspaceAgentLogSource {
@ -1509,149 +1439,6 @@ func convertScripts(dbScripts []database.WorkspaceAgentScript) []codersdk.Worksp
return scripts
}
func convertWorkspaceAgentMetadataDesc(mds []database.WorkspaceAgentMetadatum) []codersdk.WorkspaceAgentMetadataDescription {
metadata := make([]codersdk.WorkspaceAgentMetadataDescription, 0)
for _, datum := range mds {
metadata = append(metadata, codersdk.WorkspaceAgentMetadataDescription{
DisplayName: datum.DisplayName,
Key: datum.Key,
Script: datum.Script,
Interval: datum.Interval,
Timeout: datum.Timeout,
})
}
return metadata
}
func convertWorkspaceAgent(derpMap *tailcfg.DERPMap, coordinator tailnet.CoordinatorV1,
dbAgent database.WorkspaceAgent, apps []codersdk.WorkspaceApp, scripts []codersdk.WorkspaceAgentScript, logSources []codersdk.WorkspaceAgentLogSource,
agentInactiveDisconnectTimeout time.Duration, agentFallbackTroubleshootingURL string,
) (codersdk.WorkspaceAgent, error) {
var envs map[string]string
if dbAgent.EnvironmentVariables.Valid {
err := json.Unmarshal(dbAgent.EnvironmentVariables.RawMessage, &envs)
if err != nil {
return codersdk.WorkspaceAgent{}, xerrors.Errorf("unmarshal env vars: %w", err)
}
}
troubleshootingURL := agentFallbackTroubleshootingURL
if dbAgent.TroubleshootingURL != "" {
troubleshootingURL = dbAgent.TroubleshootingURL
}
subsystems := make([]codersdk.AgentSubsystem, len(dbAgent.Subsystems))
for i, subsystem := range dbAgent.Subsystems {
subsystems[i] = codersdk.AgentSubsystem(subsystem)
}
legacyStartupScriptBehavior := codersdk.WorkspaceAgentStartupScriptBehaviorNonBlocking
for _, script := range scripts {
if !script.RunOnStart {
continue
}
if !script.StartBlocksLogin {
continue
}
legacyStartupScriptBehavior = codersdk.WorkspaceAgentStartupScriptBehaviorBlocking
}
workspaceAgent := codersdk.WorkspaceAgent{
ID: dbAgent.ID,
CreatedAt: dbAgent.CreatedAt,
UpdatedAt: dbAgent.UpdatedAt,
ResourceID: dbAgent.ResourceID,
InstanceID: dbAgent.AuthInstanceID.String,
Name: dbAgent.Name,
Architecture: dbAgent.Architecture,
OperatingSystem: dbAgent.OperatingSystem,
Scripts: scripts,
StartupScriptBehavior: legacyStartupScriptBehavior,
LogsLength: dbAgent.LogsLength,
LogsOverflowed: dbAgent.LogsOverflowed,
LogSources: logSources,
Version: dbAgent.Version,
APIVersion: dbAgent.APIVersion,
EnvironmentVariables: envs,
Directory: dbAgent.Directory,
ExpandedDirectory: dbAgent.ExpandedDirectory,
Apps: apps,
ConnectionTimeoutSeconds: dbAgent.ConnectionTimeoutSeconds,
TroubleshootingURL: troubleshootingURL,
LifecycleState: codersdk.WorkspaceAgentLifecycle(dbAgent.LifecycleState),
Subsystems: subsystems,
DisplayApps: convertDisplayApps(dbAgent.DisplayApps),
}
node := coordinator.Node(dbAgent.ID)
if node != nil {
workspaceAgent.DERPLatency = map[string]codersdk.DERPRegion{}
for rawRegion, latency := range node.DERPLatency {
regionParts := strings.SplitN(rawRegion, "-", 2)
regionID, err := strconv.Atoi(regionParts[0])
if err != nil {
return codersdk.WorkspaceAgent{}, xerrors.Errorf("convert derp region id %q: %w", rawRegion, err)
}
region, found := derpMap.Regions[regionID]
if !found {
// It's possible that a workspace agent is using an old DERPMap
// and reports regions that do not exist. If that's the case,
// report the region as unknown!
region = &tailcfg.DERPRegion{
RegionID: regionID,
RegionName: fmt.Sprintf("Unnamed %d", regionID),
}
}
workspaceAgent.DERPLatency[region.RegionName] = codersdk.DERPRegion{
Preferred: node.PreferredDERP == regionID,
LatencyMilliseconds: latency * 1000,
}
}
}
status := dbAgent.Status(agentInactiveDisconnectTimeout)
workspaceAgent.Status = codersdk.WorkspaceAgentStatus(status.Status)
workspaceAgent.FirstConnectedAt = status.FirstConnectedAt
workspaceAgent.LastConnectedAt = status.LastConnectedAt
workspaceAgent.DisconnectedAt = status.DisconnectedAt
if dbAgent.StartedAt.Valid {
workspaceAgent.StartedAt = &dbAgent.StartedAt.Time
}
if dbAgent.ReadyAt.Valid {
workspaceAgent.ReadyAt = &dbAgent.ReadyAt.Time
}
switch {
case workspaceAgent.Status != codersdk.WorkspaceAgentConnected && workspaceAgent.LifecycleState == codersdk.WorkspaceAgentLifecycleOff:
workspaceAgent.Health.Reason = "agent is not running"
case workspaceAgent.Status == codersdk.WorkspaceAgentTimeout:
workspaceAgent.Health.Reason = "agent is taking too long to connect"
case workspaceAgent.Status == codersdk.WorkspaceAgentDisconnected:
workspaceAgent.Health.Reason = "agent has lost connection"
// Note: We could also handle codersdk.WorkspaceAgentLifecycleStartTimeout
// here, but it's more of a soft issue, so we don't want to mark the agent
// as unhealthy.
case workspaceAgent.LifecycleState == codersdk.WorkspaceAgentLifecycleStartError:
workspaceAgent.Health.Reason = "agent startup script exited with an error"
case workspaceAgent.LifecycleState.ShuttingDown():
workspaceAgent.Health.Reason = "agent is shutting down"
default:
workspaceAgent.Health.Healthy = true
}
return workspaceAgent, nil
}
func convertDisplayApps(apps []database.DisplayApp) []codersdk.DisplayApp {
dapps := make([]codersdk.DisplayApp, 0, len(apps))
for _, app := range apps {
switch codersdk.DisplayApp(app) {
case codersdk.DisplayAppVSCodeDesktop, codersdk.DisplayAppVSCodeInsiders, codersdk.DisplayAppPortForward, codersdk.DisplayAppWebTerminal, codersdk.DisplayAppSSH:
dapps = append(dapps, codersdk.DisplayApp(app))
}
}
return dapps
}
// @Summary Submit workspace agent stats
// @ID submit-workspace-agent-stats
// @Security CoderSessionToken
@ -1713,14 +1500,51 @@ func (api *API) workspaceAgentReportStats(rw http.ResponseWriter, r *http.Reques
}
}
}
activityBumpWorkspace(ctx, api.Logger.Named("activity_bump"), api.Database, workspace.ID, nextAutostart)
agentapi.ActivityBumpWorkspace(ctx, api.Logger.Named("activity_bump"), api.Database, workspace.ID, nextAutostart)
}
now := dbtime.Now()
protoStats := &agentproto.Stats{
ConnectionsByProto: req.ConnectionsByProto,
ConnectionCount: req.ConnectionCount,
ConnectionMedianLatencyMs: req.ConnectionMedianLatencyMS,
RxPackets: req.RxPackets,
RxBytes: req.RxBytes,
TxPackets: req.TxPackets,
TxBytes: req.TxBytes,
SessionCountVscode: req.SessionCountVSCode,
SessionCountJetbrains: req.SessionCountJetBrains,
SessionCountReconnectingPty: req.SessionCountReconnectingPTY,
SessionCountSsh: req.SessionCountSSH,
Metrics: make([]*agentproto.Stats_Metric, len(req.Metrics)),
}
for i, metric := range req.Metrics {
metricType := agentproto.Stats_Metric_TYPE_UNSPECIFIED
switch metric.Type {
case agentsdk.AgentMetricTypeCounter:
metricType = agentproto.Stats_Metric_COUNTER
case agentsdk.AgentMetricTypeGauge:
metricType = agentproto.Stats_Metric_GAUGE
}
protoStats.Metrics[i] = &agentproto.Stats_Metric{
Name: metric.Name,
Type: metricType,
Value: metric.Value,
Labels: make([]*agentproto.Stats_Metric_Label, len(metric.Labels)),
}
for j, label := range metric.Labels {
protoStats.Metrics[i].Labels[j] = &agentproto.Stats_Metric_Label{
Name: label.Name,
Value: label.Value,
}
}
}
var errGroup errgroup.Group
errGroup.Go(func() error {
if err := api.statsBatcher.Add(time.Now(), workspaceAgent.ID, workspace.TemplateID, workspace.OwnerID, workspace.ID, req); err != nil {
err := api.statsBatcher.Add(time.Now(), workspaceAgent.ID, workspace.TemplateID, workspace.OwnerID, workspace.ID, protoStats)
if err != nil {
api.Logger.Error(ctx, "failed to add stats to batcher", slog.Error(err))
return xerrors.Errorf("can't insert workspace agent stat: %w", err)
}
@ -1750,7 +1574,7 @@ func (api *API) workspaceAgentReportStats(rw http.ResponseWriter, r *http.Reques
WorkspaceName: workspace.Name,
AgentName: workspaceAgent.Name,
TemplateName: row.TemplateName,
}, req.Metrics)
}, protoStats.Metrics)
return nil
})
}
@ -1854,7 +1678,7 @@ func (api *API) workspaceAgentUpdateMetadata(ctx context.Context, workspaceAgent
)
}
payload, err := json.Marshal(workspaceAgentMetadataChannelPayload{
payload, err := json.Marshal(agentapi.WorkspaceAgentMetadataChannelPayload{
CollectedAt: collectedAt,
Keys: datum.Key,
})
@ -1867,7 +1691,7 @@ func (api *API) workspaceAgentUpdateMetadata(ctx context.Context, workspaceAgent
return err
}
err = api.Pubsub.Publish(watchWorkspaceAgentMetadataChannel(workspaceAgent.ID), payload)
err = api.Pubsub.Publish(agentapi.WatchWorkspaceAgentMetadataChannel(workspaceAgent.ID), payload)
if err != nil {
return err
}
@ -1896,13 +1720,13 @@ func (api *API) watchWorkspaceAgentMetadata(rw http.ResponseWriter, r *http.Requ
// Send metadata on updates, we must ensure subscription before sending
// initial metadata to guarantee that events in-between are not missed.
update := make(chan workspaceAgentMetadataChannelPayload, 1)
cancelSub, err := api.Pubsub.Subscribe(watchWorkspaceAgentMetadataChannel(workspaceAgent.ID), func(_ context.Context, byt []byte) {
update := make(chan agentapi.WorkspaceAgentMetadataChannelPayload, 1)
cancelSub, err := api.Pubsub.Subscribe(agentapi.WatchWorkspaceAgentMetadataChannel(workspaceAgent.ID), func(_ context.Context, byt []byte) {
if ctx.Err() != nil {
return
}
var payload workspaceAgentMetadataChannelPayload
var payload agentapi.WorkspaceAgentMetadataChannelPayload
err := json.Unmarshal(byt, &payload)
if err != nil {
log.Error(ctx, "failed to unmarshal pubsub message", slog.Error(err))
@ -2104,15 +1928,6 @@ func convertWorkspaceAgentMetadata(db []database.WorkspaceAgentMetadatum) []code
return result
}
type workspaceAgentMetadataChannelPayload struct {
CollectedAt time.Time `json:"collected_at"`
Keys []string `json:"keys"`
}
func watchWorkspaceAgentMetadataChannel(id uuid.UUID) string {
return "workspace_agent_metadata:" + id.String()
}
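The payload type and channel helper removed here now live in the agentapi package; the pubsub topic format itself is unchanged. A standalone sketch of the topic naming, for reference:

package main

import (
	"fmt"

	"github.com/google/uuid"
)

// watchChannel reproduces the removed helper's shape: one metadata channel
// per agent, keyed by the agent UUID.
func watchChannel(id uuid.UUID) string {
	return "workspace_agent_metadata:" + id.String()
}

func main() {
	fmt.Println(watchChannel(uuid.MustParse("00000000-0000-0000-0000-000000000001")))
	// workspace_agent_metadata:00000000-0000-0000-0000-000000000001
}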
// @Summary Submit workspace agent lifecycle state
// @ID submit-workspace-agent-lifecycle-state
// @Security CoderSessionToken


@ -0,0 +1,344 @@
package coderd
import (
"context"
"database/sql"
"net/http"
"runtime/pprof"
"sync/atomic"
"time"
"github.com/google/uuid"
"github.com/hashicorp/yamux"
"golang.org/x/xerrors"
"nhooyr.io/websocket"
"cdr.dev/slog"
"github.com/coder/coder/v2/coderd/agentapi"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/dbauthz"
"github.com/coder/coder/v2/coderd/database/dbtime"
"github.com/coder/coder/v2/coderd/httpapi"
"github.com/coder/coder/v2/coderd/httpmw"
"github.com/coder/coder/v2/coderd/util/ptr"
"github.com/coder/coder/v2/codersdk"
)
// @Summary Workspace agent RPC API
// @ID workspace-agent-rpc-api
// @Security CoderSessionToken
// @Tags Agents
// @Success 101
// @Router /workspaceagents/me/rpc [get]
// @x-apidocgen {"skip": true}
func (api *API) workspaceAgentRPC(rw http.ResponseWriter, r *http.Request) {
ctx := r.Context()
api.WebsocketWaitMutex.Lock()
api.WebsocketWaitGroup.Add(1)
api.WebsocketWaitMutex.Unlock()
defer api.WebsocketWaitGroup.Done()
workspaceAgent := httpmw.WorkspaceAgent(r)
ensureLatestBuildFn, build, ok := ensureLatestBuild(ctx, api.Database, api.Logger, rw, workspaceAgent)
if !ok {
return
}
workspace, err := api.Database.GetWorkspaceByID(ctx, build.WorkspaceID)
if err != nil {
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
Message: "Internal error fetching workspace.",
Detail: err.Error(),
})
return
}
owner, err := api.Database.GetUserByID(ctx, workspace.OwnerID)
if err != nil {
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
Message: "Internal error fetching user.",
Detail: err.Error(),
})
return
}
conn, err := websocket.Accept(rw, r, nil)
if err != nil {
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
Message: "Failed to accept websocket.",
Detail: err.Error(),
})
return
}
ctx, wsNetConn := websocketNetConn(ctx, conn, websocket.MessageBinary)
defer wsNetConn.Close()
mux, err := yamux.Server(wsNetConn, nil)
if err != nil {
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
Message: "Failed to start yamux over websocket.",
Detail: err.Error(),
})
return
}
defer mux.Close()
api.Logger.Debug(ctx, "accepting agent RPC connection",
slog.F("owner", owner.Username),
slog.F("workspace", workspace.Name),
slog.F("name", workspaceAgent.Name),
)
api.Logger.Debug(ctx, "accepting agent details", slog.F("agent", workspaceAgent))
defer conn.Close(websocket.StatusNormalClosure, "")
pingFn, ok := api.agentConnectionUpdate(ctx, workspaceAgent, build.WorkspaceID, conn)
if !ok {
return
}
agentAPI := agentapi.New(agentapi.Options{
AgentID: workspaceAgent.ID,
Ctx: api.ctx,
Log: api.Logger,
Database: api.Database,
Pubsub: api.Pubsub,
DerpMapFn: api.DERPMap,
TailnetCoordinator: &api.TailnetCoordinator,
TemplateScheduleStore: api.TemplateScheduleStore,
StatsBatcher: api.statsBatcher,
PublishWorkspaceUpdateFn: api.publishWorkspaceUpdate,
PublishWorkspaceAgentLogsUpdateFn: api.publishWorkspaceAgentLogsUpdate,
AccessURL: api.AccessURL,
AppHostname: api.AppHostname,
AgentInactiveDisconnectTimeout: api.AgentInactiveDisconnectTimeout,
AgentFallbackTroubleshootingURL: api.DeploymentValues.AgentFallbackTroubleshootingURL.String(),
AgentStatsRefreshInterval: api.AgentStatsRefreshInterval,
DisableDirectConnections: api.DeploymentValues.DERP.Config.BlockDirect.Value(),
DerpForceWebSockets: api.DeploymentValues.DERP.Config.ForceWebSockets.Value(),
DerpMapUpdateFrequency: api.Options.DERPMapUpdateFrequency,
ExternalAuthConfigs: api.ExternalAuthConfigs,
// Optional:
WorkspaceID: build.WorkspaceID, // saves the extra lookup later
UpdateAgentMetricsFn: api.UpdateAgentMetrics,
})
closeCtx, closeCtxCancel := context.WithCancel(ctx)
go func() {
defer closeCtxCancel()
err := agentAPI.Serve(ctx, mux)
if err != nil {
api.Logger.Warn(ctx, "workspace agent RPC listen error", slog.Error(err))
_ = conn.Close(websocket.StatusInternalError, err.Error())
return
}
}()
pingFn(closeCtx, ensureLatestBuildFn)
}
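The handler above stacks DRPC on yamux on a websocket-backed net.Conn. A minimal sketch of the yamux layer alone, using an in-memory pipe instead of a websocket (names and the echo payload are illustrative):

package main

import (
	"fmt"
	"io"
	"net"

	"github.com/hashicorp/yamux"
)

func main() {
	serverConn, clientConn := net.Pipe()

	done := make(chan struct{})
	go func() {
		defer close(done)
		// Server side: accept one multiplexed stream and read from it.
		session, err := yamux.Server(serverConn, nil)
		if err != nil {
			panic(err)
		}
		stream, err := session.Accept()
		if err != nil {
			panic(err)
		}
		buf := make([]byte, 5)
		if _, err := io.ReadFull(stream, buf); err != nil {
			panic(err)
		}
		fmt.Printf("server got %q\n", buf)
	}()

	// Client side: open a stream over the same conn and write to it.
	session, err := yamux.Client(clientConn, nil)
	if err != nil {
		panic(err)
	}
	stream, err := session.Open()
	if err != nil {
		panic(err)
	}
	if _, err := stream.Write([]byte("hello")); err != nil {
		panic(err)
	}
	<-done
}

In coderd the server end is wired into agentAPI.Serve, and the agent's client end (codersdk/agentsdk/rpc.go below) builds the matching yamux.Client.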
func ensureLatestBuild(ctx context.Context, db database.Store, logger slog.Logger, rw http.ResponseWriter, workspaceAgent database.WorkspaceAgent) (func() error, database.WorkspaceBuild, bool) {
resource, err := db.GetWorkspaceResourceByID(ctx, workspaceAgent.ResourceID)
if err != nil {
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
Message: "Internal error fetching workspace agent resource.",
Detail: err.Error(),
})
return nil, database.WorkspaceBuild{}, false
}
build, err := db.GetWorkspaceBuildByJobID(ctx, resource.JobID)
if err != nil {
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
Message: "Internal error fetching workspace build job.",
Detail: err.Error(),
})
return nil, database.WorkspaceBuild{}, false
}
// Ensure the resource is still valid!
// We only accept agents for resources on the latest build.
ensureLatestBuild := func() error {
latestBuild, err := db.GetLatestWorkspaceBuildByWorkspaceID(ctx, build.WorkspaceID)
if err != nil {
return err
}
if build.ID != latestBuild.ID {
return xerrors.New("build is outdated")
}
return nil
}
err = ensureLatestBuild()
if err != nil {
logger.Debug(ctx, "agent tried to connect from non-latest build",
slog.F("resource", resource),
slog.F("agent", workspaceAgent),
)
httpapi.Write(ctx, rw, http.StatusForbidden, codersdk.Response{
Message: "Agent trying to connect from non-latest build.",
Detail: err.Error(),
})
return nil, database.WorkspaceBuild{}, false
}
return ensureLatestBuild, build, true
}
func (api *API) agentConnectionUpdate(ctx context.Context, workspaceAgent database.WorkspaceAgent, workspaceID uuid.UUID, conn *websocket.Conn) (func(closeCtx context.Context, ensureLatestBuildFn func() error), bool) {
// We use a custom heartbeat routine here instead of `httpapi.Heartbeat`
// because we want to log the agent's last ping time.
var lastPing atomic.Pointer[time.Time]
lastPing.Store(ptr.Ref(time.Now())) // Since the agent initiated the request, assume it's alive.
go pprof.Do(ctx, pprof.Labels("agent", workspaceAgent.ID.String()), func(ctx context.Context) {
// TODO(mafredri): Is this too frequent? Use separate ping disconnect timeout?
t := time.NewTicker(api.AgentConnectionUpdateFrequency)
defer t.Stop()
for {
select {
case <-t.C:
case <-ctx.Done():
return
}
// We don't need a context that times out here because the ping will
// eventually go through. If the context times out, then other
// websocket read operations will receive an error, obfuscating the
// actual problem.
err := conn.Ping(ctx)
if err != nil {
return
}
lastPing.Store(ptr.Ref(time.Now()))
}
})
firstConnectedAt := workspaceAgent.FirstConnectedAt
if !firstConnectedAt.Valid {
firstConnectedAt = sql.NullTime{
Time: dbtime.Now(),
Valid: true,
}
}
lastConnectedAt := sql.NullTime{
Time: dbtime.Now(),
Valid: true,
}
disconnectedAt := workspaceAgent.DisconnectedAt
updateConnectionTimes := func(ctx context.Context) error {
//nolint:gocritic // We only update ourself.
err := api.Database.UpdateWorkspaceAgentConnectionByID(dbauthz.AsSystemRestricted(ctx), database.UpdateWorkspaceAgentConnectionByIDParams{
ID: workspaceAgent.ID,
FirstConnectedAt: firstConnectedAt,
LastConnectedAt: lastConnectedAt,
DisconnectedAt: disconnectedAt,
UpdatedAt: dbtime.Now(),
LastConnectedReplicaID: uuid.NullUUID{
UUID: api.ID,
Valid: true,
},
})
if err != nil {
return err
}
return nil
}
defer func() {
// If the connection closed then the context will be canceled; try to
// ensure our final update is sent. By waiting at most the agent
// inactive disconnect timeout we ensure that we don't block but
// also guarantee that the agent will be considered disconnected
// by the normal status check.
//
// Use a system context as the agent has disconnected and that token
// may no longer be valid.
//nolint:gocritic
ctx, cancel := context.WithTimeout(dbauthz.AsSystemRestricted(api.ctx), api.AgentInactiveDisconnectTimeout)
defer cancel()
// Only update the timestamp if the disconnect is new.
if !disconnectedAt.Valid {
disconnectedAt = sql.NullTime{
Time: dbtime.Now(),
Valid: true,
}
}
err := updateConnectionTimes(ctx)
if err != nil {
// This is a bug with unit tests that cancel the app context and
// cause this error log to be generated. We should fix the unit tests
// as this is a valid log.
//
// The pq error occurs when the server is shutting down.
if !xerrors.Is(err, context.Canceled) && !database.IsQueryCanceledError(err) {
api.Logger.Error(ctx, "failed to update agent disconnect time",
slog.Error(err),
slog.F("workspace_id", workspaceID),
)
}
}
api.publishWorkspaceUpdate(ctx, workspaceID)
}()
err := updateConnectionTimes(ctx)
if err != nil {
_ = conn.Close(websocket.StatusGoingAway, err.Error())
return nil, false
}
api.publishWorkspaceUpdate(ctx, workspaceID)
return func(closeCtx context.Context, ensureLatestBuildFn func() error) {
ticker := time.NewTicker(api.AgentConnectionUpdateFrequency)
defer ticker.Stop()
for {
select {
case <-closeCtx.Done():
return
case <-ticker.C:
}
lastPing := *lastPing.Load()
var connectionStatusChanged bool
if time.Since(lastPing) > api.AgentInactiveDisconnectTimeout {
if !disconnectedAt.Valid {
connectionStatusChanged = true
disconnectedAt = sql.NullTime{
Time: dbtime.Now(),
Valid: true,
}
}
} else {
connectionStatusChanged = disconnectedAt.Valid
// TODO(mafredri): Should we update it here or allow lastConnectedAt to shadow it?
disconnectedAt = sql.NullTime{}
lastConnectedAt = sql.NullTime{
Time: dbtime.Now(),
Valid: true,
}
}
err = updateConnectionTimes(ctx)
if err != nil {
_ = conn.Close(websocket.StatusGoingAway, err.Error())
return
}
if connectionStatusChanged {
api.publishWorkspaceUpdate(ctx, workspaceID)
}
err := ensureLatestBuildFn()
if err != nil {
// Disconnect agents that are no longer valid.
_ = conn.Close(websocket.StatusGoingAway, "")
return
}
}
}, true
}
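The liveness tracking in agentConnectionUpdate boils down to one atomic timestamp shared between a pinger and a checker. A minimal sketch of the pattern (names are ours):

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

func main() {
	var lastPing atomic.Pointer[time.Time]
	now := time.Now()
	lastPing.Store(&now) // the request itself counts as a ping

	// Pinger side: in the real code, conn.Ping in a ticker loop.
	t := time.Now()
	lastPing.Store(&t)

	// Checker side: in the real code, the ticker inside the returned func.
	timeout := 100 * time.Millisecond
	disconnected := time.Since(*lastPing.Load()) > timeout
	fmt.Println(disconnected) // false
}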


@ -908,8 +908,8 @@ func (api *API) convertWorkspaceBuild(
apps := appsByAgentID[agent.ID]
scripts := scriptsByAgentID[agent.ID]
logSources := logSourcesByAgentID[agent.ID]
apiAgent, err := convertWorkspaceAgent(
api.DERPMap(), *api.TailnetCoordinator.Load(), agent, convertApps(apps, agent, ownerName, workspace), convertScripts(scripts), convertLogSources(logSources), api.AgentInactiveDisconnectTimeout,
apiAgent, err := db2sdk.WorkspaceAgent(
api.DERPMap(), *api.TailnetCoordinator.Load(), agent, db2sdk.Apps(apps, agent, ownerName, workspace), convertScripts(scripts), convertLogSources(logSources), api.AgentInactiveDisconnectTimeout,
api.DeploymentValues.AgentFallbackTroubleshootingURL.String(),
)
if err != nil {

codersdk/agentsdk/rpc.go (new file)

@ -0,0 +1,68 @@
package agentsdk
import (
"context"
"io"
"net/http"
"net/http/cookiejar"
"github.com/hashicorp/yamux"
"golang.org/x/xerrors"
"nhooyr.io/websocket"
agentproto "github.com/coder/coder/v2/agent/proto"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/codersdk/drpc"
)
func (c *Client) RPC(ctx context.Context) (agentproto.DRPCAgentClient, error) {
rpcURL, err := c.SDK.URL.Parse("/api/v2/workspaceagents/me/rpc")
if err != nil {
return nil, xerrors.Errorf("parse url: %w", err)
}
jar, err := cookiejar.New(nil)
if err != nil {
return nil, xerrors.Errorf("create cookie jar: %w", err)
}
jar.SetCookies(rpcURL, []*http.Cookie{{
Name: codersdk.SessionTokenCookie,
Value: c.SDK.SessionToken(),
}})
httpClient := &http.Client{
Jar: jar,
Transport: c.SDK.HTTPClient.Transport,
}
// nolint:bodyclose
conn, res, err := websocket.Dial(ctx, rpcURL.String(), &websocket.DialOptions{
HTTPClient: httpClient,
})
if err != nil {
if res == nil {
return nil, err
}
return nil, codersdk.ReadBodyAsError(res)
}
ctx, cancelFunc := context.WithCancel(ctx)
ctx, wsNetConn := websocketNetConn(ctx, conn, websocket.MessageBinary)
pingClosed := pingWebSocket(ctx, c.SDK.Logger(), conn, "RPC")
nconn := &closeNetConn{
Conn: wsNetConn,
closeFunc: func() {
cancelFunc()
_ = conn.Close(websocket.StatusGoingAway, "Listen closed")
<-pingClosed
},
}
config := yamux.DefaultConfig()
config.LogOutput = io.Discard
mux, err := yamux.Client(nconn, config)
if err != nil {
return nil, xerrors.Errorf("create yamux client: %w", err)
}
dconn := drpc.MultiplexedConn(mux)
return agentproto.NewDRPCAgentClient(dconn), nil
}
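A hedged usage sketch of the new client entry point — it assumes agentsdk.New takes the deployment URL and that the embedded SDK client exposes SetSessionToken, as elsewhere in the repo; the token value is hypothetical:

package main

import (
	"context"
	"fmt"
	"net/url"

	agentproto "github.com/coder/coder/v2/agent/proto"
	"github.com/coder/coder/v2/codersdk/agentsdk"
)

func main() {
	u, _ := url.Parse("https://coder.example.com")
	client := agentsdk.New(u)
	client.SDK.SetSessionToken("agent-token") // hypothetical token

	rpc, err := client.RPC(context.Background())
	if err != nil {
		panic(err)
	}
	manifest, err := rpc.GetManifest(context.Background(), &agentproto.GetManifestRequest{})
	fmt.Println(manifest, err)
}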


@ -13,6 +13,13 @@ const (
WorkspaceAppHealthUnhealthy WorkspaceAppHealth = "unhealthy"
)
var MapWorkspaceAppHealths = map[WorkspaceAppHealth]struct{}{
WorkspaceAppHealthDisabled: {},
WorkspaceAppHealthInitializing: {},
WorkspaceAppHealthHealthy: {},
WorkspaceAppHealthUnhealthy: {},
}
type WorkspaceAppSharingLevel string
const (
@ -21,6 +28,12 @@ const (
WorkspaceAppSharingLevelPublic WorkspaceAppSharingLevel = "public"
)
var MapWorkspaceAppSharingLevels = map[WorkspaceAppSharingLevel]struct{}{
WorkspaceAppSharingLevelOwner: {},
WorkspaceAppSharingLevelAuthenticated: {},
WorkspaceAppSharingLevelPublic: {},
}
type WorkspaceApp struct {
ID uuid.UUID `json:"id" format:"uuid"`
// URL is the address being proxied to inside the workspace.


@ -139,7 +139,7 @@ func (c *connIO) handleRequest(req *proto.CoordinateRequest) error {
}
if req.AddTunnel != nil {
c.logger.Debug(c.peerCtx, "got add tunnel", slog.F("tunnel", req.AddTunnel))
dst, err := uuid.FromBytes(req.AddTunnel.Uuid)
dst, err := uuid.FromBytes(req.AddTunnel.Id)
if err != nil {
c.logger.Error(c.peerCtx, "unable to convert bytes to UUID", slog.Error(err))
// this shouldn't happen unless there is a client error. Close the connection so the client
@ -163,7 +163,7 @@ func (c *connIO) handleRequest(req *proto.CoordinateRequest) error {
}
if req.RemoveTunnel != nil {
c.logger.Debug(c.peerCtx, "got remove tunnel", slog.F("tunnel", req.RemoveTunnel))
dst, err := uuid.FromBytes(req.RemoveTunnel.Uuid)
dst, err := uuid.FromBytes(req.RemoveTunnel.Id)
if err != nil {
c.logger.Error(c.peerCtx, "unable to convert bytes to UUID", slog.Error(err))
// this shouldn't happen unless there is a client error. Close the connection so the client


@ -697,7 +697,7 @@ func (m *mapper) bestToUpdate(best map[uuid.UUID]mapping) *proto.CoordinateRespo
reason = "update"
}
resp.PeerUpdates = append(resp.PeerUpdates, &proto.CoordinateResponse_PeerUpdate{
Uuid: agpl.UUIDToByteSlice(k),
Id: agpl.UUIDToByteSlice(k),
Node: mpng.node,
Kind: mpng.kind,
Reason: reason,
@ -708,7 +708,7 @@ func (m *mapper) bestToUpdate(best map[uuid.UUID]mapping) *proto.CoordinateRespo
for k := range m.sent {
if _, ok := best[k]; !ok {
resp.PeerUpdates = append(resp.PeerUpdates, &proto.CoordinateResponse_PeerUpdate{
Uuid: agpl.UUIDToByteSlice(k),
Id: agpl.UUIDToByteSlice(k),
Kind: proto.CoordinateResponse_PeerUpdate_DISCONNECTED,
Reason: "disconnected",
})


@ -129,10 +129,144 @@ func SingleNodeUpdate(id uuid.UUID, node *Node, reason string) (*proto.Coordinat
PeerUpdates: []*proto.CoordinateResponse_PeerUpdate{
{
Kind: proto.CoordinateResponse_PeerUpdate_NODE,
Uuid: UUIDToByteSlice(id),
Id: UUIDToByteSlice(id),
Node: p,
Reason: reason,
},
},
}, nil
}
func DERPMapToProto(derpMap *tailcfg.DERPMap) *proto.DERPMap {
if derpMap == nil {
return nil
}
regionScore := make(map[int64]float64)
if derpMap.HomeParams != nil {
for k, v := range derpMap.HomeParams.RegionScore {
regionScore[int64(k)] = v
}
}
regions := make(map[int64]*proto.DERPMap_Region, len(derpMap.Regions))
for regionID, region := range derpMap.Regions {
regions[int64(regionID)] = DERPRegionToProto(region)
}
return &proto.DERPMap{
HomeParams: &proto.DERPMap_HomeParams{
RegionScore: regionScore,
},
Regions: regions,
}
}
func DERPRegionToProto(region *tailcfg.DERPRegion) *proto.DERPMap_Region {
if region == nil {
return nil
}
regionNodes := make([]*proto.DERPMap_Region_Node, len(region.Nodes))
for i, node := range region.Nodes {
regionNodes[i] = DERPNodeToProto(node)
}
return &proto.DERPMap_Region{
RegionId: int64(region.RegionID),
EmbeddedRelay: region.EmbeddedRelay,
RegionCode: region.RegionCode,
RegionName: region.RegionName,
Avoid: region.Avoid,
Nodes: regionNodes,
}
}
func DERPNodeToProto(node *tailcfg.DERPNode) *proto.DERPMap_Region_Node {
if node == nil {
return nil
}
return &proto.DERPMap_Region_Node{
Name: node.Name,
RegionId: int64(node.RegionID),
HostName: node.HostName,
CertName: node.CertName,
Ipv4: node.IPv4,
Ipv6: node.IPv6,
StunPort: int32(node.STUNPort),
StunOnly: node.STUNOnly,
DerpPort: int32(node.DERPPort),
InsecureForTests: node.InsecureForTests,
ForceHttp: node.ForceHTTP,
StunTestIp: node.STUNTestIP,
CanPort_80: node.CanPort80,
}
}
func DERPMapFromProto(derpMap *proto.DERPMap) *tailcfg.DERPMap {
if derpMap == nil {
return nil
}
regionScore := make(map[int]float64, len(derpMap.HomeParams.RegionScore))
for k, v := range derpMap.HomeParams.RegionScore {
regionScore[int(k)] = v
}
regions := make(map[int]*tailcfg.DERPRegion, len(derpMap.Regions))
for regionID, region := range derpMap.Regions {
regions[int(regionID)] = DERPRegionFromProto(region)
}
return &tailcfg.DERPMap{
HomeParams: &tailcfg.DERPHomeParams{
RegionScore: regionScore,
},
Regions: regions,
}
}
func DERPRegionFromProto(region *proto.DERPMap_Region) *tailcfg.DERPRegion {
if region == nil {
return nil
}
regionNodes := make([]*tailcfg.DERPNode, len(region.Nodes))
for i, node := range region.Nodes {
regionNodes[i] = DERPNodeFromProto(node)
}
return &tailcfg.DERPRegion{
RegionID: int(region.RegionId),
EmbeddedRelay: region.EmbeddedRelay,
RegionCode: region.RegionCode,
RegionName: region.RegionName,
Avoid: region.Avoid,
Nodes: regionNodes,
}
}
func DERPNodeFromProto(node *proto.DERPMap_Region_Node) *tailcfg.DERPNode {
if node == nil {
return nil
}
return &tailcfg.DERPNode{
Name: node.Name,
RegionID: int(node.RegionId),
HostName: node.HostName,
CertName: node.CertName,
IPv4: node.Ipv4,
IPv6: node.Ipv6,
STUNPort: int(node.StunPort),
STUNOnly: node.StunOnly,
DERPPort: int(node.DerpPort),
InsecureForTests: node.InsecureForTests,
ForceHTTP: node.ForceHttp,
STUNTestIP: node.StunTestIp,
CanPort80: node.CanPort_80,
}
}


@ -1,7 +1,9 @@
package tailnet_test
import (
"encoding/json"
"net/netip"
"os"
"testing"
"time"
@ -111,22 +113,22 @@ func TestOnlyNodeUpdates(t *testing.T) {
resp := &proto.CoordinateResponse{
PeerUpdates: []*proto.CoordinateResponse_PeerUpdate{
{
Uuid: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
Id: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
Kind: proto.CoordinateResponse_PeerUpdate_NODE,
Node: p,
},
{
Uuid: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2},
Id: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2},
Kind: proto.CoordinateResponse_PeerUpdate_DISCONNECTED,
Reason: "disconnected",
},
{
Uuid: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3},
Id: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3},
Kind: proto.CoordinateResponse_PeerUpdate_LOST,
Reason: "disconnected",
},
{
Uuid: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4},
Id: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4},
},
},
}
@ -145,9 +147,63 @@ func TestSingleNodeUpdate(t *testing.T) {
require.Len(t, resp.PeerUpdates, 1)
up := resp.PeerUpdates[0]
require.Equal(t, proto.CoordinateResponse_PeerUpdate_NODE, up.Kind)
u2, err := uuid.FromBytes(up.Uuid)
u2, err := uuid.FromBytes(up.Id)
require.NoError(t, err)
require.Equal(t, u, u2)
require.Equal(t, "unit test", up.Reason)
require.EqualValues(t, 1, up.Node.Id)
}
func TestDERPMap(t *testing.T) {
t.Parallel()
// Tailscale DERP map on 2023-11-20 for testing purposes.
tailscaleDERPMap, err := os.ReadFile("testdata/tailscale_derpmap.json")
require.NoError(t, err)
derpMap := &tailcfg.DERPMap{}
err = json.Unmarshal(tailscaleDERPMap, derpMap)
require.NoError(t, err)
// The Tailscale DERPMap doesn't have HomeParams.
derpMap.HomeParams = &tailcfg.DERPHomeParams{
RegionScore: map[int]float64{
1: 2,
2: 3,
},
}
// Add a region and node that uses every single field.
derpMap.Regions[999] = &tailcfg.DERPRegion{
RegionID: 999,
EmbeddedRelay: true,
RegionCode: "zzz",
RegionName: "Cool Region",
Avoid: true,
Nodes: []*tailcfg.DERPNode{
{
Name: "zzz1",
RegionID: 999,
HostName: "coolderp.com",
IPv4: "1.2.3.4",
IPv6: "2001:db8::1",
STUNPort: 1234,
STUNOnly: true,
DERPPort: 5678,
InsecureForTests: true,
ForceHTTP: true,
STUNTestIP: "5.6.7.8",
CanPort80: true,
},
},
}
protoMap := tailnet.DERPMapToProto(derpMap)
require.NotNil(t, protoMap)
derpMap2 := tailnet.DERPMapFromProto(protoMap)
require.NotNil(t, derpMap2)
require.Equal(t, derpMap, derpMap2)
}


@ -238,11 +238,11 @@ func ServeMultiAgent(c CoordinatorV2, logger slog.Logger, id uuid.UUID) MultiAge
return false
},
OnSubscribe: func(enq Queue, agent uuid.UUID) (*Node, error) {
err := SendCtx(ctx, reqs, &proto.CoordinateRequest{AddTunnel: &proto.CoordinateRequest_Tunnel{Uuid: UUIDToByteSlice(agent)}})
err := SendCtx(ctx, reqs, &proto.CoordinateRequest{AddTunnel: &proto.CoordinateRequest_Tunnel{Id: UUIDToByteSlice(agent)}})
return c.Node(agent), err
},
OnUnsubscribe: func(enq Queue, agent uuid.UUID) error {
err := SendCtx(ctx, reqs, &proto.CoordinateRequest{RemoveTunnel: &proto.CoordinateRequest_Tunnel{Uuid: UUIDToByteSlice(agent)}})
err := SendCtx(ctx, reqs, &proto.CoordinateRequest{RemoveTunnel: &proto.CoordinateRequest_Tunnel{Id: UUIDToByteSlice(agent)}})
return err
},
OnNodeUpdate: func(id uuid.UUID, node *Node) error {
@ -348,7 +348,7 @@ func ServeClientV1(ctx context.Context, logger slog.Logger, c CoordinatorV2, con
defer cancel()
reqs, resps := c.Coordinate(ctx, id, id.String(), ClientTunnelAuth{AgentID: agent})
err := SendCtx(ctx, reqs, &proto.CoordinateRequest{
AddTunnel: &proto.CoordinateRequest_Tunnel{Uuid: UUIDToByteSlice(agent)},
AddTunnel: &proto.CoordinateRequest_Tunnel{Id: UUIDToByteSlice(agent)},
})
if err != nil {
// can only be a context error, no need to log here.
@ -383,7 +383,7 @@ func (c *core) handleRequest(p *peer, req *proto.CoordinateRequest) error {
}
}
if req.AddTunnel != nil {
dstID, err := uuid.FromBytes(req.AddTunnel.Uuid)
dstID, err := uuid.FromBytes(req.AddTunnel.Id)
if err != nil {
// this shouldn't happen unless there is a client error. Close the connection so the client
// doesn't just happily continue thinking everything is fine.
@ -398,7 +398,7 @@ func (c *core) handleRequest(p *peer, req *proto.CoordinateRequest) error {
}
}
if req.RemoveTunnel != nil {
dstID, err := uuid.FromBytes(req.RemoveTunnel.Uuid)
dstID, err := uuid.FromBytes(req.RemoveTunnel.Id)
if err != nil {
// this shouldn't happen unless there is a client error. Close the connection so the client
// doesn't just happily continue thinking everything is fine.
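Throughout this file the uuid → id rename is wire-compatible: the field still carries the 16 raw bytes of a UUID, and a malformed length still surfaces as an error from uuid.FromBytes. A minimal round-trip sketch under that assumption, inlining tailnet.UUIDToByteSlice as agent[:]:

package main

import (
	"fmt"

	"github.com/google/uuid"

	"github.com/coder/coder/v2/tailnet/proto"
)

func main() {
	agent := uuid.New()
	// Build an AddTunnel request the way ServeMultiAgent does above.
	req := &proto.CoordinateRequest{
		AddTunnel: &proto.CoordinateRequest_Tunnel{Id: agent[:]},
	}
	// handleRequest recovers the UUID with uuid.FromBytes; anything that
	// isn't exactly 16 bytes is a client error and closes the connection.
	dst, err := uuid.FromBytes(req.AddTunnel.Id)
	if err != nil {
		panic(err)
	}
	fmt.Println(dst == agent) // true
}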

View File

@ -114,7 +114,7 @@ func (p *peer) storeMappingLocked(
p.sent[id] = n
}
return &proto.CoordinateResponse_PeerUpdate{
Uuid: id[:],
Id: id[:],
Kind: k,
Node: n,
Reason: reason,

View File

@ -79,7 +79,7 @@ type DERPMap struct {
unknownFields protoimpl.UnknownFields
HomeParams *DERPMap_HomeParams `protobuf:"bytes,1,opt,name=home_params,json=homeParams,proto3" json:"home_params,omitempty"`
Regions map[int32]*DERPMap_Region `protobuf:"bytes,2,rep,name=regions,proto3" json:"regions,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
Regions map[int64]*DERPMap_Region `protobuf:"bytes,2,rep,name=regions,proto3" json:"regions,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (x *DERPMap) Reset() {
@ -121,7 +121,7 @@ func (x *DERPMap) GetHomeParams() *DERPMap_HomeParams {
return nil
}
func (x *DERPMap) GetRegions() map[int32]*DERPMap_Region {
func (x *DERPMap) GetRegions() map[int64]*DERPMap_Region {
if x != nil {
return x.Regions
}
@ -409,7 +409,7 @@ type DERPMap_HomeParams struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
RegionScore map[int32]float64 `protobuf:"bytes,1,rep,name=region_score,json=regionScore,proto3" json:"region_score,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"`
RegionScore map[int64]float64 `protobuf:"bytes,1,rep,name=region_score,json=regionScore,proto3" json:"region_score,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"`
}
func (x *DERPMap_HomeParams) Reset() {
@ -444,7 +444,7 @@ func (*DERPMap_HomeParams) Descriptor() ([]byte, []int) {
return file_tailnet_proto_tailnet_proto_rawDescGZIP(), []int{0, 0}
}
func (x *DERPMap_HomeParams) GetRegionScore() map[int32]float64 {
func (x *DERPMap_HomeParams) GetRegionScore() map[int64]float64 {
if x != nil {
return x.RegionScore
}
@ -456,7 +456,7 @@ type DERPMap_Region struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
RegionId int32 `protobuf:"varint,1,opt,name=region_id,json=regionId,proto3" json:"region_id,omitempty"`
RegionId int64 `protobuf:"varint,1,opt,name=region_id,json=regionId,proto3" json:"region_id,omitempty"`
EmbeddedRelay bool `protobuf:"varint,2,opt,name=embedded_relay,json=embeddedRelay,proto3" json:"embedded_relay,omitempty"`
RegionCode string `protobuf:"bytes,3,opt,name=region_code,json=regionCode,proto3" json:"region_code,omitempty"`
RegionName string `protobuf:"bytes,4,opt,name=region_name,json=regionName,proto3" json:"region_name,omitempty"`
@ -496,7 +496,7 @@ func (*DERPMap_Region) Descriptor() ([]byte, []int) {
return file_tailnet_proto_tailnet_proto_rawDescGZIP(), []int{0, 1}
}
func (x *DERPMap_Region) GetRegionId() int32 {
func (x *DERPMap_Region) GetRegionId() int64 {
if x != nil {
return x.RegionId
}
@ -544,7 +544,7 @@ type DERPMap_Region_Node struct {
unknownFields protoimpl.UnknownFields
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
RegionId int32 `protobuf:"varint,2,opt,name=region_id,json=regionId,proto3" json:"region_id,omitempty"`
RegionId int64 `protobuf:"varint,2,opt,name=region_id,json=regionId,proto3" json:"region_id,omitempty"`
HostName string `protobuf:"bytes,3,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"`
CertName string `protobuf:"bytes,4,opt,name=cert_name,json=certName,proto3" json:"cert_name,omitempty"`
Ipv4 string `protobuf:"bytes,5,opt,name=ipv4,proto3" json:"ipv4,omitempty"`
@ -597,7 +597,7 @@ func (x *DERPMap_Region_Node) GetName() string {
return ""
}
func (x *DERPMap_Region_Node) GetRegionId() int32 {
func (x *DERPMap_Region_Node) GetRegionId() int64 {
if x != nil {
return x.RegionId
}
@ -771,7 +771,7 @@ type CoordinateRequest_Tunnel struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Uuid []byte `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"`
Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
}
func (x *CoordinateRequest_Tunnel) Reset() {
@ -806,9 +806,9 @@ func (*CoordinateRequest_Tunnel) Descriptor() ([]byte, []int) {
return file_tailnet_proto_tailnet_proto_rawDescGZIP(), []int{3, 2}
}
func (x *CoordinateRequest_Tunnel) GetUuid() []byte {
func (x *CoordinateRequest_Tunnel) GetId() []byte {
if x != nil {
return x.Uuid
return x.Id
}
return nil
}
@ -818,7 +818,7 @@ type CoordinateResponse_PeerUpdate struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Uuid []byte `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"`
Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
Node *Node `protobuf:"bytes,2,opt,name=node,proto3" json:"node,omitempty"`
Kind CoordinateResponse_PeerUpdate_Kind `protobuf:"varint,3,opt,name=kind,proto3,enum=coder.tailnet.v2.CoordinateResponse_PeerUpdate_Kind" json:"kind,omitempty"`
Reason string `protobuf:"bytes,4,opt,name=reason,proto3" json:"reason,omitempty"`
@ -856,9 +856,9 @@ func (*CoordinateResponse_PeerUpdate) Descriptor() ([]byte, []int) {
return file_tailnet_proto_tailnet_proto_rawDescGZIP(), []int{4, 0}
}
func (x *CoordinateResponse_PeerUpdate) GetUuid() []byte {
func (x *CoordinateResponse_PeerUpdate) GetId() []byte {
if x != nil {
return x.Uuid
return x.Id
}
return nil
}
@ -909,11 +909,11 @@ var file_tailnet_proto_tailnet_proto_rawDesc = []byte{
0x2e, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x45, 0x6e, 0x74, 0x72,
0x79, 0x52, 0x0b, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x1a, 0x3e,
0x0a, 0x10, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x45, 0x6e, 0x74,
0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52,
0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52,
0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xe3,
0x04, 0x0a, 0x06, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x67,
0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x72, 0x65,
0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x72, 0x65,
0x67, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x6d, 0x62, 0x65, 0x64, 0x64,
0x65, 0x64, 0x5f, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d,
0x65, 0x6d, 0x62, 0x65, 0x64, 0x64, 0x65, 0x64, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x1f, 0x0a,
@ -929,7 +929,7 @@ var file_tailnet_proto_tailnet_proto_rawDesc = []byte{
0x65, 0x73, 0x1a, 0xff, 0x02, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e,
0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
0x1b, 0x0a, 0x09, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01,
0x28, 0x05, 0x52, 0x08, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09,
0x28, 0x03, 0x52, 0x08, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09,
0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
0x08, 0x68, 0x6f, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x65, 0x72,
0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x65,
@ -952,7 +952,7 @@ var file_tailnet_proto_tailnet_proto_rawDesc = []byte{
0x5f, 0x38, 0x30, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x63, 0x61, 0x6e, 0x50, 0x6f,
0x72, 0x74, 0x38, 0x30, 0x1a, 0x5c, 0x0a, 0x0c, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x73, 0x45,
0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x74, 0x61,
0x69, 0x6c, 0x6e, 0x65, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x45, 0x52, 0x50, 0x4d, 0x61, 0x70,
0x2e, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02,
@ -992,7 +992,7 @@ var file_tailnet_proto_tailnet_proto_rawDesc = []byte{
0x57, 0x65, 0x62, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79,
0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb6, 0x03, 0x0a, 0x11, 0x43,
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb2, 0x03, 0x0a, 0x11, 0x43,
0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x12, 0x4f, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x65, 0x6c, 0x66, 0x18,
0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x74, 0x61,
@ -1018,46 +1018,46 @@ var file_tailnet_proto_tailnet_proto_rawDesc = []byte{
0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x6f, 0x64, 0x65,
0x72, 0x2e, 0x74, 0x61, 0x69, 0x6c, 0x6e, 0x65, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x64,
0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x1a, 0x0c, 0x0a, 0x0a, 0x44, 0x69, 0x73, 0x63, 0x6f,
0x6e, 0x6e, 0x65, 0x63, 0x74, 0x1a, 0x1c, 0x0a, 0x06, 0x54, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x12,
0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x75,
0x75, 0x69, 0x64, 0x22, 0xdd, 0x02, 0x0a, 0x12, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61,
0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, 0x0a, 0x0c, 0x70, 0x65,
0x65, 0x72, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
0x32, 0x2f, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x74, 0x61, 0x69, 0x6c, 0x6e, 0x65, 0x74,
0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x52, 0x65,
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x55, 0x70, 0x64, 0x61, 0x74,
0x65, 0x52, 0x0b, 0x70, 0x65, 0x65, 0x72, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x73, 0x1a, 0xf2,
0x01, 0x0a, 0x0a, 0x50, 0x65, 0x65, 0x72, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x12, 0x0a,
0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x75, 0x75, 0x69,
0x64, 0x12, 0x2a, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x16, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x74, 0x61, 0x69, 0x6c, 0x6e, 0x65, 0x74, 0x2e,
0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x48, 0x0a,
0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x63, 0x6f,
0x64, 0x65, 0x72, 0x2e, 0x74, 0x61, 0x69, 0x6c, 0x6e, 0x65, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x43,
0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
0x65, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x4b, 0x69, 0x6e,
0x64, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f,
0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22,
0x42, 0x0a, 0x04, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x10, 0x4b, 0x49, 0x4e, 0x44, 0x5f,
0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a,
0x04, 0x4e, 0x4f, 0x44, 0x45, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x44, 0x49, 0x53, 0x43, 0x4f,
0x4e, 0x4e, 0x45, 0x43, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x4f, 0x53,
0x54, 0x10, 0x03, 0x32, 0xc4, 0x01, 0x0a, 0x06, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x56,
0x0a, 0x0e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x45, 0x52, 0x50, 0x4d, 0x61, 0x70, 0x73,
0x12, 0x27, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x74, 0x61, 0x69, 0x6c, 0x6e, 0x65, 0x74,
0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x45, 0x52, 0x50, 0x4d, 0x61,
0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x63, 0x6f, 0x64, 0x65,
0x72, 0x2e, 0x74, 0x61, 0x69, 0x6c, 0x6e, 0x65, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x45, 0x52,
0x50, 0x4d, 0x61, 0x70, 0x30, 0x01, 0x12, 0x62, 0x0a, 0x11, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69,
0x6e, 0x61, 0x74, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x6e, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x63, 0x6f,
0x64, 0x65, 0x72, 0x2e, 0x74, 0x61, 0x69, 0x6c, 0x6e, 0x65, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x43,
0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x1a, 0x24, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x74, 0x61, 0x69, 0x6c, 0x6e, 0x65, 0x74,
0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x52, 0x65,
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69,
0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2f, 0x63,
0x6f, 0x64, 0x65, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x74, 0x61, 0x69, 0x6c, 0x6e, 0x65, 0x74, 0x2f,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x6e, 0x6e, 0x65, 0x63, 0x74, 0x1a, 0x18, 0x0a, 0x06, 0x54, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x12,
0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x22,
0xd9, 0x02, 0x0a, 0x12, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x52, 0x65,
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, 0x0a, 0x0c, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x75,
0x70, 0x64, 0x61, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x63,
0x6f, 0x64, 0x65, 0x72, 0x2e, 0x74, 0x61, 0x69, 0x6c, 0x6e, 0x65, 0x74, 0x2e, 0x76, 0x32, 0x2e,
0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x0b, 0x70,
0x65, 0x65, 0x72, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x73, 0x1a, 0xee, 0x01, 0x0a, 0x0a, 0x50,
0x65, 0x65, 0x72, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18,
0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2a, 0x0a, 0x04, 0x6e, 0x6f, 0x64,
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e,
0x74, 0x61, 0x69, 0x6c, 0x6e, 0x65, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52,
0x04, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x48, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x03, 0x20,
0x01, 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x74, 0x61, 0x69, 0x6c,
0x6e, 0x65, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74,
0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x55, 0x70,
0x64, 0x61, 0x74, 0x65, 0x2e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12,
0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x42, 0x0a, 0x04, 0x4b, 0x69, 0x6e, 0x64, 0x12,
0x14, 0x0a, 0x10, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46,
0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x44, 0x45, 0x10, 0x01, 0x12,
0x10, 0x0a, 0x0c, 0x44, 0x49, 0x53, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x45, 0x44, 0x10,
0x02, 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x4f, 0x53, 0x54, 0x10, 0x03, 0x32, 0xc4, 0x01, 0x0a, 0x06,
0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x56, 0x0a, 0x0e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d,
0x44, 0x45, 0x52, 0x50, 0x4d, 0x61, 0x70, 0x73, 0x12, 0x27, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72,
0x2e, 0x74, 0x61, 0x69, 0x6c, 0x6e, 0x65, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x72, 0x65,
0x61, 0x6d, 0x44, 0x45, 0x52, 0x50, 0x4d, 0x61, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x1a, 0x19, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x74, 0x61, 0x69, 0x6c, 0x6e, 0x65,
0x74, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x45, 0x52, 0x50, 0x4d, 0x61, 0x70, 0x30, 0x01, 0x12, 0x62,
0x0a, 0x11, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x54, 0x61, 0x69, 0x6c,
0x6e, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x74, 0x61, 0x69, 0x6c,
0x6e, 0x65, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74,
0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72,
0x2e, 0x74, 0x61, 0x69, 0x6c, 0x6e, 0x65, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6f, 0x72,
0x64, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01,
0x30, 0x01, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
0x2f, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2f, 0x76, 0x32, 0x2f,
0x74, 0x61, 0x69, 0x6c, 0x6e, 0x65, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x33,
}
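For anyone decoding the raw descriptor edits above: in descriptor.proto, FieldDescriptorProto.type is field 5 (tag byte 0x28), with TYPE_INT32 = 5 and TYPE_INT64 = 3, so the recurring 0x28, 0x05 → 0x28, 0x03 changes are the int32 → int64 widening; the smaller message-length bytes (e.g. 0xb6 → 0xb2 and 0xdd → 0xd9) come from the shorter id field name. A quick check against the protobuf-go runtime:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// TYPE_INT32 == 5 and TYPE_INT64 == 3, matching the 0x05 -> 0x03 edits
	// after each 0x28 (field 5, varint) tag in the raw descriptor.
	fmt.Println(int32(descriptorpb.FieldDescriptorProto_TYPE_INT32)) // 5
	fmt.Println(int32(descriptorpb.FieldDescriptorProto_TYPE_INT64)) // 3
}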
var (

View File

@ -7,12 +7,12 @@ import "google/protobuf/timestamp.proto";
message DERPMap {
message HomeParams {
map<int32, double> region_score = 1;
map<int64, double> region_score = 1;
}
HomeParams home_params = 1;
message Region {
int32 region_id = 1;
int64 region_id = 1;
bool embedded_relay = 2;
string region_code = 3;
string region_name = 4;
@ -20,7 +20,7 @@ message DERPMap {
message Node {
string name = 1;
int32 region_id = 2;
int64 region_id = 2;
string host_name = 3;
string cert_name = 4;
string ipv4 = 5;
@ -35,7 +35,7 @@ message DERPMap {
}
repeated Node nodes = 6;
}
map<int32, Region> regions = 2;
map<int64, Region> regions = 2;
}
message StreamDERPMapsRequest {}
@ -64,7 +64,7 @@ message CoordinateRequest {
Disconnect disconnect = 2;
message Tunnel {
bytes uuid = 1;
bytes id = 1;
}
Tunnel add_tunnel = 3;
Tunnel remove_tunnel = 4;
@ -72,7 +72,7 @@ message CoordinateRequest {
message CoordinateResponse {
message PeerUpdate {
bytes uuid = 1;
bytes id = 1;
Node node = 2;
enum Kind {

View File

@ -133,7 +133,7 @@ func TestClientService_ServeClient_V2(t *testing.T) {
{
Kind: proto.CoordinateResponse_PeerUpdate_NODE,
Node: &proto.Node{PreferredDerp: 22},
Uuid: agentID[:],
Id: agentID[:],
},
}}
resp, err := stream.Recv()

View File

@ -46,7 +46,7 @@ func NewPeer(ctx context.Context, t testing.TB, coord tailnet.CoordinatorV2, nam
func (p *Peer) AddTunnel(other uuid.UUID) {
p.t.Helper()
req := &proto.CoordinateRequest{AddTunnel: &proto.CoordinateRequest_Tunnel{Uuid: tailnet.UUIDToByteSlice(other)}}
req := &proto.CoordinateRequest{AddTunnel: &proto.CoordinateRequest_Tunnel{Id: tailnet.UUIDToByteSlice(other)}}
select {
case <-p.ctx.Done():
p.t.Errorf("timeout adding tunnel for %s", p.name)
@ -146,7 +146,7 @@ func (p *Peer) handleOneResp() error {
return responsesClosed
}
for _, update := range resp.PeerUpdates {
id, err := uuid.FromBytes(update.Uuid)
id, err := uuid.FromBytes(update.Id)
if err != nil {
return err
}
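Putting the renamed Id field together with the Kind enum from earlier in the diff, a consumer of PeerUpdates might look like this sketch — the onNode/onGone callbacks are hypothetical, not code from this PR:

package sketch

import (
	"github.com/google/uuid"

	"github.com/coder/coder/v2/tailnet/proto"
)

// applyUpdates dispatches each PeerUpdate by Kind after recovering the
// peer's UUID from the raw Id bytes; onNode and onGone are caller-supplied.
func applyUpdates(resp *proto.CoordinateResponse,
	onNode func(uuid.UUID, *proto.Node),
	onGone func(uuid.UUID, string),
) error {
	for _, up := range resp.PeerUpdates {
		id, err := uuid.FromBytes(up.Id)
		if err != nil {
			return err
		}
		switch up.Kind {
		case proto.CoordinateResponse_PeerUpdate_NODE:
			onNode(id, up.Node)
		case proto.CoordinateResponse_PeerUpdate_DISCONNECTED,
			proto.CoordinateResponse_PeerUpdate_LOST:
			onGone(id, up.Reason)
		}
	}
	return nil
}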

698 tailnet/testdata/tailscale_derpmap.json vendored Normal file
View File

@ -0,0 +1,698 @@
{
"Regions": {
"1": {
"RegionID": 1,
"RegionCode": "nyc",
"RegionName": "New York City",
"Nodes": [
{
"Name": "1f",
"RegionID": 1,
"HostName": "derp1f.tailscale.com",
"IPv4": "199.38.181.104",
"IPv6": "2607:f740:f::bc"
},
{
"Name": "1g",
"RegionID": 1,
"HostName": "derp1g.tailscale.com",
"IPv4": "209.177.145.120",
"IPv6": "2607:f740:f::3eb"
},
{
"Name": "1h",
"RegionID": 1,
"HostName": "derp1h.tailscale.com",
"IPv4": "199.38.181.93",
"IPv6": "2607:f740:f::afd"
},
{
"Name": "1i",
"RegionID": 1,
"HostName": "derp1i.tailscale.com",
"IPv4": "199.38.181.103",
"IPv6": "2607:f740:f::e19",
"STUNOnly": true
}
]
},
"10": {
"RegionID": 10,
"RegionCode": "sea",
"RegionName": "Seattle",
"Nodes": [
{
"Name": "10b",
"RegionID": 10,
"HostName": "derp10b.tailscale.com",
"IPv4": "192.73.240.161",
"IPv6": "2607:f740:14::61c"
},
{
"Name": "10c",
"RegionID": 10,
"HostName": "derp10c.tailscale.com",
"IPv4": "192.73.240.121",
"IPv6": "2607:f740:14::40c"
},
{
"Name": "10d",
"RegionID": 10,
"HostName": "derp10d.tailscale.com",
"IPv4": "192.73.240.132",
"IPv6": "2607:f740:14::500"
}
]
},
"11": {
"RegionID": 11,
"RegionCode": "sao",
"RegionName": "São Paulo",
"Nodes": [
{
"Name": "11b",
"RegionID": 11,
"HostName": "derp11b.tailscale.com",
"IPv4": "148.163.220.129",
"IPv6": "2607:f740:1::211"
},
{
"Name": "11c",
"RegionID": 11,
"HostName": "derp11c.tailscale.com",
"IPv4": "148.163.220.134",
"IPv6": "2607:f740:1::861"
},
{
"Name": "11d",
"RegionID": 11,
"HostName": "derp11d.tailscale.com",
"IPv4": "148.163.220.210",
"IPv6": "2607:f740:1::2e6"
}
]
},
"12": {
"RegionID": 12,
"RegionCode": "ord",
"RegionName": "Chicago",
"Nodes": [
{
"Name": "12d",
"RegionID": 12,
"HostName": "derp12d.tailscale.com",
"IPv4": "209.177.158.246",
"IPv6": "2607:f740:e::811"
},
{
"Name": "12e",
"RegionID": 12,
"HostName": "derp12e.tailscale.com",
"IPv4": "209.177.158.15",
"IPv6": "2607:f740:e::b17"
},
{
"Name": "12f",
"RegionID": 12,
"HostName": "derp12f.tailscale.com",
"IPv4": "199.38.182.118",
"IPv6": "2607:f740:e::4c8"
}
]
},
"13": {
"RegionID": 13,
"RegionCode": "den",
"RegionName": "Denver",
"Nodes": [
{
"Name": "13b",
"RegionID": 13,
"HostName": "derp13b.tailscale.com",
"IPv4": "192.73.242.187",
"IPv6": "2607:f740:16::640"
},
{
"Name": "13c",
"RegionID": 13,
"HostName": "derp13c.tailscale.com",
"IPv4": "192.73.242.28",
"IPv6": "2607:f740:16::5c"
},
{
"Name": "13d",
"RegionID": 13,
"HostName": "derp13d.tailscale.com",
"IPv4": "192.73.242.204",
"IPv6": "2607:f740:16::c23"
}
]
},
"14": {
"RegionID": 14,
"RegionCode": "ams",
"RegionName": "Amsterdam",
"Nodes": [
{
"Name": "14b",
"RegionID": 14,
"HostName": "derp14b.tailscale.com",
"IPv4": "176.58.93.248",
"IPv6": "2a00:dd80:3c::807"
},
{
"Name": "14c",
"RegionID": 14,
"HostName": "derp14c.tailscale.com",
"IPv4": "176.58.93.147",
"IPv6": "2a00:dd80:3c::b09"
},
{
"Name": "14d",
"RegionID": 14,
"HostName": "derp14d.tailscale.com",
"IPv4": "176.58.93.154",
"IPv6": "2a00:dd80:3c::3d5"
}
]
},
"15": {
"RegionID": 15,
"RegionCode": "jnb",
"RegionName": "Johannesburg",
"Nodes": [
{
"Name": "15b",
"RegionID": 15,
"HostName": "derp15b.tailscale.com",
"IPv4": "102.67.165.90",
"IPv6": "2c0f:edb0:0:10::963"
},
{
"Name": "15c",
"RegionID": 15,
"HostName": "derp15c.tailscale.com",
"IPv4": "102.67.165.185",
"IPv6": "2c0f:edb0:0:10::b59"
},
{
"Name": "15d",
"RegionID": 15,
"HostName": "derp15d.tailscale.com",
"IPv4": "102.67.165.36",
"IPv6": "2c0f:edb0:0:10::599"
}
]
},
"16": {
"RegionID": 16,
"RegionCode": "mia",
"RegionName": "Miami",
"Nodes": [
{
"Name": "16b",
"RegionID": 16,
"HostName": "derp16b.tailscale.com",
"IPv4": "192.73.243.135",
"IPv6": "2607:f740:17::476"
},
{
"Name": "16c",
"RegionID": 16,
"HostName": "derp16c.tailscale.com",
"IPv4": "192.73.243.229",
"IPv6": "2607:f740:17::4e4"
},
{
"Name": "16d",
"RegionID": 16,
"HostName": "derp16d.tailscale.com",
"IPv4": "192.73.243.141",
"IPv6": "2607:f740:17::475"
}
]
},
"17": {
"RegionID": 17,
"RegionCode": "lax",
"RegionName": "Los Angeles",
"Nodes": [
{
"Name": "17b",
"RegionID": 17,
"HostName": "derp17b.tailscale.com",
"IPv4": "192.73.244.245",
"IPv6": "2607:f740:c::646"
},
{
"Name": "17c",
"RegionID": 17,
"HostName": "derp17c.tailscale.com",
"IPv4": "208.111.40.12",
"IPv6": "2607:f740:c::10"
},
{
"Name": "17d",
"RegionID": 17,
"HostName": "derp17d.tailscale.com",
"IPv4": "208.111.40.216",
"IPv6": "2607:f740:c::e1b"
}
]
},
"18": {
"RegionID": 18,
"RegionCode": "par",
"RegionName": "Paris",
"Nodes": [
{
"Name": "18b",
"RegionID": 18,
"HostName": "derp18b.tailscale.com",
"IPv4": "176.58.90.147",
"IPv6": "2a00:dd80:3e::363"
},
{
"Name": "18c",
"RegionID": 18,
"HostName": "derp18c.tailscale.com",
"IPv4": "176.58.90.207",
"IPv6": "2a00:dd80:3e::c19"
},
{
"Name": "18d",
"RegionID": 18,
"HostName": "derp18d.tailscale.com",
"IPv4": "176.58.90.104",
"IPv6": "2a00:dd80:3e::f2e"
}
]
},
"19": {
"RegionID": 19,
"RegionCode": "mad",
"RegionName": "Madrid",
"Nodes": [
{
"Name": "19b",
"RegionID": 19,
"HostName": "derp19b.tailscale.com",
"IPv4": "45.159.97.144",
"IPv6": "2a00:dd80:14:10::335"
},
{
"Name": "19c",
"RegionID": 19,
"HostName": "derp19c.tailscale.com",
"IPv4": "45.159.97.61",
"IPv6": "2a00:dd80:14:10::20"
},
{
"Name": "19d",
"RegionID": 19,
"HostName": "derp19d.tailscale.com",
"IPv4": "45.159.97.233",
"IPv6": "2a00:dd80:14:10::34a"
}
]
},
"2": {
"RegionID": 2,
"RegionCode": "sfo",
"RegionName": "San Francisco",
"Nodes": [
{
"Name": "2d",
"RegionID": 2,
"HostName": "derp2d.tailscale.com",
"IPv4": "192.73.252.65",
"IPv6": "2607:f740:0:3f::287"
},
{
"Name": "2e",
"RegionID": 2,
"HostName": "derp2e.tailscale.com",
"IPv4": "192.73.252.134",
"IPv6": "2607:f740:0:3f::44c"
},
{
"Name": "2f",
"RegionID": 2,
"HostName": "derp2f.tailscale.com",
"IPv4": "208.111.34.178",
"IPv6": "2607:f740:0:3f::f4"
}
]
},
"20": {
"RegionID": 20,
"RegionCode": "hkg",
"RegionName": "Hong Kong",
"Nodes": [
{
"Name": "20b",
"RegionID": 20,
"HostName": "derp20b.tailscale.com",
"IPv4": "103.6.84.152",
"IPv6": "2403:2500:8000:1::ef6"
},
{
"Name": "20c",
"RegionID": 20,
"HostName": "derp20c.tailscale.com",
"IPv4": "205.147.105.30",
"IPv6": "2403:2500:8000:1::5fb"
},
{
"Name": "20d",
"RegionID": 20,
"HostName": "derp20d.tailscale.com",
"IPv4": "205.147.105.78",
"IPv6": "2403:2500:8000:1::e9a"
}
]
},
"21": {
"RegionID": 21,
"RegionCode": "tor",
"RegionName": "Toronto",
"Nodes": [
{
"Name": "21b",
"RegionID": 21,
"HostName": "derp21b.tailscale.com",
"IPv4": "162.248.221.199",
"IPv6": "2607:f740:50::1d1"
},
{
"Name": "21c",
"RegionID": 21,
"HostName": "derp21c.tailscale.com",
"IPv4": "162.248.221.215",
"IPv6": "2607:f740:50::f10"
},
{
"Name": "21d",
"RegionID": 21,
"HostName": "derp21d.tailscale.com",
"IPv4": "162.248.221.248",
"IPv6": "2607:f740:50::ca4"
}
]
},
"22": {
"RegionID": 22,
"RegionCode": "waw",
"RegionName": "Warsaw",
"Nodes": [
{
"Name": "22b",
"RegionID": 22,
"HostName": "derp22b.tailscale.com",
"IPv4": "45.159.98.196",
"IPv6": "2a00:dd80:40:100::316"
},
{
"Name": "22c",
"RegionID": 22,
"HostName": "derp22c.tailscale.com",
"IPv4": "45.159.98.253",
"IPv6": "2a00:dd80:40:100::3f"
},
{
"Name": "22d",
"RegionID": 22,
"HostName": "derp22d.tailscale.com",
"IPv4": "45.159.98.145",
"IPv6": "2a00:dd80:40:100::211"
}
]
},
"23": {
"RegionID": 23,
"RegionCode": "dbi",
"RegionName": "Dubai",
"Nodes": [
{
"Name": "23b",
"RegionID": 23,
"HostName": "derp23b.tailscale.com",
"IPv4": "185.34.3.232",
"IPv6": "2a00:dd80:3f:100::76f"
},
{
"Name": "23c",
"RegionID": 23,
"HostName": "derp23c.tailscale.com",
"IPv4": "185.34.3.207",
"IPv6": "2a00:dd80:3f:100::a50"
},
{
"Name": "23d",
"RegionID": 23,
"HostName": "derp23d.tailscale.com",
"IPv4": "185.34.3.75",
"IPv6": "2a00:dd80:3f:100::97e"
}
]
},
"24": {
"RegionID": 24,
"RegionCode": "hnl",
"RegionName": "Honolulu",
"Nodes": [
{
"Name": "24b",
"RegionID": 24,
"HostName": "derp24b.tailscale.com",
"IPv4": "208.83.234.151",
"IPv6": "2001:19f0:c000:c586:5400:04ff:fe26:2ba6"
},
{
"Name": "24c",
"RegionID": 24,
"HostName": "derp24c.tailscale.com",
"IPv4": "208.83.233.233",
"IPv6": "2001:19f0:c000:c591:5400:04ff:fe26:2c5f"
},
{
"Name": "24d",
"RegionID": 24,
"HostName": "derp24d.tailscale.com",
"IPv4": "208.72.155.133",
"IPv6": "2001:19f0:c000:c564:5400:04ff:fe26:2ba8"
}
]
},
"25": {
"RegionID": 25,
"RegionCode": "nai",
"RegionName": "Nairobi",
"Nodes": [
{
"Name": "25b",
"RegionID": 25,
"HostName": "derp25b.tailscale.com",
"IPv4": "102.67.167.245",
"IPv6": "2c0f:edb0:2000:1::2e9"
},
{
"Name": "25c",
"RegionID": 25,
"HostName": "derp25c.tailscale.com",
"IPv4": "102.67.167.37",
"IPv6": "2c0f:edb0:2000:1::2c7"
},
{
"Name": "25d",
"RegionID": 25,
"HostName": "derp25d.tailscale.com",
"IPv4": "102.67.167.188",
"IPv6": "2c0f:edb0:2000:1::188"
}
]
},
"3": {
"RegionID": 3,
"RegionCode": "sin",
"RegionName": "Singapore",
"Nodes": [
{
"Name": "3b",
"RegionID": 3,
"HostName": "derp3b.tailscale.com",
"IPv4": "43.245.49.105",
"IPv6": "2403:2500:300::b0c"
},
{
"Name": "3c",
"RegionID": 3,
"HostName": "derp3c.tailscale.com",
"IPv4": "43.245.49.83",
"IPv6": "2403:2500:300::57a"
},
{
"Name": "3d",
"RegionID": 3,
"HostName": "derp3d.tailscale.com",
"IPv4": "43.245.49.144",
"IPv6": "2403:2500:300::df9"
}
]
},
"4": {
"RegionID": 4,
"RegionCode": "fra",
"RegionName": "Frankfurt",
"Nodes": [
{
"Name": "4f",
"RegionID": 4,
"HostName": "derp4f.tailscale.com",
"IPv4": "185.40.234.219",
"IPv6": "2a00:dd80:20::a25"
},
{
"Name": "4g",
"RegionID": 4,
"HostName": "derp4g.tailscale.com",
"IPv4": "185.40.234.113",
"IPv6": "2a00:dd80:20::8f"
},
{
"Name": "4h",
"RegionID": 4,
"HostName": "derp4h.tailscale.com",
"IPv4": "185.40.234.77",
"IPv6": "2a00:dd80:20::bcf"
}
]
},
"5": {
"RegionID": 5,
"RegionCode": "syd",
"RegionName": "Sydney",
"Nodes": [
{
"Name": "5b",
"RegionID": 5,
"HostName": "derp5b.tailscale.com",
"IPv4": "43.245.48.220",
"IPv6": "2403:2500:9000:1::ce7"
},
{
"Name": "5c",
"RegionID": 5,
"HostName": "derp5c.tailscale.com",
"IPv4": "43.245.48.50",
"IPv6": "2403:2500:9000:1::f57"
},
{
"Name": "5d",
"RegionID": 5,
"HostName": "derp5d.tailscale.com",
"IPv4": "43.245.48.250",
"IPv6": "2403:2500:9000:1::43"
}
]
},
"6": {
"RegionID": 6,
"RegionCode": "blr",
"RegionName": "Bangalore",
"Nodes": [
{
"Name": "6a",
"RegionID": 6,
"HostName": "derp6.tailscale.com",
"IPv4": "68.183.90.120",
"IPv6": "2400:6180:100:d0::982:d001"
}
]
},
"7": {
"RegionID": 7,
"RegionCode": "tok",
"RegionName": "Tokyo",
"Nodes": [
{
"Name": "7b",
"RegionID": 7,
"HostName": "derp7b.tailscale.com",
"IPv4": "103.84.155.178",
"IPv6": "2403:2500:400:20::b79"
},
{
"Name": "7c",
"RegionID": 7,
"HostName": "derp7c.tailscale.com",
"IPv4": "103.84.155.188",
"IPv6": "2403:2500:400:20::835"
},
{
"Name": "7d",
"RegionID": 7,
"HostName": "derp7d.tailscale.com",
"IPv4": "103.84.155.46",
"IPv6": "2403:2500:400:20::cfe"
}
]
},
"8": {
"RegionID": 8,
"RegionCode": "lhr",
"RegionName": "London",
"Nodes": [
{
"Name": "8e",
"RegionID": 8,
"HostName": "derp8e.tailscale.com",
"IPv4": "176.58.92.144",
"IPv6": "2a00:dd80:3a::b33"
},
{
"Name": "8f",
"RegionID": 8,
"HostName": "derp8f.tailscale.com",
"IPv4": "176.58.88.183",
"IPv6": "2a00:dd80:3a::dfa"
},
{
"Name": "8g",
"RegionID": 8,
"HostName": "derp8g.tailscale.com",
"IPv4": "176.58.92.254",
"IPv6": "2a00:dd80:3a::ed"
}
]
},
"9": {
"RegionID": 9,
"RegionCode": "dfw",
"RegionName": "Dallas",
"Nodes": [
{
"Name": "9d",
"RegionID": 9,
"HostName": "derp9d.tailscale.com",
"IPv4": "209.177.156.94",
"IPv6": "2607:f740:100::c05"
},
{
"Name": "9e",
"RegionID": 9,
"HostName": "derp9e.tailscale.com",
"IPv4": "192.73.248.83",
"IPv6": "2607:f740:100::359"
},
{
"Name": "9f",
"RegionID": 9,
"HostName": "derp9f.tailscale.com",
"IPv4": "209.177.156.197",
"IPv6": "2607:f740:100::cad"
}
]
}
}
}