Merge branch 'main' into report-agpl-template-settings-errors

This commit is contained in:
McKayla Washburn 2024-04-12 18:33:52 +00:00
commit 9fd523a193
170 changed files with 4477 additions and 1261 deletions

View File

@ -4,7 +4,7 @@ description: |
inputs:
version:
description: "The Go version to use."
default: "1.21.5"
default: "1.21.9"
runs:
using: "composite"
steps:

View File

@ -228,7 +228,7 @@ jobs:
with:
# This doesn't need caching. It's super fast anyways!
cache: false
go-version: 1.21.5
go-version: 1.21.9
- name: Install shfmt
run: go install mvdan.cc/sh/v3/cmd/shfmt@v3.7.0
@ -432,6 +432,15 @@ jobs:
needs: changes
if: needs.changes.outputs.go == 'true' || needs.changes.outputs.ts == 'true' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main'
timeout-minutes: 20
strategy:
fail-fast: false
matrix:
variant:
- enterprise: false
name: test-e2e
- enterprise: true
name: test-e2e-enterprise
name: ${{ matrix.variant.name }}
steps:
- name: Checkout
uses: actions/checkout@v4
@ -444,52 +453,40 @@ jobs:
- name: Setup Go
uses: ./.github/actions/setup-go
- name: Setup Terraform
uses: ./.github/actions/setup-tf
# Assume that the checked-in versions are up-to-date
- run: make gen/mark-fresh
name: make gen
- name: go install tools
run: |
go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.30
go install storj.io/drpc/cmd/protoc-gen-go-drpc@v0.0.33
go install golang.org/x/tools/cmd/goimports@latest
go install github.com/mikefarah/yq/v4@v4.30.6
go install go.uber.org/mock/mockgen@v0.4.0
- name: Install Protoc
run: |
mkdir -p /tmp/proto
pushd /tmp/proto
curl -L -o protoc.zip https://github.com/protocolbuffers/protobuf/releases/download/v23.3/protoc-23.3-linux-x86_64.zip
unzip protoc.zip
cp -r ./bin/* /usr/local/bin
cp -r ./include /usr/local/bin/include
popd
- name: Build
run: |
make -B site/out/index.html
- run: pnpm build
working-directory: site
- run: pnpm playwright:install
working-directory: site
# Run tests that don't require an enterprise license without an enterprise license
- run: pnpm playwright:test --forbid-only --workers 1
if: ${{ !matrix.variant.enterprise }}
env:
DEBUG: pw:api
working-directory: site
# Run all of the tests with an enterprise license
- run: pnpm playwright:test --forbid-only --workers 1
if: ${{ matrix.variant.enterprise }}
env:
DEBUG: pw:api
CODER_E2E_ENTERPRISE_LICENSE: ${{ secrets.CODER_E2E_ENTERPRISE_LICENSE }}
CODER_E2E_REQUIRE_ENTERPRISE_TESTS: "1"
working-directory: site
# Temporarily allow these to fail so that I can gather data about which
# tests are failing.
continue-on-error: true
- name: Upload Playwright Failed Tests
if: always() && github.actor != 'dependabot[bot]' && runner.os == 'Linux' && !github.event.pull_request.head.repo.fork
uses: actions/upload-artifact@v4
with:
name: failed-test-videos
name: failed-test-videos${{ matrix.variant.enterprise && '-enterprise' || '-agpl' }}
path: ./site/test-results/**/*.webm
retention-days: 7
@ -497,7 +494,7 @@ jobs:
if: always() && github.actor != 'dependabot[bot]' && runner.os == 'Linux' && !github.event.pull_request.head.repo.fork
uses: actions/upload-artifact@v4
with:
name: debug-pprof-dumps
name: debug-pprof-dumps${{ matrix.variant.enterprise && '-enterprise' || '-agpl' }}
path: ./site/test-results/**/debug-pprof-*.txt
retention-days: 7

View File

@ -382,9 +382,9 @@ install: build/coder_$(VERSION)_$(GOOS)_$(GOARCH)$(GOOS_BIN_EXT)
cp "$<" "$$output_file"
.PHONY: install
BOLD := $(shell tput bold)
GREEN := $(shell tput setaf 2)
RESET := $(shell tput sgr0)
BOLD := $(shell tput bold 2>/dev/null)
GREEN := $(shell tput setaf 2 2>/dev/null)
RESET := $(shell tput sgr0 2>/dev/null)
fmt: fmt/eslint fmt/prettier fmt/terraform fmt/shfmt fmt/go
.PHONY: fmt

View File

@ -62,7 +62,10 @@ const (
// EnvProcPrioMgmt determines whether we attempt to manage
// process CPU and OOM Killer priority.
const EnvProcPrioMgmt = "CODER_PROC_PRIO_MGMT"
const (
EnvProcPrioMgmt = "CODER_PROC_PRIO_MGMT"
EnvProcOOMScore = "CODER_PROC_OOM_SCORE"
)
type Options struct {
Filesystem afero.Fs
@ -1575,10 +1578,31 @@ func (a *agent) manageProcessPriorityUntilGracefulShutdown() {
a.processManagementTick = ticker.C
}
oomScore := unsetOOMScore
if scoreStr, ok := a.environmentVariables[EnvProcOOMScore]; ok {
score, err := strconv.Atoi(strings.TrimSpace(scoreStr))
if err == nil && score >= -1000 && score <= 1000 {
oomScore = score
} else {
a.logger.Error(ctx, "invalid oom score",
slog.F("min_value", -1000),
slog.F("max_value", 1000),
slog.F("value", scoreStr),
)
}
}
debouncer := &logDebouncer{
logger: a.logger,
messages: map[string]time.Time{},
interval: time.Minute,
}
for {
procs, err := a.manageProcessPriority(ctx)
procs, err := a.manageProcessPriority(ctx, debouncer, oomScore)
// Avoid spamming the logs too often.
if err != nil {
a.logger.Error(ctx, "manage process priority",
debouncer.Error(ctx, "manage process priority",
slog.Error(err),
)
}
@ -1594,27 +1618,34 @@ func (a *agent) manageProcessPriorityUntilGracefulShutdown() {
}
}
func (a *agent) manageProcessPriority(ctx context.Context) ([]*agentproc.Process, error) {
// unsetOOMScore is set to an invalid OOM score to imply an unset value.
const unsetOOMScore = 1001
func (a *agent) manageProcessPriority(ctx context.Context, debouncer *logDebouncer, oomScore int) ([]*agentproc.Process, error) {
const (
niceness = 10
)
// We fetch the agent score each time because it's possible someone updates the
// value after it is started.
agentScore, err := a.getAgentOOMScore()
if err != nil {
agentScore = unsetOOMScore
}
if oomScore == unsetOOMScore && agentScore != unsetOOMScore {
// If the child score has not been explicitly specified we should
// set it to a score relative to the agent score.
oomScore = childOOMScore(agentScore)
}
procs, err := agentproc.List(a.filesystem, a.syscaller)
if err != nil {
return nil, xerrors.Errorf("list: %w", err)
}
var (
modProcs = []*agentproc.Process{}
logger slog.Logger
)
modProcs := []*agentproc.Process{}
for _, proc := range procs {
logger = a.logger.With(
slog.F("cmd", proc.Cmd()),
slog.F("pid", proc.PID),
)
containsFn := func(e string) bool {
contains := strings.Contains(proc.Cmd(), e)
return contains
@ -1622,14 +1653,16 @@ func (a *agent) manageProcessPriority(ctx context.Context) ([]*agentproc.Process
// If the process is prioritized we should adjust
// its oom_score_adj and avoid lowering its niceness.
if slices.ContainsFunc[[]string, string](prioritizedProcs, containsFn) {
if slices.ContainsFunc(prioritizedProcs, containsFn) {
continue
}
score, err := proc.Niceness(a.syscaller)
if err != nil {
logger.Warn(ctx, "unable to get proc niceness",
slog.Error(err),
score, niceErr := proc.Niceness(a.syscaller)
if niceErr != nil && !xerrors.Is(niceErr, os.ErrPermission) {
debouncer.Warn(ctx, "unable to get proc niceness",
slog.F("cmd", proc.Cmd()),
slog.F("pid", proc.PID),
slog.Error(niceErr),
)
continue
}
@ -1643,15 +1676,31 @@ func (a *agent) manageProcessPriority(ctx context.Context) ([]*agentproc.Process
continue
}
err = proc.SetNiceness(a.syscaller, niceness)
if err != nil {
logger.Warn(ctx, "unable to set proc niceness",
slog.F("niceness", niceness),
slog.Error(err),
)
continue
if niceErr == nil {
err := proc.SetNiceness(a.syscaller, niceness)
if err != nil && !xerrors.Is(err, os.ErrPermission) {
debouncer.Warn(ctx, "unable to set proc niceness",
slog.F("cmd", proc.Cmd()),
slog.F("pid", proc.PID),
slog.F("niceness", niceness),
slog.Error(err),
)
}
}
// If the oom score is valid, differs from the process's current value, and was not customized by another process, update it.
if oomScore != unsetOOMScore && oomScore != proc.OOMScoreAdj && !isCustomOOMScore(agentScore, proc) {
oomScoreStr := strconv.Itoa(oomScore)
err := afero.WriteFile(a.filesystem, fmt.Sprintf("/proc/%d/oom_score_adj", proc.PID), []byte(oomScoreStr), 0o644)
if err != nil && !xerrors.Is(err, os.ErrPermission) {
debouncer.Warn(ctx, "unable to set oom_score_adj",
slog.F("cmd", proc.Cmd()),
slog.F("pid", proc.PID),
slog.F("score", oomScoreStr),
slog.Error(err),
)
}
}
modProcs = append(modProcs, proc)
}
return modProcs, nil
@ -2005,3 +2054,77 @@ func PrometheusMetricsHandler(prometheusRegistry *prometheus.Registry, logger sl
}
})
}
// childOOMScore returns the oom_score_adj for a child process. It is based
// on the oom_score_adj of the agent process.
func childOOMScore(agentScore int) int {
// If the agent has a negative oom_score_adj, we set the child to 0
// so it's treated like every other process.
if agentScore < 0 {
return 0
}
// If the agent is already almost at the maximum then set it to the max.
if agentScore >= 998 {
return 1000
}
// If the agent oom_score_adj is >=0, we set the child to slightly
// less than the maximum. If users want a different score they set it
// directly.
return 998
}
func (a *agent) getAgentOOMScore() (int, error) {
scoreStr, err := afero.ReadFile(a.filesystem, "/proc/self/oom_score_adj")
if err != nil {
return 0, xerrors.Errorf("read file: %w", err)
}
score, err := strconv.Atoi(strings.TrimSpace(string(scoreStr)))
if err != nil {
return 0, xerrors.Errorf("parse int: %w", err)
}
return score, nil
}
// isCustomOOMScore reports whether the process's oom_score_adj was set to a
// value that would not originate from an agent-spawned process.
func isCustomOOMScore(agentScore int, process *agentproc.Process) bool {
score := process.OOMScoreAdj
return agentScore != score && score != 1000 && score != 0 && score != 998
}
// logDebouncer skips writing a log for a particular message if
// it's been emitted within the given interval duration.
// It's a shoddy implementation used in one spot that should be replaced at
// some point.
type logDebouncer struct {
logger slog.Logger
messages map[string]time.Time
interval time.Duration
}
func (l *logDebouncer) Warn(ctx context.Context, msg string, fields ...any) {
l.log(ctx, slog.LevelWarn, msg, fields...)
}
func (l *logDebouncer) Error(ctx context.Context, msg string, fields ...any) {
l.log(ctx, slog.LevelError, msg, fields...)
}
func (l *logDebouncer) log(ctx context.Context, level slog.Level, msg string, fields ...any) {
// This (bad) implementation assumes you wouldn't reuse the same msg
// for different levels.
if last, ok := l.messages[msg]; ok && time.Since(last) < l.interval {
return
}
switch level {
case slog.LevelWarn:
l.logger.Warn(ctx, msg, fields...)
case slog.LevelError:
l.logger.Error(ctx, msg, fields...)
}
l.messages[msg] = time.Now()
}
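The score-selection and skip logic above can be read end-to-end in a small standalone sketch (pickOOMScore and the sample values in main are hypothetical; the constants, range check, and thresholds come from the change above):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// unsetOOMScore mirrors the sentinel above: 1001 lies outside the kernel's
// valid oom_score_adj range of [-1000, 1000], so it can never collide with a
// real value.
const unsetOOMScore = 1001

// childOOMScore reproduces the derivation above for agent-spawned processes.
func childOOMScore(agentScore int) int {
	if agentScore < 0 {
		return 0 // a negative agent score is not inherited
	}
	if agentScore >= 998 {
		return 1000 // agent is near the max, so push children to the max
	}
	return 998 // default: slightly below the maximum
}

// pickOOMScore applies CODER_PROC_OOM_SCORE if it parses and is in range,
// otherwise derives the child score from the agent's own oom_score_adj.
func pickOOMScore(env string, agentScore int) int {
	oomScore := unsetOOMScore
	if score, err := strconv.Atoi(strings.TrimSpace(env)); err == nil && score >= -1000 && score <= 1000 {
		oomScore = score
	}
	if oomScore == unsetOOMScore && agentScore != unsetOOMScore {
		oomScore = childOOMScore(agentScore)
	}
	return oomScore
}

// isCustomOOMScore reproduces the guard above: a score that differs from the
// agent's and from every agent-assigned value (0, 998, 1000) is assumed to be
// user-set and is left alone.
func isCustomOOMScore(agentScore, score int) bool {
	return agentScore != score && score != 1000 && score != 0 && score != 998
}

func main() {
	fmt.Println(pickOOMScore("-567", 0))   // -567: an explicit env value wins
	fmt.Println(pickOOMScore("", -500))    // 0: negative agent score maps to 0
	fmt.Println(pickOOMScore("", 0))       // 998: the default child score
	fmt.Println(pickOOMScore("", 999))     // 1000: agent near max pushes to max
	fmt.Println(isCustomOOMScore(0, -123)) // true: treated as user-set, skipped
}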

View File

@ -2529,11 +2529,11 @@ func TestAgent_ManageProcessPriority(t *testing.T) {
logger = slog.Make(sloghuman.Sink(io.Discard))
)
requireFileWrite(t, fs, "/proc/self/oom_score_adj", "-500")
// Create some processes.
for i := 0; i < 4; i++ {
// Create a prioritized process. This process should
// have it's oom_score_adj set to -500 and its nice
// score should be untouched.
// Create a prioritized process.
var proc agentproc.Process
if i == 0 {
proc = agentproctest.GenerateProcess(t, fs,
@ -2551,8 +2551,8 @@ func TestAgent_ManageProcessPriority(t *testing.T) {
},
)
syscaller.EXPECT().SetPriority(proc.PID, 10).Return(nil)
syscaller.EXPECT().GetPriority(proc.PID).Return(20, nil)
syscaller.EXPECT().SetPriority(proc.PID, 10).Return(nil)
}
syscaller.EXPECT().
Kill(proc.PID, syscall.Signal(0)).
@ -2571,6 +2571,9 @@ func TestAgent_ManageProcessPriority(t *testing.T) {
})
actualProcs := <-modProcs
require.Len(t, actualProcs, len(expectedProcs)-1)
for _, proc := range actualProcs {
requireFileEquals(t, fs, fmt.Sprintf("/proc/%d/oom_score_adj", proc.PID), "0")
}
})
t.Run("IgnoreCustomNice", func(t *testing.T) {
@ -2589,8 +2592,11 @@ func TestAgent_ManageProcessPriority(t *testing.T) {
logger = slog.Make(sloghuman.Sink(io.Discard))
)
err := afero.WriteFile(fs, "/proc/self/oom_score_adj", []byte("0"), 0o644)
require.NoError(t, err)
// Create some processes.
for i := 0; i < 2; i++ {
for i := 0; i < 3; i++ {
proc := agentproctest.GenerateProcess(t, fs)
syscaller.EXPECT().
Kill(proc.PID, syscall.Signal(0)).
@ -2618,7 +2624,59 @@ func TestAgent_ManageProcessPriority(t *testing.T) {
})
actualProcs := <-modProcs
// We should ignore the process with a custom nice score.
require.Len(t, actualProcs, 1)
require.Len(t, actualProcs, 2)
for _, proc := range actualProcs {
_, ok := expectedProcs[proc.PID]
require.True(t, ok)
requireFileEquals(t, fs, fmt.Sprintf("/proc/%d/oom_score_adj", proc.PID), "998")
}
})
t.Run("CustomOOMScore", func(t *testing.T) {
t.Parallel()
if runtime.GOOS != "linux" {
t.Skip("Skipping non-linux environment")
}
var (
fs = afero.NewMemMapFs()
ticker = make(chan time.Time)
syscaller = agentproctest.NewMockSyscaller(gomock.NewController(t))
modProcs = make(chan []*agentproc.Process)
logger = slog.Make(sloghuman.Sink(io.Discard))
)
err := afero.WriteFile(fs, "/proc/self/oom_score_adj", []byte("0"), 0o644)
require.NoError(t, err)
// Create some processes.
for i := 0; i < 3; i++ {
proc := agentproctest.GenerateProcess(t, fs)
syscaller.EXPECT().
Kill(proc.PID, syscall.Signal(0)).
Return(nil)
syscaller.EXPECT().GetPriority(proc.PID).Return(20, nil)
syscaller.EXPECT().SetPriority(proc.PID, 10).Return(nil)
}
_, _, _, _, _ = setupAgent(t, agentsdk.Manifest{}, 0, func(c *agenttest.Client, o *agent.Options) {
o.Syscaller = syscaller
o.ModifiedProcesses = modProcs
o.EnvironmentVariables = map[string]string{
agent.EnvProcPrioMgmt: "1",
agent.EnvProcOOMScore: "-567",
}
o.Filesystem = fs
o.Logger = logger
o.ProcessManagementTick = ticker
})
actualProcs := <-modProcs
// All processes should be modified and receive the custom OOM score.
require.Len(t, actualProcs, 3)
for _, proc := range actualProcs {
requireFileEquals(t, fs, fmt.Sprintf("/proc/%d/oom_score_adj", proc.PID), "-567")
}
})
t.Run("DisabledByDefault", func(t *testing.T) {
@ -2739,3 +2797,17 @@ func requireEcho(t *testing.T, conn net.Conn) {
require.NoError(t, err)
require.Equal(t, "test", string(b))
}
func requireFileWrite(t testing.TB, fs afero.Fs, fp, data string) {
t.Helper()
err := afero.WriteFile(fs, fp, []byte(data), 0o600)
require.NoError(t, err)
}
func requireFileEquals(t testing.TB, fs afero.Fs, fp, expect string) {
t.Helper()
actual, err := afero.ReadFile(fs, fp)
require.NoError(t, err)
require.Equal(t, expect, string(actual))
}

View File

@ -2,6 +2,7 @@ package agentproctest
import (
"fmt"
"strconv"
"testing"
"github.com/spf13/afero"
@ -29,8 +30,9 @@ func GenerateProcess(t *testing.T, fs afero.Fs, muts ...func(*agentproc.Process)
cmdline := fmt.Sprintf("%s\x00%s\x00%s", arg1, arg2, arg3)
process := agentproc.Process{
CmdLine: cmdline,
PID: int32(pid),
CmdLine: cmdline,
PID: int32(pid),
OOMScoreAdj: 0,
}
for _, mut := range muts {
@ -45,5 +47,9 @@ func GenerateProcess(t *testing.T, fs afero.Fs, muts ...func(*agentproc.Process)
err = afero.WriteFile(fs, fmt.Sprintf("%s/cmdline", process.Dir), []byte(process.CmdLine), 0o444)
require.NoError(t, err)
score := strconv.Itoa(process.OOMScoreAdj)
err = afero.WriteFile(fs, fmt.Sprintf("%s/oom_score_adj", process.Dir), []byte(score), 0o444)
require.NoError(t, err)
return process
}

View File

@ -5,6 +5,7 @@ package agentproc
import (
"errors"
"os"
"path/filepath"
"strconv"
"strings"
@ -50,10 +51,26 @@ func List(fs afero.Fs, syscaller Syscaller) ([]*Process, error) {
}
return nil, xerrors.Errorf("read cmdline: %w", err)
}
oomScore, err := afero.ReadFile(fs, filepath.Join(defaultProcDir, entry, "oom_score_adj"))
if err != nil {
if xerrors.Is(err, os.ErrPermission) {
continue
}
return nil, xerrors.Errorf("read oom_score_adj: %w", err)
}
oom, err := strconv.Atoi(strings.TrimSpace(string(oomScore)))
if err != nil {
return nil, xerrors.Errorf("convert oom score: %w", err)
}
processes = append(processes, &Process{
PID: int32(pid),
CmdLine: string(cmdline),
Dir: filepath.Join(defaultProcDir, entry),
PID: int32(pid),
CmdLine: string(cmdline),
Dir: filepath.Join(defaultProcDir, entry),
OOMScoreAdj: oom,
})
}

View File

@ -14,7 +14,8 @@ type Syscaller interface {
const defaultProcDir = "/proc"
type Process struct {
Dir string
CmdLine string
PID int32
Dir string
CmdLine string
PID int32
OOMScoreAdj int
}

View File

@ -1,7 +1,10 @@
package agent
import (
"bytes"
"context"
"encoding/json"
"io"
"net/netip"
"sync"
"testing"
@ -14,6 +17,7 @@ import (
"tailscale.com/types/netlogtype"
"cdr.dev/slog"
"cdr.dev/slog/sloggers/slogjson"
"cdr.dev/slog/sloggers/slogtest"
"github.com/coder/coder/v2/agent/proto"
"github.com/coder/coder/v2/testutil"
@ -210,3 +214,58 @@ func newFakeStatsDest() *fakeStatsDest {
resps: make(chan *proto.UpdateStatsResponse),
}
}
func Test_logDebouncer(t *testing.T) {
t.Parallel()
var (
buf bytes.Buffer
logger = slog.Make(slogjson.Sink(&buf))
ctx = context.Background()
)
debouncer := &logDebouncer{
logger: logger,
messages: map[string]time.Time{},
interval: time.Minute,
}
fields := map[string]interface{}{
"field_1": float64(1),
"field_2": "2",
}
debouncer.Error(ctx, "my message", "field_1", 1, "field_2", "2")
debouncer.Warn(ctx, "another message", "field_1", 1, "field_2", "2")
// Shouldn't log this.
debouncer.Warn(ctx, "another message", "field_1", 1, "field_2", "2")
require.Len(t, debouncer.messages, 2)
type entry struct {
Msg string `json:"msg"`
Level string `json:"level"`
Fields map[string]interface{} `json:"fields"`
}
assertLog := func(msg string, level string, fields map[string]interface{}) {
line, err := buf.ReadString('\n')
require.NoError(t, err)
var e entry
err = json.Unmarshal([]byte(line), &e)
require.NoError(t, err)
require.Equal(t, msg, e.Msg)
require.Equal(t, level, e.Level)
require.Equal(t, fields, e.Fields)
}
assertLog("my message", "ERROR", fields)
assertLog("another message", "WARN", fields)
debouncer.messages["another message"] = time.Now().Add(-2 * time.Minute)
debouncer.Warn(ctx, "another message", "field_1", 1, "field_2", "2")
assertLog("another message", "WARN", fields)
// Assert nothing else was written.
_, err := buf.ReadString('\n')
require.ErrorIs(t, err, io.EOF)
}

View File

@ -283,6 +283,9 @@ func (r *RootCmd) workspaceAgent() *serpent.Command {
if v, ok := os.LookupEnv(agent.EnvProcPrioMgmt); ok {
environmentVariables[agent.EnvProcPrioMgmt] = v
}
if v, ok := os.LookupEnv(agent.EnvProcOOMScore); ok {
environmentVariables[agent.EnvProcOOMScore] = v
}
agnt := agent.New(agent.Options{
Client: client,

View File

@ -1084,10 +1084,23 @@ func formatCoderSDKError(from string, err *codersdk.Error, opts *formatOpts) str
_, _ = str.WriteString("\n")
}
// The main error message
_, _ = str.WriteString(pretty.Sprint(headLineStyle(), err.Message))
// Validation errors.
if len(err.Validations) > 0 {
_, _ = str.WriteString("\n")
_, _ = str.WriteString(pretty.Sprint(tailLineStyle(), fmt.Sprintf("%d validation error(s) found", len(err.Validations))))
for _, e := range err.Validations {
_, _ = str.WriteString("\n\t")
_, _ = str.WriteString(pretty.Sprint(cliui.DefaultStyles.Field, e.Field))
_, _ = str.WriteString(pretty.Sprintf(cliui.DefaultStyles.Warn, ": %s", e.Detail))
}
}
if err.Helper != "" {
_, _ = str.WriteString("\n")
_, _ = str.WriteString(pretty.Sprint(tailLineStyle(), err.Helper))
_, _ = str.WriteString(pretty.Sprintf(tailLineStyle(), "Suggestion: %s", err.Helper))
}
// By default we do not show the Detail with the helper.
if opts.Verbose || (err.Helper == "" && err.Detail != "") {
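The two golden-file updates further down show the new output; the layout can also be sketched without the cliui styling (format and the validation struct here are hypothetical stand-ins for formatCoderSDKError and codersdk.ValidationError):

package main

import (
	"fmt"
	"strings"
)

type validation struct{ Field, Detail string }

// format sketches the layout above: main message, then a count of validation
// errors with one indented "field: detail" line each, then the suggestion.
func format(message string, validations []validation, helper string) string {
	var str strings.Builder
	str.WriteString(message)
	if len(validations) > 0 {
		str.WriteString(fmt.Sprintf("\n%d validation error(s) found", len(validations)))
		for _, v := range validations {
			str.WriteString(fmt.Sprintf("\n\t%s: %s", v.Field, v.Detail))
		}
	}
	if helper != "" {
		str.WriteString("\nSuggestion: " + helper)
	}
	return str.String()
}

func main() {
	fmt.Println(format(
		"Top level sdk error message.",
		[]validation{{Field: "region", Detail: "magic dust is not available in your region"}},
		"Have you tried turning it off and on again?",
	))
}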

View File

@ -209,7 +209,7 @@ func enablePrometheus(
}
afterCtx(ctx, closeUsersFunc)
closeWorkspacesFunc, err := prometheusmetrics.Workspaces(ctx, options.PrometheusRegistry, options.Database, 0)
closeWorkspacesFunc, err := prometheusmetrics.Workspaces(ctx, options.Logger.Named("workspaces_metrics"), options.PrometheusRegistry, options.Database, 0)
if err != nil {
return nil, xerrors.Errorf("register workspaces prometheus metric: %w", err)
}
@ -792,6 +792,9 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd.
return err
}
// This should be output before the logs start streaming.
cliui.Infof(inv.Stdout, "\n==> Logs will stream in below (press ctrl+c to gracefully exit):")
if vals.Telemetry.Enable {
gitAuth := make([]telemetry.GitAuth, 0)
// TODO:
@ -1025,8 +1028,6 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd.
}
}()
cliui.Infof(inv.Stdout, "\n==> Logs will stream in below (press ctrl+c to gracefully exit):")
// Updates the systemd status from activating to activated.
_, err = daemon.SdNotify(false, daemon.SdNotifyReady)
if err != nil {

View File

@ -973,7 +973,6 @@ func TestServer(t *testing.T) {
scanner := bufio.NewScanner(res.Body)
hasActiveUsers := false
hasWorkspaces := false
for scanner.Scan() {
// This metric is manually registered to be tracked in the server. That's
// why we test it's tracked here.
@ -981,10 +980,6 @@ func TestServer(t *testing.T) {
hasActiveUsers = true
continue
}
if strings.HasPrefix(scanner.Text(), "coderd_api_workspace_latest_build_total") {
hasWorkspaces = true
continue
}
if strings.HasPrefix(scanner.Text(), "coderd_db_query_latencies_seconds") {
t.Fatal("db metrics should not be tracked when --prometheus-collect-db-metrics is not enabled")
}
@ -992,7 +987,6 @@ func TestServer(t *testing.T) {
}
require.NoError(t, scanner.Err())
require.True(t, hasActiveUsers)
require.True(t, hasWorkspaces)
})
t.Run("DBMetricsEnabled", func(t *testing.T) {

View File

@ -13,6 +13,7 @@ import (
"text/tabwriter"
"time"
"github.com/google/uuid"
"golang.org/x/xerrors"
"cdr.dev/slog"
@ -114,32 +115,41 @@ func (r *RootCmd) supportBundle() *serpent.Command {
client.URL = u
}
var (
wsID uuid.UUID
agtID uuid.UUID
)
if len(inv.Args) == 0 {
return xerrors.Errorf("must specify workspace name")
}
ws, err := namedWorkspace(inv.Context(), client, inv.Args[0])
if err != nil {
return xerrors.Errorf("invalid workspace: %w", err)
}
cliLog.Debug(inv.Context(), "found workspace",
slog.F("workspace_name", ws.Name),
slog.F("workspace_id", ws.ID),
)
cliLog.Warn(inv.Context(), "no workspace specified")
_, _ = fmt.Fprintln(inv.Stderr, "Warning: no workspace specified. This will result in incomplete information.")
} else {
ws, err := namedWorkspace(inv.Context(), client, inv.Args[0])
if err != nil {
return xerrors.Errorf("invalid workspace: %w", err)
}
cliLog.Debug(inv.Context(), "found workspace",
slog.F("workspace_name", ws.Name),
slog.F("workspace_id", ws.ID),
)
wsID = ws.ID
agentName := ""
if len(inv.Args) > 1 {
agentName = inv.Args[1]
}
agentName := ""
if len(inv.Args) > 1 {
agentName = inv.Args[1]
agt, found := findAgent(agentName, ws.LatestBuild.Resources)
if !found {
cliLog.Warn(inv.Context(), "could not find agent in workspace", slog.F("agent_name", agentName))
} else {
cliLog.Debug(inv.Context(), "found workspace agent",
slog.F("agent_name", agt.Name),
slog.F("agent_id", agt.ID),
)
agtID = agt.ID
}
}
agt, found := findAgent(agentName, ws.LatestBuild.Resources)
if !found {
return xerrors.Errorf("could not find agent named %q for workspace", agentName)
}
cliLog.Debug(inv.Context(), "found workspace agent",
slog.F("agent_name", agt.Name),
slog.F("agent_id", agt.ID),
)
if outputPath == "" {
cwd, err := filepath.Abs(".")
if err != nil {
@ -165,8 +175,8 @@ func (r *RootCmd) supportBundle() *serpent.Command {
Client: client,
// Support adds a sink so we don't need to supply one ourselves.
Log: clientLog,
WorkspaceID: ws.ID,
AgentID: agt.ID,
WorkspaceID: wsID,
AgentID: agtID,
}
bun, err := support.Run(inv.Context(), &deps)
@ -222,20 +232,21 @@ func findAgent(agentName string, haystack []codersdk.WorkspaceResource) (*coders
func writeBundle(src *support.Bundle, dest *zip.Writer) error {
// We JSON-encode the following:
for k, v := range map[string]any{
"deployment/buildinfo.json": src.Deployment.BuildInfo,
"deployment/config.json": src.Deployment.Config,
"deployment/experiments.json": src.Deployment.Experiments,
"deployment/health.json": src.Deployment.HealthReport,
"network/netcheck.json": src.Network.Netcheck,
"workspace/workspace.json": src.Workspace.Workspace,
"agent/agent.json": src.Agent.Agent,
"agent/listening_ports.json": src.Agent.ListeningPorts,
"agent/manifest.json": src.Agent.Manifest,
"agent/peer_diagnostics.json": src.Agent.PeerDiagnostics,
"agent/ping_result.json": src.Agent.PingResult,
"deployment/buildinfo.json": src.Deployment.BuildInfo,
"deployment/config.json": src.Deployment.Config,
"deployment/experiments.json": src.Deployment.Experiments,
"deployment/health.json": src.Deployment.HealthReport,
"network/connection_info.json": src.Network.ConnectionInfo,
"network/netcheck.json": src.Network.Netcheck,
"workspace/template.json": src.Workspace.Template,
"workspace/template_version.json": src.Workspace.TemplateVersion,
"workspace/parameters.json": src.Workspace.Parameters,
"workspace/workspace.json": src.Workspace.Workspace,
} {
f, err := dest.Create(k)
if err != nil {
@ -255,17 +266,17 @@ func writeBundle(src *support.Bundle, dest *zip.Writer) error {
// The below we just write as we have them:
for k, v := range map[string]string{
"network/coordinator_debug.html": src.Network.CoordinatorDebug,
"network/tailnet_debug.html": src.Network.TailnetDebug,
"workspace/build_logs.txt": humanizeBuildLogs(src.Workspace.BuildLogs),
"agent/logs.txt": string(src.Agent.Logs),
"agent/agent_magicsock.html": string(src.Agent.AgentMagicsockHTML),
"agent/client_magicsock.html": string(src.Agent.ClientMagicsockHTML),
"agent/startup_logs.txt": humanizeAgentLogs(src.Agent.StartupLogs),
"agent/prometheus.txt": string(src.Agent.Prometheus),
"workspace/template_file.zip": string(templateVersionBytes),
"logs.txt": strings.Join(src.Logs, "\n"),
"cli_logs.txt": string(src.CLILogs),
"logs.txt": strings.Join(src.Logs, "\n"),
"network/coordinator_debug.html": src.Network.CoordinatorDebug,
"network/tailnet_debug.html": src.Network.TailnetDebug,
"workspace/build_logs.txt": humanizeBuildLogs(src.Workspace.BuildLogs),
"workspace/template_file.zip": string(templateVersionBytes),
} {
f, err := dest.Create(k)
if err != nil {

View File

@ -23,6 +23,7 @@ import (
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/dbfake"
"github.com/coder/coder/v2/coderd/database/dbtime"
"github.com/coder/coder/v2/coderd/healthcheck/derphealth"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/codersdk/agentsdk"
"github.com/coder/coder/v2/codersdk/healthsdk"
@ -95,33 +96,50 @@ func TestSupportBundle(t *testing.T) {
clitest.SetupConfig(t, client, root)
err = inv.Run()
require.NoError(t, err)
assertBundleContents(t, path, secretValue)
assertBundleContents(t, path, true, true, []string{secretValue})
})
t.Run("NoWorkspace", func(t *testing.T) {
t.Parallel()
client := coderdtest.New(t, nil)
var dc codersdk.DeploymentConfig
secretValue := uuid.NewString()
seedSecretDeploymentOptions(t, &dc, secretValue)
client := coderdtest.New(t, &coderdtest.Options{
DeploymentValues: dc.Values,
})
_ = coderdtest.CreateFirstUser(t, client)
inv, root := clitest.New(t, "support", "bundle", "--yes")
d := t.TempDir()
path := filepath.Join(d, "bundle.zip")
inv, root := clitest.New(t, "support", "bundle", "--output-file", path, "--yes")
//nolint: gocritic // requires owner privilege
clitest.SetupConfig(t, client, root)
err := inv.Run()
require.ErrorContains(t, err, "must specify workspace name")
require.NoError(t, err)
assertBundleContents(t, path, false, false, []string{secretValue})
})
t.Run("NoAgent", func(t *testing.T) {
t.Parallel()
client, db := coderdtest.NewWithDatabase(t, nil)
var dc codersdk.DeploymentConfig
secretValue := uuid.NewString()
seedSecretDeploymentOptions(t, &dc, secretValue)
client, db := coderdtest.NewWithDatabase(t, &coderdtest.Options{
DeploymentValues: dc.Values,
})
admin := coderdtest.CreateFirstUser(t, client)
r := dbfake.WorkspaceBuild(t, db, database.Workspace{
OrganizationID: admin.OrganizationID,
OwnerID: admin.UserID,
}).Do() // without agent!
inv, root := clitest.New(t, "support", "bundle", r.Workspace.Name, "--yes")
d := t.TempDir()
path := filepath.Join(d, "bundle.zip")
inv, root := clitest.New(t, "support", "bundle", r.Workspace.Name, "--output-file", path, "--yes")
//nolint: gocritic // requires owner privilege
clitest.SetupConfig(t, client, root)
err := inv.Run()
require.ErrorContains(t, err, "could not find agent")
require.NoError(t, err)
assertBundleContents(t, path, true, false, []string{secretValue})
})
t.Run("NoPrivilege", func(t *testing.T) {
@ -140,7 +158,8 @@ func TestSupportBundle(t *testing.T) {
})
}
func assertBundleContents(t *testing.T, path string, badValues ...string) {
// nolint:revive // It's a control flag, but this is just a test.
func assertBundleContents(t *testing.T, path string, wantWorkspace bool, wantAgent bool, badValues []string) {
t.Helper()
r, err := zip.OpenReader(path)
require.NoError(t, err, "open zip file")
@ -164,6 +183,10 @@ func assertBundleContents(t *testing.T, path string, badValues ...string) {
var v healthsdk.HealthcheckReport
decodeJSONFromZip(t, f, &v)
require.NotEmpty(t, v, "health report should not be empty")
case "network/connection_info.json":
var v workspacesdk.AgentConnectionInfo
decodeJSONFromZip(t, f, &v)
require.NotEmpty(t, v, "agent connection info should not be empty")
case "network/coordinator_debug.html":
bs := readBytesFromZip(t, f)
require.NotEmpty(t, bs, "coordinator debug should not be empty")
@ -171,66 +194,130 @@ func assertBundleContents(t *testing.T, path string, badValues ...string) {
bs := readBytesFromZip(t, f)
require.NotEmpty(t, bs, "tailnet debug should not be empty")
case "network/netcheck.json":
var v workspacesdk.AgentConnectionInfo
var v derphealth.Report
decodeJSONFromZip(t, f, &v)
require.NotEmpty(t, v, "connection info should not be empty")
require.NotEmpty(t, v, "netcheck should not be empty")
case "workspace/workspace.json":
var v codersdk.Workspace
decodeJSONFromZip(t, f, &v)
if !wantWorkspace {
require.Empty(t, v, "expected workspace to be empty")
continue
}
require.NotEmpty(t, v, "workspace should not be empty")
case "workspace/build_logs.txt":
bs := readBytesFromZip(t, f)
if !wantWorkspace || !wantAgent {
require.Empty(t, bs, "expected workspace build logs to be empty")
continue
}
require.Contains(t, string(bs), "provision done")
case "agent/agent.json":
var v codersdk.WorkspaceAgent
decodeJSONFromZip(t, f, &v)
require.NotEmpty(t, v, "agent should not be empty")
case "agent/listening_ports.json":
var v codersdk.WorkspaceAgentListeningPortsResponse
decodeJSONFromZip(t, f, &v)
require.NotEmpty(t, v, "agent listening ports should not be empty")
case "agent/logs.txt":
bs := readBytesFromZip(t, f)
require.NotEmpty(t, bs, "logs should not be empty")
case "agent/agent_magicsock.html":
bs := readBytesFromZip(t, f)
require.NotEmpty(t, bs, "agent magicsock should not be empty")
case "agent/client_magicsock.html":
bs := readBytesFromZip(t, f)
require.NotEmpty(t, bs, "client magicsock should not be empty")
case "agent/manifest.json":
var v agentsdk.Manifest
decodeJSONFromZip(t, f, &v)
require.NotEmpty(t, v, "agent manifest should not be empty")
case "agent/peer_diagnostics.json":
var v *tailnet.PeerDiagnostics
decodeJSONFromZip(t, f, &v)
require.NotEmpty(t, v, "peer diagnostics should not be empty")
case "agent/ping_result.json":
var v *ipnstate.PingResult
decodeJSONFromZip(t, f, &v)
require.NotEmpty(t, v, "ping result should not be empty")
case "agent/prometheus.txt":
bs := readBytesFromZip(t, f)
require.NotEmpty(t, bs, "agent prometheus metrics should not be empty")
case "agent/startup_logs.txt":
bs := readBytesFromZip(t, f)
require.Contains(t, string(bs), "started up")
case "workspace/template.json":
var v codersdk.Template
decodeJSONFromZip(t, f, &v)
if !wantWorkspace {
require.Empty(t, v, "expected workspace template to be empty")
continue
}
require.NotEmpty(t, v, "workspace template should not be empty")
case "workspace/template_version.json":
var v codersdk.TemplateVersion
decodeJSONFromZip(t, f, &v)
if !wantWorkspace {
require.Empty(t, v, "expected workspace template version to be empty")
continue
}
require.NotEmpty(t, v, "workspace template version should not be empty")
case "workspace/parameters.json":
var v []codersdk.WorkspaceBuildParameter
decodeJSONFromZip(t, f, &v)
if !wantWorkspace {
require.Empty(t, v, "expected workspace parameters to be empty")
continue
}
require.NotNil(t, v, "workspace parameters should not be nil")
case "workspace/template_file.zip":
bs := readBytesFromZip(t, f)
if !wantWorkspace {
require.Empty(t, bs, "expected template file to be empty")
continue
}
require.NotNil(t, bs, "template file should not be nil")
case "agent/agent.json":
var v codersdk.WorkspaceAgent
decodeJSONFromZip(t, f, &v)
if !wantAgent {
require.Empty(t, v, "expected agent to be empty")
continue
}
require.NotEmpty(t, v, "agent should not be empty")
case "agent/listening_ports.json":
var v codersdk.WorkspaceAgentListeningPortsResponse
decodeJSONFromZip(t, f, &v)
if !wantAgent {
require.Empty(t, v, "expected agent listening ports to be empty")
continue
}
require.NotEmpty(t, v, "agent listening ports should not be empty")
case "agent/logs.txt":
bs := readBytesFromZip(t, f)
if !wantAgent {
require.Empty(t, bs, "expected agent logs to be empty")
continue
}
require.NotEmpty(t, bs, "logs should not be empty")
case "agent/agent_magicsock.html":
bs := readBytesFromZip(t, f)
if !wantAgent {
require.Empty(t, bs, "expected agent magicsock to be empty")
continue
}
require.NotEmpty(t, bs, "agent magicsock should not be empty")
case "agent/client_magicsock.html":
bs := readBytesFromZip(t, f)
if !wantAgent {
require.Empty(t, bs, "expected client magicsock to be empty")
continue
}
require.NotEmpty(t, bs, "client magicsock should not be empty")
case "agent/manifest.json":
var v agentsdk.Manifest
decodeJSONFromZip(t, f, &v)
if !wantAgent {
require.Empty(t, v, "expected agent manifest to be empty")
continue
}
require.NotEmpty(t, v, "agent manifest should not be empty")
case "agent/peer_diagnostics.json":
var v *tailnet.PeerDiagnostics
decodeJSONFromZip(t, f, &v)
if !wantAgent {
require.Empty(t, v, "expected peer diagnostics to be empty")
continue
}
require.NotEmpty(t, v, "peer diagnostics should not be empty")
case "agent/ping_result.json":
var v *ipnstate.PingResult
decodeJSONFromZip(t, f, &v)
if !wantAgent {
require.Empty(t, v, "expected ping result to be empty")
continue
}
require.NotEmpty(t, v, "ping result should not be empty")
case "agent/prometheus.txt":
bs := readBytesFromZip(t, f)
if !wantAgent {
require.Empty(t, bs, "expected agent prometheus metrics to be empty")
continue
}
require.NotEmpty(t, bs, "agent prometheus metrics should not be empty")
case "agent/startup_logs.txt":
bs := readBytesFromZip(t, f)
if !wantAgent {
require.Empty(t, bs, "expected agent startup logs to be empty")
continue
}
require.Contains(t, string(bs), "started up")
case "logs.txt":
bs := readBytesFromZip(t, f)
require.NotEmpty(t, bs, "logs should not be empty")

View File

@ -7,11 +7,11 @@ import (
"path/filepath"
"sort"
"github.com/codeclysm/extract/v3"
"golang.org/x/xerrors"
"github.com/coder/coder/v2/cli/cliui"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/provisionersdk"
"github.com/coder/serpent"
)
@ -161,7 +161,7 @@ func (r *RootCmd) templatePull() *serpent.Command {
}
_, _ = fmt.Fprintf(inv.Stderr, "Extracting template to %q\n", dest)
err = extract.Tar(ctx, bytes.NewReader(raw), dest, nil)
err = provisionersdk.Untar(dest, bytes.NewReader(raw))
return err
},
}

View File

@ -3,7 +3,6 @@ package cli_test
import (
"archive/tar"
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"os"
@ -11,7 +10,6 @@ import (
"strings"
"testing"
"github.com/codeclysm/extract/v3"
"github.com/google/uuid"
"github.com/stretchr/testify/require"
@ -20,6 +18,7 @@ import (
"github.com/coder/coder/v2/coderd/coderdtest"
"github.com/coder/coder/v2/coderd/rbac"
"github.com/coder/coder/v2/provisioner/echo"
"github.com/coder/coder/v2/provisionersdk"
"github.com/coder/coder/v2/provisionersdk/proto"
"github.com/coder/coder/v2/pty/ptytest"
)
@ -310,9 +309,7 @@ func TestTemplatePull_ToDir(t *testing.T) {
_ = coderdtest.AwaitTemplateVersionJobCompleted(t, client, updatedVersion.ID)
coderdtest.UpdateActiveTemplateVersion(t, client, template.ID, updatedVersion.ID)
ctx := context.Background()
err = extract.Tar(ctx, bytes.NewReader(expected), expectedDest, nil)
err = provisionersdk.Untar(expectedDest, bytes.NewReader(expected))
require.NoError(t, err)
ents, _ := os.ReadDir(actualDest)
@ -387,9 +384,7 @@ func TestTemplatePull_FolderConflict(t *testing.T) {
)
require.NoError(t, err)
ctx := context.Background()
err = extract.Tar(ctx, bytes.NewReader(expected), expectedDest, nil)
err = provisionersdk.Untar(expectedDest, bytes.NewReader(expected))
require.NoError(t, err)
inv, root := clitest.New(t, "templates", "pull", template.Name, conflictDest)

View File

@ -1,3 +1,5 @@
Encountered an error running "coder exp example-error api", see "coder exp example-error api --help" for more information
error: Top level sdk error message.
Have you tried turning it off and on again?
1 validation error(s) found
region : magic dust is not available in your region
Suggestion: Have you tried turning it off and on again?

View File

@ -4,4 +4,6 @@ error: 3 errors encountered: Trace=[wrapped: ])
2. second error: function decided not to work, and it never will
3. Trace=[wrapped api error: ]
Top level sdk error message.
1 validation error(s) found
region : magic dust is not available in your region
magic dust unavailable, please try again later

coderd/apidoc/docs.go (generated)
View File

@ -2211,6 +2211,7 @@ const docTemplate = `{
"CoderSessionToken": []
}
],
"description": "Create a new workspace using a template. The request must\nspecify either the Template ID or the Template Version ID,\nnot both. If the Template ID is specified, the active version\nof the template will be used.",
"consumes": [
"application/json"
],
@ -5903,6 +5904,7 @@ const docTemplate = `{
],
"summary": "Submit workspace agent stats",
"operationId": "submit-workspace-agent-stats",
"deprecated": true,
"parameters": [
{
"description": "Stats request",
@ -9045,6 +9047,7 @@ const docTemplate = `{
}
},
"codersdk.CreateWorkspaceRequest": {
"description": "CreateWorkspaceRequest provides options for creating a new workspace. Only one of TemplateID or TemplateVersionID can be specified, not both. If TemplateID is specified, the active version of the template will be used.",
"type": "object",
"required": [
"name"
@ -9293,9 +9296,6 @@ const docTemplate = `{
"disable_path_apps": {
"type": "boolean"
},
"disable_session_expiry_refresh": {
"type": "boolean"
},
"docs_url": {
"$ref": "#/definitions/serpent.URL"
},
@ -9333,12 +9333,6 @@ const docTemplate = `{
"logging": {
"$ref": "#/definitions/codersdk.LoggingConfig"
},
"max_session_expiry": {
"type": "integer"
},
"max_token_lifetime": {
"type": "integer"
},
"metrics_cache_refresh_interval": {
"type": "integer"
},
@ -9390,6 +9384,9 @@ const docTemplate = `{
"secure_auth_cookie": {
"type": "boolean"
},
"session_lifetime": {
"$ref": "#/definitions/codersdk.SessionLifetime"
},
"ssh_keygen_algorithm": {
"type": "string"
},
@ -11082,6 +11079,22 @@ const docTemplate = `{
}
}
},
"codersdk.SessionLifetime": {
"type": "object",
"properties": {
"default_duration": {
"description": "DefaultDuration is for api keys, not tokens.",
"type": "integer"
},
"disable_expiry_refresh": {
"description": "DisableExpiryRefresh will disable automatically refreshing api\nkeys when they are used from the api. This means the api key lifetime at\ncreation is the lifetime of the api key.",
"type": "boolean"
},
"max_token_lifetime": {
"type": "integer"
}
}
},
"codersdk.SupportConfig": {
"type": "object",
"properties": {

View File

@ -1932,6 +1932,7 @@
"CoderSessionToken": []
}
],
"description": "Create a new workspace using a template. The request must\nspecify either the Template ID or the Template Version ID,\nnot both. If the Template ID is specified, the active version\nof the template will be used.",
"consumes": ["application/json"],
"produces": ["application/json"],
"tags": ["Workspaces"],
@ -5200,6 +5201,7 @@
"tags": ["Agents"],
"summary": "Submit workspace agent stats",
"operationId": "submit-workspace-agent-stats",
"deprecated": true,
"parameters": [
{
"description": "Stats request",
@ -8052,6 +8054,7 @@
}
},
"codersdk.CreateWorkspaceRequest": {
"description": "CreateWorkspaceRequest provides options for creating a new workspace. Only one of TemplateID or TemplateVersionID can be specified, not both. If TemplateID is specified, the active version of the template will be used.",
"type": "object",
"required": ["name"],
"properties": {
@ -8298,9 +8301,6 @@
"disable_path_apps": {
"type": "boolean"
},
"disable_session_expiry_refresh": {
"type": "boolean"
},
"docs_url": {
"$ref": "#/definitions/serpent.URL"
},
@ -8338,12 +8338,6 @@
"logging": {
"$ref": "#/definitions/codersdk.LoggingConfig"
},
"max_session_expiry": {
"type": "integer"
},
"max_token_lifetime": {
"type": "integer"
},
"metrics_cache_refresh_interval": {
"type": "integer"
},
@ -8395,6 +8389,9 @@
"secure_auth_cookie": {
"type": "boolean"
},
"session_lifetime": {
"$ref": "#/definitions/codersdk.SessionLifetime"
},
"ssh_keygen_algorithm": {
"type": "string"
},
@ -9984,6 +9981,22 @@
}
}
},
"codersdk.SessionLifetime": {
"type": "object",
"properties": {
"default_duration": {
"description": "DefaultDuration is for api keys, not tokens.",
"type": "integer"
},
"disable_expiry_refresh": {
"description": "DisableExpiryRefresh will disable automatically refreshing api\nkeys when they are used from the api. This means the api key lifetime at\ncreation is the lifetime of the api key.",
"type": "boolean"
},
"max_token_lifetime": {
"type": "integer"
}
}
},
"codersdk.SupportConfig": {
"type": "object",
"properties": {

View File

@ -84,7 +84,7 @@ func (api *API) postToken(rw http.ResponseWriter, r *http.Request) {
cookie, key, err := api.createAPIKey(ctx, apikey.CreateParams{
UserID: user.ID,
LoginType: database.LoginTypeToken,
DefaultLifetime: api.DeploymentValues.SessionDuration.Value(),
DefaultLifetime: api.DeploymentValues.Sessions.DefaultDuration.Value(),
ExpiresAt: dbtime.Now().Add(lifeTime),
Scope: scope,
LifetimeSeconds: int64(lifeTime.Seconds()),
@ -128,7 +128,7 @@ func (api *API) postAPIKey(rw http.ResponseWriter, r *http.Request) {
lifeTime := time.Hour * 24 * 7
cookie, _, err := api.createAPIKey(ctx, apikey.CreateParams{
UserID: user.ID,
DefaultLifetime: api.DeploymentValues.SessionDuration.Value(),
DefaultLifetime: api.DeploymentValues.Sessions.DefaultDuration.Value(),
LoginType: database.LoginTypePassword,
RemoteAddr: r.RemoteAddr,
// All api generated keys will last 1 week. Browser login tokens have
@ -354,7 +354,7 @@ func (api *API) tokenConfig(rw http.ResponseWriter, r *http.Request) {
httpapi.Write(
r.Context(), rw, http.StatusOK,
codersdk.TokenConfig{
MaxTokenLifetime: values.MaxTokenLifetime.Value(),
MaxTokenLifetime: values.Sessions.MaximumTokenDuration.Value(),
},
)
}
@ -364,10 +364,10 @@ func (api *API) validateAPIKeyLifetime(lifetime time.Duration) error {
return xerrors.New("lifetime must be positive number greater than 0")
}
if lifetime > api.DeploymentValues.MaxTokenLifetime.Value() {
if lifetime > api.DeploymentValues.Sessions.MaximumTokenDuration.Value() {
return xerrors.Errorf(
"lifetime must be less than %v",
api.DeploymentValues.MaxTokenLifetime,
api.DeploymentValues.Sessions.MaximumTokenDuration,
)
}

View File

@ -125,7 +125,7 @@ func TestTokenUserSetMaxLifetime(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
defer cancel()
dc := coderdtest.DeploymentValues(t)
dc.MaxTokenLifetime = serpent.Duration(time.Hour * 24 * 7)
dc.Sessions.MaximumTokenDuration = serpent.Duration(time.Hour * 24 * 7)
client := coderdtest.New(t, &coderdtest.Options{
DeploymentValues: dc,
})
@ -165,7 +165,7 @@ func TestSessionExpiry(t *testing.T) {
//
// We don't support updating the deployment config after startup, but for
// this test it works because we don't copy the value (and we use pointers).
dc.SessionDuration = serpent.Duration(time.Second)
dc.Sessions.DefaultDuration = serpent.Duration(time.Second)
userClient, _ := coderdtest.CreateAnotherUser(t, adminClient, adminUser.OrganizationID)
@ -174,8 +174,8 @@ func TestSessionExpiry(t *testing.T) {
apiKey, err := db.GetAPIKeyByID(ctx, strings.Split(token, "-")[0])
require.NoError(t, err)
require.EqualValues(t, dc.SessionDuration.Value().Seconds(), apiKey.LifetimeSeconds)
require.WithinDuration(t, apiKey.CreatedAt.Add(dc.SessionDuration.Value()), apiKey.ExpiresAt, 2*time.Second)
require.EqualValues(t, dc.Sessions.DefaultDuration.Value().Seconds(), apiKey.LifetimeSeconds)
require.WithinDuration(t, apiKey.CreatedAt.Add(dc.Sessions.DefaultDuration.Value()), apiKey.ExpiresAt, 2*time.Second)
// Update the session token to be expired so we can test that it is
// rejected for extra points.

View File

@ -21,7 +21,7 @@ type AdditionalFields struct {
BuildNumber string `json:"build_number"`
BuildReason database.BuildReason `json:"build_reason"`
WorkspaceOwner string `json:"workspace_owner"`
WorkspaceID uuid.UUID `json:"workpace_id"`
WorkspaceID uuid.UUID `json:"workspace_id"`
}
func NewNop() Auditor {

View File

@ -17,6 +17,7 @@ import (
"github.com/coder/coder/v2/coderd/autobuild"
"github.com/coder/coder/v2/coderd/coderdtest"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/dbauthz"
"github.com/coder/coder/v2/coderd/schedule"
"github.com/coder/coder/v2/coderd/schedule/cron"
"github.com/coder/coder/v2/coderd/util/ptr"
@ -849,14 +850,17 @@ func TestExecutorRequireActiveVersion(t *testing.T) {
ticker = make(chan time.Time)
statCh = make(chan autobuild.Stats)
ownerClient = coderdtest.New(t, &coderdtest.Options{
ownerClient, db = coderdtest.NewWithDatabase(t, &coderdtest.Options{
AutobuildTicker: ticker,
IncludeProvisionerDaemon: true,
AutobuildStats: statCh,
TemplateScheduleStore: schedule.NewAGPLTemplateScheduleStore(),
})
)
ctx := testutil.Context(t, testutil.WaitShort)
owner := coderdtest.CreateFirstUser(t, ownerClient)
me, err := ownerClient.User(ctx, codersdk.Me)
require.NoError(t, err)
// Create an active and inactive template version. We'll
// build a regular member's workspace using a non-active
@ -864,10 +868,14 @@ func TestExecutorRequireActiveVersion(t *testing.T) {
// since there is no enterprise license.
activeVersion := coderdtest.CreateTemplateVersion(t, ownerClient, owner.OrganizationID, nil)
coderdtest.AwaitTemplateVersionJobCompleted(t, ownerClient, activeVersion.ID)
template := coderdtest.CreateTemplate(t, ownerClient, owner.OrganizationID, activeVersion.ID, func(ctr *codersdk.CreateTemplateRequest) {
ctr.RequireActiveVersion = true
ctr.VersionID = activeVersion.ID
template := coderdtest.CreateTemplate(t, ownerClient, owner.OrganizationID, activeVersion.ID)
//nolint We need to set this in the database directly, because the API will return an error
// letting you know that this feature requires an enterprise license.
err = db.UpdateTemplateAccessControlByID(dbauthz.As(ctx, coderdtest.AuthzUserSubject(me, owner.OrganizationID)), database.UpdateTemplateAccessControlByIDParams{
ID: template.ID,
RequireActiveVersion: true,
})
require.NoError(t, err)
inactiveVersion := coderdtest.CreateTemplateVersion(t, ownerClient, owner.OrganizationID, nil, func(ctvr *codersdk.CreateTemplateVersionRequest) {
ctvr.TemplateID = template.ID
})

View File

@ -566,7 +566,7 @@ func New(options *Options) *API {
DB: options.Database,
OAuth2Configs: oauthConfigs,
RedirectToLogin: false,
DisableSessionExpiryRefresh: options.DeploymentValues.DisableSessionExpiryRefresh.Value(),
DisableSessionExpiryRefresh: options.DeploymentValues.Sessions.DisableExpiryRefresh.Value(),
Optional: false,
SessionTokenFunc: nil, // Default behavior
PostAuthAdditionalHeadersFunc: options.PostAuthAdditionalHeadersFunc,
@ -576,7 +576,7 @@ func New(options *Options) *API {
DB: options.Database,
OAuth2Configs: oauthConfigs,
RedirectToLogin: true,
DisableSessionExpiryRefresh: options.DeploymentValues.DisableSessionExpiryRefresh.Value(),
DisableSessionExpiryRefresh: options.DeploymentValues.Sessions.DisableExpiryRefresh.Value(),
Optional: false,
SessionTokenFunc: nil, // Default behavior
PostAuthAdditionalHeadersFunc: options.PostAuthAdditionalHeadersFunc,
@ -586,7 +586,7 @@ func New(options *Options) *API {
DB: options.Database,
OAuth2Configs: oauthConfigs,
RedirectToLogin: false,
DisableSessionExpiryRefresh: options.DeploymentValues.DisableSessionExpiryRefresh.Value(),
DisableSessionExpiryRefresh: options.DeploymentValues.Sessions.DisableExpiryRefresh.Value(),
Optional: true,
SessionTokenFunc: nil, // Default behavior
PostAuthAdditionalHeadersFunc: options.PostAuthAdditionalHeadersFunc,

View File

@ -604,7 +604,7 @@ func (f *FakeIDP) CreateAuthCode(t testing.TB, state string) string {
// something.
// Essentially this is used to fake the Coderd side of the exchange.
// The flow starts at the user hitting the OIDC login page.
func (f *FakeIDP) OIDCCallback(t testing.TB, state string, idTokenClaims jwt.MapClaims) (*http.Response, error) {
func (f *FakeIDP) OIDCCallback(t testing.TB, state string, idTokenClaims jwt.MapClaims) *http.Response {
t.Helper()
if f.serve {
panic("cannot use OIDCCallback with WithServing. This is only for the in memory usage")
@ -625,7 +625,7 @@ func (f *FakeIDP) OIDCCallback(t testing.TB, state string, idTokenClaims jwt.Map
_ = resp.Body.Close()
}
})
return resp, nil
return resp
}
// ProviderJSON is the .well-known/configuration JSON

View File

@ -54,12 +54,12 @@ func TestFakeIDPBasicFlow(t *testing.T) {
token = oauthToken
})
resp, err := fake.OIDCCallback(t, expectedState, jwt.MapClaims{})
require.NoError(t, err)
//nolint:bodyclose
resp := fake.OIDCCallback(t, expectedState, jwt.MapClaims{})
require.Equal(t, http.StatusOK, resp.StatusCode)
// Test the user info
_, err = cfg.Provider.UserInfo(ctx, oauth2.StaticTokenSource(token))
_, err := cfg.Provider.UserInfo(ctx, oauth2.StaticTokenSource(token))
require.NoError(t, err)
// Now test it can refresh

View File

@ -174,6 +174,7 @@ var (
// When org scoped provisioner credentials are implemented,
// this can be reduced to read a specific org.
rbac.ResourceOrganization.Type: {rbac.ActionRead},
rbac.ResourceGroup.Type: {rbac.ActionRead},
}),
Org: map[string][]rbac.Permission{},
User: []rbac.Permission{},
@ -1141,6 +1142,10 @@ func (q *querier) GetGroupMembers(ctx context.Context, id uuid.UUID) ([]database
return q.db.GetGroupMembers(ctx, id)
}
func (q *querier) GetGroupsByOrganizationAndUserID(ctx context.Context, arg database.GetGroupsByOrganizationAndUserIDParams) ([]database.Group, error) {
return fetchWithPostFilter(q.auth, q.db.GetGroupsByOrganizationAndUserID)(ctx, arg)
}
func (q *querier) GetGroupsByOrganizationID(ctx context.Context, organizationID uuid.UUID) ([]database.Group, error) {
return fetchWithPostFilter(q.auth, q.db.GetGroupsByOrganizationID)(ctx, organizationID)
}
@ -2527,20 +2532,6 @@ func (q *querier) InsertWorkspaceAgentScripts(ctx context.Context, arg database.
return q.db.InsertWorkspaceAgentScripts(ctx, arg)
}
func (q *querier) InsertWorkspaceAgentStat(ctx context.Context, arg database.InsertWorkspaceAgentStatParams) (database.WorkspaceAgentStat, error) {
// TODO: This is a workspace agent operation. Should users be able to query this?
// Not really sure what this is for.
workspace, err := q.db.GetWorkspaceByID(ctx, arg.WorkspaceID)
if err != nil {
return database.WorkspaceAgentStat{}, err
}
err = q.authorizeContext(ctx, rbac.ActionUpdate, workspace)
if err != nil {
return database.WorkspaceAgentStat{}, err
}
return q.db.InsertWorkspaceAgentStat(ctx, arg)
}
func (q *querier) InsertWorkspaceAgentStats(ctx context.Context, arg database.InsertWorkspaceAgentStatsParams) error {
if err := q.authorizeContext(ctx, rbac.ActionCreate, rbac.ResourceSystem); err != nil {
return err

View File

@ -314,6 +314,14 @@ func (s *MethodTestSuite) TestGroup() {
_ = dbgen.GroupMember(s.T(), db, database.GroupMember{})
check.Args(g.ID).Asserts(g, rbac.ActionRead)
}))
s.Run("GetGroupsByOrganizationAndUserID", s.Subtest(func(db database.Store, check *expects) {
g := dbgen.Group(s.T(), db, database.Group{})
gm := dbgen.GroupMember(s.T(), db, database.GroupMember{GroupID: g.ID})
check.Args(database.GetGroupsByOrganizationAndUserIDParams{
OrganizationID: g.OrganizationID,
UserID: gm.UserID,
}).Asserts(g, rbac.ActionRead)
}))
s.Run("InsertAllUsersGroup", s.Subtest(func(db database.Store, check *expects) {
o := dbgen.Organization(s.T(), db, database.Organization{})
check.Args(o.ID).Asserts(rbac.ResourceGroup.InOrg(o.ID), rbac.ActionCreate)
@ -1512,12 +1520,6 @@ func (s *MethodTestSuite) TestWorkspace() {
AutomaticUpdates: database.AutomaticUpdatesAlways,
}).Asserts(w, rbac.ActionUpdate)
}))
s.Run("InsertWorkspaceAgentStat", s.Subtest(func(db database.Store, check *expects) {
ws := dbgen.Workspace(s.T(), db, database.Workspace{})
check.Args(database.InsertWorkspaceAgentStatParams{
WorkspaceID: ws.ID,
}).Asserts(ws, rbac.ActionUpdate)
}))
s.Run("UpdateWorkspaceAppHealthByID", s.Subtest(func(db database.Store, check *expects) {
ws := dbgen.Workspace(s.T(), db, database.Workspace{})
build := dbgen.WorkspaceBuild(s.T(), db, database.WorkspaceBuild{WorkspaceID: ws.ID, JobID: uuid.New()})

View File

@ -707,27 +707,49 @@ func WorkspaceAgentStat(t testing.TB, db database.Store, orig database.Workspace
if orig.ConnectionsByProto == nil {
orig.ConnectionsByProto = json.RawMessage([]byte("{}"))
}
scheme, err := db.InsertWorkspaceAgentStat(genCtx, database.InsertWorkspaceAgentStatParams{
ID: takeFirst(orig.ID, uuid.New()),
CreatedAt: takeFirst(orig.CreatedAt, dbtime.Now()),
UserID: takeFirst(orig.UserID, uuid.New()),
TemplateID: takeFirst(orig.TemplateID, uuid.New()),
WorkspaceID: takeFirst(orig.WorkspaceID, uuid.New()),
AgentID: takeFirst(orig.AgentID, uuid.New()),
ConnectionsByProto: orig.ConnectionsByProto,
ConnectionCount: takeFirst(orig.ConnectionCount, 0),
RxPackets: takeFirst(orig.RxPackets, 0),
RxBytes: takeFirst(orig.RxBytes, 0),
TxPackets: takeFirst(orig.TxPackets, 0),
TxBytes: takeFirst(orig.TxBytes, 0),
SessionCountVSCode: takeFirst(orig.SessionCountVSCode, 0),
SessionCountJetBrains: takeFirst(orig.SessionCountJetBrains, 0),
SessionCountReconnectingPTY: takeFirst(orig.SessionCountReconnectingPTY, 0),
SessionCountSSH: takeFirst(orig.SessionCountSSH, 0),
ConnectionMedianLatencyMS: takeFirst(orig.ConnectionMedianLatencyMS, 0),
})
jsonProto := []byte(fmt.Sprintf("[%s]", orig.ConnectionsByProto))
params := database.InsertWorkspaceAgentStatsParams{
ID: []uuid.UUID{takeFirst(orig.ID, uuid.New())},
CreatedAt: []time.Time{takeFirst(orig.CreatedAt, dbtime.Now())},
UserID: []uuid.UUID{takeFirst(orig.UserID, uuid.New())},
TemplateID: []uuid.UUID{takeFirst(orig.TemplateID, uuid.New())},
WorkspaceID: []uuid.UUID{takeFirst(orig.WorkspaceID, uuid.New())},
AgentID: []uuid.UUID{takeFirst(orig.AgentID, uuid.New())},
ConnectionsByProto: jsonProto,
ConnectionCount: []int64{takeFirst(orig.ConnectionCount, 0)},
RxPackets: []int64{takeFirst(orig.RxPackets, 0)},
RxBytes: []int64{takeFirst(orig.RxBytes, 0)},
TxPackets: []int64{takeFirst(orig.TxPackets, 0)},
TxBytes: []int64{takeFirst(orig.TxBytes, 0)},
SessionCountVSCode: []int64{takeFirst(orig.SessionCountVSCode, 0)},
SessionCountJetBrains: []int64{takeFirst(orig.SessionCountJetBrains, 0)},
SessionCountReconnectingPTY: []int64{takeFirst(orig.SessionCountReconnectingPTY, 0)},
SessionCountSSH: []int64{takeFirst(orig.SessionCountSSH, 0)},
ConnectionMedianLatencyMS: []float64{takeFirst(orig.ConnectionMedianLatencyMS, 0)},
}
err := db.InsertWorkspaceAgentStats(genCtx, params)
require.NoError(t, err, "insert workspace agent stat")
return scheme
return database.WorkspaceAgentStat{
ID: params.ID[0],
CreatedAt: params.CreatedAt[0],
UserID: params.UserID[0],
AgentID: params.AgentID[0],
WorkspaceID: params.WorkspaceID[0],
TemplateID: params.TemplateID[0],
ConnectionsByProto: orig.ConnectionsByProto,
ConnectionCount: params.ConnectionCount[0],
RxPackets: params.RxPackets[0],
RxBytes: params.RxBytes[0],
TxPackets: params.TxPackets[0],
TxBytes: params.TxBytes[0],
ConnectionMedianLatencyMS: params.ConnectionMedianLatencyMS[0],
SessionCountVSCode: params.SessionCountVSCode[0],
SessionCountJetBrains: params.SessionCountJetBrains[0],
SessionCountReconnectingPTY: params.SessionCountReconnectingPTY[0],
SessionCountSSH: params.SessionCountSSH[0],
}
}
func OAuth2ProviderApp(t testing.TB, db database.Store, seed database.OAuth2ProviderApp) database.OAuth2ProviderApp {
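The single-row InsertWorkspaceAgentStat is gone; its replacement takes parallel slices, one element per stat row, plus a single JSON array for ConnectionsByProto. A sketch of packing several rows into that shape (buildBatch is a hypothetical helper; the field names mirror the params above):

package dbgen

import (
	"strings"

	"github.com/coder/coder/v2/coderd/database"
)

// buildBatch packs N stat rows into the batch params used by
// InsertWorkspaceAgentStats. Every slice must end up with the same length,
// and ConnectionsByProto becomes one JSON array with N elements.
func buildBatch(rows []database.WorkspaceAgentStat) database.InsertWorkspaceAgentStatsParams {
	var p database.InsertWorkspaceAgentStatsParams
	protos := make([]string, 0, len(rows))
	for _, r := range rows {
		p.ID = append(p.ID, r.ID)
		p.CreatedAt = append(p.CreatedAt, r.CreatedAt)
		p.UserID = append(p.UserID, r.UserID)
		p.TemplateID = append(p.TemplateID, r.TemplateID)
		p.WorkspaceID = append(p.WorkspaceID, r.WorkspaceID)
		p.AgentID = append(p.AgentID, r.AgentID)
		p.ConnectionCount = append(p.ConnectionCount, r.ConnectionCount)
		p.RxPackets = append(p.RxPackets, r.RxPackets)
		p.RxBytes = append(p.RxBytes, r.RxBytes)
		p.TxPackets = append(p.TxPackets, r.TxPackets)
		p.TxBytes = append(p.TxBytes, r.TxBytes)
		p.SessionCountVSCode = append(p.SessionCountVSCode, r.SessionCountVSCode)
		p.SessionCountJetBrains = append(p.SessionCountJetBrains, r.SessionCountJetBrains)
		p.SessionCountReconnectingPTY = append(p.SessionCountReconnectingPTY, r.SessionCountReconnectingPTY)
		p.SessionCountSSH = append(p.SessionCountSSH, r.SessionCountSSH)
		p.ConnectionMedianLatencyMS = append(p.ConnectionMedianLatencyMS, r.ConnectionMedianLatencyMS)
		protos = append(protos, string(r.ConnectionsByProto))
	}
	// Mirrors fmt.Sprintf("[%s]", ...) above: one JSON array for all rows.
	p.ConnectionsByProto = []byte("[" + strings.Join(protos, ",") + "]")
	return p
}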

View File

@ -404,6 +404,16 @@ func (q *FakeQuerier) convertToWorkspaceRowsNoLock(ctx context.Context, workspac
break
}
}
if pj, err := q.getProvisionerJobByIDNoLock(ctx, build.JobID); err == nil {
wr.LatestBuildStatus = pj.JobStatus
}
wr.LatestBuildTransition = build.Transition
}
if u, err := q.getUserByIDNoLock(w.OwnerID); err == nil {
wr.Username = u.Username
}
rows = append(rows, wr)
@ -2240,6 +2250,30 @@ func (q *FakeQuerier) GetGroupMembers(_ context.Context, id uuid.UUID) ([]databa
return users, nil
}
func (q *FakeQuerier) GetGroupsByOrganizationAndUserID(_ context.Context, arg database.GetGroupsByOrganizationAndUserIDParams) ([]database.Group, error) {
err := validateDatabaseType(arg)
if err != nil {
return nil, err
}
q.mutex.RLock()
defer q.mutex.RUnlock()
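// Collect the IDs of every group this user is a member of.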
var groupIDs []uuid.UUID
for _, member := range q.groupMembers {
if member.UserID == arg.UserID {
groupIDs = append(groupIDs, member.GroupID)
}
}
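// Keep only the matching groups that belong to the requested organization.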
groups := []database.Group{}
for _, group := range q.groups {
if slices.Contains(groupIDs, group.ID) && group.OrganizationID == arg.OrganizationID {
groups = append(groups, group)
}
}
return groups, nil
}
func (q *FakeQuerier) GetGroupsByOrganizationID(_ context.Context, id uuid.UUID) ([]database.Group, error) {
q.mutex.RLock()
defer q.mutex.RUnlock()
@ -6447,37 +6481,6 @@ func (q *FakeQuerier) InsertWorkspaceAgentScripts(_ context.Context, arg databas
return scripts, nil
}
func (q *FakeQuerier) InsertWorkspaceAgentStat(_ context.Context, p database.InsertWorkspaceAgentStatParams) (database.WorkspaceAgentStat, error) {
if err := validateDatabaseType(p); err != nil {
return database.WorkspaceAgentStat{}, err
}
q.mutex.Lock()
defer q.mutex.Unlock()
stat := database.WorkspaceAgentStat{
ID: p.ID,
CreatedAt: p.CreatedAt,
WorkspaceID: p.WorkspaceID,
AgentID: p.AgentID,
UserID: p.UserID,
ConnectionsByProto: p.ConnectionsByProto,
ConnectionCount: p.ConnectionCount,
RxPackets: p.RxPackets,
RxBytes: p.RxBytes,
TxPackets: p.TxPackets,
TxBytes: p.TxBytes,
TemplateID: p.TemplateID,
SessionCountVSCode: p.SessionCountVSCode,
SessionCountJetBrains: p.SessionCountJetBrains,
SessionCountReconnectingPTY: p.SessionCountReconnectingPTY,
SessionCountSSH: p.SessionCountSSH,
ConnectionMedianLatencyMS: p.ConnectionMedianLatencyMS,
}
q.workspaceAgentStats = append(q.workspaceAgentStats, stat)
return stat, nil
}
func (q *FakeQuerier) InsertWorkspaceAgentStats(_ context.Context, arg database.InsertWorkspaceAgentStatsParams) error {
err := validateDatabaseType(arg)
if err != nil {

View File

@ -559,6 +559,13 @@ func (m metricsStore) GetGroupMembers(ctx context.Context, groupID uuid.UUID) ([
return users, err
}
func (m metricsStore) GetGroupsByOrganizationAndUserID(ctx context.Context, arg database.GetGroupsByOrganizationAndUserIDParams) ([]database.Group, error) {
start := time.Now()
r0, r1 := m.s.GetGroupsByOrganizationAndUserID(ctx, arg)
m.queryLatencies.WithLabelValues("GetGroupsByOrganizationAndUserID").Observe(time.Since(start).Seconds())
return r0, r1
}
func (m metricsStore) GetGroupsByOrganizationID(ctx context.Context, organizationID uuid.UUID) ([]database.Group, error) {
start := time.Now()
groups, err := m.s.GetGroupsByOrganizationID(ctx, organizationID)
@ -1642,13 +1649,6 @@ func (m metricsStore) InsertWorkspaceAgentScripts(ctx context.Context, arg datab
return r0, r1
}
func (m metricsStore) InsertWorkspaceAgentStat(ctx context.Context, arg database.InsertWorkspaceAgentStatParams) (database.WorkspaceAgentStat, error) {
start := time.Now()
stat, err := m.s.InsertWorkspaceAgentStat(ctx, arg)
m.queryLatencies.WithLabelValues("InsertWorkspaceAgentStat").Observe(time.Since(start).Seconds())
return stat, err
}
func (m metricsStore) InsertWorkspaceAgentStats(ctx context.Context, arg database.InsertWorkspaceAgentStatsParams) error {
start := time.Now()
r0 := m.s.InsertWorkspaceAgentStats(ctx, arg)

View File

@ -1095,6 +1095,21 @@ func (mr *MockStoreMockRecorder) GetGroupMembers(arg0, arg1 any) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGroupMembers", reflect.TypeOf((*MockStore)(nil).GetGroupMembers), arg0, arg1)
}
// GetGroupsByOrganizationAndUserID mocks base method.
func (m *MockStore) GetGroupsByOrganizationAndUserID(arg0 context.Context, arg1 database.GetGroupsByOrganizationAndUserIDParams) ([]database.Group, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetGroupsByOrganizationAndUserID", arg0, arg1)
ret0, _ := ret[0].([]database.Group)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetGroupsByOrganizationAndUserID indicates an expected call of GetGroupsByOrganizationAndUserID.
func (mr *MockStoreMockRecorder) GetGroupsByOrganizationAndUserID(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGroupsByOrganizationAndUserID", reflect.TypeOf((*MockStore)(nil).GetGroupsByOrganizationAndUserID), arg0, arg1)
}
// GetGroupsByOrganizationID mocks base method.
func (m *MockStore) GetGroupsByOrganizationID(arg0 context.Context, arg1 uuid.UUID) ([]database.Group, error) {
m.ctrl.T.Helper()
@ -3456,21 +3471,6 @@ func (mr *MockStoreMockRecorder) InsertWorkspaceAgentScripts(arg0, arg1 any) *go
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertWorkspaceAgentScripts", reflect.TypeOf((*MockStore)(nil).InsertWorkspaceAgentScripts), arg0, arg1)
}
// InsertWorkspaceAgentStat mocks base method.
func (m *MockStore) InsertWorkspaceAgentStat(arg0 context.Context, arg1 database.InsertWorkspaceAgentStatParams) (database.WorkspaceAgentStat, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "InsertWorkspaceAgentStat", arg0, arg1)
ret0, _ := ret[0].(database.WorkspaceAgentStat)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// InsertWorkspaceAgentStat indicates an expected call of InsertWorkspaceAgentStat.
func (mr *MockStoreMockRecorder) InsertWorkspaceAgentStat(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertWorkspaceAgentStat", reflect.TypeOf((*MockStore)(nil).InsertWorkspaceAgentStat), arg0, arg1)
}
// InsertWorkspaceAgentStats mocks base method.
func (m *MockStore) InsertWorkspaceAgentStats(arg0 context.Context, arg1 database.InsertWorkspaceAgentStatsParams) error {
m.ctrl.T.Helper()

View File

@ -143,8 +143,8 @@ func TestRollupTemplateUsageStats(t *testing.T) {
db, ps := dbtestutil.NewDB(t, dbtestutil.WithDumpOnFailure())
logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug)
anHourAgo := dbtime.Now().Add(-time.Hour).Truncate(time.Hour)
anHourAndSixMonthsAgo := anHourAgo.AddDate(0, -6, 0)
anHourAgo := dbtime.Now().Add(-time.Hour).Truncate(time.Hour).UTC()
anHourAndSixMonthsAgo := anHourAgo.AddDate(0, -6, 0).UTC()
var (
org = dbgen.Organization(t, db, database.Organization{})
@ -242,6 +242,12 @@ func TestRollupTemplateUsageStats(t *testing.T) {
require.NoError(t, err)
require.Len(t, stats, 1)
// There is no better way to do this that we know of: our test database runs
// in a *random* timezone, so the returned times carry that zone and fail the
// equality check even though they represent the same instant. Normalize to UTC.
stats[0].EndTime = stats[0].EndTime.UTC()
stats[0].StartTime = stats[0].StartTime.UTC()
require.Equal(t, database.TemplateUsageStat{
TemplateID: tpl.ID,
UserID: user.ID,

View File

@ -1624,6 +1624,10 @@ CREATE INDEX idx_tailnet_clients_coordinator ON tailnet_clients USING btree (coo
CREATE INDEX idx_tailnet_peers_coordinator ON tailnet_peers USING btree (coordinator_id);
CREATE INDEX idx_tailnet_tunnels_dst_id ON tailnet_tunnels USING hash (dst_id);
CREATE INDEX idx_tailnet_tunnels_src_id ON tailnet_tunnels USING hash (src_id);
CREATE UNIQUE INDEX idx_users_email ON users USING btree (email) WHERE (deleted = false);
CREATE UNIQUE INDEX idx_users_username ON users USING btree (username) WHERE (deleted = false);
@ -1644,6 +1648,8 @@ COMMENT ON INDEX template_usage_stats_start_time_template_id_user_id_idx IS 'Ind
CREATE UNIQUE INDEX templates_organization_id_name_idx ON templates USING btree (organization_id, lower((name)::text)) WHERE (deleted = false);
CREATE UNIQUE INDEX user_links_linked_id_login_type_idx ON user_links USING btree (linked_id, login_type) WHERE (linked_id <> ''::text);
CREATE UNIQUE INDEX users_email_lower_idx ON users USING btree (lower(email)) WHERE (deleted = false);
CREATE UNIQUE INDEX users_username_lower_idx ON users USING btree (lower(username)) WHERE (deleted = false);

View File

@ -0,0 +1 @@
DROP INDEX user_links_linked_id_login_type_idx;

View File

@ -0,0 +1,21 @@
-- Clear the linked_id when two user_links share the same value.
-- This affects users who later change their settings on the OAuth/OIDC
-- provider; however, if two users exist with the same linked_id, there is
-- no way to determine correctly which user should be updated. Once the
-- linked_id is empty, the link will instead be resolved by email.
UPDATE ONLY user_links AS out
SET
linked_id =
CASE WHEN (
-- When the count of linked_id is greater than 1, set the linked_id to empty
SELECT
COUNT(*)
FROM
user_links inn
WHERE
out.linked_id = inn.linked_id AND out.login_type = inn.login_type
) > 1 THEN '' ELSE out.linked_id END;
-- Enforce unique linked_id constraint on non-empty linked_id
CREATE UNIQUE INDEX user_links_linked_id_login_type_idx ON user_links USING btree (linked_id, login_type) WHERE (linked_id != '');
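-- A sanity check one could run after this migration (an illustrative query,
-- not part of the migration itself): it should return zero rows, confirming
-- that no non-empty linked_id remains duplicated within a login_type.
--
--   SELECT linked_id, login_type, COUNT(*)
--   FROM user_links
--   WHERE linked_id != ''
--   GROUP BY linked_id, login_type
--   HAVING COUNT(*) > 1;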

View File

@ -0,0 +1,2 @@
DROP INDEX idx_tailnet_tunnels_src_id;
DROP INDEX idx_tailnet_tunnels_dst_id;

View File

@ -0,0 +1,3 @@
-- Since src_id and dst_id are UUIDs, we only ever compare them with equality, so hash is better
CREATE INDEX idx_tailnet_tunnels_src_id ON tailnet_tunnels USING hash (src_id);
CREATE INDEX idx_tailnet_tunnels_dst_id ON tailnet_tunnels USING hash (dst_id);
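-- For example, equality lookups such as the following can now use the hash
-- indexes (an illustrative query, not part of the migration):
--
--   EXPLAIN SELECT * FROM tailnet_tunnels
--   WHERE src_id = '00000000-0000-0000-0000-000000000000';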

View File

@ -17,3 +17,18 @@ INSERT INTO public.user_links(user_id, login_type, linked_id, oauth_access_token
-- This has happened on a production database.
INSERT INTO public.user_links(user_id, login_type, linked_id, oauth_access_token)
VALUES('fc1511ef-4fcf-4a3b-98a1-8df64160e35a', 'oidc', 'foo', '');
-- Lastly, make 2 other users who have the same user link.
INSERT INTO public.users(id, email, username, hashed_password, created_at, updated_at, status, rbac_roles, deleted)
VALUES ('580ed397-727d-4aaf-950a-51f89f556c24', 'dup_link_a@coder.com', 'dupe_a', '\x', '2022-11-02 13:05:21.445455+02', '2022-11-02 13:05:21.445455+02', 'active', '{}', false) ON CONFLICT DO NOTHING;
INSERT INTO public.organization_members VALUES ('580ed397-727d-4aaf-950a-51f89f556c24', 'bb640d07-ca8a-4869-b6bc-ae61ebb2fda1', '2022-11-02 13:05:21.447595+02', '2022-11-02 13:05:21.447595+02', '{}') ON CONFLICT DO NOTHING;
INSERT INTO public.user_links(user_id, login_type, linked_id, oauth_access_token)
VALUES('580ed397-727d-4aaf-950a-51f89f556c24', 'github', '500', '');
INSERT INTO public.users(id, email, username, hashed_password, created_at, updated_at, status, rbac_roles, deleted)
VALUES ('c813366b-2fde-45ae-920c-101c3ad6a1e1', 'dup_link_b@coder.com', 'dupe_b', '\x', '2022-11-02 13:05:21.445455+02', '2022-11-02 13:05:21.445455+02', 'active', '{}', false) ON CONFLICT DO NOTHING;
INSERT INTO public.organization_members VALUES ('c813366b-2fde-45ae-920c-101c3ad6a1e1', 'bb640d07-ca8a-4869-b6bc-ae61ebb2fda1', '2022-11-02 13:05:21.447595+02', '2022-11-02 13:05:21.447595+02', '{}') ON CONFLICT DO NOTHING;
INSERT INTO public.user_links(user_id, login_type, linked_id, oauth_access_token)
VALUES('c813366b-2fde-45ae-920c-101c3ad6a1e1', 'github', '500', '');

View File

@ -266,6 +266,7 @@ func (q *sqlQuerier) GetAuthorizedWorkspaces(ctx context.Context, arg GetWorkspa
&i.LatestBuildCanceledAt,
&i.LatestBuildError,
&i.LatestBuildTransition,
&i.LatestBuildStatus,
&i.Count,
); err != nil {
return nil, err

View File

@ -123,6 +123,7 @@ type sqlcQuerier interface {
// If the group is a user made group, then we need to check the group_members table.
// If it is the "Everyone" group, then we need to check the organization_members table.
GetGroupMembers(ctx context.Context, groupID uuid.UUID) ([]User, error)
GetGroupsByOrganizationAndUserID(ctx context.Context, arg GetGroupsByOrganizationAndUserIDParams) ([]Group, error)
GetGroupsByOrganizationID(ctx context.Context, organizationID uuid.UUID) ([]Group, error)
GetHealthSettings(ctx context.Context) (string, error)
GetHungProvisionerJobs(ctx context.Context, updatedAt time.Time) ([]ProvisionerJob, error)
@ -335,7 +336,6 @@ type sqlcQuerier interface {
InsertWorkspaceAgentLogs(ctx context.Context, arg InsertWorkspaceAgentLogsParams) ([]WorkspaceAgentLog, error)
InsertWorkspaceAgentMetadata(ctx context.Context, arg InsertWorkspaceAgentMetadataParams) error
InsertWorkspaceAgentScripts(ctx context.Context, arg InsertWorkspaceAgentScriptsParams) ([]WorkspaceAgentScript, error)
InsertWorkspaceAgentStat(ctx context.Context, arg InsertWorkspaceAgentStatParams) (WorkspaceAgentStat, error)
InsertWorkspaceAgentStats(ctx context.Context, arg InsertWorkspaceAgentStatsParams) error
InsertWorkspaceApp(ctx context.Context, arg InsertWorkspaceAppParams) (WorkspaceApp, error)
InsertWorkspaceAppStats(ctx context.Context, arg InsertWorkspaceAppStatsParams) error

View File

@ -1484,6 +1484,67 @@ func (q *sqlQuerier) GetGroupByOrgAndName(ctx context.Context, arg GetGroupByOrg
return i, err
}
const getGroupsByOrganizationAndUserID = `-- name: GetGroupsByOrganizationAndUserID :many
SELECT
groups.id, groups.name, groups.organization_id, groups.avatar_url, groups.quota_allowance, groups.display_name, groups.source
FROM
groups
-- If the group is a user made group, then we need to check the group_members table.
LEFT JOIN
group_members
ON
group_members.group_id = groups.id AND
group_members.user_id = $1
-- If it is the "Everyone" group, then we need to check the organization_members table.
LEFT JOIN
organization_members
ON
organization_members.organization_id = groups.id AND
organization_members.user_id = $1
WHERE
-- In either case, the group_id will only match an org or a group.
(group_members.user_id = $1 OR organization_members.user_id = $1)
AND
-- Ensure the group or organization is the specified organization.
groups.organization_id = $2
`
type GetGroupsByOrganizationAndUserIDParams struct {
UserID uuid.UUID `db:"user_id" json:"user_id"`
OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
}
func (q *sqlQuerier) GetGroupsByOrganizationAndUserID(ctx context.Context, arg GetGroupsByOrganizationAndUserIDParams) ([]Group, error) {
rows, err := q.db.QueryContext(ctx, getGroupsByOrganizationAndUserID, arg.UserID, arg.OrganizationID)
if err != nil {
return nil, err
}
defer rows.Close()
var items []Group
for rows.Next() {
var i Group
if err := rows.Scan(
&i.ID,
&i.Name,
&i.OrganizationID,
&i.AvatarURL,
&i.QuotaAllowance,
&i.DisplayName,
&i.Source,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Close(); err != nil {
return nil, err
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
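// For illustration, a caller invokes the generated method like so (a sketch;
// ctx, store, userID, and orgID are assumed to be in scope):
//
//	groups, err := store.GetGroupsByOrganizationAndUserID(ctx, database.GetGroupsByOrganizationAndUserIDParams{
//		UserID:         userID,
//		OrganizationID: orgID,
//	})
//	if err != nil {
//		return err
//	}
//	for _, group := range groups {
//		fmt.Println(group.Name)
//	}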
const getGroupsByOrganizationID = `-- name: GetGroupsByOrganizationID :many
SELECT
id, name, organization_id, avatar_url, quota_allowance, display_name, source
@ -10386,94 +10447,6 @@ func (q *sqlQuerier) GetWorkspaceAgentStatsAndLabels(ctx context.Context, create
return items, nil
}
const insertWorkspaceAgentStat = `-- name: InsertWorkspaceAgentStat :one
INSERT INTO
workspace_agent_stats (
id,
created_at,
user_id,
workspace_id,
template_id,
agent_id,
connections_by_proto,
connection_count,
rx_packets,
rx_bytes,
tx_packets,
tx_bytes,
session_count_vscode,
session_count_jetbrains,
session_count_reconnecting_pty,
session_count_ssh,
connection_median_latency_ms
)
VALUES
($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17) RETURNING id, created_at, user_id, agent_id, workspace_id, template_id, connections_by_proto, connection_count, rx_packets, rx_bytes, tx_packets, tx_bytes, connection_median_latency_ms, session_count_vscode, session_count_jetbrains, session_count_reconnecting_pty, session_count_ssh
`
type InsertWorkspaceAgentStatParams struct {
ID uuid.UUID `db:"id" json:"id"`
CreatedAt time.Time `db:"created_at" json:"created_at"`
UserID uuid.UUID `db:"user_id" json:"user_id"`
WorkspaceID uuid.UUID `db:"workspace_id" json:"workspace_id"`
TemplateID uuid.UUID `db:"template_id" json:"template_id"`
AgentID uuid.UUID `db:"agent_id" json:"agent_id"`
ConnectionsByProto json.RawMessage `db:"connections_by_proto" json:"connections_by_proto"`
ConnectionCount int64 `db:"connection_count" json:"connection_count"`
RxPackets int64 `db:"rx_packets" json:"rx_packets"`
RxBytes int64 `db:"rx_bytes" json:"rx_bytes"`
TxPackets int64 `db:"tx_packets" json:"tx_packets"`
TxBytes int64 `db:"tx_bytes" json:"tx_bytes"`
SessionCountVSCode int64 `db:"session_count_vscode" json:"session_count_vscode"`
SessionCountJetBrains int64 `db:"session_count_jetbrains" json:"session_count_jetbrains"`
SessionCountReconnectingPTY int64 `db:"session_count_reconnecting_pty" json:"session_count_reconnecting_pty"`
SessionCountSSH int64 `db:"session_count_ssh" json:"session_count_ssh"`
ConnectionMedianLatencyMS float64 `db:"connection_median_latency_ms" json:"connection_median_latency_ms"`
}
func (q *sqlQuerier) InsertWorkspaceAgentStat(ctx context.Context, arg InsertWorkspaceAgentStatParams) (WorkspaceAgentStat, error) {
row := q.db.QueryRowContext(ctx, insertWorkspaceAgentStat,
arg.ID,
arg.CreatedAt,
arg.UserID,
arg.WorkspaceID,
arg.TemplateID,
arg.AgentID,
arg.ConnectionsByProto,
arg.ConnectionCount,
arg.RxPackets,
arg.RxBytes,
arg.TxPackets,
arg.TxBytes,
arg.SessionCountVSCode,
arg.SessionCountJetBrains,
arg.SessionCountReconnectingPTY,
arg.SessionCountSSH,
arg.ConnectionMedianLatencyMS,
)
var i WorkspaceAgentStat
err := row.Scan(
&i.ID,
&i.CreatedAt,
&i.UserID,
&i.AgentID,
&i.WorkspaceID,
&i.TemplateID,
&i.ConnectionsByProto,
&i.ConnectionCount,
&i.RxPackets,
&i.RxBytes,
&i.TxPackets,
&i.TxBytes,
&i.ConnectionMedianLatencyMS,
&i.SessionCountVSCode,
&i.SessionCountJetBrains,
&i.SessionCountReconnectingPTY,
&i.SessionCountSSH,
)
return i, err
}
const insertWorkspaceAgentStats = `-- name: InsertWorkspaceAgentStats :exec
INSERT INTO
workspace_agent_stats (
@ -12280,7 +12253,8 @@ SELECT
latest_build.completed_at as latest_build_completed_at,
latest_build.canceled_at as latest_build_canceled_at,
latest_build.error as latest_build_error,
latest_build.transition as latest_build_transition
latest_build.transition as latest_build_transition,
latest_build.job_status as latest_build_status
FROM
workspaces
JOIN
@ -12302,7 +12276,7 @@ LEFT JOIN LATERAL (
provisioner_jobs.job_status
FROM
workspace_builds
LEFT JOIN
JOIN
provisioner_jobs
ON
provisioner_jobs.id = workspace_builds.job_id
@ -12507,7 +12481,7 @@ WHERE
-- @authorize_filter
), filtered_workspaces_order AS (
SELECT
fw.id, fw.created_at, fw.updated_at, fw.owner_id, fw.organization_id, fw.template_id, fw.deleted, fw.name, fw.autostart_schedule, fw.ttl, fw.last_used_at, fw.dormant_at, fw.deleting_at, fw.automatic_updates, fw.favorite, fw.template_name, fw.template_version_id, fw.template_version_name, fw.username, fw.latest_build_completed_at, fw.latest_build_canceled_at, fw.latest_build_error, fw.latest_build_transition
fw.id, fw.created_at, fw.updated_at, fw.owner_id, fw.organization_id, fw.template_id, fw.deleted, fw.name, fw.autostart_schedule, fw.ttl, fw.last_used_at, fw.dormant_at, fw.deleting_at, fw.automatic_updates, fw.favorite, fw.template_name, fw.template_version_id, fw.template_version_name, fw.username, fw.latest_build_completed_at, fw.latest_build_canceled_at, fw.latest_build_error, fw.latest_build_transition, fw.latest_build_status
FROM
filtered_workspaces fw
ORDER BY
@ -12528,7 +12502,7 @@ WHERE
$19
), filtered_workspaces_order_with_summary AS (
SELECT
fwo.id, fwo.created_at, fwo.updated_at, fwo.owner_id, fwo.organization_id, fwo.template_id, fwo.deleted, fwo.name, fwo.autostart_schedule, fwo.ttl, fwo.last_used_at, fwo.dormant_at, fwo.deleting_at, fwo.automatic_updates, fwo.favorite, fwo.template_name, fwo.template_version_id, fwo.template_version_name, fwo.username, fwo.latest_build_completed_at, fwo.latest_build_canceled_at, fwo.latest_build_error, fwo.latest_build_transition
fwo.id, fwo.created_at, fwo.updated_at, fwo.owner_id, fwo.organization_id, fwo.template_id, fwo.deleted, fwo.name, fwo.autostart_schedule, fwo.ttl, fwo.last_used_at, fwo.dormant_at, fwo.deleting_at, fwo.automatic_updates, fwo.favorite, fwo.template_name, fwo.template_version_id, fwo.template_version_name, fwo.username, fwo.latest_build_completed_at, fwo.latest_build_canceled_at, fwo.latest_build_error, fwo.latest_build_transition, fwo.latest_build_status
FROM
filtered_workspaces_order fwo
-- Return a technical summary row with total count of workspaces.
@ -12558,7 +12532,8 @@ WHERE
'0001-01-01 00:00:00+00'::timestamptz, -- latest_build_completed_at,
'0001-01-01 00:00:00+00'::timestamptz, -- latest_build_canceled_at,
'', -- latest_build_error
'start'::workspace_transition -- latest_build_transition
'start'::workspace_transition, -- latest_build_transition
'unknown'::provisioner_job_status -- latest_build_status
WHERE
$21 :: boolean = true
), total_count AS (
@ -12568,7 +12543,7 @@ WHERE
filtered_workspaces
)
SELECT
fwos.id, fwos.created_at, fwos.updated_at, fwos.owner_id, fwos.organization_id, fwos.template_id, fwos.deleted, fwos.name, fwos.autostart_schedule, fwos.ttl, fwos.last_used_at, fwos.dormant_at, fwos.deleting_at, fwos.automatic_updates, fwos.favorite, fwos.template_name, fwos.template_version_id, fwos.template_version_name, fwos.username, fwos.latest_build_completed_at, fwos.latest_build_canceled_at, fwos.latest_build_error, fwos.latest_build_transition,
fwos.id, fwos.created_at, fwos.updated_at, fwos.owner_id, fwos.organization_id, fwos.template_id, fwos.deleted, fwos.name, fwos.autostart_schedule, fwos.ttl, fwos.last_used_at, fwos.dormant_at, fwos.deleting_at, fwos.automatic_updates, fwos.favorite, fwos.template_name, fwos.template_version_id, fwos.template_version_name, fwos.username, fwos.latest_build_completed_at, fwos.latest_build_canceled_at, fwos.latest_build_error, fwos.latest_build_transition, fwos.latest_build_status,
tc.count
FROM
filtered_workspaces_order_with_summary fwos
@ -12601,30 +12576,31 @@ type GetWorkspacesParams struct {
}
type GetWorkspacesRow struct {
ID uuid.UUID `db:"id" json:"id"`
CreatedAt time.Time `db:"created_at" json:"created_at"`
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
OwnerID uuid.UUID `db:"owner_id" json:"owner_id"`
OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
TemplateID uuid.UUID `db:"template_id" json:"template_id"`
Deleted bool `db:"deleted" json:"deleted"`
Name string `db:"name" json:"name"`
AutostartSchedule sql.NullString `db:"autostart_schedule" json:"autostart_schedule"`
Ttl sql.NullInt64 `db:"ttl" json:"ttl"`
LastUsedAt time.Time `db:"last_used_at" json:"last_used_at"`
DormantAt sql.NullTime `db:"dormant_at" json:"dormant_at"`
DeletingAt sql.NullTime `db:"deleting_at" json:"deleting_at"`
AutomaticUpdates AutomaticUpdates `db:"automatic_updates" json:"automatic_updates"`
Favorite bool `db:"favorite" json:"favorite"`
TemplateName string `db:"template_name" json:"template_name"`
TemplateVersionID uuid.UUID `db:"template_version_id" json:"template_version_id"`
TemplateVersionName sql.NullString `db:"template_version_name" json:"template_version_name"`
Username string `db:"username" json:"username"`
LatestBuildCompletedAt sql.NullTime `db:"latest_build_completed_at" json:"latest_build_completed_at"`
LatestBuildCanceledAt sql.NullTime `db:"latest_build_canceled_at" json:"latest_build_canceled_at"`
LatestBuildError sql.NullString `db:"latest_build_error" json:"latest_build_error"`
LatestBuildTransition WorkspaceTransition `db:"latest_build_transition" json:"latest_build_transition"`
Count int64 `db:"count" json:"count"`
ID uuid.UUID `db:"id" json:"id"`
CreatedAt time.Time `db:"created_at" json:"created_at"`
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
OwnerID uuid.UUID `db:"owner_id" json:"owner_id"`
OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
TemplateID uuid.UUID `db:"template_id" json:"template_id"`
Deleted bool `db:"deleted" json:"deleted"`
Name string `db:"name" json:"name"`
AutostartSchedule sql.NullString `db:"autostart_schedule" json:"autostart_schedule"`
Ttl sql.NullInt64 `db:"ttl" json:"ttl"`
LastUsedAt time.Time `db:"last_used_at" json:"last_used_at"`
DormantAt sql.NullTime `db:"dormant_at" json:"dormant_at"`
DeletingAt sql.NullTime `db:"deleting_at" json:"deleting_at"`
AutomaticUpdates AutomaticUpdates `db:"automatic_updates" json:"automatic_updates"`
Favorite bool `db:"favorite" json:"favorite"`
TemplateName string `db:"template_name" json:"template_name"`
TemplateVersionID uuid.UUID `db:"template_version_id" json:"template_version_id"`
TemplateVersionName sql.NullString `db:"template_version_name" json:"template_version_name"`
Username string `db:"username" json:"username"`
LatestBuildCompletedAt sql.NullTime `db:"latest_build_completed_at" json:"latest_build_completed_at"`
LatestBuildCanceledAt sql.NullTime `db:"latest_build_canceled_at" json:"latest_build_canceled_at"`
LatestBuildError sql.NullString `db:"latest_build_error" json:"latest_build_error"`
LatestBuildTransition WorkspaceTransition `db:"latest_build_transition" json:"latest_build_transition"`
LatestBuildStatus ProvisionerJobStatus `db:"latest_build_status" json:"latest_build_status"`
Count int64 `db:"count" json:"count"`
}
// build_params is used to filter by build parameters if present.
@ -12685,6 +12661,7 @@ func (q *sqlQuerier) GetWorkspaces(ctx context.Context, arg GetWorkspacesParams)
&i.LatestBuildCanceledAt,
&i.LatestBuildError,
&i.LatestBuildTransition,
&i.LatestBuildStatus,
&i.Count,
); err != nil {
return nil, err

View File

@ -28,6 +28,31 @@ FROM
WHERE
organization_id = $1;
-- name: GetGroupsByOrganizationAndUserID :many
SELECT
groups.*
FROM
groups
-- If the group is a user made group, then we need to check the group_members table.
LEFT JOIN
group_members
ON
group_members.group_id = groups.id AND
group_members.user_id = @user_id
-- If it is the "Everyone" group, then we need to check the organization_members table.
LEFT JOIN
organization_members
ON
organization_members.organization_id = groups.id AND
organization_members.user_id = @user_id
WHERE
-- In either case, the group_id will only match an org or a group.
(group_members.user_id = @user_id OR organization_members.user_id = @user_id)
AND
-- Ensure the group or organization is the specified organization.
groups.organization_id = @organization_id;
-- name: InsertGroup :one
INSERT INTO groups (
id,

View File

@ -1,27 +1,3 @@
-- name: InsertWorkspaceAgentStat :one
INSERT INTO
workspace_agent_stats (
id,
created_at,
user_id,
workspace_id,
template_id,
agent_id,
connections_by_proto,
connection_count,
rx_packets,
rx_bytes,
tx_packets,
tx_bytes,
session_count_vscode,
session_count_jetbrains,
session_count_reconnecting_pty,
session_count_ssh,
connection_median_latency_ms
)
VALUES
($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17) RETURNING *;
-- name: InsertWorkspaceAgentStats :exec
INSERT INTO
workspace_agent_stats (

View File

@ -96,7 +96,8 @@ SELECT
latest_build.completed_at as latest_build_completed_at,
latest_build.canceled_at as latest_build_canceled_at,
latest_build.error as latest_build_error,
latest_build.transition as latest_build_transition
latest_build.transition as latest_build_transition,
latest_build.job_status as latest_build_status
FROM
workspaces
JOIN
@ -118,7 +119,7 @@ LEFT JOIN LATERAL (
provisioner_jobs.job_status
FROM
workspace_builds
LEFT JOIN
JOIN
provisioner_jobs
ON
provisioner_jobs.id = workspace_builds.job_id
@ -374,7 +375,8 @@ WHERE
'0001-01-01 00:00:00+00'::timestamptz, -- latest_build_completed_at,
'0001-01-01 00:00:00+00'::timestamptz, -- latest_build_canceled_at,
'', -- latest_build_error
'start'::workspace_transition -- latest_build_transition
'start'::workspace_transition, -- latest_build_transition
'unknown'::provisioner_job_status -- latest_build_status
WHERE
@with_summary :: boolean = true
), total_count AS (

View File

@ -82,6 +82,7 @@ const (
UniqueOrganizationsSingleDefaultOrg UniqueConstraint = "organizations_single_default_org" // CREATE UNIQUE INDEX organizations_single_default_org ON organizations USING btree (is_default) WHERE (is_default = true);
UniqueTemplateUsageStatsStartTimeTemplateIDUserIDIndex UniqueConstraint = "template_usage_stats_start_time_template_id_user_id_idx" // CREATE UNIQUE INDEX template_usage_stats_start_time_template_id_user_id_idx ON template_usage_stats USING btree (start_time, template_id, user_id);
UniqueTemplatesOrganizationIDNameIndex UniqueConstraint = "templates_organization_id_name_idx" // CREATE UNIQUE INDEX templates_organization_id_name_idx ON templates USING btree (organization_id, lower((name)::text)) WHERE (deleted = false);
UniqueUserLinksLinkedIDLoginTypeIndex UniqueConstraint = "user_links_linked_id_login_type_idx" // CREATE UNIQUE INDEX user_links_linked_id_login_type_idx ON user_links USING btree (linked_id, login_type) WHERE (linked_id <> ''::text);
UniqueUsersEmailLowerIndex UniqueConstraint = "users_email_lower_idx" // CREATE UNIQUE INDEX users_email_lower_idx ON users USING btree (lower(email)) WHERE (deleted = false);
UniqueUsersUsernameLowerIndex UniqueConstraint = "users_username_lower_idx" // CREATE UNIQUE INDEX users_username_lower_idx ON users USING btree (lower(username)) WHERE (deleted = false);
UniqueWorkspaceProxiesLowerNameIndex UniqueConstraint = "workspace_proxies_lower_name_idx" // CREATE UNIQUE INDEX workspace_proxies_lower_name_idx ON workspace_proxies USING btree (lower(name)) WHERE (deleted = false);

View File

@ -3,9 +3,11 @@ package externalauth_test
import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"strings"
"testing"
"time"
@ -13,6 +15,7 @@ import (
"github.com/golang-jwt/jwt/v4"
"github.com/google/uuid"
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/oauth2"
"golang.org/x/xerrors"
@ -417,6 +420,78 @@ func TestConvertYAML(t *testing.T) {
})
}
// TestConstantQueryParams verifies that a constant query parameter can be set in the
// "authenticate" URL for external auth applications, and that it is carried forward
// to actual auth requests.
// This unit test was created specifically for Auth0, which can set an
// audience query parameter in its /authorize endpoint.
func TestConstantQueryParams(t *testing.T) {
t.Parallel()
const constantQueryParamKey = "audience"
const constantQueryParamValue = "foobar"
constantQueryParam := fmt.Sprintf("%s=%s", constantQueryParamKey, constantQueryParamValue)
fake, config, _ := setupOauth2Test(t, testConfig{
FakeIDPOpts: []oidctest.FakeIDPOpt{
oidctest.WithMiddlewares(func(next http.Handler) http.Handler {
return http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
if strings.Contains(request.URL.Path, "authorize") {
// Assert that the audience query param is present.
assert.Equal(t, constantQueryParamValue, request.URL.Query().Get(constantQueryParamKey))
}
next.ServeHTTP(writer, request)
})
}),
},
CoderOIDCConfigOpts: []func(cfg *coderd.OIDCConfig){
func(cfg *coderd.OIDCConfig) {
// Include a constant query parameter.
authURL, err := url.Parse(cfg.OAuth2Config.(*oauth2.Config).Endpoint.AuthURL)
require.NoError(t, err)
authURL.RawQuery = url.Values{constantQueryParamKey: []string{constantQueryParamValue}}.Encode()
cfg.OAuth2Config.(*oauth2.Config).Endpoint.AuthURL = authURL.String()
require.Contains(t, cfg.OAuth2Config.(*oauth2.Config).Endpoint.AuthURL, constantQueryParam)
},
},
})
callbackCalled := false
fake.SetCoderdCallbackHandler(func(writer http.ResponseWriter, request *http.Request) {
// Just record the callback was hit, and the auth succeeded.
callbackCalled = true
})
// Verify the AuthURL endpoint contains the constant query parameter and is a valid URL.
// It should look something like:
// http://127.0.0.1:<port>/oauth2/authorize?
// audience=foobar&
// client_id=<uuid>&
// redirect_uri=<redirect>&
// response_type=code&
// scope=openid+email+profile&
// state=state
const state = "state"
rawAuthURL := config.AuthCodeURL(state)
// Parsing the URL is not perfect: it tolerates imperfections such as the
// query params containing two question marks ('?a=foo?b=bar').
// So use the parsed form to validate, then verify the raw URL is as expected.
authURL, err := url.Parse(rawAuthURL)
require.NoError(t, err)
require.Equal(t, constantQueryParamValue, authURL.Query().Get(constantQueryParamKey))
// We are not using a real server, so the fake IDP reports https://coder.com.
require.Equal(t, "https", authURL.Scheme)
// Validate the raw URL.
// Double-check that only one '?' exists; URL parsing tolerates multiple '?'
// in the query string.
require.Equal(t, 1, strings.Count(rawAuthURL, "?"))
// Actually run an auth request. Although it says OIDC, the flow is the same
// for OAuth2.
//nolint:bodyclose
resp := fake.OIDCCallback(t, state, jwt.MapClaims{})
require.True(t, callbackCalled)
require.Equal(t, http.StatusOK, resp.StatusCode)
}
type testConfig struct {
FakeIDPOpts []oidctest.FakeIDPOpt
CoderOIDCConfigOpts []func(cfg *coderd.OIDCConfig)
@ -433,6 +508,10 @@ type testConfig struct {
func setupOauth2Test(t *testing.T, settings testConfig) (*oidctest.FakeIDP, *externalauth.Config, database.ExternalAuthLink) {
t.Helper()
if settings.ExternalAuthOpt == nil {
settings.ExternalAuthOpt = func(_ *externalauth.Config) {}
}
const providerID = "test-idp"
fake := oidctest.NewFakeIDP(t,
append([]oidctest.FakeIDPOpt{}, settings.FakeIDPOpts...)...,

View File

@ -7,7 +7,6 @@ import (
"fmt"
"net/http"
"net/url"
"time"
"github.com/google/uuid"
"golang.org/x/oauth2"
@ -75,7 +74,11 @@ func extractTokenParams(r *http.Request, callbackURL *url.URL) (tokenParams, []c
return params, nil, nil
}
func Tokens(db database.Store, defaultLifetime time.Duration) http.HandlerFunc {
// Tokens returns a handler for the OAuth2 token endpoint.
// TODO: the session lifetime config passed here is for Coder API tokens.
// Should there be a separate config for OAuth2 tokens? They are related,
// but they are not the same.
func Tokens(db database.Store, lifetimes codersdk.SessionLifetime) http.HandlerFunc {
return func(rw http.ResponseWriter, r *http.Request) {
ctx := r.Context()
app := httpmw.OAuth2ProviderApp(r)
@ -104,9 +107,9 @@ func Tokens(db database.Store, defaultLifetime time.Duration) http.HandlerFunc {
switch params.grantType {
// TODO: Client creds, device code.
case codersdk.OAuth2ProviderGrantTypeRefreshToken:
token, err = refreshTokenGrant(ctx, db, app, defaultLifetime, params)
token, err = refreshTokenGrant(ctx, db, app, lifetimes, params)
case codersdk.OAuth2ProviderGrantTypeAuthorizationCode:
token, err = authorizationCodeGrant(ctx, db, app, defaultLifetime, params)
token, err = authorizationCodeGrant(ctx, db, app, lifetimes, params)
default:
// Grant types are validated by the parser, so getting through here means
// the developer added a type but forgot to add a case here.
@ -137,7 +140,7 @@ func Tokens(db database.Store, defaultLifetime time.Duration) http.HandlerFunc {
}
}
func authorizationCodeGrant(ctx context.Context, db database.Store, app database.OAuth2ProviderApp, defaultLifetime time.Duration, params tokenParams) (oauth2.Token, error) {
func authorizationCodeGrant(ctx context.Context, db database.Store, app database.OAuth2ProviderApp, lifetimes codersdk.SessionLifetime, params tokenParams) (oauth2.Token, error) {
// Validate the client secret.
secret, err := parseSecret(params.clientSecret)
if err != nil {
@ -195,11 +198,9 @@ func authorizationCodeGrant(ctx context.Context, db database.Store, app database
// TODO: We are ignoring scopes for now.
tokenName := fmt.Sprintf("%s_%s_oauth_session_token", dbCode.UserID, app.ID)
key, sessionToken, err := apikey.Generate(apikey.CreateParams{
UserID: dbCode.UserID,
LoginType: database.LoginTypeOAuth2ProviderApp,
// TODO: This is just the lifetime for api keys, maybe have its own config
// settings. #11693
DefaultLifetime: defaultLifetime,
UserID: dbCode.UserID,
LoginType: database.LoginTypeOAuth2ProviderApp,
DefaultLifetime: lifetimes.DefaultDuration.Value(),
// For now, we allow only one token per app and user at a time.
TokenName: tokenName,
})
@ -271,7 +272,7 @@ func authorizationCodeGrant(ctx context.Context, db database.Store, app database
}, nil
}
func refreshTokenGrant(ctx context.Context, db database.Store, app database.OAuth2ProviderApp, defaultLifetime time.Duration, params tokenParams) (oauth2.Token, error) {
func refreshTokenGrant(ctx context.Context, db database.Store, app database.OAuth2ProviderApp, lifetimes codersdk.SessionLifetime, params tokenParams) (oauth2.Token, error) {
// Validate the token.
token, err := parseSecret(params.refreshToken)
if err != nil {
@ -326,11 +327,9 @@ func refreshTokenGrant(ctx context.Context, db database.Store, app database.OAut
// TODO: We are ignoring scopes for now.
tokenName := fmt.Sprintf("%s_%s_oauth_session_token", prevKey.UserID, app.ID)
key, sessionToken, err := apikey.Generate(apikey.CreateParams{
UserID: prevKey.UserID,
LoginType: database.LoginTypeOAuth2ProviderApp,
// TODO: This is just the lifetime for api keys, maybe have its own config
// settings. #11693
DefaultLifetime: defaultLifetime,
UserID: prevKey.UserID,
LoginType: database.LoginTypeOAuth2ProviderApp,
DefaultLifetime: lifetimes.DefaultDuration.Value(),
// For now, we allow only one token per app and user at a time.
TokenName: tokenName,
})
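// For illustration, the lifetime config threaded through both grant paths now
// looks like this at the call site (a sketch; the literal durations here are
// made up, not defaults from this change):
//
//	lifetimes := codersdk.SessionLifetime{
//		DefaultDuration:      serpent.Duration(24 * time.Hour),
//		MaximumTokenDuration: serpent.Duration(7 * 24 * time.Hour),
//	}
//	handler := identityprovider.Tokens(db, lifetimes)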

View File

@ -3,6 +3,7 @@ package metricscache_test
import (
"context"
"database/sql"
"encoding/json"
"testing"
"time"
@ -280,14 +281,25 @@ func TestCache_DeploymentStats(t *testing.T) {
})
defer cache.Close()
_, err := db.InsertWorkspaceAgentStat(context.Background(), database.InsertWorkspaceAgentStatParams{
ID: uuid.New(),
AgentID: uuid.New(),
CreatedAt: dbtime.Now(),
ConnectionCount: 1,
RxBytes: 1,
TxBytes: 1,
SessionCountVSCode: 1,
err := db.InsertWorkspaceAgentStats(context.Background(), database.InsertWorkspaceAgentStatsParams{
ID: []uuid.UUID{uuid.New()},
CreatedAt: []time.Time{dbtime.Now()},
WorkspaceID: []uuid.UUID{uuid.New()},
UserID: []uuid.UUID{uuid.New()},
TemplateID: []uuid.UUID{uuid.New()},
AgentID: []uuid.UUID{uuid.New()},
ConnectionsByProto: json.RawMessage(`[{}]`),
RxPackets: []int64{0},
RxBytes: []int64{1},
TxPackets: []int64{0},
TxBytes: []int64{1},
ConnectionCount: []int64{1},
SessionCountVSCode: []int64{1},
SessionCountJetBrains: []int64{0},
SessionCountReconnectingPTY: []int64{0},
SessionCountSSH: []int64{0},
ConnectionMedianLatencyMS: []float64{10},
})
require.NoError(t, err)

View File

@ -354,7 +354,7 @@ func (api *API) getOAuth2ProviderAppAuthorize() http.HandlerFunc {
// @Success 200 {object} oauth2.Token
// @Router /oauth2/tokens [post]
func (api *API) postOAuth2ProviderAppToken() http.HandlerFunc {
return identityprovider.Tokens(api.Database, api.DeploymentValues.SessionDuration.Value())
return identityprovider.Tokens(api.Database, api.DeploymentValues.Sessions)
}
// @Summary Delete OAuth2 application tokens.

View File

@ -24,10 +24,12 @@ import (
"github.com/coder/coder/v2/tailnet"
)
const defaultRefreshRate = time.Minute
// ActiveUsers tracks the number of users that have authenticated within the past hour.
func ActiveUsers(ctx context.Context, registerer prometheus.Registerer, db database.Store, duration time.Duration) (func(), error) {
if duration == 0 {
duration = 5 * time.Minute
duration = defaultRefreshRate
}
gauge := prometheus.NewGauge(prometheus.GaugeOpts{
@ -72,36 +74,42 @@ func ActiveUsers(ctx context.Context, registerer prometheus.Registerer, db datab
}
// Workspaces tracks the total number of workspaces, labeled by status.
func Workspaces(ctx context.Context, registerer prometheus.Registerer, db database.Store, duration time.Duration) (func(), error) {
func Workspaces(ctx context.Context, logger slog.Logger, registerer prometheus.Registerer, db database.Store, duration time.Duration) (func(), error) {
if duration == 0 {
duration = 5 * time.Minute
duration = defaultRefreshRate
}
gauge := prometheus.NewGaugeVec(prometheus.GaugeOpts{
workspaceLatestBuildTotals := prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "coderd",
Subsystem: "api",
Name: "workspace_latest_build_total",
Help: "The latest workspace builds with a status.",
Help: "The current number of workspace builds by status.",
}, []string{"status"})
err := registerer.Register(gauge)
if err != nil {
if err := registerer.Register(workspaceLatestBuildTotals); err != nil {
return nil, err
}
workspaceLatestBuildStatuses := prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "coderd",
Name: "workspace_latest_build_status",
Help: "The current workspace statuses by template, transition, and owner.",
}, []string{"status", "template_name", "template_version", "workspace_owner", "workspace_transition"})
if err := registerer.Register(workspaceLatestBuildStatuses); err != nil {
return nil, err
}
// This exists so the prometheus metric exports immediately when set.
// It helps with tests so they don't have to wait for a tick.
gauge.WithLabelValues("pending").Set(0)
ctx, cancelFunc := context.WithCancel(ctx)
done := make(chan struct{})
// Use time.Nanosecond to force an initial tick. It will be reset to the
// correct duration after executing once.
ticker := time.NewTicker(time.Nanosecond)
doTick := func() {
defer ticker.Reset(duration)
updateWorkspaceTotals := func() {
builds, err := db.GetLatestWorkspaceBuilds(ctx)
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
// clear all series if there are no database entries
workspaceLatestBuildTotals.Reset()
}
logger.Warn(ctx, "failed to load latest workspace builds", slog.Error(err))
return
}
jobIDs := make([]uuid.UUID, 0, len(builds))
@ -110,16 +118,53 @@ func Workspaces(ctx context.Context, registerer prometheus.Registerer, db databa
}
jobs, err := db.GetProvisionerJobsByIDs(ctx, jobIDs)
if err != nil {
ids := make([]string, 0, len(jobIDs))
for _, id := range jobIDs {
ids = append(ids, id.String())
}
logger.Warn(ctx, "failed to load provisioner jobs", slog.F("ids", ids), slog.Error(err))
return
}
gauge.Reset()
workspaceLatestBuildTotals.Reset()
for _, job := range jobs {
status := codersdk.ProvisionerJobStatus(job.JobStatus)
gauge.WithLabelValues(string(status)).Add(1)
workspaceLatestBuildTotals.WithLabelValues(string(status)).Add(1)
}
}
updateWorkspaceStatuses := func() {
ws, err := db.GetWorkspaces(ctx, database.GetWorkspacesParams{
Deleted: false,
WithSummary: false,
})
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
// clear all series if there are no database entries
workspaceLatestBuildStatuses.Reset()
}
logger.Warn(ctx, "failed to load active workspaces", slog.Error(err))
return
}
workspaceLatestBuildStatuses.Reset()
for _, w := range ws {
workspaceLatestBuildStatuses.WithLabelValues(string(w.LatestBuildStatus), w.TemplateName, w.TemplateVersionName.String, w.Username, string(w.LatestBuildTransition)).Add(1)
}
}
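// Note: updateWorkspaceTotals aggregates one series per status, while
// updateWorkspaceStatuses emits one series per workspace (status, template,
// owner, transition), so the latter has much higher cardinality.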
// Use time.Nanosecond to force an initial tick. It will be reset to the
// correct duration after executing once.
ticker := time.NewTicker(time.Nanosecond)
doTick := func() {
defer ticker.Reset(duration)
updateWorkspaceTotals()
updateWorkspaceStatuses()
}
go func() {
defer close(done)
defer ticker.Stop()
@ -141,7 +186,7 @@ func Workspaces(ctx context.Context, registerer prometheus.Registerer, db databa
// Agents tracks the total number of workspace agents, labeled by status.
func Agents(ctx context.Context, logger slog.Logger, registerer prometheus.Registerer, db database.Store, coordinator *atomic.Pointer[tailnet.Coordinator], derpMapFn func() *tailcfg.DERPMap, agentInactiveDisconnectTimeout, duration time.Duration) (func(), error) {
if duration == 0 {
duration = 1 * time.Minute
duration = defaultRefreshRate
}
agentsGauge := NewCachedGaugeVec(prometheus.NewGaugeVec(prometheus.GaugeOpts{
@ -330,7 +375,7 @@ func Agents(ctx context.Context, logger slog.Logger, registerer prometheus.Regis
func AgentStats(ctx context.Context, logger slog.Logger, registerer prometheus.Registerer, db database.Store, initialCreateAfter time.Time, duration time.Duration, aggregateByLabels []string) (func(), error) {
if duration == 0 {
duration = 1 * time.Minute
duration = defaultRefreshRate
}
if len(aggregateByLabels) == 0 {

View File

@ -31,6 +31,7 @@ import (
"github.com/coder/coder/v2/coderd/prometheusmetrics"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/codersdk/agentsdk"
"github.com/coder/coder/v2/cryptorand"
"github.com/coder/coder/v2/provisioner/echo"
"github.com/coder/coder/v2/provisionersdk/proto"
"github.com/coder/coder/v2/tailnet"
@ -110,89 +111,9 @@ func TestActiveUsers(t *testing.T) {
}
}
func TestWorkspaces(t *testing.T) {
func TestWorkspaceLatestBuildTotals(t *testing.T) {
t.Parallel()
insertRunning := func(db database.Store) database.ProvisionerJob {
job, err := db.InsertProvisionerJob(context.Background(), database.InsertProvisionerJobParams{
ID: uuid.New(),
CreatedAt: dbtime.Now(),
UpdatedAt: dbtime.Now(),
Provisioner: database.ProvisionerTypeEcho,
StorageMethod: database.ProvisionerStorageMethodFile,
Type: database.ProvisionerJobTypeWorkspaceBuild,
})
require.NoError(t, err)
err = db.InsertWorkspaceBuild(context.Background(), database.InsertWorkspaceBuildParams{
ID: uuid.New(),
WorkspaceID: uuid.New(),
JobID: job.ID,
BuildNumber: 1,
Transition: database.WorkspaceTransitionStart,
Reason: database.BuildReasonInitiator,
})
require.NoError(t, err)
// This marks the job as started.
_, err = db.AcquireProvisionerJob(context.Background(), database.AcquireProvisionerJobParams{
OrganizationID: job.OrganizationID,
StartedAt: sql.NullTime{
Time: dbtime.Now(),
Valid: true,
},
Types: []database.ProvisionerType{database.ProvisionerTypeEcho},
})
require.NoError(t, err)
return job
}
insertCanceled := func(db database.Store) {
job := insertRunning(db)
err := db.UpdateProvisionerJobWithCancelByID(context.Background(), database.UpdateProvisionerJobWithCancelByIDParams{
ID: job.ID,
CanceledAt: sql.NullTime{
Time: dbtime.Now(),
Valid: true,
},
})
require.NoError(t, err)
err = db.UpdateProvisionerJobWithCompleteByID(context.Background(), database.UpdateProvisionerJobWithCompleteByIDParams{
ID: job.ID,
CompletedAt: sql.NullTime{
Time: dbtime.Now(),
Valid: true,
},
})
require.NoError(t, err)
}
insertFailed := func(db database.Store) {
job := insertRunning(db)
err := db.UpdateProvisionerJobWithCompleteByID(context.Background(), database.UpdateProvisionerJobWithCompleteByIDParams{
ID: job.ID,
CompletedAt: sql.NullTime{
Time: dbtime.Now(),
Valid: true,
},
Error: sql.NullString{
String: "failed",
Valid: true,
},
})
require.NoError(t, err)
}
insertSuccess := func(db database.Store) {
job := insertRunning(db)
err := db.UpdateProvisionerJobWithCompleteByID(context.Background(), database.UpdateProvisionerJobWithCompleteByIDParams{
ID: job.ID,
CompletedAt: sql.NullTime{
Time: dbtime.Now(),
Valid: true,
},
})
require.NoError(t, err)
}
for _, tc := range []struct {
Name string
Database func() database.Store
@ -208,13 +129,13 @@ func TestWorkspaces(t *testing.T) {
Name: "Multiple",
Database: func() database.Store {
db := dbmem.New()
insertCanceled(db)
insertFailed(db)
insertFailed(db)
insertSuccess(db)
insertSuccess(db)
insertSuccess(db)
insertRunning(db)
insertCanceled(t, db)
insertFailed(t, db)
insertFailed(t, db)
insertSuccess(t, db)
insertSuccess(t, db)
insertSuccess(t, db)
insertRunning(t, db)
return db
},
Total: 7,
@ -229,29 +150,32 @@ func TestWorkspaces(t *testing.T) {
t.Run(tc.Name, func(t *testing.T) {
t.Parallel()
registry := prometheus.NewRegistry()
closeFunc, err := prometheusmetrics.Workspaces(context.Background(), registry, tc.Database(), time.Millisecond)
closeFunc, err := prometheusmetrics.Workspaces(context.Background(), slogtest.Make(t, nil).Leveled(slog.LevelWarn), registry, tc.Database(), testutil.IntervalFast)
require.NoError(t, err)
t.Cleanup(closeFunc)
require.Eventually(t, func() bool {
metrics, err := registry.Gather()
assert.NoError(t, err)
if len(metrics) < 1 {
return false
}
sum := 0
for _, metric := range metrics[0].Metric {
count, ok := tc.Status[codersdk.ProvisionerJobStatus(metric.Label[0].GetValue())]
if metric.Gauge.GetValue() == 0 {
for _, m := range metrics {
if m.GetName() != "coderd_api_workspace_latest_build_total" {
continue
}
if !ok {
t.Fail()
for _, metric := range m.Metric {
count, ok := tc.Status[codersdk.ProvisionerJobStatus(metric.Label[0].GetValue())]
if metric.Gauge.GetValue() == 0 {
continue
}
if !ok {
t.Fail()
}
if metric.Gauge.GetValue() != float64(count) {
return false
}
sum += int(metric.Gauge.GetValue())
}
if metric.Gauge.GetValue() != float64(count) {
return false
}
sum += int(metric.Gauge.GetValue())
}
t.Logf("sum %d == total %d", sum, tc.Total)
return sum == tc.Total
@ -260,6 +184,90 @@ func TestWorkspaces(t *testing.T) {
}
}
func TestWorkspaceLatestBuildStatuses(t *testing.T) {
t.Parallel()
for _, tc := range []struct {
Name string
Database func() database.Store
ExpectedWorkspaces int
ExpectedStatuses map[codersdk.ProvisionerJobStatus]int
}{{
Name: "None",
Database: func() database.Store {
return dbmem.New()
},
ExpectedWorkspaces: 0,
}, {
Name: "Multiple",
Database: func() database.Store {
db := dbmem.New()
insertTemplates(t, db)
insertCanceled(t, db)
insertFailed(t, db)
insertFailed(t, db)
insertSuccess(t, db)
insertSuccess(t, db)
insertSuccess(t, db)
insertRunning(t, db)
return db
},
ExpectedWorkspaces: 7,
ExpectedStatuses: map[codersdk.ProvisionerJobStatus]int{
codersdk.ProvisionerJobCanceled: 1,
codersdk.ProvisionerJobFailed: 2,
codersdk.ProvisionerJobSucceeded: 3,
codersdk.ProvisionerJobRunning: 1,
},
}} {
tc := tc
t.Run(tc.Name, func(t *testing.T) {
t.Parallel()
registry := prometheus.NewRegistry()
closeFunc, err := prometheusmetrics.Workspaces(context.Background(), slogtest.Make(t, nil), registry, tc.Database(), testutil.IntervalFast)
require.NoError(t, err)
t.Cleanup(closeFunc)
require.Eventually(t, func() bool {
metrics, err := registry.Gather()
assert.NoError(t, err)
stMap := map[codersdk.ProvisionerJobStatus]int{}
for _, m := range metrics {
if m.GetName() != "coderd_workspace_latest_build_status" {
continue
}
for _, metric := range m.Metric {
for _, l := range metric.Label {
if l == nil {
continue
}
if l.GetName() == "status" {
status := codersdk.ProvisionerJobStatus(l.GetValue())
stMap[status] += int(metric.Gauge.GetValue())
}
}
}
}
stSum := 0
for st, count := range stMap {
if tc.ExpectedStatuses[st] != count {
return false
}
stSum += count
}
t.Logf("status series = %d, expected == %d", stSum, tc.ExpectedWorkspaces)
return stSum == tc.ExpectedWorkspaces
}, testutil.WaitShort, testutil.IntervalFast)
})
}
}
func TestAgents(t *testing.T) {
t.Parallel()
@ -601,3 +609,153 @@ func prepareWorkspaceAndAgent(t *testing.T, client *codersdk.Client, user coders
agentClient.SetSessionToken(authToken)
return agentClient
}
var (
templateA = uuid.New()
templateVersionA = uuid.New()
templateB = uuid.New()
templateVersionB = uuid.New()
)
func insertTemplates(t *testing.T, db database.Store) {
require.NoError(t, db.InsertTemplate(context.Background(), database.InsertTemplateParams{
ID: templateA,
Name: "template-a",
Provisioner: database.ProvisionerTypeTerraform,
MaxPortSharingLevel: database.AppSharingLevelAuthenticated,
}))
require.NoError(t, db.InsertTemplateVersion(context.Background(), database.InsertTemplateVersionParams{
ID: templateVersionA,
TemplateID: uuid.NullUUID{UUID: templateA},
Name: "version-1a",
}))
require.NoError(t, db.InsertTemplate(context.Background(), database.InsertTemplateParams{
ID: templateB,
Name: "template-b",
Provisioner: database.ProvisionerTypeTerraform,
MaxPortSharingLevel: database.AppSharingLevelAuthenticated,
}))
require.NoError(t, db.InsertTemplateVersion(context.Background(), database.InsertTemplateVersionParams{
ID: templateVersionB,
TemplateID: uuid.NullUUID{UUID: templateB},
Name: "version-1b",
}))
}
func insertUser(t *testing.T, db database.Store) database.User {
username, err := cryptorand.String(8)
require.NoError(t, err)
user, err := db.InsertUser(context.Background(), database.InsertUserParams{
ID: uuid.New(),
Username: username,
LoginType: database.LoginTypeNone,
})
require.NoError(t, err)
return user
}
func insertRunning(t *testing.T, db database.Store) database.ProvisionerJob {
var template, templateVersion uuid.UUID
rnd, err := cryptorand.Intn(10)
require.NoError(t, err)
if rnd > 5 {
template = templateB
templateVersion = templateVersionB
} else {
template = templateA
templateVersion = templateVersionA
}
workspace, err := db.InsertWorkspace(context.Background(), database.InsertWorkspaceParams{
ID: uuid.New(),
OwnerID: insertUser(t, db).ID,
Name: uuid.NewString(),
TemplateID: template,
AutomaticUpdates: database.AutomaticUpdatesNever,
})
require.NoError(t, err)
job, err := db.InsertProvisionerJob(context.Background(), database.InsertProvisionerJobParams{
ID: uuid.New(),
CreatedAt: dbtime.Now(),
UpdatedAt: dbtime.Now(),
Provisioner: database.ProvisionerTypeEcho,
StorageMethod: database.ProvisionerStorageMethodFile,
Type: database.ProvisionerJobTypeWorkspaceBuild,
})
require.NoError(t, err)
err = db.InsertWorkspaceBuild(context.Background(), database.InsertWorkspaceBuildParams{
ID: uuid.New(),
WorkspaceID: workspace.ID,
JobID: job.ID,
BuildNumber: 1,
Transition: database.WorkspaceTransitionStart,
Reason: database.BuildReasonInitiator,
TemplateVersionID: templateVersion,
})
require.NoError(t, err)
// This marks the job as started.
_, err = db.AcquireProvisionerJob(context.Background(), database.AcquireProvisionerJobParams{
OrganizationID: job.OrganizationID,
StartedAt: sql.NullTime{
Time: dbtime.Now(),
Valid: true,
},
Types: []database.ProvisionerType{database.ProvisionerTypeEcho},
})
require.NoError(t, err)
return job
}
func insertCanceled(t *testing.T, db database.Store) {
job := insertRunning(t, db)
err := db.UpdateProvisionerJobWithCancelByID(context.Background(), database.UpdateProvisionerJobWithCancelByIDParams{
ID: job.ID,
CanceledAt: sql.NullTime{
Time: dbtime.Now(),
Valid: true,
},
})
require.NoError(t, err)
err = db.UpdateProvisionerJobWithCompleteByID(context.Background(), database.UpdateProvisionerJobWithCompleteByIDParams{
ID: job.ID,
CompletedAt: sql.NullTime{
Time: dbtime.Now(),
Valid: true,
},
})
require.NoError(t, err)
}
func insertFailed(t *testing.T, db database.Store) {
job := insertRunning(t, db)
err := db.UpdateProvisionerJobWithCompleteByID(context.Background(), database.UpdateProvisionerJobWithCompleteByIDParams{
ID: job.ID,
CompletedAt: sql.NullTime{
Time: dbtime.Now(),
Valid: true,
},
Error: sql.NullString{
String: "failed",
Valid: true,
},
})
require.NoError(t, err)
}
func insertSuccess(t *testing.T, db database.Store) {
job := insertRunning(t, db)
err := db.UpdateProvisionerJobWithCompleteByID(context.Background(), database.UpdateProvisionerJobWithCompleteByIDParams{
ID: job.ID,
CompletedAt: sql.NullTime{
Time: dbtime.Now(),
Valid: true,
},
})
require.NoError(t, err)
}

View File

@ -467,6 +467,17 @@ func (s *server) acquireProtoJob(ctx context.Context, job database.ProvisionerJo
if err != nil {
return nil, failJob(fmt.Sprintf("get owner: %s", err))
}
ownerGroups, err := s.Database.GetGroupsByOrganizationAndUserID(ctx, database.GetGroupsByOrganizationAndUserIDParams{
UserID: owner.ID,
OrganizationID: s.OrganizationID,
})
if err != nil {
return nil, failJob(fmt.Sprintf("get owner group names: %s", err))
}
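// Only the group names are forwarded to the provisioner (as
// WorkspaceOwnerGroups below).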
ownerGroupNames := []string{}
for _, group := range ownerGroups {
ownerGroupNames = append(ownerGroupNames, group.Name)
}
err = s.Pubsub.Publish(codersdk.WorkspaceNotifyChannel(workspace.ID), []byte{})
if err != nil {
return nil, failJob(fmt.Sprintf("publish workspace update: %s", err))
@ -567,6 +578,7 @@ func (s *server) acquireProtoJob(ctx context.Context, job database.ProvisionerJo
WorkspaceOwner: owner.Username,
WorkspaceOwnerEmail: owner.Email,
WorkspaceOwnerName: owner.Name,
WorkspaceOwnerGroups: ownerGroupNames,
WorkspaceOwnerOidcAccessToken: workspaceOwnerOIDCAccessToken,
WorkspaceId: workspace.ID.String(),
WorkspaceOwnerId: owner.ID.String(),
@ -1725,9 +1737,9 @@ func (s *server) regenerateSessionToken(ctx context.Context, user database.User,
newkey, sessionToken, err := apikey.Generate(apikey.CreateParams{
UserID: user.ID,
LoginType: user.LoginType,
DefaultLifetime: s.DeploymentValues.SessionDuration.Value(),
TokenName: workspaceSessionTokenName(workspace),
LifetimeSeconds: int64(s.DeploymentValues.MaxTokenLifetime.Value().Seconds()),
DefaultLifetime: s.DeploymentValues.Sessions.DefaultDuration.Value(),
LifetimeSeconds: int64(s.DeploymentValues.Sessions.MaximumTokenDuration.Value().Seconds()),
})
if err != nil {
return "", xerrors.Errorf("generate API key: %w", err)

View File

@ -166,7 +166,11 @@ func TestAcquireJob(t *testing.T) {
// Set the max session token lifetime so we can assert we
// create an API key with an expiration within the bounds of the
// deployment config.
dv := &codersdk.DeploymentValues{MaxTokenLifetime: serpent.Duration(time.Hour)}
dv := &codersdk.DeploymentValues{
Sessions: codersdk.SessionLifetime{
MaximumTokenDuration: serpent.Duration(time.Hour),
},
}
gitAuthProvider := &sdkproto.ExternalAuthProviderResource{
Id: "github",
}
@ -182,6 +186,15 @@ func TestAcquireJob(t *testing.T) {
defer cancel()
user := dbgen.User(t, db, database.User{})
group1 := dbgen.Group(t, db, database.Group{
Name: "group1",
OrganizationID: pd.OrganizationID,
})
err := db.InsertGroupMember(ctx, database.InsertGroupMemberParams{
UserID: user.ID,
GroupID: group1.ID,
})
require.NoError(t, err)
link := dbgen.UserLink(t, db, database.UserLink{
LoginType: database.LoginTypeOIDC,
UserID: user.ID,
@ -310,8 +323,8 @@ func TestAcquireJob(t *testing.T) {
require.Len(t, toks, 2, "invalid api key")
key, err := db.GetAPIKeyByID(ctx, toks[0])
require.NoError(t, err)
require.Equal(t, int64(dv.MaxTokenLifetime.Value().Seconds()), key.LifetimeSeconds)
require.WithinDuration(t, time.Now().Add(dv.MaxTokenLifetime.Value()), key.ExpiresAt, time.Minute)
require.Equal(t, int64(dv.Sessions.MaximumTokenDuration.Value().Seconds()), key.LifetimeSeconds)
require.WithinDuration(t, time.Now().Add(dv.Sessions.MaximumTokenDuration.Value()), key.ExpiresAt, time.Minute)
want, err := json.Marshal(&proto.AcquiredJob_WorkspaceBuild_{
WorkspaceBuild: &proto.AcquiredJob_WorkspaceBuild{
@ -340,6 +353,7 @@ func TestAcquireJob(t *testing.T) {
WorkspaceOwnerEmail: user.Email,
WorkspaceOwnerName: user.Name,
WorkspaceOwnerOidcAccessToken: link.OAuthAccessToken,
WorkspaceOwnerGroups: []string{group1.Name},
WorkspaceId: workspace.ID.String(),
WorkspaceOwnerId: user.ID.String(),
TemplateId: template.ID.String(),

View File

@ -32,11 +32,14 @@ import (
var tailnetTransport *http.Transport
func init() {
var valid bool
tailnetTransport, valid = http.DefaultTransport.(*http.Transport)
tp, valid := http.DefaultTransport.(*http.Transport)
if !valid {
panic("dev error: default transport is the wrong type")
}
tailnetTransport = tp.Clone()
// We do not want to respect the proxy settings from the environment, since
// all network traffic happens over WireGuard.
tailnetTransport.Proxy = nil
}
var _ workspaceapps.AgentProvider = (*ServerTailnet)(nil)
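The fix above matters because the original code stored the `*http.Transport` from `http.DefaultTransport` directly and then mutated it, clearing proxy settings for every client in the process. A minimal, self-contained sketch of the clone-before-mutate pattern (names are illustrative):

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	def, ok := http.DefaultTransport.(*http.Transport)
	if !ok {
		panic("dev error: default transport is the wrong type")
	}

	// Clone copies the transport's settings, so mutating the clone
	// leaves http.DefaultTransport (and everyone using it) intact.
	tailnetTransport := def.Clone()
	tailnetTransport.Proxy = nil

	fmt.Println(def.Proxy != nil)              // true: default still proxies
	fmt.Println(tailnetTransport.Proxy == nil) // true: tailnet never proxies
}
```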

View File

@ -68,6 +68,35 @@ func TestServerTailnet_AgentConn_NoSTUN(t *testing.T) {
assert.True(t, conn.AwaitReachable(ctx))
}
//nolint:paralleltest // t.Setenv
func TestServerTailnet_ReverseProxy_ProxyEnv(t *testing.T) {
t.Setenv("HTTP_PROXY", "http://169.254.169.254:12345")
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
defer cancel()
agents, serverTailnet := setupServerTailnetAgent(t, 1)
a := agents[0]
u, err := url.Parse(fmt.Sprintf("http://127.0.0.1:%d", workspacesdk.AgentHTTPAPIServerPort))
require.NoError(t, err)
rp := serverTailnet.ReverseProxy(u, u, a.id)
rw := httptest.NewRecorder()
req := httptest.NewRequest(
http.MethodGet,
u.String(),
nil,
).WithContext(ctx)
rp.ServeHTTP(rw, req)
res := rw.Result()
defer res.Body.Close()
assert.Equal(t, http.StatusOK, res.StatusCode)
}
func TestServerTailnet_ReverseProxy(t *testing.T) {
t.Parallel()

View File

@ -252,7 +252,7 @@ func (api *API) postLogin(rw http.ResponseWriter, r *http.Request) {
UserID: user.ID,
LoginType: database.LoginTypePassword,
RemoteAddr: r.RemoteAddr,
DefaultLifetime: api.DeploymentValues.SessionDuration.Value(),
DefaultLifetime: api.DeploymentValues.Sessions.DefaultDuration.Value(),
})
if err != nil {
logger.Error(ctx, "unable to create API key", slog.Error(err))
@ -1612,7 +1612,7 @@ func (api *API) oauthLogin(r *http.Request, params *oauthLoginParams) ([]*http.C
cookie, newKey, err := api.createAPIKey(dbauthz.AsSystemRestricted(ctx), apikey.CreateParams{
UserID: user.ID,
LoginType: params.LoginType,
DefaultLifetime: api.DeploymentValues.SessionDuration.Value(),
DefaultLifetime: api.DeploymentValues.Sessions.DefaultDuration.Value(),
RemoteAddr: r.RemoteAddr,
})
if err != nil {

View File

@ -1132,6 +1132,7 @@ func convertScripts(dbScripts []database.WorkspaceAgentScript) []codersdk.Worksp
// @Param request body agentsdk.Stats true "Stats request"
// @Success 200 {object} agentsdk.StatsResponse
// @Router /workspaceagents/me/report-stats [post]
// @Deprecated Use the agent API v2 endpoint instead.
func (api *API) workspaceAgentReportStats(rw http.ResponseWriter, r *http.Request) {
ctx := r.Context()

View File

@ -4,6 +4,7 @@ import (
"context"
"database/sql"
"fmt"
"io"
"net/http"
"runtime/pprof"
"sync"
@ -156,7 +157,7 @@ func (api *API) workspaceAgentRPC(rw http.ResponseWriter, r *http.Request) {
ctx = tailnet.WithStreamID(ctx, streamID)
ctx = agentapi.WithAPIVersion(ctx, version)
err = agentAPI.Serve(ctx, mux)
if err != nil {
if err != nil && !xerrors.Is(err, yamux.ErrSessionShutdown) && !xerrors.Is(err, io.EOF) {
logger.Warn(ctx, "workspace agent RPC listen error", slog.Error(err))
_ = conn.Close(websocket.StatusInternalError, err.Error())
return
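Treating `yamux.ErrSessionShutdown` and `io.EOF` as expected disconnects keeps routine agent hangups out of the warning logs. A hedged sketch of the check as a standalone helper (the helper name is illustrative; the handler above inlines the same test):

```go
import (
	"io"

	"github.com/hashicorp/yamux"
	"golang.org/x/xerrors"
)

// isBenignDisconnect reports whether err is an expected result of the
// peer shutting down the session, as opposed to a real RPC failure
// worth logging and surfacing in the websocket close frame.
func isBenignDisconnect(err error) bool {
	return xerrors.Is(err, yamux.ErrSessionShutdown) || xerrors.Is(err, io.EOF)
}
```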

View File

@ -102,14 +102,14 @@ func (api *API) workspaceApplicationAuth(rw http.ResponseWriter, r *http.Request
// the current session.
exp := apiKey.ExpiresAt
lifetimeSeconds := apiKey.LifetimeSeconds
if exp.IsZero() || time.Until(exp) > api.DeploymentValues.SessionDuration.Value() {
exp = dbtime.Now().Add(api.DeploymentValues.SessionDuration.Value())
lifetimeSeconds = int64(api.DeploymentValues.SessionDuration.Value().Seconds())
if exp.IsZero() || time.Until(exp) > api.DeploymentValues.Sessions.DefaultDuration.Value() {
exp = dbtime.Now().Add(api.DeploymentValues.Sessions.DefaultDuration.Value())
lifetimeSeconds = int64(api.DeploymentValues.Sessions.DefaultDuration.Value().Seconds())
}
cookie, _, err := api.createAPIKey(ctx, apikey.CreateParams{
UserID: apiKey.UserID,
LoginType: database.LoginTypePassword,
DefaultLifetime: api.DeploymentValues.SessionDuration.Value(),
DefaultLifetime: api.DeploymentValues.Sessions.DefaultDuration.Value(),
ExpiresAt: exp,
LifetimeSeconds: lifetimeSeconds,
Scope: database.APIKeyScopeApplicationConnect,

View File

@ -1165,6 +1165,7 @@ func Run(t *testing.T, appHostIsPrimary bool, factory DeploymentFactory) {
appDetails := setupProxyTest(t, &DeploymentOptions{
ServeHTTPS: true,
})
// using the fact that Apps.Port and Apps.PortHTTPS are the same port here
port, err := strconv.ParseInt(appDetails.Apps.Port.AppSlugOrPort, 10, 32)
require.NoError(t, err)
_, err = appDetails.SDKClient.UpsertWorkspaceAgentPortShare(ctx, appDetails.Workspace.ID, codersdk.UpsertWorkspaceAgentPortShareRequest{
@ -1178,7 +1179,7 @@ func Run(t *testing.T, appHostIsPrimary bool, factory DeploymentFactory) {
publicAppClient := appDetails.AppClient(t)
publicAppClient.SetSessionToken("")
resp, err := requestWithRetries(ctx, t, publicAppClient, http.MethodGet, appDetails.SubdomainAppURL(appDetails.Apps.Port).String(), nil)
resp, err := requestWithRetries(ctx, t, publicAppClient, http.MethodGet, appDetails.SubdomainAppURL(appDetails.Apps.PortHTTPS).String(), nil)
require.NoError(t, err)
defer resp.Body.Close()
require.Equal(t, http.StatusOK, resp.StatusCode)
@ -1765,9 +1766,11 @@ func assertWorkspaceLastUsedAtUpdated(t testing.TB, details *Details) {
require.NotNil(t, details.Workspace, "can't assert LastUsedAt on a nil workspace!")
before, err := details.SDKClient.Workspace(context.Background(), details.Workspace.ID)
require.NoError(t, err)
// Wait for stats to fully flush.
details.FlushStats()
require.Eventually(t, func() bool {
// We may need to flush multiple times, since the stats from the app we are testing might be
// collected asynchronously from when we see the connection close and could therefore race
// against being flushed.
details.FlushStats()
after, err := details.SDKClient.Workspace(context.Background(), details.Workspace.ID)
return assert.NoError(t, err) && after.LastUsedAt.After(before.LastUsedAt)
}, testutil.WaitShort, testutil.IntervalMedium)

View File

@ -116,6 +116,7 @@ type Details struct {
Authenticated App
Public App
Port App
PortHTTPS App
}
}
@ -247,6 +248,12 @@ func setupProxyTestWithFactory(t *testing.T, factory DeploymentFactory, opts *De
AgentName: agnt.Name,
AppSlugOrPort: strconv.Itoa(int(opts.port)),
}
details.Apps.PortHTTPS = App{
Username: me.Username,
WorkspaceName: workspace.Name,
AgentName: agnt.Name,
AppSlugOrPort: strconv.Itoa(int(opts.port)) + "s",
}
return details
}

View File

@ -90,9 +90,10 @@ func (a ApplicationURL) Path() string {
//
// Subdomains should be in the form:
//
// ({PREFIX}---)?{PORT/APP_SLUG}--{AGENT_NAME}--{WORKSPACE_NAME}--{USERNAME}
// ({PREFIX}---)?{PORT{s?}/APP_SLUG}--{AGENT_NAME}--{WORKSPACE_NAME}--{USERNAME}
// e.g.
// https://8080--main--dev--dean.hi.c8s.io
// https://8080s--main--dev--dean.hi.c8s.io
// https://app--main--dev--dean.hi.c8s.io
// https://prefix---8080--main--dev--dean.hi.c8s.io
// https://prefix---app--main--dev--dean.hi.c8s.io
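A hedged sketch of how one of these subdomain labels decomposes, assuming no `{PREFIX}---` part and a plain split on the `--` separator (the helper is illustrative, not the package's actual parser):

```go
import "strings"

// splitAppSubdomain breaks a label like "8080s--main--dev--dean" into
// its four parts: app slug or port (with optional "s" for HTTPS),
// agent name, workspace name, and username.
func splitAppSubdomain(label string) (appSlugOrPort, agent, workspace, user string, ok bool) {
	parts := strings.Split(label, "--")
	if len(parts) != 4 {
		return "", "", "", "", false
	}
	return parts[0], parts[1], parts[2], parts[3], true
}
```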

View File

@ -85,7 +85,7 @@ func (p *DBTokenProvider) Issue(ctx context.Context, rw http.ResponseWriter, r *
DB: p.Database,
OAuth2Configs: p.OAuth2Configs,
RedirectToLogin: false,
DisableSessionExpiryRefresh: p.DeploymentValues.DisableSessionExpiryRefresh.Value(),
DisableSessionExpiryRefresh: p.DeploymentValues.Sessions.DisableExpiryRefresh.Value(),
// Optional is true to allow for public apps. If the authorization check
// (later on) fails and the user is not authenticated, they will be
// redirected to the login page or app auth endpoint using code below.

View File

@ -40,6 +40,7 @@ func Test_ResolveRequest(t *testing.T) {
// Users can access unhealthy and initializing apps (as of 2024-02).
appNameUnhealthy = "app-unhealthy"
appNameInitializing = "app-initializing"
appNameEndsInS = "app-ends-in-s"
// This agent will never connect, so it will never become "connected".
// Users cannot access unhealthy agents.
@ -166,6 +167,12 @@ func Test_ResolveRequest(t *testing.T) {
Threshold: 1000,
},
},
{
Slug: appNameEndsInS,
DisplayName: appNameEndsInS,
SharingLevel: proto.AppSharingLevel_OWNER,
Url: appURL,
},
},
},
{
@ -644,6 +651,67 @@ func Test_ResolveRequest(t *testing.T) {
require.Equal(t, "http://127.0.0.1:9090", token.AppURL)
})
t.Run("PortSubdomainHTTPSS", func(t *testing.T) {
t.Parallel()
req := (workspaceapps.Request{
AccessMethod: workspaceapps.AccessMethodSubdomain,
BasePath: "/",
UsernameOrID: me.Username,
WorkspaceNameOrID: workspace.Name,
AgentNameOrID: agentName,
AppSlugOrPort: "9090ss",
}).Normalize()
rw := httptest.NewRecorder()
r := httptest.NewRequest("GET", "/", nil)
r.Header.Set(codersdk.SessionTokenHeader, client.SessionToken())
_, ok := workspaceapps.ResolveRequest(rw, r, workspaceapps.ResolveRequestOptions{
Logger: api.Logger,
SignedTokenProvider: api.WorkspaceAppsProvider,
DashboardURL: api.AccessURL,
PathAppBaseURL: api.AccessURL,
AppHostname: api.AppHostname,
AppRequest: req,
})
// should parse as app and fail to find app "9090ss"
require.False(t, ok)
w := rw.Result()
defer w.Body.Close()
b, err := io.ReadAll(w.Body)
require.NoError(t, err)
require.Contains(t, string(b), "404 - Application Not Found")
})
t.Run("SubdomainEndsInS", func(t *testing.T) {
t.Parallel()
req := (workspaceapps.Request{
AccessMethod: workspaceapps.AccessMethodSubdomain,
BasePath: "/",
UsernameOrID: me.Username,
WorkspaceNameOrID: workspace.Name,
AgentNameOrID: agentName,
AppSlugOrPort: appNameEndsInS,
}).Normalize()
rw := httptest.NewRecorder()
r := httptest.NewRequest("GET", "/", nil)
r.Header.Set(codersdk.SessionTokenHeader, client.SessionToken())
token, ok := workspaceapps.ResolveRequest(rw, r, workspaceapps.ResolveRequestOptions{
Logger: api.Logger,
SignedTokenProvider: api.WorkspaceAppsProvider,
DashboardURL: api.AccessURL,
PathAppBaseURL: api.AccessURL,
AppHostname: api.AppHostname,
AppRequest: req,
})
require.True(t, ok)
require.Equal(t, req.AppSlugOrPort, token.AppSlugOrPort)
})
t.Run("Terminal", func(t *testing.T) {
t.Parallel()

View File

@ -287,12 +287,20 @@ func (r Request) getDatabase(ctx context.Context, db database.Store) (*databaseR
// whether the app is a slug or a port and whether there are multiple agents
// in the workspace or not.
var (
agentNameOrID = r.AgentNameOrID
appURL string
appSharingLevel database.AppSharingLevel
portUint, portUintErr = strconv.ParseUint(r.AppSlugOrPort, 10, 16)
agentNameOrID = r.AgentNameOrID
appURL string
appSharingLevel database.AppSharingLevel
// First check if it's a port-based URL with an optional "s" suffix for HTTPS.
potentialPortStr = strings.TrimSuffix(r.AppSlugOrPort, "s")
portUint, portUintErr = strconv.ParseUint(potentialPortStr, 10, 16)
)
//nolint:nestif
if portUintErr == nil {
protocol := "http"
if strings.HasSuffix(r.AppSlugOrPort, "s") {
protocol = "https"
}
if r.AccessMethod != AccessMethodSubdomain {
// TODO(@deansheather): this should return a 400 instead of a 500.
return nil, xerrors.New("port-based URLs are only supported for subdomain-based applications")
@ -309,10 +317,10 @@ func (r Request) getDatabase(ctx context.Context, db database.Store) (*databaseR
}
// If the app slug is a port number, then route to the port as an
// "anonymous app". We only support HTTP for port-based URLs.
// "anonymous app".
//
// This is only supported for subdomain-based applications.
appURL = fmt.Sprintf("http://127.0.0.1:%d", portUint)
appURL = fmt.Sprintf("%s://127.0.0.1:%d", protocol, portUint)
appSharingLevel = database.AppSharingLevelOwner
// Port sharing authorization
@ -342,10 +350,6 @@ func (r Request) getDatabase(ctx context.Context, db database.Store) (*databaseR
}
// No port share found, so we keep default to owner.
} else {
if ps.Protocol == database.PortShareProtocolHttps {
// Apply HTTPS protocol if specified.
appURL = fmt.Sprintf("https://127.0.0.1:%d", portUint)
}
appSharingLevel = ps.ShareLevel
}
} else {
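The hunk above is the heart of the `8080s`-style URLs: trim at most one trailing "s", and only if the remainder parses as a port does the request count as a port app. A runnable sketch of the rule in isolation (the helper name is illustrative):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parsePortApp interprets an app slug such as "8080" or "8080s": a
// bare port means HTTP, a single trailing "s" selects HTTPS, and
// anything else is treated as a regular app slug.
func parsePortApp(slugOrPort string) (appURL string, ok bool) {
	potentialPortStr := strings.TrimSuffix(slugOrPort, "s")
	port, err := strconv.ParseUint(potentialPortStr, 10, 16)
	if err != nil {
		return "", false
	}
	protocol := "http"
	if strings.HasSuffix(slugOrPort, "s") {
		protocol = "https"
	}
	return fmt.Sprintf("%s://127.0.0.1:%d", protocol, port), true
}

func main() {
	fmt.Println(parsePortApp("8080"))   // http://127.0.0.1:8080 true
	fmt.Println(parsePortApp("8080s"))  // https://127.0.0.1:8080 true
	fmt.Println(parsePortApp("9090ss")) // "" false: only one "s" is trimmed
}
```

Note that `TrimSuffix` removes at most one "s", which is why the `9090ss` request in the tests falls through to app-slug lookup and 404s.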

View File

@ -57,6 +57,26 @@ func Test_RequestValidate(t *testing.T) {
AppSlugOrPort: "baz",
},
},
{
name: "OK5",
req: workspaceapps.Request{
AccessMethod: workspaceapps.AccessMethodSubdomain,
BasePath: "/",
UsernameOrID: "foo",
WorkspaceNameOrID: "bar",
AppSlugOrPort: "8080",
},
},
{
name: "OK6",
req: workspaceapps.Request{
AccessMethod: workspaceapps.AccessMethodSubdomain,
BasePath: "/",
UsernameOrID: "foo",
WorkspaceNameOrID: "bar",
AppSlugOrPort: "8080s",
},
},
{
name: "NoAccessMethod",
req: workspaceapps.Request{

View File

@ -222,6 +222,54 @@ func Test_TokenMatchesRequest(t *testing.T) {
},
want: false,
},
{
name: "PortPortocolHTTP",
req: workspaceapps.Request{
AccessMethod: workspaceapps.AccessMethodSubdomain,
Prefix: "yolo--",
BasePath: "/",
UsernameOrID: "foo",
WorkspaceNameOrID: "bar",
AgentNameOrID: "baz",
AppSlugOrPort: "8080",
},
token: workspaceapps.SignedToken{
Request: workspaceapps.Request{
AccessMethod: workspaceapps.AccessMethodSubdomain,
Prefix: "yolo--",
BasePath: "/",
UsernameOrID: "foo",
WorkspaceNameOrID: "bar",
AgentNameOrID: "baz",
AppSlugOrPort: "8080",
},
},
want: true,
},
{
name: "PortPortocolHTTPS",
req: workspaceapps.Request{
AccessMethod: workspaceapps.AccessMethodSubdomain,
Prefix: "yolo--",
BasePath: "/",
UsernameOrID: "foo",
WorkspaceNameOrID: "bar",
AgentNameOrID: "baz",
AppSlugOrPort: "8080s",
},
token: workspaceapps.SignedToken{
Request: workspaceapps.Request{
AccessMethod: workspaceapps.AccessMethodSubdomain,
Prefix: "yolo--",
BasePath: "/",
UsernameOrID: "foo",
WorkspaceNameOrID: "bar",
AgentNameOrID: "baz",
AppSlugOrPort: "8080s",
},
},
want: true,
},
}
for _, c := range cases {

View File

@ -332,6 +332,10 @@ func (api *API) workspaceByOwnerAndName(rw http.ResponseWriter, r *http.Request)
// Create a new workspace for the currently authenticated user.
//
// @Summary Create user workspace by organization
// @Description Create a new workspace using a template. The request must
// @Description specify either the Template ID or the Template Version ID,
// @Description not both. If the Template ID is specified, the active version
// @Description of the template will be used.
// @ID create-user-workspace-by-organization
// @Security CoderSessionToken
// @Accept json
@ -1645,6 +1649,11 @@ func convertWorkspace(
}
ttlMillis := convertWorkspaceTTLMillis(workspace.Ttl)
// If the template doesn't allow a workspace-configured value, then report the
// template value instead.
if !template.AllowUserAutostop {
ttlMillis = convertWorkspaceTTLMillis(sql.NullInt64{Valid: true, Int64: template.DefaultTTL})
}
// Only show favorite status if you own the workspace.
requesterFavorite := workspace.OwnerID == requesterID && workspace.Favorite

View File

@ -761,8 +761,8 @@ func TestPostWorkspacesByOrganization(t *testing.T) {
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
// TTL should be set by the template
require.Equal(t, template.DefaultTTLMillis, templateTTL)
require.Equal(t, template.DefaultTTLMillis, *workspace.TTLMillis)
require.Equal(t, templateTTL, template.DefaultTTLMillis)
require.Equal(t, templateTTL, *workspace.TTLMillis)
})
t.Run("InvalidTTL", func(t *testing.T) {
@ -789,7 +789,7 @@ func TestPostWorkspacesByOrganization(t *testing.T) {
require.ErrorAs(t, err, &apiErr)
require.Equal(t, http.StatusBadRequest, apiErr.StatusCode())
require.Len(t, apiErr.Validations, 1)
require.Equal(t, apiErr.Validations[0].Field, "ttl_ms")
require.Equal(t, "ttl_ms", apiErr.Validations[0].Field)
require.Equal(t, "time until shutdown must be at least one minute", apiErr.Validations[0].Detail)
})
})

View File

@ -182,13 +182,11 @@ type DeploymentValues struct {
RateLimit RateLimitConfig `json:"rate_limit,omitempty" typescript:",notnull"`
Experiments serpent.StringArray `json:"experiments,omitempty" typescript:",notnull"`
UpdateCheck serpent.Bool `json:"update_check,omitempty" typescript:",notnull"`
MaxTokenLifetime serpent.Duration `json:"max_token_lifetime,omitempty" typescript:",notnull"`
Swagger SwaggerConfig `json:"swagger,omitempty" typescript:",notnull"`
Logging LoggingConfig `json:"logging,omitempty" typescript:",notnull"`
Dangerous DangerousConfig `json:"dangerous,omitempty" typescript:",notnull"`
DisablePathApps serpent.Bool `json:"disable_path_apps,omitempty" typescript:",notnull"`
SessionDuration serpent.Duration `json:"max_session_expiry,omitempty" typescript:",notnull"`
DisableSessionExpiryRefresh serpent.Bool `json:"disable_session_expiry_refresh,omitempty" typescript:",notnull"`
Sessions SessionLifetime `json:"session_lifetime,omitempty" typescript:",notnull"`
DisablePasswordAuth serpent.Bool `json:"disable_password_auth,omitempty" typescript:",notnull"`
Support SupportConfig `json:"support,omitempty" typescript:",notnull"`
ExternalAuthConfigs serpent.Struct[[]ExternalAuthConfig] `json:"external_auth,omitempty" typescript:",notnull"`
@ -244,6 +242,33 @@ func ParseSSHConfigOption(opt string) (key string, value string, err error) {
return opt[:idx], opt[idx+1:], nil
}
// SessionLifetime refers to "sessions" authenticating into Coderd. Coder has
// multiple different session types: api keys, tokens, workspace app tokens,
// agent tokens, etc. This configuration struct should be used to group all
// settings referring to any of these session lifetime controls.
// TODO: These config options were created back when coder only had api keys.
// Today, the config is ambiguously used for all of them. For example:
// - cli based api keys ignore all settings
// - login uses the default lifetime, not the MaximumTokenDuration
// - Tokens use the Default & MaximumTokenDuration
// - ... etc ...
// The rationale behind each decision is undocumented. The naming of these
// config options is also confusing without any clear documentation.
// 'CreateAPIKey' is used to make all sessions, and its parameters are just
// 'LifetimeSeconds' and 'DefaultLifetime', which do not directly correlate to
// the config options here.
type SessionLifetime struct {
// DisableExpiryRefresh will disable automatically refreshing api
// keys when they are used from the api. This means the api key lifetime at
// creation is the lifetime of the api key.
DisableExpiryRefresh serpent.Bool `json:"disable_expiry_refresh,omitempty" typescript:",notnull"`
// DefaultDuration is for api keys, not tokens.
DefaultDuration serpent.Duration `json:"default_duration" typescript:",notnull"`
MaximumTokenDuration serpent.Duration `json:"max_token_lifetime,omitempty" typescript:",notnull"`
}
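To make the grouping concrete, here is a hedged sketch of how these knobs could relate at key-creation time (field names come from the struct above; the clamping logic is illustrative, not the exact server behavior):

```go
// newKeyLifetime sketches the intended split: plain login sessions get
// DefaultDuration, while user-requested token lifetimes are honored up
// to the MaximumTokenDuration cap.
func newKeyLifetime(s SessionLifetime, requested time.Duration, isToken bool) time.Duration {
	if !isToken || requested <= 0 {
		return s.DefaultDuration.Value()
	}
	if max := s.MaximumTokenDuration.Value(); requested > max {
		return max
	}
	return requested
}
```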
type DERP struct {
Server DERPServerConfig `json:"server" typescript:",notnull"`
Config DERPConfig `json:"config" typescript:",notnull"`
@ -1579,7 +1604,7 @@ when required by your organization's security policy.`,
// We have to add in the 25 leap days for the frontend to show the
// "100 years" correctly.
Default: ((100 * 365 * time.Hour * 24) + (25 * time.Hour * 24)).String(),
Value: &c.MaxTokenLifetime,
Value: &c.Sessions.MaximumTokenDuration,
Group: &deploymentGroupNetworkingHTTP,
YAML: "maxTokenLifetime",
Annotations: serpent.Annotations{}.Mark(annotationFormatDuration, "true"),
@ -1773,7 +1798,7 @@ when required by your organization's security policy.`,
Flag: "session-duration",
Env: "CODER_SESSION_DURATION",
Default: (24 * time.Hour).String(),
Value: &c.SessionDuration,
Value: &c.Sessions.DefaultDuration,
Group: &deploymentGroupNetworkingHTTP,
YAML: "sessionDuration",
Annotations: serpent.Annotations{}.Mark(annotationFormatDuration, "true"),
@ -1784,7 +1809,7 @@ when required by your organization's security policy.`,
Flag: "disable-session-expiry-refresh",
Env: "CODER_DISABLE_SESSION_EXPIRY_REFRESH",
Value: &c.DisableSessionExpiryRefresh,
Value: &c.Sessions.DisableExpiryRefresh,
Group: &deploymentGroupNetworkingHTTP,
YAML: "disableSessionExpiryRefresh",
},

View File

@ -138,6 +138,9 @@ type CreateTemplateRequest struct {
// CreateWorkspaceRequest provides options for creating a new workspace.
// Either TemplateID or TemplateVersionID must be specified. They cannot both be present.
// @Description CreateWorkspaceRequest provides options for creating a new workspace.
// @Description Only one of TemplateID or TemplateVersionID can be specified, not both.
// @Description If TemplateID is specified, the active version of the template will be used.
type CreateWorkspaceRequest struct {
// TemplateID specifies which template should be used for creating the workspace.
TemplateID uuid.UUID `json:"template_id,omitempty" validate:"required_without=TemplateVersionID,excluded_with=TemplateVersionID" format:"uuid"`
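A hedged example of the mutual exclusion, assuming the `codersdk` request shape shown above (`templateID` is an assumed `uuid.UUID` of an existing template; the `required_without` and `excluded_with` validation tags enforce the rule server-side):

```go
// Valid: TemplateID alone; the template's active version is used.
req := codersdk.CreateWorkspaceRequest{
	TemplateID: templateID,
	Name:       "my-workspace",
}

// Invalid: also setting TemplateVersionID would fail request
// validation, since each field carries excluded_with for the other.
// req.TemplateVersionID = versionID
```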

View File

@ -86,9 +86,11 @@ func runTailnetAPIConnector(
func (tac *tailnetAPIConnector) manageGracefulTimeout() {
defer tac.cancelGracefulCtx()
<-tac.ctx.Done()
timer := time.NewTimer(time.Second)
defer timer.Stop()
select {
case <-tac.closed:
case <-time.After(time.Second):
case <-timer.C:
}
}
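The switch from `time.After` to an explicit `time.Timer` means the timer is stopped when the closed channel wins the select, instead of lingering until it fires. A minimal sketch of the pattern:

```go
// wait blocks until done is closed or one second elapses. Unlike a
// bare time.After, the explicit timer is stopped on the early-exit
// path, releasing its resources immediately rather than after a second.
func wait(done <-chan struct{}) {
	timer := time.NewTimer(time.Second)
	defer timer.Stop()
	select {
	case <-done:
	case <-timer.C:
	}
}
```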

View File

@ -102,6 +102,8 @@ func (*fakeTailnetConn) SetNodeCallback(func(*tailnet.Node)) {}
func (*fakeTailnetConn) SetDERPMap(*tailcfg.DERPMap) {}
func (*fakeTailnetConn) SetTunnelDestination(uuid.UUID) {}
func newFakeTailnetConn() *fakeTailnetConn {
return &fakeTailnetConn{}
}

View File

@ -1,39 +1,38 @@
# Architecture
This document provides a high level overview of Coder's architecture.
The Coder deployment model is flexible and offers various components that
platform administrators can deploy and scale depending on their use case. This
page describes possible deployments, challenges, and risks associated with them.
## Single region architecture
Learn more about our [Reference Architectures](../admin/architectures/index.md)
and platform scaling capabilities.
![Architecture Diagram](../images/architecture-single-region.png)
## Primary components
## Multi-region architecture
### coderd
![Architecture Diagram](../images/architecture-multi-region.png)
## coderd
coderd is the service created by running `coder server`. It is a thin API that
connects workspaces, provisioners and users. coderd stores its state in Postgres
and is the only service that communicates with Postgres.
_coderd_ is the service created by running `coder server`. It is a thin API that
connects workspaces, provisioners and users. _coderd_ stores its state in
Postgres and is the only service that communicates with Postgres.
It offers:
- Dashboard (UI)
- HTTP API
- Dev URLs (HTTP reverse proxy to workspaces)
- Workspace Web Applications (e.g easily access code-server)
- Workspace Web Applications (e.g. for easy access to `code-server`)
- Agent registration
## provisionerd
### provisionerd
provisionerd is the execution context for infrastructure modifying providers. At
the moment, the only provider is Terraform (running `terraform`).
_provisionerd_ is the execution context for infrastructure modifying providers.
At the moment, the only provider is Terraform (running `terraform`).
By default, the Coder server runs multiple provisioner daemons.
[External provisioners](../admin/provisioners.md) can be added for security or
scalability purposes.
## Agents
### Agents
An agent is the Coder service that runs within a user's remote workspace. It
provides a consistent interface for coderd and clients to communicate with
@ -50,9 +49,9 @@ Templates are responsible for
[creating and running agents](../templates/index.md#coder-agent) within
workspaces.
## Service Bundling
### Service Bundling
While coderd and Postgres can be orchestrated independently, our default
While _coderd_ and Postgres can be orchestrated independently, our default
installation paths bundle them all together into one system service. It's
perfectly fine to run a production deployment this way, but there are certain
situations that necessitate decomposition:
@ -61,7 +60,7 @@ situations that necessitate decomposition:
- Achieving greater availability and efficiency (horizontally scale individual
services)
## Workspaces
### Workspaces
At the highest level, a workspace is a set of cloud resources. These resources
can be VMs, Kubernetes clusters, storage buckets, or whatever else Terraform
@ -72,3 +71,329 @@ while those that don't are called _peripheral resources_.
Each resource may also be _persistent_ or _ephemeral_ depending on whether
they're destroyed on workspace stop.
## Deployment models
### Single region architecture
![Architecture Diagram](../images/architecture-single-region.png)
#### Components
This architecture consists of a single load balancer, several _coderd_ replicas,
and _Coder workspaces_ deployed in the same region.
##### Workload resources
- Deploy at least one _coderd_ replica (with provisioners) per availability
zone. High availability is recommended but not essential for small deployments.
- A single-replica deployment is a special case that can serve a
tiny/small/proof-of-concept installation on a single virtual machine. If you
are serving more than 100 users/workspaces, add more replicas.
**Coder workspace**
- For small deployments, consider a lightweight workspace runtime like the
[Sysbox](https://github.com/nestybox/sysbox) container runtime. Learn more
about how to enable
[docker-in-docker using Sysbox](https://asciinema.org/a/kkTmOxl8DhEZiM2fLZNFlYzbo?speed=2).
**HA Database**
- Monitor node status and resource utilization metrics.
- Implement robust backup and disaster recovery strategies to protect against
data loss.
##### Workload supporting resources
**Load balancer**
- Distributes and load balances traffic from agents and clients to _Coder
Server_ replicas across availability zones.
- Layer 7 load balancing. The load balancer can decrypt SSL traffic, and
re-encrypt using an internal certificate.
- Session persistence (sticky sessions) can be disabled as _coderd_ instances
are stateless.
- WebSocket and long-lived connections must be supported.
**Single sign-on**
- Integrate with existing Single Sign-On (SSO) solutions used within the
organization via the supported OAuth 2.0 or OpenID Connect standards.
- Learn more about [Authentication in Coder](../admin/auth.md).
### Multi-region architecture
![Architecture Diagram](../images/architecture-multi-region.png)
#### Components
This architecture is for globally distributed developer teams using Coder
workspaces on a daily basis. It features a single load balancer with regionally
deployed _Workspace Proxies_, several _coderd_ replicas, and _Coder workspaces_
provisioned in different regions.
Note: The _multi-region architecture_ assumes the same deployment principles as
the _single region architecture_, but extends them to a multi-region deployment
with workspace proxies. Proxies are deployed in the regions closest to
developers to offer the fastest developer experience.
##### Workload resources
**Workspace proxy**
- Workspace proxy offers developers the option to establish a fast relay
connection when accessing their workspace via SSH, a workspace application, or
port forwarding.
- Dashboard connections and API calls (e.g. _list workspaces_) are not served
over proxies.
- Proxies do not establish connections to the database.
- Proxy instances do not share authentication tokens between one another.
##### Workload supporting resources
**Proxy load balancer**
- Distributes and load balances workspace relay traffic in a single region
across availability zones.
- Layer 7 load balancing. The load balancer can decrypt SSL traffic, and
re-encrypt using an internal certificate.
- Session persistence (sticky sessions) can be disabled as _coderd_ instances
are stateless.
- WebSocket and long-lived connections must be supported.
### Multi-cloud architecture
By distributing Coder workspaces across different cloud providers, organizations
can mitigate the risk of downtime caused by provider-specific outages or
disruptions. Additionally, multi-cloud deployment enables organizations to
leverage the unique features and capabilities offered by each cloud provider,
such as region availability and pricing models.
![Architecture Diagram](../images/architecture-multi-cloud.png)
#### Components
The deployment model comprises:
- `coderd` instances deployed within a single region of the same cloud provider,
with replicas strategically distributed across availability zones.
- Workspace provisioners deployed in each cloud, communicating with `coderd`
instances.
- Workspace proxies running in the same locations as provisioners to optimize
user connections to workspaces for maximum speed.
Due to the relatively large overhead of cross-regional communication, it is not
advised to set up multi-cloud control planes. It is recommended to keep coderd
replicas and the database within the same cloud provider and region.
Note: The _multi-cloud architecture_ follows the deployment principles outlined
in the _multi-region architecture_. However, it adapts component selection based
on the specific cloud provider. Developers can initiate workspaces based on the
nearest region and technical specifications provided by the cloud providers.
##### Workload resources
**Workspace provisioner**
- _Security recommendation_: Create a long, random pre-shared key (PSK) and add
it to the regional secret store, so that local _provisionerd_ can access it.
Remember to distribute it using a safe, encrypted communication channel. The PSK
must also be added to the _coderd_ configuration.
**Workspace proxy**
- _Security recommendation_: Use `coder` CLI to create
[authentication tokens for every workspace proxy](../admin/workspace-proxies.md#requirements),
and keep them in regional secret stores. Remember to distribute them using
a safe, encrypted communication channel.
**Managed database**
- For AWS: _Amazon RDS for PostgreSQL_
- For Azure: _Azure Database for PostgreSQL - Flexible Server_
- For GCP: _Cloud SQL for PostgreSQL_
##### Workload supporting resources
**Kubernetes platform (optional)**
- For AWS: _Amazon Elastic Kubernetes Service_
- For Azure: _Azure Kubernetes Service_
- For GCP: _Google Kubernetes Engine_
See how to deploy
[Coder on Azure Kubernetes Service](https://github.com/ericpaulsen/coder-aks).
Learn more about [security requirements](../install/kubernetes.md) for deploying
Coder on Kubernetes.
**Load balancer**
- For AWS:
- _AWS Network Load Balancer_
- Level 4 load balancing
- For Kubernetes deployment: annotate service with
`service.beta.kubernetes.io/aws-load-balancer-type: "nlb"`, preserve the
client source IP with `externalTrafficPolicy: Local`
- _AWS Classic Load Balancer_
- Level 7 load balancing
- For Kubernetes deployment: set `sessionAffinity` to `None`
- For Azure:
- _Azure Load Balancer_
- Level 4 load balancing
- Azure Application Gateway
- Deploy Azure Application Gateway when more advanced traffic routing
policies are needed for Kubernetes applications.
- Take advantage of features such as WebSocket support and TLS termination
provided by Azure Application Gateway, enhancing the capabilities of
Kubernetes deployments on Azure.
- For GCP:
- _Cloud Load Balancing_ with SSL load balancer:
- Layer 4 load balancing, SSL enabled
- _Cloud Load Balancing_ with HTTPS load balancer:
- Layer 7 load balancing
- For Kubernetes deployment: annotate service (with ingress enabled) with
`kubernetes.io/ingress.class: "gce"`, leverage the `NodePort` service
type.
- Note: the HTTP load balancer rejects the DERP upgrade; Coder will fall back
to WebSockets
**Single sign-on**
- For AWS:
[AWS IAM Identity Center](https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html)
- For Azure:
[Microsoft Entra ID Sign-On](https://learn.microsoft.com/en-us/entra/identity/app-proxy/)
- For GCP:
[Google Cloud Identity Platform](https://cloud.google.com/architecture/identity/single-sign-on)
### Air-gapped architecture
The air-gapped deployment model refers to the setup of Coder's development
environment within a restricted network that lacks internet connectivity. This
deployment model is often required for organizations with
strict security policies or those operating in isolated environments, such as
government agencies or certain enterprise setups.
The key features of the air-gapped architecture include:
- _Offline installation_: Deploy workspaces without relying on an external
internet connection.
- _Isolated package/plugin repositories_: Depend on local repositories for
software installation, updates, and security patches.
- _Secure data transfer_: Enable encrypted communication channels and robust
access controls to safeguard sensitive information.
Learn more about [offline deployments](../install/offline.md) of Coder.
![Architecture Diagram](../images/architecture-air-gapped.png)
#### Components
The deployment model includes:
- _Workspace provisioners_ with direct access to self-hosted package and plugin
repositories and restricted internet access.
- _Mirror of Terraform Registry_ with multiple versions of Terraform plugins.
- _Certificate Authority_ with all TLS certificates to build secure
communication channels.
The model is compatible with various infrastructure models, enabling deployment
across multiple regions and diverse cloud platforms.
##### Workload resources
**Workspace provisioner**
- Includes Terraform binary in the container or system image.
- Checks out Terraform plugins from self-hosted _Registry_ mirror.
- Deploys workspace images stored in the self-hosted _Container Registry_.
**Coder server**
- Update checks are disabled (`CODER_UPDATE_CHECK=false`).
- Telemetry data is not collected (`CODER_TELEMETRY_ENABLE=false`).
- Direct connections are not possible; workspace traffic is relayed through
the control plane's DERP proxy.
##### Workload supporting resources
**Self-hosted Database**
- In the air-gapped deployment model, the _coderd_ instance is unable to
download Postgres binaries from the internet, so an external database must be
provided.
**Container Registry**
- Since the _Registry_ is isolated from the internet, platform engineers are
responsible for maintaining Workspace container images and conducting periodic
updates of base Docker images.
- It is recommended to keep [Dev Containers](../templates/devcontainers.md) up
to date with the latest released
[Envbuilder](https://github.com/coder/envbuilder) runtime.
**Mirror of Terraform Registry**
- Stores all necessary Terraform plugin dependencies, ensuring successful
workspace provisioning and maintenance without internet access.
- Platform engineers are responsible for periodically updating the mirrored
Terraform plugins, including
[terraform-provider-coder](https://github.com/coder/terraform-provider-coder).
**Certificate Authority**
- Manages and issues TLS certificates to facilitate secure communication
channels within the infrastructure.
### Dev Containers
Note: _Dev containers_ are at an early stage and considered experimental at the
moment.
This architecture enhances a Coder workspace with a
[development container](https://containers.dev/) setup built using the
[envbuilder](https://github.com/coder/envbuilder) project. Workspace users have
the flexibility to extend generic, base developer environments with custom,
project-oriented [features](https://containers.dev/features) without requiring
platform administrators to push altered Docker images.
Learn more about
[Dev containers support](https://coder.com/docs/v2/latest/templates/devcontainers)
in Coder.
![Architecture Diagram](../images/architecture-devcontainers.png)
#### Components
The deployment model includes:
- _Workspace_ built using a Coder template with _envbuilder_ enabled to set up
the developer environment according to the dev container spec.
- _Container Registry_ for Docker images used by _envbuilder_, maintained by
Coder platform engineers or developer productivity engineers.
Since this model is strictly focused on workspace nodes, it does not affect the
setup of regional infrastructure. It can be deployed alongside other deployment
models, in multiple regions, or across various cloud platforms.
##### Workload resources
**Coder workspace**
- Docker and Kubernetes based templates are supported.
- The `docker_container` resource uses `ghcr.io/coder/envbuilder` as the base
image.
_Envbuilder_ checks out the base Docker image from the container registry and
installs the selected features as specified in `devcontainer.json` on top of
it. It then starts the container with the developer environment.
##### Workload supporting resources
**Container Registry (optional)**
- Workspace nodes need access to the Container Registry to check out images. To
shorten the provisioning time, it is recommended to deploy registry mirrors in
the same region as the workspace nodes.

docs/api/general.md generated
View File

@ -200,7 +200,6 @@ curl -X GET http://coder-server:8080/api/v2/deployment/config \
"disable_owner_workspace_exec": true,
"disable_password_auth": true,
"disable_path_apps": true,
"disable_session_expiry_refresh": true,
"docs_url": {
"forceQuery": true,
"fragment": "string",
@ -252,8 +251,6 @@ curl -X GET http://coder-server:8080/api/v2/deployment/config \
"log_filter": ["string"],
"stackdriver": "string"
},
"max_session_expiry": 0,
"max_token_lifetime": 0,
"metrics_cache_refresh_interval": 0,
"oauth2": {
"github": {
@ -341,6 +338,11 @@ curl -X GET http://coder-server:8080/api/v2/deployment/config \
"redirect_to_access_url": true,
"scim_api_key": "string",
"secure_auth_cookie": true,
"session_lifetime": {
"default_duration": 0,
"disable_expiry_refresh": true,
"max_token_lifetime": 0
},
"ssh_keygen_algorithm": "string",
"strict_transport_security": 0,
"strict_transport_security_options": ["string"],

docs/api/schemas.md generated
View File

@ -1646,6 +1646,8 @@ AuthorizationObject can represent a "set" of objects, such as: all workspaces in
}
```
CreateWorkspaceRequest provides options for creating a new workspace. Only one of TemplateID or TemplateVersionID can be specified, not both. If TemplateID is specified, the active version of the template will be used.
### Properties
| Name | Type | Required | Restrictions | Description |
@ -1923,7 +1925,6 @@ AuthorizationObject can represent a "set" of objects, such as: all workspaces in
"disable_owner_workspace_exec": true,
"disable_password_auth": true,
"disable_path_apps": true,
"disable_session_expiry_refresh": true,
"docs_url": {
"forceQuery": true,
"fragment": "string",
@ -1975,8 +1976,6 @@ AuthorizationObject can represent a "set" of objects, such as: all workspaces in
"log_filter": ["string"],
"stackdriver": "string"
},
"max_session_expiry": 0,
"max_token_lifetime": 0,
"metrics_cache_refresh_interval": 0,
"oauth2": {
"github": {
@ -2064,6 +2063,11 @@ AuthorizationObject can represent a "set" of objects, such as: all workspaces in
"redirect_to_access_url": true,
"scim_api_key": "string",
"secure_auth_cookie": true,
"session_lifetime": {
"default_duration": 0,
"disable_expiry_refresh": true,
"max_token_lifetime": 0
},
"ssh_keygen_algorithm": "string",
"strict_transport_security": 0,
"strict_transport_security_options": ["string"],
@ -2293,7 +2297,6 @@ AuthorizationObject can represent a "set" of objects, such as: all workspaces in
"disable_owner_workspace_exec": true,
"disable_password_auth": true,
"disable_path_apps": true,
"disable_session_expiry_refresh": true,
"docs_url": {
"forceQuery": true,
"fragment": "string",
@ -2345,8 +2348,6 @@ AuthorizationObject can represent a "set" of objects, such as: all workspaces in
"log_filter": ["string"],
"stackdriver": "string"
},
"max_session_expiry": 0,
"max_token_lifetime": 0,
"metrics_cache_refresh_interval": 0,
"oauth2": {
"github": {
@ -2434,6 +2435,11 @@ AuthorizationObject can represent a "set" of objects, such as: all workspaces in
"redirect_to_access_url": true,
"scim_api_key": "string",
"secure_auth_cookie": true,
"session_lifetime": {
"default_duration": 0,
"disable_expiry_refresh": true,
"max_token_lifetime": 0
},
"ssh_keygen_algorithm": "string",
"strict_transport_security": 0,
"strict_transport_security_options": ["string"],
@ -2524,7 +2530,6 @@ AuthorizationObject can represent a "set" of objects, such as: all workspaces in
| `disable_owner_workspace_exec` | boolean | false | | |
| `disable_password_auth` | boolean | false | | |
| `disable_path_apps` | boolean | false | | |
| `disable_session_expiry_refresh` | boolean | false | | |
| `docs_url` | [serpent.URL](#serpenturl) | false | | |
| `enable_terraform_debug_mode` | boolean | false | | |
| `experiments` | array of string | false | | |
@ -2535,8 +2540,6 @@ AuthorizationObject can represent a "set" of objects, such as: all workspaces in
| `in_memory_database` | boolean | false | | |
| `job_hang_detector_interval` | integer | false | | |
| `logging` | [codersdk.LoggingConfig](#codersdkloggingconfig) | false | | |
| `max_session_expiry` | integer | false | | |
| `max_token_lifetime` | integer | false | | |
| `metrics_cache_refresh_interval` | integer | false | | |
| `oauth2` | [codersdk.OAuth2Config](#codersdkoauth2config) | false | | |
| `oidc` | [codersdk.OIDCConfig](#codersdkoidcconfig) | false | | |
@ -2552,6 +2555,7 @@ AuthorizationObject can represent a "set" of objects, such as: all workspaces in
| `redirect_to_access_url` | boolean | false | | |
| `scim_api_key` | string | false | | |
| `secure_auth_cookie` | boolean | false | | |
| `session_lifetime` | [codersdk.SessionLifetime](#codersdksessionlifetime) | false | | |
| `ssh_keygen_algorithm` | string | false | | |
| `strict_transport_security` | integer | false | | |
| `strict_transport_security_options` | array of string | false | | |
@ -4292,6 +4296,24 @@ AuthorizationObject can represent a "set" of objects, such as: all workspaces in
| `ssh` | integer | false | | |
| `vscode` | integer | false | | |
## codersdk.SessionLifetime
```json
{
"default_duration": 0,
"disable_expiry_refresh": true,
"max_token_lifetime": 0
}
```
### Properties
| Name | Type | Required | Restrictions | Description |
| ------------------------ | ------- | -------- | ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `default_duration` | integer | false | | Default duration is for api keys, not tokens. |
| `disable_expiry_refresh` | boolean | false | | Disable expiry refresh will disable automatically refreshing api keys when they are used from the api. This means the api key lifetime at creation is the lifetime of the api key. |
| `max_token_lifetime` | integer | false | | |
## codersdk.SupportConfig
```json

View File

@ -14,6 +14,11 @@ curl -X POST http://coder-server:8080/api/v2/organizations/{organization}/member
`POST /organizations/{organization}/members/{user}/workspaces`
Create a new workspace using a template. The request must
specify either the Template ID or the Template Version ID,
not both. If the Template ID is specified, the active version
of the template will be used.
> Body parameter
```json

Binary file not shown (image, 188 KiB)

docs/changelogs/v2.10.0.md Normal file
View File

@ -0,0 +1,130 @@
## Changelog
> [!NOTE]
> This is a mainline Coder release. We advise enterprise customers without a staging environment to install our [latest stable release](https://github.com/coder/coder/releases/latest) while we refine this version. Learn more about our [Release Schedule](../install/releases.md).
### BREAKING CHANGES
- Removed `max_ttl` from templates (#12644) (@Emyrk)
> Maximum Workspace Lifetime, or `MAX_TTL`, has been removed from the product in favor of Autostop Requirement. Max Lifetime was designed to automate workspace shutdowns to enable security policy enforcement, enforce routine updates, and reduce idle resource costs.
>
> If you use Maximum Lifetime in your templates, workspaces will no longer stop at the end of this timer. Instead, we advise migrating to Autostop Requirement.
>
> Autostop Requirement shares the benefits of `MAX_TTL`, but also respects user-configured quiet hours to avoid forcing shutdowns while developers are connected.
>
> We only completely deprecate features after a 2-month heads-up in the UI.
### Features
- Make agent stats' cardinality configurable (#12535) (@dannykopping)
- Upgrade tailscale fork to set TCP options for performance (#12574) (@spikecurtis)
- Add AWS IAM RDS Database auth driver (#12566) (@f0ssel)
- Support Windows containers in bootstrap script (#12662) (@kylecarbs)
- Add `workspace_id` to `workspace_build` audit logs (#12718) (@sreya)
- Make OAuth2 provider not enterprise-only (#12732) (@code-asher)
- Allow number options with monotonic validation (#12726) (@dannykopping)
- Expose workspace statuses (with details) as a prometheus metric (#12762) (@dannykopping)
- Agent: Support adjusting child process OOM scores (#12655) (@sreya)
> This opt-in configuration protects the Agent process from crashing via OOM. To prevent the agent from being killed in most scenarios, set `CODER_PROC_PRIO_MGMT=1` on your container.
- Expose HTTP debug server over tailnet API (#12582) (@johnstcn)
- Show queue position during workspace builds (#12606) (@dannykopping)
- Unhide support bundle command (#12745) (@johnstcn)
> The Coder support bundle grabs a variety of deployment health information to improve and expedite the debugging experience.
> ![Coder Support Bundle](https://raw.githubusercontent.com/coder/coder/main/docs/changelogs/images/support-bundle.png)
- Add golden tests for errors (#11588) (#12698) (@elasticspoon)
- Enforce confirmation before creating bundle (#12684) (@johnstcn)
- Add enabled experiments to telemetry (#12656) (@dannykopping)
- Export metric indicating each experiment's status (#12657) (@dannykopping)
- Add sftp to insights apps (#12675) (@mafredri)
- Add `template_usage_stats` table and rollup query (#12664) (@mafredri)
- Add `dbrollup` service to rollup insights (#12665) (@mafredri)
- Use `template_usage_stats` in `GetTemplateInsights` query (#12666) (@mafredri)
- Use `template_usage_stats` in `GetTemplateInsightsByInterval` query (#12667) (@mafredri)
- Use `template_usage_stats` in `GetTemplateAppInsights` query (#12669) (@mafredri)
- Use `template_usage_stats` in `GetUserLatencyInsights` query (#12671) (@mafredri)
- Use `template_usage_stats` in `GetUserActivityInsights` query (#12672) (@mafredri)
- Use `template_usage_stats` in `*ByTemplate` insights queries (#12668) (@mafredri)
- Add debug handlers for logs, manifest, and token to agent (#12593) (@johnstcn)
- Add linting to all examples (#12595) (@mafredri)
- Add C++ icon (#12572) (@michaelbrewer)
- Add support for `--mainline` (default) and `--stable` (#12858) (@mafredri)
- Make listening ports scrollable (#12660) (@BrunoQuaresma)
- Fetch agent network info over tailnet (#12577) (@johnstcn)
- Add client magicsock and agent prometheus metrics to support bundle (#12604) (@johnstcn)
### Bug fixes
- Server: Fix data race in TestLabelsAggregation tests (#12578) (@dannykopping)
- Dashboard: Hide actions and notifications from deleted workspaces (#12563) (@aslilac)
- VSCode: Importing api into vscode-coder (#12570) (@code-asher)
- CLI: Clean template destination path for `pull` (#12559) (@dannykopping)
- Agent: Ensure agent token is from latest build in middleware (#12443) (@f0ssel)
- CLI: Handle CLI default organization when none exists in <v2.9.0 coderd (#12594) (@Emyrk)
- Server: Separate signals for passive, active, and forced shutdown (#12358) (@kylecarbs)
- Docs: Correct typo error about minTerraformVersion (#12621) (@garylavayou)
- Docs: Correct troubleshooting links (#12608) (@dannykopping)
- Server: Prevent single replica proxies from staying unhealthy (#12641) (@deansheather)
- Database: Implicit schema in dump (#12646) (@mtojek)
- Server: Disable workspace auto-create if external auth requirements aren't met (#12538) (@aslilac)
- Server: Allow proxy version mismatch (with warning) (#12433) (@deansheather)
- Server: Disable relay if built-in DERP is disabled (#12654) (@coadler)
- Dashboard: Create workspace with optional auth providers (#12729) (@aslilac)
- Always use bash when executing web terminal tests (#12755) (@aslilac)
- Server: Nil ptr dereference when removing a license (#12785) (@coadler)
- Use latest coder/tailscale (@spikecurtis)
- Agent: remove unused token debug handler (#12602) (@johnstcn)
- CLI: Show error/hide help for unsupported subcommands (#10760) (#12624) (@elasticspoon)
- CLI: Port-forward: update workspace last_used_at (#12659) (@johnstcn)
- CLI: Fix newline escape sequence in support blurb (#12749) (@johnstcn)
- Server: Skip logging error for cancelled query in agent report stats (#12730) (@mafredri)
- Server: Add timeout to websocket waitgroup on shutdown (#12754) (@coadler)
- Server: Use insights for DAUs, simplify metricscache (#12775) (@mafredri)
- API: always write agent stats when provided (#12699) (@mafredri)
- Database: Improve data exclusion in `UpsertTemplateUsageStats` (#12764) (@mafredri)
- Database: Improve query performance of `GetTemplateAppInsights` (#12767) (@mafredri)
- Database: Improve performance of `GetTemplateInsightsByInterval` (#12773) (@mafredri)
- Database: Add FK index for `workspace_agent_scripts` (#12791) (@mafredri)
- API: Abort in-progress writes/reads when closing websocket (#12650) (@ammario)
- Update base image in lima/coder.yaml example, remove usage of deprecated LIMA_CIDATA (#12613) (@johnstcn)
- Removed hardcoded public (#12620) (@95gabor)
- API: change test to use bash script instead of binary echo (#12759) (@spikecurtis)
- Dashboard: Display not found page when pagination page is invalid (#12611) (@BrunoQuaresma)
- Dashboard: Fix and improve pending state on template editor UI (#12766) (@BrunoQuaresma)
- Also sanitize agent environment (#12615) (@johnstcn)
- Sanitize manifest for tests (#12711) (@johnstcn)
### Documentation
- Add updated architecture diagrams (#12584) (@ericpaulsen)
- Describe reference architectures (#12609) (@mtojek)
- Use scale testing utility (#12643) (@mtojek)
- Describe Coder's operational readiness (#12723) (@mtojek)
- Add guide for JFrog Xray integration (#12629) (@matifali)
- Document how to run workspace-proxy as a system service (#12810) (@michaelbrewer)
- Describe mutually exclusive create workspace template fields (#12834) (@Emyrk)
- Describe single region and multi-region deployments (#12779) (@mtojek)
- Fix coder-logstream-kube typo in deployment-logs.md (#12845) (@toshikish)
- Remove phone number, we do not offer phone support yet (#12658) (@bpmct)
### Performance improvements
- Optimize `GetWorkspaceAgentAndLatestBuildByAuthToken` query (#12809) (@mafredri)
### Tests
- Apptest was accidentally choosing ports in use (#12580) (@Emyrk)
- Ensure `RequireActiveVersion` is actually set when testing with AGPL store (#12843) (@aslilac)
- Add an E2E test for removing a group (#12844) (@aslilac)
- Enable `dbrollup` service for insights tests (#12673) (@mafredri)
- Fix TODO for increased accuracy in insights test (#12727) (@mafredri)
- Fix template name too long in TestPatchTemplateMeta (#12781) (@mafredri)
Compare: [`v2.9.0...v2.10.0`](https://github.com/coder/coder/compare/v2.9.0...v2.10.0)
## Container image
- `docker pull ghcr.io/coder/coder:v2.10.0`
## Install/upgrade
Refer to our docs to [install](https://coder.com/docs/v2/latest/install) or [upgrade](https://coder.com/docs/v2/latest/admin/upgrade) Coder, or use a release asset below.

View File

@ -4,8 +4,7 @@ Frequently asked questions on Coder OSS and Enterprise deployments. These FAQs
come from our community and enterprise customers, feel free to
[contribute to this page](https://github.com/coder/coder/edit/main/docs/faqs.md).
<details style="margin-bottom: 28px;">
<summary style="font-size: larger; font-weight: bold;">How do I add an enterprise license?</summary>
### How do I add an enterprise license?
Visit https://coder.com/trial or contact
[sales@coder.com](mailto:sales@coder.com?subject=License) to get a v2 enterprise
@ -32,10 +31,7 @@ If the license is in a file:
coder licenses add -f <path/filename>
```
</details>
<details style="margin-bottom: 28px;">
<summary style="font-size: larger; font-weight: bold;">I'm experiencing networking issues, so want to disable Tailscale, STUN, Direct connections and force use of websockets</summary>
### I'm experiencing networking issues, so I want to disable Tailscale, STUN, and direct connections and force the use of websockets
The primary developer use case is a local IDE connecting over SSH to a Coder
workspace.
@ -62,19 +58,13 @@ troubleshooting.
| [`CODER_DERP_SERVER_STUN_ADDRESSES`](https://coder.com/docs/v2/latest/cli/server#--derp-server-stun-addresses) | `"disable"` | Disables STUN |
| [`CODER_DERP_FORCE_WEBSOCKETS`](https://coder.com/docs/v2/latest/cli/server#--derp-force-websockets) | `true` | Forces websockets over Tailscale DERP |
</details>
<details style="margin-bottom: 28px;">
<summary style="font-size: larger; font-weight: bold;">How do I configure NGINX as the reverse proxy in front of Coder?</summary>
### How do I configure NGINX as the reverse proxy in front of Coder?
[This doc](https://github.com/coder/coder/tree/main/examples/web-server/nginx#configure-nginx)
in our repo explains in detail how to configure NGINX with Coder so that our
Tailscale Wireguard networking functions properly.
</details>
<details style="margin-bottom: 28px;">
<summary style="font-size: larger; font-weight: bold;">How do I hide some of the default icons in a workspace like VS Code Desktop, Terminal, SSH, Ports?</summary>
### How do I hide some of the default icons in a workspace like VS Code Desktop, Terminal, SSH, Ports?
The visibility of Coder apps is configurable in the template. To change the
default (shows all), add this block inside the
@ -93,10 +83,7 @@ of a template and configure as needed:
This example will hide all built-in coder_app icons except the web terminal.
</details>
<details style="margin-bottom: 28px;">
<summary style="font-size: larger; font-weight: bold;">I want to allow code-server to be accessible by other users in my deployment.</summary>
### I want to allow code-server to be accessible by other users in my deployment.
> It is **not** recommended to share a web IDE, but if required, the following
> deployment environment variable settings are required.
@ -126,10 +113,7 @@ resource "coder_app" "code-server" {
}
```
</details>
<details style="margin-bottom: 28px;">
<summary style="font-size: larger; font-weight: bold;">I installed Coder and created a workspace but the icons do not load.</summary>
### I installed Coder and created a workspace but the icons do not load.
An important concept to understand is that Coder creates workspaces which have
an agent that must be able to reach the `coder server`.
@ -153,10 +137,7 @@ coder server --access-url http://localhost:3000 --address 0.0.0.0:3000
> Even `coder server` which creates a reverse proxy, will let you use
> http://localhost to access Coder from a browser.
</details>
<details style="margin-bottom: 28px;">
<summary style="font-size: larger; font-weight: bold;">I updated a template, and an existing workspace based on that template fails to start.</summary>
### I updated a template, and an existing workspace based on that template fails to start.
When updating a template, be aware of potential issues with input variables. For
example, if a template prompts users to choose options like a
@ -176,10 +157,7 @@ potentially saving the workspace from a failed status.
coder update --always-prompt <workspace name>
```
</details>
<details style="margin-bottom: 28px;">
<summary style="font-size: larger; font-weight: bold;">I'm running coder on a VM with systemd but latest release installed isn't showing up.</summary>
### I'm running coder on a VM with systemd but the latest installed release isn't showing up.
Take, for example, a Coder deployment on a VM with a 2 shared vCPU systemd
service. In this scenario, it's necessary to reload the daemon and then restart
@ -194,10 +172,7 @@ sudo systemctl daemon-reload
sudo systemctl restart coder.service
```
</details>
<details style="margin-bottom: 28px;">
<summary style="font-size: larger; font-weight: bold;">I'm using the built-in Postgres database and forgot admin email I set up.</summary>
### I'm using the built-in Postgres database and forgot the admin email I set up.
1. Run the `coder server` command below to retrieve the `psql` connection URL
which includes the database user and password.
@ -210,10 +185,7 @@ coder server postgres-builtin-url
psql "postgres://coder@localhost:53737/coder?sslmode=disable&password=I2S...pTk"
```
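From here, a hedged sketch of locating the admin account (the `users` table and its columns are assumed from Coder's built-in schema):

```shell
# Use the connection URL printed by `coder server postgres-builtin-url`,
# then list the accounts (table and column names assumed from Coder's schema).
psql "<connection-url-from-step-1>" -c "SELECT id, username, email FROM users;"
```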
</details>
<details style="margin-bottom: 28px;">
<summary style="font-size: larger; font-weight: bold;">How to find out Coder's latest Terraform provider version?</summary>
### How do I find out Coder's latest Terraform provider version?
[Coder is on the HashiCorp's Terraform registry](https://registry.terraform.io/providers/coder/coder/latest).
Check this frequently to make sure you are on the latest version.
@ -222,10 +194,7 @@ Sometimes the version changes: `resource` configurations may become
deprecated or new ones may be added, and you will see warnings or errors when
creating and pushing templates.
</details>
<details style="margin-bottom: 28px;">
<summary style="font-size: larger; font-weight: bold;">How can I set up TLS for my deployment and not create a signed certificate?</summary>
### How can I set up TLS for my deployment and not create a signed certificate?
Caddy is an easy-to-configure reverse proxy that also automatically creates
certificates from Let's Encrypt.
@ -250,10 +219,7 @@ coder.example.com {
}
```
</details>
<details style="margin-bottom: 28px;">
<summary style="font-size: larger; font-weight: bold;">I'm using Caddy as my reverse proxy in front of Coder. How do I set up a wildcard domain for port forwarding?</summary>
### I'm using Caddy as my reverse proxy in front of Coder. How do I set up a wildcard domain for port forwarding?
Caddy requires your DNS provider's credentials to create wildcard certificates.
This involves building the Caddy binary
@ -283,10 +249,7 @@ The updated Caddyfile configuration will look like this:
}
```
</details>
<details style="margin-bottom: 28px;">
<summary style="font-size: larger; font-weight: bold;">Can I use local or remote Terraform Modules in Coder templates?</summary>
### Can I use local or remote Terraform Modules in Coder templates?
One way is to reference a Terraform module from a GitHub repo to avoid
duplication and then just extend it or pass template-specific
@ -328,10 +291,8 @@ References:
- [Public Github Issue 6117](https://github.com/coder/coder/issues/6117)
- [Public Github Issue 5677](https://github.com/coder/coder/issues/5677)
- [Coder docs: Templates/Change Management](https://coder.com/docs/v2/latest/templates/change-management)
</details>
<details style="margin-bottom: 28px;">
<summary style="font-size: larger; font-weight: bold;">Can I run Coder in an air-gapped or offline mode? (no Internet)?</summary>
### Can I run Coder in an air-gapped or offline mode (no Internet)?
Yes, Coder can be deployed in air-gapped or offline mode.
https://coder.com/docs/v2/latest/install/offline
@ -345,10 +306,7 @@ defaults to Google's STUN servers, so you can either create your STUN server in
your network or disable STUN and force all traffic through the control plane's
DERP proxy.
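As a hedged sketch, both options use the STUN flag documented in the FAQ above (the internal STUN hostname is hypothetical):

```shell
# Option 1: point Coder at a STUN server inside your network (hostname hypothetical).
export CODER_DERP_SERVER_STUN_ADDRESSES="stun.internal.example.com:3478"

# Option 2: disable STUN and relay all traffic through the control plane's DERP proxy.
export CODER_DERP_SERVER_STUN_ADDRESSES="disable"
```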
</details>
<details style="margin-bottom: 28px;">
<summary style="font-size: larger; font-weight: bold;">Create a randomized computer_name for an Azure VM</summary>
### Create a randomized computer_name for an Azure VM
Azure VMs have a 15-character limit for the `computer_name`, which can lead to
duplicate name errors.
@ -363,10 +321,7 @@ locals {
}
```
</details>
<details style="margin-bottom: 28px;">
<summary style="font-size: larger; font-weight: bold;">Do you have example JetBrains Gateway templates?</summary>
### Do you have example JetBrains Gateway templates?
In August 2023, JetBrains certified the Coder plugin, signifying enhanced
stability and reliability.
@ -387,10 +342,8 @@ open the IDE.
- [IntelliJ IDEA](https://github.com/sharkymark/v2-templates/tree/main/pod-idea)
- [IntelliJ IDEA with Icon](https://github.com/sharkymark/v2-templates/tree/main/pod-idea-icon)
</details>
<details style="margin-bottom: 28px;">
<summary style="font-size: larger; font-weight: bold;">What options do I have for adding VS Code extensions into code-server, VS Code Desktop or Microsoft's Code Server?</summary>
### What options do I have for adding VS Code extensions into code-server, VS Code Desktop or Microsoft's Code Server?
Coder has an open-source project called
[`code-marketplace`](https://github.com/coder/code-marketplace) which is a
@ -416,10 +369,7 @@ https://github.com/sharkymark/v2-templates/blob/main/vs-code-server/main.tf
> Note: these are example templates with no SLAs and are not guaranteed
> long-term support.
</details>
<details style="margin-bottom: 28px;">
<summary style="font-size: larger; font-weight: bold;">I want to run Docker for my workspaces but not install Docker Desktop.</summary>
### I want to run Docker for my workspaces but not install Docker Desktop.
[Colima](https://github.com/abiosoft/colima) is a Docker Desktop alternative.
@ -454,10 +404,7 @@ Colima will show the path to the docker socket so we have a
[community template](https://github.com/sharkymark/v2-templates/tree/main/docker-code-server)
that prompts the Coder admin to enter the docker socket as a Terraform variable.
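As a brief, hedged sketch of standing up Colima and finding that socket path (resource values arbitrary):

```shell
# Start a Colima VM to back the Docker runtime (CPU/memory values arbitrary).
colima start --cpu 4 --memory 8

# The Docker CLI context created by Colima shows the socket path to hand to the template.
docker context ls
```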
</details>
<details style="margin-bottom: 28px;">
<summary style="font-size: larger; font-weight: bold;">How to make a `coder_app` optional?</summary>
### How do I make a `coder_app` optional?
An example use case is letting the user decide whether they want a browser-based
IDE like code-server when creating the workspace.
@ -515,10 +462,7 @@ resource "coder_app" "code-server" {
}
```
</details>
<details style="margin-bottom: 28px;">
<summary style="font-size: larger; font-weight: bold;">Why am I getting this "remote host doesn't meet VS Code Server's prerequisites" error when opening up VSCode remote in a Linux environment?</summary>
### Why am I getting this "remote host doesn't meet VS Code Server's prerequisites" error when opening up VSCode remote in a Linux environment?
![VS Code Server prerequisite](https://github.com/coder/coder/assets/10648092/150c5996-18b1-4fae-afd0-be2b386a3239)
@ -529,10 +473,7 @@ image or supported OS for the VS Code Server. For more information on OS
prerequisites for Linux, please look at the VSCode docs.
https://code.visualstudio.com/docs/remote/linux#_local-linux-prerequisites
</details>
<details style="margin-bottom: 28px;">
<summary style="font-size: larger; font-weight: bold;">How can I resolve disconnects when connected to Coder via JetBrains Gateway?</summary>
### How can I resolve disconnects when connected to Coder via JetBrains Gateway?
If your JetBrains IDE is disconnected for a long period of time due to a network
change (for example turning off a VPN), you may find that the IDE will not
@ -560,5 +501,3 @@ Note that the JetBrains Gateway configuration blocks for each host in your SSH
config file will be overwritten by the JetBrains Gateway client when it
re-authenticates to your Coder deployment, so you must add the above config as a
separate block rather than adding it to any existing ones.
</details>

View File

@ -0,0 +1,87 @@
# Generate and upload a Support Bundle to Coder Support
When you engage with Coder support to diagnose an issue with your deployment,
you may be asked to generate and upload a "Support Bundle" for offline analysis.
This document explains the contents of a support bundle and the steps to submit
a support bundle to Coder staff.
## What is a Support Bundle?
A support bundle is an archive containing a snapshot of information about your
Coder deployment.
It contains information about the workspace, the template it uses, running
agents in the workspace, and other detailed information useful for
troubleshooting.
It is primarily intended for troubleshooting connectivity issues to workspaces,
but can be useful for diagnosing other issues as well.
**While we attempt to redact sensitive information from support bundles, they
may contain information deemed sensitive by your organization and should be
treated as such.**
A brief overview of all files contained in the bundle is provided below:
> Note: detailed descriptions of all the information available in the bundle are
> out of scope, as support bundles are primarily intended for internal use.
| Filename | Description |
| --------------------------------- | ------------------------------------------------------------------------------------------------ |
| `agent/agent.json` | The agent used to connect to the workspace with environment variables stripped. |
| `agent/agent_magicsock.html` | The contents of the HTTP debug endpoint of the agent's Tailscale connection. |
| `agent/client_magicsock.html` | The contents of the HTTP debug endpoint of the client's Tailscale connection. |
| `agent/listening_ports.json` | The listening ports detected by the selected agent running in the workspace. |
| `agent/logs.txt` | The logs of the selected agent running in the workspace. |
| `agent/manifest.json` | The manifest of the selected agent with environment variables stripped. |
| `agent/startup_logs.txt` | Startup logs of the workspace agent. |
| `agent/prometheus.txt` | The contents of the agent's Prometheus endpoint. |
| `cli_logs.txt` | Logs from running the `coder support bundle` command. |
| `deployment/buildinfo.json` | Coder version and build information. |
| `deployment/config.json` | Deployment [configuration](../api/general.md#get-deployment-config), with secret values removed. |
| `deployment/experiments.json` | Any [experiments](../cli/server.md#experiments) currently enabled for the deployment. |
| `deployment/health.json` | A snapshot of the [health status](../admin/healthcheck.md) of the deployment. |
| `logs.txt` | Logs from the `codersdk.Client` used to generate the bundle. |
| `network/connection_info.json` | Information used by workspace agents to connect to Coder (DERP map, etc.). |
| `network/coordinator_debug.html` | Peers currently connected to each Coder instance and the tunnels established between peers. |
| `network/netcheck.json` | Results of running `coder netcheck` locally. |
| `network/tailnet_debug.html` | Tailnet coordinators, their heartbeat ages, connected peers, and tunnels. |
| `workspace/build_logs.txt` | Build logs of the selected workspace. |
| `workspace/workspace.json` | Details of the selected workspace. |
| `workspace/parameters.json` | Build parameters of the selected workspace. |
| `workspace/template.json` | The template currently in use by the selected workspace. |
| `workspace/template_file.zip` | The source code of the template currently in use by the selected workspace. |
| `workspace/template_version.json` | The template version currently in use by the selected workspace. |
## How do I generate a Support Bundle?
1. Ensure your deployment is up and running. Generating a support bundle
requires the Coder deployment to be available.
2. Ensure you have the Coder CLI installed on a local machine. See
   [installation](../install/index.md) for steps on how to do this.
> Note: It is recommended to generate a support bundle from a location
> experiencing workspace connectivity issues.
3. Ensure you are [logged in](../cli/login.md#login) to your Coder deployment as
a user with the Owner privilege.
4. Run `coder support bundle [owner/workspace]`, and respond `yes` to the
prompt. The support bundle will be generated in the current directory with
the filename `coder-support-$TIMESTAMP.zip`.
> While support bundles can be generated without a running workspace, it is
> recommended to specify one to maximize troubleshooting information.
5. (Recommended) Extract the support bundle and review its contents, redacting
any information you deem necessary.
6. Coder staff will provide you with a link where you can upload the bundle along
   with any other necessary supporting files.
> Note: It is helpful to leave an informative message regarding the nature of the
> supporting files.
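Putting steps 4 and 5 together, a typical session might look like the following sketch (the workspace name is hypothetical):

```shell
# Generate a bundle scoped to a specific workspace (name hypothetical).
coder support bundle alice/dev

# Review the contents and redact anything sensitive before uploading.
unzip -l coder-support-*.zip
```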
Coder support will then review the information you provided and respond to you
with next steps.

View File

@ -19,7 +19,7 @@ using Coder's [JFrog Xray Integration](https://github.com/coder/coder-xray).
- A self-hosted JFrog Platform instance.
- Kubernetes workspaces running on Coder.
## Deploying the Coder Xray Integration
## Deploying the Coder - JFrog Xray Integration
1. Create a JFrog Platform
[Access Token](https://jfrog.com/help/r/jfrog-platform-administration-documentation/access-tokens)
@ -37,7 +37,7 @@ kubectl create secret generic coder-token --from-literal=coder-token='<token>'
kubectl create secret generic jfrog-token --from-literal=user='<user>' --from-literal=token='<token>'
```
4. Deploy the Coder Xray integration.
4. Deploy the Coder - JFrog Xray integration.
```bash
helm repo add coder-xray https://helm.coder.com/coder-xray
@ -69,4 +69,4 @@ image = "<ARTIFACTORY_URL>/<REPO>/<IMAGE>:<TAG>"
> use it in the `imagePullSecrets` field of the kubernetes pod. See this
> [guide](./image-pull-secret.md) for more information.
![Coder Xray Integration](../images/guides/xray-integration/example.png)
![JFrog Xray Integration](../images/guides/xray-integration/example.png)

Binary file not shown.

After

Width:  |  Height:  |  Size: 92 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 78 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 189 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 179 KiB

After

Width:  |  Height:  |  Size: 173 KiB

View File

@ -2,6 +2,9 @@
A single CLI (`coder`) is used for both the Coder server and the client.
We support two release channels: mainline and stable - read the
[Releases](./releases.md) page to learn more about which best suits your team.
There are several ways to install Coder. For production deployments with 50+
users, we recommend [installing on Kubernetes](./kubernetes.md). Otherwise, you
can install Coder on your local machine or on a VM:

View File

@ -7,6 +7,15 @@ You'll also want to install the
[latest version of Coder](https://github.com/coder/coder/releases/latest)
locally in order to log in and manage templates.
> Coder supports two release channels: mainline for the true latest version of
> Coder, and stable for large enterprise deployments. Before installing your
> control plane via Helm, please read the [Releases](./releases.md) document to
> identify the best-suited release for your team, then specify the version using
> Helm's `--version` flag.
> The version flags for both stable and mainline are automatically filled in on
> this page.
## Install Coder with Helm
1. Create a namespace for Coder, such as `coder`:
@ -112,10 +121,22 @@ locally in order to log in and manage templates.
1. Run the following command to install the chart in your cluster.
For the **mainline** Coder release:
```shell
helm install coder coder-v2/coder \
--namespace coder \
--values values.yaml
--values values.yaml \
--version 2.10.0
```
For the **stable** Coder release:
```shell
helm install coder coder-v2/coder \
--namespace coder \
--values values.yaml \
--version 2.9.1
```
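If you want to confirm which chart versions are available before pinning one, standard Helm tooling can list them; a quick sketch:

```shell
# List published versions of the Coder chart (output format depends on your Helm version).
helm search repo coder-v2/coder --versions
```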
You can watch Coder start up by running `kubectl get pods -n coder`. Once

docs/install/releases.md Normal file
View File

@ -0,0 +1,56 @@
# Releases
Coder releases are cut directly from main in our
[GitHub repository](https://github.com/coder/coder) on the first Tuesday of each month.
We recommend enterprise customers test the compatibility of new releases with
their infrastructure on a staging environment before upgrading a production
deployment.
We support two release channels:
[mainline](https://github.com/coder/coder/releases/tag/v2.10.0) for the edge version of Coder
and [stable](https://github.com/coder/coder/releases/latest) for those with a
lower tolerance for faults. We field our mainline releases publicly for two weeks
before promoting them to stable.
### Mainline releases
- Intended for customers with a staging environment
- Gives earliest access to new features
- May include minor bugs
- Receives all bugfixes and security patches
### Stable releases
- Safest upgrade/installation path
- May not include the latest features
- Receives fixes for security vulnerabilities and major bugs
> Note: We provide fixes for major security vulnerabilities (CVEs) in the three
> most recent versions of Coder.
## Installing stable
When installing Coder, we generally advise specifying the desired version from
our GitHub [releases page](https://github.com/coder/coder/releases).
You can also use our `install.sh` script with the `stable` flag to install the
latest stable release:
```shell
curl -fsSL https://coder.com/install.sh | sh -s -- --stable
```
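To pin a specific release instead, the script appears to accept an explicit version flag; a hedged sketch (verify against the script's help output first):

```shell
# Pin a specific release (the --version flag is assumed; check `install.sh --help`).
curl -fsSL https://coder.com/install.sh | sh -s -- --version 2.9.1
```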
Best practices for installing Coder can be found on our [install](./index.md)
pages.
## Release schedule
| Release name | Date | Status |
| ------------ | ------------------ | ---------------- |
| 2.7.0 | January 01, 2024 | Not Supported |
| 2.8.0        | February 06, 2024  | Security Support |
| 2.9.0 | March 07, 2024 | Stable |
| 2.10.0 | April 03, 2024 | Mainline |
| 2.11.0 | May 07, 2024 | Not Released |
| 2.12.0 | June 04, 2024 | Not Released |

View File

@ -55,6 +55,11 @@
"title": "1-click install",
"description": "Install Coder on a cloud provider with a single click",
"path": "./install/1-click.md"
},
{
"title": "Releases",
"description": "Coder Release Channels and Cadence",
"path": "./install/releases.md"
}
]
},
@ -230,9 +235,9 @@
"icon_path": "./images/icons/docker.svg"
},
{
"title": "Devcontainers",
"description": "Use devcontainers in workspaces",
"path": "./templates/devcontainers.md",
"title": "Dev Containers",
"description": "Use Dev Containers in workspaces",
"path": "./templates/dev-containers.md",
"state": "alpha"
},
{
@ -1070,6 +1075,11 @@
"path": "./guides/index.md",
"icon_path": "./images/icons/notes.svg",
"children": [
{
"title": "Generate a Support Bundle",
"description": "Generate and upload a Support Bundle to Coder Support",
"path": "./guides/support-bundle.md"
},
{
"title": "Configuring Okta",
"description": "Custom claims/scopes with Okta for group/role sync",
@ -1101,7 +1111,7 @@
"path": "./guides/azure-federation.md"
},
{
"title": "Scanning Coder Workspaces with Xray",
"title": "Scanning Coder Workspaces with JFrog Xray",
"description": "Integrate Coder with JFrog Xray",
"path": "./guides/xray-integration.md"
}

View File

@ -33,7 +33,7 @@ serviceAccount:
## Installation
Install the `coder-kubestream-logs` helm chart on the cluster where the
Install the `coder-logstream-kube` helm chart on the cluster where the
deployment is running.
```shell

View File

@ -1,17 +1,17 @@
# Devcontainers (alpha)
# Dev Containers (alpha)
[Devcontainers](https://containers.dev) are an open source specification for
defining development environments.
[Development containers](https://containers.dev) are an open source
specification for defining development environments.
[envbuilder](https://github.com/coder/envbuilder) is an open source project by
Coder that runs devcontainers via Coder templates and your underlying
Coder that runs dev containers via Coder templates and your underlying
infrastructure. It can run on Docker or Kubernetes.
There are several benefits to adding a devcontainer-compatible template to
Coder:
- Drop-in migration from Codespaces (or any existing repositories that use
devcontainers)
- Drop-in migration from Codespaces (or any existing repositories that use dev
containers)
- Easier to start projects from Coder. Just create a new workspace then pick a
starter devcontainer.
- Developer teams can "bring their own image." No need for platform teams to
@ -47,7 +47,7 @@ information.
## Caching
To improve build times, devcontainers can be cached. Refer to the
To improve build times, dev containers can be cached. Refer to the
[envbuilder documentation](https://github.com/coder/envbuilder/) for more
information.

View File

@ -8,7 +8,7 @@ FROM ubuntu:jammy AS go
RUN apt-get update && apt-get install --yes curl gcc
# Install Go manually, so that we can control the version
ARG GO_VERSION=1.21.5
ARG GO_VERSION=1.21.9
RUN mkdir --parents /usr/local/go
# Boring Go is needed to build FIPS-compliant binaries.

View File

@ -148,7 +148,7 @@ func New(ctx context.Context, options *Options) (_ *API, err error) {
DB: options.Database,
OAuth2Configs: oauthConfigs,
RedirectToLogin: false,
DisableSessionExpiryRefresh: options.DeploymentValues.DisableSessionExpiryRefresh.Value(),
DisableSessionExpiryRefresh: options.DeploymentValues.Sessions.DisableExpiryRefresh.Value(),
Optional: false,
SessionTokenFunc: nil, // Default behavior
PostAuthAdditionalHeadersFunc: options.PostAuthAdditionalHeadersFunc,
@ -157,7 +157,7 @@ func New(ctx context.Context, options *Options) (_ *API, err error) {
DB: options.Database,
OAuth2Configs: oauthConfigs,
RedirectToLogin: false,
DisableSessionExpiryRefresh: options.DeploymentValues.DisableSessionExpiryRefresh.Value(),
DisableSessionExpiryRefresh: options.DeploymentValues.Sessions.DisableExpiryRefresh.Value(),
Optional: true,
SessionTokenFunc: nil, // Default behavior
PostAuthAdditionalHeadersFunc: options.PostAuthAdditionalHeadersFunc,
@ -630,7 +630,13 @@ func (api *API) updateEntitlements(ctx context.Context) error {
if initial, changed, enabled := featureChanged(codersdk.FeatureHighAvailability); shouldUpdate(initial, changed, enabled) {
var coordinator agpltailnet.Coordinator
if enabled {
// If HA is enabled, but the database is in-memory, we can't actually
// run HA and the PG coordinator. So throw a log line, and continue to use
// the in memory AGPL coordinator.
if enabled && api.DeploymentValues.InMemoryDatabase.Value() {
api.Logger.Warn(ctx, "high availability is enabled, but cannot be configured due to the database being set to in-memory")
}
if enabled && !api.DeploymentValues.InMemoryDatabase.Value() {
haCoordinator, err := tailnet.NewPGCoord(api.ctx, api.Logger, api.Pubsub, api.Database)
if err != nil {
api.Logger.Error(ctx, "unable to set up high availability coordinator", slog.Error(err))

View File

@ -658,7 +658,7 @@ func (api *API) workspaceProxyRegister(rw http.ResponseWriter, r *http.Request)
if err != nil {
return xerrors.Errorf("insert replica: %w", err)
}
} else if err != nil {
} else {
return xerrors.Errorf("get replica: %w", err)
}

View File

@ -913,6 +913,44 @@ func TestWorkspaceAutobuild(t *testing.T) {
ws = coderdtest.MustWorkspace(t, client, ws.ID)
require.Equal(t, version2.ID, ws.LatestBuild.TemplateVersionID)
})
t.Run("TemplateDoesNotAllowUserAutostop", func(t *testing.T) {
t.Parallel()
client := coderdtest.New(t, &coderdtest.Options{
IncludeProvisionerDaemon: true,
TemplateScheduleStore: schedule.NewEnterpriseTemplateScheduleStore(agplUserQuietHoursScheduleStore()),
})
user := coderdtest.CreateFirstUser(t, client)
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil)
templateTTL := 24 * time.Hour.Milliseconds()
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID, func(ctr *codersdk.CreateTemplateRequest) {
ctr.DefaultTTLMillis = ptr.Ref(templateTTL)
ctr.AllowUserAutostop = ptr.Ref(false)
})
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
workspace := coderdtest.CreateWorkspace(t, client, user.OrganizationID, template.ID, func(cwr *codersdk.CreateWorkspaceRequest) {
cwr.TTLMillis = nil // ensure that no default TTL is set
})
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
// TTL should be set by the template
require.Equal(t, false, template.AllowUserAutostop)
require.Equal(t, templateTTL, template.DefaultTTLMillis)
require.Equal(t, templateTTL, *workspace.TTLMillis)
// Change the template's default TTL and refetch the workspace
templateTTL = 72 * time.Hour.Milliseconds()
ctx := testutil.Context(t, testutil.WaitShort)
template = coderdtest.UpdateTemplateMeta(t, client, template.ID, codersdk.UpdateTemplateMeta{
DefaultTTLMillis: templateTTL,
})
workspace, err := client.Workspace(ctx, workspace.ID)
require.NoError(t, err)
// Ensure that the new value is reflected in the template and workspace
require.Equal(t, templateTTL, template.DefaultTTLMillis)
require.Equal(t, templateTTL, *workspace.TTLMillis)
})
}
// Blocked by autostart requirements

View File

@ -231,6 +231,17 @@ func (c *pgCoord) Coordinate(
logger := c.logger.With(slog.F("peer_id", id))
reqs := make(chan *proto.CoordinateRequest, agpl.RequestBufferSize)
resps := make(chan *proto.CoordinateResponse, agpl.ResponseBufferSize)
if !c.querier.isHealthy() {
// If the coordinator is unhealthy, we don't want to hook this Coordinate call up to the
// binder, as that can cause an unnecessary call to DeleteTailnetPeer when the connIO is
// closed. Instead, we just close the response channel and bail out.
// c.f. https://github.com/coder/coder/issues/12923
c.logger.Info(ctx, "closed incoming coordinate call while unhealthy",
slog.F("peer_id", id),
)
close(resps)
return reqs, resps
}
cIO := newConnIO(c.ctx, ctx, logger, c.bindings, c.tunnelerCh, reqs, resps, id, name, a)
err := agpl.SendCtx(c.ctx, c.newConnections, cIO)
if err != nil {
@ -842,7 +853,12 @@ func (q *querier) newConn(c *connIO) {
defer q.mu.Unlock()
if !q.healthy {
err := c.Close()
q.logger.Info(q.ctx, "closed incoming connection while unhealthy",
// This can only happen during a narrow window where we were healthy
// when pgCoord checked before accepting the connection, but became
// unhealthy by the time we got around to processing it. Seeing a small
// number of these logs is not worrying, but a large number probably
// indicates something is amiss.
q.logger.Warn(q.ctx, "closed incoming connection while unhealthy",
slog.Error(err),
slog.F("peer_id", c.UniqueID()),
)
@ -865,6 +881,12 @@ func (q *querier) newConn(c *connIO) {
})
}
func (q *querier) isHealthy() bool {
q.mu.Lock()
defer q.mu.Unlock()
return q.healthy
}
func (q *querier) cleanupConn(c *connIO) {
logger := q.logger.With(slog.F("peer_id", c.UniqueID()))
q.mu.Lock()

Some files were not shown because too many files have changed in this diff.