diff --git a/.github/actions/setup-go/action.yaml b/.github/actions/setup-go/action.yaml index d4c67cb7dd..819bc27e0e 100644 --- a/.github/actions/setup-go/action.yaml +++ b/.github/actions/setup-go/action.yaml @@ -4,7 +4,7 @@ description: | inputs: version: description: "The Go version to use." - default: "1.21.5" + default: "1.21.9" runs: using: "composite" steps: diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 084d4c5983..8aaaa74398 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -228,7 +228,7 @@ jobs: with: # This doesn't need caching. It's super fast anyways! cache: false - go-version: 1.21.5 + go-version: 1.21.9 - name: Install shfmt run: go install mvdan.cc/sh/v3/cmd/shfmt@v3.7.0 @@ -432,6 +432,15 @@ jobs: needs: changes if: needs.changes.outputs.go == 'true' || needs.changes.outputs.ts == 'true' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main' timeout-minutes: 20 + strategy: + fail-fast: false + matrix: + variant: + - enterprise: false + name: test-e2e + - enterprise: true + name: test-e2e-enterprise + name: ${{ matrix.variant.name }} steps: - name: Checkout uses: actions/checkout@v4 @@ -444,52 +453,40 @@ jobs: - name: Setup Go uses: ./.github/actions/setup-go - - name: Setup Terraform - uses: ./.github/actions/setup-tf + # Assume that the checked-in versions are up-to-date + - run: make gen/mark-fresh + name: make gen - - name: go install tools - run: | - go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.30 - go install storj.io/drpc/cmd/protoc-gen-go-drpc@v0.0.33 - go install golang.org/x/tools/cmd/goimports@latest - go install github.com/mikefarah/yq/v4@v4.30.6 - go install go.uber.org/mock/mockgen@v0.4.0 - - - name: Install Protoc - run: | - mkdir -p /tmp/proto - pushd /tmp/proto - curl -L -o protoc.zip https://github.com/protocolbuffers/protobuf/releases/download/v23.3/protoc-23.3-linux-x86_64.zip - unzip protoc.zip - cp -r ./bin/* /usr/local/bin - cp -r ./include 
/usr/local/bin/include - popd - - - name: Build - run: | - make -B site/out/index.html + - run: pnpm build + working-directory: site - run: pnpm playwright:install working-directory: site # Run tests that don't require an enterprise license without an enterprise license - run: pnpm playwright:test --forbid-only --workers 1 + if: ${{ !matrix.variant.enterprise }} env: DEBUG: pw:api working-directory: site # Run all of the tests with an enterprise license - run: pnpm playwright:test --forbid-only --workers 1 + if: ${{ matrix.variant.enterprise }} env: DEBUG: pw:api CODER_E2E_ENTERPRISE_LICENSE: ${{ secrets.CODER_E2E_ENTERPRISE_LICENSE }} + CODER_E2E_REQUIRE_ENTERPRISE_TESTS: "1" working-directory: site + # Temporarily allow these to fail so that I can gather data about which + # tests are failing. + continue-on-error: true - name: Upload Playwright Failed Tests if: always() && github.actor != 'dependabot[bot]' && runner.os == 'Linux' && !github.event.pull_request.head.repo.fork uses: actions/upload-artifact@v4 with: - name: failed-test-videos + name: failed-test-videos${{ matrix.variant.enterprise && '-enterprise' || '-agpl' }} path: ./site/test-results/**/*.webm retention-days: 7 @@ -497,7 +494,7 @@ jobs: if: always() && github.actor != 'dependabot[bot]' && runner.os == 'Linux' && !github.event.pull_request.head.repo.fork uses: actions/upload-artifact@v4 with: - name: debug-pprof-dumps + name: debug-pprof-dumps${{ matrix.variant.enterprise && '-enterprise' || '-agpl' }} path: ./site/test-results/**/debug-pprof-*.txt retention-days: 7 diff --git a/Makefile b/Makefile index 84a97323cd..e588279384 100644 --- a/Makefile +++ b/Makefile @@ -382,9 +382,9 @@ install: build/coder_$(VERSION)_$(GOOS)_$(GOARCH)$(GOOS_BIN_EXT) cp "$<" "$$output_file" .PHONY: install -BOLD := $(shell tput bold) -GREEN := $(shell tput setaf 2) -RESET := $(shell tput sgr0) +BOLD := $(shell tput bold 2>/dev/null) +GREEN := $(shell tput setaf 2 2>/dev/null) +RESET := $(shell tput sgr0 2>/dev/null) 
fmt: fmt/eslint fmt/prettier fmt/terraform fmt/shfmt fmt/go .PHONY: fmt diff --git a/agent/agent.go b/agent/agent.go index 0cb2aa2aca..abbe9c8ea4 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -62,7 +62,10 @@ const ( // EnvProcPrioMgmt determines whether we attempt to manage // process CPU and OOM Killer priority. -const EnvProcPrioMgmt = "CODER_PROC_PRIO_MGMT" +const ( + EnvProcPrioMgmt = "CODER_PROC_PRIO_MGMT" + EnvProcOOMScore = "CODER_PROC_OOM_SCORE" +) type Options struct { Filesystem afero.Fs @@ -1575,10 +1578,31 @@ func (a *agent) manageProcessPriorityUntilGracefulShutdown() { a.processManagementTick = ticker.C } + oomScore := unsetOOMScore + if scoreStr, ok := a.environmentVariables[EnvProcOOMScore]; ok { + score, err := strconv.Atoi(strings.TrimSpace(scoreStr)) + if err == nil && score >= -1000 && score <= 1000 { + oomScore = score + } else { + a.logger.Error(ctx, "invalid oom score", + slog.F("min_value", -1000), + slog.F("max_value", 1000), + slog.F("value", scoreStr), + ) + } + } + + debouncer := &logDebouncer{ + logger: a.logger, + messages: map[string]time.Time{}, + interval: time.Minute, + } + for { - procs, err := a.manageProcessPriority(ctx) + procs, err := a.manageProcessPriority(ctx, debouncer, oomScore) + // Avoid spamming the logs too often. if err != nil { - a.logger.Error(ctx, "manage process priority", + debouncer.Error(ctx, "manage process priority", slog.Error(err), ) } @@ -1594,27 +1618,34 @@ func (a *agent) manageProcessPriorityUntilGracefulShutdown() { } } -func (a *agent) manageProcessPriority(ctx context.Context) ([]*agentproc.Process, error) { +// unsetOOMScore is set to an invalid OOM score to imply an unset value. +const unsetOOMScore = 1001 + +func (a *agent) manageProcessPriority(ctx context.Context, debouncer *logDebouncer, oomScore int) ([]*agentproc.Process, error) { const ( niceness = 10 ) + // We fetch the agent score each time because it's possible someone updates the + // value after it is started. 
+ agentScore, err := a.getAgentOOMScore() + if err != nil { + agentScore = unsetOOMScore + } + if oomScore == unsetOOMScore && agentScore != unsetOOMScore { + // If the child score has not been explicitly specified we should + // set it to a score relative to the agent score. + oomScore = childOOMScore(agentScore) + } + procs, err := agentproc.List(a.filesystem, a.syscaller) if err != nil { return nil, xerrors.Errorf("list: %w", err) } - var ( - modProcs = []*agentproc.Process{} - logger slog.Logger - ) + modProcs := []*agentproc.Process{} for _, proc := range procs { - logger = a.logger.With( - slog.F("cmd", proc.Cmd()), - slog.F("pid", proc.PID), - ) - containsFn := func(e string) bool { contains := strings.Contains(proc.Cmd(), e) return contains @@ -1622,14 +1653,16 @@ func (a *agent) manageProcessPriority(ctx context.Context) ([]*agentproc.Process // If the process is prioritized we should adjust // it's oom_score_adj and avoid lowering its niceness. - if slices.ContainsFunc[[]string, string](prioritizedProcs, containsFn) { + if slices.ContainsFunc(prioritizedProcs, containsFn) { continue } - score, err := proc.Niceness(a.syscaller) - if err != nil { - logger.Warn(ctx, "unable to get proc niceness", - slog.Error(err), + score, niceErr := proc.Niceness(a.syscaller) + if niceErr != nil && !xerrors.Is(niceErr, os.ErrPermission) { + debouncer.Warn(ctx, "unable to get proc niceness", + slog.F("cmd", proc.Cmd()), + slog.F("pid", proc.PID), + slog.Error(niceErr), ) continue } @@ -1643,15 +1676,31 @@ func (a *agent) manageProcessPriority(ctx context.Context) ([]*agentproc.Process continue } - err = proc.SetNiceness(a.syscaller, niceness) - if err != nil { - logger.Warn(ctx, "unable to set proc niceness", - slog.F("niceness", niceness), - slog.Error(err), - ) - continue + if niceErr == nil { + err := proc.SetNiceness(a.syscaller, niceness) + if err != nil && !xerrors.Is(err, os.ErrPermission) { + debouncer.Warn(ctx, "unable to set proc niceness", + slog.F("cmd", 
proc.Cmd()), + slog.F("pid", proc.PID), + slog.F("niceness", niceness), + slog.Error(err), + ) + } } + // If the oom score is valid and it's not already set and isn't a custom value set by another process then it's ok to update it. + if oomScore != unsetOOMScore && oomScore != proc.OOMScoreAdj && !isCustomOOMScore(agentScore, proc) { + oomScoreStr := strconv.Itoa(oomScore) + err := afero.WriteFile(a.filesystem, fmt.Sprintf("/proc/%d/oom_score_adj", proc.PID), []byte(oomScoreStr), 0o644) + if err != nil && !xerrors.Is(err, os.ErrPermission) { + debouncer.Warn(ctx, "unable to set oom_score_adj", + slog.F("cmd", proc.Cmd()), + slog.F("pid", proc.PID), + slog.F("score", oomScoreStr), + slog.Error(err), + ) + } + } modProcs = append(modProcs, proc) } return modProcs, nil @@ -2005,3 +2054,77 @@ func PrometheusMetricsHandler(prometheusRegistry *prometheus.Registry, logger sl } }) } + +// childOOMScore returns the oom_score_adj for a child process. It is based +// on the oom_score_adj of the agent process. +func childOOMScore(agentScore int) int { + // If the agent has a negative oom_score_adj, we set the child to 0 + // so it's treated like every other process. + if agentScore < 0 { + return 0 + } + + // If the agent is already almost at the maximum then set it to the max. + if agentScore >= 998 { + return 1000 + } + + // If the agent oom_score_adj is >=0, we set the child to slightly + // less than the maximum. If users want a different score they set it + // directly. + return 998 +} + +func (a *agent) getAgentOOMScore() (int, error) { + scoreStr, err := afero.ReadFile(a.filesystem, "/proc/self/oom_score_adj") + if err != nil { + return 0, xerrors.Errorf("read file: %w", err) + } + + score, err := strconv.Atoi(strings.TrimSpace(string(scoreStr))) + if err != nil { + return 0, xerrors.Errorf("parse int: %w", err) + } + + return score, nil +} + +// isCustomOOMScore checks to see if the oom_score_adj is not a value that would +// originate from an agent-spawned process. 
+func isCustomOOMScore(agentScore int, process *agentproc.Process) bool { + score := process.OOMScoreAdj + return agentScore != score && score != 1000 && score != 0 && score != 998 +} + +// logDebouncer skips writing a log for a particular message if +// it's been emitted within the given interval duration. +// It's a shoddy implementation used in one spot that should be replaced at +// some point. +type logDebouncer struct { + logger slog.Logger + messages map[string]time.Time + interval time.Duration +} + +func (l *logDebouncer) Warn(ctx context.Context, msg string, fields ...any) { + l.log(ctx, slog.LevelWarn, msg, fields...) +} + +func (l *logDebouncer) Error(ctx context.Context, msg string, fields ...any) { + l.log(ctx, slog.LevelError, msg, fields...) +} + +func (l *logDebouncer) log(ctx context.Context, level slog.Level, msg string, fields ...any) { + // This (bad) implementation assumes you wouldn't reuse the same msg + // for different levels. + if last, ok := l.messages[msg]; ok && time.Since(last) < l.interval { + return + } + switch level { + case slog.LevelWarn: + l.logger.Warn(ctx, msg, fields...) + case slog.LevelError: + l.logger.Error(ctx, msg, fields...) + } + l.messages[msg] = time.Now() +} diff --git a/agent/agent_test.go b/agent/agent_test.go index 2813d45125..45ebf7b709 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -2529,11 +2529,11 @@ func TestAgent_ManageProcessPriority(t *testing.T) { logger = slog.Make(sloghuman.Sink(io.Discard)) ) + requireFileWrite(t, fs, "/proc/self/oom_score_adj", "-500") + // Create some processes. for i := 0; i < 4; i++ { - // Create a prioritized process. This process should - // have it's oom_score_adj set to -500 and its nice - // score should be untouched. + // Create a prioritized process. 
var proc agentproc.Process if i == 0 { proc = agentproctest.GenerateProcess(t, fs, @@ -2551,8 +2551,8 @@ func TestAgent_ManageProcessPriority(t *testing.T) { }, ) - syscaller.EXPECT().SetPriority(proc.PID, 10).Return(nil) syscaller.EXPECT().GetPriority(proc.PID).Return(20, nil) + syscaller.EXPECT().SetPriority(proc.PID, 10).Return(nil) } syscaller.EXPECT(). Kill(proc.PID, syscall.Signal(0)). @@ -2571,6 +2571,9 @@ func TestAgent_ManageProcessPriority(t *testing.T) { }) actualProcs := <-modProcs require.Len(t, actualProcs, len(expectedProcs)-1) + for _, proc := range actualProcs { + requireFileEquals(t, fs, fmt.Sprintf("/proc/%d/oom_score_adj", proc.PID), "0") + } }) t.Run("IgnoreCustomNice", func(t *testing.T) { @@ -2589,8 +2592,11 @@ func TestAgent_ManageProcessPriority(t *testing.T) { logger = slog.Make(sloghuman.Sink(io.Discard)) ) + err := afero.WriteFile(fs, "/proc/self/oom_score_adj", []byte("0"), 0o644) + require.NoError(t, err) + // Create some processes. - for i := 0; i < 2; i++ { + for i := 0; i < 3; i++ { proc := agentproctest.GenerateProcess(t, fs) syscaller.EXPECT(). Kill(proc.PID, syscall.Signal(0)). @@ -2618,7 +2624,59 @@ func TestAgent_ManageProcessPriority(t *testing.T) { }) actualProcs := <-modProcs // We should ignore the process with a custom nice score. 
- require.Len(t, actualProcs, 1) + require.Len(t, actualProcs, 2) + for _, proc := range actualProcs { + _, ok := expectedProcs[proc.PID] + require.True(t, ok) + requireFileEquals(t, fs, fmt.Sprintf("/proc/%d/oom_score_adj", proc.PID), "998") + } + }) + + t.Run("CustomOOMScore", func(t *testing.T) { + t.Parallel() + + if runtime.GOOS != "linux" { + t.Skip("Skipping non-linux environment") + } + + var ( + fs = afero.NewMemMapFs() + ticker = make(chan time.Time) + syscaller = agentproctest.NewMockSyscaller(gomock.NewController(t)) + modProcs = make(chan []*agentproc.Process) + logger = slog.Make(sloghuman.Sink(io.Discard)) + ) + + err := afero.WriteFile(fs, "/proc/self/oom_score_adj", []byte("0"), 0o644) + require.NoError(t, err) + + // Create some processes. + for i := 0; i < 3; i++ { + proc := agentproctest.GenerateProcess(t, fs) + syscaller.EXPECT(). + Kill(proc.PID, syscall.Signal(0)). + Return(nil) + syscaller.EXPECT().GetPriority(proc.PID).Return(20, nil) + syscaller.EXPECT().SetPriority(proc.PID, 10).Return(nil) + } + + _, _, _, _, _ = setupAgent(t, agentsdk.Manifest{}, 0, func(c *agenttest.Client, o *agent.Options) { + o.Syscaller = syscaller + o.ModifiedProcesses = modProcs + o.EnvironmentVariables = map[string]string{ + agent.EnvProcPrioMgmt: "1", + agent.EnvProcOOMScore: "-567", + } + o.Filesystem = fs + o.Logger = logger + o.ProcessManagementTick = ticker + }) + actualProcs := <-modProcs + // We should ignore the process with a custom nice score. 
+ require.Len(t, actualProcs, 3) + for _, proc := range actualProcs { + requireFileEquals(t, fs, fmt.Sprintf("/proc/%d/oom_score_adj", proc.PID), "-567") + } }) t.Run("DisabledByDefault", func(t *testing.T) { @@ -2739,3 +2797,17 @@ func requireEcho(t *testing.T, conn net.Conn) { require.NoError(t, err) require.Equal(t, "test", string(b)) } + +func requireFileWrite(t testing.TB, fs afero.Fs, fp, data string) { + t.Helper() + err := afero.WriteFile(fs, fp, []byte(data), 0o600) + require.NoError(t, err) +} + +func requireFileEquals(t testing.TB, fs afero.Fs, fp, expect string) { + t.Helper() + actual, err := afero.ReadFile(fs, fp) + require.NoError(t, err) + + require.Equal(t, expect, string(actual)) +} diff --git a/agent/agentproc/agentproctest/proc.go b/agent/agentproc/agentproctest/proc.go index c36e04ec1c..4fa1c698b5 100644 --- a/agent/agentproc/agentproctest/proc.go +++ b/agent/agentproc/agentproctest/proc.go @@ -2,6 +2,7 @@ package agentproctest import ( "fmt" + "strconv" "testing" "github.com/spf13/afero" @@ -29,8 +30,9 @@ func GenerateProcess(t *testing.T, fs afero.Fs, muts ...func(*agentproc.Process) cmdline := fmt.Sprintf("%s\x00%s\x00%s", arg1, arg2, arg3) process := agentproc.Process{ - CmdLine: cmdline, - PID: int32(pid), + CmdLine: cmdline, + PID: int32(pid), + OOMScoreAdj: 0, } for _, mut := range muts { @@ -45,5 +47,9 @@ func GenerateProcess(t *testing.T, fs afero.Fs, muts ...func(*agentproc.Process) err = afero.WriteFile(fs, fmt.Sprintf("%s/cmdline", process.Dir), []byte(process.CmdLine), 0o444) require.NoError(t, err) + score := strconv.Itoa(process.OOMScoreAdj) + err = afero.WriteFile(fs, fmt.Sprintf("%s/oom_score_adj", process.Dir), []byte(score), 0o444) + require.NoError(t, err) + return process } diff --git a/agent/agentproc/proc_unix.go b/agent/agentproc/proc_unix.go index f52caed52e..2eeb7d5a22 100644 --- a/agent/agentproc/proc_unix.go +++ b/agent/agentproc/proc_unix.go @@ -5,6 +5,7 @@ package agentproc import ( "errors" + "os" "path/filepath" 
"strconv" "strings" @@ -50,10 +51,26 @@ func List(fs afero.Fs, syscaller Syscaller) ([]*Process, error) { } return nil, xerrors.Errorf("read cmdline: %w", err) } + + oomScore, err := afero.ReadFile(fs, filepath.Join(defaultProcDir, entry, "oom_score_adj")) + if err != nil { + if xerrors.Is(err, os.ErrPermission) { + continue + } + + return nil, xerrors.Errorf("read oom_score_adj: %w", err) + } + + oom, err := strconv.Atoi(strings.TrimSpace(string(oomScore))) + if err != nil { + return nil, xerrors.Errorf("convert oom score: %w", err) + } + processes = append(processes, &Process{ - PID: int32(pid), - CmdLine: string(cmdline), - Dir: filepath.Join(defaultProcDir, entry), + PID: int32(pid), + CmdLine: string(cmdline), + Dir: filepath.Join(defaultProcDir, entry), + OOMScoreAdj: oom, }) } diff --git a/agent/agentproc/syscaller.go b/agent/agentproc/syscaller.go index 25dc6cfd54..fba3bf32ce 100644 --- a/agent/agentproc/syscaller.go +++ b/agent/agentproc/syscaller.go @@ -14,7 +14,8 @@ type Syscaller interface { const defaultProcDir = "/proc" type Process struct { - Dir string - CmdLine string - PID int32 + Dir string + CmdLine string + PID int32 + OOMScoreAdj int } diff --git a/agent/stats_internal_test.go b/agent/stats_internal_test.go index bfd6a3436d..57b21a655a 100644 --- a/agent/stats_internal_test.go +++ b/agent/stats_internal_test.go @@ -1,7 +1,10 @@ package agent import ( + "bytes" "context" + "encoding/json" + "io" "net/netip" "sync" "testing" @@ -14,6 +17,7 @@ import ( "tailscale.com/types/netlogtype" "cdr.dev/slog" + "cdr.dev/slog/sloggers/slogjson" "cdr.dev/slog/sloggers/slogtest" "github.com/coder/coder/v2/agent/proto" "github.com/coder/coder/v2/testutil" @@ -210,3 +214,58 @@ func newFakeStatsDest() *fakeStatsDest { resps: make(chan *proto.UpdateStatsResponse), } } + +func Test_logDebouncer(t *testing.T) { + t.Parallel() + + var ( + buf bytes.Buffer + logger = slog.Make(slogjson.Sink(&buf)) + ctx = context.Background() + ) + + debouncer := &logDebouncer{ + 
logger: logger, + messages: map[string]time.Time{}, + interval: time.Minute, + } + + fields := map[string]interface{}{ + "field_1": float64(1), + "field_2": "2", + } + + debouncer.Error(ctx, "my message", "field_1", 1, "field_2", "2") + debouncer.Warn(ctx, "another message", "field_1", 1, "field_2", "2") + // Shouldn't log this. + debouncer.Warn(ctx, "another message", "field_1", 1, "field_2", "2") + + require.Len(t, debouncer.messages, 2) + + type entry struct { + Msg string `json:"msg"` + Level string `json:"level"` + Fields map[string]interface{} `json:"fields"` + } + + assertLog := func(msg string, level string, fields map[string]interface{}) { + line, err := buf.ReadString('\n') + require.NoError(t, err) + + var e entry + err = json.Unmarshal([]byte(line), &e) + require.NoError(t, err) + require.Equal(t, msg, e.Msg) + require.Equal(t, level, e.Level) + require.Equal(t, fields, e.Fields) + } + assertLog("my message", "ERROR", fields) + assertLog("another message", "WARN", fields) + + debouncer.messages["another message"] = time.Now().Add(-2 * time.Minute) + debouncer.Warn(ctx, "another message", "field_1", 1, "field_2", "2") + assertLog("another message", "WARN", fields) + // Assert nothing else was written. 
+ _, err := buf.ReadString('\n') + require.ErrorIs(t, err, io.EOF) +} diff --git a/cli/agent.go b/cli/agent.go index aaef3805e6..1f91f1c98b 100644 --- a/cli/agent.go +++ b/cli/agent.go @@ -283,6 +283,9 @@ func (r *RootCmd) workspaceAgent() *serpent.Command { if v, ok := os.LookupEnv(agent.EnvProcPrioMgmt); ok { environmentVariables[agent.EnvProcPrioMgmt] = v } + if v, ok := os.LookupEnv(agent.EnvProcOOMScore); ok { + environmentVariables[agent.EnvProcOOMScore] = v + } agnt := agent.New(agent.Options{ Client: client, diff --git a/cli/root.go b/cli/root.go index 83367169df..d9407cf217 100644 --- a/cli/root.go +++ b/cli/root.go @@ -1084,10 +1084,23 @@ func formatCoderSDKError(from string, err *codersdk.Error, opts *formatOpts) str _, _ = str.WriteString("\n") } + // The main error message _, _ = str.WriteString(pretty.Sprint(headLineStyle(), err.Message)) + + // Validation errors. + if len(err.Validations) > 0 { + _, _ = str.WriteString("\n") + _, _ = str.WriteString(pretty.Sprint(tailLineStyle(), fmt.Sprintf("%d validation error(s) found", len(err.Validations)))) + for _, e := range err.Validations { + _, _ = str.WriteString("\n\t") + _, _ = str.WriteString(pretty.Sprint(cliui.DefaultStyles.Field, e.Field)) + _, _ = str.WriteString(pretty.Sprintf(cliui.DefaultStyles.Warn, ": %s", e.Detail)) + } + } + if err.Helper != "" { _, _ = str.WriteString("\n") - _, _ = str.WriteString(pretty.Sprint(tailLineStyle(), err.Helper)) + _, _ = str.WriteString(pretty.Sprintf(tailLineStyle(), "Suggestion: %s", err.Helper)) } // By default we do not show the Detail with the helper. 
if opts.Verbose || (err.Helper == "" && err.Detail != "") { diff --git a/cli/server.go b/cli/server.go index f2178d470a..d278dbea35 100644 --- a/cli/server.go +++ b/cli/server.go @@ -209,7 +209,7 @@ func enablePrometheus( } afterCtx(ctx, closeUsersFunc) - closeWorkspacesFunc, err := prometheusmetrics.Workspaces(ctx, options.PrometheusRegistry, options.Database, 0) + closeWorkspacesFunc, err := prometheusmetrics.Workspaces(ctx, options.Logger.Named("workspaces_metrics"), options.PrometheusRegistry, options.Database, 0) if err != nil { return nil, xerrors.Errorf("register workspaces prometheus metric: %w", err) } @@ -792,6 +792,9 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd. return err } + // This should be output before the logs start streaming. + cliui.Infof(inv.Stdout, "\n==> Logs will stream in below (press ctrl+c to gracefully exit):") + if vals.Telemetry.Enable { gitAuth := make([]telemetry.GitAuth, 0) // TODO: @@ -1025,8 +1028,6 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd. } }() - cliui.Infof(inv.Stdout, "\n==> Logs will stream in below (press ctrl+c to gracefully exit):") - // Updates the systemd status from activating to activated. _, err = daemon.SdNotify(false, daemon.SdNotifyReady) if err != nil { diff --git a/cli/server_test.go b/cli/server_test.go index 7842f9e62b..065131fd97 100644 --- a/cli/server_test.go +++ b/cli/server_test.go @@ -973,7 +973,6 @@ func TestServer(t *testing.T) { scanner := bufio.NewScanner(res.Body) hasActiveUsers := false - hasWorkspaces := false for scanner.Scan() { // This metric is manually registered to be tracked in the server. That's // why we test it's tracked here. 
@@ -981,10 +980,6 @@ func TestServer(t *testing.T) { hasActiveUsers = true continue } - if strings.HasPrefix(scanner.Text(), "coderd_api_workspace_latest_build_total") { - hasWorkspaces = true - continue - } if strings.HasPrefix(scanner.Text(), "coderd_db_query_latencies_seconds") { t.Fatal("db metrics should not be tracked when --prometheus-collect-db-metrics is not enabled") } @@ -992,7 +987,6 @@ func TestServer(t *testing.T) { } require.NoError(t, scanner.Err()) require.True(t, hasActiveUsers) - require.True(t, hasWorkspaces) }) t.Run("DBMetricsEnabled", func(t *testing.T) { diff --git a/cli/support.go b/cli/support.go index 2e87b01479..f2f962a358 100644 --- a/cli/support.go +++ b/cli/support.go @@ -13,6 +13,7 @@ import ( "text/tabwriter" "time" + "github.com/google/uuid" "golang.org/x/xerrors" "cdr.dev/slog" @@ -114,32 +115,41 @@ func (r *RootCmd) supportBundle() *serpent.Command { client.URL = u } + var ( + wsID uuid.UUID + agtID uuid.UUID + ) + if len(inv.Args) == 0 { - return xerrors.Errorf("must specify workspace name") - } - ws, err := namedWorkspace(inv.Context(), client, inv.Args[0]) - if err != nil { - return xerrors.Errorf("invalid workspace: %w", err) - } - cliLog.Debug(inv.Context(), "found workspace", - slog.F("workspace_name", ws.Name), - slog.F("workspace_id", ws.ID), - ) + cliLog.Warn(inv.Context(), "no workspace specified") + _, _ = fmt.Fprintln(inv.Stderr, "Warning: no workspace specified. 
This will result in incomplete information.") + } else { + ws, err := namedWorkspace(inv.Context(), client, inv.Args[0]) + if err != nil { + return xerrors.Errorf("invalid workspace: %w", err) + } + cliLog.Debug(inv.Context(), "found workspace", + slog.F("workspace_name", ws.Name), + slog.F("workspace_id", ws.ID), + ) + wsID = ws.ID + agentName := "" + if len(inv.Args) > 1 { + agentName = inv.Args[1] + } - agentName := "" - if len(inv.Args) > 1 { - agentName = inv.Args[1] + agt, found := findAgent(agentName, ws.LatestBuild.Resources) + if !found { + cliLog.Warn(inv.Context(), "could not find agent in workspace", slog.F("agent_name", agentName)) + } else { + cliLog.Debug(inv.Context(), "found workspace agent", + slog.F("agent_name", agt.Name), + slog.F("agent_id", agt.ID), + ) + agtID = agt.ID + } } - agt, found := findAgent(agentName, ws.LatestBuild.Resources) - if !found { - return xerrors.Errorf("could not find agent named %q for workspace", agentName) - } - cliLog.Debug(inv.Context(), "found workspace agent", - slog.F("agent_name", agt.Name), - slog.F("agent_id", agt.ID), - ) - if outputPath == "" { cwd, err := filepath.Abs(".") if err != nil { @@ -165,8 +175,8 @@ func (r *RootCmd) supportBundle() *serpent.Command { Client: client, // Support adds a sink so we don't need to supply one ourselves. 
Log: clientLog, - WorkspaceID: ws.ID, - AgentID: agt.ID, + WorkspaceID: wsID, + AgentID: agtID, } bun, err := support.Run(inv.Context(), &deps) @@ -222,20 +232,21 @@ func findAgent(agentName string, haystack []codersdk.WorkspaceResource) (*coders func writeBundle(src *support.Bundle, dest *zip.Writer) error { // We JSON-encode the following: for k, v := range map[string]any{ - "deployment/buildinfo.json": src.Deployment.BuildInfo, - "deployment/config.json": src.Deployment.Config, - "deployment/experiments.json": src.Deployment.Experiments, - "deployment/health.json": src.Deployment.HealthReport, - "network/netcheck.json": src.Network.Netcheck, - "workspace/workspace.json": src.Workspace.Workspace, "agent/agent.json": src.Agent.Agent, "agent/listening_ports.json": src.Agent.ListeningPorts, "agent/manifest.json": src.Agent.Manifest, "agent/peer_diagnostics.json": src.Agent.PeerDiagnostics, "agent/ping_result.json": src.Agent.PingResult, + "deployment/buildinfo.json": src.Deployment.BuildInfo, + "deployment/config.json": src.Deployment.Config, + "deployment/experiments.json": src.Deployment.Experiments, + "deployment/health.json": src.Deployment.HealthReport, + "network/connection_info.json": src.Network.ConnectionInfo, + "network/netcheck.json": src.Network.Netcheck, "workspace/template.json": src.Workspace.Template, "workspace/template_version.json": src.Workspace.TemplateVersion, "workspace/parameters.json": src.Workspace.Parameters, + "workspace/workspace.json": src.Workspace.Workspace, } { f, err := dest.Create(k) if err != nil { @@ -255,17 +266,17 @@ func writeBundle(src *support.Bundle, dest *zip.Writer) error { // The below we just write as we have them: for k, v := range map[string]string{ - "network/coordinator_debug.html": src.Network.CoordinatorDebug, - "network/tailnet_debug.html": src.Network.TailnetDebug, - "workspace/build_logs.txt": humanizeBuildLogs(src.Workspace.BuildLogs), "agent/logs.txt": string(src.Agent.Logs), "agent/agent_magicsock.html": 
string(src.Agent.AgentMagicsockHTML), "agent/client_magicsock.html": string(src.Agent.ClientMagicsockHTML), "agent/startup_logs.txt": humanizeAgentLogs(src.Agent.StartupLogs), "agent/prometheus.txt": string(src.Agent.Prometheus), - "workspace/template_file.zip": string(templateVersionBytes), - "logs.txt": strings.Join(src.Logs, "\n"), "cli_logs.txt": string(src.CLILogs), + "logs.txt": strings.Join(src.Logs, "\n"), + "network/coordinator_debug.html": src.Network.CoordinatorDebug, + "network/tailnet_debug.html": src.Network.TailnetDebug, + "workspace/build_logs.txt": humanizeBuildLogs(src.Workspace.BuildLogs), + "workspace/template_file.zip": string(templateVersionBytes), } { f, err := dest.Create(k) if err != nil { diff --git a/cli/support_test.go b/cli/support_test.go index 7f2fce53e4..d9bee0fb2f 100644 --- a/cli/support_test.go +++ b/cli/support_test.go @@ -23,6 +23,7 @@ import ( "github.com/coder/coder/v2/coderd/database" "github.com/coder/coder/v2/coderd/database/dbfake" "github.com/coder/coder/v2/coderd/database/dbtime" + "github.com/coder/coder/v2/coderd/healthcheck/derphealth" "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/codersdk/agentsdk" "github.com/coder/coder/v2/codersdk/healthsdk" @@ -95,33 +96,50 @@ func TestSupportBundle(t *testing.T) { clitest.SetupConfig(t, client, root) err = inv.Run() require.NoError(t, err) - assertBundleContents(t, path, secretValue) + assertBundleContents(t, path, true, true, []string{secretValue}) }) t.Run("NoWorkspace", func(t *testing.T) { t.Parallel() - client := coderdtest.New(t, nil) + var dc codersdk.DeploymentConfig + secretValue := uuid.NewString() + seedSecretDeploymentOptions(t, &dc, secretValue) + client := coderdtest.New(t, &coderdtest.Options{ + DeploymentValues: dc.Values, + }) _ = coderdtest.CreateFirstUser(t, client) - inv, root := clitest.New(t, "support", "bundle", "--yes") + + d := t.TempDir() + path := filepath.Join(d, "bundle.zip") + inv, root := clitest.New(t, "support", "bundle", 
"--output-file", path, "--yes") //nolint: gocritic // requires owner privilege clitest.SetupConfig(t, client, root) err := inv.Run() - require.ErrorContains(t, err, "must specify workspace name") + require.NoError(t, err) + assertBundleContents(t, path, false, false, []string{secretValue}) }) t.Run("NoAgent", func(t *testing.T) { t.Parallel() - client, db := coderdtest.NewWithDatabase(t, nil) + var dc codersdk.DeploymentConfig + secretValue := uuid.NewString() + seedSecretDeploymentOptions(t, &dc, secretValue) + client, db := coderdtest.NewWithDatabase(t, &coderdtest.Options{ + DeploymentValues: dc.Values, + }) admin := coderdtest.CreateFirstUser(t, client) r := dbfake.WorkspaceBuild(t, db, database.Workspace{ OrganizationID: admin.OrganizationID, OwnerID: admin.UserID, }).Do() // without agent! - inv, root := clitest.New(t, "support", "bundle", r.Workspace.Name, "--yes") + d := t.TempDir() + path := filepath.Join(d, "bundle.zip") + inv, root := clitest.New(t, "support", "bundle", r.Workspace.Name, "--output-file", path, "--yes") //nolint: gocritic // requires owner privilege clitest.SetupConfig(t, client, root) err := inv.Run() - require.ErrorContains(t, err, "could not find agent") + require.NoError(t, err) + assertBundleContents(t, path, true, false, []string{secretValue}) }) t.Run("NoPrivilege", func(t *testing.T) { @@ -140,7 +158,8 @@ func TestSupportBundle(t *testing.T) { }) } -func assertBundleContents(t *testing.T, path string, badValues ...string) { +// nolint:revive // It's a control flag, but this is just a test. 
+func assertBundleContents(t *testing.T, path string, wantWorkspace bool, wantAgent bool, badValues []string) { t.Helper() r, err := zip.OpenReader(path) require.NoError(t, err, "open zip file") @@ -164,6 +183,10 @@ func assertBundleContents(t *testing.T, path string, badValues ...string) { var v healthsdk.HealthcheckReport decodeJSONFromZip(t, f, &v) require.NotEmpty(t, v, "health report should not be empty") + case "network/connection_info.json": + var v workspacesdk.AgentConnectionInfo + decodeJSONFromZip(t, f, &v) + require.NotEmpty(t, v, "agent connection info should not be empty") case "network/coordinator_debug.html": bs := readBytesFromZip(t, f) require.NotEmpty(t, bs, "coordinator debug should not be empty") @@ -171,66 +194,130 @@ func assertBundleContents(t *testing.T, path string, badValues ...string) { bs := readBytesFromZip(t, f) require.NotEmpty(t, bs, "tailnet debug should not be empty") case "network/netcheck.json": - var v workspacesdk.AgentConnectionInfo + var v derphealth.Report decodeJSONFromZip(t, f, &v) - require.NotEmpty(t, v, "connection info should not be empty") + require.NotEmpty(t, v, "netcheck should not be empty") case "workspace/workspace.json": var v codersdk.Workspace decodeJSONFromZip(t, f, &v) + if !wantWorkspace { + require.Empty(t, v, "expected workspace to be empty") + continue + } require.NotEmpty(t, v, "workspace should not be empty") case "workspace/build_logs.txt": bs := readBytesFromZip(t, f) + if !wantWorkspace || !wantAgent { + require.Empty(t, bs, "expected workspace build logs to be empty") + continue + } require.Contains(t, string(bs), "provision done") - case "agent/agent.json": - var v codersdk.WorkspaceAgent - decodeJSONFromZip(t, f, &v) - require.NotEmpty(t, v, "agent should not be empty") - case "agent/listening_ports.json": - var v codersdk.WorkspaceAgentListeningPortsResponse - decodeJSONFromZip(t, f, &v) - require.NotEmpty(t, v, "agent listening ports should not be empty") - case "agent/logs.txt": - bs := 
readBytesFromZip(t, f) - require.NotEmpty(t, bs, "logs should not be empty") - case "agent/agent_magicsock.html": - bs := readBytesFromZip(t, f) - require.NotEmpty(t, bs, "agent magicsock should not be empty") - case "agent/client_magicsock.html": - bs := readBytesFromZip(t, f) - require.NotEmpty(t, bs, "client magicsock should not be empty") - case "agent/manifest.json": - var v agentsdk.Manifest - decodeJSONFromZip(t, f, &v) - require.NotEmpty(t, v, "agent manifest should not be empty") - case "agent/peer_diagnostics.json": - var v *tailnet.PeerDiagnostics - decodeJSONFromZip(t, f, &v) - require.NotEmpty(t, v, "peer diagnostics should not be empty") - case "agent/ping_result.json": - var v *ipnstate.PingResult - decodeJSONFromZip(t, f, &v) - require.NotEmpty(t, v, "ping result should not be empty") - case "agent/prometheus.txt": - bs := readBytesFromZip(t, f) - require.NotEmpty(t, bs, "agent prometheus metrics should not be empty") - case "agent/startup_logs.txt": - bs := readBytesFromZip(t, f) - require.Contains(t, string(bs), "started up") case "workspace/template.json": var v codersdk.Template decodeJSONFromZip(t, f, &v) + if !wantWorkspace { + require.Empty(t, v, "expected workspace template to be empty") + continue + } require.NotEmpty(t, v, "workspace template should not be empty") case "workspace/template_version.json": var v codersdk.TemplateVersion decodeJSONFromZip(t, f, &v) + if !wantWorkspace { + require.Empty(t, v, "expected workspace template version to be empty") + continue + } require.NotEmpty(t, v, "workspace template version should not be empty") case "workspace/parameters.json": var v []codersdk.WorkspaceBuildParameter decodeJSONFromZip(t, f, &v) + if !wantWorkspace { + require.Empty(t, v, "expected workspace parameters to be empty") + continue + } require.NotNil(t, v, "workspace parameters should not be nil") case "workspace/template_file.zip": bs := readBytesFromZip(t, f) + if !wantWorkspace { + require.Empty(t, bs, "expected template file to 
be empty") + continue + } require.NotNil(t, bs, "template file should not be nil") + case "agent/agent.json": + var v codersdk.WorkspaceAgent + decodeJSONFromZip(t, f, &v) + if !wantAgent { + require.Empty(t, v, "expected agent to be empty") + continue + } + require.NotEmpty(t, v, "agent should not be empty") + case "agent/listening_ports.json": + var v codersdk.WorkspaceAgentListeningPortsResponse + decodeJSONFromZip(t, f, &v) + if !wantAgent { + require.Empty(t, v, "expected agent listening ports to be empty") + continue + } + require.NotEmpty(t, v, "agent listening ports should not be empty") + case "agent/logs.txt": + bs := readBytesFromZip(t, f) + if !wantAgent { + require.Empty(t, bs, "expected agent logs to be empty") + continue + } + require.NotEmpty(t, bs, "logs should not be empty") + case "agent/agent_magicsock.html": + bs := readBytesFromZip(t, f) + if !wantAgent { + require.Empty(t, bs, "expected agent magicsock to be empty") + continue + } + require.NotEmpty(t, bs, "agent magicsock should not be empty") + case "agent/client_magicsock.html": + bs := readBytesFromZip(t, f) + if !wantAgent { + require.Empty(t, bs, "expected client magicsock to be empty") + continue + } + require.NotEmpty(t, bs, "client magicsock should not be empty") + case "agent/manifest.json": + var v agentsdk.Manifest + decodeJSONFromZip(t, f, &v) + if !wantAgent { + require.Empty(t, v, "expected agent manifest to be empty") + continue + } + require.NotEmpty(t, v, "agent manifest should not be empty") + case "agent/peer_diagnostics.json": + var v *tailnet.PeerDiagnostics + decodeJSONFromZip(t, f, &v) + if !wantAgent { + require.Empty(t, v, "expected peer diagnostics to be empty") + continue + } + require.NotEmpty(t, v, "peer diagnostics should not be empty") + case "agent/ping_result.json": + var v *ipnstate.PingResult + decodeJSONFromZip(t, f, &v) + if !wantAgent { + require.Empty(t, v, "expected ping result to be empty") + continue + } + require.NotEmpty(t, v, "ping result should 
not be empty") + case "agent/prometheus.txt": + bs := readBytesFromZip(t, f) + if !wantAgent { + require.Empty(t, bs, "expected agent prometheus metrics to be empty") + continue + } + require.NotEmpty(t, bs, "agent prometheus metrics should not be empty") + case "agent/startup_logs.txt": + bs := readBytesFromZip(t, f) + if !wantAgent { + require.Empty(t, bs, "expected agent startup logs to be empty") + continue + } + require.Contains(t, string(bs), "started up") case "logs.txt": bs := readBytesFromZip(t, f) require.NotEmpty(t, bs, "logs should not be empty") diff --git a/cli/templatepull.go b/cli/templatepull.go index 0d0c46f687..7f9317be37 100644 --- a/cli/templatepull.go +++ b/cli/templatepull.go @@ -7,11 +7,11 @@ import ( "path/filepath" "sort" - "github.com/codeclysm/extract/v3" "golang.org/x/xerrors" "github.com/coder/coder/v2/cli/cliui" "github.com/coder/coder/v2/codersdk" + "github.com/coder/coder/v2/provisionersdk" "github.com/coder/serpent" ) @@ -161,7 +161,7 @@ func (r *RootCmd) templatePull() *serpent.Command { } _, _ = fmt.Fprintf(inv.Stderr, "Extracting template to %q\n", dest) - err = extract.Tar(ctx, bytes.NewReader(raw), dest, nil) + err = provisionersdk.Untar(dest, bytes.NewReader(raw)) return err }, } diff --git a/cli/templatepull_test.go b/cli/templatepull_test.go index 1b1d51b0cc..da981f6ad6 100644 --- a/cli/templatepull_test.go +++ b/cli/templatepull_test.go @@ -3,7 +3,6 @@ package cli_test import ( "archive/tar" "bytes" - "context" "crypto/sha256" "encoding/hex" "os" @@ -11,7 +10,6 @@ import ( "strings" "testing" - "github.com/codeclysm/extract/v3" "github.com/google/uuid" "github.com/stretchr/testify/require" @@ -20,6 +18,7 @@ import ( "github.com/coder/coder/v2/coderd/coderdtest" "github.com/coder/coder/v2/coderd/rbac" "github.com/coder/coder/v2/provisioner/echo" + "github.com/coder/coder/v2/provisionersdk" "github.com/coder/coder/v2/provisionersdk/proto" "github.com/coder/coder/v2/pty/ptytest" ) @@ -310,9 +309,7 @@ func 
TestTemplatePull_ToDir(t *testing.T) { _ = coderdtest.AwaitTemplateVersionJobCompleted(t, client, updatedVersion.ID) coderdtest.UpdateActiveTemplateVersion(t, client, template.ID, updatedVersion.ID) - ctx := context.Background() - - err = extract.Tar(ctx, bytes.NewReader(expected), expectedDest, nil) + err = provisionersdk.Untar(expectedDest, bytes.NewReader(expected)) require.NoError(t, err) ents, _ := os.ReadDir(actualDest) @@ -387,9 +384,7 @@ func TestTemplatePull_FolderConflict(t *testing.T) { ) require.NoError(t, err) - ctx := context.Background() - - err = extract.Tar(ctx, bytes.NewReader(expected), expectedDest, nil) + err = provisionersdk.Untar(expectedDest, bytes.NewReader(expected)) require.NoError(t, err) inv, root := clitest.New(t, "templates", "pull", template.Name, conflictDest) diff --git a/cli/testdata/coder_exp_example-error_api.golden b/cli/testdata/coder_exp_example-error_api.golden index e15b60abbd..a0a8455447 100644 --- a/cli/testdata/coder_exp_example-error_api.golden +++ b/cli/testdata/coder_exp_example-error_api.golden @@ -1,3 +1,5 @@ Encountered an error running "coder exp example-error api", see "coder exp example-error api --help" for more information error: Top level sdk error message. -Have you tried turning it off and on again? +1 validation error(s) found + region : magic dust is not available in your region +Suggestion: Have you tried turning it off and on again? diff --git a/cli/testdata/coder_exp_example-error_multi-error.golden b/cli/testdata/coder_exp_example-error_multi-error.golden index 73a32afd80..2b89275dff 100644 --- a/cli/testdata/coder_exp_example-error_multi-error.golden +++ b/cli/testdata/coder_exp_example-error_multi-error.golden @@ -4,4 +4,6 @@ error: 3 errors encountered: Trace=[wrapped: ]) 2. second error: function decided not to work, and it never will 3. Trace=[wrapped api error: ] Top level sdk error message. 
+ 1 validation error(s) found + region : magic dust is not available in your region magic dust unavailable, please try again later diff --git a/coderd/apidoc/docs.go b/coderd/apidoc/docs.go index a082996d32..bcd2b3b15c 100644 --- a/coderd/apidoc/docs.go +++ b/coderd/apidoc/docs.go @@ -2211,6 +2211,7 @@ const docTemplate = `{ "CoderSessionToken": [] } ], + "description": "Create a new workspace using a template. The request must\nspecify either the Template ID or the Template Version ID,\nnot both. If the Template ID is specified, the active version\nof the template will be used.", "consumes": [ "application/json" ], @@ -5903,6 +5904,7 @@ const docTemplate = `{ ], "summary": "Submit workspace agent stats", "operationId": "submit-workspace-agent-stats", + "deprecated": true, "parameters": [ { "description": "Stats request", @@ -9045,6 +9047,7 @@ const docTemplate = `{ } }, "codersdk.CreateWorkspaceRequest": { + "description": "CreateWorkspaceRequest provides options for creating a new workspace. Only one of TemplateID or TemplateVersionID can be specified, not both. 
If TemplateID is specified, the active version of the template will be used.", "type": "object", "required": [ "name" @@ -9293,9 +9296,6 @@ const docTemplate = `{ "disable_path_apps": { "type": "boolean" }, - "disable_session_expiry_refresh": { - "type": "boolean" - }, "docs_url": { "$ref": "#/definitions/serpent.URL" }, @@ -9333,12 +9333,6 @@ const docTemplate = `{ "logging": { "$ref": "#/definitions/codersdk.LoggingConfig" }, - "max_session_expiry": { - "type": "integer" - }, - "max_token_lifetime": { - "type": "integer" - }, "metrics_cache_refresh_interval": { "type": "integer" }, @@ -9390,6 +9384,9 @@ const docTemplate = `{ "secure_auth_cookie": { "type": "boolean" }, + "session_lifetime": { + "$ref": "#/definitions/codersdk.SessionLifetime" + }, "ssh_keygen_algorithm": { "type": "string" }, @@ -11082,6 +11079,22 @@ const docTemplate = `{ } } }, + "codersdk.SessionLifetime": { + "type": "object", + "properties": { + "default_duration": { + "description": "DefaultDuration is for api keys, not tokens.", + "type": "integer" + }, + "disable_expiry_refresh": { + "description": "DisableExpiryRefresh will disable automatically refreshing api\nkeys when they are used from the api. This means the api key lifetime at\ncreation is the lifetime of the api key.", + "type": "boolean" + }, + "max_token_lifetime": { + "type": "integer" + } + } + }, "codersdk.SupportConfig": { "type": "object", "properties": { diff --git a/coderd/apidoc/swagger.json b/coderd/apidoc/swagger.json index a559938463..47bac4fc4e 100644 --- a/coderd/apidoc/swagger.json +++ b/coderd/apidoc/swagger.json @@ -1932,6 +1932,7 @@ "CoderSessionToken": [] } ], + "description": "Create a new workspace using a template. The request must\nspecify either the Template ID or the Template Version ID,\nnot both. 
If the Template ID is specified, the active version\nof the template will be used.", "consumes": ["application/json"], "produces": ["application/json"], "tags": ["Workspaces"], @@ -5200,6 +5201,7 @@ "tags": ["Agents"], "summary": "Submit workspace agent stats", "operationId": "submit-workspace-agent-stats", + "deprecated": true, "parameters": [ { "description": "Stats request", @@ -8052,6 +8054,7 @@ } }, "codersdk.CreateWorkspaceRequest": { + "description": "CreateWorkspaceRequest provides options for creating a new workspace. Only one of TemplateID or TemplateVersionID can be specified, not both. If TemplateID is specified, the active version of the template will be used.", "type": "object", "required": ["name"], "properties": { @@ -8298,9 +8301,6 @@ "disable_path_apps": { "type": "boolean" }, - "disable_session_expiry_refresh": { - "type": "boolean" - }, "docs_url": { "$ref": "#/definitions/serpent.URL" }, @@ -8338,12 +8338,6 @@ "logging": { "$ref": "#/definitions/codersdk.LoggingConfig" }, - "max_session_expiry": { - "type": "integer" - }, - "max_token_lifetime": { - "type": "integer" - }, "metrics_cache_refresh_interval": { "type": "integer" }, @@ -8395,6 +8389,9 @@ "secure_auth_cookie": { "type": "boolean" }, + "session_lifetime": { + "$ref": "#/definitions/codersdk.SessionLifetime" + }, "ssh_keygen_algorithm": { "type": "string" }, @@ -9984,6 +9981,22 @@ } } }, + "codersdk.SessionLifetime": { + "type": "object", + "properties": { + "default_duration": { + "description": "DefaultDuration is for api keys, not tokens.", + "type": "integer" + }, + "disable_expiry_refresh": { + "description": "DisableExpiryRefresh will disable automatically refreshing api\nkeys when they are used from the api. 
This means the api key lifetime at\ncreation is the lifetime of the api key.", + "type": "boolean" + }, + "max_token_lifetime": { + "type": "integer" + } + } + }, "codersdk.SupportConfig": { "type": "object", "properties": { diff --git a/coderd/apikey.go b/coderd/apikey.go index b1d31ff613..10a83a05f4 100644 --- a/coderd/apikey.go +++ b/coderd/apikey.go @@ -84,7 +84,7 @@ func (api *API) postToken(rw http.ResponseWriter, r *http.Request) { cookie, key, err := api.createAPIKey(ctx, apikey.CreateParams{ UserID: user.ID, LoginType: database.LoginTypeToken, - DefaultLifetime: api.DeploymentValues.SessionDuration.Value(), + DefaultLifetime: api.DeploymentValues.Sessions.DefaultDuration.Value(), ExpiresAt: dbtime.Now().Add(lifeTime), Scope: scope, LifetimeSeconds: int64(lifeTime.Seconds()), @@ -128,7 +128,7 @@ func (api *API) postAPIKey(rw http.ResponseWriter, r *http.Request) { lifeTime := time.Hour * 24 * 7 cookie, _, err := api.createAPIKey(ctx, apikey.CreateParams{ UserID: user.ID, - DefaultLifetime: api.DeploymentValues.SessionDuration.Value(), + DefaultLifetime: api.DeploymentValues.Sessions.DefaultDuration.Value(), LoginType: database.LoginTypePassword, RemoteAddr: r.RemoteAddr, // All api generated keys will last 1 week. 
Browser login tokens have @@ -354,7 +354,7 @@ func (api *API) tokenConfig(rw http.ResponseWriter, r *http.Request) { httpapi.Write( r.Context(), rw, http.StatusOK, codersdk.TokenConfig{ - MaxTokenLifetime: values.MaxTokenLifetime.Value(), + MaxTokenLifetime: values.Sessions.MaximumTokenDuration.Value(), }, ) } @@ -364,10 +364,10 @@ func (api *API) validateAPIKeyLifetime(lifetime time.Duration) error { return xerrors.New("lifetime must be positive number greater than 0") } - if lifetime > api.DeploymentValues.MaxTokenLifetime.Value() { + if lifetime > api.DeploymentValues.Sessions.MaximumTokenDuration.Value() { return xerrors.Errorf( "lifetime must be less than %v", - api.DeploymentValues.MaxTokenLifetime, + api.DeploymentValues.Sessions.MaximumTokenDuration, ) } diff --git a/coderd/apikey_test.go b/coderd/apikey_test.go index a20acf5ff3..29d0f01126 100644 --- a/coderd/apikey_test.go +++ b/coderd/apikey_test.go @@ -125,7 +125,7 @@ func TestTokenUserSetMaxLifetime(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) defer cancel() dc := coderdtest.DeploymentValues(t) - dc.MaxTokenLifetime = serpent.Duration(time.Hour * 24 * 7) + dc.Sessions.MaximumTokenDuration = serpent.Duration(time.Hour * 24 * 7) client := coderdtest.New(t, &coderdtest.Options{ DeploymentValues: dc, }) @@ -165,7 +165,7 @@ func TestSessionExpiry(t *testing.T) { // // We don't support updating the deployment config after startup, but for // this test it works because we don't copy the value (and we use pointers). 
- dc.SessionDuration = serpent.Duration(time.Second) + dc.Sessions.DefaultDuration = serpent.Duration(time.Second) userClient, _ := coderdtest.CreateAnotherUser(t, adminClient, adminUser.OrganizationID) @@ -174,8 +174,8 @@ func TestSessionExpiry(t *testing.T) { apiKey, err := db.GetAPIKeyByID(ctx, strings.Split(token, "-")[0]) require.NoError(t, err) - require.EqualValues(t, dc.SessionDuration.Value().Seconds(), apiKey.LifetimeSeconds) - require.WithinDuration(t, apiKey.CreatedAt.Add(dc.SessionDuration.Value()), apiKey.ExpiresAt, 2*time.Second) + require.EqualValues(t, dc.Sessions.DefaultDuration.Value().Seconds(), apiKey.LifetimeSeconds) + require.WithinDuration(t, apiKey.CreatedAt.Add(dc.Sessions.DefaultDuration.Value()), apiKey.ExpiresAt, 2*time.Second) // Update the session token to be expired so we can test that it is // rejected for extra points. diff --git a/coderd/audit/audit.go b/coderd/audit/audit.go index bdd32abfae..097b0c6f49 100644 --- a/coderd/audit/audit.go +++ b/coderd/audit/audit.go @@ -21,7 +21,7 @@ type AdditionalFields struct { BuildNumber string `json:"build_number"` BuildReason database.BuildReason `json:"build_reason"` WorkspaceOwner string `json:"workspace_owner"` - WorkspaceID uuid.UUID `json:"workpace_id"` + WorkspaceID uuid.UUID `json:"workspace_id"` } func NewNop() Auditor { diff --git a/coderd/autobuild/lifecycle_executor_test.go b/coderd/autobuild/lifecycle_executor_test.go index d8c1a4d8a8..54ceb53254 100644 --- a/coderd/autobuild/lifecycle_executor_test.go +++ b/coderd/autobuild/lifecycle_executor_test.go @@ -17,6 +17,7 @@ import ( "github.com/coder/coder/v2/coderd/autobuild" "github.com/coder/coder/v2/coderd/coderdtest" "github.com/coder/coder/v2/coderd/database" + "github.com/coder/coder/v2/coderd/database/dbauthz" "github.com/coder/coder/v2/coderd/schedule" "github.com/coder/coder/v2/coderd/schedule/cron" "github.com/coder/coder/v2/coderd/util/ptr" @@ -849,14 +850,17 @@ func TestExecutorRequireActiveVersion(t *testing.T) { ticker 
= make(chan time.Time) statCh = make(chan autobuild.Stats) - ownerClient = coderdtest.New(t, &coderdtest.Options{ + ownerClient, db = coderdtest.NewWithDatabase(t, &coderdtest.Options{ AutobuildTicker: ticker, IncludeProvisionerDaemon: true, AutobuildStats: statCh, TemplateScheduleStore: schedule.NewAGPLTemplateScheduleStore(), }) ) + ctx := testutil.Context(t, testutil.WaitShort) owner := coderdtest.CreateFirstUser(t, ownerClient) + me, err := ownerClient.User(ctx, codersdk.Me) + require.NoError(t, err) // Create an active and inactive template version. We'll // build a regular member's workspace using a non-active @@ -864,10 +868,14 @@ func TestExecutorRequireActiveVersion(t *testing.T) { // since there is no enterprise license. activeVersion := coderdtest.CreateTemplateVersion(t, ownerClient, owner.OrganizationID, nil) coderdtest.AwaitTemplateVersionJobCompleted(t, ownerClient, activeVersion.ID) - template := coderdtest.CreateTemplate(t, ownerClient, owner.OrganizationID, activeVersion.ID, func(ctr *codersdk.CreateTemplateRequest) { - ctr.RequireActiveVersion = true - ctr.VersionID = activeVersion.ID + template := coderdtest.CreateTemplate(t, ownerClient, owner.OrganizationID, activeVersion.ID) + //nolint We need to set this in the database directly, because the API will return an error + // letting you know that this feature requires an enterprise license. 
+ err = db.UpdateTemplateAccessControlByID(dbauthz.As(ctx, coderdtest.AuthzUserSubject(me, owner.OrganizationID)), database.UpdateTemplateAccessControlByIDParams{ + ID: template.ID, + RequireActiveVersion: true, }) + require.NoError(t, err) inactiveVersion := coderdtest.CreateTemplateVersion(t, ownerClient, owner.OrganizationID, nil, func(ctvr *codersdk.CreateTemplateVersionRequest) { ctvr.TemplateID = template.ID }) diff --git a/coderd/coderd.go b/coderd/coderd.go index 0cc0962316..67b16e9032 100644 --- a/coderd/coderd.go +++ b/coderd/coderd.go @@ -566,7 +566,7 @@ func New(options *Options) *API { DB: options.Database, OAuth2Configs: oauthConfigs, RedirectToLogin: false, - DisableSessionExpiryRefresh: options.DeploymentValues.DisableSessionExpiryRefresh.Value(), + DisableSessionExpiryRefresh: options.DeploymentValues.Sessions.DisableExpiryRefresh.Value(), Optional: false, SessionTokenFunc: nil, // Default behavior PostAuthAdditionalHeadersFunc: options.PostAuthAdditionalHeadersFunc, @@ -576,7 +576,7 @@ func New(options *Options) *API { DB: options.Database, OAuth2Configs: oauthConfigs, RedirectToLogin: true, - DisableSessionExpiryRefresh: options.DeploymentValues.DisableSessionExpiryRefresh.Value(), + DisableSessionExpiryRefresh: options.DeploymentValues.Sessions.DisableExpiryRefresh.Value(), Optional: false, SessionTokenFunc: nil, // Default behavior PostAuthAdditionalHeadersFunc: options.PostAuthAdditionalHeadersFunc, @@ -586,7 +586,7 @@ func New(options *Options) *API { DB: options.Database, OAuth2Configs: oauthConfigs, RedirectToLogin: false, - DisableSessionExpiryRefresh: options.DeploymentValues.DisableSessionExpiryRefresh.Value(), + DisableSessionExpiryRefresh: options.DeploymentValues.Sessions.DisableExpiryRefresh.Value(), Optional: true, SessionTokenFunc: nil, // Default behavior PostAuthAdditionalHeadersFunc: options.PostAuthAdditionalHeadersFunc, diff --git a/coderd/coderdtest/oidctest/idp.go b/coderd/coderdtest/oidctest/idp.go index 
ff42e31997..c0b95619d4 100644 --- a/coderd/coderdtest/oidctest/idp.go +++ b/coderd/coderdtest/oidctest/idp.go @@ -604,7 +604,7 @@ func (f *FakeIDP) CreateAuthCode(t testing.TB, state string) string { // something. // Essentially this is used to fake the Coderd side of the exchange. // The flow starts at the user hitting the OIDC login page. -func (f *FakeIDP) OIDCCallback(t testing.TB, state string, idTokenClaims jwt.MapClaims) (*http.Response, error) { +func (f *FakeIDP) OIDCCallback(t testing.TB, state string, idTokenClaims jwt.MapClaims) *http.Response { t.Helper() if f.serve { panic("cannot use OIDCCallback with WithServing. This is only for the in memory usage") @@ -625,7 +625,7 @@ func (f *FakeIDP) OIDCCallback(t testing.TB, state string, idTokenClaims jwt.Map _ = resp.Body.Close() } }) - return resp, nil + return resp } // ProviderJSON is the .well-known/configuration JSON diff --git a/coderd/coderdtest/oidctest/idp_test.go b/coderd/coderdtest/oidctest/idp_test.go index 519635b067..7706834785 100644 --- a/coderd/coderdtest/oidctest/idp_test.go +++ b/coderd/coderdtest/oidctest/idp_test.go @@ -54,12 +54,12 @@ func TestFakeIDPBasicFlow(t *testing.T) { token = oauthToken }) - resp, err := fake.OIDCCallback(t, expectedState, jwt.MapClaims{}) - require.NoError(t, err) + //nolint:bodyclose + resp := fake.OIDCCallback(t, expectedState, jwt.MapClaims{}) require.Equal(t, http.StatusOK, resp.StatusCode) // Test the user info - _, err = cfg.Provider.UserInfo(ctx, oauth2.StaticTokenSource(token)) + _, err := cfg.Provider.UserInfo(ctx, oauth2.StaticTokenSource(token)) require.NoError(t, err) // Now test it can refresh diff --git a/coderd/database/dbauthz/dbauthz.go b/coderd/database/dbauthz/dbauthz.go index 97a695cb37..a638b705a5 100644 --- a/coderd/database/dbauthz/dbauthz.go +++ b/coderd/database/dbauthz/dbauthz.go @@ -174,6 +174,7 @@ var ( // When org scoped provisioner credentials are implemented, // this can be reduced to read a specific org. 
rbac.ResourceOrganization.Type: {rbac.ActionRead}, + rbac.ResourceGroup.Type: {rbac.ActionRead}, }), Org: map[string][]rbac.Permission{}, User: []rbac.Permission{}, @@ -1141,6 +1142,10 @@ func (q *querier) GetGroupMembers(ctx context.Context, id uuid.UUID) ([]database return q.db.GetGroupMembers(ctx, id) } +func (q *querier) GetGroupsByOrganizationAndUserID(ctx context.Context, arg database.GetGroupsByOrganizationAndUserIDParams) ([]database.Group, error) { + return fetchWithPostFilter(q.auth, q.db.GetGroupsByOrganizationAndUserID)(ctx, arg) +} + func (q *querier) GetGroupsByOrganizationID(ctx context.Context, organizationID uuid.UUID) ([]database.Group, error) { return fetchWithPostFilter(q.auth, q.db.GetGroupsByOrganizationID)(ctx, organizationID) } @@ -2527,20 +2532,6 @@ func (q *querier) InsertWorkspaceAgentScripts(ctx context.Context, arg database. return q.db.InsertWorkspaceAgentScripts(ctx, arg) } -func (q *querier) InsertWorkspaceAgentStat(ctx context.Context, arg database.InsertWorkspaceAgentStatParams) (database.WorkspaceAgentStat, error) { - // TODO: This is a workspace agent operation. Should users be able to query this? - // Not really sure what this is for. 
- workspace, err := q.db.GetWorkspaceByID(ctx, arg.WorkspaceID) - if err != nil { - return database.WorkspaceAgentStat{}, err - } - err = q.authorizeContext(ctx, rbac.ActionUpdate, workspace) - if err != nil { - return database.WorkspaceAgentStat{}, err - } - return q.db.InsertWorkspaceAgentStat(ctx, arg) -} - func (q *querier) InsertWorkspaceAgentStats(ctx context.Context, arg database.InsertWorkspaceAgentStatsParams) error { if err := q.authorizeContext(ctx, rbac.ActionCreate, rbac.ResourceSystem); err != nil { return err diff --git a/coderd/database/dbauthz/dbauthz_test.go b/coderd/database/dbauthz/dbauthz_test.go index 71345ccf09..7be33d58c8 100644 --- a/coderd/database/dbauthz/dbauthz_test.go +++ b/coderd/database/dbauthz/dbauthz_test.go @@ -314,6 +314,14 @@ func (s *MethodTestSuite) TestGroup() { _ = dbgen.GroupMember(s.T(), db, database.GroupMember{}) check.Args(g.ID).Asserts(g, rbac.ActionRead) })) + s.Run("GetGroupsByOrganizationAndUserID", s.Subtest(func(db database.Store, check *expects) { + g := dbgen.Group(s.T(), db, database.Group{}) + gm := dbgen.GroupMember(s.T(), db, database.GroupMember{GroupID: g.ID}) + check.Args(database.GetGroupsByOrganizationAndUserIDParams{ + OrganizationID: g.OrganizationID, + UserID: gm.UserID, + }).Asserts(g, rbac.ActionRead) + })) s.Run("InsertAllUsersGroup", s.Subtest(func(db database.Store, check *expects) { o := dbgen.Organization(s.T(), db, database.Organization{}) check.Args(o.ID).Asserts(rbac.ResourceGroup.InOrg(o.ID), rbac.ActionCreate) @@ -1512,12 +1520,6 @@ func (s *MethodTestSuite) TestWorkspace() { AutomaticUpdates: database.AutomaticUpdatesAlways, }).Asserts(w, rbac.ActionUpdate) })) - s.Run("InsertWorkspaceAgentStat", s.Subtest(func(db database.Store, check *expects) { - ws := dbgen.Workspace(s.T(), db, database.Workspace{}) - check.Args(database.InsertWorkspaceAgentStatParams{ - WorkspaceID: ws.ID, - }).Asserts(ws, rbac.ActionUpdate) - })) s.Run("UpdateWorkspaceAppHealthByID", s.Subtest(func(db 
database.Store, check *expects) { ws := dbgen.Workspace(s.T(), db, database.Workspace{}) build := dbgen.WorkspaceBuild(s.T(), db, database.WorkspaceBuild{WorkspaceID: ws.ID, JobID: uuid.New()}) diff --git a/coderd/database/dbgen/dbgen.go b/coderd/database/dbgen/dbgen.go index 707e977178..596885c9d2 100644 --- a/coderd/database/dbgen/dbgen.go +++ b/coderd/database/dbgen/dbgen.go @@ -707,27 +707,49 @@ func WorkspaceAgentStat(t testing.TB, db database.Store, orig database.Workspace if orig.ConnectionsByProto == nil { orig.ConnectionsByProto = json.RawMessage([]byte("{}")) } - scheme, err := db.InsertWorkspaceAgentStat(genCtx, database.InsertWorkspaceAgentStatParams{ - ID: takeFirst(orig.ID, uuid.New()), - CreatedAt: takeFirst(orig.CreatedAt, dbtime.Now()), - UserID: takeFirst(orig.UserID, uuid.New()), - TemplateID: takeFirst(orig.TemplateID, uuid.New()), - WorkspaceID: takeFirst(orig.WorkspaceID, uuid.New()), - AgentID: takeFirst(orig.AgentID, uuid.New()), - ConnectionsByProto: orig.ConnectionsByProto, - ConnectionCount: takeFirst(orig.ConnectionCount, 0), - RxPackets: takeFirst(orig.RxPackets, 0), - RxBytes: takeFirst(orig.RxBytes, 0), - TxPackets: takeFirst(orig.TxPackets, 0), - TxBytes: takeFirst(orig.TxBytes, 0), - SessionCountVSCode: takeFirst(orig.SessionCountVSCode, 0), - SessionCountJetBrains: takeFirst(orig.SessionCountJetBrains, 0), - SessionCountReconnectingPTY: takeFirst(orig.SessionCountReconnectingPTY, 0), - SessionCountSSH: takeFirst(orig.SessionCountSSH, 0), - ConnectionMedianLatencyMS: takeFirst(orig.ConnectionMedianLatencyMS, 0), - }) + jsonProto := []byte(fmt.Sprintf("[%s]", orig.ConnectionsByProto)) + + params := database.InsertWorkspaceAgentStatsParams{ + ID: []uuid.UUID{takeFirst(orig.ID, uuid.New())}, + CreatedAt: []time.Time{takeFirst(orig.CreatedAt, dbtime.Now())}, + UserID: []uuid.UUID{takeFirst(orig.UserID, uuid.New())}, + TemplateID: []uuid.UUID{takeFirst(orig.TemplateID, uuid.New())}, + WorkspaceID: []uuid.UUID{takeFirst(orig.WorkspaceID, 
uuid.New())}, + AgentID: []uuid.UUID{takeFirst(orig.AgentID, uuid.New())}, + ConnectionsByProto: jsonProto, + ConnectionCount: []int64{takeFirst(orig.ConnectionCount, 0)}, + RxPackets: []int64{takeFirst(orig.RxPackets, 0)}, + RxBytes: []int64{takeFirst(orig.RxBytes, 0)}, + TxPackets: []int64{takeFirst(orig.TxPackets, 0)}, + TxBytes: []int64{takeFirst(orig.TxBytes, 0)}, + SessionCountVSCode: []int64{takeFirst(orig.SessionCountVSCode, 0)}, + SessionCountJetBrains: []int64{takeFirst(orig.SessionCountJetBrains, 0)}, + SessionCountReconnectingPTY: []int64{takeFirst(orig.SessionCountReconnectingPTY, 0)}, + SessionCountSSH: []int64{takeFirst(orig.SessionCountSSH, 0)}, + ConnectionMedianLatencyMS: []float64{takeFirst(orig.ConnectionMedianLatencyMS, 0)}, + } + err := db.InsertWorkspaceAgentStats(genCtx, params) require.NoError(t, err, "insert workspace agent stat") - return scheme + + return database.WorkspaceAgentStat{ + ID: params.ID[0], + CreatedAt: params.CreatedAt[0], + UserID: params.UserID[0], + AgentID: params.AgentID[0], + WorkspaceID: params.WorkspaceID[0], + TemplateID: params.TemplateID[0], + ConnectionsByProto: orig.ConnectionsByProto, + ConnectionCount: params.ConnectionCount[0], + RxPackets: params.RxPackets[0], + RxBytes: params.RxBytes[0], + TxPackets: params.TxPackets[0], + TxBytes: params.TxBytes[0], + ConnectionMedianLatencyMS: params.ConnectionMedianLatencyMS[0], + SessionCountVSCode: params.SessionCountVSCode[0], + SessionCountJetBrains: params.SessionCountJetBrains[0], + SessionCountReconnectingPTY: params.SessionCountReconnectingPTY[0], + SessionCountSSH: params.SessionCountSSH[0], + } } func OAuth2ProviderApp(t testing.TB, db database.Store, seed database.OAuth2ProviderApp) database.OAuth2ProviderApp { diff --git a/coderd/database/dbmem/dbmem.go b/coderd/database/dbmem/dbmem.go index ef112da121..2b9db8b1f2 100644 --- a/coderd/database/dbmem/dbmem.go +++ b/coderd/database/dbmem/dbmem.go @@ -404,6 +404,16 @@ func (q *FakeQuerier) 
convertToWorkspaceRowsNoLock(ctx context.Context, workspac break } } + + if pj, err := q.getProvisionerJobByIDNoLock(ctx, build.JobID); err == nil { + wr.LatestBuildStatus = pj.JobStatus + } + + wr.LatestBuildTransition = build.Transition + } + + if u, err := q.getUserByIDNoLock(w.OwnerID); err == nil { + wr.Username = u.Username } rows = append(rows, wr) @@ -2240,6 +2250,30 @@ func (q *FakeQuerier) GetGroupMembers(_ context.Context, id uuid.UUID) ([]databa return users, nil } +func (q *FakeQuerier) GetGroupsByOrganizationAndUserID(_ context.Context, arg database.GetGroupsByOrganizationAndUserIDParams) ([]database.Group, error) { + err := validateDatabaseType(arg) + if err != nil { + return nil, err + } + + q.mutex.RLock() + defer q.mutex.RUnlock() + var groupIds []uuid.UUID + for _, member := range q.groupMembers { + if member.UserID == arg.UserID { + groupIds = append(groupIds, member.GroupID) + } + } + groups := []database.Group{} + for _, group := range q.groups { + if slices.Contains(groupIds, group.ID) && group.OrganizationID == arg.OrganizationID { + groups = append(groups, group) + } + } + + return groups, nil +} + func (q *FakeQuerier) GetGroupsByOrganizationID(_ context.Context, id uuid.UUID) ([]database.Group, error) { q.mutex.RLock() defer q.mutex.RUnlock() @@ -6447,37 +6481,6 @@ func (q *FakeQuerier) InsertWorkspaceAgentScripts(_ context.Context, arg databas return scripts, nil } -func (q *FakeQuerier) InsertWorkspaceAgentStat(_ context.Context, p database.InsertWorkspaceAgentStatParams) (database.WorkspaceAgentStat, error) { - if err := validateDatabaseType(p); err != nil { - return database.WorkspaceAgentStat{}, err - } - - q.mutex.Lock() - defer q.mutex.Unlock() - - stat := database.WorkspaceAgentStat{ - ID: p.ID, - CreatedAt: p.CreatedAt, - WorkspaceID: p.WorkspaceID, - AgentID: p.AgentID, - UserID: p.UserID, - ConnectionsByProto: p.ConnectionsByProto, - ConnectionCount: p.ConnectionCount, - RxPackets: p.RxPackets, - RxBytes: p.RxBytes, - 
TxPackets: p.TxPackets, - TxBytes: p.TxBytes, - TemplateID: p.TemplateID, - SessionCountVSCode: p.SessionCountVSCode, - SessionCountJetBrains: p.SessionCountJetBrains, - SessionCountReconnectingPTY: p.SessionCountReconnectingPTY, - SessionCountSSH: p.SessionCountSSH, - ConnectionMedianLatencyMS: p.ConnectionMedianLatencyMS, - } - q.workspaceAgentStats = append(q.workspaceAgentStats, stat) - return stat, nil -} - func (q *FakeQuerier) InsertWorkspaceAgentStats(_ context.Context, arg database.InsertWorkspaceAgentStatsParams) error { err := validateDatabaseType(arg) if err != nil { diff --git a/coderd/database/dbmetrics/dbmetrics.go b/coderd/database/dbmetrics/dbmetrics.go index 5cd452d328..53dc3f2feb 100644 --- a/coderd/database/dbmetrics/dbmetrics.go +++ b/coderd/database/dbmetrics/dbmetrics.go @@ -559,6 +559,13 @@ func (m metricsStore) GetGroupMembers(ctx context.Context, groupID uuid.UUID) ([ return users, err } +func (m metricsStore) GetGroupsByOrganizationAndUserID(ctx context.Context, arg database.GetGroupsByOrganizationAndUserIDParams) ([]database.Group, error) { + start := time.Now() + r0, r1 := m.s.GetGroupsByOrganizationAndUserID(ctx, arg) + m.queryLatencies.WithLabelValues("GetGroupsByOrganizationAndUserID").Observe(time.Since(start).Seconds()) + return r0, r1 +} + func (m metricsStore) GetGroupsByOrganizationID(ctx context.Context, organizationID uuid.UUID) ([]database.Group, error) { start := time.Now() groups, err := m.s.GetGroupsByOrganizationID(ctx, organizationID) @@ -1642,13 +1649,6 @@ func (m metricsStore) InsertWorkspaceAgentScripts(ctx context.Context, arg datab return r0, r1 } -func (m metricsStore) InsertWorkspaceAgentStat(ctx context.Context, arg database.InsertWorkspaceAgentStatParams) (database.WorkspaceAgentStat, error) { - start := time.Now() - stat, err := m.s.InsertWorkspaceAgentStat(ctx, arg) - m.queryLatencies.WithLabelValues("InsertWorkspaceAgentStat").Observe(time.Since(start).Seconds()) - return stat, err -} - func (m metricsStore) 
InsertWorkspaceAgentStats(ctx context.Context, arg database.InsertWorkspaceAgentStatsParams) error { start := time.Now() r0 := m.s.InsertWorkspaceAgentStats(ctx, arg) diff --git a/coderd/database/dbmock/dbmock.go b/coderd/database/dbmock/dbmock.go index 32049ba072..2bb62e8c92 100644 --- a/coderd/database/dbmock/dbmock.go +++ b/coderd/database/dbmock/dbmock.go @@ -1095,6 +1095,21 @@ func (mr *MockStoreMockRecorder) GetGroupMembers(arg0, arg1 any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGroupMembers", reflect.TypeOf((*MockStore)(nil).GetGroupMembers), arg0, arg1) } +// GetGroupsByOrganizationAndUserID mocks base method. +func (m *MockStore) GetGroupsByOrganizationAndUserID(arg0 context.Context, arg1 database.GetGroupsByOrganizationAndUserIDParams) ([]database.Group, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetGroupsByOrganizationAndUserID", arg0, arg1) + ret0, _ := ret[0].([]database.Group) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetGroupsByOrganizationAndUserID indicates an expected call of GetGroupsByOrganizationAndUserID. +func (mr *MockStoreMockRecorder) GetGroupsByOrganizationAndUserID(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGroupsByOrganizationAndUserID", reflect.TypeOf((*MockStore)(nil).GetGroupsByOrganizationAndUserID), arg0, arg1) +} + // GetGroupsByOrganizationID mocks base method. func (m *MockStore) GetGroupsByOrganizationID(arg0 context.Context, arg1 uuid.UUID) ([]database.Group, error) { m.ctrl.T.Helper() @@ -3456,21 +3471,6 @@ func (mr *MockStoreMockRecorder) InsertWorkspaceAgentScripts(arg0, arg1 any) *go return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertWorkspaceAgentScripts", reflect.TypeOf((*MockStore)(nil).InsertWorkspaceAgentScripts), arg0, arg1) } -// InsertWorkspaceAgentStat mocks base method. 
-func (m *MockStore) InsertWorkspaceAgentStat(arg0 context.Context, arg1 database.InsertWorkspaceAgentStatParams) (database.WorkspaceAgentStat, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "InsertWorkspaceAgentStat", arg0, arg1) - ret0, _ := ret[0].(database.WorkspaceAgentStat) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// InsertWorkspaceAgentStat indicates an expected call of InsertWorkspaceAgentStat. -func (mr *MockStoreMockRecorder) InsertWorkspaceAgentStat(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertWorkspaceAgentStat", reflect.TypeOf((*MockStore)(nil).InsertWorkspaceAgentStat), arg0, arg1) -} - // InsertWorkspaceAgentStats mocks base method. func (m *MockStore) InsertWorkspaceAgentStats(arg0 context.Context, arg1 database.InsertWorkspaceAgentStatsParams) error { m.ctrl.T.Helper() diff --git a/coderd/database/dbrollup/dbrollup_test.go b/coderd/database/dbrollup/dbrollup_test.go index f2455e8d7a..6c8e96b847 100644 --- a/coderd/database/dbrollup/dbrollup_test.go +++ b/coderd/database/dbrollup/dbrollup_test.go @@ -143,8 +143,8 @@ func TestRollupTemplateUsageStats(t *testing.T) { db, ps := dbtestutil.NewDB(t, dbtestutil.WithDumpOnFailure()) logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug) - anHourAgo := dbtime.Now().Add(-time.Hour).Truncate(time.Hour) - anHourAndSixMonthsAgo := anHourAgo.AddDate(0, -6, 0) + anHourAgo := dbtime.Now().Add(-time.Hour).Truncate(time.Hour).UTC() + anHourAndSixMonthsAgo := anHourAgo.AddDate(0, -6, 0).UTC() var ( org = dbgen.Organization(t, db, database.Organization{}) @@ -242,6 +242,12 @@ func TestRollupTemplateUsageStats(t *testing.T) { require.NoError(t, err) require.Len(t, stats, 1) + // I do not know a better way to do this. Our database runs in a *random* + // timezone. 
So the returned time is in a random timezone and fails on the + // equal even though they are the same time if converted back to the same timezone. + stats[0].EndTime = stats[0].EndTime.UTC() + stats[0].StartTime = stats[0].StartTime.UTC() + require.Equal(t, database.TemplateUsageStat{ TemplateID: tpl.ID, UserID: user.ID, diff --git a/coderd/database/dump.sql b/coderd/database/dump.sql index 55872db31d..03d3640f8d 100644 --- a/coderd/database/dump.sql +++ b/coderd/database/dump.sql @@ -1624,6 +1624,10 @@ CREATE INDEX idx_tailnet_clients_coordinator ON tailnet_clients USING btree (coo CREATE INDEX idx_tailnet_peers_coordinator ON tailnet_peers USING btree (coordinator_id); +CREATE INDEX idx_tailnet_tunnels_dst_id ON tailnet_tunnels USING hash (dst_id); + +CREATE INDEX idx_tailnet_tunnels_src_id ON tailnet_tunnels USING hash (src_id); + CREATE UNIQUE INDEX idx_users_email ON users USING btree (email) WHERE (deleted = false); CREATE UNIQUE INDEX idx_users_username ON users USING btree (username) WHERE (deleted = false); @@ -1644,6 +1648,8 @@ COMMENT ON INDEX template_usage_stats_start_time_template_id_user_id_idx IS 'Ind CREATE UNIQUE INDEX templates_organization_id_name_idx ON templates USING btree (organization_id, lower((name)::text)) WHERE (deleted = false); +CREATE UNIQUE INDEX user_links_linked_id_login_type_idx ON user_links USING btree (linked_id, login_type) WHERE (linked_id <> ''::text); + CREATE UNIQUE INDEX users_email_lower_idx ON users USING btree (lower(email)) WHERE (deleted = false); CREATE UNIQUE INDEX users_username_lower_idx ON users USING btree (lower(username)) WHERE (deleted = false); diff --git a/coderd/database/migrations/000205_unique_linked_id.down.sql b/coderd/database/migrations/000205_unique_linked_id.down.sql new file mode 100644 index 0000000000..81e7d14fc1 --- /dev/null +++ b/coderd/database/migrations/000205_unique_linked_id.down.sql @@ -0,0 +1 @@ +DROP INDEX user_links_linked_id_login_type_idx; diff --git 
a/coderd/database/migrations/000205_unique_linked_id.up.sql b/coderd/database/migrations/000205_unique_linked_id.up.sql new file mode 100644 index 0000000000..da3ff6126a --- /dev/null +++ b/coderd/database/migrations/000205_unique_linked_id.up.sql @@ -0,0 +1,21 @@ +-- Remove the linked_id if two user_links share the same value. +-- This will affect the user if they attempt to change their settings on +-- the oauth/oidc provider. However, if two users exist with the same +-- linked_value, there is no way to determine correctly which user should +-- be updated. Since the linked_id is empty, this value will be linked +-- by email. +UPDATE ONLY user_links AS out +SET + linked_id = + CASE WHEN ( + -- When the count of linked_id is greater than 1, set the linked_id to empty + SELECT + COUNT(*) + FROM + user_links inn + WHERE + out.linked_id = inn.linked_id AND out.login_type = inn.login_type + ) > 1 THEN '' ELSE out.linked_id END; + +-- Enforce unique linked_id constraint on non-empty linked_id +CREATE UNIQUE INDEX user_links_linked_id_login_type_idx ON user_links USING btree (linked_id, login_type) WHERE (linked_id != ''); diff --git a/coderd/database/migrations/000206_add_tailnet_tunnels_indexes.down.sql b/coderd/database/migrations/000206_add_tailnet_tunnels_indexes.down.sql new file mode 100644 index 0000000000..475e509ac6 --- /dev/null +++ b/coderd/database/migrations/000206_add_tailnet_tunnels_indexes.down.sql @@ -0,0 +1,2 @@ +DROP INDEX idx_tailnet_tunnels_src_id; +DROP INDEX idx_tailnet_tunnels_dst_id; diff --git a/coderd/database/migrations/000206_add_tailnet_tunnels_indexes.up.sql b/coderd/database/migrations/000206_add_tailnet_tunnels_indexes.up.sql new file mode 100644 index 0000000000..42f5729e14 --- /dev/null +++ b/coderd/database/migrations/000206_add_tailnet_tunnels_indexes.up.sql @@ -0,0 +1,3 @@ +-- Since src_id and dst_id are UUIDs, we only ever compare them with equality, so hash is better +CREATE INDEX idx_tailnet_tunnels_src_id ON tailnet_tunnels 
USING hash (src_id); +CREATE INDEX idx_tailnet_tunnels_dst_id ON tailnet_tunnels USING hash (dst_id); diff --git a/coderd/database/migrations/testdata/fixtures/000048_userdelete.up.sql b/coderd/database/migrations/testdata/fixtures/000048_userdelete.up.sql index 0fb1d0efd4..c4f8b2e909 100644 --- a/coderd/database/migrations/testdata/fixtures/000048_userdelete.up.sql +++ b/coderd/database/migrations/testdata/fixtures/000048_userdelete.up.sql @@ -17,3 +17,18 @@ INSERT INTO public.user_links(user_id, login_type, linked_id, oauth_access_token -- This has happened on a production database. INSERT INTO public.user_links(user_id, login_type, linked_id, oauth_access_token) VALUES('fc1511ef-4fcf-4a3b-98a1-8df64160e35a', 'oidc', 'foo', ''); + + +-- Lastly, make 2 other users who have the same user link. +INSERT INTO public.users(id, email, username, hashed_password, created_at, updated_at, status, rbac_roles, deleted) +VALUES ('580ed397-727d-4aaf-950a-51f89f556c24', 'dup_link_a@coder.com', 'dupe_a', '\x', '2022-11-02 13:05:21.445455+02', '2022-11-02 13:05:21.445455+02', 'active', '{}', false) ON CONFLICT DO NOTHING; +INSERT INTO public.organization_members VALUES ('580ed397-727d-4aaf-950a-51f89f556c24', 'bb640d07-ca8a-4869-b6bc-ae61ebb2fda1', '2022-11-02 13:05:21.447595+02', '2022-11-02 13:05:21.447595+02', '{}') ON CONFLICT DO NOTHING; +INSERT INTO public.user_links(user_id, login_type, linked_id, oauth_access_token) +VALUES('580ed397-727d-4aaf-950a-51f89f556c24', 'github', '500', ''); + + +INSERT INTO public.users(id, email, username, hashed_password, created_at, updated_at, status, rbac_roles, deleted) +VALUES ('c813366b-2fde-45ae-920c-101c3ad6a1e1', 'dup_link_b@coder.com', 'dupe_b', '\x', '2022-11-02 13:05:21.445455+02', '2022-11-02 13:05:21.445455+02', 'active', '{}', false) ON CONFLICT DO NOTHING; +INSERT INTO public.organization_members VALUES ('c813366b-2fde-45ae-920c-101c3ad6a1e1', 'bb640d07-ca8a-4869-b6bc-ae61ebb2fda1', '2022-11-02 13:05:21.447595+02', '2022-11-02 
13:05:21.447595+02', '{}') ON CONFLICT DO NOTHING; +INSERT INTO public.user_links(user_id, login_type, linked_id, oauth_access_token) +VALUES('c813366b-2fde-45ae-920c-101c3ad6a1e1', 'github', '500', ''); diff --git a/coderd/database/modelqueries.go b/coderd/database/modelqueries.go index 40c953375d..ca38505b28 100644 --- a/coderd/database/modelqueries.go +++ b/coderd/database/modelqueries.go @@ -266,6 +266,7 @@ func (q *sqlQuerier) GetAuthorizedWorkspaces(ctx context.Context, arg GetWorkspa &i.LatestBuildCanceledAt, &i.LatestBuildError, &i.LatestBuildTransition, + &i.LatestBuildStatus, &i.Count, ); err != nil { return nil, err diff --git a/coderd/database/querier.go b/coderd/database/querier.go index bf1a1909fe..7d8f504cb5 100644 --- a/coderd/database/querier.go +++ b/coderd/database/querier.go @@ -123,6 +123,7 @@ type sqlcQuerier interface { // If the group is a user made group, then we need to check the group_members table. // If it is the "Everyone" group, then we need to check the organization_members table. 
GetGroupMembers(ctx context.Context, groupID uuid.UUID) ([]User, error) + GetGroupsByOrganizationAndUserID(ctx context.Context, arg GetGroupsByOrganizationAndUserIDParams) ([]Group, error) GetGroupsByOrganizationID(ctx context.Context, organizationID uuid.UUID) ([]Group, error) GetHealthSettings(ctx context.Context) (string, error) GetHungProvisionerJobs(ctx context.Context, updatedAt time.Time) ([]ProvisionerJob, error) @@ -335,7 +336,6 @@ type sqlcQuerier interface { InsertWorkspaceAgentLogs(ctx context.Context, arg InsertWorkspaceAgentLogsParams) ([]WorkspaceAgentLog, error) InsertWorkspaceAgentMetadata(ctx context.Context, arg InsertWorkspaceAgentMetadataParams) error InsertWorkspaceAgentScripts(ctx context.Context, arg InsertWorkspaceAgentScriptsParams) ([]WorkspaceAgentScript, error) - InsertWorkspaceAgentStat(ctx context.Context, arg InsertWorkspaceAgentStatParams) (WorkspaceAgentStat, error) InsertWorkspaceAgentStats(ctx context.Context, arg InsertWorkspaceAgentStatsParams) error InsertWorkspaceApp(ctx context.Context, arg InsertWorkspaceAppParams) (WorkspaceApp, error) InsertWorkspaceAppStats(ctx context.Context, arg InsertWorkspaceAppStatsParams) error diff --git a/coderd/database/queries.sql.go b/coderd/database/queries.sql.go index 12d0658989..5b2b54929d 100644 --- a/coderd/database/queries.sql.go +++ b/coderd/database/queries.sql.go @@ -1484,6 +1484,67 @@ func (q *sqlQuerier) GetGroupByOrgAndName(ctx context.Context, arg GetGroupByOrg return i, err } +const getGroupsByOrganizationAndUserID = `-- name: GetGroupsByOrganizationAndUserID :many +SELECT + groups.id, groups.name, groups.organization_id, groups.avatar_url, groups.quota_allowance, groups.display_name, groups.source +FROM + groups + -- If the group is a user made group, then we need to check the group_members table. 
+LEFT JOIN + group_members +ON + group_members.group_id = groups.id AND + group_members.user_id = $1 + -- If it is the "Everyone" group, then we need to check the organization_members table. +LEFT JOIN + organization_members +ON + organization_members.organization_id = groups.id AND + organization_members.user_id = $1 +WHERE + -- In either case, the group_id will only match an org or a group. + (group_members.user_id = $1 OR organization_members.user_id = $1) +AND + -- Ensure the group or organization is the specified organization. + groups.organization_id = $2 +` + +type GetGroupsByOrganizationAndUserIDParams struct { + UserID uuid.UUID `db:"user_id" json:"user_id"` + OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"` +} + +func (q *sqlQuerier) GetGroupsByOrganizationAndUserID(ctx context.Context, arg GetGroupsByOrganizationAndUserIDParams) ([]Group, error) { + rows, err := q.db.QueryContext(ctx, getGroupsByOrganizationAndUserID, arg.UserID, arg.OrganizationID) + if err != nil { + return nil, err + } + defer rows.Close() + var items []Group + for rows.Next() { + var i Group + if err := rows.Scan( + &i.ID, + &i.Name, + &i.OrganizationID, + &i.AvatarURL, + &i.QuotaAllowance, + &i.DisplayName, + &i.Source, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const getGroupsByOrganizationID = `-- name: GetGroupsByOrganizationID :many SELECT id, name, organization_id, avatar_url, quota_allowance, display_name, source @@ -10386,94 +10447,6 @@ func (q *sqlQuerier) GetWorkspaceAgentStatsAndLabels(ctx context.Context, create return items, nil } -const insertWorkspaceAgentStat = `-- name: InsertWorkspaceAgentStat :one -INSERT INTO - workspace_agent_stats ( - id, - created_at, - user_id, - workspace_id, - template_id, - agent_id, - connections_by_proto, - connection_count, - rx_packets, - 
rx_bytes, - tx_packets, - tx_bytes, - session_count_vscode, - session_count_jetbrains, - session_count_reconnecting_pty, - session_count_ssh, - connection_median_latency_ms - ) -VALUES - ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17) RETURNING id, created_at, user_id, agent_id, workspace_id, template_id, connections_by_proto, connection_count, rx_packets, rx_bytes, tx_packets, tx_bytes, connection_median_latency_ms, session_count_vscode, session_count_jetbrains, session_count_reconnecting_pty, session_count_ssh -` - -type InsertWorkspaceAgentStatParams struct { - ID uuid.UUID `db:"id" json:"id"` - CreatedAt time.Time `db:"created_at" json:"created_at"` - UserID uuid.UUID `db:"user_id" json:"user_id"` - WorkspaceID uuid.UUID `db:"workspace_id" json:"workspace_id"` - TemplateID uuid.UUID `db:"template_id" json:"template_id"` - AgentID uuid.UUID `db:"agent_id" json:"agent_id"` - ConnectionsByProto json.RawMessage `db:"connections_by_proto" json:"connections_by_proto"` - ConnectionCount int64 `db:"connection_count" json:"connection_count"` - RxPackets int64 `db:"rx_packets" json:"rx_packets"` - RxBytes int64 `db:"rx_bytes" json:"rx_bytes"` - TxPackets int64 `db:"tx_packets" json:"tx_packets"` - TxBytes int64 `db:"tx_bytes" json:"tx_bytes"` - SessionCountVSCode int64 `db:"session_count_vscode" json:"session_count_vscode"` - SessionCountJetBrains int64 `db:"session_count_jetbrains" json:"session_count_jetbrains"` - SessionCountReconnectingPTY int64 `db:"session_count_reconnecting_pty" json:"session_count_reconnecting_pty"` - SessionCountSSH int64 `db:"session_count_ssh" json:"session_count_ssh"` - ConnectionMedianLatencyMS float64 `db:"connection_median_latency_ms" json:"connection_median_latency_ms"` -} - -func (q *sqlQuerier) InsertWorkspaceAgentStat(ctx context.Context, arg InsertWorkspaceAgentStatParams) (WorkspaceAgentStat, error) { - row := q.db.QueryRowContext(ctx, insertWorkspaceAgentStat, - arg.ID, - arg.CreatedAt, - arg.UserID, - 
arg.WorkspaceID, - arg.TemplateID, - arg.AgentID, - arg.ConnectionsByProto, - arg.ConnectionCount, - arg.RxPackets, - arg.RxBytes, - arg.TxPackets, - arg.TxBytes, - arg.SessionCountVSCode, - arg.SessionCountJetBrains, - arg.SessionCountReconnectingPTY, - arg.SessionCountSSH, - arg.ConnectionMedianLatencyMS, - ) - var i WorkspaceAgentStat - err := row.Scan( - &i.ID, - &i.CreatedAt, - &i.UserID, - &i.AgentID, - &i.WorkspaceID, - &i.TemplateID, - &i.ConnectionsByProto, - &i.ConnectionCount, - &i.RxPackets, - &i.RxBytes, - &i.TxPackets, - &i.TxBytes, - &i.ConnectionMedianLatencyMS, - &i.SessionCountVSCode, - &i.SessionCountJetBrains, - &i.SessionCountReconnectingPTY, - &i.SessionCountSSH, - ) - return i, err -} - const insertWorkspaceAgentStats = `-- name: InsertWorkspaceAgentStats :exec INSERT INTO workspace_agent_stats ( @@ -12280,7 +12253,8 @@ SELECT latest_build.completed_at as latest_build_completed_at, latest_build.canceled_at as latest_build_canceled_at, latest_build.error as latest_build_error, - latest_build.transition as latest_build_transition + latest_build.transition as latest_build_transition, + latest_build.job_status as latest_build_status FROM workspaces JOIN @@ -12302,7 +12276,7 @@ LEFT JOIN LATERAL ( provisioner_jobs.job_status FROM workspace_builds - LEFT JOIN + JOIN provisioner_jobs ON provisioner_jobs.id = workspace_builds.job_id @@ -12507,7 +12481,7 @@ WHERE -- @authorize_filter ), filtered_workspaces_order AS ( SELECT - fw.id, fw.created_at, fw.updated_at, fw.owner_id, fw.organization_id, fw.template_id, fw.deleted, fw.name, fw.autostart_schedule, fw.ttl, fw.last_used_at, fw.dormant_at, fw.deleting_at, fw.automatic_updates, fw.favorite, fw.template_name, fw.template_version_id, fw.template_version_name, fw.username, fw.latest_build_completed_at, fw.latest_build_canceled_at, fw.latest_build_error, fw.latest_build_transition + fw.id, fw.created_at, fw.updated_at, fw.owner_id, fw.organization_id, fw.template_id, fw.deleted, fw.name, 
fw.autostart_schedule, fw.ttl, fw.last_used_at, fw.dormant_at, fw.deleting_at, fw.automatic_updates, fw.favorite, fw.template_name, fw.template_version_id, fw.template_version_name, fw.username, fw.latest_build_completed_at, fw.latest_build_canceled_at, fw.latest_build_error, fw.latest_build_transition, fw.latest_build_status FROM filtered_workspaces fw ORDER BY @@ -12528,7 +12502,7 @@ WHERE $19 ), filtered_workspaces_order_with_summary AS ( SELECT - fwo.id, fwo.created_at, fwo.updated_at, fwo.owner_id, fwo.organization_id, fwo.template_id, fwo.deleted, fwo.name, fwo.autostart_schedule, fwo.ttl, fwo.last_used_at, fwo.dormant_at, fwo.deleting_at, fwo.automatic_updates, fwo.favorite, fwo.template_name, fwo.template_version_id, fwo.template_version_name, fwo.username, fwo.latest_build_completed_at, fwo.latest_build_canceled_at, fwo.latest_build_error, fwo.latest_build_transition + fwo.id, fwo.created_at, fwo.updated_at, fwo.owner_id, fwo.organization_id, fwo.template_id, fwo.deleted, fwo.name, fwo.autostart_schedule, fwo.ttl, fwo.last_used_at, fwo.dormant_at, fwo.deleting_at, fwo.automatic_updates, fwo.favorite, fwo.template_name, fwo.template_version_id, fwo.template_version_name, fwo.username, fwo.latest_build_completed_at, fwo.latest_build_canceled_at, fwo.latest_build_error, fwo.latest_build_transition, fwo.latest_build_status FROM filtered_workspaces_order fwo -- Return a technical summary row with total count of workspaces. 
@@ -12558,7 +12532,8 @@ WHERE '0001-01-01 00:00:00+00'::timestamptz, -- latest_build_completed_at, '0001-01-01 00:00:00+00'::timestamptz, -- latest_build_canceled_at, '', -- latest_build_error - 'start'::workspace_transition -- latest_build_transition + 'start'::workspace_transition, -- latest_build_transition + 'unknown'::provisioner_job_status -- latest_build_status WHERE $21 :: boolean = true ), total_count AS ( @@ -12568,7 +12543,7 @@ WHERE filtered_workspaces ) SELECT - fwos.id, fwos.created_at, fwos.updated_at, fwos.owner_id, fwos.organization_id, fwos.template_id, fwos.deleted, fwos.name, fwos.autostart_schedule, fwos.ttl, fwos.last_used_at, fwos.dormant_at, fwos.deleting_at, fwos.automatic_updates, fwos.favorite, fwos.template_name, fwos.template_version_id, fwos.template_version_name, fwos.username, fwos.latest_build_completed_at, fwos.latest_build_canceled_at, fwos.latest_build_error, fwos.latest_build_transition, + fwos.id, fwos.created_at, fwos.updated_at, fwos.owner_id, fwos.organization_id, fwos.template_id, fwos.deleted, fwos.name, fwos.autostart_schedule, fwos.ttl, fwos.last_used_at, fwos.dormant_at, fwos.deleting_at, fwos.automatic_updates, fwos.favorite, fwos.template_name, fwos.template_version_id, fwos.template_version_name, fwos.username, fwos.latest_build_completed_at, fwos.latest_build_canceled_at, fwos.latest_build_error, fwos.latest_build_transition, fwos.latest_build_status, tc.count FROM filtered_workspaces_order_with_summary fwos @@ -12601,30 +12576,31 @@ type GetWorkspacesParams struct { } type GetWorkspacesRow struct { - ID uuid.UUID `db:"id" json:"id"` - CreatedAt time.Time `db:"created_at" json:"created_at"` - UpdatedAt time.Time `db:"updated_at" json:"updated_at"` - OwnerID uuid.UUID `db:"owner_id" json:"owner_id"` - OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"` - TemplateID uuid.UUID `db:"template_id" json:"template_id"` - Deleted bool `db:"deleted" json:"deleted"` - Name string `db:"name" json:"name"` - 
AutostartSchedule sql.NullString `db:"autostart_schedule" json:"autostart_schedule"` - Ttl sql.NullInt64 `db:"ttl" json:"ttl"` - LastUsedAt time.Time `db:"last_used_at" json:"last_used_at"` - DormantAt sql.NullTime `db:"dormant_at" json:"dormant_at"` - DeletingAt sql.NullTime `db:"deleting_at" json:"deleting_at"` - AutomaticUpdates AutomaticUpdates `db:"automatic_updates" json:"automatic_updates"` - Favorite bool `db:"favorite" json:"favorite"` - TemplateName string `db:"template_name" json:"template_name"` - TemplateVersionID uuid.UUID `db:"template_version_id" json:"template_version_id"` - TemplateVersionName sql.NullString `db:"template_version_name" json:"template_version_name"` - Username string `db:"username" json:"username"` - LatestBuildCompletedAt sql.NullTime `db:"latest_build_completed_at" json:"latest_build_completed_at"` - LatestBuildCanceledAt sql.NullTime `db:"latest_build_canceled_at" json:"latest_build_canceled_at"` - LatestBuildError sql.NullString `db:"latest_build_error" json:"latest_build_error"` - LatestBuildTransition WorkspaceTransition `db:"latest_build_transition" json:"latest_build_transition"` - Count int64 `db:"count" json:"count"` + ID uuid.UUID `db:"id" json:"id"` + CreatedAt time.Time `db:"created_at" json:"created_at"` + UpdatedAt time.Time `db:"updated_at" json:"updated_at"` + OwnerID uuid.UUID `db:"owner_id" json:"owner_id"` + OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"` + TemplateID uuid.UUID `db:"template_id" json:"template_id"` + Deleted bool `db:"deleted" json:"deleted"` + Name string `db:"name" json:"name"` + AutostartSchedule sql.NullString `db:"autostart_schedule" json:"autostart_schedule"` + Ttl sql.NullInt64 `db:"ttl" json:"ttl"` + LastUsedAt time.Time `db:"last_used_at" json:"last_used_at"` + DormantAt sql.NullTime `db:"dormant_at" json:"dormant_at"` + DeletingAt sql.NullTime `db:"deleting_at" json:"deleting_at"` + AutomaticUpdates AutomaticUpdates `db:"automatic_updates" 
json:"automatic_updates"` + Favorite bool `db:"favorite" json:"favorite"` + TemplateName string `db:"template_name" json:"template_name"` + TemplateVersionID uuid.UUID `db:"template_version_id" json:"template_version_id"` + TemplateVersionName sql.NullString `db:"template_version_name" json:"template_version_name"` + Username string `db:"username" json:"username"` + LatestBuildCompletedAt sql.NullTime `db:"latest_build_completed_at" json:"latest_build_completed_at"` + LatestBuildCanceledAt sql.NullTime `db:"latest_build_canceled_at" json:"latest_build_canceled_at"` + LatestBuildError sql.NullString `db:"latest_build_error" json:"latest_build_error"` + LatestBuildTransition WorkspaceTransition `db:"latest_build_transition" json:"latest_build_transition"` + LatestBuildStatus ProvisionerJobStatus `db:"latest_build_status" json:"latest_build_status"` + Count int64 `db:"count" json:"count"` } // build_params is used to filter by build parameters if present. @@ -12685,6 +12661,7 @@ func (q *sqlQuerier) GetWorkspaces(ctx context.Context, arg GetWorkspacesParams) &i.LatestBuildCanceledAt, &i.LatestBuildError, &i.LatestBuildTransition, + &i.LatestBuildStatus, &i.Count, ); err != nil { return nil, err diff --git a/coderd/database/queries/groups.sql b/coderd/database/queries/groups.sql index e772d21a58..53d0b25874 100644 --- a/coderd/database/queries/groups.sql +++ b/coderd/database/queries/groups.sql @@ -28,6 +28,31 @@ FROM WHERE organization_id = $1; +-- name: GetGroupsByOrganizationAndUserID :many +SELECT + groups.* +FROM + groups + -- If the group is a user made group, then we need to check the group_members table. +LEFT JOIN + group_members +ON + group_members.group_id = groups.id AND + group_members.user_id = @user_id + -- If it is the "Everyone" group, then we need to check the organization_members table. 
+LEFT JOIN + organization_members +ON + organization_members.organization_id = groups.id AND + organization_members.user_id = @user_id +WHERE + -- In either case, the group_id will only match an org or a group. + (group_members.user_id = @user_id OR organization_members.user_id = @user_id) +AND + -- Ensure the group or organization is the specified organization. + groups.organization_id = @organization_id; + + -- name: InsertGroup :one INSERT INTO groups ( id, diff --git a/coderd/database/queries/workspaceagentstats.sql b/coderd/database/queries/workspaceagentstats.sql index cf059121de..4b7f86fba4 100644 --- a/coderd/database/queries/workspaceagentstats.sql +++ b/coderd/database/queries/workspaceagentstats.sql @@ -1,27 +1,3 @@ --- name: InsertWorkspaceAgentStat :one -INSERT INTO - workspace_agent_stats ( - id, - created_at, - user_id, - workspace_id, - template_id, - agent_id, - connections_by_proto, - connection_count, - rx_packets, - rx_bytes, - tx_packets, - tx_bytes, - session_count_vscode, - session_count_jetbrains, - session_count_reconnecting_pty, - session_count_ssh, - connection_median_latency_ms - ) -VALUES - ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17) RETURNING *; - -- name: InsertWorkspaceAgentStats :exec INSERT INTO workspace_agent_stats ( diff --git a/coderd/database/queries/workspaces.sql b/coderd/database/queries/workspaces.sql index 767280634f..616e83a2ba 100644 --- a/coderd/database/queries/workspaces.sql +++ b/coderd/database/queries/workspaces.sql @@ -96,7 +96,8 @@ SELECT latest_build.completed_at as latest_build_completed_at, latest_build.canceled_at as latest_build_canceled_at, latest_build.error as latest_build_error, - latest_build.transition as latest_build_transition + latest_build.transition as latest_build_transition, + latest_build.job_status as latest_build_status FROM workspaces JOIN @@ -118,7 +119,7 @@ LEFT JOIN LATERAL ( provisioner_jobs.job_status FROM workspace_builds - LEFT JOIN + JOIN 
provisioner_jobs ON provisioner_jobs.id = workspace_builds.job_id @@ -374,7 +375,8 @@ WHERE '0001-01-01 00:00:00+00'::timestamptz, -- latest_build_completed_at, '0001-01-01 00:00:00+00'::timestamptz, -- latest_build_canceled_at, '', -- latest_build_error - 'start'::workspace_transition -- latest_build_transition + 'start'::workspace_transition, -- latest_build_transition + 'unknown'::provisioner_job_status -- latest_build_status WHERE @with_summary :: boolean = true ), total_count AS ( diff --git a/coderd/database/unique_constraint.go b/coderd/database/unique_constraint.go index 52de0a50f6..9db8af72c8 100644 --- a/coderd/database/unique_constraint.go +++ b/coderd/database/unique_constraint.go @@ -82,6 +82,7 @@ const ( UniqueOrganizationsSingleDefaultOrg UniqueConstraint = "organizations_single_default_org" // CREATE UNIQUE INDEX organizations_single_default_org ON organizations USING btree (is_default) WHERE (is_default = true); UniqueTemplateUsageStatsStartTimeTemplateIDUserIDIndex UniqueConstraint = "template_usage_stats_start_time_template_id_user_id_idx" // CREATE UNIQUE INDEX template_usage_stats_start_time_template_id_user_id_idx ON template_usage_stats USING btree (start_time, template_id, user_id); UniqueTemplatesOrganizationIDNameIndex UniqueConstraint = "templates_organization_id_name_idx" // CREATE UNIQUE INDEX templates_organization_id_name_idx ON templates USING btree (organization_id, lower((name)::text)) WHERE (deleted = false); + UniqueUserLinksLinkedIDLoginTypeIndex UniqueConstraint = "user_links_linked_id_login_type_idx" // CREATE UNIQUE INDEX user_links_linked_id_login_type_idx ON user_links USING btree (linked_id, login_type) WHERE (linked_id <> ''::text); UniqueUsersEmailLowerIndex UniqueConstraint = "users_email_lower_idx" // CREATE UNIQUE INDEX users_email_lower_idx ON users USING btree (lower(email)) WHERE (deleted = false); UniqueUsersUsernameLowerIndex UniqueConstraint = "users_username_lower_idx" // CREATE UNIQUE INDEX 
users_username_lower_idx ON users USING btree (lower(username)) WHERE (deleted = false); UniqueWorkspaceProxiesLowerNameIndex UniqueConstraint = "workspace_proxies_lower_name_idx" // CREATE UNIQUE INDEX workspace_proxies_lower_name_idx ON workspace_proxies USING btree (lower(name)) WHERE (deleted = false); diff --git a/coderd/externalauth/externalauth_test.go b/coderd/externalauth/externalauth_test.go index 84fbe4ff5d..88f3b7a3b5 100644 --- a/coderd/externalauth/externalauth_test.go +++ b/coderd/externalauth/externalauth_test.go @@ -3,9 +3,11 @@ package externalauth_test import ( "context" "encoding/json" + "fmt" "net/http" "net/http/httptest" "net/url" + "strings" "testing" "time" @@ -13,6 +15,7 @@ import ( "github.com/golang-jwt/jwt/v4" "github.com/google/uuid" "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/oauth2" "golang.org/x/xerrors" @@ -417,6 +420,78 @@ func TestConvertYAML(t *testing.T) { }) } +// TestConstantQueryParams verifies a constant query parameter can be set in the +// "authenticate" url for external auth applications, and it will be carried forward +// to actual auth requests. +// This unit test was specifically created for Auth0 which can set an +// audience query parameter in its /authorize endpoint.
+func TestConstantQueryParams(t *testing.T) { + t.Parallel() + const constantQueryParamKey = "audience" + const constantQueryParamValue = "foobar" + constantQueryParam := fmt.Sprintf("%s=%s", constantQueryParamKey, constantQueryParamValue) + fake, config, _ := setupOauth2Test(t, testConfig{ + FakeIDPOpts: []oidctest.FakeIDPOpt{ + oidctest.WithMiddlewares(func(next http.Handler) http.Handler { + return http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { + if strings.Contains(request.URL.Path, "authorize") { + // Assert has the audience query param + assert.Equal(t, request.URL.Query().Get(constantQueryParamKey), constantQueryParamValue) + } + next.ServeHTTP(writer, request) + }) + }), + }, + CoderOIDCConfigOpts: []func(cfg *coderd.OIDCConfig){ + func(cfg *coderd.OIDCConfig) { + // Include a constant query parameter. + authURL, err := url.Parse(cfg.OAuth2Config.(*oauth2.Config).Endpoint.AuthURL) + require.NoError(t, err) + + authURL.RawQuery = url.Values{constantQueryParamKey: []string{constantQueryParamValue}}.Encode() + cfg.OAuth2Config.(*oauth2.Config).Endpoint.AuthURL = authURL.String() + require.Contains(t, cfg.OAuth2Config.(*oauth2.Config).Endpoint.AuthURL, constantQueryParam) + }, + }, + }) + + callbackCalled := false + fake.SetCoderdCallbackHandler(func(writer http.ResponseWriter, request *http.Request) { + // Just record the callback was hit, and the auth succeeded. + callbackCalled = true + }) + + // Verify the AuthURL endpoint contains the constant query parameter and is a valid URL. + // It should look something like: + // http://127.0.0.1:<port>/oauth2/authorize? + // audience=foobar& + // client_id=<client_id>& + // redirect_uri=<redirect_uri>& + // response_type=code& + // scope=openid+email+profile& + // state=state + const state = "state" + rawAuthURL := config.AuthCodeURL(state) + // Parsing the url is not perfect. It allows imperfections like the query + // params having 2 question marks '?a=foo?b=bar'.
+ // So use it to validate, then verify the raw url is as expected. + authURL, err := url.Parse(rawAuthURL) + require.NoError(t, err) + require.Equal(t, authURL.Query().Get(constantQueryParamKey), constantQueryParamValue) + // We are not using a real server, so it fakes https://coder.com + require.Equal(t, authURL.Scheme, "https") + // Validate the raw URL. + // Double check only 1 '?' exists. Url parsing allows multiple '?' in the query string. + require.Equal(t, strings.Count(rawAuthURL, "?"), 1) + + // Actually run an auth request. Although it says OIDC, the flow is the same + // for oauth2. + //nolint:bodyclose + resp := fake.OIDCCallback(t, state, jwt.MapClaims{}) + require.True(t, callbackCalled) + require.Equal(t, http.StatusOK, resp.StatusCode) +} + type testConfig struct { FakeIDPOpts []oidctest.FakeIDPOpt CoderOIDCConfigOpts []func(cfg *coderd.OIDCConfig) @@ -433,6 +508,10 @@ type testConfig struct { func setupOauth2Test(t *testing.T, settings testConfig) (*oidctest.FakeIDP, *externalauth.Config, database.ExternalAuthLink) { t.Helper() + if settings.ExternalAuthOpt == nil { + settings.ExternalAuthOpt = func(_ *externalauth.Config) {} + } + const providerID = "test-idp" fake := oidctest.NewFakeIDP(t, append([]oidctest.FakeIDPOpt{}, settings.FakeIDPOpts...)..., diff --git a/coderd/identityprovider/tokens.go b/coderd/identityprovider/tokens.go index 0673eb7d1a..e9c9e743e7 100644 --- a/coderd/identityprovider/tokens.go +++ b/coderd/identityprovider/tokens.go @@ -7,7 +7,6 @@ import ( "fmt" "net/http" "net/url" - "time" "github.com/google/uuid" "golang.org/x/oauth2" @@ -75,7 +74,11 @@ func extractTokenParams(r *http.Request, callbackURL *url.URL) (tokenParams, []c return params, nil, nil } -func Tokens(db database.Store, defaultLifetime time.Duration) http.HandlerFunc { +// Tokens +// TODO: the sessions lifetime config passed is for coder api tokens. +// Should there be a separate config for oauth2 tokens? They are related, +// but they are not the same. 
+func Tokens(db database.Store, lifetimes codersdk.SessionLifetime) http.HandlerFunc { return func(rw http.ResponseWriter, r *http.Request) { ctx := r.Context() app := httpmw.OAuth2ProviderApp(r) @@ -104,9 +107,9 @@ func Tokens(db database.Store, defaultLifetime time.Duration) http.HandlerFunc { switch params.grantType { // TODO: Client creds, device code. case codersdk.OAuth2ProviderGrantTypeRefreshToken: - token, err = refreshTokenGrant(ctx, db, app, defaultLifetime, params) + token, err = refreshTokenGrant(ctx, db, app, lifetimes, params) case codersdk.OAuth2ProviderGrantTypeAuthorizationCode: - token, err = authorizationCodeGrant(ctx, db, app, defaultLifetime, params) + token, err = authorizationCodeGrant(ctx, db, app, lifetimes, params) default: // Grant types are validated by the parser, so getting through here means // the developer added a type but forgot to add a case here. @@ -137,7 +140,7 @@ func Tokens(db database.Store, defaultLifetime time.Duration) http.HandlerFunc { } } -func authorizationCodeGrant(ctx context.Context, db database.Store, app database.OAuth2ProviderApp, defaultLifetime time.Duration, params tokenParams) (oauth2.Token, error) { +func authorizationCodeGrant(ctx context.Context, db database.Store, app database.OAuth2ProviderApp, lifetimes codersdk.SessionLifetime, params tokenParams) (oauth2.Token, error) { // Validate the client secret. secret, err := parseSecret(params.clientSecret) if err != nil { @@ -195,11 +198,9 @@ func authorizationCodeGrant(ctx context.Context, db database.Store, app database // TODO: We are ignoring scopes for now. tokenName := fmt.Sprintf("%s_%s_oauth_session_token", dbCode.UserID, app.ID) key, sessionToken, err := apikey.Generate(apikey.CreateParams{ - UserID: dbCode.UserID, - LoginType: database.LoginTypeOAuth2ProviderApp, - // TODO: This is just the lifetime for api keys, maybe have its own config - // settings. 
#11693 - DefaultLifetime: defaultLifetime, + UserID: dbCode.UserID, + LoginType: database.LoginTypeOAuth2ProviderApp, + DefaultLifetime: lifetimes.DefaultDuration.Value(), // For now, we allow only one token per app and user at a time. TokenName: tokenName, }) @@ -271,7 +272,7 @@ func authorizationCodeGrant(ctx context.Context, db database.Store, app database }, nil } -func refreshTokenGrant(ctx context.Context, db database.Store, app database.OAuth2ProviderApp, defaultLifetime time.Duration, params tokenParams) (oauth2.Token, error) { +func refreshTokenGrant(ctx context.Context, db database.Store, app database.OAuth2ProviderApp, lifetimes codersdk.SessionLifetime, params tokenParams) (oauth2.Token, error) { // Validate the token. token, err := parseSecret(params.refreshToken) if err != nil { @@ -326,11 +327,9 @@ func refreshTokenGrant(ctx context.Context, db database.Store, app database.OAut // TODO: We are ignoring scopes for now. tokenName := fmt.Sprintf("%s_%s_oauth_session_token", prevKey.UserID, app.ID) key, sessionToken, err := apikey.Generate(apikey.CreateParams{ - UserID: prevKey.UserID, - LoginType: database.LoginTypeOAuth2ProviderApp, - // TODO: This is just the lifetime for api keys, maybe have its own config - // settings. #11693 - DefaultLifetime: defaultLifetime, + UserID: prevKey.UserID, + LoginType: database.LoginTypeOAuth2ProviderApp, + DefaultLifetime: lifetimes.DefaultDuration.Value(), // For now, we allow only one token per app and user at a time. 
TokenName: tokenName, }) diff --git a/coderd/metricscache/metricscache_test.go b/coderd/metricscache/metricscache_test.go index 391017aaba..bcc9396d3c 100644 --- a/coderd/metricscache/metricscache_test.go +++ b/coderd/metricscache/metricscache_test.go @@ -3,6 +3,7 @@ package metricscache_test import ( "context" "database/sql" + "encoding/json" "testing" "time" @@ -280,14 +281,25 @@ func TestCache_DeploymentStats(t *testing.T) { }) defer cache.Close() - _, err := db.InsertWorkspaceAgentStat(context.Background(), database.InsertWorkspaceAgentStatParams{ - ID: uuid.New(), - AgentID: uuid.New(), - CreatedAt: dbtime.Now(), - ConnectionCount: 1, - RxBytes: 1, - TxBytes: 1, - SessionCountVSCode: 1, + err := db.InsertWorkspaceAgentStats(context.Background(), database.InsertWorkspaceAgentStatsParams{ + ID: []uuid.UUID{uuid.New()}, + CreatedAt: []time.Time{dbtime.Now()}, + WorkspaceID: []uuid.UUID{uuid.New()}, + UserID: []uuid.UUID{uuid.New()}, + TemplateID: []uuid.UUID{uuid.New()}, + AgentID: []uuid.UUID{uuid.New()}, + ConnectionsByProto: json.RawMessage(`[{}]`), + + RxPackets: []int64{0}, + RxBytes: []int64{1}, + TxPackets: []int64{0}, + TxBytes: []int64{1}, + ConnectionCount: []int64{1}, + SessionCountVSCode: []int64{1}, + SessionCountJetBrains: []int64{0}, + SessionCountReconnectingPTY: []int64{0}, + SessionCountSSH: []int64{0}, + ConnectionMedianLatencyMS: []float64{10}, }) require.NoError(t, err) diff --git a/coderd/oauth2.go b/coderd/oauth2.go index 9e2df641bf..ef68e93a1f 100644 --- a/coderd/oauth2.go +++ b/coderd/oauth2.go @@ -354,7 +354,7 @@ func (api *API) getOAuth2ProviderAppAuthorize() http.HandlerFunc { // @Success 200 {object} oauth2.Token // @Router /oauth2/tokens [post] func (api *API) postOAuth2ProviderAppToken() http.HandlerFunc { - return identityprovider.Tokens(api.Database, api.DeploymentValues.SessionDuration.Value()) + return identityprovider.Tokens(api.Database, api.DeploymentValues.Sessions) } // @Summary Delete OAuth2 application tokens. 
diff --git a/coderd/prometheusmetrics/prometheusmetrics.go b/coderd/prometheusmetrics/prometheusmetrics.go index b2c4b46677..4d3f1d1a04 100644 --- a/coderd/prometheusmetrics/prometheusmetrics.go +++ b/coderd/prometheusmetrics/prometheusmetrics.go @@ -24,10 +24,12 @@ import ( "github.com/coder/coder/v2/tailnet" ) +const defaultRefreshRate = time.Minute + // ActiveUsers tracks the number of users that have authenticated within the past hour. func ActiveUsers(ctx context.Context, registerer prometheus.Registerer, db database.Store, duration time.Duration) (func(), error) { if duration == 0 { - duration = 5 * time.Minute + duration = defaultRefreshRate } gauge := prometheus.NewGauge(prometheus.GaugeOpts{ @@ -72,36 +74,42 @@ func ActiveUsers(ctx context.Context, registerer prometheus.Registerer, db datab } // Workspaces tracks the total number of workspaces with labels on status. -func Workspaces(ctx context.Context, registerer prometheus.Registerer, db database.Store, duration time.Duration) (func(), error) { +func Workspaces(ctx context.Context, logger slog.Logger, registerer prometheus.Registerer, db database.Store, duration time.Duration) (func(), error) { if duration == 0 { - duration = 5 * time.Minute + duration = defaultRefreshRate } - gauge := prometheus.NewGaugeVec(prometheus.GaugeOpts{ + workspaceLatestBuildTotals := prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "coderd", Subsystem: "api", Name: "workspace_latest_build_total", - Help: "The latest workspace builds with a status.", + Help: "The current number of workspace builds by status.", }, []string{"status"}) - err := registerer.Register(gauge) - if err != nil { + if err := registerer.Register(workspaceLatestBuildTotals); err != nil { + return nil, err + } + + workspaceLatestBuildStatuses := prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "coderd", + Name: "workspace_latest_build_status", + Help: "The current workspace statuses by template, transition, and owner.", + }, 
[]string{"status", "template_name", "template_version", "workspace_owner", "workspace_transition"}) + if err := registerer.Register(workspaceLatestBuildStatuses); err != nil { return nil, err } - // This exists so the prometheus metric exports immediately when set. - // It helps with tests so they don't have to wait for a tick. - gauge.WithLabelValues("pending").Set(0) ctx, cancelFunc := context.WithCancel(ctx) done := make(chan struct{}) - // Use time.Nanosecond to force an initial tick. It will be reset to the - // correct duration after executing once. - ticker := time.NewTicker(time.Nanosecond) - doTick := func() { - defer ticker.Reset(duration) - + updateWorkspaceTotals := func() { builds, err := db.GetLatestWorkspaceBuilds(ctx) if err != nil { + if errors.Is(err, sql.ErrNoRows) { + // clear all series if there are no database entries + workspaceLatestBuildTotals.Reset() + } + + logger.Warn(ctx, "failed to load latest workspace builds", slog.Error(err)) return } jobIDs := make([]uuid.UUID, 0, len(builds)) @@ -110,16 +118,53 @@ func Workspaces(ctx context.Context, registerer prometheus.Registerer, db databa } jobs, err := db.GetProvisionerJobsByIDs(ctx, jobIDs) if err != nil { + ids := make([]string, 0, len(jobIDs)) + for _, id := range jobIDs { + ids = append(ids, id.String()) + } + + logger.Warn(ctx, "failed to load provisioner jobs", slog.F("ids", ids), slog.Error(err)) return } - gauge.Reset() + workspaceLatestBuildTotals.Reset() for _, job := range jobs { status := codersdk.ProvisionerJobStatus(job.JobStatus) - gauge.WithLabelValues(string(status)).Add(1) + workspaceLatestBuildTotals.WithLabelValues(string(status)).Add(1) } } + updateWorkspaceStatuses := func() { + ws, err := db.GetWorkspaces(ctx, database.GetWorkspacesParams{ + Deleted: false, + WithSummary: false, + }) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + // clear all series if there are no database entries + workspaceLatestBuildStatuses.Reset() + } + + logger.Warn(ctx, "failed to 
load active workspaces", slog.Error(err)) + return + } + + workspaceLatestBuildStatuses.Reset() + for _, w := range ws { + workspaceLatestBuildStatuses.WithLabelValues(string(w.LatestBuildStatus), w.TemplateName, w.TemplateVersionName.String, w.Username, string(w.LatestBuildTransition)).Add(1) + } + } + + // Use time.Nanosecond to force an initial tick. It will be reset to the + // correct duration after executing once. + ticker := time.NewTicker(time.Nanosecond) + doTick := func() { + defer ticker.Reset(duration) + + updateWorkspaceTotals() + updateWorkspaceStatuses() + } + go func() { defer close(done) defer ticker.Stop() @@ -141,7 +186,7 @@ func Workspaces(ctx context.Context, registerer prometheus.Registerer, db databa // Agents tracks the total number of workspaces with labels on status. func Agents(ctx context.Context, logger slog.Logger, registerer prometheus.Registerer, db database.Store, coordinator *atomic.Pointer[tailnet.Coordinator], derpMapFn func() *tailcfg.DERPMap, agentInactiveDisconnectTimeout, duration time.Duration) (func(), error) { if duration == 0 { - duration = 1 * time.Minute + duration = defaultRefreshRate } agentsGauge := NewCachedGaugeVec(prometheus.NewGaugeVec(prometheus.GaugeOpts{ @@ -330,7 +375,7 @@ func Agents(ctx context.Context, logger slog.Logger, registerer prometheus.Regis func AgentStats(ctx context.Context, logger slog.Logger, registerer prometheus.Registerer, db database.Store, initialCreateAfter time.Time, duration time.Duration, aggregateByLabels []string) (func(), error) { if duration == 0 { - duration = 1 * time.Minute + duration = defaultRefreshRate } if len(aggregateByLabels) == 0 { diff --git a/coderd/prometheusmetrics/prometheusmetrics_test.go b/coderd/prometheusmetrics/prometheusmetrics_test.go index 32e97f84c3..2322982a65 100644 --- a/coderd/prometheusmetrics/prometheusmetrics_test.go +++ b/coderd/prometheusmetrics/prometheusmetrics_test.go @@ -31,6 +31,7 @@ import ( 
"github.com/coder/coder/v2/coderd/prometheusmetrics" "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/codersdk/agentsdk" + "github.com/coder/coder/v2/cryptorand" "github.com/coder/coder/v2/provisioner/echo" "github.com/coder/coder/v2/provisionersdk/proto" "github.com/coder/coder/v2/tailnet" @@ -110,89 +111,9 @@ func TestActiveUsers(t *testing.T) { } } -func TestWorkspaces(t *testing.T) { +func TestWorkspaceLatestBuildTotals(t *testing.T) { t.Parallel() - insertRunning := func(db database.Store) database.ProvisionerJob { - job, err := db.InsertProvisionerJob(context.Background(), database.InsertProvisionerJobParams{ - ID: uuid.New(), - CreatedAt: dbtime.Now(), - UpdatedAt: dbtime.Now(), - Provisioner: database.ProvisionerTypeEcho, - StorageMethod: database.ProvisionerStorageMethodFile, - Type: database.ProvisionerJobTypeWorkspaceBuild, - }) - require.NoError(t, err) - err = db.InsertWorkspaceBuild(context.Background(), database.InsertWorkspaceBuildParams{ - ID: uuid.New(), - WorkspaceID: uuid.New(), - JobID: job.ID, - BuildNumber: 1, - Transition: database.WorkspaceTransitionStart, - Reason: database.BuildReasonInitiator, - }) - require.NoError(t, err) - // This marks the job as started. 
- _, err = db.AcquireProvisionerJob(context.Background(), database.AcquireProvisionerJobParams{ - OrganizationID: job.OrganizationID, - StartedAt: sql.NullTime{ - Time: dbtime.Now(), - Valid: true, - }, - Types: []database.ProvisionerType{database.ProvisionerTypeEcho}, - }) - require.NoError(t, err) - return job - } - - insertCanceled := func(db database.Store) { - job := insertRunning(db) - err := db.UpdateProvisionerJobWithCancelByID(context.Background(), database.UpdateProvisionerJobWithCancelByIDParams{ - ID: job.ID, - CanceledAt: sql.NullTime{ - Time: dbtime.Now(), - Valid: true, - }, - }) - require.NoError(t, err) - err = db.UpdateProvisionerJobWithCompleteByID(context.Background(), database.UpdateProvisionerJobWithCompleteByIDParams{ - ID: job.ID, - CompletedAt: sql.NullTime{ - Time: dbtime.Now(), - Valid: true, - }, - }) - require.NoError(t, err) - } - - insertFailed := func(db database.Store) { - job := insertRunning(db) - err := db.UpdateProvisionerJobWithCompleteByID(context.Background(), database.UpdateProvisionerJobWithCompleteByIDParams{ - ID: job.ID, - CompletedAt: sql.NullTime{ - Time: dbtime.Now(), - Valid: true, - }, - Error: sql.NullString{ - String: "failed", - Valid: true, - }, - }) - require.NoError(t, err) - } - - insertSuccess := func(db database.Store) { - job := insertRunning(db) - err := db.UpdateProvisionerJobWithCompleteByID(context.Background(), database.UpdateProvisionerJobWithCompleteByIDParams{ - ID: job.ID, - CompletedAt: sql.NullTime{ - Time: dbtime.Now(), - Valid: true, - }, - }) - require.NoError(t, err) - } - for _, tc := range []struct { Name string Database func() database.Store @@ -208,13 +129,13 @@ func TestWorkspaces(t *testing.T) { Name: "Multiple", Database: func() database.Store { db := dbmem.New() - insertCanceled(db) - insertFailed(db) - insertFailed(db) - insertSuccess(db) - insertSuccess(db) - insertSuccess(db) - insertRunning(db) + insertCanceled(t, db) + insertFailed(t, db) + insertFailed(t, db) + insertSuccess(t, 
db) + insertSuccess(t, db) + insertSuccess(t, db) + insertRunning(t, db) return db }, Total: 7, @@ -229,29 +150,32 @@ func TestWorkspaces(t *testing.T) { t.Run(tc.Name, func(t *testing.T) { t.Parallel() registry := prometheus.NewRegistry() - closeFunc, err := prometheusmetrics.Workspaces(context.Background(), registry, tc.Database(), time.Millisecond) + closeFunc, err := prometheusmetrics.Workspaces(context.Background(), slogtest.Make(t, nil).Leveled(slog.LevelWarn), registry, tc.Database(), testutil.IntervalFast) require.NoError(t, err) t.Cleanup(closeFunc) require.Eventually(t, func() bool { metrics, err := registry.Gather() assert.NoError(t, err) - if len(metrics) < 1 { - return false - } sum := 0 - for _, metric := range metrics[0].Metric { - count, ok := tc.Status[codersdk.ProvisionerJobStatus(metric.Label[0].GetValue())] - if metric.Gauge.GetValue() == 0 { + for _, m := range metrics { + if m.GetName() != "coderd_api_workspace_latest_build_total" { continue } - if !ok { - t.Fail() + + for _, metric := range m.Metric { + count, ok := tc.Status[codersdk.ProvisionerJobStatus(metric.Label[0].GetValue())] + if metric.Gauge.GetValue() == 0 { + continue + } + if !ok { + t.Fail() + } + if metric.Gauge.GetValue() != float64(count) { + return false + } + sum += int(metric.Gauge.GetValue()) } - if metric.Gauge.GetValue() != float64(count) { - return false - } - sum += int(metric.Gauge.GetValue()) } t.Logf("sum %d == total %d", sum, tc.Total) return sum == tc.Total @@ -260,6 +184,90 @@ func TestWorkspaces(t *testing.T) { } } +func TestWorkspaceLatestBuildStatuses(t *testing.T) { + t.Parallel() + + for _, tc := range []struct { + Name string + Database func() database.Store + ExpectedWorkspaces int + ExpectedStatuses map[codersdk.ProvisionerJobStatus]int + }{{ + Name: "None", + Database: func() database.Store { + return dbmem.New() + }, + ExpectedWorkspaces: 0, + }, { + Name: "Multiple", + Database: func() database.Store { + db := dbmem.New() + insertTemplates(t, db) + 
insertCanceled(t, db) + insertFailed(t, db) + insertFailed(t, db) + insertSuccess(t, db) + insertSuccess(t, db) + insertSuccess(t, db) + insertRunning(t, db) + return db + }, + ExpectedWorkspaces: 7, + ExpectedStatuses: map[codersdk.ProvisionerJobStatus]int{ + codersdk.ProvisionerJobCanceled: 1, + codersdk.ProvisionerJobFailed: 2, + codersdk.ProvisionerJobSucceeded: 3, + codersdk.ProvisionerJobRunning: 1, + }, + }} { + tc := tc + t.Run(tc.Name, func(t *testing.T) { + t.Parallel() + registry := prometheus.NewRegistry() + closeFunc, err := prometheusmetrics.Workspaces(context.Background(), slogtest.Make(t, nil), registry, tc.Database(), testutil.IntervalFast) + require.NoError(t, err) + t.Cleanup(closeFunc) + + require.Eventually(t, func() bool { + metrics, err := registry.Gather() + assert.NoError(t, err) + + stMap := map[codersdk.ProvisionerJobStatus]int{} + for _, m := range metrics { + if m.GetName() != "coderd_workspace_latest_build_status" { + continue + } + + for _, metric := range m.Metric { + for _, l := range metric.Label { + if l == nil { + continue + } + + if l.GetName() == "status" { + status := codersdk.ProvisionerJobStatus(l.GetValue()) + stMap[status] += int(metric.Gauge.GetValue()) + } + } + } + } + + stSum := 0 + for st, count := range stMap { + if tc.ExpectedStatuses[st] != count { + return false + } + + stSum += count + } + + t.Logf("status series = %d, expected == %d", stSum, tc.ExpectedWorkspaces) + return stSum == tc.ExpectedWorkspaces + }, testutil.WaitShort, testutil.IntervalFast) + }) + } +} + func TestAgents(t *testing.T) { t.Parallel() @@ -601,3 +609,153 @@ func prepareWorkspaceAndAgent(t *testing.T, client *codersdk.Client, user coders agentClient.SetSessionToken(authToken) return agentClient } + +var ( + templateA = uuid.New() + templateVersionA = uuid.New() + templateB = uuid.New() + templateVersionB = uuid.New() +) + +func insertTemplates(t *testing.T, db database.Store) { + require.NoError(t, db.InsertTemplate(context.Background(), 
database.InsertTemplateParams{ + ID: templateA, + Name: "template-a", + Provisioner: database.ProvisionerTypeTerraform, + MaxPortSharingLevel: database.AppSharingLevelAuthenticated, + })) + + require.NoError(t, db.InsertTemplateVersion(context.Background(), database.InsertTemplateVersionParams{ + ID: templateVersionA, + TemplateID: uuid.NullUUID{UUID: templateA}, + Name: "version-1a", + })) + + require.NoError(t, db.InsertTemplate(context.Background(), database.InsertTemplateParams{ + ID: templateB, + Name: "template-b", + Provisioner: database.ProvisionerTypeTerraform, + MaxPortSharingLevel: database.AppSharingLevelAuthenticated, + })) + + require.NoError(t, db.InsertTemplateVersion(context.Background(), database.InsertTemplateVersionParams{ + ID: templateVersionB, + TemplateID: uuid.NullUUID{UUID: templateB}, + Name: "version-1b", + })) +} + +func insertUser(t *testing.T, db database.Store) database.User { + username, err := cryptorand.String(8) + require.NoError(t, err) + + user, err := db.InsertUser(context.Background(), database.InsertUserParams{ + ID: uuid.New(), + Username: username, + LoginType: database.LoginTypeNone, + }) + require.NoError(t, err) + + return user +} + +func insertRunning(t *testing.T, db database.Store) database.ProvisionerJob { + var template, templateVersion uuid.UUID + rnd, err := cryptorand.Intn(10) + require.NoError(t, err) + if rnd > 5 { + template = templateB + templateVersion = templateVersionB + } else { + template = templateA + templateVersion = templateVersionA + } + + workspace, err := db.InsertWorkspace(context.Background(), database.InsertWorkspaceParams{ + ID: uuid.New(), + OwnerID: insertUser(t, db).ID, + Name: uuid.NewString(), + TemplateID: template, + AutomaticUpdates: database.AutomaticUpdatesNever, + }) + require.NoError(t, err) + + job, err := db.InsertProvisionerJob(context.Background(), database.InsertProvisionerJobParams{ + ID: uuid.New(), + CreatedAt: dbtime.Now(), + UpdatedAt: dbtime.Now(), + Provisioner: 
database.ProvisionerTypeEcho, + StorageMethod: database.ProvisionerStorageMethodFile, + Type: database.ProvisionerJobTypeWorkspaceBuild, + }) + require.NoError(t, err) + err = db.InsertWorkspaceBuild(context.Background(), database.InsertWorkspaceBuildParams{ + ID: uuid.New(), + WorkspaceID: workspace.ID, + JobID: job.ID, + BuildNumber: 1, + Transition: database.WorkspaceTransitionStart, + Reason: database.BuildReasonInitiator, + TemplateVersionID: templateVersion, + }) + require.NoError(t, err) + // This marks the job as started. + _, err = db.AcquireProvisionerJob(context.Background(), database.AcquireProvisionerJobParams{ + OrganizationID: job.OrganizationID, + StartedAt: sql.NullTime{ + Time: dbtime.Now(), + Valid: true, + }, + Types: []database.ProvisionerType{database.ProvisionerTypeEcho}, + }) + require.NoError(t, err) + return job +} + +func insertCanceled(t *testing.T, db database.Store) { + job := insertRunning(t, db) + err := db.UpdateProvisionerJobWithCancelByID(context.Background(), database.UpdateProvisionerJobWithCancelByIDParams{ + ID: job.ID, + CanceledAt: sql.NullTime{ + Time: dbtime.Now(), + Valid: true, + }, + }) + require.NoError(t, err) + err = db.UpdateProvisionerJobWithCompleteByID(context.Background(), database.UpdateProvisionerJobWithCompleteByIDParams{ + ID: job.ID, + CompletedAt: sql.NullTime{ + Time: dbtime.Now(), + Valid: true, + }, + }) + require.NoError(t, err) +} + +func insertFailed(t *testing.T, db database.Store) { + job := insertRunning(t, db) + err := db.UpdateProvisionerJobWithCompleteByID(context.Background(), database.UpdateProvisionerJobWithCompleteByIDParams{ + ID: job.ID, + CompletedAt: sql.NullTime{ + Time: dbtime.Now(), + Valid: true, + }, + Error: sql.NullString{ + String: "failed", + Valid: true, + }, + }) + require.NoError(t, err) +} + +func insertSuccess(t *testing.T, db database.Store) { + job := insertRunning(t, db) + err := db.UpdateProvisionerJobWithCompleteByID(context.Background(), 
database.UpdateProvisionerJobWithCompleteByIDParams{ + ID: job.ID, + CompletedAt: sql.NullTime{ + Time: dbtime.Now(), + Valid: true, + }, + }) + require.NoError(t, err) +} diff --git a/coderd/provisionerdserver/provisionerdserver.go b/coderd/provisionerdserver/provisionerdserver.go index 6183ffc028..ee1d455265 100644 --- a/coderd/provisionerdserver/provisionerdserver.go +++ b/coderd/provisionerdserver/provisionerdserver.go @@ -467,6 +467,17 @@ func (s *server) acquireProtoJob(ctx context.Context, job database.ProvisionerJo if err != nil { return nil, failJob(fmt.Sprintf("get owner: %s", err)) } + ownerGroups, err := s.Database.GetGroupsByOrganizationAndUserID(ctx, database.GetGroupsByOrganizationAndUserIDParams{ + UserID: owner.ID, + OrganizationID: s.OrganizationID, + }) + if err != nil { + return nil, failJob(fmt.Sprintf("get owner group names: %s", err)) + } + ownerGroupNames := []string{} + for _, group := range ownerGroups { + ownerGroupNames = append(ownerGroupNames, group.Name) + } err = s.Pubsub.Publish(codersdk.WorkspaceNotifyChannel(workspace.ID), []byte{}) if err != nil { return nil, failJob(fmt.Sprintf("publish workspace update: %s", err)) @@ -567,6 +578,7 @@ func (s *server) acquireProtoJob(ctx context.Context, job database.ProvisionerJo WorkspaceOwner: owner.Username, WorkspaceOwnerEmail: owner.Email, WorkspaceOwnerName: owner.Name, + WorkspaceOwnerGroups: ownerGroupNames, WorkspaceOwnerOidcAccessToken: workspaceOwnerOIDCAccessToken, WorkspaceId: workspace.ID.String(), WorkspaceOwnerId: owner.ID.String(), @@ -1725,9 +1737,9 @@ func (s *server) regenerateSessionToken(ctx context.Context, user database.User, newkey, sessionToken, err := apikey.Generate(apikey.CreateParams{ UserID: user.ID, LoginType: user.LoginType, - DefaultLifetime: s.DeploymentValues.SessionDuration.Value(), TokenName: workspaceSessionTokenName(workspace), - LifetimeSeconds: int64(s.DeploymentValues.MaxTokenLifetime.Value().Seconds()), + DefaultLifetime: 
s.DeploymentValues.Sessions.DefaultDuration.Value(), + LifetimeSeconds: int64(s.DeploymentValues.Sessions.MaximumTokenDuration.Value().Seconds()), }) if err != nil { return "", xerrors.Errorf("generate API key: %w", err) diff --git a/coderd/provisionerdserver/provisionerdserver_test.go b/coderd/provisionerdserver/provisionerdserver_test.go index 05572d381e..6757bd2c63 100644 --- a/coderd/provisionerdserver/provisionerdserver_test.go +++ b/coderd/provisionerdserver/provisionerdserver_test.go @@ -166,7 +166,11 @@ func TestAcquireJob(t *testing.T) { // Set the max session token lifetime so we can assert we // create an API key with an expiration within the bounds of the // deployment config. - dv := &codersdk.DeploymentValues{MaxTokenLifetime: serpent.Duration(time.Hour)} + dv := &codersdk.DeploymentValues{ + Sessions: codersdk.SessionLifetime{ + MaximumTokenDuration: serpent.Duration(time.Hour), + }, + } gitAuthProvider := &sdkproto.ExternalAuthProviderResource{ Id: "github", } @@ -182,6 +186,15 @@ func TestAcquireJob(t *testing.T) { defer cancel() user := dbgen.User(t, db, database.User{}) + group1 := dbgen.Group(t, db, database.Group{ + Name: "group1", + OrganizationID: pd.OrganizationID, + }) + err := db.InsertGroupMember(ctx, database.InsertGroupMemberParams{ + UserID: user.ID, + GroupID: group1.ID, + }) + require.NoError(t, err) link := dbgen.UserLink(t, db, database.UserLink{ LoginType: database.LoginTypeOIDC, UserID: user.ID, @@ -310,8 +323,8 @@ func TestAcquireJob(t *testing.T) { require.Len(t, toks, 2, "invalid api key") key, err := db.GetAPIKeyByID(ctx, toks[0]) require.NoError(t, err) - require.Equal(t, int64(dv.MaxTokenLifetime.Value().Seconds()), key.LifetimeSeconds) - require.WithinDuration(t, time.Now().Add(dv.MaxTokenLifetime.Value()), key.ExpiresAt, time.Minute) + require.Equal(t, int64(dv.Sessions.MaximumTokenDuration.Value().Seconds()), key.LifetimeSeconds) + require.WithinDuration(t, time.Now().Add(dv.Sessions.MaximumTokenDuration.Value()), 
key.ExpiresAt, time.Minute) want, err := json.Marshal(&proto.AcquiredJob_WorkspaceBuild_{ WorkspaceBuild: &proto.AcquiredJob_WorkspaceBuild{ @@ -340,6 +353,7 @@ func TestAcquireJob(t *testing.T) { WorkspaceOwnerEmail: user.Email, WorkspaceOwnerName: user.Name, WorkspaceOwnerOidcAccessToken: link.OAuthAccessToken, + WorkspaceOwnerGroups: []string{group1.Name}, WorkspaceId: workspace.ID.String(), WorkspaceOwnerId: user.ID.String(), TemplateId: template.ID.String(), diff --git a/coderd/tailnet.go b/coderd/tailnet.go index f684b05cd2..0bcf21bb9d 100644 --- a/coderd/tailnet.go +++ b/coderd/tailnet.go @@ -32,11 +32,14 @@ import ( var tailnetTransport *http.Transport func init() { - var valid bool - tailnetTransport, valid = http.DefaultTransport.(*http.Transport) + tp, valid := http.DefaultTransport.(*http.Transport) if !valid { panic("dev error: default transport is the wrong type") } + tailnetTransport = tp.Clone() + // We do not want to respect the proxy settings from the environment, since + // all network traffic happens over wireguard. 
+ tailnetTransport.Proxy = nil } var _ workspaceapps.AgentProvider = (*ServerTailnet)(nil) diff --git a/coderd/tailnet_test.go b/coderd/tailnet_test.go index b7b7ad1df9..0a78a8349c 100644 --- a/coderd/tailnet_test.go +++ b/coderd/tailnet_test.go @@ -68,6 +68,35 @@ func TestServerTailnet_AgentConn_NoSTUN(t *testing.T) { assert.True(t, conn.AwaitReachable(ctx)) } +//nolint:paralleltest // t.Setenv +func TestServerTailnet_ReverseProxy_ProxyEnv(t *testing.T) { + t.Setenv("HTTP_PROXY", "http://169.254.169.254:12345") + + ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) + defer cancel() + + agents, serverTailnet := setupServerTailnetAgent(t, 1) + a := agents[0] + + u, err := url.Parse(fmt.Sprintf("http://127.0.0.1:%d", workspacesdk.AgentHTTPAPIServerPort)) + require.NoError(t, err) + + rp := serverTailnet.ReverseProxy(u, u, a.id) + + rw := httptest.NewRecorder() + req := httptest.NewRequest( + http.MethodGet, + u.String(), + nil, + ).WithContext(ctx) + + rp.ServeHTTP(rw, req) + res := rw.Result() + defer res.Body.Close() + + assert.Equal(t, http.StatusOK, res.StatusCode) +} + func TestServerTailnet_ReverseProxy(t *testing.T) { t.Parallel() diff --git a/coderd/userauth.go b/coderd/userauth.go index 366f566c59..eda4dd60ab 100644 --- a/coderd/userauth.go +++ b/coderd/userauth.go @@ -252,7 +252,7 @@ func (api *API) postLogin(rw http.ResponseWriter, r *http.Request) { UserID: user.ID, LoginType: database.LoginTypePassword, RemoteAddr: r.RemoteAddr, - DefaultLifetime: api.DeploymentValues.SessionDuration.Value(), + DefaultLifetime: api.DeploymentValues.Sessions.DefaultDuration.Value(), }) if err != nil { logger.Error(ctx, "unable to create API key", slog.Error(err)) @@ -1612,7 +1612,7 @@ func (api *API) oauthLogin(r *http.Request, params *oauthLoginParams) ([]*http.C cookie, newKey, err := api.createAPIKey(dbauthz.AsSystemRestricted(ctx), apikey.CreateParams{ UserID: user.ID, LoginType: params.LoginType, - DefaultLifetime: 
api.DeploymentValues.SessionDuration.Value(), + DefaultLifetime: api.DeploymentValues.Sessions.DefaultDuration.Value(), RemoteAddr: r.RemoteAddr, }) if err != nil { diff --git a/coderd/workspaceagents.go b/coderd/workspaceagents.go index 34170b3bf7..4848fef38c 100644 --- a/coderd/workspaceagents.go +++ b/coderd/workspaceagents.go @@ -1132,6 +1132,7 @@ func convertScripts(dbScripts []database.WorkspaceAgentScript) []codersdk.Worksp // @Param request body agentsdk.Stats true "Stats request" // @Success 200 {object} agentsdk.StatsResponse // @Router /workspaceagents/me/report-stats [post] +// @Deprecated Uses agent API v2 endpoint instead. func (api *API) workspaceAgentReportStats(rw http.ResponseWriter, r *http.Request) { ctx := r.Context() diff --git a/coderd/workspaceagentsrpc.go b/coderd/workspaceagentsrpc.go index 7130d0b88e..a0cd4c1032 100644 --- a/coderd/workspaceagentsrpc.go +++ b/coderd/workspaceagentsrpc.go @@ -4,6 +4,7 @@ import ( "context" "database/sql" "fmt" + "io" "net/http" "runtime/pprof" "sync" @@ -156,7 +157,7 @@ func (api *API) workspaceAgentRPC(rw http.ResponseWriter, r *http.Request) { ctx = tailnet.WithStreamID(ctx, streamID) ctx = agentapi.WithAPIVersion(ctx, version) err = agentAPI.Serve(ctx, mux) - if err != nil { + if err != nil && !xerrors.Is(err, yamux.ErrSessionShutdown) && !xerrors.Is(err, io.EOF) { logger.Warn(ctx, "workspace agent RPC listen error", slog.Error(err)) _ = conn.Close(websocket.StatusInternalError, err.Error()) return diff --git a/coderd/workspaceapps.go b/coderd/workspaceapps.go index d4a31e1822..8c6ffdb62e 100644 --- a/coderd/workspaceapps.go +++ b/coderd/workspaceapps.go @@ -102,14 +102,14 @@ func (api *API) workspaceApplicationAuth(rw http.ResponseWriter, r *http.Request // the current session. 
exp := apiKey.ExpiresAt lifetimeSeconds := apiKey.LifetimeSeconds - if exp.IsZero() || time.Until(exp) > api.DeploymentValues.SessionDuration.Value() { - exp = dbtime.Now().Add(api.DeploymentValues.SessionDuration.Value()) - lifetimeSeconds = int64(api.DeploymentValues.SessionDuration.Value().Seconds()) + if exp.IsZero() || time.Until(exp) > api.DeploymentValues.Sessions.DefaultDuration.Value() { + exp = dbtime.Now().Add(api.DeploymentValues.Sessions.DefaultDuration.Value()) + lifetimeSeconds = int64(api.DeploymentValues.Sessions.DefaultDuration.Value().Seconds()) } cookie, _, err := api.createAPIKey(ctx, apikey.CreateParams{ UserID: apiKey.UserID, LoginType: database.LoginTypePassword, - DefaultLifetime: api.DeploymentValues.SessionDuration.Value(), + DefaultLifetime: api.DeploymentValues.Sessions.DefaultDuration.Value(), ExpiresAt: exp, LifetimeSeconds: lifetimeSeconds, Scope: database.APIKeyScopeApplicationConnect, diff --git a/coderd/workspaceapps/apptest/apptest.go b/coderd/workspaceapps/apptest/apptest.go index dea232c867..5ba60fbb58 100644 --- a/coderd/workspaceapps/apptest/apptest.go +++ b/coderd/workspaceapps/apptest/apptest.go @@ -1165,6 +1165,7 @@ func Run(t *testing.T, appHostIsPrimary bool, factory DeploymentFactory) { appDetails := setupProxyTest(t, &DeploymentOptions{ ServeHTTPS: true, }) + // using the fact that Apps.Port and Apps.PortHTTPS are the same port here port, err := strconv.ParseInt(appDetails.Apps.Port.AppSlugOrPort, 10, 32) require.NoError(t, err) _, err = appDetails.SDKClient.UpsertWorkspaceAgentPortShare(ctx, appDetails.Workspace.ID, codersdk.UpsertWorkspaceAgentPortShareRequest{ @@ -1178,7 +1179,7 @@ func Run(t *testing.T, appHostIsPrimary bool, factory DeploymentFactory) { publicAppClient := appDetails.AppClient(t) publicAppClient.SetSessionToken("") - resp, err := requestWithRetries(ctx, t, publicAppClient, http.MethodGet, appDetails.SubdomainAppURL(appDetails.Apps.Port).String(), nil) + resp, err := requestWithRetries(ctx, t, 
publicAppClient, http.MethodGet, appDetails.SubdomainAppURL(appDetails.Apps.PortHTTPS).String(), nil) require.NoError(t, err) defer resp.Body.Close() require.Equal(t, http.StatusOK, resp.StatusCode) @@ -1765,9 +1766,11 @@ func assertWorkspaceLastUsedAtUpdated(t testing.TB, details *Details) { require.NotNil(t, details.Workspace, "can't assert LastUsedAt on a nil workspace!") before, err := details.SDKClient.Workspace(context.Background(), details.Workspace.ID) require.NoError(t, err) - // Wait for stats to fully flush. - details.FlushStats() require.Eventually(t, func() bool { + // We may need to flush multiple times, since the stats from the app we are testing might be + // collected asynchronously from when we see the connection close, and thus, could race + // against being flushed. + details.FlushStats() after, err := details.SDKClient.Workspace(context.Background(), details.Workspace.ID) return assert.NoError(t, err) && after.LastUsedAt.After(before.LastUsedAt) }, testutil.WaitShort, testutil.IntervalMedium) diff --git a/coderd/workspaceapps/apptest/setup.go b/coderd/workspaceapps/apptest/setup.go index 702789e4cf..c27032c192 100644 --- a/coderd/workspaceapps/apptest/setup.go +++ b/coderd/workspaceapps/apptest/setup.go @@ -116,6 +116,7 @@ type Details struct { Authenticated App Public App Port App + PortHTTPS App } } @@ -247,6 +248,12 @@ func setupProxyTestWithFactory(t *testing.T, factory DeploymentFactory, opts *De AgentName: agnt.Name, AppSlugOrPort: strconv.Itoa(int(opts.port)), } + details.Apps.PortHTTPS = App{ + Username: me.Username, + WorkspaceName: workspace.Name, + AgentName: agnt.Name, + AppSlugOrPort: strconv.Itoa(int(opts.port)) + "s", + } return details } diff --git a/coderd/workspaceapps/appurl/appurl.go b/coderd/workspaceapps/appurl/appurl.go index 4daa05a7e3..8b8cfd74d3 100644 --- a/coderd/workspaceapps/appurl/appurl.go +++ b/coderd/workspaceapps/appurl/appurl.go @@ -90,9 +90,10 @@ func (a ApplicationURL) Path() string { // // Subdomains 
should be in the form: // -// ({PREFIX}---)?{PORT/APP_SLUG}--{AGENT_NAME}--{WORKSPACE_NAME}--{USERNAME} +// ({PREFIX}---)?{PORT{s?}/APP_SLUG}--{AGENT_NAME}--{WORKSPACE_NAME}--{USERNAME} // e.g. // https://8080--main--dev--dean.hi.c8s.io +// https://8080s--main--dev--dean.hi.c8s.io // https://app--main--dev--dean.hi.c8s.io // https://prefix---8080--main--dev--dean.hi.c8s.io // https://prefix---app--main--dev--dean.hi.c8s.io diff --git a/coderd/workspaceapps/db.go b/coderd/workspaceapps/db.go index 32eaec1cf0..619bdd95ba 100644 --- a/coderd/workspaceapps/db.go +++ b/coderd/workspaceapps/db.go @@ -85,7 +85,7 @@ func (p *DBTokenProvider) Issue(ctx context.Context, rw http.ResponseWriter, r * DB: p.Database, OAuth2Configs: p.OAuth2Configs, RedirectToLogin: false, - DisableSessionExpiryRefresh: p.DeploymentValues.DisableSessionExpiryRefresh.Value(), + DisableSessionExpiryRefresh: p.DeploymentValues.Sessions.DisableExpiryRefresh.Value(), // Optional is true to allow for public apps. If the authorization check // (later on) fails and the user is not authenticated, they will be // redirected to the login page or app auth endpoint using code below. diff --git a/coderd/workspaceapps/db_test.go b/coderd/workspaceapps/db_test.go index eccc96d008..e8c7464f88 100644 --- a/coderd/workspaceapps/db_test.go +++ b/coderd/workspaceapps/db_test.go @@ -40,6 +40,7 @@ func Test_ResolveRequest(t *testing.T) { // Users can access unhealthy and initializing apps (as of 2024-02). appNameUnhealthy = "app-unhealthy" appNameInitializing = "app-initializing" + appNameEndsInS = "app-ends-in-s" // This agent will never connect, so it will never become "connected". // Users cannot access unhealthy agents. 
@@ -166,6 +167,12 @@ func Test_ResolveRequest(t *testing.T) { Threshold: 1000, }, }, + { + Slug: appNameEndsInS, + DisplayName: appNameEndsInS, + SharingLevel: proto.AppSharingLevel_OWNER, + Url: appURL, + }, }, }, { @@ -644,6 +651,67 @@ func Test_ResolveRequest(t *testing.T) { require.Equal(t, "http://127.0.0.1:9090", token.AppURL) }) + t.Run("PortSubdomainHTTPSS", func(t *testing.T) { + t.Parallel() + + req := (workspaceapps.Request{ + AccessMethod: workspaceapps.AccessMethodSubdomain, + BasePath: "/", + UsernameOrID: me.Username, + WorkspaceNameOrID: workspace.Name, + AgentNameOrID: agentName, + AppSlugOrPort: "9090ss", + }).Normalize() + + rw := httptest.NewRecorder() + r := httptest.NewRequest("GET", "/", nil) + r.Header.Set(codersdk.SessionTokenHeader, client.SessionToken()) + + _, ok := workspaceapps.ResolveRequest(rw, r, workspaceapps.ResolveRequestOptions{ + Logger: api.Logger, + SignedTokenProvider: api.WorkspaceAppsProvider, + DashboardURL: api.AccessURL, + PathAppBaseURL: api.AccessURL, + AppHostname: api.AppHostname, + AppRequest: req, + }) + // should parse as app and fail to find app "9090ss" + require.False(t, ok) + w := rw.Result() + _ = w.Body.Close() + b, err := io.ReadAll(w.Body) + require.NoError(t, err) + require.Contains(t, string(b), "404 - Application Not Found") + }) + + t.Run("SubdomainEndsInS", func(t *testing.T) { + t.Parallel() + + req := (workspaceapps.Request{ + AccessMethod: workspaceapps.AccessMethodSubdomain, + BasePath: "/", + UsernameOrID: me.Username, + WorkspaceNameOrID: workspace.Name, + AgentNameOrID: agentName, + AppSlugOrPort: appNameEndsInS, + }).Normalize() + + rw := httptest.NewRecorder() + r := httptest.NewRequest("GET", "/", nil) + r.Header.Set(codersdk.SessionTokenHeader, client.SessionToken()) + + token, ok := workspaceapps.ResolveRequest(rw, r, workspaceapps.ResolveRequestOptions{ + Logger: api.Logger, + SignedTokenProvider: api.WorkspaceAppsProvider, + DashboardURL: api.AccessURL, + PathAppBaseURL: api.AccessURL, 
+ AppHostname: api.AppHostname, + AppRequest: req, + }) + require.True(t, ok) + require.Equal(t, req.AppSlugOrPort, token.AppSlugOrPort) + }) + t.Run("Terminal", func(t *testing.T) { t.Parallel() diff --git a/coderd/workspaceapps/request.go b/coderd/workspaceapps/request.go index 0f3eddf6cb..4f6a6f3a64 100644 --- a/coderd/workspaceapps/request.go +++ b/coderd/workspaceapps/request.go @@ -287,12 +287,20 @@ func (r Request) getDatabase(ctx context.Context, db database.Store) (*databaseR // whether the app is a slug or a port and whether there are multiple agents // in the workspace or not. var ( - agentNameOrID = r.AgentNameOrID - appURL string - appSharingLevel database.AppSharingLevel - portUint, portUintErr = strconv.ParseUint(r.AppSlugOrPort, 10, 16) + agentNameOrID = r.AgentNameOrID + appURL string + appSharingLevel database.AppSharingLevel + // First check if it's a port-based URL with an optional "s" suffix for HTTPS. + potentialPortStr = strings.TrimSuffix(r.AppSlugOrPort, "s") + portUint, portUintErr = strconv.ParseUint(potentialPortStr, 10, 16) ) + //nolint:nestif if portUintErr == nil { + protocol := "http" + if strings.HasSuffix(r.AppSlugOrPort, "s") { + protocol = "https" + } + if r.AccessMethod != AccessMethodSubdomain { // TODO(@deansheather): this should return a 400 instead of a 500. return nil, xerrors.New("port-based URLs are only supported for subdomain-based applications") @@ -309,10 +317,10 @@ func (r Request) getDatabase(ctx context.Context, db database.Store) (*databaseR } // If the app slug is a port number, then route to the port as an - // "anonymous app". We only support HTTP for port-based URLs. + // "anonymous app". // // This is only supported for subdomain-based applications. 
- appURL = fmt.Sprintf("http://127.0.0.1:%d", portUint) + appURL = fmt.Sprintf("%s://127.0.0.1:%d", protocol, portUint) appSharingLevel = database.AppSharingLevelOwner // Port sharing authorization @@ -342,10 +350,6 @@ func (r Request) getDatabase(ctx context.Context, db database.Store) (*databaseR } // No port share found, so we keep default to owner. } else { - if ps.Protocol == database.PortShareProtocolHttps { - // Apply HTTPS protocol if specified. - appURL = fmt.Sprintf("https://127.0.0.1:%d", portUint) - } appSharingLevel = ps.ShareLevel } } else { diff --git a/coderd/workspaceapps/request_test.go b/coderd/workspaceapps/request_test.go index 7240937a06..b6e4bb7a2e 100644 --- a/coderd/workspaceapps/request_test.go +++ b/coderd/workspaceapps/request_test.go @@ -57,6 +57,26 @@ func Test_RequestValidate(t *testing.T) { AppSlugOrPort: "baz", }, }, + { + name: "OK5", + req: workspaceapps.Request{ + AccessMethod: workspaceapps.AccessMethodSubdomain, + BasePath: "/", + UsernameOrID: "foo", + WorkspaceNameOrID: "bar", + AppSlugOrPort: "8080", + }, + }, + { + name: "OK6", + req: workspaceapps.Request{ + AccessMethod: workspaceapps.AccessMethodSubdomain, + BasePath: "/", + UsernameOrID: "foo", + WorkspaceNameOrID: "bar", + AppSlugOrPort: "8080s", + }, + }, { name: "NoAccessMethod", req: workspaceapps.Request{ diff --git a/coderd/workspaceapps/token_test.go b/coderd/workspaceapps/token_test.go index 06ab8a2acd..c656ae2ab7 100644 --- a/coderd/workspaceapps/token_test.go +++ b/coderd/workspaceapps/token_test.go @@ -222,6 +222,54 @@ func Test_TokenMatchesRequest(t *testing.T) { }, want: false, }, + { + name: "PortPortocolHTTP", + req: workspaceapps.Request{ + AccessMethod: workspaceapps.AccessMethodSubdomain, + Prefix: "yolo--", + BasePath: "/", + UsernameOrID: "foo", + WorkspaceNameOrID: "bar", + AgentNameOrID: "baz", + AppSlugOrPort: "8080", + }, + token: workspaceapps.SignedToken{ + Request: workspaceapps.Request{ + AccessMethod: workspaceapps.AccessMethodSubdomain, + 
Prefix: "yolo--", + BasePath: "/", + UsernameOrID: "foo", + WorkspaceNameOrID: "bar", + AgentNameOrID: "baz", + AppSlugOrPort: "8080", + }, + }, + want: true, + }, + { + name: "PortPortocolHTTPS", + req: workspaceapps.Request{ + AccessMethod: workspaceapps.AccessMethodSubdomain, + Prefix: "yolo--", + BasePath: "/", + UsernameOrID: "foo", + WorkspaceNameOrID: "bar", + AgentNameOrID: "baz", + AppSlugOrPort: "8080s", + }, + token: workspaceapps.SignedToken{ + Request: workspaceapps.Request{ + AccessMethod: workspaceapps.AccessMethodSubdomain, + Prefix: "yolo--", + BasePath: "/", + UsernameOrID: "foo", + WorkspaceNameOrID: "bar", + AgentNameOrID: "baz", + AppSlugOrPort: "8080s", + }, + }, + want: true, + }, } for _, c := range cases { diff --git a/coderd/workspaces.go b/coderd/workspaces.go index f29d44d6d7..87aea6919a 100644 --- a/coderd/workspaces.go +++ b/coderd/workspaces.go @@ -332,6 +332,10 @@ func (api *API) workspaceByOwnerAndName(rw http.ResponseWriter, r *http.Request) // Create a new workspace for the currently authenticated user. // // @Summary Create user workspace by organization +// @Description Create a new workspace using a template. The request must +// @Description specify either the Template ID or the Template Version ID, +// @Description not both. If the Template ID is specified, the active version +// @Description of the template will be used. // @ID create-user-workspace-by-organization // @Security CoderSessionToken // @Accept json @@ -1645,6 +1649,11 @@ func convertWorkspace( } ttlMillis := convertWorkspaceTTLMillis(workspace.Ttl) + // If the template doesn't allow a workspace-configured value, then report the + // template value instead. + if !template.AllowUserAutostop { + ttlMillis = convertWorkspaceTTLMillis(sql.NullInt64{Valid: true, Int64: template.DefaultTTL}) + } // Only show favorite status if you own the workspace. 
requesterFavorite := workspace.OwnerID == requesterID && workspace.Favorite diff --git a/coderd/workspaces_test.go b/coderd/workspaces_test.go index f16d40f072..c01f9689d6 100644 --- a/coderd/workspaces_test.go +++ b/coderd/workspaces_test.go @@ -761,8 +761,8 @@ func TestPostWorkspacesByOrganization(t *testing.T) { coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID) // TTL should be set by the template - require.Equal(t, template.DefaultTTLMillis, templateTTL) - require.Equal(t, template.DefaultTTLMillis, *workspace.TTLMillis) + require.Equal(t, templateTTL, template.DefaultTTLMillis) + require.Equal(t, templateTTL, *workspace.TTLMillis) }) t.Run("InvalidTTL", func(t *testing.T) { @@ -789,7 +789,7 @@ func TestPostWorkspacesByOrganization(t *testing.T) { require.ErrorAs(t, err, &apiErr) require.Equal(t, http.StatusBadRequest, apiErr.StatusCode()) require.Len(t, apiErr.Validations, 1) - require.Equal(t, apiErr.Validations[0].Field, "ttl_ms") + require.Equal(t, "ttl_ms", apiErr.Validations[0].Field) require.Equal(t, "time until shutdown must be at least one minute", apiErr.Validations[0].Detail) }) }) diff --git a/codersdk/deployment.go b/codersdk/deployment.go index ee174075a7..34eaa4edd4 100644 --- a/codersdk/deployment.go +++ b/codersdk/deployment.go @@ -182,13 +182,11 @@ type DeploymentValues struct { RateLimit RateLimitConfig `json:"rate_limit,omitempty" typescript:",notnull"` Experiments serpent.StringArray `json:"experiments,omitempty" typescript:",notnull"` UpdateCheck serpent.Bool `json:"update_check,omitempty" typescript:",notnull"` - MaxTokenLifetime serpent.Duration `json:"max_token_lifetime,omitempty" typescript:",notnull"` Swagger SwaggerConfig `json:"swagger,omitempty" typescript:",notnull"` Logging LoggingConfig `json:"logging,omitempty" typescript:",notnull"` Dangerous DangerousConfig `json:"dangerous,omitempty" typescript:",notnull"` DisablePathApps serpent.Bool `json:"disable_path_apps,omitempty" typescript:",notnull"` - 
SessionDuration serpent.Duration `json:"max_session_expiry,omitempty" typescript:",notnull"` - DisableSessionExpiryRefresh serpent.Bool `json:"disable_session_expiry_refresh,omitempty" typescript:",notnull"` + Sessions SessionLifetime `json:"session_lifetime,omitempty" typescript:",notnull"` DisablePasswordAuth serpent.Bool `json:"disable_password_auth,omitempty" typescript:",notnull"` Support SupportConfig `json:"support,omitempty" typescript:",notnull"` ExternalAuthConfigs serpent.Struct[[]ExternalAuthConfig] `json:"external_auth,omitempty" typescript:",notnull"` @@ -244,6 +242,33 @@ func ParseSSHConfigOption(opt string) (key string, value string, err error) { return opt[:idx], opt[idx+1:], nil } +// SessionLifetime refers to "sessions" authenticating into Coderd. Coder has +// multiple different session types: api keys, tokens, workspace app tokens, +// agent tokens, etc. This configuration struct should be used to group all +// settings referring to any of these session lifetime controls. +// TODO: These config options were created back when coder only had api keys. +// Today, the config is ambiguously used for all of them. For example: +// - cli based api keys ignore all settings +// - login uses the default lifetime, not the MaximumTokenDuration +// - Tokens use the Default & MaximumTokenDuration +// - ... etc ... +// The rationale behind each decision is undocumented. The naming behind these +// config options is also confusing without any clear documentation. +// 'CreateAPIKey' is used to make all sessions, and its parameters are just +// 'LifetimeSeconds' and 'DefaultLifetime'. Which does not directly correlate to +// the config options here. +type SessionLifetime struct { + // DisableExpiryRefresh will disable automatically refreshing api + // keys when they are used from the api. This means the api key lifetime at + // creation is the lifetime of the api key. 
+ DisableExpiryRefresh serpent.Bool `json:"disable_expiry_refresh,omitempty" typescript:",notnull"` + + // DefaultDuration is for api keys, not tokens. + DefaultDuration serpent.Duration `json:"default_duration" typescript:",notnull"` + + MaximumTokenDuration serpent.Duration `json:"max_token_lifetime,omitempty" typescript:",notnull"` +} + type DERP struct { Server DERPServerConfig `json:"server" typescript:",notnull"` Config DERPConfig `json:"config" typescript:",notnull"` @@ -1579,7 +1604,7 @@ when required by your organization's security policy.`, // We have to add in the 25 leap days for the frontend to show the // "100 years" correctly. Default: ((100 * 365 * time.Hour * 24) + (25 * time.Hour * 24)).String(), - Value: &c.MaxTokenLifetime, + Value: &c.Sessions.MaximumTokenDuration, Group: &deploymentGroupNetworkingHTTP, YAML: "maxTokenLifetime", Annotations: serpent.Annotations{}.Mark(annotationFormatDuration, "true"), @@ -1773,7 +1798,7 @@ when required by your organization's security policy.`, Flag: "session-duration", Env: "CODER_SESSION_DURATION", Default: (24 * time.Hour).String(), - Value: &c.SessionDuration, + Value: &c.Sessions.DefaultDuration, Group: &deploymentGroupNetworkingHTTP, YAML: "sessionDuration", Annotations: serpent.Annotations{}.Mark(annotationFormatDuration, "true"), @@ -1784,7 +1809,7 @@ when required by your organization's security policy.`, Flag: "disable-session-expiry-refresh", Env: "CODER_DISABLE_SESSION_EXPIRY_REFRESH", - Value: &c.DisableSessionExpiryRefresh, + Value: &c.Sessions.DisableExpiryRefresh, Group: &deploymentGroupNetworkingHTTP, YAML: "disableSessionExpiryRefresh", }, diff --git a/codersdk/organizations.go b/codersdk/organizations.go index a6a1b927ca..cb353dff27 100644 --- a/codersdk/organizations.go +++ b/codersdk/organizations.go @@ -138,6 +138,9 @@ type CreateTemplateRequest struct { // CreateWorkspaceRequest provides options for creating a new workspace. // Either TemplateID or TemplateVersionID must be specified. 
They cannot both be present. +// @Description CreateWorkspaceRequest provides options for creating a new workspace. +// @Description Only one of TemplateID or TemplateVersionID can be specified, not both. +// @Description If TemplateID is specified, the active version of the template will be used. type CreateWorkspaceRequest struct { // TemplateID specifies which template should be used for creating the workspace. TemplateID uuid.UUID `json:"template_id,omitempty" validate:"required_without=TemplateVersionID,excluded_with=TemplateVersionID" format:"uuid"` diff --git a/codersdk/workspacesdk/connector.go b/codersdk/workspacesdk/connector.go index 5c1d9e600a..7955e8fb33 100644 --- a/codersdk/workspacesdk/connector.go +++ b/codersdk/workspacesdk/connector.go @@ -86,9 +86,11 @@ func runTailnetAPIConnector( func (tac *tailnetAPIConnector) manageGracefulTimeout() { defer tac.cancelGracefulCtx() <-tac.ctx.Done() + timer := time.NewTimer(time.Second) + defer timer.Stop() select { case <-tac.closed: - case <-time.After(time.Second): + case <-timer.C: } } diff --git a/codersdk/workspacesdk/workspacesdk_internal_test.go b/codersdk/workspacesdk/connector_internal_test.go similarity index 98% rename from codersdk/workspacesdk/workspacesdk_internal_test.go rename to codersdk/workspacesdk/connector_internal_test.go index 57e6f751ff..9f70891fda 100644 --- a/codersdk/workspacesdk/workspacesdk_internal_test.go +++ b/codersdk/workspacesdk/connector_internal_test.go @@ -102,6 +102,8 @@ func (*fakeTailnetConn) SetNodeCallback(func(*tailnet.Node)) {} func (*fakeTailnetConn) SetDERPMap(*tailcfg.DERPMap) {} +func (*fakeTailnetConn) SetTunnelDestination(uuid.UUID) {} + func newFakeTailnetConn() *fakeTailnetConn { return &fakeTailnetConn{} } diff --git a/docs/about/architecture.md b/docs/about/architecture.md index 027bd0ff5b..61b06d68d4 100644 --- a/docs/about/architecture.md +++ b/docs/about/architecture.md @@ -1,39 +1,38 @@ # Architecture -This document provides a high level overview of 
Coder's architecture. +The Coder deployment model is flexible and offers various components that +platform administrators can deploy and scale depending on their use case. This +page describes possible deployments, challenges, and risks associated with them. -## Single region architecture +Learn more about our [Reference Architectures](../admin/architectures/index.md) +and platform scaling capabilities. -![Architecture Diagram](../images/architecture-single-region.png) +## Primary components -## Multi-region architecture +### coderd -![Architecture Diagram](../images/architecture-multi-region.png) - -## coderd - -coderd is the service created by running `coder server`. It is a thin API that -connects workspaces, provisioners and users. coderd stores its state in Postgres -and is the only service that communicates with Postgres. +_coderd_ is the service created by running `coder server`. It is a thin API that +connects workspaces, provisioners and users. _coderd_ stores its state in +Postgres and is the only service that communicates with Postgres. It offers: - Dashboard (UI) - HTTP API - Dev URLs (HTTP reverse proxy to workspaces) -- Workspace Web Applications (e.g easily access code-server) +- Workspace Web Applications (e.g for easy access to `code-server`) - Agent registration -## provisionerd +### provisionerd -provisionerd is the execution context for infrastructure modifying providers. At -the moment, the only provider is Terraform (running `terraform`). +_provisionerd_ is the execution context for infrastructure modifying providers. +At the moment, the only provider is Terraform (running `terraform`). By default, the Coder server runs multiple provisioner daemons. [External provisioners](../admin/provisioners.md) can be added for security or scalability purposes. -## Agents +### Agents An agent is the Coder service that runs within a user's remote workspace. 
It provides a consistent interface for coderd and clients to communicate with @@ -50,9 +49,9 @@ Templates are responsible for [creating and running agents](../templates/index.md#coder-agent) within workspaces. -## Service Bundling +### Service Bundling -While coderd and Postgres can be orchestrated independently, our default +While _coderd_ and Postgres can be orchestrated independently, our default installation paths bundle them all together into one system service. It's perfectly fine to run a production deployment this way, but there are certain situations that necessitate decomposition: @@ -61,7 +60,7 @@ situations that necessitate decomposition: - Achieving greater availability and efficiency (horizontally scale individual services) -## Workspaces +### Workspaces At the highest level, a workspace is a set of cloud resources. These resources can be VMs, Kubernetes clusters, storage buckets, or whatever else Terraform @@ -72,3 +71,329 @@ while those that don't are called _peripheral resources_. Each resource may also be _persistent_ or _ephemeral_ depending on whether they're destroyed on workspace stop. + +## Deployment models + +### Single region architecture + +![Architecture Diagram](../images/architecture-single-region.png) + +#### Components + +This architecture consists of a single load balancer, several _coderd_ replicas, +and _Coder workspaces_ deployed in the same region. + +##### Workload resources + +- Deploy at least one _coderd_ replica per availability zone with _coderd_ + instances and provisioners. High availability is recommended but not essential + for small deployments. +- Single replica deployment is a special case that can address a + tiny/small/proof-of-concept installation on a single virtual machine. If you + are serving more than 100 users/workspaces, you should add more replicas. 
+ +**Coder workspace** + +- For small deployments consider a lightweight workspace runtime like the + [Sysbox](https://github.com/nestybox/sysbox) container runtime. Learn more about how + to enable + [docker-in-docker using Sysbox](https://asciinema.org/a/kkTmOxl8DhEZiM2fLZNFlYzbo?speed=2). + +**HA Database** + +- Monitor node status and resource utilization metrics. +- Implement robust backup and disaster recovery strategies to protect against + data loss. + +##### Workload supporting resources + +**Load balancer** + +- Distributes and load balances traffic from agents and clients to _Coder + Server_ replicas across availability zones. +- Layer 7 load balancing. The load balancer can decrypt SSL traffic, and + re-encrypt using an internal certificate. +- Session persistence (sticky sessions) can be disabled as _coderd_ instances + are stateless. +- WebSocket and long-lived connections must be supported. + +**Single sign-on** + +- Integrate with existing Single Sign-On (SSO) solutions used within the + organization via the supported OAuth 2.0 or OpenID Connect standards. +- Learn more about [Authentication in Coder](../admin/auth.md). + +### Multi-region architecture + +![Architecture Diagram](../images/architecture-multi-region.png) + +#### Components + +This architecture is for globally distributed developer teams using Coder +workspaces on a daily basis. It features a single load balancer with regionally +deployed _Workspace Proxies_, several _coderd_ replicas, and _Coder workspaces_ +provisioned in different regions. + +Note: The _multi-region architecture_ assumes the same deployment principles as +the _single region architecture_, but it extends them to multi-region deployment +with workspace proxies. Proxies are deployed in regions closest to developers to +offer the fastest developer experience.
+ +##### Workload resources + +**Workspace proxy** + +- Workspace proxy offers developers the option to establish a fast relay + connection when accessing their workspace via SSH, a workspace application, or + port forwarding. +- Dashboard connections, API calls (e.g. _list workspaces_) are not served over + proxies. +- Proxies do not establish connections to the database. +- Proxy instances do not share authentication tokens between one another. + +##### Workload supporting resources + +**Proxy load balancer** + +- Distributes and load balances workspace relay traffic in a single region + across availability zones. +- Layer 7 load balancing. The load balancer can decrypt SSL traffic, and + re-encrypt using internal certificate. +- Session persistence (sticky sessions) can be disabled as _coderd_ instances + are stateless. +- WebSocket and long-lived connections must be supported. + +### Multi-cloud architecture + +By distributing Coder workspaces across different cloud providers, organizations +can mitigate the risk of downtime caused by provider-specific outages or +disruptions. Additionally, multi-cloud deployment enables organizations to +leverage the unique features and capabilities offered by each cloud provider, +such as region availability and pricing models. + +![Architecture Diagram](../images/architecture-multi-cloud.png) + +#### Components + +The deployment model comprises: + +- `coderd` instances deployed within a single region of the same cloud provider, + with replicas strategically distributed across availability zones. +- Workspace provisioners deployed in each cloud, communicating with `coderd` + instances. +- Workspace proxies running in the same locations as provisioners to optimize + user connections to workspaces for maximum speed. + +Due to the relatively large overhead of cross-regional communication, it is not +advised to set up multi-cloud control planes. 
It is recommended to keep coderd +replicas and the database within the same cloud-provider and region. + +Note: The _multi-cloud architecture_ follows the deployment principles outlined +in the _multi-region architecture_. However, it adapts component selection based +on the specific cloud provider. Developers can initiate workspaces based on the +nearest region and technical specifications provided by the cloud providers. + +##### Workload resources + +**Workspace provisioner** + +- _Security recommendation_: Create a long, random pre-shared key (PSK) and add + it to the regional secret store, so that local _provisionerd_ can access it. + Remember to distribute it using safe, encrypted communication channel. The PSK + must also be added to the _coderd_ configuration. + +**Workspace proxy** + +- _Security recommendation_: Use `coder` CLI to create + [authentication tokens for every workspace proxy](../admin/workspace-proxies.md#requirements), + and keep them in regional secret stores. Remember to distribute them using + safe, encrypted communication channel. + +**Managed database** + +- For AWS: _Amazon RDS for PostgreSQL_ +- For Azure: _Azure Database for PostgreSQL - Flexible Server_ +- For GCP: _Cloud SQL for PostgreSQL_ + +##### Workload supporting resources + +**Kubernetes platform (optional)** + +- For AWS: _Amazon Elastic Kubernetes Service_ +- For Azure: _Azure Kubernetes Service_ +- For GCP: _Google Kubernetes Engine_ + +See how to deploy +[Coder on Azure Kubernetes Service](https://github.com/ericpaulsen/coder-aks). + +Learn more about [security requirements](../install/kubernetes.md) for deploying +Coder on Kubernetes. 
+ +**Load balancer** + +- For AWS: + - _AWS Network Load Balancer_ + - Level 4 load balancing + - For Kubernetes deployment: annotate service with + `service.beta.kubernetes.io/aws-load-balancer-type: "nlb"`, preserve the + client source IP with `externalTrafficPolicy: Local` + - _AWS Classic Load Balancer_ + - Level 7 load balancing + - For Kubernetes deployment: set `sessionAffinity` to `None` +- For Azure: + - _Azure Load Balancer_ + - Level 7 load balancing + - Azure Application Gateway + - Deploy Azure Application Gateway when more advanced traffic routing + policies are needed for Kubernetes applications. + - Take advantage of features such as WebSocket support and TLS termination + provided by Azure Application Gateway, enhancing the capabilities of + Kubernetes deployments on Azure. +- For GCP: + - _Cloud Load Balancing_ with SSL load balancer: + - Layer 4 load balancing, SSL enabled + - _Cloud Load Balancing_ with HTTPS load balancer: + - Layer 7 load balancing + - For Kubernetes deployment: annotate service (with ingress enabled) with + `kubernetes.io/ingress.class: "gce"`, leverage the `NodePort` service + type. + - Note: HTTP load balancer rejects DERP upgrade, Coder will fallback to + WebSockets + +**Single sign-on** + +- For AWS: + [AWS IAM Identity Center](https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html) +- For Azure: + [Microsoft Entra ID Sign-On](https://learn.microsoft.com/en-us/entra/identity/app-proxy/) +- For GCP: + [Google Cloud Identity Platform](https://cloud.google.com/architecture/identity/single-sign-on) + +### Air-gapped architecture + +The air-gapped deployment model refers to the setup of Coder's development +environment within a restricted network environment that lacks internet +connectivity. This deployment model is often required for organizations with +strict security policies or those operating in isolated environments, such as +government agencies or certain enterprise setups. 
+ +The key features of the air-gapped architecture include: + +- _Offline installation_: Deploy workspaces without relying on an external + internet connection. +- _Isolated package/plugin repositories_: Depend on local repositories for + software installation, updates, and security patches. +- _Secure data transfer_: Enable encrypted communication channels and robust + access controls to safeguard sensitive information. + +Learn more about [offline deployments](../install/offline.md) of Coder. + +![Architecture Diagram](../images/architecture-air-gapped.png) + +#### Components + +The deployment model includes: + +- _Workspace provisioners_ with direct access to self-hosted package and plugin + repositories and restricted internet access. +- _Mirror of Terraform Registry_ with multiple versions of Terraform plugins. +- _Certificate Authority_ with all TLS certificates to build secure + communication channels. + +The model is compatible with various infrastructure models, enabling deployment +across multiple regions and diverse cloud platforms. + +##### Workload resources + +**Workspace provisioner** + +- Includes Terraform binary in the container or system image. +- Checks out Terraform plugins from self-hosted _Registry_ mirror. +- Deploys workspace images stored in the self-hosted _Container Registry_. + +**Coder server** + +- Update checks are disabled (`CODER_UPDATE_CHECK=false`). +- Telemetry data is not collected (`CODER_TELEMETRY_ENABLE=false`). +- Direct connections are not possible, workspace traffic is relayed through + control plane's DERP proxy. + +##### Workload supporting resources + +**Self-hosted Database** + +- In the air-gapped deployment model, _Coderd_ instance is unable to download + Postgres binaries from the internet, so external database must be provided. 
+
+**Container Registry**
+
+- Since the _Registry_ is isolated from the internet, platform engineers are
+  responsible for maintaining Workspace container images and conducting periodic
+  updates of base Docker images.
+- It is recommended to keep [Dev Containers](../templates/devcontainers.md) up
+  to date with the latest released
+  [Envbuilder](https://github.com/coder/envbuilder) runtime.
+
+**Mirror of Terraform Registry**
+
+- Stores all necessary Terraform plugin dependencies, ensuring successful
+  workspace provisioning and maintenance without internet access.
+- Platform engineers are responsible for periodically updating the mirrored
+  Terraform plugins, including
+  [terraform-provider-coder](https://github.com/coder/terraform-provider-coder).
+
+**Certificate Authority**
+
+- Manages and issues TLS certificates to facilitate secure communication
+  channels within the infrastructure.
+
+### Dev Containers
+
+Note: _Dev containers_ are at an early stage and considered experimental at the
+moment.
+
+This architecture enhances a Coder workspace with a
+[development container](https://containers.dev/) setup built using the
+[envbuilder](https://github.com/coder/envbuilder) project. Workspace users have
+the flexibility to extend generic, base developer environments with custom,
+project-oriented [features](https://containers.dev/features) without requiring
+platform administrators to push altered Docker images.
+
+Learn more about
+[Dev containers support](https://coder.com/docs/v2/latest/templates/devcontainers)
+in Coder.
+
+![Architecture Diagram](../images/architecture-devcontainers.png)
+
+#### Components
+
+The deployment model includes:
+
+- _Workspace_ built using Coder template with _envbuilder_ enabled to set up the
+  developer environment according to the dev container spec.
+- _Container Registry_ for Docker images used by _envbuilder_, maintained by
+  Coder platform engineers or developer productivity engineers.
+ +Since this model is strictly focused on workspace nodes, it does not affect the +setup of regional infrastructure. It can be deployed alongside other deployment +models, in multiple regions, or across various cloud platforms. + +##### Workload resources + +**Coder workspace** + +- Docker and Kubernetes based templates are supported. +- The `docker_container` resource uses `ghcr.io/coder/envbuilder` as the base + image. + +_Envbuilder_ checks out the base Docker image from the container registry and +installs selected features as specified in the `devcontainer.json` on top. +Eventually, it starts the container with the developer environment. + +##### Workload supporting resources + +**Container Registry (optional)** + +- Workspace nodes need access to the Container Registry to check out images. To + shorten the provisioning time, it is recommended to deploy registry mirrors in + the same region as the workspace nodes. diff --git a/docs/api/general.md b/docs/api/general.md index 69f57b9a99..330c41a335 100644 --- a/docs/api/general.md +++ b/docs/api/general.md @@ -200,7 +200,6 @@ curl -X GET http://coder-server:8080/api/v2/deployment/config \ "disable_owner_workspace_exec": true, "disable_password_auth": true, "disable_path_apps": true, - "disable_session_expiry_refresh": true, "docs_url": { "forceQuery": true, "fragment": "string", @@ -252,8 +251,6 @@ curl -X GET http://coder-server:8080/api/v2/deployment/config \ "log_filter": ["string"], "stackdriver": "string" }, - "max_session_expiry": 0, - "max_token_lifetime": 0, "metrics_cache_refresh_interval": 0, "oauth2": { "github": { @@ -341,6 +338,11 @@ curl -X GET http://coder-server:8080/api/v2/deployment/config \ "redirect_to_access_url": true, "scim_api_key": "string", "secure_auth_cookie": true, + "session_lifetime": { + "default_duration": 0, + "disable_expiry_refresh": true, + "max_token_lifetime": 0 + }, "ssh_keygen_algorithm": "string", "strict_transport_security": 0, "strict_transport_security_options": 
["string"], diff --git a/docs/api/schemas.md b/docs/api/schemas.md index f46e02a636..efc3a38f01 100644 --- a/docs/api/schemas.md +++ b/docs/api/schemas.md @@ -1646,6 +1646,8 @@ AuthorizationObject can represent a "set" of objects, such as: all workspaces in } ``` +CreateWorkspaceRequest provides options for creating a new workspace. Only one of TemplateID or TemplateVersionID can be specified, not both. If TemplateID is specified, the active version of the template will be used. + ### Properties | Name | Type | Required | Restrictions | Description | @@ -1923,7 +1925,6 @@ AuthorizationObject can represent a "set" of objects, such as: all workspaces in "disable_owner_workspace_exec": true, "disable_password_auth": true, "disable_path_apps": true, - "disable_session_expiry_refresh": true, "docs_url": { "forceQuery": true, "fragment": "string", @@ -1975,8 +1976,6 @@ AuthorizationObject can represent a "set" of objects, such as: all workspaces in "log_filter": ["string"], "stackdriver": "string" }, - "max_session_expiry": 0, - "max_token_lifetime": 0, "metrics_cache_refresh_interval": 0, "oauth2": { "github": { @@ -2064,6 +2063,11 @@ AuthorizationObject can represent a "set" of objects, such as: all workspaces in "redirect_to_access_url": true, "scim_api_key": "string", "secure_auth_cookie": true, + "session_lifetime": { + "default_duration": 0, + "disable_expiry_refresh": true, + "max_token_lifetime": 0 + }, "ssh_keygen_algorithm": "string", "strict_transport_security": 0, "strict_transport_security_options": ["string"], @@ -2293,7 +2297,6 @@ AuthorizationObject can represent a "set" of objects, such as: all workspaces in "disable_owner_workspace_exec": true, "disable_password_auth": true, "disable_path_apps": true, - "disable_session_expiry_refresh": true, "docs_url": { "forceQuery": true, "fragment": "string", @@ -2345,8 +2348,6 @@ AuthorizationObject can represent a "set" of objects, such as: all workspaces in "log_filter": ["string"], "stackdriver": "string" }, - 
"max_session_expiry": 0, - "max_token_lifetime": 0, "metrics_cache_refresh_interval": 0, "oauth2": { "github": { @@ -2434,6 +2435,11 @@ AuthorizationObject can represent a "set" of objects, such as: all workspaces in "redirect_to_access_url": true, "scim_api_key": "string", "secure_auth_cookie": true, + "session_lifetime": { + "default_duration": 0, + "disable_expiry_refresh": true, + "max_token_lifetime": 0 + }, "ssh_keygen_algorithm": "string", "strict_transport_security": 0, "strict_transport_security_options": ["string"], @@ -2524,7 +2530,6 @@ AuthorizationObject can represent a "set" of objects, such as: all workspaces in | `disable_owner_workspace_exec` | boolean | false | | | | `disable_password_auth` | boolean | false | | | | `disable_path_apps` | boolean | false | | | -| `disable_session_expiry_refresh` | boolean | false | | | | `docs_url` | [serpent.URL](#serpenturl) | false | | | | `enable_terraform_debug_mode` | boolean | false | | | | `experiments` | array of string | false | | | @@ -2535,8 +2540,6 @@ AuthorizationObject can represent a "set" of objects, such as: all workspaces in | `in_memory_database` | boolean | false | | | | `job_hang_detector_interval` | integer | false | | | | `logging` | [codersdk.LoggingConfig](#codersdkloggingconfig) | false | | | -| `max_session_expiry` | integer | false | | | -| `max_token_lifetime` | integer | false | | | | `metrics_cache_refresh_interval` | integer | false | | | | `oauth2` | [codersdk.OAuth2Config](#codersdkoauth2config) | false | | | | `oidc` | [codersdk.OIDCConfig](#codersdkoidcconfig) | false | | | @@ -2552,6 +2555,7 @@ AuthorizationObject can represent a "set" of objects, such as: all workspaces in | `redirect_to_access_url` | boolean | false | | | | `scim_api_key` | string | false | | | | `secure_auth_cookie` | boolean | false | | | +| `session_lifetime` | [codersdk.SessionLifetime](#codersdksessionlifetime) | false | | | | `ssh_keygen_algorithm` | string | false | | | | `strict_transport_security` | 
integer | false | | | | `strict_transport_security_options` | array of string | false | | | @@ -4292,6 +4296,24 @@ AuthorizationObject can represent a "set" of objects, such as: all workspaces in | `ssh` | integer | false | | | | `vscode` | integer | false | | | +## codersdk.SessionLifetime + +```json +{ + "default_duration": 0, + "disable_expiry_refresh": true, + "max_token_lifetime": 0 +} +``` + +### Properties + +| Name | Type | Required | Restrictions | Description | +| ------------------------ | ------- | -------- | ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `default_duration` | integer | false | | Default duration is for api keys, not tokens. | +| `disable_expiry_refresh` | boolean | false | | Disable expiry refresh will disable automatically refreshing api keys when they are used from the api. This means the api key lifetime at creation is the lifetime of the api key. | +| `max_token_lifetime` | integer | false | | | + ## codersdk.SupportConfig ```json diff --git a/docs/api/workspaces.md b/docs/api/workspaces.md index c16dd970a5..886f8401f7 100644 --- a/docs/api/workspaces.md +++ b/docs/api/workspaces.md @@ -14,6 +14,11 @@ curl -X POST http://coder-server:8080/api/v2/organizations/{organization}/member `POST /organizations/{organization}/members/{user}/workspaces` +Create a new workspace using a template. The request must +specify either the Template ID or the Template Version ID, +not both. If the Template ID is specified, the active version +of the template will be used. 
+ > Body parameter ```json diff --git a/docs/changelogs/images/support-bundle.png b/docs/changelogs/images/support-bundle.png new file mode 100644 index 0000000000..c4046edcd6 Binary files /dev/null and b/docs/changelogs/images/support-bundle.png differ diff --git a/docs/changelogs/v2.10.0.md b/docs/changelogs/v2.10.0.md new file mode 100644 index 0000000000..9d7b76a88f --- /dev/null +++ b/docs/changelogs/v2.10.0.md @@ -0,0 +1,130 @@ +## Changelog + +> [!NOTE] +> This is a mainline Coder release. We advise enterprise customers without a staging environment to install our [latest stable release](https://github.com/coder/coder/releases/latest) while we refine this version. Learn more about our [Release Schedule](../install/releases.md). + +### BREAKING CHANGES + +- Removed `max_ttl` from templates (#12644) (@Emyrk) + > Maximum Workspace Lifetime, or `MAX_TTL`, has been removed from the product in favor of Autostop Requirement. Max Lifetime was designed to automate workspace shutdowns to enable security policy enforcement, enforce routine updates, and reduce idle resource costs. + > + > If you use Maximum Lifetime in your templates, workspaces will no longer stop at the end of this timer. Instead, we advise migrating to Autostop Requirement. + > + > Autostop Requirement shares the benefits of `MAX_TTL`, but also respects user-configured quiet hours to avoid forcing shutdowns while developers are connected. + > + > We only completely deprecate features after a 2-month heads up in the UI. 
+ +### Features + +- Make agent stats' cardinality configurable (#12535) (@dannykopping) +- Upgrade tailscale fork to set TCP options for performance (#12574) (@spikecurtis) +- Add AWS IAM RDS Database auth driver (#12566) (@f0ssel) +- Support Windows containers in bootstrap script (#12662) (@kylecarbs) +- Add `workspace_id` to `workspace_build` audit logs (#12718) (@sreya) +- Make OAuth2 provider not enterprise-only (#12732) (@code-asher) +- Allow number options with monotonic validation (#12726) (@dannykopping) +- Expose workspace statuses (with details) as a prometheus metric (#12762) (@dannykopping) +- Agent: Support adjusting child process OOM scores (#12655) (@sreya) + > This opt-in configuration protects the Agent process from crashing via OOM. To prevent the agent from being killed in most scenarios, set `CODER_PROC_PRIO_MGMT=1` on your container. +- Expose HTTP debug server over tailnet API (#12582) (@johnstcn) +- Show queue position during workspace builds (#12606) (@dannykopping) +- Unhide support bundle command (#12745) (@johnstcn) + > The Coder support bundle grabs a variety of deployment health information to improve and expedite the debugging experience. 
+ > ![Coder Support Bundle](https://raw.githubusercontent.com/coder/coder/main/docs/changelogs/images/support-bundle.png) +- Add golden tests for errors (#11588) (#12698) (@elasticspoon) +- Enforce confirmation before creating bundle (#12684) (@johnstcn) +- Add enabled experiments to telemetry (#12656) (@dannykopping) +- Export metric indicating each experiment's status (#12657) (@dannykopping) +- Add sftp to insights apps (#12675) (@mafredri) +- Add `template_usage_stats` table and rollup query (#12664) (@mafredri) +- Add `dbrollup` service to rollup insights (#12665) (@mafredri) +- Use `template_usage_stats` in `GetTemplateInsights` query (#12666) (@mafredri) +- Use `template_usage_stats` in `GetTemplateInsightsByInterval` query (#12667) (@mafredri) +- Use `template_usage_stats` in `GetTemplateAppInsights` query (#12669) (@mafredri) +- Use `template_usage_stats` in `GetUserLatencyInsights` query (#12671) (@mafredri) +- Use `template_usage_stats` in `GetUserActivityInsights` query (#12672) (@mafredri) +- Use `template_usage_stats` in `*ByTemplate` insights queries (#12668) (@mafredri) +- Add debug handlers for logs, manifest, and token to agent (#12593) (@johnstcn) +- Add linting to all examples (#12595) (@mafredri) +- Add C++ icon (#12572) (@michaelbrewer) +- Add support for `--mainline` (default) and `--stable` (#12858) (@mafredri) +- Make listening ports scrollable (#12660) (@BrunoQuaresma) +- Fetch agent network info over tailnet (#12577) (@johnstcn) +- Add client magicsock and agent prometheus metrics to support bundle (#12604) (@johnstcn) + +### Bug fixes + +- Server: Fix data race in TestLabelsAggregation tests (#12578) (@dannykopping) +- Dashboard: Hide actions and notifications from deleted workspaces (#12563) (@aslilac) +- VSCode: Importing api into vscode-coder (#12570) (@code-asher) +- CLI: Clean template destination path for `pull` (#12559) (@dannykopping) +- Agent: Ensure agent token is from latest build in middleware (#12443) (@f0ssel) +- CLI: 
Handle CLI default organization when none exists in - How do I add an enterprise license? +### How do I add an enterprise license? Visit https://coder.com/trial or contact [sales@coder.com](mailto:sales@coder.com?subject=License) to get a v2 enterprise @@ -32,10 +31,7 @@ If the license is in a file: coder licenses add -f ``` - - -
- I'm experiencing networking issues, so want to disable Tailscale, STUN, Direct connections and force use of websockets +### I'm experiencing networking issues, so want to disable Tailscale, STUN, Direct connections and force use of websockets The primary developer use case is a local IDE connecting over SSH to a Coder workspace. @@ -62,19 +58,13 @@ troubleshooting. | [`CODER_DERP_SERVER_STUN_ADDRESSES`](https://coder.com/docs/v2/latest/cli/server#--derp-server-stun-addresses) | `"disable"` | Disables STUN | | [`CODER_DERP_FORCE_WEBSOCKETS`](https://coder.com/docs/v2/latest/cli/server#--derp-force-websockets) | `true` | Forces websockets over Tailscale DERP | -
- -
- How do I configure NGINX as the reverse proxy in front of Coder? +### How do I configure NGINX as the reverse proxy in front of Coder? [This doc](https://github.com/coder/coder/tree/main/examples/web-server/nginx#configure-nginx) in our repo explains in detail how to configure NGINX with Coder so that our Tailscale Wireguard networking functions properly. -
- -
- How do I hide some of the default icons in a workspace like VS Code Desktop, Terminal, SSH, Ports? +### How do I hide some of the default icons in a workspace like VS Code Desktop, Terminal, SSH, Ports? The visibility of Coder apps is configurable in the template. To change the default (shows all), add this block inside the @@ -93,10 +83,7 @@ of a template and configure as needed: This example will hide all built-in coder_app icons except the web terminal. -
- -
- I want to allow code-server to be accessible by other users in my deployment. +### I want to allow code-server to be accessible by other users in my deployment. > It is **not** recommended to share a web IDE, but if required, the following > deployment environment variable settings are required. @@ -126,10 +113,7 @@ resource "coder_app" "code-server" { } ``` -
- -
- I installed Coder and created a workspace but the icons do not load. +### I installed Coder and created a workspace but the icons do not load. An important concept to understand is that Coder creates workspaces which have an agent that must be able to reach the `coder server`. @@ -153,10 +137,7 @@ coder server --access-url http://localhost:3000 --address 0.0.0.0:3000 > Even `coder server` which creates a reverse proxy, will let you use > http://localhost to access Coder from a browser. -
- -
- I updated a template, and an existing workspace based on that template fails to start. +### I updated a template, and an existing workspace based on that template fails to start. When updating a template, be aware of potential issues with input variables. For example, if a template prompts users to choose options like a @@ -176,10 +157,7 @@ potentially saving the workspace from a failed status. coder update --always-prompt ``` -
- -
- I'm running coder on a VM with systemd but latest release installed isn't showing up. +### I'm running coder on a VM with systemd but latest release installed isn't showing up. Take, for example, a Coder deployment on a VM with a 2 shared vCPU systemd service. In this scenario, it's necessary to reload the daemon and then restart @@ -194,10 +172,7 @@ sudo systemctl daemon-reload sudo systemctl restart coder.service ``` -
- -
- I'm using the built-in Postgres database and forgot admin email I set up. +### I'm using the built-in Postgres database and forgot admin email I set up. 1. Run the `coder server` command below to retrieve the `psql` connection URL which includes the database user and password. @@ -210,10 +185,7 @@ coder server postgres-builtin-url psql "postgres://coder@localhost:53737/coder?sslmode=disable&password=I2S...pTk" ``` -
- -
- How to find out Coder's latest Terraform provider version? +### How to find out Coder's latest Terraform provider version? [Coder is on the HashiCorp's Terraform registry](https://registry.terraform.io/providers/coder/coder/latest). Check this frequently to make sure you are on the latest version. @@ -222,10 +194,7 @@ Sometimes, the version may change and `resource` configurations will either become deprecated or new ones will be added when you get warnings or errors creating and pushing templates. -
- -
- How can I set up TLS for my deployment and not create a signed certificate? +### How can I set up TLS for my deployment and not create a signed certificate? Caddy is an easy-to-configure reverse proxy that also automatically creates certificates from Let's Encrypt. @@ -250,10 +219,7 @@ coder.example.com { } ``` -
- -
- I'm using Caddy as my reverse proxy in front of Coder. How do I set up a wildcard domain for port forwarding? +### I'm using Caddy as my reverse proxy in front of Coder. How do I set up a wildcard domain for port forwarding? Caddy requires your DNS provider's credentials to create wildcard certificates. This involves building the Caddy binary @@ -283,10 +249,7 @@ The updated Caddyfile configuration will look like this: } ``` -
- -
- Can I use local or remote Terraform Modules in Coder templates? +### Can I use local or remote Terraform Modules in Coder templates? One way is to reference a Terraform module from a GitHub repo to avoid duplication and then just extend it or pass template-specific @@ -328,10 +291,8 @@ References: - [Public Github Issue 6117](https://github.com/coder/coder/issues/6117) - [Public Github Issue 5677](https://github.com/coder/coder/issues/5677) - [Coder docs: Templates/Change Management](https://coder.com/docs/v2/latest/templates/change-management) -
-
- Can I run Coder in an air-gapped or offline mode? (no Internet)? +### Can I run Coder in an air-gapped or offline mode? (no Internet)? Yes, Coder can be deployed in air-gapped or offline mode. https://coder.com/docs/v2/latest/install/offline @@ -345,10 +306,7 @@ defaults to Google's STUN servers, so you can either create your STUN server in your network or disable and force all traffic through the control plane's DERP proxy. -
- -
- Create a randomized computer_name for an Azure VM +### Create a randomized computer_name for an Azure VM Azure VMs have a 15 character limit for the `computer_name` which can lead to duplicate name errors. @@ -363,10 +321,7 @@ locals { } ``` -
- -
- Do you have example JetBrains Gateway templates? +### Do you have example JetBrains Gateway templates? In August 2023, JetBrains certified the Coder plugin signifying enhanced stability and reliability. @@ -387,10 +342,8 @@ open the IDE. - [IntelliJ IDEA](https://github.com/sharkymark/v2-templates/tree/main/pod-idea) - [IntelliJ IDEA with Icon](https://github.com/sharkymark/v2-templates/tree/main/pod-idea-icon) -
-
- What options do I have for adding VS Code extensions into code-server, VS Code Desktop or Microsoft's Code Server? +### What options do I have for adding VS Code extensions into code-server, VS Code Desktop or Microsoft's Code Server? Coder has an open-source project called [`code-marketplace`](https://github.com/coder/code-marketplace) which is a @@ -416,10 +369,7 @@ https://github.com/sharkymark/v2-templates/blob/main/vs-code-server/main.tf > Note: these are example templates with no SLAs on them and are not guaranteed > for long-term support. -
- -
- I want to run Docker for my workspaces but not install Docker Desktop. +### I want to run Docker for my workspaces but not install Docker Desktop. [Colima](https://github.com/abiosoft/colima) is a Docker Desktop alternative. @@ -454,10 +404,7 @@ Colima will show the path to the docker socket so we have a [community template](https://github.com/sharkymark/v2-templates/tree/main/docker-code-server) that prompts the Coder admin to enter the docker socket as a Terraform variable. -
- -
- How to make a `coder_app` optional? +### How to make a `coder_app` optional? An example use case is the user should decide if they want a browser-based IDE like code-server when creating the workspace. @@ -515,10 +462,7 @@ resource "coder_app" "code-server" { } ``` -
- -
- Why am I getting this "remote host doesn't meet VS Code Server's prerequisites" error when opening up VSCode remote in a Linux environment? +### Why am I getting this "remote host doesn't meet VS Code Server's prerequisites" error when opening up VSCode remote in a Linux environment? ![VS Code Server prerequisite](https://github.com/coder/coder/assets/10648092/150c5996-18b1-4fae-afd0-be2b386a3239) @@ -529,10 +473,7 @@ image or supported OS for the VS Code Server. For more information on OS prerequisites for Linux, please look at the VSCode docs. https://code.visualstudio.com/docs/remote/linux#_local-linux-prerequisites -
- -
- How can I resolve disconnects when connected to Coder via JetBrains Gateway? +### How can I resolve disconnects when connected to Coder via JetBrains Gateway? If your JetBrains IDE is disconnected for a long period of time due to a network change (for example turning off a VPN), you may find that the IDE will not @@ -560,5 +501,3 @@ Note that the JetBrains Gateway configuration blocks for each host in your SSH config file will be overwritten by the JetBrains Gateway client when it re-authenticates to your Coder deployment so you must add the above config as a separate block and not add it to any existing ones. - -
diff --git a/docs/guides/support-bundle.md b/docs/guides/support-bundle.md new file mode 100644 index 0000000000..9b2d8ec52c --- /dev/null +++ b/docs/guides/support-bundle.md @@ -0,0 +1,87 @@ +# Generate and upload a Support Bundle to Coder Support + +When you engage with Coder support to diagnose an issue with your deployment, +you may be asked to generate and upload a "Support Bundle" for offline analysis. +This document explains the contents of a support bundle and the steps to submit +a support bundle to Coder staff. + +## What is a Support Bundle? + +A support bundle is an archive containing a snapshot of information about your +Coder deployment. + +It contains information about the workspace, the template it uses, running +agents in the workspace, and other detailed information useful for +troubleshooting. + +It is primarily intended for troubleshooting connectivity issues to workspaces, +but can be useful for diagnosing other issues as well. + +**While we attempt to redact sensitive information from support bundles, they +may contain information deemed sensitive by your organization and should be +treated as such.** + +A brief overview of all files contained in the bundle is provided below: + +> Note: detailed descriptions of all the information available in the bundle is +> out of scope, as support bundles are primarily intended for internal use. + +| Filename | Description | +| --------------------------------- | ------------------------------------------------------------------------------------------------ | +| `agent/agent.json` | The agent used to connect to the workspace with environment variables stripped. | +| `agent/agent_magicsock.html` | The contents of the HTTP debug endpoint of the agent's Tailscale connection. | +| `agent/client_magicsock.html` | The contents of the HTTP debug endpoint of the client's Tailscale connection. | +| `agent/listening_ports.json` | The listening ports detected by the selected agent running in the workspace. 
| +| `agent/logs.txt` | The logs of the selected agent running in the workspace. | +| `agent/manifest.json` | The manifest of the selected agent with environment variables stripped. | +| `agent/startup_logs.txt` | Startup logs of the workspace agent. | +| `agent/prometheus.txt` | The contents of the agent's Prometheus endpoint. | +| `cli_logs.txt` | Logs from running the `coder support bundle` command. | +| `deployment/buildinfo.json` | Coder version and build information. | +| `deployment/config.json` | Deployment [configuration](../api/general.md#get-deployment-config), with secret values removed. | +| `deployment/experiments.json` | Any [experiments](../cli/server.md#experiments) currently enabled for the deployment. | +| `deployment/health.json` | A snapshot of the [health status](../admin/healthcheck.md) of the deployment. | +| `logs.txt` | Logs from the `codersdk.Client` used to generate the bundle. | +| `network/connection_info.json` | Information used by workspace agents used to connect to Coder (DERP map etc.) | +| `network/coordinator_debug.html` | Peers currently connected to each Coder instance and the tunnels established between peers. | +| `network/netcheck.json` | Results of running `coder netcheck` locally. | +| `network/tailnet_debug.html` | Tailnet coordinators, their heartbeat ages, connected peers, and tunnels. | +| `workspace/build_logs.txt` | Build logs of the selected workspace. | +| `workspace/workspace.json` | Details of the selected workspace. | +| `workspace/parameters.json` | Build parameters of the selected workspace. | +| `workspace/template.json` | The template currently in use by the selected workspace. | +| `workspace/template_file.zip` | The source code of the template currently in use by the selected workspace. | +| `workspace/template_version.json` | The template version currently in use by the selected workspace. | + +## How do I generate a Support Bundle? + +1. Ensure your deployment is up and running. 
Generating a support bundle
+   requires the Coder deployment to be available.
+
+2. Ensure you have the Coder CLI installed on a local machine. See
+   [installation](../install/index.md) for steps on how to do this.
+
+   > Note: It is recommended to generate a support bundle from a location
+   > experiencing workspace connectivity issues.
+
+3. Ensure you are [logged in](../cli/login.md#login) to your Coder deployment as
+   a user with the Owner privilege.
+
+4. Run `coder support bundle [owner/workspace]`, and respond `yes` to the
+   prompt. The support bundle will be generated in the current directory with
+   the filename `coder-support-$TIMESTAMP.zip`.
+
+   > While support bundles can be generated without a running workspace, it is
+   > recommended to specify one to maximize troubleshooting information.
+
+5. (Recommended) Extract the support bundle and review its contents, redacting
+   any information you deem necessary.
+
+6. Coder staff will provide you a link where you can upload the bundle along
+   with any other necessary supporting files.
+
+   > Note: It is helpful to leave an informative message regarding the nature of
+   > supporting files.
+
+Coder support will then review the information you provided and respond to you
+with next steps.
diff --git a/docs/guides/xray-integration.md b/docs/guides/xray-integration.md
index 90d449853e..76c29e3343 100644
--- a/docs/guides/xray-integration.md
+++ b/docs/guides/xray-integration.md
@@ -19,7 +19,7 @@ using Coder's [JFrog Xray Integration](github.com/coder/coder-xray).
- A self-hosted JFrog Platform instance.
- Kubernetes workspaces running on Coder.
-## Deploying the Coder Xray Integration
+## Deploying the Coder - JFrog Xray Integration
1. 
Create a JFrog Platform [Access Token](https://jfrog.com/help/r/jfrog-platform-administration-documentation/access-tokens) @@ -37,7 +37,7 @@ kubectl create secret generic coder-token --from-literal=coder-token='' kubectl create secret generic jfrog-token --from-literal=user='' --from-literal=token='' ``` -4. Deploy the Coder Xray integration. +4. Deploy the Coder - JFrog Xray integration. ```bash helm repo add coder-xray https://helm.coder.com/coder-xray @@ -69,4 +69,4 @@ image = "//:" > use it in the `imagePullSecrets` field of the kubernetes pod. See this > [guide](./image-pull-secret.md) for more information. -![Coder Xray Integration](../images/guides/xray-integration/example.png) +![JFrog Xray Integration](../images/guides/xray-integration/example.png) diff --git a/docs/images/architecture-air-gapped.png b/docs/images/architecture-air-gapped.png new file mode 100644 index 0000000000..b907eae150 Binary files /dev/null and b/docs/images/architecture-air-gapped.png differ diff --git a/docs/images/architecture-devcontainers.png b/docs/images/architecture-devcontainers.png new file mode 100644 index 0000000000..c61ad77085 Binary files /dev/null and b/docs/images/architecture-devcontainers.png differ diff --git a/docs/images/architecture-multi-cloud.png b/docs/images/architecture-multi-cloud.png new file mode 100644 index 0000000000..4b40126c7b Binary files /dev/null and b/docs/images/architecture-multi-cloud.png differ diff --git a/docs/images/architecture-multi-region.png b/docs/images/architecture-multi-region.png index 503c8e0fe9..d76141e837 100644 Binary files a/docs/images/architecture-multi-region.png and b/docs/images/architecture-multi-region.png differ diff --git a/docs/install/index.md b/docs/install/index.md index 7e63b91ca6..69fb818f31 100644 --- a/docs/install/index.md +++ b/docs/install/index.md @@ -2,6 +2,9 @@ A single CLI (`coder`) is used for both the Coder server and the client. 
+We support two release channels: mainline and stable - read the +[Releases](./releases.md) page to learn more about which best suits your team. + There are several ways to install Coder. For production deployments with 50+ users, we recommend [installing on Kubernetes](./kubernetes.md). Otherwise, you can install Coder on your local machine or on a VM: diff --git a/docs/install/kubernetes.md b/docs/install/kubernetes.md index afa6d24fff..2e7fd125de 100644 --- a/docs/install/kubernetes.md +++ b/docs/install/kubernetes.md @@ -7,6 +7,15 @@ You'll also want to install the [latest version of Coder](https://github.com/coder/coder/releases/latest) locally in order to log in and manage templates. +> Coder supports two release channels: mainline for the true latest version of +> Coder, and stable for large enterprise deployments. Before installing your +> control plane via Helm, please read the [Releases](./releases.md) document to +> identify the best-suited release for your team, then specify the version using +> Helm's `--version` flag. + +> The version flags for both stable and mainline are automatically filled in +> this page. + ## Install Coder with Helm 1. Create a namespace for Coder, such as `coder`: @@ -112,10 +121,22 @@ locally in order to log in and manage templates. 1. Run the following command to install the chart in your cluster. + For the **mainline** Coder release: + ```shell helm install coder coder-v2/coder \ --namespace coder \ - --values values.yaml + --values values.yaml \ + --version 2.10.0 + ``` + + For the **stable** Coder release: + + ```shell + helm install coder coder-v2/coder \ + --namespace coder \ + --values values.yaml \ + --version 2.9.1 ``` You can watch Coder start up by running `kubectl get pods -n coder`. 
Once diff --git a/docs/install/releases.md b/docs/install/releases.md new file mode 100644 index 0000000000..701c9793e4 --- /dev/null +++ b/docs/install/releases.md @@ -0,0 +1,56 @@ +# Releases + +Coder releases are cut directly from main in our +[Github](https://github.com/coder/coder) on the first Tuesday of each month. + +We recommend enterprise customers test the compatibility of new releases with +their infrastructure on a staging environment before upgrading a production +deployment. + +We support two release channels: +[mainline](https://github.com/coder/coder/releases/tag/v2.10.0) for the edge version of Coder +and [stable](https://github.com/coder/coder/releases/latest) for those with +lower tolerance for fault. We field our mainline releases publicly for two weeks +before promoting them to stable. + +### Mainline releases + +- Intended for customers with a staging environment +- Gives earliest access to new features +- May include minor bugs +- All bugfixes and security patches are supported + +### Stable releases + +- Safest upgrade/installation path +- May not include the latest features +- Security vulnerabilities and major bugfixes are supported + +> Note: We support major security vulnerabilities (CVEs) for the past three +> versions of Coder. + +## Installing stable + +When installing Coder, we generally advise specifying the desired version from +our Github [releases page](https://github.com/coder/coder/releases). + +You can also use our `install.sh` script with the `stable` flag to install the +latest stable release: + +```shell +curl -fsSL https://coder.com/install.sh | sh -s -- --stable +``` + +Best practices for installing Coder can be found on our [install](./index.md) +pages.
+ +## Release schedule + +| Release name | Date | Status | +| ------------ | ------------------ | ---------------- | +| 2.7.0 | January 01, 2024 | Not Supported | +| 2.8.0 | February 06, 2024 | Security Support | +| 2.9.0 | March 07, 2024 | Stable | +| 2.10.0 | April 03, 2024 | Mainline | +| 2.11.0 | May 07, 2024 | Not Released | +| 2.12.0 | June 04, 2024 | Not Released | diff --git a/docs/manifest.json b/docs/manifest.json index 65a5175f20..3717600421 100644 --- a/docs/manifest.json +++ b/docs/manifest.json @@ -55,6 +55,11 @@ "title": "1-click install", "description": "Install Coder on a cloud provider with a single click", "path": "./install/1-click.md" + }, + { + "title": "Releases", + "description": "Coder Release Channels and Cadence", + "path": "./install/releases.md" } ] }, @@ -230,9 +235,9 @@ "icon_path": "./images/icons/docker.svg" }, { - "title": "Devcontainers", - "description": "Use devcontainers in workspaces", - "path": "./templates/devcontainers.md", + "title": "Dev Containers", + "description": "Use Dev Containers in workspaces", + "path": "./templates/dev-containers.md", "state": "alpha" }, { @@ -1070,6 +1075,11 @@ "path": "./guides/index.md", "icon_path": "./images/icons/notes.svg", "children": [ + { + "title": "Generate a Support Bundle", + "description": "Generate and upload a Support Bundle to Coder Support", + "path": "./guides/support-bundle.md" + }, { "title": "Configuring Okta", "description": "Custom claims/scopes with Okta for group/role sync", @@ -1101,7 +1111,7 @@ "path": "./guides/azure-federation.md" }, { - "title": "Scanning Coder Workspaces with Xray", + "title": "Scanning Coder Workspaces with JFrog Xray", "description": "Integrate Coder with JFrog Xray", "path": "./guides/xray-integration.md" } diff --git a/docs/platforms/kubernetes/deployment-logs.md b/docs/platforms/kubernetes/deployment-logs.md index 14d8abd170..184362cc14 100644 --- a/docs/platforms/kubernetes/deployment-logs.md +++ 
b/docs/platforms/kubernetes/deployment-logs.md @@ -33,7 +33,7 @@ serviceAccount: ## Installation -Install the `coder-kubestream-logs` helm chart on the cluster where the +Install the `coder-logstream-kube` helm chart on the cluster where the deployment is running. ```shell diff --git a/docs/templates/devcontainers.md b/docs/templates/dev-containers.md similarity index 85% rename from docs/templates/devcontainers.md rename to docs/templates/dev-containers.md index d8b0417ac1..787955ada7 100644 --- a/docs/templates/devcontainers.md +++ b/docs/templates/dev-containers.md @@ -1,17 +1,17 @@ -# Devcontainers (alpha) +# Dev Containers (alpha) -[Devcontainers](https://containers.dev) are an open source specification for -defining development environments. +[Development containers](https://containers.dev) are an open source +specification for defining development environments. [envbuilder](https://github.com/coder/envbuilder) is an open source project by -Coder that runs devcontainers via Coder templates and your underlying +Coder that runs dev containers via Coder templates and your underlying infrastructure. It can run on Docker or Kubernetes. There are several benefits to adding a devcontainer-compatible template to Coder: -- Drop-in migration from Codespaces (or any existing repositories that use - devcontainers) +- Drop-in migration from Codespaces (or any existing repositories that use dev + containers) - Easier to start projects from Coder. Just create a new workspace then pick a starter devcontainer. - Developer teams can "bring their own image." No need for platform teams to @@ -47,7 +47,7 @@ information. ## Caching -To improve build times, devcontainers can be cached. Refer to the +To improve build times, dev containers can be cached. Refer to the [envbuilder documentation](https://github.com/coder/envbuilder/) for more information. 
diff --git a/dogfood/Dockerfile b/dogfood/Dockerfile index c2899a48c0..4daaa0a636 100644 --- a/dogfood/Dockerfile +++ b/dogfood/Dockerfile @@ -8,7 +8,7 @@ FROM ubuntu:jammy AS go RUN apt-get update && apt-get install --yes curl gcc # Install Go manually, so that we can control the version -ARG GO_VERSION=1.21.5 +ARG GO_VERSION=1.21.9 RUN mkdir --parents /usr/local/go # Boring Go is needed to build FIPS-compliant binaries. diff --git a/enterprise/coderd/coderd.go b/enterprise/coderd/coderd.go index c3b8cc0199..2bb9eb3bc0 100644 --- a/enterprise/coderd/coderd.go +++ b/enterprise/coderd/coderd.go @@ -148,7 +148,7 @@ func New(ctx context.Context, options *Options) (_ *API, err error) { DB: options.Database, OAuth2Configs: oauthConfigs, RedirectToLogin: false, - DisableSessionExpiryRefresh: options.DeploymentValues.DisableSessionExpiryRefresh.Value(), + DisableSessionExpiryRefresh: options.DeploymentValues.Sessions.DisableExpiryRefresh.Value(), Optional: false, SessionTokenFunc: nil, // Default behavior PostAuthAdditionalHeadersFunc: options.PostAuthAdditionalHeadersFunc, @@ -157,7 +157,7 @@ func New(ctx context.Context, options *Options) (_ *API, err error) { DB: options.Database, OAuth2Configs: oauthConfigs, RedirectToLogin: false, - DisableSessionExpiryRefresh: options.DeploymentValues.DisableSessionExpiryRefresh.Value(), + DisableSessionExpiryRefresh: options.DeploymentValues.Sessions.DisableExpiryRefresh.Value(), Optional: true, SessionTokenFunc: nil, // Default behavior PostAuthAdditionalHeadersFunc: options.PostAuthAdditionalHeadersFunc, @@ -630,7 +630,13 @@ func (api *API) updateEntitlements(ctx context.Context) error { if initial, changed, enabled := featureChanged(codersdk.FeatureHighAvailability); shouldUpdate(initial, changed, enabled) { var coordinator agpltailnet.Coordinator - if enabled { + // If HA is enabled, but the database is in-memory, we can't actually + // run HA and the PG coordinator. 
So throw a log line, and continue to use + // the in memory AGPL coordinator. + if enabled && api.DeploymentValues.InMemoryDatabase.Value() { + api.Logger.Warn(ctx, "high availability is enabled, but cannot be configured due to the database being set to in-memory") + } + if enabled && !api.DeploymentValues.InMemoryDatabase.Value() { haCoordinator, err := tailnet.NewPGCoord(api.ctx, api.Logger, api.Pubsub, api.Database) if err != nil { api.Logger.Error(ctx, "unable to set up high availability coordinator", slog.Error(err)) diff --git a/enterprise/coderd/workspaceproxy.go b/enterprise/coderd/workspaceproxy.go index 379d01ad43..234212f479 100644 --- a/enterprise/coderd/workspaceproxy.go +++ b/enterprise/coderd/workspaceproxy.go @@ -658,7 +658,7 @@ func (api *API) workspaceProxyRegister(rw http.ResponseWriter, r *http.Request) if err != nil { return xerrors.Errorf("insert replica: %w", err) } - } else if err != nil { + } else { return xerrors.Errorf("get replica: %w", err) } diff --git a/enterprise/coderd/workspaces_test.go b/enterprise/coderd/workspaces_test.go index 6d40d77bc2..b44357c5b5 100644 --- a/enterprise/coderd/workspaces_test.go +++ b/enterprise/coderd/workspaces_test.go @@ -913,6 +913,44 @@ func TestWorkspaceAutobuild(t *testing.T) { ws = coderdtest.MustWorkspace(t, client, ws.ID) require.Equal(t, version2.ID, ws.LatestBuild.TemplateVersionID) }) + + t.Run("TemplateDoesNotAllowUserAutostop", func(t *testing.T) { + t.Parallel() + client := coderdtest.New(t, &coderdtest.Options{ + IncludeProvisionerDaemon: true, + TemplateScheduleStore: schedule.NewEnterpriseTemplateScheduleStore(agplUserQuietHoursScheduleStore()), + }) + user := coderdtest.CreateFirstUser(t, client) + version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil) + templateTTL := 24 * time.Hour.Milliseconds() + template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID, func(ctr *codersdk.CreateTemplateRequest) { + ctr.DefaultTTLMillis = 
ptr.Ref(templateTTL) + ctr.AllowUserAutostop = ptr.Ref(false) + }) + coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID) + workspace := coderdtest.CreateWorkspace(t, client, user.OrganizationID, template.ID, func(cwr *codersdk.CreateWorkspaceRequest) { + cwr.TTLMillis = nil // ensure that no default TTL is set + }) + coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID) + + // TTL should be set by the template + require.Equal(t, false, template.AllowUserAutostop) + require.Equal(t, templateTTL, template.DefaultTTLMillis) + require.Equal(t, templateTTL, *workspace.TTLMillis) + + // Change the template's default TTL and refetch the workspace + templateTTL = 72 * time.Hour.Milliseconds() + ctx := testutil.Context(t, testutil.WaitShort) + template = coderdtest.UpdateTemplateMeta(t, client, template.ID, codersdk.UpdateTemplateMeta{ + DefaultTTLMillis: templateTTL, + }) + workspace, err := client.Workspace(ctx, workspace.ID) + require.NoError(t, err) + + // Ensure that the new value is reflected in the template and workspace + require.Equal(t, templateTTL, template.DefaultTTLMillis) + require.Equal(t, templateTTL, *workspace.TTLMillis) + }) } // Blocked by autostart requirements diff --git a/enterprise/tailnet/pgcoord.go b/enterprise/tailnet/pgcoord.go index aabb21eef6..aecdcde828 100644 --- a/enterprise/tailnet/pgcoord.go +++ b/enterprise/tailnet/pgcoord.go @@ -231,6 +231,17 @@ func (c *pgCoord) Coordinate( logger := c.logger.With(slog.F("peer_id", id)) reqs := make(chan *proto.CoordinateRequest, agpl.RequestBufferSize) resps := make(chan *proto.CoordinateResponse, agpl.ResponseBufferSize) + if !c.querier.isHealthy() { + // If the coordinator is unhealthy, we don't want to hook this Coordinate call up to the + // binder, as that can cause an unnecessary call to DeleteTailnetPeer when the connIO is + // closed. Instead, we just close the response channel and bail out. + // c.f. 
https://github.com/coder/coder/issues/12923 + c.logger.Info(ctx, "closed incoming coordinate call while unhealthy", + slog.F("peer_id", id), + ) + close(resps) + return reqs, resps + } cIO := newConnIO(c.ctx, ctx, logger, c.bindings, c.tunnelerCh, reqs, resps, id, name, a) err := agpl.SendCtx(c.ctx, c.newConnections, cIO) if err != nil { @@ -842,7 +853,12 @@ func (q *querier) newConn(c *connIO) { defer q.mu.Unlock() if !q.healthy { err := c.Close() - q.logger.Info(q.ctx, "closed incoming connection while unhealthy", + // This can only happen during a narrow window where we were healthy + // when pgCoord checked before accepting the connection, but now are + // unhealthy now that we get around to processing it. Seeing a small + // number of these logs is not worrying, but a large number probably + // indicates something is amiss. + q.logger.Warn(q.ctx, "closed incoming connection while unhealthy", slog.Error(err), slog.F("peer_id", c.UniqueID()), ) @@ -865,6 +881,12 @@ func (q *querier) newConn(c *connIO) { }) } +func (q *querier) isHealthy() bool { + q.mu.Lock() + defer q.mu.Unlock() + return q.healthy +} + func (q *querier) cleanupConn(c *connIO) { logger := q.logger.With(slog.F("peer_id", c.UniqueID())) q.mu.Lock() diff --git a/enterprise/tailnet/pgcoord_internal_test.go b/enterprise/tailnet/pgcoord_internal_test.go index d5b79d6225..b1bfb371f0 100644 --- a/enterprise/tailnet/pgcoord_internal_test.go +++ b/enterprise/tailnet/pgcoord_internal_test.go @@ -13,6 +13,7 @@ import ( "github.com/google/uuid" "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" + "golang.org/x/xerrors" gProto "google.golang.org/protobuf/proto" "cdr.dev/slog" @@ -21,6 +22,8 @@ import ( "github.com/coder/coder/v2/coderd/database" "github.com/coder/coder/v2/coderd/database/dbmock" "github.com/coder/coder/v2/coderd/database/dbtestutil" + "github.com/coder/coder/v2/coderd/database/pubsub" + agpl "github.com/coder/coder/v2/tailnet" "github.com/coder/coder/v2/tailnet/proto" 
"github.com/coder/coder/v2/testutil" ) @@ -291,3 +294,51 @@ func TestGetDebug(t *testing.T) { require.Equal(t, peerID, debug.Tunnels[0].SrcID) require.Equal(t, dstID, debug.Tunnels[0].DstID) } + +// TestPGCoordinatorUnhealthy tests that when the coordinator fails to send heartbeats and is +// unhealthy it disconnects any peers and does not send any extraneous database queries. +func TestPGCoordinatorUnhealthy(t *testing.T) { + t.Parallel() + ctx := testutil.Context(t, testutil.WaitShort) + logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug) + + ctrl := gomock.NewController(t) + mStore := dbmock.NewMockStore(ctrl) + ps := pubsub.NewInMemory() + + // after 3 failed heartbeats, the coordinator is unhealthy + mStore.EXPECT(). + UpsertTailnetCoordinator(gomock.Any(), gomock.Any()). + MinTimes(3). + Return(database.TailnetCoordinator{}, xerrors.New("badness")) + mStore.EXPECT(). + DeleteCoordinator(gomock.Any(), gomock.Any()). + Times(1). + Return(nil) + // But, in particular we DO NOT want the coordinator to call DeleteTailnetPeer, as this is + // unnecessary and can spam the database. c.f. https://github.com/coder/coder/issues/12923 + + // these cleanup queries run, but we don't care for this test + mStore.EXPECT().CleanTailnetCoordinators(gomock.Any()).AnyTimes().Return(nil) + mStore.EXPECT().CleanTailnetLostPeers(gomock.Any()).AnyTimes().Return(nil) + mStore.EXPECT().CleanTailnetTunnels(gomock.Any()).AnyTimes().Return(nil) + + coordinator, err := newPGCoordInternal(ctx, logger, ps, mStore) + require.NoError(t, err) + + require.Eventually(t, func() bool { + return !coordinator.querier.isHealthy() + }, testutil.WaitShort, testutil.IntervalFast) + + pID := uuid.UUID{5} + _, resps := coordinator.Coordinate(ctx, pID, "test", agpl.AgentCoordinateeAuth{ID: pID}) + resp := testutil.RequireRecvCtx(ctx, t, resps) + require.Nil(t, resp, "channel should be closed") + + // give the coordinator some time to process any pending work. 
We are + // testing here that a database call is absent, so we don't want to race to + // shut down the test. + time.Sleep(testutil.IntervalMedium) + _ = coordinator.Close() + require.Eventually(t, ctrl.Satisfied, testutil.WaitShort, testutil.IntervalFast) +} diff --git a/examples/templates/docker/main.tf b/examples/templates/docker/main.tf index baa0bbab66..3d8bef5c59 100644 --- a/examples/templates/docker/main.tf +++ b/examples/templates/docker/main.tf @@ -28,6 +28,12 @@ resource "coder_agent" "main" { startup_script = <<-EOT set -e + # Prepare user home with default files on first start. + if [ ! -f ~/.init_done ]; then + cp -rT /etc/skel ~ + touch ~/.init_done + fi + # install and start code-server curl -fsSL https://code-server.dev/install.sh | sh -s -- --method=standalone --prefix=/tmp/code-server --version 4.19.1 /tmp/code-server/bin/code-server --auth none --port 13337 >/tmp/code-server.log 2>&1 & diff --git a/flake.lock b/flake.lock index fe4bb7c34f..2bbf425275 100644 --- a/flake.lock +++ b/flake.lock @@ -43,11 +43,11 @@ "systems": "systems_2" }, "locked": { - "lastModified": 1705309234, - "narHash": "sha256-uNRRNRKmJyCRC/8y1RqBkqWBLM034y4qN7EprSdmgyA=", + "lastModified": 1710146030, + "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", "owner": "numtide", "repo": "flake-utils", - "rev": "1ef2e671c3b0c19053962c07dbda38332dcebf26", + "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", "type": "github" }, "original": { @@ -73,11 +73,11 @@ }, "nixpkgs_2": { "locked": { - "lastModified": 1706550542, - "narHash": "sha256-UcsnCG6wx++23yeER4Hg18CXWbgNpqNXcHIo5/1Y+hc=", + "lastModified": 1712439257, + "narHash": "sha256-aSpiNepFOMk9932HOax0XwNxbA38GOUVOiXfUVPOrck=", "owner": "nixos", "repo": "nixpkgs", - "rev": "97b17f32362e475016f942bbdfda4a4a72a8a652", + "rev": "ff0dbd94265ac470dda06a657d5fe49de93b4599", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index f906f3c3d5..c0407c9e18 100644 --- a/flake.nix +++ b/flake.nix @@ -58,6 
+58,7 @@ pango pixman pkg-config + playwright-driver.browsers postgresql_13 protobuf protoc-gen-go @@ -86,7 +87,13 @@ in { defaultPackage = formatter; # or replace it with your desired default package. - devShell = pkgs.mkShell { buildInputs = devShellPackages; }; + devShell = pkgs.mkShell { + buildInputs = devShellPackages; + shellHook = '' + export PLAYWRIGHT_BROWSERS_PATH=${pkgs.playwright-driver.browsers} + export PLAYWRIGHT_SKIP_VALIDATE_HOST_REQUIREMENTS=true + ''; + }; packages.all = allPackages; } ); diff --git a/go.mod b/go.mod index 1a43916705..9346fc7427 100644 --- a/go.mod +++ b/go.mod @@ -2,6 +2,15 @@ module github.com/coder/coder/v2 go 1.21.4 +// Required until a v3 of chroma is created to lazily initialize all XML files. +// None of our dependencies seem to use the registries anyways, so this +// should be fine... +// See: https://github.com/kylecarbs/chroma/commit/9e036e0631f38ef60de5ee8eec7a42e9cb7da423 +replace github.com/alecthomas/chroma/v2 => github.com/kylecarbs/chroma/v2 v2.0.0-20240401211003-9e036e0631f3 + +// Required until https://github.com/go-playground/validator/pull/1246 is merged. +replace github.com/go-playground/validator/v10 => github.com/kylecarbs/validator/v10 v10.0.0-20240401214733-cebbc77c0ece + // Required until https://github.com/hashicorp/terraform-config-inspect/pull/74 is merged. replace github.com/hashicorp/terraform-config-inspect => github.com/kylecarbs/terraform-config-inspect v0.0.0-20211215004401-bbc517866b88 @@ -33,7 +42,7 @@ replace github.com/dlclark/regexp2 => github.com/dlclark/regexp2 v1.7.0 // There are a few minor changes we make to Tailscale that we're slowly upstreaming. Compare here: // https://github.com/tailscale/tailscale/compare/main...coder:tailscale:main -replace tailscale.com => github.com/coder/tailscale v1.1.1-0.20240312053019-86ba201e56df +replace tailscale.com => github.com/coder/tailscale v1.1.1-0.20240401202854-d329bbdb530d // Fixes a race-condition in coder/wgtunnel. 
// Upstream PR: https://github.com/WireGuard/wireguard-go/pull/85 @@ -84,7 +93,7 @@ require ( github.com/awalterschulze/gographviz v2.0.3+incompatible github.com/aws/smithy-go v1.20.1 github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 - github.com/bramvdbogaerde/go-scp v1.3.0 + github.com/bramvdbogaerde/go-scp v1.4.0 github.com/briandowns/spinner v1.18.1 github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5 github.com/cenkalti/backoff/v4 v4.3.0 @@ -92,18 +101,17 @@ require ( github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89 github.com/chromedp/chromedp v0.9.2 github.com/cli/safeexec v1.0.1 - github.com/codeclysm/extract/v3 v3.1.1 github.com/coder/flog v1.1.0 github.com/coder/pretty v0.0.0-20230908205945-e89ba86370e0 github.com/coder/retry v1.5.1 - github.com/coder/terraform-provider-coder v0.19.0 + github.com/coder/terraform-provider-coder v0.20.1 github.com/coder/wgtunnel v0.1.13-0.20231127054351-578bfff9b92a github.com/coreos/go-oidc/v3 v3.10.0 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf github.com/creack/pty v1.1.21 github.com/dave/dst v0.27.2 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc - github.com/elastic/go-sysinfo v1.13.1 + github.com/elastic/go-sysinfo v1.14.0 github.com/fatih/color v1.16.0 github.com/fatih/structs v1.1.0 github.com/fatih/structtag v1.2.0 @@ -180,20 +188,20 @@ require ( go.uber.org/atomic v1.11.0 go.uber.org/goleak v1.2.1 go4.org/netipx v0.0.0-20230728180743-ad4cb58a6516 - golang.org/x/crypto v0.21.0 + golang.org/x/crypto v0.22.0 golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 - golang.org/x/mod v0.16.0 - golang.org/x/net v0.22.0 - golang.org/x/oauth2 v0.18.0 - golang.org/x/sync v0.6.0 - golang.org/x/sys v0.18.0 - golang.org/x/term v0.18.0 + golang.org/x/mod v0.17.0 + golang.org/x/net v0.24.0 + golang.org/x/oauth2 v0.19.0 + golang.org/x/sync v0.7.0 + golang.org/x/sys v0.19.0 + golang.org/x/term v0.19.0 golang.org/x/text v0.14.0 - golang.org/x/tools v0.19.0 + 
golang.org/x/tools v0.20.0 golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 golang.zx2c4.com/wireguard v0.0.0-20230704135630-469159ecf7d1 google.golang.org/api v0.172.0 - google.golang.org/grpc v1.62.1 + google.golang.org/grpc v1.63.0 google.golang.org/protobuf v1.33.0 gopkg.in/DataDog/dd-trace-go.v1 v1.61.0 gopkg.in/natefinch/lumberjack.v2 v2.2.1 @@ -224,7 +232,7 @@ require ( ) require ( - cloud.google.com/go/compute v1.23.4 // indirect + cloud.google.com/go/compute v1.24.0 // indirect cloud.google.com/go/logging v1.9.0 // indirect cloud.google.com/go/longrunning v0.5.5 // indirect filippo.io/edwards25519 v1.0.0 // indirect @@ -307,7 +315,7 @@ require ( github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.2 // indirect github.com/google/flatbuffers v23.1.21+incompatible // indirect github.com/google/go-querystring v1.1.0 // indirect @@ -319,7 +327,6 @@ require ( github.com/gorilla/css v1.0.0 // indirect github.com/gorilla/mux v1.8.1 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 // indirect - github.com/h2non/filetype v1.1.3 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 // indirect @@ -336,11 +343,9 @@ require ( github.com/imdario/mergo v0.3.15 // indirect github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 // indirect github.com/jsimonetti/rtnetlink v1.3.5 // indirect - github.com/juju/errors v1.0.0 // indirect 
github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a // indirect github.com/kr/fs v0.1.0 // indirect github.com/leodido/go-urn v1.4.0 // indirect @@ -397,7 +402,6 @@ require ( github.com/tidwall/pretty v1.2.1 // indirect github.com/tinylib/msgp v1.1.8 // indirect github.com/u-root/uio v0.0.0-20240209044354-b3d14b93376a // indirect - github.com/ulikunitz/xz v0.5.11 // indirect github.com/vishvananda/netlink v1.2.1-beta.2 // indirect github.com/vishvananda/netns v0.0.4 // indirect github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect @@ -423,8 +427,8 @@ require ( golang.zx2c4.com/wireguard/wgctrl v0.0.0-20230429144221-925a1e7659e6 // indirect golang.zx2c4.com/wireguard/windows v0.5.3 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20240205150955-31a09d347014 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 // indirect + google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect howett.net/plist v1.0.0 // indirect diff --git a/go.sum b/go.sum index 05dc5fa961..10e0890965 100644 --- a/go.sum +++ b/go.sum @@ -1,8 +1,8 @@ cdr.dev/slog v1.6.2-0.20240126064726-20367d4aede6 h1:KHblWIE/KHOwQ6lEbMZt6YpcGve2FEZ1sDtrW1Am5UI= cdr.dev/slog v1.6.2-0.20240126064726-20367d4aede6/go.mod h1:NaoTA7KwopCrnaSb0JXTC0PTp/O/Y83Lndnq0OEV3ZQ= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go/compute v1.23.4 h1:EBT9Nw4q3zyE7G45Wvv3MzolIrCJEuHys5muLY0wvAw= -cloud.google.com/go/compute v1.23.4/go.mod h1:/EJMj55asU6kAFnuZET8zqgwgJ9FvXWXOkkfQZa4ioI= +cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg= +cloud.google.com/go/compute v1.24.0/go.mod 
h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/logging v1.9.0 h1:iEIOXFO9EmSiTjDmfpbRjOxECO7R8C7b8IXUGOj7xZw= @@ -65,8 +65,6 @@ github.com/akutz/memconn v0.1.0 h1:NawI0TORU4hcOMsMr11g7vwlCdkYeLKXBcxWu2W/P8A= github.com/akutz/memconn v0.1.0/go.mod h1:Jo8rI7m0NieZyLI5e2CDlRdRqRRB4S7Xp77ukDjH+Fw= github.com/alecthomas/assert/v2 v2.6.0 h1:o3WJwILtexrEUk3cUVal3oiQY2tfgr/FHWiz/v2n4FU= github.com/alecthomas/assert/v2 v2.6.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= -github.com/alecthomas/chroma/v2 v2.13.0 h1:VP72+99Fb2zEcYM0MeaWJmV+xQvz5v5cxRHd+ooU1lI= -github.com/alecthomas/chroma/v2 v2.13.0/go.mod h1:BUGjjsD+ndS6eX37YgTchSEG+Jg9Jv1GiZs9sqPqztk= github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc= github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 h1:Kk6a4nehpJ3UuJRqlA3JxYxBZEqCeOmATOvrbT4p9RA= @@ -86,8 +84,6 @@ github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q= github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE= -github.com/arduino/go-paths-helper v1.2.0 h1:qDW93PR5IZUN/jzO4rCtexiwF8P4OIcOmcSgAYLZfY4= -github.com/arduino/go-paths-helper v1.2.0/go.mod h1:HpxtKph+g238EJHq4geEPv9p+gl3v5YYu35Yb+w31Ck= github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 h1:7Ip0wMmLHLRJdrloDxZfhMm0xrLXZS8+COSu2bXmEQs= github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2/go.mod 
h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-radix v1.0.1-0.20221118154546-54df44f2176c h1:651/eoCRnQ7YtSjAnSzRucrJz+3iGEFt+ysraELS81M= @@ -160,8 +156,8 @@ github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 h1:41iFGWnSlI2 github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bool64/shared v0.1.5 h1:fp3eUhBsrSjNCQPcSdQqZxxh9bBwrYiZ+zOKFkM0/2E= github.com/bool64/shared v0.1.5/go.mod h1:081yz68YC9jeFB3+Bbmno2RFWvGKv1lPKkMP6MHJlPs= -github.com/bramvdbogaerde/go-scp v1.3.0 h1:2EDcH4fQr6ylcVV4p9HLI0Lcr6JuHZyR+sBOPukXSAA= -github.com/bramvdbogaerde/go-scp v1.3.0/go.mod h1:27lDUlS44PyAtY6BGtu2a8ksStHLNaJraqw65UmRA8c= +github.com/bramvdbogaerde/go-scp v1.4.0 h1:jKMwpwCbcX1KyvDbm/PDJuXcMuNVlLGi0Q0reuzjyKY= +github.com/bramvdbogaerde/go-scp v1.4.0/go.mod h1:on2aH5AxaFb2G0N5Vsdy6B0Ml7k9HuHSwfo1y0QzAbQ= github.com/bytecodealliance/wasmtime-go/v3 v3.0.2 h1:3uZCA/BLTIu+DqCfguByNMJa2HVHpXvjfy0Dy7g6fuA= github.com/bytecodealliance/wasmtime-go/v3 v3.0.2/go.mod h1:RnUjnIXxEJcL6BgCvNyzCCRzZcxCgsZCi+RNlvYor5Q= github.com/bytedance/sonic v1.10.0 h1:qtNZduETEIWJVIyDl01BeNxur2rW9OwTQ/yBqFRkKEk= @@ -201,8 +197,6 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/codeclysm/extract/v3 v3.1.1 h1:iHZtdEAwSTqPrd+1n4jfhr1qBhUWtHlMTjT90+fJVXg= -github.com/codeclysm/extract/v3 v3.1.1/go.mod h1:ZJi80UG2JtfHqJI+lgJSCACttZi++dHxfWuPaMhlOfQ= github.com/coder/flog v1.1.0 h1:kbAes1ai8fIS5OeV+QAnKBQE22ty1jRF/mcAwHpLBa4= github.com/coder/flog v1.1.0/go.mod h1:UQlQvrkJBvnRGo69Le8E24Tcl5SJleAAR7gYEHzAmdQ= github.com/coder/glog v1.0.1-0.20220322161911-7365fe7f2cd1 
h1:UqBrPWSYvRI2s5RtOul20JukUEpu4ip9u7biBL+ntgk= @@ -221,10 +215,10 @@ github.com/coder/serpent v0.7.0 h1:zGpD2GlF3lKIVkMjNGKbkip88qzd5r/TRcc30X/SrT0= github.com/coder/serpent v0.7.0/go.mod h1:REkJ5ZFHQUWFTPLExhXYZ1CaHFjxvGNRlLXLdsI08YA= github.com/coder/ssh v0.0.0-20231128192721-70855dedb788 h1:YoUSJ19E8AtuUFVYBpXuOD6a/zVP3rcxezNsoDseTUw= github.com/coder/ssh v0.0.0-20231128192721-70855dedb788/go.mod h1:aGQbuCLyhRLMzZF067xc84Lh7JDs1FKwCmF1Crl9dxQ= -github.com/coder/tailscale v1.1.1-0.20240312053019-86ba201e56df h1:ZipFsPxJXSgTPUemHK4IFYydacrwtEmYJc4/XisybTw= -github.com/coder/tailscale v1.1.1-0.20240312053019-86ba201e56df/go.mod h1:L8tPrwSi31RAMEMV8rjb0vYTGs7rXt8rAHbqY/p41j4= -github.com/coder/terraform-provider-coder v0.19.0 h1:mmUXSXcar1h2wgwoHIUwdEKy9Kw0GW7fLO4Vzzf+4R4= -github.com/coder/terraform-provider-coder v0.19.0/go.mod h1:pACHRoXSHBGyY696mLeQ1hR/Ag1G2wFk5bw0mT5Zp2g= +github.com/coder/tailscale v1.1.1-0.20240401202854-d329bbdb530d h1:IMvBC1GrCIiZFxpOYRQacZtdjnmsdWNAMilPz+kvdG4= +github.com/coder/tailscale v1.1.1-0.20240401202854-d329bbdb530d/go.mod h1:L8tPrwSi31RAMEMV8rjb0vYTGs7rXt8rAHbqY/p41j4= +github.com/coder/terraform-provider-coder v0.20.1 h1:hz0yvDl8rDJyDgUlFH8QrGUxFKrwmyAQpOhaoTMEmtY= +github.com/coder/terraform-provider-coder v0.20.1/go.mod h1:pACHRoXSHBGyY696mLeQ1hR/Ag1G2wFk5bw0mT5Zp2g= github.com/coder/wgtunnel v0.1.13-0.20231127054351-578bfff9b92a h1:KhR9LUVllMZ+e9lhubZ1HNrtJDgH5YLoTvpKwmrGag4= github.com/coder/wgtunnel v0.1.13-0.20231127054351-578bfff9b92a/go.mod h1:QzfptVUdEO+XbkzMKx1kw13i9wwpJlfI1RrZ6SNZ0hA= github.com/coder/wireguard-go v0.0.0-20230807234434-d825b45ccbf5 h1:eDk/42Kj4xN4yfE504LsvcFEo3dWUiCOaBiWJ2uIH2A= @@ -281,8 +275,8 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/ebitengine/purego v0.6.0-alpha.5 h1:EYID3JOAdmQ4SNZYJHu9V6IqOeRQDBYxqKAg9PyoHFY= github.com/ebitengine/purego 
v0.6.0-alpha.5/go.mod h1:ah1In8AOtksoNK6yk5z1HTJeUkC1Ez4Wk2idgGslMwQ= -github.com/elastic/go-sysinfo v1.13.1 h1:U5Jlx6c/rLkR72O8wXXXo1abnGlWGJU/wbzNJ2AfQa4= -github.com/elastic/go-sysinfo v1.13.1/go.mod h1:GKqR8bbMK/1ITnez9NIsIfXQr25aLhRJa7AfT8HpBFQ= +github.com/elastic/go-sysinfo v1.14.0 h1:dQRtiqLycoOOla7IflZg3aN213vqJmP0lpVpKQ9lUEY= +github.com/elastic/go-sysinfo v1.14.0/go.mod h1:FKUXnZWhnYI0ueO7jhsGV3uQJ5hiz8OqM5b3oGyaRr8= github.com/elastic/go-windows v1.0.0 h1:qLURgZFkkrYyTTkvYpsZIgf83AUsdIHfvlJaqaZ7aSY= github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -378,18 +372,12 @@ github.com/go-openapi/swag v0.22.8 h1:/9RjDSQ0vbFR+NyjGMkFTsA1IA0fmhKSThmfGZjicb github.com/go-openapi/swag v0.22.8/go.mod h1:6QT22icPLEqAM/z/TChgb4WAveCHF92+2gF0CNjHpPI= github.com/go-ping/ping v1.1.0 h1:3MCGhVX4fyEUuhsfwPrsEdQw6xspHkv5zHsiSoDFZYw= github.com/go-ping/ping v1.1.0/go.mod h1:xIFjORFzTxqIV/tDVGO4eDy/bLuSyawEeojSm3GfRGk= -github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= -github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= -github.com/go-playground/validator/v10 
v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= -github.com/go-playground/validator/v10 v10.19.0 h1:ol+5Fu+cSq9JD7SoSqe04GMI92cbn0+wvQ3bZ8b/AU4= -github.com/go-playground/validator/v10 v10.19.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= @@ -454,8 +442,8 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/gomarkdown/markdown v0.0.0-20231222211730-1d6d20845b47 h1:k4Tw0nt6lwro3Uin8eqoET7MDA4JnT8YgbCjc/g5E3k= @@ -510,8 +498,6 @@ github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/ github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 h1:RtRsiaGvWxcwd8y3BiRZxsylPT8hLWZ5SPcfI+3IDNk= github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0/go.mod h1:TzP6duP4Py2pHLVPPQp42aoYI92+PCrVotyR5e8Vqlk= -github.com/h2non/filetype v1.1.3 
h1:FKkx9QbD7HR/zjK1Ia5XiBsq9zdLi5Kf3zGyFTAFkGg= -github.com/h2non/filetype v1.1.3/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY= github.com/hairyhenderson/go-codeowners v0.4.0 h1:Wx/tRXb07sCyHeC8mXfio710Iu35uAy5KYiBdLHdv4Q= github.com/hairyhenderson/go-codeowners v0.4.0/go.mod h1:iJgZeCt+W/GzXo5uchFCqvVHZY2T4TAIpvuVlKVkLxc= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -594,8 +580,6 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGw github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= -github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= -github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 h1:elKwZS1OcdQ0WwEDBeqxKwb7WB62QX8bvZ/FJnVXIfk= @@ -605,8 +589,6 @@ github.com/jsimonetti/rtnetlink v1.3.5/go.mod h1:0LFedyiTkebnd43tE4YAkWGIq9jQpho github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/juju/errors v1.0.0 h1:yiq7kjCLll1BiaRuNY53MGI0+EQ3rF6GB+wvboZDefM= -github.com/juju/errors v1.0.0/go.mod h1:B5x9thDqx0wIMH3+aLIMP9HjItInYWObRovoCFM5Qe8= github.com/justinas/nosurf v1.1.1 h1:92Aw44hjSK4MxJeMSyDa7jwuI9GR2J/JCQiaKvXXSlk= github.com/justinas/nosurf v1.1.1/go.mod 
h1:ALpWdSbuNGy2lZWtyXdjkYv4edL23oSEgfBT1gPJ5BQ= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= @@ -633,12 +615,16 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylecarbs/chroma/v2 v2.0.0-20240401211003-9e036e0631f3 h1:Z9/bo5PSeMutpdiKYNt/TTSfGM1Ll0naj3QzYX9VxTc= +github.com/kylecarbs/chroma/v2 v2.0.0-20240401211003-9e036e0631f3/go.mod h1:BUGjjsD+ndS6eX37YgTchSEG+Jg9Jv1GiZs9sqPqztk= github.com/kylecarbs/opencensus-go v0.23.1-0.20220307014935-4d0325a68f8b h1:1Y1X6aR78kMEQE1iCjQodB3lA7VO4jB88Wf8ZrzXSsA= github.com/kylecarbs/opencensus-go v0.23.1-0.20220307014935-4d0325a68f8b/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= github.com/kylecarbs/spinner v1.18.2-0.20220329160715-20702b5af89e h1:OP0ZMFeZkUnOzTFRfpuK3m7Kp4fNvC6qN+exwj7aI4M= github.com/kylecarbs/spinner v1.18.2-0.20220329160715-20702b5af89e/go.mod h1:mQak9GHqbspjC/5iUx3qMlIho8xBS/ppAL/hX5SmPJU= github.com/kylecarbs/terraform-config-inspect v0.0.0-20211215004401-bbc517866b88 h1:tvG/qs5c4worwGyGnbbb4i/dYYLjpFwDMqcIT3awAf8= github.com/kylecarbs/terraform-config-inspect v0.0.0-20211215004401-bbc517866b88/go.mod h1:Z0Nnk4+3Cy89smEbrq+sl1bxc9198gIP4I7wcQF6Kqs= +github.com/kylecarbs/validator/v10 v10.0.0-20240401214733-cebbc77c0ece h1:FDpneVFUZzTpR6HrrHZhfD09gKB2gGKfCmKkquh/Trk= +github.com/kylecarbs/validator/v10 v10.0.0-20240401214733-cebbc77c0ece/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod 
h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= @@ -646,7 +632,6 @@ github.com/kyokomi/emoji/v2 v2.2.12 h1:sSVA5nH9ebR3Zji1o31wu3yOwD1zKXQA2z0zUyeit github.com/kyokomi/emoji/v2 v2.2.12/go.mod h1:JUcn42DTdsXJo1SWanHh4HKDEyPaR5CqkmoirZZP9qE= github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80 h1:6Yzfa6GP0rIo/kULo2bwGEkFvCePZ3qHDDTC3/J9Swo= github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= -github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= @@ -909,8 +894,6 @@ github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVM github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= -github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= -github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/unrolled/secure v1.14.0 h1:u9vJTU/pR4Bny0ntLUMxdfLtmIRGvQf2sEFuA0TG9AE= github.com/unrolled/secure v1.14.0/go.mod h1:BmF5hyM6tXczk3MpQkFf1hpKSRqCyhqcbiQtiAF7+40= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= @@ -1020,9 +1003,10 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.1.0/go.mod 
h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ= golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= @@ -1037,8 +1021,8 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= -golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1059,11 +1043,13 @@ golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.3.0/go.mod 
h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= -golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= -golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= +golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg= +golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1072,8 +1058,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= 
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1114,9 +1100,10 @@ golang.org/x/sys v0.4.1-0.20230131160137-e7d7f63158de/go.mod h1:oPkhp1MJrh7nUepC golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210503060354-a79de5458b56/go.mod h1:tfny5GFUkzUvx4ps4ajbZsCe5lw1metzhBm9T3x7oIY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1124,9 +1111,10 @@ golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.13.0/go.mod 
h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -1136,6 +1124,7 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1153,8 +1142,8 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw= -golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc= +golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY= +golang.org/x/tools v0.20.0/go.mod 
h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1177,10 +1166,10 @@ google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240205150955-31a09d347014 h1:g/4bk7P6TPMkAUbUhquq98xey1slwvuVJPosdBqYJlU= -google.golang.org/genproto v0.0.0-20240205150955-31a09d347014/go.mod h1:xEgQu1e4stdSSsxPDK8Azkrk/ECl5HvdPf6nbZrTS5M= -google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 h1:x9PwdEgd11LgK+orcck69WVRo7DezSO4VUMPI4xpc8A= -google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014/go.mod h1:rbHMSEDyoYX62nRVLOCc4Qt1HbsdytAYoVwgjiOhF3I= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= +google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0= +google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc= google.golang.org/genproto/googleapis/rpc 
v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -1188,8 +1177,8 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= -google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= +google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8= +google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/install.sh b/install.sh index bdcf3af007..50c3c85a8f 100755 --- a/install.sh +++ b/install.sh @@ -26,18 +26,21 @@ The remote host must have internet access. ${not_curl_usage-} Usage: - $arg0 [--dry-run] [--version X.X.X] [--edge] [--method detect] \ + ${arg0} [--dry-run] [--mainline | --stable | --version X.X.X] [--method detect] \ [--prefix ~/.local] [--rsh ssh] [user@host] --dry-run Echo the commands for the install process without running them. + --mainline + Install the latest mainline version (default). + + --stable + Install the latest stable version instead of the latest mainline version. + --version X.X.X Install a specific version instead of the latest. - --edge - Install the latest edge version instead of the latest stable version. 
- --method [detect | standalone] Choose the installation method. Defaults to detect. - detect detects the system package manager and tries to use it. @@ -88,16 +91,25 @@ The installer will cache all downloaded assets into ~/.cache/coder EOF } -echo_latest_version() { - if [ "${EDGE-}" ]; then - version="$(curl -fsSL https://api.github.com/repos/coder/coder/releases | awk 'match($0,/.*"html_url": "(.*\/releases\/tag\/.*)".*/)' | head -n 1 | awk -F '"' '{print $4}')" - else - # https://gist.github.com/lukechilds/a83e1d7127b78fef38c2914c4ececc3c#gistcomment-2758860 - version="$(curl -fsSLI -o /dev/null -w "%{url_effective}" https://github.com/coder/coder/releases/latest)" - fi - version="${version#https://github.com/coder/coder/releases/tag/}" - version="${version#v}" - echo "$version" +echo_latest_stable_version() { + # https://gist.github.com/lukechilds/a83e1d7127b78fef38c2914c4ececc3c#gistcomment-2758860 + version="$(curl -fsSLI -o /dev/null -w "%{url_effective}" https://github.com/coder/coder/releases/latest)" + version="${version#https://github.com/coder/coder/releases/tag/v}" + echo "${version}" +} + +echo_latest_mainline_version() { + # Fetch the releases from the GitHub API, sort by version number, + # and take the first result. Note that we're sorting by space- + # separated numbers and without utilizing the sort -V flag for the + # best compatibility. + curl -fsSL https://api.github.com/repos/coder/coder/releases | + awk -F'"' '/"tag_name"/ {print $4}' | + tr -d v | + tr . ' ' | + sort -k1,1nr -k2,2nr -k3,3nr | + head -n1 | + tr ' ' . } echo_standalone_postinstall() { @@ -106,9 +118,21 @@ echo_standalone_postinstall() { return fi + channel= + advisory="To install our stable release (v${STABLE_VERSION}), use the --stable flag. 
" + if [ "${STABLE}" = 1 ]; then + channel="stable " + advisory="" + fi + if [ "${MAINLINE}" = 1 ]; then + channel="mainline " + fi + cath </dev/null || true + sh_c mkdir -p "$CACHE_DIR/tmp" + if [ "$STANDALONE_ARCHIVE_FORMAT" = tar.gz ]; then + sh_c tar -C "$CACHE_DIR/tmp" -xzf "$CACHE_DIR/coder_${VERSION}_${OS}_${ARCH}.tar.gz" + else + sh_c unzip -d "$CACHE_DIR/tmp" -o "$CACHE_DIR/coder_${VERSION}_${OS}_${ARCH}.zip" + fi + + STANDALONE_BINARY_LOCATION="$STANDALONE_INSTALL_PREFIX/bin/$STANDALONE_BINARY_NAME" + sh_c="sh_c" if [ ! -w "$STANDALONE_INSTALL_PREFIX" ]; then sh_c="sudo_sh_c" fi "$sh_c" mkdir -p "$STANDALONE_INSTALL_PREFIX/bin" - if [ "$STANDALONE_ARCHIVE_FORMAT" = tar.gz ]; then - "$sh_c" tar -C "$CACHE_DIR" -xzf "$CACHE_DIR/coder_${VERSION}_${OS}_${ARCH}.tar.gz" - else - "$sh_c" unzip -d "$CACHE_DIR" -o "$CACHE_DIR/coder_${VERSION}_${OS}_${ARCH}.zip" - fi - - STANDALONE_BINARY_LOCATION="$STANDALONE_INSTALL_PREFIX/bin/$STANDALONE_BINARY_NAME" # Remove the file if it already exists to # avoid https://github.com/coder/coder/issues/2086 @@ -606,7 +662,10 @@ install_standalone() { fi # Copy the binary to the correct location. - "$sh_c" cp "$CACHE_DIR/coder" "$STANDALONE_BINARY_LOCATION" + "$sh_c" cp "$CACHE_DIR/tmp/coder" "$STANDALONE_BINARY_LOCATION" + + # Clean up the extracted files (note, not using sudo: $sh_c -> sh_c). + sh_c rm -rv "$CACHE_DIR/tmp" echo_standalone_postinstall } diff --git a/provisioner/terraform/install.go b/provisioner/terraform/install.go index 24e4001419..e50c3d9af9 100644 --- a/provisioner/terraform/install.go +++ b/provisioner/terraform/install.go @@ -19,6 +19,7 @@ var ( // TerraformVersion is the version of Terraform used internally // when Terraform is not available on the system. // NOTE: Keep this in sync with the version in scripts/Dockerfile.base. + // NOTE: Keep this in sync with the version in install.sh. 
TerraformVersion = version.Must(version.NewVersion("1.6.6")) minTerraformVersion = version.Must(version.NewVersion("1.1.0")) diff --git a/provisioner/terraform/provision.go b/provisioner/terraform/provision.go index 40f24ecfb8..542006f27e 100644 --- a/provisioner/terraform/provision.go +++ b/provisioner/terraform/provision.go @@ -2,12 +2,14 @@ package terraform import ( "context" + "encoding/json" "fmt" "os" "strings" "time" "github.com/spf13/afero" + "golang.org/x/xerrors" "cdr.dev/slog" "github.com/coder/terraform-provider-coder/provider" @@ -186,6 +188,11 @@ func provisionEnv( richParams []*proto.RichParameterValue, externalAuth []*proto.ExternalAuthProvider, ) ([]string, error) { env := safeEnviron() + ownerGroups, err := json.Marshal(metadata.GetWorkspaceOwnerGroups()) + if err != nil { + return nil, xerrors.Errorf("marshal owner groups: %w", err) + } + env = append(env, "CODER_AGENT_URL="+metadata.GetCoderUrl(), "CODER_WORKSPACE_TRANSITION="+strings.ToLower(metadata.GetWorkspaceTransition().String()), @@ -194,6 +201,7 @@ func provisionEnv( "CODER_WORKSPACE_OWNER_EMAIL="+metadata.GetWorkspaceOwnerEmail(), "CODER_WORKSPACE_OWNER_NAME="+metadata.GetWorkspaceOwnerName(), "CODER_WORKSPACE_OWNER_OIDC_ACCESS_TOKEN="+metadata.GetWorkspaceOwnerOidcAccessToken(), + "CODER_WORKSPACE_OWNER_GROUPS="+string(ownerGroups), "CODER_WORKSPACE_ID="+metadata.GetWorkspaceId(), "CODER_WORKSPACE_OWNER_ID="+metadata.GetWorkspaceOwnerId(), "CODER_WORKSPACE_OWNER_SESSION_TOKEN="+metadata.GetWorkspaceOwnerSessionToken(), diff --git a/provisionersdk/archive.go b/provisionersdk/archive.go index 4051698fa4..410315c18a 100644 --- a/provisionersdk/archive.go +++ b/provisionersdk/archive.go @@ -171,6 +171,10 @@ func Untar(directory string, r io.Reader) error { } } case tar.TypeReg: + err := os.MkdirAll(filepath.Dir(target), os.FileMode(header.Mode)|os.ModeDir|100) + if err != nil { + return err + } file, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode)) if err != nil 
{ return err diff --git a/provisionersdk/proto/provisioner.pb.go b/provisionersdk/proto/provisioner.pb.go index f91be315a5..99d7b2e26a 100644 --- a/provisionersdk/proto/provisioner.pb.go +++ b/provisionersdk/proto/provisioner.pb.go @@ -1637,6 +1637,7 @@ type Metadata struct { WorkspaceOwnerSessionToken string `protobuf:"bytes,11,opt,name=workspace_owner_session_token,json=workspaceOwnerSessionToken,proto3" json:"workspace_owner_session_token,omitempty"` TemplateId string `protobuf:"bytes,12,opt,name=template_id,json=templateId,proto3" json:"template_id,omitempty"` WorkspaceOwnerName string `protobuf:"bytes,13,opt,name=workspace_owner_name,json=workspaceOwnerName,proto3" json:"workspace_owner_name,omitempty"` + WorkspaceOwnerGroups []string `protobuf:"bytes,14,rep,name=workspace_owner_groups,json=workspaceOwnerGroups,proto3" json:"workspace_owner_groups,omitempty"` } func (x *Metadata) Reset() { @@ -1762,6 +1763,13 @@ func (x *Metadata) GetWorkspaceOwnerName() string { return "" } +func (x *Metadata) GetWorkspaceOwnerGroups() []string { + if x != nil { + return x.WorkspaceOwnerGroups + } + return nil +} + // Config represents execution configuration shared by all subsequent requests in the Session type Config struct { state protoimpl.MessageState @@ -2868,7 +2876,7 @@ var file_provisionersdk_proto_provisioner_proto_rawDesc = []byte{ 0x0a, 0x09, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x73, 0x5f, 0x6e, 0x75, 0x6c, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x69, - 0x73, 0x4e, 0x75, 0x6c, 0x6c, 0x22, 0x81, 0x05, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x73, 0x4e, 0x75, 0x6c, 0x6c, 0x22, 0xb7, 0x05, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x64, 0x65, 
0x72, 0x55, 0x72, 0x6c, 0x12, 0x53, 0x0a, 0x14, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x74, 0x72, 0x61, @@ -2908,133 +2916,137 @@ var file_provisionersdk_proto_provisioner_proto_rawDesc = []byte{ 0x70, 0x6c, 0x61, 0x74, 0x65, 0x49, 0x64, 0x12, 0x30, 0x0a, 0x14, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x8a, 0x01, 0x0a, 0x06, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x36, 0x0a, 0x17, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, - 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x15, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x73, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, - 0x72, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x13, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x4c, 0x6f, - 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x22, 0x0e, 0x0a, 0x0c, 0x50, 0x61, 0x72, 0x73, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x8b, 0x01, 0x0a, 0x0d, 0x50, 0x61, 0x72, 0x73, 0x65, - 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x4c, - 0x0a, 0x12, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x72, 0x69, 0x61, - 0x62, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x70, 0x72, 0x6f, - 0x76, 0x69, 0x73, 
0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, - 0x65, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x11, 0x74, 0x65, 0x6d, 0x70, 0x6c, - 0x61, 0x74, 0x65, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, - 0x72, 0x65, 0x61, 0x64, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x72, 0x65, - 0x61, 0x64, 0x6d, 0x65, 0x22, 0xb5, 0x02, 0x0a, 0x0b, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, - 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x53, 0x0a, 0x15, 0x72, 0x69, 0x63, 0x68, 0x5f, - 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, - 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x52, 0x69, 0x63, 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, - 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x13, 0x72, 0x69, 0x63, 0x68, 0x50, 0x61, 0x72, - 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x43, 0x0a, 0x0f, - 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, - 0x6e, 0x65, 0x72, 0x2e, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x52, 0x0e, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x73, 0x12, 0x59, 0x0a, 0x17, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x75, - 0x74, 0x68, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x70, 0x72, 0x6f, 0x76, 
0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, - 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68, 0x50, 0x72, 0x6f, - 0x76, 0x69, 0x64, 0x65, 0x72, 0x52, 0x15, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, - 0x75, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x22, 0xf8, 0x01, 0x0a, - 0x0c, 0x50, 0x6c, 0x61, 0x6e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x12, 0x33, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, - 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, - 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70, + 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x77, 0x6f, 0x72, + 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x67, 0x72, 0x6f, + 0x75, 0x70, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x77, 0x6f, 0x72, 0x6b, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x22, + 0x8a, 0x01, 0x0a, 0x06, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x36, 0x0a, 0x17, 0x74, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x72, + 0x63, 0x68, 0x69, 0x76, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x15, 0x74, 0x65, 0x6d, + 0x70, 0x6c, 0x61, 0x74, 0x65, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x72, 0x63, 0x68, 0x69, + 0x76, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x72, 0x6f, 
0x76, + 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x6c, 0x65, 0x76, 0x65, + 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, + 0x6f, 0x6e, 0x65, 0x72, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x22, 0x0e, 0x0a, 0x0c, + 0x50, 0x61, 0x72, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x8b, 0x01, 0x0a, + 0x0d, 0x50, 0x61, 0x72, 0x73, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x14, + 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x12, 0x4c, 0x0a, 0x12, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, + 0x5f, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x54, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x52, + 0x11, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, + 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x64, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x06, 0x72, 0x65, 0x61, 0x64, 0x6d, 0x65, 0x22, 0xb5, 0x02, 0x0a, 0x0b, 0x50, + 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x08, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, + 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x53, 0x0a, + 0x15, 0x72, 0x69, 0x63, 0x68, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x52, 0x69, 0x63, 0x68, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x65, 
0x74, 0x65, 0x72, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, - 0x74, 0x65, 0x72, 0x73, 0x12, 0x61, 0x0a, 0x17, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x5f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x18, - 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, - 0x6e, 0x65, 0x72, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68, - 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x52, 0x15, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68, 0x50, 0x72, - 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x22, 0x41, 0x0a, 0x0c, 0x41, 0x70, 0x70, 0x6c, 0x79, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x72, 0x6f, 0x76, - 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x8f, 0x02, 0x0a, 0x0d, 0x41, - 0x70, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x73, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x33, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x72, - 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x3a, 0x0a, - 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 
0x6f, 0x6e, 0x65, 0x72, 0x2e, - 0x52, 0x69, 0x63, 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x0a, 0x70, - 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x61, 0x0a, 0x17, 0x65, 0x78, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, - 0x64, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x70, 0x72, 0x6f, - 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x41, 0x75, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x15, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, - 0x75, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x22, 0x0f, 0x0a, 0x0d, - 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x8c, 0x02, - 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x76, - 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, - 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x31, 0x0a, 0x05, 0x70, 0x61, 0x72, 0x73, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, - 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x50, 0x61, 0x72, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x48, 0x00, 0x52, 0x05, 0x70, 0x61, 0x72, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x04, 0x70, - 0x6c, 0x61, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x72, 0x6f, 0x76, - 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x04, 0x70, 0x6c, 0x61, 0x6e, 0x12, 0x31, 0x0a, 0x05, 0x61, - 0x70, 0x70, 0x6c, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x72, 0x6f, - 0x76, 
0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x05, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x12, 0x34, - 0x0a, 0x06, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x43, 0x61, 0x6e, - 0x63, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x06, 0x63, 0x61, - 0x6e, 0x63, 0x65, 0x6c, 0x42, 0x06, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xd1, 0x01, 0x0a, - 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x03, 0x6c, 0x6f, 0x67, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, - 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x4c, 0x6f, 0x67, 0x48, 0x00, 0x52, 0x03, 0x6c, 0x6f, 0x67, 0x12, - 0x32, 0x0a, 0x05, 0x70, 0x61, 0x72, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x50, 0x61, 0x72, - 0x73, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x00, 0x52, 0x05, 0x70, 0x61, - 0x72, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x04, 0x70, 0x6c, 0x61, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, - 0x50, 0x6c, 0x61, 0x6e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x00, 0x52, 0x04, - 0x70, 0x6c, 0x61, 0x6e, 0x12, 0x32, 0x0a, 0x05, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, - 0x72, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x48, - 0x00, 0x52, 0x05, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x42, 0x06, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, - 0x2a, 0x3f, 0x0a, 0x08, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x09, 0x0a, 0x05, - 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, 0x00, 
0x12, 0x09, 0x0a, 0x05, 0x44, 0x45, 0x42, 0x55, 0x47, - 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x4e, 0x46, 0x4f, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, - 0x57, 0x41, 0x52, 0x4e, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, - 0x04, 0x2a, 0x3b, 0x0a, 0x0f, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x69, 0x6e, 0x67, 0x4c, - 0x65, 0x76, 0x65, 0x6c, 0x12, 0x09, 0x0a, 0x05, 0x4f, 0x57, 0x4e, 0x45, 0x52, 0x10, 0x00, 0x12, - 0x11, 0x0a, 0x0d, 0x41, 0x55, 0x54, 0x48, 0x45, 0x4e, 0x54, 0x49, 0x43, 0x41, 0x54, 0x45, 0x44, - 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x55, 0x42, 0x4c, 0x49, 0x43, 0x10, 0x02, 0x2a, 0x37, - 0x0a, 0x13, 0x57, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x54, 0x41, 0x52, 0x54, 0x10, 0x00, - 0x12, 0x08, 0x0a, 0x04, 0x53, 0x54, 0x4f, 0x50, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, - 0x53, 0x54, 0x52, 0x4f, 0x59, 0x10, 0x02, 0x32, 0x49, 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x76, 0x69, - 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x12, 0x3a, 0x0a, 0x07, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x14, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, - 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, - 0x30, 0x01, 0x42, 0x30, 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2f, 0x76, 0x32, 0x2f, - 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x73, 0x64, 0x6b, 0x2f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x13, 0x72, + 0x69, 0x63, 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 
0x73, 0x12, 0x43, 0x0a, 0x0f, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x72, + 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, + 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, + 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x59, 0x0a, 0x17, 0x65, 0x78, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, + 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, + 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, + 0x75, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x52, 0x15, 0x65, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, + 0x72, 0x73, 0x22, 0xf8, 0x01, 0x0a, 0x0c, 0x50, 0x6c, 0x61, 0x6e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, + 0x65, 0x74, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x33, 0x0a, 0x09, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, + 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x3a, + 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, + 0x2e, 0x52, 0x69, 0x63, 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x0a, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x61, 0x0a, 0x17, 0x65, 0x78, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 
0x61, 0x75, 0x74, 0x68, 0x5f, 0x70, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x70, 0x72, + 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x15, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x41, 0x75, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x22, 0x41, 0x0a, + 0x0c, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, + 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x22, 0x8f, 0x02, 0x0a, 0x0d, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, + 0x74, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x33, + 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, + 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, + 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x52, 0x69, 0x63, 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 
0x72, 0x73, 0x12, + 0x61, 0x0a, 0x17, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x75, 0x74, 0x68, + 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x29, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x45, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x76, 0x69, + 0x64, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x15, 0x65, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, + 0x72, 0x73, 0x22, 0x0f, 0x0a, 0x0d, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x22, 0x8c, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x2d, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x31, + 0x0a, 0x05, 0x70, 0x61, 0x72, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x50, 0x61, 0x72, 0x73, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x05, 0x70, 0x61, 0x72, 0x73, + 0x65, 0x12, 0x2e, 0x0a, 0x04, 0x70, 0x6c, 0x61, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x18, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x50, 0x6c, + 0x61, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x04, 0x70, 0x6c, 0x61, + 0x6e, 0x12, 0x31, 0x0a, 0x05, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x41, + 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x05, 0x61, + 0x70, 0x70, 0x6c, 
0x79, 0x12, 0x34, 0x0a, 0x06, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, + 0x65, 0x72, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x48, 0x00, 0x52, 0x06, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x42, 0x06, 0x0a, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x22, 0xd1, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x24, 0x0a, 0x03, 0x6c, 0x6f, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, + 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x4c, 0x6f, 0x67, 0x48, 0x00, + 0x52, 0x03, 0x6c, 0x6f, 0x67, 0x12, 0x32, 0x0a, 0x05, 0x70, 0x61, 0x72, 0x73, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, + 0x65, 0x72, 0x2e, 0x50, 0x61, 0x72, 0x73, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, + 0x48, 0x00, 0x52, 0x05, 0x70, 0x61, 0x72, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x04, 0x70, 0x6c, 0x61, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, + 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, + 0x74, 0x65, 0x48, 0x00, 0x52, 0x04, 0x70, 0x6c, 0x61, 0x6e, 0x12, 0x32, 0x0a, 0x05, 0x61, 0x70, + 0x70, 0x6c, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x76, + 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6d, + 0x70, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x00, 0x52, 0x05, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x42, 0x06, + 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x2a, 0x3f, 0x0a, 0x08, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, 0x00, 0x12, 0x09, 0x0a, + 0x05, 0x44, 0x45, 0x42, 0x55, 0x47, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x4e, 0x46, 0x4f, + 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x57, 0x41, 0x52, 
0x4e, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, + 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x04, 0x2a, 0x3b, 0x0a, 0x0f, 0x41, 0x70, 0x70, 0x53, 0x68, + 0x61, 0x72, 0x69, 0x6e, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x09, 0x0a, 0x05, 0x4f, 0x57, + 0x4e, 0x45, 0x52, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x41, 0x55, 0x54, 0x48, 0x45, 0x4e, 0x54, + 0x49, 0x43, 0x41, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x55, 0x42, 0x4c, + 0x49, 0x43, 0x10, 0x02, 0x2a, 0x37, 0x0a, 0x13, 0x57, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x09, 0x0a, 0x05, 0x53, + 0x54, 0x41, 0x52, 0x54, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x54, 0x4f, 0x50, 0x10, 0x01, + 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x53, 0x54, 0x52, 0x4f, 0x59, 0x10, 0x02, 0x32, 0x49, 0x0a, + 0x0b, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x12, 0x3a, 0x0a, 0x07, + 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, + 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, + 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x30, 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2f, 0x63, 0x6f, 0x64, + 0x65, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, + 0x72, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( diff --git a/provisionersdk/proto/provisioner.proto b/provisionersdk/proto/provisioner.proto index fb683293a4..1ee779aa76 100644 --- a/provisionersdk/proto/provisioner.proto +++ b/provisionersdk/proto/provisioner.proto @@ -225,6 +225,7 @@ message Metadata { string workspace_owner_session_token = 11; string template_id = 12; string workspace_owner_name = 13; + 
repeated string workspace_owner_groups = 14; } // Config represents execution configuration shared by all subsequent requests in the Session diff --git a/provisionersdk/scripts/bootstrap_windows.ps1 b/provisionersdk/scripts/bootstrap_windows.ps1 index aa3089f90d..e51dd9415a 100644 --- a/provisionersdk/scripts/bootstrap_windows.ps1 +++ b/provisionersdk/scripts/bootstrap_windows.ps1 @@ -14,20 +14,20 @@ while ($true) { # executing shell to be named "sshd", otherwise it fails. See: # https://github.com/microsoft/vscode-remote-release/issues/5699 $BINARY_URL="${ACCESS_URL}/bin/coder-windows-${ARCH}.exe" - Write-Output "Fetching coder agent from ${BINARY_URL}" + Write-Output "$(Get-Date) Fetching coder agent from ${BINARY_URL}" Invoke-WebRequest -Uri "${BINARY_URL}" -OutFile $env:TEMP\sshd.exe break } catch { - Write-Output "error: unhandled exception fetching coder agent:" + Write-Output "$(Get-Date) error: unhandled exception fetching coder agent:" Write-Output $_ - Write-Output "trying again in 30 seconds..." + Write-Output "$(Get-Date) trying again in 30 seconds..." Start-Sleep -Seconds 30 } } # Check if running in a Windows container if (-not (Get-Command 'Set-MpPreference' -ErrorAction SilentlyContinue)) { - Write-Output "Set-MpPreference not available, skipping..." + Write-Output "$(Get-Date) Set-MpPreference not available, skipping..." } else { Set-MpPreference -DisableRealtimeMonitoring $true -ExclusionPath $env:TEMP\sshd.exe } diff --git a/site/e2e/README.md b/site/e2e/README.md index 698d470e34..315de9dd47 100644 --- a/site/e2e/README.md +++ b/site/e2e/README.md @@ -20,3 +20,37 @@ pnpm playwright:test # Run a specific test (`-g` stands for grep. It accepts regex). pnpm playwright:test -g '' ``` + +# Using nix + +If this breaks, it is likely because the flake chromium version and playwright +are no longer compatible. To fix this, update the flake to get the latest +chromium version, and adjust the playwright version in the package.json. 
+ +You can see the playwright version here: +https://search.nixos.org/packages?channel=unstable&show=playwright-driver&from=0&size=50&sort=relevance&type=packages&query=playwright-driver + +```shell +# Optionally add '--command zsh' to choose your shell. +nix develop +cd site +pnpm install +pnpm build +pnpm playwright:test +``` + +# Enterprise tests + +Enterprise tests require a license key to run. + +```shell +export CODER_E2E_ENTERPRISE_LICENSE= +``` + +# Debugging tests + +To debug a test, it is more helpful to run it in `ui` mode. + +``` +pnpm playwright:test-ui +``` diff --git a/site/e2e/api.ts b/site/e2e/api.ts new file mode 100644 index 0000000000..9bb8d62719 --- /dev/null +++ b/site/e2e/api.ts @@ -0,0 +1,49 @@ +import type { Page } from "@playwright/test"; +import * as API from "api/api"; +import { coderPort } from "./constants"; +import { findSessionToken, randomName } from "./helpers"; + +let currentOrgId: string; + +export const setupApiCalls = async (page: Page) => { + try { + const token = await findSessionToken(page); + API.setSessionToken(token); + } catch { + // If this fails, we have an unauthenticated client. 
+ } + API.setHost(`http://127.0.0.1:${coderPort}`); +}; + +export const getCurrentOrgId = async (): Promise => { + if (currentOrgId) { + return currentOrgId; + } + const currentUser = await API.getAuthenticatedUser(); + currentOrgId = currentUser.organization_ids[0]; + return currentOrgId; +}; + +export const createUser = async (orgId: string) => { + const name = randomName(); + const user = await API.createUser({ + email: `${name}@coder.com`, + username: name, + password: "s3cure&password!", + login_type: "password", + disable_login: false, + organization_id: orgId, + }); + return user; +}; + +export const createGroup = async (orgId: string) => { + const name = randomName(); + const group = await API.createGroup(orgId, { + name, + display_name: `Display ${name}`, + avatar_url: "/emojis/1f60d.png", + quota_allowance: 0, + }); + return group; +}; diff --git a/site/e2e/constants.ts b/site/e2e/constants.ts index 8b4fbe50d5..6998968977 100644 --- a/site/e2e/constants.ts +++ b/site/e2e/constants.ts @@ -33,4 +33,11 @@ export const gitAuth = { installationsPath: "/installations", }; +export const requireEnterpriseTests = Boolean( + process.env.CODER_E2E_REQUIRE_ENTERPRISE_TESTS, +); export const enterpriseLicense = process.env.CODER_E2E_ENTERPRISE_LICENSE ?? ""; + +// Fake experiments to verify that site presents them as enabled. +export const e2eFakeExperiment1 = "e2e-fake-experiment-1"; +export const e2eFakeExperiment2 = "e2e-fake-experiment-2"; diff --git a/site/e2e/expectUrl.ts b/site/e2e/expectUrl.ts new file mode 100644 index 0000000000..eb3777f577 --- /dev/null +++ b/site/e2e/expectUrl.ts @@ -0,0 +1,29 @@ +import { expect, type Page } from "@playwright/test"; + +type PollingOptions = { timeout?: number; intervals?: number[] }; + +export const expectUrl = expect.extend({ + /** + * toHavePathName is an alternative to `toHaveURL` that won't fail if the URL contains query parameters. 
+ */ + async toHavePathName(page: Page, expected: string, options?: PollingOptions) { + let actual: string = new URL(page.url()).pathname; + let pass: boolean; + try { + await expect + .poll(() => (actual = new URL(page.url()).pathname), options) + .toBe(expected); + pass = true; + } catch { + pass = false; + } + + return { + name: "toHavePathName", + pass, + actual, + expected, + message: () => "The page does not have the expected URL pathname.", + }; + }, +}); diff --git a/site/e2e/global.setup.ts b/site/e2e/global.setup.ts index 9a9d4d026f..8c8526af9a 100644 --- a/site/e2e/global.setup.ts +++ b/site/e2e/global.setup.ts @@ -1,10 +1,20 @@ -import { test, expect } from "@playwright/test"; +import { expect, test } from "@playwright/test"; +import { hasFirstUser } from "api/api"; import { Language } from "pages/CreateUserPage/CreateUserForm"; +import { setupApiCalls } from "./api"; import * as constants from "./constants"; +import { expectUrl } from "./expectUrl"; import { storageState } from "./playwright.config"; test("setup deployment", async ({ page }) => { await page.goto("/", { waitUntil: "domcontentloaded" }); + await setupApiCalls(page); + const exists = await hasFirstUser(); + // First user already exists, abort early. All tests execute this as a dependency, + // if you run multiple tests in the UI, this will fail unless we check this. 
+ if (exists) { + return; + } // Setup first user await page.getByLabel(Language.usernameLabel).fill(constants.username); @@ -12,19 +22,26 @@ test("setup deployment", async ({ page }) => { await page.getByLabel(Language.passwordLabel).fill(constants.password); await page.getByTestId("create").click(); - await expect(page).toHaveURL(/\/workspaces.*/); + await expectUrl(page).toHavePathName("/workspaces"); await page.context().storageState({ path: storageState }); await page.getByTestId("button-select-template").isVisible(); // Setup license - if (constants.enterpriseLicense) { + if (constants.requireEnterpriseTests || constants.enterpriseLicense) { + // Make sure that we have something that looks like a real license + expect(constants.enterpriseLicense).toBeTruthy(); + expect(constants.enterpriseLicense.length).toBeGreaterThan(92); // the signature alone should be this long + expect(constants.enterpriseLicense.split(".").length).toBe(3); // otherwise it's invalid + await page.goto("/deployment/licenses", { waitUntil: "domcontentloaded" }); await page.getByText("Add a license").click(); await page.getByRole("textbox").fill(constants.enterpriseLicense); await page.getByText("Upload License").click(); - await page.getByText("You have successfully added a license").isVisible(); + await expect( + page.getByText("You have successfully added a license"), + ).toBeVisible(); } }); diff --git a/site/e2e/helpers.ts b/site/e2e/helpers.ts index 519a8968f8..05ce694a97 100644 --- a/site/e2e/helpers.ts +++ b/site/e2e/helpers.ts @@ -18,7 +18,9 @@ import { coderPort, enterpriseLicense, prometheusPort, + requireEnterpriseTests, } from "./constants"; +import { expectUrl } from "./expectUrl"; import { Agent, type App, @@ -33,6 +35,10 @@ import { // requiresEnterpriseLicense will skip the test if we're not running with an enterprise license export function requiresEnterpriseLicense() { + if (requireEnterpriseTests) { + return; + } + test.skip(!enterpriseLicense); } @@ -44,10 +50,10 @@ 
export const createWorkspace = async ( richParameters: RichParameter[] = [], buildParameters: WorkspaceBuildParameter[] = [], ): Promise => { - await page.goto("/templates/" + templateName + "/workspace", { + await page.goto(`/templates/${templateName}/workspace`, { waitUntil: "domcontentloaded", }); - await expect(page).toHaveURL("/templates/" + templateName + "/workspace"); + await expectUrl(page).toHavePathName(`/templates/${templateName}/workspace`); const name = randomName(); await page.getByLabel("name").fill(name); @@ -55,7 +61,7 @@ export const createWorkspace = async ( await fillParameters(page, richParameters, buildParameters); await page.getByTestId("form-submit").click(); - await expect(page).toHaveURL("/@admin/" + name); + await expectUrl(page).toHavePathName("/@admin/" + name); await page.waitForSelector("*[data-testid='build-status'] >> text=Running", { state: "visible", @@ -72,8 +78,8 @@ export const verifyParameters = async ( await page.goto("/@admin/" + workspaceName + "/settings/parameters", { waitUntil: "domcontentloaded", }); - await expect(page).toHaveURL( - "/@admin/" + workspaceName + "/settings/parameters", + await expectUrl(page).toHavePathName( + `/@admin/${workspaceName}/settings/parameters`, ); for (const buildParameter of expectedBuildParameters) { @@ -134,7 +140,7 @@ export const createTemplate = async ( }); await page.goto("/templates/new", { waitUntil: "domcontentloaded" }); - await expect(page).toHaveURL("/templates/new"); + await expectUrl(page).toHavePathName("/templates/new"); await page.getByTestId("file-upload").setInputFiles({ buffer: await createTemplateVersionTar(responses), @@ -144,7 +150,7 @@ export const createTemplate = async ( const name = randomName(); await page.getByLabel("Name *").fill(name); await page.getByTestId("form-submit").click(); - await expect(page).toHaveURL(`/templates/${name}/files`, { + await expectUrl(page).toHavePathName(`/templates/${name}/files`, { timeout: 30000, }); return name; @@ -154,7 +160,7 
@@ export const createTemplate = async ( // random name. export const createGroup = async (page: Page): Promise => { await page.goto("/groups/create", { waitUntil: "domcontentloaded" }); - await expect(page).toHaveURL("/groups/create"); + await expectUrl(page).toHavePathName("/groups/create"); const name = randomName(); await page.getByLabel("Name", { exact: true }).fill(name); @@ -215,7 +221,7 @@ export const stopWorkspace = async (page: Page, workspaceName: string) => { await page.goto("/@admin/" + workspaceName, { waitUntil: "domcontentloaded", }); - await expect(page).toHaveURL("/@admin/" + workspaceName); + await expectUrl(page).toHavePathName(`/@admin/${workspaceName}`); await page.getByTestId("workspace-stop-button").click(); @@ -234,7 +240,7 @@ export const buildWorkspaceWithParameters = async ( await page.goto("/@admin/" + workspaceName, { waitUntil: "domcontentloaded", }); - await expect(page).toHaveURL("/@admin/" + workspaceName); + await expectUrl(page).toHavePathName(`/@admin/${workspaceName}`); await page.getByTestId("build-parameters-button").click(); @@ -565,7 +571,7 @@ const createTemplateVersionTar = async ( ); }; -const randomName = () => { +export const randomName = () => { return randomUUID().slice(0, 8); }; @@ -603,7 +609,7 @@ export const createServer = async ( return e; }; -const findSessionToken = async (page: Page): Promise => { +export const findSessionToken = async (page: Page): Promise => { const cookies = await page.context().cookies(); const sessionCookie = cookies.find((c) => c.name === "coder_session_token"); if (!sessionCookie) { @@ -746,7 +752,7 @@ export const updateTemplateSettings = async ( await page.goto(`/templates/${templateName}/settings`, { waitUntil: "domcontentloaded", }); - await expect(page).toHaveURL(`/templates/${templateName}/settings`); + await expectUrl(page).toHavePathName(`/templates/${templateName}/settings`); for (const [key, value] of Object.entries(templateSettingValues)) { // Skip max_port_share_level for 
now since the frontend is not yet able to handle it @@ -760,7 +766,7 @@ export const updateTemplateSettings = async ( await page.getByTestId("form-submit").click(); const name = templateSettingValues.name ?? templateName; - await expect(page).toHaveURL(`/templates/${name}`); + await expectUrl(page).toHavePathName(`/templates/${name}`); }; export const updateWorkspace = async ( @@ -772,7 +778,7 @@ export const updateWorkspace = async ( await page.goto("/@admin/" + workspaceName, { waitUntil: "domcontentloaded", }); - await expect(page).toHaveURL("/@admin/" + workspaceName); + await expectUrl(page).toHavePathName(`/@admin/${workspaceName}`); await page.getByTestId("workspace-update-button").click(); await page.getByTestId("confirm-button").click(); @@ -794,8 +800,8 @@ export const updateWorkspaceParameters = async ( await page.goto("/@admin/" + workspaceName + "/settings/parameters", { waitUntil: "domcontentloaded", }); - await expect(page).toHaveURL( - "/@admin/" + workspaceName + "/settings/parameters", + await expectUrl(page).toHavePathName( + `/@admin/${workspaceName}/settings/parameters`, ); await fillParameters(page, richParameters, buildParameters); @@ -820,7 +826,9 @@ export async function openTerminalWindow( // Specify that the shell should be `bash`, to prevent inheriting a shell that // isn't POSIX compatible, such as Fish. 
const commandQuery = `?command=${encodeURIComponent("/usr/bin/env bash")}`; - await expect(terminal).toHaveURL(`/@admin/${workspaceName}.dev/terminal`); + await expectUrl(terminal).toHavePathName( + `/@admin/${workspaceName}.dev/terminal`, + ); await terminal.goto(`/@admin/${workspaceName}.dev/terminal${commandQuery}`); return terminal; diff --git a/site/e2e/playwright.config.ts b/site/e2e/playwright.config.ts index df38e57243..2fe84b17a2 100644 --- a/site/e2e/playwright.config.ts +++ b/site/e2e/playwright.config.ts @@ -1,6 +1,13 @@ import { defineConfig } from "@playwright/test"; import * as path from "path"; -import { coderMain, coderPort, coderdPProfPort, gitAuth } from "./constants"; +import { + coderMain, + coderPort, + coderdPProfPort, + e2eFakeExperiment1, + e2eFakeExperiment2, + gitAuth, +} from "./constants"; export const wsEndpoint = process.env.CODER_E2E_WS_ENDPOINT; @@ -22,7 +29,7 @@ export default defineConfig({ testMatch: /.*\.spec\.ts/, dependencies: ["testsSetup"], use: { storageState }, - timeout: 20_000, + timeout: 50_000, }, ], reporter: [["./reporter.ts"]], @@ -43,19 +50,25 @@ export default defineConfig({ }, webServer: { url: `http://localhost:${coderPort}/api/v2/deployment/config`, - command: - `go run -tags embed ${coderMain} server ` + - `--global-config $(mktemp -d -t e2e-XXXXXXXXXX) ` + - `--access-url=http://localhost:${coderPort} ` + - `--http-address=localhost:${coderPort} ` + - `--in-memory --telemetry=false ` + - `--dangerous-disable-rate-limits ` + - `--provisioner-daemons 10 ` + - `--provisioner-daemons-echo ` + - `--web-terminal-renderer=dom ` + - `--pprof-enable`, + command: [ + `go run -tags embed ${coderMain} server`, + "--global-config $(mktemp -d -t e2e-XXXXXXXXXX)", + `--access-url=http://localhost:${coderPort}`, + `--http-address=localhost:${coderPort}`, + "--in-memory", + "--telemetry=false", + "--dangerous-disable-rate-limits", + "--provisioner-daemons 10", + "--provisioner-daemons-echo", + "--web-terminal-renderer=dom", + 
"--pprof-enable", + ] + .filter(Boolean) + .join(" "), env: { ...process.env, + // Otherwise, the runner fails on Mac with: could not determine kind of name for C.uuid_string_t + CGO_ENABLED: "0", // This is the test provider for git auth with devices! CODER_GITAUTH_0_ID: gitAuth.deviceProvider, @@ -97,6 +110,7 @@ export default defineConfig({ gitAuth.validatePath, ), CODER_PPROF_ADDRESS: "127.0.0.1:" + coderdPProfPort, + CODER_EXPERIMENTS: e2eFakeExperiment1 + "," + e2eFakeExperiment2, }, reuseExistingServer: false, }, diff --git a/site/e2e/provisionerGenerated.ts b/site/e2e/provisionerGenerated.ts index 962544ba2c..1ba6f6f64a 100644 --- a/site/e2e/provisionerGenerated.ts +++ b/site/e2e/provisionerGenerated.ts @@ -230,6 +230,7 @@ export interface Metadata { workspaceOwnerSessionToken: string; templateId: string; workspaceOwnerName: string; + workspaceOwnerGroups: string[]; } /** Config represents execution configuration shared by all subsequent requests in the Session */ @@ -832,6 +833,9 @@ export const Metadata = { if (message.workspaceOwnerName !== "") { writer.uint32(106).string(message.workspaceOwnerName); } + for (const v of message.workspaceOwnerGroups) { + writer.uint32(114).string(v!); + } return writer; }, }; diff --git a/site/e2e/reporter.ts b/site/e2e/reporter.ts index 9b4eaabd5b..fe214d8aed 100644 --- a/site/e2e/reporter.ts +++ b/site/e2e/reporter.ts @@ -11,12 +11,13 @@ import type { import axios from "axios"; import * as fs from "fs/promises"; import type { Writable } from "stream"; -import { coderdPProfPort } from "./constants"; +import { coderdPProfPort, enterpriseLicense } from "./constants"; class CoderReporter implements Reporter { config: FullConfig | null = null; testOutput = new Map>(); passedCount = 0; + skippedCount = 0; failedTests: TestCase[] = []; timedOutTests: TestCase[] = []; @@ -31,45 +32,56 @@ class CoderReporter implements Reporter { } onStdOut(chunk: string, test?: TestCase, _?: TestResult): void { - for (const line of 
filteredServerLogLines(chunk)) { - console.log(`[stdout] ${line}`); - } + // If there's no associated test, just print it now if (!test) { + for (const line of logLines(chunk)) { + console.log(`[stdout] ${line}`); + } return; } + // Will be printed if the test fails this.testOutput.get(test.id)!.push([process.stdout, chunk]); } onStdErr(chunk: string, test?: TestCase, _?: TestResult): void { - for (const line of filteredServerLogLines(chunk)) { - console.error(`[stderr] ${line}`); - } + // If there's no associated test, just print it now if (!test) { + for (const line of logLines(chunk)) { + console.error(`[stderr] ${line}`); + } return; } + // Will be printed if the test fails this.testOutput.get(test.id)!.push([process.stderr, chunk]); } async onTestEnd(test: TestCase, result: TestResult) { - console.log(`==> Finished test ${test.title}: ${result.status}`); + try { + if (test.expectedStatus === "skipped") { + console.log(`==> Skipping test ${test.title}`); + this.skippedCount++; + return; + } - if (result.status === "passed") { - this.passedCount++; - } + console.log(`==> Finished test ${test.title}: ${result.status}`); - if (result.status === "failed") { - this.failedTests.push(test); - } + if (result.status === "passed") { + this.passedCount++; + return; + } - if (result.status === "timedOut") { - this.timedOutTests.push(test); - } + if (result.status === "failed") { + this.failedTests.push(test); + } - const fsTestTitle = test.title.replaceAll(" ", "-"); - const outputFile = `test-results/debug-pprof-goroutine-${fsTestTitle}.txt`; - await exportDebugPprof(outputFile); + if (result.status === "timedOut") { + this.timedOutTests.push(test); + } + + const fsTestTitle = test.title.replaceAll(" ", "-"); + const outputFile = `test-results/debug-pprof-goroutine-${fsTestTitle}.txt`; + await exportDebugPprof(outputFile); - if (result.status !== "passed") { console.log(`Data from pprof has been saved to ${outputFile}`); console.log("==> Output"); const output = 
this.testOutput.get(test.id)!; @@ -90,13 +102,22 @@ class CoderReporter implements Reporter { console.log(attachment); } } + } finally { + this.testOutput.delete(test.id); } - this.testOutput.delete(test.id); } onEnd(result: FullResult) { console.log(`==> Tests ${result.status}`); + if (!enterpriseLicense) { + console.log( + "==> Enterprise tests were skipped, because no license was provided", + ); + } console.log(`${this.passedCount} passed`); + if (this.skippedCount > 0) { + console.log(`${this.skippedCount} skipped`); + } if (this.failedTests.length > 0) { console.log(`${this.failedTests.length} failed`); for (const test of this.failedTests) { @@ -112,11 +133,7 @@ class CoderReporter implements Reporter { } } -const shouldPrintLine = (line: string) => - [" error=EOF", "coderd: audit_log"].every((noise) => !line.includes(noise)); - -const filteredServerLogLines = (chunk: string): string[] => - chunk.trimEnd().split("\n").filter(shouldPrintLine); +const logLines = (chunk: string): string[] => chunk.trimEnd().split("\n"); const exportDebugPprof = async (outputFile: string) => { const response = await axios.get( diff --git a/site/e2e/tests/deployment/appearance.spec.ts b/site/e2e/tests/deployment/appearance.spec.ts new file mode 100644 index 0000000000..0fec1a7d75 --- /dev/null +++ b/site/e2e/tests/deployment/appearance.spec.ts @@ -0,0 +1,82 @@ +import { chromium, expect, test } from "@playwright/test"; +import { expectUrl } from "../../expectUrl"; +import { randomName, requiresEnterpriseLicense } from "../../helpers"; + +test("set application name", async ({ page }) => { + requiresEnterpriseLicense(); + + await page.goto("/deployment/appearance", { waitUntil: "domcontentloaded" }); + + const applicationName = randomName(); + + // Fill out the form + const form = page.locator("form", { hasText: "Application name" }); + await form + .getByLabel("Application name", { exact: true }) + .fill(applicationName); + await form.getByRole("button", { name: "Submit" }).click(); 
+ + // Open a new session without cookies to see the login page + const browser = await chromium.launch(); + const incognitoContext = await browser.newContext(); + await incognitoContext.clearCookies(); + const incognitoPage = await incognitoContext.newPage(); + await incognitoPage.goto("/", { waitUntil: "domcontentloaded" }); + + // Verify the application name + const name = incognitoPage.locator("h1", { hasText: applicationName }); + await expect(name).toBeVisible(); + + // Shut down browser + await incognitoPage.close(); + await browser.close(); +}); + +test("set application logo", async ({ page }) => { + requiresEnterpriseLicense(); + + await page.goto("/deployment/appearance", { waitUntil: "domcontentloaded" }); + + const imageLink = "/icon/azure.png"; + + // Fill out the form + const form = page.locator("form", { hasText: "Logo URL" }); + await form.getByLabel("Logo URL", { exact: true }).fill(imageLink); + await form.getByRole("button", { name: "Submit" }).click(); + + // Open a new session without cookies to see the login page + const browser = await chromium.launch(); + const incognitoContext = await browser.newContext(); + await incognitoContext.clearCookies(); + const incognitoPage = await incognitoContext.newPage(); + await incognitoPage.goto("/", { waitUntil: "domcontentloaded" }); + + // Verify banner + const logo = incognitoPage.locator("img"); + await expect(logo).toHaveAttribute("src", imageLink); + + // Shut down browser + await incognitoPage.close(); + await browser.close(); +}); + +test("set service banner", async ({ page }) => { + requiresEnterpriseLicense(); + + await page.goto("/deployment/appearance", { waitUntil: "domcontentloaded" }); + + const message = "Mary has a little lamb."; + + // Fill out the form + const form = page.locator("form", { hasText: "Service Banner" }); + await form.getByLabel("Enabled", { exact: true }).check(); + await form.getByLabel("Message", { exact: true }).fill(message); + await form.getByRole("button", { name: 
"Submit" }).click(); + + // Verify service banner + await page.goto("/workspaces", { waitUntil: "domcontentloaded" }); + await expectUrl(page).toHavePathName("/workspaces"); + + const bar = page.locator("div.service-banner", { hasText: message }); + await expect(bar).toBeVisible(); +}); diff --git a/site/e2e/tests/deployment/general.spec.ts b/site/e2e/tests/deployment/general.spec.ts new file mode 100644 index 0000000000..de334a95b0 --- /dev/null +++ b/site/e2e/tests/deployment/general.spec.ts @@ -0,0 +1,39 @@ +import { expect, test } from "@playwright/test"; +import * as API from "api/api"; +import { setupApiCalls } from "../../api"; +import { e2eFakeExperiment1, e2eFakeExperiment2 } from "../../constants"; + +test("experiments", async ({ page }) => { + await setupApiCalls(page); + + // Load experiments from backend API + const availableExperiments = await API.getAvailableExperiments(); + + // Verify if the site lists the same experiments + await page.goto("/deployment/general", { waitUntil: "networkidle" }); + + const experimentsLocator = page.locator( + "div.options-table tr.option-experiments ul.option-array", + ); + await expect(experimentsLocator).toBeVisible(); + + // Firstly, check if all enabled experiments are listed + expect( + experimentsLocator.locator( + `li.option-array-item-${e2eFakeExperiment1}.option-enabled`, + ), + ).toBeVisible; + expect( + experimentsLocator.locator( + `li.option-array-item-${e2eFakeExperiment2}.option-enabled`, + ), + ).toBeVisible; + + // Secondly, check if available experiments are listed + for (const experiment of availableExperiments.safe) { + const experimentLocator = experimentsLocator.locator( + `li.option-array-item-${experiment}`, + ); + await expect(experimentLocator).toBeVisible(); + } +}); diff --git a/site/e2e/tests/deployment/licenses.spec.ts b/site/e2e/tests/deployment/licenses.spec.ts new file mode 100644 index 0000000000..89546bbec8 --- /dev/null +++ b/site/e2e/tests/deployment/licenses.spec.ts @@ -0,0 +1,30 
@@ +import { expect, test } from "@playwright/test"; +import { requiresEnterpriseLicense } from "../../helpers"; + +test("license was added successfully", async ({ page }) => { + requiresEnterpriseLicense(); + + await page.goto("/deployment/licenses", { waitUntil: "domcontentloaded" }); + const firstLicense = page.locator(".licenses > .license-card", { + hasText: "#1", + }); + await expect(firstLicense).toBeVisible(); + + // Trial vs. Enterprise? + const accountType = firstLicense.locator(".account-type"); + await expect(accountType).toHaveText("Enterprise"); + + // User limit 1/1 + const userLimit = firstLicense.locator(".user-limit"); + await expect(userLimit).toHaveText("1 / 1"); + + // License should not be expired yet + const licenseExpires = firstLicense.locator(".license-expires"); + const licenseExpiresDate = new Date(await licenseExpires.innerText()); + const now = new Date(); + expect(licenseExpiresDate.getTime()).toBeGreaterThan(now.getTime()); + + // "Remove" button should be visible + const removeButton = firstLicense.locator(".remove-button"); + await expect(removeButton).toBeVisible(); +}); diff --git a/site/e2e/tests/groups/addMembers.spec.ts b/site/e2e/tests/groups/addMembers.spec.ts new file mode 100644 index 0000000000..f9532733d8 --- /dev/null +++ b/site/e2e/tests/groups/addMembers.spec.ts @@ -0,0 +1,34 @@ +import { test, expect } from "@playwright/test"; +import { + createGroup, + createUser, + getCurrentOrgId, + setupApiCalls, +} from "../../api"; +import { requiresEnterpriseLicense } from "../../helpers"; +import { beforeCoderTest } from "../../hooks"; + +test.beforeEach(async ({ page }) => await beforeCoderTest(page)); + +test("add members", async ({ page, baseURL }) => { + requiresEnterpriseLicense(); + await setupApiCalls(page); + const orgId = await getCurrentOrgId(); + const group = await createGroup(orgId); + const numberOfMembers = 3; + const users = await Promise.all( + Array.from({ length: numberOfMembers }, () => createUser(orgId)), 
+ ); + + await page.goto(`${baseURL}/groups/${group.id}`, { + waitUntil: "domcontentloaded", + }); + await expect(page).toHaveTitle(`${group.display_name} - Coder`); + + for (const user of users) { + await page.getByPlaceholder("User email or username").fill(user.username); + await page.getByRole("option", { name: user.email }).click(); + await page.getByRole("button", { name: "Add user" }).click(); + await expect(page.getByRole("row", { name: user.username })).toBeVisible(); + } +}); diff --git a/site/e2e/tests/groups/addUsersToDefaultGroup.spec.ts b/site/e2e/tests/groups/addUsersToDefaultGroup.spec.ts new file mode 100644 index 0000000000..b5767026c0 --- /dev/null +++ b/site/e2e/tests/groups/addUsersToDefaultGroup.spec.ts @@ -0,0 +1,32 @@ +import { test, expect } from "@playwright/test"; +import { createUser, getCurrentOrgId, setupApiCalls } from "../../api"; +import { requiresEnterpriseLicense } from "../../helpers"; +import { beforeCoderTest } from "../../hooks"; + +test.beforeEach(async ({ page }) => await beforeCoderTest(page)); + +const DEFAULT_GROUP_NAME = "Everyone"; + +test(`Every user should be automatically added to the default '${DEFAULT_GROUP_NAME}' group upon creation`, async ({ + page, + baseURL, +}) => { + requiresEnterpriseLicense(); + await setupApiCalls(page); + const orgId = await getCurrentOrgId(); + const numberOfMembers = 3; + const users = await Promise.all( + Array.from({ length: numberOfMembers }, () => createUser(orgId)), + ); + + await page.goto(`${baseURL}/groups`, { waitUntil: "domcontentloaded" }); + await expect(page).toHaveTitle("Groups - Coder"); + + const groupRow = page.getByRole("row", { name: DEFAULT_GROUP_NAME }); + await groupRow.click(); + await expect(page).toHaveTitle(`${DEFAULT_GROUP_NAME} - Coder`); + + for (const user of users) { + await expect(page.getByRole("row", { name: user.username })).toBeVisible(); + } +}); diff --git a/site/e2e/tests/groups/createGroup.spec.ts b/site/e2e/tests/groups/createGroup.spec.ts new 
file mode 100644 index 0000000000..9542f4ea13 --- /dev/null +++ b/site/e2e/tests/groups/createGroup.spec.ts @@ -0,0 +1,30 @@ +import { test, expect } from "@playwright/test"; +import { randomName, requiresEnterpriseLicense } from "../../helpers"; +import { beforeCoderTest } from "../../hooks"; + +test.beforeEach(async ({ page }) => await beforeCoderTest(page)); + +test("create group", async ({ page, baseURL }) => { + requiresEnterpriseLicense(); + await page.goto(`${baseURL}/groups`, { waitUntil: "domcontentloaded" }); + await expect(page).toHaveTitle("Groups - Coder"); + + await page.getByText("Create group").click(); + await expect(page).toHaveTitle("Create Group - Coder"); + + const name = randomName(); + const groupValues = { + name: name, + displayName: `Display Name for ${name}`, + avatarURL: "/emojis/1f60d.png", + }; + + await page.getByLabel("Name", { exact: true }).fill(groupValues.name); + await page.getByLabel("Display Name").fill(groupValues.displayName); + await page.getByLabel("Avatar URL").fill(groupValues.avatarURL); + await page.getByRole("button", { name: "Submit" }).click(); + + await expect(page).toHaveTitle(`${groupValues.displayName} - Coder`); + await expect(page.getByText(groupValues.displayName)).toBeVisible(); + await expect(page.getByText("No members yet")).toBeVisible(); +}); diff --git a/site/e2e/tests/groups/navigateToGroupPage.spec.ts b/site/e2e/tests/groups/navigateToGroupPage.spec.ts new file mode 100644 index 0000000000..44e2224df7 --- /dev/null +++ b/site/e2e/tests/groups/navigateToGroupPage.spec.ts @@ -0,0 +1,23 @@ +import { test, expect } from "@playwright/test"; +import { createGroup, getCurrentOrgId, setupApiCalls } from "../../api"; +import { requiresEnterpriseLicense } from "../../helpers"; +import { beforeCoderTest } from "../../hooks"; + +test.beforeEach(async ({ page }) => await beforeCoderTest(page)); + +test("navigate to group page", async ({ page, baseURL }) => { + requiresEnterpriseLicense(); + await 
setupApiCalls(page); + const orgId = await getCurrentOrgId(); + const group = await createGroup(orgId); + + await page.goto(`${baseURL}/users`, { waitUntil: "domcontentloaded" }); + await expect(page).toHaveTitle("Users - Coder"); + + await page.getByRole("link", { name: "Groups" }).click(); + await expect(page).toHaveTitle("Groups - Coder"); + + const groupRow = page.getByRole("row", { name: group.display_name }); + await groupRow.click(); + await expect(page).toHaveTitle(`${group.display_name} - Coder`); +}); diff --git a/site/e2e/tests/groups/removeGroup.spec.ts b/site/e2e/tests/groups/removeGroup.spec.ts new file mode 100644 index 0000000000..9011ecbb71 --- /dev/null +++ b/site/e2e/tests/groups/removeGroup.spec.ts @@ -0,0 +1,26 @@ +import { test, expect } from "@playwright/test"; +import { createGroup, getCurrentOrgId, setupApiCalls } from "../../api"; +import { requiresEnterpriseLicense } from "../../helpers"; +import { beforeCoderTest } from "../../hooks"; + +test.beforeEach(async ({ page }) => await beforeCoderTest(page)); + +test("remove group", async ({ page, baseURL }) => { + requiresEnterpriseLicense(); + await setupApiCalls(page); + const orgId = await getCurrentOrgId(); + const group = await createGroup(orgId); + + await page.goto(`${baseURL}/groups/${group.id}`, { + waitUntil: "domcontentloaded", + }); + await expect(page).toHaveTitle(`${group.display_name} - Coder`); + + await page.getByRole("button", { name: "Delete" }).click(); + const dialog = page.getByTestId("dialog"); + await dialog.getByLabel("Name of the group to delete").fill(group.name); + await dialog.getByRole("button", { name: "Delete" }).click(); + await expect(page.getByText("Group deleted successfully.")).toBeVisible(); + + await expect(page).toHaveTitle("Groups - Coder"); +}); diff --git a/site/e2e/tests/groups/removeMember.spec.ts b/site/e2e/tests/groups/removeMember.spec.ts new file mode 100644 index 0000000000..716c86af84 --- /dev/null +++ 
b/site/e2e/tests/groups/removeMember.spec.ts @@ -0,0 +1,36 @@ +import { test, expect } from "@playwright/test"; +import * as API from "api/api"; +import { + createGroup, + createUser, + getCurrentOrgId, + setupApiCalls, +} from "../../api"; +import { requiresEnterpriseLicense } from "../../helpers"; +import { beforeCoderTest } from "../../hooks"; + +test.beforeEach(async ({ page }) => await beforeCoderTest(page)); + +test("remove member", async ({ page, baseURL }) => { + requiresEnterpriseLicense(); + await setupApiCalls(page); + const orgId = await getCurrentOrgId(); + const [group, member] = await Promise.all([ + createGroup(orgId), + createUser(orgId), + ]); + await API.addMember(group.id, member.id); + + await page.goto(`${baseURL}/groups/${group.id}`, { + waitUntil: "domcontentloaded", + }); + await expect(page).toHaveTitle(`${group.display_name} - Coder`); + + const userRow = page.getByRole("row", { name: member.username }); + await userRow.getByRole("button", { name: "More options" }).click(); + + const menu = page.locator("#more-options"); + await menu.getByText("Remove").click({ timeout: 1_000 }); + + await expect(page.getByText("Member removed successfully.")).toBeVisible(); +}); diff --git a/site/e2e/tests/outdatedAgent.spec.ts b/site/e2e/tests/outdatedAgent.spec.ts index 24f1442f7d..56207e9dbc 100644 --- a/site/e2e/tests/outdatedAgent.spec.ts +++ b/site/e2e/tests/outdatedAgent.spec.ts @@ -17,6 +17,8 @@ const agentVersion = "v0.27.0"; test.beforeEach(async ({ page }) => await beforeCoderTest(page)); test("ssh with agent " + agentVersion, async ({ page }) => { + test.setTimeout(40_000); // This is a slow test, 20s may not be enough on Mac. 
+ const token = randomUUID(); const template = await createTemplate(page, { apply: [ diff --git a/site/e2e/tests/updateTemplate.spec.ts b/site/e2e/tests/updateTemplate.spec.ts index 1159b9903f..4e967b2947 100644 --- a/site/e2e/tests/updateTemplate.spec.ts +++ b/site/e2e/tests/updateTemplate.spec.ts @@ -1,4 +1,5 @@ import { expect, test } from "@playwright/test"; +import { expectUrl } from "../expectUrl"; import { createGroup, createTemplate, @@ -25,7 +26,7 @@ test("add and remove a group", async ({ page }) => { await page.goto(`/templates/${templateName}/settings/permissions`, { waitUntil: "domcontentloaded", }); - await expect(page).toHaveURL( + await expectUrl(page).toHavePathName( `/templates/${templateName}/settings/permissions`, ); @@ -37,9 +38,14 @@ test("add and remove a group", async ({ page }) => { // Select the group from the list and add it await page.getByText(groupName).click(); await page.getByText("Add member").click(); - await expect( - page.locator(".MuiTable-root").getByText(groupName), - ).toBeVisible(); + const row = page.locator(".MuiTableRow-root", { hasText: groupName }); + await expect(row).toBeVisible(); + + // Now remove the group + await row.getByLabel("More options").click(); + await page.getByText("Remove").click(); + await expect(page.getByText("Group removed successfully!")).toBeVisible(); + await expect(row).not.toBeVisible(); }); test("require latest version", async ({ page }) => { @@ -50,7 +56,7 @@ test("require latest version", async ({ page }) => { await page.goto(`/templates/${templateName}/settings`, { waitUntil: "domcontentloaded", }); - await expect(page).toHaveURL(`/templates/${templateName}/settings`); + await expectUrl(page).toHavePathName(`/templates/${templateName}/settings`); let checkbox = await page.waitForSelector("#require_active_version"); await checkbox.click(); await page.getByTestId("form-submit").click(); diff --git a/site/e2e/tests/users/createUserWithPassword.spec.ts 
b/site/e2e/tests/users/createUserWithPassword.spec.ts new file mode 100644 index 0000000000..b8c95d35b3 --- /dev/null +++ b/site/e2e/tests/users/createUserWithPassword.spec.ts @@ -0,0 +1,35 @@ +import { test, expect } from "@playwright/test"; +import { randomName } from "../../helpers"; +import { beforeCoderTest } from "../../hooks"; + +test.beforeEach(async ({ page }) => await beforeCoderTest(page)); + +test("create user with password", async ({ page, baseURL }) => { + await page.goto(`${baseURL}/users`, { waitUntil: "domcontentloaded" }); + await expect(page).toHaveTitle("Users - Coder"); + + await page.getByRole("button", { name: "Create user" }).click(); + await expect(page).toHaveTitle("Create User - Coder"); + + const name = randomName(); + const userValues = { + username: name, + email: `${name}@coder.com`, + loginType: "password", + password: "s3cure&password!", + }; + + await page.getByLabel("Username").fill(userValues.username); + await page.getByLabel("Email").fill(userValues.email); + await page.getByLabel("Login Type").click(); + await page.getByRole("option", { name: "Password", exact: false }).click(); + // Using input[name=password] due to the select element utilizing 'password' + // as the label for the currently active option. 
+ const passwordField = page.locator("input[name=password]"); + await passwordField.fill(userValues.password); + await page.getByRole("button", { name: "Create user" }).click(); + await expect(page.getByText("Successfully created user.")).toBeVisible(); + + await expect(page).toHaveTitle("Users - Coder"); + await expect(page.locator("tr", { hasText: userValues.email })).toBeVisible(); +}); diff --git a/site/e2e/tests/users/removeUser.spec.ts b/site/e2e/tests/users/removeUser.spec.ts new file mode 100644 index 0000000000..cd09d13611 --- /dev/null +++ b/site/e2e/tests/users/removeUser.spec.ts @@ -0,0 +1,25 @@ +import { test, expect } from "@playwright/test"; +import { createUser, getCurrentOrgId, setupApiCalls } from "../../api"; +import { beforeCoderTest } from "../../hooks"; + +test.beforeEach(async ({ page }) => await beforeCoderTest(page)); + +test("remove user", async ({ page, baseURL }) => { + await setupApiCalls(page); + const orgId = await getCurrentOrgId(); + const user = await createUser(orgId); + + await page.goto(`${baseURL}/users`, { waitUntil: "domcontentloaded" }); + await expect(page).toHaveTitle("Users - Coder"); + + const userRow = page.getByRole("row", { name: user.email }); + await userRow.getByRole("button", { name: "More options" }).click(); + const menu = page.locator("#more-options"); + await menu.getByText("Delete").click(); + + const dialog = page.getByTestId("dialog"); + await dialog.getByLabel("Name of the user to delete").fill(user.username); + await dialog.getByRole("button", { name: "Delete" }).click(); + + await expect(page.getByText("Successfully deleted the user.")).toBeVisible(); +}); diff --git a/site/package.json b/site/package.json index 4d2ffd9fd6..24ba4d5262 100644 --- a/site/package.json +++ b/site/package.json @@ -16,6 +16,7 @@ "lint:types": "tsc -p .", "playwright:install": "playwright install --with-deps chromium", "playwright:test": "playwright test --config=e2e/playwright.config.ts", + "playwright:test-ui": "playwright 
test --config=e2e/playwright.config.ts --ui $([[ \"$CODER\" == \"true\" ]] && echo --ui-port=7500 --ui-host=0.0.0.0)", "gen:provisioner": "protoc --plugin=./node_modules/.bin/protoc-gen-ts_proto --ts_proto_out=./e2e/ --ts_proto_opt=outputJsonMethods=false,outputEncodeMethods=encode-no-creation,outputClientImpl=false,nestJs=false,outputPartialMethods=false,fileSuffix=Generated,suffix=hey -I ../provisionersdk/proto ../provisionersdk/proto/provisioner.proto && pnpm exec prettier --ignore-path '/dev/null' --cache --write './e2e/provisionerGenerated.ts'", "storybook": "STORYBOOK=true storybook dev -p 6006", "storybook:build": "storybook build", @@ -95,7 +96,7 @@ }, "devDependencies": { "@octokit/types": "12.3.0", - "@playwright/test": "1.42.1", + "@playwright/test": "1.40.1", "@storybook/addon-actions": "8.0.5", "@storybook/addon-essentials": "8.0.5", "@storybook/addon-interactions": "8.0.5", diff --git a/site/pnpm-lock.yaml b/site/pnpm-lock.yaml index 0f135505b2..b723b4cf25 100644 --- a/site/pnpm-lock.yaml +++ b/site/pnpm-lock.yaml @@ -204,8 +204,8 @@ devDependencies: specifier: 12.3.0 version: 12.3.0 '@playwright/test': - specifier: 1.42.1 - version: 1.42.1 + specifier: 1.40.1 + version: 1.40.1 '@storybook/addon-actions': specifier: 8.0.5 version: 8.0.5 @@ -3295,12 +3295,12 @@ packages: dev: true optional: true - /@playwright/test@1.42.1: - resolution: {integrity: sha512-Gq9rmS54mjBL/7/MvBaNOBwbfnh7beHvS6oS4srqXFcQHpQCV1+c8JXWE8VLPyRDhgS3H8x8A7hztqI9VnwrAQ==} + /@playwright/test@1.40.1: + resolution: {integrity: sha512-EaaawMTOeEItCRvfmkI9v6rBkF1svM8wjl/YPRrg2N2Wmp+4qJYkWtJsbew1szfKKDm6fPLy4YAanBhIlf9dWw==} engines: {node: '>=16'} hasBin: true dependencies: - playwright: 1.42.1 + playwright: 1.40.1 dev: true /@popperjs/core@2.11.8: @@ -10706,18 +10706,18 @@ packages: find-up: 5.0.0 dev: true - /playwright-core@1.42.1: - resolution: {integrity: sha512-mxz6zclokgrke9p1vtdy/COWBH+eOZgYUVVU34C73M+4j4HLlQJHtfcqiqqxpP0o8HhMkflvfbquLX5dg6wlfA==} + /playwright-core@1.40.1: + 
resolution: {integrity: sha512-+hkOycxPiV534c4HhpfX6yrlawqVUzITRKwHAmYfmsVreltEl6fAZJ3DPfLMOODw0H3s1Itd6MDCWmP1fl/QvQ==} engines: {node: '>=16'} hasBin: true dev: true - /playwright@1.42.1: - resolution: {integrity: sha512-PgwB03s2DZBcNRoW+1w9E+VkLBxweib6KTXM0M3tkiT4jVxKSi6PmVJ591J+0u10LUrgxB7dLRbiJqO5s2QPMg==} + /playwright@1.40.1: + resolution: {integrity: sha512-2eHI7IioIpQ0bS1Ovg/HszsN/XKNwEG1kbzSDDmADpclKc7CyqkHw7Mg2JCz/bbCxg25QUPcjksoMW7JcIFQmw==} engines: {node: '>=16'} hasBin: true dependencies: - playwright-core: 1.42.1 + playwright-core: 1.40.1 optionalDependencies: fsevents: 2.3.2 dev: true diff --git a/site/src/api/api.ts b/site/src/api/api.ts index 7048d9bca9..12c2a63b2c 100644 --- a/site/src/api/api.ts +++ b/site/src/api/api.ts @@ -72,6 +72,14 @@ if (token !== null && token.getAttribute("content") !== null) { } } +export const setSessionToken = (token: string) => { + axios.defaults.headers.common["Coder-Session-Token"] = token; +}; + +export const setHost = (host?: string) => { + axios.defaults.baseURL = host; +}; + const CONTENT_TYPE_JSON = { "Content-Type": "application/json", }; @@ -1139,7 +1147,6 @@ export const patchGroup = async ( export const addMember = async (groupId: string, userId: string) => { return patchGroup(groupId, { name: "", - display_name: "", add_users: [userId], remove_users: [], }); diff --git a/site/src/api/typesGenerated.ts b/site/src/api/typesGenerated.ts index bdf744e104..be751559f2 100644 --- a/site/src/api/typesGenerated.ts +++ b/site/src/api/typesGenerated.ts @@ -427,13 +427,11 @@ export interface DeploymentValues { readonly rate_limit?: RateLimitConfig; readonly experiments?: string[]; readonly update_check?: boolean; - readonly max_token_lifetime?: number; readonly swagger?: SwaggerConfig; readonly logging?: LoggingConfig; readonly dangerous?: DangerousConfig; readonly disable_path_apps?: boolean; - readonly max_session_expiry?: number; - readonly disable_session_expiry_refresh?: boolean; + readonly session_lifetime?: 
SessionLifetime; readonly disable_password_auth?: boolean; readonly support?: SupportConfig; readonly external_auth?: ExternalAuthConfig[]; @@ -998,6 +996,13 @@ export interface SessionCountDeploymentStats { readonly reconnecting_pty: number; } +// From codersdk/deployment.go +export interface SessionLifetime { + readonly disable_expiry_refresh?: boolean; + readonly default_duration: number; + readonly max_token_lifetime?: number; +} + // From codersdk/deployment.go export interface SupportConfig { readonly links: LinkConfig[]; diff --git a/site/src/modules/dashboard/Navbar/NavbarView.stories.tsx b/site/src/modules/dashboard/Navbar/NavbarView.stories.tsx index fac1f344ce..cf5522c562 100644 --- a/site/src/modules/dashboard/Navbar/NavbarView.stories.tsx +++ b/site/src/modules/dashboard/Navbar/NavbarView.stories.tsx @@ -25,3 +25,9 @@ export const ForMember: Story = { canViewAllUsers: false, }, }; + +export const CustomLogo: Story = { + args: { + logo_url: "/icon/github.svg", + }, +}; diff --git a/site/src/modules/dashboard/Navbar/NavbarView.tsx b/site/src/modules/dashboard/Navbar/NavbarView.tsx index 2ad38dcf9f..558706eee6 100644 --- a/site/src/modules/dashboard/Navbar/NavbarView.tsx +++ b/site/src/modules/dashboard/Navbar/NavbarView.tsx @@ -13,6 +13,7 @@ import { type FC, type ReactNode, useRef, useState } from "react"; import { NavLink, useLocation, useNavigate } from "react-router-dom"; import type * as TypesGen from "api/typesGenerated"; import { Abbr } from "components/Abbr/Abbr"; +import { ExternalImage } from "components/ExternalImage/ExternalImage"; import { displayError } from "components/GlobalSnackbar/utils"; import { CoderIcon } from "components/Icons/CoderIcon"; import { Latency } from "components/Latency/Latency"; @@ -150,7 +151,7 @@ export const NavbarView: FC = ({
{logo_url ? ( - Custom Logo + ) : ( )} @@ -167,7 +168,7 @@ export const NavbarView: FC = ({ {logo_url ? ( - Custom Logo + ) : ( )} diff --git a/site/src/modules/dashboard/ServiceBanner/ServiceBannerView.tsx b/site/src/modules/dashboard/ServiceBanner/ServiceBannerView.tsx index c747285550..e907085cb2 100644 --- a/site/src/modules/dashboard/ServiceBanner/ServiceBannerView.tsx +++ b/site/src/modules/dashboard/ServiceBanner/ServiceBannerView.tsx @@ -16,7 +16,7 @@ export const ServiceBannerView: FC = ({ isPreview, }) => { return ( -
+
{isPreview && Preview}
= ({ agent.name, workspaceName, username, + share.protocol === "https", ); const label = share.port; return ( diff --git a/site/src/pages/CreateTokenPage/CreateTokenForm.tsx b/site/src/pages/CreateTokenPage/CreateTokenForm.tsx index d679e8f812..15af6174cb 100644 --- a/site/src/pages/CreateTokenPage/CreateTokenForm.tsx +++ b/site/src/pages/CreateTokenPage/CreateTokenForm.tsx @@ -116,6 +116,7 @@ export const CreateTokenForm: FC = ({ {lifetimeDays === "custom" && ( - + ); diff --git a/site/src/pages/CreateUserPage/CreateUserPage.test.tsx b/site/src/pages/CreateUserPage/CreateUserPage.test.tsx index 2787b26244..83a1c0266b 100644 --- a/site/src/pages/CreateUserPage/CreateUserPage.test.tsx +++ b/site/src/pages/CreateUserPage/CreateUserPage.test.tsx @@ -1,6 +1,5 @@ import { fireEvent, screen } from "@testing-library/react"; import userEvent from "@testing-library/user-event"; -import { Language as FooterLanguage } from "components/FormFooter/FormFooter"; import { renderWithAuth, waitForLoaderToBeRemoved, @@ -35,9 +34,9 @@ const fillForm = async ({ await userEvent.type(emailField, email); await userEvent.type(loginTypeField, "password"); await userEvent.type(passwordField as HTMLElement, password); - const submitButton = await screen.findByText( - FooterLanguage.defaultSubmitLabel, - ); + const submitButton = screen.getByRole("button", { + name: "Create user", + }); fireEvent.click(submitButton); }; diff --git a/site/src/pages/DeploySettingsPage/AppearanceSettingsPage/AppearanceSettingsPageView.tsx b/site/src/pages/DeploySettingsPage/AppearanceSettingsPage/AppearanceSettingsPageView.tsx index 07203567a0..784ccb94ac 100644 --- a/site/src/pages/DeploySettingsPage/AppearanceSettingsPage/AppearanceSettingsPageView.tsx +++ b/site/src/pages/DeploySettingsPage/AppearanceSettingsPage/AppearanceSettingsPageView.tsx @@ -105,6 +105,9 @@ export const AppearanceSettingsPageView: FC< fullWidth placeholder='Leave empty to display "Coder".' 
disabled={!isEntitled} + inputProps={{ + "aria-label": "Application name", + }} /> @@ -150,6 +153,9 @@ export const AppearanceSettingsPageView: FC< ), }} + inputProps={{ + "aria-label": "Logo URL", + }} /> @@ -208,6 +214,7 @@ export const AppearanceSettingsPageView: FC< ); await serviceBannerForm.setFieldValue("enabled", newState); }} + data-testid="switch-service-banner" /> } label="Enabled" @@ -221,6 +228,9 @@ export const AppearanceSettingsPageView: FC< fullWidth label="Message" multiline + inputProps={{ + "aria-label": "Message", + }} /> diff --git a/site/src/pages/DeploySettingsPage/LicensesSettingsPage/LicenseCard.tsx b/site/src/pages/DeploySettingsPage/LicensesSettingsPage/LicenseCard.tsx index e22330e663..f3c9707c19 100644 --- a/site/src/pages/DeploySettingsPage/LicensesSettingsPage/LicenseCard.tsx +++ b/site/src/pages/DeploySettingsPage/LicensesSettingsPage/LicenseCard.tsx @@ -32,7 +32,12 @@ export const LicenseCard: FC = ({ license.claims.features["user_limit"] || userLimitLimit; return ( - + = ({ alignItems="center" > #{license.id} - + {license.claims.trial ? 
"Trial" : "Enterprise"} = ({ > Users - + {userLimitActual} {` / ${currentUserLimit || "Unlimited"}`} @@ -92,7 +97,7 @@ export const LicenseCard: FC = ({ ) : ( Valid Until )} - + {dayjs .unix(license.claims.license_expires) .format("MMMM D, YYYY")} @@ -104,6 +109,7 @@ export const LicenseCard: FC = ({ variant="contained" size="small" onClick={() => setLicenseIDMarkedForRemoval(license.id)} + className="remove-button" > Remove… diff --git a/site/src/pages/DeploySettingsPage/LicensesSettingsPage/LicensesSettingsPageView.tsx b/site/src/pages/DeploySettingsPage/LicensesSettingsPage/LicensesSettingsPageView.tsx index 08c2db5862..9d023c1749 100644 --- a/site/src/pages/DeploySettingsPage/LicensesSettingsPage/LicensesSettingsPageView.tsx +++ b/site/src/pages/DeploySettingsPage/LicensesSettingsPage/LicensesSettingsPageView.tsx @@ -84,7 +84,7 @@ const LicensesSettingsPageView: FC = ({ {isLoading && } {!isLoading && licenses && licenses?.length > 0 && ( - + {licenses ?.sort( (a, b) => diff --git a/site/src/pages/DeploySettingsPage/Option.tsx b/site/src/pages/DeploySettingsPage/Option.tsx index de9ead5cb9..9e5e9f7abd 100644 --- a/site/src/pages/DeploySettingsPage/Option.tsx +++ b/site/src/pages/DeploySettingsPage/Option.tsx @@ -51,7 +51,7 @@ export const OptionValue: FC = (props) => { if (typeof value === "object" && !Array.isArray(value)) { return ( -
    +
      {Object.entries(value) .sort((a, b) => a[0].localeCompare(b[0])) .map(([option, isEnabled]) => ( @@ -64,6 +64,9 @@ export const OptionValue: FC = (props) => { color: theme.palette.text.disabled, }, ]} + className={`option-array-item-${option} ${ + isEnabled ? "option-enabled" : "option-disabled" + }`} >
      = ({ options, additionalValues }) => { } return ( - + = ({ options, additionalValues }) => { return null; } return ( - + {option.name} {option.description} diff --git a/site/src/pages/DeploySettingsPage/Sidebar.tsx b/site/src/pages/DeploySettingsPage/Sidebar.tsx index 15cf879e87..e473ab94ca 100644 --- a/site/src/pages/DeploySettingsPage/Sidebar.tsx +++ b/site/src/pages/DeploySettingsPage/Sidebar.tsx @@ -3,7 +3,6 @@ import HubOutlinedIcon from "@mui/icons-material/HubOutlined"; import InsertChartIcon from "@mui/icons-material/InsertChart"; import LaunchOutlined from "@mui/icons-material/LaunchOutlined"; import LockRounded from "@mui/icons-material/LockOutlined"; -import MonitorHeartOutlined from "@mui/icons-material/MonitorHeartOutlined"; import Globe from "@mui/icons-material/PublicOutlined"; import ApprovalIcon from "@mui/icons-material/VerifiedUserOutlined"; import VpnKeyOutlined from "@mui/icons-material/VpnKeyOutlined"; @@ -48,9 +47,6 @@ export const Sidebar: FC = () => { Observability - - Health - ); }; diff --git a/site/src/pages/GroupsPage/GroupPage.tsx b/site/src/pages/GroupsPage/GroupPage.tsx index f1f3a7bd24..01e8dc250b 100644 --- a/site/src/pages/GroupsPage/GroupPage.tsx +++ b/site/src/pages/GroupsPage/GroupPage.tsx @@ -197,6 +197,7 @@ export const GroupPage: FC = () => { onConfirm={async () => { try { await deleteGroupMutation.mutateAsync(groupId); + displaySuccess("Group deleted successfully."); navigate("/groups"); } catch (error) { displayError(getErrorMessage(error, "Failed to delete group.")); diff --git a/site/src/pages/UserSettingsPage/AppearancePage/AppearanceForm.tsx b/site/src/pages/UserSettingsPage/AppearancePage/AppearanceForm.tsx index ae0d15358e..6b34ba19b9 100644 --- a/site/src/pages/UserSettingsPage/AppearancePage/AppearanceForm.tsx +++ b/site/src/pages/UserSettingsPage/AppearancePage/AppearanceForm.tsx @@ -57,7 +57,6 @@ export const AppearanceForm: FC = ({ /> onChangeTheme("light")} diff --git 
a/site/src/pages/UsersPage/UsersLayout.tsx b/site/src/pages/UsersPage/UsersLayout.tsx index dc39ae33ac..bb85cae1b0 100644 --- a/site/src/pages/UsersPage/UsersLayout.tsx +++ b/site/src/pages/UsersPage/UsersLayout.tsx @@ -1,7 +1,6 @@ import GroupAdd from "@mui/icons-material/GroupAddOutlined"; import PersonAdd from "@mui/icons-material/PersonAddOutlined"; import Button from "@mui/material/Button"; -import Link from "@mui/material/Link"; import { type FC, Suspense } from "react"; import { Link as RouterLink, @@ -43,9 +42,13 @@ export const UsersLayout: FC = () => { )} {canCreateGroup && isTemplateRBACEnabled && ( - - - + )} } diff --git a/site/src/pages/WorkspaceSettingsPage/WorkspaceSettingsPage.tsx b/site/src/pages/WorkspaceSettingsPage/WorkspaceSettingsPage.tsx index af05809a9a..e289a58c5c 100644 --- a/site/src/pages/WorkspaceSettingsPage/WorkspaceSettingsPage.tsx +++ b/site/src/pages/WorkspaceSettingsPage/WorkspaceSettingsPage.tsx @@ -1,3 +1,4 @@ +import type { FC } from "react"; import { Helmet } from "react-helmet-async"; import { useMutation } from "react-query"; import { useNavigate, useParams } from "react-router-dom"; @@ -8,7 +9,7 @@ import type { WorkspaceSettingsFormValues } from "./WorkspaceSettingsForm"; import { useWorkspaceSettings } from "./WorkspaceSettingsLayout"; import { WorkspaceSettingsPageView } from "./WorkspaceSettingsPageView"; -const WorkspaceSettingsPage = () => { +const WorkspaceSettingsPage: FC = () => { const params = useParams() as { workspace: string; username: string; diff --git a/site/src/utils/portForward.ts b/site/src/utils/portForward.ts index 6d2dc4cbef..bd666823b2 100644 --- a/site/src/utils/portForward.ts +++ b/site/src/utils/portForward.ts @@ -4,12 +4,12 @@ export const portForwardURL = ( agentName: string, workspaceName: string, username: string, + https = false, ): string => { const { location } = window; + const suffix = https ? "s" : ""; - const subdomain = `${ - isNaN(port) ? 
3000 : port - }--${agentName}--${workspaceName}--${username}`; + const subdomain = `${port}${suffix}--${agentName}--${workspaceName}--${username}`; return `${location.protocol}//${host}`.replace("*", subdomain); }; diff --git a/support/support.go b/support/support.go index 47cad76a7d..341e01e186 100644 --- a/support/support.go +++ b/support/support.go @@ -13,6 +13,9 @@ import ( "golang.org/x/sync/errgroup" "golang.org/x/xerrors" "tailscale.com/ipn/ipnstate" + "tailscale.com/net/netcheck" + + "github.com/coder/coder/v2/coderd/healthcheck/derphealth" "github.com/google/uuid" @@ -46,9 +49,16 @@ type Deployment struct { } type Network struct { - CoordinatorDebug string `json:"coordinator_debug"` - TailnetDebug string `json:"tailnet_debug"` - Netcheck *workspacesdk.AgentConnectionInfo `json:"netcheck"` + ConnectionInfo workspacesdk.AgentConnectionInfo + CoordinatorDebug string `json:"coordinator_debug"` + Netcheck *derphealth.Report `json:"netcheck"` + TailnetDebug string `json:"tailnet_debug"` +} + +type Netcheck struct { + Report *netcheck.Report `json:"report"` + Error string `json:"error"` + Logs []string `json:"logs"` } type Workspace struct { @@ -62,6 +72,7 @@ type Workspace struct { type Agent struct { Agent *codersdk.WorkspaceAgent `json:"agent"` + ConnectionInfo *workspacesdk.AgentConnectionInfo `json:"connection_info"` ListeningPorts *codersdk.WorkspaceAgentListeningPortsResponse `json:"listening_ports"` Logs []byte `json:"logs"` ClientMagicsockHTML []byte `json:"client_magicsock_html"` @@ -136,7 +147,7 @@ func DeploymentInfo(ctx context.Context, client *codersdk.Client, log slog.Logge return d } -func NetworkInfo(ctx context.Context, client *codersdk.Client, log slog.Logger, agentID uuid.UUID) Network { +func NetworkInfo(ctx context.Context, client *codersdk.Client, log slog.Logger) Network { var ( n Network eg errgroup.Group @@ -171,15 +182,18 @@ func NetworkInfo(ctx context.Context, client *codersdk.Client, log slog.Logger, }) eg.Go(func() error { - if 
agentID == uuid.Nil { - log.Warn(ctx, "agent id required for agent connection info") + // Need connection info to get DERP map for netcheck + connInfo, err := workspacesdk.New(client).AgentConnectionInfoGeneric(ctx) + if err != nil { + log.Warn(ctx, "unable to fetch generic agent connection info") return nil } - connInfo, err := workspacesdk.New(client).AgentConnectionInfo(ctx, agentID) - if err != nil { - return xerrors.Errorf("fetch agent conn info: %w", err) - } - n.Netcheck = &connInfo + n.ConnectionInfo = connInfo + var rpt derphealth.Report + rpt.Run(ctx, &derphealth.ReportOptions{ + DERPMap: connInfo.DERPMap, + }) + n.Netcheck = &rpt return nil }) @@ -482,7 +496,7 @@ func Run(ctx context.Context, d *Deps) (*Bundle, error) { return nil }) eg.Go(func() error { - ni := NetworkInfo(ctx, d.Client, d.Log, d.AgentID) + ni := NetworkInfo(ctx, d.Client, d.Log) b.Network = ni return nil }) diff --git a/support/support_test.go b/support/support_test.go index 58d5c9731a..55eb6a1f23 100644 --- a/support/support_test.go +++ b/support/support_test.go @@ -62,9 +62,10 @@ func TestRun(t *testing.T) { assertSanitizedDeploymentConfig(t, bun.Deployment.Config) assertNotNilNotEmpty(t, bun.Deployment.HealthReport, "deployment health report should be present") assertNotNilNotEmpty(t, bun.Deployment.Experiments, "deployment experiments should be present") + assertNotNilNotEmpty(t, bun.Network.ConnectionInfo, "agent connection info should be present") assertNotNilNotEmpty(t, bun.Network.CoordinatorDebug, "network coordinator debug should be present") - assertNotNilNotEmpty(t, bun.Network.TailnetDebug, "network tailnet debug should be present") assertNotNilNotEmpty(t, bun.Network.Netcheck, "network netcheck should be present") + assertNotNilNotEmpty(t, bun.Network.TailnetDebug, "network tailnet debug should be present") assertNotNilNotEmpty(t, bun.Workspace.Workspace, "workspace should be present") assertSanitizedWorkspace(t, bun.Workspace.Workspace) assertNotNilNotEmpty(t, 
bun.Workspace.BuildLogs, "workspace build logs should be present") @@ -109,9 +110,10 @@ func TestRun(t *testing.T) { assertSanitizedDeploymentConfig(t, bun.Deployment.Config) assertNotNilNotEmpty(t, bun.Deployment.HealthReport, "deployment health report should be present") assertNotNilNotEmpty(t, bun.Deployment.Experiments, "deployment experiments should be present") + assertNotNilNotEmpty(t, bun.Network.ConnectionInfo, "agent connection info should be present") assertNotNilNotEmpty(t, bun.Network.CoordinatorDebug, "network coordinator debug should be present") + assertNotNilNotEmpty(t, bun.Network.Netcheck, "network netcheck should be present") assertNotNilNotEmpty(t, bun.Network.TailnetDebug, "network tailnet debug should be present") - assert.Empty(t, bun.Network.Netcheck, "did not expect netcheck to be present") assert.Empty(t, bun.Workspace.Workspace, "did not expect workspace to be present") assert.Empty(t, bun.Agent, "did not expect agent to be present") assertNotNilNotEmpty(t, bun.Logs, "bundle logs should be present") diff --git a/tailnet/configmaps.go b/tailnet/configmaps.go index 57a2d9f2d1..8b3aee1585 100644 --- a/tailnet/configmaps.go +++ b/tailnet/configmaps.go @@ -186,7 +186,7 @@ func (c *configMaps) close() { c.L.Lock() defer c.L.Unlock() for _, lc := range c.peers { - lc.resetTimer() + lc.resetLostTimer() } c.closing = true c.Broadcast() @@ -216,6 +216,12 @@ func (c *configMaps) netMapLocked() *netmap.NetworkMap { func (c *configMaps) peerConfigLocked() []*tailcfg.Node { out := make([]*tailcfg.Node, 0, len(c.peers)) for _, p := range c.peers { + // Don't add nodes that we havent received a READY_FOR_HANDSHAKE for + // yet, if they're a destination. If we received a READY_FOR_HANDSHAKE + // for a peer before we receive their node, the node will be nil. 
+ if (!p.readyForHandshake && p.isDestination) || p.node == nil { + continue + } n := p.node.Clone() if c.blockEndpoints { n.Endpoints = nil @@ -225,6 +231,19 @@ func (c *configMaps) peerConfigLocked() []*tailcfg.Node { return out } +func (c *configMaps) setTunnelDestination(id uuid.UUID) { + c.L.Lock() + defer c.L.Unlock() + lc, ok := c.peers[id] + if !ok { + lc = &peerLifecycle{ + peerID: id, + } + c.peers[id] = lc + } + lc.isDestination = true +} + // setAddresses sets the addresses belonging to this node to the given slice. It // triggers configuration of the engine if the addresses have changed. // c.L MUST NOT be held. @@ -331,8 +350,10 @@ func (c *configMaps) updatePeers(updates []*proto.CoordinateResponse_PeerUpdate) // worry about them being up-to-date when handling updates below, and it covers // all peers, not just the ones we got updates about. for _, lc := range c.peers { - if peerStatus, ok := status.Peer[lc.node.Key]; ok { - lc.lastHandshake = peerStatus.LastHandshake + if lc.node != nil { + if peerStatus, ok := status.Peer[lc.node.Key]; ok { + lc.lastHandshake = peerStatus.LastHandshake + } } } @@ -363,7 +384,7 @@ func (c *configMaps) updatePeerLocked(update *proto.CoordinateResponse_PeerUpdat return false } logger := c.logger.With(slog.F("peer_id", id)) - lc, ok := c.peers[id] + lc, peerOk := c.peers[id] var node *tailcfg.Node if update.Kind == proto.CoordinateResponse_PeerUpdate_NODE { // If no preferred DERP is provided, we can't reach the node. @@ -377,48 +398,76 @@ func (c *configMaps) updatePeerLocked(update *proto.CoordinateResponse_PeerUpdat return false } logger = logger.With(slog.F("key_id", node.Key.ShortString()), slog.F("node", node)) - peerStatus, ok := status.Peer[node.Key] - // Starting KeepAlive messages at the initialization of a connection - // causes a race condition. If we send the handshake before the peer has - // our node, we'll have to wait for 5 seconds before trying again. 
- // Ideally, the first handshake starts when the user first initiates a - // connection to the peer. After a successful connection we enable - // keep alives to persist the connection and keep it from becoming idle. - // SSH connections don't send packets while idle, so we use keep alives - // to avoid random hangs while we set up the connection again after - // inactivity. - node.KeepAlive = ok && peerStatus.Active + node.KeepAlive = c.nodeKeepalive(lc, status, node) } switch { - case !ok && update.Kind == proto.CoordinateResponse_PeerUpdate_NODE: + case !peerOk && update.Kind == proto.CoordinateResponse_PeerUpdate_NODE: // new! var lastHandshake time.Time if ps, ok := status.Peer[node.Key]; ok { lastHandshake = ps.LastHandshake } - c.peers[id] = &peerLifecycle{ + lc = &peerLifecycle{ peerID: id, node: node, lastHandshake: lastHandshake, lost: false, } + c.peers[id] = lc logger.Debug(context.Background(), "adding new peer") - return true - case ok && update.Kind == proto.CoordinateResponse_PeerUpdate_NODE: + return lc.validForWireguard() + case peerOk && update.Kind == proto.CoordinateResponse_PeerUpdate_NODE: // update - node.Created = lc.node.Created + if lc.node != nil { + node.Created = lc.node.Created + } dirty = !lc.node.Equal(node) lc.node = node + // validForWireguard checks that the node is non-nil, so should be + // called after we update the node. + dirty = dirty && lc.validForWireguard() lc.lost = false - lc.resetTimer() + lc.resetLostTimer() + if lc.isDestination && !lc.readyForHandshake { + // We received the node of a destination peer before we've received + // their READY_FOR_HANDSHAKE. 
Set a timer + lc.setReadyForHandshakeTimer(c) + logger.Debug(context.Background(), "setting ready for handshake timeout") + } logger.Debug(context.Background(), "node update to existing peer", slog.F("dirty", dirty)) return dirty - case !ok: + case peerOk && update.Kind == proto.CoordinateResponse_PeerUpdate_READY_FOR_HANDSHAKE: + dirty := !lc.readyForHandshake + lc.readyForHandshake = true + if lc.readyForHandshakeTimer != nil { + lc.readyForHandshakeTimer.Stop() + } + if lc.node != nil { + old := lc.node.KeepAlive + lc.node.KeepAlive = c.nodeKeepalive(lc, status, lc.node) + dirty = dirty || (old != lc.node.KeepAlive) + } + logger.Debug(context.Background(), "peer ready for handshake") + // only force a reconfig if the node populated + return dirty && lc.node != nil + case !peerOk && update.Kind == proto.CoordinateResponse_PeerUpdate_READY_FOR_HANDSHAKE: + // When we receive a READY_FOR_HANDSHAKE for a peer we don't know about, + // we create a peerLifecycle with the peerID and set readyForHandshake + // to true. Eventually we should receive a NODE update for this peer, + // and it'll be programmed into wireguard. + logger.Debug(context.Background(), "got peer ready for handshake for unknown peer") + lc = &peerLifecycle{ + peerID: id, + readyForHandshake: true, + } + c.peers[id] = lc + return false + case !peerOk: // disconnected or lost, but we don't have the node. 
No op logger.Debug(context.Background(), "skipping update for peer we don't recognize") return false case update.Kind == proto.CoordinateResponse_PeerUpdate_DISCONNECTED: - lc.resetTimer() + lc.resetLostTimer() delete(c.peers, id) logger.Debug(context.Background(), "disconnected peer") return true @@ -476,10 +525,12 @@ func (c *configMaps) peerLostTimeout(id uuid.UUID) { "timeout triggered for peer that is removed from the map") return } - if peerStatus, ok := status.Peer[lc.node.Key]; ok { - lc.lastHandshake = peerStatus.LastHandshake + if lc.node != nil { + if peerStatus, ok := status.Peer[lc.node.Key]; ok { + lc.lastHandshake = peerStatus.LastHandshake + } + logger = logger.With(slog.F("key_id", lc.node.Key.ShortString())) } - logger = logger.With(slog.F("key_id", lc.node.Key.ShortString())) if !lc.lost { logger.Debug(context.Background(), "timeout triggered for peer that is no longer lost") @@ -522,7 +573,7 @@ func (c *configMaps) nodeAddresses(publicKey key.NodePublic) ([]netip.Prefix, bo c.L.Lock() defer c.L.Unlock() for _, lc := range c.peers { - if lc.node.Key == publicKey { + if lc.node != nil && lc.node.Key == publicKey { return lc.node.Addresses, true } } @@ -539,9 +590,10 @@ func (c *configMaps) fillPeerDiagnostics(d *PeerDiagnostics, peerID uuid.UUID) { } } lc, ok := c.peers[peerID] - if !ok { + if !ok || lc.node == nil { return } + d.ReceivedNode = lc.node ps, ok := status.Peer[lc.node.Key] if !ok { @@ -550,34 +602,102 @@ func (c *configMaps) fillPeerDiagnostics(d *PeerDiagnostics, peerID uuid.UUID) { d.LastWireguardHandshake = ps.LastHandshake } -type peerLifecycle struct { - peerID uuid.UUID - node *tailcfg.Node - lost bool - lastHandshake time.Time - timer *clock.Timer +func (c *configMaps) peerReadyForHandshakeTimeout(peerID uuid.UUID) { + logger := c.logger.With(slog.F("peer_id", peerID)) + logger.Debug(context.Background(), "peer ready for handshake timeout") + c.L.Lock() + defer c.L.Unlock() + lc, ok := c.peers[peerID] + if !ok { + 
logger.Debug(context.Background(), + "ready for handshake timeout triggered for peer that is removed from the map") + return + } + + wasReady := lc.readyForHandshake + lc.readyForHandshake = true + if !wasReady { + logger.Info(context.Background(), "setting peer ready for handshake after timeout") + c.netmapDirty = true + c.Broadcast() + } } -func (l *peerLifecycle) resetTimer() { - if l.timer != nil { - l.timer.Stop() - l.timer = nil +func (*configMaps) nodeKeepalive(lc *peerLifecycle, status *ipnstate.Status, node *tailcfg.Node) bool { + // If the peer is already active, keepalives should be enabled. + if peerStatus, statusOk := status.Peer[node.Key]; statusOk && peerStatus.Active { + return true + } + // If the peer is a destination, we should only enable keepalives if we've + // received the READY_FOR_HANDSHAKE. + if lc != nil && lc.isDestination && lc.readyForHandshake { + return true + } + + // If none of the above are true, keepalives should not be enabled. + return false +} + +type peerLifecycle struct { + peerID uuid.UUID + // isDestination specifies if the peer is a destination, meaning we + // initiated a tunnel to the peer. When the peer is a destination, we do not + // respond to node updates with `READY_FOR_HANDSHAKE`s, and we wait to + // program the peer into wireguard until we receive a READY_FOR_HANDSHAKE + // from the peer or the timeout is reached. + isDestination bool + // node is the tailcfg.Node for the peer. It may be nil until we receive a + // NODE update for it. 
+ node *tailcfg.Node + lost bool + lastHandshake time.Time + lostTimer *clock.Timer + readyForHandshake bool + readyForHandshakeTimer *clock.Timer +} + +func (l *peerLifecycle) resetLostTimer() { + if l.lostTimer != nil { + l.lostTimer.Stop() + l.lostTimer = nil } } func (l *peerLifecycle) setLostTimer(c *configMaps) { - if l.timer != nil { - l.timer.Stop() + if l.lostTimer != nil { + l.lostTimer.Stop() } ttl := lostTimeout - c.clock.Since(l.lastHandshake) if ttl <= 0 { ttl = time.Nanosecond } - l.timer = c.clock.AfterFunc(ttl, func() { + l.lostTimer = c.clock.AfterFunc(ttl, func() { c.peerLostTimeout(l.peerID) }) } +const readyForHandshakeTimeout = 5 * time.Second + +func (l *peerLifecycle) setReadyForHandshakeTimer(c *configMaps) { + if l.readyForHandshakeTimer != nil { + l.readyForHandshakeTimer.Stop() + } + l.readyForHandshakeTimer = c.clock.AfterFunc(readyForHandshakeTimeout, func() { + c.logger.Debug(context.Background(), "ready for handshake timeout", slog.F("peer_id", l.peerID)) + c.peerReadyForHandshakeTimeout(l.peerID) + }) +} + +// validForWireguard returns true if the peer is ready to be programmed into +// wireguard. +func (l *peerLifecycle) validForWireguard() bool { + valid := l.node != nil + if l.isDestination { + return valid && l.readyForHandshake + } + return valid +} + // prefixesDifferent returns true if the two slices contain different prefixes // where order doesn't matter. 
func prefixesDifferent(a, b []netip.Prefix) bool { diff --git a/tailnet/configmaps_internal_test.go b/tailnet/configmaps_internal_test.go index 1008562904..49171ecf03 100644 --- a/tailnet/configmaps_internal_test.go +++ b/tailnet/configmaps_internal_test.go @@ -185,6 +185,258 @@ func TestConfigMaps_updatePeers_new(t *testing.T) { _ = testutil.RequireRecvCtx(ctx, t, done) } +func TestConfigMaps_updatePeers_new_waitForHandshake_neverConfigures(t *testing.T) { + t.Parallel() + ctx := testutil.Context(t, testutil.WaitShort) + logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug) + fEng := newFakeEngineConfigurable() + nodePrivateKey := key.NewNode() + nodeID := tailcfg.NodeID(5) + discoKey := key.NewDisco() + uut := newConfigMaps(logger, fEng, nodeID, nodePrivateKey, discoKey.Public()) + defer uut.close() + start := time.Date(2024, time.March, 29, 8, 0, 0, 0, time.UTC) + mClock := clock.NewMock() + mClock.Set(start) + uut.clock = mClock + + p1ID := uuid.UUID{1} + p1Node := newTestNode(1) + p1n, err := NodeToProto(p1Node) + require.NoError(t, err) + uut.setTunnelDestination(p1ID) + + // it should not send the peer to the netmap + requireNeverConfigures(ctx, t, &uut.phased) + + go func() { + <-fEng.status + fEng.statusDone <- struct{}{} + }() + + u1 := []*proto.CoordinateResponse_PeerUpdate{ + { + Id: p1ID[:], + Kind: proto.CoordinateResponse_PeerUpdate_NODE, + Node: p1n, + }, + } + uut.updatePeers(u1) + + done := make(chan struct{}) + go func() { + defer close(done) + uut.close() + }() + _ = testutil.RequireRecvCtx(ctx, t, done) +} + +func TestConfigMaps_updatePeers_new_waitForHandshake_outOfOrder(t *testing.T) { + t.Parallel() + ctx := testutil.Context(t, testutil.WaitShort) + logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug) + fEng := newFakeEngineConfigurable() + nodePrivateKey := key.NewNode() + nodeID := tailcfg.NodeID(5) + discoKey := key.NewDisco() + uut := newConfigMaps(logger, fEng, nodeID, nodePrivateKey, discoKey.Public()) + defer uut.close() + 
start := time.Date(2024, time.March, 29, 8, 0, 0, 0, time.UTC) + mClock := clock.NewMock() + mClock.Set(start) + uut.clock = mClock + + p1ID := uuid.UUID{1} + p1Node := newTestNode(1) + p1n, err := NodeToProto(p1Node) + require.NoError(t, err) + uut.setTunnelDestination(p1ID) + + go func() { + <-fEng.status + fEng.statusDone <- struct{}{} + }() + + u2 := []*proto.CoordinateResponse_PeerUpdate{ + { + Id: p1ID[:], + Kind: proto.CoordinateResponse_PeerUpdate_READY_FOR_HANDSHAKE, + }, + } + uut.updatePeers(u2) + + // it should not send the peer to the netmap yet + + go func() { + <-fEng.status + fEng.statusDone <- struct{}{} + }() + + u1 := []*proto.CoordinateResponse_PeerUpdate{ + { + Id: p1ID[:], + Kind: proto.CoordinateResponse_PeerUpdate_NODE, + Node: p1n, + }, + } + uut.updatePeers(u1) + + // it should now send the peer to the netmap + + nm := testutil.RequireRecvCtx(ctx, t, fEng.setNetworkMap) + r := testutil.RequireRecvCtx(ctx, t, fEng.reconfig) + + require.Len(t, nm.Peers, 1) + n1 := getNodeWithID(t, nm.Peers, 1) + require.Equal(t, "127.3.3.40:1", n1.DERP) + require.Equal(t, p1Node.Endpoints, n1.Endpoints) + require.True(t, n1.KeepAlive) + + // we rely on nmcfg.WGCfg() to convert the netmap to wireguard config, so just + // require the right number of peers. 
+ require.Len(t, r.wg.Peers, 1) + + done := make(chan struct{}) + go func() { + defer close(done) + uut.close() + }() + _ = testutil.RequireRecvCtx(ctx, t, done) +} + +func TestConfigMaps_updatePeers_new_waitForHandshake(t *testing.T) { + t.Parallel() + ctx := testutil.Context(t, testutil.WaitShort) + logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug) + fEng := newFakeEngineConfigurable() + nodePrivateKey := key.NewNode() + nodeID := tailcfg.NodeID(5) + discoKey := key.NewDisco() + uut := newConfigMaps(logger, fEng, nodeID, nodePrivateKey, discoKey.Public()) + defer uut.close() + start := time.Date(2024, time.March, 29, 8, 0, 0, 0, time.UTC) + mClock := clock.NewMock() + mClock.Set(start) + uut.clock = mClock + + p1ID := uuid.UUID{1} + p1Node := newTestNode(1) + p1n, err := NodeToProto(p1Node) + require.NoError(t, err) + uut.setTunnelDestination(p1ID) + + go func() { + <-fEng.status + fEng.statusDone <- struct{}{} + }() + + u1 := []*proto.CoordinateResponse_PeerUpdate{ + { + Id: p1ID[:], + Kind: proto.CoordinateResponse_PeerUpdate_NODE, + Node: p1n, + }, + } + uut.updatePeers(u1) + + // it should not send the peer to the netmap yet + + go func() { + <-fEng.status + fEng.statusDone <- struct{}{} + }() + + u2 := []*proto.CoordinateResponse_PeerUpdate{ + { + Id: p1ID[:], + Kind: proto.CoordinateResponse_PeerUpdate_READY_FOR_HANDSHAKE, + }, + } + uut.updatePeers(u2) + + // it should now send the peer to the netmap + + nm := testutil.RequireRecvCtx(ctx, t, fEng.setNetworkMap) + r := testutil.RequireRecvCtx(ctx, t, fEng.reconfig) + + require.Len(t, nm.Peers, 1) + n1 := getNodeWithID(t, nm.Peers, 1) + require.Equal(t, "127.3.3.40:1", n1.DERP) + require.Equal(t, p1Node.Endpoints, n1.Endpoints) + require.True(t, n1.KeepAlive) + + // we rely on nmcfg.WGCfg() to convert the netmap to wireguard config, so just + // require the right number of peers. 
+ require.Len(t, r.wg.Peers, 1) + + done := make(chan struct{}) + go func() { + defer close(done) + uut.close() + }() + _ = testutil.RequireRecvCtx(ctx, t, done) +} + +func TestConfigMaps_updatePeers_new_waitForHandshake_timeout(t *testing.T) { + t.Parallel() + ctx := testutil.Context(t, testutil.WaitShort) + logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug) + fEng := newFakeEngineConfigurable() + nodePrivateKey := key.NewNode() + nodeID := tailcfg.NodeID(5) + discoKey := key.NewDisco() + uut := newConfigMaps(logger, fEng, nodeID, nodePrivateKey, discoKey.Public()) + defer uut.close() + start := time.Date(2024, time.March, 29, 8, 0, 0, 0, time.UTC) + mClock := clock.NewMock() + mClock.Set(start) + uut.clock = mClock + + p1ID := uuid.UUID{1} + p1Node := newTestNode(1) + p1n, err := NodeToProto(p1Node) + require.NoError(t, err) + uut.setTunnelDestination(p1ID) + + go func() { + <-fEng.status + fEng.statusDone <- struct{}{} + }() + + u1 := []*proto.CoordinateResponse_PeerUpdate{ + { + Id: p1ID[:], + Kind: proto.CoordinateResponse_PeerUpdate_NODE, + Node: p1n, + }, + } + uut.updatePeers(u1) + + mClock.Add(5 * time.Second) + + // it should now send the peer to the netmap + + nm := testutil.RequireRecvCtx(ctx, t, fEng.setNetworkMap) + r := testutil.RequireRecvCtx(ctx, t, fEng.reconfig) + + require.Len(t, nm.Peers, 1) + n1 := getNodeWithID(t, nm.Peers, 1) + require.Equal(t, "127.3.3.40:1", n1.DERP) + require.Equal(t, p1Node.Endpoints, n1.Endpoints) + require.False(t, n1.KeepAlive) + + // we rely on nmcfg.WGCfg() to convert the netmap to wireguard config, so just + // require the right number of peers. 
+ require.Len(t, r.wg.Peers, 1) + + done := make(chan struct{}) + go func() { + defer close(done) + uut.close() + }() + _ = testutil.RequireRecvCtx(ctx, t, done) +} + func TestConfigMaps_updatePeers_same(t *testing.T) { t.Parallel() ctx := testutil.Context(t, testutil.WaitShort) @@ -274,7 +526,7 @@ func TestConfigMaps_updatePeers_disconnect(t *testing.T) { peerID: p1ID, node: p1tcn, lastHandshake: time.Date(2024, 1, 7, 12, 0, 10, 0, time.UTC), - timer: timer, + lostTimer: timer, } uut.L.Unlock() @@ -947,6 +1199,7 @@ func requireNeverConfigures(ctx context.Context, t *testing.T, uut *phased) { t.Helper() waiting := make(chan struct{}) go func() { + t.Helper() // ensure that we never configure, and go straight to closed uut.L.Lock() defer uut.L.Unlock() diff --git a/tailnet/conn.go b/tailnet/conn.go index e6dbdfdc38..d4d58c7cc9 100644 --- a/tailnet/conn.go +++ b/tailnet/conn.go @@ -88,7 +88,6 @@ type Options struct { // falling back. This is useful for misbehaving proxies that prevent // fallback due to odd behavior, like Azure App Proxy. DERPForceWebSockets bool - // BlockEndpoints specifies whether P2P endpoints are blocked. // If so, only DERPs can establish connections. BlockEndpoints bool @@ -311,6 +310,10 @@ type Conn struct { trafficStats *connstats.Statistics } +func (c *Conn) SetTunnelDestination(id uuid.UUID) { + c.configMaps.setTunnelDestination(id) +} + func (c *Conn) GetBlockEndpoints() bool { return c.configMaps.getBlockEndpoints() && c.nodeUpdater.getBlockEndpoints() } diff --git a/tailnet/coordinator.go b/tailnet/coordinator.go index ce9c8e99b2..95f61637f7 100644 --- a/tailnet/coordinator.go +++ b/tailnet/coordinator.go @@ -99,6 +99,9 @@ type Coordinatee interface { UpdatePeers([]*proto.CoordinateResponse_PeerUpdate) error SetAllPeersLost() SetNodeCallback(func(*Node)) + // SetTunnelDestination indicates to tailnet that the peer id is a + // destination. 
+ SetTunnelDestination(id uuid.UUID) } type Coordination interface { @@ -111,6 +114,7 @@ type remoteCoordination struct { closed bool errChan chan error coordinatee Coordinatee + tgt uuid.UUID logger slog.Logger protocol proto.DRPCTailnet_CoordinateClient respLoopDone chan struct{} @@ -161,11 +165,37 @@ func (c *remoteCoordination) respLoop() { c.sendErr(xerrors.Errorf("read: %w", err)) return } + err = c.coordinatee.UpdatePeers(resp.GetPeerUpdates()) if err != nil { c.sendErr(xerrors.Errorf("update peers: %w", err)) return } + + // Only send acks from peers without a target. + if c.tgt == uuid.Nil { + // Send an ack back for all received peers. This could + // potentially be smarter to only send an ACK once per client, + // but there's nothing currently stopping clients from reusing + // IDs. + rfh := []*proto.CoordinateRequest_ReadyForHandshake{} + for _, peer := range resp.GetPeerUpdates() { + if peer.Kind != proto.CoordinateResponse_PeerUpdate_NODE { + continue + } + + rfh = append(rfh, &proto.CoordinateRequest_ReadyForHandshake{Id: peer.Id}) + } + if len(rfh) > 0 { + err := c.protocol.Send(&proto.CoordinateRequest{ + ReadyForHandshake: rfh, + }) + if err != nil { + c.sendErr(xerrors.Errorf("send: %w", err)) + return + } + } + } } } @@ -179,11 +209,14 @@ func NewRemoteCoordination(logger slog.Logger, c := &remoteCoordination{ errChan: make(chan error, 1), coordinatee: coordinatee, + tgt: tunnelTarget, logger: logger, protocol: protocol, respLoopDone: make(chan struct{}), } if tunnelTarget != uuid.Nil { + // TODO: reenable in upstack PR + // c.coordinatee.SetTunnelDestination(tunnelTarget) c.Lock() err := c.protocol.Send(&proto.CoordinateRequest{AddTunnel: &proto.CoordinateRequest_Tunnel{Id: tunnelTarget[:]}}) c.Unlock() @@ -327,6 +360,13 @@ func (c *inMemoryCoordination) respLoop() { } } +func (*inMemoryCoordination) AwaitAck() <-chan struct{} { + // This is only used for tests, so just return a closed channel. 
+ ch := make(chan struct{}) + close(ch) + return ch +} + func (c *inMemoryCoordination) Close() error { c.Lock() defer c.Unlock() @@ -658,6 +698,54 @@ func (c *core) handleRequest(p *peer, req *proto.CoordinateRequest) error { if req.Disconnect != nil { c.removePeerLocked(p.id, proto.CoordinateResponse_PeerUpdate_DISCONNECTED, "graceful disconnect") } + if rfhs := req.ReadyForHandshake; rfhs != nil { + err := c.handleReadyForHandshakeLocked(pr, rfhs) + if err != nil { + return xerrors.Errorf("handle ack: %w", err) + } + } + return nil +} + +func (c *core) handleReadyForHandshakeLocked(src *peer, rfhs []*proto.CoordinateRequest_ReadyForHandshake) error { + for _, rfh := range rfhs { + dstID, err := uuid.FromBytes(rfh.Id) + if err != nil { + // this shouldn't happen unless there is a client error. Close the connection so the client + // doesn't just happily continue thinking everything is fine. + return xerrors.Errorf("unable to convert bytes to UUID: %w", err) + } + + if !c.tunnels.tunnelExists(src.id, dstID) { + // We intentionally do not return an error here, since it's + // inherently racy. It's possible for a source to connect, then + // subsequently disconnect before the agent has sent back the RFH. + // Since this could potentially happen to a non-malicious agent, we + // don't want to kill its connection. 
+ select { + case src.resps <- &proto.CoordinateResponse{ + Error: fmt.Sprintf("you do not share a tunnel with %q", dstID.String()), + }: + default: + return ErrWouldBlock + } + continue + } + + dst, ok := c.peers[dstID] + if ok { + select { + case dst.resps <- &proto.CoordinateResponse{ + PeerUpdates: []*proto.CoordinateResponse_PeerUpdate{{ + Id: src.id[:], + Kind: proto.CoordinateResponse_PeerUpdate_READY_FOR_HANDSHAKE, + }}, + }: + default: + return ErrWouldBlock + } + } + } return nil } diff --git a/tailnet/coordinator_test.go b/tailnet/coordinator_test.go index d8a6f297b5..c4e269c53c 100644 --- a/tailnet/coordinator_test.go +++ b/tailnet/coordinator_test.go @@ -412,6 +412,68 @@ func TestCoordinator(t *testing.T) { _ = testutil.RequireRecvCtx(ctx, t, clientErrChan) _ = testutil.RequireRecvCtx(ctx, t, closeClientChan) }) + + t.Run("AgentAck", func(t *testing.T) { + t.Parallel() + logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug) + coordinator := tailnet.NewCoordinator(logger) + ctx := testutil.Context(t, testutil.WaitShort) + + clientID := uuid.New() + agentID := uuid.New() + + aReq, aRes := coordinator.Coordinate(ctx, agentID, agentID.String(), tailnet.AgentCoordinateeAuth{ID: agentID}) + cReq, cRes := coordinator.Coordinate(ctx, clientID, clientID.String(), tailnet.ClientCoordinateeAuth{AgentID: agentID}) + + { + nk, err := key.NewNode().Public().MarshalBinary() + require.NoError(t, err) + dk, err := key.NewDisco().Public().MarshalText() + require.NoError(t, err) + cReq <- &proto.CoordinateRequest{UpdateSelf: &proto.CoordinateRequest_UpdateSelf{ + Node: &proto.Node{ + Id: 3, + Key: nk, + Disco: string(dk), + }, + }} + } + + cReq <- &proto.CoordinateRequest{AddTunnel: &proto.CoordinateRequest_Tunnel{ + Id: agentID[:], + }} + + testutil.RequireRecvCtx(ctx, t, aRes) + + aReq <- &proto.CoordinateRequest{ReadyForHandshake: []*proto.CoordinateRequest_ReadyForHandshake{{ + Id: clientID[:], + }}} + ack := testutil.RequireRecvCtx(ctx, t, cRes) + 
require.NotNil(t, ack.PeerUpdates) + require.Len(t, ack.PeerUpdates, 1) + require.Equal(t, proto.CoordinateResponse_PeerUpdate_READY_FOR_HANDSHAKE, ack.PeerUpdates[0].Kind) + require.Equal(t, agentID[:], ack.PeerUpdates[0].Id) + }) + + t.Run("AgentAck_NoPermission", func(t *testing.T) { + t.Parallel() + logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug) + coordinator := tailnet.NewCoordinator(logger) + ctx := testutil.Context(t, testutil.WaitShort) + + clientID := uuid.New() + agentID := uuid.New() + + aReq, aRes := coordinator.Coordinate(ctx, agentID, agentID.String(), tailnet.AgentCoordinateeAuth{ID: agentID}) + _, _ = coordinator.Coordinate(ctx, clientID, clientID.String(), tailnet.ClientCoordinateeAuth{AgentID: agentID}) + + aReq <- &proto.CoordinateRequest{ReadyForHandshake: []*proto.CoordinateRequest_ReadyForHandshake{{ + Id: clientID[:], + }}} + + rfhError := testutil.RequireRecvCtx(ctx, t, aRes) + require.NotEmpty(t, rfhError.Error) + }) } // TestCoordinator_AgentUpdateWhileClientConnects tests for regression on @@ -638,6 +700,76 @@ func TestRemoteCoordination(t *testing.T) { } } +func TestRemoteCoordination_SendsReadyForHandshake(t *testing.T) { + t.Parallel() + ctx := testutil.Context(t, testutil.WaitShort) + logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug) + clientID := uuid.UUID{1} + agentID := uuid.UUID{2} + mCoord := tailnettest.NewMockCoordinator(gomock.NewController(t)) + fConn := &fakeCoordinatee{} + + reqs := make(chan *proto.CoordinateRequest, 100) + resps := make(chan *proto.CoordinateResponse, 100) + mCoord.EXPECT().Coordinate(gomock.Any(), clientID, gomock.Any(), tailnet.ClientCoordinateeAuth{agentID}). 
+ Times(1).Return(reqs, resps) + + var coord tailnet.Coordinator = mCoord + coordPtr := atomic.Pointer[tailnet.Coordinator]{} + coordPtr.Store(&coord) + svc, err := tailnet.NewClientService( + logger.Named("svc"), &coordPtr, + time.Hour, + func() *tailcfg.DERPMap { panic("not implemented") }, + ) + require.NoError(t, err) + sC, cC := net.Pipe() + + serveErr := make(chan error, 1) + go func() { + err := svc.ServeClient(ctx, proto.CurrentVersion.String(), sC, clientID, agentID) + serveErr <- err + }() + + client, err := tailnet.NewDRPCClient(cC, logger) + require.NoError(t, err) + protocol, err := client.Coordinate(ctx) + require.NoError(t, err) + + uut := tailnet.NewRemoteCoordination(logger.Named("coordination"), protocol, fConn, uuid.UUID{}) + defer uut.Close() + + nk, err := key.NewNode().Public().MarshalBinary() + require.NoError(t, err) + dk, err := key.NewDisco().Public().MarshalText() + require.NoError(t, err) + testutil.RequireSendCtx(ctx, t, resps, &proto.CoordinateResponse{ + PeerUpdates: []*proto.CoordinateResponse_PeerUpdate{{ + Id: clientID[:], + Kind: proto.CoordinateResponse_PeerUpdate_NODE, + Node: &proto.Node{ + Id: 3, + Key: nk, + Disco: string(dk), + }, + }}, + }) + + rfh := testutil.RequireRecvCtx(ctx, t, reqs) + require.NotNil(t, rfh.ReadyForHandshake) + require.Len(t, rfh.ReadyForHandshake, 1) + require.Equal(t, clientID[:], rfh.ReadyForHandshake[0].Id) + + require.NoError(t, uut.Close()) + + select { + case err := <-uut.Error(): + require.ErrorContains(t, err, "stream terminated by sending close") + default: + // OK! 
+ } +} + // coordinationTest tests that a coordination behaves correctly func coordinationTest( ctx context.Context, t *testing.T, @@ -698,6 +830,7 @@ type fakeCoordinatee struct { callback func(*tailnet.Node) updates [][]*proto.CoordinateResponse_PeerUpdate setAllPeersLostCalls int + tunnelDestinations map[uuid.UUID]struct{} } func (f *fakeCoordinatee) UpdatePeers(updates []*proto.CoordinateResponse_PeerUpdate) error { @@ -713,6 +846,16 @@ func (f *fakeCoordinatee) SetAllPeersLost() { f.setAllPeersLostCalls++ } +func (f *fakeCoordinatee) SetTunnelDestination(id uuid.UUID) { + f.Lock() + defer f.Unlock() + + if f.tunnelDestinations == nil { + f.tunnelDestinations = map[uuid.UUID]struct{}{} + } + f.tunnelDestinations[id] = struct{}{} +} + func (f *fakeCoordinatee) SetNodeCallback(callback func(*tailnet.Node)) { f.Lock() defer f.Unlock() diff --git a/tailnet/proto/tailnet.pb.go b/tailnet/proto/tailnet.pb.go index 63444f2173..5f623cf2b8 100644 --- a/tailnet/proto/tailnet.pb.go +++ b/tailnet/proto/tailnet.pb.go @@ -24,10 +24,11 @@ const ( type CoordinateResponse_PeerUpdate_Kind int32 const ( - CoordinateResponse_PeerUpdate_KIND_UNSPECIFIED CoordinateResponse_PeerUpdate_Kind = 0 - CoordinateResponse_PeerUpdate_NODE CoordinateResponse_PeerUpdate_Kind = 1 - CoordinateResponse_PeerUpdate_DISCONNECTED CoordinateResponse_PeerUpdate_Kind = 2 - CoordinateResponse_PeerUpdate_LOST CoordinateResponse_PeerUpdate_Kind = 3 + CoordinateResponse_PeerUpdate_KIND_UNSPECIFIED CoordinateResponse_PeerUpdate_Kind = 0 + CoordinateResponse_PeerUpdate_NODE CoordinateResponse_PeerUpdate_Kind = 1 + CoordinateResponse_PeerUpdate_DISCONNECTED CoordinateResponse_PeerUpdate_Kind = 2 + CoordinateResponse_PeerUpdate_LOST CoordinateResponse_PeerUpdate_Kind = 3 + CoordinateResponse_PeerUpdate_READY_FOR_HANDSHAKE CoordinateResponse_PeerUpdate_Kind = 4 ) // Enum value maps for CoordinateResponse_PeerUpdate_Kind. 
@@ -37,12 +38,14 @@ var ( 1: "NODE", 2: "DISCONNECTED", 3: "LOST", + 4: "READY_FOR_HANDSHAKE", } CoordinateResponse_PeerUpdate_Kind_value = map[string]int32{ - "KIND_UNSPECIFIED": 0, - "NODE": 1, - "DISCONNECTED": 2, - "LOST": 3, + "KIND_UNSPECIFIED": 0, + "NODE": 1, + "DISCONNECTED": 2, + "LOST": 3, + "READY_FOR_HANDSHAKE": 4, } ) @@ -291,10 +294,11 @@ type CoordinateRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - UpdateSelf *CoordinateRequest_UpdateSelf `protobuf:"bytes,1,opt,name=update_self,json=updateSelf,proto3" json:"update_self,omitempty"` - Disconnect *CoordinateRequest_Disconnect `protobuf:"bytes,2,opt,name=disconnect,proto3" json:"disconnect,omitempty"` - AddTunnel *CoordinateRequest_Tunnel `protobuf:"bytes,3,opt,name=add_tunnel,json=addTunnel,proto3" json:"add_tunnel,omitempty"` - RemoveTunnel *CoordinateRequest_Tunnel `protobuf:"bytes,4,opt,name=remove_tunnel,json=removeTunnel,proto3" json:"remove_tunnel,omitempty"` + UpdateSelf *CoordinateRequest_UpdateSelf `protobuf:"bytes,1,opt,name=update_self,json=updateSelf,proto3" json:"update_self,omitempty"` + Disconnect *CoordinateRequest_Disconnect `protobuf:"bytes,2,opt,name=disconnect,proto3" json:"disconnect,omitempty"` + AddTunnel *CoordinateRequest_Tunnel `protobuf:"bytes,3,opt,name=add_tunnel,json=addTunnel,proto3" json:"add_tunnel,omitempty"` + RemoveTunnel *CoordinateRequest_Tunnel `protobuf:"bytes,4,opt,name=remove_tunnel,json=removeTunnel,proto3" json:"remove_tunnel,omitempty"` + ReadyForHandshake []*CoordinateRequest_ReadyForHandshake `protobuf:"bytes,5,rep,name=ready_for_handshake,json=readyForHandshake,proto3" json:"ready_for_handshake,omitempty"` } func (x *CoordinateRequest) Reset() { @@ -357,12 +361,20 @@ func (x *CoordinateRequest) GetRemoveTunnel() *CoordinateRequest_Tunnel { return nil } +func (x *CoordinateRequest) GetReadyForHandshake() []*CoordinateRequest_ReadyForHandshake { + if x != nil { + return x.ReadyForHandshake + } + return nil +} + type 
CoordinateResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields PeerUpdates []*CoordinateResponse_PeerUpdate `protobuf:"bytes,1,rep,name=peer_updates,json=peerUpdates,proto3" json:"peer_updates,omitempty"` + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` } func (x *CoordinateResponse) Reset() { @@ -404,6 +416,13 @@ func (x *CoordinateResponse) GetPeerUpdates() []*CoordinateResponse_PeerUpdate { return nil } +func (x *CoordinateResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + type DERPMap_HomeParams struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -813,6 +832,57 @@ func (x *CoordinateRequest_Tunnel) GetId() []byte { return nil } +// ReadyForHandshakes are sent from destinations back to the source, +// acknowledging receipt of the source's node. If the source starts pinging +// before a ReadyForHandshake, the Wireguard handshake will likely be +// dropped.
+type CoordinateRequest_ReadyForHandshake struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *CoordinateRequest_ReadyForHandshake) Reset() { + *x = CoordinateRequest_ReadyForHandshake{} + if protoimpl.UnsafeEnabled { + mi := &file_tailnet_proto_tailnet_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CoordinateRequest_ReadyForHandshake) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CoordinateRequest_ReadyForHandshake) ProtoMessage() {} + +func (x *CoordinateRequest_ReadyForHandshake) ProtoReflect() protoreflect.Message { + mi := &file_tailnet_proto_tailnet_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CoordinateRequest_ReadyForHandshake.ProtoReflect.Descriptor instead. 
+func (*CoordinateRequest_ReadyForHandshake) Descriptor() ([]byte, []int) { + return file_tailnet_proto_tailnet_proto_rawDescGZIP(), []int{3, 3} +} + +func (x *CoordinateRequest_ReadyForHandshake) GetId() []byte { + if x != nil { + return x.Id + } + return nil +} + type CoordinateResponse_PeerUpdate struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -827,7 +897,7 @@ type CoordinateResponse_PeerUpdate struct { func (x *CoordinateResponse_PeerUpdate) Reset() { *x = CoordinateResponse_PeerUpdate{} if protoimpl.UnsafeEnabled { - mi := &file_tailnet_proto_tailnet_proto_msgTypes[15] + mi := &file_tailnet_proto_tailnet_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -840,7 +910,7 @@ func (x *CoordinateResponse_PeerUpdate) String() string { func (*CoordinateResponse_PeerUpdate) ProtoMessage() {} func (x *CoordinateResponse_PeerUpdate) ProtoReflect() protoreflect.Message { - mi := &file_tailnet_proto_tailnet_proto_msgTypes[15] + mi := &file_tailnet_proto_tailnet_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -992,7 +1062,7 @@ var file_tailnet_proto_tailnet_proto_rawDesc = []byte{ 0x57, 0x65, 0x62, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb2, 0x03, 0x0a, 0x11, 0x43, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xbe, 0x04, 0x0a, 0x11, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4f, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x65, 0x6c, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x63, 
0x6f, 0x64, 0x65, 0x72, 0x2e, 0x74, 0x61, @@ -1013,50 +1083,62 @@ var file_tailnet_proto_tailnet_proto_rawDesc = []byte{ 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x74, 0x61, 0x69, 0x6c, 0x6e, 0x65, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x54, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x52, - 0x0c, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x54, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x1a, 0x38, 0x0a, - 0x0a, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x6c, 0x66, 0x12, 0x2a, 0x0a, 0x04, 0x6e, - 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x6f, 0x64, 0x65, - 0x72, 0x2e, 0x74, 0x61, 0x69, 0x6c, 0x6e, 0x65, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x64, - 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x1a, 0x0c, 0x0a, 0x0a, 0x44, 0x69, 0x73, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x1a, 0x18, 0x0a, 0x06, 0x54, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x12, - 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x22, - 0xd9, 0x02, 0x0a, 0x12, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, 0x0a, 0x0c, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x75, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x63, - 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x74, 0x61, 0x69, 0x6c, 0x6e, 0x65, 0x74, 0x2e, 0x76, 0x32, 0x2e, + 0x0c, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x54, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x65, 0x0a, + 0x13, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x73, + 0x68, 0x61, 0x6b, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x63, 0x6f, 0x64, + 0x65, 0x72, 0x2e, 0x74, 0x61, 0x69, 0x6c, 0x6e, 0x65, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, + 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, + 0x52, 0x65, 0x61, 
0x64, 0x79, 0x46, 0x6f, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, + 0x65, 0x52, 0x11, 0x72, 0x65, 0x61, 0x64, 0x79, 0x46, 0x6f, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, + 0x68, 0x61, 0x6b, 0x65, 0x1a, 0x38, 0x0a, 0x0a, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, + 0x6c, 0x66, 0x12, 0x2a, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x16, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x74, 0x61, 0x69, 0x6c, 0x6e, 0x65, 0x74, + 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x1a, 0x0c, + 0x0a, 0x0a, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x1a, 0x18, 0x0a, 0x06, + 0x54, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x1a, 0x23, 0x0a, 0x11, 0x52, 0x65, 0x61, 0x64, 0x79, 0x46, + 0x6f, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x22, 0x88, 0x03, 0x0a, 0x12, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x0b, 0x70, - 0x65, 0x65, 0x72, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x73, 0x1a, 0xee, 0x01, 0x0a, 0x0a, 0x50, - 0x65, 0x65, 0x72, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2a, 0x0a, 0x04, 0x6e, 0x6f, 0x64, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, - 0x74, 0x61, 0x69, 0x6c, 0x6e, 0x65, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, - 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x48, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x74, 0x61, 0x69, 0x6c, - 0x6e, 0x65, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6f, 
0x72, 0x64, 0x69, 0x6e, 0x61, 0x74, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x2e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, - 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x42, 0x0a, 0x04, 0x4b, 0x69, 0x6e, 0x64, 0x12, - 0x14, 0x0a, 0x10, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, - 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x44, 0x45, 0x10, 0x01, 0x12, - 0x10, 0x0a, 0x0c, 0x44, 0x49, 0x53, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x45, 0x44, 0x10, - 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x4f, 0x53, 0x54, 0x10, 0x03, 0x32, 0xbe, 0x01, 0x0a, 0x07, - 0x54, 0x61, 0x69, 0x6c, 0x6e, 0x65, 0x74, 0x12, 0x56, 0x0a, 0x0e, 0x53, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x44, 0x45, 0x52, 0x50, 0x4d, 0x61, 0x70, 0x73, 0x12, 0x27, 0x2e, 0x63, 0x6f, 0x64, 0x65, - 0x72, 0x2e, 0x74, 0x61, 0x69, 0x6c, 0x6e, 0x65, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x44, 0x45, 0x52, 0x50, 0x4d, 0x61, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x74, 0x61, 0x69, 0x6c, 0x6e, - 0x65, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x45, 0x52, 0x50, 0x4d, 0x61, 0x70, 0x30, 0x01, 0x12, - 0x5b, 0x0a, 0x0a, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x12, 0x23, 0x2e, + 0x73, 0x65, 0x12, 0x52, 0x0a, 0x0c, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, + 0x2e, 0x74, 0x61, 0x69, 0x6c, 0x6e, 0x65, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6f, 0x72, + 0x64, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, + 0x65, 0x65, 0x72, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x0b, 0x70, 0x65, 0x65, 0x72, 0x55, + 
0x70, 0x64, 0x61, 0x74, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x1a, 0x87, 0x02, 0x0a, + 0x0a, 0x50, 0x65, 0x65, 0x72, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2a, 0x0a, 0x04, 0x6e, + 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x6f, 0x64, 0x65, + 0x72, 0x2e, 0x74, 0x61, 0x69, 0x6c, 0x6e, 0x65, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x64, + 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x48, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x74, 0x61, + 0x69, 0x6c, 0x6e, 0x65, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x65, 0x65, 0x72, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b, 0x69, 0x6e, + 0x64, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x5b, 0x0a, 0x04, 0x4b, 0x69, 0x6e, + 0x64, 0x12, 0x14, 0x0a, 0x10, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x44, 0x45, 0x10, + 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x44, 0x49, 0x53, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x45, + 0x44, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x4f, 0x53, 0x54, 0x10, 0x03, 0x12, 0x17, 0x0a, + 0x13, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x46, 0x4f, 0x52, 0x5f, 0x48, 0x41, 0x4e, 0x44, 0x53, + 0x48, 0x41, 0x4b, 0x45, 0x10, 0x04, 0x32, 0xbe, 0x01, 0x0a, 0x07, 0x54, 0x61, 0x69, 0x6c, 0x6e, + 0x65, 0x74, 0x12, 0x56, 0x0a, 0x0e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x45, 0x52, 0x50, + 0x4d, 0x61, 0x70, 0x73, 0x12, 0x27, 
0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x74, 0x61, 0x69, + 0x6c, 0x6e, 0x65, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x45, + 0x52, 0x50, 0x4d, 0x61, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x74, 0x61, 0x69, 0x6c, 0x6e, 0x65, 0x74, 0x2e, 0x76, 0x32, - 0x2e, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x74, 0x61, 0x69, 0x6c, 0x6e, - 0x65, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x29, 0x5a, 0x27, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x72, - 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x74, 0x61, 0x69, 0x6c, 0x6e, 0x65, - 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x2e, 0x44, 0x45, 0x52, 0x50, 0x4d, 0x61, 0x70, 0x30, 0x01, 0x12, 0x5b, 0x0a, 0x0a, 0x43, 0x6f, + 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x12, 0x23, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, + 0x2e, 0x74, 0x61, 0x69, 0x6c, 0x6e, 0x65, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6f, 0x72, + 0x64, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, + 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x74, 0x61, 0x69, 0x6c, 0x6e, 0x65, 0x74, 0x2e, 0x76, 0x32, + 0x2e, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2f, 0x63, 0x6f, 0x64, 0x65, + 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x74, 0x61, 0x69, 0x6c, 0x6e, 0x65, 0x74, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1072,7 
+1154,7 @@ func file_tailnet_proto_tailnet_proto_rawDescGZIP() []byte { } var file_tailnet_proto_tailnet_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_tailnet_proto_tailnet_proto_msgTypes = make([]protoimpl.MessageInfo, 16) +var file_tailnet_proto_tailnet_proto_msgTypes = make([]protoimpl.MessageInfo, 17) var file_tailnet_proto_tailnet_proto_goTypes = []interface{}{ (CoordinateResponse_PeerUpdate_Kind)(0), // 0: coder.tailnet.v2.CoordinateResponse.PeerUpdate.Kind (*DERPMap)(nil), // 1: coder.tailnet.v2.DERPMap @@ -1090,35 +1172,37 @@ var file_tailnet_proto_tailnet_proto_goTypes = []interface{}{ (*CoordinateRequest_UpdateSelf)(nil), // 13: coder.tailnet.v2.CoordinateRequest.UpdateSelf (*CoordinateRequest_Disconnect)(nil), // 14: coder.tailnet.v2.CoordinateRequest.Disconnect (*CoordinateRequest_Tunnel)(nil), // 15: coder.tailnet.v2.CoordinateRequest.Tunnel - (*CoordinateResponse_PeerUpdate)(nil), // 16: coder.tailnet.v2.CoordinateResponse.PeerUpdate - (*timestamppb.Timestamp)(nil), // 17: google.protobuf.Timestamp + (*CoordinateRequest_ReadyForHandshake)(nil), // 16: coder.tailnet.v2.CoordinateRequest.ReadyForHandshake + (*CoordinateResponse_PeerUpdate)(nil), // 17: coder.tailnet.v2.CoordinateResponse.PeerUpdate + (*timestamppb.Timestamp)(nil), // 18: google.protobuf.Timestamp } var file_tailnet_proto_tailnet_proto_depIdxs = []int32{ 6, // 0: coder.tailnet.v2.DERPMap.home_params:type_name -> coder.tailnet.v2.DERPMap.HomeParams 8, // 1: coder.tailnet.v2.DERPMap.regions:type_name -> coder.tailnet.v2.DERPMap.RegionsEntry - 17, // 2: coder.tailnet.v2.Node.as_of:type_name -> google.protobuf.Timestamp + 18, // 2: coder.tailnet.v2.Node.as_of:type_name -> google.protobuf.Timestamp 11, // 3: coder.tailnet.v2.Node.derp_latency:type_name -> coder.tailnet.v2.Node.DerpLatencyEntry 12, // 4: coder.tailnet.v2.Node.derp_forced_websocket:type_name -> coder.tailnet.v2.Node.DerpForcedWebsocketEntry 13, // 5: coder.tailnet.v2.CoordinateRequest.update_self:type_name -> 
coder.tailnet.v2.CoordinateRequest.UpdateSelf 14, // 6: coder.tailnet.v2.CoordinateRequest.disconnect:type_name -> coder.tailnet.v2.CoordinateRequest.Disconnect 15, // 7: coder.tailnet.v2.CoordinateRequest.add_tunnel:type_name -> coder.tailnet.v2.CoordinateRequest.Tunnel 15, // 8: coder.tailnet.v2.CoordinateRequest.remove_tunnel:type_name -> coder.tailnet.v2.CoordinateRequest.Tunnel - 16, // 9: coder.tailnet.v2.CoordinateResponse.peer_updates:type_name -> coder.tailnet.v2.CoordinateResponse.PeerUpdate - 9, // 10: coder.tailnet.v2.DERPMap.HomeParams.region_score:type_name -> coder.tailnet.v2.DERPMap.HomeParams.RegionScoreEntry - 10, // 11: coder.tailnet.v2.DERPMap.Region.nodes:type_name -> coder.tailnet.v2.DERPMap.Region.Node - 7, // 12: coder.tailnet.v2.DERPMap.RegionsEntry.value:type_name -> coder.tailnet.v2.DERPMap.Region - 3, // 13: coder.tailnet.v2.CoordinateRequest.UpdateSelf.node:type_name -> coder.tailnet.v2.Node - 3, // 14: coder.tailnet.v2.CoordinateResponse.PeerUpdate.node:type_name -> coder.tailnet.v2.Node - 0, // 15: coder.tailnet.v2.CoordinateResponse.PeerUpdate.kind:type_name -> coder.tailnet.v2.CoordinateResponse.PeerUpdate.Kind - 2, // 16: coder.tailnet.v2.Tailnet.StreamDERPMaps:input_type -> coder.tailnet.v2.StreamDERPMapsRequest - 4, // 17: coder.tailnet.v2.Tailnet.Coordinate:input_type -> coder.tailnet.v2.CoordinateRequest - 1, // 18: coder.tailnet.v2.Tailnet.StreamDERPMaps:output_type -> coder.tailnet.v2.DERPMap - 5, // 19: coder.tailnet.v2.Tailnet.Coordinate:output_type -> coder.tailnet.v2.CoordinateResponse - 18, // [18:20] is the sub-list for method output_type - 16, // [16:18] is the sub-list for method input_type - 16, // [16:16] is the sub-list for extension type_name - 16, // [16:16] is the sub-list for extension extendee - 0, // [0:16] is the sub-list for field type_name + 16, // 9: coder.tailnet.v2.CoordinateRequest.ready_for_handshake:type_name -> coder.tailnet.v2.CoordinateRequest.ReadyForHandshake + 17, // 10: 
coder.tailnet.v2.CoordinateResponse.peer_updates:type_name -> coder.tailnet.v2.CoordinateResponse.PeerUpdate + 9, // 11: coder.tailnet.v2.DERPMap.HomeParams.region_score:type_name -> coder.tailnet.v2.DERPMap.HomeParams.RegionScoreEntry + 10, // 12: coder.tailnet.v2.DERPMap.Region.nodes:type_name -> coder.tailnet.v2.DERPMap.Region.Node + 7, // 13: coder.tailnet.v2.DERPMap.RegionsEntry.value:type_name -> coder.tailnet.v2.DERPMap.Region + 3, // 14: coder.tailnet.v2.CoordinateRequest.UpdateSelf.node:type_name -> coder.tailnet.v2.Node + 3, // 15: coder.tailnet.v2.CoordinateResponse.PeerUpdate.node:type_name -> coder.tailnet.v2.Node + 0, // 16: coder.tailnet.v2.CoordinateResponse.PeerUpdate.kind:type_name -> coder.tailnet.v2.CoordinateResponse.PeerUpdate.Kind + 2, // 17: coder.tailnet.v2.Tailnet.StreamDERPMaps:input_type -> coder.tailnet.v2.StreamDERPMapsRequest + 4, // 18: coder.tailnet.v2.Tailnet.Coordinate:input_type -> coder.tailnet.v2.CoordinateRequest + 1, // 19: coder.tailnet.v2.Tailnet.StreamDERPMaps:output_type -> coder.tailnet.v2.DERPMap + 5, // 20: coder.tailnet.v2.Tailnet.Coordinate:output_type -> coder.tailnet.v2.CoordinateResponse + 19, // [19:21] is the sub-list for method output_type + 17, // [17:19] is the sub-list for method input_type + 17, // [17:17] is the sub-list for extension type_name + 17, // [17:17] is the sub-list for extension extendee + 0, // [0:17] is the sub-list for field type_name } func init() { file_tailnet_proto_tailnet_proto_init() } @@ -1260,6 +1344,18 @@ func file_tailnet_proto_tailnet_proto_init() { } } file_tailnet_proto_tailnet_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CoordinateRequest_ReadyForHandshake); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tailnet_proto_tailnet_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CoordinateResponse_PeerUpdate); i 
{ case 0: return &v.state @@ -1278,7 +1374,7 @@ func file_tailnet_proto_tailnet_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_tailnet_proto_tailnet_proto_rawDesc, NumEnums: 1, - NumMessages: 16, + NumMessages: 17, NumExtensions: 0, NumServices: 1, }, diff --git a/tailnet/proto/tailnet.proto b/tailnet/proto/tailnet.proto index 83445e7579..1e948ebac6 100644 --- a/tailnet/proto/tailnet.proto +++ b/tailnet/proto/tailnet.proto @@ -68,6 +68,15 @@ message CoordinateRequest { } Tunnel add_tunnel = 3; Tunnel remove_tunnel = 4; + + // ReadyForHandskales are sent from destinations back to the source, + // acknowledging receipt of the source's node. If the source starts pinging + // before a ReadyForHandshake, the Wireguard handshake will likely be + // dropped. + message ReadyForHandshake { + bytes id = 1; + } + repeated ReadyForHandshake ready_for_handshake = 5; } message CoordinateResponse { @@ -80,12 +89,14 @@ message CoordinateResponse { NODE = 1; DISCONNECTED = 2; LOST = 3; + READY_FOR_HANDSHAKE = 4; } Kind kind = 3; string reason = 4; } repeated PeerUpdate peer_updates = 1; + string error = 2; } service Tailnet { diff --git a/tailnet/tailnettest/.gitignore b/tailnet/tailnettest/.gitignore new file mode 100644 index 0000000000..d3b709ea9c --- /dev/null +++ b/tailnet/tailnettest/.gitignore @@ -0,0 +1 @@ +gomock_*/ diff --git a/tailnet/tailnettest/coordinateemock.go b/tailnet/tailnettest/coordinateemock.go index 51f2dd2bce..c06243685a 100644 --- a/tailnet/tailnettest/coordinateemock.go +++ b/tailnet/tailnettest/coordinateemock.go @@ -14,6 +14,7 @@ import ( tailnet "github.com/coder/coder/v2/tailnet" proto "github.com/coder/coder/v2/tailnet/proto" + uuid "github.com/google/uuid" gomock "go.uber.org/mock/gomock" ) @@ -64,6 +65,18 @@ func (mr *MockCoordinateeMockRecorder) SetNodeCallback(arg0 any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetNodeCallback", reflect.TypeOf((*MockCoordinatee)(nil).SetNodeCallback), 
arg0) } +// SetTunnelDestination mocks base method. +func (m *MockCoordinatee) SetTunnelDestination(arg0 uuid.UUID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetTunnelDestination", arg0) +} + +// SetTunnelDestination indicates an expected call of SetTunnelDestination. +func (mr *MockCoordinateeMockRecorder) SetTunnelDestination(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTunnelDestination", reflect.TypeOf((*MockCoordinatee)(nil).SetTunnelDestination), arg0) +} + // UpdatePeers mocks base method. func (m *MockCoordinatee) UpdatePeers(arg0 []*proto.CoordinateResponse_PeerUpdate) error { m.ctrl.T.Helper() diff --git a/tailnet/tunnel.go b/tailnet/tunnel.go index bc5becbc94..68b78d4f92 100644 --- a/tailnet/tunnel.go +++ b/tailnet/tunnel.go @@ -52,6 +52,10 @@ func (c ClientCoordinateeAuth) Authorize(req *proto.CoordinateRequest) error { } } + if rfh := req.GetReadyForHandshake(); rfh != nil { + return xerrors.Errorf("clients may not send ready_for_handshake") + } + return nil } @@ -147,6 +151,12 @@ func (s *tunnelStore) findTunnelPeers(id uuid.UUID) []uuid.UUID { return out } +func (s *tunnelStore) tunnelExists(src, dst uuid.UUID) bool { + _, srcOK := s.bySrc[src][dst] + _, dstOK := s.byDst[src][dst] + return srcOK || dstOK +} + func (s *tunnelStore) htmlDebug() []HTMLTunnel { out := make([]HTMLTunnel, 0) for src, dsts := range s.bySrc { diff --git a/tailnet/tunnel_internal_test.go b/tailnet/tunnel_internal_test.go index 3ba7cc4165..b05871f086 100644 --- a/tailnet/tunnel_internal_test.go +++ b/tailnet/tunnel_internal_test.go @@ -43,3 +43,18 @@ func TestTunnelStore_RemoveAll(t *testing.T) { require.Len(t, uut.findTunnelPeers(p2), 0) require.Len(t, uut.findTunnelPeers(p3), 0) } + +func TestTunnelStore_TunnelExists(t *testing.T) { + t.Parallel() + p1 := uuid.UUID{1} + p2 := uuid.UUID{2} + uut := newTunnelStore() + require.False(t, uut.tunnelExists(p1, p2)) + require.False(t, uut.tunnelExists(p2, p1)) + 
uut.add(p1, p2) + require.True(t, uut.tunnelExists(p1, p2)) + require.True(t, uut.tunnelExists(p2, p1)) + uut.remove(p1, p2) + require.False(t, uut.tunnelExists(p1, p2)) + require.False(t, uut.tunnelExists(p2, p1)) +}