Merge branch 'main' into node-20

Muhammad Atif Ali, 2024-04-22 16:51:35 +03:00, committed by GitHub
commit 3c860571b5
60 changed files with 1677 additions and 325 deletions


@ -640,6 +640,7 @@ jobs:
- test-e2e - test-e2e
- offlinedocs - offlinedocs
- sqlc-vet - sqlc-vet
- dependency-license-review
# Allow this job to run even if the needed jobs fail, are skipped or # Allow this job to run even if the needed jobs fail, are skipped or
# cancelled. # cancelled.
if: always() if: always()
@ -656,6 +657,7 @@ jobs:
echo "- test-js: ${{ needs.test-js.result }}" echo "- test-js: ${{ needs.test-js.result }}"
echo "- test-e2e: ${{ needs.test-e2e.result }}" echo "- test-e2e: ${{ needs.test-e2e.result }}"
echo "- offlinedocs: ${{ needs.offlinedocs.result }}" echo "- offlinedocs: ${{ needs.offlinedocs.result }}"
echo "- dependency-license-review: ${{ needs.dependency-license-review.result }}"
echo echo
# We allow skipped jobs to pass, but not failed or cancelled jobs. # We allow skipped jobs to pass, but not failed or cancelled jobs.
@ -896,3 +898,42 @@ jobs:
- name: Setup and run sqlc vet - name: Setup and run sqlc vet
run: | run: |
make sqlc-vet make sqlc-vet
# dependency-license-review checks that no license-incompatible dependencies have been introduced.
# This action is not intended to do a vulnerability check since that is handled by a separate action.
dependency-license-review:
runs-on: ubuntu-latest
if: github.ref != 'refs/heads/main'
steps:
- name: "Checkout Repository"
uses: actions/checkout@v4
- name: "Dependency Review"
id: review
uses: actions/dependency-review-action@v4
with:
allow-licenses: Apache-2.0, BSD-2-Clause, BSD-3-Clause, CC0-1.0, ISC, MIT, MIT-0, MPL-2.0
license-check: true
vulnerability-check: false
- name: "Report"
# make sure this step runs even if the previous failed
if: always()
shell: bash
env:
VULNERABLE_CHANGES: ${{ steps.review.outputs.invalid-license-changes }}
run: |
fields=( "unlicensed" "unresolved" "forbidden" )
# It's unfortunate that we have to do this, but the action does not support failing on
# an unknown license. An unknown dependency could easily have a GPL license, which
# would be problematic for us.
# Track https://github.com/actions/dependency-review-action/issues/672 for when
# we can remove this brittle workaround.
for field in "${fields[@]}"; do
# Use jq to check if the array is not empty
if [[ $(echo "$VULNERABLE_CHANGES" | jq ".${field} | length") -ne 0 ]]; then
echo "Invalid or unknown licenses detected, contact @sreya to ensure your added dependency falls under one of our allowed licenses."
echo "$VULNERABLE_CHANGES" | jq
exit 1
fi
done
echo "No incompatible licenses detected"


@ -965,7 +965,7 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd.
defer shutdownConns() defer shutdownConns()
// Ensures that old database entries are cleaned up over time! // Ensures that old database entries are cleaned up over time!
purger := dbpurge.New(ctx, logger, options.Database) purger := dbpurge.New(ctx, logger.Named("dbpurge"), options.Database)
defer purger.Close() defer purger.Close()
// Updates workspace usage // Updates workspace usage


@ -25,12 +25,8 @@ import (
"golang.org/x/xerrors" "golang.org/x/xerrors"
"gvisor.dev/gvisor/pkg/tcpip/adapters/gonet" "gvisor.dev/gvisor/pkg/tcpip/adapters/gonet"
"github.com/coder/retry"
"github.com/coder/serpent"
"cdr.dev/slog" "cdr.dev/slog"
"cdr.dev/slog/sloggers/sloghuman" "cdr.dev/slog/sloggers/sloghuman"
"github.com/coder/coder/v2/cli/cliui" "github.com/coder/coder/v2/cli/cliui"
"github.com/coder/coder/v2/cli/cliutil" "github.com/coder/coder/v2/cli/cliutil"
"github.com/coder/coder/v2/coderd/autobuild/notify" "github.com/coder/coder/v2/coderd/autobuild/notify"
@ -38,6 +34,9 @@ import (
"github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/codersdk/workspacesdk" "github.com/coder/coder/v2/codersdk/workspacesdk"
"github.com/coder/coder/v2/cryptorand" "github.com/coder/coder/v2/cryptorand"
"github.com/coder/coder/v2/pty"
"github.com/coder/retry"
"github.com/coder/serpent"
) )
var ( var (
@ -56,6 +55,7 @@ func (r *RootCmd) ssh() *serpent.Command {
noWait bool noWait bool
logDirPath string logDirPath string
remoteForwards []string remoteForwards []string
env []string
disableAutostart bool disableAutostart bool
) )
client := new(codersdk.Client) client := new(codersdk.Client)
@ -145,16 +145,23 @@ func (r *RootCmd) ssh() *serpent.Command {
stack := newCloserStack(ctx, logger) stack := newCloserStack(ctx, logger)
defer stack.close(nil) defer stack.close(nil)
if len(remoteForwards) > 0 { for _, remoteForward := range remoteForwards {
for _, remoteForward := range remoteForwards { isValid := validateRemoteForward(remoteForward)
isValid := validateRemoteForward(remoteForward) if !isValid {
if !isValid { return xerrors.Errorf(`invalid format of remote-forward, expected: remote_port:local_address:local_port`)
return xerrors.Errorf(`invalid format of remote-forward, expected: remote_port:local_address:local_port`)
}
if isValid && stdio {
return xerrors.Errorf(`remote-forward can't be enabled in the stdio mode`)
}
} }
if isValid && stdio {
return xerrors.Errorf(`remote-forward can't be enabled in the stdio mode`)
}
}
var parsedEnv [][2]string
for _, e := range env {
k, v, ok := strings.Cut(e, "=")
if !ok {
return xerrors.Errorf("invalid environment variable setting %q", e)
}
parsedEnv = append(parsedEnv, [2]string{k, v})
} }
workspace, workspaceAgent, err := getWorkspaceAndAgent(ctx, inv, client, !disableAutostart, inv.Args[0]) workspace, workspaceAgent, err := getWorkspaceAndAgent(ctx, inv, client, !disableAutostart, inv.Args[0])
@ -341,15 +348,22 @@ func (r *RootCmd) ssh() *serpent.Command {
} }
} }
stdoutFile, validOut := inv.Stdout.(*os.File)
stdinFile, validIn := inv.Stdin.(*os.File) stdinFile, validIn := inv.Stdin.(*os.File)
if validOut && validIn && isatty.IsTerminal(stdoutFile.Fd()) { stdoutFile, validOut := inv.Stdout.(*os.File)
state, err := term.MakeRaw(int(stdinFile.Fd())) if validIn && validOut && isatty.IsTerminal(stdinFile.Fd()) && isatty.IsTerminal(stdoutFile.Fd()) {
inState, err := pty.MakeInputRaw(stdinFile.Fd())
if err != nil { if err != nil {
return err return err
} }
defer func() { defer func() {
_ = term.Restore(int(stdinFile.Fd()), state) _ = pty.RestoreTerminal(stdinFile.Fd(), inState)
}()
outState, err := pty.MakeOutputRaw(stdoutFile.Fd())
if err != nil {
return err
}
defer func() {
_ = pty.RestoreTerminal(stdoutFile.Fd(), outState)
}() }()
windowChange := listenWindowSize(ctx) windowChange := listenWindowSize(ctx)
@ -369,6 +383,12 @@ func (r *RootCmd) ssh() *serpent.Command {
}() }()
} }
for _, kv := range parsedEnv {
if err := sshSession.Setenv(kv[0], kv[1]); err != nil {
return xerrors.Errorf("setenv: %w", err)
}
}
err = sshSession.RequestPty("xterm-256color", 128, 128, gossh.TerminalModes{}) err = sshSession.RequestPty("xterm-256color", 128, 128, gossh.TerminalModes{})
if err != nil { if err != nil {
return xerrors.Errorf("request pty: %w", err) return xerrors.Errorf("request pty: %w", err)
@ -477,6 +497,13 @@ func (r *RootCmd) ssh() *serpent.Command {
FlagShorthand: "R", FlagShorthand: "R",
Value: serpent.StringArrayOf(&remoteForwards), Value: serpent.StringArrayOf(&remoteForwards),
}, },
{
Flag: "env",
Description: "Set environment variable(s) for session (key1=value1,key2=value2,...).",
Env: "CODER_SSH_ENV",
FlagShorthand: "e",
Value: serpent.StringArrayOf(&env),
},
sshDisableAutostartOption(serpent.BoolOf(&disableAutostart)), sshDisableAutostartOption(serpent.BoolOf(&disableAutostart)),
} }
return cmd return cmd
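
A minimal standalone sketch of the new `--env` handling above: each `KEY=VALUE` pair is split with `strings.Cut` before the values are sent via `sshSession.Setenv`. The `parseEnv` helper is invented for illustration; the error message mirrors the diff.

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/xerrors"
)

// parseEnv splits KEY=VALUE pairs the same way the command does,
// rejecting any entry without an "=" separator.
func parseEnv(env []string) ([][2]string, error) {
	var parsed [][2]string
	for _, e := range env {
		k, v, ok := strings.Cut(e, "=")
		if !ok {
			return nil, xerrors.Errorf("invalid environment variable setting %q", e)
		}
		parsed = append(parsed, [2]string{k, v})
	}
	return parsed, nil
}

func main() {
	kvs, err := parseEnv([]string{"foo=bar", "baz=qux"})
	fmt.Println(kvs, err) // [[foo bar] [baz qux]] <nil>
}
```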


@ -968,6 +968,49 @@ func TestSSH(t *testing.T) {
<-cmdDone <-cmdDone
}) })
t.Run("Env", func(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("Test not supported on windows")
}
t.Parallel()
client, workspace, agentToken := setupWorkspaceForAgent(t)
_ = agenttest.New(t, client.URL, agentToken)
coderdtest.AwaitWorkspaceAgents(t, client, workspace.ID)
inv, root := clitest.New(t,
"ssh",
workspace.Name,
"--env",
"foo=bar,baz=qux",
)
clitest.SetupConfig(t, client, root)
pty := ptytest.New(t).Attach(inv)
inv.Stderr = pty.Output()
// Wait super long so this doesn't flake on -race test.
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitSuperLong)
defer cancel()
w := clitest.StartWithWaiter(t, inv.WithContext(ctx))
defer w.Wait() // We don't care about any exit error (exit code 255: SSH connection ended unexpectedly).
// Since something was output, it should be safe to write input.
// This could show a prompt or "running startup scripts", so it's
// not indicative of the SSH connection being ready.
_ = pty.Peek(ctx, 1)
// Ensure the SSH connection is ready by testing the shell
// input/output.
pty.WriteLine("echo $foo $baz")
pty.ExpectMatchContext(ctx, "bar qux")
// And we're done.
pty.WriteLine("exit")
})
t.Run("RemoteForwardUnixSocket", func(t *testing.T) { t.Run("RemoteForwardUnixSocket", func(t *testing.T) {
if runtime.GOOS == "windows" { if runtime.GOOS == "windows" {
t.Skip("Test not supported on windows") t.Skip("Test not supported on windows")


@ -9,6 +9,9 @@ OPTIONS:
--disable-autostart bool, $CODER_SSH_DISABLE_AUTOSTART (default: false) --disable-autostart bool, $CODER_SSH_DISABLE_AUTOSTART (default: false)
Disable starting the workspace automatically when connecting via SSH. Disable starting the workspace automatically when connecting via SSH.
-e, --env string-array, $CODER_SSH_ENV
Set environment variable(s) for session (key1=value1,key2=value2,...).
-A, --forward-agent bool, $CODER_SSH_FORWARD_AGENT -A, --forward-agent bool, $CODER_SSH_FORWARD_AGENT
Specifies whether to forward the SSH agent specified in Specifies whether to forward the SSH agent specified in
$SSH_AUTH_SOCK. $SSH_AUTH_SOCK.


@ -4,6 +4,7 @@ import (
"context" "context"
"crypto/x509" "crypto/x509"
"encoding/pem" "encoding/pem"
"runtime"
"testing" "testing"
"time" "time"
@ -14,6 +15,11 @@ import (
func TestValidate(t *testing.T) { func TestValidate(t *testing.T) {
t.Parallel() t.Parallel()
if runtime.GOOS == "darwin" {
// This test fails on macOS for some reason. See https://github.com/coder/coder/issues/12978
t.Skip()
}
mustTime := func(layout string, value string) time.Time { mustTime := func(layout string, value string) time.Time {
ti, err := time.Parse(layout, value) ti, err := time.Parse(layout, value)
require.NoError(t, err) require.NoError(t, err)


@ -103,7 +103,7 @@ func (q *sqlQuerier) InTx(function func(Store) error, txOpts *sql.TxOptions) err
// Transaction succeeded. // Transaction succeeded.
return nil return nil
} }
if err != nil && !IsSerializedError(err) { if !IsSerializedError(err) {
// We should only retry if the error is a serialization error. // We should only retry if the error is a serialization error.
return err return err
} }
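
The change drops a redundant `err != nil` check: by this point in `InTx` the error is known to be non-nil, and only serialization failures warrant another attempt. A hedged sketch of that retry shape, with the attempt count and helper names assumed:

```go
package main

import (
	"errors"
	"fmt"
)

// errSerialization stands in for a Postgres serialization failure (SQLSTATE 40001).
var errSerialization = errors.New("serialization error")

// runTxWithRetry re-runs the transaction body only when it failed due to a
// serialization conflict, mirroring the control flow of InTx above.
func runTxWithRetry(attempts int, execTx func() error, isSerializedError func(error) bool) error {
	var err error
	for i := 0; i < attempts; i++ {
		err = execTx()
		if err == nil {
			// Transaction succeeded.
			return nil
		}
		if !isSerializedError(err) {
			// Only serialization errors are worth retrying.
			return err
		}
	}
	return err
}

func main() {
	tries := 0
	err := runTxWithRetry(3, func() error {
		tries++
		if tries < 3 {
			return errSerialization
		}
		return nil
	}, func(err error) bool { return errors.Is(err, errSerialization) })
	fmt.Println(tries, err) // 3 <nil>
}
```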


@ -1506,13 +1506,65 @@ func (q *FakeQuerier) DeleteOldWorkspaceAgentStats(_ context.Context) error {
q.mutex.Lock() q.mutex.Lock()
defer q.mutex.Unlock() defer q.mutex.Unlock()
/*
DELETE FROM
workspace_agent_stats
WHERE
created_at < (
SELECT
COALESCE(
-- When generating initial template usage stats, all the
-- raw agent stats are needed, after that only ~30 mins
-- from last rollup is needed. Deployment stats seem to
-- use between 15 mins and 1 hour of data. We keep a
-- little bit more (1 day) just in case.
MAX(start_time) - '1 days'::interval,
-- Fall back to 6 months ago if there are no template
-- usage stats so that we don't delete the data before
-- it's rolled up.
NOW() - '6 months'::interval
)
FROM
template_usage_stats
)
AND created_at < (
-- Delete at most in batches of 3 days (with a batch size of 3 days, we
-- can clear out the previous 6 months of data in ~60 iterations) whilst
-- keeping the DB load relatively low.
SELECT
COALESCE(MIN(created_at) + '3 days'::interval, NOW())
FROM
workspace_agent_stats
);
*/
now := dbtime.Now() now := dbtime.Now()
sixMonthInterval := 6 * 30 * 24 * time.Hour var limit time.Time
sixMonthsAgo := now.Add(-sixMonthInterval) // MAX
for _, stat := range q.templateUsageStats {
if stat.StartTime.After(limit) {
limit = stat.StartTime.AddDate(0, 0, -1)
}
}
// COALESCE
if limit.IsZero() {
limit = now.AddDate(0, -6, 0)
}
var validStats []database.WorkspaceAgentStat var validStats []database.WorkspaceAgentStat
var batchLimit time.Time
for _, stat := range q.workspaceAgentStats { for _, stat := range q.workspaceAgentStats {
if stat.CreatedAt.Before(sixMonthsAgo) { if batchLimit.IsZero() || stat.CreatedAt.Before(batchLimit) {
batchLimit = stat.CreatedAt
}
}
if batchLimit.IsZero() {
batchLimit = time.Now()
} else {
batchLimit = batchLimit.AddDate(0, 0, 3)
}
for _, stat := range q.workspaceAgentStats {
if stat.CreatedAt.Before(limit) && stat.CreatedAt.Before(batchLimit) {
continue continue
} }
validStats = append(validStats, stat) validStats = append(validStats, stat)


@ -2,11 +2,10 @@ package dbpurge
import ( import (
"context" "context"
"errors"
"io" "io"
"time" "time"
"golang.org/x/sync/errgroup" "golang.org/x/xerrors"
"cdr.dev/slog" "cdr.dev/slog"
@ -24,7 +23,6 @@ const (
// This is for cleaning up old, unused resources from the database that take up space. // This is for cleaning up old, unused resources from the database that take up space.
func New(ctx context.Context, logger slog.Logger, db database.Store) io.Closer { func New(ctx context.Context, logger slog.Logger, db database.Store) io.Closer {
closed := make(chan struct{}) closed := make(chan struct{})
logger = logger.Named("dbpurge")
ctx, cancelFunc := context.WithCancel(ctx) ctx, cancelFunc := context.WithCancel(ctx)
//nolint:gocritic // The system purges old db records without user input. //nolint:gocritic // The system purges old db records without user input.
@ -36,22 +34,37 @@ func New(ctx context.Context, logger slog.Logger, db database.Store) io.Closer {
doTick := func() { doTick := func() {
defer ticker.Reset(delay) defer ticker.Reset(delay)
var eg errgroup.Group start := time.Now()
eg.Go(func() error { // Start a transaction to grab advisory lock, we don't want to run
return db.DeleteOldWorkspaceAgentLogs(ctx) // multiple purges at the same time (multiple replicas).
}) if err := db.InTx(func(tx database.Store) error {
eg.Go(func() error { // Acquire a lock to ensure that only one instance of the
return db.DeleteOldWorkspaceAgentStats(ctx) // purge is running at a time.
}) ok, err := tx.TryAcquireLock(ctx, database.LockIDDBPurge)
eg.Go(func() error { if err != nil {
return db.DeleteOldProvisionerDaemons(ctx) return err
})
err := eg.Wait()
if err != nil {
if errors.Is(err, context.Canceled) {
return
} }
if !ok {
logger.Debug(ctx, "unable to acquire lock for purging old database entries, skipping")
return nil
}
if err := tx.DeleteOldWorkspaceAgentLogs(ctx); err != nil {
return xerrors.Errorf("failed to delete old workspace agent logs: %w", err)
}
if err := tx.DeleteOldWorkspaceAgentStats(ctx); err != nil {
return xerrors.Errorf("failed to delete old workspace agent stats: %w", err)
}
if err := tx.DeleteOldProvisionerDaemons(ctx); err != nil {
return xerrors.Errorf("failed to delete old provisioner daemons: %w", err)
}
logger.Info(ctx, "purged old database entries", slog.F("duration", time.Since(start)))
return nil
}, nil); err != nil {
logger.Error(ctx, "failed to purge old database entries", slog.Error(err)) logger.Error(ctx, "failed to purge old database entries", slog.Error(err))
return
} }
} }
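
The purge loop now takes a Postgres advisory lock inside a transaction so that only one replica purges per tick; the others skip with a debug log. A condensed sketch of the pattern, with `store` reduced to just the calls used here (the interface, including the single-argument `InTx`, is an illustrative stand-in for `database.Store`):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// store is a stand-in for the subset of database.Store the purge needs.
type store interface {
	InTx(fn func(tx store) error) error
	TryAcquireLock(ctx context.Context, id int64) (bool, error)
	DeleteOldWorkspaceAgentStats(ctx context.Context) error
}

const lockIDDBPurge int64 = 4 // illustrative; the real value comes from the iota block.

// doTick runs one purge pass. Holding the advisory lock for the duration of
// the transaction means concurrent replicas skip their tick instead of racing.
func doTick(ctx context.Context, db store) error {
	start := time.Now()
	return db.InTx(func(tx store) error {
		ok, err := tx.TryAcquireLock(ctx, lockIDDBPurge)
		if err != nil {
			return err
		}
		if !ok {
			fmt.Println("another replica holds the purge lock, skipping")
			return nil
		}
		if err := tx.DeleteOldWorkspaceAgentStats(ctx); err != nil {
			return err
		}
		fmt.Println("purged old database entries in", time.Since(start))
		return nil
	})
}

func main() {} // sketch only; wire doTick to a ticker as in the diff.
```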


@ -11,12 +11,14 @@ import (
"go.uber.org/goleak" "go.uber.org/goleak"
"golang.org/x/exp/slices" "golang.org/x/exp/slices"
"cdr.dev/slog"
"cdr.dev/slog/sloggers/slogtest" "cdr.dev/slog/sloggers/slogtest"
"github.com/coder/coder/v2/coderd/database" "github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/dbgen" "github.com/coder/coder/v2/coderd/database/dbgen"
"github.com/coder/coder/v2/coderd/database/dbmem" "github.com/coder/coder/v2/coderd/database/dbmem"
"github.com/coder/coder/v2/coderd/database/dbpurge" "github.com/coder/coder/v2/coderd/database/dbpurge"
"github.com/coder/coder/v2/coderd/database/dbrollup"
"github.com/coder/coder/v2/coderd/database/dbtestutil" "github.com/coder/coder/v2/coderd/database/dbtestutil"
"github.com/coder/coder/v2/coderd/database/dbtime" "github.com/coder/coder/v2/coderd/database/dbtime"
"github.com/coder/coder/v2/provisionerd/proto" "github.com/coder/coder/v2/provisionerd/proto"
@ -40,27 +42,62 @@ func TestDeleteOldWorkspaceAgentStats(t *testing.T) {
t.Parallel() t.Parallel()
db, _ := dbtestutil.NewDB(t) db, _ := dbtestutil.NewDB(t)
logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}) logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug)
now := dbtime.Now()
defer func() {
if t.Failed() {
t.Logf("Test failed, printing rows...")
ctx := testutil.Context(t, testutil.WaitShort)
wasRows, err := db.GetWorkspaceAgentStats(ctx, now.AddDate(0, -7, 0))
if err == nil {
for _, row := range wasRows {
t.Logf("workspace agent stat: %v", row)
}
}
tusRows, err := db.GetTemplateUsageStats(context.Background(), database.GetTemplateUsageStatsParams{
StartTime: now.AddDate(0, -7, 0),
EndTime: now,
})
if err == nil {
for _, row := range tusRows {
t.Logf("template usage stat: %v", row)
}
}
}
}()
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort) ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
defer cancel() defer cancel()
now := dbtime.Now()
// given // given
// Let's use RxBytes to identify stat entries. // Let's use RxBytes to identify stat entries.
// Stat inserted 6 months + 1 hour ago, should be deleted. // Stat inserted 6 months + 1 hour ago, should be deleted.
first := dbgen.WorkspaceAgentStat(t, db, database.WorkspaceAgentStat{ first := dbgen.WorkspaceAgentStat(t, db, database.WorkspaceAgentStat{
CreatedAt: now.Add(-6*30*24*time.Hour - time.Hour), CreatedAt: now.AddDate(0, -6, 0).Add(-time.Hour),
ConnectionCount: 1,
ConnectionMedianLatencyMS: 1, ConnectionMedianLatencyMS: 1,
RxBytes: 1111, RxBytes: 1111,
SessionCountSSH: 1,
}) })
// Stat inserted 6 months - 1 hour ago, should not be deleted. // Stat inserted 6 months - 1 hour ago, should not be deleted before rollup.
second := dbgen.WorkspaceAgentStat(t, db, database.WorkspaceAgentStat{ second := dbgen.WorkspaceAgentStat(t, db, database.WorkspaceAgentStat{
CreatedAt: now.Add(-5*30*24*time.Hour + time.Hour), CreatedAt: now.AddDate(0, -6, 0).Add(time.Hour),
ConnectionCount: 1,
ConnectionMedianLatencyMS: 1, ConnectionMedianLatencyMS: 1,
RxBytes: 2222, RxBytes: 2222,
SessionCountSSH: 1,
})
// Stat inserted 6 months - 1 day - 2 hours ago, should not be deleted at all.
third := dbgen.WorkspaceAgentStat(t, db, database.WorkspaceAgentStat{
CreatedAt: now.AddDate(0, -6, 0).AddDate(0, 0, 1).Add(2 * time.Hour),
ConnectionCount: 1,
ConnectionMedianLatencyMS: 1,
RxBytes: 3333,
SessionCountSSH: 1,
}) })
// when // when
@ -70,15 +107,39 @@ func TestDeleteOldWorkspaceAgentStats(t *testing.T) {
// then // then
var stats []database.GetWorkspaceAgentStatsRow var stats []database.GetWorkspaceAgentStatsRow
var err error var err error
require.Eventually(t, func() bool { require.Eventuallyf(t, func() bool {
// Query all stats created not earlier than 7 months ago // Query all stats created not earlier than 7 months ago
stats, err = db.GetWorkspaceAgentStats(ctx, now.Add(-7*30*24*time.Hour)) stats, err = db.GetWorkspaceAgentStats(ctx, now.AddDate(0, -7, 0))
if err != nil { if err != nil {
return false return false
} }
return !containsWorkspaceAgentStat(stats, first) && return !containsWorkspaceAgentStat(stats, first) &&
containsWorkspaceAgentStat(stats, second) containsWorkspaceAgentStat(stats, second)
}, testutil.WaitShort, testutil.IntervalFast, stats) }, testutil.WaitShort, testutil.IntervalFast, "it should delete old stats: %v", stats)
// when
events := make(chan dbrollup.Event)
rolluper := dbrollup.New(logger, db, dbrollup.WithEventChannel(events))
defer rolluper.Close()
_, _ = <-events, <-events
// Start a new purger to immediately trigger delete after rollup.
_ = closer.Close()
closer = dbpurge.New(ctx, logger, db)
defer closer.Close()
// then
require.Eventuallyf(t, func() bool {
// Query all stats created not earlier than 7 months ago
stats, err = db.GetWorkspaceAgentStats(ctx, now.AddDate(0, -7, 0))
if err != nil {
return false
}
return !containsWorkspaceAgentStat(stats, first) &&
!containsWorkspaceAgentStat(stats, second) &&
containsWorkspaceAgentStat(stats, third)
}, testutil.WaitShort, testutil.IntervalFast, "it should delete old stats after rollup: %v", stats)
} }
func containsWorkspaceAgentStat(stats []database.GetWorkspaceAgentStatsRow, needle database.WorkspaceAgentStat) bool { func containsWorkspaceAgentStat(stats []database.GetWorkspaceAgentStatsRow, needle database.WorkspaceAgentStat) bool {


@ -9,6 +9,7 @@ const (
LockIDDeploymentSetup = iota + 1 LockIDDeploymentSetup = iota + 1
LockIDEnterpriseDeploymentSetup LockIDEnterpriseDeploymentSetup
LockIDDBRollup LockIDDBRollup
LockIDDBPurge
) )
// GenLockID generates a unique and consistent lock ID from a given string. // GenLockID generates a unique and consistent lock ID from a given string.
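
`GenLockID` itself isn't shown in this hunk; a plausible sketch of such a helper hashes the name into a stable `int64` advisory-lock ID. The FNV choice below is an assumption:

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// genLockID hashes a string to a stable int64 suitable for use as a Postgres
// advisory-lock ID. FNV here is a guess; any stable 64-bit hash would do.
func genLockID(name string) int64 {
	h := fnv.New64()
	_, _ = h.Write([]byte(name))
	return int64(h.Sum64())
}

func main() {
	fmt.Println(genLockID("lock_id_db_purge")) // same value on every run
}
```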


@ -11,11 +11,15 @@ CREATE OR REPLACE FUNCTION revert_migrate_external_auth_providers_to_jsonb(jsonb
DECLARE DECLARE
result text[]; result text[];
BEGIN BEGIN
SELECT IF jsonb_typeof($1) = 'null' THEN
array_agg(id::text) INTO result result := '{}';
FROM ( ELSE
SELECT SELECT
jsonb_array_elements($1) ->> 'id' AS id) AS external_auth_provider_ids; array_agg(id::text) INTO result
FROM (
SELECT
jsonb_array_elements($1) ->> 'id' AS id) AS external_auth_provider_ids;
END IF;
RETURN result; RETURN result;
END; END;
$$; $$;


@ -10111,7 +10111,35 @@ func (q *sqlQuerier) UpdateWorkspaceAgentStartupByID(ctx context.Context, arg Up
} }
const deleteOldWorkspaceAgentStats = `-- name: DeleteOldWorkspaceAgentStats :exec const deleteOldWorkspaceAgentStats = `-- name: DeleteOldWorkspaceAgentStats :exec
DELETE FROM workspace_agent_stats WHERE created_at < NOW() - INTERVAL '180 days' DELETE FROM
workspace_agent_stats
WHERE
created_at < (
SELECT
COALESCE(
-- When generating initial template usage stats, all the
-- raw agent stats are needed, after that only ~30 mins
-- from last rollup is needed. Deployment stats seem to
-- use between 15 mins and 1 hour of data. We keep a
-- little bit more (1 day) just in case.
MAX(start_time) - '1 days'::interval,
-- Fall back to 6 months ago if there are no template
-- usage stats so that we don't delete the data before
-- it's rolled up.
NOW() - '6 months'::interval
)
FROM
template_usage_stats
)
AND created_at < (
-- Delete at most in batches of 3 days (with a batch size of 3 days, we
-- can clear out the previous 6 months of data in ~60 iterations) whilst
-- keeping the DB load relatively low.
SELECT
COALESCE(MIN(created_at) + '3 days'::interval, NOW())
FROM
workspace_agent_stats
)
` `
func (q *sqlQuerier) DeleteOldWorkspaceAgentStats(ctx context.Context) error { func (q *sqlQuerier) DeleteOldWorkspaceAgentStats(ctx context.Context) error {


@ -66,7 +66,35 @@ ORDER BY
date ASC; date ASC;
-- name: DeleteOldWorkspaceAgentStats :exec -- name: DeleteOldWorkspaceAgentStats :exec
DELETE FROM workspace_agent_stats WHERE created_at < NOW() - INTERVAL '180 days'; DELETE FROM
workspace_agent_stats
WHERE
created_at < (
SELECT
COALESCE(
-- When generating initial template usage stats, all the
-- raw agent stats are needed, after that only ~30 mins
-- from last rollup is needed. Deployment stats seem to
-- use between 15 mins and 1 hour of data. We keep a
-- little bit more (1 day) just in case.
MAX(start_time) - '1 days'::interval,
-- Fall back to 6 months ago if there are no template
-- usage stats so that we don't delete the data before
-- it's rolled up.
NOW() - '6 months'::interval
)
FROM
template_usage_stats
)
AND created_at < (
-- Delete at most in batches of 3 days (with a batch size of 3 days, we
-- can clear out the previous 6 months of data in ~60 iterations) whilst
-- keeping the DB load relatively low.
SELECT
COALESCE(MIN(created_at) + '3 days'::interval, NOW())
FROM
workspace_agent_stats
);
-- name: GetDeploymentWorkspaceAgentStats :one -- name: GetDeploymentWorkspaceAgentStats :one
WITH agent_stats AS ( WITH agent_stats AS (


@ -162,6 +162,7 @@ func (c *Cache) refreshDeploymentStats(ctx context.Context) error {
} }
func (c *Cache) run(ctx context.Context, name string, interval time.Duration, refresh func(context.Context) error) { func (c *Cache) run(ctx context.Context, name string, interval time.Duration, refresh func(context.Context) error) {
logger := c.log.With(slog.F("name", name), slog.F("interval", interval))
ticker := time.NewTicker(interval) ticker := time.NewTicker(interval)
defer ticker.Stop() defer ticker.Stop()
@ -173,15 +174,13 @@ func (c *Cache) run(ctx context.Context, name string, interval time.Duration, re
if ctx.Err() != nil { if ctx.Err() != nil {
return return
} }
c.log.Error(ctx, "refresh", slog.Error(err)) if xerrors.Is(err, sql.ErrNoRows) {
break
}
logger.Error(ctx, "refresh metrics failed", slog.Error(err))
continue continue
} }
c.log.Debug( logger.Debug(ctx, "metrics refreshed", slog.F("took", time.Since(start)))
ctx,
name+" metrics refreshed",
slog.F("took", time.Since(start)),
slog.F("interval", interval),
)
break break
} }
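
The refresh loop now stops retrying quietly when the query reports `sql.ErrNoRows` (nothing to refresh yet) and only logs-and-retries other failures. A small sketch of that decision, with the `refreshOnce` helper invented for illustration:

```go
package main

import (
	"context"
	"database/sql"
	"errors"
	"fmt"
)

// refreshOnce mirrors one pass of the loop above: give up quietly when the
// context is done or when there is simply no data yet (sql.ErrNoRows), and
// report (then retry) any other failure.
func refreshOnce(ctx context.Context, refresh func(context.Context) error) (retry bool) {
	if err := refresh(ctx); err != nil {
		if ctx.Err() != nil {
			return false
		}
		if errors.Is(err, sql.ErrNoRows) {
			return false // nothing to refresh yet; wait for the next tick
		}
		fmt.Println("refresh metrics failed:", err)
		return true
	}
	return false
}

func main() {
	retry := refreshOnce(context.Background(), func(context.Context) error { return sql.ErrNoRows })
	fmt.Println(retry) // false: no rows is not treated as an error
}
```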


@ -20,6 +20,8 @@ import (
"github.com/coder/retry" "github.com/coder/retry"
) )
var tailnetConnectorGracefulTimeout = time.Second
// tailnetConn is the subset of the tailnet.Conn methods that tailnetAPIConnector uses. It is // tailnetConn is the subset of the tailnet.Conn methods that tailnetAPIConnector uses. It is
// included so that we can fake it in testing. // included so that we can fake it in testing.
// //
@ -86,7 +88,7 @@ func runTailnetAPIConnector(
func (tac *tailnetAPIConnector) manageGracefulTimeout() { func (tac *tailnetAPIConnector) manageGracefulTimeout() {
defer tac.cancelGracefulCtx() defer tac.cancelGracefulCtx()
<-tac.ctx.Done() <-tac.ctx.Done()
timer := time.NewTimer(time.Second) timer := time.NewTimer(tailnetConnectorGracefulTimeout)
defer timer.Stop() defer timer.Stop()
select { select {
case <-tac.closed: case <-tac.closed:


@ -24,6 +24,11 @@ import (
"github.com/coder/coder/v2/testutil" "github.com/coder/coder/v2/testutil"
) )
func init() {
// Give tests a bit more time to timeout. Darwin is particularly slow.
tailnetConnectorGracefulTimeout = 5 * time.Second
}
func TestTailnetAPIConnector_Disconnects(t *testing.T) { func TestTailnetAPIConnector_Disconnects(t *testing.T) {
t.Parallel() t.Parallel()
testCtx := testutil.Context(t, testutil.WaitShort) testCtx := testutil.Context(t, testutil.WaitShort)


@ -25,16 +25,12 @@ application. The following providers are supported:
- [Azure DevOps](https://learn.microsoft.com/en-us/azure/devops/integrate/get-started/authentication/oauth?view=azure-devops) - [Azure DevOps](https://learn.microsoft.com/en-us/azure/devops/integrate/get-started/authentication/oauth?view=azure-devops)
- [Azure DevOps (via Entra ID)](https://learn.microsoft.com/en-us/entra/architecture/auth-oauth2) - [Azure DevOps (via Entra ID)](https://learn.microsoft.com/en-us/entra/architecture/auth-oauth2)
Example callback URL: The next step is to [configure the Coder server](./configure.md) to use the
`https://coder.example.com/external-auth/primary-github/callback`. Use an OAuth application by setting the following environment variables:
arbitrary ID for your provider (e.g. `primary-github`).
Set the following environment variables to
[configure the Coder server](./configure.md):
```env ```env
CODER_EXTERNAL_AUTH_0_ID="primary-github" CODER_EXTERNAL_AUTH_0_ID="<USER_DEFINED_ID>"
CODER_EXTERNAL_AUTH_0_TYPE=github|gitlab|azure-devops|bitbucket-cloud|bitbucket-server|<name of service e.g. jfrog> CODER_EXTERNAL_AUTH_0_TYPE=<github|gitlab|azure-devops|bitbucket-cloud|bitbucket-server|etc>
CODER_EXTERNAL_AUTH_0_CLIENT_ID=xxxxxx CODER_EXTERNAL_AUTH_0_CLIENT_ID=xxxxxx
CODER_EXTERNAL_AUTH_0_CLIENT_SECRET=xxxxxxx CODER_EXTERNAL_AUTH_0_CLIENT_SECRET=xxxxxxx
@ -43,11 +39,22 @@ CODER_EXTERNAL_AUTH_0_DISPLAY_NAME="Google Calendar"
CODER_EXTERNAL_AUTH_0_DISPLAY_ICON="https://mycustomicon.com/google.svg" CODER_EXTERNAL_AUTH_0_DISPLAY_ICON="https://mycustomicon.com/google.svg"
``` ```
The `CODER_EXTERNAL_AUTH_0_ID` environment variable is used for internal
reference. Therefore, it can be set arbitrarily (e.g., `primary-github` for your
GitHub provider).
### GitHub ### GitHub
> If you don't require fine-grained access control, it's easier to configure a
> GitHub OAuth app!
1. [Create a GitHub App](https://docs.github.com/en/apps/creating-github-apps/registering-a-github-app/registering-a-github-app) 1. [Create a GitHub App](https://docs.github.com/en/apps/creating-github-apps/registering-a-github-app/registering-a-github-app)
to enable fine-grained access to specific repositories, or a subset of
permissions for security. - Set the callback URL to
`https://coder.example.com/external-auth/USER_DEFINED_ID/callback`.
- Deactivate Webhooks.
- Enable fine-grained access to specific repositories or a subset of
permissions for security.
![Register GitHub App](../images/admin/github-app-register.png) ![Register GitHub App](../images/admin/github-app-register.png)
@ -69,6 +76,13 @@ CODER_EXTERNAL_AUTH_0_DISPLAY_ICON="https://mycustomicon.com/google.svg"
![Install GitHub App](../images/admin/github-app-install.png) ![Install GitHub App](../images/admin/github-app-install.png)
```env
CODER_EXTERNAL_AUTH_0_ID="USER_DEFINED_ID"
CODER_EXTERNAL_AUTH_0_TYPE=github
CODER_EXTERNAL_AUTH_0_CLIENT_ID=xxxxxx
CODER_EXTERNAL_AUTH_0_CLIENT_SECRET=xxxxxxx
```
### GitHub Enterprise ### GitHub Enterprise
GitHub Enterprise requires the following environment variables: GitHub Enterprise requires the following environment variables:
@ -204,6 +218,50 @@ add this to the
git config --global credential.useHttpPath true git config --global credential.useHttpPath true
``` ```
### Kubernetes environment variables
If you deployed Coder with Kubernetes, you can set the environment variables in
your `values.yaml` file:
```yaml
coder:
env:
# […]
- name: CODER_EXTERNAL_AUTH_0_ID
value: USER_DEFINED_ID
- name: CODER_EXTERNAL_AUTH_0_TYPE
value: github
- name: CODER_EXTERNAL_AUTH_0_CLIENT_ID
valueFrom:
secretKeyRef:
name: github-primary-basic-auth
key: client-id
- name: CODER_EXTERNAL_AUTH_0_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: github-primary-basic-auth
key: client-secret
```
You can set the secrets by creating a `github-primary-basic-auth.yaml` file and
applying it.
```yaml
apiVersion: v1
kind: Secret
metadata:
name: github-primary-basic-auth
type: Opaque
stringData:
client-secret: xxxxxxxxx
client-id: xxxxxxxxx
```
Make sure to restart the affected pods for the change to take effect.
## Require git authentication in templates ## Require git authentication in templates
If your template requires git authentication (e.g. running `git clone` in the If your template requires git authentication (e.g. running `git clone` in the

docs/cli/ssh.md generated

@ -95,6 +95,15 @@ Specify the directory containing SSH diagnostic log files.
Enable remote port forwarding (remote_port:local_address:local_port). Enable remote port forwarding (remote_port:local_address:local_port).
### -e, --env
| | |
| ----------- | --------------------------- |
| Type | <code>string-array</code> |
| Environment | <code>$CODER_SSH_ENV</code> |
Set environment variable(s) for session (key1=value1,key2=value2,...).
### --disable-autostart ### --disable-autostart
| | | | | |


@ -59,7 +59,7 @@ A brief overview of all files contained in the bundle is provided below:
requires the Coder deployment to be available. requires the Coder deployment to be available.
2. Ensure you have the Coder CLI installed on a local machine. See 2. Ensure you have the Coder CLI installed on a local machine. See
(installation)[../install/index.md] for steps on how to do this. [installation](../install/index.md) for steps on how to do this.
> Note: It is recommended to generate a support bundle from a location > Note: It is recommended to generate a support bundle from a location
> experiencing workspace connectivity issues. > experiencing workspace connectivity issues.


@ -10,7 +10,7 @@ deployment.
We support two release channels: We support two release channels:
[mainline](https://github.com/coder/coder/2.10.0) for the edge version of Coder [mainline](https://github.com/coder/coder/2.10.0) for the edge version of Coder
and [stable](https://github.com/coder/coder/releases/latest) for those with and [stable](https://github.com/coder/coder/releases/latest) for those with
lower tolerance for fault. We field our mainline releases publicly for two weeks lower tolerance for fault. We field our mainline releases publicly for one month
before promoting them to stable. before promoting them to stable.
### Mainline releases ### Mainline releases
@ -46,11 +46,11 @@ pages.
## Release schedule ## Release schedule
| Release name | Date | Status | | Release name | Release Date | Status |
| ------------ | ------------------ | ---------------- | | ------------ | ------------------ | ---------------- |
| 2.7.0 | January 01, 2024 | Not Supported | | 2.7.x | January 01, 2024 | Not Supported |
| 2.8.0 | February 06, 2024 | Security Support | | 2.8.x | February 06, 2024 | Security Support |
| 2.9.0 | March 07, 2024 | Stable | | 2.9.x | March 07, 2024 | Stable |
| 2.10.0 | April 03, 2024 | Mainline | | 2.10.x | April 03, 2024 | Mainline |
| 2.11.0 | May 07, 2024 | Not Released | | 2.11.x | May 07, 2024 | Not Released |
| 2.12.0 | June 04, 2024 | Not Released | | 2.12.x | June 04, 2024 | Not Released |


@ -91,7 +91,7 @@ provider "kubernetes" {
Alternatively, you can authenticate with remote clusters with ServiceAccount Alternatively, you can authenticate with remote clusters with ServiceAccount
tokens. Coder can store these secrets on your behalf with tokens. Coder can store these secrets on your behalf with
[managed Terraform variables](../../templates/parameters.md#managed-terraform-variables). [managed Terraform variables](../../templates/variables.md).
Alternatively, these could also be fetched from Kubernetes secrets or even Alternatively, these could also be fetched from Kubernetes secrets or even
[Hashicorp Vault](https://registry.terraform.io/providers/hashicorp/vault/latest/docs/data-sources/generic_secret). [Hashicorp Vault](https://registry.terraform.io/providers/hashicorp/vault/latest/docs/data-sources/generic_secret).


@ -13,6 +13,7 @@ import (
"time" "time"
"github.com/coder/coder/v2/coderd/appearance" "github.com/coder/coder/v2/coderd/appearance"
"github.com/coder/coder/v2/coderd/database"
agplportsharing "github.com/coder/coder/v2/coderd/portsharing" agplportsharing "github.com/coder/coder/v2/coderd/portsharing"
"github.com/coder/coder/v2/enterprise/coderd/portsharing" "github.com/coder/coder/v2/enterprise/coderd/portsharing"
@ -27,6 +28,7 @@ import (
"github.com/coder/coder/v2/coderd" "github.com/coder/coder/v2/coderd"
agplaudit "github.com/coder/coder/v2/coderd/audit" agplaudit "github.com/coder/coder/v2/coderd/audit"
agpldbauthz "github.com/coder/coder/v2/coderd/database/dbauthz" agpldbauthz "github.com/coder/coder/v2/coderd/database/dbauthz"
"github.com/coder/coder/v2/coderd/database/dbtime"
"github.com/coder/coder/v2/coderd/healthcheck" "github.com/coder/coder/v2/coderd/healthcheck"
"github.com/coder/coder/v2/coderd/httpapi" "github.com/coder/coder/v2/coderd/httpapi"
"github.com/coder/coder/v2/coderd/httpmw" "github.com/coder/coder/v2/coderd/httpmw"
@ -64,6 +66,11 @@ func New(ctx context.Context, options *Options) (_ *API, err error) {
if options.Options.Authorizer == nil { if options.Options.Authorizer == nil {
options.Options.Authorizer = rbac.NewCachingAuthorizer(options.PrometheusRegistry) options.Options.Authorizer = rbac.NewCachingAuthorizer(options.PrometheusRegistry)
} }
if options.ReplicaErrorGracePeriod == 0 {
// This will prevent the error from being shown for a minute
// from when an additional replica was started.
options.ReplicaErrorGracePeriod = time.Minute
}
ctx, cancelFunc := context.WithCancel(ctx) ctx, cancelFunc := context.WithCancel(ctx)
@ -429,6 +436,7 @@ type Options struct {
// Used for high availability. // Used for high availability.
ReplicaSyncUpdateInterval time.Duration ReplicaSyncUpdateInterval time.Duration
ReplicaErrorGracePeriod time.Duration
DERPServerRelayAddress string DERPServerRelayAddress string
DERPServerRegionID int DERPServerRegionID int
@ -525,9 +533,24 @@ func (api *API) updateEntitlements(ctx context.Context) error {
api.entitlementsUpdateMu.Lock() api.entitlementsUpdateMu.Lock()
defer api.entitlementsUpdateMu.Unlock() defer api.entitlementsUpdateMu.Unlock()
replicas := api.replicaManager.AllPrimary()
agedReplicas := make([]database.Replica, 0, len(replicas))
for _, replica := range replicas {
// If a replica is less than the update interval old, we don't
// want to display a warning. In the open-source version of Coder,
// Kubernetes Pods will start up before shutting down the other,
// and we don't want to display a warning in that case.
//
// Only display warnings for long-lived replicas!
if dbtime.Now().Sub(replica.StartedAt) < api.ReplicaErrorGracePeriod {
continue
}
agedReplicas = append(agedReplicas, replica)
}
entitlements, err := license.Entitlements( entitlements, err := license.Entitlements(
ctx, api.Database, ctx, api.Database,
api.Logger, len(api.replicaManager.AllPrimary()), len(api.ExternalAuthConfigs), api.LicenseKeys, map[codersdk.FeatureName]bool{ api.Logger, len(agedReplicas), len(api.ExternalAuthConfigs), api.LicenseKeys, map[codersdk.FeatureName]bool{
codersdk.FeatureAuditLog: api.AuditLogging, codersdk.FeatureAuditLog: api.AuditLogging,
codersdk.FeatureBrowserOnly: api.BrowserOnly, codersdk.FeatureBrowserOnly: api.BrowserOnly,
codersdk.FeatureSCIM: len(api.SCIMAPIKey) != 0, codersdk.FeatureSCIM: len(api.SCIMAPIKey) != 0,
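
The entitlements computation now counts only replicas older than `ReplicaErrorGracePeriod`, so a Kubernetes rolling restart, where the new pod starts before the old one exits, doesn't briefly trip a multi-replica warning. A standalone sketch of the filter under simplified types (the `replica` struct is illustrative):

```go
package main

import (
	"fmt"
	"time"
)

type replica struct {
	ID        int
	StartedAt time.Time
}

// filterAged drops replicas that started within the grace period, so a
// rolling restart (new pod up before the old one exits) doesn't trip a warning.
func filterAged(replicas []replica, grace time.Duration, now time.Time) []replica {
	aged := make([]replica, 0, len(replicas))
	for _, r := range replicas {
		if now.Sub(r.StartedAt) < grace {
			continue
		}
		aged = append(aged, r)
	}
	return aged
}

func main() {
	now := time.Now()
	rs := []replica{
		{ID: 1, StartedAt: now.Add(-2 * time.Minute)},
		{ID: 2, StartedAt: now.Add(-10 * time.Second)}, // still in grace
	}
	fmt.Println(len(filterAged(rs, time.Minute, now))) // 1
}
```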


@ -57,6 +57,7 @@ type Options struct {
DontAddLicense bool DontAddLicense bool
DontAddFirstUser bool DontAddFirstUser bool
ReplicaSyncUpdateInterval time.Duration ReplicaSyncUpdateInterval time.Duration
ReplicaErrorGracePeriod time.Duration
ExternalTokenEncryption []dbcrypt.Cipher ExternalTokenEncryption []dbcrypt.Cipher
ProvisionerDaemonPSK string ProvisionerDaemonPSK string
} }
@ -93,6 +94,7 @@ func NewWithAPI(t *testing.T, options *Options) (
DERPServerRelayAddress: oop.AccessURL.String(), DERPServerRelayAddress: oop.AccessURL.String(),
DERPServerRegionID: oop.BaseDERPMap.RegionIDs()[0], DERPServerRegionID: oop.BaseDERPMap.RegionIDs()[0],
ReplicaSyncUpdateInterval: options.ReplicaSyncUpdateInterval, ReplicaSyncUpdateInterval: options.ReplicaSyncUpdateInterval,
ReplicaErrorGracePeriod: options.ReplicaErrorGracePeriod,
Options: oop, Options: oop,
EntitlementsUpdateInterval: options.EntitlementsUpdateInterval, EntitlementsUpdateInterval: options.EntitlementsUpdateInterval,
LicenseKeys: Keys, LicenseKeys: Keys,


@ -4,6 +4,7 @@ import (
"context" "context"
"crypto/tls" "crypto/tls"
"testing" "testing"
"time"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
@ -22,9 +23,42 @@ import (
func TestReplicas(t *testing.T) { func TestReplicas(t *testing.T) {
t.Parallel() t.Parallel()
if !dbtestutil.WillUsePostgres() { if !dbtestutil.WillUsePostgres() {
t.Skip("only test with real postgresF") t.Skip("only test with real postgres")
} }
t.Run("ErrorWithoutLicense", func(t *testing.T) { t.Run("ErrorWithoutLicense", func(t *testing.T) {
t.Parallel()
// This will error because replicas are expected to instantly report
// errors when the license is not present.
db, pubsub := dbtestutil.NewDB(t)
firstClient, _ := coderdenttest.New(t, &coderdenttest.Options{
Options: &coderdtest.Options{
IncludeProvisionerDaemon: true,
Database: db,
Pubsub: pubsub,
},
DontAddLicense: true,
ReplicaErrorGracePeriod: time.Nanosecond,
})
secondClient, _, secondAPI, _ := coderdenttest.NewWithAPI(t, &coderdenttest.Options{
Options: &coderdtest.Options{
Database: db,
Pubsub: pubsub,
},
DontAddFirstUser: true,
DontAddLicense: true,
ReplicaErrorGracePeriod: time.Nanosecond,
})
secondClient.SetSessionToken(firstClient.SessionToken())
ents, err := secondClient.Entitlements(context.Background())
require.NoError(t, err)
require.Len(t, ents.Errors, 1)
_ = secondAPI.Close()
ents, err = firstClient.Entitlements(context.Background())
require.NoError(t, err)
require.Len(t, ents.Warnings, 0)
})
t.Run("DoesNotErrorBeforeGrace", func(t *testing.T) {
t.Parallel() t.Parallel()
db, pubsub := dbtestutil.NewDB(t) db, pubsub := dbtestutil.NewDB(t)
firstClient, _ := coderdenttest.New(t, &coderdenttest.Options{ firstClient, _ := coderdenttest.New(t, &coderdenttest.Options{
@ -46,12 +80,12 @@ func TestReplicas(t *testing.T) {
secondClient.SetSessionToken(firstClient.SessionToken()) secondClient.SetSessionToken(firstClient.SessionToken())
ents, err := secondClient.Entitlements(context.Background()) ents, err := secondClient.Entitlements(context.Background())
require.NoError(t, err) require.NoError(t, err)
require.Len(t, ents.Errors, 1) require.Len(t, ents.Errors, 0)
_ = secondAPI.Close() _ = secondAPI.Close()
ents, err = firstClient.Entitlements(context.Background()) ents, err = firstClient.Entitlements(context.Background())
require.NoError(t, err) require.NoError(t, err)
require.Len(t, ents.Warnings, 0) require.Len(t, ents.Errors, 0)
}) })
t.Run("ConnectAcrossMultiple", func(t *testing.T) { t.Run("ConnectAcrossMultiple", func(t *testing.T) {
t.Parallel() t.Parallel()


@ -2,6 +2,8 @@ package tailnet
import ( import (
"context" "context"
"fmt"
"slices"
"sync" "sync"
"sync/atomic" "sync/atomic"
"time" "time"
@ -30,10 +32,13 @@ type connIO struct {
responses chan<- *proto.CoordinateResponse responses chan<- *proto.CoordinateResponse
bindings chan<- binding bindings chan<- binding
tunnels chan<- tunnel tunnels chan<- tunnel
rfhs chan<- readyForHandshake
auth agpl.CoordinateeAuth auth agpl.CoordinateeAuth
mu sync.Mutex mu sync.Mutex
closed bool closed bool
disconnected bool disconnected bool
// latest is the most recent, unfiltered snapshot of the mappings we know about
latest []mapping
name string name string
start int64 start int64
@ -46,6 +51,7 @@ func newConnIO(coordContext context.Context,
logger slog.Logger, logger slog.Logger,
bindings chan<- binding, bindings chan<- binding,
tunnels chan<- tunnel, tunnels chan<- tunnel,
rfhs chan<- readyForHandshake,
requests <-chan *proto.CoordinateRequest, requests <-chan *proto.CoordinateRequest,
responses chan<- *proto.CoordinateResponse, responses chan<- *proto.CoordinateResponse,
id uuid.UUID, id uuid.UUID,
@ -64,6 +70,7 @@ func newConnIO(coordContext context.Context,
responses: responses, responses: responses,
bindings: bindings, bindings: bindings,
tunnels: tunnels, tunnels: tunnels,
rfhs: rfhs,
auth: auth, auth: auth,
name: name, name: name,
start: now, start: now,
@ -190,9 +197,54 @@ func (c *connIO) handleRequest(req *proto.CoordinateRequest) error {
c.disconnected = true c.disconnected = true
return errDisconnect return errDisconnect
} }
if req.ReadyForHandshake != nil {
c.logger.Debug(c.peerCtx, "got ready for handshake ", slog.F("rfh", req.ReadyForHandshake))
for _, rfh := range req.ReadyForHandshake {
dst, err := uuid.FromBytes(rfh.Id)
if err != nil {
c.logger.Error(c.peerCtx, "unable to convert bytes to UUID", slog.Error(err))
// this shouldn't happen unless there is a client error. Close the connection so the client
// doesn't just happily continue thinking everything is fine.
return err
}
mappings := c.getLatestMapping()
if !slices.ContainsFunc(mappings, func(mapping mapping) bool {
return mapping.peer == dst
}) {
c.logger.Debug(c.peerCtx, "cannot process ready for handshake, src isn't peered with dst",
slog.F("dst", dst.String()),
)
_ = c.Enqueue(&proto.CoordinateResponse{
Error: fmt.Sprintf("you do not share a tunnel with %q", dst.String()),
})
return nil
}
if err := agpl.SendCtx(c.coordCtx, c.rfhs, readyForHandshake{
src: c.id,
dst: dst,
}); err != nil {
c.logger.Debug(c.peerCtx, "failed to send ready for handshake", slog.Error(err))
return err
}
}
}
return nil return nil
} }
func (c *connIO) setLatestMapping(latest []mapping) {
c.mu.Lock()
defer c.mu.Unlock()
c.latest = latest
}
func (c *connIO) getLatestMapping() []mapping {
c.mu.Lock()
defer c.mu.Unlock()
return c.latest
}
func (c *connIO) UniqueID() uuid.UUID { func (c *connIO) UniqueID() uuid.UUID {
return c.id return c.id
} }


@ -0,0 +1,73 @@
package tailnet
import (
"context"
"fmt"
"sync"
"github.com/google/uuid"
"cdr.dev/slog"
"github.com/coder/coder/v2/coderd/database/pubsub"
)
type readyForHandshake struct {
src uuid.UUID
dst uuid.UUID
}
type handshaker struct {
ctx context.Context
logger slog.Logger
coordinatorID uuid.UUID
pubsub pubsub.Pubsub
updates <-chan readyForHandshake
workerWG sync.WaitGroup
}
func newHandshaker(ctx context.Context,
logger slog.Logger,
id uuid.UUID,
ps pubsub.Pubsub,
updates <-chan readyForHandshake,
startWorkers <-chan struct{},
) *handshaker {
s := &handshaker{
ctx: ctx,
logger: logger,
coordinatorID: id,
pubsub: ps,
updates: updates,
}
// add to the waitgroup immediately to avoid any races waiting for it before
// the workers start.
s.workerWG.Add(numHandshakerWorkers)
go func() {
<-startWorkers
for i := 0; i < numHandshakerWorkers; i++ {
go s.worker()
}
}()
return s
}
func (t *handshaker) worker() {
defer t.workerWG.Done()
for {
select {
case <-t.ctx.Done():
t.logger.Debug(t.ctx, "handshaker worker exiting", slog.Error(t.ctx.Err()))
return
case rfh := <-t.updates:
err := t.pubsub.Publish(eventReadyForHandshake, []byte(fmt.Sprintf(
"%s,%s", rfh.dst.String(), rfh.src.String(),
)))
if err != nil {
t.logger.Error(t.ctx, "publish ready for handshake", slog.Error(err))
}
}
}
}


@ -0,0 +1,47 @@
package tailnet_test
import (
"context"
"testing"
"github.com/stretchr/testify/require"
"cdr.dev/slog"
"cdr.dev/slog/sloggers/slogtest"
"github.com/coder/coder/v2/coderd/database/dbtestutil"
"github.com/coder/coder/v2/enterprise/tailnet"
agpltest "github.com/coder/coder/v2/tailnet/test"
"github.com/coder/coder/v2/testutil"
)
func TestPGCoordinator_ReadyForHandshake_OK(t *testing.T) {
t.Parallel()
if !dbtestutil.WillUsePostgres() {
t.Skip("test only with postgres")
}
store, ps := dbtestutil.NewDB(t)
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitSuperLong)
defer cancel()
logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
coord1, err := tailnet.NewPGCoord(ctx, logger.Named("coord1"), ps, store)
require.NoError(t, err)
defer coord1.Close()
agpltest.ReadyForHandshakeTest(ctx, t, coord1)
}
func TestPGCoordinator_ReadyForHandshake_NoPermission(t *testing.T) {
t.Parallel()
if !dbtestutil.WillUsePostgres() {
t.Skip("test only with postgres")
}
store, ps := dbtestutil.NewDB(t)
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitSuperLong)
defer cancel()
logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
coord1, err := tailnet.NewPGCoord(ctx, logger.Named("coord1"), ps, store)
require.NoError(t, err)
defer coord1.Close()
agpltest.ReadyForHandshakeNoPermissionTest(ctx, t, coord1)
}


@ -9,8 +9,6 @@ import (
"sync/atomic" "sync/atomic"
"time" "time"
"github.com/coder/coder/v2/tailnet/proto"
"github.com/cenkalti/backoff/v4" "github.com/cenkalti/backoff/v4"
"github.com/google/uuid" "github.com/google/uuid"
"golang.org/x/xerrors" "golang.org/x/xerrors"
@ -22,25 +20,31 @@ import (
"github.com/coder/coder/v2/coderd/database/pubsub" "github.com/coder/coder/v2/coderd/database/pubsub"
"github.com/coder/coder/v2/coderd/rbac" "github.com/coder/coder/v2/coderd/rbac"
agpl "github.com/coder/coder/v2/tailnet" agpl "github.com/coder/coder/v2/tailnet"
"github.com/coder/coder/v2/tailnet/proto"
) )
const ( const (
EventHeartbeats = "tailnet_coordinator_heartbeat" EventHeartbeats = "tailnet_coordinator_heartbeat"
eventPeerUpdate = "tailnet_peer_update" eventPeerUpdate = "tailnet_peer_update"
eventTunnelUpdate = "tailnet_tunnel_update" eventTunnelUpdate = "tailnet_tunnel_update"
HeartbeatPeriod = time.Second * 2 eventReadyForHandshake = "tailnet_ready_for_handshake"
MissedHeartbeats = 3 HeartbeatPeriod = time.Second * 2
numQuerierWorkers = 10 MissedHeartbeats = 3
numBinderWorkers = 10 numQuerierWorkers = 10
numTunnelerWorkers = 10 numBinderWorkers = 10
dbMaxBackoff = 10 * time.Second numTunnelerWorkers = 10
cleanupPeriod = time.Hour numHandshakerWorkers = 5
dbMaxBackoff = 10 * time.Second
cleanupPeriod = time.Hour
) )
// pgCoord is a postgres-backed coordinator // pgCoord is a postgres-backed coordinator
// //
// ┌──────────┐ // ┌────────────┐
// ┌────────────► tunneler ├──────────┐ // ┌────────────► handshaker ├────────┐
// │ └────────────┘ │
// │ ┌──────────┐ │
// ├────────────► tunneler ├──────────┤
// │ └──────────┘ │ // │ └──────────┘ │
// │ │ // │ │
// ┌────────┐ ┌────────┐ ┌───▼───┐ // ┌────────┐ ┌────────┐ ┌───▼───┐
@ -78,15 +82,17 @@ type pgCoord struct {
newConnections chan *connIO newConnections chan *connIO
closeConnections chan *connIO closeConnections chan *connIO
tunnelerCh chan tunnel tunnelerCh chan tunnel
handshakerCh chan readyForHandshake
id uuid.UUID id uuid.UUID
cancel context.CancelFunc cancel context.CancelFunc
closeOnce sync.Once closeOnce sync.Once
closed chan struct{} closed chan struct{}
binder *binder binder *binder
tunneler *tunneler tunneler *tunneler
querier *querier handshaker *handshaker
querier *querier
} }
var pgCoordSubject = rbac.Subject{ var pgCoordSubject = rbac.Subject{
@ -126,6 +132,8 @@ func newPGCoordInternal(
ccCh := make(chan *connIO) ccCh := make(chan *connIO)
// for communicating subscriptions with the tunneler // for communicating subscriptions with the tunneler
sCh := make(chan tunnel) sCh := make(chan tunnel)
// for communicating ready for handshakes with the handshaker
rfhCh := make(chan readyForHandshake)
// signals when first heartbeat has been sent, so it's safe to start binding. // signals when first heartbeat has been sent, so it's safe to start binding.
fHB := make(chan struct{}) fHB := make(chan struct{})
@ -145,6 +153,8 @@ func newPGCoordInternal(
closeConnections: ccCh, closeConnections: ccCh,
tunneler: newTunneler(ctx, logger, id, store, sCh, fHB), tunneler: newTunneler(ctx, logger, id, store, sCh, fHB),
tunnelerCh: sCh, tunnelerCh: sCh,
handshaker: newHandshaker(ctx, logger, id, ps, rfhCh, fHB),
handshakerCh: rfhCh,
id: id, id: id,
querier: newQuerier(querierCtx, logger, id, ps, store, id, cCh, ccCh, numQuerierWorkers, fHB), querier: newQuerier(querierCtx, logger, id, ps, store, id, cCh, ccCh, numQuerierWorkers, fHB),
closed: make(chan struct{}), closed: make(chan struct{}),
@ -242,7 +252,7 @@ func (c *pgCoord) Coordinate(
close(resps) close(resps)
return reqs, resps return reqs, resps
} }
cIO := newConnIO(c.ctx, ctx, logger, c.bindings, c.tunnelerCh, reqs, resps, id, name, a) cIO := newConnIO(c.ctx, ctx, logger, c.bindings, c.tunnelerCh, c.handshakerCh, reqs, resps, id, name, a)
err := agpl.SendCtx(c.ctx, c.newConnections, cIO) err := agpl.SendCtx(c.ctx, c.newConnections, cIO)
if err != nil { if err != nil {
// this can only happen if the context is canceled, no need to log // this can only happen if the context is canceled, no need to log
@ -626,8 +636,6 @@ type mapper struct {
c *connIO c *connIO
// latest is the most recent, unfiltered snapshot of the mappings we know about
latest []mapping
// sent is the state of mappings we have actually enqueued; used to compute diffs for updates. // sent is the state of mappings we have actually enqueued; used to compute diffs for updates.
sent map[uuid.UUID]mapping sent map[uuid.UUID]mapping
@ -660,11 +668,11 @@ func (m *mapper) run() {
return return
case mappings := <-m.mappings: case mappings := <-m.mappings:
m.logger.Debug(m.ctx, "got new mappings") m.logger.Debug(m.ctx, "got new mappings")
m.latest = mappings m.c.setLatestMapping(mappings)
best = m.bestMappings(mappings) best = m.bestMappings(mappings)
case <-m.update: case <-m.update:
m.logger.Debug(m.ctx, "triggered update") m.logger.Debug(m.ctx, "triggered update")
best = m.bestMappings(m.latest) best = m.bestMappings(m.c.getLatestMapping())
} }
update := m.bestToUpdate(best) update := m.bestToUpdate(best)
if update == nil { if update == nil {
@ -1067,6 +1075,28 @@ func (q *querier) subscribe() {
}() }()
q.logger.Info(q.ctx, "subscribed to tunnel updates") q.logger.Info(q.ctx, "subscribed to tunnel updates")
var cancelRFH context.CancelFunc
err = backoff.Retry(func() error {
cancelFn, err := q.pubsub.SubscribeWithErr(eventReadyForHandshake, q.listenReadyForHandshake)
if err != nil {
q.logger.Warn(q.ctx, "failed to subscribe to ready for handshakes", slog.Error(err))
return err
}
cancelRFH = cancelFn
return nil
}, bkoff)
if err != nil {
if q.ctx.Err() == nil {
q.logger.Error(q.ctx, "code bug: retry failed before context canceled", slog.Error(err))
}
return
}
defer func() {
q.logger.Info(q.ctx, "canceling ready for handshake subscription")
cancelRFH()
}()
q.logger.Info(q.ctx, "subscribed to ready for handshakes")
// unblock the outer function from returning // unblock the outer function from returning
subscribed <- struct{}{} subscribed <- struct{}{}
@ -1112,6 +1142,7 @@ func (q *querier) listenTunnel(_ context.Context, msg []byte, err error) {
} }
if err != nil { if err != nil {
q.logger.Warn(q.ctx, "unhandled pubsub error", slog.Error(err)) q.logger.Warn(q.ctx, "unhandled pubsub error", slog.Error(err))
return
} }
peers, err := parseTunnelUpdate(string(msg)) peers, err := parseTunnelUpdate(string(msg))
if err != nil { if err != nil {
@ -1133,6 +1164,36 @@ func (q *querier) listenTunnel(_ context.Context, msg []byte, err error) {
} }
} }
func (q *querier) listenReadyForHandshake(_ context.Context, msg []byte, err error) {
if err != nil && !xerrors.Is(err, pubsub.ErrDroppedMessages) {
q.logger.Warn(q.ctx, "unhandled pubsub error", slog.Error(err))
return
}
to, from, err := parseReadyForHandshake(string(msg))
if err != nil {
q.logger.Error(q.ctx, "failed to parse ready for handshake", slog.F("msg", string(msg)), slog.Error(err))
return
}
mk := mKey(to)
q.mu.Lock()
mpr, ok := q.mappers[mk]
q.mu.Unlock()
if !ok {
q.logger.Debug(q.ctx, "ignoring ready for handshake because we have no mapper",
slog.F("peer_id", to))
return
}
_ = mpr.c.Enqueue(&proto.CoordinateResponse{
PeerUpdates: []*proto.CoordinateResponse_PeerUpdate{{
Id: from[:],
Kind: proto.CoordinateResponse_PeerUpdate_READY_FOR_HANDSHAKE,
}},
})
}
func (q *querier) resyncPeerMappings() { func (q *querier) resyncPeerMappings() {
q.mu.Lock() q.mu.Lock()
defer q.mu.Unlock() defer q.mu.Unlock()
@ -1225,6 +1286,21 @@ func parsePeerUpdate(msg string) (peer uuid.UUID, err error) {
return peer, nil return peer, nil
} }
func parseReadyForHandshake(msg string) (to uuid.UUID, from uuid.UUID, err error) {
parts := strings.Split(msg, ",")
if len(parts) != 2 {
return uuid.Nil, uuid.Nil, xerrors.Errorf("expected 2 parts separated by comma")
}
ids := make([]uuid.UUID, 2)
for i, part := range parts {
ids[i], err = uuid.Parse(part)
if err != nil {
return uuid.Nil, uuid.Nil, xerrors.Errorf("failed to parse UUID: %w", err)
}
}
return ids[0], ids[1], nil
}
// mKey identifies a set of node mappings we want to query. // mKey identifies a set of node mappings we want to query.
type mKey uuid.UUID type mKey uuid.UUID
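
On the wire, a ready-for-handshake fans out through pubsub as a plain comma-separated pair of UUIDs, destination first and source second: the handshaker publishes it and `parseReadyForHandshake` decodes it on the querier side. A round-trip sketch of that format (the helper names are invented):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/google/uuid"
	"golang.org/x/xerrors"
)

// encodeRFH builds the pubsub payload the way the handshaker does:
// destination UUID first, then source, comma-separated.
func encodeRFH(dst, src uuid.UUID) string {
	return fmt.Sprintf("%s,%s", dst.String(), src.String())
}

// parseRFH mirrors parseReadyForHandshake from the diff above.
func parseRFH(msg string) (to, from uuid.UUID, err error) {
	parts := strings.Split(msg, ",")
	if len(parts) != 2 {
		return uuid.Nil, uuid.Nil, xerrors.Errorf("expected 2 parts separated by comma")
	}
	ids := make([]uuid.UUID, 2)
	for i, part := range parts {
		ids[i], err = uuid.Parse(part)
		if err != nil {
			return uuid.Nil, uuid.Nil, xerrors.Errorf("failed to parse UUID: %w", err)
		}
	}
	return ids[0], ids[1], nil
}

func main() {
	dst, src := uuid.New(), uuid.New()
	to, from, err := parseRFH(encodeRFH(dst, src))
	fmt.Println(to == dst, from == src, err) // true true <nil>
}
```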


@ -10,9 +10,6 @@ import (
"testing" "testing"
"time" "time"
"github.com/coder/coder/v2/codersdk/workspacesdk"
agpltest "github.com/coder/coder/v2/tailnet/test"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
@ -24,14 +21,15 @@ import (
	"cdr.dev/slog"
	"cdr.dev/slog/sloggers/slogtest"

	"github.com/coder/coder/v2/coderd/database"
	"github.com/coder/coder/v2/coderd/database/dbmock"
	"github.com/coder/coder/v2/coderd/database/dbtestutil"
	"github.com/coder/coder/v2/coderd/database/pubsub"
+	"github.com/coder/coder/v2/codersdk/workspacesdk"
	"github.com/coder/coder/v2/enterprise/tailnet"
	agpl "github.com/coder/coder/v2/tailnet"
	"github.com/coder/coder/v2/tailnet/proto"
+	agpltest "github.com/coder/coder/v2/tailnet/test"
	"github.com/coder/coder/v2/testutil"
)

go.mod

@ -83,7 +83,7 @@ replace github.com/pkg/sftp => github.com/mafredri/sftp v1.13.6-0.20231212144145
require (
	cdr.dev/slog v1.6.2-0.20240126064726-20367d4aede6
-	cloud.google.com/go/compute/metadata v0.2.3
+	cloud.google.com/go/compute/metadata v0.3.0
	github.com/AlecAivazis/survey/v2 v2.3.5
	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d
	github.com/adrg/xdg v0.4.0
@ -104,7 +104,7 @@ require (
	github.com/coder/flog v1.1.0
	github.com/coder/pretty v0.0.0-20230908205945-e89ba86370e0
	github.com/coder/retry v1.5.1
-	github.com/coder/terraform-provider-coder v0.20.1
+	github.com/coder/terraform-provider-coder v0.21.0
	github.com/coder/wgtunnel v0.1.13-0.20231127054351-578bfff9b92a
	github.com/coreos/go-oidc/v3 v3.10.0
	github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
@ -153,7 +153,7 @@ require (
	github.com/mattn/go-isatty v0.0.20
	github.com/mitchellh/go-wordwrap v1.0.1
	github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c
-	github.com/moby/moby v25.0.2+incompatible
+	github.com/moby/moby v26.0.1+incompatible
	github.com/muesli/termenv v0.15.2
	github.com/open-policy-agent/opa v0.58.0
	github.com/ory/dockertest/v3 v3.10.0
@ -200,8 +200,8 @@ require (
	golang.org/x/tools v0.20.0
	golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028
	golang.zx2c4.com/wireguard v0.0.0-20230704135630-469159ecf7d1
-	google.golang.org/api v0.172.0
-	google.golang.org/grpc v1.63.0
+	google.golang.org/api v0.175.0
+	google.golang.org/grpc v1.63.2
	google.golang.org/protobuf v1.33.0
	gopkg.in/DataDog/dd-trace-go.v1 v1.61.0
	gopkg.in/natefinch/lumberjack.v2 v2.2.1
@ -221,6 +221,8 @@ require (
)

require (
+	cloud.google.com/go/auth v0.2.2 // indirect
+	cloud.google.com/go/auth/oauth2adapt v0.2.1 // indirect
	github.com/DataDog/go-libddwaf/v2 v2.3.1 // indirect
	github.com/alecthomas/chroma/v2 v2.13.0 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 // indirect
@ -232,7 +234,6 @@ require (
)

require (
-	cloud.google.com/go/compute v1.24.0 // indirect
	cloud.google.com/go/logging v1.9.0 // indirect
	cloud.google.com/go/longrunning v0.5.5 // indirect
	filippo.io/edwards25519 v1.0.0 // indirect
@ -429,7 +430,7 @@ require (
	google.golang.org/appengine v1.6.8 // indirect
	google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect
	google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	howett.net/plist v1.0.0 // indirect
	inet.af/peercred v0.0.0-20210906144145-0893ea02156a // indirect

go.sum

@ -1,10 +1,12 @@
cdr.dev/slog v1.6.2-0.20240126064726-20367d4aede6 h1:KHblWIE/KHOwQ6lEbMZt6YpcGve2FEZ1sDtrW1Am5UI=
cdr.dev/slog v1.6.2-0.20240126064726-20367d4aede6/go.mod h1:NaoTA7KwopCrnaSb0JXTC0PTp/O/Y83Lndnq0OEV3ZQ=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg=
-cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40=
-cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
-cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
+cloud.google.com/go/auth v0.2.2 h1:gmxNJs4YZYcw6YvKRtVBaF2fyUE6UrWPyzU8jHvYfmI=
+cloud.google.com/go/auth v0.2.2/go.mod h1:2bDNJWtWziDT3Pu1URxHHbkHE/BbOCuyUiKIGcNvafo=
+cloud.google.com/go/auth/oauth2adapt v0.2.1 h1:VSPmMmUlT8CkIZ2PzD9AlLN+R3+D1clXMWHHa6vG/Ag=
+cloud.google.com/go/auth/oauth2adapt v0.2.1/go.mod h1:tOdK/k+D2e4GEwfBRA48dKNQiDsqIXxLh7VU319eV0g=
+cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
+cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
cloud.google.com/go/logging v1.9.0 h1:iEIOXFO9EmSiTjDmfpbRjOxECO7R8C7b8IXUGOj7xZw=
cloud.google.com/go/logging v1.9.0/go.mod h1:1Io0vnZv4onoUnsVUQY3HZ3Igb1nBchky0A0y7BBBhE=
cloud.google.com/go/longrunning v0.5.5 h1:GOE6pZFdSrTb4KAiKnXsJBtlE6mEyaW44oKyMILWnOg=
@ -217,8 +219,8 @@ github.com/coder/ssh v0.0.0-20231128192721-70855dedb788 h1:YoUSJ19E8AtuUFVYBpXuO
github.com/coder/ssh v0.0.0-20231128192721-70855dedb788/go.mod h1:aGQbuCLyhRLMzZF067xc84Lh7JDs1FKwCmF1Crl9dxQ=
github.com/coder/tailscale v1.1.1-0.20240401202854-d329bbdb530d h1:IMvBC1GrCIiZFxpOYRQacZtdjnmsdWNAMilPz+kvdG4=
github.com/coder/tailscale v1.1.1-0.20240401202854-d329bbdb530d/go.mod h1:L8tPrwSi31RAMEMV8rjb0vYTGs7rXt8rAHbqY/p41j4=
-github.com/coder/terraform-provider-coder v0.20.1 h1:hz0yvDl8rDJyDgUlFH8QrGUxFKrwmyAQpOhaoTMEmtY=
-github.com/coder/terraform-provider-coder v0.20.1/go.mod h1:pACHRoXSHBGyY696mLeQ1hR/Ag1G2wFk5bw0mT5Zp2g=
+github.com/coder/terraform-provider-coder v0.21.0 h1:aoDmFJULYZpS66EIAZuNY4IxElaDkdRaWMWp9ScD2R8=
+github.com/coder/terraform-provider-coder v0.21.0/go.mod h1:hqxd15PJeftFBOnGBBPN6WfNQutZtnahwwPeV8U6TyA=
github.com/coder/wgtunnel v0.1.13-0.20231127054351-578bfff9b92a h1:KhR9LUVllMZ+e9lhubZ1HNrtJDgH5YLoTvpKwmrGag4=
github.com/coder/wgtunnel v0.1.13-0.20231127054351-578bfff9b92a/go.mod h1:QzfptVUdEO+XbkzMKx1kw13i9wwpJlfI1RrZ6SNZ0hA=
github.com/coder/wireguard-go v0.0.0-20230807234434-d825b45ccbf5 h1:eDk/42Kj4xN4yfE504LsvcFEo3dWUiCOaBiWJ2uIH2A=
@ -699,8 +701,8 @@ github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374
github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
-github.com/moby/moby v25.0.2+incompatible h1:g2oKRI7vgWkiPHZbBghaPbcV/SuKP1g/YLx0I2nxFT4=
-github.com/moby/moby v25.0.2+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc=
+github.com/moby/moby v26.0.1+incompatible h1:vCKs/AM0lLYnMxFwpf8ycsOekPPPcGn0s0Iczqv3/ec=
+github.com/moby/moby v26.0.1+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@ -1156,8 +1158,8 @@ golang.zx2c4.com/wireguard/wgctrl v0.0.0-20230429144221-925a1e7659e6 h1:CawjfCvY
golang.zx2c4.com/wireguard/wgctrl v0.0.0-20230429144221-925a1e7659e6/go.mod h1:3rxYc4HtVcSG9gVaTs2GEBdehh+sYPOwKtyUWEOTb80=
golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus8eIuExIE=
golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI=
-google.golang.org/api v0.172.0 h1:/1OcMZGPmW1rX2LCu2CmGUD1KXK1+pfzxotxyRUCCdk=
-google.golang.org/api v0.172.0/go.mod h1:+fJZq6QXWfa9pXhnIzsjx4yI22d4aI9ZpLb58gvXjis=
+google.golang.org/api v0.175.0 h1:9bMDh10V9cBuU8N45Wlc3cKkItfqMRV0Fi8UscLEtbY=
+google.golang.org/api v0.175.0/go.mod h1:Rra+ltKu14pps/4xTycZfobMgLpbosoaaL7c+SEMrO8=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
@ -1170,15 +1172,15 @@ google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUE
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo=
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0=
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be h1:LG9vZxsWGOmUKieR8wPAUR3u3MpnYFQZROPIMaXh7/A=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8=
-google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
+google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
+google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=

pty/terminal.go (new file)

@ -0,0 +1,31 @@
package pty
// TerminalState differs per-platform.
type TerminalState struct {
state terminalState
}
// MakeInputRaw calls term.MakeRaw on non-Windows platforms. On Windows it sets
// special terminal modes that enable VT100 emulation as well as setting the
// same modes that term.MakeRaw sets.
//
//nolint:revive
func MakeInputRaw(fd uintptr) (*TerminalState, error) {
return makeInputRaw(fd)
}
// MakeOutputRaw does nothing on non-Windows platforms. On Windows it sets
// special terminal modes that enable VT100 emulation as well as setting the
// same modes that term.MakeRaw sets.
//
//nolint:revive
func MakeOutputRaw(fd uintptr) (*TerminalState, error) {
return makeOutputRaw(fd)
}
// RestoreTerminal restores the terminal back to its original state.
//
//nolint:revive
func RestoreTerminal(fd uintptr, state *TerminalState) error {
return restoreTerminal(fd, state)
}
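For context, a minimal sketch of how these wrappers are intended to be called (assuming the package imports as github.com/coder/coder/v2/pty; this call site is not part of the diff):

package main

import (
	"os"

	"github.com/coder/coder/v2/pty"
)

// withRawTerminal runs fn with stdin in raw mode (plus VT100 processing on
// Windows) and restores the previous terminal state on return.
func withRawTerminal(fn func() error) error {
	state, err := pty.MakeInputRaw(os.Stdin.Fd())
	if err != nil {
		return err
	}
	defer func() { _ = pty.RestoreTerminal(os.Stdin.Fd(), state) }()
	return fn()
}

func main() {
	_ = withRawTerminal(func() error { return nil })
}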

pty/terminal_other.go (new file)

@ -0,0 +1,36 @@
//go:build !windows
// +build !windows
package pty
import "golang.org/x/term"
type terminalState *term.State
//nolint:revive
func makeInputRaw(fd uintptr) (*TerminalState, error) {
s, err := term.MakeRaw(int(fd))
if err != nil {
return nil, err
}
return &TerminalState{
state: s,
}, nil
}
//nolint:revive
func makeOutputRaw(_ uintptr) (*TerminalState, error) {
// Does nothing. makeInputRaw does enough for both input and output.
return &TerminalState{
state: nil,
}, nil
}
//nolint:revive
func restoreTerminal(fd uintptr, state *TerminalState) error {
if state == nil || state.state == nil {
return nil
}
return term.Restore(int(fd), state.state)
}
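The non-Windows path is a thin wrapper over golang.org/x/term; a standalone equivalent, for reference only:

package main

import (
	"fmt"
	"os"

	"golang.org/x/term"
)

func main() {
	fd := int(os.Stdin.Fd())
	// term.MakeRaw disables echo and canonical input processing and returns
	// the previous state, exactly what makeInputRaw wraps above.
	state, err := term.MakeRaw(fd)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer term.Restore(fd, state)

	buf := make([]byte, 1)
	_, _ = os.Stdin.Read(buf) // read a single raw keypress
}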

pty/terminal_windows.go (new file)

@ -0,0 +1,65 @@
//go:build windows
// +build windows
package pty
import "golang.org/x/sys/windows"
type terminalState uint32
// This is adapted from term.MakeRaw, but adds
// ENABLE_VIRTUAL_TERMINAL_PROCESSING to the output mode and
// ENABLE_VIRTUAL_TERMINAL_INPUT to the input mode.
//
// See: https://github.com/golang/term/blob/5b15d269ba1f54e8da86c8aa5574253aea0c2198/term_windows.go#L23
//
// Copyright 2019 The Go Authors. BSD-3-Clause license. See:
// https://github.com/golang/term/blob/master/LICENSE
func makeRaw(handle windows.Handle, input bool) (uint32, error) {
var prevState uint32
if err := windows.GetConsoleMode(handle, &prevState); err != nil {
return 0, err
}
var raw uint32
if input {
raw = prevState &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT)
raw |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT
} else {
raw = prevState | windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
}
if err := windows.SetConsoleMode(handle, raw); err != nil {
return 0, err
}
return prevState, nil
}
//nolint:revive
func makeInputRaw(handle uintptr) (*TerminalState, error) {
prevState, err := makeRaw(windows.Handle(handle), true)
if err != nil {
return nil, err
}
return &TerminalState{
state: terminalState(prevState),
}, nil
}
//nolint:revive
func makeOutputRaw(handle uintptr) (*TerminalState, error) {
prevState, err := makeRaw(windows.Handle(handle), false)
if err != nil {
return nil, err
}
return &TerminalState{
state: terminalState(prevState),
}, nil
}
//nolint:revive
func restoreTerminal(handle uintptr, state *TerminalState) error {
return windows.SetConsoleMode(windows.Handle(handle), uint32(state.state))
}
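makeRaw composes console modes with Go's &^ (AND NOT) operator, which clears the named bits before OR-ing in the VT100 flag. A self-contained illustration with stand-in constants (the hex values mirror the Windows console flags, but treat them as assumptions here):

package main

import "fmt"

const (
	echoInput       uint32 = 0x0004 // stand-in for windows.ENABLE_ECHO_INPUT
	lineInput       uint32 = 0x0002 // stand-in for windows.ENABLE_LINE_INPUT
	virtualTerminal uint32 = 0x0200 // stand-in for windows.ENABLE_VIRTUAL_TERMINAL_INPUT
)

func main() {
	prev := echoInput | lineInput
	// Clear echo/line processing, then enable VT100 input handling.
	raw := prev&^(echoInput|lineInput) | virtualTerminal
	fmt.Printf("%#x\n", raw) // prints 0x200
}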


@ -1,6 +1,8 @@
import type { Page } from "@playwright/test";
import { expect } from "@playwright/test";
import { formatDuration, intervalToDuration } from "date-fns";
import * as API from "api/api";
import type { SerpentOption } from "api/typesGenerated";
import { coderPort } from "./constants";
import { findSessionToken, randomName } from "./helpers";
@ -49,67 +51,119 @@ export const createGroup = async (orgId: string) => {
  return group;
};

-export async function verifyConfigFlag(
+export async function verifyConfigFlagBoolean(
  page: Page,
  config: API.DeploymentConfig,
  flag: string,
) {
const opt = findConfigOption(config, flag);
const type = opt.value ? "option-enabled" : "option-disabled";
const value = opt.value ? "Enabled" : "Disabled";
const configOption = page.locator(
`div.options-table .option-${flag} .${type}`,
);
await expect(configOption).toHaveText(value);
}
export async function verifyConfigFlagNumber(
page: Page,
config: API.DeploymentConfig,
flag: string,
) {
const opt = findConfigOption(config, flag);
const configOption = page.locator(
`div.options-table .option-${flag} .option-value-number`,
);
await expect(configOption).toHaveText(String(opt.value));
}
export async function verifyConfigFlagString(
page: Page,
config: API.DeploymentConfig,
flag: string,
) {
const opt = findConfigOption(config, flag);
const configOption = page.locator(
`div.options-table .option-${flag} .option-value-string`,
);
await expect(configOption).toHaveText(opt.value);
}
export async function verifyConfigFlagEmpty(page: Page, flag: string) {
const configOption = page.locator(
`div.options-table .option-${flag} .option-value-empty`,
);
await expect(configOption).toHaveText("Not set");
}
export async function verifyConfigFlagArray(
page: Page,
config: API.DeploymentConfig,
flag: string,
) {
const opt = findConfigOption(config, flag);
const configOption = page.locator(
`div.options-table .option-${flag} .option-array`,
);
// Verify array of options with simple dots
for (const item of opt.value) {
await expect(configOption.locator("li", { hasText: item })).toBeVisible();
}
}
export async function verifyConfigFlagEntries(
page: Page,
config: API.DeploymentConfig,
flag: string,
) {
const opt = findConfigOption(config, flag);
const configOption = page.locator(
`div.options-table .option-${flag} .option-array`,
);
// Verify array of options with green marks.
Object.entries(opt.value)
.sort((a, b) => a[0].localeCompare(b[0]))
.map(async ([item]) => {
await expect(
configOption.locator(`.option-array-item-${item}.option-enabled`, {
hasText: item,
}),
).toBeVisible();
});
}
export async function verifyConfigFlagDuration(
page: Page,
config: API.DeploymentConfig,
flag: string,
) {
const opt = findConfigOption(config, flag);
const configOption = page.locator(
`div.options-table .option-${flag} .option-value-string`,
);
await expect(configOption).toHaveText(
formatDuration(
// intervalToDuration takes ms, so convert nanoseconds to ms
intervalToDuration({
start: 0,
end: (opt.value as number) / 1e6,
}),
),
);
}
export function findConfigOption(
config: API.DeploymentConfig,
flag: string,
): SerpentOption {
  const opt = config.options.find((option) => option.flag === flag);
  if (opt === undefined) {
    // must be undefined as `false` is expected
    throw new Error(`Option with env ${flag} has undefined value.`);
  }
return opt;
-  // Map option type to test class name.
-  let type: string;
-  let value = opt.value;
-  if (typeof value === "boolean") {
-    // Boolean options map to string (Enabled/Disabled).
-    type = value ? "option-enabled" : "option-disabled";
-    value = value ? "Enabled" : "Disabled";
-  } else if (typeof value === "number") {
-    type = "option-value-number";
-    value = String(value);
-  } else if (!value || value.length === 0) {
-    type = "option-value-empty";
-  } else if (typeof value === "string") {
-    type = "option-value-string";
-  } else if (typeof value === "object") {
-    type = "option-array";
-  } else {
-    type = "option-value-json";
-  }
-  // Special cases
-  if (opt.flag === "strict-transport-security" && opt.value === 0) {
-    type = "option-value-string";
-    value = "Disabled"; // Display "Disabled" instead of zero seconds.
-  }
-  const configOption = page.locator(
-    `div.options-table .option-${flag} .${type}`,
-  );
-  // Verify array of options with green marks.
-  if (typeof value === "object" && !Array.isArray(value)) {
-    Object.entries(value)
-      .sort((a, b) => a[0].localeCompare(b[0]))
-      .map(async ([item]) => {
-        await expect(
-          configOption.locator(`.option-array-item-${item}.option-enabled`, {
-            hasText: item,
-          }),
-        ).toBeVisible();
-      });
-    return;
-  }
-  // Verify array of options with simmple dots
-  if (Array.isArray(value)) {
-    for (const item of value) {
-      await expect(configOption.locator("li", { hasText: item })).toBeVisible();
-    }
-    return;
-  }
-  await expect(configOption).toHaveText(String(value));
}


@ -7,6 +7,7 @@ export const coderPort = process.env.CODER_E2E_PORT
  ? Number(process.env.CODER_E2E_PORT)
  : 3111;
export const prometheusPort = 2114;
export const workspaceProxyPort = 3112;
// Use alternate ports in case we're running in a Coder Workspace.
export const agentPProfPort = 6061;


@ -31,6 +31,7 @@ import {
  type Resource,
  Response,
  type RichParameter,
  type ExternalAuthProviderResource,
} from "./provisionerGenerated";

// requiresEnterpriseLicense will skip the test if we're not running with an enterprise license
@ -49,6 +50,7 @@ export const createWorkspace = async (
  templateName: string,
  richParameters: RichParameter[] = [],
  buildParameters: WorkspaceBuildParameter[] = [],
  useExternalAuthProvider: string | undefined = undefined,
): Promise<string> => {
  await page.goto(`/templates/${templateName}/workspace`, {
    waitUntil: "domcontentloaded",
@ -59,6 +61,25 @@ export const createWorkspace = async (
  await page.getByLabel("name").fill(name);
  await fillParameters(page, richParameters, buildParameters);
if (useExternalAuthProvider !== undefined) {
// Create a new context for the popup which will be created when clicking the button
const popupPromise = page.waitForEvent("popup");
// Find the "Login with <Provider>" button
const externalAuthLoginButton = page
.getByRole("button")
.getByText("Login with GitHub");
await expect(externalAuthLoginButton).toBeVisible();
// Click it
await externalAuthLoginButton.click();
// Wait for authentication to occur
const popup = await popupPromise;
await popup.waitForSelector("text=You are now authenticated.");
}
  await page.getByTestId("form-submit").click();
  await expectUrl(page).toHavePathName("/@admin/" + name);
@ -370,7 +391,7 @@ export const stopAgent = async (cp: ChildProcess, goRun: boolean = true) => {
  await waitUntilUrlIsNotResponding("http://localhost:" + prometheusPort);
};

-const waitUntilUrlIsNotResponding = async (url: string) => {
+export const waitUntilUrlIsNotResponding = async (url: string) => {
  const maxRetries = 30;
  const retryIntervalMs = 1000;
  let retries = 0;
@ -648,6 +669,37 @@ export const echoResponsesWithParameters = (
  };
};
export const echoResponsesWithExternalAuth = (
providers: ExternalAuthProviderResource[],
): EchoProvisionerResponses => {
return {
parse: [
{
parse: {},
},
],
plan: [
{
plan: {
externalAuthProviders: providers,
},
},
],
apply: [
{
apply: {
externalAuthProviders: providers,
resources: [
{
name: "example",
},
],
},
},
],
};
};
export const fillParameters = async (
  page: Page,
  richParameters: RichParameter[] = [],


@ -1,4 +1,6 @@
-import type { Page } from "@playwright/test";
+import type { BrowserContext, Page } from "@playwright/test";
+import http from "http";
+import { coderPort, gitAuth } from "./constants";
export const beforeCoderTest = async (page: Page) => {
  // eslint-disable-next-line no-console -- Show everything that was printed with console.log()
@ -45,6 +47,41 @@ export const beforeCoderTest = async (page: Page) => {
  });
};
export const resetExternalAuthKey = async (context: BrowserContext) => {
// Find the session token so we can destroy the external auth link between tests, to ensure valid authentication happens each time.
const cookies = await context.cookies();
const sessionCookie = cookies.find((c) => c.name === "coder_session_token");
const options = {
method: "DELETE",
hostname: "127.0.0.1",
port: coderPort,
path: `/api/v2/external-auth/${gitAuth.webProvider}?coder_session_token=${sessionCookie?.value}`,
};
const req = http.request(options, (res) => {
let data = "";
res.on("data", (chunk) => {
data += chunk;
});
res.on("end", () => {
// Both 200 (key deleted successfully) and 500 (key was not found) are valid responses.
if (res.statusCode !== 200 && res.statusCode !== 500) {
console.error("failed to delete external auth link", data);
throw new Error(
`failed to delete external auth link: HTTP response ${res.statusCode}`,
);
}
});
});
req.on("error", (err) => {
throw err.message;
});
req.end();
};
const isApiCall = (urlString: string): boolean => {
  const url = new URL(urlString);
  const apiPath = "/api/v2";


@ -115,7 +115,7 @@ export default defineConfig({
      // Tests for Deployment / User Authentication / OIDC
      CODER_OIDC_ISSUER_URL: "https://accounts.google.com",
      CODER_OIDC_EMAIL_DOMAIN: "coder.com",
-      CODER_OIDC_CLIENT_ID: "1234567890", // FIXME: https://github.com/coder/coder/issues/12585
+      CODER_OIDC_CLIENT_ID: "1234567890",
      CODER_OIDC_CLIENT_SECRET: "1234567890Secret",
      CODER_OIDC_ALLOW_SIGNUPS: "false",
      CODER_OIDC_SIGN_IN_TEXT: "Hello",
CODER_OIDC_SIGN_IN_TEXT: "Hello", CODER_OIDC_SIGN_IN_TEXT: "Hello",

site/e2e/proxy.ts (new file)

@ -0,0 +1,41 @@
import { spawn, type ChildProcess, exec } from "child_process";
import { coderMain, coderPort, workspaceProxyPort } from "./constants";
import { waitUntilUrlIsNotResponding } from "./helpers";
export const startWorkspaceProxy = async (
token: string,
): Promise<ChildProcess> => {
const cp = spawn("go", ["run", coderMain, "wsproxy", "server"], {
env: {
...process.env,
CODER_PRIMARY_ACCESS_URL: `http://127.0.0.1:${coderPort}`,
CODER_PROXY_SESSION_TOKEN: token,
CODER_HTTP_ADDRESS: `localhost:${workspaceProxyPort}`,
},
});
cp.stdout.on("data", (data: Buffer) => {
// eslint-disable-next-line no-console -- Log wsproxy activity
console.log(
`[wsproxy] [stdout] [onData] ${data.toString().replace(/\n$/g, "")}`,
);
});
cp.stderr.on("data", (data: Buffer) => {
// eslint-disable-next-line no-console -- Log wsproxy activity
console.log(
`[wsproxy] [stderr] [onData] ${data.toString().replace(/\n$/g, "")}`,
);
});
return cp;
};
export const stopWorkspaceProxy = async (
cp: ChildProcess,
goRun: boolean = true,
) => {
exec(goRun ? `pkill -P ${cp.pid}` : `kill ${cp.pid}`, (error) => {
if (error) {
throw new Error(`exec error: ${JSON.stringify(error)}`);
}
});
await waitUntilUrlIsNotResponding(`http://127.0.0.1:${workspaceProxyPort}`);
};


@ -52,7 +52,7 @@ test("set application logo", async ({ page }) => {
  await incognitoPage.goto("/", { waitUntil: "domcontentloaded" });
  // Verify banner
-  const logo = incognitoPage.locator("img");
+  const logo = incognitoPage.locator("img.application-logo");
  await expect(logo).toHaveAttribute("src", imageLink);
  // Shut down browser


@ -0,0 +1,40 @@
import { test } from "@playwright/test";
import { getDeploymentConfig } from "api/api";
import {
setupApiCalls,
verifyConfigFlagArray,
verifyConfigFlagBoolean,
verifyConfigFlagDuration,
verifyConfigFlagNumber,
verifyConfigFlagString,
} from "../../api";
test("enabled network settings", async ({ page }) => {
await setupApiCalls(page);
const config = await getDeploymentConfig();
await page.goto("/deployment/network", { waitUntil: "domcontentloaded" });
await verifyConfigFlagString(page, config, "access-url");
await verifyConfigFlagBoolean(page, config, "block-direct-connections");
await verifyConfigFlagBoolean(page, config, "browser-only");
await verifyConfigFlagBoolean(page, config, "derp-force-websockets");
await verifyConfigFlagBoolean(page, config, "derp-server-enable");
await verifyConfigFlagString(page, config, "derp-server-region-code");
await verifyConfigFlagString(page, config, "derp-server-region-code");
await verifyConfigFlagNumber(page, config, "derp-server-region-id");
await verifyConfigFlagString(page, config, "derp-server-region-name");
await verifyConfigFlagArray(page, config, "derp-server-stun-addresses");
await verifyConfigFlagBoolean(page, config, "disable-password-auth");
await verifyConfigFlagBoolean(page, config, "disable-session-expiry-refresh");
await verifyConfigFlagDuration(page, config, "max-token-lifetime");
await verifyConfigFlagDuration(page, config, "proxy-health-interval");
await verifyConfigFlagBoolean(page, config, "redirect-to-access-url");
await verifyConfigFlagBoolean(page, config, "secure-auth-cookie");
await verifyConfigFlagDuration(page, config, "session-duration");
await verifyConfigFlagString(page, config, "tls-address");
await verifyConfigFlagBoolean(page, config, "tls-allow-insecure-ciphers");
await verifyConfigFlagString(page, config, "tls-client-auth");
await verifyConfigFlagBoolean(page, config, "tls-enable");
await verifyConfigFlagString(page, config, "tls-min-version");
});


@ -0,0 +1,39 @@
import { test } from "@playwright/test";
import { getDeploymentConfig } from "api/api";
import {
setupApiCalls,
verifyConfigFlagArray,
verifyConfigFlagBoolean,
verifyConfigFlagDuration,
verifyConfigFlagEmpty,
verifyConfigFlagString,
} from "../../api";
test("enabled observability settings", async ({ page }) => {
await setupApiCalls(page);
const config = await getDeploymentConfig();
await page.goto("/deployment/observability", {
waitUntil: "domcontentloaded",
});
await verifyConfigFlagBoolean(page, config, "trace-logs");
await verifyConfigFlagBoolean(page, config, "enable-terraform-debug-mode");
await verifyConfigFlagBoolean(page, config, "enable-terraform-debug-mode");
await verifyConfigFlagDuration(page, config, "health-check-refresh");
await verifyConfigFlagEmpty(page, "health-check-threshold-database");
await verifyConfigFlagString(page, config, "log-human");
await verifyConfigFlagString(page, config, "prometheus-address");
await verifyConfigFlagArray(
page,
config,
"prometheus-aggregate-agent-stats-by",
);
await verifyConfigFlagBoolean(page, config, "prometheus-collect-agent-stats");
await verifyConfigFlagBoolean(page, config, "prometheus-collect-db-metrics");
await verifyConfigFlagBoolean(page, config, "prometheus-enable");
await verifyConfigFlagBoolean(page, config, "trace-datadog");
await verifyConfigFlagBoolean(page, config, "trace");
await verifyConfigFlagBoolean(page, config, "verbose");
await verifyConfigFlagBoolean(page, config, "pprof-enable");
});


@ -1,6 +1,14 @@
-import { test } from "@playwright/test";
+import type { Page } from "@playwright/test";
+import { expect, test } from "@playwright/test";
+import type * as API from "api/api";
import { getDeploymentConfig } from "api/api";
-import { setupApiCalls, verifyConfigFlag } from "../../api";
+import {
+  findConfigOption,
+  setupApiCalls,
+  verifyConfigFlagBoolean,
+  verifyConfigFlagNumber,
+  verifyConfigFlagString,
+} from "../../api";
test("enabled security settings", async ({ page }) => { test("enabled security settings", async ({ page }) => {
await setupApiCalls(page); await setupApiCalls(page);
@ -8,21 +16,32 @@ test("enabled security settings", async ({ page }) => {
await page.goto("/deployment/security", { waitUntil: "domcontentloaded" }); await page.goto("/deployment/security", { waitUntil: "domcontentloaded" });
-  const flags = [
-    "ssh-keygen-algorithm",
-    "secure-auth-cookie",
-    "disable-owner-workspace-access",
-    "tls-redirect-http-to-https",
-    "strict-transport-security",
-    "tls-address",
-    "tls-allow-insecure-ciphers",
-    "tls-client-auth",
-    "tls-enable",
-    "tls-min-version",
-  ];
-  for (const flag of flags) {
-    await verifyConfigFlag(page, config, flag);
-  }
+  await verifyConfigFlagString(page, config, "ssh-keygen-algorithm");
+  await verifyConfigFlagBoolean(page, config, "secure-auth-cookie");
+  await verifyConfigFlagBoolean(page, config, "disable-owner-workspace-access");
+  await verifyConfigFlagBoolean(page, config, "tls-redirect-http-to-https");
+  await verifyStrictTransportSecurity(page, config);
+  await verifyConfigFlagString(page, config, "tls-address");
+  await verifyConfigFlagBoolean(page, config, "tls-allow-insecure-ciphers");
+  await verifyConfigFlagString(page, config, "tls-client-auth");
+  await verifyConfigFlagBoolean(page, config, "tls-enable");
+  await verifyConfigFlagString(page, config, "tls-min-version");
});
async function verifyStrictTransportSecurity(
page: Page,
config: API.DeploymentConfig,
) {
const flag = "strict-transport-security";
const opt = findConfigOption(config, flag);
if (opt.value !== 0) {
await verifyConfigFlagNumber(page, config, flag);
return;
}
const configOption = page.locator(
`div.options-table .option-${flag} .option-value-string`,
);
await expect(configOption).toHaveText("Disabled");
}


@ -1,6 +1,12 @@
import { test } from "@playwright/test";
import { getDeploymentConfig } from "api/api";
-import { setupApiCalls, verifyConfigFlag } from "../../api";
+import {
+  setupApiCalls,
+  verifyConfigFlagArray,
+  verifyConfigFlagBoolean,
+  verifyConfigFlagEntries,
+  verifyConfigFlagString,
+} from "../../api";
test("login with OIDC", async ({ page }) => { test("login with OIDC", async ({ page }) => {
await setupApiCalls(page); await setupApiCalls(page);
@ -8,26 +14,20 @@ test("login with OIDC", async ({ page }) => {
await page.goto("/deployment/userauth", { waitUntil: "domcontentloaded" }); await page.goto("/deployment/userauth", { waitUntil: "domcontentloaded" });
-  const flags = [
-    "oidc-group-auto-create",
-    "oidc-allow-signups",
-    "oidc-auth-url-params",
-    "oidc-client-id",
-    "oidc-email-domain",
-    "oidc-email-field",
-    "oidc-group-mapping",
-    "oidc-ignore-email-verified",
-    "oidc-ignore-userinfo",
-    "oidc-issuer-url",
-    "oidc-group-regex-filter",
-    "oidc-scopes",
-    "oidc-user-role-mapping",
-    "oidc-username-field",
-    "oidc-sign-in-text",
-    "oidc-icon-url",
-  ];
-  for (const flag of flags) {
-    await verifyConfigFlag(page, config, flag);
-  }
+  await verifyConfigFlagBoolean(page, config, "oidc-group-auto-create");
+  await verifyConfigFlagBoolean(page, config, "oidc-allow-signups");
+  await verifyConfigFlagEntries(page, config, "oidc-auth-url-params");
+  await verifyConfigFlagString(page, config, "oidc-client-id");
+  await verifyConfigFlagArray(page, config, "oidc-email-domain");
+  await verifyConfigFlagString(page, config, "oidc-email-field");
+  await verifyConfigFlagEntries(page, config, "oidc-group-mapping");
+  await verifyConfigFlagBoolean(page, config, "oidc-ignore-email-verified");
+  await verifyConfigFlagBoolean(page, config, "oidc-ignore-userinfo");
+  await verifyConfigFlagString(page, config, "oidc-issuer-url");
+  await verifyConfigFlagString(page, config, "oidc-group-regex-filter");
+  await verifyConfigFlagArray(page, config, "oidc-scopes");
+  await verifyConfigFlagEntries(page, config, "oidc-user-role-mapping");
+  await verifyConfigFlagString(page, config, "oidc-username-field");
+  await verifyConfigFlagString(page, config, "oidc-sign-in-text");
+  await verifyConfigFlagString(page, config, "oidc-icon-url");
});


@ -0,0 +1,105 @@
import { test, expect, type Page } from "@playwright/test";
import { createWorkspaceProxy } from "api/api";
import { setupApiCalls } from "../../api";
import { coderPort, workspaceProxyPort } from "../../constants";
import { randomName, requiresEnterpriseLicense } from "../../helpers";
import { startWorkspaceProxy, stopWorkspaceProxy } from "../../proxy";
test("default proxy is online", async ({ page }) => {
requiresEnterpriseLicense();
await setupApiCalls(page);
await page.goto("/deployment/workspace-proxies", {
waitUntil: "domcontentloaded",
});
  // Verify that the default proxy is healthy
const workspaceProxyPrimary = page.locator(
`table.MuiTable-root tr[data-testid="primary"]`,
);
const workspaceProxyName = workspaceProxyPrimary.locator("td.name span");
const workspaceProxyURL = workspaceProxyPrimary.locator("td.url");
const workspaceProxyStatus = workspaceProxyPrimary.locator("td.status span");
await expect(workspaceProxyName).toHaveText("Default");
await expect(workspaceProxyURL).toHaveText("http://localhost:" + coderPort);
await expect(workspaceProxyStatus).toHaveText("Healthy");
});
test("custom proxy is online", async ({ page }) => {
requiresEnterpriseLicense();
await setupApiCalls(page);
const proxyName = randomName();
// Register workspace proxy
const proxyResponse = await createWorkspaceProxy({
name: proxyName,
display_name: "",
icon: "/emojis/1f1e7-1f1f7.png",
});
expect(proxyResponse.proxy_token).toBeDefined();
// Start "wsproxy server"
const proxyServer = await startWorkspaceProxy(proxyResponse.proxy_token);
await waitUntilWorkspaceProxyIsHealthy(page, proxyName);
  // Verify that the custom proxy is healthy
await page.goto("/deployment/workspace-proxies", {
waitUntil: "domcontentloaded",
});
const workspaceProxy = page.locator(`table.MuiTable-root tr`, {
hasText: proxyName,
});
const workspaceProxyName = workspaceProxy.locator("td.name span");
const workspaceProxyURL = workspaceProxy.locator("td.url");
const workspaceProxyStatus = workspaceProxy.locator("td.status span");
await expect(workspaceProxyName).toHaveText(proxyName);
await expect(workspaceProxyURL).toHaveText(
`http://127.0.0.1:${workspaceProxyPort}`,
);
await expect(workspaceProxyStatus).toHaveText("Healthy");
// Tear down the proxy
await stopWorkspaceProxy(proxyServer);
});
const waitUntilWorkspaceProxyIsHealthy = async (
page: Page,
proxyName: string,
) => {
await page.goto("/deployment/workspace-proxies", {
waitUntil: "domcontentloaded",
});
const maxRetries = 30;
const retryIntervalMs = 1000;
let retries = 0;
while (retries < maxRetries) {
await page.reload();
const workspaceProxy = page.locator(`table.MuiTable-root tr`, {
hasText: proxyName,
});
const workspaceProxyStatus = workspaceProxy.locator("td.status span");
try {
await expect(workspaceProxyStatus).toHaveText("Healthy", {
timeout: 1_000,
});
return; // healthy!
} catch {
retries++;
await new Promise((resolve) => setTimeout(resolve, retryIntervalMs));
}
}
throw new Error(
`Workspace proxy "${proxyName}" is unhealthy after ${
maxRetries * retryIntervalMs
}ms`,
);
};


@ -2,8 +2,37 @@ import type { Endpoints } from "@octokit/types";
import { test } from "@playwright/test";
import type { ExternalAuthDevice } from "api/typesGenerated";
import { gitAuth } from "../constants";
-import { Awaiter, createServer } from "../helpers";
-import { beforeCoderTest } from "../hooks";
+import {
+  Awaiter,
+  createServer,
+  createTemplate,
+  createWorkspace,
+  echoResponsesWithExternalAuth,
+} from "../helpers";
+import { beforeCoderTest, resetExternalAuthKey } from "../hooks";
test.beforeAll(async ({ baseURL }) => {
const srv = await createServer(gitAuth.webPort);
// The GitHub validate endpoint returns the currently authenticated user!
srv.use(gitAuth.validatePath, (req, res) => {
res.write(JSON.stringify(ghUser));
res.end();
});
srv.use(gitAuth.tokenPath, (req, res) => {
const r = (Math.random() + 1).toString(36).substring(7);
res.write(JSON.stringify({ access_token: r }));
res.end();
});
srv.use(gitAuth.authPath, (req, res) => {
res.redirect(
`${baseURL}/external-auth/${gitAuth.webProvider}/callback?code=1234&state=` +
req.query.state,
);
});
});
test.beforeEach(async ({ context }) => resetExternalAuthKey(context));
test.beforeEach(({ page }) => beforeCoderTest(page));
@ -57,23 +86,7 @@ test("external auth device", async ({ page }) => {
  await page.waitForSelector("text=1 organization authorized");
});
test("external auth web", async ({ baseURL, page }) => { test("external auth web", async ({ page }) => {
const srv = await createServer(gitAuth.webPort);
// The GitHub validate endpoint returns the currently authenticated user!
srv.use(gitAuth.validatePath, (req, res) => {
res.write(JSON.stringify(ghUser));
res.end();
});
srv.use(gitAuth.tokenPath, (req, res) => {
res.write(JSON.stringify({ access_token: "hello-world" }));
res.end();
});
srv.use(gitAuth.authPath, (req, res) => {
res.redirect(
`${baseURL}/external-auth/${gitAuth.webProvider}/callback?code=1234&state=` +
req.query.state,
);
});
  await page.goto(`/external-auth/${gitAuth.webProvider}`, {
    waitUntil: "domcontentloaded",
  });
@ -81,6 +94,17 @@ test("external auth web", async ({ baseURL, page }) => {
  await page.waitForSelector("text=You've authenticated with GitHub!");
});
test("successful external auth from workspace", async ({ page }) => {
const templateName = await createTemplate(
page,
echoResponsesWithExternalAuth([
{ id: gitAuth.webProvider, optional: false },
]),
);
await createWorkspace(page, templateName, [], [], gitAuth.webProvider);
});
const ghUser: Endpoints["GET /user"]["response"]["data"] = {
  login: "kylecarbs",
  id: 7122116,


@ -1270,6 +1270,13 @@ export const getWorkspaceProxies = async (): Promise<
  return response.data;
};
export const createWorkspaceProxy = async (
b: TypesGen.CreateWorkspaceProxyRequest,
): Promise<TypesGen.UpdateWorkspaceProxyResponse> => {
const response = await axios.post(`/api/v2/workspaceproxies`, b);
return response.data;
};
export const getAppearance = async (): Promise<TypesGen.AppearanceConfig> => {
  try {
    const response = await axios.get(`/api/v2/appearance`);


@ -3,7 +3,7 @@ import { Helmet } from "react-helmet-async";
import { useQuery } from "react-query";
import { deploymentDAUs } from "api/queries/deployment";
import { entitlements } from "api/queries/entitlements";
-import { availableExperiments } from "api/queries/experiments";
+import { availableExperiments, experiments } from "api/queries/experiments";
import { pageTitle } from "utils/page";
import { useDeploySettings } from "../DeploySettingsLayout";
import { GeneralSettingsPageView } from "./GeneralSettingsPageView";
@ -12,7 +12,14 @@ const GeneralSettingsPage: FC = () => {
  const { deploymentValues } = useDeploySettings();
  const deploymentDAUsQuery = useQuery(deploymentDAUs());
  const entitlementsQuery = useQuery(entitlements());
-  const experimentsQuery = useQuery(availableExperiments());
+  const enabledExperimentsQuery = useQuery(experiments());
const safeExperimentsQuery = useQuery(availableExperiments());
const safeExperiments = safeExperimentsQuery.data?.safe ?? [];
const invalidExperiments =
enabledExperimentsQuery.data?.filter((exp) => {
return !safeExperiments.includes(exp);
}) ?? [];
  return (
    <>
@ -24,7 +31,8 @@ const GeneralSettingsPage: FC = () => {
        deploymentDAUs={deploymentDAUsQuery.data}
        deploymentDAUsError={deploymentDAUsQuery.error}
        entitlements={entitlementsQuery.data}
-        safeExperiments={experimentsQuery.data?.safe ?? []}
+        invalidExperiments={invalidExperiments}
safeExperiments={safeExperiments}
      />
    </>
  );


@ -40,6 +40,7 @@ const meta: Meta<typeof GeneralSettingsPageView> = {
    },
  ],
  deploymentDAUs: MockDeploymentDAUResponse,
invalidExperiments: [],
  safeExperiments: [],
  },
};
@ -102,6 +103,43 @@ export const allExperimentsEnabled: Story = {
        hidden: false,
      },
    ],
-    safeExperiments: [],
+    safeExperiments: ["shared-ports"],
invalidExperiments: ["invalid"],
},
};
export const invalidExperimentsEnabled: Story = {
args: {
deploymentOptions: [
{
name: "Access URL",
description:
"The URL that users will use to access the Coder deployment.",
flag: "access-url",
flag_shorthand: "",
value: "https://dev.coder.com",
hidden: false,
},
{
name: "Wildcard Access URL",
description:
'Specifies the wildcard hostname to use for workspace applications in the form "*.example.com".',
flag: "wildcard-access-url",
flag_shorthand: "",
value: "*--apps.dev.coder.com",
hidden: false,
},
{
name: "Experiments",
description:
"Enable one or more experiments. These are not ready for production. Separate multiple experiments with commas, or enter '*' to opt-in to all available experiments.",
flag: "experiments",
value: ["invalid", "*"],
flag_shorthand: "",
hidden: false,
},
],
safeExperiments: ["shared-ports"],
invalidExperiments: ["invalid"],
  },
};


@ -1,3 +1,4 @@
import AlertTitle from "@mui/material/AlertTitle";
import type { FC } from "react";
import type {
  SerpentOption,
@ -13,6 +14,7 @@ import { ErrorAlert } from "components/Alert/ErrorAlert";
import { Stack } from "components/Stack/Stack";
import { useDeploymentOptions } from "utils/deployOptions";
import { docs } from "utils/docs";
import { Alert } from "../../../components/Alert/Alert";
import { Header } from "../Header";
import OptionsTable from "../OptionsTable";
import { ChartSection } from "./ChartSection";
@ -22,7 +24,8 @@ export type GeneralSettingsPageViewProps = {
  deploymentDAUs?: DAUsResponse;
  deploymentDAUsError: unknown;
  entitlements: Entitlements | undefined;
-  safeExperiments: Experiments | undefined;
+  readonly invalidExperiments: Experiments | string[];
readonly safeExperiments: Experiments | string[];
};

export const GeneralSettingsPageView: FC<GeneralSettingsPageViewProps> = ({
@ -31,6 +34,7 @@ export const GeneralSettingsPageView: FC<GeneralSettingsPageViewProps> = ({
  deploymentDAUsError,
  entitlements,
  safeExperiments,
invalidExperiments,
}) => {
  return (
    <>
@ -58,6 +62,28 @@ export const GeneralSettingsPageView: FC<GeneralSettingsPageViewProps> = ({
          </ChartSection>
        </div>
      )}
{invalidExperiments.length > 0 && (
<Alert severity="warning">
<AlertTitle>Invalid experiments in use:</AlertTitle>
<ul>
{invalidExperiments.map((it) => (
<li key={it}>
<pre>{it}</pre>
</li>
))}
</ul>
It is recommended that you remove these experiments from your
configuration as they have no effect. See{" "}
<a
href="https://coder.com/docs/v2/latest/cli/server#--experiments"
target="_blank"
rel="noreferrer"
>
the documentation
</a>{" "}
for more details.
</Alert>
)}
      <OptionsTable
        options={useDeploymentOptions(
          deploymentOptions,


@ -1,5 +1,5 @@
import { css, type Interpolation, type Theme, useTheme } from "@emotion/react";
-import CheckCircleOutlined from "@mui/icons-material/CheckCircleOutlined";
+import BuildCircleOutlinedIcon from "@mui/icons-material/BuildCircleOutlined";
import type { FC, HTMLAttributes, PropsWithChildren } from "react";
import { DisabledBadge, EnabledBadge } from "components/Badges/Badges";
import { MONOSPACE_FONT_FAMILY } from "theme/constants";
@ -91,11 +91,11 @@ export const OptionValue: FC<OptionValueProps> = (props) => {
      }}
    >
      {isEnabled && (
-        <CheckCircleOutlined
+        <BuildCircleOutlinedIcon
          css={(theme) => ({
            width: 16,
            height: 16,
-            color: theme.palette.success.light,
+            color: theme.palette.mode,
            margin: "0 8px",
          })}
        />


@ -38,13 +38,13 @@ export function optionValue(
        ([key, value]) => `"${key}"->"${value}"`,
      );
    case "Experiments": {
-      const experimentMap: Record<string, boolean> | undefined =
-        additionalValues?.reduce(
-          (acc, v) => {
-            return { ...acc, [v]: option.value.includes("*") ? true : false };
-          },
-          {} as Record<string, boolean>,
-        );
+      const experimentMap = additionalValues?.reduce<Record<string, boolean>>(
+        (acc, v) => {
+          acc[v] = option.value.includes("*");
+          return acc;
+        },
+        {},
+      );
      if (!experimentMap) {
        break;


@ -41,6 +41,7 @@ export const LoginPageView: FC<LoginPageViewProps> = ({
            css={{
              maxWidth: "200px",
            }}
className="application-logo"
          />
        ) : (
          <CoderIcon fill="white" opacity={1} css={styles.icon} />


@ -40,7 +40,7 @@ export const ProxyRow: FC<ProxyRowProps> = ({ proxy, latency }) => {
  return (
    <>
      <TableRow key={proxy.name} data-testid={proxy.name}>
-        <TableCell>
+        <TableCell className="name">
          <AvatarData
            title={
              proxy.display_name && proxy.display_name.length > 0
@ -60,8 +60,12 @@ export const ProxyRow: FC<ProxyRowProps> = ({ proxy, latency }) => {
          />
        </TableCell>
-        <TableCell css={{ fontSize: 14 }}>{proxy.path_app_url}</TableCell>
-        <TableCell css={{ fontSize: 14 }}>{statusBadge}</TableCell>
+        <TableCell css={{ fontSize: 14 }} className="url">
+          {proxy.path_app_url}
+        </TableCell>
+        <TableCell css={{ fontSize: 14 }} className="status">
+          {statusBadge}
+        </TableCell>
        <TableCell
          css={{
            fontSize: 14,


@ -215,8 +215,7 @@ func NewRemoteCoordination(logger slog.Logger,
		respLoopDone: make(chan struct{}),
	}
	if tunnelTarget != uuid.Nil {
-		// TODO: reenable in upstack PR
-		// c.coordinatee.SetTunnelDestination(tunnelTarget)
+		c.coordinatee.SetTunnelDestination(tunnelTarget)
		c.Lock()
		err := c.protocol.Send(&proto.CoordinateRequest{AddTunnel: &proto.CoordinateRequest_Tunnel{Id: tunnelTarget[:]}})
		c.Unlock()


@ -419,60 +419,16 @@ func TestCoordinator(t *testing.T) {
		coordinator := tailnet.NewCoordinator(logger)
		ctx := testutil.Context(t, testutil.WaitShort)
-		clientID := uuid.New()
-		agentID := uuid.New()
-		aReq, aRes := coordinator.Coordinate(ctx, agentID, agentID.String(), tailnet.AgentCoordinateeAuth{ID: agentID})
-		cReq, cRes := coordinator.Coordinate(ctx, clientID, clientID.String(), tailnet.ClientCoordinateeAuth{AgentID: agentID})
-		{
-			nk, err := key.NewNode().Public().MarshalBinary()
-			require.NoError(t, err)
-			dk, err := key.NewDisco().Public().MarshalText()
-			require.NoError(t, err)
-			cReq <- &proto.CoordinateRequest{UpdateSelf: &proto.CoordinateRequest_UpdateSelf{
-				Node: &proto.Node{
-					Id:    3,
-					Key:   nk,
-					Disco: string(dk),
-				},
-			}}
-		}
-		cReq <- &proto.CoordinateRequest{AddTunnel: &proto.CoordinateRequest_Tunnel{
-			Id: agentID[:],
-		}}
-		testutil.RequireRecvCtx(ctx, t, aRes)
-		aReq <- &proto.CoordinateRequest{ReadyForHandshake: []*proto.CoordinateRequest_ReadyForHandshake{{
-			Id: clientID[:],
-		}}}
-		ack := testutil.RequireRecvCtx(ctx, t, cRes)
-		require.NotNil(t, ack.PeerUpdates)
-		require.Len(t, ack.PeerUpdates, 1)
-		require.Equal(t, proto.CoordinateResponse_PeerUpdate_READY_FOR_HANDSHAKE, ack.PeerUpdates[0].Kind)
-		require.Equal(t, agentID[:], ack.PeerUpdates[0].Id)
+		test.ReadyForHandshakeTest(ctx, t, coordinator)
	})
t.Run("AgentAck_NoPermission", func(t *testing.T) { t.Run("AgentAck_NoPermission", func(t *testing.T) {
t.Parallel() t.Parallel()
logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug) logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
coordinator := tailnet.NewCoordinator(logger) coordinator := tailnet.NewCoordinator(logger)
ctx := testutil.Context(t, testutil.WaitShort) ctx := testutil.Context(t, testutil.WaitShort)
clientID := uuid.New() test.ReadyForHandshakeNoPermissionTest(ctx, t, coordinator)
agentID := uuid.New()
aReq, aRes := coordinator.Coordinate(ctx, agentID, agentID.String(), tailnet.AgentCoordinateeAuth{ID: agentID})
_, _ = coordinator.Coordinate(ctx, clientID, clientID.String(), tailnet.ClientCoordinateeAuth{AgentID: agentID})
aReq <- &proto.CoordinateRequest{ReadyForHandshake: []*proto.CoordinateRequest_ReadyForHandshake{{
Id: clientID[:],
}}}
rfhError := testutil.RequireRecvCtx(ctx, t, aRes)
require.NotEmpty(t, rfhError.Error)
}) })
} }

View File

@@ -2,6 +2,7 @@ package test

 import (
 	"context"
+	"fmt"
 	"testing"

 	"github.com/coder/coder/v2/tailnet"
@@ -53,3 +54,31 @@ func BidirectionalTunnels(ctx context.Context, t *testing.T, coordinator tailnet
 	p1.AssertEventuallyHasDERP(p2.ID, 2)
 	p2.AssertEventuallyHasDERP(p1.ID, 1)
 }
+
+func ReadyForHandshakeTest(ctx context.Context, t *testing.T, coordinator tailnet.CoordinatorV2) {
+	p1 := NewPeer(ctx, t, coordinator, "p1")
+	defer p1.Close(ctx)
+	p2 := NewPeer(ctx, t, coordinator, "p2")
+	defer p2.Close(ctx)
+	p1.AddTunnel(p2.ID)
+	p2.AddTunnel(p1.ID)
+	p1.UpdateDERP(1)
+	p2.UpdateDERP(2)
+	p1.AssertEventuallyHasDERP(p2.ID, 2)
+	p2.AssertEventuallyHasDERP(p1.ID, 1)
+	p2.ReadyForHandshake(p1.ID)
+	p1.AssertEventuallyReadyForHandshake(p2.ID)
+}
+
+func ReadyForHandshakeNoPermissionTest(ctx context.Context, t *testing.T, coordinator tailnet.CoordinatorV2) {
+	p1 := NewPeer(ctx, t, coordinator, "p1")
+	defer p1.Close(ctx)
+	p2 := NewPeer(ctx, t, coordinator, "p2")
+	defer p2.Close(ctx)
+	p1.UpdateDERP(1)
+	p2.UpdateDERP(2)
+	p2.ReadyForHandshake(p1.ID)
+	p2.AssertEventuallyGetsError(fmt.Sprintf("you do not share a tunnel with %q", p1.ID.String()))
+}

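For reference, these helpers are what the slimmed-down TestCoordinator subtests above now delegate to. A sketch of a caller follows, assuming the test package lives at github.com/coder/coder/v2/tailnet/test and the usual cdr.dev/slog slogtest and testutil import paths; the test name is illustrative.

package tailnet_test

import (
	"testing"

	"cdr.dev/slog"
	"cdr.dev/slog/sloggers/slogtest"

	"github.com/coder/coder/v2/tailnet"
	"github.com/coder/coder/v2/tailnet/test"
	"github.com/coder/coder/v2/testutil"
)

func TestReadyForHandshake(t *testing.T) {
	t.Parallel()
	logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
	coordinator := tailnet.NewCoordinator(logger)
	ctx := testutil.Context(t, testutil.WaitShort)
	// Drive the shared ready-for-handshake scenario against the coordinator.
	test.ReadyForHandshakeTest(ctx, t, coordinator)
}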
View File

@@ -13,8 +13,9 @@ import (
 )

 type PeerStatus struct {
-	preferredDERP int32
-	status        proto.CoordinateResponse_PeerUpdate_Kind
+	preferredDERP     int32
+	status            proto.CoordinateResponse_PeerUpdate_Kind
+	readyForHandshake bool
 }

 type Peer struct {
@@ -68,6 +69,21 @@ func (p *Peer) UpdateDERP(derp int32) {
 	}
 }

+func (p *Peer) ReadyForHandshake(peer uuid.UUID) {
+	p.t.Helper()
+	req := &proto.CoordinateRequest{ReadyForHandshake: []*proto.CoordinateRequest_ReadyForHandshake{{
+		Id: peer[:],
+	}}}
+	select {
+	case <-p.ctx.Done():
+		p.t.Errorf("timeout sending ready for handshake for %s", p.name)
+		return
+	case p.reqs <- req:
+		return
+	}
+}
+
 func (p *Peer) Disconnect() {
 	p.t.Helper()
 	req := &proto.CoordinateRequest{Disconnect: &proto.CoordinateRequest_Disconnect{}}
@@ -135,6 +151,35 @@ func (p *Peer) AssertEventuallyResponsesClosed() {
 	}
 }

+func (p *Peer) AssertEventuallyReadyForHandshake(other uuid.UUID) {
+	p.t.Helper()
+	for {
+		o := p.peers[other]
+		if o.readyForHandshake {
+			return
+		}
+
+		err := p.handleOneResp()
+		if xerrors.Is(err, responsesClosed) {
+			return
+		}
+	}
+}
+
+func (p *Peer) AssertEventuallyGetsError(match string) {
+	p.t.Helper()
+	for {
+		err := p.handleOneResp()
+		if xerrors.Is(err, responsesClosed) {
+			return
+		}
+
+		if err != nil && assert.ErrorContains(p.t, err, match) {
+			return
+		}
+	}
+}
+
 var responsesClosed = xerrors.New("responses closed")

 func (p *Peer) handleOneResp() error {
@@ -145,6 +190,9 @@ func (p *Peer) handleOneResp() error {
 		if !ok {
 			return responsesClosed
 		}
+		if resp.Error != "" {
+			return xerrors.New(resp.Error)
+		}
 		for _, update := range resp.PeerUpdates {
 			id, err := uuid.FromBytes(update.Id)
 			if err != nil {
@@ -152,12 +200,16 @@ func (p *Peer) handleOneResp() error {
 			}
 			switch update.Kind {
 			case proto.CoordinateResponse_PeerUpdate_NODE, proto.CoordinateResponse_PeerUpdate_LOST:
-				p.peers[id] = PeerStatus{
-					preferredDERP: update.GetNode().GetPreferredDerp(),
-					status:        update.Kind,
-				}
+				peer := p.peers[id]
+				peer.preferredDERP = update.GetNode().GetPreferredDerp()
+				peer.status = update.Kind
+				p.peers[id] = peer
 			case proto.CoordinateResponse_PeerUpdate_DISCONNECTED:
 				delete(p.peers, id)
+			case proto.CoordinateResponse_PeerUpdate_READY_FOR_HANDSHAKE:
+				peer := p.peers[id]
+				peer.readyForHandshake = true
+				p.peers[id] = peer
 			default:
 				return xerrors.Errorf("unhandled update kind %s", update.Kind)
 			}
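
The two assertions added above share one pattern: pump one response at a time via handleOneResp until the expected state is observed or the response stream closes. A distilled, self-contained sketch of that loop follows; the names are illustrative stand-ins, not the real Peer API.

package main

import (
	"errors"
	"fmt"
)

var errResponsesClosed = errors.New("responses closed")

// assertEventually mirrors AssertEventuallyReadyForHandshake: check the
// desired condition, otherwise consume one more response and loop, bailing
// out once the responses channel has closed.
func assertEventually(handleOneResp func() error, check func() bool) bool {
	for {
		if check() {
			return true
		}
		if err := handleOneResp(); errors.Is(err, errResponsesClosed) {
			return false
		}
	}
}

func main() {
	calls := 0
	ok := assertEventually(
		// Stand-in for reading and applying one coordinator response.
		func() error { calls++; return nil },
		// Stand-in for "the peer is marked ready for handshake".
		func() bool { return calls >= 3 },
	)
	fmt.Println(ok, calls)
}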