From 942e90270e00e104c7c6d6d96f5b738d080ed405 Mon Sep 17 00:00:00 2001 From: Colin Adler Date: Tue, 16 Apr 2024 14:33:04 -0500 Subject: [PATCH 01/25] fix: disable azureidentity test on darwin (#12979) See https://github.com/coder/coder/issues/12978 --- coderd/azureidentity/azureidentity_test.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/coderd/azureidentity/azureidentity_test.go b/coderd/azureidentity/azureidentity_test.go index 081c081dc4..cd7ef11bea 100644 --- a/coderd/azureidentity/azureidentity_test.go +++ b/coderd/azureidentity/azureidentity_test.go @@ -4,6 +4,7 @@ import ( "context" "crypto/x509" "encoding/pem" + "runtime" "testing" "time" @@ -14,6 +15,11 @@ import ( func TestValidate(t *testing.T) { t.Parallel() + if runtime.GOOS == "darwin" { + // This test fails on MacOS for some reason. See https://github.com/coder/coder/issues/12978 + t.Skip() + } + mustTime := func(layout string, value string) time.Time { ti, err := time.Parse(layout, value) require.NoError(t, err) From 777dfbe965c7b2bd340d4224408a667e48a7388c Mon Sep 17 00:00:00 2001 From: Colin Adler Date: Tue, 16 Apr 2024 15:01:10 -0500 Subject: [PATCH 02/25] feat(enterprise): add ready for handshake support to pgcoord (#12935) --- coderd/database/db.go | 2 +- enterprise/tailnet/connio.go | 52 +++++++++++ enterprise/tailnet/handshaker.go | 73 ++++++++++++++++ enterprise/tailnet/handshaker_test.go | 47 ++++++++++ enterprise/tailnet/pgcoord.go | 120 +++++++++++++++++++++----- enterprise/tailnet/pgcoord_test.go | 6 +- tailnet/coordinator.go | 3 +- tailnet/coordinator_test.go | 50 +---------- tailnet/test/cases.go | 29 +++++++ tailnet/test/peer.go | 64 ++++++++++++-- 10 files changed, 364 insertions(+), 82 deletions(-) create mode 100644 enterprise/tailnet/handshaker.go create mode 100644 enterprise/tailnet/handshaker_test.go diff --git a/coderd/database/db.go b/coderd/database/db.go index 9ad1234070..51e61e4ce2 100644 --- a/coderd/database/db.go +++ b/coderd/database/db.go @@ -103,7 +103,7 @@ func (q *sqlQuerier) InTx(function func(Store) error, txOpts *sql.TxOptions) err // Transaction succeeded. return nil } - if err != nil && !IsSerializedError(err) { + if !IsSerializedError(err) { // We should only retry if the error is a serialization error. 
return err } diff --git a/enterprise/tailnet/connio.go b/enterprise/tailnet/connio.go index 2e64bb4bd6..665f58fd8a 100644 --- a/enterprise/tailnet/connio.go +++ b/enterprise/tailnet/connio.go @@ -2,6 +2,8 @@ package tailnet import ( "context" + "fmt" + "slices" "sync" "sync/atomic" "time" @@ -30,10 +32,13 @@ type connIO struct { responses chan<- *proto.CoordinateResponse bindings chan<- binding tunnels chan<- tunnel + rfhs chan<- readyForHandshake auth agpl.CoordinateeAuth mu sync.Mutex closed bool disconnected bool + // latest is the most recent, unfiltered snapshot of the mappings we know about + latest []mapping name string start int64 @@ -46,6 +51,7 @@ func newConnIO(coordContext context.Context, logger slog.Logger, bindings chan<- binding, tunnels chan<- tunnel, + rfhs chan<- readyForHandshake, requests <-chan *proto.CoordinateRequest, responses chan<- *proto.CoordinateResponse, id uuid.UUID, @@ -64,6 +70,7 @@ func newConnIO(coordContext context.Context, responses: responses, bindings: bindings, tunnels: tunnels, + rfhs: rfhs, auth: auth, name: name, start: now, @@ -190,9 +197,54 @@ func (c *connIO) handleRequest(req *proto.CoordinateRequest) error { c.disconnected = true return errDisconnect } + if req.ReadyForHandshake != nil { + c.logger.Debug(c.peerCtx, "got ready for handshake ", slog.F("rfh", req.ReadyForHandshake)) + for _, rfh := range req.ReadyForHandshake { + dst, err := uuid.FromBytes(rfh.Id) + if err != nil { + c.logger.Error(c.peerCtx, "unable to convert bytes to UUID", slog.Error(err)) + // this shouldn't happen unless there is a client error. Close the connection so the client + // doesn't just happily continue thinking everything is fine. + return err + } + + mappings := c.getLatestMapping() + if !slices.ContainsFunc(mappings, func(mapping mapping) bool { + return mapping.peer == dst + }) { + c.logger.Debug(c.peerCtx, "cannot process ready for handshake, src isn't peered with dst", + slog.F("dst", dst.String()), + ) + _ = c.Enqueue(&proto.CoordinateResponse{ + Error: fmt.Sprintf("you do not share a tunnel with %q", dst.String()), + }) + return nil + } + + if err := agpl.SendCtx(c.coordCtx, c.rfhs, readyForHandshake{ + src: c.id, + dst: dst, + }); err != nil { + c.logger.Debug(c.peerCtx, "failed to send ready for handshake", slog.Error(err)) + return err + } + } + } return nil } +func (c *connIO) setLatestMapping(latest []mapping) { + c.mu.Lock() + defer c.mu.Unlock() + c.latest = latest +} + +func (c *connIO) getLatestMapping() []mapping { + c.mu.Lock() + defer c.mu.Unlock() + return c.latest +} + func (c *connIO) UniqueID() uuid.UUID { return c.id } diff --git a/enterprise/tailnet/handshaker.go b/enterprise/tailnet/handshaker.go new file mode 100644 index 0000000000..fc66262884 --- /dev/null +++ b/enterprise/tailnet/handshaker.go @@ -0,0 +1,73 @@ +package tailnet + +import ( + "context" + "fmt" + "sync" + + "github.com/google/uuid" + + "cdr.dev/slog" + "github.com/coder/coder/v2/coderd/database/pubsub" +) + +type readyForHandshake struct { + src uuid.UUID + dst uuid.UUID +} + +type handshaker struct { + ctx context.Context + logger slog.Logger + coordinatorID uuid.UUID + pubsub pubsub.Pubsub + updates <-chan readyForHandshake + + workerWG sync.WaitGroup +} + +func newHandshaker(ctx context.Context, + logger slog.Logger, + id uuid.UUID, + ps pubsub.Pubsub, + updates <-chan readyForHandshake, + startWorkers <-chan struct{}, +) *handshaker { + s := &handshaker{ + ctx: ctx, + logger: logger, + coordinatorID: id, + pubsub: ps, + updates: updates, + } + // add to the 
waitgroup immediately to avoid any races waiting for it before + // the workers start. + s.workerWG.Add(numHandshakerWorkers) + go func() { + <-startWorkers + for i := 0; i < numHandshakerWorkers; i++ { + go s.worker() + } + }() + return s +} + +func (t *handshaker) worker() { + defer t.workerWG.Done() + + for { + select { + case <-t.ctx.Done(): + t.logger.Debug(t.ctx, "handshaker worker exiting", slog.Error(t.ctx.Err())) + return + + case rfh := <-t.updates: + err := t.pubsub.Publish(eventReadyForHandshake, []byte(fmt.Sprintf( + "%s,%s", rfh.dst.String(), rfh.src.String(), + ))) + if err != nil { + t.logger.Error(t.ctx, "publish ready for handshake", slog.Error(err)) + } + } + } +} diff --git a/enterprise/tailnet/handshaker_test.go b/enterprise/tailnet/handshaker_test.go new file mode 100644 index 0000000000..6196be2215 --- /dev/null +++ b/enterprise/tailnet/handshaker_test.go @@ -0,0 +1,47 @@ +package tailnet_test + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "cdr.dev/slog" + "cdr.dev/slog/sloggers/slogtest" + "github.com/coder/coder/v2/coderd/database/dbtestutil" + "github.com/coder/coder/v2/enterprise/tailnet" + agpltest "github.com/coder/coder/v2/tailnet/test" + "github.com/coder/coder/v2/testutil" +) + +func TestPGCoordinator_ReadyForHandshake_OK(t *testing.T) { + t.Parallel() + if !dbtestutil.WillUsePostgres() { + t.Skip("test only with postgres") + } + store, ps := dbtestutil.NewDB(t) + ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitSuperLong) + defer cancel() + logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug) + coord1, err := tailnet.NewPGCoord(ctx, logger.Named("coord1"), ps, store) + require.NoError(t, err) + defer coord1.Close() + + agpltest.ReadyForHandshakeTest(ctx, t, coord1) +} + +func TestPGCoordinator_ReadyForHandshake_NoPermission(t *testing.T) { + t.Parallel() + if !dbtestutil.WillUsePostgres() { + t.Skip("test only with postgres") + } + store, ps := dbtestutil.NewDB(t) + ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitSuperLong) + defer cancel() + logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug) + coord1, err := tailnet.NewPGCoord(ctx, logger.Named("coord1"), ps, store) + require.NoError(t, err) + defer coord1.Close() + + agpltest.ReadyForHandshakeNoPermissionTest(ctx, t, coord1) +} diff --git a/enterprise/tailnet/pgcoord.go b/enterprise/tailnet/pgcoord.go index aecdcde828..390e13621f 100644 --- a/enterprise/tailnet/pgcoord.go +++ b/enterprise/tailnet/pgcoord.go @@ -9,8 +9,6 @@ import ( "sync/atomic" "time" - "github.com/coder/coder/v2/tailnet/proto" - "github.com/cenkalti/backoff/v4" "github.com/google/uuid" "golang.org/x/xerrors" @@ -22,25 +20,31 @@ import ( "github.com/coder/coder/v2/coderd/database/pubsub" "github.com/coder/coder/v2/coderd/rbac" agpl "github.com/coder/coder/v2/tailnet" + "github.com/coder/coder/v2/tailnet/proto" ) const ( - EventHeartbeats = "tailnet_coordinator_heartbeat" - eventPeerUpdate = "tailnet_peer_update" - eventTunnelUpdate = "tailnet_tunnel_update" - HeartbeatPeriod = time.Second * 2 - MissedHeartbeats = 3 - numQuerierWorkers = 10 - numBinderWorkers = 10 - numTunnelerWorkers = 10 - dbMaxBackoff = 10 * time.Second - cleanupPeriod = time.Hour + EventHeartbeats = "tailnet_coordinator_heartbeat" + eventPeerUpdate = "tailnet_peer_update" + eventTunnelUpdate = "tailnet_tunnel_update" + eventReadyForHandshake = "tailnet_ready_for_handshake" + HeartbeatPeriod = time.Second * 2 + MissedHeartbeats = 3 + numQuerierWorkers = 10 + numBinderWorkers = 10 + 
numTunnelerWorkers = 10 + numHandshakerWorkers = 5 + dbMaxBackoff = 10 * time.Second + cleanupPeriod = time.Hour ) // pgCoord is a postgres-backed coordinator // -// ┌──────────┐ -// ┌────────────► tunneler ├──────────┐ +// ┌────────────┐ +// ┌────────────► handshaker ├────────┐ +// │ └────────────┘ │ +// │ ┌──────────┐ │ +// ├────────────► tunneler ├──────────┤ // │ └──────────┘ │ // │ │ // ┌────────┐ ┌────────┐ ┌───▼───┐ @@ -78,15 +82,17 @@ type pgCoord struct { newConnections chan *connIO closeConnections chan *connIO tunnelerCh chan tunnel + handshakerCh chan readyForHandshake id uuid.UUID cancel context.CancelFunc closeOnce sync.Once closed chan struct{} - binder *binder - tunneler *tunneler - querier *querier + binder *binder + tunneler *tunneler + handshaker *handshaker + querier *querier } var pgCoordSubject = rbac.Subject{ @@ -126,6 +132,8 @@ func newPGCoordInternal( ccCh := make(chan *connIO) // for communicating subscriptions with the tunneler sCh := make(chan tunnel) + // for communicating ready for handshakes with the handshaker + rfhCh := make(chan readyForHandshake) // signals when first heartbeat has been sent, so it's safe to start binding. fHB := make(chan struct{}) @@ -145,6 +153,8 @@ func newPGCoordInternal( closeConnections: ccCh, tunneler: newTunneler(ctx, logger, id, store, sCh, fHB), tunnelerCh: sCh, + handshaker: newHandshaker(ctx, logger, id, ps, rfhCh, fHB), + handshakerCh: rfhCh, id: id, querier: newQuerier(querierCtx, logger, id, ps, store, id, cCh, ccCh, numQuerierWorkers, fHB), closed: make(chan struct{}), @@ -242,7 +252,7 @@ func (c *pgCoord) Coordinate( close(resps) return reqs, resps } - cIO := newConnIO(c.ctx, ctx, logger, c.bindings, c.tunnelerCh, reqs, resps, id, name, a) + cIO := newConnIO(c.ctx, ctx, logger, c.bindings, c.tunnelerCh, c.handshakerCh, reqs, resps, id, name, a) err := agpl.SendCtx(c.ctx, c.newConnections, cIO) if err != nil { // this can only happen if the context is canceled, no need to log @@ -626,8 +636,6 @@ type mapper struct { c *connIO - // latest is the most recent, unfiltered snapshot of the mappings we know about - latest []mapping // sent is the state of mappings we have actually enqueued; used to compute diffs for updates. 
sent map[uuid.UUID]mapping @@ -660,11 +668,11 @@ func (m *mapper) run() { return case mappings := <-m.mappings: m.logger.Debug(m.ctx, "got new mappings") - m.latest = mappings + m.c.setLatestMapping(mappings) best = m.bestMappings(mappings) case <-m.update: m.logger.Debug(m.ctx, "triggered update") - best = m.bestMappings(m.latest) + best = m.bestMappings(m.c.getLatestMapping()) } update := m.bestToUpdate(best) if update == nil { @@ -1067,6 +1075,28 @@ func (q *querier) subscribe() { }() q.logger.Info(q.ctx, "subscribed to tunnel updates") + var cancelRFH context.CancelFunc + err = backoff.Retry(func() error { + cancelFn, err := q.pubsub.SubscribeWithErr(eventReadyForHandshake, q.listenReadyForHandshake) + if err != nil { + q.logger.Warn(q.ctx, "failed to subscribe to ready for handshakes", slog.Error(err)) + return err + } + cancelRFH = cancelFn + return nil + }, bkoff) + if err != nil { + if q.ctx.Err() == nil { + q.logger.Error(q.ctx, "code bug: retry failed before context canceled", slog.Error(err)) + } + return + } + defer func() { + q.logger.Info(q.ctx, "canceling ready for handshake subscription") + cancelRFH() + }() + q.logger.Info(q.ctx, "subscribed to ready for handshakes") + // unblock the outer function from returning subscribed <- struct{}{} @@ -1112,6 +1142,7 @@ func (q *querier) listenTunnel(_ context.Context, msg []byte, err error) { } if err != nil { q.logger.Warn(q.ctx, "unhandled pubsub error", slog.Error(err)) + return } peers, err := parseTunnelUpdate(string(msg)) if err != nil { @@ -1133,6 +1164,36 @@ func (q *querier) listenTunnel(_ context.Context, msg []byte, err error) { } } +func (q *querier) listenReadyForHandshake(_ context.Context, msg []byte, err error) { + if err != nil && !xerrors.Is(err, pubsub.ErrDroppedMessages) { + q.logger.Warn(q.ctx, "unhandled pubsub error", slog.Error(err)) + return + } + + to, from, err := parseReadyForHandshake(string(msg)) + if err != nil { + q.logger.Error(q.ctx, "failed to parse ready for handshake", slog.F("msg", string(msg)), slog.Error(err)) + return + } + + mk := mKey(to) + q.mu.Lock() + mpr, ok := q.mappers[mk] + q.mu.Unlock() + if !ok { + q.logger.Debug(q.ctx, "ignoring ready for handshake because we have no mapper", + slog.F("peer_id", to)) + return + } + + _ = mpr.c.Enqueue(&proto.CoordinateResponse{ + PeerUpdates: []*proto.CoordinateResponse_PeerUpdate{{ + Id: from[:], + Kind: proto.CoordinateResponse_PeerUpdate_READY_FOR_HANDSHAKE, + }}, + }) +} + func (q *querier) resyncPeerMappings() { q.mu.Lock() defer q.mu.Unlock() @@ -1225,6 +1286,21 @@ func parsePeerUpdate(msg string) (peer uuid.UUID, err error) { return peer, nil } +func parseReadyForHandshake(msg string) (to uuid.UUID, from uuid.UUID, err error) { + parts := strings.Split(msg, ",") + if len(parts) != 2 { + return uuid.Nil, uuid.Nil, xerrors.Errorf("expected 2 parts separated by comma") + } + ids := make([]uuid.UUID, 2) + for i, part := range parts { + ids[i], err = uuid.Parse(part) + if err != nil { + return uuid.Nil, uuid.Nil, xerrors.Errorf("failed to parse UUID: %w", err) + } + } + return ids[0], ids[1], nil +} + // mKey identifies a set of node mappings we want to query. 
type mKey uuid.UUID diff --git a/enterprise/tailnet/pgcoord_test.go b/enterprise/tailnet/pgcoord_test.go index 623772b63c..b27db149f6 100644 --- a/enterprise/tailnet/pgcoord_test.go +++ b/enterprise/tailnet/pgcoord_test.go @@ -10,9 +10,6 @@ import ( "testing" "time" - "github.com/coder/coder/v2/codersdk/workspacesdk" - agpltest "github.com/coder/coder/v2/tailnet/test" - "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -24,14 +21,15 @@ import ( "cdr.dev/slog" "cdr.dev/slog/sloggers/slogtest" - "github.com/coder/coder/v2/coderd/database" "github.com/coder/coder/v2/coderd/database/dbmock" "github.com/coder/coder/v2/coderd/database/dbtestutil" "github.com/coder/coder/v2/coderd/database/pubsub" + "github.com/coder/coder/v2/codersdk/workspacesdk" "github.com/coder/coder/v2/enterprise/tailnet" agpl "github.com/coder/coder/v2/tailnet" "github.com/coder/coder/v2/tailnet/proto" + agpltest "github.com/coder/coder/v2/tailnet/test" "github.com/coder/coder/v2/testutil" ) diff --git a/tailnet/coordinator.go b/tailnet/coordinator.go index 95f61637f7..423dc0ecbf 100644 --- a/tailnet/coordinator.go +++ b/tailnet/coordinator.go @@ -215,8 +215,7 @@ func NewRemoteCoordination(logger slog.Logger, respLoopDone: make(chan struct{}), } if tunnelTarget != uuid.Nil { - // TODO: reenable in upstack PR - // c.coordinatee.SetTunnelDestination(tunnelTarget) + c.coordinatee.SetTunnelDestination(tunnelTarget) c.Lock() err := c.protocol.Send(&proto.CoordinateRequest{AddTunnel: &proto.CoordinateRequest_Tunnel{Id: tunnelTarget[:]}}) c.Unlock() diff --git a/tailnet/coordinator_test.go b/tailnet/coordinator_test.go index c4e269c53c..ddf5006614 100644 --- a/tailnet/coordinator_test.go +++ b/tailnet/coordinator_test.go @@ -419,60 +419,16 @@ func TestCoordinator(t *testing.T) { coordinator := tailnet.NewCoordinator(logger) ctx := testutil.Context(t, testutil.WaitShort) - clientID := uuid.New() - agentID := uuid.New() - - aReq, aRes := coordinator.Coordinate(ctx, agentID, agentID.String(), tailnet.AgentCoordinateeAuth{ID: agentID}) - cReq, cRes := coordinator.Coordinate(ctx, clientID, clientID.String(), tailnet.ClientCoordinateeAuth{AgentID: agentID}) - - { - nk, err := key.NewNode().Public().MarshalBinary() - require.NoError(t, err) - dk, err := key.NewDisco().Public().MarshalText() - require.NoError(t, err) - cReq <- &proto.CoordinateRequest{UpdateSelf: &proto.CoordinateRequest_UpdateSelf{ - Node: &proto.Node{ - Id: 3, - Key: nk, - Disco: string(dk), - }, - }} - } - - cReq <- &proto.CoordinateRequest{AddTunnel: &proto.CoordinateRequest_Tunnel{ - Id: agentID[:], - }} - - testutil.RequireRecvCtx(ctx, t, aRes) - - aReq <- &proto.CoordinateRequest{ReadyForHandshake: []*proto.CoordinateRequest_ReadyForHandshake{{ - Id: clientID[:], - }}} - ack := testutil.RequireRecvCtx(ctx, t, cRes) - require.NotNil(t, ack.PeerUpdates) - require.Len(t, ack.PeerUpdates, 1) - require.Equal(t, proto.CoordinateResponse_PeerUpdate_READY_FOR_HANDSHAKE, ack.PeerUpdates[0].Kind) - require.Equal(t, agentID[:], ack.PeerUpdates[0].Id) + test.ReadyForHandshakeTest(ctx, t, coordinator) }) t.Run("AgentAck_NoPermission", func(t *testing.T) { t.Parallel() - logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug) + logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug) coordinator := tailnet.NewCoordinator(logger) ctx := testutil.Context(t, testutil.WaitShort) - clientID := uuid.New() - agentID := uuid.New() - - aReq, aRes := coordinator.Coordinate(ctx, agentID, 
agentID.String(), tailnet.AgentCoordinateeAuth{ID: agentID}) - _, _ = coordinator.Coordinate(ctx, clientID, clientID.String(), tailnet.ClientCoordinateeAuth{AgentID: agentID}) - - aReq <- &proto.CoordinateRequest{ReadyForHandshake: []*proto.CoordinateRequest_ReadyForHandshake{{ - Id: clientID[:], - }}} - - rfhError := testutil.RequireRecvCtx(ctx, t, aRes) - require.NotEmpty(t, rfhError.Error) + test.ReadyForHandshakeNoPermissionTest(ctx, t, coordinator) }) } diff --git a/tailnet/test/cases.go b/tailnet/test/cases.go index a54c1e9320..8361c77f4d 100644 --- a/tailnet/test/cases.go +++ b/tailnet/test/cases.go @@ -2,6 +2,7 @@ package test import ( "context" + "fmt" "testing" "github.com/coder/coder/v2/tailnet" @@ -53,3 +54,31 @@ func BidirectionalTunnels(ctx context.Context, t *testing.T, coordinator tailnet p1.AssertEventuallyHasDERP(p2.ID, 2) p2.AssertEventuallyHasDERP(p1.ID, 1) } + +func ReadyForHandshakeTest(ctx context.Context, t *testing.T, coordinator tailnet.CoordinatorV2) { + p1 := NewPeer(ctx, t, coordinator, "p1") + defer p1.Close(ctx) + p2 := NewPeer(ctx, t, coordinator, "p2") + defer p2.Close(ctx) + p1.AddTunnel(p2.ID) + p2.AddTunnel(p1.ID) + p1.UpdateDERP(1) + p2.UpdateDERP(2) + + p1.AssertEventuallyHasDERP(p2.ID, 2) + p2.AssertEventuallyHasDERP(p1.ID, 1) + p2.ReadyForHandshake(p1.ID) + p1.AssertEventuallyReadyForHandshake(p2.ID) +} + +func ReadyForHandshakeNoPermissionTest(ctx context.Context, t *testing.T, coordinator tailnet.CoordinatorV2) { + p1 := NewPeer(ctx, t, coordinator, "p1") + defer p1.Close(ctx) + p2 := NewPeer(ctx, t, coordinator, "p2") + defer p2.Close(ctx) + p1.UpdateDERP(1) + p2.UpdateDERP(2) + + p2.ReadyForHandshake(p1.ID) + p2.AssertEventuallyGetsError(fmt.Sprintf("you do not share a tunnel with %q", p1.ID.String())) +} diff --git a/tailnet/test/peer.go b/tailnet/test/peer.go index 87d0b586ed..791c3b0e91 100644 --- a/tailnet/test/peer.go +++ b/tailnet/test/peer.go @@ -13,8 +13,9 @@ import ( ) type PeerStatus struct { - preferredDERP int32 - status proto.CoordinateResponse_PeerUpdate_Kind + preferredDERP int32 + status proto.CoordinateResponse_PeerUpdate_Kind + readyForHandshake bool } type Peer struct { @@ -68,6 +69,21 @@ func (p *Peer) UpdateDERP(derp int32) { } } +func (p *Peer) ReadyForHandshake(peer uuid.UUID) { + p.t.Helper() + + req := &proto.CoordinateRequest{ReadyForHandshake: []*proto.CoordinateRequest_ReadyForHandshake{{ + Id: peer[:], + }}} + select { + case <-p.ctx.Done(): + p.t.Errorf("timeout sending ready for handshake for %s", p.name) + return + case p.reqs <- req: + return + } +} + func (p *Peer) Disconnect() { p.t.Helper() req := &proto.CoordinateRequest{Disconnect: &proto.CoordinateRequest_Disconnect{}} @@ -135,6 +151,35 @@ func (p *Peer) AssertEventuallyResponsesClosed() { } } +func (p *Peer) AssertEventuallyReadyForHandshake(other uuid.UUID) { + p.t.Helper() + for { + o := p.peers[other] + if o.readyForHandshake { + return + } + + err := p.handleOneResp() + if xerrors.Is(err, responsesClosed) { + return + } + } +} + +func (p *Peer) AssertEventuallyGetsError(match string) { + p.t.Helper() + for { + err := p.handleOneResp() + if xerrors.Is(err, responsesClosed) { + return + } + + if err != nil && assert.ErrorContains(p.t, err, match) { + return + } + } +} + var responsesClosed = xerrors.New("responses closed") func (p *Peer) handleOneResp() error { @@ -145,6 +190,9 @@ func (p *Peer) handleOneResp() error { if !ok { return responsesClosed } + if resp.Error != "" { + return xerrors.New(resp.Error) + } for _, update := range resp.PeerUpdates { 
id, err := uuid.FromBytes(update.Id) if err != nil { @@ -152,12 +200,16 @@ func (p *Peer) handleOneResp() error { } switch update.Kind { case proto.CoordinateResponse_PeerUpdate_NODE, proto.CoordinateResponse_PeerUpdate_LOST: - p.peers[id] = PeerStatus{ - preferredDERP: update.GetNode().GetPreferredDerp(), - status: update.Kind, - } + peer := p.peers[id] + peer.preferredDERP = update.GetNode().GetPreferredDerp() + peer.status = update.Kind + p.peers[id] = peer case proto.CoordinateResponse_PeerUpdate_DISCONNECTED: delete(p.peers, id) + case proto.CoordinateResponse_PeerUpdate_READY_FOR_HANDSHAKE: + peer := p.peers[id] + peer.readyForHandshake = true + p.peers[id] = peer default: return xerrors.Errorf("unhandled update kind %s", update.Kind) } From 80f597812450d8f2caf48228c51d5857bade77de Mon Sep 17 00:00:00 2001 From: Jon Ayers Date: Tue, 16 Apr 2024 22:52:07 -0500 Subject: [PATCH 03/25] chore: add license review to CI (#12981) --- .github/workflows/ci.yaml | 40 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 8aaaa74398..dc30004878 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -640,6 +640,7 @@ jobs: - test-e2e - offlinedocs - sqlc-vet + - dependency-license-review # Allow this job to run even if the needed jobs fail, are skipped or # cancelled. if: always() @@ -656,6 +657,7 @@ jobs: echo "- test-js: ${{ needs.test-js.result }}" echo "- test-e2e: ${{ needs.test-e2e.result }}" echo "- offlinedocs: ${{ needs.offlinedocs.result }}" + echo "- dependency-license-review: ${{ needs.dependency-license-review.result }}" echo # We allow skipped jobs to pass, but not failed or cancelled jobs. @@ -896,3 +898,41 @@ jobs: - name: Setup and run sqlc vet run: | make sqlc-vet + + # dependency-license-review checks that no license-incompatible dependencies have been introduced. + # This action is not intended to do a vulnerability check since that is handled by a separate action. + dependency-license-review: + runs-on: ubuntu-latest + steps: + - name: "Checkout Repository" + uses: actions/checkout@v4 + - name: "Dependency Review" + id: review + uses: actions/dependency-review-action@v4 + with: + allow-licenses: Apache-2.0, BSD-2-Clause, BSD-3-Clause, CC0-1.0, ISC, MIT, MIT-0, MPL-2.0 + license-check: true + vulnerability-check: false + - name: "Report" + # make sure this step runs even if the previous failed + if: always() + shell: bash + env: + VULNERABLE_CHANGES: ${{ steps.review.outputs.invalid-license-changes }} + run: | + fields=( "unlicensed" "unresolved" "forbidden" ) + + # This is unfortunate that we have to do this but the action does not support failing on + # an unknown license. The unknown dependency could easily have a GPL license which + # would be problematic for us. + # Track https://github.com/actions/dependency-review-action/issues/672 for when + # we can remove this brittle workaround. + for field in "${fields[@]}"; do + # Use jq to check if the array is not empty + if [[ $(echo "$VULNERABLE_CHANGES" | jq ".${field} | length") -ne 0 ]]; then + echo "Invalid or unknown licenses detected, contact @sreya to ensure your added dependency falls under one of our allowed licenses." 
+ echo "$VULNERABLE_CHANGES" | jq + exit 1 + fi + done + echo "No incompatible licenses detected" From 0c993566dd4b16867920f3fecf6a1a44755cd6d0 Mon Sep 17 00:00:00 2001 From: Jon Ayers Date: Tue, 16 Apr 2024 23:08:22 -0500 Subject: [PATCH 04/25] hotfix: skip dependency license review on main (#12982) --- .github/workflows/ci.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index dc30004878..b922608aaf 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -903,6 +903,7 @@ jobs: # This action is not intended to do a vulnerability check since that is handled by a separate action. dependency-license-review: runs-on: ubuntu-latest + if: github.ref != 'refs/heads/main' steps: - name: "Checkout Repository" uses: actions/checkout@v4 From ee7dda81117947b6138ed89b39df183f8befdeb1 Mon Sep 17 00:00:00 2001 From: Marcin Tojek Date: Wed, 17 Apr 2024 13:51:55 +0200 Subject: [PATCH 05/25] refactor(site): verify deployment config flags in e2e tests (#12986) --- site/e2e/api.ts | 134 ++++++++++++--------- site/e2e/tests/deployment/security.spec.ts | 55 ++++++--- site/e2e/tests/deployment/userAuth.spec.ts | 46 +++---- 3 files changed, 140 insertions(+), 95 deletions(-) diff --git a/site/e2e/api.ts b/site/e2e/api.ts index c100c16d36..940dcf11ab 100644 --- a/site/e2e/api.ts +++ b/site/e2e/api.ts @@ -1,6 +1,7 @@ import type { Page } from "@playwright/test"; import { expect } from "@playwright/test"; import * as API from "api/api"; +import type { SerpentOption } from "api/typesGenerated"; import { coderPort } from "./constants"; import { findSessionToken, randomName } from "./helpers"; @@ -49,67 +50,92 @@ export const createGroup = async (orgId: string) => { return group; }; -export async function verifyConfigFlag( +export async function verifyConfigFlagBoolean( page: Page, config: API.DeploymentConfig, flag: string, ) { + const opt = findConfigOption(config, flag); + const type = opt.value ? "option-enabled" : "option-disabled"; + const value = opt.value ? 
"Enabled" : "Disabled"; + + const configOption = page.locator( + `div.options-table .option-${flag} .${type}`, + ); + await expect(configOption).toHaveText(value); +} + +export async function verifyConfigFlagNumber( + page: Page, + config: API.DeploymentConfig, + flag: string, +) { + const opt = findConfigOption(config, flag); + const configOption = page.locator( + `div.options-table .option-${flag} .option-value-number`, + ); + await expect(configOption).toHaveText(String(opt.value)); +} + +export async function verifyConfigFlagString( + page: Page, + config: API.DeploymentConfig, + flag: string, +) { + const opt = findConfigOption(config, flag); + + const configOption = page.locator( + `div.options-table .option-${flag} .option-value-string`, + ); + await expect(configOption).toHaveText(opt.value); +} + +export async function verifyConfigFlagArray( + page: Page, + config: API.DeploymentConfig, + flag: string, +) { + const opt = findConfigOption(config, flag); + const configOption = page.locator( + `div.options-table .option-${flag} .option-array`, + ); + + // Verify array of options with simple dots + for (const item of opt.value) { + await expect(configOption.locator("li", { hasText: item })).toBeVisible(); + } +} + +export async function verifyConfigFlagEntries( + page: Page, + config: API.DeploymentConfig, + flag: string, +) { + const opt = findConfigOption(config, flag); + const configOption = page.locator( + `div.options-table .option-${flag} .option-array`, + ); + + // Verify array of options with green marks. + Object.entries(opt.value) + .sort((a, b) => a[0].localeCompare(b[0])) + .map(async ([item]) => { + await expect( + configOption.locator(`.option-array-item-${item}.option-enabled`, { + hasText: item, + }), + ).toBeVisible(); + }); +} + +export function findConfigOption( + config: API.DeploymentConfig, + flag: string, +): SerpentOption { const opt = config.options.find((option) => option.flag === flag); if (opt === undefined) { // must be undefined as `false` is expected throw new Error(`Option with env ${flag} has undefined value.`); } - - // Map option type to test class name. - let type: string; - let value = opt.value; - - if (typeof value === "boolean") { - // Boolean options map to string (Enabled/Disabled). - type = value ? "option-enabled" : "option-disabled"; - value = value ? "Enabled" : "Disabled"; - } else if (typeof value === "number") { - type = "option-value-number"; - value = String(value); - } else if (!value || value.length === 0) { - type = "option-value-empty"; - } else if (typeof value === "string") { - type = "option-value-string"; - } else if (typeof value === "object") { - type = "option-array"; - } else { - type = "option-value-json"; - } - - // Special cases - if (opt.flag === "strict-transport-security" && opt.value === 0) { - type = "option-value-string"; - value = "Disabled"; // Display "Disabled" instead of zero seconds. - } - - const configOption = page.locator( - `div.options-table .option-${flag} .${type}`, - ); - - // Verify array of options with green marks. 
- if (typeof value === "object" && !Array.isArray(value)) { - Object.entries(value) - .sort((a, b) => a[0].localeCompare(b[0])) - .map(async ([item]) => { - await expect( - configOption.locator(`.option-array-item-${item}.option-enabled`, { - hasText: item, - }), - ).toBeVisible(); - }); - return; - } - // Verify array of options with simmple dots - if (Array.isArray(value)) { - for (const item of value) { - await expect(configOption.locator("li", { hasText: item })).toBeVisible(); - } - return; - } - await expect(configOption).toHaveText(String(value)); + return opt; } diff --git a/site/e2e/tests/deployment/security.spec.ts b/site/e2e/tests/deployment/security.spec.ts index 4aa81f109a..ede966260c 100644 --- a/site/e2e/tests/deployment/security.spec.ts +++ b/site/e2e/tests/deployment/security.spec.ts @@ -1,6 +1,14 @@ -import { test } from "@playwright/test"; +import type { Page } from "@playwright/test"; +import { expect, test } from "@playwright/test"; +import type * as API from "api/api"; import { getDeploymentConfig } from "api/api"; -import { setupApiCalls, verifyConfigFlag } from "../../api"; +import { + findConfigOption, + setupApiCalls, + verifyConfigFlagBoolean, + verifyConfigFlagNumber, + verifyConfigFlagString, +} from "../../api"; test("enabled security settings", async ({ page }) => { await setupApiCalls(page); @@ -8,21 +16,32 @@ test("enabled security settings", async ({ page }) => { await page.goto("/deployment/security", { waitUntil: "domcontentloaded" }); - const flags = [ - "ssh-keygen-algorithm", - "secure-auth-cookie", - "disable-owner-workspace-access", + await verifyConfigFlagString(page, config, "ssh-keygen-algorithm"); + await verifyConfigFlagBoolean(page, config, "secure-auth-cookie"); + await verifyConfigFlagBoolean(page, config, "disable-owner-workspace-access"); - "tls-redirect-http-to-https", - "strict-transport-security", - "tls-address", - "tls-allow-insecure-ciphers", - "tls-client-auth", - "tls-enable", - "tls-min-version", - ]; - - for (const flag of flags) { - await verifyConfigFlag(page, config, flag); - } + await verifyConfigFlagBoolean(page, config, "tls-redirect-http-to-https"); + await verifyStrictTransportSecurity(page, config); + await verifyConfigFlagString(page, config, "tls-address"); + await verifyConfigFlagBoolean(page, config, "tls-allow-insecure-ciphers"); + await verifyConfigFlagString(page, config, "tls-client-auth"); + await verifyConfigFlagBoolean(page, config, "tls-enable"); + await verifyConfigFlagString(page, config, "tls-min-version"); }); + +async function verifyStrictTransportSecurity( + page: Page, + config: API.DeploymentConfig, +) { + const flag = "strict-transport-security"; + const opt = findConfigOption(config, flag); + if (opt.value !== 0) { + await verifyConfigFlagNumber(page, config, flag); + return; + } + + const configOption = page.locator( + `div.options-table .option-${flag} .option-value-string`, + ); + await expect(configOption).toHaveText("Disabled"); +} diff --git a/site/e2e/tests/deployment/userAuth.spec.ts b/site/e2e/tests/deployment/userAuth.spec.ts index 68e28fe6b7..cf656c99fa 100644 --- a/site/e2e/tests/deployment/userAuth.spec.ts +++ b/site/e2e/tests/deployment/userAuth.spec.ts @@ -1,6 +1,12 @@ import { test } from "@playwright/test"; import { getDeploymentConfig } from "api/api"; -import { setupApiCalls, verifyConfigFlag } from "../../api"; +import { + setupApiCalls, + verifyConfigFlagArray, + verifyConfigFlagBoolean, + verifyConfigFlagEntries, + verifyConfigFlagString, +} from "../../api"; test("login with 
OIDC", async ({ page }) => { await setupApiCalls(page); @@ -8,26 +14,20 @@ test("login with OIDC", async ({ page }) => { await page.goto("/deployment/userauth", { waitUntil: "domcontentloaded" }); - const flags = [ - "oidc-group-auto-create", - "oidc-allow-signups", - "oidc-auth-url-params", - "oidc-client-id", - "oidc-email-domain", - "oidc-email-field", - "oidc-group-mapping", - "oidc-ignore-email-verified", - "oidc-ignore-userinfo", - "oidc-issuer-url", - "oidc-group-regex-filter", - "oidc-scopes", - "oidc-user-role-mapping", - "oidc-username-field", - "oidc-sign-in-text", - "oidc-icon-url", - ]; - - for (const flag of flags) { - await verifyConfigFlag(page, config, flag); - } + await verifyConfigFlagBoolean(page, config, "oidc-group-auto-create"); + await verifyConfigFlagBoolean(page, config, "oidc-allow-signups"); + await verifyConfigFlagEntries(page, config, "oidc-auth-url-params"); + await verifyConfigFlagString(page, config, "oidc-client-id"); + await verifyConfigFlagArray(page, config, "oidc-email-domain"); + await verifyConfigFlagString(page, config, "oidc-email-field"); + await verifyConfigFlagEntries(page, config, "oidc-group-mapping"); + await verifyConfigFlagBoolean(page, config, "oidc-ignore-email-verified"); + await verifyConfigFlagBoolean(page, config, "oidc-ignore-userinfo"); + await verifyConfigFlagString(page, config, "oidc-issuer-url"); + await verifyConfigFlagString(page, config, "oidc-group-regex-filter"); + await verifyConfigFlagArray(page, config, "oidc-scopes"); + await verifyConfigFlagEntries(page, config, "oidc-user-role-mapping"); + await verifyConfigFlagString(page, config, "oidc-username-field"); + await verifyConfigFlagString(page, config, "oidc-sign-in-text"); + await verifyConfigFlagString(page, config, "oidc-icon-url"); }); From cb8c576c9383dc89e5eb0878639a28c241a4d67d Mon Sep 17 00:00:00 2001 From: Marcin Tojek Date: Wed, 17 Apr 2024 16:06:49 +0200 Subject: [PATCH 06/25] test(site): add e2e tests for network (#12987) --- site/e2e/api.ts | 21 ++++++++++++ site/e2e/tests/deployment/network.spec.ts | 40 +++++++++++++++++++++++ 2 files changed, 61 insertions(+) create mode 100644 site/e2e/tests/deployment/network.spec.ts diff --git a/site/e2e/api.ts b/site/e2e/api.ts index 940dcf11ab..90fe72b1c1 100644 --- a/site/e2e/api.ts +++ b/site/e2e/api.ts @@ -1,5 +1,6 @@ import type { Page } from "@playwright/test"; import { expect } from "@playwright/test"; +import { formatDuration, intervalToDuration } from "date-fns"; import * as API from "api/api"; import type { SerpentOption } from "api/typesGenerated"; import { coderPort } from "./constants"; @@ -128,6 +129,26 @@ export async function verifyConfigFlagEntries( }); } +export async function verifyConfigFlagDuration( + page: Page, + config: API.DeploymentConfig, + flag: string, +) { + const opt = findConfigOption(config, flag); + const configOption = page.locator( + `div.options-table .option-${flag} .option-value-string`, + ); + await expect(configOption).toHaveText( + formatDuration( + // intervalToDuration takes ms, so convert nanoseconds to ms + intervalToDuration({ + start: 0, + end: (opt.value as number) / 1e6, + }), + ), + ); +} + export function findConfigOption( config: API.DeploymentConfig, flag: string, diff --git a/site/e2e/tests/deployment/network.spec.ts b/site/e2e/tests/deployment/network.spec.ts new file mode 100644 index 0000000000..ec4d869622 --- /dev/null +++ b/site/e2e/tests/deployment/network.spec.ts @@ -0,0 +1,40 @@ +import { test } from "@playwright/test"; +import { getDeploymentConfig } from 
"api/api"; +import { + setupApiCalls, + verifyConfigFlagArray, + verifyConfigFlagBoolean, + verifyConfigFlagDuration, + verifyConfigFlagNumber, + verifyConfigFlagString, +} from "../../api"; + +test("login with OIDC", async ({ page }) => { + await setupApiCalls(page); + const config = await getDeploymentConfig(); + + await page.goto("/deployment/network", { waitUntil: "domcontentloaded" }); + + await verifyConfigFlagString(page, config, "access-url"); + await verifyConfigFlagBoolean(page, config, "block-direct-connections"); + await verifyConfigFlagBoolean(page, config, "browser-only"); + await verifyConfigFlagBoolean(page, config, "derp-force-websockets"); + await verifyConfigFlagBoolean(page, config, "derp-server-enable"); + await verifyConfigFlagString(page, config, "derp-server-region-code"); + await verifyConfigFlagString(page, config, "derp-server-region-code"); + await verifyConfigFlagNumber(page, config, "derp-server-region-id"); + await verifyConfigFlagString(page, config, "derp-server-region-name"); + await verifyConfigFlagArray(page, config, "derp-server-stun-addresses"); + await verifyConfigFlagBoolean(page, config, "disable-password-auth"); + await verifyConfigFlagBoolean(page, config, "disable-session-expiry-refresh"); + await verifyConfigFlagDuration(page, config, "max-token-lifetime"); + await verifyConfigFlagDuration(page, config, "proxy-health-interval"); + await verifyConfigFlagBoolean(page, config, "redirect-to-access-url"); + await verifyConfigFlagBoolean(page, config, "secure-auth-cookie"); + await verifyConfigFlagDuration(page, config, "session-duration"); + await verifyConfigFlagString(page, config, "tls-address"); + await verifyConfigFlagBoolean(page, config, "tls-allow-insecure-ciphers"); + await verifyConfigFlagString(page, config, "tls-client-auth"); + await verifyConfigFlagBoolean(page, config, "tls-enable"); + await verifyConfigFlagString(page, config, "tls-min-version"); +}); From b85d5d84919bc9552d30d264c232f8cc401dfca6 Mon Sep 17 00:00:00 2001 From: Danny Kopping Date: Wed, 17 Apr 2024 16:59:31 +0200 Subject: [PATCH 07/25] feat: add warning about use of old/removed/invalid experiments (#12962) --- .../GeneralSettingsPage.tsx | 14 +++++-- .../GeneralSettingsPageView.stories.tsx | 40 ++++++++++++++++++- .../GeneralSettingsPageView.tsx | 28 ++++++++++++- site/src/pages/DeploySettingsPage/Option.tsx | 6 +-- .../pages/DeploySettingsPage/optionValue.ts | 14 +++---- 5 files changed, 87 insertions(+), 15 deletions(-) diff --git a/site/src/pages/DeploySettingsPage/GeneralSettingsPage/GeneralSettingsPage.tsx b/site/src/pages/DeploySettingsPage/GeneralSettingsPage/GeneralSettingsPage.tsx index 4f9f627a0b..2e79b36f46 100644 --- a/site/src/pages/DeploySettingsPage/GeneralSettingsPage/GeneralSettingsPage.tsx +++ b/site/src/pages/DeploySettingsPage/GeneralSettingsPage/GeneralSettingsPage.tsx @@ -3,7 +3,7 @@ import { Helmet } from "react-helmet-async"; import { useQuery } from "react-query"; import { deploymentDAUs } from "api/queries/deployment"; import { entitlements } from "api/queries/entitlements"; -import { availableExperiments } from "api/queries/experiments"; +import { availableExperiments, experiments } from "api/queries/experiments"; import { pageTitle } from "utils/page"; import { useDeploySettings } from "../DeploySettingsLayout"; import { GeneralSettingsPageView } from "./GeneralSettingsPageView"; @@ -12,7 +12,14 @@ const GeneralSettingsPage: FC = () => { const { deploymentValues } = useDeploySettings(); const deploymentDAUsQuery = useQuery(deploymentDAUs()); 
const entitlementsQuery = useQuery(entitlements()); - const experimentsQuery = useQuery(availableExperiments()); + const enabledExperimentsQuery = useQuery(experiments()); + const safeExperimentsQuery = useQuery(availableExperiments()); + + const safeExperiments = safeExperimentsQuery.data?.safe ?? []; + const invalidExperiments = + enabledExperimentsQuery.data?.filter((exp) => { + return !safeExperiments.includes(exp); + }) ?? []; return ( <> @@ -24,7 +31,8 @@ const GeneralSettingsPage: FC = () => { deploymentDAUs={deploymentDAUsQuery.data} deploymentDAUsError={deploymentDAUsQuery.error} entitlements={entitlementsQuery.data} - safeExperiments={experimentsQuery.data?.safe ?? []} + invalidExperiments={invalidExperiments} + safeExperiments={safeExperiments} /> ); diff --git a/site/src/pages/DeploySettingsPage/GeneralSettingsPage/GeneralSettingsPageView.stories.tsx b/site/src/pages/DeploySettingsPage/GeneralSettingsPage/GeneralSettingsPageView.stories.tsx index c945ef9381..a04270b9e3 100644 --- a/site/src/pages/DeploySettingsPage/GeneralSettingsPage/GeneralSettingsPageView.stories.tsx +++ b/site/src/pages/DeploySettingsPage/GeneralSettingsPage/GeneralSettingsPageView.stories.tsx @@ -40,6 +40,7 @@ const meta: Meta = { }, ], deploymentDAUs: MockDeploymentDAUResponse, + invalidExperiments: [], safeExperiments: [], }, }; @@ -102,6 +103,43 @@ export const allExperimentsEnabled: Story = { hidden: false, }, ], - safeExperiments: [], + safeExperiments: ["shared-ports"], + invalidExperiments: ["invalid"], + }, +}; + +export const invalidExperimentsEnabled: Story = { + args: { + deploymentOptions: [ + { + name: "Access URL", + description: + "The URL that users will use to access the Coder deployment.", + flag: "access-url", + flag_shorthand: "", + value: "https://dev.coder.com", + hidden: false, + }, + { + name: "Wildcard Access URL", + description: + 'Specifies the wildcard hostname to use for workspace applications in the form "*.example.com".', + flag: "wildcard-access-url", + flag_shorthand: "", + value: "*--apps.dev.coder.com", + hidden: false, + }, + { + name: "Experiments", + description: + "Enable one or more experiments. These are not ready for production. 
Separate multiple experiments with commas, or enter '*' to opt-in to all available experiments.", + flag: "experiments", + value: ["invalid", "*"], + flag_shorthand: "", + hidden: false, + }, + ], + safeExperiments: ["shared-ports"], + invalidExperiments: ["invalid"], }, }; diff --git a/site/src/pages/DeploySettingsPage/GeneralSettingsPage/GeneralSettingsPageView.tsx b/site/src/pages/DeploySettingsPage/GeneralSettingsPage/GeneralSettingsPageView.tsx index 612a4f280a..fbbe4127c9 100644 --- a/site/src/pages/DeploySettingsPage/GeneralSettingsPage/GeneralSettingsPageView.tsx +++ b/site/src/pages/DeploySettingsPage/GeneralSettingsPage/GeneralSettingsPageView.tsx @@ -1,3 +1,4 @@ +import AlertTitle from "@mui/material/AlertTitle"; import type { FC } from "react"; import type { SerpentOption, @@ -13,6 +14,7 @@ import { ErrorAlert } from "components/Alert/ErrorAlert"; import { Stack } from "components/Stack/Stack"; import { useDeploymentOptions } from "utils/deployOptions"; import { docs } from "utils/docs"; +import { Alert } from "../../../components/Alert/Alert"; import { Header } from "../Header"; import OptionsTable from "../OptionsTable"; import { ChartSection } from "./ChartSection"; @@ -22,7 +24,8 @@ export type GeneralSettingsPageViewProps = { deploymentDAUs?: DAUsResponse; deploymentDAUsError: unknown; entitlements: Entitlements | undefined; - safeExperiments: Experiments | undefined; + readonly invalidExperiments: Experiments | string[]; + readonly safeExperiments: Experiments | string[]; }; export const GeneralSettingsPageView: FC = ({ @@ -31,6 +34,7 @@ export const GeneralSettingsPageView: FC = ({ deploymentDAUsError, entitlements, safeExperiments, + invalidExperiments, }) => { return ( <> @@ -58,6 +62,28 @@ export const GeneralSettingsPageView: FC = ({ )} + {invalidExperiments.length > 0 && ( + + Invalid experiments in use: +
+              {invalidExperiments.map((it) => (
+                {it}
+              ))}
+            It is recommended that you remove these experiments from your
+            configuration as they have no effect. See{" "}
+            the documentation{" "}
+            for more details.
+ )} = (props) => { }} > {isEnabled && ( - ({ width: 16, height: 16, - color: theme.palette.success.light, + color: theme.palette.mode, margin: "0 8px", })} /> diff --git a/site/src/pages/DeploySettingsPage/optionValue.ts b/site/src/pages/DeploySettingsPage/optionValue.ts index 75435a894c..596a3333eb 100644 --- a/site/src/pages/DeploySettingsPage/optionValue.ts +++ b/site/src/pages/DeploySettingsPage/optionValue.ts @@ -38,13 +38,13 @@ export function optionValue( ([key, value]) => `"${key}"->"${value}"`, ); case "Experiments": { - const experimentMap: Record | undefined = - additionalValues?.reduce( - (acc, v) => { - return { ...acc, [v]: option.value.includes("*") ? true : false }; - }, - {} as Record, - ); + const experimentMap = additionalValues?.reduce>( + (acc, v) => { + acc[v] = option.value.includes("*"); + return acc; + }, + {}, + ); if (!experimentMap) { break; From 227e6320539850405a868daf179e9000569cf479 Mon Sep 17 00:00:00 2001 From: Kyle Carberry Date: Wed, 17 Apr 2024 13:30:32 -0400 Subject: [PATCH 08/25] fix: add grace period before showing replicas license error (#12989) Fixes #8665. --- enterprise/coderd/coderd.go | 25 +++++++++++- .../coderd/coderdenttest/coderdenttest.go | 2 + enterprise/coderd/replicas_test.go | 40 +++++++++++++++++-- 3 files changed, 63 insertions(+), 4 deletions(-) diff --git a/enterprise/coderd/coderd.go b/enterprise/coderd/coderd.go index 2bb9eb3bc0..722cc3631e 100644 --- a/enterprise/coderd/coderd.go +++ b/enterprise/coderd/coderd.go @@ -13,6 +13,7 @@ import ( "time" "github.com/coder/coder/v2/coderd/appearance" + "github.com/coder/coder/v2/coderd/database" agplportsharing "github.com/coder/coder/v2/coderd/portsharing" "github.com/coder/coder/v2/enterprise/coderd/portsharing" @@ -27,6 +28,7 @@ import ( "github.com/coder/coder/v2/coderd" agplaudit "github.com/coder/coder/v2/coderd/audit" agpldbauthz "github.com/coder/coder/v2/coderd/database/dbauthz" + "github.com/coder/coder/v2/coderd/database/dbtime" "github.com/coder/coder/v2/coderd/healthcheck" "github.com/coder/coder/v2/coderd/httpapi" "github.com/coder/coder/v2/coderd/httpmw" @@ -64,6 +66,11 @@ func New(ctx context.Context, options *Options) (_ *API, err error) { if options.Options.Authorizer == nil { options.Options.Authorizer = rbac.NewCachingAuthorizer(options.PrometheusRegistry) } + if options.ReplicaErrorGracePeriod == 0 { + // This will prevent the error from being shown for a minute + // from when an additional replica was started. + options.ReplicaErrorGracePeriod = time.Minute + } ctx, cancelFunc := context.WithCancel(ctx) @@ -429,6 +436,7 @@ type Options struct { // Used for high availability. ReplicaSyncUpdateInterval time.Duration + ReplicaErrorGracePeriod time.Duration DERPServerRelayAddress string DERPServerRegionID int @@ -525,9 +533,24 @@ func (api *API) updateEntitlements(ctx context.Context) error { api.entitlementsUpdateMu.Lock() defer api.entitlementsUpdateMu.Unlock() + replicas := api.replicaManager.AllPrimary() + agedReplicas := make([]database.Replica, 0, len(replicas)) + for _, replica := range replicas { + // If a replica is less than the update interval old, we don't + // want to display a warning. In the open-source version of Coder, + // Kubernetes Pods will start up before shutting down the other, + // and we don't want to display a warning in that case. + // + // Only display warnings for long-lived replicas! 
+ if dbtime.Now().Sub(replica.StartedAt) < api.ReplicaErrorGracePeriod { + continue + } + agedReplicas = append(agedReplicas, replica) + } + entitlements, err := license.Entitlements( ctx, api.Database, - api.Logger, len(api.replicaManager.AllPrimary()), len(api.ExternalAuthConfigs), api.LicenseKeys, map[codersdk.FeatureName]bool{ + api.Logger, len(agedReplicas), len(api.ExternalAuthConfigs), api.LicenseKeys, map[codersdk.FeatureName]bool{ codersdk.FeatureAuditLog: api.AuditLogging, codersdk.FeatureBrowserOnly: api.BrowserOnly, codersdk.FeatureSCIM: len(api.SCIMAPIKey) != 0, diff --git a/enterprise/coderd/coderdenttest/coderdenttest.go b/enterprise/coderd/coderdenttest/coderdenttest.go index 75900fd06d..22ca40a391 100644 --- a/enterprise/coderd/coderdenttest/coderdenttest.go +++ b/enterprise/coderd/coderdenttest/coderdenttest.go @@ -57,6 +57,7 @@ type Options struct { DontAddLicense bool DontAddFirstUser bool ReplicaSyncUpdateInterval time.Duration + ReplicaErrorGracePeriod time.Duration ExternalTokenEncryption []dbcrypt.Cipher ProvisionerDaemonPSK string } @@ -93,6 +94,7 @@ func NewWithAPI(t *testing.T, options *Options) ( DERPServerRelayAddress: oop.AccessURL.String(), DERPServerRegionID: oop.BaseDERPMap.RegionIDs()[0], ReplicaSyncUpdateInterval: options.ReplicaSyncUpdateInterval, + ReplicaErrorGracePeriod: options.ReplicaErrorGracePeriod, Options: oop, EntitlementsUpdateInterval: options.EntitlementsUpdateInterval, LicenseKeys: Keys, diff --git a/enterprise/coderd/replicas_test.go b/enterprise/coderd/replicas_test.go index 595f2fe375..6be1283d2e 100644 --- a/enterprise/coderd/replicas_test.go +++ b/enterprise/coderd/replicas_test.go @@ -4,6 +4,7 @@ import ( "context" "crypto/tls" "testing" + "time" "github.com/stretchr/testify/require" @@ -22,9 +23,42 @@ import ( func TestReplicas(t *testing.T) { t.Parallel() if !dbtestutil.WillUsePostgres() { - t.Skip("only test with real postgresF") + t.Skip("only test with real postgres") } t.Run("ErrorWithoutLicense", func(t *testing.T) { + t.Parallel() + // This will error because replicas are expected to instantly report + // errors when the license is not present. 
+ db, pubsub := dbtestutil.NewDB(t) + firstClient, _ := coderdenttest.New(t, &coderdenttest.Options{ + Options: &coderdtest.Options{ + IncludeProvisionerDaemon: true, + Database: db, + Pubsub: pubsub, + }, + DontAddLicense: true, + ReplicaErrorGracePeriod: time.Nanosecond, + }) + secondClient, _, secondAPI, _ := coderdenttest.NewWithAPI(t, &coderdenttest.Options{ + Options: &coderdtest.Options{ + Database: db, + Pubsub: pubsub, + }, + DontAddFirstUser: true, + DontAddLicense: true, + ReplicaErrorGracePeriod: time.Nanosecond, + }) + secondClient.SetSessionToken(firstClient.SessionToken()) + ents, err := secondClient.Entitlements(context.Background()) + require.NoError(t, err) + require.Len(t, ents.Errors, 1) + _ = secondAPI.Close() + + ents, err = firstClient.Entitlements(context.Background()) + require.NoError(t, err) + require.Len(t, ents.Warnings, 0) + }) + t.Run("DoesNotErrorBeforeGrace", func(t *testing.T) { t.Parallel() db, pubsub := dbtestutil.NewDB(t) firstClient, _ := coderdenttest.New(t, &coderdenttest.Options{ @@ -46,12 +80,12 @@ func TestReplicas(t *testing.T) { secondClient.SetSessionToken(firstClient.SessionToken()) ents, err := secondClient.Entitlements(context.Background()) require.NoError(t, err) - require.Len(t, ents.Errors, 1) + require.Len(t, ents.Errors, 0) _ = secondAPI.Close() ents, err = firstClient.Entitlements(context.Background()) require.NoError(t, err) - require.Len(t, ents.Warnings, 0) + require.Len(t, ents.Errors, 0) }) t.Run("ConnectAcrossMultiple", func(t *testing.T) { t.Parallel() From 3338cdca77dc0e166ae31e94083889d4bfc1445f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 17 Apr 2024 13:30:53 -0400 Subject: [PATCH 09/25] chore: bump github.com/moby/moby (#12960) Bumps [github.com/moby/moby](https://github.com/moby/moby) from 25.0.2+incompatible to 26.0.1+incompatible. - [Release notes](https://github.com/moby/moby/releases) - [Commits](https://github.com/moby/moby/compare/v25.0.2...v26.0.1) --- updated-dependencies: - dependency-name: github.com/moby/moby dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 9346fc7427..0751b4216b 100644 --- a/go.mod +++ b/go.mod @@ -153,7 +153,7 @@ require ( github.com/mattn/go-isatty v0.0.20 github.com/mitchellh/go-wordwrap v1.0.1 github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c - github.com/moby/moby v25.0.2+incompatible + github.com/moby/moby v26.0.1+incompatible github.com/muesli/termenv v0.15.2 github.com/open-policy-agent/opa v0.58.0 github.com/ory/dockertest/v3 v3.10.0 diff --git a/go.sum b/go.sum index 10e0890965..ac1f3fdd9f 100644 --- a/go.sum +++ b/go.sum @@ -699,8 +699,8 @@ github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374 github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/moby v25.0.2+incompatible h1:g2oKRI7vgWkiPHZbBghaPbcV/SuKP1g/YLx0I2nxFT4= -github.com/moby/moby v25.0.2+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= +github.com/moby/moby v26.0.1+incompatible h1:vCKs/AM0lLYnMxFwpf8ycsOekPPPcGn0s0Iczqv3/ec= +github.com/moby/moby v26.0.1+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= From 6b4eb03192fa8cf8e8a7f3771c665c739146fe7d Mon Sep 17 00:00:00 2001 From: Colin Adler Date: Wed, 17 Apr 2024 12:38:17 -0500 Subject: [PATCH 10/25] chore: give additional time in tests for `tailnetAPIConnector` graceful disconnect (#12980) Failure seen here: https://github.com/coder/coder/actions/runs/8711258577/job/23894964182?pr=12979 --- codersdk/workspacesdk/connector.go | 4 +++- codersdk/workspacesdk/connector_internal_test.go | 5 +++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/codersdk/workspacesdk/connector.go b/codersdk/workspacesdk/connector.go index 7955e8fb33..d6349adaf6 100644 --- a/codersdk/workspacesdk/connector.go +++ b/codersdk/workspacesdk/connector.go @@ -20,6 +20,8 @@ import ( "github.com/coder/retry" ) +var tailnetConnectorGracefulTimeout = time.Second + // tailnetConn is the subset of the tailnet.Conn methods that tailnetAPIConnector uses. It is // included so that we can fake it in testing. // @@ -86,7 +88,7 @@ func runTailnetAPIConnector( func (tac *tailnetAPIConnector) manageGracefulTimeout() { defer tac.cancelGracefulCtx() <-tac.ctx.Done() - timer := time.NewTimer(time.Second) + timer := time.NewTimer(tailnetConnectorGracefulTimeout) defer timer.Stop() select { case <-tac.closed: diff --git a/codersdk/workspacesdk/connector_internal_test.go b/codersdk/workspacesdk/connector_internal_test.go index 9f70891fda..06ff3e2c66 100644 --- a/codersdk/workspacesdk/connector_internal_test.go +++ b/codersdk/workspacesdk/connector_internal_test.go @@ -24,6 +24,11 @@ import ( "github.com/coder/coder/v2/testutil" ) +func init() { + // Give tests a bit more time to timeout. Darwin is particularly slow. 
+ tailnetConnectorGracefulTimeout = 5 * time.Second +} + func TestTailnetAPIConnector_Disconnects(t *testing.T) { t.Parallel() testCtx := testutil.Context(t, testutil.WaitShort) From 92190443ff94b762e3efe16783cb97632244d74e Mon Sep 17 00:00:00 2001 From: Mathias Fredriksson Date: Wed, 17 Apr 2024 20:43:13 +0300 Subject: [PATCH 11/25] fix(coderd/metricscache): avoid logging error for no rows (#12988) Fixes #12938 --- coderd/metricscache/metricscache.go | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/coderd/metricscache/metricscache.go b/coderd/metricscache/metricscache.go index f1f06fcacd..13b2e1bb68 100644 --- a/coderd/metricscache/metricscache.go +++ b/coderd/metricscache/metricscache.go @@ -162,6 +162,7 @@ func (c *Cache) refreshDeploymentStats(ctx context.Context) error { } func (c *Cache) run(ctx context.Context, name string, interval time.Duration, refresh func(context.Context) error) { + logger := c.log.With(slog.F("name", name), slog.F("interval", interval)) ticker := time.NewTicker(interval) defer ticker.Stop() @@ -173,15 +174,13 @@ func (c *Cache) run(ctx context.Context, name string, interval time.Duration, re if ctx.Err() != nil { return } - c.log.Error(ctx, "refresh", slog.Error(err)) + if xerrors.Is(err, sql.ErrNoRows) { + break + } + logger.Error(ctx, "refresh metrics failed", slog.Error(err)) continue } - c.log.Debug( - ctx, - name+" metrics refreshed", - slog.F("took", time.Since(start)), - slog.F("interval", interval), - ) + logger.Debug(ctx, "metrics refreshed", slog.F("took", time.Since(start))) break } From d426569d4a1bb40042af52244768848c56af5aac Mon Sep 17 00:00:00 2001 From: Dean Sheather Date: Wed, 17 Apr 2024 11:01:20 -0700 Subject: [PATCH 12/25] fix: make terminal raw in ssh command on windows (#12990) --- cli/ssh.go | 22 +++++++++----- pty/terminal.go | 31 ++++++++++++++++++++ pty/terminal_other.go | 36 +++++++++++++++++++++++ pty/terminal_windows.go | 65 +++++++++++++++++++++++++++++++++++++++++ 4 files changed, 146 insertions(+), 8 deletions(-) create mode 100644 pty/terminal.go create mode 100644 pty/terminal_other.go create mode 100644 pty/terminal_windows.go diff --git a/cli/ssh.go b/cli/ssh.go index 7f40959a59..a291b3764b 100644 --- a/cli/ssh.go +++ b/cli/ssh.go @@ -25,12 +25,8 @@ import ( "golang.org/x/xerrors" "gvisor.dev/gvisor/pkg/tcpip/adapters/gonet" - "github.com/coder/retry" - "github.com/coder/serpent" - "cdr.dev/slog" "cdr.dev/slog/sloggers/sloghuman" - "github.com/coder/coder/v2/cli/cliui" "github.com/coder/coder/v2/cli/cliutil" "github.com/coder/coder/v2/coderd/autobuild/notify" @@ -38,6 +34,9 @@ import ( "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/codersdk/workspacesdk" "github.com/coder/coder/v2/cryptorand" + "github.com/coder/coder/v2/pty" + "github.com/coder/retry" + "github.com/coder/serpent" ) var ( @@ -341,15 +340,22 @@ func (r *RootCmd) ssh() *serpent.Command { } } - stdoutFile, validOut := inv.Stdout.(*os.File) stdinFile, validIn := inv.Stdin.(*os.File) - if validOut && validIn && isatty.IsTerminal(stdoutFile.Fd()) { - state, err := term.MakeRaw(int(stdinFile.Fd())) + stdoutFile, validOut := inv.Stdout.(*os.File) + if validIn && validOut && isatty.IsTerminal(stdinFile.Fd()) && isatty.IsTerminal(stdoutFile.Fd()) { + inState, err := pty.MakeInputRaw(stdinFile.Fd()) if err != nil { return err } defer func() { - _ = term.Restore(int(stdinFile.Fd()), state) + _ = pty.RestoreTerminal(stdinFile.Fd(), inState) + }() + outState, err := pty.MakeOutputRaw(stdoutFile.Fd()) + if err != nil { + 
return err + } + defer func() { + _ = pty.RestoreTerminal(stdoutFile.Fd(), outState) }() windowChange := listenWindowSize(ctx) diff --git a/pty/terminal.go b/pty/terminal.go new file mode 100644 index 0000000000..2c1a35c3ee --- /dev/null +++ b/pty/terminal.go @@ -0,0 +1,31 @@ +package pty + +// TerminalState differs per-platform. +type TerminalState struct { + state terminalState +} + +// MakeInputRaw calls term.MakeRaw on non-Windows platforms. On Windows it sets +// special terminal modes that enable VT100 emulation as well as setting the +// same modes that term.MakeRaw sets. +// +//nolint:revive +func MakeInputRaw(fd uintptr) (*TerminalState, error) { + return makeInputRaw(fd) +} + +// MakeOutputRaw does nothing on non-Windows platforms. On Windows it sets +// special terminal modes that enable VT100 emulation as well as setting the +// same modes that term.MakeRaw sets. +// +//nolint:revive +func MakeOutputRaw(fd uintptr) (*TerminalState, error) { + return makeOutputRaw(fd) +} + +// RestoreTerminal restores the terminal back to its original state. +// +//nolint:revive +func RestoreTerminal(fd uintptr, state *TerminalState) error { + return restoreTerminal(fd, state) +} diff --git a/pty/terminal_other.go b/pty/terminal_other.go new file mode 100644 index 0000000000..9c04354715 --- /dev/null +++ b/pty/terminal_other.go @@ -0,0 +1,36 @@ +//go:build !windows +// +build !windows + +package pty + +import "golang.org/x/term" + +type terminalState *term.State + +//nolint:revive +func makeInputRaw(fd uintptr) (*TerminalState, error) { + s, err := term.MakeRaw(int(fd)) + if err != nil { + return nil, err + } + return &TerminalState{ + state: s, + }, nil +} + +//nolint:revive +func makeOutputRaw(_ uintptr) (*TerminalState, error) { + // Does nothing. makeInputRaw does enough for both input and output. + return &TerminalState{ + state: nil, + }, nil +} + +//nolint:revive +func restoreTerminal(fd uintptr, state *TerminalState) error { + if state == nil || state.state == nil { + return nil + } + + return term.Restore(int(fd), state.state) +} diff --git a/pty/terminal_windows.go b/pty/terminal_windows.go new file mode 100644 index 0000000000..1d8f99d5b9 --- /dev/null +++ b/pty/terminal_windows.go @@ -0,0 +1,65 @@ +//go:build windows +// +build windows + +package pty + +import "golang.org/x/sys/windows" + +type terminalState uint32 + +// This is adapted from term.MakeRaw, but adds +// ENABLE_VIRTUAL_TERMINAL_PROCESSING to the output mode and +// ENABLE_VIRTUAL_TERMINAL_INPUT to the input mode. +// +// See: https://github.com/golang/term/blob/5b15d269ba1f54e8da86c8aa5574253aea0c2198/term_windows.go#L23 +// +// Copyright 2019 The Go Authors. BSD-3-Clause license. 
See: +// https://github.com/golang/term/blob/master/LICENSE +func makeRaw(handle windows.Handle, input bool) (uint32, error) { + var prevState uint32 + if err := windows.GetConsoleMode(handle, &prevState); err != nil { + return 0, err + } + + var raw uint32 + if input { + raw = prevState &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT) + raw |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT + } else { + raw = prevState | windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING + } + + if err := windows.SetConsoleMode(handle, raw); err != nil { + return 0, err + } + return prevState, nil +} + +//nolint:revive +func makeInputRaw(handle uintptr) (*TerminalState, error) { + prevState, err := makeRaw(windows.Handle(handle), true) + if err != nil { + return nil, err + } + + return &TerminalState{ + state: terminalState(prevState), + }, nil +} + +//nolint:revive +func makeOutputRaw(handle uintptr) (*TerminalState, error) { + prevState, err := makeRaw(windows.Handle(handle), false) + if err != nil { + return nil, err + } + + return &TerminalState{ + state: terminalState(prevState), + }, nil +} + +//nolint:revive +func restoreTerminal(handle uintptr, state *TerminalState) error { + return windows.SetConsoleMode(windows.Handle(handle), uint32(state.state)) +} From f5a32b3f272f35472c2182b30894a6a18bfddc54 Mon Sep 17 00:00:00 2001 From: Ben Potter Date: Wed, 17 Apr 2024 20:10:40 -0500 Subject: [PATCH 13/25] docs: explain that mainline stays around for one month now (#12993) * docs: mainline stays around for one month * switch to .x to encourage latest --- docs/install/releases.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/install/releases.md b/docs/install/releases.md index 701c9793e4..ce9711716c 100644 --- a/docs/install/releases.md +++ b/docs/install/releases.md @@ -10,7 +10,7 @@ deployment. We support two release channels: [mainline](https://github.com/coder/coder/2.10.0) for the edge version of Coder and [stable](https://github.com/coder/coder/releases/latest) for those with -lower tolerance for fault. We field our mainline releases publicly for two weeks +lower tolerance for fault. We field our mainline releases publicly for one month before promoting them to stable. ### Mainline releases @@ -46,11 +46,11 @@ pages. 
## Release schedule -| Release name | Date | Status | +| Release name | Release Date | Status | | ------------ | ------------------ | ---------------- | -| 2.7.0 | January 01, 2024 | Not Supported | -| 2.8.0 | Februrary 06, 2024 | Security Support | -| 2.9.0 | March 07, 2024 | Stable | -| 2.10.0 | April 03, 2024 | Mainline | -| 2.11.0 | May 07, 2024 | Not Released | -| 2.12.0 | June 04, 2024 | Not Released | +| 2.7.x | January 01, 2024 | Not Supported | +| 2.8.x | Februrary 06, 2024 | Security Support | +| 2.9.x | March 07, 2024 | Stable | +| 2.10.x | April 03, 2024 | Mainline | +| 2.11.x | May 07, 2024 | Not Released | +| 2.12.x | June 04, 2024 | Not Released | From 75223dfd8b644f496f38a735339eca7064f7c66d Mon Sep 17 00:00:00 2001 From: Marcin Tojek Date: Thu, 18 Apr 2024 12:50:34 +0200 Subject: [PATCH 14/25] test(site): add e2e tests for observability --- site/e2e/api.ts | 7 ++++ site/e2e/tests/deployment/network.spec.ts | 2 +- .../tests/deployment/observability.spec.ts | 39 +++++++++++++++++++ 3 files changed, 47 insertions(+), 1 deletion(-) create mode 100644 site/e2e/tests/deployment/observability.spec.ts diff --git a/site/e2e/api.ts b/site/e2e/api.ts index 90fe72b1c1..65a4aaa40a 100644 --- a/site/e2e/api.ts +++ b/site/e2e/api.ts @@ -91,6 +91,13 @@ export async function verifyConfigFlagString( await expect(configOption).toHaveText(opt.value); } +export async function verifyConfigFlagEmpty(page: Page, flag: string) { + const configOption = page.locator( + `div.options-table .option-${flag} .option-value-empty`, + ); + await expect(configOption).toHaveText("Not set"); +} + export async function verifyConfigFlagArray( page: Page, config: API.DeploymentConfig, diff --git a/site/e2e/tests/deployment/network.spec.ts b/site/e2e/tests/deployment/network.spec.ts index ec4d869622..c979bb8e10 100644 --- a/site/e2e/tests/deployment/network.spec.ts +++ b/site/e2e/tests/deployment/network.spec.ts @@ -9,7 +9,7 @@ import { verifyConfigFlagString, } from "../../api"; -test("login with OIDC", async ({ page }) => { +test("enabled network settings", async ({ page }) => { await setupApiCalls(page); const config = await getDeploymentConfig(); diff --git a/site/e2e/tests/deployment/observability.spec.ts b/site/e2e/tests/deployment/observability.spec.ts new file mode 100644 index 0000000000..e94f14b6ce --- /dev/null +++ b/site/e2e/tests/deployment/observability.spec.ts @@ -0,0 +1,39 @@ +import { test } from "@playwright/test"; +import { getDeploymentConfig } from "api/api"; +import { + setupApiCalls, + verifyConfigFlagArray, + verifyConfigFlagBoolean, + verifyConfigFlagDuration, + verifyConfigFlagEmpty, + verifyConfigFlagString, +} from "../../api"; + +test("enabled observability settings", async ({ page }) => { + await setupApiCalls(page); + const config = await getDeploymentConfig(); + + await page.goto("/deployment/observability", { + waitUntil: "domcontentloaded", + }); + + await verifyConfigFlagBoolean(page, config, "trace-logs"); + await verifyConfigFlagBoolean(page, config, "enable-terraform-debug-mode"); + await verifyConfigFlagBoolean(page, config, "enable-terraform-debug-mode"); + await verifyConfigFlagDuration(page, config, "health-check-refresh"); + await verifyConfigFlagEmpty(page, "health-check-threshold-database"); + await verifyConfigFlagString(page, config, "log-human"); + await verifyConfigFlagString(page, config, "prometheus-address"); + await verifyConfigFlagArray( + page, + config, + "prometheus-aggregate-agent-stats-by", + ); + await verifyConfigFlagBoolean(page, config, 
"prometheus-collect-agent-stats"); + await verifyConfigFlagBoolean(page, config, "prometheus-collect-db-metrics"); + await verifyConfigFlagBoolean(page, config, "prometheus-enable"); + await verifyConfigFlagBoolean(page, config, "trace-datadog"); + await verifyConfigFlagBoolean(page, config, "trace"); + await verifyConfigFlagBoolean(page, config, "verbose"); + await verifyConfigFlagBoolean(page, config, "pprof-enable"); +}); From 319fd5bf1dd02a4ef8fc2f2d41d22dd8ed7c1dda Mon Sep 17 00:00:00 2001 From: Danny Kopping Date: Thu, 18 Apr 2024 19:43:10 +0200 Subject: [PATCH 15/25] chore: add e2e test against an external auth provider during workspace creation (#12985) --- site/e2e/helpers.ts | 52 ++++++++++++++++++++++++ site/e2e/hooks.ts | 39 +++++++++++++++++- site/e2e/playwright.config.ts | 2 +- site/e2e/tests/externalAuth.spec.ts | 62 ++++++++++++++++++++--------- 4 files changed, 134 insertions(+), 21 deletions(-) diff --git a/site/e2e/helpers.ts b/site/e2e/helpers.ts index 05ce694a97..ca3a507007 100644 --- a/site/e2e/helpers.ts +++ b/site/e2e/helpers.ts @@ -31,6 +31,7 @@ import { type Resource, Response, type RichParameter, + type ExternalAuthProviderResource, } from "./provisionerGenerated"; // requiresEnterpriseLicense will skip the test if we're not running with an enterprise license @@ -49,6 +50,7 @@ export const createWorkspace = async ( templateName: string, richParameters: RichParameter[] = [], buildParameters: WorkspaceBuildParameter[] = [], + useExternalAuthProvider: string | undefined = undefined, ): Promise => { await page.goto(`/templates/${templateName}/workspace`, { waitUntil: "domcontentloaded", @@ -59,6 +61,25 @@ export const createWorkspace = async ( await page.getByLabel("name").fill(name); await fillParameters(page, richParameters, buildParameters); + + if (useExternalAuthProvider !== undefined) { + // Create a new context for the popup which will be created when clicking the button + const popupPromise = page.waitForEvent("popup"); + + // Find the "Login with " button + const externalAuthLoginButton = page + .getByRole("button") + .getByText("Login with GitHub"); + await expect(externalAuthLoginButton).toBeVisible(); + + // Click it + await externalAuthLoginButton.click(); + + // Wait for authentication to occur + const popup = await popupPromise; + await popup.waitForSelector("text=You are now authenticated."); + } + await page.getByTestId("form-submit").click(); await expectUrl(page).toHavePathName("/@admin/" + name); @@ -648,6 +669,37 @@ export const echoResponsesWithParameters = ( }; }; +export const echoResponsesWithExternalAuth = ( + providers: ExternalAuthProviderResource[], +): EchoProvisionerResponses => { + return { + parse: [ + { + parse: {}, + }, + ], + plan: [ + { + plan: { + externalAuthProviders: providers, + }, + }, + ], + apply: [ + { + apply: { + externalAuthProviders: providers, + resources: [ + { + name: "example", + }, + ], + }, + }, + ], + }; +}; + export const fillParameters = async ( page: Page, richParameters: RichParameter[] = [], diff --git a/site/e2e/hooks.ts b/site/e2e/hooks.ts index 0fc483319a..c04233bc9c 100644 --- a/site/e2e/hooks.ts +++ b/site/e2e/hooks.ts @@ -1,4 +1,6 @@ -import type { Page } from "@playwright/test"; +import type { BrowserContext, Page } from "@playwright/test"; +import http from "http"; +import { coderPort, gitAuth } from "./constants"; export const beforeCoderTest = async (page: Page) => { // eslint-disable-next-line no-console -- Show everything that was printed with console.log() @@ -45,6 +47,41 @@ export const 
beforeCoderTest = async (page: Page) => { }); }; +export const resetExternalAuthKey = async (context: BrowserContext) => { + // Find the session token so we can destroy the external auth link between tests, to ensure valid authentication happens each time. + const cookies = await context.cookies(); + const sessionCookie = cookies.find((c) => c.name === "coder_session_token"); + const options = { + method: "DELETE", + hostname: "127.0.0.1", + port: coderPort, + path: `/api/v2/external-auth/${gitAuth.webProvider}?coder_session_token=${sessionCookie?.value}`, + }; + + const req = http.request(options, (res) => { + let data = ""; + res.on("data", (chunk) => { + data += chunk; + }); + + res.on("end", () => { + // Both 200 (key deleted successfully) and 500 (key was not found) are valid responses. + if (res.statusCode !== 200 && res.statusCode !== 500) { + console.error("failed to delete external auth link", data); + throw new Error( + `failed to delete external auth link: HTTP response ${res.statusCode}`, + ); + } + }); + }); + + req.on("error", (err) => { + throw err.message; + }); + + req.end(); +}; + const isApiCall = (urlString: string): boolean => { const url = new URL(urlString); const apiPath = "/api/v2"; diff --git a/site/e2e/playwright.config.ts b/site/e2e/playwright.config.ts index bb1929a062..7bc8ead8e4 100644 --- a/site/e2e/playwright.config.ts +++ b/site/e2e/playwright.config.ts @@ -115,7 +115,7 @@ export default defineConfig({ // Tests for Deployment / User Authentication / OIDC CODER_OIDC_ISSUER_URL: "https://accounts.google.com", CODER_OIDC_EMAIL_DOMAIN: "coder.com", - CODER_OIDC_CLIENT_ID: "1234567890", // FIXME: https://github.com/coder/coder/issues/12585 + CODER_OIDC_CLIENT_ID: "1234567890", CODER_OIDC_CLIENT_SECRET: "1234567890Secret", CODER_OIDC_ALLOW_SIGNUPS: "false", CODER_OIDC_SIGN_IN_TEXT: "Hello", diff --git a/site/e2e/tests/externalAuth.spec.ts b/site/e2e/tests/externalAuth.spec.ts index 2067582ccc..d5c98228ea 100644 --- a/site/e2e/tests/externalAuth.spec.ts +++ b/site/e2e/tests/externalAuth.spec.ts @@ -2,8 +2,37 @@ import type { Endpoints } from "@octokit/types"; import { test } from "@playwright/test"; import type { ExternalAuthDevice } from "api/typesGenerated"; import { gitAuth } from "../constants"; -import { Awaiter, createServer } from "../helpers"; -import { beforeCoderTest } from "../hooks"; +import { + Awaiter, + createServer, + createTemplate, + createWorkspace, + echoResponsesWithExternalAuth, +} from "../helpers"; +import { beforeCoderTest, resetExternalAuthKey } from "../hooks"; + +test.beforeAll(async ({ baseURL }) => { + const srv = await createServer(gitAuth.webPort); + + // The GitHub validate endpoint returns the currently authenticated user! 
+ srv.use(gitAuth.validatePath, (req, res) => { + res.write(JSON.stringify(ghUser)); + res.end(); + }); + srv.use(gitAuth.tokenPath, (req, res) => { + const r = (Math.random() + 1).toString(36).substring(7); + res.write(JSON.stringify({ access_token: r })); + res.end(); + }); + srv.use(gitAuth.authPath, (req, res) => { + res.redirect( + `${baseURL}/external-auth/${gitAuth.webProvider}/callback?code=1234&state=` + + req.query.state, + ); + }); +}); + +test.beforeEach(async ({ context }) => resetExternalAuthKey(context)); test.beforeEach(({ page }) => beforeCoderTest(page)); @@ -57,23 +86,7 @@ test("external auth device", async ({ page }) => { await page.waitForSelector("text=1 organization authorized"); }); -test("external auth web", async ({ baseURL, page }) => { - const srv = await createServer(gitAuth.webPort); - // The GitHub validate endpoint returns the currently authenticated user! - srv.use(gitAuth.validatePath, (req, res) => { - res.write(JSON.stringify(ghUser)); - res.end(); - }); - srv.use(gitAuth.tokenPath, (req, res) => { - res.write(JSON.stringify({ access_token: "hello-world" })); - res.end(); - }); - srv.use(gitAuth.authPath, (req, res) => { - res.redirect( - `${baseURL}/external-auth/${gitAuth.webProvider}/callback?code=1234&state=` + - req.query.state, - ); - }); +test("external auth web", async ({ page }) => { await page.goto(`/external-auth/${gitAuth.webProvider}`, { waitUntil: "domcontentloaded", }); @@ -81,6 +94,17 @@ test("external auth web", async ({ baseURL, page }) => { await page.waitForSelector("text=You've authenticated with GitHub!"); }); +test("successful external auth from workspace", async ({ page }) => { + const templateName = await createTemplate( + page, + echoResponsesWithExternalAuth([ + { id: gitAuth.webProvider, optional: false }, + ]), + ); + + await createWorkspace(page, templateName, [], [], gitAuth.webProvider); +}); + const ghUser: Endpoints["GET /user"]["response"]["data"] = { login: "kylecarbs", id: 7122116, From 3aa0d738116a06ec9059a69783ce73cf3bf9b964 Mon Sep 17 00:00:00 2001 From: Colin Adler Date: Thu, 18 Apr 2024 18:47:02 -0500 Subject: [PATCH 16/25] chore: fix down migration 196 (#13006) It didn't account for null values. 
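For illustration only (not part of the patch itself): with the guard added below, the
revert helper treats a JSON `null` as an empty provider list instead of choking on it.
A hypothetical invocation against the fixed function, assuming the migration shown in
this patch has been applied and using a made-up provider id:

```sql
-- JSON null (the previously unhandled case) now maps to an empty text array.
SELECT revert_migrate_external_auth_providers_to_jsonb('null'::jsonb);
-- => {}

-- A populated value still collapses to the array of provider ids, as before.
SELECT revert_migrate_external_auth_providers_to_jsonb('[{"id": "github"}]'::jsonb);
-- => {github}
```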
--- .../000196_external_auth_providers_jsonb.down.sql | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/coderd/database/migrations/000196_external_auth_providers_jsonb.down.sql b/coderd/database/migrations/000196_external_auth_providers_jsonb.down.sql index e1a9b42cad..7d440dd5b3 100644 --- a/coderd/database/migrations/000196_external_auth_providers_jsonb.down.sql +++ b/coderd/database/migrations/000196_external_auth_providers_jsonb.down.sql @@ -11,11 +11,15 @@ CREATE OR REPLACE FUNCTION revert_migrate_external_auth_providers_to_jsonb(jsonb DECLARE result text[]; BEGIN - SELECT - array_agg(id::text) INTO result - FROM ( - SELECT - jsonb_array_elements($1) ->> 'id' AS id) AS external_auth_provider_ids; + IF jsonb_typeof($1) = 'null' THEN + result := '{}'; + ELSE + SELECT + array_agg(id::text) INTO result + FROM ( + SELECT + jsonb_array_elements($1) ->> 'id' AS id) AS external_auth_provider_ids; + END IF; RETURN result; END; $$; From 3d7740bd322db24b2a17c19f7311fbe78574a07a Mon Sep 17 00:00:00 2001 From: Marcin Tojek Date: Fri, 19 Apr 2024 14:45:52 +0200 Subject: [PATCH 17/25] test(site): add e2e tests for workspace proxies (#13009) --- site/e2e/constants.ts | 1 + site/e2e/helpers.ts | 2 +- site/e2e/proxy.ts | 41 +++++++ site/e2e/tests/deployment/appearance.spec.ts | 2 +- .../tests/deployment/workspaceProxies.spec.ts | 105 ++++++++++++++++++ site/src/api/api.ts | 7 ++ site/src/pages/LoginPage/LoginPageView.tsx | 1 + .../WorkspaceProxyPage/WorkspaceProxyRow.tsx | 10 +- 8 files changed, 164 insertions(+), 5 deletions(-) create mode 100644 site/e2e/proxy.ts create mode 100644 site/e2e/tests/deployment/workspaceProxies.spec.ts diff --git a/site/e2e/constants.ts b/site/e2e/constants.ts index 6998968977..3e1283e549 100644 --- a/site/e2e/constants.ts +++ b/site/e2e/constants.ts @@ -7,6 +7,7 @@ export const coderPort = process.env.CODER_E2E_PORT ? Number(process.env.CODER_E2E_PORT) : 3111; export const prometheusPort = 2114; +export const workspaceProxyPort = 3112; // Use alternate ports in case we're running in a Coder Workspace. 
export const agentPProfPort = 6061; diff --git a/site/e2e/helpers.ts b/site/e2e/helpers.ts index ca3a507007..26e416cad7 100644 --- a/site/e2e/helpers.ts +++ b/site/e2e/helpers.ts @@ -391,7 +391,7 @@ export const stopAgent = async (cp: ChildProcess, goRun: boolean = true) => { await waitUntilUrlIsNotResponding("http://localhost:" + prometheusPort); }; -const waitUntilUrlIsNotResponding = async (url: string) => { +export const waitUntilUrlIsNotResponding = async (url: string) => { const maxRetries = 30; const retryIntervalMs = 1000; let retries = 0; diff --git a/site/e2e/proxy.ts b/site/e2e/proxy.ts new file mode 100644 index 0000000000..620fcf0a96 --- /dev/null +++ b/site/e2e/proxy.ts @@ -0,0 +1,41 @@ +import { spawn, type ChildProcess, exec } from "child_process"; +import { coderMain, coderPort, workspaceProxyPort } from "./constants"; +import { waitUntilUrlIsNotResponding } from "./helpers"; + +export const startWorkspaceProxy = async ( + token: string, +): Promise => { + const cp = spawn("go", ["run", coderMain, "wsproxy", "server"], { + env: { + ...process.env, + CODER_PRIMARY_ACCESS_URL: `http://127.0.0.1:${coderPort}`, + CODER_PROXY_SESSION_TOKEN: token, + CODER_HTTP_ADDRESS: `localhost:${workspaceProxyPort}`, + }, + }); + cp.stdout.on("data", (data: Buffer) => { + // eslint-disable-next-line no-console -- Log wsproxy activity + console.log( + `[wsproxy] [stdout] [onData] ${data.toString().replace(/\n$/g, "")}`, + ); + }); + cp.stderr.on("data", (data: Buffer) => { + // eslint-disable-next-line no-console -- Log wsproxy activity + console.log( + `[wsproxy] [stderr] [onData] ${data.toString().replace(/\n$/g, "")}`, + ); + }); + return cp; +}; + +export const stopWorkspaceProxy = async ( + cp: ChildProcess, + goRun: boolean = true, +) => { + exec(goRun ? 
`pkill -P ${cp.pid}` : `kill ${cp.pid}`, (error) => { + if (error) { + throw new Error(`exec error: ${JSON.stringify(error)}`); + } + }); + await waitUntilUrlIsNotResponding(`http://127.0.0.1:${workspaceProxyPort}`); +}; diff --git a/site/e2e/tests/deployment/appearance.spec.ts b/site/e2e/tests/deployment/appearance.spec.ts index 0fec1a7d75..14aeafe750 100644 --- a/site/e2e/tests/deployment/appearance.spec.ts +++ b/site/e2e/tests/deployment/appearance.spec.ts @@ -52,7 +52,7 @@ test("set application logo", async ({ page }) => { await incognitoPage.goto("/", { waitUntil: "domcontentloaded" }); // Verify banner - const logo = incognitoPage.locator("img"); + const logo = incognitoPage.locator("img.application-logo"); await expect(logo).toHaveAttribute("src", imageLink); // Shut down browser diff --git a/site/e2e/tests/deployment/workspaceProxies.spec.ts b/site/e2e/tests/deployment/workspaceProxies.spec.ts new file mode 100644 index 0000000000..5f67bda7d7 --- /dev/null +++ b/site/e2e/tests/deployment/workspaceProxies.spec.ts @@ -0,0 +1,105 @@ +import { test, expect, type Page } from "@playwright/test"; +import { createWorkspaceProxy } from "api/api"; +import { setupApiCalls } from "../../api"; +import { coderPort, workspaceProxyPort } from "../../constants"; +import { randomName, requiresEnterpriseLicense } from "../../helpers"; +import { startWorkspaceProxy, stopWorkspaceProxy } from "../../proxy"; + +test("default proxy is online", async ({ page }) => { + requiresEnterpriseLicense(); + await setupApiCalls(page); + + await page.goto("/deployment/workspace-proxies", { + waitUntil: "domcontentloaded", + }); + + // Verify if the default proxy is healthy + const workspaceProxyPrimary = page.locator( + `table.MuiTable-root tr[data-testid="primary"]`, + ); + + const workspaceProxyName = workspaceProxyPrimary.locator("td.name span"); + const workspaceProxyURL = workspaceProxyPrimary.locator("td.url"); + const workspaceProxyStatus = workspaceProxyPrimary.locator("td.status span"); + + await expect(workspaceProxyName).toHaveText("Default"); + await expect(workspaceProxyURL).toHaveText("http://localhost:" + coderPort); + await expect(workspaceProxyStatus).toHaveText("Healthy"); +}); + +test("custom proxy is online", async ({ page }) => { + requiresEnterpriseLicense(); + await setupApiCalls(page); + + const proxyName = randomName(); + + // Register workspace proxy + const proxyResponse = await createWorkspaceProxy({ + name: proxyName, + display_name: "", + icon: "/emojis/1f1e7-1f1f7.png", + }); + expect(proxyResponse.proxy_token).toBeDefined(); + + // Start "wsproxy server" + const proxyServer = await startWorkspaceProxy(proxyResponse.proxy_token); + await waitUntilWorkspaceProxyIsHealthy(page, proxyName); + + // Verify if custom proxy is healthy + await page.goto("/deployment/workspace-proxies", { + waitUntil: "domcontentloaded", + }); + + const workspaceProxy = page.locator(`table.MuiTable-root tr`, { + hasText: proxyName, + }); + + const workspaceProxyName = workspaceProxy.locator("td.name span"); + const workspaceProxyURL = workspaceProxy.locator("td.url"); + const workspaceProxyStatus = workspaceProxy.locator("td.status span"); + + await expect(workspaceProxyName).toHaveText(proxyName); + await expect(workspaceProxyURL).toHaveText( + `http://127.0.0.1:${workspaceProxyPort}`, + ); + await expect(workspaceProxyStatus).toHaveText("Healthy"); + + // Tear down the proxy + await stopWorkspaceProxy(proxyServer); +}); + +const waitUntilWorkspaceProxyIsHealthy = async ( + page: Page, + proxyName: string, +) 
=> { + await page.goto("/deployment/workspace-proxies", { + waitUntil: "domcontentloaded", + }); + + const maxRetries = 30; + const retryIntervalMs = 1000; + let retries = 0; + while (retries < maxRetries) { + await page.reload(); + + const workspaceProxy = page.locator(`table.MuiTable-root tr`, { + hasText: proxyName, + }); + const workspaceProxyStatus = workspaceProxy.locator("td.status span"); + + try { + await expect(workspaceProxyStatus).toHaveText("Healthy", { + timeout: 1_000, + }); + return; // healthy! + } catch { + retries++; + await new Promise((resolve) => setTimeout(resolve, retryIntervalMs)); + } + } + throw new Error( + `Workspace proxy "${proxyName}" is unhealthy after ${ + maxRetries * retryIntervalMs + }ms`, + ); +}; diff --git a/site/src/api/api.ts b/site/src/api/api.ts index 12c2a63b2c..a243123a31 100644 --- a/site/src/api/api.ts +++ b/site/src/api/api.ts @@ -1270,6 +1270,13 @@ export const getWorkspaceProxies = async (): Promise< return response.data; }; +export const createWorkspaceProxy = async ( + b: TypesGen.CreateWorkspaceProxyRequest, +): Promise => { + const response = await axios.post(`/api/v2/workspaceproxies`, b); + return response.data; +}; + export const getAppearance = async (): Promise => { try { const response = await axios.get(`/api/v2/appearance`); diff --git a/site/src/pages/LoginPage/LoginPageView.tsx b/site/src/pages/LoginPage/LoginPageView.tsx index b3039e5ad9..e62e8be0d8 100644 --- a/site/src/pages/LoginPage/LoginPageView.tsx +++ b/site/src/pages/LoginPage/LoginPageView.tsx @@ -41,6 +41,7 @@ export const LoginPageView: FC = ({ css={{ maxWidth: "200px", }} + className="application-logo" /> ) : ( diff --git a/site/src/pages/UserSettingsPage/WorkspaceProxyPage/WorkspaceProxyRow.tsx b/site/src/pages/UserSettingsPage/WorkspaceProxyPage/WorkspaceProxyRow.tsx index fbd76ba2be..e0807cf758 100644 --- a/site/src/pages/UserSettingsPage/WorkspaceProxyPage/WorkspaceProxyRow.tsx +++ b/site/src/pages/UserSettingsPage/WorkspaceProxyPage/WorkspaceProxyRow.tsx @@ -40,7 +40,7 @@ export const ProxyRow: FC = ({ proxy, latency }) => { return ( <> - + 0 @@ -60,8 +60,12 @@ export const ProxyRow: FC = ({ proxy, latency }) => { /> - {proxy.path_app_url} - {statusBadge} + + {proxy.path_app_url} + + + {statusBadge} + Date: Sat, 20 Apr 2024 12:26:53 -0400 Subject: [PATCH 18/25] chore(docs): make external auth docs easier to follow (#12970) * add additional context to github external auth provider documentation * Apply suggestions from code review Co-authored-by: Kyle Carberry * Update docs/admin/external-auth.md * fmt * fmt --------- Co-authored-by: Kyle Carberry --- docs/admin/external-auth.md | 78 ++++++++++++++++++++++++++++++++----- 1 file changed, 68 insertions(+), 10 deletions(-) diff --git a/docs/admin/external-auth.md b/docs/admin/external-auth.md index 2f785c5c51..f281afe0d7 100644 --- a/docs/admin/external-auth.md +++ b/docs/admin/external-auth.md @@ -25,16 +25,12 @@ application. The following providers are supported: - [Azure DevOps](https://learn.microsoft.com/en-us/azure/devops/integrate/get-started/authentication/oauth?view=azure-devops) - [Azure DevOps (via Entra ID)](https://learn.microsoft.com/en-us/entra/architecture/auth-oauth2) -Example callback URL: -`https://coder.example.com/external-auth/primary-github/callback`. Use an -arbitrary ID for your provider (e.g. `primary-github`). 
- -Set the following environment variables to -[configure the Coder server](./configure.md): +The next step is to [configure the Coder server](./configure.md) to use the +OAuth application by setting the following environment variables: ```env -CODER_EXTERNAL_AUTH_0_ID="primary-github" -CODER_EXTERNAL_AUTH_0_TYPE=github|gitlab|azure-devops|bitbucket-cloud|bitbucket-server| +CODER_EXTERNAL_AUTH_0_ID="" +CODER_EXTERNAL_AUTH_0_TYPE= CODER_EXTERNAL_AUTH_0_CLIENT_ID=xxxxxx CODER_EXTERNAL_AUTH_0_CLIENT_SECRET=xxxxxxx @@ -43,11 +39,22 @@ CODER_EXTERNAL_AUTH_0_DISPLAY_NAME="Google Calendar" CODER_EXTERNAL_AUTH_0_DISPLAY_ICON="https://mycustomicon.com/google.svg" ``` +The `CODER_EXTERNAL_AUTH_0_ID` environment variable is used for internal +reference. Therefore, it can be set arbitrarily (e.g., `primary-github` for your +GitHub provider). + ### GitHub +> If you don't require fine-grained access control, it's easier to configure a +> GitHub OAuth app! + 1. [Create a GitHub App](https://docs.github.com/en/apps/creating-github-apps/registering-a-github-app/registering-a-github-app) - to enable fine-grained access to specific repositories, or a subset of - permissions for security. + + - Set the callback URL to + `https://coder.example.com/external-auth/USER_DEFINED_ID/callback`. + - Deactivate Webhooks. + - Enable fine-grained access to specific repositories or a subset of + permissions for security. ![Register GitHub App](../images/admin/github-app-register.png) @@ -69,6 +76,13 @@ CODER_EXTERNAL_AUTH_0_DISPLAY_ICON="https://mycustomicon.com/google.svg" ![Install GitHub App](../images/admin/github-app-install.png) +```env +CODER_EXTERNAL_AUTH_0_ID="USER_DEFINED_ID" +CODER_EXTERNAL_AUTH_0_TYPE=github +CODER_EXTERNAL_AUTH_0_CLIENT_ID=xxxxxx +CODER_EXTERNAL_AUTH_0_CLIENT_SECRET=xxxxxxx +``` + ### GitHub Enterprise GitHub Enterprise requires the following environment variables: @@ -204,6 +218,50 @@ add this to the git config --global credential.useHttpPath true ``` +### Kubernetes environment variables + +If you deployed Coder with Kubernetes you can set the environment variables in +your `values.yaml` file: + +```yaml +coder: + env: + # […] + - name: CODER_EXTERNAL_AUTH_0_ID + value: USER_DEFINED_ID + + - name: CODER_EXTERNAL_AUTH_0_TYPE + value: github + + - name: CODER_EXTERNAL_AUTH_0_CLIENT_ID + valueFrom: + secretKeyRef: + name: github-primary-basic-auth + key: client-id + + - name: CODER_EXTERNAL_AUTH_0_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: github-primary-basic-auth + key: client-secret +``` + +You can set the secrets by creating a `github-primary-basic-auth.yaml` file and +applying it. + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: github-primary-basic-auth +type: Opaque +stringData: + client-secret: xxxxxxxxx + client-id: xxxxxxxxx +``` + +Make sure to restart the affected pods for the change to take effect. + ## Require git authentication in templates If your template requires git authentication (e.g. 
running `git clone` in the From 4a6693a171227c30a3440bf9883d13dd9aa59b0e Mon Sep 17 00:00:00 2001 From: Michael Brewer Date: Mon, 22 Apr 2024 03:09:05 -0700 Subject: [PATCH 19/25] chore: fix 404 for managed terraform variables (#13018) --- docs/platforms/kubernetes/additional-clusters.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/platforms/kubernetes/additional-clusters.md b/docs/platforms/kubernetes/additional-clusters.md index c3bcd42d18..e30a432c73 100644 --- a/docs/platforms/kubernetes/additional-clusters.md +++ b/docs/platforms/kubernetes/additional-clusters.md @@ -91,7 +91,7 @@ provider "kubernetes" { Alternatively, you can authenticate with remote clusters with ServiceAccount tokens. Coder can store these secrets on your behalf with -[managed Terraform variables](../../templates/parameters.md#managed-terraform-variables). +[managed Terraform variables](../../templates/variables.md). Alternatively, these could also be fetched from Kubernetes secrets or even [Hashicorp Vault](https://registry.terraform.io/providers/hashicorp/vault/latest/docs/data-sources/generic_secret). From e17e8aa3c99eedf8fdf49fef54d56c99780b2493 Mon Sep 17 00:00:00 2001 From: Mathias Fredriksson Date: Mon, 22 Apr 2024 13:11:50 +0300 Subject: [PATCH 20/25] feat(coderd/database): keep only 1 day of `workspace_agent_stats` after rollup (#12674) --- cli/server.go | 2 +- coderd/database/dbmem/dbmem.go | 58 +++++++++++++- coderd/database/dbpurge/dbpurge.go | 1 - coderd/database/dbpurge/dbpurge_test.go | 79 ++++++++++++++++--- coderd/database/queries.sql.go | 30 ++++++- .../database/queries/workspaceagentstats.sql | 30 ++++++- 6 files changed, 184 insertions(+), 16 deletions(-) diff --git a/cli/server.go b/cli/server.go index d278dbea35..5b804f9278 100644 --- a/cli/server.go +++ b/cli/server.go @@ -965,7 +965,7 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd. defer shutdownConns() // Ensures that old database entries are cleaned up over time! - purger := dbpurge.New(ctx, logger, options.Database) + purger := dbpurge.New(ctx, logger.Named("dbpurge"), options.Database) defer purger.Close() // Updates workspace usage diff --git a/coderd/database/dbmem/dbmem.go b/coderd/database/dbmem/dbmem.go index 9ce98ff14c..fcc3140133 100644 --- a/coderd/database/dbmem/dbmem.go +++ b/coderd/database/dbmem/dbmem.go @@ -1506,13 +1506,65 @@ func (q *FakeQuerier) DeleteOldWorkspaceAgentStats(_ context.Context) error { q.mutex.Lock() defer q.mutex.Unlock() + /* + DELETE FROM + workspace_agent_stats + WHERE + created_at < ( + SELECT + COALESCE( + -- When generating initial template usage stats, all the + -- raw agent stats are needed, after that only ~30 mins + -- from last rollup is needed. Deployment stats seem to + -- use between 15 mins and 1 hour of data. We keep a + -- little bit more (1 day) just in case. + MAX(start_time) - '1 days'::interval, + -- Fall back to 6 months ago if there are no template + -- usage stats so that we don't delete the data before + -- it's rolled up. + NOW() - '6 months'::interval + ) + FROM + template_usage_stats + ) + AND created_at < ( + -- Delete at most in batches of 3 days (with a batch size of 3 days, we + -- can clear out the previous 6 months of data in ~60 iterations) whilst + -- keeping the DB load relatively low. 
+ SELECT + COALESCE(MIN(created_at) + '3 days'::interval, NOW()) + FROM + workspace_agent_stats + ); + */ + now := dbtime.Now() - sixMonthInterval := 6 * 30 * 24 * time.Hour - sixMonthsAgo := now.Add(-sixMonthInterval) + var limit time.Time + // MAX + for _, stat := range q.templateUsageStats { + if stat.StartTime.After(limit) { + limit = stat.StartTime.AddDate(0, 0, -1) + } + } + // COALESCE + if limit.IsZero() { + limit = now.AddDate(0, -6, 0) + } var validStats []database.WorkspaceAgentStat + var batchLimit time.Time for _, stat := range q.workspaceAgentStats { - if stat.CreatedAt.Before(sixMonthsAgo) { + if batchLimit.IsZero() || stat.CreatedAt.Before(batchLimit) { + batchLimit = stat.CreatedAt + } + } + if batchLimit.IsZero() { + batchLimit = time.Now() + } else { + batchLimit = batchLimit.AddDate(0, 0, 3) + } + for _, stat := range q.workspaceAgentStats { + if stat.CreatedAt.Before(limit) && stat.CreatedAt.Before(batchLimit) { continue } validStats = append(validStats, stat) diff --git a/coderd/database/dbpurge/dbpurge.go b/coderd/database/dbpurge/dbpurge.go index c4b5a609a3..d3fc56a8c5 100644 --- a/coderd/database/dbpurge/dbpurge.go +++ b/coderd/database/dbpurge/dbpurge.go @@ -24,7 +24,6 @@ const ( // This is for cleaning up old, unused resources from the database that take up space. func New(ctx context.Context, logger slog.Logger, db database.Store) io.Closer { closed := make(chan struct{}) - logger = logger.Named("dbpurge") ctx, cancelFunc := context.WithCancel(ctx) //nolint:gocritic // The system purges old db records without user input. diff --git a/coderd/database/dbpurge/dbpurge_test.go b/coderd/database/dbpurge/dbpurge_test.go index 9e949f270d..4255409ba6 100644 --- a/coderd/database/dbpurge/dbpurge_test.go +++ b/coderd/database/dbpurge/dbpurge_test.go @@ -11,12 +11,14 @@ import ( "go.uber.org/goleak" "golang.org/x/exp/slices" + "cdr.dev/slog" "cdr.dev/slog/sloggers/slogtest" "github.com/coder/coder/v2/coderd/database" "github.com/coder/coder/v2/coderd/database/dbgen" "github.com/coder/coder/v2/coderd/database/dbmem" "github.com/coder/coder/v2/coderd/database/dbpurge" + "github.com/coder/coder/v2/coderd/database/dbrollup" "github.com/coder/coder/v2/coderd/database/dbtestutil" "github.com/coder/coder/v2/coderd/database/dbtime" "github.com/coder/coder/v2/provisionerd/proto" @@ -40,27 +42,62 @@ func TestDeleteOldWorkspaceAgentStats(t *testing.T) { t.Parallel() db, _ := dbtestutil.NewDB(t) - logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}) + logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug) + + now := dbtime.Now() + + defer func() { + if t.Failed() { + t.Logf("Test failed, printing rows...") + ctx := testutil.Context(t, testutil.WaitShort) + wasRows, err := db.GetWorkspaceAgentStats(ctx, now.AddDate(0, -7, 0)) + if err == nil { + for _, row := range wasRows { + t.Logf("workspace agent stat: %v", row) + } + } + tusRows, err := db.GetTemplateUsageStats(context.Background(), database.GetTemplateUsageStatsParams{ + StartTime: now.AddDate(0, -7, 0), + EndTime: now, + }) + if err == nil { + for _, row := range tusRows { + t.Logf("template usage stat: %v", row) + } + } + } + }() ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort) defer cancel() - now := dbtime.Now() - // given // Let's use RxBytes to identify stat entries. // Stat inserted 6 months + 1 hour ago, should be deleted. 
first := dbgen.WorkspaceAgentStat(t, db, database.WorkspaceAgentStat{ - CreatedAt: now.Add(-6*30*24*time.Hour - time.Hour), + CreatedAt: now.AddDate(0, -6, 0).Add(-time.Hour), + ConnectionCount: 1, ConnectionMedianLatencyMS: 1, RxBytes: 1111, + SessionCountSSH: 1, }) - // Stat inserted 6 months - 1 hour ago, should not be deleted. + // Stat inserted 6 months - 1 hour ago, should not be deleted before rollup. second := dbgen.WorkspaceAgentStat(t, db, database.WorkspaceAgentStat{ - CreatedAt: now.Add(-5*30*24*time.Hour + time.Hour), + CreatedAt: now.AddDate(0, -6, 0).Add(time.Hour), + ConnectionCount: 1, ConnectionMedianLatencyMS: 1, RxBytes: 2222, + SessionCountSSH: 1, + }) + + // Stat inserted 6 months - 1 day - 2 hour ago, should not be deleted at all. + third := dbgen.WorkspaceAgentStat(t, db, database.WorkspaceAgentStat{ + CreatedAt: now.AddDate(0, -6, 0).AddDate(0, 0, 1).Add(2 * time.Hour), + ConnectionCount: 1, + ConnectionMedianLatencyMS: 1, + RxBytes: 3333, + SessionCountSSH: 1, }) // when @@ -70,15 +107,39 @@ func TestDeleteOldWorkspaceAgentStats(t *testing.T) { // then var stats []database.GetWorkspaceAgentStatsRow var err error - require.Eventually(t, func() bool { + require.Eventuallyf(t, func() bool { // Query all stats created not earlier than 7 months ago - stats, err = db.GetWorkspaceAgentStats(ctx, now.Add(-7*30*24*time.Hour)) + stats, err = db.GetWorkspaceAgentStats(ctx, now.AddDate(0, -7, 0)) if err != nil { return false } return !containsWorkspaceAgentStat(stats, first) && containsWorkspaceAgentStat(stats, second) - }, testutil.WaitShort, testutil.IntervalFast, stats) + }, testutil.WaitShort, testutil.IntervalFast, "it should delete old stats: %v", stats) + + // when + events := make(chan dbrollup.Event) + rolluper := dbrollup.New(logger, db, dbrollup.WithEventChannel(events)) + defer rolluper.Close() + + _, _ = <-events, <-events + + // Start a new purger to immediately trigger delete after rollup. + _ = closer.Close() + closer = dbpurge.New(ctx, logger, db) + defer closer.Close() + + // then + require.Eventuallyf(t, func() bool { + // Query all stats created not earlier than 7 months ago + stats, err = db.GetWorkspaceAgentStats(ctx, now.AddDate(0, -7, 0)) + if err != nil { + return false + } + return !containsWorkspaceAgentStat(stats, first) && + !containsWorkspaceAgentStat(stats, second) && + containsWorkspaceAgentStat(stats, third) + }, testutil.WaitShort, testutil.IntervalFast, "it should delete old stats after rollup: %v", stats) } func containsWorkspaceAgentStat(stats []database.GetWorkspaceAgentStatsRow, needle database.WorkspaceAgentStat) bool { diff --git a/coderd/database/queries.sql.go b/coderd/database/queries.sql.go index 5b2b54929d..8594436116 100644 --- a/coderd/database/queries.sql.go +++ b/coderd/database/queries.sql.go @@ -10111,7 +10111,35 @@ func (q *sqlQuerier) UpdateWorkspaceAgentStartupByID(ctx context.Context, arg Up } const deleteOldWorkspaceAgentStats = `-- name: DeleteOldWorkspaceAgentStats :exec -DELETE FROM workspace_agent_stats WHERE created_at < NOW() - INTERVAL '180 days' +DELETE FROM + workspace_agent_stats +WHERE + created_at < ( + SELECT + COALESCE( + -- When generating initial template usage stats, all the + -- raw agent stats are needed, after that only ~30 mins + -- from last rollup is needed. Deployment stats seem to + -- use between 15 mins and 1 hour of data. We keep a + -- little bit more (1 day) just in case. 
+ MAX(start_time) - '1 days'::interval, + -- Fall back to 6 months ago if there are no template + -- usage stats so that we don't delete the data before + -- it's rolled up. + NOW() - '6 months'::interval + ) + FROM + template_usage_stats + ) + AND created_at < ( + -- Delete at most in batches of 3 days (with a batch size of 3 days, we + -- can clear out the previous 6 months of data in ~60 iterations) whilst + -- keeping the DB load relatively low. + SELECT + COALESCE(MIN(created_at) + '3 days'::interval, NOW()) + FROM + workspace_agent_stats + ) ` func (q *sqlQuerier) DeleteOldWorkspaceAgentStats(ctx context.Context) error { diff --git a/coderd/database/queries/workspaceagentstats.sql b/coderd/database/queries/workspaceagentstats.sql index 4b7f86fba4..5623fd2cf9 100644 --- a/coderd/database/queries/workspaceagentstats.sql +++ b/coderd/database/queries/workspaceagentstats.sql @@ -66,7 +66,35 @@ ORDER BY date ASC; -- name: DeleteOldWorkspaceAgentStats :exec -DELETE FROM workspace_agent_stats WHERE created_at < NOW() - INTERVAL '180 days'; +DELETE FROM + workspace_agent_stats +WHERE + created_at < ( + SELECT + COALESCE( + -- When generating initial template usage stats, all the + -- raw agent stats are needed, after that only ~30 mins + -- from last rollup is needed. Deployment stats seem to + -- use between 15 mins and 1 hour of data. We keep a + -- little bit more (1 day) just in case. + MAX(start_time) - '1 days'::interval, + -- Fall back to 6 months ago if there are no template + -- usage stats so that we don't delete the data before + -- it's rolled up. + NOW() - '6 months'::interval + ) + FROM + template_usage_stats + ) + AND created_at < ( + -- Delete at most in batches of 3 days (with a batch size of 3 days, we + -- can clear out the previous 6 months of data in ~60 iterations) whilst + -- keeping the DB load relatively low. + SELECT + COALESCE(MIN(created_at) + '3 days'::interval, NOW()) + FROM + workspace_agent_stats + ); -- name: GetDeploymentWorkspaceAgentStats :one WITH agent_stats AS ( From 8a1216254eac8ab95967d16e1c5041f32afaedc5 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Mon, 22 Apr 2024 03:13:48 -0700 Subject: [PATCH 21/25] feat(cli): add `--env` flag for `coder ssh` (#12991) This allows environment variables to be set on the SSH session. 
Example: coder ssh myworkspace --env VAR1=val1,VAR2=val2 --- cli/ssh.go | 39 +++++++++++++++++++------ cli/ssh_test.go | 43 ++++++++++++++++++++++++++++ cli/testdata/coder_ssh_--help.golden | 3 ++ docs/cli/ssh.md | 9 ++++++ 4 files changed, 85 insertions(+), 9 deletions(-) diff --git a/cli/ssh.go b/cli/ssh.go index a291b3764b..1aa832fcda 100644 --- a/cli/ssh.go +++ b/cli/ssh.go @@ -55,6 +55,7 @@ func (r *RootCmd) ssh() *serpent.Command { noWait bool logDirPath string remoteForwards []string + env []string disableAutostart bool ) client := new(codersdk.Client) @@ -144,16 +145,23 @@ func (r *RootCmd) ssh() *serpent.Command { stack := newCloserStack(ctx, logger) defer stack.close(nil) - if len(remoteForwards) > 0 { - for _, remoteForward := range remoteForwards { - isValid := validateRemoteForward(remoteForward) - if !isValid { - return xerrors.Errorf(`invalid format of remote-forward, expected: remote_port:local_address:local_port`) - } - if isValid && stdio { - return xerrors.Errorf(`remote-forward can't be enabled in the stdio mode`) - } + for _, remoteForward := range remoteForwards { + isValid := validateRemoteForward(remoteForward) + if !isValid { + return xerrors.Errorf(`invalid format of remote-forward, expected: remote_port:local_address:local_port`) } + if isValid && stdio { + return xerrors.Errorf(`remote-forward can't be enabled in the stdio mode`) + } + } + + var parsedEnv [][2]string + for _, e := range env { + k, v, ok := strings.Cut(e, "=") + if !ok { + return xerrors.Errorf("invalid environment variable setting %q", e) + } + parsedEnv = append(parsedEnv, [2]string{k, v}) } workspace, workspaceAgent, err := getWorkspaceAndAgent(ctx, inv, client, !disableAutostart, inv.Args[0]) @@ -375,6 +383,12 @@ func (r *RootCmd) ssh() *serpent.Command { }() } + for _, kv := range parsedEnv { + if err := sshSession.Setenv(kv[0], kv[1]); err != nil { + return xerrors.Errorf("setenv: %w", err) + } + } + err = sshSession.RequestPty("xterm-256color", 128, 128, gossh.TerminalModes{}) if err != nil { return xerrors.Errorf("request pty: %w", err) @@ -483,6 +497,13 @@ func (r *RootCmd) ssh() *serpent.Command { FlagShorthand: "R", Value: serpent.StringArrayOf(&remoteForwards), }, + { + Flag: "env", + Description: "Set environment variable(s) for session (key1=value1,key2=value2,...).", + Env: "CODER_SSH_ENV", + FlagShorthand: "e", + Value: serpent.StringArrayOf(&env), + }, sshDisableAutostartOption(serpent.BoolOf(&disableAutostart)), } return cmd diff --git a/cli/ssh_test.go b/cli/ssh_test.go index f1e52b1a0c..8c3c1a4e40 100644 --- a/cli/ssh_test.go +++ b/cli/ssh_test.go @@ -968,6 +968,49 @@ func TestSSH(t *testing.T) { <-cmdDone }) + t.Run("Env", func(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Test not supported on windows") + } + + t.Parallel() + + client, workspace, agentToken := setupWorkspaceForAgent(t) + _ = agenttest.New(t, client.URL, agentToken) + coderdtest.AwaitWorkspaceAgents(t, client, workspace.ID) + + inv, root := clitest.New(t, + "ssh", + workspace.Name, + "--env", + "foo=bar,baz=qux", + ) + clitest.SetupConfig(t, client, root) + + pty := ptytest.New(t).Attach(inv) + inv.Stderr = pty.Output() + + // Wait super long so this doesn't flake on -race test. + ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitSuperLong) + defer cancel() + + w := clitest.StartWithWaiter(t, inv.WithContext(ctx)) + defer w.Wait() // We don't care about any exit error (exit code 255: SSH connection ended unexpectedly). 
+ + // Since something was output, it should be safe to write input. + // This could show a prompt or "running startup scripts", so it's + // not indicative of the SSH connection being ready. + _ = pty.Peek(ctx, 1) + + // Ensure the SSH connection is ready by testing the shell + // input/output. + pty.WriteLine("echo $foo $baz") + pty.ExpectMatchContext(ctx, "bar qux") + + // And we're done. + pty.WriteLine("exit") + }) + t.Run("RemoteForwardUnixSocket", func(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Test not supported on windows") diff --git a/cli/testdata/coder_ssh_--help.golden b/cli/testdata/coder_ssh_--help.golden index ce53948c70..80aaa3c204 100644 --- a/cli/testdata/coder_ssh_--help.golden +++ b/cli/testdata/coder_ssh_--help.golden @@ -9,6 +9,9 @@ OPTIONS: --disable-autostart bool, $CODER_SSH_DISABLE_AUTOSTART (default: false) Disable starting the workspace automatically when connecting via SSH. + -e, --env string-array, $CODER_SSH_ENV + Set environment variable(s) for session (key1=value1,key2=value2,...). + -A, --forward-agent bool, $CODER_SSH_FORWARD_AGENT Specifies whether to forward the SSH agent specified in $SSH_AUTH_SOCK. diff --git a/docs/cli/ssh.md b/docs/cli/ssh.md index c2945a0be2..d2110628fe 100644 --- a/docs/cli/ssh.md +++ b/docs/cli/ssh.md @@ -95,6 +95,15 @@ Specify the directory containing SSH diagnostic log files. Enable remote port forwarding (remote_port:local_address:local_port). +### -e, --env + +| | | +| ----------- | --------------------------- | +| Type | string-array | +| Environment | $CODER_SSH_ENV | + +Set environment variable(s) for session (key1=value1,key2=value2,...). + ### --disable-autostart | | | From 3adcccb61884b4629ff197cad3c7de9ea83ea94e Mon Sep 17 00:00:00 2001 From: Mathias Fredriksson Date: Mon, 22 Apr 2024 14:10:32 +0300 Subject: [PATCH 22/25] fix(coderd/database): reduce db load via dbpurge advisory locking (#13021) --- coderd/database/dbpurge/dbpurge.go | 46 +++++++++++++++++++----------- coderd/database/lock.go | 1 + 2 files changed, 31 insertions(+), 16 deletions(-) diff --git a/coderd/database/dbpurge/dbpurge.go b/coderd/database/dbpurge/dbpurge.go index d3fc56a8c5..a6ad0a125d 100644 --- a/coderd/database/dbpurge/dbpurge.go +++ b/coderd/database/dbpurge/dbpurge.go @@ -2,11 +2,10 @@ package dbpurge import ( "context" - "errors" "io" "time" - "golang.org/x/sync/errgroup" + "golang.org/x/xerrors" "cdr.dev/slog" @@ -35,22 +34,37 @@ func New(ctx context.Context, logger slog.Logger, db database.Store) io.Closer { doTick := func() { defer ticker.Reset(delay) - var eg errgroup.Group - eg.Go(func() error { - return db.DeleteOldWorkspaceAgentLogs(ctx) - }) - eg.Go(func() error { - return db.DeleteOldWorkspaceAgentStats(ctx) - }) - eg.Go(func() error { - return db.DeleteOldProvisionerDaemons(ctx) - }) - err := eg.Wait() - if err != nil { - if errors.Is(err, context.Canceled) { - return + start := time.Now() + // Start a transaction to grab advisory lock, we don't want to run + // multiple purges at the same time (multiple replicas). + if err := db.InTx(func(tx database.Store) error { + // Acquire a lock to ensure that only one instance of the + // purge is running at a time. 
+ ok, err := tx.TryAcquireLock(ctx, database.LockIDDBPurge) + if err != nil { + return err } + if !ok { + logger.Debug(ctx, "unable to acquire lock for purging old database entries, skipping") + return nil + } + + if err := tx.DeleteOldWorkspaceAgentLogs(ctx); err != nil { + return xerrors.Errorf("failed to delete old workspace agent logs: %w", err) + } + if err := tx.DeleteOldWorkspaceAgentStats(ctx); err != nil { + return xerrors.Errorf("failed to delete old workspace agent stats: %w", err) + } + if err := tx.DeleteOldProvisionerDaemons(ctx); err != nil { + return xerrors.Errorf("failed to delete old provisioner daemons: %w", err) + } + + logger.Info(ctx, "purged old database entries", slog.F("duration", time.Since(start))) + + return nil + }, nil); err != nil { logger.Error(ctx, "failed to purge old database entries", slog.Error(err)) + return } } diff --git a/coderd/database/lock.go b/coderd/database/lock.go index 65dd6eb84a..b724e9b26d 100644 --- a/coderd/database/lock.go +++ b/coderd/database/lock.go @@ -9,6 +9,7 @@ const ( LockIDDeploymentSetup = iota + 1 LockIDEnterpriseDeploymentSetup LockIDDBRollup + LockIDDBPurge ) // GenLockID generates a unique and consistent lock ID from a given string. From d2acb6776e8d479752c446d8c358e6145e94d05f Mon Sep 17 00:00:00 2001 From: Michael Brewer Date: Mon, 22 Apr 2024 05:23:09 -0700 Subject: [PATCH 23/25] chore: fix link to install (#13019) --- docs/guides/support-bundle.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/guides/support-bundle.md b/docs/guides/support-bundle.md index 9b2d8ec52c..2b0ffad843 100644 --- a/docs/guides/support-bundle.md +++ b/docs/guides/support-bundle.md @@ -59,7 +59,7 @@ A brief overview of all files contained in the bundle is provided below: requires the Coder deployment to be available. 2. Ensure you have the Coder CLI installed on a local machine. See - (installation)[../install/index.md] for steps on how to do this. + [installation](../install/index.md) for steps on how to do this. > Note: It is recommended to generate a support bundle from a location > experiencing workspace connectivity issues. From ea472c5388d85c5ae5e2c05530525ba4c56bdf16 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Apr 2024 08:23:22 -0400 Subject: [PATCH 24/25] chore: bump google.golang.org/api from 0.172.0 to 0.175.0 (#13026) Bumps [google.golang.org/api](https://github.com/googleapis/google-api-go-client) from 0.172.0 to 0.175.0. - [Release notes](https://github.com/googleapis/google-api-go-client/releases) - [Changelog](https://github.com/googleapis/google-api-go-client/blob/main/CHANGES.md) - [Commits](https://github.com/googleapis/google-api-go-client/compare/v0.172.0...v0.175.0) --- updated-dependencies: - dependency-name: google.golang.org/api dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 11 ++++++----- go.sum | 22 ++++++++++++---------- 2 files changed, 18 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index 0751b4216b..93f2709cd1 100644 --- a/go.mod +++ b/go.mod @@ -83,7 +83,7 @@ replace github.com/pkg/sftp => github.com/mafredri/sftp v1.13.6-0.20231212144145 require ( cdr.dev/slog v1.6.2-0.20240126064726-20367d4aede6 - cloud.google.com/go/compute/metadata v0.2.3 + cloud.google.com/go/compute/metadata v0.3.0 github.com/AlecAivazis/survey/v2 v2.3.5 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d github.com/adrg/xdg v0.4.0 @@ -200,8 +200,8 @@ require ( golang.org/x/tools v0.20.0 golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 golang.zx2c4.com/wireguard v0.0.0-20230704135630-469159ecf7d1 - google.golang.org/api v0.172.0 - google.golang.org/grpc v1.63.0 + google.golang.org/api v0.175.0 + google.golang.org/grpc v1.63.2 google.golang.org/protobuf v1.33.0 gopkg.in/DataDog/dd-trace-go.v1 v1.61.0 gopkg.in/natefinch/lumberjack.v2 v2.2.1 @@ -221,6 +221,8 @@ require ( ) require ( + cloud.google.com/go/auth v0.2.2 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.1 // indirect github.com/DataDog/go-libddwaf/v2 v2.3.1 // indirect github.com/alecthomas/chroma/v2 v2.13.0 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 // indirect @@ -232,7 +234,6 @@ require ( ) require ( - cloud.google.com/go/compute v1.24.0 // indirect cloud.google.com/go/logging v1.9.0 // indirect cloud.google.com/go/longrunning v0.5.5 // indirect filippo.io/edwards25519 v1.0.0 // indirect @@ -429,7 +430,7 @@ require ( google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be // indirect gopkg.in/yaml.v2 v2.4.0 // indirect howett.net/plist v1.0.0 // indirect inet.af/peercred v0.0.0-20210906144145-0893ea02156a // indirect diff --git a/go.sum b/go.sum index ac1f3fdd9f..3b2285dd72 100644 --- a/go.sum +++ b/go.sum @@ -1,10 +1,12 @@ cdr.dev/slog v1.6.2-0.20240126064726-20367d4aede6 h1:KHblWIE/KHOwQ6lEbMZt6YpcGve2FEZ1sDtrW1Am5UI= cdr.dev/slog v1.6.2-0.20240126064726-20367d4aede6/go.mod h1:NaoTA7KwopCrnaSb0JXTC0PTp/O/Y83Lndnq0OEV3ZQ= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg= -cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/auth v0.2.2 h1:gmxNJs4YZYcw6YvKRtVBaF2fyUE6UrWPyzU8jHvYfmI= +cloud.google.com/go/auth v0.2.2/go.mod h1:2bDNJWtWziDT3Pu1URxHHbkHE/BbOCuyUiKIGcNvafo= +cloud.google.com/go/auth/oauth2adapt v0.2.1 h1:VSPmMmUlT8CkIZ2PzD9AlLN+R3+D1clXMWHHa6vG/Ag= +cloud.google.com/go/auth/oauth2adapt v0.2.1/go.mod h1:tOdK/k+D2e4GEwfBRA48dKNQiDsqIXxLh7VU319eV0g= +cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= +cloud.google.com/go/compute/metadata v0.3.0/go.mod 
h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/logging v1.9.0 h1:iEIOXFO9EmSiTjDmfpbRjOxECO7R8C7b8IXUGOj7xZw= cloud.google.com/go/logging v1.9.0/go.mod h1:1Io0vnZv4onoUnsVUQY3HZ3Igb1nBchky0A0y7BBBhE= cloud.google.com/go/longrunning v0.5.5 h1:GOE6pZFdSrTb4KAiKnXsJBtlE6mEyaW44oKyMILWnOg= @@ -1156,8 +1158,8 @@ golang.zx2c4.com/wireguard/wgctrl v0.0.0-20230429144221-925a1e7659e6 h1:CawjfCvY golang.zx2c4.com/wireguard/wgctrl v0.0.0-20230429144221-925a1e7659e6/go.mod h1:3rxYc4HtVcSG9gVaTs2GEBdehh+sYPOwKtyUWEOTb80= golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus8eIuExIE= golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI= -google.golang.org/api v0.172.0 h1:/1OcMZGPmW1rX2LCu2CmGUD1KXK1+pfzxotxyRUCCdk= -google.golang.org/api v0.172.0/go.mod h1:+fJZq6QXWfa9pXhnIzsjx4yI22d4aI9ZpLb58gvXjis= +google.golang.org/api v0.175.0 h1:9bMDh10V9cBuU8N45Wlc3cKkItfqMRV0Fi8UscLEtbY= +google.golang.org/api v0.175.0/go.mod h1:Rra+ltKu14pps/4xTycZfobMgLpbosoaaL7c+SEMrO8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= @@ -1170,15 +1172,15 @@ google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUE google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0= google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be h1:LG9vZxsWGOmUKieR8wPAUR3u3MpnYFQZROPIMaXh7/A= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8= -google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= +google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= +google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From 2e49fa94d4d89111de7a41dd810a8bf755574de6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Apr 2024 08:23:35 -0400 Subject: [PATCH 25/25] chore: bump github.com/coder/terraform-provider-coder (#13022) Bumps [github.com/coder/terraform-provider-coder](https://github.com/coder/terraform-provider-coder) from 0.20.1 to 0.21.0. - [Release notes](https://github.com/coder/terraform-provider-coder/releases) - [Changelog](https://github.com/coder/terraform-provider-coder/blob/main/.goreleaser.yml) - [Commits](https://github.com/coder/terraform-provider-coder/compare/v0.20.1...v0.21.0) --- updated-dependencies: - dependency-name: github.com/coder/terraform-provider-coder dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 93f2709cd1..784305d9eb 100644 --- a/go.mod +++ b/go.mod @@ -104,7 +104,7 @@ require ( github.com/coder/flog v1.1.0 github.com/coder/pretty v0.0.0-20230908205945-e89ba86370e0 github.com/coder/retry v1.5.1 - github.com/coder/terraform-provider-coder v0.20.1 + github.com/coder/terraform-provider-coder v0.21.0 github.com/coder/wgtunnel v0.1.13-0.20231127054351-578bfff9b92a github.com/coreos/go-oidc/v3 v3.10.0 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf diff --git a/go.sum b/go.sum index 3b2285dd72..971c935ab0 100644 --- a/go.sum +++ b/go.sum @@ -219,8 +219,8 @@ github.com/coder/ssh v0.0.0-20231128192721-70855dedb788 h1:YoUSJ19E8AtuUFVYBpXuO github.com/coder/ssh v0.0.0-20231128192721-70855dedb788/go.mod h1:aGQbuCLyhRLMzZF067xc84Lh7JDs1FKwCmF1Crl9dxQ= github.com/coder/tailscale v1.1.1-0.20240401202854-d329bbdb530d h1:IMvBC1GrCIiZFxpOYRQacZtdjnmsdWNAMilPz+kvdG4= github.com/coder/tailscale v1.1.1-0.20240401202854-d329bbdb530d/go.mod h1:L8tPrwSi31RAMEMV8rjb0vYTGs7rXt8rAHbqY/p41j4= -github.com/coder/terraform-provider-coder v0.20.1 h1:hz0yvDl8rDJyDgUlFH8QrGUxFKrwmyAQpOhaoTMEmtY= -github.com/coder/terraform-provider-coder v0.20.1/go.mod h1:pACHRoXSHBGyY696mLeQ1hR/Ag1G2wFk5bw0mT5Zp2g= +github.com/coder/terraform-provider-coder v0.21.0 h1:aoDmFJULYZpS66EIAZuNY4IxElaDkdRaWMWp9ScD2R8= +github.com/coder/terraform-provider-coder v0.21.0/go.mod h1:hqxd15PJeftFBOnGBBPN6WfNQutZtnahwwPeV8U6TyA= github.com/coder/wgtunnel v0.1.13-0.20231127054351-578bfff9b92a h1:KhR9LUVllMZ+e9lhubZ1HNrtJDgH5YLoTvpKwmrGag4= github.com/coder/wgtunnel v0.1.13-0.20231127054351-578bfff9b92a/go.mod h1:QzfptVUdEO+XbkzMKx1kw13i9wwpJlfI1RrZ6SNZ0hA= github.com/coder/wireguard-go v0.0.0-20230807234434-d825b45ccbf5 h1:eDk/42Kj4xN4yfE504LsvcFEo3dWUiCOaBiWJ2uIH2A=