mirror of https://github.com/coder/coder.git
fix(coderd): remove websocket wait group on shutdown
There's really no great way to shut down websockets. The current code seems to assume that `(*http.Server).Shutdown()` gracefully kills them, but that's not the case: per https://pkg.go.dev/net/http#Server.Shutdown, Shutdown neither closes nor waits for hijacked connections such as WebSockets. This removes the wait group (and the mutex guarding it) so that `coder server` no longer waits for them to exit.
This commit is contained in:
parent 951dfaa99c
commit afdba27ec7
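
For context on the linked docs, here is a minimal standalone Go sketch (illustrative demo code, not part of this commit) showing that `(*http.Server).Shutdown()` neither closes nor waits for hijacked connections, which is what a websocket upgrade leaves behind, and hence why waiting on a websocket wait group during shutdown can block indefinitely:

package main

import (
	"context"
	"fmt"
	"net"
	"net/http"
	"time"
)

func main() {
	hijacked := make(chan net.Conn, 1)

	mux := http.NewServeMux()
	mux.HandleFunc("/hold", func(w http.ResponseWriter, r *http.Request) {
		// Take over the TCP connection, as a websocket upgrade does.
		hj, ok := w.(http.Hijacker)
		if !ok {
			http.Error(w, "hijacking unsupported", http.StatusInternalServerError)
			return
		}
		conn, _, err := hj.Hijack()
		if err != nil {
			return
		}
		hijacked <- conn // hold the connection open past the handler's return
	})

	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	srv := &http.Server{Handler: mux}
	go func() { _ = srv.Serve(ln) }() // returns http.ErrServerClosed after Shutdown

	// Open a connection that the handler hijacks and never closes.
	client, err := net.Dial("tcp", ln.Addr().String())
	if err != nil {
		panic(err)
	}
	fmt.Fprintf(client, "GET /hold HTTP/1.1\r\nHost: %s\r\n\r\n", ln.Addr())
	serverConn := <-hijacked

	// Shutdown returns promptly even though the hijacked connection is
	// still open: it neither closes nor waits for hijacked connections.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	fmt.Println("Shutdown returned:", srv.Shutdown(ctx))

	serverConn.Close()
	client.Close()
}

Running this should print "Shutdown returned: <nil>" almost immediately, while the hijacked connection is still alive.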
@@ -1187,9 +1187,9 @@ type API struct {
 	// SiteHandler serves static files for the dashboard.
 	SiteHandler *site.Handler
 
-	WebsocketWaitMutex sync.Mutex
-	WebsocketWaitGroup sync.WaitGroup
-	derpCloseFunc      func()
+	ProvisionerWaitMutex sync.Mutex
+	ProvisionerWaitGroup sync.WaitGroup
+	derpCloseFunc        func()
 
 	metricsCache  *metricscache.Cache
 	updateChecker *updatecheck.Checker
@@ -1220,9 +1220,10 @@ func (api *API) Close() error {
 		api.derpCloseFunc()
 	}
 
-	api.WebsocketWaitMutex.Lock()
-	api.WebsocketWaitGroup.Wait()
-	api.WebsocketWaitMutex.Unlock()
+	// Wait for in-memory provisioners to close.
+	api.ProvisionerWaitMutex.Lock()
+	api.ProvisionerWaitGroup.Wait()
+	api.ProvisionerWaitMutex.Unlock()
 
 	api.dbRolluper.Close()
 	api.metricsCache.Close()
@@ -1343,14 +1344,12 @@ func (api *API) CreateInMemoryProvisionerDaemon(dialCtx context.Context, name st
 			},
 		},
 	)
-	// in-mem pipes aren't technically "websockets" but they have the same properties as far as the
-	// API is concerned: they are long-lived connections that we need to close before completing
-	// shutdown of the API.
-	api.WebsocketWaitMutex.Lock()
-	api.WebsocketWaitGroup.Add(1)
-	api.WebsocketWaitMutex.Unlock()
+
+	api.ProvisionerWaitMutex.Lock()
+	api.ProvisionerWaitGroup.Add(1)
+	api.ProvisionerWaitMutex.Unlock()
 	go func() {
-		defer api.WebsocketWaitGroup.Done()
+		defer api.ProvisionerWaitGroup.Done()
 		// here we pass the background context, since we want the server to keep serving until the
 		// client hangs up. If we, say, pass the API context, then when it is canceled, we could
 		// drop a job that we locked in the database but never passed to the provisionerd. The
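
The two hunks above keep, for in-memory provisioners, the same shape of synchronization that this commit drops for websockets: a sync.WaitGroup guarded by a sync.Mutex, so that Close() can wait for in-flight connections without racing against new Add(1) calls. A minimal sketch of that pattern, with illustrative names rather than the real coderd types:

package main

import (
	"fmt"
	"sync"
	"time"
)

// connTracker mirrors the ProvisionerWaitMutex/ProvisionerWaitGroup pattern
// above: the mutex ensures no new Add(1) can race with the Wait() issued
// during shutdown. Names here are illustrative, not the actual coderd API.
type connTracker struct {
	mu sync.Mutex
	wg sync.WaitGroup
}

// track registers a long-lived connection; the caller defers the returned
// func, matching the `defer api.ProvisionerWaitGroup.Done()` in the diff.
func (t *connTracker) track() (done func()) {
	t.mu.Lock()
	t.wg.Add(1)
	t.mu.Unlock()
	return t.wg.Done
}

// waitAll blocks until every tracked connection has called done. Holding the
// mutex while waiting keeps late callers from issuing an Add(1) concurrently
// with Wait once the counter has reached zero, which sync.WaitGroup forbids.
func (t *connTracker) waitAll() {
	t.mu.Lock()
	t.wg.Wait()
	t.mu.Unlock()
}

func main() {
	var t connTracker
	done := t.track()
	go func() {
		defer done()
		time.Sleep(50 * time.Millisecond) // stand-in for an in-memory provisioner session
	}()
	t.waitAll()
	fmt.Println("all tracked connections drained")
}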
@@ -61,10 +61,6 @@ func (api *API) provisionerJobLogs(rw http.ResponseWriter, r *http.Request, job
 	}
 
 	follower := newLogFollower(ctx, logger, api.Database, api.Pubsub, rw, r, job, after)
-	api.WebsocketWaitMutex.Lock()
-	api.WebsocketWaitGroup.Add(1)
-	api.WebsocketWaitMutex.Unlock()
-	defer api.WebsocketWaitGroup.Done()
 	follower.follow()
 }
 
@@ -515,11 +515,6 @@ func (api *API) workspaceAgentLogs(rw http.ResponseWriter, r *http.Request) {
 	}
 	workspace := row.Workspace
 
-	api.WebsocketWaitMutex.Lock()
-	api.WebsocketWaitGroup.Add(1)
-	api.WebsocketWaitMutex.Unlock()
-	defer api.WebsocketWaitGroup.Done()
-
 	opts := &websocket.AcceptOptions{}
 
 	// Allow client to request no compression. This is useful for buggy
@@ -867,11 +862,6 @@ func (api *API) workspaceAgentConnectionGeneric(rw http.ResponseWriter, r *http.
 func (api *API) derpMapUpdates(rw http.ResponseWriter, r *http.Request) {
 	ctx := r.Context()
 
-	api.WebsocketWaitMutex.Lock()
-	api.WebsocketWaitGroup.Add(1)
-	api.WebsocketWaitMutex.Unlock()
-	defer api.WebsocketWaitGroup.Done()
-
 	ws, err := websocket.Accept(rw, r, nil)
 	if err != nil {
 		httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
@@ -950,10 +940,6 @@ func (api *API) derpMapUpdates(rw http.ResponseWriter, r *http.Request) {
 func (api *API) workspaceAgentCoordinate(rw http.ResponseWriter, r *http.Request) {
 	ctx := r.Context()
 
-	api.WebsocketWaitMutex.Lock()
-	api.WebsocketWaitGroup.Add(1)
-	api.WebsocketWaitMutex.Unlock()
-	defer api.WebsocketWaitGroup.Done()
 	// The middleware only accept agents for resources on the latest build.
 	workspaceAgent := httpmw.WorkspaceAgent(r)
 	build := httpmw.LatestBuild(r)
@@ -1058,10 +1044,6 @@ func (api *API) workspaceAgentClientCoordinate(rw http.ResponseWriter, r *http.R
 		return
 	}
 
-	api.WebsocketWaitMutex.Lock()
-	api.WebsocketWaitGroup.Add(1)
-	api.WebsocketWaitMutex.Unlock()
-	defer api.WebsocketWaitGroup.Done()
 	workspaceAgent := httpmw.WorkspaceAgentParam(r)
 
 	conn, err := websocket.Accept(rw, r, nil)
@@ -56,10 +56,6 @@ func (api *API) workspaceAgentRPC(rw http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	api.WebsocketWaitMutex.Lock()
-	api.WebsocketWaitGroup.Add(1)
-	api.WebsocketWaitMutex.Unlock()
-	defer api.WebsocketWaitGroup.Done()
 	workspaceAgent := httpmw.WorkspaceAgent(r)
 	build := httpmw.LatestBuild(r)
 
@@ -267,11 +267,6 @@ func (api *API) provisionerDaemonServe(rw http.ResponseWriter, r *http.Request)
 		return
 	}
 
-	api.AGPL.WebsocketWaitMutex.Lock()
-	api.AGPL.WebsocketWaitGroup.Add(1)
-	api.AGPL.WebsocketWaitMutex.Unlock()
-	defer api.AGPL.WebsocketWaitGroup.Done()
-
 	tep := telemetry.ConvertExternalProvisioner(id, tags, provisioners)
 	api.Telemetry.Report(&telemetry.Snapshot{ExternalProvisioners: []telemetry.ExternalProvisioner{tep}})
 	defer func() {
@@ -43,11 +43,6 @@ func (api *API) workspaceProxyCoordinate(rw http.ResponseWriter, r *http.Request
 		msgType = websocket.MessageBinary
 	}
 
-	api.AGPL.WebsocketWaitMutex.Lock()
-	api.AGPL.WebsocketWaitGroup.Add(1)
-	api.AGPL.WebsocketWaitMutex.Unlock()
-	defer api.AGPL.WebsocketWaitGroup.Done()
-
 	conn, err := websocket.Accept(rw, r, nil)
 	if err != nil {
 		httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{