Compare commits

...

18 Commits

Author SHA1 Message Date
Jon Ayers d0cf3def1b Merge afc9564d5d into 15157c1c40 2024-04-27 23:12:59 +00:00
Jon Ayers afc9564d5d make fmt 2024-04-27 23:12:53 +00:00
Jon Ayers 40449b85fb remove test release workflow 2024-04-27 23:10:55 +00:00
Jon Ayers 640cc1bb7d update windows script comment 2024-04-27 23:10:36 +00:00
Jon Ayers 902bff86e8 update CI workflow 2024-04-27 23:08:46 +00:00
Jon Ayers ea9afe982b i mean wtf 2024-04-27 22:04:04 +00:00
Jon Ayers 6fad0226fa big O 2024-04-27 21:59:49 +00:00
Jon Ayers f7879a746b wrong format 2024-04-27 21:50:54 +00:00
Jon Ayers f136fa1f60 test token 2024-04-27 21:47:20 +00:00
Jon Ayers 156b5eaa34 wrong principal 2024-04-27 21:44:42 +00:00
Jon Ayers d0cc85ed39 idk 2024-04-27 21:25:15 +00:00
Colin Adler 15157c1c40 chore: add network integration test suite scaffolding (#13072)
* chore: add network integration test suite scaffolding

* dean comments
2024-04-26 17:48:41 +00:00
Cian Johnston 73ba36c9d2 chore(docs): add note regarding Apr 26 scaletest (#13085) 2024-04-26 17:06:36 +01:00
Garrett Delfosse 8ba05a9052 feat: add switch http(s) button to error page (#12942) 2024-04-26 11:52:53 -04:00
Michael Brewer 848ea7e9f1 chore: correct name for github enterprise example (#13083)
Co-authored-by: Muhammad Atif Ali <me@matifali.dev>
2024-04-26 14:43:28 +00:00
Cian Johnston f1ef9fd673 chore(docs): add note regarding vcredist for embedded postgres (#13020) 2024-04-26 10:56:43 +01:00
Mathias Fredriksson d50a31ef62 chore(scripts): auto create autoversion PR from release script (#13074)
Ref #12465
2024-04-26 12:53:22 +03:00
Cian Johnston 365231b1e5 fix(cli): scaletest: ignore errors syncing output (#13076) 2024-04-26 09:18:33 +01:00
22 changed files with 611 additions and 229 deletions

View File

@ -128,6 +128,13 @@ jobs:
- name: Setup Node
uses: ./.github/actions/setup-node
# Necessary for signing Windows binaries.
- name: Setup Java
uses: actions/setup-java@v4
with:
distribution: "zulu"
java-version: "11.0"
- name: Install nsis and zstd
run: sudo apt-get install -y nsis zstd
@ -161,10 +168,32 @@ jobs:
AC_CERTIFICATE_PASSWORD: ${{ secrets.AC_CERTIFICATE_PASSWORD }}
AC_APIKEY_P8_BASE64: ${{ secrets.AC_APIKEY_P8_BASE64 }}
- name: Setup Windows EV Signing Certificate
run: |
set -euo pipefail
touch /tmp/ev_cert.pem
chmod 600 /tmp/ev_cert.pem
echo "$EV_SIGNING_CERT" > /tmp/ev_cert.pem
wget https://github.com/ebourg/jsign/releases/download/6.0/jsign-6.0.jar -O /tmp/jsign-6.0.jar
env:
EV_SIGNING_CERT: ${{ secrets.EV_SIGNING_CERT }}
- name: Test migrations from current ref to main
run: |
make test-migrations
# Setup GCloud for signing Windows binaries.
- name: Authenticate to Google Cloud
id: gcloud_auth
uses: google-github-actions/auth@v2
with:
workload_identity_provider: ${{ secrets.GCP_CODE_SIGNING_WORKLOAD_ID_PROVIDER }}
service_account: ${{ secrets.GCP_CODE_SIGNING_SERVICE_ACCOUNT }}
token_format: "access_token"
- name: Setup GCloud SDK
uses: "google-github-actions/setup-gcloud@v2"
- name: Build binaries
run: |
set -euo pipefail
@ -179,16 +208,26 @@ jobs:
build/coder_helm_"$version".tgz \
build/provisioner_helm_"$version".tgz
env:
CODER_SIGN_WINDOWS: "1"
CODER_SIGN_DARWIN: "1"
AC_CERTIFICATE_FILE: /tmp/apple_cert.p12
AC_CERTIFICATE_PASSWORD_FILE: /tmp/apple_cert_password.txt
AC_APIKEY_ISSUER_ID: ${{ secrets.AC_APIKEY_ISSUER_ID }}
AC_APIKEY_ID: ${{ secrets.AC_APIKEY_ID }}
AC_APIKEY_FILE: /tmp/apple_apikey.p8
EV_KEY: ${{ secrets.EV_KEY }}
EV_KEYSTORE: ${{ secrets.EV_KEYSTORE }}
EV_TSA_URL: ${{ secrets.EV_TSA_URL }}
EV_CERTIFICATE_PATH: /tmp/ev_cert.pem
GCLOUD_ACCESS_TOKEN: ${{ steps.gcloud_auth.outputs.access_token }}
JSIGN_PATH: /tmp/jsign-6.0.jar
- name: Delete Apple Developer certificate and API key
run: rm -f /tmp/{apple_cert.p12,apple_cert_password.txt,apple_apikey.p8}
- name: Delete Windows EV Signing Cert
run: rm /tmp/ev_cert.pem
- name: Determine base image tag
id: image-base-tag
run: |

View File

@ -1,168 +0,0 @@
# GitHub release workflow.
name: TestRelease
on:
pull_request:
permissions:
# Required to publish a release
contents: write
# Necessary to push docker images to ghcr.io.
packages: write
# Necessary for GCP authentication (https://github.com/google-github-actions/setup-gcloud#usage)
id-token: write
concurrency: ${{ github.workflow }}-${{ github.ref }}
jobs:
release:
name: Build and publish
runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }}
env:
# Necessary for Docker manifest
DOCKER_CLI_EXPERIMENTAL: "enabled"
outputs:
version: ${{ steps.version.outputs.version }}
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Authenticate to Google Cloud
id: gcloud_auth
uses: google-github-actions/auth@v2
with:
workload_identity_provider: ${{ secrets.GCP_WORKLOAD_ID_PROVIDER }}
service_account: ${{ secrets.GCP_CODE_SIGNING_SERVICE_ACCOUNT }}
token_format: "access_token"
- name: Setup GCloud SDK
uses: "google-github-actions/setup-gcloud@v2"
# If the event that triggered the build was an annotated tag (which our
# tags are supposed to be), actions/checkout has a bug where the tag in
# question is only a lightweight tag and not a full annotated tag. This
# command seems to fix it.
# https://github.com/actions/checkout/issues/290
- name: Fetch git tags
run: git fetch --tags --force
- name: Print version
id: version
run: |
set -euo pipefail
version="0.0.1-rc.1"
echo "version=$version" >> $GITHUB_OUTPUT
# Speed up future version.sh calls.
echo "CODER_FORCE_VERSION=$version" >> $GITHUB_ENV
echo "$version"
- name: Docker Login
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Setup Go
uses: ./.github/actions/setup-go
- name: Setup Node
uses: ./.github/actions/setup-node
- name: Setup Java
uses: actions/setup-java@v4
with:
distribution: "zulu"
java-version: "11.0"
- name: Install nsis and zstd
run: sudo apt-get install -y nsis zstd
- name: Install nfpm
run: |
set -euo pipefail
wget -O /tmp/nfpm.deb https://github.com/goreleaser/nfpm/releases/download/v2.35.1/nfpm_2.35.1_amd64.deb
sudo dpkg -i /tmp/nfpm.deb
rm /tmp/nfpm.deb
- name: Install rcodesign
run: |
set -euo pipefail
wget -O /tmp/rcodesign.tar.gz https://github.com/indygreg/apple-platform-rs/releases/download/apple-codesign%2F0.22.0/apple-codesign-0.22.0-x86_64-unknown-linux-musl.tar.gz
sudo tar -xzf /tmp/rcodesign.tar.gz \
-C /usr/bin \
--strip-components=1 \
apple-codesign-0.22.0-x86_64-unknown-linux-musl/rcodesign
rm /tmp/rcodesign.tar.gz
- name: Setup Apple Developer certificate and API key
run: |
set -euo pipefail
touch /tmp/{apple_cert.p12,apple_cert_password.txt,apple_apikey.p8}
chmod 600 /tmp/{apple_cert.p12,apple_cert_password.txt,apple_apikey.p8}
echo "$AC_CERTIFICATE_P12_BASE64" | base64 -d > /tmp/apple_cert.p12
echo "$AC_CERTIFICATE_PASSWORD" > /tmp/apple_cert_password.txt
echo "$AC_APIKEY_P8_BASE64" | base64 -d > /tmp/apple_apikey.p8
env:
AC_CERTIFICATE_P12_BASE64: ${{ secrets.AC_CERTIFICATE_P12_BASE64 }}
AC_CERTIFICATE_PASSWORD: ${{ secrets.AC_CERTIFICATE_PASSWORD }}
AC_APIKEY_P8_BASE64: ${{ secrets.AC_APIKEY_P8_BASE64 }}
- name: Setup Windows EV Signing Certificate
run: |
set -euo pipefail
touch /tmp/ev_cert.pem
chmod 600 /tmp/ev_cert.pem
echo "$EV_SIGNING_CERT" > /tmp/ev_cert.pem
wget https://github.com/ebourg/jsign/releases/download/6.0/jsign-6.0.jar -o /tmp/jsign-6.0.jar
env:
EV_SIGNING_CERT: ${{ secrets.EV_SIGNING_CERT }}
- name: Build binaries
run: |
set -euo pipefail
go mod download
version="$(./scripts/version.sh)"
make gen/mark-fresh
make -j \
build/coder_"$version"_linux_{amd64,armv7,arm64}.{tar.gz,apk,deb,rpm} \
build/coder_"$version"_{darwin,windows}_{amd64,arm64}.zip \
build/coder_"$version"_windows_amd64_installer.exe \
build/coder_helm_"$version".tgz \
build/provisioner_helm_"$version".tgz
env:
CODER_SIGN_WINDOWS: "1"
CODER_SIGN_DARWIN: "1"
AC_CERTIFICATE_FILE: /tmp/apple_cert.p12
AC_CERTIFICATE_PASSWORD_FILE: /tmp/apple_cert_password.txt
AC_APIKEY_ISSUER_ID: ${{ secrets.AC_APIKEY_ISSUER_ID }}
AC_APIKEY_ID: ${{ secrets.AC_APIKEY_ID }}
AC_APIKEY_FILE: /tmp/apple_apikey.p8
EV_KEY: ${{ secrets.EV_KEY }}
EV_KEYSTORE: ${{ secrets.EV_KEYSTORE }}
EV_TSA_URL: ${{ secrets.EV_TSA_URL }}
EV_CERTIFICATE_PATH: /tmp/ev_cert.pem
GCLOUD_ACCESS_TOKEN: ${{ steps.gcloud_auth.outputs.access_token }}
JSIGN_PATH: /tmp/jsign-6.0.jar
- name: Delete Apple Developer certificate and API key
run: rm -f /tmp/{apple_cert.p12,apple_cert_password.txt,apple_apikey.p8}
- name: Delete Windows EV Signing Cert
run: rm /tmp/ev_cert.pem
- name: Upload artifacts to actions (if dry-run)
uses: actions/upload-artifact@v4
with:
name: release-artifacts
path: |
./build/*_installer.exe
./build/*.zip
./build/*.tar.gz
./build/*.tgz
./build/*.apk
./build/*.deb
./build/*.rpm
retention-days: 1

View File

@ -200,7 +200,8 @@ endef
# calling this manually.
$(CODER_ALL_BINARIES): go.mod go.sum \
$(GO_SRC_FILES) \
$(shell find ./examples/templates)
$(shell find ./examples/templates) \
site/static/error.html
$(get-mode-os-arch-ext)
if [[ "$$os" != "windows" ]] && [[ "$$ext" != "" ]]; then

View File

@ -14,7 +14,6 @@ import (
"strconv"
"strings"
"sync"
"syscall"
"time"
"github.com/google/uuid"
@ -245,14 +244,8 @@ func (o *scaleTestOutput) write(res harness.Results, stdout io.Writer) error {
// Sync the file to disk if it's a file.
if s, ok := w.(interface{ Sync() error }); ok {
err := s.Sync()
// On Linux, EINVAL is returned when calling fsync on /dev/stdout. We
// can safely ignore this error.
// On macOS, ENOTTY is returned when calling sync on /dev/stdout. We
// can safely ignore this error.
if err != nil && !xerrors.Is(err, syscall.EINVAL) && !xerrors.Is(err, syscall.ENOTTY) {
return xerrors.Errorf("flush output file: %w", err)
}
// Best effort. If we get an error from syncing, just ignore it.
_ = s.Sync()
}
if c != nil {
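
The change above drops the EINVAL/ENOTTY special-casing in favor of a best-effort flush. A minimal, self-contained sketch of that pattern for illustration (the helper name is hypothetical, not part of the change):

```go
package main

import (
	"io"
	"os"
)

// flushBestEffort mirrors the approach taken above: if the writer happens to
// support Sync, call it and ignore any error. fsync on /dev/stdout can fail
// with EINVAL (Linux) or ENOTTY (macOS), and those failures are harmless here.
func flushBestEffort(w io.Writer) {
	if s, ok := w.(interface{ Sync() error }); ok {
		_ = s.Sync()
	}
}

func main() {
	flushBestEffort(os.Stdout)
}
```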

View File

@ -4,11 +4,14 @@ import (
"bufio"
"context"
"crypto/tls"
"errors"
"fmt"
"net"
"net/http"
"net/http/httputil"
"net/netip"
"net/url"
"strings"
"sync"
"sync/atomic"
"time"
@ -23,6 +26,7 @@ import (
"cdr.dev/slog"
"github.com/coder/coder/v2/coderd/tracing"
"github.com/coder/coder/v2/coderd/workspaceapps"
"github.com/coder/coder/v2/coderd/workspaceapps/appurl"
"github.com/coder/coder/v2/codersdk/workspacesdk"
"github.com/coder/coder/v2/site"
"github.com/coder/coder/v2/tailnet"
@ -341,7 +345,7 @@ type ServerTailnet struct {
totalConns *prometheus.CounterVec
}
func (s *ServerTailnet) ReverseProxy(targetURL, dashboardURL *url.URL, agentID uuid.UUID) *httputil.ReverseProxy {
func (s *ServerTailnet) ReverseProxy(targetURL, dashboardURL *url.URL, agentID uuid.UUID, app appurl.ApplicationURL, wildcardHostname string) *httputil.ReverseProxy {
// Rewrite the targetURL's Host to point to the agent's IP. This is
// necessary because due to TCP connection caching, each agent needs to be
// addressed individually. Otherwise, all connections get dialed as
@ -351,13 +355,46 @@ func (s *ServerTailnet) ReverseProxy(targetURL, dashboardURL *url.URL, agentID u
tgt.Host = net.JoinHostPort(tailnet.IPFromUUID(agentID).String(), port)
proxy := httputil.NewSingleHostReverseProxy(&tgt)
proxy.ErrorHandler = func(w http.ResponseWriter, r *http.Request, err error) {
proxy.ErrorHandler = func(w http.ResponseWriter, r *http.Request, theErr error) {
var (
desc = "Failed to proxy request to application: " + theErr.Error()
additionalInfo = ""
additionalButtonLink = ""
additionalButtonText = ""
)
var tlsError tls.RecordHeaderError
if (errors.As(theErr, &tlsError) && tlsError.Msg == "first record does not look like a TLS handshake") ||
errors.Is(theErr, http.ErrSchemeMismatch) {
// If the error is due to an HTTP/HTTPS mismatch, we can provide a
// more helpful error message with redirect buttons.
switchURL := url.URL{
Scheme: dashboardURL.Scheme,
}
_, protocol, isPort := app.PortInfo()
if isPort {
targetProtocol := "https"
if protocol == "https" {
targetProtocol = "http"
}
app = app.ChangePortProtocol(targetProtocol)
switchURL.Host = fmt.Sprintf("%s%s", app.String(), strings.TrimPrefix(wildcardHostname, "*"))
additionalButtonLink = switchURL.String()
additionalButtonText = fmt.Sprintf("Switch to %s", strings.ToUpper(targetProtocol))
additionalInfo += fmt.Sprintf("This error seems to be due to an app protocol mismatch, try switching to %s.", strings.ToUpper(targetProtocol))
}
}
site.RenderStaticErrorPage(w, r, site.ErrorPageData{
Status: http.StatusBadGateway,
Title: "Bad Gateway",
Description: "Failed to proxy request to application: " + err.Error(),
RetryEnabled: true,
DashboardURL: dashboardURL.String(),
Status: http.StatusBadGateway,
Title: "Bad Gateway",
Description: desc,
RetryEnabled: true,
DashboardURL: dashboardURL.String(),
AdditionalInfo: additionalInfo,
AdditionalButtonLink: additionalButtonLink,
AdditionalButtonText: additionalButtonText,
})
}
proxy.Director = s.director(agentID, proxy.Director)
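
For reference, a standalone sketch of the mismatch detection used by the error handler above. The helper name is illustrative; the two error checks are the ones this diff adds:

```go
package main

import (
	"crypto/tls"
	"errors"
	"fmt"
	"net/http"
)

// isSchemeMismatch reports whether a proxy error looks like the client and
// backend disagree on HTTP vs HTTPS: either the backend answered plaintext
// HTTP to a TLS handshake, or net/http flagged an explicit scheme mismatch.
func isSchemeMismatch(err error) bool {
	var tlsErr tls.RecordHeaderError
	if errors.As(err, &tlsErr) && tlsErr.Msg == "first record does not look like a TLS handshake" {
		return true
	}
	return errors.Is(err, http.ErrSchemeMismatch)
}

func main() {
	fmt.Println(isSchemeMismatch(http.ErrSchemeMismatch)) // true
}
```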

View File

@ -26,6 +26,7 @@ import (
"github.com/coder/coder/v2/agent/agenttest"
"github.com/coder/coder/v2/agent/proto"
"github.com/coder/coder/v2/coderd"
"github.com/coder/coder/v2/coderd/workspaceapps/appurl"
"github.com/coder/coder/v2/codersdk/agentsdk"
"github.com/coder/coder/v2/codersdk/workspacesdk"
"github.com/coder/coder/v2/tailnet"
@ -81,7 +82,7 @@ func TestServerTailnet_ReverseProxy_ProxyEnv(t *testing.T) {
u, err := url.Parse(fmt.Sprintf("http://127.0.0.1:%d", workspacesdk.AgentHTTPAPIServerPort))
require.NoError(t, err)
rp := serverTailnet.ReverseProxy(u, u, a.id)
rp := serverTailnet.ReverseProxy(u, u, a.id, appurl.ApplicationURL{}, "")
rw := httptest.NewRecorder()
req := httptest.NewRequest(
@ -112,7 +113,7 @@ func TestServerTailnet_ReverseProxy(t *testing.T) {
u, err := url.Parse(fmt.Sprintf("http://127.0.0.1:%d", workspacesdk.AgentHTTPAPIServerPort))
require.NoError(t, err)
rp := serverTailnet.ReverseProxy(u, u, a.id)
rp := serverTailnet.ReverseProxy(u, u, a.id, appurl.ApplicationURL{}, "")
rw := httptest.NewRecorder()
req := httptest.NewRequest(
@ -143,7 +144,7 @@ func TestServerTailnet_ReverseProxy(t *testing.T) {
u, err := url.Parse(fmt.Sprintf("http://127.0.0.1:%d", workspacesdk.AgentHTTPAPIServerPort))
require.NoError(t, err)
rp := serverTailnet.ReverseProxy(u, u, a.id)
rp := serverTailnet.ReverseProxy(u, u, a.id, appurl.ApplicationURL{}, "")
rw := httptest.NewRecorder()
req := httptest.NewRequest(
@ -177,7 +178,7 @@ func TestServerTailnet_ReverseProxy(t *testing.T) {
u, err := url.Parse(fmt.Sprintf("http://127.0.0.1:%d", workspacesdk.AgentHTTPAPIServerPort))
require.NoError(t, err)
rp := serverTailnet.ReverseProxy(u, u, a.id)
rp := serverTailnet.ReverseProxy(u, u, a.id, appurl.ApplicationURL{}, "")
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
require.NoError(t, err)
@ -222,7 +223,7 @@ func TestServerTailnet_ReverseProxy(t *testing.T) {
u, err := url.Parse("http://127.0.0.1" + port)
require.NoError(t, err)
rp := serverTailnet.ReverseProxy(u, u, a.id)
rp := serverTailnet.ReverseProxy(u, u, a.id, appurl.ApplicationURL{}, "")
for i := 0; i < 5; i++ {
rw := httptest.NewRecorder()
@ -279,7 +280,7 @@ func TestServerTailnet_ReverseProxy(t *testing.T) {
require.NoError(t, err)
for i, ag := range agents {
rp := serverTailnet.ReverseProxy(u, u, ag.id)
rp := serverTailnet.ReverseProxy(u, u, ag.id, appurl.ApplicationURL{}, "")
rw := httptest.NewRecorder()
req := httptest.NewRequest(
@ -317,7 +318,7 @@ func TestServerTailnet_ReverseProxy(t *testing.T) {
uri, err := url.Parse(s.URL)
require.NoError(t, err)
rp := serverTailnet.ReverseProxy(uri, uri, a.id)
rp := serverTailnet.ReverseProxy(uri, uri, a.id, appurl.ApplicationURL{}, "")
rw := httptest.NewRecorder()
req := httptest.NewRequest(
@ -347,7 +348,7 @@ func TestServerTailnet_ReverseProxy(t *testing.T) {
u, err := url.Parse(fmt.Sprintf("http://127.0.0.1:%d", workspacesdk.AgentHTTPAPIServerPort))
require.NoError(t, err)
rp := serverTailnet.ReverseProxy(u, u, a.id)
rp := serverTailnet.ReverseProxy(u, u, a.id, appurl.ApplicationURL{}, "")
rw := httptest.NewRecorder()
req := httptest.NewRequest(

View File

@ -5,6 +5,7 @@ import (
"net"
"net/url"
"regexp"
"strconv"
"strings"
"golang.org/x/xerrors"
@ -83,6 +84,55 @@ func (a ApplicationURL) Path() string {
return fmt.Sprintf("/@%s/%s.%s/apps/%s", a.Username, a.WorkspaceName, a.AgentName, a.AppSlugOrPort)
}
// PortInfo returns the port, protocol, and whether the AppSlugOrPort is a port or not.
func (a ApplicationURL) PortInfo() (uint, string, bool) {
var (
port uint64
protocol string
isPort bool
err error
)
if strings.HasSuffix(a.AppSlugOrPort, "s") {
trimmed := strings.TrimSuffix(a.AppSlugOrPort, "s")
port, err = strconv.ParseUint(trimmed, 10, 16)
if err == nil {
protocol = "https"
isPort = true
}
} else {
port, err = strconv.ParseUint(a.AppSlugOrPort, 10, 16)
if err == nil {
protocol = "http"
isPort = true
}
}
return uint(port), protocol, isPort
}
func (a *ApplicationURL) ChangePortProtocol(target string) ApplicationURL {
newAppURL := *a
port, protocol, isPort := a.PortInfo()
if !isPort {
return newAppURL
}
if target == protocol {
return newAppURL
}
if target == "https" {
newAppURL.AppSlugOrPort = fmt.Sprintf("%ds", port)
}
if target == "http" {
newAppURL.AppSlugOrPort = fmt.Sprintf("%d", port)
}
return newAppURL
}
// ParseSubdomainAppURL parses an ApplicationURL from the given subdomain. If
// the subdomain is not a valid application URL hostname, returns a non-nil
// error. If the hostname is not a subdomain of the given base hostname, returns
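
To make the new helpers concrete, a hedged usage sketch follows; the import path is the one used elsewhere in this changeset, and the values are illustrative:

```go
package main

import (
	"fmt"

	"github.com/coder/coder/v2/coderd/workspaceapps/appurl"
)

func main() {
	app := appurl.ApplicationURL{
		AppSlugOrPort: "8080s", // trailing "s" marks an HTTPS port
		AgentName:     "agent",
		WorkspaceName: "workspace",
		Username:      "user",
	}

	port, protocol, isPort := app.PortInfo()
	fmt.Println(port, protocol, isPort) // 8080 https true

	// Flip the protocol: "8080s" (https) becomes "8080" (http).
	flipped := app.ChangePortProtocol("http")
	fmt.Println(flipped.AppSlugOrPort) // 8080
}
```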

View File

@ -124,6 +124,16 @@ func TestParseSubdomainAppURL(t *testing.T) {
Username: "user",
},
},
{
Name: "Port--Agent--Workspace--User",
Subdomain: "8080s--agent--workspace--user",
Expected: appurl.ApplicationURL{
AppSlugOrPort: "8080s",
AgentName: "agent",
WorkspaceName: "workspace",
Username: "user",
},
},
{
Name: "HyphenatedNames",
Subdomain: "app-slug--agent-name--workspace-name--user-name",

View File

@ -66,7 +66,7 @@ var nonCanonicalHeaders = map[string]string{
type AgentProvider interface {
// ReverseProxy returns an httputil.ReverseProxy for proxying HTTP requests
// to the specified agent.
ReverseProxy(targetURL, dashboardURL *url.URL, agentID uuid.UUID) *httputil.ReverseProxy
ReverseProxy(targetURL, dashboardURL *url.URL, agentID uuid.UUID, app appurl.ApplicationURL, wildcardHost string) *httputil.ReverseProxy
// AgentConn returns a new connection to the specified agent.
AgentConn(ctx context.Context, agentID uuid.UUID) (_ *workspacesdk.AgentConn, release func(), _ error)
@ -314,7 +314,7 @@ func (s *Server) workspaceAppsProxyPath(rw http.ResponseWriter, r *http.Request)
return
}
s.proxyWorkspaceApp(rw, r, *token, chiPath)
s.proxyWorkspaceApp(rw, r, *token, chiPath, appurl.ApplicationURL{})
}
// HandleSubdomain handles subdomain-based application proxy requests (aka.
@ -417,7 +417,7 @@ func (s *Server) HandleSubdomain(middlewares ...func(http.Handler) http.Handler)
if !ok {
return
}
s.proxyWorkspaceApp(rw, r, *token, r.URL.Path)
s.proxyWorkspaceApp(rw, r, *token, r.URL.Path, app)
})).ServeHTTP(rw, r.WithContext(ctx))
})
}
@ -476,7 +476,7 @@ func (s *Server) parseHostname(rw http.ResponseWriter, r *http.Request, next htt
return app, true
}
func (s *Server) proxyWorkspaceApp(rw http.ResponseWriter, r *http.Request, appToken SignedToken, path string) {
func (s *Server) proxyWorkspaceApp(rw http.ResponseWriter, r *http.Request, appToken SignedToken, path string, app appurl.ApplicationURL) {
ctx := r.Context()
// Filter IP headers from untrusted origins.
@ -545,8 +545,12 @@ func (s *Server) proxyWorkspaceApp(rw http.ResponseWriter, r *http.Request, appT
r.URL.Path = path
appURL.RawQuery = ""
_, protocol, isPort := app.PortInfo()
if isPort {
appURL.Scheme = protocol
}
proxy := s.AgentProvider.ReverseProxy(appURL, s.DashboardURL, appToken.AgentID)
proxy := s.AgentProvider.ReverseProxy(appURL, s.DashboardURL, appToken.AgentID, app, s.Hostname)
proxy.ModifyResponse = func(r *http.Response) error {
r.Header.Del(httpmw.AccessControlAllowOriginHeader)

View File

@ -89,7 +89,7 @@ GitHub Enterprise requires the following environment variables:
```env
CODER_EXTERNAL_AUTH_0_ID="primary-github"
CODER_EXTERNAL_AUTH_0_TYPE=github-enterprise
CODER_EXTERNAL_AUTH_0_TYPE=github
CODER_EXTERNAL_AUTH_0_CLIENT_ID=xxxxxx
CODER_EXTERNAL_AUTH_0_CLIENT_SECRET=xxxxxxx
CODER_EXTERNAL_AUTH_0_VALIDATE_URL="https://github.example.com/api/v3/user"
@ -102,8 +102,8 @@ CODER_EXTERNAL_AUTH_0_TOKEN_URL="https://github.example.com/login/oauth/access_t
Bitbucket Server requires the following environment variables:
```env
CODER_EXTERNAL_AUTH_0_TYPE="bitbucket-server"
CODER_EXTERNAL_AUTH_0_ID=bitbucket
CODER_EXTERNAL_AUTH_0_ID="primary-bitbucket-server"
CODER_EXTERNAL_AUTH_0_TYPE=bitbucket-server
CODER_EXTERNAL_AUTH_0_CLIENT_ID=xxx
CODER_EXTERNAL_AUTH_0_CLIENT_SECRET=xxx
CODER_EXTERNAL_AUTH_0_AUTH_URL=https://bitbucket.domain.com/rest/oauth2/latest/authorize

View File

@ -21,6 +21,7 @@ Learn more about [Coder's architecture](../about/architecture.md) and our
| Kubernetes (GKE) | 2 cores | 4 GB | 1 | db-custom-1-3840 | 500 | 20 | 500 simulated | `v0.27.2` | Jul 27, 2023 |
| Kubernetes (GKE) | 2 cores | 8 GB | 2 | db-custom-2-7680 | 1000 | 20 | 1000 simulated | `v2.2.1` | Oct 9, 2023 |
| Kubernetes (GKE) | 4 cores | 16 GB | 2 | db-custom-8-30720 | 2000 | 50 | 2000 simulated | `v2.8.4` | Feb 28, 2024 |
| Kubernetes (GKE) | 2 cores | 4 GB | 2 | db-custom-2-7680 | 1000 | 50 | 1000 simulated | `v2.10.2` | Apr 26, 2024 |
> Note: a simulated connection reads and writes random data at 40KB/s per
> connection.

View File

@ -24,6 +24,11 @@ alternate installation methods (e.g. standalone binaries, system packages).
## Windows
> **Important:** If you plan to use the built-in PostgreSQL database, you will
> need to ensure that the
> [Visual C++ Runtime](https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist#latest-microsoft-visual-c-redistributable-version)
> is installed.
Use [GitHub releases](https://github.com/coder/coder/releases) to download the
Windows installer (`.msi`) or standalone binary (`.exe`).

View File

@ -53,6 +53,10 @@ script_check=1
mainline=1
channel=mainline
# These values will be used for any PRs created.
pr_review_assignee=${CODER_RELEASE_PR_REVIEW_ASSIGNEE:-@me}
pr_review_reviewer=${CODER_RELEASE_PR_REVIEW_REVIEWER:-bpmct,stirby}
args="$(getopt -o h -l dry-run,help,ref:,mainline,stable,major,minor,patch,force,ignore-script-out-of-date -- "$@")"
eval set -- "$args"
while true; do
@ -294,7 +298,7 @@ log "Release tags for ${new_version} created successfully and pushed to ${remote
log
# Write to a tmp file for ease of debugging.
release_json_file=$(mktemp -t coder-release.json)
release_json_file=$(mktemp -t coder-release.json.XXXXXX)
log "Writing release JSON to ${release_json_file}"
jq -n \
--argjson dry_run "${dry_run}" \
@ -310,6 +314,49 @@ maybedryrun "${dry_run}" cat "${release_json_file}" |
log
log "Release workflow started successfully!"
log
log "Would you like for me to create a pull request for you to automatically bump the version numbers in the docs?"
while [[ ! ${create_pr:-} =~ ^[YyNn]$ ]]; do
read -p "Create PR? (y/n) " -n 1 -r create_pr
log
done
if [[ ${create_pr} =~ ^[Yy]$ ]]; then
pr_branch=autoversion/${new_version}
title="docs: bump ${channel} version to ${new_version}"
body="This PR was automatically created by the [release script](https://github.com/coder/coder/blob/main/scripts/release.sh).
Please review the changes and merge if they look good and the release is complete.
You can follow the release progress [here](https://github.com/coder/coder/actions/workflows/release.yaml) and view the published release [here](https://github.com/coder/coder/releases/tag/${new_version}) (once complete)."
log
log "Creating branch \"${pr_branch}\" and updating versions..."
create_pr_stash=0
if ! git diff --quiet --exit-code -- docs; then
maybedryrun "${dry_run}" git stash push --message "scripts/release.sh: autostash (autoversion)" -- docs
create_pr_stash=1
fi
maybedryrun "${dry_run}" git checkout -b "${pr_branch}" "${remote}/${branch}"
execrelative go run ./release autoversion --channel "${channel}" "${new_version}" --dry-run
maybedryrun "${dry_run}" git add docs
maybedryrun "${dry_run}" git commit -m "${title}"
# Return to previous branch.
maybedryrun "${dry_run}" git checkout -
if ((create_pr_stash)); then
maybedryrun "${dry_run}" git stash pop
fi
log "Creating pull request..."
maybedryrun "${dry_run}" gh pr create \
--assignee "${pr_review_assignee}" \
--reviewer "${pr_review_reviewer}" \
--base "${branch}" \
--head "${pr_branch}" \
--title "${title}" \
--body "${body}"
fi
if ((dry_run)); then
# We can't watch the release.yaml workflow if we're in dry-run mode.
exit 0

View File

@ -381,12 +381,18 @@ func (r *releaseCommand) autoversionFile(ctx context.Context, file, channel, ver
}
}
if matchRe != nil {
// Apply matchRe and find the group named "version", then replace it with the new version.
// Utilize the index where the match was found to replace the correct part. The only
// match group is the version.
// Apply matchRe and find the group named "version", then replace it
// with the new version.
if match := matchRe.FindStringSubmatchIndex(line); match != nil {
logger.Info(ctx, "updating version number", "line_number", i+1, "match", match)
lines[i] = line[:match[2]] + version + line[match[3]:]
vg := matchRe.SubexpIndex("version")
if vg == -1 {
logger.Error(ctx, "version group not found in match", "num_subexp", matchRe.NumSubexp(), "subexp_names", matchRe.SubexpNames(), "match", match)
return xerrors.Errorf("bug: version group not found in match")
}
start := match[vg*2]
end := match[vg*2+1]
logger.Info(ctx, "updating version number", "line_number", i+1, "match_start", start, "match_end", end, "old_version", line[start:end])
lines[i] = line[:start] + version + line[end:]
matchRe = nil
break
}
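
The replacement logic above indexes into the submatch offsets by the named group's position. A standalone sketch of that pattern (the regexp and input line are illustrative, not the real autoversion configuration):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// A named "version" group, as the autoversion matcher expects.
	re := regexp.MustCompile(`--version (?P<version>[0-9]+\.[0-9]+\.[0-9]+)`)
	line := `  --version 2.10.0 # trailing comment!`

	match := re.FindStringSubmatchIndex(line)
	vg := re.SubexpIndex("version") // -1 would mean the group is missing
	start, end := match[vg*2], match[vg*2+1]

	// Splice the new version into the matched span only.
	fmt.Println(line[:start] + "2.11.1" + line[end:])
	// prints:   --version 2.11.1 # trailing comment!
}
```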

View File

@ -191,7 +191,7 @@ fi
# Ensure the ref is in the release branch.
branch_contains_ref=$(git branch --contains "${ref}" --list "${release_branch}" --format='%(refname)')
if [[ -z $branch_contains_ref ]]; then
if ((!dry_run)) && [[ -z $branch_contains_ref ]]; then
error "Provided ref (${ref_name}) is not in the required release branch (${release_branch})."
fi

View File

@ -0,0 +1,14 @@
# Some documentation
1. Run the following command to install the chart in your cluster.
For the **mainline** Coder release:
<!-- autoversion(mainline): "--version [version] # trailing comment!" -->
```shell
helm install coder coder-v2/coder \
--namespace coder \
--values values.yaml \
--version 2.10.0 # trailing comment!
```

View File

@ -0,0 +1,14 @@
# Some documentation
1. Run the following command to install the chart in your cluster.
For the **mainline** Coder release:
<!-- autoversion(mainline): "--version [version] # trailing comment!" -->
```shell
helm install coder coder-v2/coder \
--namespace coder \
--values values.yaml \
--version 2.11.1 # trailing comment!
```

View File

@ -7,10 +7,6 @@
#
# On success, the input file will be signed using the EV cert.
#
# You can also run the following command to verify the signature on other
# systems, but it may be less accurate:
# rcodesign verify path/to/binary
#
# Depends on the jsign utility (and thus Java). Requires the following environment variables
# to be set:
# - $JSIGN_PATH: The path to the jsign jar.
@ -28,12 +24,12 @@ dependencies java
requiredenvs JSIGN_PATH EV_KEYSTORE EV_KEY EV_CERTIFICATE_PATH EV_TSA_URL GCLOUD_ACCESS_TOKEN
java -jar "$JSIGN_PATH" \
--storetype GOOGLECLOUD \
--storepass "$GCLOUD_ACCESS_TOKEN" \
--keystore "$EV_KEYSTORE" \
--alias "$EV_KEY" \
--certfile "$EV_CERTIFICATE_PATH" \
--tsmode RFC3161 \
--tsaurl "$EV_TSA_URL" \
"$@" \
1>&2
--storetype GOOGLECLOUD \
--storepass "$GCLOUD_ACCESS_TOKEN" \
--keystore "$EV_KEYSTORE" \
--alias "$EV_KEY" \
--certfile "$EV_CERTIFICATE_PATH" \
--tsmode RFC3161 \
--tsaurl "$EV_TSA_URL" \
"$@" \
1>&2

View File

@ -786,12 +786,15 @@ func extractBin(dest string, r io.Reader) (numExtracted int, err error) {
type ErrorPageData struct {
Status int
// HideStatus will remove the status code from the page.
HideStatus bool
Title string
Description string
RetryEnabled bool
DashboardURL string
Warnings []string
HideStatus bool
Title string
Description string
RetryEnabled bool
DashboardURL string
Warnings []string
AdditionalInfo string
AdditionalButtonLink string
AdditionalButtonText string
RenderDescriptionMarkdown bool
}

View File

@ -33,7 +33,7 @@ running). */}}
.container {
--side-padding: 24px;
width: 100%;
max-width: calc(320px + var(--side-padding) * 2);
max-width: calc(500px + var(--side-padding) * 2);
padding: 0 var(--side-padding);
text-align: center;
}
@ -170,6 +170,9 @@ running). */}}
{{- if .Error.RenderDescriptionMarkdown }} {{ .ErrorDescriptionHTML }} {{
else }}
<p>{{ .Error.Description }}</p>
{{ end }} {{- if .Error.AdditionalInfo }}
<br />
<p>{{ .Error.AdditionalInfo }}</p>
{{ end }} {{- if .Error.Warnings }}
<div class="warning">
<div class="warning-title">
@ -195,7 +198,11 @@ running). */}}
</div>
{{ end }}
<div class="button-group">
{{- if .Error.RetryEnabled }}
{{- if and .Error.AdditionalButtonText .Error.AdditionalButtonLink }}
<a href="{{ .Error.AdditionalButtonLink }}"
>{{ .Error.AdditionalButtonText }}</a
>
{{ end }} {{- if .Error.RetryEnabled }}
<button onclick="window.location.reload()">Retry</button>
{{ end }}
<a href="{{ .Error.DashboardURL }}">Back to site</a>

View File

@ -0,0 +1,128 @@
package integration
import (
"context"
"encoding/json"
"io"
"net/http"
"net/http/httptest"
"net/netip"
"strings"
"sync/atomic"
"testing"
"time"
"github.com/google/uuid"
"github.com/stretchr/testify/require"
"golang.org/x/xerrors"
"nhooyr.io/websocket"
"tailscale.com/tailcfg"
"cdr.dev/slog"
"github.com/coder/coder/v2/coderd/httpapi"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/tailnet"
"github.com/coder/coder/v2/testutil"
)
func NetworkSetupDefault(*testing.T) {}
func DERPMapTailscale(ctx context.Context, t *testing.T) *tailcfg.DERPMap {
ctx, cancel := context.WithTimeout(ctx, testutil.WaitShort)
defer cancel()
req, err := http.NewRequestWithContext(ctx, "GET", "https://controlplane.tailscale.com/derpmap/default", nil)
require.NoError(t, err)
res, err := http.DefaultClient.Do(req)
require.NoError(t, err)
defer res.Body.Close()
dm := &tailcfg.DERPMap{}
dec := json.NewDecoder(res.Body)
err = dec.Decode(dm)
require.NoError(t, err)
return dm
}
func CoordinatorInMemory(t *testing.T, logger slog.Logger, dm *tailcfg.DERPMap) (coord tailnet.Coordinator, url string) {
coord = tailnet.NewCoordinator(logger)
var coordPtr atomic.Pointer[tailnet.Coordinator]
coordPtr.Store(&coord)
t.Cleanup(func() { _ = coord.Close() })
csvc, err := tailnet.NewClientService(logger, &coordPtr, 10*time.Minute, func() *tailcfg.DERPMap {
return dm
})
require.NoError(t, err)
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
idStr := strings.TrimPrefix(r.URL.Path, "/")
id, err := uuid.Parse(idStr)
if err != nil {
httpapi.Write(r.Context(), w, http.StatusBadRequest, codersdk.Response{
Message: "Bad agent id.",
Detail: err.Error(),
})
return
}
conn, err := websocket.Accept(w, r, nil)
if err != nil {
httpapi.Write(r.Context(), w, http.StatusBadRequest, codersdk.Response{
Message: "Failed to accept websocket.",
Detail: err.Error(),
})
return
}
ctx, wsNetConn := codersdk.WebsocketNetConn(r.Context(), conn, websocket.MessageBinary)
defer wsNetConn.Close()
err = csvc.ServeConnV2(ctx, wsNetConn, tailnet.StreamID{
Name: "client-" + id.String(),
ID: id,
Auth: tailnet.SingleTailnetCoordinateeAuth{},
})
if err != nil && !xerrors.Is(err, io.EOF) && !xerrors.Is(err, context.Canceled) {
_ = conn.Close(websocket.StatusInternalError, err.Error())
return
}
}))
t.Cleanup(srv.Close)
return coord, srv.URL
}
func TailnetSetupDRPC(ctx context.Context, t *testing.T, logger slog.Logger,
id, agentID uuid.UUID,
coordinateURL string,
dm *tailcfg.DERPMap,
) *tailnet.Conn {
ip := tailnet.IPFromUUID(id)
conn, err := tailnet.NewConn(&tailnet.Options{
Addresses: []netip.Prefix{netip.PrefixFrom(ip, 128)},
DERPMap: dm,
Logger: logger,
})
require.NoError(t, err)
t.Cleanup(func() { _ = conn.Close() })
//nolint:bodyclose
ws, _, err := websocket.Dial(ctx, coordinateURL+"/"+id.String(), nil)
require.NoError(t, err)
client, err := tailnet.NewDRPCClient(
websocket.NetConn(ctx, ws, websocket.MessageBinary),
logger,
)
require.NoError(t, err)
coord, err := client.Coordinate(ctx)
require.NoError(t, err)
coordination := tailnet.NewRemoteCoordination(logger, coord, conn, agentID)
t.Cleanup(func() { _ = coordination.Close() })
return conn
}

View File

@ -0,0 +1,194 @@
package integration
import (
"context"
"flag"
"fmt"
"os"
"os/exec"
"strconv"
"syscall"
"testing"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"tailscale.com/tailcfg"
"cdr.dev/slog"
"cdr.dev/slog/sloggers/slogtest"
"github.com/coder/coder/v2/tailnet"
"github.com/coder/coder/v2/testutil"
)
var (
isChild = flag.Bool("child", false, "Run tests as a child")
childTestID = flag.Int("child-test-id", 0, "Which test is being run")
childCoordinateURL = flag.String("child-coordinate-url", "", "The coordinate url to connect back to")
childAgentID = flag.String("child-agent-id", "", "The agent id of the child")
)
func TestMain(m *testing.M) {
if run := os.Getenv("CODER_TAILNET_TESTS"); run == "" {
_, _ = fmt.Println("skipping tests...")
return
}
if os.Getuid() != 0 {
_, _ = fmt.Println("networking integration tests must run as root")
return
}
flag.Parse()
os.Exit(m.Run())
}
var tests = []Test{{
Name: "Normal",
DERPMap: DERPMapTailscale,
Coordinator: CoordinatorInMemory,
Parent: Parent{
NetworkSetup: NetworkSetupDefault,
TailnetSetup: TailnetSetupDRPC,
Run: func(ctx context.Context, t *testing.T, opts ParentOpts) {
reach := opts.Conn.AwaitReachable(ctx, tailnet.IPFromUUID(opts.AgentID))
assert.True(t, reach)
},
},
Child: Child{
NetworkSetup: NetworkSetupDefault,
TailnetSetup: TailnetSetupDRPC,
Run: func(ctx context.Context, t *testing.T, opts ChildOpts) {
// wait until the parent kills us
<-make(chan struct{})
},
},
}}
//nolint:paralleltest
func TestIntegration(t *testing.T) {
if *isChild {
logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitSuperLong)
t.Cleanup(cancel)
agentID, err := uuid.Parse(*childAgentID)
require.NoError(t, err)
test := tests[*childTestID]
test.Child.NetworkSetup(t)
dm := test.DERPMap(ctx, t)
conn := test.Child.TailnetSetup(ctx, t, logger, agentID, uuid.Nil, *childCoordinateURL, dm)
test.Child.Run(ctx, t, ChildOpts{
Logger: logger,
Conn: conn,
AgentID: agentID,
})
return
}
for id, test := range tests {
t.Run(test.Name, func(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitSuperLong)
t.Cleanup(cancel)
logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
parentID, childID := uuid.New(), uuid.New()
dm := test.DERPMap(ctx, t)
_, coordURL := test.Coordinator(t, logger, dm)
child, waitChild := execChild(ctx, id, coordURL, childID)
test.Parent.NetworkSetup(t)
conn := test.Parent.TailnetSetup(ctx, t, logger, parentID, childID, coordURL, dm)
test.Parent.Run(ctx, t, ParentOpts{
Logger: logger,
Conn: conn,
ClientID: parentID,
AgentID: childID,
})
child.Process.Signal(syscall.SIGINT)
<-waitChild
})
}
}
type Test struct {
// Name is the name of the test.
Name string
// DERPMap returns the DERP map to use for both the parent and child. It is
// called once at the beginning of the test.
DERPMap func(ctx context.Context, t *testing.T) *tailcfg.DERPMap
// Coordinator returns a running tailnet coordinator, and the url to reach
// it on.
Coordinator func(t *testing.T, logger slog.Logger, dm *tailcfg.DERPMap) (coord tailnet.Coordinator, url string)
Parent Parent
Child Child
}
// Parent is the struct containing all of the parent specific configurations.
// Functions are invoked in order of struct definition.
type Parent struct {
// NetworkSetup is run before all test code. It can be used to setup
// networking scenarios.
NetworkSetup func(t *testing.T)
// TailnetSetup creates a tailnet network.
TailnetSetup func(
ctx context.Context, t *testing.T, logger slog.Logger,
id, agentID uuid.UUID, coordURL string, dm *tailcfg.DERPMap,
) *tailnet.Conn
Run func(ctx context.Context, t *testing.T, opts ParentOpts)
}
// Child is the struct containing all of the child specific configurations.
// Functions are invoked in order of struct definition.
type Child struct {
// NetworkSetup is run before all test code. It can be used to setup
// networking scenarios.
NetworkSetup func(t *testing.T)
// TailnetSetup creates a tailnet network.
TailnetSetup func(
ctx context.Context, t *testing.T, logger slog.Logger,
id, agentID uuid.UUID, coordURL string, dm *tailcfg.DERPMap,
) *tailnet.Conn
// Run runs the actual test. Parents and children run in separate processes,
// so it's important to ensure no communication happens over memory between
// run functions of parents and children.
Run func(ctx context.Context, t *testing.T, opts ChildOpts)
}
type ParentOpts struct {
Logger slog.Logger
Conn *tailnet.Conn
ClientID uuid.UUID
AgentID uuid.UUID
}
type ChildOpts struct {
Logger slog.Logger
Conn *tailnet.Conn
AgentID uuid.UUID
}
func execChild(ctx context.Context, testID int, coordURL string, agentID uuid.UUID) (*exec.Cmd, <-chan error) {
ch := make(chan error)
binary := os.Args[0]
args := os.Args[1:]
args = append(args,
"--child=true",
"--child-test-id="+strconv.Itoa(testID),
"--child-coordinate-url="+coordURL,
"--child-agent-id="+agentID.String(),
)
cmd := exec.CommandContext(ctx, binary, args...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
go func() {
ch <- cmd.Run()
}()
return cmd, ch
}