feat: remove loadtest cmd, add new scaletest cmd (#5310)

This commit is contained in:
Dean Sheather 2022-12-16 01:04:24 +10:00 committed by GitHub
parent 306fe4a91b
commit 6b6eac2518
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
41 changed files with 1887 additions and 1007 deletions

View File

@ -1,391 +0,0 @@
package cli
import (
"context"
"encoding/json"
"fmt"
"io"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/spf13/cobra"
"go.opentelemetry.io/otel/trace"
"golang.org/x/xerrors"
"github.com/coder/coder/cli/cliflag"
"github.com/coder/coder/coderd/tracing"
"github.com/coder/coder/codersdk"
"github.com/coder/coder/loadtest/harness"
)
// loadtestTracerName is the tracer name used for spans created by the
// loadtest command.
const loadtestTracerName = "coder_loadtest"

// loadtest returns the hidden `coder loadtest` command. It reads a JSON
// test configuration (from a file or stdin), verifies the logged-in user
// is a site owner, optionally sets up tracing, runs the configured tests
// through a test harness, writes the results to each configured output,
// and finally cleans up any resources the tests created.
func loadtest() *cobra.Command {
	var (
		configPath           string
		outputSpecs          []string
		traceEnable          bool
		traceCoder           bool
		traceHoneycombAPIKey string
		tracePropagate       bool
	)
	cmd := &cobra.Command{
		Use:   "loadtest --config <path> [--output json[:path]] [--output text[:path]]]",
		Short: "Load test the Coder API",
		// TODO: documentation and a JSON schema file
		Long: "Perform load tests against the Coder server. The load tests are configurable via a JSON file.",
		Example: formatExamples(
			example{
				Description: "Run a loadtest with the given configuration file",
				Command:     "coder loadtest --config path/to/config.json",
			},
			example{
				Description: "Run a loadtest, reading the configuration from stdin",
				Command:     "cat path/to/config.json | coder loadtest --config -",
			},
			example{
				Description: "Run a loadtest outputting JSON results instead",
				Command:     "coder loadtest --config path/to/config.json --output json",
			},
			example{
				Description: "Run a loadtest outputting JSON results to a file",
				Command:     "coder loadtest --config path/to/config.json --output json:path/to/results.json",
			},
			example{
				Description: "Run a loadtest outputting text results to stdout and JSON results to a file",
				Command:     "coder loadtest --config path/to/config.json --output text --output json:path/to/results.json",
			},
		),
		Hidden: true,
		Args:   cobra.ExactArgs(0),
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := tracing.SetTracerName(cmd.Context(), loadtestTracerName)

			// Parse the config and output specs up front so flag errors
			// surface before any network calls.
			config, err := loadLoadTestConfigFile(configPath, cmd.InOrStdin())
			if err != nil {
				return err
			}
			outputs, err := parseLoadTestOutputs(outputSpecs)
			if err != nil {
				return err
			}

			client, err := CreateClient(cmd)
			if err != nil {
				return err
			}

			me, err := client.User(ctx, codersdk.Me)
			if err != nil {
				return xerrors.Errorf("fetch current user: %w", err)
			}

			// Only owners can do loadtests. This isn't a very strong check but
			// there's not much else we can do. Ratelimits are enforced for
			// non-owners so hopefully that limits the damage if someone
			// disables this check and runs it against a non-owner account.
			ok := false
			for _, role := range me.Roles {
				if role.Name == "owner" {
					ok = true
					break
				}
			}
			if !ok {
				return xerrors.Errorf("Not logged in as a site owner. Load testing is only available to site owners.")
			}

			// Setup tracing and start a span. closeTracing is guarded by a
			// sync.Once because it is called from both the deferred cleanup
			// below and the explicit "Upload traces" step at the end.
			var (
				shouldTrace                           = traceEnable || traceCoder || traceHoneycombAPIKey != ""
				tracerProvider   trace.TracerProvider = trace.NewNoopTracerProvider()
				closeTracingOnce sync.Once
				closeTracing     = func(_ context.Context) error {
					return nil
				}
			)
			if shouldTrace {
				tracerProvider, closeTracing, err = tracing.TracerProvider(ctx, loadtestTracerName, tracing.TracerOpts{
					Default:   traceEnable,
					Coder:     traceCoder,
					Honeycomb: traceHoneycombAPIKey,
				})
				if err != nil {
					return xerrors.Errorf("initialize tracing: %w", err)
				}
				defer func() {
					closeTracingOnce.Do(func() {
						// Allow time for traces to flush even if command
						// context is canceled.
						ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
						defer cancel()
						_ = closeTracing(ctx)
					})
				}()
			}
			tracer := tracerProvider.Tracer(loadtestTracerName)

			// Disable ratelimits and propagate tracing spans for future
			// requests. Individual tests will setup their own loggers.
			client.BypassRatelimits = true
			client.PropagateTracing = tracePropagate

			// Prepare the test. Each configured test contributes Count runs,
			// named "<type>-<index>" with run IDs "0".."Count-1", and every
			// run is wrapped so it gets its own trace span.
			runStrategy := config.Strategy.ExecutionStrategy()
			cleanupStrategy := config.CleanupStrategy.ExecutionStrategy()
			th := harness.NewTestHarness(runStrategy, cleanupStrategy)
			for i, t := range config.Tests {
				name := fmt.Sprintf("%s-%d", t.Type, i)
				for j := 0; j < t.Count; j++ {
					id := strconv.Itoa(j)
					runner, err := t.NewRunner(client.Clone())
					if err != nil {
						return xerrors.Errorf("create %q runner for %s/%s: %w", t.Type, name, id, err)
					}
					th.AddRun(name, id, &runnableTraceWrapper{
						tracer:   tracer,
						spanName: fmt.Sprintf("%s/%s", name, id),
						runner:   runner,
					})
				}
			}

			_, _ = fmt.Fprintln(cmd.ErrOrStderr(), "Running load test...")
			testCtx := ctx
			if config.Timeout > 0 {
				var cancel func()
				testCtx, cancel = context.WithTimeout(testCtx, time.Duration(config.Timeout))
				defer cancel()
			}

			// TODO: live progress output
			err = th.Run(testCtx)
			if err != nil {
				return xerrors.Errorf("run test harness (harness failure, not a test failure): %w", err)
			}

			// Print the results to every configured output (stdout or file).
			res := th.Results()
			for _, output := range outputs {
				var (
					w = cmd.OutOrStdout()
					c io.Closer
				)
				if output.path != "-" {
					f, err := os.Create(output.path)
					if err != nil {
						return xerrors.Errorf("create output file: %w", err)
					}
					w, c = f, f
				}
				switch output.format {
				case loadTestOutputFormatText:
					res.PrintText(w)
				case loadTestOutputFormatJSON:
					err = json.NewEncoder(w).Encode(res)
					if err != nil {
						return xerrors.Errorf("encode JSON: %w", err)
					}
				}
				if c != nil {
					err = c.Close()
					if err != nil {
						return xerrors.Errorf("close output file: %w", err)
					}
				}
			}

			// Cleanup. Note this uses ctx, not testCtx, so the configured
			// test timeout does not cut cleanup short.
			_, _ = fmt.Fprintln(cmd.ErrOrStderr(), "\nCleaning up...")
			err = th.Cleanup(ctx)
			if err != nil {
				return xerrors.Errorf("cleanup tests: %w", err)
			}

			// Upload traces.
			if shouldTrace {
				_, _ = fmt.Fprintln(cmd.ErrOrStderr(), "\nUploading traces...")
				closeTracingOnce.Do(func() {
					ctx, cancel := context.WithTimeout(ctx, 1*time.Minute)
					defer cancel()
					err := closeTracing(ctx)
					if err != nil {
						_, _ = fmt.Fprintf(cmd.ErrOrStderr(), "\nError uploading traces: %+v\n", err)
					}
				})
			}

			if res.TotalFail > 0 {
				return xerrors.New("load test failed, see above for more details")
			}
			return nil
		},
	}

	cliflag.StringVarP(cmd.Flags(), &configPath, "config", "", "CODER_LOADTEST_CONFIG_PATH", "", "Path to the load test configuration file, or - to read from stdin.")
	cliflag.StringArrayVarP(cmd.Flags(), &outputSpecs, "output", "", "CODER_LOADTEST_OUTPUTS", []string{"text"}, "Output formats, see usage for more information.")
	cliflag.BoolVarP(cmd.Flags(), &traceEnable, "trace", "", "CODER_LOADTEST_TRACE", false, "Whether application tracing data is collected. It exports to a backend configured by environment variables. See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md")
	cliflag.BoolVarP(cmd.Flags(), &traceCoder, "trace-coder", "", "CODER_LOADTEST_TRACE_CODER", false, "Whether opentelemetry traces are sent to Coder. We recommend keeping this disabled unless we advise you to enable it.")
	cliflag.StringVarP(cmd.Flags(), &traceHoneycombAPIKey, "trace-honeycomb-api-key", "", "CODER_LOADTEST_TRACE_HONEYCOMB_API_KEY", "", "Enables trace exporting to Honeycomb.io using the provided API key.")
	cliflag.BoolVarP(cmd.Flags(), &tracePropagate, "trace-propagate", "", "CODER_LOADTEST_TRACE_PROPAGATE", false, "Enables trace propagation to the Coder backend, which will be used to correlate server-side spans with client-side spans. Only enable this if the server is configured with the exact same tracing configuration as the client.")
	return cmd
}
// loadLoadTestConfigFile reads and validates a LoadTestConfig from the
// given path, or from stdin when the path is "-". An empty path is an
// error.
func loadLoadTestConfigFile(configPath string, stdin io.Reader) (LoadTestConfig, error) {
	if configPath == "" {
		return LoadTestConfig{}, xerrors.New("config is required")
	}

	var src io.ReadCloser
	switch configPath {
	case "-":
		src = io.NopCloser(stdin)
	default:
		f, err := os.Open(configPath)
		if err != nil {
			return LoadTestConfig{}, xerrors.Errorf("open config file %q: %w", configPath, err)
		}
		src = f
	}

	var cfg LoadTestConfig
	decodeErr := json.NewDecoder(src).Decode(&cfg)
	// Close regardless of decode outcome; a close failure is not
	// interesting for a read-only handle.
	_ = src.Close()
	if decodeErr != nil {
		return LoadTestConfig{}, xerrors.Errorf("read config file %q: %w", configPath, decodeErr)
	}

	if err := cfg.Validate(); err != nil {
		return LoadTestConfig{}, xerrors.Errorf("validate config: %w", err)
	}
	return cfg, nil
}
// loadTestOutputFormat identifies how load test results are rendered.
type loadTestOutputFormat string

const (
	loadTestOutputFormatText loadTestOutputFormat = "text"
	loadTestOutputFormatJSON loadTestOutputFormat = "json"
	// TODO: html format
)

// loadTestOutput is a single destination for rendered results, parsed
// from an --output flag.
type loadTestOutput struct {
	format loadTestOutputFormat
	// Up to one path (the first path) will have the value "-" which signifies
	// stdout.
	path string
}
// parseLoadTestOutputs converts raw --output flag values ("format" or
// "format:path") into loadTestOutput values. At most one spec may target
// stdout; when no spec is given at all, a text output to stdout is the
// default. The stdout output, when present, is always first in the result.
func parseLoadTestOutputs(outputs []string) ([]loadTestOutput, error) {
	valid := map[loadTestOutputFormat]struct{}{
		loadTestOutputFormatText: {},
		loadTestOutputFormatJSON: {},
	}

	var (
		stdoutFormat loadTestOutputFormat
		fileOutputs  []loadTestOutput
	)
	for i, spec := range outputs {
		formatStr, path, hasPath := strings.Cut(spec, ":")
		format := loadTestOutputFormat(formatStr)
		if _, ok := valid[format]; !ok {
			return nil, xerrors.Errorf("invalid output format %q in output flag %d", formatStr, i)
		}
		if !hasPath {
			// No path means stdout; only one spec may claim it.
			if stdoutFormat != "" {
				return nil, xerrors.Errorf("multiple output flags specified for stdout")
			}
			stdoutFormat = format
			continue
		}
		fileOutputs = append(fileOutputs, loadTestOutput{
			format: format,
			path:   path,
		})
	}

	// Default to --output text
	if stdoutFormat == "" && len(fileOutputs) == 0 {
		stdoutFormat = loadTestOutputFormatText
	}
	if stdoutFormat == "" {
		return fileOutputs, nil
	}
	return append([]loadTestOutput{{
		format: stdoutFormat,
		path:   "-",
	}}, fileOutputs...), nil
}
// runnableTraceWrapper wraps a harness.Runnable (and, when implemented,
// its harness.Cleanable side) so that each run and cleanup is recorded in
// its own trace span.
type runnableTraceWrapper struct {
	tracer   trace.Tracer
	spanName string
	runner   harness.Runnable

	// span is the root span created by Run; Cleanup reuses its span
	// context so cleanup spans correlate with the run they clean up.
	span trace.Span
}

var _ harness.Runnable = &runnableTraceWrapper{}
var _ harness.Cleanable = &runnableTraceWrapper{}
// Run implements harness.Runnable. It starts a new root span for the
// whole run, writes the trace and span IDs to the run's log writer so
// operators can find the trace later, then executes the wrapped runner
// inside a child "<spanName> run" span.
func (r *runnableTraceWrapper) Run(ctx context.Context, id string, logs io.Writer) error {
	ctx, span := r.tracer.Start(ctx, r.spanName, trace.WithNewRoot())
	defer span.End()
	// Stash the root span so Cleanup can link its span to this run.
	r.span = span

	traceID := "unknown trace ID"
	spanID := "unknown span ID"
	if span.SpanContext().HasTraceID() {
		traceID = span.SpanContext().TraceID().String()
	}
	if span.SpanContext().HasSpanID() {
		spanID = span.SpanContext().SpanID().String()
	}
	_, _ = fmt.Fprintf(logs, "Trace ID: %s\n", traceID)
	_, _ = fmt.Fprintf(logs, "Span ID: %s\n\n", spanID)

	// Make a separate span for the run itself so the sub-spans are grouped
	// neatly. The cleanup span is also a child of the above span so this is
	// important for readability.
	ctx2, span2 := r.tracer.Start(ctx, r.spanName+" run")
	defer span2.End()
	return r.runner.Run(ctx2, id, logs)
}
// Cleanup implements harness.Cleanable. It is a no-op when the wrapped
// runner has no cleanup of its own. When Run already recorded a span, the
// cleanup span is attached to that run's span context for correlation.
func (r *runnableTraceWrapper) Cleanup(ctx context.Context, id string) error {
	cleanable, ok := r.runner.(harness.Cleanable)
	if !ok {
		return nil
	}

	if r.span != nil {
		ctx = trace.ContextWithSpanContext(ctx, r.span.SpanContext())
	}
	ctx, span := r.tracer.Start(ctx, r.spanName+" cleanup")
	defer span.End()

	return cleanable.Cleanup(ctx, id)
}

View File

@ -1,309 +0,0 @@
package cli_test
import (
"bytes"
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/coder/coder/cli"
"github.com/coder/coder/cli/clitest"
"github.com/coder/coder/coderd/coderdtest"
"github.com/coder/coder/coderd/httpapi"
"github.com/coder/coder/codersdk"
"github.com/coder/coder/loadtest/harness"
"github.com/coder/coder/loadtest/placebo"
"github.com/coder/coder/loadtest/workspacebuild"
"github.com/coder/coder/pty/ptytest"
"github.com/coder/coder/testutil"
)
// TestLoadTest exercises the `coder loadtest` command end-to-end against
// an in-process coderd: reading config from stdin, reading it from a file
// while building real workspaces, and checking the --output flag
// combinations.
//
// NOTE: the whole test is currently skipped as flaky (see linked issue).
func TestLoadTest(t *testing.T) {
	t.Skipf("This test is flakey. See https://github.com/coder/coder/issues/4942")
	t.Parallel()

	// Ten placebo runs, config supplied on stdin via "--config -".
	t.Run("PlaceboFromStdin", func(t *testing.T) {
		t.Parallel()
		client := coderdtest.New(t, nil)
		_ = coderdtest.CreateFirstUser(t, client)
		config := cli.LoadTestConfig{
			Strategy: cli.LoadTestStrategy{
				Type: cli.LoadTestStrategyTypeLinear,
			},
			CleanupStrategy: cli.LoadTestStrategy{
				Type: cli.LoadTestStrategyTypeLinear,
			},
			Tests: []cli.LoadTest{
				{
					Type:  cli.LoadTestTypePlacebo,
					Count: 10,
					Placebo: &placebo.Config{
						Sleep: httpapi.Duration(10 * time.Millisecond),
					},
				},
			},
			Timeout: httpapi.Duration(testutil.WaitShort),
		}
		configBytes, err := json.Marshal(config)
		require.NoError(t, err)
		cmd, root := clitest.New(t, "loadtest", "--config", "-")
		clitest.SetupConfig(t, client, root)
		pty := ptytest.New(t)
		cmd.SetIn(bytes.NewReader(configBytes))
		cmd.SetOut(pty.Output())
		cmd.SetErr(pty.Output())
		ctx, cancelFunc := context.WithTimeout(context.Background(), testutil.WaitLong)
		defer cancelFunc()
		done := make(chan any)
		go func() {
			errC := cmd.ExecuteContext(ctx)
			assert.NoError(t, errC)
			close(done)
		}()
		pty.ExpectMatch("Test results:")
		pty.ExpectMatch("Pass: 10")
		cancelFunc()
		<-done
	})

	// Two concurrent workspace-build runs, config written to a temp file.
	t.Run("WorkspaceBuildFromFile", func(t *testing.T) {
		t.Parallel()
		client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
		user := coderdtest.CreateFirstUser(t, client)
		version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil)
		coderdtest.AwaitTemplateVersionJob(t, client, version.ID)
		template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
		config := cli.LoadTestConfig{
			Strategy: cli.LoadTestStrategy{
				Type:             cli.LoadTestStrategyTypeConcurrent,
				ConcurrencyLimit: 2,
			},
			CleanupStrategy: cli.LoadTestStrategy{
				Type:             cli.LoadTestStrategyTypeConcurrent,
				ConcurrencyLimit: 2,
			},
			Tests: []cli.LoadTest{
				{
					Type:  cli.LoadTestTypeWorkspaceBuild,
					Count: 2,
					WorkspaceBuild: &workspacebuild.Config{
						OrganizationID: user.OrganizationID,
						UserID:         user.UserID.String(),
						Request: codersdk.CreateWorkspaceRequest{
							TemplateID: template.ID,
						},
					},
				},
			},
			Timeout: httpapi.Duration(testutil.WaitLong),
		}
		d := t.TempDir()
		configPath := filepath.Join(d, "/config.loadtest.json")
		f, err := os.Create(configPath)
		require.NoError(t, err)
		defer f.Close()
		err = json.NewEncoder(f).Encode(config)
		require.NoError(t, err)
		// Close early so the command reads a fully-flushed file; the
		// deferred Close above then becomes a harmless double close.
		_ = f.Close()
		cmd, root := clitest.New(t, "loadtest", "--config", configPath)
		clitest.SetupConfig(t, client, root)
		pty := ptytest.New(t)
		cmd.SetIn(pty.Input())
		cmd.SetOut(pty.Output())
		cmd.SetErr(pty.Output())
		ctx, cancelFunc := context.WithTimeout(context.Background(), testutil.WaitLong)
		defer cancelFunc()
		done := make(chan any)
		go func() {
			errC := cmd.ExecuteContext(ctx)
			assert.NoError(t, errC)
			close(done)
		}()
		pty.ExpectMatch("Test results:")
		pty.ExpectMatch("Pass: 2")
		<-done
		cancelFunc()
	})

	// Table-driven coverage of the --output flag: default, explicit text,
	// JSON to file, and mixed combinations.
	t.Run("OutputFormats", func(t *testing.T) {
		t.Parallel()
		t.Skip("This test is flakey. See: https://github.com/coder/coder/actions/runs/3415360091/jobs/5684401383")
		type outputFlag struct {
			format string
			// path is empty for stdout outputs.
			path string
		}
		dir := t.TempDir()
		cases := []struct {
			name        string
			outputs     []outputFlag
			errContains string
		}{
			{
				name:    "Default",
				outputs: []outputFlag{},
			},
			{
				name:    "ExplicitText",
				outputs: []outputFlag{{format: "text"}},
			},
			{
				name: "JSON",
				outputs: []outputFlag{
					{
						format: "json",
						path:   filepath.Join(dir, "results.json"),
					},
				},
			},
			{
				name: "TextAndJSON",
				outputs: []outputFlag{
					{
						format: "text",
					},
					{
						format: "json",
						path:   filepath.Join(dir, "results.json"),
					},
				},
			},
			{
				name: "TextAndJSON2",
				outputs: []outputFlag{
					{
						format: "text",
					},
					{
						format: "text",
						path:   filepath.Join(dir, "results.txt"),
					},
					{
						format: "json",
						path:   filepath.Join(dir, "results.json"),
					},
				},
			},
		}
		for _, c := range cases {
			t.Run(c.name, func(t *testing.T) {
				t.Parallel()
				client := coderdtest.New(t, nil)
				_ = coderdtest.CreateFirstUser(t, client)
				config := cli.LoadTestConfig{
					Strategy: cli.LoadTestStrategy{
						Type: cli.LoadTestStrategyTypeLinear,
					},
					CleanupStrategy: cli.LoadTestStrategy{
						Type: cli.LoadTestStrategyTypeLinear,
					},
					Tests: []cli.LoadTest{
						{
							Type:  cli.LoadTestTypePlacebo,
							Count: 10,
							Placebo: &placebo.Config{
								Sleep: httpapi.Duration(10 * time.Millisecond),
							},
						},
					},
					Timeout: httpapi.Duration(testutil.WaitShort),
				}
				configBytes, err := json.Marshal(config)
				require.NoError(t, err)
				// Build the --output flags for this case.
				args := []string{"loadtest", "--config", "-"}
				for _, output := range c.outputs {
					flag := output.format
					if output.path != "" {
						flag += ":" + output.path
					}
					args = append(args, "--output", flag)
				}
				cmd, root := clitest.New(t, args...)
				clitest.SetupConfig(t, client, root)
				cmd.SetIn(bytes.NewReader(configBytes))
				out := bytes.NewBuffer(nil)
				cmd.SetOut(out)
				pty := ptytest.New(t)
				cmd.SetErr(pty.Output())
				ctx, cancelFunc := context.WithTimeout(context.Background(), testutil.WaitLong)
				defer cancelFunc()
				done := make(chan any)
				go func() {
					errC := cmd.ExecuteContext(ctx)
					if c.errContains != "" {
						assert.Error(t, errC)
						assert.Contains(t, errC.Error(), c.errContains)
					} else {
						assert.NoError(t, errC)
					}
					close(done)
				}()
				<-done
				if c.errContains != "" {
					return
				}
				if len(c.outputs) == 0 {
					// This is the default output format when no flags are
					// specified.
					c.outputs = []outputFlag{{format: "text"}}
				}
				// Verify every requested output was produced, whether on
				// stdout or in its file.
				for i, output := range c.outputs {
					msg := fmt.Sprintf("flag %d", i)
					var b []byte
					if output.path == "" {
						b = out.Bytes()
					} else {
						b, err = os.ReadFile(output.path)
						require.NoError(t, err, msg)
					}
					t.Logf("output %d:\n\n%s", i, string(b))
					switch output.format {
					case "text":
						require.Contains(t, string(b), "Test results:", msg)
						require.Contains(t, string(b), "Pass: 10", msg)
					case "json":
						var res harness.Results
						err = json.Unmarshal(b, &res)
						require.NoError(t, err, msg)
						require.Equal(t, 10, res.TotalRuns, msg)
						require.Equal(t, 10, res.TotalPass, msg)
						require.Len(t, res.Runs, 10, msg)
					}
				}
			})
		}
	})
}

View File

@ -1,220 +0,0 @@
package cli
import (
"time"
"golang.org/x/xerrors"
"github.com/coder/coder/coderd/httpapi"
"github.com/coder/coder/codersdk"
"github.com/coder/coder/loadtest/agentconn"
"github.com/coder/coder/loadtest/harness"
"github.com/coder/coder/loadtest/placebo"
"github.com/coder/coder/loadtest/reconnectingpty"
"github.com/coder/coder/loadtest/workspacebuild"
)
// LoadTestConfig is the overall configuration for a call to `coder loadtest`.
type LoadTestConfig struct {
	// Strategy controls how test runs are scheduled.
	Strategy LoadTestStrategy `json:"strategy"`
	// CleanupStrategy controls how cleanup runs are scheduled.
	CleanupStrategy LoadTestStrategy `json:"cleanup_strategy"`
	// Tests is the list of test definitions to instantiate and run.
	Tests []LoadTest `json:"tests"`
	// Timeout sets a timeout for the entire test run, to control the timeout
	// for each individual run use strategy.timeout.
	Timeout httpapi.Duration `json:"timeout"`
}
// LoadTestStrategyType identifies how runs are scheduled: one at a time
// ("linear") or concurrently ("concurrent").
type LoadTestStrategyType string

const (
	LoadTestStrategyTypeLinear     LoadTestStrategyType = "linear"
	LoadTestStrategyTypeConcurrent LoadTestStrategyType = "concurrent"
)
// LoadTestStrategy configures how a set of test (or cleanup) runs is
// scheduled and bounded.
type LoadTestStrategy struct {
	// Type is the type of load test strategy to use. Strategies determine how
	// to run tests concurrently.
	Type LoadTestStrategyType `json:"type"`

	// ConcurrencyLimit is the maximum number of concurrent runs. This only
	// applies if type == "concurrent". Negative values disable the concurrency
	// limit and attempts to perform all runs concurrently. The default value is
	// 100.
	ConcurrencyLimit int `json:"concurrency_limit"`

	// Shuffle determines whether or not to shuffle the test runs before
	// executing them.
	Shuffle bool `json:"shuffle"`
	// Timeout is the maximum amount of time to run each test for. This is
	// independent of the timeout specified in the test run. A timeout of 0
	// disables the timeout.
	Timeout httpapi.Duration `json:"timeout"`
}
// ExecutionStrategy converts the strategy config into a concrete
// harness.ExecutionStrategy, applying the per-run timeout and shuffle
// wrappers when configured. It panics on an unknown strategy type, which
// Validate is expected to rule out beforehand.
func (s LoadTestStrategy) ExecutionStrategy() harness.ExecutionStrategy {
	var base harness.ExecutionStrategy
	switch s.Type {
	case LoadTestStrategyTypeLinear:
		base = harness.LinearExecutionStrategy{}
	case LoadTestStrategyTypeConcurrent:
		switch limit := s.ConcurrencyLimit; {
		case limit < 0:
			// NOTE: returning here skips the timeout/shuffle wrappers,
			// matching the existing behavior for unlimited concurrency.
			return harness.ConcurrentExecutionStrategy{}
		case limit == 0:
			// Default concurrency limit.
			base = harness.ParallelExecutionStrategy{Limit: 100}
		default:
			base = harness.ParallelExecutionStrategy{Limit: limit}
		}
	default:
		panic("unreachable, unknown strategy type " + s.Type)
	}

	if s.Timeout > 0 {
		base = harness.TimeoutExecutionStrategyWrapper{
			Timeout: time.Duration(s.Timeout),
			Inner:   base,
		}
	}
	if s.Shuffle {
		base = harness.ShuffleExecutionStrategyWrapper{Inner: base}
	}
	return base
}
// LoadTestType identifies which kind of runner a LoadTest entry creates.
type LoadTestType string

const (
	LoadTestTypeAgentConn       LoadTestType = "agentconn"
	LoadTestTypePlacebo         LoadTestType = "placebo"
	LoadTestTypeReconnectingPTY LoadTestType = "reconnectingpty"
	LoadTestTypeWorkspaceBuild  LoadTestType = "workspacebuild"
)
// LoadTest describes a single test configuration: its type, how many runs
// to execute, and exactly one type-specific config section.
type LoadTest struct {
	// Type is the type of load test to run.
	Type LoadTestType `json:"type"`
	// Count is the number of test runs to execute with this configuration. If
	// the count is 0 or negative, defaults to 1.
	Count int `json:"count"`

	// AgentConn must be set if type == "agentconn".
	AgentConn *agentconn.Config `json:"agentconn,omitempty"`
	// Placebo must be set if type == "placebo".
	Placebo *placebo.Config `json:"placebo,omitempty"`
	// ReconnectingPTY must be set if type == "reconnectingpty".
	ReconnectingPTY *reconnectingpty.Config `json:"reconnectingpty,omitempty"`
	// WorkspaceBuild must be set if type == "workspacebuild".
	WorkspaceBuild *workspacebuild.Config `json:"workspacebuild,omitempty"`
}
// NewRunner returns a harness.Runnable for the test's type, configured
// with the given client (unused for placebo tests). It returns an error
// if the config section required by the type is missing or the type is
// unknown.
func (t LoadTest) NewRunner(client *codersdk.Client) (harness.Runnable, error) {
	switch t.Type {
	case LoadTestTypeAgentConn:
		if t.AgentConn == nil {
			return nil, xerrors.New("agentconn config must be set")
		}
		return agentconn.NewRunner(client, *t.AgentConn), nil
	case LoadTestTypePlacebo:
		if t.Placebo == nil {
			return nil, xerrors.New("placebo config must be set")
		}
		return placebo.NewRunner(*t.Placebo), nil
	case LoadTestTypeReconnectingPTY:
		if t.ReconnectingPTY == nil {
			return nil, xerrors.New("reconnectingpty config must be set")
		}
		return reconnectingpty.NewRunner(client, *t.ReconnectingPTY), nil
	case LoadTestTypeWorkspaceBuild:
		if t.WorkspaceBuild == nil {
			// Consistency fix: this case previously used xerrors.Errorf
			// with a constant message; xerrors.New matches the siblings.
			return nil, xerrors.New("workspacebuild config must be set")
		}
		return workspacebuild.NewRunner(client, *t.WorkspaceBuild), nil
	default:
		return nil, xerrors.Errorf("unknown test type %q", t.Type)
	}
}
// Validate checks the run strategy, the cleanup strategy, and every test
// entry, wrapping any failure with its location in the config.
func (c *LoadTestConfig) Validate() error {
	if err := c.Strategy.Validate(); err != nil {
		return xerrors.Errorf("validate strategy: %w", err)
	}
	if err := c.CleanupStrategy.Validate(); err != nil {
		return xerrors.Errorf("validate cleanup_strategy: %w", err)
	}
	for i := range c.Tests {
		if err := c.Tests[i].Validate(); err != nil {
			return xerrors.Errorf("validate test %d: %w", i, err)
		}
	}
	return nil
}
// Validate checks that the strategy type is one of the known values and
// the per-run timeout is not negative.
func (s *LoadTestStrategy) Validate() error {
	if s.Type != LoadTestStrategyTypeLinear && s.Type != LoadTestStrategyTypeConcurrent {
		return xerrors.Errorf("invalid load test strategy type: %q", s.Type)
	}
	if s.Timeout < 0 {
		return xerrors.Errorf("invalid load test strategy timeout: %q", s.Timeout)
	}
	return nil
}
// Validate checks that the config section matching the test's type is
// present and itself valid.
//
// Consistency fix: the constant "must specify" messages previously mixed
// xerrors.Errorf (with no format verbs) and xerrors.New; all constant
// messages now use xerrors.New, with Errorf reserved for formatted or
// wrapped errors.
func (t *LoadTest) Validate() error {
	switch t.Type {
	case LoadTestTypeAgentConn:
		if t.AgentConn == nil {
			return xerrors.New("agentconn test type must specify agentconn")
		}
		if err := t.AgentConn.Validate(); err != nil {
			return xerrors.Errorf("validate agentconn: %w", err)
		}
	case LoadTestTypePlacebo:
		if t.Placebo == nil {
			return xerrors.New("placebo test type must specify placebo")
		}
		if err := t.Placebo.Validate(); err != nil {
			return xerrors.Errorf("validate placebo: %w", err)
		}
	case LoadTestTypeReconnectingPTY:
		if t.ReconnectingPTY == nil {
			return xerrors.New("reconnectingpty test type must specify reconnectingpty")
		}
		if err := t.ReconnectingPTY.Validate(); err != nil {
			return xerrors.Errorf("validate reconnectingpty: %w", err)
		}
	case LoadTestTypeWorkspaceBuild:
		if t.WorkspaceBuild == nil {
			return xerrors.New("workspacebuild test type must specify workspacebuild")
		}
		if err := t.WorkspaceBuild.Validate(); err != nil {
			return xerrors.Errorf("validate workspacebuild: %w", err)
		}
	default:
		return xerrors.Errorf("invalid load test type: %q", t.Type)
	}
	return nil
}

View File

@ -77,7 +77,6 @@ func Core() []*cobra.Command {
dotfiles(),
gitssh(),
list(),
loadtest(),
login(),
logout(),
parameters(),
@ -85,6 +84,7 @@ func Core() []*cobra.Command {
publickey(),
rename(),
resetPassword(),
scaletest(),
schedules(),
show(),
speedtest(),

861
cli/scaletest.go Normal file
View File

@ -0,0 +1,861 @@
package cli
import (
"context"
"encoding/json"
"fmt"
"io"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/google/uuid"
"github.com/spf13/cobra"
"go.opentelemetry.io/otel/trace"
"golang.org/x/xerrors"
"github.com/coder/coder/cli/cliflag"
"github.com/coder/coder/cli/cliui"
"github.com/coder/coder/coderd/httpapi"
"github.com/coder/coder/coderd/tracing"
"github.com/coder/coder/codersdk"
"github.com/coder/coder/cryptorand"
"github.com/coder/coder/scaletest/agentconn"
"github.com/coder/coder/scaletest/createworkspaces"
"github.com/coder/coder/scaletest/harness"
"github.com/coder/coder/scaletest/reconnectingpty"
"github.com/coder/coder/scaletest/workspacebuild"
)
// scaletestTracerName is the tracer name used for spans created by the
// scaletest command.
const scaletestTracerName = "coder_scaletest"

// scaletest returns the parent `coder scaletest` command. It does nothing
// itself besides printing help; the subcommands do the actual work.
func scaletest() *cobra.Command {
	root := &cobra.Command{
		Use:   "scaletest",
		Short: "Run a scale test against the Coder API",
		Long:  "Perform scale tests against the Coder server.",
		RunE: func(cmd *cobra.Command, args []string) error {
			return cmd.Help()
		},
	}

	root.AddCommand(
		scaletestCleanup(),
		scaletestCreateWorkspaces(),
	)
	return root
}
// scaletestTracingFlags groups the tracing-related CLI flags shared by
// scaletest subcommands.
type scaletestTracingFlags struct {
	traceEnable          bool
	traceCoder           bool
	traceHoneycombAPIKey string
	tracePropagate       bool
}

// attach registers the tracing flags on the given command.
//
// NOTE(review): these environment variables keep the CODER_LOADTEST_*
// prefix while the output flag elsewhere uses CODER_SCALETEST_OUTPUTS —
// confirm whether the loadtest prefix is intentional (e.g. backwards
// compatibility) or an oversight.
func (s *scaletestTracingFlags) attach(cmd *cobra.Command) {
	cliflag.BoolVarP(cmd.Flags(), &s.traceEnable, "trace", "", "CODER_LOADTEST_TRACE", false, "Whether application tracing data is collected. It exports to a backend configured by environment variables. See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md")
	cliflag.BoolVarP(cmd.Flags(), &s.traceCoder, "trace-coder", "", "CODER_LOADTEST_TRACE_CODER", false, "Whether opentelemetry traces are sent to Coder. We recommend keeping this disabled unless we advise you to enable it.")
	cliflag.StringVarP(cmd.Flags(), &s.traceHoneycombAPIKey, "trace-honeycomb-api-key", "", "CODER_LOADTEST_TRACE_HONEYCOMB_API_KEY", "", "Enables trace exporting to Honeycomb.io using the provided API key.")
	cliflag.BoolVarP(cmd.Flags(), &s.tracePropagate, "trace-propagate", "", "CODER_LOADTEST_TRACE_PROPAGATE", false, "Enables trace propagation to the Coder backend, which will be used to correlate server-side spans with client-side spans. Only enable this if the server is configured with the exact same tracing configuration as the client.")
}
// provider returns a trace.TracerProvider, a close function and a bool showing
// whether tracing is enabled or not.
func (s *scaletestTracingFlags) provider(ctx context.Context) (trace.TracerProvider, func(context.Context) error, bool, error) {
	enabled := s.traceEnable || s.traceCoder || s.traceHoneycombAPIKey != ""
	if !enabled {
		noopClose := func(_ context.Context) error { return nil }
		return trace.NewNoopTracerProvider(), noopClose, false, nil
	}

	tracerProvider, closeTracing, err := tracing.TracerProvider(ctx, scaletestTracerName, tracing.TracerOpts{
		Default:   s.traceEnable,
		Coder:     s.traceCoder,
		Honeycomb: s.traceHoneycombAPIKey,
	})
	if err != nil {
		return nil, nil, false, xerrors.Errorf("initialize tracing: %w", err)
	}

	// The close function may be invoked from more than one code path, so
	// ensure the underlying closer runs at most once.
	var once sync.Once
	closeFn := func(ctx context.Context) error {
		var closeErr error
		once.Do(func() {
			closeErr = closeTracing(ctx)
		})
		return closeErr
	}
	return tracerProvider, closeFn, true, nil
}
// scaletestStrategyFlags groups the execution-strategy CLI flags shared by
// scaletest subcommands. When cleanup is true, the flag/env names gain a
// "cleanup-" prefix so a command can carry both a run strategy and a
// cleanup strategy.
type scaletestStrategyFlags struct {
	// cleanup selects the "cleanup-*" flag naming; set at construction.
	cleanup       bool
	concurrency   int
	timeout       time.Duration
	timeoutPerJob time.Duration
}

// attach registers the strategy flags on the given command, renaming them
// for cleanup strategies when s.cleanup is set.
//
// NOTE(review): the env vars use the CODER_LOADTEST_* prefix rather than
// CODER_SCALETEST_* — confirm whether that is intentional.
func (s *scaletestStrategyFlags) attach(cmd *cobra.Command) {
	concurrencyLong, concurrencyEnv, concurrencyDescription := "concurrency", "CODER_LOADTEST_CONCURRENCY", "Number of concurrent jobs to run. 0 means unlimited."
	timeoutLong, timeoutEnv, timeoutDescription := "timeout", "CODER_LOADTEST_TIMEOUT", "Timeout for the entire test run. 0 means unlimited."
	jobTimeoutLong, jobTimeoutEnv, jobTimeoutDescription := "job-timeout", "CODER_LOADTEST_JOB_TIMEOUT", "Timeout per job. Jobs may take longer to complete under higher concurrency limits."
	if s.cleanup {
		concurrencyLong, concurrencyEnv, concurrencyDescription = "cleanup-"+concurrencyLong, "CODER_LOADTEST_CLEANUP_CONCURRENCY", strings.ReplaceAll(concurrencyDescription, "jobs", "cleanup jobs")
		timeoutLong, timeoutEnv, timeoutDescription = "cleanup-"+timeoutLong, "CODER_LOADTEST_CLEANUP_TIMEOUT", strings.ReplaceAll(timeoutDescription, "test", "cleanup")
		jobTimeoutLong, jobTimeoutEnv, jobTimeoutDescription = "cleanup-"+jobTimeoutLong, "CODER_LOADTEST_CLEANUP_JOB_TIMEOUT", strings.ReplaceAll(jobTimeoutDescription, "jobs", "cleanup jobs")
	}

	cliflag.IntVarP(cmd.Flags(), &s.concurrency, concurrencyLong, "", concurrencyEnv, 1, concurrencyDescription)
	cliflag.DurationVarP(cmd.Flags(), &s.timeout, timeoutLong, "", timeoutEnv, 30*time.Minute, timeoutDescription)
	cliflag.DurationVarP(cmd.Flags(), &s.timeoutPerJob, jobTimeoutLong, "", jobTimeoutEnv, 5*time.Minute, jobTimeoutDescription)
}
// toStrategy maps the flag values onto a harness.ExecutionStrategy:
// concurrency 1 is linear, 0 is unlimited, and anything else is a bounded
// parallel strategy. A per-job timeout, when set, wraps the result.
func (s *scaletestStrategyFlags) toStrategy() harness.ExecutionStrategy {
	var strategy harness.ExecutionStrategy
	switch s.concurrency {
	case 1:
		strategy = harness.LinearExecutionStrategy{}
	case 0:
		strategy = harness.ConcurrentExecutionStrategy{}
	default:
		strategy = harness.ParallelExecutionStrategy{
			Limit: s.concurrency,
		}
	}

	if s.timeoutPerJob > 0 {
		strategy = harness.TimeoutExecutionStrategyWrapper{
			Timeout: s.timeoutPerJob,
			Inner:   strategy,
		}
	}
	return strategy
}
// toContext derives a context bounded by the overall run timeout, or a
// plain cancellable context when no timeout is configured.
func (s *scaletestStrategyFlags) toContext(ctx context.Context) (context.Context, context.CancelFunc) {
	if s.timeout <= 0 {
		return context.WithCancel(ctx)
	}
	return context.WithTimeout(ctx, s.timeout)
}
// scaleTestOutputFormat identifies how scale test results are rendered.
type scaleTestOutputFormat string

const (
	scaleTestOutputFormatText scaleTestOutputFormat = "text"
	scaleTestOutputFormatJSON scaleTestOutputFormat = "json"
	// TODO: html format
)

// scaleTestOutput is a single destination for rendered results, parsed
// from an --output flag.
type scaleTestOutput struct {
	format scaleTestOutputFormat
	// Zero or one (the first) path will have the path set to "-" to indicate
	// stdout.
	path string
}
// write renders res in the output's format to its destination: the given
// stdout writer when path is "-", otherwise a freshly-created file. File
// outputs are synced to disk and closed before returning.
func (o *scaleTestOutput) write(res harness.Results, stdout io.Writer) error {
	var (
		dst    = stdout
		closer io.Closer
	)
	if o.path != "-" {
		f, err := os.Create(o.path)
		if err != nil {
			return xerrors.Errorf("create output file: %w", err)
		}
		dst, closer = f, f
	}

	switch o.format {
	case scaleTestOutputFormatText:
		res.PrintText(dst)
	case scaleTestOutputFormatJSON:
		if err := json.NewEncoder(dst).Encode(res); err != nil {
			return xerrors.Errorf("encode JSON: %w", err)
		}
	}

	// Sync the file to disk if it's a file.
	if s, ok := dst.(interface{ Sync() error }); ok {
		if err := s.Sync(); err != nil {
			return xerrors.Errorf("flush output file: %w", err)
		}
	}

	if closer != nil {
		if err := closer.Close(); err != nil {
			return xerrors.Errorf("close output file: %w", err)
		}
	}
	return nil
}
// scaletestOutputFlags groups the --output CLI flag shared by scaletest
// subcommands; parse converts the raw specs into scaleTestOutput values.
type scaletestOutputFlags struct {
	outputSpecs []string
}

// attach registers the --output flag on the given command.
func (s *scaletestOutputFlags) attach(cmd *cobra.Command) {
	cliflag.StringArrayVarP(cmd.Flags(), &s.outputSpecs, "output", "", "CODER_SCALETEST_OUTPUTS", []string{"text"}, `Output format specs in the format "<format>[:<path>]". Not specifying a path will default to stdout. Available formats: text, json.`)
}
// parse converts the raw --output specs ("format" or "format:path") into
// scaleTestOutput values. At most one spec may target stdout; when no
// spec is given at all, a text output to stdout is the default. The
// stdout output, when present, is always first in the result.
func (s *scaletestOutputFlags) parse() ([]scaleTestOutput, error) {
	valid := map[scaleTestOutputFormat]struct{}{
		scaleTestOutputFormatText: {},
		scaleTestOutputFormatJSON: {},
	}

	var (
		stdoutFormat scaleTestOutputFormat
		fileOutputs  []scaleTestOutput
	)
	for i, spec := range s.outputSpecs {
		formatStr, path, hasPath := strings.Cut(spec, ":")
		format := scaleTestOutputFormat(formatStr)
		if _, ok := valid[format]; !ok {
			return nil, xerrors.Errorf("invalid output format %q in output flag %d", formatStr, i)
		}
		if !hasPath {
			// No path means stdout; only one spec may claim it.
			if stdoutFormat != "" {
				return nil, xerrors.Errorf("multiple output flags specified for stdout")
			}
			stdoutFormat = format
			continue
		}
		fileOutputs = append(fileOutputs, scaleTestOutput{
			format: format,
			path:   path,
		})
	}

	// Default to --output text
	if stdoutFormat == "" && len(fileOutputs) == 0 {
		stdoutFormat = scaleTestOutputFormatText
	}
	if stdoutFormat == "" {
		return fileOutputs, nil
	}
	return append([]scaleTestOutput{{
		format: stdoutFormat,
		path:   "-",
	}}, fileOutputs...), nil
}
// requireAdmin fetches the current user and returns it only if the user
// has the "owner" role; otherwise an error is returned alongside the
// fetched user.
func requireAdmin(ctx context.Context, client *codersdk.Client) (codersdk.User, error) {
	me, err := client.User(ctx, codersdk.Me)
	if err != nil {
		return codersdk.User{}, xerrors.Errorf("fetch current user: %w", err)
	}

	// Only owners can do scaletests. This isn't a very strong check but there's
	// not much else we can do. Ratelimits are enforced for non-owners so
	// hopefully that limits the damage if someone disables this check and runs
	// it against a non-owner account.
	for _, role := range me.Roles {
		if role.Name == "owner" {
			return me, nil
		}
	}
	return me, xerrors.Errorf("Not logged in as a site owner. Scale testing is only available to site owners.")
}
// userCleanupRunner is a runner that deletes a user in the Run phase. It is
// used by `scaletest cleanup` to remove users left behind by previous scale
// test runs.
type userCleanupRunner struct {
	// client issues the delete request. The cleanup command sets
	// BypassRatelimits on it and requires an owner account.
	client *codersdk.Client
	// userID identifies the user to delete; uuid.Nil makes Run a no-op.
	userID uuid.UUID
}

// Compile-time assertion that the harness can schedule a userCleanupRunner.
var _ harness.Runnable = &userCleanupRunner{}
// Run implements Runnable. It deletes the configured user, recording the
// operation in a tracing span. A nil user ID is treated as "nothing to do".
func (r *userCleanupRunner) Run(ctx context.Context, _ string, _ io.Writer) error {
	if r.userID == uuid.Nil {
		return nil
	}

	ctx, span := tracing.StartSpan(ctx)
	defer span.End()

	if err := r.client.DeleteUser(ctx, r.userID); err != nil {
		return xerrors.Errorf("delete user %q: %w", r.userID, err)
	}
	return nil
}
// scaletestCleanup returns the `scaletest cleanup` command. It first deletes
// all leftover scale test workspaces (name prefix "scaletest-"), then all
// leftover scale test users (email suffix "@scaletest.local"), each stage
// driven by the shared cleanup strategy flags.
func scaletestCleanup() *cobra.Command {
	cleanupStrategy := &scaletestStrategyFlags{cleanup: true}

	cmd := &cobra.Command{
		Use:   "cleanup",
		Short: "Cleanup any orphaned scaletest resources",
		Long:  "Cleanup scaletest workspaces, then cleanup scaletest users. The strategy flags will apply to each stage of the cleanup process.",
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()

			client, err := CreateClient(cmd)
			if err != nil {
				return err
			}
			_, err = requireAdmin(ctx, client)
			if err != nil {
				return err
			}
			client.BypassRatelimits = true

			cmd.PrintErrln("Fetching scaletest workspaces...")
			var (
				pageNumber = 0
				limit      = 100
				workspaces []codersdk.Workspace
			)
			for {
				page, err := client.Workspaces(ctx, codersdk.WorkspaceFilter{
					Name:   "scaletest-",
					Offset: pageNumber * limit,
					Limit:  limit,
				})
				if err != nil {
					return xerrors.Errorf("fetch scaletest workspaces page %d: %w", pageNumber, err)
				}

				pageNumber++
				if len(page.Workspaces) == 0 {
					break
				}

				// The name filter is a fuzzy match, so verify each workspace
				// really belongs to a scale test before touching it.
				pageWorkspaces := make([]codersdk.Workspace, 0, len(page.Workspaces))
				for _, w := range page.Workspaces {
					if isScaleTestWorkspace(w) {
						pageWorkspaces = append(pageWorkspaces, w)
					}
				}
				workspaces = append(workspaces, pageWorkspaces...)
			}

			cmd.PrintErrf("Found %d scaletest workspaces\n", len(workspaces))
			if len(workspaces) != 0 {
				// Status output goes to stderr, consistent with the other
				// messages in this command (previously used cmd.Println).
				cmd.PrintErrln("Deleting scaletest workspaces...")
				// Named `h` to avoid shadowing the harness package.
				h := harness.NewTestHarness(cleanupStrategy.toStrategy(), harness.ConcurrentExecutionStrategy{})
				for i, w := range workspaces {
					const testName = "cleanup-workspace"
					r := workspacebuild.NewCleanupRunner(client, w.ID)
					h.AddRun(testName, strconv.Itoa(i), r)
				}

				ctx, cancel := cleanupStrategy.toContext(ctx)
				defer cancel()
				err := h.Run(ctx)
				if err != nil {
					return xerrors.Errorf("run test harness to delete workspaces (harness failure, not a test failure): %w", err)
				}

				cmd.PrintErrln("Done deleting scaletest workspaces:")
				res := h.Results()
				res.PrintText(cmd.ErrOrStderr())

				if res.TotalFail > 0 {
					return xerrors.Errorf("failed to delete scaletest workspaces")
				}
			}

			cmd.PrintErrln("Fetching scaletest users...")
			pageNumber = 0
			limit = 100
			var users []codersdk.User
			for {
				page, err := client.Users(ctx, codersdk.UsersRequest{
					Search: "scaletest-",
					Pagination: codersdk.Pagination{
						Offset: pageNumber * limit,
						Limit:  limit,
					},
				})
				if err != nil {
					return xerrors.Errorf("fetch scaletest users page %d: %w", pageNumber, err)
				}

				pageNumber++
				if len(page.Users) == 0 {
					break
				}

				// Like workspaces above, double-check the search results.
				pageUsers := make([]codersdk.User, 0, len(page.Users))
				for _, u := range page.Users {
					if isScaleTestUser(u) {
						pageUsers = append(pageUsers, u)
					}
				}
				users = append(users, pageUsers...)
			}

			cmd.PrintErrf("Found %d scaletest users\n", len(users))
			// BUG FIX: this previously checked len(workspaces), which skipped
			// user deletion whenever no scaletest workspaces were found and
			// attempted it pointlessly when there were workspaces but no
			// users.
			if len(users) != 0 {
				cmd.PrintErrln("Deleting scaletest users...")
				h := harness.NewTestHarness(cleanupStrategy.toStrategy(), harness.ConcurrentExecutionStrategy{})
				for i, u := range users {
					const testName = "cleanup-users"
					r := &userCleanupRunner{
						client: client,
						userID: u.ID,
					}
					h.AddRun(testName, strconv.Itoa(i), r)
				}

				ctx, cancel := cleanupStrategy.toContext(ctx)
				defer cancel()
				err := h.Run(ctx)
				if err != nil {
					return xerrors.Errorf("run test harness to delete users (harness failure, not a test failure): %w", err)
				}

				cmd.PrintErrln("Done deleting scaletest users:")
				res := h.Results()
				res.PrintText(cmd.ErrOrStderr())

				if res.TotalFail > 0 {
					return xerrors.Errorf("failed to delete scaletest users")
				}
			}

			return nil
		},
	}

	cleanupStrategy.attach(cmd)
	return cmd
}
// scaletestCreateWorkspaces returns the `scaletest create-workspaces`
// command. For each of --count runs it creates a fresh user and a workspace
// for that user, waits for the build (and optionally the agent), optionally
// runs a command over reconnecting-pty and/or dials the agent over WireGuard,
// then writes aggregate results in the configured output formats.
func scaletestCreateWorkspaces() *cobra.Command {
	var (
		count          int
		template       string
		parametersFile string
		parameters     []string // key=value
		noPlan         bool
		noCleanup      bool
		// TODO: implement this flag
		// noCleanupFailures bool
		noWaitForAgents bool

		runCommand       string
		runTimeout       time.Duration
		runExpectTimeout bool
		runExpectOutput  string
		runLogOutput     bool

		// TODO: customizable agent, currently defaults to the first agent found
		// if there are multiple
		connectURL      string // http://localhost:4/
		connectMode     string // derp or direct
		connectHold     time.Duration
		connectInterval time.Duration
		connectTimeout  time.Duration

		tracingFlags    = &scaletestTracingFlags{}
		strategy        = &scaletestStrategyFlags{}
		cleanupStrategy = &scaletestStrategyFlags{cleanup: true}
		output          = &scaletestOutputFlags{}
	)

	cmd := &cobra.Command{
		Use:   "create-workspaces",
		Short: "Creates many workspaces and waits for them to be ready",
		Long:  "Creates many users, then creates a workspace for each user and waits for them finish building and fully come online. Optionally runs a command inside each workspace, and connects to the workspace over WireGuard.",
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()

			client, err := CreateClient(cmd)
			if err != nil {
				return err
			}
			me, err := requireAdmin(ctx, client)
			if err != nil {
				return err
			}
			client.BypassRatelimits = true

			if count <= 0 {
				return xerrors.Errorf("--count is required and must be greater than 0")
			}

			outputs, err := output.parse()
			if err != nil {
				// BUG FIX: wrap the underlying error instead of discarding it
				// so the user can see why their --output flags were rejected.
				return xerrors.Errorf("could not parse --output flags: %w", err)
			}

			// Resolve the template either by ID or by name across all of the
			// caller's organizations.
			var tpl codersdk.Template
			if template == "" {
				return xerrors.Errorf("--template is required")
			}
			if id, err := uuid.Parse(template); err == nil && id != uuid.Nil {
				tpl, err = client.Template(ctx, id)
				if err != nil {
					return xerrors.Errorf("get template by ID %q: %w", template, err)
				}
			} else {
				// List templates in all orgs until we find a match.
			orgLoop:
				for _, orgID := range me.OrganizationIDs {
					tpls, err := client.TemplatesByOrganization(ctx, orgID)
					if err != nil {
						return xerrors.Errorf("list templates in org %q: %w", orgID, err)
					}

					for _, t := range tpls {
						if t.Name == template {
							tpl = t
							break orgLoop
						}
					}
				}
			}
			if tpl.ID == uuid.Nil {
				return xerrors.Errorf("could not find template %q in any organization", template)
			}
			templateVersion, err := client.TemplateVersion(ctx, tpl.ActiveVersionID)
			if err != nil {
				return xerrors.Errorf("get template version %q: %w", tpl.ActiveVersionID, err)
			}
			parameterSchemas, err := client.TemplateVersionSchema(ctx, templateVersion.ID)
			if err != nil {
				return xerrors.Errorf("get template version schema %q: %w", templateVersion.ID, err)
			}

			// Merge parameters: --parameter entries override values from
			// --parameters-file.
			paramsMap := map[string]string{}
			if parametersFile != "" {
				fileMap, err := createParameterMapFromFile(parametersFile)
				if err != nil {
					return xerrors.Errorf("read parameters file %q: %w", parametersFile, err)
				}
				paramsMap = fileMap
			}
			for _, p := range parameters {
				parts := strings.SplitN(p, "=", 2)
				if len(parts) != 2 {
					return xerrors.Errorf("invalid parameter %q", p)
				}
				paramsMap[strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
			}

			params := []codersdk.CreateParameterRequest{}
			for _, p := range parameterSchemas {
				value, ok := paramsMap[p.Name]
				if !ok {
					value = ""
				}
				params = append(params, codersdk.CreateParameterRequest{
					Name:              p.Name,
					SourceValue:       value,
					SourceScheme:      codersdk.ParameterSourceSchemeData,
					DestinationScheme: p.DefaultDestinationScheme,
				})
			}

			// Do a dry-run to ensure the template and parameters are valid
			// before we start creating users and workspaces.
			if !noPlan {
				dryRun, err := client.CreateTemplateVersionDryRun(ctx, templateVersion.ID, codersdk.CreateTemplateVersionDryRunRequest{
					WorkspaceName:   "scaletest",
					ParameterValues: params,
				})
				if err != nil {
					return xerrors.Errorf("start dry run workspace creation: %w", err)
				}
				_, _ = fmt.Fprintln(cmd.OutOrStdout(), "Planning workspace...")
				err = cliui.ProvisionerJob(cmd.Context(), cmd.OutOrStdout(), cliui.ProvisionerJobOptions{
					Fetch: func() (codersdk.ProvisionerJob, error) {
						return client.TemplateVersionDryRun(cmd.Context(), templateVersion.ID, dryRun.ID)
					},
					Cancel: func() error {
						return client.CancelTemplateVersionDryRun(cmd.Context(), templateVersion.ID, dryRun.ID)
					},
					Logs: func() (<-chan codersdk.ProvisionerJobLog, io.Closer, error) {
						return client.TemplateVersionDryRunLogsAfter(cmd.Context(), templateVersion.ID, dryRun.ID, 0)
					},
					// Don't show log output for the dry-run unless there's an error.
					Silent: true,
				})
				if err != nil {
					return xerrors.Errorf("dry-run workspace: %w", err)
				}
			}

			tracerProvider, closeTracing, tracingEnabled, err := tracingFlags.provider(ctx)
			if err != nil {
				return xerrors.Errorf("create tracer provider: %w", err)
			}
			defer func() {
				// Allow time for traces to flush even if command context is
				// canceled.
				ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
				defer cancel()
				_ = closeTracing(ctx)
			}()
			tracer := tracerProvider.Tracer(scaletestTracerName)

			th := harness.NewTestHarness(strategy.toStrategy(), cleanupStrategy.toStrategy())
			for i := 0; i < count; i++ {
				const name = "workspacebuild"
				id := strconv.Itoa(i)

				username, email, err := newScaleTestUser(id)
				if err != nil {
					return xerrors.Errorf("create scaletest username and email: %w", err)
				}
				workspaceName, err := newScaleTestWorkspace(id)
				if err != nil {
					return xerrors.Errorf("create scaletest workspace name: %w", err)
				}

				config := createworkspaces.Config{
					User: createworkspaces.UserConfig{
						// TODO: configurable org
						OrganizationID: me.OrganizationIDs[0],
						Username:       username,
						Email:          email,
					},
					Workspace: workspacebuild.Config{
						OrganizationID: me.OrganizationIDs[0],
						// UserID is set by the test automatically.
						Request: codersdk.CreateWorkspaceRequest{
							TemplateID:      tpl.ID,
							Name:            workspaceName,
							ParameterValues: params,
						},
						NoWaitForAgents: noWaitForAgents,
					},
					NoCleanup: noCleanup,
				}

				if runCommand != "" {
					config.ReconnectingPTY = &reconnectingpty.Config{
						// AgentID is set by the test automatically.
						Init: codersdk.ReconnectingPTYInit{
							ID:      uuid.Nil,
							Height:  24,
							Width:   80,
							Command: runCommand,
						},
						Timeout:       httpapi.Duration(runTimeout),
						ExpectTimeout: runExpectTimeout,
						ExpectOutput:  runExpectOutput,
						LogOutput:     runLogOutput,
					}
				}
				if connectURL != "" {
					config.AgentConn = &agentconn.Config{
						// AgentID is set by the test automatically.
						// The ConnectionMode gets validated by the Validate()
						// call below.
						ConnectionMode: agentconn.ConnectionMode(connectMode),
						HoldDuration:   httpapi.Duration(connectHold),
						Connections: []agentconn.Connection{
							{
								URL:      connectURL,
								Interval: httpapi.Duration(connectInterval),
								Timeout:  httpapi.Duration(connectTimeout),
							},
						},
					}
				}

				err = config.Validate()
				if err != nil {
					return xerrors.Errorf("validate config: %w", err)
				}

				var runner harness.Runnable = createworkspaces.NewRunner(client, config)
				if tracingEnabled {
					runner = &runnableTraceWrapper{
						tracer:   tracer,
						spanName: fmt.Sprintf("%s/%s", name, id),
						runner:   runner,
					}
				}

				th.AddRun(name, id, runner)
			}

			// TODO: live progress output
			_, _ = fmt.Fprintln(cmd.ErrOrStderr(), "Running load test...")

			testCtx, testCancel := strategy.toContext(ctx)
			defer testCancel()
			err = th.Run(testCtx)
			if err != nil {
				return xerrors.Errorf("run test harness (harness failure, not a test failure): %w", err)
			}

			res := th.Results()
			for _, o := range outputs {
				err = o.write(res, cmd.OutOrStdout())
				if err != nil {
					return xerrors.Errorf("write output %q to %q: %w", o.format, o.path, err)
				}
			}

			_, _ = fmt.Fprintln(cmd.ErrOrStderr(), "\nCleaning up...")
			cleanupCtx, cleanupCancel := cleanupStrategy.toContext(ctx)
			defer cleanupCancel()
			err = th.Cleanup(cleanupCtx)
			if err != nil {
				return xerrors.Errorf("cleanup tests: %w", err)
			}

			// Upload traces.
			if tracingEnabled {
				_, _ = fmt.Fprintln(cmd.ErrOrStderr(), "\nUploading traces...")
				ctx, cancel := context.WithTimeout(ctx, 1*time.Minute)
				defer cancel()
				err := closeTracing(ctx)
				if err != nil {
					_, _ = fmt.Fprintf(cmd.ErrOrStderr(), "\nError uploading traces: %+v\n", err)
				}
			}

			if res.TotalFail > 0 {
				return xerrors.New("load test failed, see above for more details")
			}

			return nil
		},
	}

	cliflag.IntVarP(cmd.Flags(), &count, "count", "c", "CODER_LOADTEST_COUNT", 1, "Required: Number of workspaces to create.")
	cliflag.StringVarP(cmd.Flags(), &template, "template", "t", "CODER_LOADTEST_TEMPLATE", "", "Required: Name or ID of the template to use for workspaces.")
	cliflag.StringVarP(cmd.Flags(), &parametersFile, "parameters-file", "", "CODER_LOADTEST_PARAMETERS_FILE", "", "Path to a YAML file containing the parameters to use for each workspace.")
	cliflag.StringArrayVarP(cmd.Flags(), &parameters, "parameter", "", "CODER_LOADTEST_PARAMETERS", []string{}, "Parameters to use for each workspace. Can be specified multiple times. Overrides any existing parameters with the same name from --parameters-file. Format: key=value")
	cliflag.BoolVarP(cmd.Flags(), &noPlan, "no-plan", "", "CODER_LOADTEST_NO_PLAN", false, "Skip the dry-run step to plan the workspace creation. This step ensures that the given parameters are valid for the given template.")
	cliflag.BoolVarP(cmd.Flags(), &noCleanup, "no-cleanup", "", "CODER_LOADTEST_NO_CLEANUP", false, "Do not clean up resources after the test completes. You can cleanup manually using `coder scaletest cleanup`.")
	// cliflag.BoolVarP(cmd.Flags(), &noCleanupFailures, "no-cleanup-failures", "", "CODER_LOADTEST_NO_CLEANUP_FAILURES", false, "Do not clean up resources from failed jobs to aid in debugging failures. You can cleanup manually using `coder scaletest cleanup`.")
	cliflag.BoolVarP(cmd.Flags(), &noWaitForAgents, "no-wait-for-agents", "", "CODER_LOADTEST_NO_WAIT_FOR_AGENTS", false, "Do not wait for agents to start before marking the test as succeeded. This can be useful if you are running the test against a template that does not start the agent quickly.")
	cliflag.StringVarP(cmd.Flags(), &runCommand, "run-command", "", "CODER_LOADTEST_RUN_COMMAND", "", "Command to run inside each workspace using reconnecting-pty (i.e. web terminal protocol). If not specified, no command will be run.")
	cliflag.DurationVarP(cmd.Flags(), &runTimeout, "run-timeout", "", "CODER_LOADTEST_RUN_TIMEOUT", 5*time.Second, "Timeout for the command to complete.")
	cliflag.BoolVarP(cmd.Flags(), &runExpectTimeout, "run-expect-timeout", "", "CODER_LOADTEST_RUN_EXPECT_TIMEOUT", false, "Expect the command to timeout. If the command does not finish within the given --run-timeout, it will be marked as succeeded. If the command finishes before the timeout, it will be marked as failed.")
	cliflag.StringVarP(cmd.Flags(), &runExpectOutput, "run-expect-output", "", "CODER_LOADTEST_RUN_EXPECT_OUTPUT", "", "Expect the command to output the given string (on a single line). If the command does not output the given string, it will be marked as failed.")
	cliflag.BoolVarP(cmd.Flags(), &runLogOutput, "run-log-output", "", "CODER_LOADTEST_RUN_LOG_OUTPUT", false, "Log the output of the command to the test logs. This should be left off unless you expect small amounts of output. Large amounts of output will cause high memory usage.")
	// Typo fix: "the the workspace" -> "the workspace".
	cliflag.StringVarP(cmd.Flags(), &connectURL, "connect-url", "", "CODER_LOADTEST_CONNECT_URL", "", "URL to connect to inside the workspace over WireGuard. If not specified, no connections will be made over WireGuard.")
	cliflag.StringVarP(cmd.Flags(), &connectMode, "connect-mode", "", "CODER_LOADTEST_CONNECT_MODE", "derp", "Mode to use for connecting to the workspace. Can be 'derp' or 'direct'.")
	cliflag.DurationVarP(cmd.Flags(), &connectHold, "connect-hold", "", "CODER_LOADTEST_CONNECT_HOLD", 30*time.Second, "How long to hold the WireGuard connection open for.")
	cliflag.DurationVarP(cmd.Flags(), &connectInterval, "connect-interval", "", "CODER_LOADTEST_CONNECT_INTERVAL", time.Second, "How long to wait between making requests to the --connect-url once the connection is established.")
	cliflag.DurationVarP(cmd.Flags(), &connectTimeout, "connect-timeout", "", "CODER_LOADTEST_CONNECT_TIMEOUT", 5*time.Second, "Timeout for each request to the --connect-url.")

	tracingFlags.attach(cmd)
	strategy.attach(cmd)
	cleanupStrategy.attach(cmd)
	output.attach(cmd)
	return cmd
}
// runnableTraceWrapper decorates a harness.Runnable so its Run (and, when
// supported, Cleanup) phases are recorded as tracing spans.
type runnableTraceWrapper struct {
	tracer   trace.Tracer
	spanName string
	runner   harness.Runnable

	// span is the root span created in Run; Cleanup links its own span to
	// it so both phases appear under a single trace.
	span trace.Span
}

// Compile-time interface assertions.
var _ harness.Runnable = &runnableTraceWrapper{}
var _ harness.Cleanable = &runnableTraceWrapper{}
// Run implements Runnable. It wraps the inner runner's Run in a new root
// span (remembered on r.span for Cleanup to link against), writes the trace
// and span IDs to the run logs, and executes the runner under a child
// "<spanName> run" span.
func (r *runnableTraceWrapper) Run(ctx context.Context, id string, logs io.Writer) error {
	ctx, span := r.tracer.Start(ctx, r.spanName, trace.WithNewRoot())
	defer span.End()
	r.span = span

	// Surface the IDs in the logs so a failed run can be correlated with
	// its uploaded trace.
	sc := span.SpanContext()
	traceID, spanID := "unknown trace ID", "unknown span ID"
	if sc.HasTraceID() {
		traceID = sc.TraceID().String()
	}
	if sc.HasSpanID() {
		spanID = sc.SpanID().String()
	}
	_, _ = fmt.Fprintf(logs, "Trace ID: %s\n", traceID)
	_, _ = fmt.Fprintf(logs, "Span ID: %s\n\n", spanID)

	// Make a separate span for the run itself so the sub-spans are grouped
	// neatly. The cleanup span is also a child of the above span so this is
	// important for readability.
	runCtx, runSpan := r.tracer.Start(ctx, r.spanName+" run")
	defer runSpan.End()
	return r.runner.Run(runCtx, id, logs)
}
// Cleanup implements Cleanable. Runners without a cleanup phase are a no-op.
// The cleanup span is parented to the span created in Run (when Run executed)
// so both phases share one trace.
func (r *runnableTraceWrapper) Cleanup(ctx context.Context, id string) error {
	cleanable, ok := r.runner.(harness.Cleanable)
	if !ok {
		return nil
	}

	if r.span != nil {
		ctx = trace.ContextWithSpanContext(ctx, r.span.SpanContext())
	}
	ctx, span := r.tracer.Start(ctx, r.spanName+" cleanup")
	defer span.End()

	return cleanable.Cleanup(ctx, id)
}
// newScaleTestUser returns a random username and email address that can be used
// for scale testing. The returned username is prefixed with "scaletest-" and
// the returned email address is suffixed with "@scaletest.local".
func newScaleTestUser(id string) (username string, email string, err error) {
	randStr, err := cryptorand.String(8)
	if err != nil {
		// Return zero values on failure rather than half-built strings.
		return "", "", err
	}
	return fmt.Sprintf("scaletest-%s-%s", randStr, id), fmt.Sprintf("%s-%s@scaletest.local", randStr, id), nil
}
// newScaleTestWorkspace returns a random workspace name that can be used for
// scale testing. The returned workspace name is prefixed with "scaletest-" and
// suffixed with the given id.
func newScaleTestWorkspace(id string) (name string, err error) {
	randStr, err := cryptorand.String(8)
	if err != nil {
		// Return the zero value on failure rather than a half-built name.
		return "", err
	}
	return fmt.Sprintf("scaletest-%s-%s", randStr, id), nil
}
// isScaleTestUser reports whether the user was created by a scale test,
// identified by the "@scaletest.local" email domain used by newScaleTestUser.
func isScaleTestUser(user codersdk.User) bool {
	const scaleTestEmailSuffix = "@scaletest.local"
	return strings.HasSuffix(user.Email, scaleTestEmailSuffix)
}
// isScaleTestWorkspace reports whether both the workspace and its owner carry
// the "scaletest-" name prefix used by the scale test commands.
func isScaleTestWorkspace(workspace codersdk.Workspace) bool {
	return strings.HasPrefix(workspace.OwnerName, "scaletest-") &&
		strings.HasPrefix(workspace.Name, "scaletest-")
}

200
cli/scaletest_test.go Normal file
View File

@ -0,0 +1,200 @@
package cli_test
import (
"context"
"encoding/json"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/coder/coder/cli/clitest"
"github.com/coder/coder/coderd/coderdtest"
"github.com/coder/coder/codersdk"
"github.com/coder/coder/pty/ptytest"
"github.com/coder/coder/scaletest/harness"
"github.com/coder/coder/testutil"
)
// TestScaleTest exercises the `scaletest create-workspaces` and
// `scaletest cleanup` CLI commands end-to-end against an in-memory coderd.
func TestScaleTest(t *testing.T) {
	// NOTE(review): skipped due to flake; see the linked issue before
	// re-enabling.
	t.Skipf("This test is flakey. See https://github.com/coder/coder/issues/4942")
	t.Parallel()

	// This test does a create-workspaces scale test with --no-cleanup, checks
	// that the created resources are OK, and then runs a cleanup.
	t.Run("WorkspaceBuildNoCleanup", func(t *testing.T) {
		t.Parallel()

		ctx, cancelFunc := context.WithTimeout(context.Background(), testutil.WaitLong)
		defer cancelFunc()

		client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
		user := coderdtest.CreateFirstUser(t, client)
		version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil)
		coderdtest.AwaitTemplateVersionJob(t, client, version.ID)
		template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)

		// Write a parameters file.
		tDir := t.TempDir()
		paramsFile := filepath.Join(tDir, "params.yaml")
		outputFile := filepath.Join(tDir, "output.json")

		f, err := os.Create(paramsFile)
		require.NoError(t, err)
		defer f.Close()
		_, err = f.WriteString(`---
param1: foo
param2: true
param3: 1
`)
		require.NoError(t, err)
		err = f.Close()
		require.NoError(t, err)

		// --parameter entries should override --parameters-file values.
		cmd, root := clitest.New(t, "scaletest", "create-workspaces",
			"--count", "2",
			"--template", template.Name,
			"--parameters-file", paramsFile,
			"--parameter", "param1=bar",
			"--parameter", "param4=baz",
			"--no-cleanup",
			// This flag is important for tests because agents will never be
			// started.
			"--no-wait-for-agents",
			// Run and connect flags cannot be tested because they require an
			// agent.
			"--concurrency", "2",
			"--timeout", "30s",
			"--job-timeout", "15s",
			"--cleanup-concurrency", "1",
			"--cleanup-timeout", "30s",
			"--cleanup-job-timeout", "15s",
			"--output", "text",
			"--output", "json:"+outputFile,
		)
		clitest.SetupConfig(t, client, root)
		pty := ptytest.New(t)
		cmd.SetOut(pty.Output())
		cmd.SetErr(pty.Output())

		// Run the command in the background so the PTY expectations below can
		// observe its output while it executes.
		done := make(chan any)
		go func() {
			err := cmd.ExecuteContext(ctx)
			assert.NoError(t, err)
			close(done)
		}()
		pty.ExpectMatch("Test results:")
		pty.ExpectMatch("Pass: 2")
		select {
		case <-done:
		case <-ctx.Done():
		}
		cancelFunc()
		<-done

		// Recreate the context.
		ctx, cancelFunc = context.WithTimeout(context.Background(), testutil.WaitLong)
		defer cancelFunc()

		// Verify the output file.
		f, err = os.Open(outputFile)
		require.NoError(t, err)
		defer f.Close()
		var res harness.Results
		err = json.NewDecoder(f).Decode(&res)
		require.NoError(t, err)
		require.EqualValues(t, 2, res.TotalRuns)
		require.EqualValues(t, 2, res.TotalPass)

		// Find the workspaces and users and check that they are what we expect.
		workspaces, err := client.Workspaces(ctx, codersdk.WorkspaceFilter{
			Offset: 0,
			Limit:  100,
		})
		require.NoError(t, err)
		require.Len(t, workspaces.Workspaces, 2)

		seenUsers := map[string]struct{}{}
		for _, w := range workspaces.Workspaces {
			// Sadly we can't verify params as the API doesn't seem to return
			// them.

			// Verify that the user is a unique scaletest user.
			u, err := client.User(ctx, w.OwnerID.String())
			require.NoError(t, err)
			_, ok := seenUsers[u.ID.String()]
			require.False(t, ok, "user has more than one workspace")
			seenUsers[u.ID.String()] = struct{}{}
			require.Contains(t, u.Username, "scaletest-")
			require.Contains(t, u.Email, "scaletest")
		}
		require.Len(t, seenUsers, len(workspaces.Workspaces))

		// Check that there are exactly 3 users.
		users, err := client.Users(ctx, codersdk.UsersRequest{
			Pagination: codersdk.Pagination{
				Offset: 0,
				Limit:  100,
			},
		})
		require.NoError(t, err)
		require.Len(t, users.Users, len(seenUsers)+1)

		// Cleanup.
		cmd, root = clitest.New(t, "scaletest", "cleanup",
			"--cleanup-concurrency", "1",
			"--cleanup-timeout", "30s",
			"--cleanup-job-timeout", "15s",
		)
		clitest.SetupConfig(t, client, root)
		pty = ptytest.New(t)
		cmd.SetOut(pty.Output())
		cmd.SetErr(pty.Output())

		done = make(chan any)
		go func() {
			err := cmd.ExecuteContext(ctx)
			assert.NoError(t, err)
			close(done)
		}()
		// Two harness result blocks are expected: one for the workspace
		// deletion stage and one for the user deletion stage.
		pty.ExpectMatch("Test results:")
		pty.ExpectMatch("Pass: 2")
		pty.ExpectMatch("Test results:")
		pty.ExpectMatch("Pass: 2")
		select {
		case <-done:
		case <-ctx.Done():
		}
		cancelFunc()
		<-done

		// Recreate the context (again).
		ctx, cancelFunc = context.WithTimeout(context.Background(), testutil.WaitLong)
		defer cancelFunc()

		// Verify that the workspaces are gone.
		workspaces, err = client.Workspaces(ctx, codersdk.WorkspaceFilter{
			Offset: 0,
			Limit:  100,
		})
		require.NoError(t, err)
		require.Len(t, workspaces.Workspaces, 0)

		// Verify that the users are gone.
		users, err = client.Users(ctx, codersdk.UsersRequest{
			Pagination: codersdk.Pagination{
				Offset: 0,
				Limit:  100,
			},
		})
		require.NoError(t, err)
		require.Len(t, users.Users, 1)
	})
}

View File

@ -23,6 +23,7 @@ Commands:
port-forward Forward ports from machine to a workspace
publickey Output your Coder public key used for Git operations
reset-password Directly connect to the database to reset a user's password
scaletest Run a scale test against the Coder API
server Start a Coder server
state Manually manage Terraform state to fix broken workspaces
templates Manage templates

View File

@ -448,7 +448,7 @@ func (q *fakeQuerier) GetUserByEmailOrUsername(_ context.Context, arg database.G
defer q.mutex.RUnlock()
for _, user := range q.users {
if (strings.EqualFold(user.Email, arg.Email) || strings.EqualFold(user.Username, arg.Username)) && user.Deleted == arg.Deleted {
if !user.Deleted && (strings.EqualFold(user.Email, arg.Email) || strings.EqualFold(user.Username, arg.Username)) {
return user, nil
}
}
@ -513,15 +513,14 @@ func (q *fakeQuerier) GetAuthorizedUserCount(ctx context.Context, params databas
users = append(users, user)
}
if params.Deleted {
tmp := make([]database.User, 0, len(users))
for _, user := range users {
if user.Deleted {
tmp = append(tmp, user)
}
// Filter out deleted since they should never be returned.
tmp := make([]database.User, 0, len(users))
for _, user := range users {
if !user.Deleted {
tmp = append(tmp, user)
}
users = tmp
}
users = tmp
if params.Search != "" {
tmp := make([]database.User, 0, len(users))
@ -593,15 +592,14 @@ func (q *fakeQuerier) GetUsers(_ context.Context, params database.GetUsersParams
return a.CreatedAt.Before(b.CreatedAt)
})
if params.Deleted {
tmp := make([]database.User, 0, len(users))
for _, user := range users {
if user.Deleted {
tmp = append(tmp, user)
}
// Filter out deleted since they should never be returned.
tmp := make([]database.User, 0, len(users))
for _, user := range users {
if !user.Deleted {
tmp = append(tmp, user)
}
users = tmp
}
users = tmp
if params.AfterID != uuid.Nil {
found := false

View File

@ -261,7 +261,6 @@ func (q *sqlQuerier) GetAuthorizedUserCount(ctx context.Context, arg GetFiltered
query := fmt.Sprintf("-- name: GetAuthorizedUserCount :one\n%s", filtered)
row := q.db.QueryRowContext(ctx, query,
arg.Deleted,
arg.Search,
pq.Array(arg.Status),
pq.Array(arg.RbacRole),

View File

@ -43,6 +43,7 @@ type sqlcQuerier interface {
GetDeploymentID(ctx context.Context) (string, error)
GetFileByHashAndCreator(ctx context.Context, arg GetFileByHashAndCreatorParams) (File, error)
GetFileByID(ctx context.Context, id uuid.UUID) (File, error)
// This will never count deleted users.
GetFilteredUserCount(ctx context.Context, arg GetFilteredUserCountParams) (int64, error)
GetGitAuthLink(ctx context.Context, arg GetGitAuthLinkParams) (GitAuthLink, error)
GetGitSSHKey(ctx context.Context, userID uuid.UUID) (GitSSHKey, error)
@ -97,6 +98,7 @@ type sqlcQuerier interface {
GetUserGroups(ctx context.Context, userID uuid.UUID) ([]Group, error)
GetUserLinkByLinkedID(ctx context.Context, linkedID string) (UserLink, error)
GetUserLinkByUserIDLoginType(ctx context.Context, arg GetUserLinkByUserIDLoginTypeParams) (UserLink, error)
// This will never return deleted users.
GetUsers(ctx context.Context, arg GetUsersParams) ([]GetUsersRow, error)
// This shouldn't check for deleted, because it's frequently used
// to look up references to actions. eg. a user could build a workspace

View File

@ -4182,13 +4182,13 @@ SELECT
FROM
users
WHERE
users.deleted = $1
users.deleted = false
-- Start filters
-- Filter by name, email or username
AND CASE
WHEN $2 :: text != '' THEN (
email ILIKE concat('%', $2, '%')
OR username ILIKE concat('%', $2, '%')
WHEN $1 :: text != '' THEN (
email ILIKE concat('%', $1, '%')
OR username ILIKE concat('%', $1, '%')
)
ELSE true
END
@ -4196,15 +4196,15 @@ WHERE
AND CASE
-- @status needs to be a text because it can be empty, If it was
-- user_status enum, it would not.
WHEN cardinality($3 :: user_status[]) > 0 THEN
status = ANY($3 :: user_status[])
WHEN cardinality($2 :: user_status[]) > 0 THEN
status = ANY($2 :: user_status[])
ELSE true
END
-- Filter by rbac_roles
AND CASE
-- @rbac_role allows filtering by rbac roles. If 'member' is included, show everyone, as everyone is a member.
WHEN cardinality($4 :: text[]) > 0 AND 'member' != ANY($4 :: text[])
THEN rbac_roles && $4 :: text[]
WHEN cardinality($3 :: text[]) > 0 AND 'member' != ANY($3 :: text[])
THEN rbac_roles && $3 :: text[]
ELSE true
END
-- Authorize Filter clause will be injected below in GetAuthorizedUserCount
@ -4212,19 +4212,14 @@ WHERE
`
type GetFilteredUserCountParams struct {
Deleted bool `db:"deleted" json:"deleted"`
Search string `db:"search" json:"search"`
Status []UserStatus `db:"status" json:"status"`
RbacRole []string `db:"rbac_role" json:"rbac_role"`
}
// This will never count deleted users.
func (q *sqlQuerier) GetFilteredUserCount(ctx context.Context, arg GetFilteredUserCountParams) (int64, error) {
row := q.db.QueryRowContext(ctx, getFilteredUserCount,
arg.Deleted,
arg.Search,
pq.Array(arg.Status),
pq.Array(arg.RbacRole),
)
row := q.db.QueryRowContext(ctx, getFilteredUserCount, arg.Search, pq.Array(arg.Status), pq.Array(arg.RbacRole))
var count int64
err := row.Scan(&count)
return count, err
@ -4236,8 +4231,8 @@ SELECT
FROM
users
WHERE
(LOWER(username) = LOWER($1) OR LOWER(email) = LOWER($2))
AND deleted = $3
(LOWER(username) = LOWER($1) OR LOWER(email) = LOWER($2)) AND
deleted = false
LIMIT
1
`
@ -4245,11 +4240,10 @@ LIMIT
type GetUserByEmailOrUsernameParams struct {
Username string `db:"username" json:"username"`
Email string `db:"email" json:"email"`
Deleted bool `db:"deleted" json:"deleted"`
}
func (q *sqlQuerier) GetUserByEmailOrUsername(ctx context.Context, arg GetUserByEmailOrUsernameParams) (User, error) {
row := q.db.QueryRowContext(ctx, getUserByEmailOrUsername, arg.Username, arg.Email, arg.Deleted)
row := q.db.QueryRowContext(ctx, getUserByEmailOrUsername, arg.Username, arg.Email)
var i User
err := row.Scan(
&i.ID,
@ -4303,7 +4297,9 @@ const getUserCount = `-- name: GetUserCount :one
SELECT
COUNT(*)
FROM
users WHERE deleted = false
users
WHERE
deleted = false
`
func (q *sqlQuerier) GetUserCount(ctx context.Context) (int64, error) {
@ -4319,12 +4315,12 @@ SELECT
FROM
users
WHERE
users.deleted = $1
users.deleted = false
AND CASE
-- This allows using the last element on a page as effectively a cursor.
-- This is an important option for scripts that need to paginate without
-- duplicating or missing data.
WHEN $2 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN (
WHEN $1 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN (
-- The pagination cursor is the last ID of the previous page.
-- The query is ordered by the created_at field, so select all
-- rows after the cursor.
@ -4334,7 +4330,7 @@ WHERE
FROM
users
WHERE
id = $2
id = $1
)
)
ELSE true
@ -4342,9 +4338,9 @@ WHERE
-- Start filters
-- Filter by name, email or username
AND CASE
WHEN $3 :: text != '' THEN (
email ILIKE concat('%', $3, '%')
OR username ILIKE concat('%', $3, '%')
WHEN $2 :: text != '' THEN (
email ILIKE concat('%', $2, '%')
OR username ILIKE concat('%', $2, '%')
)
ELSE true
END
@ -4352,30 +4348,29 @@ WHERE
AND CASE
-- @status needs to be a text because it can be empty, If it was
-- user_status enum, it would not.
WHEN cardinality($4 :: user_status[]) > 0 THEN
status = ANY($4 :: user_status[])
WHEN cardinality($3 :: user_status[]) > 0 THEN
status = ANY($3 :: user_status[])
ELSE true
END
-- Filter by rbac_roles
AND CASE
-- @rbac_role allows filtering by rbac roles. If 'member' is included, show everyone, as
-- everyone is a member.
WHEN cardinality($5 :: text[]) > 0 AND 'member' != ANY($5 :: text[]) THEN
rbac_roles && $5 :: text[]
WHEN cardinality($4 :: text[]) > 0 AND 'member' != ANY($4 :: text[]) THEN
rbac_roles && $4 :: text[]
ELSE true
END
-- End of filters
ORDER BY
-- Deterministic and consistent ordering of all users, even if they share
-- a timestamp. This is to ensure consistent pagination.
(created_at, id) ASC OFFSET $6
(created_at, id) ASC OFFSET $5
LIMIT
-- A null limit means "no limit", so 0 means return all
NULLIF($7 :: int, 0)
NULLIF($6 :: int, 0)
`
type GetUsersParams struct {
Deleted bool `db:"deleted" json:"deleted"`
AfterID uuid.UUID `db:"after_id" json:"after_id"`
Search string `db:"search" json:"search"`
Status []UserStatus `db:"status" json:"status"`
@ -4400,9 +4395,9 @@ type GetUsersRow struct {
Count int64 `db:"count" json:"count"`
}
// This will never return deleted users.
func (q *sqlQuerier) GetUsers(ctx context.Context, arg GetUsersParams) ([]GetUsersRow, error) {
rows, err := q.db.QueryContext(ctx, getUsers,
arg.Deleted,
arg.AfterID,
arg.Search,
pq.Array(arg.Status),

View File

@ -20,8 +20,8 @@ SELECT
FROM
users
WHERE
(LOWER(username) = LOWER(@username) OR LOWER(email) = LOWER(@email))
AND deleted = @deleted
(LOWER(username) = LOWER(@username) OR LOWER(email) = LOWER(@email)) AND
deleted = false
LIMIT
1;
@ -29,7 +29,9 @@ LIMIT
SELECT
COUNT(*)
FROM
users WHERE deleted = false;
users
WHERE
deleted = false;
-- name: GetActiveUserCount :one
SELECT
@ -40,12 +42,13 @@ WHERE
status = 'active'::user_status AND deleted = false;
-- name: GetFilteredUserCount :one
-- This will never count deleted users.
SELECT
COUNT(*)
FROM
users
WHERE
users.deleted = @deleted
users.deleted = false
-- Start filters
-- Filter by name, email or username
AND CASE
@ -127,12 +130,13 @@ WHERE
id = $1;
-- name: GetUsers :many
-- This will never return deleted users.
SELECT
*, COUNT(*) OVER() AS count
FROM
users
WHERE
users.deleted = @deleted
users.deleted = false
AND CASE
-- This allows using the last element on a page as effectively a cursor.
-- This is an important option for scripts that need to paginate without

View File

@ -8,7 +8,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/coder/coder/coderd/httpapi"
"github.com/coder/coder/loadtest/agentconn"
"github.com/coder/coder/scaletest/agentconn"
)
func Test_Config(t *testing.T) {

View File

@ -18,8 +18,8 @@ import (
"cdr.dev/slog/sloggers/sloghuman"
"github.com/coder/coder/coderd/tracing"
"github.com/coder/coder/codersdk"
"github.com/coder/coder/loadtest/harness"
"github.com/coder/coder/loadtest/loadtestutil"
"github.com/coder/coder/scaletest/harness"
"github.com/coder/coder/scaletest/loadtestutil"
)
const defaultRequestTimeout = 5 * time.Second

View File

@ -18,9 +18,9 @@ import (
"github.com/coder/coder/coderd/coderdtest"
"github.com/coder/coder/coderd/httpapi"
"github.com/coder/coder/codersdk"
"github.com/coder/coder/loadtest/agentconn"
"github.com/coder/coder/provisioner/echo"
"github.com/coder/coder/provisionersdk/proto"
"github.com/coder/coder/scaletest/agentconn"
"github.com/coder/coder/testutil"
)

View File

@ -0,0 +1,89 @@
package createworkspaces
import (
"github.com/google/uuid"
"golang.org/x/xerrors"
"github.com/coder/coder/codersdk"
"github.com/coder/coder/scaletest/agentconn"
"github.com/coder/coder/scaletest/reconnectingpty"
"github.com/coder/coder/scaletest/workspacebuild"
)
// UserConfig describes the new user that a Runner creates for a single run
// of the create-workspaces test. The user is deleted again by Cleanup
// unless NoCleanup is set on the enclosing Config.
type UserConfig struct {
	// OrganizationID is the ID of the organization to add the user to.
	OrganizationID uuid.UUID `json:"organization_id"`
	// Username is the username of the new user.
	Username string `json:"username"`
	// Email is the email of the new user.
	Email string `json:"email"`
}
// Validate checks that every required user field is populated and returns
// a descriptive error for the first missing field.
func (c UserConfig) Validate() error {
	switch {
	case c.OrganizationID == uuid.Nil:
		return xerrors.New("organization_id must not be a nil UUID")
	case c.Username == "":
		return xerrors.New("username must be set")
	case c.Email == "":
		return xerrors.New("email must be set")
	default:
		return nil
	}
}
// Config is the top-level configuration for a create-workspaces test run:
// it bundles the user to create, the workspace to build as that user, and
// the optional connections to open against the workspace's agent.
type Config struct {
	// User is the configuration for the user to create.
	User UserConfig `json:"user"`
	// Workspace is the configuration for the workspace to create. The workspace
	// will be built using the new user.
	//
	// OrganizationID is ignored and set to the new user's organization ID.
	Workspace workspacebuild.Config `json:"workspace"`
	// ReconnectingPTY is the configuration for web terminal connections to the
	// new workspace. If nil, no web terminal connections will be made. Runs in
	// parallel to agent connections if specified.
	//
	// AgentID is ignored and set to the new workspace's agent ID.
	ReconnectingPTY *reconnectingpty.Config `json:"reconnecting_pty"`
	// AgentConn is the configuration for connections made to the agent. If nil,
	// no agent connections will be made. Runs in parallel to reconnecting pty
	// connections if specified.
	//
	// AgentID is ignored and set to the new workspace's agent ID.
	AgentConn *agentconn.Config `json:"agent_conn"`
	// NoCleanup determines whether the user and workspace should be left as is
	// and not deleted or stopped in any way.
	NoCleanup bool `json:"no_cleanup"`
}
// Validate ensures the configuration is usable: the user config must be
// complete and each sub-config (workspace build, reconnecting PTY, agent
// connection) must itself validate. Fields that are overwritten at runtime
// (workspace org/user IDs, agent IDs) are stubbed with placeholder values
// so the sub-configs can be validated without them.
func (c Config) Validate() error {
	if err := c.User.Validate(); err != nil {
		return xerrors.Errorf("validate user: %w", err)
	}

	// These values will be overwritten during the test with the new user's
	// details; set placeholders so the sub-config validates. c is a value
	// receiver, so this does not modify the caller's config.
	c.Workspace.OrganizationID = c.User.OrganizationID
	c.Workspace.UserID = codersdk.Me
	if err := c.Workspace.Validate(); err != nil {
		return xerrors.Errorf("validate workspace: %w", err)
	}

	if c.ReconnectingPTY != nil {
		// Validate a copy: writing through the pointer would mutate the
		// caller's config as a side effect of Validate. AgentID is replaced
		// with the real agent ID during the test, so a placeholder suffices.
		rpty := *c.ReconnectingPTY
		rpty.AgentID = uuid.New()
		if err := rpty.Validate(); err != nil {
			return xerrors.Errorf("validate reconnecting pty: %w", err)
		}
	}
	if c.AgentConn != nil {
		// Same as above: validate a copy with a placeholder agent ID.
		conn := *c.AgentConn
		conn.AgentID = uuid.New()
		if err := conn.Validate(); err != nil {
			return xerrors.Errorf("validate agent conn: %w", err)
		}
	}
	return nil
}

View File

@ -0,0 +1,194 @@
package createworkspaces_test
import (
"testing"
"time"
"github.com/google/uuid"
"github.com/stretchr/testify/require"
"github.com/coder/coder/coderd/httpapi"
"github.com/coder/coder/codersdk"
"github.com/coder/coder/scaletest/agentconn"
"github.com/coder/coder/scaletest/createworkspaces"
"github.com/coder/coder/scaletest/reconnectingpty"
"github.com/coder/coder/scaletest/workspacebuild"
)
// Test_UserConfig exercises UserConfig.Validate: one fully-valid config,
// then one case per required field with that field cleared.
func Test_UserConfig(t *testing.T) {
	t.Parallel()

	orgID := uuid.New()
	valid := createworkspaces.UserConfig{
		OrganizationID: orgID,
		Username:       "test",
		Email:          "test@test.coder.com",
	}

	cases := []struct {
		name        string
		mutate      func(*createworkspaces.UserConfig)
		errContains string
	}{
		{
			name:   "OK",
			mutate: func(*createworkspaces.UserConfig) {},
		},
		{
			name: "NoOrganizationID",
			mutate: func(c *createworkspaces.UserConfig) {
				c.OrganizationID = uuid.Nil
			},
			errContains: "organization_id must not be a nil UUID",
		},
		{
			name: "NoUsername",
			mutate: func(c *createworkspaces.UserConfig) {
				c.Username = ""
			},
			errContains: "username must be set",
		},
		{
			name: "NoEmail",
			mutate: func(c *createworkspaces.UserConfig) {
				c.Email = ""
			},
			errContains: "email must be set",
		},
	}

	for _, c := range cases {
		c := c
		t.Run(c.name, func(t *testing.T) {
			t.Parallel()

			cfg := valid
			c.mutate(&cfg)
			err := cfg.Validate()
			if c.errContains == "" {
				require.NoError(t, err)
				return
			}
			require.Error(t, err)
			require.Contains(t, err.Error(), c.errContains)
		})
	}
}
// Test_Config exercises Config.Validate. Each fixture below is valid on its
// own; the failing cases then break exactly one sub-config at a time and
// assert that the error is wrapped with the corresponding context string.
func Test_Config(t *testing.T) {
	t.Parallel()

	id := uuid.New()
	// Valid fixtures shared by the cases below.
	userConfig := createworkspaces.UserConfig{
		OrganizationID: id,
		Username:       id.String(),
		Email:          id.String() + "@example.com",
	}
	workspaceConfig := workspacebuild.Config{
		OrganizationID: id,
		UserID:         id.String(),
		Request: codersdk.CreateWorkspaceRequest{
			TemplateID: id,
		},
	}
	reconnectingPTYConfig := reconnectingpty.Config{
		AgentID: id,
	}
	agentConnConfig := agentconn.Config{
		AgentID:        id,
		ConnectionMode: agentconn.ConnectionModeDirect,
		HoldDuration:   httpapi.Duration(time.Minute),
	}

	cases := []struct {
		name        string
		config      createworkspaces.Config
		errContains string
	}{
		{
			name: "OK",
			config: createworkspaces.Config{
				User:            userConfig,
				Workspace:       workspaceConfig,
				ReconnectingPTY: &reconnectingPTYConfig,
				AgentConn:       &agentConnConfig,
			},
		},
		{
			// ReconnectingPTY and AgentConn are optional and may be nil.
			name: "OKOptional",
			config: createworkspaces.Config{
				User:            userConfig,
				Workspace:       workspaceConfig,
				ReconnectingPTY: nil,
				AgentConn:       nil,
			},
		},
		{
			name: "BadUserConfig",
			config: createworkspaces.Config{
				User: createworkspaces.UserConfig{
					OrganizationID: uuid.Nil,
				},
			},
			errContains: "validate user",
		},
		{
			name: "BadWorkspaceConfig",
			config: createworkspaces.Config{
				User: userConfig,
				Workspace: workspacebuild.Config{
					Request: codersdk.CreateWorkspaceRequest{
						TemplateID: uuid.Nil,
					},
				},
			},
			errContains: "validate workspace",
		},
		{
			name: "BadReconnectingPTYConfig",
			config: createworkspaces.Config{
				User:      userConfig,
				Workspace: workspaceConfig,
				ReconnectingPTY: &reconnectingpty.Config{
					Timeout: httpapi.Duration(-1 * time.Second),
				},
			},
			errContains: "validate reconnecting pty",
		},
		{
			name: "BadAgentConnConfig",
			config: createworkspaces.Config{
				User:      userConfig,
				Workspace: workspaceConfig,
				AgentConn: &agentconn.Config{
					ConnectionMode: "bad",
				},
			},
			errContains: "validate agent conn",
		},
	}

	for _, c := range cases {
		c := c
		t.Run(c.name, func(t *testing.T) {
			t.Parallel()

			err := c.config.Validate()
			if c.errContains != "" {
				require.Error(t, err)
				require.Contains(t, err.Error(), c.errContains)
			} else {
				require.NoError(t, err)
			}
		})
	}
}

View File

@ -0,0 +1,182 @@
package createworkspaces
import (
"context"
"fmt"
"io"
"github.com/google/uuid"
"golang.org/x/sync/errgroup"
"golang.org/x/xerrors"
"cdr.dev/slog"
"cdr.dev/slog/sloggers/sloghuman"
"github.com/coder/coder/coderd/tracing"
"github.com/coder/coder/codersdk"
"github.com/coder/coder/cryptorand"
"github.com/coder/coder/scaletest/agentconn"
"github.com/coder/coder/scaletest/harness"
"github.com/coder/coder/scaletest/loadtestutil"
"github.com/coder/coder/scaletest/reconnectingpty"
"github.com/coder/coder/scaletest/workspacebuild"
)
// Runner creates a new user, logs in as them, builds a workspace for them,
// and then optionally opens reconnecting-PTY and agent connections to the
// workspace's agent. See Run and Cleanup.
type Runner struct {
	client *codersdk.Client
	cfg    Config
	// userID is recorded after the user is created so Cleanup can delete it.
	userID uuid.UUID
	// workspacebuildRunner is the sub-runner used to build the workspace;
	// Cleanup delegates workspace teardown to it.
	workspacebuildRunner *workspacebuild.Runner
}

var _ harness.Runnable = &Runner{}
var _ harness.Cleanable = &Runner{}
// NewRunner returns a Runner that uses client (which must have admin-level
// credentials, since it creates users) and the given configuration.
func NewRunner(client *codersdk.Client, cfg Config) *Runner {
	r := &Runner{}
	r.client = client
	r.cfg = cfg
	return r
}
// Run implements Runnable. It creates the configured user, logs in as that
// user, builds the configured workspace as them, and then — unless
// NoWaitForAgents is set — opens the configured reconnecting-PTY and agent
// connections against the workspace's first agent, in parallel.
func (r *Runner) Run(ctx context.Context, id string, logs io.Writer) error {
	ctx, span := tracing.StartSpan(ctx)
	defer span.End()

	// The PTY/agent-conn goroutines below share this writer; serialize
	// their writes so log lines don't interleave.
	logs = loadtestutil.NewSyncWriter(logs)
	logger := slog.Make(sloghuman.Sink(logs)).Leveled(slog.LevelDebug)
	r.client.Logger = logger
	r.client.LogBodies = true

	_, _ = fmt.Fprintln(logs, "Generating user password...")
	password, err := cryptorand.HexString(16)
	if err != nil {
		return xerrors.Errorf("generate random password for user: %w", err)
	}

	_, _ = fmt.Fprintln(logs, "Creating user:")
	_, _ = fmt.Fprintf(logs, "\tOrg ID: %s\n", r.cfg.User.OrganizationID.String())
	_, _ = fmt.Fprintf(logs, "\tUsername: %s\n", r.cfg.User.Username)
	_, _ = fmt.Fprintf(logs, "\tEmail: %s\n", r.cfg.User.Email)
	// Deliberately masked: the password is never written to the logs.
	_, _ = fmt.Fprintf(logs, "\tPassword: ****************\n")
	user, err := r.client.CreateUser(ctx, codersdk.CreateUserRequest{
		OrganizationID: r.cfg.User.OrganizationID,
		Username:       r.cfg.User.Username,
		Email:          r.cfg.User.Email,
		Password:       password,
	})
	if err != nil {
		return xerrors.Errorf("create user: %w", err)
	}
	// Record the ID so Cleanup can delete the user later.
	r.userID = user.ID

	_, _ = fmt.Fprintln(logs, "\nLogging in as new user...")
	userClient := codersdk.New(r.client.URL)
	loginRes, err := userClient.LoginWithPassword(ctx, codersdk.LoginWithPasswordRequest{
		Email:    r.cfg.User.Email,
		Password: password,
	})
	if err != nil {
		return xerrors.Errorf("login as new user: %w", err)
	}
	userClient.SetSessionToken(loginRes.SessionToken)

	_, _ = fmt.Fprintln(logs, "\nCreating workspace...")
	// Build the workspace as the new user; OrganizationID and UserID from
	// the config are overwritten here (see Config docs).
	workspaceBuildConfig := r.cfg.Workspace
	workspaceBuildConfig.OrganizationID = r.cfg.User.OrganizationID
	workspaceBuildConfig.UserID = user.ID.String()
	r.workspacebuildRunner = workspacebuild.NewRunner(userClient, workspaceBuildConfig)
	err = r.workspacebuildRunner.Run(ctx, id, logs)
	if err != nil {
		return xerrors.Errorf("create workspace: %w", err)
	}

	if r.cfg.Workspace.NoWaitForAgents {
		return nil
	}

	// Get the workspace.
	workspaceID, err := r.workspacebuildRunner.WorkspaceID()
	if err != nil {
		return xerrors.Errorf("get workspace ID: %w", err)
	}
	workspace, err := userClient.Workspace(ctx, workspaceID)
	if err != nil {
		return xerrors.Errorf("get workspace %q: %w", workspaceID.String(), err)
	}

	// Find the first agent.
	var agent codersdk.WorkspaceAgent
resourceLoop:
	for _, res := range workspace.LatestBuild.Resources {
		for _, a := range res.Agents {
			agent = a
			break resourceLoop
		}
	}
	if agent.ID == uuid.Nil {
		return xerrors.Errorf("no agents found for workspace %q", workspaceID.String())
	}

	// Run the optional connections in parallel; the first error cancels the
	// other via the errgroup context.
	eg, egCtx := errgroup.WithContext(ctx)
	if r.cfg.ReconnectingPTY != nil {
		eg.Go(func() error {
			// Copy the config so the caller's AgentID is not overwritten.
			reconnectingPTYConfig := *r.cfg.ReconnectingPTY
			reconnectingPTYConfig.AgentID = agent.ID

			reconnectingPTYRunner := reconnectingpty.NewRunner(userClient, reconnectingPTYConfig)
			err := reconnectingPTYRunner.Run(egCtx, id, logs)
			if err != nil {
				return xerrors.Errorf("run reconnecting pty: %w", err)
			}

			return nil
		})
	}
	if r.cfg.AgentConn != nil {
		eg.Go(func() error {
			// Copy the config so the caller's AgentID is not overwritten.
			agentConnConfig := *r.cfg.AgentConn
			agentConnConfig.AgentID = agent.ID

			agentConnRunner := agentconn.NewRunner(userClient, agentConnConfig)
			err := agentConnRunner.Run(egCtx, id, logs)
			if err != nil {
				return xerrors.Errorf("run agent connection: %w", err)
			}

			return nil
		})
	}

	err = eg.Wait()
	if err != nil {
		return xerrors.Errorf("run workspace connections in parallel: %w", err)
	}

	return nil
}
// Cleanup implements Cleanable. It tears down whatever Run created: the
// workspace (via the workspace-build sub-runner) and then the user. It is a
// no-op when NoCleanup is set.
func (r *Runner) Cleanup(ctx context.Context, id string) error {
	if r.cfg.NoCleanup {
		return nil
	}

	if wb := r.workspacebuildRunner; wb != nil {
		if err := wb.Cleanup(ctx, id); err != nil {
			return xerrors.Errorf("cleanup workspace: %w", err)
		}
	}

	// userID is only set once the user was actually created.
	if r.userID == uuid.Nil {
		return nil
	}
	if err := r.client.DeleteUser(ctx, r.userID); err != nil {
		return xerrors.Errorf("delete user: %w", err)
	}
	return nil
}

View File

@ -0,0 +1,236 @@
package createworkspaces_test
import (
"bytes"
"context"
"testing"
"time"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"cdr.dev/slog"
"cdr.dev/slog/sloggers/slogtest"
"github.com/coder/coder/agent"
"github.com/coder/coder/coderd/coderdtest"
"github.com/coder/coder/coderd/httpapi"
"github.com/coder/coder/codersdk"
"github.com/coder/coder/provisioner/echo"
"github.com/coder/coder/provisionersdk/proto"
"github.com/coder/coder/scaletest/agentconn"
"github.com/coder/coder/scaletest/createworkspaces"
"github.com/coder/coder/scaletest/reconnectingpty"
"github.com/coder/coder/scaletest/workspacebuild"
"github.com/coder/coder/testutil"
)
// Test_Runner is an end-to-end test of createworkspaces.Runner against a
// coderdtest server: it verifies the happy path (user + workspace created,
// connections opened, then cleaned up) and that a failed workspace build
// surfaces the provisioner's error.
func Test_Runner(t *testing.T) {
	t.Parallel()
	t.Skip("Flake seen here: https://github.com/coder/coder/actions/runs/3436164958/jobs/5729513320")

	t.Run("OK", func(t *testing.T) {
		t.Parallel()

		ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
		defer cancel()

		client := coderdtest.New(t, &coderdtest.Options{
			IncludeProvisionerDaemon: true,
		})
		user := coderdtest.CreateFirstUser(t, client)

		// Template whose apply emits one log line and one agent-bearing
		// resource, so the runner has an agent to connect to.
		authToken := uuid.NewString()
		version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, &echo.Responses{
			Parse:         echo.ParseComplete,
			ProvisionPlan: echo.ProvisionComplete,
			ProvisionApply: []*proto.Provision_Response{
				{
					Type: &proto.Provision_Response_Log{
						Log: &proto.Log{
							Level:  proto.LogLevel_INFO,
							Output: "hello from logs",
						},
					},
				},
				{
					Type: &proto.Provision_Response_Complete{
						Complete: &proto.Provision_Complete{
							Resources: []*proto.Resource{
								{
									Name: "example",
									Type: "aws_instance",
									Agents: []*proto.Agent{
										{
											Id:   uuid.NewString(),
											Name: "agent",
											Auth: &proto.Agent_Token{
												Token: authToken,
											},
											Apps: []*proto.App{},
										},
									},
								},
							},
						},
					},
				},
			},
		})
		template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
		coderdtest.AwaitTemplateVersionJob(t, client, version.ID)

		// Since the runner creates the workspace on its own, we have to keep
		// listing workspaces until we find it, then wait for the build to
		// finish, then start the agents.
		go func() {
			var workspace codersdk.Workspace
			for {
				res, err := client.Workspaces(ctx, codersdk.WorkspaceFilter{})
				if !assert.NoError(t, err) {
					return
				}
				workspaces := res.Workspaces

				if len(workspaces) == 1 {
					workspace = workspaces[0]
					break
				}

				time.Sleep(100 * time.Millisecond)
			}

			coderdtest.AwaitWorkspaceBuildJob(t, client, workspace.LatestBuild.ID)

			// Start an in-process agent using the token baked into the
			// template, then wait for it to report connected.
			agentClient := codersdk.New(client.URL)
			agentClient.SetSessionToken(authToken)
			agentCloser := agent.New(agent.Options{
				Client: agentClient,
				Logger: slogtest.Make(t, nil).Named("agent").Leveled(slog.LevelWarn),
			})
			t.Cleanup(func() {
				_ = agentCloser.Close()
			})

			coderdtest.AwaitWorkspaceAgents(t, client, workspace.ID)
		}()

		const (
			username = "scaletest-user"
			email    = "scaletest@test.coder.com"
		)
		runner := createworkspaces.NewRunner(client, createworkspaces.Config{
			User: createworkspaces.UserConfig{
				OrganizationID: user.OrganizationID,
				Username:       username,
				Email:          email,
			},
			Workspace: workspacebuild.Config{
				OrganizationID: user.OrganizationID,
				Request: codersdk.CreateWorkspaceRequest{
					TemplateID: template.ID,
				},
			},
			ReconnectingPTY: &reconnectingpty.Config{
				Init: codersdk.ReconnectingPTYInit{
					Height:  24,
					Width:   80,
					Command: "echo hello",
				},
				Timeout: httpapi.Duration(testutil.WaitLong),
			},
			AgentConn: &agentconn.Config{
				ConnectionMode: agentconn.ConnectionModeDerp,
				HoldDuration:   0,
			},
		})

		logs := bytes.NewBuffer(nil)
		err := runner.Run(ctx, "1", logs)
		logsStr := logs.String()
		t.Log("Runner logs:\n\n" + logsStr)
		require.NoError(t, err)

		// Ensure a user and workspace were created.
		users, err := client.Users(ctx, codersdk.UsersRequest{})
		require.NoError(t, err)
		require.Len(t, users.Users, 2) // 1 user already exists
		workspaces, err := client.Workspaces(ctx, codersdk.WorkspaceFilter{})
		require.NoError(t, err)
		require.Len(t, workspaces.Workspaces, 1)

		// Look for strings in the logs.
		require.Contains(t, logsStr, "Generating user password...")
		require.Contains(t, logsStr, "Creating user:")
		require.Contains(t, logsStr, "Org ID: "+user.OrganizationID.String())
		require.Contains(t, logsStr, "Username: "+username)
		require.Contains(t, logsStr, "Email: "+email)
		require.Contains(t, logsStr, "Logging in as new user...")
		require.Contains(t, logsStr, "Creating workspace...")
		require.Contains(t, logsStr, `"agent" is connected`)
		require.Contains(t, logsStr, "Opening reconnecting PTY connection to agent")
		require.Contains(t, logsStr, "Opening connection to workspace agent")

		err = runner.Cleanup(ctx, "1")
		require.NoError(t, err)

		// Ensure the user and workspace were deleted.
		users, err = client.Users(ctx, codersdk.UsersRequest{})
		require.NoError(t, err)
		require.Len(t, users.Users, 1) // 1 user already exists
		workspaces, err = client.Workspaces(ctx, codersdk.WorkspaceFilter{})
		require.NoError(t, err)
		require.Len(t, workspaces.Workspaces, 0)
	})

	t.Run("FailedBuild", func(t *testing.T) {
		t.Parallel()

		ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
		defer cancel()

		client := coderdtest.New(t, &coderdtest.Options{
			IncludeProvisionerDaemon: true,
		})
		user := coderdtest.CreateFirstUser(t, client)

		// Template whose apply always fails with "test error"; the runner's
		// workspace build should surface that error.
		version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, &echo.Responses{
			Parse:         echo.ParseComplete,
			ProvisionPlan: echo.ProvisionComplete,
			ProvisionApply: []*proto.Provision_Response{
				{
					Type: &proto.Provision_Response_Complete{
						Complete: &proto.Provision_Complete{
							Error: "test error",
						},
					},
				},
			},
		})
		template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
		coderdtest.AwaitTemplateVersionJob(t, client, version.ID)

		runner := createworkspaces.NewRunner(client, createworkspaces.Config{
			User: createworkspaces.UserConfig{
				OrganizationID: user.OrganizationID,
				Username:       "scaletest-user",
				Email:          "scaletest@test.coder.com",
			},
			Workspace: workspacebuild.Config{
				OrganizationID: user.OrganizationID,
				Request: codersdk.CreateWorkspaceRequest{
					TemplateID: template.ID,
				},
			},
		})

		logs := bytes.NewBuffer(nil)
		err := runner.Run(ctx, "1", logs)
		logsStr := logs.String()
		t.Log("Runner logs:\n\n" + logsStr)
		require.Error(t, err)
		require.ErrorContains(t, err, "test error")
	})
}

View File

@ -9,7 +9,7 @@ import (
"github.com/stretchr/testify/require"
"golang.org/x/xerrors"
"github.com/coder/coder/loadtest/harness"
"github.com/coder/coder/scaletest/harness"
)
const testPanicMessage = "expected test panic"

View File

@ -9,7 +9,7 @@ import (
"golang.org/x/xerrors"
"github.com/coder/coder/coderd/httpapi"
"github.com/coder/coder/loadtest/harness"
"github.com/coder/coder/scaletest/harness"
)
func Test_Results(t *testing.T) {

View File

@ -9,7 +9,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/coder/coder/loadtest/harness"
"github.com/coder/coder/scaletest/harness"
)
// testFns implements Runnable and Cleanable.

View File

@ -13,7 +13,7 @@ import (
"github.com/stretchr/testify/require"
"golang.org/x/xerrors"
"github.com/coder/coder/loadtest/harness"
"github.com/coder/coder/scaletest/harness"
)
//nolint:paralleltest // this tests uses timings to determine if it's working

View File

@ -7,7 +7,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/coder/coder/coderd/httpapi"
"github.com/coder/coder/loadtest/placebo"
"github.com/coder/coder/scaletest/placebo"
)
func Test_Config(t *testing.T) {

View File

@ -9,7 +9,7 @@ import (
"golang.org/x/xerrors"
"github.com/coder/coder/loadtest/harness"
"github.com/coder/coder/scaletest/harness"
)
type Runner struct {

View File

@ -10,7 +10,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/coder/coder/coderd/httpapi"
"github.com/coder/coder/loadtest/placebo"
"github.com/coder/coder/scaletest/placebo"
)
func Test_Runner(t *testing.T) {

View File

@ -9,7 +9,7 @@ import (
"github.com/coder/coder/coderd/httpapi"
"github.com/coder/coder/codersdk"
"github.com/coder/coder/loadtest/reconnectingpty"
"github.com/coder/coder/scaletest/reconnectingpty"
)
func Test_Config(t *testing.T) {

View File

@ -15,8 +15,8 @@ import (
"cdr.dev/slog/sloggers/sloghuman"
"github.com/coder/coder/coderd/tracing"
"github.com/coder/coder/codersdk"
"github.com/coder/coder/loadtest/harness"
"github.com/coder/coder/loadtest/loadtestutil"
"github.com/coder/coder/scaletest/harness"
"github.com/coder/coder/scaletest/loadtestutil"
)
type Runner struct {

View File

@ -14,9 +14,9 @@ import (
"github.com/coder/coder/coderd/coderdtest"
"github.com/coder/coder/coderd/httpapi"
"github.com/coder/coder/codersdk"
"github.com/coder/coder/loadtest/reconnectingpty"
"github.com/coder/coder/provisioner/echo"
"github.com/coder/coder/provisionersdk/proto"
"github.com/coder/coder/scaletest/reconnectingpty"
"github.com/coder/coder/testutil"
)

View File

@ -16,6 +16,9 @@ type Config struct {
// request.template_id must be set. A name will be generated if not
// specified.
Request codersdk.CreateWorkspaceRequest `json:"request"`
// NoWaitForAgents determines whether the test should wait for the workspace
// agents to connect before returning.
NoWaitForAgents bool `json:"no_wait_for_agents"`
}
func (c Config) Validate() error {

View File

@ -7,7 +7,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/coder/coder/codersdk"
"github.com/coder/coder/loadtest/workspacebuild"
"github.com/coder/coder/scaletest/workspacebuild"
)
func Test_Config(t *testing.T) {
@ -28,6 +28,7 @@ func Test_Config(t *testing.T) {
Request: codersdk.CreateWorkspaceRequest{
TemplateID: id,
},
NoWaitForAgents: true,
},
errContains: "organization_id must be set",
},

View File

@ -15,13 +15,14 @@ import (
"github.com/coder/coder/coderd/tracing"
"github.com/coder/coder/codersdk"
"github.com/coder/coder/cryptorand"
"github.com/coder/coder/loadtest/harness"
"github.com/coder/coder/loadtest/loadtestutil"
"github.com/coder/coder/scaletest/harness"
"github.com/coder/coder/scaletest/loadtestutil"
)
type Runner struct {
client *codersdk.Client
cfg Config
client *codersdk.Client
cfg Config
workspaceID uuid.UUID
}
@ -65,17 +66,44 @@ func (r *Runner) Run(ctx context.Context, _ string, logs io.Writer) error {
return xerrors.Errorf("wait for build: %w", err)
}
_, _ = fmt.Fprintln(logs, "")
err = waitForAgents(ctx, logs, r.client, workspace.ID)
if err != nil {
return xerrors.Errorf("wait for agent: %w", err)
if r.cfg.NoWaitForAgents {
_, _ = fmt.Fprintln(logs, "Skipping agent connectivity check.")
} else {
_, _ = fmt.Fprintln(logs, "")
err = waitForAgents(ctx, logs, r.client, workspace.ID)
if err != nil {
return xerrors.Errorf("wait for agent: %w", err)
}
}
return nil
}
// Cleanup implements Cleanable.
func (r *Runner) Cleanup(ctx context.Context, _ string) error {
func (r *Runner) WorkspaceID() (uuid.UUID, error) {
if r.workspaceID == uuid.Nil {
return uuid.Nil, xerrors.New("workspace ID not set")
}
return r.workspaceID, nil
}
// CleanupRunner is a runner that deletes a workspace in the Run phase.
type CleanupRunner struct {
client *codersdk.Client
workspaceID uuid.UUID
}
var _ harness.Runnable = &CleanupRunner{}
func NewCleanupRunner(client *codersdk.Client, workspaceID uuid.UUID) *CleanupRunner {
return &CleanupRunner{
client: client,
workspaceID: workspaceID,
}
}
// Run implements Runnable.
func (r *CleanupRunner) Run(ctx context.Context, _ string, logs io.Writer) error {
if r.workspaceID == uuid.Nil {
return nil
}
@ -89,8 +117,6 @@ func (r *Runner) Cleanup(ctx context.Context, _ string) error {
return xerrors.Errorf("delete workspace: %w", err)
}
// TODO: capture these logs
logs := io.Discard
err = waitForBuild(ctx, logs, r.client, build.ID)
if err != nil {
return xerrors.Errorf("wait for build: %w", err)
@ -99,6 +125,15 @@ func (r *Runner) Cleanup(ctx context.Context, _ string) error {
return nil
}
// Cleanup implements Cleanable by wrapping CleanupRunner.
func (r *Runner) Cleanup(ctx context.Context, id string) error {
// TODO: capture these logs
return (&CleanupRunner{
client: r.client,
workspaceID: r.workspaceID,
}).Run(ctx, id, io.Discard)
}
func waitForBuild(ctx context.Context, w io.Writer, client *codersdk.Client, buildID uuid.UUID) error {
ctx, span := tracing.StartSpan(ctx)
defer span.End()

View File

@ -16,9 +16,9 @@ import (
"github.com/coder/coder/agent"
"github.com/coder/coder/coderd/coderdtest"
"github.com/coder/coder/codersdk"
"github.com/coder/coder/loadtest/workspacebuild"
"github.com/coder/coder/provisioner/echo"
"github.com/coder/coder/provisionersdk/proto"
"github.com/coder/coder/scaletest/workspacebuild"
"github.com/coder/coder/testutil"
)
@ -103,8 +103,8 @@ func Test_Runner(t *testing.T) {
coderdtest.AwaitTemplateVersionJob(t, client, version.ID)
// Since the runner creates the workspace on it's own, we have to keep
// listing workspaces until we find it, then wait for the build to finish,
// then start the agents.
// listing workspaces until we find it, then wait for the build to
// finish, then start the agents.
go func() {
var workspace codersdk.Workspace
for {