feat: add load testing harness, coder loadtest command (#4853)

Dean Sheather 2022-11-03 04:30:00 +10:00 committed by GitHub
parent b1c400a7df
commit e7dd3f9378
23 changed files with 2641 additions and 6 deletions

cli/loadtest.go Normal file

@ -0,0 +1,179 @@
package cli
import (
"bufio"
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"os"
"strconv"
"time"
"github.com/spf13/cobra"
"golang.org/x/xerrors"
"github.com/coder/coder/cli/cliflag"
"github.com/coder/coder/codersdk"
"github.com/coder/coder/loadtest/harness"
)
func loadtest() *cobra.Command {
var (
configPath string
)
cmd := &cobra.Command{
Use: "loadtest --config <path>",
Short: "Load test the Coder API",
// TODO: documentation and a JSON schema file
Long: "Perform load tests against the Coder server. The load tests " +
"configurable via a JSON file.",
Hidden: true,
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
if configPath == "" {
return xerrors.New("config is required")
}
var (
configReader io.ReadCloser
)
if configPath == "-" {
configReader = io.NopCloser(cmd.InOrStdin())
} else {
f, err := os.Open(configPath)
if err != nil {
return xerrors.Errorf("open config file %q: %w", configPath, err)
}
configReader = f
}
var config LoadTestConfig
err := json.NewDecoder(configReader).Decode(&config)
_ = configReader.Close()
if err != nil {
return xerrors.Errorf("read config file %q: %w", configPath, err)
}
err = config.Validate()
if err != nil {
return xerrors.Errorf("validate config: %w", err)
}
client, err := CreateClient(cmd)
if err != nil {
return err
}
me, err := client.User(cmd.Context(), codersdk.Me)
if err != nil {
return xerrors.Errorf("fetch current user: %w", err)
}
// Only owners can do loadtests. This isn't a very strong check but
// there's not much else we can do. Ratelimits are enforced for
// non-owners so hopefully that limits the damage if someone
// disables this check and runs it against a non-owner account.
ok := false
for _, role := range me.Roles {
if role.Name == "owner" {
ok = true
break
}
}
if !ok {
return xerrors.Errorf("Not logged in as site owner. Load testing is only available to site owners.")
}
// Disable ratelimits for future requests.
client.BypassRatelimits = true
// Prepare the test.
strategy := config.Strategy.ExecutionStrategy()
th := harness.NewTestHarness(strategy)
for i, t := range config.Tests {
name := fmt.Sprintf("%s-%d", t.Type, i)
// Default to a single run if count is unset (see LoadTest.Count).
count := t.Count
if count <= 0 {
count = 1
}
for j := 0; j < count; j++ {
id := strconv.Itoa(j)
runner, err := t.NewRunner(client)
if err != nil {
return xerrors.Errorf("create %q runner for %s/%s: %w", t.Type, name, id, err)
}
th.AddRun(name, id, runner)
}
}
_, _ = fmt.Fprintln(cmd.ErrOrStderr(), "Running load test...")
testCtx := cmd.Context()
if config.Timeout > 0 {
var cancel func()
testCtx, cancel = context.WithTimeout(testCtx, time.Duration(config.Timeout))
defer cancel()
}
// TODO: live progress output
start := time.Now()
err = th.Run(testCtx)
if err != nil {
return xerrors.Errorf("run test harness (harness failure, not a test failure): %w", err)
}
elapsed := time.Since(start)
// Print the results.
// TODO: better result printing
// TODO: move result printing to the loadtest package, add multiple
// output formats (like HTML, JSON)
res := th.Results()
var totalDuration time.Duration
for _, run := range res.Runs {
totalDuration += run.Duration
if run.Error == nil {
continue
}
_, _ = fmt.Fprintf(cmd.ErrOrStderr(), "\n== FAIL: %s\n\n", run.FullID)
_, _ = fmt.Fprintf(cmd.ErrOrStderr(), "\tError: %s\n\n", run.Error)
// Print log lines indented.
_, _ = fmt.Fprintf(cmd.ErrOrStderr(), "\tLog:\n")
rd := bufio.NewReader(bytes.NewBuffer(run.Logs))
for {
line, err := rd.ReadBytes('\n')
if len(line) > 0 {
_, _ = fmt.Fprintf(cmd.ErrOrStderr(), "\t\t%s", line)
}
if err == io.EOF {
break
}
if err != nil {
_, _ = fmt.Fprintf(cmd.ErrOrStderr(), "\n\tLOG PRINT ERROR: %+v\n", err)
break
}
}
}
_, _ = fmt.Fprintln(cmd.ErrOrStderr(), "\n\nTest results:")
_, _ = fmt.Fprintf(cmd.ErrOrStderr(), "\tPass: %d\n", res.TotalPass)
_, _ = fmt.Fprintf(cmd.ErrOrStderr(), "\tFail: %d\n", res.TotalFail)
_, _ = fmt.Fprintf(cmd.ErrOrStderr(), "\tTotal: %d\n", res.TotalRuns)
_, _ = fmt.Fprintln(cmd.ErrOrStderr(), "")
_, _ = fmt.Fprintf(cmd.ErrOrStderr(), "\tTotal duration: %s\n", elapsed)
if res.TotalRuns > 0 {
_, _ = fmt.Fprintf(cmd.ErrOrStderr(), "\tAvg. duration: %s\n", totalDuration/time.Duration(res.TotalRuns))
}
// Cleanup.
_, _ = fmt.Fprintln(cmd.ErrOrStderr(), "\nCleaning up...")
err = th.Cleanup(cmd.Context())
if err != nil {
return xerrors.Errorf("cleanup tests: %w", err)
}
return nil
},
}
cliflag.StringVarP(cmd.Flags(), &configPath, "config", "", "CODER_LOADTEST_CONFIG_PATH", "", "Path to the load test configuration file, or - to read from stdin.")
return cmd
}
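For reference, a minimal configuration for the new command might look like the sketch below. The field names come from the json tags in cli/loadtestconfig.go later in this diff; the values are illustrative only. The file is passed with "coder loadtest --config loadtest.json", or piped to stdin with "--config -":

{
  "strategy": {
    "type": "linear"
  },
  "tests": [
    {
      "type": "placebo",
      "count": 10,
      "placebo": {
        "sleep": "10ms"
      }
    }
  ],
  "timeout": "5m"
}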

cli/loadtest_test.go Normal file

@ -0,0 +1,136 @@
package cli_test
import (
"bytes"
"context"
"encoding/json"
"os"
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/coder/coder/cli"
"github.com/coder/coder/cli/clitest"
"github.com/coder/coder/coderd/coderdtest"
"github.com/coder/coder/coderd/httpapi"
"github.com/coder/coder/codersdk"
"github.com/coder/coder/loadtest/placebo"
"github.com/coder/coder/loadtest/workspacebuild"
"github.com/coder/coder/pty/ptytest"
"github.com/coder/coder/testutil"
)
func TestLoadTest(t *testing.T) {
t.Parallel()
t.Run("PlaceboFromStdin", func(t *testing.T) {
t.Parallel()
client := coderdtest.New(t, nil)
_ = coderdtest.CreateFirstUser(t, client)
config := cli.LoadTestConfig{
Strategy: cli.LoadTestStrategy{
Type: cli.LoadTestStrategyTypeLinear,
},
Tests: []cli.LoadTest{
{
Type: cli.LoadTestTypePlacebo,
Count: 10,
Placebo: &placebo.Config{
Sleep: httpapi.Duration(10 * time.Millisecond),
},
},
},
Timeout: httpapi.Duration(testutil.WaitShort),
}
configBytes, err := json.Marshal(config)
require.NoError(t, err)
cmd, root := clitest.New(t, "loadtest", "--config", "-")
clitest.SetupConfig(t, client, root)
pty := ptytest.New(t)
cmd.SetIn(bytes.NewReader(configBytes))
cmd.SetOut(pty.Output())
cmd.SetErr(pty.Output())
ctx, cancelFunc := context.WithTimeout(context.Background(), testutil.WaitLong)
defer cancelFunc()
done := make(chan any)
go func() {
errC := cmd.ExecuteContext(ctx)
assert.NoError(t, errC)
close(done)
}()
pty.ExpectMatch("Test results:")
pty.ExpectMatch("Pass: 10")
cancelFunc()
<-done
})
t.Run("WorkspaceBuildFromFile", func(t *testing.T) {
t.Parallel()
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
user := coderdtest.CreateFirstUser(t, client)
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil)
coderdtest.AwaitTemplateVersionJob(t, client, version.ID)
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
config := cli.LoadTestConfig{
Strategy: cli.LoadTestStrategy{
Type: cli.LoadTestStrategyTypeConcurrent,
ConcurrencyLimit: 2,
},
Tests: []cli.LoadTest{
{
Type: cli.LoadTestTypeWorkspaceBuild,
Count: 2,
WorkspaceBuild: &workspacebuild.Config{
OrganizationID: user.OrganizationID,
UserID: user.UserID.String(),
Request: codersdk.CreateWorkspaceRequest{
TemplateID: template.ID,
},
},
},
},
Timeout: httpapi.Duration(testutil.WaitLong),
}
d := t.TempDir()
configPath := filepath.Join(d, "/config.loadtest.json")
f, err := os.Create(configPath)
require.NoError(t, err)
defer f.Close()
err = json.NewEncoder(f).Encode(config)
require.NoError(t, err)
_ = f.Close()
cmd, root := clitest.New(t, "loadtest", "--config", configPath)
clitest.SetupConfig(t, client, root)
pty := ptytest.New(t)
cmd.SetIn(pty.Input())
cmd.SetOut(pty.Output())
cmd.SetErr(pty.Output())
ctx, cancelFunc := context.WithTimeout(context.Background(), testutil.WaitLong)
defer cancelFunc()
done := make(chan any)
go func() {
errC := cmd.ExecuteContext(ctx)
assert.NoError(t, errC)
close(done)
}()
pty.ExpectMatch("Test results:")
pty.ExpectMatch("Pass: 2")
<-done
cancelFunc()
})
}

cli/loadtestconfig.go Normal file

@ -0,0 +1,181 @@
package cli
import (
"time"
"golang.org/x/xerrors"
"github.com/coder/coder/coderd/httpapi"
"github.com/coder/coder/codersdk"
"github.com/coder/coder/loadtest/harness"
"github.com/coder/coder/loadtest/placebo"
"github.com/coder/coder/loadtest/workspacebuild"
)
// LoadTestConfig is the overall configuration for a call to `coder loadtest`.
type LoadTestConfig struct {
Strategy LoadTestStrategy `json:"strategy"`
Tests []LoadTest `json:"tests"`
// Timeout sets a timeout for the entire test run. To control the timeout
// for each individual run, use strategy.timeout.
Timeout httpapi.Duration `json:"timeout"`
}
type LoadTestStrategyType string
const (
LoadTestStrategyTypeLinear LoadTestStrategyType = "linear"
LoadTestStrategyTypeConcurrent LoadTestStrategyType = "concurrent"
)
type LoadTestStrategy struct {
// Type is the type of load test strategy to use. Strategies determine how
// to run tests concurrently.
Type LoadTestStrategyType `json:"type"`
// ConcurrencyLimit is the maximum number of concurrent runs. This only
// applies if type == "concurrent". A negative value disables the
// concurrency limit and attempts to perform all runs concurrently. The
// default value is 100.
ConcurrencyLimit int `json:"concurrency_limit"`
// Shuffle determines whether or not to shuffle the test runs before
// executing them.
Shuffle bool `json:"shuffle"`
// Timeout is the maximum amount of time each test run may take. This is
// independent of the overall timeout set on the config. A timeout of 0
// disables the per-run timeout.
Timeout httpapi.Duration `json:"timeout"`
}
func (s LoadTestStrategy) ExecutionStrategy() harness.ExecutionStrategy {
var strategy harness.ExecutionStrategy
switch s.Type {
case LoadTestStrategyTypeLinear:
strategy = harness.LinearExecutionStrategy{}
case LoadTestStrategyTypeConcurrent:
limit := s.ConcurrencyLimit
if limit < 0 {
return harness.ConcurrentExecutionStrategy{}
}
if limit == 0 {
limit = 100
}
strategy = harness.ParallelExecutionStrategy{
Limit: limit,
}
default:
panic("unreachable, unknown strategy type " + s.Type)
}
if s.Timeout > 0 {
strategy = harness.TimeoutExecutionStrategyWrapper{
Timeout: time.Duration(s.Timeout),
Inner: strategy,
}
}
if s.Shuffle {
strategy = harness.ShuffleExecutionStrategyWrapper{
Inner: strategy,
}
}
return strategy
}
type LoadTestType string
const (
LoadTestTypePlacebo LoadTestType = "placebo"
LoadTestTypeWorkspaceBuild LoadTestType = "workspacebuild"
)
type LoadTest struct {
// Type is the type of load test to run.
Type LoadTestType `json:"type"`
// Count is the number of test runs to execute with this configuration. If
// the count is 0 or negative, defaults to 1.
Count int `json:"count"`
// Placebo must be set if type == "placebo".
Placebo *placebo.Config `json:"placebo,omitempty"`
// WorkspaceBuild must be set if type == "workspacebuild".
WorkspaceBuild *workspacebuild.Config `json:"workspacebuild,omitempty"`
}
func (t LoadTest) NewRunner(client *codersdk.Client) (harness.Runnable, error) {
switch t.Type {
case LoadTestTypePlacebo:
if t.Placebo == nil {
return nil, xerrors.New("placebo config must be set")
}
return placebo.NewRunner(*t.Placebo), nil
case LoadTestTypeWorkspaceBuild:
if t.WorkspaceBuild == nil {
return nil, xerrors.Errorf("workspacebuild config must be set")
}
return workspacebuild.NewRunner(client, *t.WorkspaceBuild), nil
default:
return nil, xerrors.Errorf("unknown test type %q", t.Type)
}
}
func (c *LoadTestConfig) Validate() error {
err := c.Strategy.Validate()
if err != nil {
return xerrors.Errorf("validate strategy: %w", err)
}
for i, test := range c.Tests {
err := test.Validate()
if err != nil {
return xerrors.Errorf("validate test %d: %w", i, err)
}
}
return nil
}
func (s *LoadTestStrategy) Validate() error {
switch s.Type {
case LoadTestStrategyTypeLinear:
case LoadTestStrategyTypeConcurrent:
default:
return xerrors.Errorf("invalid load test strategy type: %q", s.Type)
}
if s.Timeout < 0 {
return xerrors.Errorf("invalid load test strategy timeout: %q", s.Timeout)
}
return nil
}
func (t *LoadTest) Validate() error {
switch t.Type {
case LoadTestTypePlacebo:
if t.Placebo == nil {
return xerrors.Errorf("placebo test type must specify placebo")
}
err := t.Placebo.Validate()
if err != nil {
return xerrors.Errorf("validate placebo: %w", err)
}
case LoadTestTypeWorkspaceBuild:
if t.WorkspaceBuild == nil {
return xerrors.New("workspacebuild test type must specify workspacebuild")
}
err := t.WorkspaceBuild.Validate()
if err != nil {
return xerrors.Errorf("validate workspacebuild: %w", err)
}
default:
return xerrors.Errorf("invalid load test type: %q", t.Type)
}
return nil
}
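To illustrate the workspacebuild test type with the concurrent strategy, here is a hedged sketch using only the json tags defined above. The UUID placeholders must be replaced with real IDs, and request is a codersdk.CreateWorkspaceRequest (request.template_id is required; a workspace name is generated if omitted):

{
  "strategy": {
    "type": "concurrent",
    "concurrency_limit": 2
  },
  "tests": [
    {
      "type": "workspacebuild",
      "count": 2,
      "workspacebuild": {
        "organization_id": "<organization uuid>",
        "user_id": "me",
        "request": {
          "template_id": "<template uuid>"
        }
      }
    }
  ],
  "timeout": "30m"
}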


@ -70,6 +70,7 @@ func init() {
}
func Core() []*cobra.Command {
// Please re-sort this list alphabetically if you change it!
return []*cobra.Command{
configSSH(),
create(),
@ -77,26 +78,27 @@ func Core() []*cobra.Command {
dotfiles(),
gitssh(),
list(),
loadtest(),
login(),
logout(),
parameters(),
portForward(),
publickey(),
rename(),
resetPassword(),
schedules(),
show(),
speedtest(),
ssh(),
start(),
state(),
stop(),
templates(),
tokens(),
update(),
users(),
versionCmd(),
workspaceAgent(),
}
}

coderd/httpapi/json.go Normal file

@ -0,0 +1,50 @@
package httpapi
import (
"encoding/json"
"time"
"golang.org/x/xerrors"
)
// Duration wraps time.Duration and provides better JSON marshaling and
// unmarshaling. The default time.Duration marshals as an integer and only
// accepts integers when unmarshaling, which is not very user-friendly as users
// cannot write durations like "1h30m".
//
// This type marshals as a string like "1h30m", and unmarshals from either a
// string or an integer.
type Duration time.Duration
var _ json.Marshaler = Duration(0)
var _ json.Unmarshaler = (*Duration)(nil)
// MarshalJSON implements json.Marshaler.
func (d Duration) MarshalJSON() ([]byte, error) {
return json.Marshal(time.Duration(d).String())
}
// UnmarshalJSON implements json.Unmarshaler.
func (d *Duration) UnmarshalJSON(b []byte) error {
var v interface{}
err := json.Unmarshal(b, &v)
if err != nil {
return xerrors.Errorf("unmarshal JSON value: %w", err)
}
switch value := v.(type) {
case float64:
*d = Duration(time.Duration(value))
return nil
case string:
tmp, err := time.ParseDuration(value)
if err != nil {
return xerrors.Errorf("parse duration %q: %w", value, err)
}
*d = Duration(tmp)
return nil
}
return xerrors.New("invalid duration")
}
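As a quick illustrative sketch of what this type allows in config files, both of the following decode to the same value: strings go through time.ParseDuration, and bare numbers are treated as nanoseconds.

{"timeout": "1h30m"}
{"timeout": 5400000000000}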

coderd/httpapi/json_test.go Normal file

@ -0,0 +1,168 @@
package httpapi_test
import (
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/coder/coder/coderd/httpapi"
)
func TestDuration(t *testing.T) {
t.Parallel()
t.Run("MarshalJSON", func(t *testing.T) {
t.Parallel()
cases := []struct {
value time.Duration
expected string
}{
{
value: 0,
expected: "0s",
},
{
value: 1 * time.Millisecond,
expected: "1ms",
},
{
value: 1 * time.Second,
expected: "1s",
},
{
value: 1 * time.Minute,
expected: "1m0s",
},
{
value: 1 * time.Hour,
expected: "1h0m0s",
},
{
value: 1*time.Hour + 1*time.Minute + 1*time.Second + 1*time.Millisecond,
expected: "1h1m1.001s",
},
}
for _, c := range cases {
c := c
t.Run(c.expected, func(t *testing.T) {
t.Parallel()
d := httpapi.Duration(c.value)
b, err := d.MarshalJSON()
require.NoError(t, err)
require.Equal(t, `"`+c.expected+`"`, string(b))
})
}
})
t.Run("UnmarshalJSON", func(t *testing.T) {
t.Parallel()
cases := []struct {
value string
expected time.Duration
}{
{
value: "0ms",
expected: 0,
},
{
value: "0s",
expected: 0,
},
{
value: "1ms",
expected: 1 * time.Millisecond,
},
{
value: "1s",
expected: 1 * time.Second,
},
{
value: "1m",
expected: 1 * time.Minute,
},
{
value: "1m0s",
expected: 1 * time.Minute,
},
{
value: "1h",
expected: 1 * time.Hour,
},
{
value: "1h0m0s",
expected: 1 * time.Hour,
},
{
value: "1h1m1.001s",
expected: 1*time.Hour + 1*time.Minute + 1*time.Second + 1*time.Millisecond,
},
{
value: "1h1m1s1ms",
expected: 1*time.Hour + 1*time.Minute + 1*time.Second + 1*time.Millisecond,
},
}
for _, c := range cases {
c := c
t.Run(c.value, func(t *testing.T) {
t.Parallel()
var d httpapi.Duration
err := d.UnmarshalJSON([]byte(`"` + c.value + `"`))
require.NoError(t, err)
require.Equal(t, c.expected, time.Duration(d))
})
}
})
t.Run("UnmarshalJSONInt", func(t *testing.T) {
t.Parallel()
var d httpapi.Duration
err := d.UnmarshalJSON([]byte("12345"))
require.NoError(t, err)
require.EqualValues(t, 12345, d)
})
t.Run("UnmarshalJSONErrors", func(t *testing.T) {
t.Parallel()
cases := []struct {
value string
errContains string
}{
{
value: "not valid json (no double quotes)",
errContains: "unmarshal JSON value",
},
{
value: `"not valid duration"`,
errContains: "parse duration",
},
{
value: "{}",
errContains: "invalid duration",
},
}
for _, c := range cases {
c := c
t.Run(c.value, func(t *testing.T) {
t.Parallel()
var d httpapi.Duration
err := d.UnmarshalJSON([]byte(c.value))
require.Error(t, err)
require.Contains(t, err.Error(), c.errContains)
})
}
})
}


@ -376,9 +376,20 @@ type provisionerJobLogsMessage struct {
func (api *API) followLogs(jobID uuid.UUID) (<-chan database.ProvisionerJobLog, func(), error) {
logger := api.Logger.With(slog.F("job_id", jobID))
var (
closed = make(chan struct{})
bufferedLogs = make(chan database.ProvisionerJobLog, 128)
)
closeSubscribe, err := api.Pubsub.Subscribe(
provisionerJobLogsChannel(jobID),
func(ctx context.Context, message []byte) {
select {
case <-closed:
return
default:
}
jlMsg := provisionerJobLogsMessage{}
err := json.Unmarshal(message, &jlMsg)
if err != nil {
@ -399,9 +410,11 @@ func (api *API) followLogs(jobID uuid.UUID) (<-chan database.ProvisionerJobLog,
}
if jlMsg.EndOfLogs {
logger.Debug(ctx, "got End of Logs")
close(closed)
close(bufferedLogs)
}
},
)
if err != nil {
return nil, nil, err
}


@ -44,6 +44,10 @@ type Client struct {
HTTPClient *http.Client
SessionToken string
URL *url.URL
// BypassRatelimits is an optional flag that can be set by the site owner to
// disable ratelimit checks for the client.
BypassRatelimits bool
}
type RequestOption func(*http.Request)
@ -87,6 +91,9 @@ func (c *Client) Request(ctx context.Context, method, path string, body interfac
return nil, xerrors.Errorf("create request: %w", err)
}
req.Header.Set(SessionCustomHeader, c.SessionToken)
if c.BypassRatelimits {
req.Header.Set(BypassRatelimitHeader, "true")
}
if body != nil {
req.Header.Set("Content-Type", "application/json")

loadtest/harness/harness.go Normal file

@ -0,0 +1,101 @@
package harness
import (
"context"
"sync"
"github.com/hashicorp/go-multierror"
"golang.org/x/xerrors"
)
// ExecutionStrategy defines how a TestHarness should execute a set of runs. It
// essentially defines the concurrency model for a given testing session.
type ExecutionStrategy interface {
// Execute runs the given runs in whatever way the strategy wants. An error
// may only be returned if the strategy has a failure itself, not if any of
// the runs fail.
Execute(ctx context.Context, runs []*TestRun) error
}
// TestHarness runs a bunch of registered test runs using the given
// ExecutionStrategy.
type TestHarness struct {
strategy ExecutionStrategy
mut *sync.Mutex
runIDs map[string]struct{}
runs []*TestRun
started bool
done chan struct{}
}
// NewTestHarness creates a new TestHarness with the given ExecutionStrategy.
func NewTestHarness(strategy ExecutionStrategy) *TestHarness {
return &TestHarness{
strategy: strategy,
mut: new(sync.Mutex),
runIDs: map[string]struct{}{},
runs: []*TestRun{},
done: make(chan struct{}),
}
}
// Run runs the registered tests using the given ExecutionStrategy. The provided
// context can be used to cancel or set a deadline for the test run. Blocks
// until the tests have finished and returns the test execution error (not
// individual run errors).
//
// Panics if called more than once.
func (h *TestHarness) Run(ctx context.Context) (err error) {
h.mut.Lock()
if h.started {
h.mut.Unlock()
panic("harness is already started")
}
h.started = true
h.mut.Unlock()
defer close(h.done)
defer func() {
e := recover()
if e != nil {
err = xerrors.Errorf("execution strategy panicked: %w", e)
}
}()
err = h.strategy.Execute(ctx, h.runs)
//nolint:revive // we use named returns because we mutate it in a defer
return
}
// Cleanup should be called after the test run has finished and results have
// been collected.
func (h *TestHarness) Cleanup(ctx context.Context) (err error) {
h.mut.Lock()
defer h.mut.Unlock()
if !h.started {
panic("harness has not started")
}
select {
case <-h.done:
default:
panic("harness has not finished")
}
defer func() {
e := recover()
if e != nil {
err = multierror.Append(err, xerrors.Errorf("panic in cleanup: %w", e))
}
}()
for _, run := range h.runs {
e := run.Cleanup(ctx)
if e != nil {
err = multierror.Append(err, xerrors.Errorf("cleanup for %s failed: %w", run.FullID(), e))
}
}
//nolint:revive // we use named returns because we mutate it in a defer
return
}


@ -0,0 +1,264 @@
package harness_test
import (
"context"
"io"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/xerrors"
"github.com/coder/coder/loadtest/harness"
)
const testPanicMessage = "expected test panic"
type panickingExecutionStrategy struct{}
var _ harness.ExecutionStrategy = panickingExecutionStrategy{}
func (panickingExecutionStrategy) Execute(_ context.Context, _ []*harness.TestRun) error {
panic(testPanicMessage)
}
type erroringExecutionStrategy struct {
err error
}
var _ harness.ExecutionStrategy = erroringExecutionStrategy{}
func (e erroringExecutionStrategy) Execute(_ context.Context, _ []*harness.TestRun) error {
return e.err
}
func Test_TestHarness(t *testing.T) {
t.Parallel()
t.Run("OK", func(t *testing.T) {
t.Parallel()
expectedErr := xerrors.New("expected error")
h := harness.NewTestHarness(harness.LinearExecutionStrategy{})
r1 := h.AddRun("test", "1", fakeTestFns(nil, nil))
r2 := h.AddRun("test", "2", fakeTestFns(expectedErr, nil))
err := h.Run(context.Background())
require.NoError(t, err)
res := h.Results()
require.Equal(t, 2, res.TotalRuns)
require.Equal(t, 1, res.TotalPass)
require.Equal(t, 1, res.TotalFail)
require.Equal(t, map[string]harness.RunResult{
r1.FullID(): r1.Result(),
r2.FullID(): r2.Result(),
}, res.Runs)
err = h.Cleanup(context.Background())
require.NoError(t, err)
})
t.Run("CatchesExecutionError", func(t *testing.T) {
t.Parallel()
expectedErr := xerrors.New("expected error")
h := harness.NewTestHarness(erroringExecutionStrategy{err: expectedErr})
_ = h.AddRun("test", "1", fakeTestFns(nil, nil))
err := h.Run(context.Background())
require.Error(t, err)
require.ErrorIs(t, err, expectedErr)
})
t.Run("CatchesExecutionPanic", func(t *testing.T) {
t.Parallel()
h := harness.NewTestHarness(panickingExecutionStrategy{})
_ = h.AddRun("test", "1", fakeTestFns(nil, nil))
err := h.Run(context.Background())
require.Error(t, err)
require.ErrorContains(t, err, "panic")
require.ErrorContains(t, err, testPanicMessage)
})
t.Run("Cleanup", func(t *testing.T) {
t.Parallel()
t.Run("Error", func(t *testing.T) {
t.Parallel()
expectedErr := xerrors.New("expected error")
h := harness.NewTestHarness(harness.LinearExecutionStrategy{})
_ = h.AddRun("test", "1", fakeTestFns(nil, expectedErr))
err := h.Run(context.Background())
require.NoError(t, err)
err = h.Cleanup(context.Background())
require.Error(t, err)
require.ErrorContains(t, err, expectedErr.Error())
})
t.Run("Panic", func(t *testing.T) {
t.Parallel()
h := harness.NewTestHarness(harness.LinearExecutionStrategy{})
_ = h.AddRun("test", "1", testFns{
RunFn: func(_ context.Context, _ string, _ io.Writer) error {
return nil
},
CleanupFn: func(_ context.Context, _ string) error {
panic(testPanicMessage)
},
})
err := h.Run(context.Background())
require.NoError(t, err)
err = h.Cleanup(context.Background())
require.Error(t, err)
require.ErrorContains(t, err, "panic")
require.ErrorContains(t, err, testPanicMessage)
})
})
t.Run("Panics", func(t *testing.T) {
t.Parallel()
t.Run("RegisterAfterStart", func(t *testing.T) {
t.Parallel()
h := harness.NewTestHarness(harness.LinearExecutionStrategy{})
_ = h.Run(context.Background())
require.Panics(t, func() {
_ = h.AddRun("test", "1", fakeTestFns(nil, nil))
})
})
t.Run("DuplicateTestID", func(t *testing.T) {
t.Parallel()
h := harness.NewTestHarness(harness.LinearExecutionStrategy{})
name, id := "test", "1"
_ = h.AddRun(name, id, fakeTestFns(nil, nil))
require.Panics(t, func() {
_ = h.AddRun(name, id, fakeTestFns(nil, nil))
})
})
t.Run("StartedTwice", func(t *testing.T) {
t.Parallel()
h := harness.NewTestHarness(harness.LinearExecutionStrategy{})
h.Run(context.Background())
require.Panics(t, func() {
h.Run(context.Background())
})
})
t.Run("ResultsBeforeStart", func(t *testing.T) {
t.Parallel()
h := harness.NewTestHarness(harness.LinearExecutionStrategy{})
require.Panics(t, func() {
h.Results()
})
})
t.Run("ResultsBeforeFinish", func(t *testing.T) {
t.Parallel()
var (
started = make(chan struct{})
endRun = make(chan struct{})
testsEnded = make(chan struct{})
)
h := harness.NewTestHarness(harness.LinearExecutionStrategy{})
_ = h.AddRun("test", "1", testFns{
RunFn: func(_ context.Context, _ string, _ io.Writer) error {
close(started)
<-endRun
return nil
},
})
go func() {
defer close(testsEnded)
err := h.Run(context.Background())
assert.NoError(t, err)
}()
<-started
require.Panics(t, func() {
h.Results()
})
close(endRun)
<-testsEnded
_ = h.Results()
})
t.Run("CleanupBeforeStart", func(t *testing.T) {
t.Parallel()
h := harness.NewTestHarness(harness.LinearExecutionStrategy{})
require.Panics(t, func() {
h.Cleanup(context.Background())
})
})
t.Run("ClenaupBeforeFinish", func(t *testing.T) {
t.Parallel()
var (
started = make(chan struct{})
endRun = make(chan struct{})
testsEnded = make(chan struct{})
)
h := harness.NewTestHarness(harness.LinearExecutionStrategy{})
_ = h.AddRun("test", "1", testFns{
RunFn: func(_ context.Context, _ string, _ io.Writer) error {
close(started)
<-endRun
return nil
},
})
go func() {
defer close(testsEnded)
err := h.Run(context.Background())
assert.NoError(t, err)
}()
<-started
require.Panics(t, func() {
h.Cleanup(context.Background())
})
close(endRun)
<-testsEnded
err := h.Cleanup(context.Background())
require.NoError(t, err)
})
})
}
func fakeTestFns(err, cleanupErr error) testFns {
return testFns{
RunFn: func(_ context.Context, _ string, _ io.Writer) error {
return err
},
CleanupFn: func(_ context.Context, _ string) error {
return cleanupErr
},
}
}


@ -0,0 +1,72 @@
package harness
import "time"
// Results is the full compiled results for a set of test runs.
type Results struct {
TotalRuns int
TotalPass int
TotalFail int
Runs map[string]RunResult
}
// RunResult is the result of a single test run.
type RunResult struct {
FullID string
TestName string
ID string
Logs []byte
Error error
StartedAt time.Time
Duration time.Duration
}
// Result returns the result of the test run. Panics if the test run is not
// done yet.
func (r *TestRun) Result() RunResult {
select {
case <-r.done:
default:
panic("cannot get results of a test run that is not done yet")
}
return RunResult{
FullID: r.FullID(),
TestName: r.testName,
ID: r.id,
Logs: r.logs.Bytes(),
Error: r.err,
StartedAt: r.started,
Duration: r.duration,
}
}
// Results collates the results of all the test runs and returns them.
func (h *TestHarness) Results() Results {
if !h.started {
panic("harness has not started")
}
select {
case <-h.done:
default:
panic("harness has not finished")
}
results := Results{
TotalRuns: len(h.runs),
Runs: make(map[string]RunResult, len(h.runs)),
}
for _, run := range h.runs {
runRes := run.Result()
results.Runs[runRes.FullID] = runRes
if runRes.Error == nil {
results.TotalPass++
} else {
results.TotalFail++
}
}
return results
}

loadtest/harness/run.go Normal file

@ -0,0 +1,125 @@
package harness
import (
"bytes"
"context"
"io"
"time"
"golang.org/x/xerrors"
)
// Runnable is a test interface that can be executed by a TestHarness.
type Runnable interface {
// Run should use the passed context to handle cancellation and deadlines
// properly, and should only return once the test has been fully completed
// (no lingering goroutines, unless they are cleaned up by the accompanying
// cleanup function).
//
// The test ID (the part after the slash) is passed for identification if
// necessary, and the provided logs writer should be used for writing
// whatever may be necessary for debugging the test.
Run(ctx context.Context, id string, logs io.Writer) error
}
// Cleanable is an optional extension to Runnable that allows for post-test
// cleanup.
type Cleanable interface {
Runnable
// Cleanup should clean up any lingering resources from the test.
Cleanup(ctx context.Context, id string) error
}
// AddRun creates a new *TestRun with the given name, ID and Runnable, adds it
// to the harness and returns it. Panics if the harness has been started, or a
// test with the given run.FullID() is already registered.
//
// This is a convenience method that calls NewTestRun() and h.RegisterRun().
func (h *TestHarness) AddRun(testName string, id string, runner Runnable) *TestRun {
run := NewTestRun(testName, id, runner)
h.RegisterRun(run)
return run
}
// RegisterRun registers the given *TestRun with the harness. Panics if the
// harness has been started, or a test with the given run.FullID() is already
// registered.
func (h *TestHarness) RegisterRun(run *TestRun) {
h.mut.Lock()
defer h.mut.Unlock()
if h.started {
panic("cannot add a run after the harness has started")
}
if _, ok := h.runIDs[run.FullID()]; ok {
panic("cannot add test with duplicate full ID: " + run.FullID())
}
h.runIDs[run.FullID()] = struct{}{}
h.runs = append(h.runs, run)
}
// TestRun is a single test run and its accompanying state.
type TestRun struct {
testName string
id string
runner Runnable
logs *bytes.Buffer
done chan struct{}
started time.Time
duration time.Duration
err error
}
func NewTestRun(testName string, id string, runner Runnable) *TestRun {
return &TestRun{
testName: testName,
id: id,
runner: runner,
}
}
func (r *TestRun) FullID() string {
return r.testName + "/" + r.id
}
// Run executes the Run function with a self-managed log writer, panic handler,
// error recording and duration recording. The test error is returned.
func (r *TestRun) Run(ctx context.Context) (err error) {
r.logs = new(bytes.Buffer)
r.done = make(chan struct{})
defer close(r.done)
r.started = time.Now()
defer func() {
r.duration = time.Since(r.started)
r.err = err
}()
defer func() {
e := recover()
if e != nil {
err = xerrors.Errorf("panic: %v", e)
}
}()
err = r.runner.Run(ctx, r.id, r.logs)
//nolint:revive // we use named returns because we mutate it in a defer
return
}
func (r *TestRun) Cleanup(ctx context.Context) error {
c, ok := r.runner.(Cleanable)
if !ok {
return nil
}
select {
case <-r.done:
default:
// Test wasn't executed, so we don't need to clean up.
return nil
}
return c.Cleanup(ctx, r.id)
}


@ -0,0 +1,134 @@
package harness_test
import (
"context"
"fmt"
"io"
"sync/atomic"
"testing"
"github.com/stretchr/testify/require"
"github.com/coder/coder/loadtest/harness"
)
// testFns implements Runnable and Cleanable.
type testFns struct {
RunFn func(ctx context.Context, id string, logs io.Writer) error
// CleanupFn is optional if no cleanup is required.
CleanupFn func(ctx context.Context, id string) error
}
// Run implements Runnable.
func (fns testFns) Run(ctx context.Context, id string, logs io.Writer) error {
return fns.RunFn(ctx, id, logs)
}
// Cleanup implements Cleanable.
func (fns testFns) Cleanup(ctx context.Context, id string) error {
if fns.CleanupFn == nil {
return nil
}
return fns.CleanupFn(ctx, id)
}
func Test_TestRun(t *testing.T) {
t.Parallel()
t.Run("OK", func(t *testing.T) {
t.Parallel()
var (
name, id = "test", "1"
runCalled int64
cleanupCalled int64
testFns = testFns{
RunFn: func(ctx context.Context, id string, logs io.Writer) error {
atomic.AddInt64(&runCalled, 1)
return nil
},
CleanupFn: func(ctx context.Context, id string) error {
atomic.AddInt64(&cleanupCalled, 1)
return nil
},
}
)
run := harness.NewTestRun(name, id, testFns)
require.Equal(t, fmt.Sprintf("%s/%s", name, id), run.FullID())
err := run.Run(context.Background())
require.NoError(t, err)
require.EqualValues(t, 1, atomic.LoadInt64(&runCalled))
err = run.Cleanup(context.Background())
require.NoError(t, err)
require.EqualValues(t, 1, atomic.LoadInt64(&cleanupCalled))
})
t.Run("Cleanup", func(t *testing.T) {
t.Parallel()
t.Run("NoFn", func(t *testing.T) {
t.Parallel()
run := harness.NewTestRun("test", "1", testFns{
RunFn: func(ctx context.Context, id string, logs io.Writer) error {
return nil
},
CleanupFn: nil,
})
err := run.Cleanup(context.Background())
require.NoError(t, err)
})
t.Run("NotDone", func(t *testing.T) {
t.Parallel()
var cleanupCalled int64
run := harness.NewTestRun("test", "1", testFns{
RunFn: func(ctx context.Context, id string, logs io.Writer) error {
return nil
},
CleanupFn: func(ctx context.Context, id string) error {
atomic.AddInt64(&cleanupCalled, 1)
return nil
},
})
err := run.Cleanup(context.Background())
require.NoError(t, err)
require.EqualValues(t, 0, atomic.LoadInt64(&cleanupCalled))
})
})
t.Run("CatchesRunPanic", func(t *testing.T) {
t.Parallel()
testFns := testFns{
RunFn: func(ctx context.Context, id string, logs io.Writer) error {
panic(testPanicMessage)
},
}
run := harness.NewTestRun("test", "1", testFns)
err := run.Run(context.Background())
require.Error(t, err)
require.ErrorContains(t, err, "panic")
require.ErrorContains(t, err, testPanicMessage)
})
t.Run("ResultPanicsWhenNotDone", func(t *testing.T) {
t.Parallel()
run := harness.NewTestRun("test", "1", testFns{})
require.Panics(t, func() {
_ = run.Result()
})
})
}


@ -0,0 +1,157 @@
package harness
import (
"context"
cryptorand "crypto/rand"
"encoding/binary"
"io"
"math/rand"
"sync"
"time"
)
// LinearExecutionStrategy executes all test runs in a linear fashion, one after
// the other.
type LinearExecutionStrategy struct{}
var _ ExecutionStrategy = LinearExecutionStrategy{}
// Execute implements ExecutionStrategy.
func (LinearExecutionStrategy) Execute(ctx context.Context, runs []*TestRun) error {
for _, run := range runs {
_ = run.Run(ctx)
}
return nil
}
// ConcurrentExecutionStrategy executes all test runs concurrently without any
// regard for parallelism.
type ConcurrentExecutionStrategy struct{}
var _ ExecutionStrategy = ConcurrentExecutionStrategy{}
// Execute implements ExecutionStrategy.
func (ConcurrentExecutionStrategy) Execute(ctx context.Context, runs []*TestRun) error {
var wg sync.WaitGroup
for _, run := range runs {
run := run
wg.Add(1)
go func() {
defer wg.Done()
_ = run.Run(ctx)
}()
}
wg.Wait()
return nil
}
// ParallelExecutionStrategy executes all test runs concurrently, but limits the
// number of concurrent runs to the given limit.
type ParallelExecutionStrategy struct {
Limit int
}
var _ ExecutionStrategy = ParallelExecutionStrategy{}
// Execute implements ExecutionStrategy.
func (p ParallelExecutionStrategy) Execute(ctx context.Context, runs []*TestRun) error {
var wg sync.WaitGroup
sem := make(chan struct{}, p.Limit)
defer close(sem)
for _, run := range runs {
run := run
wg.Add(1)
go func() {
defer func() {
<-sem
wg.Done()
}()
sem <- struct{}{}
_ = run.Run(ctx)
}()
}
wg.Wait()
return nil
}
// TimeoutExecutionStrategyWrapper is an ExecutionStrategy that wraps another
// ExecutionStrategy and applies a timeout to each test run's context.
type TimeoutExecutionStrategyWrapper struct {
Timeout time.Duration
Inner ExecutionStrategy
}
var _ ExecutionStrategy = TimeoutExecutionStrategyWrapper{}
type timeoutRunnerWrapper struct {
timeout time.Duration
inner Runnable
}
var _ Runnable = timeoutRunnerWrapper{}
func (t timeoutRunnerWrapper) Run(ctx context.Context, id string, logs io.Writer) error {
ctx, cancel := context.WithTimeout(ctx, t.timeout)
defer cancel()
return t.inner.Run(ctx, id, logs)
}
// Execute implements ExecutionStrategy.
func (t TimeoutExecutionStrategyWrapper) Execute(ctx context.Context, runs []*TestRun) error {
for _, run := range runs {
oldRunner := run.runner
run.runner = timeoutRunnerWrapper{
timeout: t.Timeout,
inner: oldRunner,
}
}
return t.Inner.Execute(ctx, runs)
}
// ShuffleExecutionStrategyWrapper is an ExecutionStrategy that wraps another
// ExecutionStrategy and shuffles the order of the test runs before executing.
type ShuffleExecutionStrategyWrapper struct {
Inner ExecutionStrategy
}
var _ ExecutionStrategy = ShuffleExecutionStrategyWrapper{}
type cryptoRandSource struct{}
var _ rand.Source = cryptoRandSource{}
func (cryptoRandSource) Int63() int64 {
var b [8]byte
_, err := cryptorand.Read(b[:])
if err != nil {
panic(err)
}
// mask off sign bit to ensure positive number
return int64(binary.LittleEndian.Uint64(b[:]) & (1<<63 - 1))
}
func (cryptoRandSource) Seed(_ int64) {}
// Execute implements ExecutionStrategy.
func (s ShuffleExecutionStrategyWrapper) Execute(ctx context.Context, runs []*TestRun) error {
shuffledRuns := make([]*TestRun, len(runs))
copy(shuffledRuns, runs)
//nolint:gosec // gosec thinks we're using an insecure RNG, but we're not.
src := rand.New(cryptoRandSource{})
for i := range shuffledRuns {
j := src.Intn(i + 1)
shuffledRuns[i], shuffledRuns[j] = shuffledRuns[j], shuffledRuns[i]
}
return s.Inner.Execute(ctx, shuffledRuns)
}


@ -0,0 +1,183 @@
package harness_test
import (
"context"
"io"
"sort"
"strconv"
"sync/atomic"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/xerrors"
"github.com/coder/coder/loadtest/harness"
)
//nolint:paralleltest // this test uses timings to determine if it's working
func Test_LinearExecutionStrategy(t *testing.T) {
var (
lastSeenI int64 = -1
count int64
)
runs := strategyTestData(100, func(_ context.Context, i int, _ io.Writer) error {
atomic.AddInt64(&count, 1)
swapped := atomic.CompareAndSwapInt64(&lastSeenI, int64(i-1), int64(i))
assert.True(t, swapped)
time.Sleep(2 * time.Millisecond)
return nil
})
strategy := harness.LinearExecutionStrategy{}
err := strategy.Execute(context.Background(), runs)
require.NoError(t, err)
require.EqualValues(t, 100, atomic.LoadInt64(&count))
lastStartTime := time.Time{}
for _, run := range runs {
startTime := run.Result().StartedAt
require.True(t, startTime.After(lastStartTime))
lastStartTime = startTime
}
}
//nolint:paralleltest // this test uses timings to determine if it's working
func Test_ConcurrentExecutionStrategy(t *testing.T) {
runs := strategyTestData(10, func(_ context.Context, i int, _ io.Writer) error {
time.Sleep(1 * time.Second)
return nil
})
strategy := harness.ConcurrentExecutionStrategy{}
startTime := time.Now()
err := strategy.Execute(context.Background(), runs)
require.NoError(t, err)
// Should've taken at least 900ms to run but less than 5 seconds.
require.True(t, time.Since(startTime) > 900*time.Millisecond)
require.True(t, time.Since(startTime) < 5*time.Second)
// All tests should've started within 500 ms of the start time.
endTime := startTime.Add(500 * time.Millisecond)
for _, run := range runs {
runStartTime := run.Result().StartedAt
require.WithinRange(t, runStartTime, startTime, endTime)
}
}
//nolint:paralleltest // this test uses timings to determine if it's working
func Test_ParallelExecutionStrategy(t *testing.T) {
runs := strategyTestData(10, func(_ context.Context, _ int, _ io.Writer) error {
time.Sleep(1 * time.Second)
return nil
})
strategy := harness.ParallelExecutionStrategy{
Limit: 5,
}
startTime := time.Now()
time.Sleep(time.Millisecond)
err := strategy.Execute(context.Background(), runs)
require.NoError(t, err)
// Should've taken at least 1900ms to run but less than 8 seconds.
require.True(t, time.Since(startTime) > 1900*time.Millisecond)
require.True(t, time.Since(startTime) < 8*time.Second)
// Any five of the tests should've started within 500 ms of the start time.
endTime := startTime.Add(500 * time.Millisecond)
withinRange := 0
for _, run := range runs {
runStartTime := run.Result().StartedAt
if runStartTime.After(startTime) && runStartTime.Before(endTime) {
withinRange++
}
}
require.Equal(t, 5, withinRange)
// The other 5 tests should've started between 900ms and 1.5s after the
// start time.
startTime = startTime.Add(900 * time.Millisecond)
endTime = startTime.Add(600 * time.Millisecond)
withinRange = 0
for _, run := range runs {
runStartTime := run.Result().StartedAt
if runStartTime.After(startTime) && runStartTime.Before(endTime) {
withinRange++
}
}
require.Equal(t, 5, withinRange)
}
//nolint:paralleltest // this test uses timings to determine if it's working
func Test_TimeoutExecutionStrategy(t *testing.T) {
runs := strategyTestData(1, func(ctx context.Context, _ int, _ io.Writer) error {
ticker := time.NewTicker(500 * time.Millisecond)
defer ticker.Stop()
select {
case <-ctx.Done():
return nil
case <-ticker.C:
return xerrors.New("context wasn't canceled")
}
})
strategy := harness.TimeoutExecutionStrategyWrapper{
Timeout: 100 * time.Millisecond,
Inner: harness.LinearExecutionStrategy{},
}
err := strategy.Execute(context.Background(), runs)
require.NoError(t, err)
for _, run := range runs {
require.NoError(t, run.Result().Error)
}
}
//nolint:paralleltest // this test uses timings to determine if it's working
func Test_ShuffleExecutionStrategyWrapper(t *testing.T) {
runs := strategyTestData(100000, func(_ context.Context, i int, _ io.Writer) error {
// t.Logf("run %d", i)
return nil
})
strategy := harness.ShuffleExecutionStrategyWrapper{
Inner: harness.LinearExecutionStrategy{},
}
err := strategy.Execute(context.Background(), runs)
require.NoError(t, err)
// Ensure not in order by sorting the start time of each run.
unsortedTimes := make([]time.Time, len(runs))
for i, run := range runs {
unsortedTimes[i] = run.Result().StartedAt
}
sortedTimes := make([]time.Time, len(runs))
copy(sortedTimes, unsortedTimes)
sort.Slice(sortedTimes, func(i, j int) bool {
return sortedTimes[i].Before(sortedTimes[j])
})
require.NotEqual(t, unsortedTimes, sortedTimes)
}
func strategyTestData(count int, runFn func(ctx context.Context, i int, logs io.Writer) error) []*harness.TestRun {
out := make([]*harness.TestRun, count)
for i := 0; i < count; i++ {
i := i
out[i] = harness.NewTestRun("test", strconv.Itoa(i), testFns{
RunFn: func(ctx context.Context, id string, logs io.Writer) error {
if runFn != nil {
return runFn(ctx, i, logs)
}
return nil
},
})
}
return out
}


@ -0,0 +1,37 @@
package placebo
import (
"golang.org/x/xerrors"
"github.com/coder/coder/coderd/httpapi"
)
type Config struct {
// Sleep is how long to sleep for. If unspecified, the test run will finish
// instantly.
Sleep httpapi.Duration `json:"sleep"`
// Jitter is the maximum amount of jitter to add to the sleep duration. The
// sleep value will be increased by a random value between 0 and jitter if
// jitter is greater than 0.
Jitter httpapi.Duration `json:"jitter"`
// FailureChance is the chance that the test will fail. The value must be
// between 0 and 1.
FailureChance float64 `json:"failure_chance"`
}
func (c Config) Validate() error {
if c.Sleep < 0 {
return xerrors.New("sleep must be set to a positive value")
}
if c.Jitter < 0 {
return xerrors.New("jitter must be set to a positive value")
}
if c.Jitter > 0 && c.Sleep == 0 {
return xerrors.New("jitter must be 0 if sleep is 0")
}
if c.FailureChance < 0 || c.FailureChance > 1 {
return xerrors.New("failure_chance must be between 0 and 1")
}
return nil
}


@ -0,0 +1,115 @@
package placebo_test
import (
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/coder/coder/coderd/httpapi"
"github.com/coder/coder/loadtest/placebo"
)
func Test_Config(t *testing.T) {
t.Parallel()
cases := []struct {
name string
config placebo.Config
errContains string
}{
{
name: "Empty",
config: placebo.Config{
Sleep: 0,
Jitter: 0,
FailureChance: 0,
},
},
{
name: "Sleep",
config: placebo.Config{
Sleep: httpapi.Duration(1 * time.Second),
Jitter: 0,
FailureChance: 0,
},
},
{
name: "SleepAndJitter",
config: placebo.Config{
Sleep: httpapi.Duration(1 * time.Second),
Jitter: httpapi.Duration(1 * time.Second),
FailureChance: 0,
},
},
{
name: "FailureChance",
config: placebo.Config{
Sleep: 0,
Jitter: 0,
FailureChance: 0.5,
},
},
{
name: "NegativeSleep",
config: placebo.Config{
Sleep: httpapi.Duration(-1 * time.Second),
Jitter: 0,
FailureChance: 0,
},
errContains: "sleep must be set to a positive value",
},
{
name: "NegativeJitter",
config: placebo.Config{
Sleep: 0,
Jitter: httpapi.Duration(-1 * time.Second),
FailureChance: 0,
},
errContains: "jitter must be set to a positive value",
},
{
name: "JitterWithoutSleep",
config: placebo.Config{
Sleep: 0,
Jitter: httpapi.Duration(1 * time.Second),
FailureChance: 0,
},
errContains: "jitter must be 0 if sleep is 0",
},
{
name: "NegativeFailureChance",
config: placebo.Config{
Sleep: 0,
Jitter: 0,
FailureChance: -0.1,
},
errContains: "failure_chance must be between 0 and 1",
},
{
name: "FailureChanceTooLarge",
config: placebo.Config{
Sleep: 0,
Jitter: 0,
FailureChance: 1.1,
},
errContains: "failure_chance must be between 0 and 1",
},
}
for _, c := range cases {
c := c
t.Run(c.name, func(t *testing.T) {
t.Parallel()
err := c.config.Validate()
if c.errContains != "" {
require.Error(t, err)
require.Contains(t, err.Error(), c.errContains)
} else {
require.NoError(t, err)
}
})
}
}

loadtest/placebo/run.go Normal file

@ -0,0 +1,66 @@
package placebo
import (
"context"
"fmt"
"io"
"math/rand"
"time"
"golang.org/x/xerrors"
"github.com/coder/coder/loadtest/harness"
)
type Runner struct {
cfg Config
}
var _ harness.Runnable = &Runner{}
// NewRunner creates a new placebo loadtest Runner. The test will sleep for the
// specified duration if set, and will add a random amount of jitter between 0
// and the specified jitter value if set.
func NewRunner(cfg Config) *Runner {
return &Runner{
cfg: cfg,
}
}
// Run implements Runnable.
func (r *Runner) Run(ctx context.Context, _ string, logs io.Writer) error {
sleepDur := time.Duration(r.cfg.Sleep)
if r.cfg.Jitter > 0 {
//nolint:gosec // not used for crypto
sleepDur += time.Duration(rand.Int63n(int64(r.cfg.Jitter)))
// This makes it easier to tell if jitter was applied in tests.
sleepDur += time.Millisecond
}
if sleepDur > 0 {
_, _ = fmt.Fprintf(logs, "sleeping for %s\n", sleepDur)
t := time.NewTimer(sleepDur)
defer t.Stop()
select {
case <-ctx.Done():
return ctx.Err()
case <-t.C:
}
}
if r.cfg.FailureChance > 0 {
_, _ = fmt.Fprintf(logs, "failure chance is %f\n", r.cfg.FailureChance)
_, _ = fmt.Fprintln(logs, "rolling the dice of fate...")
//nolint:gosec // not used for crypto
roll := rand.Float64()
_, _ = fmt.Fprintf(logs, "rolled: %f\n", roll)
if roll < r.cfg.FailureChance {
_, _ = fmt.Fprintln(logs, ":(")
return xerrors.New("test failed due to configured failure chance")
}
}
return nil
}


@ -0,0 +1,95 @@
package placebo_test
import (
"bytes"
"context"
"io"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/coder/coder/coderd/httpapi"
"github.com/coder/coder/loadtest/placebo"
)
func Test_Runner(t *testing.T) {
t.Parallel()
t.Run("NoSleep", func(t *testing.T) {
t.Parallel()
r := placebo.NewRunner(placebo.Config{})
start := time.Now()
logs := bytes.NewBuffer(nil)
err := r.Run(context.Background(), "", logs)
require.NoError(t, err)
require.WithinDuration(t, time.Now(), start, 100*time.Millisecond)
require.Empty(t, logs.String())
})
t.Run("Sleep", func(t *testing.T) {
t.Parallel()
r := placebo.NewRunner(placebo.Config{
Sleep: httpapi.Duration(100 * time.Millisecond),
})
start := time.Now()
logs := bytes.NewBuffer(nil)
err := r.Run(context.Background(), "", logs)
require.NoError(t, err)
require.WithinRange(t, time.Now(), start.Add(90*time.Millisecond), start.Add(200*time.Millisecond))
require.Contains(t, logs.String(), "sleeping for 100ms")
})
t.Run("Jitter", func(t *testing.T) {
t.Parallel()
r := placebo.NewRunner(placebo.Config{
Sleep: httpapi.Duration(100 * time.Millisecond),
Jitter: httpapi.Duration(100 * time.Millisecond),
})
start := time.Now()
logs := bytes.NewBuffer(nil)
err := r.Run(context.Background(), "", logs)
require.NoError(t, err)
require.WithinRange(t, time.Now(), start.Add(90*time.Millisecond), start.Add(300*time.Millisecond))
logsStr := logs.String()
require.Contains(t, logsStr, "sleeping for")
require.NotContains(t, logsStr, "sleeping for 100ms")
})
t.Run("Timeout", func(t *testing.T) {
t.Parallel()
r := placebo.NewRunner(placebo.Config{
Sleep: httpapi.Duration(100 * time.Millisecond),
})
//nolint:gocritic // we're testing timeouts here so we want specific values
ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
defer cancel()
err := r.Run(ctx, "", io.Discard)
require.Error(t, err)
require.ErrorIs(t, err, context.DeadlineExceeded)
})
t.Run("FailureChance", func(t *testing.T) {
t.Parallel()
r := placebo.NewRunner(placebo.Config{
FailureChance: 1,
})
logs := bytes.NewBuffer(nil)
err := r.Run(context.Background(), "", logs)
require.Error(t, err)
require.Contains(t, logs.String(), ":(")
})
}


@ -0,0 +1,39 @@
package workspacebuild
import (
"github.com/google/uuid"
"golang.org/x/xerrors"
"github.com/coder/coder/codersdk"
)
type Config struct {
// OrganizationID is the ID of the organization to create the workspace in.
OrganizationID uuid.UUID `json:"organization_id"`
// UserID is the ID of the user to run the test as.
UserID string `json:"user_id"`
// Request is the request to send to the Coder API to create the workspace.
// request.template_id must be set. A name will be generated if not
// specified.
Request codersdk.CreateWorkspaceRequest `json:"request"`
}
func (c Config) Validate() error {
if c.OrganizationID == uuid.Nil {
return xerrors.New("organization_id must be set")
}
if c.UserID == "" {
return xerrors.New("user_id must be set")
}
if c.UserID != codersdk.Me {
_, err := uuid.Parse(c.UserID)
if err != nil {
return xerrors.Errorf("user_id must be %q or a valid UUID: %w", codersdk.Me, err)
}
}
if c.Request.TemplateID == uuid.Nil {
return xerrors.New("request.template_id must be set")
}
return nil
}


@ -0,0 +1,94 @@
package workspacebuild_test
import (
"testing"
"github.com/google/uuid"
"github.com/stretchr/testify/require"
"github.com/coder/coder/codersdk"
"github.com/coder/coder/loadtest/workspacebuild"
)
func Test_Config(t *testing.T) {
t.Parallel()
id := uuid.Must(uuid.NewRandom())
cases := []struct {
name string
config workspacebuild.Config
errContains string
}{
{
name: "NoOrganizationID",
config: workspacebuild.Config{
OrganizationID: uuid.Nil,
UserID: id.String(),
Request: codersdk.CreateWorkspaceRequest{
TemplateID: id,
},
},
errContains: "organization_id must be set",
},
{
name: "NoUserID",
config: workspacebuild.Config{
OrganizationID: id,
UserID: "",
Request: codersdk.CreateWorkspaceRequest{
TemplateID: id,
},
},
errContains: "user_id must be set",
},
{
name: "UserIDNotUUID",
config: workspacebuild.Config{
OrganizationID: id,
UserID: "blah",
Request: codersdk.CreateWorkspaceRequest{
TemplateID: id,
},
},
errContains: "user_id must be \"me\" or a valid UUID",
},
{
name: "NoTemplateID",
config: workspacebuild.Config{
OrganizationID: id,
UserID: id.String(),
Request: codersdk.CreateWorkspaceRequest{
TemplateID: uuid.Nil,
},
},
errContains: "request.template_id must be set",
},
{
name: "UserMe",
config: workspacebuild.Config{
OrganizationID: id,
UserID: "me",
Request: codersdk.CreateWorkspaceRequest{
TemplateID: id,
},
},
},
}
for _, c := range cases {
c := c
t.Run(c.name, func(t *testing.T) {
t.Parallel()
err := c.config.Validate()
if c.errContains != "" {
require.Error(t, err)
require.Contains(t, err.Error(), c.errContains)
} else {
require.NoError(t, err)
}
})
}
}


@ -0,0 +1,193 @@
package workspacebuild
import (
"context"
"fmt"
"io"
"time"
"github.com/google/uuid"
"golang.org/x/xerrors"
"github.com/coder/coder/codersdk"
"github.com/coder/coder/cryptorand"
"github.com/coder/coder/loadtest/harness"
)
type Runner struct {
client *codersdk.Client
cfg Config
workspaceID uuid.UUID
}
var _ harness.Runnable = &Runner{}
var _ harness.Cleanable = &Runner{}
func NewRunner(client *codersdk.Client, cfg Config) *Runner {
return &Runner{
client: client,
cfg: cfg,
}
}
// Run implements Runnable.
func (r *Runner) Run(ctx context.Context, _ string, logs io.Writer) error {
req := r.cfg.Request
if req.Name == "" {
randName, err := cryptorand.HexString(8)
if err != nil {
return xerrors.Errorf("generate random name for workspace: %w", err)
}
req.Name = "test-" + randName
}
after := time.Now()
workspace, err := r.client.CreateWorkspace(ctx, r.cfg.OrganizationID, r.cfg.UserID, req)
if err != nil {
return xerrors.Errorf("create workspace: %w", err)
}
r.workspaceID = workspace.ID
err = waitForBuild(ctx, logs, r.client, workspace.LatestBuild.ID, after)
if err != nil {
return xerrors.Errorf("wait for build: %w", err)
}
_, _ = fmt.Fprintln(logs, "")
err = waitForAgents(ctx, logs, r.client, workspace.ID)
if err != nil {
return xerrors.Errorf("wait for agent: %w", err)
}
return nil
}
// Cleanup implements Cleanable.
func (r *Runner) Cleanup(ctx context.Context, _ string) error {
if r.workspaceID == uuid.Nil {
return nil
}
after := time.Now()
build, err := r.client.CreateWorkspaceBuild(ctx, r.workspaceID, codersdk.CreateWorkspaceBuildRequest{
Transition: codersdk.WorkspaceTransitionDelete,
})
if err != nil {
return xerrors.Errorf("delete workspace: %w", err)
}
// TODO: capture these logs
logs := io.Discard
err = waitForBuild(ctx, logs, r.client, build.ID, after)
if err != nil {
return xerrors.Errorf("wait for build: %w", err)
}
return nil
}
func waitForBuild(ctx context.Context, w io.Writer, client *codersdk.Client, buildID uuid.UUID, after time.Time) error {
_, _ = fmt.Fprint(w, "Build is currently queued...")
// Wait for build to start.
for {
build, err := client.WorkspaceBuild(ctx, buildID)
if err != nil {
return xerrors.Errorf("fetch build: %w", err)
}
if build.Job.Status != codersdk.ProvisionerJobPending {
break
}
_, _ = fmt.Fprint(w, ".")
time.Sleep(500 * time.Millisecond)
}
_, _ = fmt.Fprintln(w, "\nBuild started! Streaming logs below:")
logs, closer, err := client.WorkspaceBuildLogsAfter(ctx, buildID, after)
if err != nil {
return xerrors.Errorf("start streaming build logs: %w", err)
}
defer closer.Close()
currentStage := ""
for {
select {
case <-ctx.Done():
return ctx.Err()
case log, ok := <-logs:
if !ok {
build, err := client.WorkspaceBuild(ctx, buildID)
if err != nil {
return xerrors.Errorf("fetch build: %w", err)
}
_, _ = fmt.Fprintln(w, "")
switch build.Job.Status {
case codersdk.ProvisionerJobSucceeded:
_, _ = fmt.Fprintln(w, "\nBuild succeeded!")
return nil
case codersdk.ProvisionerJobFailed:
_, _ = fmt.Fprintf(w, "\nBuild failed with error %q.\nSee logs above for more details.\n", build.Job.Error)
return xerrors.Errorf("build failed with status %q: %s", build.Job.Status, build.Job.Error)
case codersdk.ProvisionerJobCanceled:
_, _ = fmt.Fprintln(w, "\nBuild canceled.")
return xerrors.New("build canceled")
default:
_, _ = fmt.Fprintf(w, "\nLogs disconnected with unexpected job status %q and error %q.\n", build.Job.Status, build.Job.Error)
return xerrors.Errorf("logs disconnected with unexpected job status %q and error %q", build.Job.Status, build.Job.Error)
}
}
if log.Stage != currentStage {
currentStage = log.Stage
_, _ = fmt.Fprintf(w, "\n%s\n", currentStage)
}
level := "unknown"
if log.Level != "" {
level = string(log.Level)
}
_, _ = fmt.Fprintf(w, "\t%s:\t%s\n", level, log.Output)
}
}
}
func waitForAgents(ctx context.Context, w io.Writer, client *codersdk.Client, workspaceID uuid.UUID) error {
_, _ = fmt.Fprint(w, "Waiting for agents to connect...\n\n")
for {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
workspace, err := client.Workspace(ctx, workspaceID)
if err != nil {
return xerrors.Errorf("fetch workspace: %w", err)
}
ok := true
for _, res := range workspace.LatestBuild.Resources {
for _, agent := range res.Agents {
if agent.Status != codersdk.WorkspaceAgentConnected {
ok = false
}
_, _ = fmt.Fprintf(w, "\tAgent %q is %s\n", agent.Name, agent.Status)
}
}
if ok {
break
}
_, _ = fmt.Fprintln(w, "")
time.Sleep(1 * time.Second)
}
_, _ = fmt.Fprint(w, "\nAgents connected!\n\n")
return nil
}


@ -0,0 +1,224 @@
package workspacebuild_test
import (
"bytes"
"context"
"fmt"
"testing"
"time"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"cdr.dev/slog"
"cdr.dev/slog/sloggers/slogtest"
"github.com/coder/coder/agent"
"github.com/coder/coder/coderd/coderdtest"
"github.com/coder/coder/codersdk"
"github.com/coder/coder/loadtest/workspacebuild"
"github.com/coder/coder/provisioner/echo"
"github.com/coder/coder/provisionersdk/proto"
"github.com/coder/coder/testutil"
)
func Test_Runner(t *testing.T) {
t.Parallel()
t.Run("OK", func(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
defer cancel()
client := coderdtest.New(t, &coderdtest.Options{
IncludeProvisionerDaemon: true,
})
user := coderdtest.CreateFirstUser(t, client)
authToken1 := uuid.NewString()
authToken2 := uuid.NewString()
authToken3 := uuid.NewString()
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, &echo.Responses{
Parse: echo.ParseComplete,
ProvisionDryRun: echo.ProvisionComplete,
Provision: []*proto.Provision_Response{
{
Type: &proto.Provision_Response_Log{
Log: &proto.Log{
Level: proto.LogLevel_INFO,
Output: "hello from logs",
},
},
},
{
Type: &proto.Provision_Response_Complete{
Complete: &proto.Provision_Complete{
Resources: []*proto.Resource{
{
Name: "example1",
Type: "aws_instance",
Agents: []*proto.Agent{
{
Id: uuid.NewString(),
Name: "agent1",
Auth: &proto.Agent_Token{
Token: authToken1,
},
Apps: []*proto.App{},
},
{
Id: uuid.NewString(),
Name: "agent2",
Auth: &proto.Agent_Token{
Token: authToken2,
},
Apps: []*proto.App{},
},
},
},
{
Name: "example2",
Type: "aws_instance",
Agents: []*proto.Agent{
{
Id: uuid.NewString(),
Name: "agent3",
Auth: &proto.Agent_Token{
Token: authToken3,
},
Apps: []*proto.App{},
},
},
},
},
},
},
},
},
})
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
coderdtest.AwaitTemplateVersionJob(t, client, version.ID)
// Since the runner creates the workspace on its own, we have to keep
// listing workspaces until we find it, then wait for the build to finish,
// then start the agents.
go func() {
var workspace codersdk.Workspace
for {
workspaces, err := client.Workspaces(ctx, codersdk.WorkspaceFilter{
Owner: codersdk.Me,
})
if !assert.NoError(t, err) {
return
}
if len(workspaces) == 1 {
workspace = workspaces[0]
break
}
time.Sleep(100 * time.Millisecond)
}
coderdtest.AwaitWorkspaceBuildJob(t, client, workspace.LatestBuild.ID)
// Start the three agents.
for i, authToken := range []string{authToken1, authToken2, authToken3} {
i := i + 1
agentClient := codersdk.New(client.URL)
agentClient.SessionToken = authToken
agentCloser := agent.New(agent.Options{
Client: agentClient,
Logger: slogtest.Make(t, nil).
Named(fmt.Sprintf("agent%d", i)).
Leveled(slog.LevelWarn),
})
t.Cleanup(func() {
_ = agentCloser.Close()
})
}
coderdtest.AwaitWorkspaceAgents(t, client, workspace.ID)
}()
runner := workspacebuild.NewRunner(client, workspacebuild.Config{
OrganizationID: user.OrganizationID,
UserID: codersdk.Me,
Request: codersdk.CreateWorkspaceRequest{
TemplateID: template.ID,
},
})
logs := bytes.NewBuffer(nil)
err := runner.Run(ctx, "1", logs)
logsStr := logs.String()
t.Log("Runner logs:\n\n" + logsStr)
require.NoError(t, err)
// Look for strings in the logs.
require.Contains(t, logsStr, "hello from logs")
require.Contains(t, logsStr, `"agent1" is connected`)
require.Contains(t, logsStr, `"agent2" is connected`)
require.Contains(t, logsStr, `"agent3" is connected`)
// Find the workspace.
workspaces, err := client.Workspaces(ctx, codersdk.WorkspaceFilter{
Owner: codersdk.Me,
})
require.NoError(t, err)
require.Len(t, workspaces, 1)
coderdtest.AwaitWorkspaceBuildJob(t, client, workspaces[0].LatestBuild.ID)
coderdtest.AwaitWorkspaceAgents(t, client, workspaces[0].ID)
err = runner.Cleanup(ctx, "1")
require.NoError(t, err)
})
t.Run("FailedBuild", func(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
defer cancel()
client := coderdtest.New(t, &coderdtest.Options{
IncludeProvisionerDaemon: true,
})
user := coderdtest.CreateFirstUser(t, client)
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, &echo.Responses{
Parse: echo.ParseComplete,
ProvisionDryRun: echo.ProvisionComplete,
Provision: []*proto.Provision_Response{
{
Type: &proto.Provision_Response_Complete{
Complete: &proto.Provision_Complete{
Error: "test error",
},
},
},
},
})
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
coderdtest.AwaitTemplateVersionJob(t, client, version.ID)
runner := workspacebuild.NewRunner(client, workspacebuild.Config{
OrganizationID: user.OrganizationID,
UserID: codersdk.Me,
Request: codersdk.CreateWorkspaceRequest{
TemplateID: template.ID,
},
})
logs := bytes.NewBuffer(nil)
err := runner.Run(ctx, "1", logs)
logsStr := logs.String()
t.Log("Runner logs:\n\n" + logsStr)
require.Error(t, err)
require.ErrorContains(t, err, "test error")
})
}