coder/codersdk/templates.go

package codersdk
import (
"context"
"encoding/json"
"fmt"
"net/http"
"strings"
"time"
"github.com/google/uuid"
"golang.org/x/xerrors"
)
// Template is the JSON representation of a Coder template. This type matches the
// database object for now, but is abstracted for ease of change later on.
type Template struct {
ID uuid.UUID `json:"id" format:"uuid"`
CreatedAt time.Time `json:"created_at" format:"date-time"`
UpdatedAt time.Time `json:"updated_at" format:"date-time"`
OrganizationID uuid.UUID `json:"organization_id" format:"uuid"`
Name string `json:"name"`
DisplayName string `json:"display_name"`
Provisioner ProvisionerType `json:"provisioner" enums:"terraform"`
ActiveVersionID uuid.UUID `json:"active_version_id" format:"uuid"`
// ActiveUserCount is set to -1 when loading.
ActiveUserCount int `json:"active_user_count"`
BuildTimeStats TemplateBuildTimeStats `json:"build_time_stats"`
Description string `json:"description"`
Deprecated bool `json:"deprecated"`
DeprecationMessage string `json:"deprecation_message"`
Icon string `json:"icon"`
DefaultTTLMillis int64 `json:"default_ttl_ms"`
ActivityBumpMillis int64 `json:"activity_bump_ms"`
// AutostopRequirement and AutostartRequirement are enterprise features. Their
// values are only used if your license is entitled to use the advanced
// template scheduling feature.
AutostopRequirement TemplateAutostopRequirement `json:"autostop_requirement"`
AutostartRequirement TemplateAutostartRequirement `json:"autostart_requirement"`
CreatedByID uuid.UUID `json:"created_by_id" format:"uuid"`
CreatedByName string `json:"created_by_name"`
// AllowUserAutostart and AllowUserAutostop are enterprise-only. Their
// values are only used if your license is entitled to use the advanced
// template scheduling feature.
AllowUserAutostart bool `json:"allow_user_autostart"`
AllowUserAutostop bool `json:"allow_user_autostop"`
AllowUserCancelWorkspaceJobs bool `json:"allow_user_cancel_workspace_jobs"`
// FailureTTLMillis, TimeTilDormantMillis, and TimeTilDormantAutoDeleteMillis are enterprise-only. Their
// values are only used if your license is entitled to use the advanced
// template scheduling feature.
FailureTTLMillis int64 `json:"failure_ttl_ms"`
TimeTilDormantMillis int64 `json:"time_til_dormant_ms"`
TimeTilDormantAutoDeleteMillis int64 `json:"time_til_dormant_autodelete_ms"`
// RequireActiveVersion mandates that workspaces are built with the active
// template version.
RequireActiveVersion bool `json:"require_active_version"`
MaxPortShareLevel WorkspaceAgentPortShareLevel `json:"max_port_share_level"`
}
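// The *Millis fields above carry durations as raw millisecond counts. A
// minimal caller-side sketch of converting one of them (this helper is
// illustrative and not part of codersdk):
//
//	func templateDefaultTTL(t Template) time.Duration {
//		return time.Duration(t.DefaultTTLMillis) * time.Millisecond
//	}
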
// WeekdaysToBitmap converts a list of weekdays to a bitmap in accordance with
// the schedule package's rules. The 0th bit is Monday, ..., the 6th bit is
// Sunday. The 7th bit is unused.
func WeekdaysToBitmap(days []string) (uint8, error) {
var bitmap uint8
for _, day := range days {
switch strings.ToLower(day) {
case "monday":
bitmap |= 1 << 0
case "tuesday":
bitmap |= 1 << 1
case "wednesday":
bitmap |= 1 << 2
case "thursday":
bitmap |= 1 << 3
case "friday":
bitmap |= 1 << 4
case "saturday":
bitmap |= 1 << 5
case "sunday":
bitmap |= 1 << 6
default:
return 0, xerrors.Errorf("invalid weekday %q", day)
}
}
return bitmap, nil
}
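// Illustrative use of WeekdaysToBitmap (the day names are arbitrary):
//
//	bitmap, err := WeekdaysToBitmap([]string{"monday", "friday"})
//	if err != nil {
//		// handle invalid weekday name
//	}
//	// bitmap == 0b0010001: bit 0 (Monday) and bit 4 (Friday) are set.
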
// BitmapToWeekdays converts a bitmap to a list of weekdays in accordance with
// the schedule package's rules (see above).
func BitmapToWeekdays(bitmap uint8) []string {
days := []string{}
for i := 0; i < 7; i++ {
if bitmap&(1<<i) != 0 {
switch i {
case 0:
days = append(days, "monday")
case 1:
days = append(days, "tuesday")
case 2:
days = append(days, "wednesday")
case 3:
days = append(days, "thursday")
case 4:
days = append(days, "friday")
case 5:
days = append(days, "saturday")
case 6:
days = append(days, "sunday")
}
}
}
return days
}
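// BitmapToWeekdays inverts WeekdaysToBitmap for any bitmap it produced, e.g.
// (illustrative):
//
//	days := BitmapToWeekdays(0b0010001) // []string{"monday", "friday"}
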
type TemplateAutostartRequirement struct {
// DaysOfWeek is a list of days of the week in which autostart is allowed
// to happen. If no days are specified, autostart is not allowed.
DaysOfWeek []string `json:"days_of_week" enums:"monday,tuesday,wednesday,thursday,friday,saturday,sunday"`
}
type TemplateAutostopRequirement struct {
// DaysOfWeek is a list of days of the week on which restarts are required.
// Restarts happen within the user's quiet hours (in their configured
// timezone). If no days are specified, restarts are not required. Weekdays
// cannot be specified twice.
//
// Restarts will only happen on weekdays in this list on weeks which line up
// with Weeks.
DaysOfWeek []string `json:"days_of_week" enums:"monday,tuesday,wednesday,thursday,friday,saturday,sunday"`
// Weeks is the number of weeks between required restarts. Weeks are synced
// across all workspaces (and Coder deployments) using modulo math on a
// hardcoded epoch week of January 2nd, 2023 (the first Monday of 2023).
// Values of 0 or 1 indicate weekly restarts. Values of 2 indicate
// fortnightly restarts, etc.
Weeks int64 `json:"weeks"`
}
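// A sketch of a fortnightly weekend autostop requirement (values are
// illustrative; enforcement requires the enterprise advanced template
// scheduling feature, per the comments above):
//
//	requirement := TemplateAutostopRequirement{
//		DaysOfWeek: []string{"saturday", "sunday"},
//		Weeks:      2, // restart every other week
//	}
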
type TransitionStats struct {
P50 *int64 `example:"123"`
P95 *int64 `example:"146"`
}
type (
TemplateBuildTimeStats map[WorkspaceTransition]TransitionStats
UpdateActiveTemplateVersion struct {
ID uuid.UUID `json:"id" validate:"required" format:"uuid"`
}
)
type ArchiveTemplateVersionsRequest struct {
// By default, only failed versions are archived. Set this to true
// to archive all unused versions regardless of job status.
All bool `json:"all"`
}
type ArchiveTemplateVersionsResponse struct {
TemplateID uuid.UUID `json:"template_id" format:"uuid"`
ArchivedIDs []uuid.UUID `json:"archived_ids"`
}
type TemplateRole string
const (
TemplateRoleAdmin TemplateRole = "admin"
TemplateRoleUse TemplateRole = "use"
TemplateRoleDeleted TemplateRole = ""
)
type TemplateACL struct {
Users []TemplateUser `json:"users"`
Groups []TemplateGroup `json:"group"`
}
type TemplateGroup struct {
Group
Role TemplateRole `json:"role" enums:"admin,use"`
}
type TemplateUser struct {
User
Role TemplateRole `json:"role" enums:"admin,use"`
}
type UpdateTemplateACL struct {
// UserPerms should be a mapping of user id to role. The user id must be the
// uuid of the user, not a username or email address.
UserPerms map[string]TemplateRole `json:"user_perms,omitempty" example:"<user_id>:admin,4df59e74-c027-470b-ab4d-cbba8963a5e9:use"`
// GroupPerms should be a mapping of group id to role.
GroupPerms map[string]TemplateRole `json:"group_perms,omitempty" example:"<group_id>:admin,8bd26b20-f3e8-48be-a903-46bb920cf671:use"`
}
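// A sketch of building the request with the role constants above (the UUIDs
// are placeholders taken from the example tags):
//
//	acl := UpdateTemplateACL{
//		UserPerms:  map[string]TemplateRole{"4df59e74-c027-470b-ab4d-cbba8963a5e9": TemplateRoleUse},
//		GroupPerms: map[string]TemplateRole{"8bd26b20-f3e8-48be-a903-46bb920cf671": TemplateRoleAdmin},
//	}
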
// ACLAvailable is a list of users and groups that can be added to a template
// ACL.
type ACLAvailable struct {
Users []ReducedUser `json:"users"`
Groups []Group `json:"groups"`
}
type UpdateTemplateMeta struct {
Name string `json:"name,omitempty" validate:"omitempty,template_name"`
DisplayName string `json:"display_name,omitempty" validate:"omitempty,template_display_name"`
Description string `json:"description,omitempty"`
Icon string `json:"icon,omitempty"`
DefaultTTLMillis int64 `json:"default_ttl_ms,omitempty"`
// ActivityBumpMillis allows optionally specifying the activity bump
// duration for all workspaces created from this template. Defaults to 1h
// but can be set to 0 to disable activity bumping.
ActivityBumpMillis int64 `json:"activity_bump_ms,omitempty"`
// AutostopRequirement and AutostartRequirement can only be set if your license
// includes the advanced template scheduling feature. If you attempt to set
// these values while unlicensed, they will be ignored.
AutostopRequirement *TemplateAutostopRequirement `json:"autostop_requirement,omitempty"`
AutostartRequirement *TemplateAutostartRequirement `json:"autostart_requirement,omitempty"`
AllowUserAutostart bool `json:"allow_user_autostart,omitempty"`
AllowUserAutostop bool `json:"allow_user_autostop,omitempty"`
AllowUserCancelWorkspaceJobs bool `json:"allow_user_cancel_workspace_jobs,omitempty"`
FailureTTLMillis int64 `json:"failure_ttl_ms,omitempty"`
TimeTilDormantMillis int64 `json:"time_til_dormant_ms,omitempty"`
TimeTilDormantAutoDeleteMillis int64 `json:"time_til_dormant_autodelete_ms,omitempty"`
// UpdateWorkspaceLastUsedAt updates the last_used_at field of workspaces
// spawned from the template. This is useful for preventing workspaces from
// being immediately locked when updating the inactivity_ttl field to a new,
// shorter value.
UpdateWorkspaceLastUsedAt bool `json:"update_workspace_last_used_at"`
// UpdateWorkspaceDormantAt updates the dormant_at field of workspaces spawned
// from the template. This is useful for preventing dormant workspaces from
// being immediately deleted when updating the dormant_ttl field to a new,
// shorter value.
UpdateWorkspaceDormantAt bool `json:"update_workspace_dormant_at"`
// RequireActiveVersion mandates workspaces built using this template
// use the active version of the template. This option has no
// effect on template admins.
RequireActiveVersion bool `json:"require_active_version,omitempty"`
// DeprecationMessage, if set, marks the template as deprecated and blocks
// any new workspaces from using it.
// If passed an empty string, the deprecation message is removed, making
// the template usable for new workspaces again.
DeprecationMessage *string `json:"deprecation_message"`
// DisableEveryoneGroupAccess allows optionally disabling the default
// behavior of granting the 'everyone' group access to use the template.
// If this is set to true, the template will not be available to all users,
// and must be explicitly granted to users or groups in the permissions settings
// of the template.
DisableEveryoneGroupAccess bool `json:"disable_everyone_group_access"`
MaxPortShareLevel *WorkspaceAgentPortShareLevel `json:"max_port_share_level"`
}
type TemplateExample struct {
ID string `json:"id" format:"uuid"`
URL string `json:"url"`
Name string `json:"name"`
Description string `json:"description"`
Icon string `json:"icon"`
Tags []string `json:"tags"`
Markdown string `json:"markdown"`
}
// Template returns a single template.
func (c *Client) Template(ctx context.Context, template uuid.UUID) (Template, error) {
res, err := c.Request(ctx, http.MethodGet, fmt.Sprintf("/api/v2/templates/%s", template), nil)
if err != nil {
return Template{}, xerrors.Errorf("do request: %w", err)
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return Template{}, ReadBodyAsError(res)
}
var resp Template
return resp, json.NewDecoder(res.Body).Decode(&resp)
}
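// Illustrative call (assumes an authenticated *Client named client and a
// known template ID):
//
//	tmpl, err := client.Template(ctx, templateID)
//	if err != nil {
//		// handle error
//	}
//	fmt.Println(tmpl.Name, tmpl.ActiveVersionID)
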
func (c *Client) ArchiveTemplateVersions(ctx context.Context, template uuid.UUID, all bool) (ArchiveTemplateVersionsResponse, error) {
res, err := c.Request(ctx, http.MethodPost,
fmt.Sprintf("/api/v2/templates/%s/versions/archive", template),
ArchiveTemplateVersionsRequest{
All: all,
},
)
if err != nil {
return ArchiveTemplateVersionsResponse{}, err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return ArchiveTemplateVersionsResponse{}, ReadBodyAsError(res)
}
var resp ArchiveTemplateVersionsResponse
return resp, json.NewDecoder(res.Body).Decode(&resp)
}
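// Illustrative call that archives only failed, unused versions (all == false);
// client and templateID are assumed to exist:
//
//	resp, err := client.ArchiveTemplateVersions(ctx, templateID, false)
//	if err != nil {
//		// handle error
//	}
//	// resp.ArchivedIDs lists the versions that were archived.
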
//nolint:revive
func (c *Client) SetArchiveTemplateVersion(ctx context.Context, templateVersion uuid.UUID, archive bool) error {
u := fmt.Sprintf("/api/v2/templateversions/%s", templateVersion.String())
if archive {
u += "/archive"
} else {
u += "/unarchive"
}
res, err := c.Request(ctx, http.MethodPost, u, nil)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return ReadBodyAsError(res)
}
return nil
}
func (c *Client) DeleteTemplate(ctx context.Context, template uuid.UUID) error {
res, err := c.Request(ctx, http.MethodDelete, fmt.Sprintf("/api/v2/templates/%s", template), nil)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return ReadBodyAsError(res)
}
return nil
}
func (c *Client) UpdateTemplateMeta(ctx context.Context, templateID uuid.UUID, req UpdateTemplateMeta) (Template, error) {
res, err := c.Request(ctx, http.MethodPatch, fmt.Sprintf("/api/v2/templates/%s", templateID), req)
if err != nil {
return Template{}, err
}
defer res.Body.Close()
if res.StatusCode == http.StatusNotModified {
return Template{}, xerrors.New("template metadata not modified")
}
if res.StatusCode != http.StatusOK {
return Template{}, ReadBodyAsError(res)
}
var updated Template
return updated, json.NewDecoder(res.Body).Decode(&updated)
}
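// A sketch of a partial metadata update (field values are illustrative; note
// that http.StatusNotModified surfaces as an error above):
//
//	updated, err := client.UpdateTemplateMeta(ctx, templateID, UpdateTemplateMeta{
//		Description:      "Ubuntu workspace with Docker",
//		DefaultTTLMillis: (8 * time.Hour).Milliseconds(),
//	})
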
func (c *Client) UpdateTemplateACL(ctx context.Context, templateID uuid.UUID, req UpdateTemplateACL) error {
res, err := c.Request(ctx, http.MethodPatch, fmt.Sprintf("/api/v2/templates/%s/acl", templateID), req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return ReadBodyAsError(res)
}
return nil
}
// TemplateACLAvailable returns the users and groups that can be assigned template permissions.
func (c *Client) TemplateACLAvailable(ctx context.Context, templateID uuid.UUID) (ACLAvailable, error) {
res, err := c.Request(ctx, http.MethodGet, fmt.Sprintf("/api/v2/templates/%s/acl/available", templateID), nil)
if err != nil {
return ACLAvailable{}, err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return ACLAvailable{}, ReadBodyAsError(res)
}
var acl ACLAvailable
return acl, json.NewDecoder(res.Body).Decode(&acl)
}
func (c *Client) TemplateACL(ctx context.Context, templateID uuid.UUID) (TemplateACL, error) {
res, err := c.Request(ctx, http.MethodGet, fmt.Sprintf("/api/v2/templates/%s/acl", templateID), nil)
if err != nil {
return TemplateACL{}, err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return TemplateACL{}, ReadBodyAsError(res)
}
var acl TemplateACL
return acl, json.NewDecoder(res.Body).Decode(&acl)
}
// UpdateActiveTemplateVersion updates the active template version to the ID provided.
// The template version must be attached to the template.
func (c *Client) UpdateActiveTemplateVersion(ctx context.Context, template uuid.UUID, req UpdateActiveTemplateVersion) error {
res, err := c.Request(ctx, http.MethodPatch, fmt.Sprintf("/api/v2/templates/%s/versions", template), req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return ReadBodyAsError(res)
}
return nil
}
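// Illustrative promotion of a version that already belongs to the template
// (client, templateID, and versionID are assumed):
//
//	err := client.UpdateActiveTemplateVersion(ctx, templateID, UpdateActiveTemplateVersion{
//		ID: versionID,
//	})
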
// TemplateVersionsByTemplateRequest defines the request parameters for
// TemplateVersionsByTemplate.
type TemplateVersionsByTemplateRequest struct {
TemplateID uuid.UUID `json:"template_id" validate:"required" format:"uuid"`
IncludeArchived bool `json:"include_archived"`
Pagination
}
// TemplateVersionsByTemplate lists versions associated with a template.
func (c *Client) TemplateVersionsByTemplate(ctx context.Context, req TemplateVersionsByTemplateRequest) ([]TemplateVersion, error) {
u := fmt.Sprintf("/api/v2/templates/%s/versions", req.TemplateID)
if req.IncludeArchived {
u += "?include_archived=true"
}
res, err := c.Request(ctx, http.MethodGet, u, nil, req.Pagination.asRequestOption())
if err != nil {
return nil, err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return nil, ReadBodyAsError(res)
}
var templateVersion []TemplateVersion
return templateVersion, json.NewDecoder(res.Body).Decode(&templateVersion)
}
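// A sketch of listing versions, including archived ones. The Pagination type
// is defined elsewhere in codersdk; its Limit field is assumed here:
//
//	versions, err := client.TemplateVersionsByTemplate(ctx, TemplateVersionsByTemplateRequest{
//		TemplateID:      templateID,
//		IncludeArchived: true,
//		Pagination:      Pagination{Limit: 25},
//	})
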
// TemplateVersionByName returns a template version by its friendly name.
// This is used for path-based routing, e.g. /templates/example/versions/helloworld.
func (c *Client) TemplateVersionByName(ctx context.Context, template uuid.UUID, name string) (TemplateVersion, error) {
res, err := c.Request(ctx, http.MethodGet, fmt.Sprintf("/api/v2/templates/%s/versions/%s", template, name), nil)
if err != nil {
return TemplateVersion{}, err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return TemplateVersion{}, ReadBodyAsError(res)
}
var templateVersion TemplateVersion
return templateVersion, json.NewDecoder(res.Body).Decode(&templateVersion)
}
func (c *Client) TemplateDAUsLocalTZ(ctx context.Context, templateID uuid.UUID) (*DAUsResponse, error) {
return c.TemplateDAUs(ctx, templateID, TimezoneOffsetHour(time.Local))
}
// TemplateDAUs requires a tzOffset in hours. Use 0 for UTC, and TimezoneOffsetHour(time.Local) for the
// local timezone.
func (c *Client) TemplateDAUs(ctx context.Context, templateID uuid.UUID, tzOffset int) (*DAUsResponse, error) {
res, err := c.Request(ctx, http.MethodGet, fmt.Sprintf("/api/v2/templates/%s/daus", templateID), nil, DAURequest{
TZHourOffset: tzOffset,
}.asRequestOption())
if err != nil {
return nil, xerrors.Errorf("execute request: %w", err)
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return nil, ReadBodyAsError(res)
}
var resp DAUsResponse
return &resp, json.NewDecoder(res.Body).Decode(&resp)
}
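// Illustrative calls (client and templateID are assumed):
//
//	dausUTC, err := client.TemplateDAUs(ctx, templateID, 0) // UTC
//	if err != nil {
//		// handle error
//	}
//	dausLocal, err := client.TemplateDAUsLocalTZ(ctx, templateID) // local timezone
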
// AgentStatsReportRequest is a WebSocket request by coderd
// to the agent for stats.
// @typescript-ignore AgentStatsReportRequest
type AgentStatsReportRequest struct{}
// AgentStatsReportResponse is returned for each report
// request by the agent.
type AgentStatsReportResponse struct {
NumConns int64 `json:"num_comms"`
// RxBytes is the number of received bytes.
RxBytes int64 `json:"rx_bytes"`
// TxBytes is the number of transmitted bytes.
TxBytes int64 `json:"tx_bytes"`
}
// TemplateExamples lists example templates embedded in coder.
func (c *Client) TemplateExamples(ctx context.Context, organizationID uuid.UUID) ([]TemplateExample, error) {
res, err := c.Request(ctx, http.MethodGet, fmt.Sprintf("/api/v2/organizations/%s/templates/examples", organizationID), nil)
if err != nil {
return nil, err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return nil, ReadBodyAsError(res)
}
var templateExamples []TemplateExample
return templateExamples, json.NewDecoder(res.Body).Decode(&templateExamples)
}