feat: add audit logging database schema (#1225)

This commit is contained in:
Colin Adler 2022-05-02 14:30:46 -05:00 committed by GitHub
parent e4e60256ac
commit 81bef1c83e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
11 changed files with 458 additions and 84 deletions

View File

@ -99,7 +99,7 @@ jobs:
with: with:
go-version: "~1.18" go-version: "~1.18"
- run: curl -sSL - run: curl -sSL
https://github.com/kyleconroy/sqlc/releases/download/v1.11.0/sqlc_1.11.0_linux_amd64.tar.gz https://github.com/kyleconroy/sqlc/releases/download/v1.13.0/sqlc_1.13.0_linux_amd64.tar.gz
| sudo tar -C /usr/bin -xz sqlc | sudo tar -C /usr/bin -xz sqlc
- run: go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.26 - run: go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.26

1
coderd/database/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
test/

View File

@ -8,6 +8,7 @@ import (
"sync" "sync"
"github.com/google/uuid" "github.com/google/uuid"
"golang.org/x/exp/slices"
"github.com/coder/coder/coderd/database" "github.com/coder/coder/coderd/database"
) )
@ -16,23 +17,24 @@ import (
func New() database.Store { func New() database.Store {
return &fakeQuerier{ return &fakeQuerier{
apiKeys: make([]database.APIKey, 0), apiKeys: make([]database.APIKey, 0),
organizations: make([]database.Organization, 0),
organizationMembers: make([]database.OrganizationMember, 0), organizationMembers: make([]database.OrganizationMember, 0),
organizations: make([]database.Organization, 0),
users: make([]database.User, 0), users: make([]database.User, 0),
files: make([]database.File, 0), auditLogs: make([]database.AuditLog, 0),
parameterValue: make([]database.ParameterValue, 0), files: make([]database.File, 0),
parameterSchema: make([]database.ParameterSchema, 0), gitSSHKey: make([]database.GitSSHKey, 0),
template: make([]database.Template, 0), parameterSchemas: make([]database.ParameterSchema, 0),
templateVersion: make([]database.TemplateVersion, 0), parameterValues: make([]database.ParameterValue, 0),
provisionerDaemons: make([]database.ProvisionerDaemon, 0), provisionerDaemons: make([]database.ProvisionerDaemon, 0),
provisionerJobs: make([]database.ProvisionerJob, 0), provisionerJobAgents: make([]database.WorkspaceAgent, 0),
provisionerJobLog: make([]database.ProvisionerJobLog, 0), provisionerJobLogs: make([]database.ProvisionerJobLog, 0),
workspaces: make([]database.Workspace, 0), provisionerJobResources: make([]database.WorkspaceResource, 0),
provisionerJobResource: make([]database.WorkspaceResource, 0), provisionerJobs: make([]database.ProvisionerJob, 0),
workspaceBuild: make([]database.WorkspaceBuild, 0), templateVersions: make([]database.TemplateVersion, 0),
provisionerJobAgent: make([]database.WorkspaceAgent, 0), templates: make([]database.Template, 0),
GitSSHKey: make([]database.GitSSHKey, 0), workspaceBuilds: make([]database.WorkspaceBuild, 0),
workspaces: make([]database.Workspace, 0),
} }
} }
@ -47,19 +49,20 @@ type fakeQuerier struct {
users []database.User users []database.User
// New tables // New tables
files []database.File auditLogs []database.AuditLog
parameterValue []database.ParameterValue files []database.File
parameterSchema []database.ParameterSchema gitSSHKey []database.GitSSHKey
template []database.Template parameterSchemas []database.ParameterSchema
templateVersion []database.TemplateVersion parameterValues []database.ParameterValue
provisionerDaemons []database.ProvisionerDaemon provisionerDaemons []database.ProvisionerDaemon
provisionerJobs []database.ProvisionerJob provisionerJobAgents []database.WorkspaceAgent
provisionerJobAgent []database.WorkspaceAgent provisionerJobLogs []database.ProvisionerJobLog
provisionerJobResource []database.WorkspaceResource provisionerJobResources []database.WorkspaceResource
provisionerJobLog []database.ProvisionerJobLog provisionerJobs []database.ProvisionerJob
workspaces []database.Workspace templateVersions []database.TemplateVersion
workspaceBuild []database.WorkspaceBuild templates []database.Template
GitSSHKey []database.GitSSHKey workspaceBuilds []database.WorkspaceBuild
workspaces []database.Workspace
} }
// InTx doesn't rollback data properly for in-memory yet. // InTx doesn't rollback data properly for in-memory yet.
@ -99,12 +102,12 @@ func (q *fakeQuerier) DeleteParameterValueByID(_ context.Context, id uuid.UUID)
q.mutex.Lock() q.mutex.Lock()
defer q.mutex.Unlock() defer q.mutex.Unlock()
for index, parameterValue := range q.parameterValue { for index, parameterValue := range q.parameterValues {
if parameterValue.ID.String() != id.String() { if parameterValue.ID.String() != id.String() {
continue continue
} }
q.parameterValue[index] = q.parameterValue[len(q.parameterValue)-1] q.parameterValues[index] = q.parameterValues[len(q.parameterValues)-1]
q.parameterValue = q.parameterValue[:len(q.parameterValue)-1] q.parameterValues = q.parameterValues[:len(q.parameterValues)-1]
return nil return nil
} }
return sql.ErrNoRows return sql.ErrNoRows
@ -337,7 +340,7 @@ func (q *fakeQuerier) GetWorkspaceBuildByID(_ context.Context, id uuid.UUID) (da
q.mutex.RLock() q.mutex.RLock()
defer q.mutex.RUnlock() defer q.mutex.RUnlock()
for _, history := range q.workspaceBuild { for _, history := range q.workspaceBuilds {
if history.ID.String() == id.String() { if history.ID.String() == id.String() {
return history, nil return history, nil
} }
@ -349,7 +352,7 @@ func (q *fakeQuerier) GetWorkspaceBuildByJobID(_ context.Context, jobID uuid.UUI
q.mutex.RLock() q.mutex.RLock()
defer q.mutex.RUnlock() defer q.mutex.RUnlock()
for _, build := range q.workspaceBuild { for _, build := range q.workspaceBuilds {
if build.JobID.String() == jobID.String() { if build.JobID.String() == jobID.String() {
return build, nil return build, nil
} }
@ -361,7 +364,7 @@ func (q *fakeQuerier) GetWorkspaceBuildByWorkspaceIDWithoutAfter(_ context.Conte
q.mutex.RLock() q.mutex.RLock()
defer q.mutex.RUnlock() defer q.mutex.RUnlock()
for _, workspaceBuild := range q.workspaceBuild { for _, workspaceBuild := range q.workspaceBuilds {
if workspaceBuild.WorkspaceID.String() != workspaceID.String() { if workspaceBuild.WorkspaceID.String() != workspaceID.String() {
continue continue
} }
@ -377,7 +380,7 @@ func (q *fakeQuerier) GetWorkspaceBuildsByWorkspaceIDsWithoutAfter(_ context.Con
defer q.mutex.RUnlock() defer q.mutex.RUnlock()
builds := make([]database.WorkspaceBuild, 0) builds := make([]database.WorkspaceBuild, 0)
for _, workspaceBuild := range q.workspaceBuild { for _, workspaceBuild := range q.workspaceBuilds {
for _, id := range ids { for _, id := range ids {
if id.String() != workspaceBuild.WorkspaceID.String() { if id.String() != workspaceBuild.WorkspaceID.String() {
continue continue
@ -396,7 +399,7 @@ func (q *fakeQuerier) GetWorkspaceBuildByWorkspaceID(_ context.Context, workspac
defer q.mutex.RUnlock() defer q.mutex.RUnlock()
history := make([]database.WorkspaceBuild, 0) history := make([]database.WorkspaceBuild, 0)
for _, workspaceBuild := range q.workspaceBuild { for _, workspaceBuild := range q.workspaceBuilds {
if workspaceBuild.WorkspaceID.String() == workspaceID.String() { if workspaceBuild.WorkspaceID.String() == workspaceID.String() {
history = append(history, workspaceBuild) history = append(history, workspaceBuild)
} }
@ -411,7 +414,7 @@ func (q *fakeQuerier) GetWorkspaceBuildByWorkspaceIDAndName(_ context.Context, a
q.mutex.RLock() q.mutex.RLock()
defer q.mutex.RUnlock() defer q.mutex.RUnlock()
for _, workspaceBuild := range q.workspaceBuild { for _, workspaceBuild := range q.workspaceBuilds {
if workspaceBuild.WorkspaceID.String() != arg.WorkspaceID.String() { if workspaceBuild.WorkspaceID.String() != arg.WorkspaceID.String() {
continue continue
} }
@ -524,7 +527,7 @@ func (q *fakeQuerier) GetParameterValuesByScope(_ context.Context, arg database.
defer q.mutex.RUnlock() defer q.mutex.RUnlock()
parameterValues := make([]database.ParameterValue, 0) parameterValues := make([]database.ParameterValue, 0)
for _, parameterValue := range q.parameterValue { for _, parameterValue := range q.parameterValues {
if parameterValue.Scope != arg.Scope { if parameterValue.Scope != arg.Scope {
continue continue
} }
@ -543,7 +546,7 @@ func (q *fakeQuerier) GetTemplateByID(_ context.Context, id uuid.UUID) (database
q.mutex.RLock() q.mutex.RLock()
defer q.mutex.RUnlock() defer q.mutex.RUnlock()
for _, template := range q.template { for _, template := range q.templates {
if template.ID.String() == id.String() { if template.ID.String() == id.String() {
return template, nil return template, nil
} }
@ -555,7 +558,7 @@ func (q *fakeQuerier) GetTemplateByOrganizationAndName(_ context.Context, arg da
q.mutex.RLock() q.mutex.RLock()
defer q.mutex.RUnlock() defer q.mutex.RUnlock()
for _, template := range q.template { for _, template := range q.templates {
if template.OrganizationID != arg.OrganizationID { if template.OrganizationID != arg.OrganizationID {
continue continue
} }
@ -575,7 +578,7 @@ func (q *fakeQuerier) GetTemplateVersionsByTemplateID(_ context.Context, templat
defer q.mutex.RUnlock() defer q.mutex.RUnlock()
version := make([]database.TemplateVersion, 0) version := make([]database.TemplateVersion, 0)
for _, templateVersion := range q.templateVersion { for _, templateVersion := range q.templateVersions {
if templateVersion.TemplateID.UUID.String() != templateID.String() { if templateVersion.TemplateID.UUID.String() != templateID.String() {
continue continue
} }
@ -591,7 +594,7 @@ func (q *fakeQuerier) GetTemplateVersionByTemplateIDAndName(_ context.Context, a
q.mutex.RLock() q.mutex.RLock()
defer q.mutex.RUnlock() defer q.mutex.RUnlock()
for _, templateVersion := range q.templateVersion { for _, templateVersion := range q.templateVersions {
if templateVersion.TemplateID != arg.TemplateID { if templateVersion.TemplateID != arg.TemplateID {
continue continue
} }
@ -607,7 +610,7 @@ func (q *fakeQuerier) GetTemplateVersionByID(_ context.Context, templateVersionI
q.mutex.RLock() q.mutex.RLock()
defer q.mutex.RUnlock() defer q.mutex.RUnlock()
for _, templateVersion := range q.templateVersion { for _, templateVersion := range q.templateVersions {
if templateVersion.ID.String() != templateVersionID.String() { if templateVersion.ID.String() != templateVersionID.String() {
continue continue
} }
@ -620,7 +623,7 @@ func (q *fakeQuerier) GetTemplateVersionByJobID(_ context.Context, jobID uuid.UU
q.mutex.RLock() q.mutex.RLock()
defer q.mutex.RUnlock() defer q.mutex.RUnlock()
for _, templateVersion := range q.templateVersion { for _, templateVersion := range q.templateVersions {
if templateVersion.JobID.String() != jobID.String() { if templateVersion.JobID.String() != jobID.String() {
continue continue
} }
@ -634,7 +637,7 @@ func (q *fakeQuerier) GetParameterSchemasByJobID(_ context.Context, jobID uuid.U
defer q.mutex.RUnlock() defer q.mutex.RUnlock()
parameters := make([]database.ParameterSchema, 0) parameters := make([]database.ParameterSchema, 0)
for _, parameterSchema := range q.parameterSchema { for _, parameterSchema := range q.parameterSchemas {
if parameterSchema.JobID.String() != jobID.String() { if parameterSchema.JobID.String() != jobID.String() {
continue continue
} }
@ -650,7 +653,7 @@ func (q *fakeQuerier) GetParameterValueByScopeAndName(_ context.Context, arg dat
q.mutex.RLock() q.mutex.RLock()
defer q.mutex.RUnlock() defer q.mutex.RUnlock()
for _, parameterValue := range q.parameterValue { for _, parameterValue := range q.parameterValues {
if parameterValue.Scope != arg.Scope { if parameterValue.Scope != arg.Scope {
continue continue
} }
@ -670,7 +673,7 @@ func (q *fakeQuerier) GetTemplatesByOrganization(_ context.Context, arg database
defer q.mutex.RUnlock() defer q.mutex.RUnlock()
templates := make([]database.Template, 0) templates := make([]database.Template, 0)
for _, template := range q.template { for _, template := range q.templates {
if template.Deleted != arg.Deleted { if template.Deleted != arg.Deleted {
continue continue
} }
@ -690,7 +693,7 @@ func (q *fakeQuerier) GetTemplatesByIDs(_ context.Context, ids []uuid.UUID) ([]d
defer q.mutex.RUnlock() defer q.mutex.RUnlock()
templates := make([]database.Template, 0) templates := make([]database.Template, 0)
for _, template := range q.template { for _, template := range q.templates {
for _, id := range ids { for _, id := range ids {
if template.ID.String() != id.String() { if template.ID.String() != id.String() {
continue continue
@ -795,8 +798,8 @@ func (q *fakeQuerier) GetWorkspaceAgentByAuthToken(_ context.Context, authToken
defer q.mutex.RUnlock() defer q.mutex.RUnlock()
// The schema sorts this by created at, so we iterate the array backwards. // The schema sorts this by created at, so we iterate the array backwards.
for i := len(q.provisionerJobAgent) - 1; i >= 0; i-- { for i := len(q.provisionerJobAgents) - 1; i >= 0; i-- {
agent := q.provisionerJobAgent[i] agent := q.provisionerJobAgents[i]
if agent.AuthToken.String() == authToken.String() { if agent.AuthToken.String() == authToken.String() {
return agent, nil return agent, nil
} }
@ -809,8 +812,8 @@ func (q *fakeQuerier) GetWorkspaceAgentByID(_ context.Context, id uuid.UUID) (da
defer q.mutex.RUnlock() defer q.mutex.RUnlock()
// The schema sorts this by created at, so we iterate the array backwards. // The schema sorts this by created at, so we iterate the array backwards.
for i := len(q.provisionerJobAgent) - 1; i >= 0; i-- { for i := len(q.provisionerJobAgents) - 1; i >= 0; i-- {
agent := q.provisionerJobAgent[i] agent := q.provisionerJobAgents[i]
if agent.ID.String() == id.String() { if agent.ID.String() == id.String() {
return agent, nil return agent, nil
} }
@ -823,8 +826,8 @@ func (q *fakeQuerier) GetWorkspaceAgentByInstanceID(_ context.Context, instanceI
defer q.mutex.RUnlock() defer q.mutex.RUnlock()
// The schema sorts this by created at, so we iterate the array backwards. // The schema sorts this by created at, so we iterate the array backwards.
for i := len(q.provisionerJobAgent) - 1; i >= 0; i-- { for i := len(q.provisionerJobAgents) - 1; i >= 0; i-- {
agent := q.provisionerJobAgent[i] agent := q.provisionerJobAgents[i]
if agent.AuthInstanceID.Valid && agent.AuthInstanceID.String == instanceID { if agent.AuthInstanceID.Valid && agent.AuthInstanceID.String == instanceID {
return agent, nil return agent, nil
} }
@ -837,7 +840,7 @@ func (q *fakeQuerier) GetWorkspaceAgentsByResourceIDs(_ context.Context, resourc
defer q.mutex.RUnlock() defer q.mutex.RUnlock()
workspaceAgents := make([]database.WorkspaceAgent, 0) workspaceAgents := make([]database.WorkspaceAgent, 0)
for _, agent := range q.provisionerJobAgent { for _, agent := range q.provisionerJobAgents {
for _, resourceID := range resourceIDs { for _, resourceID := range resourceIDs {
if agent.ResourceID.String() != resourceID.String() { if agent.ResourceID.String() != resourceID.String() {
continue continue
@ -881,7 +884,7 @@ func (q *fakeQuerier) GetWorkspaceResourceByID(_ context.Context, id uuid.UUID)
q.mutex.RLock() q.mutex.RLock()
defer q.mutex.RUnlock() defer q.mutex.RUnlock()
for _, resource := range q.provisionerJobResource { for _, resource := range q.provisionerJobResources {
if resource.ID.String() == id.String() { if resource.ID.String() == id.String() {
return resource, nil return resource, nil
} }
@ -894,7 +897,7 @@ func (q *fakeQuerier) GetWorkspaceResourcesByJobID(_ context.Context, jobID uuid
defer q.mutex.RUnlock() defer q.mutex.RUnlock()
resources := make([]database.WorkspaceResource, 0) resources := make([]database.WorkspaceResource, 0)
for _, resource := range q.provisionerJobResource { for _, resource := range q.provisionerJobResources {
if resource.JobID.String() != jobID.String() { if resource.JobID.String() != jobID.String() {
continue continue
} }
@ -931,7 +934,7 @@ func (q *fakeQuerier) GetProvisionerLogsByIDBetween(_ context.Context, arg datab
defer q.mutex.RUnlock() defer q.mutex.RUnlock()
logs := make([]database.ProvisionerJobLog, 0) logs := make([]database.ProvisionerJobLog, 0)
for _, jobLog := range q.provisionerJobLog { for _, jobLog := range q.provisionerJobLogs {
if jobLog.JobID.String() != arg.JobID.String() { if jobLog.JobID.String() != arg.JobID.String() {
continue continue
} }
@ -1034,7 +1037,7 @@ func (q *fakeQuerier) InsertParameterValue(_ context.Context, arg database.Inser
SourceValue: arg.SourceValue, SourceValue: arg.SourceValue,
DestinationScheme: arg.DestinationScheme, DestinationScheme: arg.DestinationScheme,
} }
q.parameterValue = append(q.parameterValue, parameterValue) q.parameterValues = append(q.parameterValues, parameterValue)
return parameterValue, nil return parameterValue, nil
} }
@ -1052,7 +1055,7 @@ func (q *fakeQuerier) InsertTemplate(_ context.Context, arg database.InsertTempl
Provisioner: arg.Provisioner, Provisioner: arg.Provisioner,
ActiveVersionID: arg.ActiveVersionID, ActiveVersionID: arg.ActiveVersionID,
} }
q.template = append(q.template, template) q.templates = append(q.templates, template)
return template, nil return template, nil
} }
@ -1071,7 +1074,7 @@ func (q *fakeQuerier) InsertTemplateVersion(_ context.Context, arg database.Inse
Description: arg.Description, Description: arg.Description,
JobID: arg.JobID, JobID: arg.JobID,
} }
q.templateVersion = append(q.templateVersion, version) q.templateVersions = append(q.templateVersions, version)
return version, nil return version, nil
} }
@ -1091,7 +1094,7 @@ func (q *fakeQuerier) InsertProvisionerJobLogs(_ context.Context, arg database.I
Output: output, Output: output,
}) })
} }
q.provisionerJobLog = append(q.provisionerJobLog, logs...) q.provisionerJobLogs = append(q.provisionerJobLogs, logs...)
return logs, nil return logs, nil
} }
@ -1118,7 +1121,7 @@ func (q *fakeQuerier) InsertParameterSchema(_ context.Context, arg database.Inse
ValidationTypeSystem: arg.ValidationTypeSystem, ValidationTypeSystem: arg.ValidationTypeSystem,
ValidationValueType: arg.ValidationValueType, ValidationValueType: arg.ValidationValueType,
} }
q.parameterSchema = append(q.parameterSchema, param) q.parameterSchemas = append(q.parameterSchemas, param)
return param, nil return param, nil
} }
@ -1178,7 +1181,7 @@ func (q *fakeQuerier) InsertWorkspaceAgent(_ context.Context, arg database.Inser
InstanceMetadata: arg.InstanceMetadata, InstanceMetadata: arg.InstanceMetadata,
ResourceMetadata: arg.ResourceMetadata, ResourceMetadata: arg.ResourceMetadata,
} }
q.provisionerJobAgent = append(q.provisionerJobAgent, agent) q.provisionerJobAgents = append(q.provisionerJobAgents, agent)
return agent, nil return agent, nil
} }
@ -1195,7 +1198,7 @@ func (q *fakeQuerier) InsertWorkspaceResource(_ context.Context, arg database.In
Type: arg.Type, Type: arg.Type,
Name: arg.Name, Name: arg.Name,
} }
q.provisionerJobResource = append(q.provisionerJobResource, resource) q.provisionerJobResources = append(q.provisionerJobResources, resource)
return resource, nil return resource, nil
} }
@ -1314,7 +1317,7 @@ func (q *fakeQuerier) InsertWorkspaceBuild(_ context.Context, arg database.Inser
JobID: arg.JobID, JobID: arg.JobID,
ProvisionerState: arg.ProvisionerState, ProvisionerState: arg.ProvisionerState,
} }
q.workspaceBuild = append(q.workspaceBuild, workspaceBuild) q.workspaceBuilds = append(q.workspaceBuilds, workspaceBuild)
return workspaceBuild, nil return workspaceBuild, nil
} }
@ -1341,12 +1344,12 @@ func (q *fakeQuerier) UpdateTemplateActiveVersionByID(_ context.Context, arg dat
q.mutex.Lock() q.mutex.Lock()
defer q.mutex.Unlock() defer q.mutex.Unlock()
for index, template := range q.template { for index, template := range q.templates {
if template.ID.String() != arg.ID.String() { if template.ID.String() != arg.ID.String() {
continue continue
} }
template.ActiveVersionID = arg.ActiveVersionID template.ActiveVersionID = arg.ActiveVersionID
q.template[index] = template q.templates[index] = template
return nil return nil
} }
return sql.ErrNoRows return sql.ErrNoRows
@ -1356,12 +1359,12 @@ func (q *fakeQuerier) UpdateTemplateDeletedByID(_ context.Context, arg database.
q.mutex.Lock() q.mutex.Lock()
defer q.mutex.Unlock() defer q.mutex.Unlock()
for index, template := range q.template { for index, template := range q.templates {
if template.ID.String() != arg.ID.String() { if template.ID.String() != arg.ID.String() {
continue continue
} }
template.Deleted = arg.Deleted template.Deleted = arg.Deleted
q.template[index] = template q.templates[index] = template
return nil return nil
} }
return sql.ErrNoRows return sql.ErrNoRows
@ -1371,13 +1374,13 @@ func (q *fakeQuerier) UpdateTemplateVersionByID(_ context.Context, arg database.
q.mutex.Lock() q.mutex.Lock()
defer q.mutex.Unlock() defer q.mutex.Unlock()
for index, templateVersion := range q.templateVersion { for index, templateVersion := range q.templateVersions {
if templateVersion.ID.String() != arg.ID.String() { if templateVersion.ID.String() != arg.ID.String() {
continue continue
} }
templateVersion.TemplateID = arg.TemplateID templateVersion.TemplateID = arg.TemplateID
templateVersion.UpdatedAt = arg.UpdatedAt templateVersion.UpdatedAt = arg.UpdatedAt
q.templateVersion[index] = templateVersion q.templateVersions[index] = templateVersion
return nil return nil
} }
return sql.ErrNoRows return sql.ErrNoRows
@ -1403,14 +1406,14 @@ func (q *fakeQuerier) UpdateWorkspaceAgentConnectionByID(_ context.Context, arg
q.mutex.Lock() q.mutex.Lock()
defer q.mutex.Unlock() defer q.mutex.Unlock()
for index, agent := range q.provisionerJobAgent { for index, agent := range q.provisionerJobAgents {
if agent.ID.String() != arg.ID.String() { if agent.ID.String() != arg.ID.String() {
continue continue
} }
agent.FirstConnectedAt = arg.FirstConnectedAt agent.FirstConnectedAt = arg.FirstConnectedAt
agent.LastConnectedAt = arg.LastConnectedAt agent.LastConnectedAt = arg.LastConnectedAt
agent.DisconnectedAt = arg.DisconnectedAt agent.DisconnectedAt = arg.DisconnectedAt
q.provisionerJobAgent[index] = agent q.provisionerJobAgents[index] = agent
return nil return nil
} }
return sql.ErrNoRows return sql.ErrNoRows
@ -1499,14 +1502,14 @@ func (q *fakeQuerier) UpdateWorkspaceBuildByID(_ context.Context, arg database.U
q.mutex.Lock() q.mutex.Lock()
defer q.mutex.Unlock() defer q.mutex.Unlock()
for index, workspaceBuild := range q.workspaceBuild { for index, workspaceBuild := range q.workspaceBuilds {
if workspaceBuild.ID.String() != arg.ID.String() { if workspaceBuild.ID.String() != arg.ID.String() {
continue continue
} }
workspaceBuild.UpdatedAt = arg.UpdatedAt workspaceBuild.UpdatedAt = arg.UpdatedAt
workspaceBuild.AfterID = arg.AfterID workspaceBuild.AfterID = arg.AfterID
workspaceBuild.ProvisionerState = arg.ProvisionerState workspaceBuild.ProvisionerState = arg.ProvisionerState
q.workspaceBuild[index] = workspaceBuild q.workspaceBuilds[index] = workspaceBuild
return nil return nil
} }
return sql.ErrNoRows return sql.ErrNoRows
@ -1539,7 +1542,7 @@ func (q *fakeQuerier) InsertGitSSHKey(_ context.Context, arg database.InsertGitS
PrivateKey: arg.PrivateKey, PrivateKey: arg.PrivateKey,
PublicKey: arg.PublicKey, PublicKey: arg.PublicKey,
} }
q.GitSSHKey = append(q.GitSSHKey, gitSSHKey) q.gitSSHKey = append(q.gitSSHKey, gitSSHKey)
return gitSSHKey, nil return gitSSHKey, nil
} }
@ -1547,7 +1550,7 @@ func (q *fakeQuerier) GetGitSSHKey(_ context.Context, userID uuid.UUID) (databas
q.mutex.RLock() q.mutex.RLock()
defer q.mutex.RUnlock() defer q.mutex.RUnlock()
for _, key := range q.GitSSHKey { for _, key := range q.gitSSHKey {
if key.UserID == userID { if key.UserID == userID {
return key, nil return key, nil
} }
@ -1559,14 +1562,14 @@ func (q *fakeQuerier) UpdateGitSSHKey(_ context.Context, arg database.UpdateGitS
q.mutex.Lock() q.mutex.Lock()
defer q.mutex.Unlock() defer q.mutex.Unlock()
for index, key := range q.GitSSHKey { for index, key := range q.gitSSHKey {
if key.UserID.String() != arg.UserID.String() { if key.UserID.String() != arg.UserID.String() {
continue continue
} }
key.UpdatedAt = arg.UpdatedAt key.UpdatedAt = arg.UpdatedAt
key.PrivateKey = arg.PrivateKey key.PrivateKey = arg.PrivateKey
key.PublicKey = arg.PublicKey key.PublicKey = arg.PublicKey
q.GitSSHKey[index] = key q.gitSSHKey[index] = key
return nil return nil
} }
return sql.ErrNoRows return sql.ErrNoRows
@ -1576,13 +1579,72 @@ func (q *fakeQuerier) DeleteGitSSHKey(_ context.Context, userID uuid.UUID) error
q.mutex.Lock() q.mutex.Lock()
defer q.mutex.Unlock() defer q.mutex.Unlock()
for index, key := range q.GitSSHKey { for index, key := range q.gitSSHKey {
if key.UserID.String() != userID.String() { if key.UserID.String() != userID.String() {
continue continue
} }
q.GitSSHKey[index] = q.GitSSHKey[len(q.GitSSHKey)-1] q.gitSSHKey[index] = q.gitSSHKey[len(q.gitSSHKey)-1]
q.GitSSHKey = q.GitSSHKey[:len(q.GitSSHKey)-1] q.gitSSHKey = q.gitSSHKey[:len(q.gitSSHKey)-1]
return nil return nil
} }
return sql.ErrNoRows return sql.ErrNoRows
} }
// GetAuditLogsBefore returns up to arg.RowLimit audit logs whose timestamp is
// strictly before the timestamp of the log identified by arg.ID.
// Returns sql.ErrNoRows when no log with arg.ID exists.
func (q *fakeQuerier) GetAuditLogsBefore(_ context.Context, arg database.GetAuditLogsBeforeParams) ([]database.AuditLog, error) {
	q.mutex.RLock()
	defer q.mutex.RUnlock()
	logs := make([]database.AuditLog, 0)
	// Locate the anchor row the pagination cursor (arg.ID) points at.
	start := database.AuditLog{}
	for _, alog := range q.auditLogs {
		if alog.ID == arg.ID {
			start = alog
			break
		}
	}
	// Zero-value ID means the anchor was never found.
	if start.ID == uuid.Nil {
		return nil, sql.ErrNoRows
	}
	// NOTE(review): InsertAuditLog keeps q.auditLogs sorted ASCENDING by Time
	// (less func is a.Time.Before(b.Time)), so this forward scan collects the
	// OLDEST logs before the anchor, not the newest. The original "sorted by
	// time DESC" claim appears inconsistent with that — confirm intended order.
	for _, alog := range q.auditLogs {
		if alog.Time.Before(start.Time) {
			logs = append(logs, alog)
		}
		// Stop once the page is full (checked after append, so RowLimit=0
		// still yields at most one row).
		if len(logs) >= int(arg.RowLimit) {
			break
		}
	}
	return logs, nil
}
// InsertAuditLog records a new audit log entry in the fake store and keeps
// the in-memory slice ordered ascending by timestamp, mirroring the query
// ordering the real database provides.
func (q *fakeQuerier) InsertAuditLog(_ context.Context, arg database.InsertAuditLogParams) (database.AuditLog, error) {
	q.mutex.Lock()
	defer q.mutex.Unlock()

	// Copy every insert parameter verbatim into the stored row.
	row := database.AuditLog{
		ID:             arg.ID,
		Time:           arg.Time,
		UserID:         arg.UserID,
		OrganizationID: arg.OrganizationID,
		Ip:             arg.Ip,
		UserAgent:      arg.UserAgent,
		ResourceType:   arg.ResourceType,
		ResourceID:     arg.ResourceID,
		ResourceTarget: arg.ResourceTarget,
		Action:         arg.Action,
		Diff:           arg.Diff,
		StatusCode:     arg.StatusCode,
	}

	q.auditLogs = append(q.auditLogs, row)
	// Re-sort after the append so readers can rely on chronological order.
	slices.SortFunc(q.auditLogs, func(a, b database.AuditLog) bool {
		return a.Time.Before(b.Time)
	})
	return row, nil
}

View File

@ -1,5 +1,11 @@
-- Code generated by 'make coderd/database/generate'. DO NOT EDIT. -- Code generated by 'make coderd/database/generate'. DO NOT EDIT.
CREATE TYPE audit_action AS ENUM (
'create',
'write',
'delete'
);
CREATE TYPE log_level AS ENUM ( CREATE TYPE log_level AS ENUM (
'trace', 'trace',
'debug', 'debug',
@ -56,6 +62,14 @@ CREATE TYPE provisioner_type AS ENUM (
'terraform' 'terraform'
); );
CREATE TYPE resource_type AS ENUM (
'organization',
'template',
'template_version',
'user',
'workspace'
);
CREATE TYPE user_status AS ENUM ( CREATE TYPE user_status AS ENUM (
'active', 'active',
'suspended' 'suspended'
@ -82,6 +96,21 @@ CREATE TABLE api_keys (
oauth_expiry timestamp with time zone DEFAULT '0001-01-01 00:00:00+00'::timestamp with time zone NOT NULL oauth_expiry timestamp with time zone DEFAULT '0001-01-01 00:00:00+00'::timestamp with time zone NOT NULL
); );
CREATE TABLE audit_logs (
id uuid NOT NULL,
"time" timestamp with time zone NOT NULL,
user_id uuid NOT NULL,
organization_id uuid NOT NULL,
ip cidr NOT NULL,
user_agent character varying(256) NOT NULL,
resource_type resource_type NOT NULL,
resource_id uuid NOT NULL,
resource_target text NOT NULL,
action audit_action NOT NULL,
diff jsonb NOT NULL,
status_code integer NOT NULL
);
CREATE TABLE files ( CREATE TABLE files (
hash character varying(64) NOT NULL, hash character varying(64) NOT NULL,
created_at timestamp with time zone NOT NULL, created_at timestamp with time zone NOT NULL,
@ -293,6 +322,9 @@ ALTER TABLE ONLY licenses ALTER COLUMN id SET DEFAULT nextval('public.licenses_i
ALTER TABLE ONLY api_keys ALTER TABLE ONLY api_keys
ADD CONSTRAINT api_keys_pkey PRIMARY KEY (id); ADD CONSTRAINT api_keys_pkey PRIMARY KEY (id);
ALTER TABLE ONLY audit_logs
ADD CONSTRAINT audit_logs_pkey PRIMARY KEY (id);
ALTER TABLE ONLY files ALTER TABLE ONLY files
ADD CONSTRAINT files_pkey PRIMARY KEY (hash); ADD CONSTRAINT files_pkey PRIMARY KEY (hash);
@ -367,6 +399,14 @@ ALTER TABLE ONLY workspaces
CREATE INDEX idx_api_keys_user ON api_keys USING btree (user_id); CREATE INDEX idx_api_keys_user ON api_keys USING btree (user_id);
CREATE INDEX idx_audit_log_organization_id ON audit_logs USING btree (organization_id);
CREATE INDEX idx_audit_log_resource_id ON audit_logs USING btree (resource_id);
CREATE INDEX idx_audit_log_user_id ON audit_logs USING btree (user_id);
CREATE INDEX idx_audit_logs_time_desc ON audit_logs USING btree ("time" DESC);
CREATE INDEX idx_organization_member_organization_id_uuid ON organization_members USING btree (organization_id); CREATE INDEX idx_organization_member_organization_id_uuid ON organization_members USING btree (organization_id);
CREATE INDEX idx_organization_member_user_id_uuid ON organization_members USING btree (user_id); CREATE INDEX idx_organization_member_user_id_uuid ON organization_members USING btree (user_id);

View File

@ -10,6 +10,8 @@ set -euo pipefail
cd "$(dirname "$0")" cd "$(dirname "$0")"
# The logic below depends on the exact version being correct :(
[[ $(sqlc version) != "v1.13.0" ]] && go install github.com/kyleconroy/sqlc/cmd/sqlc@v1.13.0
sqlc generate sqlc generate
first=true first=true
@ -20,7 +22,7 @@ for fi in queries/*.sql.go; do
# Copy the header from the first file only, ignoring the source comment. # Copy the header from the first file only, ignoring the source comment.
if $first; then if $first; then
head -n 4 < "$fi" | grep -v "source" > queries.sql.go head -n 6 < "$fi" | grep -v "source" > queries.sql.go
first=false first=false
fi fi

View File

@ -0,0 +1,3 @@
DROP TABLE audit_logs;
DROP TYPE audit_action;
DROP TYPE resource_type;

View File

@ -0,0 +1,37 @@
CREATE TYPE resource_type AS ENUM (
'organization',
'template',
'template_version',
'user',
'workspace'
);
CREATE TYPE audit_action AS ENUM (
'create',
-- We intentionally do not track reads. They're way too spammy.
'write',
'delete'
);
CREATE TABLE audit_logs (
id uuid NOT NULL,
"time" timestamp with time zone NOT NULL,
user_id uuid NOT NULL,
organization_id uuid NOT NULL,
ip cidr NOT NULL,
user_agent varchar(256) NOT NULL,
resource_type resource_type NOT NULL,
resource_id uuid NOT NULL,
-- resource_target is the name of the resource that `resource_id` points to.
-- it's stored here because resources we point to can be deleted.
resource_target text NOT NULL,
action audit_action NOT NULL,
diff jsonb NOT NULL,
status_code integer NOT NULL,
PRIMARY KEY (id)
);
CREATE INDEX idx_audit_logs_time_desc ON audit_logs USING btree ("time" DESC);
CREATE INDEX idx_audit_log_user_id ON audit_logs USING btree (user_id);
CREATE INDEX idx_audit_log_organization_id ON audit_logs USING btree (organization_id);
CREATE INDEX idx_audit_log_resource_id ON audit_logs USING btree (resource_id);

View File

@ -1,4 +1,6 @@
// Code generated by sqlc. DO NOT EDIT. // Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.13.0
package database package database
@ -12,6 +14,26 @@ import (
"github.com/tabbed/pqtype" "github.com/tabbed/pqtype"
) )
// AuditAction is the operation recorded by an audit log entry.
// Generated by sqlc from the audit_action Postgres enum; do not edit by hand.
type AuditAction string

const (
	AuditActionCreate AuditAction = "create"
	AuditActionWrite  AuditAction = "write"
	AuditActionDelete AuditAction = "delete"
)

// Scan implements sql.Scanner so an audit_action column can be scanned
// directly into an AuditAction from either a []byte or string driver value.
func (e *AuditAction) Scan(src interface{}) error {
	switch s := src.(type) {
	case []byte:
		*e = AuditAction(s)
	case string:
		*e = AuditAction(s)
	default:
		return fmt.Errorf("unsupported scan type for AuditAction: %T", src)
	}
	return nil
}
type LogLevel string type LogLevel string
const ( const (
@ -208,6 +230,28 @@ func (e *ProvisionerType) Scan(src interface{}) error {
return nil return nil
} }
// ResourceType is the kind of resource an audit log entry refers to.
// Generated by sqlc from the resource_type Postgres enum; do not edit by hand.
type ResourceType string

const (
	ResourceTypeOrganization    ResourceType = "organization"
	ResourceTypeTemplate        ResourceType = "template"
	ResourceTypeTemplateVersion ResourceType = "template_version"
	ResourceTypeUser            ResourceType = "user"
	ResourceTypeWorkspace       ResourceType = "workspace"
)

// Scan implements sql.Scanner so a resource_type column can be scanned
// directly into a ResourceType from either a []byte or string driver value.
func (e *ResourceType) Scan(src interface{}) error {
	switch s := src.(type) {
	case []byte:
		*e = ResourceType(s)
	case string:
		*e = ResourceType(s)
	default:
		return fmt.Errorf("unsupported scan type for ResourceType: %T", src)
	}
	return nil
}
type UserStatus string type UserStatus string
const ( const (
@ -262,6 +306,21 @@ type APIKey struct {
OAuthExpiry time.Time `db:"oauth_expiry" json:"oauth_expiry"` OAuthExpiry time.Time `db:"oauth_expiry" json:"oauth_expiry"`
} }
// AuditLog mirrors one row of the audit_logs table: a single record of an
// auditable action taken against a resource.
type AuditLog struct {
	ID             uuid.UUID `db:"id" json:"id"`
	Time           time.Time `db:"time" json:"time"`
	UserID         uuid.UUID `db:"user_id" json:"user_id"`
	OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
	// Ip maps the Postgres cidr column via pqtype.CIDR. (The sqlc-generated
	// name "Ip" intentionally deviates from Go's "IP" initialism convention.)
	Ip        pqtype.CIDR `db:"ip" json:"ip"`
	UserAgent string      `db:"user_agent" json:"user_agent"`
	// ResourceType and ResourceID identify the resource acted upon.
	ResourceType ResourceType `db:"resource_type" json:"resource_type"`
	ResourceID   uuid.UUID    `db:"resource_id" json:"resource_id"`
	// ResourceTarget is the name of the resource ResourceID points to; it is
	// stored separately because the resource itself can be deleted.
	ResourceTarget string      `db:"resource_target" json:"resource_target"`
	Action         AuditAction `db:"action" json:"action"`
	// Diff is the raw jsonb document; decoding is deferred to callers.
	Diff json.RawMessage `db:"diff" json:"diff"`
	// StatusCode is an integer result code; presumably the HTTP status of the
	// audited request — confirm with callers.
	StatusCode int32 `db:"status_code" json:"status_code"`
}
type File struct { type File struct {
Hash string `db:"hash" json:"hash"` Hash string `db:"hash" json:"hash"`
CreatedAt time.Time `db:"created_at" json:"created_at"` CreatedAt time.Time `db:"created_at" json:"created_at"`

View File

@ -1,4 +1,6 @@
// Code generated by sqlc. DO NOT EDIT. // Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.13.0
package database package database
@ -9,10 +11,19 @@ import (
) )
type querier interface { type querier interface {
// Acquires the lock for a single job that isn't started, completed,
// canceled, and that matches an array of provisioner types.
//
// SKIP LOCKED is used to jump over locked rows. This prevents
// multiple provisioners from acquiring the same jobs. See:
// https://www.postgresql.org/docs/9.5/sql-select.html#SQL-FOR-UPDATE-SHARE
AcquireProvisionerJob(ctx context.Context, arg AcquireProvisionerJobParams) (ProvisionerJob, error) AcquireProvisionerJob(ctx context.Context, arg AcquireProvisionerJobParams) (ProvisionerJob, error)
DeleteGitSSHKey(ctx context.Context, userID uuid.UUID) error DeleteGitSSHKey(ctx context.Context, userID uuid.UUID) error
DeleteParameterValueByID(ctx context.Context, id uuid.UUID) error DeleteParameterValueByID(ctx context.Context, id uuid.UUID) error
GetAPIKeyByID(ctx context.Context, id string) (APIKey, error) GetAPIKeyByID(ctx context.Context, id string) (APIKey, error)
// GetAuditLogsBefore retrieves `limit` number of audit logs before the provided
// ID.
GetAuditLogsBefore(ctx context.Context, arg GetAuditLogsBeforeParams) ([]AuditLog, error)
GetFileByHash(ctx context.Context, hash string) (File, error) GetFileByHash(ctx context.Context, hash string) (File, error)
GetGitSSHKey(ctx context.Context, userID uuid.UUID) (GitSSHKey, error) GetGitSSHKey(ctx context.Context, userID uuid.UUID) (GitSSHKey, error)
GetOrganizationByID(ctx context.Context, id uuid.UUID) (Organization, error) GetOrganizationByID(ctx context.Context, id uuid.UUID) (Organization, error)
@ -61,6 +72,7 @@ type querier interface {
GetWorkspacesByOwnerID(ctx context.Context, arg GetWorkspacesByOwnerIDParams) ([]Workspace, error) GetWorkspacesByOwnerID(ctx context.Context, arg GetWorkspacesByOwnerIDParams) ([]Workspace, error)
GetWorkspacesByTemplateID(ctx context.Context, arg GetWorkspacesByTemplateIDParams) ([]Workspace, error) GetWorkspacesByTemplateID(ctx context.Context, arg GetWorkspacesByTemplateIDParams) ([]Workspace, error)
InsertAPIKey(ctx context.Context, arg InsertAPIKeyParams) (APIKey, error) InsertAPIKey(ctx context.Context, arg InsertAPIKeyParams) (APIKey, error)
InsertAuditLog(ctx context.Context, arg InsertAuditLogParams) (AuditLog, error)
InsertFile(ctx context.Context, arg InsertFileParams) (File, error) InsertFile(ctx context.Context, arg InsertFileParams) (File, error)
InsertGitSSHKey(ctx context.Context, arg InsertGitSSHKeyParams) (GitSSHKey, error) InsertGitSSHKey(ctx context.Context, arg InsertGitSSHKeyParams) (GitSSHKey, error)
InsertOrganization(ctx context.Context, arg InsertOrganizationParams) (Organization, error) InsertOrganization(ctx context.Context, arg InsertOrganizationParams) (Organization, error)

View File

@ -1,4 +1,6 @@
// Code generated by sqlc. DO NOT EDIT. // Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.13.0
package database package database
@ -146,6 +148,130 @@ func (q *sqlQuerier) UpdateAPIKeyByID(ctx context.Context, arg UpdateAPIKeyByIDP
return err return err
} }
// getAuditLogsBefore is the SQL executed by GetAuditLogsBefore: it returns
// rows whose "time" is strictly earlier than that of the anchor row
// identified by $1, newest first, capped at $2.
const getAuditLogsBefore = `-- name: GetAuditLogsBefore :many
SELECT
	id, time, user_id, organization_id, ip, user_agent, resource_type, resource_id, resource_target, action, diff, status_code
FROM
	audit_logs
WHERE
	"time" < (SELECT "time" FROM audit_logs a WHERE a.id = $1)
ORDER BY
	"time" DESC
LIMIT
	$2
`

// GetAuditLogsBeforeParams carries the arguments for GetAuditLogsBefore.
type GetAuditLogsBeforeParams struct {
	// ID identifies the anchor audit log; only rows older than it are returned.
	ID uuid.UUID `db:"id" json:"id"`
	// RowLimit is the maximum number of rows to return.
	RowLimit int32 `db:"row_limit" json:"row_limit"`
}

// GetAuditLogsBefore retrieves `limit` number of audit logs before the provided
// ID.
//
// NOTE(review): the strict "<" on "time" means rows sharing the anchor row's
// exact timestamp are never returned, so pagination can skip entries — confirm
// whether a (time, id) tie-breaker is needed. Also, if no row matches the
// anchor ID the subquery yields NULL and the comparison matches nothing, so
// the result is empty rather than an error.
func (q *sqlQuerier) GetAuditLogsBefore(ctx context.Context, arg GetAuditLogsBeforeParams) ([]AuditLog, error) {
	rows, err := q.db.QueryContext(ctx, getAuditLogsBefore, arg.ID, arg.RowLimit)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []AuditLog
	for rows.Next() {
		var i AuditLog
		// Scan order must match the column order in the SELECT list above.
		if err := rows.Scan(
			&i.ID,
			&i.Time,
			&i.UserID,
			&i.OrganizationID,
			&i.Ip,
			&i.UserAgent,
			&i.ResourceType,
			&i.ResourceID,
			&i.ResourceTarget,
			&i.Action,
			&i.Diff,
			&i.StatusCode,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
// insertAuditLog is the SQL executed by InsertAuditLog: a single-row INSERT
// that returns the row as stored.
const insertAuditLog = `-- name: InsertAuditLog :one
INSERT INTO
	audit_logs (
		id,
		"time",
		user_id,
		organization_id,
		ip,
		user_agent,
		resource_type,
		resource_id,
		resource_target,
		action,
		diff,
		status_code
	)
VALUES
	($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12) RETURNING id, time, user_id, organization_id, ip, user_agent, resource_type, resource_id, resource_target, action, diff, status_code
`

// InsertAuditLogParams carries one value per audit_logs column. Every field
// is effectively required: all columns are declared NOT NULL in the schema.
type InsertAuditLogParams struct {
	ID             uuid.UUID       `db:"id" json:"id"`
	Time           time.Time       `db:"time" json:"time"`
	UserID         uuid.UUID       `db:"user_id" json:"user_id"`
	OrganizationID uuid.UUID       `db:"organization_id" json:"organization_id"`
	Ip             pqtype.CIDR     `db:"ip" json:"ip"`
	UserAgent      string          `db:"user_agent" json:"user_agent"`
	ResourceType   ResourceType    `db:"resource_type" json:"resource_type"`
	ResourceID     uuid.UUID       `db:"resource_id" json:"resource_id"`
	ResourceTarget string          `db:"resource_target" json:"resource_target"`
	Action         AuditAction     `db:"action" json:"action"`
	Diff           json.RawMessage `db:"diff" json:"diff"`
	StatusCode     int32           `db:"status_code" json:"status_code"`
}

// InsertAuditLog writes a single audit log entry and returns the inserted row.
func (q *sqlQuerier) InsertAuditLog(ctx context.Context, arg InsertAuditLogParams) (AuditLog, error) {
	// Argument order must match the $1..$12 placeholders in the query above.
	row := q.db.QueryRowContext(ctx, insertAuditLog,
		arg.ID,
		arg.Time,
		arg.UserID,
		arg.OrganizationID,
		arg.Ip,
		arg.UserAgent,
		arg.ResourceType,
		arg.ResourceID,
		arg.ResourceTarget,
		arg.Action,
		arg.Diff,
		arg.StatusCode,
	)
	var i AuditLog
	// Scan order must match the RETURNING column list in the query above.
	err := row.Scan(
		&i.ID,
		&i.Time,
		&i.UserID,
		&i.OrganizationID,
		&i.Ip,
		&i.UserAgent,
		&i.ResourceType,
		&i.ResourceID,
		&i.ResourceTarget,
		&i.Action,
		&i.Diff,
		&i.StatusCode,
	)
	return i, err
}
const getFileByHash = `-- name: GetFileByHash :one const getFileByHash = `-- name: GetFileByHash :one
SELECT SELECT
hash, created_at, created_by, mimetype, data hash, created_at, created_by, mimetype, data

View File

@ -0,0 +1,32 @@
-- GetAuditLogsBefore retrieves `limit` number of audit logs before the provided
-- ID.
-- NOTE(review): the strict "<" on "time" excludes rows that share the anchor
-- row's exact timestamp, so pagination can skip entries — confirm whether a
-- (time, id) tie-breaker is needed.
-- name: GetAuditLogsBefore :many
SELECT
	*
FROM
	audit_logs
WHERE
	"time" < (SELECT "time" FROM audit_logs a WHERE a.id = sqlc.arg(id))
ORDER BY
	"time" DESC
LIMIT
	sqlc.arg(row_limit);

-- InsertAuditLog writes a single audit log entry and returns the row as
-- stored. Every column is listed explicitly and all are NOT NULL.
-- name: InsertAuditLog :one
INSERT INTO
	audit_logs (
		id,
		"time",
		user_id,
		organization_id,
		ip,
		user_agent,
		resource_type,
		resource_id,
		resource_target,
		action,
		diff,
		status_code
	)
VALUES
	($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12) RETURNING *;