mirror of https://github.com/coder/coder.git
feat: add audit logging database schema (#1225)
This commit is contained in:
parent
e4e60256ac
commit
81bef1c83e
|
@ -99,7 +99,7 @@ jobs:
|
|||
with:
|
||||
go-version: "~1.18"
|
||||
- run: curl -sSL
|
||||
https://github.com/kyleconroy/sqlc/releases/download/v1.11.0/sqlc_1.11.0_linux_amd64.tar.gz
|
||||
https://github.com/kyleconroy/sqlc/releases/download/v1.13.0/sqlc_1.13.0_linux_amd64.tar.gz
|
||||
| sudo tar -C /usr/bin -xz sqlc
|
||||
|
||||
- run: go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.26
|
||||
|
|
|
@ -0,0 +1 @@
|
|||
test/
|
|
@ -8,6 +8,7 @@ import (
|
|||
"sync"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"golang.org/x/exp/slices"
|
||||
|
||||
"github.com/coder/coder/coderd/database"
|
||||
)
|
||||
|
@ -16,23 +17,24 @@ import (
|
|||
func New() database.Store {
|
||||
return &fakeQuerier{
|
||||
apiKeys: make([]database.APIKey, 0),
|
||||
organizations: make([]database.Organization, 0),
|
||||
organizationMembers: make([]database.OrganizationMember, 0),
|
||||
organizations: make([]database.Organization, 0),
|
||||
users: make([]database.User, 0),
|
||||
|
||||
files: make([]database.File, 0),
|
||||
parameterValue: make([]database.ParameterValue, 0),
|
||||
parameterSchema: make([]database.ParameterSchema, 0),
|
||||
template: make([]database.Template, 0),
|
||||
templateVersion: make([]database.TemplateVersion, 0),
|
||||
provisionerDaemons: make([]database.ProvisionerDaemon, 0),
|
||||
provisionerJobs: make([]database.ProvisionerJob, 0),
|
||||
provisionerJobLog: make([]database.ProvisionerJobLog, 0),
|
||||
workspaces: make([]database.Workspace, 0),
|
||||
provisionerJobResource: make([]database.WorkspaceResource, 0),
|
||||
workspaceBuild: make([]database.WorkspaceBuild, 0),
|
||||
provisionerJobAgent: make([]database.WorkspaceAgent, 0),
|
||||
GitSSHKey: make([]database.GitSSHKey, 0),
|
||||
auditLogs: make([]database.AuditLog, 0),
|
||||
files: make([]database.File, 0),
|
||||
gitSSHKey: make([]database.GitSSHKey, 0),
|
||||
parameterSchemas: make([]database.ParameterSchema, 0),
|
||||
parameterValues: make([]database.ParameterValue, 0),
|
||||
provisionerDaemons: make([]database.ProvisionerDaemon, 0),
|
||||
provisionerJobAgents: make([]database.WorkspaceAgent, 0),
|
||||
provisionerJobLogs: make([]database.ProvisionerJobLog, 0),
|
||||
provisionerJobResources: make([]database.WorkspaceResource, 0),
|
||||
provisionerJobs: make([]database.ProvisionerJob, 0),
|
||||
templateVersions: make([]database.TemplateVersion, 0),
|
||||
templates: make([]database.Template, 0),
|
||||
workspaceBuilds: make([]database.WorkspaceBuild, 0),
|
||||
workspaces: make([]database.Workspace, 0),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -47,19 +49,20 @@ type fakeQuerier struct {
|
|||
users []database.User
|
||||
|
||||
// New tables
|
||||
files []database.File
|
||||
parameterValue []database.ParameterValue
|
||||
parameterSchema []database.ParameterSchema
|
||||
template []database.Template
|
||||
templateVersion []database.TemplateVersion
|
||||
provisionerDaemons []database.ProvisionerDaemon
|
||||
provisionerJobs []database.ProvisionerJob
|
||||
provisionerJobAgent []database.WorkspaceAgent
|
||||
provisionerJobResource []database.WorkspaceResource
|
||||
provisionerJobLog []database.ProvisionerJobLog
|
||||
workspaces []database.Workspace
|
||||
workspaceBuild []database.WorkspaceBuild
|
||||
GitSSHKey []database.GitSSHKey
|
||||
auditLogs []database.AuditLog
|
||||
files []database.File
|
||||
gitSSHKey []database.GitSSHKey
|
||||
parameterSchemas []database.ParameterSchema
|
||||
parameterValues []database.ParameterValue
|
||||
provisionerDaemons []database.ProvisionerDaemon
|
||||
provisionerJobAgents []database.WorkspaceAgent
|
||||
provisionerJobLogs []database.ProvisionerJobLog
|
||||
provisionerJobResources []database.WorkspaceResource
|
||||
provisionerJobs []database.ProvisionerJob
|
||||
templateVersions []database.TemplateVersion
|
||||
templates []database.Template
|
||||
workspaceBuilds []database.WorkspaceBuild
|
||||
workspaces []database.Workspace
|
||||
}
|
||||
|
||||
// InTx doesn't rollback data properly for in-memory yet.
|
||||
|
@ -99,12 +102,12 @@ func (q *fakeQuerier) DeleteParameterValueByID(_ context.Context, id uuid.UUID)
|
|||
q.mutex.Lock()
|
||||
defer q.mutex.Unlock()
|
||||
|
||||
for index, parameterValue := range q.parameterValue {
|
||||
for index, parameterValue := range q.parameterValues {
|
||||
if parameterValue.ID.String() != id.String() {
|
||||
continue
|
||||
}
|
||||
q.parameterValue[index] = q.parameterValue[len(q.parameterValue)-1]
|
||||
q.parameterValue = q.parameterValue[:len(q.parameterValue)-1]
|
||||
q.parameterValues[index] = q.parameterValues[len(q.parameterValues)-1]
|
||||
q.parameterValues = q.parameterValues[:len(q.parameterValues)-1]
|
||||
return nil
|
||||
}
|
||||
return sql.ErrNoRows
|
||||
|
@ -337,7 +340,7 @@ func (q *fakeQuerier) GetWorkspaceBuildByID(_ context.Context, id uuid.UUID) (da
|
|||
q.mutex.RLock()
|
||||
defer q.mutex.RUnlock()
|
||||
|
||||
for _, history := range q.workspaceBuild {
|
||||
for _, history := range q.workspaceBuilds {
|
||||
if history.ID.String() == id.String() {
|
||||
return history, nil
|
||||
}
|
||||
|
@ -349,7 +352,7 @@ func (q *fakeQuerier) GetWorkspaceBuildByJobID(_ context.Context, jobID uuid.UUI
|
|||
q.mutex.RLock()
|
||||
defer q.mutex.RUnlock()
|
||||
|
||||
for _, build := range q.workspaceBuild {
|
||||
for _, build := range q.workspaceBuilds {
|
||||
if build.JobID.String() == jobID.String() {
|
||||
return build, nil
|
||||
}
|
||||
|
@ -361,7 +364,7 @@ func (q *fakeQuerier) GetWorkspaceBuildByWorkspaceIDWithoutAfter(_ context.Conte
|
|||
q.mutex.RLock()
|
||||
defer q.mutex.RUnlock()
|
||||
|
||||
for _, workspaceBuild := range q.workspaceBuild {
|
||||
for _, workspaceBuild := range q.workspaceBuilds {
|
||||
if workspaceBuild.WorkspaceID.String() != workspaceID.String() {
|
||||
continue
|
||||
}
|
||||
|
@ -377,7 +380,7 @@ func (q *fakeQuerier) GetWorkspaceBuildsByWorkspaceIDsWithoutAfter(_ context.Con
|
|||
defer q.mutex.RUnlock()
|
||||
|
||||
builds := make([]database.WorkspaceBuild, 0)
|
||||
for _, workspaceBuild := range q.workspaceBuild {
|
||||
for _, workspaceBuild := range q.workspaceBuilds {
|
||||
for _, id := range ids {
|
||||
if id.String() != workspaceBuild.WorkspaceID.String() {
|
||||
continue
|
||||
|
@ -396,7 +399,7 @@ func (q *fakeQuerier) GetWorkspaceBuildByWorkspaceID(_ context.Context, workspac
|
|||
defer q.mutex.RUnlock()
|
||||
|
||||
history := make([]database.WorkspaceBuild, 0)
|
||||
for _, workspaceBuild := range q.workspaceBuild {
|
||||
for _, workspaceBuild := range q.workspaceBuilds {
|
||||
if workspaceBuild.WorkspaceID.String() == workspaceID.String() {
|
||||
history = append(history, workspaceBuild)
|
||||
}
|
||||
|
@ -411,7 +414,7 @@ func (q *fakeQuerier) GetWorkspaceBuildByWorkspaceIDAndName(_ context.Context, a
|
|||
q.mutex.RLock()
|
||||
defer q.mutex.RUnlock()
|
||||
|
||||
for _, workspaceBuild := range q.workspaceBuild {
|
||||
for _, workspaceBuild := range q.workspaceBuilds {
|
||||
if workspaceBuild.WorkspaceID.String() != arg.WorkspaceID.String() {
|
||||
continue
|
||||
}
|
||||
|
@ -524,7 +527,7 @@ func (q *fakeQuerier) GetParameterValuesByScope(_ context.Context, arg database.
|
|||
defer q.mutex.RUnlock()
|
||||
|
||||
parameterValues := make([]database.ParameterValue, 0)
|
||||
for _, parameterValue := range q.parameterValue {
|
||||
for _, parameterValue := range q.parameterValues {
|
||||
if parameterValue.Scope != arg.Scope {
|
||||
continue
|
||||
}
|
||||
|
@ -543,7 +546,7 @@ func (q *fakeQuerier) GetTemplateByID(_ context.Context, id uuid.UUID) (database
|
|||
q.mutex.RLock()
|
||||
defer q.mutex.RUnlock()
|
||||
|
||||
for _, template := range q.template {
|
||||
for _, template := range q.templates {
|
||||
if template.ID.String() == id.String() {
|
||||
return template, nil
|
||||
}
|
||||
|
@ -555,7 +558,7 @@ func (q *fakeQuerier) GetTemplateByOrganizationAndName(_ context.Context, arg da
|
|||
q.mutex.RLock()
|
||||
defer q.mutex.RUnlock()
|
||||
|
||||
for _, template := range q.template {
|
||||
for _, template := range q.templates {
|
||||
if template.OrganizationID != arg.OrganizationID {
|
||||
continue
|
||||
}
|
||||
|
@ -575,7 +578,7 @@ func (q *fakeQuerier) GetTemplateVersionsByTemplateID(_ context.Context, templat
|
|||
defer q.mutex.RUnlock()
|
||||
|
||||
version := make([]database.TemplateVersion, 0)
|
||||
for _, templateVersion := range q.templateVersion {
|
||||
for _, templateVersion := range q.templateVersions {
|
||||
if templateVersion.TemplateID.UUID.String() != templateID.String() {
|
||||
continue
|
||||
}
|
||||
|
@ -591,7 +594,7 @@ func (q *fakeQuerier) GetTemplateVersionByTemplateIDAndName(_ context.Context, a
|
|||
q.mutex.RLock()
|
||||
defer q.mutex.RUnlock()
|
||||
|
||||
for _, templateVersion := range q.templateVersion {
|
||||
for _, templateVersion := range q.templateVersions {
|
||||
if templateVersion.TemplateID != arg.TemplateID {
|
||||
continue
|
||||
}
|
||||
|
@ -607,7 +610,7 @@ func (q *fakeQuerier) GetTemplateVersionByID(_ context.Context, templateVersionI
|
|||
q.mutex.RLock()
|
||||
defer q.mutex.RUnlock()
|
||||
|
||||
for _, templateVersion := range q.templateVersion {
|
||||
for _, templateVersion := range q.templateVersions {
|
||||
if templateVersion.ID.String() != templateVersionID.String() {
|
||||
continue
|
||||
}
|
||||
|
@ -620,7 +623,7 @@ func (q *fakeQuerier) GetTemplateVersionByJobID(_ context.Context, jobID uuid.UU
|
|||
q.mutex.RLock()
|
||||
defer q.mutex.RUnlock()
|
||||
|
||||
for _, templateVersion := range q.templateVersion {
|
||||
for _, templateVersion := range q.templateVersions {
|
||||
if templateVersion.JobID.String() != jobID.String() {
|
||||
continue
|
||||
}
|
||||
|
@ -634,7 +637,7 @@ func (q *fakeQuerier) GetParameterSchemasByJobID(_ context.Context, jobID uuid.U
|
|||
defer q.mutex.RUnlock()
|
||||
|
||||
parameters := make([]database.ParameterSchema, 0)
|
||||
for _, parameterSchema := range q.parameterSchema {
|
||||
for _, parameterSchema := range q.parameterSchemas {
|
||||
if parameterSchema.JobID.String() != jobID.String() {
|
||||
continue
|
||||
}
|
||||
|
@ -650,7 +653,7 @@ func (q *fakeQuerier) GetParameterValueByScopeAndName(_ context.Context, arg dat
|
|||
q.mutex.RLock()
|
||||
defer q.mutex.RUnlock()
|
||||
|
||||
for _, parameterValue := range q.parameterValue {
|
||||
for _, parameterValue := range q.parameterValues {
|
||||
if parameterValue.Scope != arg.Scope {
|
||||
continue
|
||||
}
|
||||
|
@ -670,7 +673,7 @@ func (q *fakeQuerier) GetTemplatesByOrganization(_ context.Context, arg database
|
|||
defer q.mutex.RUnlock()
|
||||
|
||||
templates := make([]database.Template, 0)
|
||||
for _, template := range q.template {
|
||||
for _, template := range q.templates {
|
||||
if template.Deleted != arg.Deleted {
|
||||
continue
|
||||
}
|
||||
|
@ -690,7 +693,7 @@ func (q *fakeQuerier) GetTemplatesByIDs(_ context.Context, ids []uuid.UUID) ([]d
|
|||
defer q.mutex.RUnlock()
|
||||
|
||||
templates := make([]database.Template, 0)
|
||||
for _, template := range q.template {
|
||||
for _, template := range q.templates {
|
||||
for _, id := range ids {
|
||||
if template.ID.String() != id.String() {
|
||||
continue
|
||||
|
@ -795,8 +798,8 @@ func (q *fakeQuerier) GetWorkspaceAgentByAuthToken(_ context.Context, authToken
|
|||
defer q.mutex.RUnlock()
|
||||
|
||||
// The schema sorts this by created at, so we iterate the array backwards.
|
||||
for i := len(q.provisionerJobAgent) - 1; i >= 0; i-- {
|
||||
agent := q.provisionerJobAgent[i]
|
||||
for i := len(q.provisionerJobAgents) - 1; i >= 0; i-- {
|
||||
agent := q.provisionerJobAgents[i]
|
||||
if agent.AuthToken.String() == authToken.String() {
|
||||
return agent, nil
|
||||
}
|
||||
|
@ -809,8 +812,8 @@ func (q *fakeQuerier) GetWorkspaceAgentByID(_ context.Context, id uuid.UUID) (da
|
|||
defer q.mutex.RUnlock()
|
||||
|
||||
// The schema sorts this by created at, so we iterate the array backwards.
|
||||
for i := len(q.provisionerJobAgent) - 1; i >= 0; i-- {
|
||||
agent := q.provisionerJobAgent[i]
|
||||
for i := len(q.provisionerJobAgents) - 1; i >= 0; i-- {
|
||||
agent := q.provisionerJobAgents[i]
|
||||
if agent.ID.String() == id.String() {
|
||||
return agent, nil
|
||||
}
|
||||
|
@ -823,8 +826,8 @@ func (q *fakeQuerier) GetWorkspaceAgentByInstanceID(_ context.Context, instanceI
|
|||
defer q.mutex.RUnlock()
|
||||
|
||||
// The schema sorts this by created at, so we iterate the array backwards.
|
||||
for i := len(q.provisionerJobAgent) - 1; i >= 0; i-- {
|
||||
agent := q.provisionerJobAgent[i]
|
||||
for i := len(q.provisionerJobAgents) - 1; i >= 0; i-- {
|
||||
agent := q.provisionerJobAgents[i]
|
||||
if agent.AuthInstanceID.Valid && agent.AuthInstanceID.String == instanceID {
|
||||
return agent, nil
|
||||
}
|
||||
|
@ -837,7 +840,7 @@ func (q *fakeQuerier) GetWorkspaceAgentsByResourceIDs(_ context.Context, resourc
|
|||
defer q.mutex.RUnlock()
|
||||
|
||||
workspaceAgents := make([]database.WorkspaceAgent, 0)
|
||||
for _, agent := range q.provisionerJobAgent {
|
||||
for _, agent := range q.provisionerJobAgents {
|
||||
for _, resourceID := range resourceIDs {
|
||||
if agent.ResourceID.String() != resourceID.String() {
|
||||
continue
|
||||
|
@ -881,7 +884,7 @@ func (q *fakeQuerier) GetWorkspaceResourceByID(_ context.Context, id uuid.UUID)
|
|||
q.mutex.RLock()
|
||||
defer q.mutex.RUnlock()
|
||||
|
||||
for _, resource := range q.provisionerJobResource {
|
||||
for _, resource := range q.provisionerJobResources {
|
||||
if resource.ID.String() == id.String() {
|
||||
return resource, nil
|
||||
}
|
||||
|
@ -894,7 +897,7 @@ func (q *fakeQuerier) GetWorkspaceResourcesByJobID(_ context.Context, jobID uuid
|
|||
defer q.mutex.RUnlock()
|
||||
|
||||
resources := make([]database.WorkspaceResource, 0)
|
||||
for _, resource := range q.provisionerJobResource {
|
||||
for _, resource := range q.provisionerJobResources {
|
||||
if resource.JobID.String() != jobID.String() {
|
||||
continue
|
||||
}
|
||||
|
@ -931,7 +934,7 @@ func (q *fakeQuerier) GetProvisionerLogsByIDBetween(_ context.Context, arg datab
|
|||
defer q.mutex.RUnlock()
|
||||
|
||||
logs := make([]database.ProvisionerJobLog, 0)
|
||||
for _, jobLog := range q.provisionerJobLog {
|
||||
for _, jobLog := range q.provisionerJobLogs {
|
||||
if jobLog.JobID.String() != arg.JobID.String() {
|
||||
continue
|
||||
}
|
||||
|
@ -1034,7 +1037,7 @@ func (q *fakeQuerier) InsertParameterValue(_ context.Context, arg database.Inser
|
|||
SourceValue: arg.SourceValue,
|
||||
DestinationScheme: arg.DestinationScheme,
|
||||
}
|
||||
q.parameterValue = append(q.parameterValue, parameterValue)
|
||||
q.parameterValues = append(q.parameterValues, parameterValue)
|
||||
return parameterValue, nil
|
||||
}
|
||||
|
||||
|
@ -1052,7 +1055,7 @@ func (q *fakeQuerier) InsertTemplate(_ context.Context, arg database.InsertTempl
|
|||
Provisioner: arg.Provisioner,
|
||||
ActiveVersionID: arg.ActiveVersionID,
|
||||
}
|
||||
q.template = append(q.template, template)
|
||||
q.templates = append(q.templates, template)
|
||||
return template, nil
|
||||
}
|
||||
|
||||
|
@ -1071,7 +1074,7 @@ func (q *fakeQuerier) InsertTemplateVersion(_ context.Context, arg database.Inse
|
|||
Description: arg.Description,
|
||||
JobID: arg.JobID,
|
||||
}
|
||||
q.templateVersion = append(q.templateVersion, version)
|
||||
q.templateVersions = append(q.templateVersions, version)
|
||||
return version, nil
|
||||
}
|
||||
|
||||
|
@ -1091,7 +1094,7 @@ func (q *fakeQuerier) InsertProvisionerJobLogs(_ context.Context, arg database.I
|
|||
Output: output,
|
||||
})
|
||||
}
|
||||
q.provisionerJobLog = append(q.provisionerJobLog, logs...)
|
||||
q.provisionerJobLogs = append(q.provisionerJobLogs, logs...)
|
||||
return logs, nil
|
||||
}
|
||||
|
||||
|
@ -1118,7 +1121,7 @@ func (q *fakeQuerier) InsertParameterSchema(_ context.Context, arg database.Inse
|
|||
ValidationTypeSystem: arg.ValidationTypeSystem,
|
||||
ValidationValueType: arg.ValidationValueType,
|
||||
}
|
||||
q.parameterSchema = append(q.parameterSchema, param)
|
||||
q.parameterSchemas = append(q.parameterSchemas, param)
|
||||
return param, nil
|
||||
}
|
||||
|
||||
|
@ -1178,7 +1181,7 @@ func (q *fakeQuerier) InsertWorkspaceAgent(_ context.Context, arg database.Inser
|
|||
InstanceMetadata: arg.InstanceMetadata,
|
||||
ResourceMetadata: arg.ResourceMetadata,
|
||||
}
|
||||
q.provisionerJobAgent = append(q.provisionerJobAgent, agent)
|
||||
q.provisionerJobAgents = append(q.provisionerJobAgents, agent)
|
||||
return agent, nil
|
||||
}
|
||||
|
||||
|
@ -1195,7 +1198,7 @@ func (q *fakeQuerier) InsertWorkspaceResource(_ context.Context, arg database.In
|
|||
Type: arg.Type,
|
||||
Name: arg.Name,
|
||||
}
|
||||
q.provisionerJobResource = append(q.provisionerJobResource, resource)
|
||||
q.provisionerJobResources = append(q.provisionerJobResources, resource)
|
||||
return resource, nil
|
||||
}
|
||||
|
||||
|
@ -1314,7 +1317,7 @@ func (q *fakeQuerier) InsertWorkspaceBuild(_ context.Context, arg database.Inser
|
|||
JobID: arg.JobID,
|
||||
ProvisionerState: arg.ProvisionerState,
|
||||
}
|
||||
q.workspaceBuild = append(q.workspaceBuild, workspaceBuild)
|
||||
q.workspaceBuilds = append(q.workspaceBuilds, workspaceBuild)
|
||||
return workspaceBuild, nil
|
||||
}
|
||||
|
||||
|
@ -1341,12 +1344,12 @@ func (q *fakeQuerier) UpdateTemplateActiveVersionByID(_ context.Context, arg dat
|
|||
q.mutex.Lock()
|
||||
defer q.mutex.Unlock()
|
||||
|
||||
for index, template := range q.template {
|
||||
for index, template := range q.templates {
|
||||
if template.ID.String() != arg.ID.String() {
|
||||
continue
|
||||
}
|
||||
template.ActiveVersionID = arg.ActiveVersionID
|
||||
q.template[index] = template
|
||||
q.templates[index] = template
|
||||
return nil
|
||||
}
|
||||
return sql.ErrNoRows
|
||||
|
@ -1356,12 +1359,12 @@ func (q *fakeQuerier) UpdateTemplateDeletedByID(_ context.Context, arg database.
|
|||
q.mutex.Lock()
|
||||
defer q.mutex.Unlock()
|
||||
|
||||
for index, template := range q.template {
|
||||
for index, template := range q.templates {
|
||||
if template.ID.String() != arg.ID.String() {
|
||||
continue
|
||||
}
|
||||
template.Deleted = arg.Deleted
|
||||
q.template[index] = template
|
||||
q.templates[index] = template
|
||||
return nil
|
||||
}
|
||||
return sql.ErrNoRows
|
||||
|
@ -1371,13 +1374,13 @@ func (q *fakeQuerier) UpdateTemplateVersionByID(_ context.Context, arg database.
|
|||
q.mutex.Lock()
|
||||
defer q.mutex.Unlock()
|
||||
|
||||
for index, templateVersion := range q.templateVersion {
|
||||
for index, templateVersion := range q.templateVersions {
|
||||
if templateVersion.ID.String() != arg.ID.String() {
|
||||
continue
|
||||
}
|
||||
templateVersion.TemplateID = arg.TemplateID
|
||||
templateVersion.UpdatedAt = arg.UpdatedAt
|
||||
q.templateVersion[index] = templateVersion
|
||||
q.templateVersions[index] = templateVersion
|
||||
return nil
|
||||
}
|
||||
return sql.ErrNoRows
|
||||
|
@ -1403,14 +1406,14 @@ func (q *fakeQuerier) UpdateWorkspaceAgentConnectionByID(_ context.Context, arg
|
|||
q.mutex.Lock()
|
||||
defer q.mutex.Unlock()
|
||||
|
||||
for index, agent := range q.provisionerJobAgent {
|
||||
for index, agent := range q.provisionerJobAgents {
|
||||
if agent.ID.String() != arg.ID.String() {
|
||||
continue
|
||||
}
|
||||
agent.FirstConnectedAt = arg.FirstConnectedAt
|
||||
agent.LastConnectedAt = arg.LastConnectedAt
|
||||
agent.DisconnectedAt = arg.DisconnectedAt
|
||||
q.provisionerJobAgent[index] = agent
|
||||
q.provisionerJobAgents[index] = agent
|
||||
return nil
|
||||
}
|
||||
return sql.ErrNoRows
|
||||
|
@ -1499,14 +1502,14 @@ func (q *fakeQuerier) UpdateWorkspaceBuildByID(_ context.Context, arg database.U
|
|||
q.mutex.Lock()
|
||||
defer q.mutex.Unlock()
|
||||
|
||||
for index, workspaceBuild := range q.workspaceBuild {
|
||||
for index, workspaceBuild := range q.workspaceBuilds {
|
||||
if workspaceBuild.ID.String() != arg.ID.String() {
|
||||
continue
|
||||
}
|
||||
workspaceBuild.UpdatedAt = arg.UpdatedAt
|
||||
workspaceBuild.AfterID = arg.AfterID
|
||||
workspaceBuild.ProvisionerState = arg.ProvisionerState
|
||||
q.workspaceBuild[index] = workspaceBuild
|
||||
q.workspaceBuilds[index] = workspaceBuild
|
||||
return nil
|
||||
}
|
||||
return sql.ErrNoRows
|
||||
|
@ -1539,7 +1542,7 @@ func (q *fakeQuerier) InsertGitSSHKey(_ context.Context, arg database.InsertGitS
|
|||
PrivateKey: arg.PrivateKey,
|
||||
PublicKey: arg.PublicKey,
|
||||
}
|
||||
q.GitSSHKey = append(q.GitSSHKey, gitSSHKey)
|
||||
q.gitSSHKey = append(q.gitSSHKey, gitSSHKey)
|
||||
return gitSSHKey, nil
|
||||
}
|
||||
|
||||
|
@ -1547,7 +1550,7 @@ func (q *fakeQuerier) GetGitSSHKey(_ context.Context, userID uuid.UUID) (databas
|
|||
q.mutex.RLock()
|
||||
defer q.mutex.RUnlock()
|
||||
|
||||
for _, key := range q.GitSSHKey {
|
||||
for _, key := range q.gitSSHKey {
|
||||
if key.UserID == userID {
|
||||
return key, nil
|
||||
}
|
||||
|
@ -1559,14 +1562,14 @@ func (q *fakeQuerier) UpdateGitSSHKey(_ context.Context, arg database.UpdateGitS
|
|||
q.mutex.Lock()
|
||||
defer q.mutex.Unlock()
|
||||
|
||||
for index, key := range q.GitSSHKey {
|
||||
for index, key := range q.gitSSHKey {
|
||||
if key.UserID.String() != arg.UserID.String() {
|
||||
continue
|
||||
}
|
||||
key.UpdatedAt = arg.UpdatedAt
|
||||
key.PrivateKey = arg.PrivateKey
|
||||
key.PublicKey = arg.PublicKey
|
||||
q.GitSSHKey[index] = key
|
||||
q.gitSSHKey[index] = key
|
||||
return nil
|
||||
}
|
||||
return sql.ErrNoRows
|
||||
|
@ -1576,13 +1579,72 @@ func (q *fakeQuerier) DeleteGitSSHKey(_ context.Context, userID uuid.UUID) error
|
|||
q.mutex.Lock()
|
||||
defer q.mutex.Unlock()
|
||||
|
||||
for index, key := range q.GitSSHKey {
|
||||
for index, key := range q.gitSSHKey {
|
||||
if key.UserID.String() != userID.String() {
|
||||
continue
|
||||
}
|
||||
q.GitSSHKey[index] = q.GitSSHKey[len(q.GitSSHKey)-1]
|
||||
q.GitSSHKey = q.GitSSHKey[:len(q.GitSSHKey)-1]
|
||||
q.gitSSHKey[index] = q.gitSSHKey[len(q.gitSSHKey)-1]
|
||||
q.gitSSHKey = q.gitSSHKey[:len(q.gitSSHKey)-1]
|
||||
return nil
|
||||
}
|
||||
return sql.ErrNoRows
|
||||
}
|
||||
|
||||
func (q *fakeQuerier) GetAuditLogsBefore(_ context.Context, arg database.GetAuditLogsBeforeParams) ([]database.AuditLog, error) {
|
||||
q.mutex.RLock()
|
||||
defer q.mutex.RUnlock()
|
||||
|
||||
logs := make([]database.AuditLog, 0)
|
||||
start := database.AuditLog{}
|
||||
|
||||
for _, alog := range q.auditLogs {
|
||||
if alog.ID == arg.ID {
|
||||
start = alog
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if start.ID == uuid.Nil {
|
||||
return nil, sql.ErrNoRows
|
||||
}
|
||||
|
||||
// q.auditLogs are already sorted by time DESC, so no need to sort after the fact.
|
||||
for _, alog := range q.auditLogs {
|
||||
if alog.Time.Before(start.Time) {
|
||||
logs = append(logs, alog)
|
||||
}
|
||||
|
||||
if len(logs) >= int(arg.RowLimit) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return logs, nil
|
||||
}
|
||||
|
||||
func (q *fakeQuerier) InsertAuditLog(_ context.Context, arg database.InsertAuditLogParams) (database.AuditLog, error) {
|
||||
q.mutex.Lock()
|
||||
defer q.mutex.Unlock()
|
||||
|
||||
alog := database.AuditLog{
|
||||
ID: arg.ID,
|
||||
Time: arg.Time,
|
||||
UserID: arg.UserID,
|
||||
OrganizationID: arg.OrganizationID,
|
||||
Ip: arg.Ip,
|
||||
UserAgent: arg.UserAgent,
|
||||
ResourceType: arg.ResourceType,
|
||||
ResourceID: arg.ResourceID,
|
||||
ResourceTarget: arg.ResourceTarget,
|
||||
Action: arg.Action,
|
||||
Diff: arg.Diff,
|
||||
StatusCode: arg.StatusCode,
|
||||
}
|
||||
|
||||
q.auditLogs = append(q.auditLogs, alog)
|
||||
slices.SortFunc(q.auditLogs, func(a, b database.AuditLog) bool {
|
||||
return a.Time.Before(b.Time)
|
||||
})
|
||||
|
||||
return alog, nil
|
||||
}
|
||||
|
|
|
@ -1,5 +1,11 @@
|
|||
-- Code generated by 'make coderd/database/generate'. DO NOT EDIT.
|
||||
|
||||
CREATE TYPE audit_action AS ENUM (
|
||||
'create',
|
||||
'write',
|
||||
'delete'
|
||||
);
|
||||
|
||||
CREATE TYPE log_level AS ENUM (
|
||||
'trace',
|
||||
'debug',
|
||||
|
@ -56,6 +62,14 @@ CREATE TYPE provisioner_type AS ENUM (
|
|||
'terraform'
|
||||
);
|
||||
|
||||
CREATE TYPE resource_type AS ENUM (
|
||||
'organization',
|
||||
'template',
|
||||
'template_version',
|
||||
'user',
|
||||
'workspace'
|
||||
);
|
||||
|
||||
CREATE TYPE user_status AS ENUM (
|
||||
'active',
|
||||
'suspended'
|
||||
|
@ -82,6 +96,21 @@ CREATE TABLE api_keys (
|
|||
oauth_expiry timestamp with time zone DEFAULT '0001-01-01 00:00:00+00'::timestamp with time zone NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE audit_logs (
|
||||
id uuid NOT NULL,
|
||||
"time" timestamp with time zone NOT NULL,
|
||||
user_id uuid NOT NULL,
|
||||
organization_id uuid NOT NULL,
|
||||
ip cidr NOT NULL,
|
||||
user_agent character varying(256) NOT NULL,
|
||||
resource_type resource_type NOT NULL,
|
||||
resource_id uuid NOT NULL,
|
||||
resource_target text NOT NULL,
|
||||
action audit_action NOT NULL,
|
||||
diff jsonb NOT NULL,
|
||||
status_code integer NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE files (
|
||||
hash character varying(64) NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
|
@ -293,6 +322,9 @@ ALTER TABLE ONLY licenses ALTER COLUMN id SET DEFAULT nextval('public.licenses_i
|
|||
ALTER TABLE ONLY api_keys
|
||||
ADD CONSTRAINT api_keys_pkey PRIMARY KEY (id);
|
||||
|
||||
ALTER TABLE ONLY audit_logs
|
||||
ADD CONSTRAINT audit_logs_pkey PRIMARY KEY (id);
|
||||
|
||||
ALTER TABLE ONLY files
|
||||
ADD CONSTRAINT files_pkey PRIMARY KEY (hash);
|
||||
|
||||
|
@ -367,6 +399,14 @@ ALTER TABLE ONLY workspaces
|
|||
|
||||
CREATE INDEX idx_api_keys_user ON api_keys USING btree (user_id);
|
||||
|
||||
CREATE INDEX idx_audit_log_organization_id ON audit_logs USING btree (organization_id);
|
||||
|
||||
CREATE INDEX idx_audit_log_resource_id ON audit_logs USING btree (resource_id);
|
||||
|
||||
CREATE INDEX idx_audit_log_user_id ON audit_logs USING btree (user_id);
|
||||
|
||||
CREATE INDEX idx_audit_logs_time_desc ON audit_logs USING btree ("time" DESC);
|
||||
|
||||
CREATE INDEX idx_organization_member_organization_id_uuid ON organization_members USING btree (organization_id);
|
||||
|
||||
CREATE INDEX idx_organization_member_user_id_uuid ON organization_members USING btree (user_id);
|
||||
|
|
|
@ -10,6 +10,8 @@ set -euo pipefail
|
|||
|
||||
cd "$(dirname "$0")"
|
||||
|
||||
# The logic below depends on the exact version being correct :(
|
||||
[[ $(sqlc version) != "v1.13.0" ]] && go install github.com/kyleconroy/sqlc/cmd/sqlc@v1.13.0
|
||||
sqlc generate
|
||||
|
||||
first=true
|
||||
|
@ -20,7 +22,7 @@ for fi in queries/*.sql.go; do
|
|||
|
||||
# Copy the header from the first file only, ignoring the source comment.
|
||||
if $first; then
|
||||
head -n 4 < "$fi" | grep -v "source" > queries.sql.go
|
||||
head -n 6 < "$fi" | grep -v "source" > queries.sql.go
|
||||
first=false
|
||||
fi
|
||||
|
||||
|
|
|
@ -0,0 +1,3 @@
|
|||
DROP TABLE audit_logs;
|
||||
DROP TYPE audit_action;
|
||||
DROP TYPE resource_type;
|
|
@ -0,0 +1,37 @@
|
|||
CREATE TYPE resource_type AS ENUM (
|
||||
'organization',
|
||||
'template',
|
||||
'template_version',
|
||||
'user',
|
||||
'workspace'
|
||||
);
|
||||
|
||||
CREATE TYPE audit_action AS ENUM (
|
||||
'create',
|
||||
-- We intentionally do not track reads. They're way too spammy.
|
||||
'write',
|
||||
'delete'
|
||||
);
|
||||
|
||||
CREATE TABLE audit_logs (
|
||||
id uuid NOT NULL,
|
||||
"time" timestamp with time zone NOT NULL,
|
||||
user_id uuid NOT NULL,
|
||||
organization_id uuid NOT NULL,
|
||||
ip cidr NOT NULL,
|
||||
user_agent varchar(256) NOT NULL,
|
||||
resource_type resource_type NOT NULL,
|
||||
resource_id uuid NOT NULL,
|
||||
-- resource_target is the name of the resource that `resource_id` points to.
|
||||
-- it's stored here because resources we point to can be deleted.
|
||||
resource_target text NOT NULL,
|
||||
action audit_action NOT NULL,
|
||||
diff jsonb NOT NULL,
|
||||
status_code integer NOT NULL,
|
||||
PRIMARY KEY (id)
|
||||
);
|
||||
|
||||
CREATE INDEX idx_audit_logs_time_desc ON audit_logs USING btree ("time" DESC);
|
||||
CREATE INDEX idx_audit_log_user_id ON audit_logs USING btree (user_id);
|
||||
CREATE INDEX idx_audit_log_organization_id ON audit_logs USING btree (organization_id);
|
||||
CREATE INDEX idx_audit_log_resource_id ON audit_logs USING btree (resource_id);
|
|
@ -1,4 +1,6 @@
|
|||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.13.0
|
||||
|
||||
package database
|
||||
|
||||
|
@ -12,6 +14,26 @@ import (
|
|||
"github.com/tabbed/pqtype"
|
||||
)
|
||||
|
||||
type AuditAction string
|
||||
|
||||
const (
|
||||
AuditActionCreate AuditAction = "create"
|
||||
AuditActionWrite AuditAction = "write"
|
||||
AuditActionDelete AuditAction = "delete"
|
||||
)
|
||||
|
||||
func (e *AuditAction) Scan(src interface{}) error {
|
||||
switch s := src.(type) {
|
||||
case []byte:
|
||||
*e = AuditAction(s)
|
||||
case string:
|
||||
*e = AuditAction(s)
|
||||
default:
|
||||
return fmt.Errorf("unsupported scan type for AuditAction: %T", src)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type LogLevel string
|
||||
|
||||
const (
|
||||
|
@ -208,6 +230,28 @@ func (e *ProvisionerType) Scan(src interface{}) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
type ResourceType string
|
||||
|
||||
const (
|
||||
ResourceTypeOrganization ResourceType = "organization"
|
||||
ResourceTypeTemplate ResourceType = "template"
|
||||
ResourceTypeTemplateVersion ResourceType = "template_version"
|
||||
ResourceTypeUser ResourceType = "user"
|
||||
ResourceTypeWorkspace ResourceType = "workspace"
|
||||
)
|
||||
|
||||
func (e *ResourceType) Scan(src interface{}) error {
|
||||
switch s := src.(type) {
|
||||
case []byte:
|
||||
*e = ResourceType(s)
|
||||
case string:
|
||||
*e = ResourceType(s)
|
||||
default:
|
||||
return fmt.Errorf("unsupported scan type for ResourceType: %T", src)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type UserStatus string
|
||||
|
||||
const (
|
||||
|
@ -262,6 +306,21 @@ type APIKey struct {
|
|||
OAuthExpiry time.Time `db:"oauth_expiry" json:"oauth_expiry"`
|
||||
}
|
||||
|
||||
type AuditLog struct {
|
||||
ID uuid.UUID `db:"id" json:"id"`
|
||||
Time time.Time `db:"time" json:"time"`
|
||||
UserID uuid.UUID `db:"user_id" json:"user_id"`
|
||||
OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
|
||||
Ip pqtype.CIDR `db:"ip" json:"ip"`
|
||||
UserAgent string `db:"user_agent" json:"user_agent"`
|
||||
ResourceType ResourceType `db:"resource_type" json:"resource_type"`
|
||||
ResourceID uuid.UUID `db:"resource_id" json:"resource_id"`
|
||||
ResourceTarget string `db:"resource_target" json:"resource_target"`
|
||||
Action AuditAction `db:"action" json:"action"`
|
||||
Diff json.RawMessage `db:"diff" json:"diff"`
|
||||
StatusCode int32 `db:"status_code" json:"status_code"`
|
||||
}
|
||||
|
||||
type File struct {
|
||||
Hash string `db:"hash" json:"hash"`
|
||||
CreatedAt time.Time `db:"created_at" json:"created_at"`
|
||||
|
|
|
@ -1,4 +1,6 @@
|
|||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.13.0
|
||||
|
||||
package database
|
||||
|
||||
|
@ -9,10 +11,19 @@ import (
|
|||
)
|
||||
|
||||
type querier interface {
|
||||
// Acquires the lock for a single job that isn't started, completed,
|
||||
// canceled, and that matches an array of provisioner types.
|
||||
//
|
||||
// SKIP LOCKED is used to jump over locked rows. This prevents
|
||||
// multiple provisioners from acquiring the same jobs. See:
|
||||
// https://www.postgresql.org/docs/9.5/sql-select.html#SQL-FOR-UPDATE-SHARE
|
||||
AcquireProvisionerJob(ctx context.Context, arg AcquireProvisionerJobParams) (ProvisionerJob, error)
|
||||
DeleteGitSSHKey(ctx context.Context, userID uuid.UUID) error
|
||||
DeleteParameterValueByID(ctx context.Context, id uuid.UUID) error
|
||||
GetAPIKeyByID(ctx context.Context, id string) (APIKey, error)
|
||||
// GetAuditLogsBefore retrieves `limit` number of audit logs before the provided
|
||||
// ID.
|
||||
GetAuditLogsBefore(ctx context.Context, arg GetAuditLogsBeforeParams) ([]AuditLog, error)
|
||||
GetFileByHash(ctx context.Context, hash string) (File, error)
|
||||
GetGitSSHKey(ctx context.Context, userID uuid.UUID) (GitSSHKey, error)
|
||||
GetOrganizationByID(ctx context.Context, id uuid.UUID) (Organization, error)
|
||||
|
@ -61,6 +72,7 @@ type querier interface {
|
|||
GetWorkspacesByOwnerID(ctx context.Context, arg GetWorkspacesByOwnerIDParams) ([]Workspace, error)
|
||||
GetWorkspacesByTemplateID(ctx context.Context, arg GetWorkspacesByTemplateIDParams) ([]Workspace, error)
|
||||
InsertAPIKey(ctx context.Context, arg InsertAPIKeyParams) (APIKey, error)
|
||||
InsertAuditLog(ctx context.Context, arg InsertAuditLogParams) (AuditLog, error)
|
||||
InsertFile(ctx context.Context, arg InsertFileParams) (File, error)
|
||||
InsertGitSSHKey(ctx context.Context, arg InsertGitSSHKeyParams) (GitSSHKey, error)
|
||||
InsertOrganization(ctx context.Context, arg InsertOrganizationParams) (Organization, error)
|
||||
|
|
|
@ -1,4 +1,6 @@
|
|||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.13.0
|
||||
|
||||
package database
|
||||
|
||||
|
@ -146,6 +148,130 @@ func (q *sqlQuerier) UpdateAPIKeyByID(ctx context.Context, arg UpdateAPIKeyByIDP
|
|||
return err
|
||||
}
|
||||
|
||||
// getAuditLogsBefore pages backwards through audit_logs: it selects rows
// strictly older than the row whose id is $1, newest first, capped at $2.
const getAuditLogsBefore = `-- name: GetAuditLogsBefore :many
SELECT
	id, time, user_id, organization_id, ip, user_agent, resource_type, resource_id, resource_target, action, diff, status_code
FROM
	audit_logs
WHERE
	"time" < (SELECT "time" FROM audit_logs a WHERE a.id = $1)
ORDER BY
	"time" DESC
LIMIT
	$2
`
|
||||
|
||||
// GetAuditLogsBeforeParams holds the arguments for GetAuditLogsBefore.
type GetAuditLogsBeforeParams struct {
	// ID is the cursor: only logs older than this row's "time" are returned.
	ID uuid.UUID `db:"id" json:"id"`
	// RowLimit caps the number of rows returned.
	RowLimit int32 `db:"row_limit" json:"row_limit"`
}
|
||||
|
||||
// GetAuditLogsBefore retrieves `limit` number of audit logs before the provided
|
||||
// ID.
|
||||
func (q *sqlQuerier) GetAuditLogsBefore(ctx context.Context, arg GetAuditLogsBeforeParams) ([]AuditLog, error) {
|
||||
rows, err := q.db.QueryContext(ctx, getAuditLogsBefore, arg.ID, arg.RowLimit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []AuditLog
|
||||
for rows.Next() {
|
||||
var i AuditLog
|
||||
if err := rows.Scan(
|
||||
&i.ID,
|
||||
&i.Time,
|
||||
&i.UserID,
|
||||
&i.OrganizationID,
|
||||
&i.Ip,
|
||||
&i.UserAgent,
|
||||
&i.ResourceType,
|
||||
&i.ResourceID,
|
||||
&i.ResourceTarget,
|
||||
&i.Action,
|
||||
&i.Diff,
|
||||
&i.StatusCode,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
// insertAuditLog inserts one complete audit_logs row (all columns supplied
// by the caller, including id and time) and returns the stored row.
const insertAuditLog = `-- name: InsertAuditLog :one
INSERT INTO
	audit_logs (
		id,
		"time",
		user_id,
		organization_id,
		ip,
		user_agent,
		resource_type,
		resource_id,
		resource_target,
		action,
		diff,
		status_code
	)
VALUES
	($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12) RETURNING id, time, user_id, organization_id, ip, user_agent, resource_type, resource_id, resource_target, action, diff, status_code
`
|
||||
|
||||
// InsertAuditLogParams holds the arguments for InsertAuditLog — one value
// per audit_logs column; the database supplies no defaults here, so the
// caller provides the id and timestamp itself.
type InsertAuditLogParams struct {
	ID             uuid.UUID       `db:"id" json:"id"`
	Time           time.Time       `db:"time" json:"time"`
	UserID         uuid.UUID       `db:"user_id" json:"user_id"`
	OrganizationID uuid.UUID       `db:"organization_id" json:"organization_id"`
	Ip             pqtype.CIDR     `db:"ip" json:"ip"`
	UserAgent      string          `db:"user_agent" json:"user_agent"`
	ResourceType   ResourceType    `db:"resource_type" json:"resource_type"`
	ResourceID     uuid.UUID       `db:"resource_id" json:"resource_id"`
	ResourceTarget string          `db:"resource_target" json:"resource_target"`
	Action         AuditAction     `db:"action" json:"action"`
	Diff           json.RawMessage `db:"diff" json:"diff"`
	StatusCode     int32           `db:"status_code" json:"status_code"`
}
|
||||
|
||||
func (q *sqlQuerier) InsertAuditLog(ctx context.Context, arg InsertAuditLogParams) (AuditLog, error) {
|
||||
row := q.db.QueryRowContext(ctx, insertAuditLog,
|
||||
arg.ID,
|
||||
arg.Time,
|
||||
arg.UserID,
|
||||
arg.OrganizationID,
|
||||
arg.Ip,
|
||||
arg.UserAgent,
|
||||
arg.ResourceType,
|
||||
arg.ResourceID,
|
||||
arg.ResourceTarget,
|
||||
arg.Action,
|
||||
arg.Diff,
|
||||
arg.StatusCode,
|
||||
)
|
||||
var i AuditLog
|
||||
err := row.Scan(
|
||||
&i.ID,
|
||||
&i.Time,
|
||||
&i.UserID,
|
||||
&i.OrganizationID,
|
||||
&i.Ip,
|
||||
&i.UserAgent,
|
||||
&i.ResourceType,
|
||||
&i.ResourceID,
|
||||
&i.ResourceTarget,
|
||||
&i.Action,
|
||||
&i.Diff,
|
||||
&i.StatusCode,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
|
||||
const getFileByHash = `-- name: GetFileByHash :one
|
||||
SELECT
|
||||
hash, created_at, created_by, mimetype, data
|
||||
|
|
|
@ -0,0 +1,32 @@
|
|||
-- GetAuditLogsBefore retrieves `limit` number of audit logs before the provided
-- ID.
-- name: GetAuditLogsBefore :many
SELECT
	*
FROM
	audit_logs
WHERE
	"time" < (SELECT "time" FROM audit_logs a WHERE a.id = sqlc.arg(id))
ORDER BY
	"time" DESC
LIMIT
	sqlc.arg(row_limit);

-- InsertAuditLog writes one audit log row; the caller supplies every
-- column, including id and "time", and the stored row is returned.
-- name: InsertAuditLog :one
INSERT INTO
	audit_logs (
		id,
		"time",
		user_id,
		organization_id,
		ip,
		user_agent,
		resource_type,
		resource_id,
		resource_target,
		action,
		diff,
		status_code
	)
VALUES
	($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12) RETURNING *;
|
Loading…
Reference in New Issue