fix: tar: do not archive .tfvars (#11208)

This commit is contained in:
Marcin Tojek 2023-12-15 11:15:12 +01:00 committed by GitHub
parent 211e59bf65
commit 89d8a293f0
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 41 additions and 14 deletions

View File

@ -78,7 +78,7 @@ func (pf *templateUploadFlags) upload(inv *clibase.Invocation, client *codersdk.
pipeReader, pipeWriter := io.Pipe() pipeReader, pipeWriter := io.Pipe()
go func() { go func() {
err := provisionersdk.Tar(pipeWriter, pf.directory, provisionersdk.TemplateArchiveLimit) err := provisionersdk.Tar(pipeWriter, inv.Logger, pf.directory, provisionersdk.TemplateArchiveLimit)
_ = pipeWriter.CloseWithError(err) _ = pipeWriter.CloseWithError(err)
}() }()
defer pipeReader.Close() defer pipeReader.Close()

View File

@ -2,6 +2,7 @@ package provisionersdk
import ( import (
"archive/tar" "archive/tar"
"context"
"io" "io"
"os" "os"
"path/filepath" "path/filepath"
@ -9,6 +10,8 @@ import (
"golang.org/x/xerrors" "golang.org/x/xerrors"
"cdr.dev/slog"
"github.com/coder/coder/v2/coderd/util/xio" "github.com/coder/coder/v2/coderd/util/xio"
) )
@ -39,7 +42,7 @@ func DirHasLockfile(dir string) (bool, error) {
} }
// Tar archives a Terraform directory. // Tar archives a Terraform directory.
func Tar(w io.Writer, directory string, limit int64) error { func Tar(w io.Writer, logger slog.Logger, directory string, limit int64) error {
// The total bytes written must be under the limit, so use -1 // The total bytes written must be under the limit, so use -1
w = xio.NewLimitWriter(w, limit-1) w = xio.NewLimitWriter(w, limit-1)
tarWriter := tar.NewWriter(w) tarWriter := tar.NewWriter(w)
@ -94,6 +97,12 @@ func Tar(w io.Writer, directory string, limit int64) error {
} }
if strings.Contains(rel, ".tfstate") { if strings.Contains(rel, ".tfstate") {
// Don't store tfstate! // Don't store tfstate!
logger.Debug(context.Background(), "skip state", slog.F("name", rel))
return nil
}
if rel == "terraform.tfvars" || rel == "terraform.tfvars.json" || strings.HasSuffix(rel, ".auto.tfvars") || strings.HasSuffix(rel, ".auto.tfvars.json") {
// Don't store .tfvars, as Coder uses its own variables file.
logger.Debug(context.Background(), "skip variable definitions", slog.F("name", rel))
return nil return nil
} }
// Use unix paths in the tar archive. // Use unix paths in the tar archive.

View File

@ -10,11 +10,16 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"cdr.dev/slog/sloggers/slogtest"
"github.com/coder/coder/v2/provisionersdk" "github.com/coder/coder/v2/provisionersdk"
) )
func TestTar(t *testing.T) { func TestTar(t *testing.T) {
t.Parallel() t.Parallel()
log := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true})
t.Run("NoFollowSymlink", func(t *testing.T) { t.Run("NoFollowSymlink", func(t *testing.T) {
t.Parallel() t.Parallel()
dir := t.TempDir() dir := t.TempDir()
@ -28,7 +33,7 @@ func TestTar(t *testing.T) {
err = os.Symlink("no-exists", filepath.Join(dir, "link")) err = os.Symlink("no-exists", filepath.Join(dir, "link"))
require.NoError(t, err) require.NoError(t, err)
err = provisionersdk.Tar(io.Discard, dir, 1024*1024) err = provisionersdk.Tar(io.Discard, log, dir, 1024*1024)
require.NoError(t, err) require.NoError(t, err)
}) })
t.Run("HeaderBreakLimit", func(t *testing.T) { t.Run("HeaderBreakLimit", func(t *testing.T) {
@ -38,7 +43,7 @@ func TestTar(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
_ = file.Close() _ = file.Close()
// A header is 512 bytes // A header is 512 bytes
err = provisionersdk.Tar(io.Discard, dir, 100) err = provisionersdk.Tar(io.Discard, log, dir, 100)
require.Error(t, err) require.Error(t, err)
}) })
t.Run("HeaderAndContent", func(t *testing.T) { t.Run("HeaderAndContent", func(t *testing.T) {
@ -49,11 +54,11 @@ func TestTar(t *testing.T) {
_, _ = file.Write(make([]byte, 100)) _, _ = file.Write(make([]byte, 100))
_ = file.Close() _ = file.Close()
// Payload + header is 1024 bytes (padding) // Payload + header is 1024 bytes (padding)
err = provisionersdk.Tar(io.Discard, dir, 1025) err = provisionersdk.Tar(io.Discard, log, dir, 1025)
require.NoError(t, err) require.NoError(t, err)
// Limit is 1 byte too small (n == limit is a failure, must be under) // Limit is 1 byte too small (n == limit is a failure, must be under)
err = provisionersdk.Tar(io.Discard, dir, 1024) err = provisionersdk.Tar(io.Discard, log, dir, 1024)
require.Error(t, err) require.Error(t, err)
}) })
@ -63,7 +68,7 @@ func TestTar(t *testing.T) {
file, err := os.CreateTemp(dir, "") file, err := os.CreateTemp(dir, "")
require.NoError(t, err) require.NoError(t, err)
_ = file.Close() _ = file.Close()
err = provisionersdk.Tar(io.Discard, dir, 1024) err = provisionersdk.Tar(io.Discard, log, dir, 1024)
require.Error(t, err) require.Error(t, err)
}) })
t.Run("Valid", func(t *testing.T) { t.Run("Valid", func(t *testing.T) {
@ -72,7 +77,7 @@ func TestTar(t *testing.T) {
file, err := os.CreateTemp(dir, "*.tf") file, err := os.CreateTemp(dir, "*.tf")
require.NoError(t, err) require.NoError(t, err)
_ = file.Close() _ = file.Close()
err = provisionersdk.Tar(io.Discard, dir, 1024) err = provisionersdk.Tar(io.Discard, log, dir, 1024)
require.NoError(t, err) require.NoError(t, err)
}) })
t.Run("ValidJSON", func(t *testing.T) { t.Run("ValidJSON", func(t *testing.T) {
@ -81,7 +86,7 @@ func TestTar(t *testing.T) {
file, err := os.CreateTemp(dir, "*.tf.json") file, err := os.CreateTemp(dir, "*.tf.json")
require.NoError(t, err) require.NoError(t, err)
_ = file.Close() _ = file.Close()
err = provisionersdk.Tar(io.Discard, dir, 1024) err = provisionersdk.Tar(io.Discard, log, dir, 1024)
require.NoError(t, err) require.NoError(t, err)
}) })
t.Run("HiddenFiles", func(t *testing.T) { t.Run("HiddenFiles", func(t *testing.T) {
@ -122,6 +127,18 @@ func TestTar(t *testing.T) {
}, { }, {
Name: "terraform.tfstate", Name: "terraform.tfstate",
Archives: false, Archives: false,
}, {
Name: "terraform.tfvars",
Archives: false,
}, {
Name: "terraform.tfvars.json",
Archives: false,
}, {
Name: "*.auto.tfvars",
Archives: false,
}, {
Name: "*.auto.tfvars.json",
Archives: false,
}, },
} }
for _, file := range files { for _, file := range files {
@ -149,18 +166,17 @@ func TestTar(t *testing.T) {
} }
archive := new(bytes.Buffer) archive := new(bytes.Buffer)
// Headers are chonky so raise the limit to something reasonable // Headers are chonky so raise the limit to something reasonable
err := provisionersdk.Tar(archive, dir, 1024<<2) err := provisionersdk.Tar(archive, log, dir, 1024<<3)
require.NoError(t, err) require.NoError(t, err)
dir = t.TempDir() dir = t.TempDir()
err = provisionersdk.Untar(dir, archive) err = provisionersdk.Untar(dir, archive)
require.NoError(t, err) require.NoError(t, err)
for _, file := range files { for _, file := range files {
_, err = os.Stat(filepath.Join(dir, file.Name)) _, err = os.Stat(filepath.Join(dir, file.Name))
t.Logf("stat %q %+v", file.Name, err)
if file.Archives { if file.Archives {
require.NoError(t, err) require.NoError(t, err, "stat %q, got error: %+v", file.Name, err)
} else { } else {
require.ErrorIs(t, err, os.ErrNotExist) require.ErrorIs(t, err, os.ErrNotExist, "stat %q, expected ErrNotExist, got: %+v", file.Name, err)
} }
} }
}) })
@ -168,12 +184,14 @@ func TestTar(t *testing.T) {
func TestUntar(t *testing.T) { func TestUntar(t *testing.T) {
t.Parallel() t.Parallel()
log := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true})
dir := t.TempDir() dir := t.TempDir()
file, err := os.CreateTemp(dir, "*.tf") file, err := os.CreateTemp(dir, "*.tf")
require.NoError(t, err) require.NoError(t, err)
_ = file.Close() _ = file.Close()
archive := new(bytes.Buffer) archive := new(bytes.Buffer)
err = provisionersdk.Tar(archive, dir, 1024) err = provisionersdk.Tar(archive, log, dir, 1024)
require.NoError(t, err) require.NoError(t, err)
dir = t.TempDir() dir = t.TempDir()
err = provisionersdk.Untar(dir, archive) err = provisionersdk.Untar(dir, archive)