internal/pkg: tar optional file
All checks were successful
Test / Create distribution (push) Successful in 47s
Test / Sandbox (push) Successful in 2m49s
Test / ShareFS (push) Successful in 4m46s
Test / Sandbox (race detector) (push) Successful in 5m17s
Test / Hpkg (push) Successful in 5m19s
Test / Hakurei (push) Successful in 5m31s
Test / Hakurei (race detector) (push) Successful in 7m27s
Test / Flake checks (push) Successful in 1m39s
This allows tar to take a single-file directory Artifact as input.

Signed-off-by: Ophestra <cat@gensokyo.uk>
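A minimal sketch of the call shape this change enables, assuming a hypothetical producer Artifact whose cured output is a directory holding exactly one regular file (the function and parameter names below are illustrative, not part of this commit; NewTar and TarGzip are taken from the diff that follows):

	// buildTar wraps any Artifact in a gzip tar unpack step.
	// Before this change NewTar only accepted a File; with this change the
	// input may also be an Artifact that cures into a single-file directory.
	func buildTar(producer pkg.Artifact) pkg.Artifact {
		return pkg.NewTar(producer, pkg.TarGzip)
	}
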
@@ -209,6 +209,7 @@ func TestFlatten(t *testing.T) {
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/-P_1iw6yVq_letMHncqcExSE0bYcDhYI5OdY6b1wKASf-Corufvj__XTBUq2Qd2a": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
"identifier/0_rRxIqbX9LK9L_KDbuafotFz6HFkonNgO9gXhK1asM_Y1Pxn0amg756vRTo6m74": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},

@@ -230,10 +231,11 @@ func TestFlatten(t *testing.T) {
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/-P_1iw6yVq_letMHncqcExSE0bYcDhYI5OdY6b1wKASf-Corufvj__XTBUq2Qd2a", Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/0_rRxIqbX9LK9L_KDbuafotFz6HFkonNgO9gXhK1asM_Y1Pxn0amg756vRTo6m74", Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
{Mode: fs.ModeDir | 0700, Path: "temp"},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("PrNWkHqtSmdtVecctlI9Xf63dQPIyyLBIMCtRAP2-VqF9u1oQ8ydV7-WPbzEvMkG"), nil},
}, pkg.MustDecode("sxbgyX-bPoezbha214n2lbQhiVfTUBkhZ0EX6zI7mmkMdrCdwuMwhMBJphLQsy94"), nil},
{"sample tar expand step unpack", fstest.MapFS{
".": {Mode: fs.ModeDir | 0500},

@@ -254,6 +256,7 @@ func TestFlatten(t *testing.T) {
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/-P_1iw6yVq_letMHncqcExSE0bYcDhYI5OdY6b1wKASf-Corufvj__XTBUq2Qd2a": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
"identifier/0_rRxIqbX9LK9L_KDbuafotFz6HFkonNgO9gXhK1asM_Y1Pxn0amg756vRTo6m74": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},

@@ -266,10 +269,11 @@ func TestFlatten(t *testing.T) {
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/-P_1iw6yVq_letMHncqcExSE0bYcDhYI5OdY6b1wKASf-Corufvj__XTBUq2Qd2a", Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/0_rRxIqbX9LK9L_KDbuafotFz6HFkonNgO9gXhK1asM_Y1Pxn0amg756vRTo6m74", Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
{Mode: fs.ModeDir | 0700, Path: "temp"},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("YZUoGdMwmfW5sQWto9hQgMKah648rHKck8Ds_GGnqgCBpTAiZKOefpHCKnvktfYh"), nil},
}, pkg.MustDecode("4I8wx_h7NSJTlG5lbuz-GGEXrOg0GYC3M_503LYEBhv5XGWXfNIdIY9Q3eVSYldX"), nil},
{"testtool", fstest.MapFS{
".": {Mode: fs.ModeDir | 0500},

@@ -300,26 +300,6 @@ func newTesttool() (
).String(), testtoolBin, 0500)
},
}}
testtoolDestroy = func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
if pathname, checksum, err := c.Cure(testtool); err != nil {
t.Fatalf("Cure: error = %v", err)
} else if err = os.Remove(pathname.String()); err != nil {
t.Fatal(err)
} else {
p := base.Append(
"checksum",
pkg.Encode(checksum),
)
if err = os.Chmod(p.Append("bin").String(), 0700); err != nil {
t.Fatal(err)
}
if err = os.Chmod(p.String(), 0700); err != nil {
t.Fatal(err)
}
if err = os.RemoveAll(p.String()); err != nil {
t.Fatal(err)
}
}
}
testtoolDestroy = newDestroyArtifactFunc(testtool)
return
}

@@ -117,6 +117,59 @@ func newStubFile(
}
}
// destroyArtifact removes all traces of an [Artifact] from the on-disk cache.
// Do not use this in a test case without a very good reason to do so.
func destroyArtifact(
t *testing.T,
base *check.Absolute,
c *pkg.Cache,
a pkg.Artifact,
) {
if pathname, checksum, err := c.Cure(a); err != nil {
t.Fatalf("Cure: error = %v", err)
} else if err = os.Remove(pathname.String()); err != nil {
t.Fatal(err)
} else {
p := base.Append(
"checksum",
pkg.Encode(checksum),
)
if err = filepath.WalkDir(p.String(), func(
path string,
d fs.DirEntry,
err error,
) error {
if err != nil {
return err
}
if d.IsDir() {
return os.Chmod(path, 0700)
}
return nil
}); err != nil && !errors.Is(err, os.ErrNotExist) {
t.Fatal(err)
}
if err = os.RemoveAll(p.String()); err != nil {
t.Fatal(err)
}
}
}
// newDestroyArtifactFunc returns a function that calls destroyArtifact.
func newDestroyArtifactFunc(a pkg.Artifact) func(
t *testing.T,
base *check.Absolute,
c *pkg.Cache,
) {
return func(
t *testing.T,
base *check.Absolute,
c *pkg.Cache,
) {
destroyArtifact(t, base, c, a)
}
}
func TestIdent(t *testing.T) {
t.Parallel()

@@ -28,15 +28,17 @@ const (
// A tarArtifact is an [Artifact] unpacking a tarball backed by a [File].
type tarArtifact struct {
// Caller-supplied backing tarball.
f File
f Artifact
// Compression on top of the tarball.
compression uint64
}
// NewTar returns a new [Artifact] backed by the supplied [File] and
// compression method.
func NewTar(f File, compression uint64) Artifact {
return &tarArtifact{f: f, compression: compression}
// NewTar returns a new [Artifact] backed by the supplied [Artifact] and
// compression method. If f implements [File], its data might be used directly,
// eliminating the roundtrip to vfs. If f is a directory, it must contain a
// single regular file.
func NewTar(a Artifact, compression uint64) Artifact {
return &tarArtifact{a, compression}
}
// NewHTTPGetTar is abbreviation for NewHTTPGet passed to NewTar.

@@ -76,26 +78,46 @@ func (a *tarArtifact) Cure(c *CureContext) (err error) {
temp := c.GetTempDir()
var tr io.ReadCloser
{
if tr, err = c.OpenFile(a.f); err != nil {
if file, ok := a.f.(File); ok {
if tr, err = c.OpenFile(file); err != nil {
return
}
} else {
var pathname *check.Absolute
if pathname, _, err = c.Cure(a.f); err != nil {
return
}
var entries []os.DirEntry
if entries, err = os.ReadDir(pathname.String()); err != nil {
return
}
if len(entries) != 1 || !entries[0].Type().IsRegular() {
return errors.New(
"input directory does not contain a single regular file",
)
} else {
pathname = pathname.Append(entries[0].Name())
}
if tr, err = os.Open(pathname.String()); err != nil {
return
}
defer func(f io.ReadCloser) {
closeErr := f.Close()
if err == nil {
err = closeErr
}
}(tr)
tr = io.NopCloser(tr)
}
defer func() {
defer func(f io.ReadCloser) {
closeErr := tr.Close()
if err == nil {
err = closeErr
}
}()
closeErr = f.Close()
if err == nil {
err = closeErr
}
}(tr)
tr = io.NopCloser(tr)
switch a.compression {
case TarUncompressed:

@@ -5,12 +5,15 @@ import (
"bytes"
"compress/gzip"
"crypto/sha512"
"errors"
"io/fs"
"net/http"
"os"
"testing"
"testing/fstest"
"hakurei.app/container/check"
"hakurei.app/container/stub"
"hakurei.app/internal/pkg"
)

@@ -37,7 +40,7 @@ func TestTar(t *testing.T) {
}, pkg.MustDecode(
"cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM",
))
}, pkg.MustDecode("PrNWkHqtSmdtVecctlI9Xf63dQPIyyLBIMCtRAP2-VqF9u1oQ8ydV7-WPbzEvMkG")},
}, pkg.MustDecode("sxbgyX-bPoezbha214n2lbQhiVfTUBkhZ0EX6zI7mmkMdrCdwuMwhMBJphLQsy94")},
{"http expand", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
checkTarHTTP(t, base, c, fstest.MapFS{

@@ -48,7 +51,7 @@ func TestTar(t *testing.T) {
}, pkg.MustDecode(
"CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN",
))
}, pkg.MustDecode("YZUoGdMwmfW5sQWto9hQgMKah648rHKck8Ds_GGnqgCBpTAiZKOefpHCKnvktfYh")},
}, pkg.MustDecode("4I8wx_h7NSJTlG5lbuz-GGEXrOg0GYC3M_503LYEBhv5XGWXfNIdIY9Q3eVSYldX")},
})
}

@@ -115,18 +118,88 @@ func checkTarHTTP(
t.Fatalf("Ident: %s, want %s", pkg.Encode(id), pkg.Encode(wantIdent))
}
wantPathname := base.Append(
"identifier",
pkg.Encode(wantIdent),
)
if pathname, checksum, err := c.Cure(a); err != nil {
t.Fatalf("Cure: error = %v", err)
} else if !pathname.Is(wantPathname) {
t.Fatalf("Cure: %q, want %q", pathname, wantPathname)
} else if checksum != wantChecksum {
t.Fatalf("Cure: %v", &pkg.ChecksumMismatchError{
Got: checksum,
Want: wantChecksum,
})
tarDir := stubArtifact{
kind: pkg.KindExec,
params: []byte("directory containing a single regular file"),
cure: func(c *pkg.CureContext) error {
work := c.GetWorkDir()
if err := os.MkdirAll(work.String(), 0700); err != nil {
return err
}
return os.WriteFile(
work.Append("sample.tar.gz").String(),
[]byte(testdata),
0400,
)
},
}
tarDirMulti := stubArtifact{
kind: pkg.KindExec,
params: []byte("directory containing a multiple entries"),
cure: func(c *pkg.CureContext) error {
work := c.GetWorkDir()
if err := os.MkdirAll(work.Append(
"garbage",
).String(), 0700); err != nil {
return err
}
return os.WriteFile(
work.Append("sample.tar.gz").String(),
[]byte(testdata),
0400,
)
},
}
tarDirType := stubArtifact{
kind: pkg.KindExec,
params: []byte("directory containing a symbolic link"),
cure: func(c *pkg.CureContext) error {
work := c.GetWorkDir()
if err := os.MkdirAll(work.String(), 0700); err != nil {
return err
}
return os.Symlink(
work.String(),
work.Append("sample.tar.gz").String(),
)
},
}
// destroy these to avoid including it in flatten test case
defer newDestroyArtifactFunc(tarDir)(t, base, c)
defer newDestroyArtifactFunc(tarDirMulti)(t, base, c)
defer newDestroyArtifactFunc(tarDirType)(t, base, c)
cureMany(t, c, []cureStep{
{"file", a, base.Append(
"identifier",
pkg.Encode(wantIdent),
), wantChecksum, nil},
{"directory", pkg.NewTar(
tarDir,
pkg.TarGzip,
), ignorePathname, wantChecksum, nil},
{"multiple entries", pkg.NewTar(
tarDirMulti,
pkg.TarGzip,
), nil, pkg.Checksum{}, errors.New(
"input directory does not contain a single regular file",
)},
{"bad type", pkg.NewTar(
tarDirType,
pkg.TarGzip,
), nil, pkg.Checksum{}, errors.New(
"input directory does not contain a single regular file",
)},
{"error passthrough", pkg.NewTar(stubArtifact{
kind: pkg.KindExec,
params: []byte("doomed artifact"),
cure: func(c *pkg.CureContext) error {
return stub.UniqueError(0xcafe)
},
}, pkg.TarGzip), nil, pkg.Checksum{}, stub.UniqueError(0xcafe)},
})
}
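When the input Artifact does not cure into a single-file directory, the error asserted in the test cases above surfaces to callers through Cache.Cure; a rough sketch with illustrative names (only NewTar, TarGzip, and the Cure signature are taken from this commit):

	// cureOrReport is a sketch only; cache is a *pkg.Cache and badDir is any
	// Artifact whose cured directory holds more than one entry or a
	// non-regular file.
	func cureOrReport(cache *pkg.Cache, badDir pkg.Artifact) error {
		_, _, err := cache.Cure(pkg.NewTar(badDir, pkg.TarGzip))
		// err reads "input directory does not contain a single regular file"
		// for malformed inputs like tarDirMulti and tarDirType above.
		return err
	}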