Files
hakurei/internal/pkg/pkg_test.go
Ophestra e0c720681b internal/pkg: standardise artifact IR
This should hopefully provide good separation between the artifact curing backend implementation and the (still work in progress) language. Making the IR parseable also guarantees uniqueness of the representation.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-02-05 08:24:09 +09:00

1234 lines
34 KiB
Go

package pkg_test
import (
"archive/tar"
"bytes"
"context"
"crypto/sha512"
"encoding/base64"
"errors"
"fmt"
"io"
"io/fs"
"log"
"net/http"
"os"
"path/filepath"
"reflect"
"strconv"
"syscall"
"testing"
"unique"
"unsafe"
"hakurei.app/container"
"hakurei.app/container/check"
"hakurei.app/container/fhs"
"hakurei.app/container/stub"
"hakurei.app/internal/pkg"
"hakurei.app/message"
)
// unsafeOpen is linked against the unexported pkg.open function, allowing
// tests to construct a [pkg.Cache] with lock acquisition disabled via the
// trailing lock parameter — presumably the exported [pkg.Open] always locks;
// verify against the pkg implementation.
//
//go:linkname unsafeOpen hakurei.app/internal/pkg.open
func unsafeOpen(
	ctx context.Context,
	msg message.Msg,
	cures int,
	base *check.Absolute,
	lock bool,
) (*pkg.Cache, error)
// TestMain gives the container package a chance to take over the process
// (via TryArgv0) before delegating to the standard test runner.
func TestMain(m *testing.M) {
	container.TryArgv0(nil)
	os.Exit(m.Run())
}
// overrideIdent overrides the ID method of [Artifact].
type overrideIdent struct {
	id pkg.ID // returned verbatim by ID
	pkg.TrivialArtifact
}

// ID returns the hardcoded identifier instead of deriving one.
func (a overrideIdent) ID() pkg.ID { return a.id }
// overrideIdentFile overrides the ID method of [FileArtifact].
type overrideIdentFile struct {
	id pkg.ID // returned verbatim by ID
	pkg.FileArtifact
}

// ID returns the hardcoded identifier instead of deriving one.
func (a overrideIdentFile) ID() pkg.ID { return a.id }
// A knownIdentArtifact implements [pkg.KnownIdent] and [Artifact].
// It exists so overrideChecksum can embed both behaviours at once.
type knownIdentArtifact interface {
	pkg.KnownIdent
	pkg.TrivialArtifact
}
// A knownIdentFile implements [pkg.KnownIdent] and [FileArtifact].
// It exists so overrideChecksumFile can embed both behaviours at once.
type knownIdentFile interface {
	pkg.KnownIdent
	pkg.FileArtifact
}
// overrideChecksum overrides the Checksum method of [Artifact].
type overrideChecksum struct {
	checksum pkg.Checksum // returned verbatim by Checksum
	knownIdentArtifact
}

// Checksum returns the hardcoded checksum instead of computing one.
func (a overrideChecksum) Checksum() pkg.Checksum { return a.checksum }
// overrideChecksumFile overrides the Checksum method of [FileArtifact].
type overrideChecksumFile struct {
	checksum pkg.Checksum // returned verbatim by Checksum
	knownIdentFile
}

// Checksum returns the hardcoded checksum instead of computing one.
func (a overrideChecksumFile) Checksum() pkg.Checksum { return a.checksum }
// A stubArtifact implements [TrivialArtifact] with hardcoded behaviour.
type stubArtifact struct {
	kind   pkg.Kind                   // returned verbatim by Kind
	params []byte                     // written to the IContext by Params
	deps   []pkg.Artifact             // returned verbatim by Dependencies
	cure   func(t *pkg.TContext) error // delegate invoked by Cure; may be nil if never cured
}

func (a *stubArtifact) Kind() pkg.Kind            { return a.kind }
func (a *stubArtifact) Params(ctx *pkg.IContext)  { ctx.Write(a.params) }
func (a *stubArtifact) Dependencies() []pkg.Artifact { return a.deps }
func (a *stubArtifact) Cure(t *pkg.TContext) error { return a.cure(t) }

// IsExclusive always reports false for the trivial stub.
func (*stubArtifact) IsExclusive() bool { return false }
// A stubArtifactF implements [FloodArtifact] with hardcoded behaviour.
type stubArtifactF struct {
	kind   pkg.Kind                   // returned verbatim by Kind
	params []byte                     // written to the IContext by Params
	deps   []pkg.Artifact             // returned verbatim by Dependencies
	excl   bool                       // returned verbatim by IsExclusive
	cure   func(f *pkg.FContext) error // delegate invoked by Cure; may be nil if never cured
}

func (a *stubArtifactF) Kind() pkg.Kind            { return a.kind }
func (a *stubArtifactF) Params(ctx *pkg.IContext)  { ctx.Write(a.params) }
func (a *stubArtifactF) Dependencies() []pkg.Artifact { return a.deps }
func (a *stubArtifactF) Cure(f *pkg.FContext) error { return a.cure(f) }
func (a *stubArtifactF) IsExclusive() bool         { return a.excl }
// A stubFile implements [FileArtifact] with hardcoded behaviour.
type stubFile struct {
	data []byte // content yielded by Cure
	err  error  // error returned by Cure alongside the reader
	stubArtifact
}

// Cure returns a reader over the hardcoded data; note the reader is non-nil
// even when err is non-nil, both values are returned together.
func (a *stubFile) Cure(*pkg.RContext) (io.ReadCloser, error) {
	return io.NopCloser(bytes.NewReader(a.data)), a.err
}
// newStubFile returns an implementation of [pkg.File] with hardcoded behaviour.
// The result reports kind and id, yields data (and err) when cured, and
// additionally advertises a known checksum only when sum is non-nil.
func newStubFile(
	kind pkg.Kind,
	id pkg.ID,
	sum *pkg.Checksum,
	data []byte,
	err error,
) pkg.FileArtifact {
	inner := &stubFile{
		data: data,
		err:  err,
		stubArtifact: stubArtifact{
			kind: kind,
			cure: func(*pkg.TContext) error {
				panic("unreachable")
			},
		},
	}
	wrapped := overrideIdentFile{id: id, FileArtifact: inner}
	if sum != nil {
		return overrideChecksumFile{checksum: *sum, knownIdentFile: wrapped}
	}
	return wrapped
}
// destroyArtifact removes all traces of an [Artifact] from the on-disk cache.
// Do not use this in a test case without a very good reason to do so.
func destroyArtifact(
	t *testing.T,
	base *check.Absolute,
	c *pkg.Cache,
	a pkg.Artifact,
) {
	pathname, checksum, err := c.Cure(a)
	if err != nil {
		t.Fatalf("Cure: error = %v", err)
	}
	if err = os.Remove(pathname.String()); err != nil {
		t.Fatal(err)
	}

	// Directories under the checksum store may be read-only; restore write
	// permission first so RemoveAll can succeed.
	p := base.Append(
		"checksum",
		pkg.Encode(checksum.Value()),
	)
	walkErr := filepath.WalkDir(p.String(), func(
		path string,
		d fs.DirEntry,
		err error,
	) error {
		if err != nil {
			return err
		}
		if !d.IsDir() {
			return nil
		}
		return os.Chmod(path, 0700)
	})
	if walkErr != nil && !errors.Is(walkErr, os.ErrNotExist) {
		t.Fatal(walkErr)
	}
	if err = os.RemoveAll(p.String()); err != nil {
		t.Fatal(err)
	}
}
// newDestroyArtifactFunc returns a function that calls destroyArtifact,
// binding the artifact a ahead of time.
func newDestroyArtifactFunc(a pkg.Artifact) func(
	t *testing.T,
	base *check.Absolute,
	c *pkg.Cache,
) {
	return func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
		destroyArtifact(t, base, c, a)
	}
}
// TestIdent checks identifier derivation against known-good encoded values.
func TestIdent(t *testing.T) {
	t.Parallel()

	testCases := []struct {
		name string
		a    pkg.Artifact
		want unique.Handle[pkg.ID]
	}{
		{"tar", &stubArtifact{
			pkg.KindTar,
			[]byte{pkg.TarGzip, 0, 0, 0, 0, 0, 0, 0},
			[]pkg.Artifact{
				overrideIdent{pkg.ID{}, new(stubArtifact)},
			},
			nil,
		}, unique.Make[pkg.ID](pkg.MustDecode(
			"WKErnjTOVbuH2P9a0gM4OcAAO4p-CoX2HQu7CbZrg8ZOzApvWoO3-ISzPw6av_rN",
		))},
	}

	msg := message.New(log.New(os.Stderr, "ident: ", 0))
	msg.SwapVerbose(true)
	base, err := check.NewAbs(t.TempDir())
	if err != nil {
		t.Fatal(err)
	}
	cache, err := pkg.Open(t.Context(), msg, 0, base)
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(cache.Close)

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			got := cache.Ident(tc.a)
			if got != tc.want {
				t.Errorf("Ident: %s, want %s",
					pkg.Encode(got.Value()),
					pkg.Encode(tc.want.Value()),
				)
			}
		})
	}
}
// cacheTestCase is a test case passed to checkWithCache where a new instance
// of [pkg.Cache] is prepared for the test case, and is validated and removed
// on test completion.
type cacheTestCase struct {
	name string
	// early, when non-nil, runs after Open but before f.
	early func(t *testing.T, base *check.Absolute)
	// f exercises the cache rooted at base.
	f func(t *testing.T, base *check.Absolute, c *pkg.Cache)
	// want is the expected hash of the cache directory after f returns.
	want pkg.Checksum
}
// checkWithCache runs a slice of cacheTestCase.
//
// Each case gets a fresh [pkg.Cache] in a private temp directory; after tc.f
// returns, the entire directory is hashed and compared against tc.want both
// before and after a Scrub pass, proving the on-disk state is exactly as
// expected and that Scrub does not condemn valid entries.
func checkWithCache(t *testing.T, testCases []cacheTestCase) {
	t.Helper()
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			t.Helper()
			t.Parallel()
			base := check.MustAbs(t.TempDir())
			if err := os.Chmod(base.String(), 0700); err != nil {
				t.Fatal(err)
			}
			// Cured directories may be left read-only; restore write
			// permission on cleanup so the testing package can delete
			// the temp directory.
			t.Cleanup(func() {
				if err := filepath.WalkDir(base.String(), func(path string, d fs.DirEntry, err error) error {
					if err != nil {
						t.Error(err)
						return nil
					}
					if !d.IsDir() {
						return nil
					}
					return os.Chmod(path, 0700)
				}); err != nil {
					t.Fatal(err)
				}
			})
			msg := message.New(log.New(os.Stderr, "cache: ", 0))
			msg.SwapVerbose(testing.Verbose())
			var scrubFunc func() error // scrub after hashing
			if c, err := pkg.Open(t.Context(), msg, 1<<4, base); err != nil {
				t.Fatalf("Open: error = %v", err)
			} else {
				t.Cleanup(c.Close)
				if tc.early != nil {
					tc.early(t, base)
				}
				tc.f(t, base, c)
				scrubFunc = func() error { return c.Scrub(1 << 7) }
			}
			// Remember whether the temp directory exists so it can be
			// recreated after Scrub (which presumably removes it) ahead of
			// the second hash — TODO confirm against Scrub implementation.
			var restoreTemp bool
			if _, err := os.Lstat(base.Append("temp").String()); err != nil {
				if !errors.Is(err, os.ErrNotExist) {
					t.Fatal(err)
				}
			} else {
				restoreTemp = true
			}
			// destroy lock file to avoid changing cache checksums
			if err := os.Remove(base.Append("lock").String()); err != nil {
				t.Fatal(err)
			}
			var checksum pkg.Checksum
			if err := pkg.HashDir(&checksum, base); err != nil {
				t.Fatalf("HashDir: error = %v", err)
			} else if checksum != tc.want {
				t.Fatalf("HashDir: %v", &pkg.ChecksumMismatchError{
					Got:  checksum,
					Want: tc.want,
				})
			}
			if err := scrubFunc(); err != nil {
				t.Fatal("cache contains inconsistencies\n\n" + err.Error())
			}
			if restoreTemp {
				if err := os.Mkdir(
					base.Append("temp").String(),
					0700,
				); err != nil {
					t.Fatal(err)
				}
			}
			// validate again to make sure scrub did not condemn anything
			if err := pkg.HashDir(&checksum, base); err != nil {
				t.Fatalf("HashDir: error = %v", err)
			} else if checksum != tc.want {
				t.Fatalf("(scrubbed) HashDir: %v", &pkg.ChecksumMismatchError{
					Got:  checksum,
					Want: tc.want,
				})
			}
		})
	}
}
// A cureStep contains an [Artifact] to be cured, and the expected outcome.
type cureStep struct {
	name     string          // logged before the step runs
	a        pkg.Artifact    // artifact passed to Cure
	pathname *check.Absolute // expected pathname, or ignorePathname to skip the check
	checksum pkg.Checksum    // expected checksum; the zero value expects no checksum
	err      error           // expected error, compared via reflect.DeepEqual
}

// ignorePathname is passed to cureMany to skip the pathname check.
// It is matched by pointer identity and contains a NUL byte so it can never
// collide with a real pathname.
var ignorePathname = check.MustAbs("/\x00")
// cureMany cures many artifacts against a [Cache] and checks their outcomes.
func cureMany(t *testing.T, c *pkg.Cache, steps []cureStep) {
	t.Helper()
	// makeChecksumH maps the zero Checksum to the zero Handle so steps can
	// express "no checksum expected" with the zero value.
	makeChecksumH := func(checksum pkg.Checksum) unique.Handle[pkg.Checksum] {
		if checksum == (pkg.Checksum{}) {
			return unique.Handle[pkg.Checksum]{}
		}
		return unique.Make(checksum)
	}
	for _, step := range steps {
		t.Log("cure step:", step.name)
		// NOTE: step.pathname != ignorePathname compares pointers; the
		// sentinel is recognised by identity, not by value.
		if pathname, checksum, err := c.Cure(step.a); !reflect.DeepEqual(err, step.err) {
			t.Fatalf("Cure: error = %v, want %v", err, step.err)
		} else if step.pathname != ignorePathname && !pathname.Is(step.pathname) {
			t.Fatalf("Cure: pathname = %q, want %q", pathname, step.pathname)
		} else if checksum != makeChecksumH(step.checksum) {
			t.Fatalf(
				"Cure: checksum = %s, want %s",
				pkg.Encode(checksum.Value()), pkg.Encode(step.checksum),
			)
		} else {
			// On success log the pathname, otherwise the (expected) error.
			v := any(err)
			if err == nil {
				v = pathname
			}
			var checksumVal pkg.Checksum
			if checksum != (unique.Handle[pkg.Checksum]{}) {
				checksumVal = checksum.Value()
			}
			t.Log(pkg.Encode(checksumVal)+":", v)
		}
	}
}
// newWantScrubError returns the address to a new [ScrubError] for base.
// The contents mirror the garbage planted by the "scrub" case in TestCache;
// the same fixture is reused by TestScrubError for formatting checks.
func newWantScrubError(base *check.Absolute) *pkg.ScrubError {
	return &pkg.ScrubError{
		ChecksumMismatches: []pkg.ChecksumMismatchError{
			{Got: pkg.MustDecode(
				"vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX",
			), Want: pkg.Checksum{0xff, 0}},
		},
		DanglingIdentifiers: []pkg.ID{
			{0xfe, 0},
			{0xfe, 0xfe},
			{0xfe, 0xff},
		},
		Errs: map[unique.Handle[string]][]error{
			base.Append("checksum", "__8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA").Handle(): {
				pkg.InvalidFileModeError(fs.ModeSymlink),
			},
			base.Append("checksum", "invalid").Handle(): {
				base64.CorruptInputError(4),
			},
			base.Append("nonexistent").Handle(): {
				base64.CorruptInputError(8),
			},
			base.Append("identifier", pkg.Encode(pkg.ID{0xfe, 0xff})).Handle(): {
				&os.PathError{
					Op: "readlink",
					Path: base.Append(
						"identifier",
						pkg.Encode(pkg.ID{0xfe, 0xff}),
					).String(),
					Err: syscall.EINVAL,
				},
			},
			base.Append("identifier", "invalid").Handle(): {
				base64.CorruptInputError(4),
			},
		},
	}
}
// TestCache exercises the curing and scrubbing behaviour of [pkg.Cache]
// through checkWithCache, covering file artifacts, directory artifacts,
// concurrent pending cures, and scrub error reporting.
func TestCache(t *testing.T) {
	t.Parallel()
	const testdata = "" +
		"\x00\x00\x00\x00" +
		"\xad\x0b\x00" +
		"\x04" +
		"\xfe\xfe\x00\x00" +
		"\xfe\xca\x00\x00"
	// testdataChecksum is the SHA-384 digest of testdata, computed once.
	testdataChecksum := func() pkg.Checksum {
		h := sha512.New384()
		h.Write([]byte(testdata))
		return (pkg.Checksum)(h.Sum(nil))
	}()
	testCases := []cacheTestCase{
		// File artifact behaviour: initial store, dedup by content and by
		// identifier, checksum validation, error passthrough and caching.
		{"file", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
			c.SetStrict(true)
			identifier := (pkg.ID)(bytes.Repeat([]byte{
				0x75, 0xe6, 0x9d, 0x6d, 0xe7, 0x9f,
			}, 8))
			wantPathname := base.Append(
				"identifier",
				"deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef",
			)
			identifier0 := (pkg.ID)(bytes.Repeat([]byte{
				0x71, 0xa7, 0xde, 0x6d, 0xa6, 0xde,
			}, 8))
			wantPathname0 := base.Append(
				"identifier",
				"cafebabecafebabecafebabecafebabecafebabecafebabecafebabecafebabe",
			)
			cureMany(t, c, []cureStep{
				{"initial file", newStubFile(
					pkg.KindHTTPGet,
					identifier,
					&testdataChecksum,
					[]byte(testdata), nil,
				), wantPathname, testdataChecksum, nil},
				{"identical content", newStubFile(
					pkg.KindHTTPGet,
					identifier0,
					&testdataChecksum,
					[]byte(testdata), nil,
				), wantPathname0, testdataChecksum, nil},
				{"existing entry", newStubFile(
					pkg.KindHTTPGet,
					identifier,
					&testdataChecksum,
					[]byte(testdata), nil,
				), wantPathname, testdataChecksum, nil},
				{"checksum mismatch", newStubFile(
					pkg.KindHTTPGet,
					pkg.ID{0xff, 0},
					new(pkg.Checksum),
					[]byte(testdata), nil,
				), nil, pkg.Checksum{}, &pkg.ChecksumMismatchError{
					Got: testdataChecksum,
				}},
				{"store without validation", newStubFile(
					pkg.KindHTTPGet,
					pkg.MustDecode("vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX"),
					nil,
					[]byte{0}, nil,
				), base.Append(
					"identifier",
					"vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX",
				), pkg.Checksum{
					0xbe, 0xc0, 0x21, 0xb4, 0xf3, 0x68,
					0xe3, 0x06, 0x91, 0x34, 0xe0, 0x12,
					0xc2, 0xb4, 0x30, 0x70, 0x83, 0xd3,
					0xa9, 0xbd, 0xd2, 0x06, 0xe2, 0x4e,
					0x5f, 0x0d, 0x86, 0xe1, 0x3d, 0x66,
					0x36, 0x65, 0x59, 0x33, 0xec, 0x2b,
					0x41, 0x34, 0x65, 0x96, 0x68, 0x17,
					0xa9, 0xc2, 0x08, 0xa1, 0x17, 0x17,
				}, nil},
				{"incomplete implementation", struct{ pkg.Artifact }{&stubArtifact{
					kind:   pkg.KindExec,
					params: []byte("artifact overridden to be incomplete"),
				}}, nil, pkg.Checksum{}, pkg.InvalidArtifactError(pkg.MustDecode(
					"E__uZ1sLIvb84vzSm5Uezb03RogsiaeTt1nfIVv8TKnnf4LqwtSi-smdHhlkZrUJ",
				))},
				{"error passthrough", newStubFile(
					pkg.KindHTTPGet,
					pkg.ID{0xff, 1},
					nil,
					nil, stub.UniqueError(0xcafe),
				), nil, pkg.Checksum{}, stub.UniqueError(0xcafe)},
				// Same identifier as above, returning nil error this time:
				// the previously observed error is expected to be cached.
				{"error caching", newStubFile(
					pkg.KindHTTPGet,
					pkg.ID{0xff, 1},
					nil,
					nil, nil,
				), nil, pkg.Checksum{}, stub.UniqueError(0xcafe)},
				{"cache hit bad type", overrideChecksum{testdataChecksum, overrideIdent{pkg.ID{0xff, 2}, &stubArtifact{
					kind: pkg.KindTar,
				}}}, nil, pkg.Checksum{}, pkg.InvalidFileModeError(
					0400,
				)},
				{"noncomparable error", &stubArtifactF{
					kind:   pkg.KindExec,
					params: []byte("artifact with dependency returning noncomparable error"),
					deps: []pkg.Artifact{newStubFile(
						pkg.KindHTTPGet,
						pkg.ID{0xff, 3},
						nil,
						nil, struct {
							_ []byte
							stub.UniqueError
						}{UniqueError: 0xbad},
					)},
					cure: func(f *pkg.FContext) error {
						panic("attempting to cure impossible artifact")
					},
				}, nil, pkg.Checksum{}, &pkg.DependencyCureError{
					{
						Ident: unique.Make(pkg.ID{0xff, 3}),
						Err: struct {
							_ []byte
							stub.UniqueError
						}{UniqueError: 0xbad},
					},
				}},
			})
			// Open a second cache over the same base without locking to
			// verify cache hits survive across instances.
			if c0, err := unsafeOpen(
				t.Context(),
				message.New(nil),
				0, base, false,
			); err != nil {
				t.Fatalf("open: error = %v", err)
			} else {
				t.Cleanup(c.Close) // check doubled cancel
				cureMany(t, c0, []cureStep{
					{"cache hit ident", overrideIdent{
						id: identifier,
					}, wantPathname, testdataChecksum, nil},
					{"cache miss checksum match", newStubFile(
						pkg.KindHTTPGet,
						testdataChecksum,
						nil,
						[]byte(testdata),
						nil,
					), base.Append(
						"identifier",
						pkg.Encode(testdataChecksum),
					), testdataChecksum, nil},
				})
				// cure after close
				c.Close()
				if _, _, err = c.Cure(&stubArtifactF{
					kind:   pkg.KindExec,
					params: []byte("unreachable artifact cured after cancel"),
					deps:   []pkg.Artifact{pkg.NewFile("", []byte("unreachable dependency"))},
				}); !reflect.DeepEqual(err, context.Canceled) {
					t.Fatalf("(closed) Cure: error = %v", err)
				}
			}
		}, pkg.MustDecode("St9rlE-mGZ5gXwiv_hzQ_B8bZP-UUvSNmf4nHUZzCMOumb6hKnheZSe0dmnuc4Q2")},
		// Directory artifact behaviour: creation, dedup, failure cleanup,
		// and invalid-output detection.
		{"directory", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
			id := pkg.MustDecode(
				"HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY",
			)
			// makeSample populates the work directory with a small tree
			// containing a file, nested directories, and a dangling symlink.
			makeSample := func(t *pkg.TContext) error {
				work := t.GetWorkDir()
				if err := os.Mkdir(work.String(), 0700); err != nil {
					return err
				}
				if err := os.WriteFile(
					work.Append("check").String(),
					[]byte{0, 0},
					0400,
				); err != nil {
					return err
				}
				if err := os.MkdirAll(work.Append(
					"lib",
					"pkgconfig",
				).String(), 0700); err != nil {
					return err
				}
				return os.Symlink(
					"/proc/nonexistent/libedac.so",
					work.Append(
						"lib",
						"libedac.so",
					).String(),
				)
			}
			wantChecksum := pkg.MustDecode(
				"qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b",
			)
			wantPathname := base.Append(
				"identifier",
				pkg.Encode(id),
			)
			id0 := pkg.MustDecode(
				"Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa",
			)
			wantPathname0 := base.Append(
				"identifier",
				pkg.Encode(id0),
			)
			// makeGarbage populates the work directory with entries that
			// must be cleaned up after a failed or mismatching cure; when
			// wantErr is nil, permissions mimic a completed cure.
			makeGarbage := func(work *check.Absolute, wantErr error) error {
				if err := os.Mkdir(work.String(), 0700); err != nil {
					return err
				}
				mode := fs.FileMode(0)
				if wantErr == nil {
					mode = 0500
				}
				if err := os.MkdirAll(work.Append(
					"lib",
					"pkgconfig",
				).String(), 0700); err != nil {
					return err
				}
				if err := os.WriteFile(work.Append(
					"lib",
					"check",
				).String(), nil, 0400&mode); err != nil {
					return err
				}
				if err := os.Chmod(work.Append(
					"lib",
					"pkgconfig",
				).String(), 0500&mode); err != nil {
					return err
				}
				if err := os.Chmod(work.Append(
					"lib",
				).String(), 0500&mode); err != nil {
					return err
				}
				return wantErr
			}
			cureMany(t, c, []cureStep{
				{"initial directory", overrideChecksum{wantChecksum, overrideIdent{id, &stubArtifact{
					kind: pkg.KindTar,
					cure: makeSample,
				}}}, wantPathname, wantChecksum, nil},
				{"identical identifier", overrideChecksum{wantChecksum, overrideIdent{id, &stubArtifact{
					kind: pkg.KindTar,
				}}}, wantPathname, wantChecksum, nil},
				{"identical checksum", overrideIdent{id0, &stubArtifact{
					kind: pkg.KindTar,
					cure: makeSample,
				}}, wantPathname0, wantChecksum, nil},
				{"cure fault", overrideIdent{pkg.ID{0xff, 0}, &stubArtifact{
					kind: pkg.KindTar,
					cure: func(t *pkg.TContext) error {
						return makeGarbage(t.GetWorkDir(), stub.UniqueError(0xcafe))
					},
				}}, nil, pkg.Checksum{}, stub.UniqueError(0xcafe)},
				{"checksum mismatch", overrideChecksum{pkg.Checksum{}, overrideIdent{pkg.ID{0xff, 1}, &stubArtifact{
					kind: pkg.KindTar,
					cure: func(t *pkg.TContext) error {
						return makeGarbage(t.GetWorkDir(), nil)
					},
				}}}, nil, pkg.Checksum{}, &pkg.ChecksumMismatchError{
					Got: pkg.MustDecode(
						"CUx-3hSbTWPsbMfDhgalG4Ni_GmR9TnVX8F99tY_P5GtkYvczg9RrF5zO0jX9XYT",
					),
				}},
				{"cache hit bad type", newStubFile(
					pkg.KindHTTPGet,
					pkg.ID{0xff, 2},
					&wantChecksum,
					[]byte(testdata), nil,
				), nil, pkg.Checksum{}, pkg.InvalidFileModeError(
					fs.ModeDir|0500,
				)},
				{"openFile directory", overrideIdent{pkg.ID{0xff, 3}, &stubArtifact{
					kind: pkg.KindTar,
					cure: func(t *pkg.TContext) error {
						r, err := t.Open(overrideChecksumFile{checksum: wantChecksum})
						if err != nil {
							panic(err)
						}
						_, err = io.ReadAll(r)
						return err
					},
				}}, nil, pkg.Checksum{}, &os.PathError{
					Op: "read",
					Path: base.Append(
						"checksum",
						pkg.Encode(wantChecksum),
					).String(),
					Err: syscall.EISDIR,
				}},
				{"no output", overrideIdent{pkg.ID{0xff, 4}, &stubArtifact{
					kind: pkg.KindTar,
					cure: func(t *pkg.TContext) error {
						return nil
					},
				}}, nil, pkg.Checksum{}, pkg.NoOutputError{}},
				{"file output", overrideIdent{pkg.ID{0xff, 5}, &stubArtifact{
					kind: pkg.KindTar,
					cure: func(t *pkg.TContext) error {
						return os.WriteFile(t.GetWorkDir().String(), []byte{0}, 0400)
					},
				}}, nil, pkg.Checksum{}, errors.New("non-file artifact produced regular file")},
				{"symlink output", overrideIdent{pkg.ID{0xff, 6}, &stubArtifact{
					kind: pkg.KindTar,
					cure: func(t *pkg.TContext) error {
						return os.Symlink(
							t.GetWorkDir().String(),
							t.GetWorkDir().String(),
						)
					},
				}}, nil, pkg.Checksum{}, pkg.InvalidFileModeError(
					fs.ModeSymlink|0777,
				)},
			})
		}, pkg.MustDecode("WVpvsVqVKg9Nsh744x57h51AuWUoUR2nnh8Md-EYBQpk6ziyTuUn6PLtF2e0Eu_d")},
		// Pending behaviour: a second cure of the same identifier blocks
		// until the first completes and observes its error.
		{"pending", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
			c.SetStrict(true)
			wantErr := stub.UniqueError(0xcafe)
			n, ready := make(chan struct{}), make(chan struct{})
			go func() {
				if _, _, err := c.Cure(overrideIdent{pkg.ID{0xff}, &stubArtifact{
					kind: pkg.KindTar,
					cure: func(t *pkg.TContext) error {
						close(ready)
						<-n
						return wantErr
					},
				}}); !reflect.DeepEqual(err, wantErr) {
					panic(fmt.Sprintf("Cure: error = %v, want %v", err, wantErr))
				}
			}()
			<-ready
			wCureDone := make(chan struct{})
			go func() {
				if _, _, err := c.Cure(overrideIdent{pkg.ID{0xff}, &stubArtifact{
					kind: pkg.KindTar,
				}}); !reflect.DeepEqual(err, wantErr) {
					panic(fmt.Sprintf("Cure: error = %v, want %v", err, wantErr))
				}
				close(wCureDone)
			}()
			// check cache activity while a cure is blocking
			cureMany(t, c, []cureStep{
				{"error passthrough", newStubFile(
					pkg.KindHTTPGet,
					pkg.ID{0xff, 1},
					nil,
					nil, stub.UniqueError(0xbad),
				), nil, pkg.Checksum{}, stub.UniqueError(0xbad)},
				{"file output", overrideIdent{pkg.ID{0xff, 2}, &stubArtifact{
					kind: pkg.KindTar,
					cure: func(t *pkg.TContext) error {
						return os.WriteFile(
							t.GetWorkDir().String(),
							[]byte{0},
							0400,
						)
					},
				}}, nil, pkg.Checksum{}, errors.New(
					"non-file artifact produced regular file",
				)},
			})
			wantErrScrub := &pkg.ScrubError{
				Errs: map[unique.Handle[string]][]error{
					base.Handle(): {errors.New("scrub began with pending artifacts")},
				},
			}
			if err := c.Scrub(1 << 6); !reflect.DeepEqual(err, wantErrScrub) {
				t.Fatalf("Scrub: error = %#v, want %#v", err, wantErrScrub)
			}
			// Reach into the unexported identPending map via reflect/unsafe
			// to retrieve the notification channel for the blocked cure.
			identPendingVal := reflect.ValueOf(c).Elem().FieldByName("identPending")
			identPending := reflect.NewAt(
				identPendingVal.Type(),
				unsafe.Pointer(identPendingVal.UnsafeAddr()),
			).Elem().Interface().(map[unique.Handle[pkg.ID]]<-chan struct{})
			notify := identPending[unique.Make(pkg.ID{0xff})]
			go close(n)
			<-notify
			<-wCureDone
		}, pkg.MustDecode("E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C")},
		// Scrub behaviour: plant garbage entries and compare the resulting
		// ScrubError against the shared newWantScrubError fixture.
		{"scrub", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
			cureMany(t, c, []cureStep{
				{"bad measured file", newStubFile(
					pkg.KindHTTPGet,
					pkg.Checksum{0xfe, 0},
					&pkg.Checksum{0xff, 0},
					[]byte{0}, nil,
				), base.Append(
					"identifier",
					pkg.Encode(pkg.Checksum{0xfe, 0}),
				), pkg.Checksum{0xff, 0}, nil},
			})
			for _, p := range [][]string{
				{"identifier", "invalid"},
				{"identifier", pkg.Encode(pkg.ID{0xfe, 0xff})},
				{"checksum", "invalid"},
			} {
				if err := os.WriteFile(
					base.Append(p...).String(),
					nil,
					0400,
				); err != nil {
					t.Fatal(err)
				}
			}
			for _, p := range [][]string{
				{"../nonexistent", "checksum", pkg.Encode(pkg.Checksum{0xff, 0xff})},
				{"../nonexistent", "identifier", pkg.Encode(pkg.Checksum{0xfe, 0xfe})},
			} {
				if err := os.Symlink(
					p[0],
					base.Append(p[1:]...).String(),
				); err != nil {
					t.Fatal(err)
				}
			}
			wantErr := newWantScrubError(base)
			if err := c.Scrub(1 << 6); !reflect.DeepEqual(err, wantErr) {
				t.Fatalf("Scrub: error =\n%s\nwant\n%s", err, wantErr)
			}
		}, pkg.MustDecode("E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C")},
	}
	checkWithCache(t, testCases)
}
// TestErrors pins the Error string of every simple error type in pkg.
func TestErrors(t *testing.T) {
	t.Parallel()
	testCases := []struct {
		name string
		err  error
		want string
	}{
		{"InvalidLookupError", pkg.InvalidLookupError{
			0xff, 0xf0,
		}, "attempting to look up non-dependency artifact __AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"},
		{"InvalidArtifactError", pkg.InvalidArtifactError{
			0xff, 0xfd,
		}, "artifact __0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA cannot be cured"},
		{"ChecksumMismatchError", &pkg.ChecksumMismatchError{
			Want: (pkg.Checksum)(bytes.Repeat([]byte{
				0x75, 0xe6, 0x9d, 0x6d, 0xe7, 0x9f,
			}, 8)),
		}, "got AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" +
			" instead of deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"},
		{"ResponseStatusError", pkg.ResponseStatusError(
			http.StatusNotAcceptable,
		), "the requested URL returned non-OK status: Not Acceptable"},
		{"DisallowedTypeflagError", pkg.DisallowedTypeflagError(
			tar.TypeChar,
		), "disallowed typeflag '3'"},
		{"InvalidFileModeError", pkg.InvalidFileModeError(
			fs.ModeSymlink | 0777,
		), "artifact did not produce a regular file or directory"},
		{"NoOutputError", pkg.NoOutputError{
			// empty struct
		}, "artifact cured successfully but did not produce any output"},
		{"IRKindError", &pkg.IRKindError{
			Got:       pkg.IRKindEnd,
			Want:      pkg.IRKindIdent,
			Ancillary: 0xcafebabe,
		}, "got terminator IR value (0xcafebabe) instead of ident"},
		{"IRKindError invalid", &pkg.IRKindError{
			Got:       0xbeef,
			Want:      pkg.IRKindIdent,
			Ancillary: 0xcafe,
		}, "got invalid kind 48879 IR value (0xcafe) instead of ident"},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			if got := tc.err.Error(); got != tc.want {
				t.Errorf("Error: %q, want %q", got, tc.want)
			}
		})
	}
}
// TestScrubError pins the formatted output and Unwrap order of [pkg.ScrubError].
// NOTE: the backquoted want strings are byte-significant; do not reformat.
func TestScrubError(t *testing.T) {
	t.Parallel()
	testCases := []struct {
		name   string
		err    pkg.ScrubError
		want   string
		unwrap []error
	}{
		{"sample", *newWantScrubError(
			fhs.AbsVarLib.Append("cure"),
		), `checksum mismatches:
got vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX instead of _wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
dangling identifiers:
_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
_v4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
_v8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
errors during scrub:
/var/lib/cure/checksum/__8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA:
artifact did not produce a regular file or directory
/var/lib/cure/checksum/invalid:
illegal base64 data at input byte 4
/var/lib/cure/identifier/_v8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA:
readlink /var/lib/cure/identifier/_v8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA: invalid argument
/var/lib/cure/identifier/invalid:
illegal base64 data at input byte 4
/var/lib/cure/nonexistent:
illegal base64 data at input byte 8
`, []error{
			&pkg.ChecksumMismatchError{Got: pkg.MustDecode(
				"vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX",
			), Want: pkg.Checksum{0xff, 0}},
			pkg.InvalidFileModeError(fs.ModeSymlink),
			base64.CorruptInputError(4),
			&os.PathError{
				Op: "readlink",
				Path: fhs.AbsVarLib.Append("cure").Append(
					"identifier",
					pkg.Encode(pkg.ID{0xfe, 0xff}),
				).String(),
				Err: syscall.EINVAL,
			},
			base64.CorruptInputError(4),
			base64.CorruptInputError(8),
		}},
		{"full", pkg.ScrubError{
			ChecksumMismatches: []pkg.ChecksumMismatchError{
				{Want: pkg.MustDecode("CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
			},
			DanglingIdentifiers: []pkg.ID{
				(pkg.ID)(bytes.Repeat([]byte{0x75, 0xe6, 0x9d, 0x6d, 0xe7, 0x9f}, 8)),
				(pkg.ID)(bytes.Repeat([]byte{0x71, 0xa7, 0xde, 0x6d, 0xa6, 0xde}, 8)),
			},
			Errs: map[unique.Handle[string]][]error{
				unique.Make("/proc/nonexistent"): {
					stub.UniqueError(0xcafe),
					stub.UniqueError(0xbad),
					stub.UniqueError(0xff),
				},
			},
		}, `checksum mismatches:
got AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA instead of CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN
dangling identifiers:
deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef
cafebabecafebabecafebabecafebabecafebabecafebabecafebabecafebabe
errors during scrub:
/proc/nonexistent:
unique error 51966 injected by the test suite
unique error 2989 injected by the test suite
unique error 255 injected by the test suite
`, []error{
			&pkg.ChecksumMismatchError{Want: pkg.MustDecode("CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
			stub.UniqueError(0xcafe),
			stub.UniqueError(0xbad),
			stub.UniqueError(0xff),
		}},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			if got := tc.err.Error(); got != tc.want {
				t.Errorf("Error:\n\n%s\n\nwant\n\n%s", got, tc.want)
			}
			if unwrap := tc.err.Unwrap(); !reflect.DeepEqual(unwrap, tc.unwrap) {
				t.Errorf("Unwrap: %#v, want %#v", unwrap, tc.unwrap)
			}
		})
	}
}
// TestDependencyCureError pins the formatted output, sorting, deduplication,
// and Unwrap order of [pkg.DependencyCureError], including nested instances.
// NOTE: the backquoted want strings are byte-significant; do not reformat.
func TestDependencyCureError(t *testing.T) {
	t.Parallel()
	testCases := []struct {
		name   string
		err    pkg.DependencyCureError
		want   string
		unwrap []error
	}{
		{"simple", pkg.DependencyCureError{
			{Ident: unique.Make(pkg.ID{0xff, 9}), Err: stub.UniqueError(0xbad09)},
			{Ident: unique.Make(pkg.ID{0xff, 0}), Err: stub.UniqueError(0xbad00)},
			{Ident: unique.Make(pkg.ID{0xff, 0xf}), Err: stub.UniqueError(0xbad0f)},
			{Ident: unique.Make(pkg.ID{0xff, 1}), Err: stub.UniqueError(0xbad01)},
		}, `errors curing dependencies:
_wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA: unique error 765184 injected by the test suite
_wEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA: unique error 765185 injected by the test suite
_wkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA: unique error 765193 injected by the test suite
_w8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA: unique error 765199 injected by the test suite`, []error{
			&pkg.CureError{Ident: unique.Make(pkg.ID{0xff, 0}), Err: stub.UniqueError(0xbad00)},
			&pkg.CureError{Ident: unique.Make(pkg.ID{0xff, 1}), Err: stub.UniqueError(0xbad01)},
			&pkg.CureError{Ident: unique.Make(pkg.ID{0xff, 9}), Err: stub.UniqueError(0xbad09)},
			&pkg.CureError{Ident: unique.Make(pkg.ID{0xff, 0xf}), Err: stub.UniqueError(0xbad0f)},
		}},
		// Nested DependencyCureErrors are flattened and duplicate entries
		// collapsed; expected output is identical to the simple case.
		{"dedup", pkg.DependencyCureError{
			{Ident: unique.Make(pkg.ID{0xff, 9}), Err: stub.UniqueError(0xbad09)},
			{Ident: unique.Make(pkg.ID{0xff, 0}), Err: stub.UniqueError(0xbad00)},
			{Ident: unique.Make(pkg.ID{0xff, 0xfd}), Err: &pkg.DependencyCureError{
				{Ident: unique.Make(pkg.ID{0xff, 9}), Err: stub.UniqueError(0xbad09)},
				{Ident: unique.Make(pkg.ID{0xff, 0xc}), Err: &pkg.DependencyCureError{
					{Ident: unique.Make(pkg.ID{0xff, 0xf}), Err: stub.UniqueError(0xbad0f)},
					{Ident: unique.Make(pkg.ID{0xff, 0}), Err: stub.UniqueError(0xbad00)},
				}},
				{Ident: unique.Make(pkg.ID{0xff, 0}), Err: stub.UniqueError(0xbad00)},
				{Ident: unique.Make(pkg.ID{0xff, 0}), Err: stub.UniqueError(0xbad00)},
			}},
			{Ident: unique.Make(pkg.ID{0xff, 0xff}), Err: &pkg.DependencyCureError{
				{Ident: unique.Make(pkg.ID{0xff, 9}), Err: stub.UniqueError(0xbad09)},
				{Ident: unique.Make(pkg.ID{0xff, 0xc}), Err: &pkg.DependencyCureError{
					{Ident: unique.Make(pkg.ID{0xff, 0}), Err: stub.UniqueError(0xbad00)},
				}},
				{Ident: unique.Make(pkg.ID{0xff, 0}), Err: stub.UniqueError(0xbad00)},
			}},
			{Ident: unique.Make(pkg.ID{0xff, 0xf}), Err: stub.UniqueError(0xbad0f)},
			{Ident: unique.Make(pkg.ID{0xff, 1}), Err: stub.UniqueError(0xbad01)},
		}, `errors curing dependencies:
_wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA: unique error 765184 injected by the test suite
_wEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA: unique error 765185 injected by the test suite
_wkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA: unique error 765193 injected by the test suite
_w8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA: unique error 765199 injected by the test suite`, []error{
			&pkg.CureError{Ident: unique.Make(pkg.ID{0xff, 0}), Err: stub.UniqueError(0xbad00)},
			&pkg.CureError{Ident: unique.Make(pkg.ID{0xff, 1}), Err: stub.UniqueError(0xbad01)},
			&pkg.CureError{Ident: unique.Make(pkg.ID{0xff, 9}), Err: stub.UniqueError(0xbad09)},
			&pkg.CureError{Ident: unique.Make(pkg.ID{0xff, 0xf}), Err: stub.UniqueError(0xbad0f)},
		}},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			if got := tc.err.Error(); got != tc.want {
				t.Errorf("Error:\n%s\nwant\n%s", got, tc.want)
			}
			if unwrap := tc.err.Unwrap(); !reflect.DeepEqual(unwrap, tc.unwrap) {
				t.Errorf("Unwrap: %#v, want %#v", unwrap, tc.unwrap)
			}
		})
	}
}
// earlyFailureF is a [FloodArtifact] with a large dependency graph resulting in
// a large [DependencyCureError]. The value is the remaining depth; each level
// fans out into that many children of the next level down.
type earlyFailureF int

func (earlyFailureF) Kind() pkg.Kind       { return pkg.KindExec }
func (earlyFailureF) Params(*pkg.IContext) {}
func (earlyFailureF) IsExclusive() bool    { return false }

// Dependencies returns int(a) identical children one level lower.
func (a earlyFailureF) Dependencies() []pkg.Artifact {
	children := make([]pkg.Artifact, 0, int(a))
	for i := 0; i < int(a); i++ {
		children = append(children, a-1)
	}
	return children
}

// Cure only ever runs on the leaves; any other depth is a test bug.
func (a earlyFailureF) Cure(*pkg.FContext) error {
	if a == 0 {
		return stub.UniqueError(0xcafe)
	}
	panic("unexpected cure on " + strconv.Itoa(int(a)))
}
// TestDependencyCureErrorEarly checks that curing an artifact with a large
// dependency graph fails early, surfacing the injected leaf error through
// errors.Is, and leaves the cache in a consistent (empty-equivalent) state.
func TestDependencyCureErrorEarly(t *testing.T) {
	// Consistency fix: every other top-level test in this file runs in
	// parallel; this one was missing the call.
	t.Parallel()
	checkWithCache(t, []cacheTestCase{
		{"early", nil, func(t *testing.T, _ *check.Absolute, c *pkg.Cache) {
			_, _, err := c.Cure(earlyFailureF(8))
			if !errors.Is(err, stub.UniqueError(0xcafe)) {
				t.Fatalf("Cure: error = %v", err)
			}
		}, pkg.MustDecode("E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C")},
	})
}
// TestNew exercises failure paths of [pkg.Open].
func TestNew(t *testing.T) {
	t.Parallel()

	t.Run("nonexistent", func(t *testing.T) {
		t.Parallel()
		wantErr := &os.PathError{
			Op:   "mkdir",
			Path: container.Nonexistent,
			Err:  syscall.ENOENT,
		}
		_, err := pkg.Open(
			t.Context(),
			message.New(nil),
			0, check.MustAbs(container.Nonexistent),
		)
		if !reflect.DeepEqual(err, wantErr) {
			t.Errorf("Open: error = %#v, want %#v", err, wantErr)
		}
	})

	t.Run("permission", func(t *testing.T) {
		t.Parallel()
		tempDir := check.MustAbs(t.TempDir())
		if err := os.Chmod(tempDir.String(), 0); err != nil {
			t.Fatal(err)
		}
		// Restore permissions so the testing package can delete the temp dir.
		t.Cleanup(func() {
			if err := os.Chmod(tempDir.String(), 0700); err != nil {
				t.Fatal(err)
			}
		})
		wantErr := &os.PathError{
			Op:   "mkdir",
			Path: tempDir.Append("cache").String(),
			Err:  syscall.EACCES,
		}
		_, err := pkg.Open(
			t.Context(),
			message.New(nil),
			0, tempDir.Append("cache"),
		)
		if !reflect.DeepEqual(err, wantErr) {
			t.Errorf("Open: error = %#v, want %#v", err, wantErr)
		}
	})
}