internal/pkg: move dependency flooding to cache
All checks were successful
Test / Create distribution (push) Successful in 47s
Test / Sandbox (push) Successful in 2m50s
Test / ShareFS (push) Successful in 4m46s
Test / Sandbox (race detector) (push) Successful in 5m16s
Test / Hpkg (push) Successful in 5m24s
Test / Hakurei (push) Successful in 5m40s
Test / Hakurei (race detector) (push) Successful in 7m27s
Test / Flake checks (push) Successful in 1m42s
This imposes a hard upper limit on concurrency during dependency satisfaction and moves all dependency-related code out of individual implementations of Artifact. This change also moves ctx and msg into Cache.

Signed-off-by: Ophestra <cat@gensokyo.uk>
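For reference, a minimal sketch of how a caller constructs the cache after this change, going by the New and Close signatures introduced in the diff below. The cache directory and logger prefix are placeholders, not values from this commit, and hakurei.app/internal/pkg is only importable from inside the repository.

package main

import (
	"context"
	"log"
	"os"

	"hakurei.app/container/check"
	"hakurei.app/internal/pkg"
	"hakurei.app/message"
)

func main() {
	msg := message.New(log.New(os.Stderr, "pkg: ", 0))

	// cures bounds concurrent dependency cures; 0 or lower falls back to runtime.NumCPU.
	c, err := pkg.New(context.Background(), msg, 0, check.MustAbs("/var/cache/hakurei"))
	if err != nil {
		log.Fatal(err)
	}
	// Close cancels pending cures and waits for the worker goroutines to exit.
	defer c.Close()
}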
@@ -423,6 +423,46 @@ func TestFlatten(t *testing.T) {
 {Mode: fs.ModeDir | 0700, Path: "work"},
 }, pkg.MustDecode("RibudsoY1X4_dtshfvL5LYfCPcxVnP0ikOn3yBHzOrt6BpevQiANLJF6Xua76-gM"), nil},
 
+{"sample exec container multiple layers", fstest.MapFS{
+".": {Mode: fs.ModeDir | 0700},
+
+"checksum": {Mode: fs.ModeDir | 0700},
+"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9": {Mode: fs.ModeDir | 0500},
+"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check": {Mode: 0400, Data: []byte{0}},
+"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
+"checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb": {Mode: 0400, Data: []byte{}},
+"checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK": {Mode: fs.ModeDir | 0500},
+"checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK/check": {Mode: 0400, Data: []byte{0x6c, 0x61, 0x79, 0x65, 0x72, 0x73}},
+
+"identifier": {Mode: fs.ModeDir | 0700},
+"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
+"identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
+"identifier/nnQusiSoVSTQ0hHhAwRr032HA23Qg2fluUKQTTDeNhzspO5F-5FgtSU3d9NKvwAs": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK")},
+"identifier/o4z73RXEjkF5BBoXYg4WZ3c4VCjwvfJtPKFrstFHcEQxkVDoWTtdc5TMXigFvTsF": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
+
+"temp": {Mode: fs.ModeDir | 0700},
+"work": {Mode: fs.ModeDir | 0700},
+}, []pkg.FlatEntry{
+{Mode: fs.ModeDir | 0700, Path: "."},
+
+{Mode: fs.ModeDir | 0700, Path: "checksum"},
+{Mode: fs.ModeDir | 0500, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"},
+{Mode: 0400, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check", Data: []byte{0}},
+{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
+{Mode: 0400, Path: "checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb", Data: []byte{}},
+{Mode: fs.ModeDir | 0500, Path: "checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK"},
+{Mode: 0400, Path: "checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK/check", Data: []byte("layers")},
+
+{Mode: fs.ModeDir | 0700, Path: "identifier"},
+{Mode: fs.ModeSymlink | 0777, Path: "identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
+{Mode: fs.ModeSymlink | 0777, Path: "identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
+{Mode: fs.ModeSymlink | 0777, Path: "identifier/nnQusiSoVSTQ0hHhAwRr032HA23Qg2fluUKQTTDeNhzspO5F-5FgtSU3d9NKvwAs", Data: []byte("../checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK")},
+{Mode: fs.ModeSymlink | 0777, Path: "identifier/o4z73RXEjkF5BBoXYg4WZ3c4VCjwvfJtPKFrstFHcEQxkVDoWTtdc5TMXigFvTsF", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
+
+{Mode: fs.ModeDir | 0700, Path: "temp"},
+{Mode: fs.ModeDir | 0700, Path: "work"},
+}, pkg.MustDecode("a05V6iVGAfLO8aG0VSSFyB1QvnzJoMmGbMX6ud8CMMbatvyBv90_xGn1qWEIsjkQ"), nil},
+
 {"sample file short", fstest.MapFS{
 ".": {Mode: fs.ModeDir | 0700},
 
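The fixture above exercises the on-disk layout this commit builds on: cured data sits under checksum/ named by an encoded digest, with identifier/ symlinks pointing back at it. As a rough illustration of how such a name can be derived: the empty-file entry above matches the unpadded base64url encoding of the SHA-384 digest of empty input, so this sketch uses crypto/sha512's Sum384; directory (HashFS) checksums use a different construction not shown here, so treat this as an assumption about single-file content only.

package main

import (
	"crypto/sha512"
	"encoding/base64"
	"path/filepath"
)

// checksumDir derives a checksum-style directory name for data under base.
// Illustrative only: a 48-byte digest encoded as unpadded base64url yields
// the 64-character names seen in the fixture above.
func checksumDir(base string, data []byte) string {
	sum := sha512.Sum384(data)
	return filepath.Join(base, "checksum", base64.RawURLEncoding.EncodeToString(sum[:]))
}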
@@ -5,7 +5,6 @@ import (
 "context"
 "errors"
 "os"
-"runtime"
 "slices"
 "strconv"
 "syscall"
@@ -15,7 +14,6 @@ import (
 "hakurei.app/container/check"
 "hakurei.app/container/fhs"
 "hakurei.app/container/std"
-"hakurei.app/message"
 )
 
 // AbsWork is the container pathname [CureContext.GetWorkDir] is mounted on.
@@ -30,7 +28,7 @@ type ExecPath struct {
 // If there are multiple entries or W is true, P is set up as an overlay
 // mount, and entries of A must not implement [File].
 A []Artifact
-// Whether to make the mount point writable via an invisible tmpfs upperdir.
+// Whether to make the mount point writable via the temp directory.
 W bool
 }
 
@@ -50,17 +48,8 @@ func MustPath(pathname string, writable bool, a ...Artifact) ExecPath {
 // Methods of execArtifact does not modify any struct field or underlying arrays
 // referred to by slices.
 type execArtifact struct {
-// Caller-supplied context.
-ctx context.Context
 // Caller-supplied inner mount points.
 paths []ExecPath
-// Caller-supplied logging facility, passed through to [container] and used
-// internally to produce verbose output.
-msg message.Msg
-
-// Number of [Artifact] to concurrently cure. A value of 0 or lower is
-// equivalent to the value returned by [runtime.NumCPU].
-cures int
 
 // Passed through to [container.Params].
 dir *check.Absolute
@@ -93,10 +82,10 @@ func (a *execNetArtifact) Params() []byte {
 return slices.Concat(a.checksum[:], a.execArtifact.Params())
 }
 
-// Cure cures the [Artifact] by curing all its dependencies then running the
-// container described by the caller. The container retains host networking.
-func (a *execNetArtifact) Cure(c *CureContext) error {
-return a.cure(c, true)
+// Cure cures the [Artifact] in the container described by the caller. The
+// container retains host networking.
+func (a *execNetArtifact) Cure(f *FContext) error {
+return a.cure(f, true)
 }
 
 // NewExec returns a new [Artifact] bounded by ctx, it cures all [Artifact]
@@ -114,13 +103,7 @@ func (a *execNetArtifact) Cure(c *CureContext) error {
 //
 // If checksum is non-nil, the resulting [Artifact] implements [KnownChecksum]
 // and its container runs in the host net namespace.
-//
-// A cures value of 0 or lower is equivalent to the value returned by
-// [runtime.NumCPU].
 func NewExec(
-ctx context.Context,
-msg message.Msg,
-cures int,
 checksum *Checksum,
 
 dir *check.Absolute,
@@ -130,7 +113,7 @@ func NewExec(
 
 paths ...ExecPath,
 ) Artifact {
-a := execArtifact{ctx, paths, msg, cures, dir, env, path, args}
+a := execArtifact{paths, dir, env, path, args}
 if checksum == nil {
 return &a
 }
@@ -186,10 +169,9 @@ func (a *execArtifact) Dependencies() []Artifact {
 return slices.Concat(artifacts...)
 }
 
-// Cure cures the [Artifact] by curing all its dependencies then running the
-// container described by the caller.
-func (a *execArtifact) Cure(c *CureContext) (err error) {
-return a.cure(c, false)
+// Cure cures the [Artifact] in the container described by the caller.
+func (a *execArtifact) Cure(f *FContext) (err error) {
+return a.cure(f, false)
 }
 
 const (
@@ -199,20 +181,8 @@ const (
 
 // cure is like Cure but allows optional host net namespace. This is used for
 // the [KnownChecksum] variant where networking is allowed.
-func (a *execArtifact) cure(c *CureContext, hostNet bool) (err error) {
-cures := a.cures
-if cures < 1 {
-cures = runtime.NumCPU()
-}
-
+func (a *execArtifact) cure(f *FContext, hostNet bool) (err error) {
 overlayWorkIndex := -1
-type curePath struct {
-// Copied from ExecPath.P.
-dst *check.Absolute
-// Cured from ExecPath.A.
-src []*check.Absolute
-}
-paths := make([]curePath, len(a.paths))
 for i, p := range a.paths {
 if p.P == nil || len(p.A) == 0 {
 return os.ErrInvalid
@@ -220,8 +190,6 @@ func (a *execArtifact) cure(c *CureContext, hostNet bool) (err error) {
 if p.P.Is(AbsWork) {
 overlayWorkIndex = i
 }
-paths[i].dst = p.P
-paths[i].src = make([]*check.Absolute, len(p.A))
 }
 
 var artifactCount int
@@ -229,71 +197,10 @@ func (a *execArtifact) cure(c *CureContext, hostNet bool) (err error) {
 artifactCount += len(p.A)
 }
 
-if len(paths) > 0 {
-type cureArtifact struct {
-// Index of pending Artifact in paths.
-index [2]int
-// Pending artifact.
-a Artifact
-}
-ac := make(chan cureArtifact, artifactCount)
-for i, p := range a.paths {
-for j, d := range p.A {
-ac <- cureArtifact{[2]int{i, j}, d}
-}
-}
-
-type cureRes struct {
-// Index of result in paths.
-index [2]int
-// Cured pathname.
-pathname *check.Absolute
-// Error returned by c.
-err error
-}
-res := make(chan cureRes)
-
-for i := 0; i < cures; i++ {
-go func() {
-for d := range ac {
-// computing and encoding identifier is expensive
-if a.msg.IsVerbose() {
-a.msg.Verbosef("curing %s...", Encode(Ident(d.a)))
-}
-
-var cr cureRes
-cr.index = d.index
-cr.pathname, _, cr.err = c.Cure(d.a)
-res <- cr
-}
-}()
-}
-
-var count int
-errs := make([]error, 0, artifactCount)
-for cr := range res {
-count++
-
-if cr.err != nil {
-errs = append(errs, cr.err)
-} else {
-paths[cr.index[0]].src[cr.index[1]] = cr.pathname
-}
-
-if count == artifactCount {
-break
-}
-}
-close(ac)
-if err = errors.Join(errs...); err != nil {
-return
-}
-}
-
-ctx, cancel := context.WithCancel(a.ctx)
+ctx, cancel := context.WithCancel(f.Unwrap())
 defer cancel()
 
-z := container.New(ctx, a.msg)
+z := container.New(ctx, f.GetMessage())
 z.WaitDelay = execWaitDelay
 z.SeccompPresets |= std.PresetStrict
 z.ParentPerm = 0700
@@ -303,15 +210,20 @@ func (a *execArtifact) cure(c *CureContext, hostNet bool) (err error) {
 z.Hostname = "cure-net"
 }
 z.Uid, z.Gid = (1<<10)-1, (1<<10)-1
-if a.msg.IsVerbose() {
+if f.GetMessage().IsVerbose() {
 z.Stdout, z.Stderr = os.Stdout, os.Stderr
 }
 
 z.Dir, z.Env, z.Path, z.Args = a.dir, a.env, a.path, a.args
-z.Grow(len(paths) + 4)
+z.Grow(len(a.paths) + 4)
 
-temp, work := c.GetTempDir(), c.GetWorkDir()
-for i, b := range paths {
+temp, work := f.GetTempDir(), f.GetWorkDir()
+for i, b := range a.paths {
+layers := make([]*check.Absolute, len(b.A))
+for j, d := range b.A {
+layers[j] = f.Pathname(d)
+}
+
 if i == overlayWorkIndex {
 if err = os.MkdirAll(work.String(), 0700); err != nil {
 return
@@ -324,7 +236,7 @@ func (a *execArtifact) cure(c *CureContext, hostNet bool) (err error) {
 AbsWork,
 work,
 tempWork,
-b.src...,
+layers...,
 )
 continue
 }
@@ -341,11 +253,11 @@ func (a *execArtifact) cure(c *CureContext, hostNet bool) (err error) {
 if err = os.MkdirAll(tempWork.String(), 0700); err != nil {
 return
 }
-z.Overlay(b.dst, tempUpper, tempWork, b.src...)
-} else if len(b.src) == 1 {
-z.Bind(b.src[0], b.dst, 0)
+z.Overlay(b.P, tempUpper, tempWork, layers...)
+} else if len(layers) == 1 {
+z.Bind(layers[0], b.P, 0)
 } else {
-z.OverlayReadonly(b.dst, b.src...)
+z.OverlayReadonly(b.P, layers...)
 }
 }
 if overlayWorkIndex < 0 {
@@ -356,7 +268,7 @@ func (a *execArtifact) cure(c *CureContext, hostNet bool) (err error) {
 )
 }
 z.Bind(
-c.GetTempDir(),
+f.GetTempDir(),
 fhs.AbsTmp,
 std.BindWritable|std.BindEnsure,
 )
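With ctx, msg and cures gone from the constructor, a NewExec call now looks roughly like the updated test sites below. This is a sketch of the call shape only; c stands for a *pkg.Cache built with pkg.New and testtool for any Artifact value.

a := pkg.NewExec(
	nil,         // *pkg.Checksum: nil keeps the cure container offline
	pkg.AbsWork, // container working directory
	[]string{"HAKUREI_TEST=1"},
	check.MustAbs("/opt/bin/testtool"),
	[]string{"testtool", "layers"},
	pkg.MustPath("/opt", false, testtool),
)
pathname, checksum, err := c.Cure(a) // concurrency and logging now come from the Cache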
@@ -5,16 +5,15 @@ package pkg_test
 import (
 _ "embed"
 "errors"
-"log"
 "os"
 "os/exec"
+"slices"
 "testing"
 
 "hakurei.app/container/check"
 "hakurei.app/container/stub"
 "hakurei.app/hst"
 "hakurei.app/internal/pkg"
-"hakurei.app/message"
 )
 
 // testtoolBin is the container test tool binary made available to the
@@ -35,14 +34,8 @@ func TestExec(t *testing.T) {
 c.SetStrict(true)
 testtool, testtoolDestroy := newTesttool()
 
-msg := message.New(log.New(os.Stderr, "container: ", 0))
-msg.SwapVerbose(testing.Verbose())
-
 cureMany(t, c, []cureStep{
 {"container", pkg.NewExec(
-t.Context(),
-msg,
-0,
 nil,
 pkg.AbsWork,
 []string{"HAKUREI_TEST=1"},
@@ -58,17 +51,14 @@ func TestExec(t *testing.T) {
 pkg.MustPath("/.hakurei", false, stubArtifact{
 kind: pkg.KindTar,
 params: []byte("empty directory"),
-cure: func(c *pkg.CureContext) error {
-return os.MkdirAll(c.GetWorkDir().String(), 0700)
+cure: func(t *pkg.TContext) error {
+return os.MkdirAll(t.GetWorkDir().String(), 0700)
 },
 }),
 pkg.MustPath("/opt", false, testtool),
 ), ignorePathname, wantChecksumOffline, nil},
 
 {"error passthrough", pkg.NewExec(
-t.Context(),
-msg,
-0,
 nil,
 pkg.AbsWork,
 []string{"HAKUREI_TEST=1"},
@@ -78,16 +68,13 @@ func TestExec(t *testing.T) {
 pkg.MustPath("/proc/nonexistent", false, stubArtifact{
 kind: pkg.KindTar,
 params: []byte("doomed artifact"),
-cure: func(c *pkg.CureContext) error {
+cure: func(t *pkg.TContext) error {
 return stub.UniqueError(0xcafe)
 },
 }),
 ), nil, pkg.Checksum{}, errors.Join(stub.UniqueError(0xcafe))},
 
 {"invalid paths", pkg.NewExec(
-t.Context(),
-msg,
-0,
 nil,
 pkg.AbsWork,
 []string{"HAKUREI_TEST=1"},
@@ -101,9 +88,6 @@ func TestExec(t *testing.T) {
 // check init failure passthrough
 var exitError *exec.ExitError
 if _, _, err := c.Cure(pkg.NewExec(
-t.Context(),
-msg,
-0,
 nil,
 pkg.AbsWork,
 nil,
@@ -121,17 +105,11 @@ func TestExec(t *testing.T) {
 c.SetStrict(true)
 testtool, testtoolDestroy := newTesttool()
 
-msg := message.New(log.New(os.Stderr, "container: ", 0))
-msg.SwapVerbose(testing.Verbose())
-
 wantChecksum := pkg.MustDecode(
 "a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W",
 )
 cureMany(t, c, []cureStep{
 {"container", pkg.NewExec(
-t.Context(),
-msg,
-0,
 &wantChecksum,
 pkg.AbsWork,
 []string{"HAKUREI_TEST=1"},
@@ -147,8 +125,8 @@ func TestExec(t *testing.T) {
 pkg.MustPath("/.hakurei", false, stubArtifact{
 kind: pkg.KindTar,
 params: []byte("empty directory"),
-cure: func(c *pkg.CureContext) error {
-return os.MkdirAll(c.GetWorkDir().String(), 0700)
+cure: func(t *pkg.TContext) error {
+return os.MkdirAll(t.GetWorkDir().String(), 0700)
 },
 }),
 pkg.MustPath("/opt", false, testtool),
@@ -162,14 +140,8 @@ func TestExec(t *testing.T) {
 c.SetStrict(true)
 testtool, testtoolDestroy := newTesttool()
 
-msg := message.New(log.New(os.Stderr, "container: ", 0))
-msg.SwapVerbose(testing.Verbose())
-
 cureMany(t, c, []cureStep{
 {"container", pkg.NewExec(
-t.Context(),
-msg,
-0,
 nil,
 pkg.AbsWork,
 []string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"},
@@ -179,8 +151,8 @@ func TestExec(t *testing.T) {
 pkg.MustPath("/", true, stubArtifact{
 kind: pkg.KindTar,
 params: []byte("empty directory"),
-cure: func(c *pkg.CureContext) error {
-return os.MkdirAll(c.GetWorkDir().String(), 0700)
+cure: func(t *pkg.TContext) error {
+return os.MkdirAll(t.GetWorkDir().String(), 0700)
 },
 }),
 pkg.MustPath("/opt", false, testtool),
@@ -194,14 +166,8 @@ func TestExec(t *testing.T) {
 c.SetStrict(true)
 testtool, testtoolDestroy := newTesttool()
 
-msg := message.New(log.New(os.Stderr, "container: ", 0))
-msg.SwapVerbose(testing.Verbose())
-
 cureMany(t, c, []cureStep{
 {"container", pkg.NewExec(
-t.Context(),
-msg,
-0,
 nil,
 pkg.AbsWork,
 []string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"},
@@ -211,14 +177,14 @@ func TestExec(t *testing.T) {
 pkg.MustPath("/", true, stubArtifact{
 kind: pkg.KindTar,
 params: []byte("empty directory"),
-cure: func(c *pkg.CureContext) error {
-return os.MkdirAll(c.GetWorkDir().String(), 0700)
+cure: func(t *pkg.TContext) error {
+return os.MkdirAll(t.GetWorkDir().String(), 0700)
 },
 }), pkg.MustPath("/work/", false, stubArtifact{
 kind: pkg.KindTar,
 params: []byte("empty directory"),
-cure: func(c *pkg.CureContext) error {
-return os.MkdirAll(c.GetWorkDir().String(), 0700)
+cure: func(t *pkg.TContext) error {
+return os.MkdirAll(t.GetWorkDir().String(), 0700)
 },
 }), pkg.Path(pkg.AbsWork, false /* ignored */, testtool),
 ), ignorePathname, wantChecksumOffline, nil},
@@ -226,6 +192,57 @@ func TestExec(t *testing.T) {
 
 testtoolDestroy(t, base, c)
 }, pkg.MustDecode("RibudsoY1X4_dtshfvL5LYfCPcxVnP0ikOn3yBHzOrt6BpevQiANLJF6Xua76-gM")},
+
+{"multiple layers", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
+c.SetStrict(true)
+testtool, testtoolDestroy := newTesttool()
+
+cureMany(t, c, []cureStep{
+{"container", pkg.NewExec(
+nil,
+pkg.AbsWork,
+[]string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"},
+check.MustAbs("/opt/bin/testtool"),
+[]string{"testtool", "layers"},
+
+pkg.MustPath("/", true, stubArtifact{
+kind: pkg.KindTar,
+params: []byte("empty directory"),
+cure: func(t *pkg.TContext) error {
+return os.MkdirAll(t.GetWorkDir().String(), 0700)
+},
+}, stubArtifactF{
+kind: pkg.KindExec,
+params: []byte("test sample with dependencies"),
+
+deps: slices.Repeat([]pkg.Artifact{newStubFile(
+pkg.KindHTTPGet,
+pkg.ID{0xfe, 0},
+nil,
+nil, nil,
+), stubArtifact{
+kind: pkg.KindTar,
+params: []byte("empty directory"),
+
+// this is queued and might run instead of the other
+// one so do not leave it as nil
+cure: func(t *pkg.TContext) error {
+return os.MkdirAll(t.GetWorkDir().String(), 0700)
+},
+}}, 1<<5 /* concurrent cache hits */), cure: func(f *pkg.FContext) error {
+work := f.GetWorkDir()
+if err := os.MkdirAll(work.String(), 0700); err != nil {
+return err
+}
+return os.WriteFile(work.Append("check").String(), []byte("layers"), 0400)
+},
+}),
+pkg.MustPath("/opt", false, testtool),
+), ignorePathname, wantChecksumOffline, nil},
+})
+
+testtoolDestroy(t, base, c)
+}, pkg.MustDecode("a05V6iVGAfLO8aG0VSSFyB1QvnzJoMmGbMX6ud8CMMbatvyBv90_xGn1qWEIsjkQ")},
 })
 }
 
@@ -238,15 +255,15 @@ func newTesttool() (
 // testtoolBin is built during go:generate and is not deterministic
 testtool = overrideIdent{pkg.ID{0xfe, 0xff}, stubArtifact{
 kind: pkg.KindTar,
-cure: func(c *pkg.CureContext) error {
-work := c.GetWorkDir()
+cure: func(t *pkg.TContext) error {
+work := t.GetWorkDir()
 if err := os.MkdirAll(
 work.Append("bin").String(),
 0700,
 ); err != nil {
 return err
 }
-return os.WriteFile(c.GetWorkDir().Append(
+return os.WriteFile(t.GetWorkDir().Append(
 "bin",
 "testtool",
 ).String(), testtoolBin, 0500)
@@ -2,7 +2,6 @@ package pkg
 
 import (
 "crypto/sha512"
-"syscall"
 )
 
 // A fileArtifact is an [Artifact] that cures into data known ahead of time.
@@ -31,10 +30,5 @@ func (a fileArtifact) Checksum() Checksum {
 return Checksum(h.Sum(nil))
 }
 
-// Data returns the caller-supplied data.
-func (a fileArtifact) Data() ([]byte, error) { return a, nil }
-
-// Cure returns syscall.ENOTSUP. Callers should use Data instead.
-func (a fileArtifact) Cure(*CureContext) (err error) {
-return syscall.ENOTSUP
-}
+// Cure returns the caller-supplied data.
+func (a fileArtifact) Cure() ([]byte, error) { return a, nil }
@@ -6,7 +6,6 @@ import (
 "io"
 "net/http"
 "sync"
-"syscall"
 )
 
 // An httpArtifact is an [Artifact] backed by a [http] url string. The method is
@@ -100,14 +99,9 @@ func (a *httpArtifact) do() (data []byte, err error) {
 return
 }
 
-// Cure returns syscall.ENOTSUP. Callers should use Data instead.
-func (a *httpArtifact) Cure(*CureContext) error {
-return syscall.ENOTSUP
-}
-
-// Data completes the http request and returns the resulting response body read
+// Cure completes the http request and returns the resulting response body read
 // to EOF. Data does not interact with the filesystem.
-func (a *httpArtifact) Data() (data []byte, err error) {
+func (a *httpArtifact) Cure() (data []byte, err error) {
 a.mu.Lock()
 defer a.mu.Unlock()
 
@@ -37,10 +37,10 @@ func TestHTTPGet(t *testing.T) {
 testdataChecksum,
 )
 wantIdent := pkg.KindHTTPGet.Ident([]byte("file:///testdata"))
-if got, err := f.Data(); err != nil {
-t.Fatalf("Data: error = %v", err)
+if got, err := f.Cure(); err != nil {
+t.Fatalf("Cure: error = %v", err)
 } else if string(got) != testdata {
-t.Fatalf("Data: %x, want %x", got, testdata)
+t.Fatalf("Cure: %x, want %x", got, testdata)
 } else if gotIdent := pkg.Ident(f); gotIdent != wantIdent {
 t.Fatalf("Ident: %s, want %s", pkg.Encode(gotIdent), pkg.Encode(wantIdent))
 }
@@ -55,8 +55,8 @@ func TestHTTPGet(t *testing.T) {
 wantErrMismatch := &pkg.ChecksumMismatchError{
 Got: testdataChecksum,
 }
-if _, err := f.Data(); !reflect.DeepEqual(err, wantErrMismatch) {
-t.Fatalf("Data: error = %#v, want %#v", err, wantErrMismatch)
+if _, err := f.Cure(); !reflect.DeepEqual(err, wantErrMismatch) {
+t.Fatalf("Cure: error = %#v, want %#v", err, wantErrMismatch)
 } else if gotIdent := pkg.Ident(f); gotIdent != wantIdent {
 t.Fatalf("Ident: %s, want %s", pkg.Encode(gotIdent), pkg.Encode(wantIdent))
 }
@@ -70,8 +70,8 @@ func TestHTTPGet(t *testing.T) {
 )
 wantIdentNonexistent := pkg.KindHTTPGet.Ident([]byte("file:///nonexistent"))
 wantErrNotFound := pkg.ResponseStatusError(http.StatusNotFound)
-if _, err := f.Data(); !reflect.DeepEqual(err, wantErrNotFound) {
-t.Fatalf("Data: error = %#v, want %#v", err, wantErrNotFound)
+if _, err := f.Cure(); !reflect.DeepEqual(err, wantErrNotFound) {
+t.Fatalf("Cure: error = %#v, want %#v", err, wantErrNotFound)
 } else if gotIdent := pkg.Ident(f); gotIdent != wantIdentNonexistent {
 t.Fatalf("Ident: %s, want %s", pkg.Encode(gotIdent), pkg.Encode(wantIdentNonexistent))
 }
@@ -97,10 +97,10 @@ func TestHTTPGet(t *testing.T) {
 t.Fatalf("Cure: %x, want %x", checksum, testdataChecksum)
 }
 
-if got, err := f.Data(); err != nil {
-t.Fatalf("Data: error = %v", err)
+if got, err := f.Cure(); err != nil {
+t.Fatalf("Cure: error = %v", err)
 } else if string(got) != testdata {
-t.Fatalf("Data: %x, want %x", got, testdata)
+t.Fatalf("Cure: %x, want %x", got, testdata)
 } else if gotIdent := pkg.Ident(f); gotIdent != wantIdent {
 t.Fatalf("Ident: %s, want %s", pkg.Encode(gotIdent), pkg.Encode(wantIdent))
 }
@@ -112,10 +112,10 @@ func TestHTTPGet(t *testing.T) {
 "file:///testdata",
 testdataChecksum,
 )
-if got, err := f.Data(); err != nil {
-t.Fatalf("Data: error = %v", err)
+if got, err := f.Cure(); err != nil {
+t.Fatalf("Cure: error = %v", err)
 } else if string(got) != testdata {
-t.Fatalf("Data: %x, want %x", got, testdata)
+t.Fatalf("Cure: %x, want %x", got, testdata)
 } else if gotIdent := pkg.Ident(f); gotIdent != wantIdent {
 t.Fatalf("Ident: %s, want %s", pkg.Encode(gotIdent), pkg.Encode(wantIdent))
 }
@@ -3,6 +3,7 @@ package pkg
 
 import (
 "bytes"
+"context"
 "crypto/sha512"
 "encoding/base64"
 "encoding/binary"
@@ -12,11 +13,14 @@ import (
 "os"
 "path"
 "path/filepath"
+"runtime"
 "slices"
 "strings"
 "sync"
+"syscall"
 
 "hakurei.app/container/check"
+"hakurei.app/message"
 )
 
 type (
@@ -53,45 +57,123 @@ func MustDecode(s string) Checksum {
 }
 }
 
-// CureContext is passed to [Artifact.Cure] and contains information and methods
-// useful for curing the [Artifact], like requesting the data of [File], or that
-// other artifacts be cured.
+// TContext is passed to [TrivialArtifact.Cure] and provides information and
+// methods required for curing the [TrivialArtifact].
 //
-// Methods of CureContext are safe for concurrent use. CureContext is valid
-// until [Artifact.Cure] returns.
-type CureContext struct {
-// Address of underlying [Cache], should be zeroed after [Artifact.Cure]
-// returns and must not be exposed directly.
+// Methods of TContext are safe for concurrent use. TContext is valid
+// until [TrivialArtifact.Cure] returns.
+type TContext struct {
+// Address of underlying [Cache], should be zeroed or made unusable after
+// [TrivialArtifact.Cure] returns and must not be exposed directly.
 cache *Cache
 
 // Populated during [Cache.Cure].
 work, temp *check.Absolute
 }
 
+// destroy destroys the temporary directory and joins its errors with the error
+// referred to by errP. If the error referred to by errP is non-nil, the work
+// directory is removed similarly. [Cache] is responsible for making sure work
+// is never left behind for a successful [Cache.Cure].
+//
+// destroy must be deferred by [Cache.Cure] if [TContext] is passed to any Cure
+// implementation. It should not be called prior to that point.
+func (t *TContext) destroy(errP *error) {
+if chmodErr, removeErr := removeAll(t.temp); chmodErr != nil || removeErr != nil {
+*errP = errors.Join(*errP, chmodErr, removeErr)
+return
+}
+
+if *errP != nil {
+chmodErr, removeErr := removeAll(t.work)
+if chmodErr != nil || removeErr != nil {
+*errP = errors.Join(*errP, chmodErr, removeErr)
+} else if errors.Is(*errP, os.ErrExist) {
+// two artifacts may be backed by the same file
+*errP = nil
+}
+}
+}
+
+// Unwrap returns the underlying [context.Context].
+func (t *TContext) Unwrap() context.Context { return t.cache.ctx }
+
+// GetMessage returns [message.Msg] held by the underlying [Cache].
+func (t *TContext) GetMessage() message.Msg { return t.cache.msg }
+
 // GetWorkDir returns a pathname to a directory which [Artifact] is expected to
 // write its output to. This is not the final resting place of the [Artifact]
 // and this pathname should not be directly referred to in the final contents.
-func (c *CureContext) GetWorkDir() *check.Absolute { return c.work }
+func (t *TContext) GetWorkDir() *check.Absolute { return t.work }
 
 // GetTempDir returns a pathname which implementations may use as scratch space.
 // A directory is not created automatically, implementations are expected to
 // create it if they wish to use it, using [os.MkdirAll].
-func (c *CureContext) GetTempDir() *check.Absolute { return c.temp }
+func (t *TContext) GetTempDir() *check.Absolute { return t.temp }
 
-// Cure cures the [Artifact] and returns its pathname and [Checksum].
-func (c *CureContext) Cure(a Artifact) (
-pathname *check.Absolute,
-checksum Checksum,
-err error,
-) {
-return c.cache.Cure(a)
+// Open tries to open [Artifact] for reading. If a implements [File], its data
+// might be used directly, eliminating the roundtrip to vfs. Otherwise, it must
+// cure into a directory containing a single regular file.
+//
+// If err is nil, the caller is responsible for closing the resulting
+// [io.ReadCloser].
+func (t *TContext) Open(a Artifact) (r io.ReadCloser, err error) {
+if f, ok := a.(File); ok {
+return t.cache.openFile(f)
+}
+
+var pathname *check.Absolute
+if pathname, _, err = t.cache.Cure(a); err != nil {
+return
+}
+
+var entries []os.DirEntry
+if entries, err = os.ReadDir(pathname.String()); err != nil {
+return
+}
+
+if len(entries) != 1 || !entries[0].Type().IsRegular() {
+err = errors.New(
+"input directory does not contain a single regular file",
+)
+return
+} else {
+return os.Open(pathname.Append(entries[0].Name()).String())
+}
 }
 
-// OpenFile tries to load [File] from [Cache], and if that fails, obtains it via
-// [File.Data] instead. Notably, it does not cure [File]. If err is nil, the
-// caller is responsible for closing the resulting [io.ReadCloser].
-func (c *CureContext) OpenFile(f File) (r io.ReadCloser, err error) {
-return c.cache.openFile(f)
+// FContext is passed to [FloodArtifact.Cure] and provides information and
+// methods required for curing the [FloodArtifact].
+//
+// Methods of FContext are safe for concurrent use. FContext is valid
+// until [FloodArtifact.Cure] returns.
+type FContext struct {
+TContext
+
+// Cured top-level dependencies looked up by Pathname.
+deps map[ID]*check.Absolute
+}
+
+// InvalidLookupError is the identifier of non-dependency [Artifact] looked up
+// via [FContext.Pathname] by a misbehaving [Artifact] implementation.
+type InvalidLookupError ID
+
+func (e InvalidLookupError) Error() string {
+return "attempting to look up non-dependency artifact " + Encode(e)
+}
+
+var _ error = InvalidLookupError{}
+
+// Pathname returns the identifier pathname of an [Artifact]. Calling Pathname
+// with an [Artifact] not part of the slice returned by [Artifact.Dependencies]
+// panics.
+func (f *FContext) Pathname(a Artifact) *check.Absolute {
+id := Ident(a)
+if p, ok := f.deps[id]; ok {
+return p
+} else {
+panic(InvalidLookupError(id))
+}
 }
 
 // An Artifact is a read-only reference to a piece of data that may be created
@@ -119,16 +201,35 @@ type Artifact interface {
 //
 // Result must remain identical across multiple invocations.
 Dependencies() []Artifact
+}
+
+// FloodArtifact refers to an [Artifact] requiring its entire dependency graph
+// to be cured prior to curing itself.
+type FloodArtifact interface {
 // Cure cures the current [Artifact] to the working directory obtained via
-// [CureContext.GetWorkDir].
-//
-// If the implementation produces a single file, it must implement [File]
-// as well. In that case, Cure must produce a single regular file with
-// contents identical to that returned by [File.Data].
+// [TContext.GetWorkDir] embedded in [FContext].
 //
 // Implementations must not retain c.
-Cure(c *CureContext) (err error)
+Cure(f *FContext) (err error)
+
+Artifact
+}
+
+// TrivialArtifact refers to an [Artifact] that cures without requiring that
+// any other [Artifact] is cured before it. Its dependency tree is ignored after
+// computing its identifier.
+//
+// TrivialArtifact is unable to cure any other [Artifact] and it cannot access
+// pathnames. This type of [Artifact] is primarily intended for dependency-less
+// artifacts or direct dependencies that only consists of [File].
+type TrivialArtifact interface {
+// Cure cures the current [Artifact] to the working directory obtained via
+// [TContext.GetWorkDir].
+//
+// Implementations must not retain c.
+Cure(t *TContext) (err error)
+
+Artifact
 }
 
 // KnownIdent is optionally implemented by [Artifact] and is used instead of
@@ -157,12 +258,14 @@ type KnownChecksum interface {
 
 // A File refers to an [Artifact] backed by a single file.
 type File interface {
-// Data returns the full contents of [Artifact]. If [Artifact.Checksum]
-// returns a non-nil address, Data is responsible for validating any data
-// it produces and must return [ChecksumMismatchError] if validation fails.
+// Cure returns the full contents of [File]. If [File] implements
+// [KnownChecksum], Cure is responsible for validating any data it produces
+// and must return [ChecksumMismatchError] if validation fails.
 //
 // Callers must not modify the returned byte slice.
-Data() ([]byte, error)
+//
+// Result must remain identical across multiple invocations.
+Cure() ([]byte, error)
 
 Artifact
 }
@@ -242,9 +345,47 @@ const (
 checksumLinknamePrefix = "../" + dirChecksum + "/"
 )
 
+// A pendingArtifactDep is a dependency [Artifact] pending concurrent curing,
+// subject to the cures limit. Values pointed to by result addresses are safe
+// to access after the [sync.WaitGroup] associated with this pendingArtifactDep
+// is done. pendingArtifactDep must not be reused or modified after it is sent
+// to Cache.cureDep.
+type pendingArtifactDep struct {
+// Dependency artifact populated during [Cache.Cure].
+a Artifact
+
+// Address of result pathname populated during [Cache.Cure] and dereferenced
+// if curing succeeds.
+resP **check.Absolute
+
+// Address of result error slice populated during [Cache.Cure], dereferenced
+// after acquiring errsMu if curing fails. No additional action is taken,
+// [Cache] and its caller are responsible for further error handling.
+errs *[]error
+// Address of mutex synchronising access to errs.
+errsMu *sync.Mutex
+
+// For synchronising access to result buffer.
+*sync.WaitGroup
+}
+
 // Cache is a support layer that implementations of [Artifact] can use to store
 // cured [Artifact] data in a content addressed fashion.
 type Cache struct {
+// Work for curing dependency [Artifact] is sent here and cured concurrently
+// while subject to the cures limit. Invalid after the context is canceled.
+cureDep chan<- *pendingArtifactDep
+
+// [context.WithCancel] over caller-supplied context, used by [Artifact] and
+// all dependency curing goroutines.
+ctx context.Context
+// Cancels ctx.
+cancel context.CancelFunc
+// For waiting on dependency curing goroutines.
+wg sync.WaitGroup
+// Reports new cures and passed to [Artifact].
+msg message.Msg
+
 // Directory where all [Cache] related files are placed.
 base *check.Absolute
 
@@ -560,7 +701,9 @@ func (c *Cache) finaliseIdent(
 close(done)
 }
 
-// openFile provides [CureContext.OpenFile] for [Artifact.Cure].
+// openFile tries to load [File] from [Cache], and if that fails, obtains it via
+// [File.Cure] instead. Notably, it does not cure [File]. If err is nil, the
+// caller is responsible for closing the resulting [io.ReadCloser].
 func (c *Cache) openFile(f File) (r io.ReadCloser, err error) {
 if kc, ok := f.(KnownChecksum); ok {
 c.checksumMu.RLock()
@@ -583,7 +726,7 @@ func (c *Cache) openFile(f File) (r io.ReadCloser, err error) {
 return
 }
 var data []byte
-if data, err = f.Data(); err != nil {
+if data, err = f.Cure(); err != nil {
 return
 }
 r = io.NopCloser(bytes.NewReader(data))
@@ -691,7 +834,16 @@ func (fsys dotOverrideFS) Stat(name string) (fi fs.FileInfo, err error) {
 return
 }
 
-// Cure cures the [Artifact] and returns its pathname and [Checksum].
+// InvalidArtifactError describes an artifact that does not implement a
+// supported Cure method.
+type InvalidArtifactError ID
+
+func (e InvalidArtifactError) Error() string {
+return "artifact " + Encode(e) + " cannot be cured"
+}
+
+// Cure cures the [Artifact] and returns its pathname and [Checksum]. Direct
+// calls to Cure are not subject to the cures limit.
 func (c *Cache) Cure(a Artifact) (
 pathname *check.Absolute,
 checksum Checksum,
@@ -764,6 +916,11 @@ func (c *Cache) Cure(a Artifact) (
 }
 }
 
+if c.msg.IsVerbose() {
+c.msg.Verbosef("curing %s...", Encode(id))
+}
+
+// cure File outside type switch to skip TContext initialisation
 if f, ok := a.(File); ok {
 if checksumFi != nil {
 if !checksumFi.Mode().IsRegular() {
@@ -774,7 +931,7 @@ func (c *Cache) Cure(a Artifact) (
 }
 
 var data []byte
-data, err = f.Data()
+data, err = f.Cure()
 if err != nil {
 return
 }
@@ -823,107 +980,163 @@ func (c *Cache) Cure(a Artifact) (
|
|||||||
c.checksumMu.Unlock()
|
c.checksumMu.Unlock()
|
||||||
|
|
||||||
return
|
return
|
||||||
} else {
|
}
|
||||||
if checksumFi != nil {
|
|
||||||
if !checksumFi.Mode().IsDir() {
|
|
||||||
// unreachable
|
|
||||||
err = InvalidFileModeError(checksumFi.Mode())
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
cc := CureContext{
|
if checksumFi != nil {
|
||||||
cache: c,
|
if !checksumFi.Mode().IsDir() {
|
||||||
work: c.base.Append(dirWork, ids),
|
// unreachable
|
||||||
temp: c.base.Append(dirTemp, ids),
|
err = InvalidFileModeError(checksumFi.Mode())
|
||||||
}
|
}
|
||||||
defer func() {
|
|
||||||
if chmodErr, removeErr := removeAll(cc.temp); chmodErr != nil || removeErr != nil {
|
|
||||||
err = errors.Join(err, chmodErr, removeErr)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
chmodErr, removeErr := removeAll(cc.work)
|
|
||||||
if chmodErr != nil || removeErr != nil {
|
|
||||||
err = errors.Join(err, chmodErr, removeErr)
|
|
||||||
} else if errors.Is(err, os.ErrExist) {
|
|
||||||
// two artifacts may be backed by the same file
|
|
||||||
err = nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
if err = a.Cure(&cc); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
cc.cache = nil
|
|
||||||
var fi os.FileInfo
|
|
||||||
-		if fi, err = os.Lstat(cc.work.String()); err != nil {
-			if errors.Is(err, os.ErrNotExist) {
-				err = NoOutputError{}
-			}
-			return
-		}
-
-		if !fi.IsDir() {
-			if !fi.Mode().IsRegular() {
-				err = InvalidFileModeError(fi.Mode())
-			} else {
-				err = errors.New("non-file artifact produced regular file")
-			}
-			return
-		}
-
-		var gotChecksum Checksum
-		if gotChecksum, err = HashFS(
-			dotOverrideFS{os.DirFS(cc.work.String()).(dirFS)},
-			".",
-		); err != nil {
-			return
-		}
-
-		if checksumPathname == nil {
-			checksum = gotChecksum
-			checksums = Encode(checksum)
-			checksumPathname = c.base.Append(
-				dirChecksum,
-				checksums,
-			)
-		} else {
-			if gotChecksum != checksum {
-				err = &ChecksumMismatchError{
-					Got: gotChecksum,
-					Want: checksum,
-				}
-				return
-			}
-		}
-
-		if err = os.Chmod(cc.work.String(), 0700); err != nil {
-			return
-		}
-		c.checksumMu.Lock()
-		if err = os.Rename(
-			cc.work.String(),
-			checksumPathname.String(),
-		); err != nil {
-			if !errors.Is(err, os.ErrExist) {
-				c.checksumMu.Unlock()
-				return
-			}
-			// err is zeroed during deferred cleanup
-		} else {
-			err = os.Chmod(checksumPathname.String(), 0500)
-		}
-		c.checksumMu.Unlock()
-
 		return
 	}

+	t := TContext{c, c.base.Append(dirWork, ids), c.base.Append(dirTemp, ids)}
+	switch ca := a.(type) {
+	case TrivialArtifact:
+		defer t.destroy(&err)
+		if err = ca.Cure(&t); err != nil {
+			return
+		}
+		break
+
+	case FloodArtifact:
+		deps := a.Dependencies()
+		f := FContext{t, make(map[ID]*check.Absolute, len(deps))}
+
+		var wg sync.WaitGroup
+		wg.Add(len(deps))
+		res := make([]*check.Absolute, len(deps))
+		errs := make([]error, 0, len(deps))
+		var errsMu sync.Mutex
+		for i, d := range deps {
+			pending := pendingArtifactDep{d, &res[i], &errs, &errsMu, &wg}
+			select {
+			case c.cureDep <- &pending:
+				break
+
+			case <-c.ctx.Done():
+				err = c.ctx.Err()
+				return
+			}
+		}
+		wg.Wait()
+
+		if len(errs) > 0 {
+			err = errors.Join(errs...)
+			if err == nil {
+				// unreachable
+				err = syscall.ENOTRECOVERABLE
+			}
+			return
+		}
+		for i, p := range res {
+			f.deps[Ident(deps[i])] = p
+		}
+
+		defer f.destroy(&err)
+		if err = ca.Cure(&f); err != nil {
+			return
+		}
+		break
+
+	default:
+		err = InvalidArtifactError(id)
+		return
+	}
+	t.cache = nil
+
+	var fi os.FileInfo
+	if fi, err = os.Lstat(t.work.String()); err != nil {
+		if errors.Is(err, os.ErrNotExist) {
+			err = NoOutputError{}
+		}
+		return
+	}
+
+	if !fi.IsDir() {
+		if !fi.Mode().IsRegular() {
+			err = InvalidFileModeError(fi.Mode())
+		} else {
+			err = errors.New("non-file artifact produced regular file")
+		}
+		return
+	}
+
+	var gotChecksum Checksum
+	if gotChecksum, err = HashFS(
+		dotOverrideFS{os.DirFS(t.work.String()).(dirFS)},
+		".",
+	); err != nil {
+		return
+	}
+
+	if checksumPathname == nil {
+		checksum = gotChecksum
+		checksums = Encode(checksum)
+		checksumPathname = c.base.Append(
+			dirChecksum,
+			checksums,
+		)
+	} else {
+		if gotChecksum != checksum {
+			err = &ChecksumMismatchError{
+				Got: gotChecksum,
+				Want: checksum,
+			}
+			return
+		}
+	}
+
+	if err = os.Chmod(t.work.String(), 0700); err != nil {
+		return
+	}
+	c.checksumMu.Lock()
+	if err = os.Rename(
+		t.work.String(),
+		checksumPathname.String(),
+	); err != nil {
+		if !errors.Is(err, os.ErrExist) {
+			c.checksumMu.Unlock()
+			return
+		}
+		// err is zeroed during deferred cleanup
+	} else {
+		err = os.Chmod(checksumPathname.String(), 0500)
+	}
+	c.checksumMu.Unlock()
+	return
 }
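The FloodArtifact branch above is a bounded fan-out: each dependency is wrapped in a pendingArtifactDep, queued on c.cureDep, and drained by the fixed worker pool started in New further down, with results written into a preallocated slice and failures collected under a mutex until wg.Wait returns. The following minimal sketch shows the same pattern in isolation; all names in it are illustrative and nothing here is taken from the hakurei codebase.

package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
)

// dep is a stand-in for a dependency artifact; result is where a worker
// stores its output, errs/errsMu collect failures, wg signals completion.
type dep struct {
	name   string
	result *string
	errs   *[]error
	errsMu *sync.Mutex
	wg     *sync.WaitGroup
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	const workers = 4 // hard upper limit on concurrent dependency work
	queue := make(chan *dep, workers)
	var pool sync.WaitGroup
	for i := 0; i < workers; i++ {
		pool.Add(1)
		go func() {
			defer pool.Done()
			for {
				select {
				case <-ctx.Done():
					return
				case d := <-queue:
					// simulate curing the dependency; a failure would be
					// appended to *d.errs under d.errsMu instead
					*d.result = "cured " + d.name
					d.wg.Done()
				}
			}
		}()
	}

	deps := []string{"libfoo", "libbar", "libbaz"}
	res := make([]string, len(deps))
	errs := make([]error, 0, len(deps))
	var errsMu sync.Mutex
	var wg sync.WaitGroup
	wg.Add(len(deps))
	for i, name := range deps {
		select {
		case queue <- &dep{name, &res[i], &errs, &errsMu, &wg}:
		case <-ctx.Done():
			return
		}
	}
	wg.Wait() // every dependency attempted, at most `workers` at a time

	if err := errors.Join(errs...); err != nil {
		fmt.Println("dependency failure:", err)
		return
	}
	fmt.Println(res)

	cancel()    // stop the pool
	pool.Wait() // and wait for the workers to exit
}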

-// New returns the address to a new instance of [Cache].
-func New(base *check.Absolute) (*Cache, error) {
+// cure cures the pending [Artifact], stores its result and notifies the caller.
+func (pending *pendingArtifactDep) cure(c *Cache) {
+	defer pending.Done()
+
+	pathname, _, err := c.Cure(pending.a)
+	if err == nil {
+		*pending.resP = pathname
+		return
+	}
+
+	pending.errsMu.Lock()
+	*pending.errs = append(*pending.errs, err)
+	pending.errsMu.Unlock()
+}
+
+// Close cancels all pending cures and waits for them to clean up.
+func (c *Cache) Close() { c.cancel(); c.wg.Wait() }
+
+// New returns the address to a new instance of [Cache]. Concurrent cures of
+// dependency [Artifact] is limited to the caller-supplied value, however direct
+// calls to [Cache.Cure] is not subject to this limitation.
+//
+// A cures value of 0 or lower is equivalent to the value returned by
+// [runtime.NumCPU].
+func New(
+	ctx context.Context,
+	msg message.Msg,
+	cures int,
+	base *check.Absolute,
+) (*Cache, error) {
 	for _, name := range []string{
 		dirIdentifier,
 		dirChecksum,
@@ -935,11 +1148,35 @@ func New(base *check.Absolute) (*Cache, error) {
 		}
 	}

-	return &Cache{
+	c := Cache{
+		msg: msg,
 		base: base,

 		ident: make(map[ID]Checksum),
 		identErr: make(map[ID]error),
 		identPending: make(map[ID]<-chan struct{}),
-	}, nil
+	}
+	c.ctx, c.cancel = context.WithCancel(ctx)
+	cureDep := make(chan *pendingArtifactDep, cures)
+	c.cureDep = cureDep
+
+	if cures < 1 {
+		cures = runtime.NumCPU()
+	}
+	for i := 0; i < cures; i++ {
+		c.wg.Go(func() {
+			for {
+				select {
+				case <-c.ctx.Done():
+					return
+
+				case pending := <-cureDep:
+					pending.cure(&c)
+					break
+				}
+			}
+		})
+	}
+
+	return &c, nil
 }
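With this change the constructor owns the context, the message sink and the worker pool, so a caller sets up and tears down a cache roughly the way the tests do. The sketch below is based only on calls visible in these hunks (New, Close, Cure, NewFile, Encode, MustAbs); the cache path and file contents are placeholders, and since internal/pkg is an internal package this only compiles from inside the hakurei.app module.

package main

import (
	"context"
	"log"
	"os"

	"hakurei.app/container/check"
	"hakurei.app/internal/pkg"
	"hakurei.app/message"
)

func main() {
	msg := message.New(log.New(os.Stderr, "cache: ", 0))

	// 0 cures selects runtime.NumCPU workers; the base path is a placeholder.
	c, err := pkg.New(context.Background(), msg, 0, check.MustAbs("/tmp/hakurei-pkg-cache"))
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close() // cancels pending cures and waits for the workers

	// Cure returns the pathname of the cured artifact and its checksum.
	pathname, checksum, err := c.Cure(pkg.NewFile([]byte("placeholder data")))
	if err != nil {
		log.Fatal(err)
	}
	log.Println(pathname, pkg.Encode(checksum))
}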
@@ -3,6 +3,7 @@ package pkg_test
 import (
 	"archive/tar"
 	"bytes"
+	"context"
 	"crypto/sha512"
 	"encoding/base64"
 	"encoding/binary"
@@ -10,6 +11,7 @@ import (
 	"fmt"
 	"io"
 	"io/fs"
+	"log"
 	"net/http"
 	"os"
 	"path/filepath"
@@ -22,6 +24,7 @@ import (
 	"hakurei.app/container/check"
 	"hakurei.app/container/stub"
 	"hakurei.app/internal/pkg"
+	"hakurei.app/message"
 )

 func TestMain(m *testing.M) { container.TryArgv0(nil); os.Exit(m.Run()) }
@@ -29,7 +32,7 @@ func TestMain(m *testing.M) { container.TryArgv0(nil); os.Exit(m.Run()) }
 // overrideIdent overrides the ID method of [Artifact].
 type overrideIdent struct {
 	id pkg.ID
-	pkg.Artifact
+	pkg.TrivialArtifact
 }

 func (a overrideIdent) ID() pkg.ID { return a.id }
@@ -45,7 +48,7 @@ func (a overrideIdentFile) ID() pkg.ID { return a.id }
 // A knownIdentArtifact implements [pkg.KnownIdent] and [Artifact]
 type knownIdentArtifact interface {
 	pkg.KnownIdent
-	pkg.Artifact
+	pkg.TrivialArtifact
 }

 // A knownIdentFile implements [pkg.KnownIdent] and [File]
@@ -70,19 +73,33 @@ type overrideChecksumFile struct {

 func (a overrideChecksumFile) Checksum() pkg.Checksum { return a.checksum }

-// A stubArtifact implements [Artifact] with hardcoded behaviour.
+// A stubArtifact implements [TrivialArtifact] with hardcoded behaviour.
 type stubArtifact struct {
 	kind pkg.Kind
 	params []byte
 	deps []pkg.Artifact

-	cure func(c *pkg.CureContext) error
+	cure func(t *pkg.TContext) error
 }

 func (a stubArtifact) Kind() pkg.Kind { return a.kind }
 func (a stubArtifact) Params() []byte { return a.params }
 func (a stubArtifact) Dependencies() []pkg.Artifact { return a.deps }
-func (a stubArtifact) Cure(c *pkg.CureContext) error { return a.cure(c) }
+func (a stubArtifact) Cure(t *pkg.TContext) error { return a.cure(t) }
+
+// A stubArtifactF implements [FloodArtifact] with hardcoded behaviour.
+type stubArtifactF struct {
+	kind pkg.Kind
+	params []byte
+	deps []pkg.Artifact
+
+	cure func(f *pkg.FContext) error
+}
+
+func (a stubArtifactF) Kind() pkg.Kind { return a.kind }
+func (a stubArtifactF) Params() []byte { return a.params }
+func (a stubArtifactF) Dependencies() []pkg.Artifact { return a.deps }
+func (a stubArtifactF) Cure(f *pkg.FContext) error { return a.cure(f) }

 // A stubFile implements [File] with hardcoded behaviour.
 type stubFile struct {
@@ -92,7 +109,7 @@ type stubFile struct {
 	stubArtifact
 }

-func (a stubFile) Data() ([]byte, error) { return a.data, a.err }
+func (a stubFile) Cure() ([]byte, error) { return a.data, a.err }

 // newStubFile returns an implementation of [pkg.File] with hardcoded behaviour.
 func newStubFile(
@@ -106,7 +123,7 @@ func newStubFile(
 	kind,
 	nil,
 	nil,
-	func(*pkg.CureContext) error {
+	func(*pkg.TContext) error {
 		panic("unreachable")
 	},
 }}}
@@ -241,10 +258,14 @@ func checkWithCache(t *testing.T, testCases []cacheTestCase) {
 		}
 	})

+	msg := message.New(log.New(os.Stderr, "cache: ", 0))
+	msg.SwapVerbose(testing.Verbose())
+
 	var scrubFunc func() error // scrub after hashing
-	if c, err := pkg.New(base); err != nil {
+	if c, err := pkg.New(t.Context(), msg, 0, base); err != nil {
 		t.Fatalf("New: error = %v", err)
 	} else {
+		t.Cleanup(c.Close)
 		if tc.early != nil {
 			tc.early(t, base)
 		}
@@ -312,6 +333,8 @@ var ignorePathname = check.MustAbs("/\x00")

 // cureMany cures many artifacts against a [Cache] and checks their outcomes.
 func cureMany(t *testing.T, c *pkg.Cache, steps []cureStep) {
+	t.Helper()
+
 	for _, step := range steps {
 		t.Log("cure step:", step.name)
 		if pathname, checksum, err := c.Cure(step.a); !reflect.DeepEqual(err, step.err) {
@@ -415,6 +438,13 @@ func TestCache(t *testing.T) {
 			0xa9, 0xc2, 0x08, 0xa1, 0x17, 0x17,
 		}, nil},

+		{"incomplete implementation", struct{ pkg.Artifact }{stubArtifact{
+			kind: pkg.KindExec,
+			params: []byte("artifact overridden to be incomplete"),
+		}}, nil, pkg.Checksum{}, pkg.InvalidArtifactError(pkg.MustDecode(
+			"da4kLKa94g1wN2M0qcKflqgf2-Y2UL36iehhczqsIIW8G0LGvM7S8jjtnBc0ftB0",
+		))},
+
 		{"error passthrough", newStubFile(
 			pkg.KindHTTPGet,
 			pkg.ID{0xff, 1},
@@ -436,9 +466,14 @@ func TestCache(t *testing.T) {
 		)},
 	})

-	if c0, err := pkg.New(base); err != nil {
+	if c0, err := pkg.New(
+		t.Context(),
+		message.New(nil),
+		0, base,
+	); err != nil {
 		t.Fatalf("New: error = %v", err)
 	} else {
+		t.Cleanup(c.Close) // check doubled cancel
 		cureMany(t, c0, []cureStep{
 			{"cache hit ident", overrideIdent{
 				id: identifier,
@@ -455,6 +490,16 @@ func TestCache(t *testing.T) {
 			pkg.Encode(testdataChecksum),
 		), testdataChecksum, nil},
 	})

+		// cure after close
+		c.Close()
+		if _, _, err = c.Cure(stubArtifactF{
+			kind: pkg.KindExec,
+			params: []byte("unreachable artifact cured after cancel"),
+			deps: []pkg.Artifact{pkg.NewFile([]byte("unreachable dependency"))},
+		}); !reflect.DeepEqual(err, context.Canceled) {
+			t.Fatalf("(closed) Cure: error = %v", err)
+		}
 	}
 }, pkg.MustDecode("St9rlE-mGZ5gXwiv_hzQ_B8bZP-UUvSNmf4nHUZzCMOumb6hKnheZSe0dmnuc4Q2")},

@@ -463,8 +508,8 @@ func TestCache(t *testing.T) {
 	binary.LittleEndian.AppendUint64(nil, pkg.TarGzip),
 	overrideIdent{testdataChecksum, stubArtifact{}},
 )
-makeSample := func(c *pkg.CureContext) error {
-	work := c.GetWorkDir()
+makeSample := func(t *pkg.TContext) error {
+	work := t.GetWorkDir()
 	if err := os.Mkdir(work.String(), 0700); err != nil {
 		return err
 	}
@@ -565,15 +610,15 @@ func TestCache(t *testing.T) {

 		{"cure fault", overrideIdent{pkg.ID{0xff, 0}, stubArtifact{
 			kind: pkg.KindTar,
-			cure: func(c *pkg.CureContext) error {
-				return makeGarbage(c.GetWorkDir(), stub.UniqueError(0xcafe))
+			cure: func(t *pkg.TContext) error {
+				return makeGarbage(t.GetWorkDir(), stub.UniqueError(0xcafe))
 			},
 		}}, nil, pkg.Checksum{}, stub.UniqueError(0xcafe)},

 		{"checksum mismatch", overrideChecksum{pkg.Checksum{}, overrideIdent{pkg.ID{0xff, 1}, stubArtifact{
 			kind: pkg.KindTar,
-			cure: func(c *pkg.CureContext) error {
-				return makeGarbage(c.GetWorkDir(), nil)
+			cure: func(t *pkg.TContext) error {
+				return makeGarbage(t.GetWorkDir(), nil)
 			},
 		}}}, nil, pkg.Checksum{}, &pkg.ChecksumMismatchError{
 			Got: pkg.MustDecode(
@@ -592,8 +637,8 @@ func TestCache(t *testing.T) {

 		{"openFile directory", overrideIdent{pkg.ID{0xff, 3}, stubArtifact{
 			kind: pkg.KindTar,
-			cure: func(c *pkg.CureContext) error {
-				r, err := c.OpenFile(overrideChecksumFile{checksum: wantChecksum})
+			cure: func(t *pkg.TContext) error {
+				r, err := t.Open(overrideChecksumFile{checksum: wantChecksum})
 				if err != nil {
 					panic(err)
 				}
@@ -611,24 +656,24 @@ func TestCache(t *testing.T) {

 		{"no output", overrideIdent{pkg.ID{0xff, 4}, stubArtifact{
 			kind: pkg.KindTar,
-			cure: func(c *pkg.CureContext) error {
+			cure: func(t *pkg.TContext) error {
 				return nil
 			},
 		}}, nil, pkg.Checksum{}, pkg.NoOutputError{}},

 		{"file output", overrideIdent{pkg.ID{0xff, 5}, stubArtifact{
 			kind: pkg.KindTar,
-			cure: func(c *pkg.CureContext) error {
-				return os.WriteFile(c.GetWorkDir().String(), []byte{0}, 0400)
+			cure: func(t *pkg.TContext) error {
+				return os.WriteFile(t.GetWorkDir().String(), []byte{0}, 0400)
 			},
 		}}, nil, pkg.Checksum{}, errors.New("non-file artifact produced regular file")},

 		{"symlink output", overrideIdent{pkg.ID{0xff, 6}, stubArtifact{
 			kind: pkg.KindTar,
-			cure: func(c *pkg.CureContext) error {
+			cure: func(t *pkg.TContext) error {
 				return os.Symlink(
-					c.GetWorkDir().String(),
-					c.GetWorkDir().String(),
+					t.GetWorkDir().String(),
+					t.GetWorkDir().String(),
 				)
 			},
 		}}, nil, pkg.Checksum{}, pkg.InvalidFileModeError(
@@ -645,7 +690,7 @@ func TestCache(t *testing.T) {
 		go func() {
 			if _, _, err := c.Cure(overrideIdent{pkg.ID{0xff}, stubArtifact{
 				kind: pkg.KindTar,
-				cure: func(c *pkg.CureContext) error {
+				cure: func(t *pkg.TContext) error {
 					close(ready)
 					<-n
 					return wantErr
@@ -677,9 +722,9 @@ func TestCache(t *testing.T) {

 		{"file output", overrideIdent{pkg.ID{0xff, 2}, stubArtifact{
 			kind: pkg.KindTar,
-			cure: func(c *pkg.CureContext) error {
+			cure: func(t *pkg.TContext) error {
 				return os.WriteFile(
-					c.GetWorkDir().String(),
+					t.GetWorkDir().String(),
 					[]byte{0},
 					0400,
 				)
@@ -785,6 +830,14 @@ func TestErrors(t *testing.T) {
 		err error
 		want string
 	}{
+		{"InvalidLookupError", pkg.InvalidLookupError{
+			0xff, 0xf0,
+		}, "attempting to look up non-dependency artifact __AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"},
+
+		{"InvalidArtifactError", pkg.InvalidArtifactError{
+			0xff, 0xfd,
+		}, "artifact __0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA cannot be cured"},
+
 		{"ChecksumMismatchError", &pkg.ChecksumMismatchError{
 			Want: (pkg.Checksum)(bytes.Repeat([]byte{
 				0x75, 0xe6, 0x9d, 0x6d, 0xe7, 0x9f,
@@ -885,7 +938,11 @@ func TestNew(t *testing.T) {
 			Path: container.Nonexistent,
 			Err: syscall.ENOENT,
 		}
-		if _, err := pkg.New(check.MustAbs(container.Nonexistent)); !reflect.DeepEqual(err, wantErr) {
+		if _, err := pkg.New(
+			t.Context(),
+			message.New(nil),
+			0, check.MustAbs(container.Nonexistent),
+		); !reflect.DeepEqual(err, wantErr) {
 			t.Errorf("New: error = %#v, want %#v", err, wantErr)
 		}
 	})
@@ -909,7 +966,11 @@ func TestNew(t *testing.T) {
 			Path: tempDir.Append("cache").String(),
 			Err: syscall.EACCES,
 		}
-		if _, err := pkg.New(tempDir.Append("cache")); !reflect.DeepEqual(err, wantErr) {
+		if _, err := pkg.New(
+			t.Context(),
+			message.New(nil),
+			0, tempDir.Append("cache"),
+		); !reflect.DeepEqual(err, wantErr) {
 			t.Errorf("New: error = %#v, want %#v", err, wantErr)
 		}
 	})
@@ -34,9 +34,8 @@ type tarArtifact struct {
 }

 // NewTar returns a new [Artifact] backed by the supplied [Artifact] and
-// compression method. If f implements [File], its data might be used directly,
-// eliminating the roundtrip to vfs. If f is a directory, it must contain a
-// single regular file.
+// compression method. The source [Artifact] must be compatible with
+// [TContext.Open].
 func NewTar(a Artifact, compression uint64) Artifact {
 	return &tarArtifact{a, compression}
 }
@@ -74,36 +73,11 @@ func (e DisallowedTypeflagError) Error() string {
 }

 // Cure cures the [Artifact], producing a directory located at work.
-func (a *tarArtifact) Cure(c *CureContext) (err error) {
-	temp := c.GetTempDir()
+func (a *tarArtifact) Cure(t *TContext) (err error) {
+	temp := t.GetTempDir()
 	var tr io.ReadCloser
-	if file, ok := a.f.(File); ok {
-		if tr, err = c.OpenFile(file); err != nil {
-			return
-		}
-	} else {
-		var pathname *check.Absolute
-		if pathname, _, err = c.Cure(a.f); err != nil {
-			return
-		}
-
-		var entries []os.DirEntry
-		if entries, err = os.ReadDir(pathname.String()); err != nil {
-			return
-		}
-
-		if len(entries) != 1 || !entries[0].Type().IsRegular() {
-			return errors.New(
-				"input directory does not contain a single regular file",
-			)
-		} else {
-			pathname = pathname.Append(entries[0].Name())
-		}
-
-		if tr, err = os.Open(pathname.String()); err != nil {
-			return
-		}
+	if tr, err = t.Open(a.f); err != nil {
+		return
 	}

 	defer func(f io.ReadCloser) {
@@ -255,9 +229,9 @@ func (a *tarArtifact) Cure(c *CureContext) (err error) {
 		if err = os.Chmod(p.String(), 0700); err != nil {
 			return
 		}
-		err = os.Rename(p.String(), c.GetWorkDir().String())
+		err = os.Rename(p.String(), t.GetWorkDir().String())
 	} else {
-		err = os.Rename(temp.String(), c.GetWorkDir().String())
+		err = os.Rename(temp.String(), t.GetWorkDir().String())
 	}
 	return
 }
@@ -121,8 +121,8 @@ func checkTarHTTP(
 	tarDir := stubArtifact{
 		kind: pkg.KindExec,
 		params: []byte("directory containing a single regular file"),
-		cure: func(c *pkg.CureContext) error {
-			work := c.GetWorkDir()
+		cure: func(t *pkg.TContext) error {
+			work := t.GetWorkDir()
 			if err := os.MkdirAll(work.String(), 0700); err != nil {
 				return err
 			}
@@ -136,8 +136,8 @@ func checkTarHTTP(
 	tarDirMulti := stubArtifact{
 		kind: pkg.KindExec,
 		params: []byte("directory containing a multiple entries"),
-		cure: func(c *pkg.CureContext) error {
-			work := c.GetWorkDir()
+		cure: func(t *pkg.TContext) error {
+			work := t.GetWorkDir()
 			if err := os.MkdirAll(work.Append(
 				"garbage",
 			).String(), 0700); err != nil {
@@ -153,8 +153,8 @@ func checkTarHTTP(
 	tarDirType := stubArtifact{
 		kind: pkg.KindExec,
 		params: []byte("directory containing a symbolic link"),
-		cure: func(c *pkg.CureContext) error {
-			work := c.GetWorkDir()
+		cure: func(t *pkg.TContext) error {
+			work := t.GetWorkDir()
 			if err := os.MkdirAll(work.String(), 0700); err != nil {
 				return err
 			}
@@ -197,7 +197,7 @@ func checkTarHTTP(
 		{"error passthrough", pkg.NewTar(stubArtifact{
 			kind: pkg.KindExec,
 			params: []byte("doomed artifact"),
-			cure: func(c *pkg.CureContext) error {
+			cure: func(t *pkg.TContext) error {
 				return stub.UniqueError(0xcafe)
 			},
 		}, pkg.TarGzip), nil, pkg.Checksum{}, stub.UniqueError(0xcafe)},
internal/pkg/testdata/main.go (vendored)
@@ -12,6 +12,7 @@ import (
 	"strings"
 	"syscall"

+	"hakurei.app/container/check"
 	"hakurei.app/container/fhs"
 	"hakurei.app/container/vfs"
 )
@@ -20,14 +21,24 @@ func main() {
 	log.SetFlags(0)
 	log.SetPrefix("testtool: ")

-	var hostNet bool
-	wantArgs := []string{"testtool"}
-	if len(os.Args) == 2 {
-		hostNet = true
-		log.SetPrefix("testtool(net): ")
-		wantArgs = []string{"testtool", "net"}
-	}
-	if !slices.Equal(os.Args, wantArgs) {
+	var hostNet, layers bool
+	if len(os.Args) == 2 && os.Args[0] == "testtool" {
+		switch os.Args[1] {
+		case "net":
+			hostNet = true
+			log.SetPrefix("testtool(net): ")
+			break
+
+		case "layers":
+			layers = true
+			log.SetPrefix("testtool(layers): ")
+			break
+
+		default:
+			log.Fatalf("Args: %q", os.Args)
+			return
+		}
+	} else if wantArgs := []string{"testtool"}; !slices.Equal(os.Args, wantArgs) {
 		log.Fatalf("Args: %q, want %q", os.Args, wantArgs)
 	}

@@ -35,7 +46,9 @@ func main() {
 	wantEnv := []string{"HAKUREI_TEST=1"}
 	if len(os.Environ()) == 2 {
 		overlayRoot = true
-		log.SetPrefix("testtool(overlay root): ")
+		if !layers {
+			log.SetPrefix("testtool(overlay root): ")
+		}
 		wantEnv = []string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"}
 	}
 	if !slices.Equal(wantEnv, os.Environ()) {
@@ -116,8 +129,37 @@ func main() {
 					lowerdir = o[len(lowerdirKey):]
 				}
 			}
-			if path.Base(lowerdir) != checksumEmptyDir {
-				log.Fatal("unexpected artifact checksum")
+			if !layers {
+				if path.Base(lowerdir) != checksumEmptyDir {
+					log.Fatal("unexpected artifact checksum")
+				}
+			} else {
+				ident = "o4z73RXEjkF5BBoXYg4WZ3c4VCjwvfJtPKFrstFHcEQxkVDoWTtdc5TMXigFvTsF"
+
+				lowerdirsEscaped := strings.Split(lowerdir, ":")
+				lowerdirs := lowerdirsEscaped[:0]
+				// ignore the option separator since it does not appear in ident
+				for i, e := range lowerdirsEscaped {
+					if len(e) > 0 &&
+						e[len(e)-1] == check.SpecialOverlayEscape[0] &&
+						(len(e) == 1 || e[len(e)-2] != check.SpecialOverlayEscape[0]) {
+						// ignore escaped pathname separator since it does not
+						// appear in ident

+						e = e[:len(e)-1]
+						if len(lowerdirsEscaped) != i {
+							lowerdirsEscaped[i+1] = e + lowerdirsEscaped[i+1]
+							continue
+						}
+					}
+					lowerdirs = append(lowerdirs, e)
+				}
+
+				if len(lowerdirs) != 2 ||
+					path.Base(lowerdirs[0]) != "MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU" ||
+					path.Base(lowerdirs[1]) != "nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK" {
+					log.Fatalf("unexpected lowerdirs %s", strings.Join(lowerdirs, ", "))
+				}
 			}
 		}
 	} else {
 		if hostNet {
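The layers branch above undoes overlay option escaping after a naive strings.Split on ':'; because the test tool only compares basenames it simply drops the escape byte rather than restoring the separator. A general-purpose variant that does restore it might look like the sketch below, which assumes '\' as the escape character in place of check.SpecialOverlayEscape; the function and example values are illustrative.

package main

import (
	"fmt"
	"strings"
)

const esc = '\\' // assumed escape character; the real code uses check.SpecialOverlayEscape[0]

// splitLowerdir splits an overlay lowerdir option on ':' while honouring
// escaped separators: an element ending in a single escape byte means the
// ':' it was split on was escaped, so it is merged with the next element.
func splitLowerdir(s string) []string {
	parts := strings.Split(s, ":")
	out := parts[:0] // reuse the backing array, mirroring the test tool
	for i, e := range parts {
		if len(e) > 0 && e[len(e)-1] == esc &&
			(len(e) == 1 || e[len(e)-2] != esc) {
			if i+1 < len(parts) {
				// drop the escape and re-insert the separator
				parts[i+1] = e[:len(e)-1] + ":" + parts[i+1]
				continue
			}
		}
		out = append(out, e)
	}
	return out
}

func main() {
	fmt.Println(splitLowerdir(`/lower\:dir:/other`)) // [/lower:dir /other]
}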