Compare commits
71 Commits
c9cd16fd2a
...
wip-bootst
| Author | SHA1 | Date | |
|---|---|---|---|
|
088d35e4e6
|
|||
|
1667df9c43
|
|||
|
156dd767ef
|
|||
|
5fe166a4a7
|
|||
|
41a8d03dd2
|
|||
|
610572d0e6
|
|||
|
29951c5174
|
|||
|
91c3594dee
|
|||
|
7ccc2fc5ec
|
|||
|
63e137856e
|
|||
|
e1e46504a1
|
|||
|
ec9343ebd6
|
|||
|
423808ac76
|
|||
|
2494ede106
|
|||
|
da3848b92f
|
|||
|
34cb4ebd3b
|
|||
|
f712466714
|
|||
|
f2430b5f5e
|
|||
|
863e6f5db6
|
|||
|
23df2ab999
|
|||
|
7bd4d7d0e6
|
|||
|
b3c30bcc51
|
|||
|
38059db835
|
|||
|
409fd3149e
|
|||
|
4eea136308
|
|||
|
c86ff02d8d
|
|||
|
e8dda70c41
|
|||
|
7ea4e8b643
|
|||
|
5eefebcb48
|
|||
|
8e08e8f518
|
|||
|
54da6ce03d
|
|||
|
3a21ba1bca
|
|||
|
45301559bf
|
|||
|
0df87ab111
|
|||
|
aa0a949cef
|
|||
|
ce0064384d
|
|||
|
53d80f4b66
|
|||
|
156096ac98
|
|||
|
ceb75538cf
|
|||
|
0741a614ed
|
|||
|
e7e9b4caea
|
|||
|
f6d32e482a
|
|||
|
79adf217f4
|
|||
|
8efffd72f4
|
|||
|
86ad8b72aa
|
|||
|
e91049c3c5
|
|||
|
3d4d32932d
|
|||
|
0ab6c13c77
|
|||
|
834cb0d40b
|
|||
|
7548a627e5
|
|||
|
b98d27f773
|
|||
|
f3aa31e401
|
|||
|
4da26681b5
|
|||
|
4897b0259e
|
|||
|
d6e4f85864
|
|||
|
3eb927823f
|
|||
|
d76b9d04b8
|
|||
|
fa93476896
|
|||
|
bd0ef086b1
|
|||
|
05202cf994
|
|||
|
40081e7a06
|
|||
|
863d3dcf9f
|
|||
|
8ad9909065
|
|||
|
deda16da38
|
|||
|
55465c6e72
|
|||
|
ce249d23f1
|
|||
|
dd5d792d14
|
|||
|
d15d2ec2bd
|
|||
|
3078c41ce7
|
|||
|
e9de5d3aca
|
|||
|
993afde840
|
1
.gitignore
vendored
1
.gitignore
vendored
@@ -27,6 +27,7 @@ go.work.sum
|
|||||||
|
|
||||||
# go generate
|
# go generate
|
||||||
/cmd/hakurei/LICENSE
|
/cmd/hakurei/LICENSE
|
||||||
|
/internal/pkg/testdata/testtool
|
||||||
|
|
||||||
# release
|
# release
|
||||||
/dist/hakurei-*
|
/dist/hakurei-*
|
||||||
|
|||||||
@@ -14,6 +14,7 @@ const (
|
|||||||
|
|
||||||
CAP_SYS_ADMIN = 0x15
|
CAP_SYS_ADMIN = 0x15
|
||||||
CAP_SETPCAP = 0x8
|
CAP_SETPCAP = 0x8
|
||||||
|
CAP_NET_ADMIN = 0xc
|
||||||
CAP_DAC_OVERRIDE = 0x1
|
CAP_DAC_OVERRIDE = 0x1
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
@@ -9,46 +9,60 @@ import (
|
|||||||
"slices"
|
"slices"
|
||||||
"strings"
|
"strings"
|
||||||
"syscall"
|
"syscall"
|
||||||
|
"unique"
|
||||||
)
|
)
|
||||||
|
|
||||||
// AbsoluteError is returned by [NewAbs] and holds the invalid pathname.
|
// AbsoluteError is returned by [NewAbs] and holds the invalid pathname.
|
||||||
type AbsoluteError struct{ Pathname string }
|
type AbsoluteError string
|
||||||
|
|
||||||
func (e *AbsoluteError) Error() string { return fmt.Sprintf("path %q is not absolute", e.Pathname) }
|
func (e AbsoluteError) Error() string {
|
||||||
func (e *AbsoluteError) Is(target error) bool {
|
return fmt.Sprintf("path %q is not absolute", string(e))
|
||||||
var ce *AbsoluteError
|
}
|
||||||
|
|
||||||
|
func (e AbsoluteError) Is(target error) bool {
|
||||||
|
var ce AbsoluteError
|
||||||
if !errors.As(target, &ce) {
|
if !errors.As(target, &ce) {
|
||||||
return errors.Is(target, syscall.EINVAL)
|
return errors.Is(target, syscall.EINVAL)
|
||||||
}
|
}
|
||||||
return *e == *ce
|
return e == ce
|
||||||
}
|
}
|
||||||
|
|
||||||
// Absolute holds a pathname checked to be absolute.
|
// Absolute holds a pathname checked to be absolute.
|
||||||
type Absolute struct{ pathname string }
|
type Absolute struct{ pathname unique.Handle[string] }
|
||||||
|
|
||||||
|
// ok returns whether [Absolute] is not the zero value.
|
||||||
|
func (a *Absolute) ok() bool { return a != nil && *a != (Absolute{}) }
|
||||||
|
|
||||||
// unsafeAbs returns [check.Absolute] on any string value.
|
// unsafeAbs returns [check.Absolute] on any string value.
|
||||||
func unsafeAbs(pathname string) *Absolute { return &Absolute{pathname} }
|
func unsafeAbs(pathname string) *Absolute {
|
||||||
|
return &Absolute{unique.Make(pathname)}
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns the checked pathname.
|
||||||
func (a *Absolute) String() string {
|
func (a *Absolute) String() string {
|
||||||
if a.pathname == "" {
|
if !a.ok() {
|
||||||
panic("attempted use of zero Absolute")
|
panic("attempted use of zero Absolute")
|
||||||
}
|
}
|
||||||
|
return a.pathname.Value()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle returns the underlying [unique.Handle].
|
||||||
|
func (a *Absolute) Handle() unique.Handle[string] {
|
||||||
return a.pathname
|
return a.pathname
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Is efficiently compares the underlying pathname.
|
||||||
func (a *Absolute) Is(v *Absolute) bool {
|
func (a *Absolute) Is(v *Absolute) bool {
|
||||||
if a == nil && v == nil {
|
if a == nil && v == nil {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
return a != nil && v != nil &&
|
return a.ok() && v.ok() && a.pathname == v.pathname
|
||||||
a.pathname != "" && v.pathname != "" &&
|
|
||||||
a.pathname == v.pathname
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewAbs checks pathname and returns a new [Absolute] if pathname is absolute.
|
// NewAbs checks pathname and returns a new [Absolute] if pathname is absolute.
|
||||||
func NewAbs(pathname string) (*Absolute, error) {
|
func NewAbs(pathname string) (*Absolute, error) {
|
||||||
if !path.IsAbs(pathname) {
|
if !path.IsAbs(pathname) {
|
||||||
return nil, &AbsoluteError{pathname}
|
return nil, AbsoluteError(pathname)
|
||||||
}
|
}
|
||||||
return unsafeAbs(pathname), nil
|
return unsafeAbs(pathname), nil
|
||||||
}
|
}
|
||||||
@@ -70,35 +84,49 @@ func (a *Absolute) Append(elem ...string) *Absolute {
|
|||||||
// Dir calls [path.Dir] with [Absolute] as its argument.
|
// Dir calls [path.Dir] with [Absolute] as its argument.
|
||||||
func (a *Absolute) Dir() *Absolute { return unsafeAbs(path.Dir(a.String())) }
|
func (a *Absolute) Dir() *Absolute { return unsafeAbs(path.Dir(a.String())) }
|
||||||
|
|
||||||
func (a *Absolute) GobEncode() ([]byte, error) { return []byte(a.String()), nil }
|
// GobEncode returns the checked pathname.
|
||||||
|
func (a *Absolute) GobEncode() ([]byte, error) {
|
||||||
|
return []byte(a.String()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GobDecode stores data if it represents an absolute pathname.
|
||||||
func (a *Absolute) GobDecode(data []byte) error {
|
func (a *Absolute) GobDecode(data []byte) error {
|
||||||
pathname := string(data)
|
pathname := string(data)
|
||||||
if !path.IsAbs(pathname) {
|
if !path.IsAbs(pathname) {
|
||||||
return &AbsoluteError{pathname}
|
return AbsoluteError(pathname)
|
||||||
}
|
}
|
||||||
a.pathname = pathname
|
a.pathname = unique.Make(pathname)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *Absolute) MarshalJSON() ([]byte, error) { return json.Marshal(a.String()) }
|
// MarshalJSON returns a JSON representation of the checked pathname.
|
||||||
|
func (a *Absolute) MarshalJSON() ([]byte, error) {
|
||||||
|
return json.Marshal(a.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON stores data if it represents an absolute pathname.
|
||||||
func (a *Absolute) UnmarshalJSON(data []byte) error {
|
func (a *Absolute) UnmarshalJSON(data []byte) error {
|
||||||
var pathname string
|
var pathname string
|
||||||
if err := json.Unmarshal(data, &pathname); err != nil {
|
if err := json.Unmarshal(data, &pathname); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if !path.IsAbs(pathname) {
|
if !path.IsAbs(pathname) {
|
||||||
return &AbsoluteError{pathname}
|
return AbsoluteError(pathname)
|
||||||
}
|
}
|
||||||
a.pathname = pathname
|
a.pathname = unique.Make(pathname)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// SortAbs calls [slices.SortFunc] for a slice of [Absolute].
|
// SortAbs calls [slices.SortFunc] for a slice of [Absolute].
|
||||||
func SortAbs(x []*Absolute) {
|
func SortAbs(x []*Absolute) {
|
||||||
slices.SortFunc(x, func(a, b *Absolute) int { return strings.Compare(a.String(), b.String()) })
|
slices.SortFunc(x, func(a, b *Absolute) int {
|
||||||
|
return strings.Compare(a.String(), b.String())
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// CompactAbs calls [slices.CompactFunc] for a slice of [Absolute].
|
// CompactAbs calls [slices.CompactFunc] for a slice of [Absolute].
|
||||||
func CompactAbs(s []*Absolute) []*Absolute {
|
func CompactAbs(s []*Absolute) []*Absolute {
|
||||||
return slices.CompactFunc(s, func(a *Absolute, b *Absolute) bool { return a.String() == b.String() })
|
return slices.CompactFunc(s, func(a *Absolute, b *Absolute) bool {
|
||||||
|
return a.Is(b)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -31,8 +31,8 @@ func TestAbsoluteError(t *testing.T) {
|
|||||||
}{
|
}{
|
||||||
{"EINVAL", new(AbsoluteError), syscall.EINVAL, true},
|
{"EINVAL", new(AbsoluteError), syscall.EINVAL, true},
|
||||||
{"not EINVAL", new(AbsoluteError), syscall.EBADE, false},
|
{"not EINVAL", new(AbsoluteError), syscall.EBADE, false},
|
||||||
{"ne val", new(AbsoluteError), &AbsoluteError{Pathname: "etc"}, false},
|
{"ne val", new(AbsoluteError), AbsoluteError("etc"), false},
|
||||||
{"equals", &AbsoluteError{Pathname: "etc"}, &AbsoluteError{Pathname: "etc"}, true},
|
{"equals", AbsoluteError("etc"), AbsoluteError("etc"), true},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tc := range testCases {
|
for _, tc := range testCases {
|
||||||
@@ -45,7 +45,7 @@ func TestAbsoluteError(t *testing.T) {
|
|||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
want := `path "etc" is not absolute`
|
want := `path "etc" is not absolute`
|
||||||
if got := (&AbsoluteError{Pathname: "etc"}).Error(); got != want {
|
if got := (AbsoluteError("etc")).Error(); got != want {
|
||||||
t.Errorf("Error: %q, want %q", got, want)
|
t.Errorf("Error: %q, want %q", got, want)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
@@ -62,8 +62,8 @@ func TestNewAbs(t *testing.T) {
|
|||||||
wantErr error
|
wantErr error
|
||||||
}{
|
}{
|
||||||
{"good", "/etc", MustAbs("/etc"), nil},
|
{"good", "/etc", MustAbs("/etc"), nil},
|
||||||
{"not absolute", "etc", nil, &AbsoluteError{Pathname: "etc"}},
|
{"not absolute", "etc", nil, AbsoluteError("etc")},
|
||||||
{"zero", "", nil, &AbsoluteError{Pathname: ""}},
|
{"zero", "", nil, AbsoluteError("")},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tc := range testCases {
|
for _, tc := range testCases {
|
||||||
@@ -84,7 +84,7 @@ func TestNewAbs(t *testing.T) {
|
|||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
defer func() {
|
defer func() {
|
||||||
wantPanic := &AbsoluteError{Pathname: "etc"}
|
wantPanic := AbsoluteError("etc")
|
||||||
|
|
||||||
if r := recover(); !reflect.DeepEqual(r, wantPanic) {
|
if r := recover(); !reflect.DeepEqual(r, wantPanic) {
|
||||||
t.Errorf("MustAbs: panic = %v; want %v", r, wantPanic)
|
t.Errorf("MustAbs: panic = %v; want %v", r, wantPanic)
|
||||||
@@ -175,7 +175,7 @@ func TestCodecAbsolute(t *testing.T) {
|
|||||||
|
|
||||||
`"/etc"`, `{"val":"/etc","magic":3236757504}`},
|
`"/etc"`, `{"val":"/etc","magic":3236757504}`},
|
||||||
{"not absolute", nil,
|
{"not absolute", nil,
|
||||||
&AbsoluteError{Pathname: "etc"},
|
AbsoluteError("etc"),
|
||||||
"\t\x7f\x05\x01\x02\xff\x82\x00\x00\x00\a\xff\x80\x00\x03etc",
|
"\t\x7f\x05\x01\x02\xff\x82\x00\x00\x00\a\xff\x80\x00\x03etc",
|
||||||
",\xff\x83\x03\x01\x01\x06sCheck\x01\xff\x84\x00\x01\x02\x01\bPathname\x01\xff\x80\x00\x01\x05Magic\x01\x06\x00\x00\x00\t\x7f\x05\x01\x02\xff\x82\x00\x00\x00\x0f\xff\x84\x01\x03etc\x01\xfb\x01\x81\xda\x00\x00\x00",
|
",\xff\x83\x03\x01\x01\x06sCheck\x01\xff\x84\x00\x01\x02\x01\bPathname\x01\xff\x80\x00\x01\x05Magic\x01\x06\x00\x00\x00\t\x7f\x05\x01\x02\xff\x82\x00\x00\x00\x0f\xff\x84\x01\x03etc\x01\xfb\x01\x81\xda\x00\x00\x00",
|
||||||
|
|
||||||
|
|||||||
@@ -263,6 +263,8 @@ func (p *Container) Start() error {
|
|||||||
CAP_SYS_ADMIN,
|
CAP_SYS_ADMIN,
|
||||||
// drop capabilities
|
// drop capabilities
|
||||||
CAP_SETPCAP,
|
CAP_SETPCAP,
|
||||||
|
// bring up loopback interface
|
||||||
|
CAP_NET_ADMIN,
|
||||||
// overlay access to upperdir and workdir
|
// overlay access to upperdir and workdir
|
||||||
CAP_DAC_OVERRIDE,
|
CAP_DAC_OVERRIDE,
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -61,6 +61,8 @@ type syscallDispatcher interface {
|
|||||||
mountTmpfs(fsname, target string, flags uintptr, size int, perm os.FileMode) error
|
mountTmpfs(fsname, target string, flags uintptr, size int, perm os.FileMode) error
|
||||||
// ensureFile provides ensureFile.
|
// ensureFile provides ensureFile.
|
||||||
ensureFile(name string, perm, pperm os.FileMode) error
|
ensureFile(name string, perm, pperm os.FileMode) error
|
||||||
|
// mustLoopback provides mustLoopback.
|
||||||
|
mustLoopback(msg message.Msg)
|
||||||
|
|
||||||
// seccompLoad provides [seccomp.Load].
|
// seccompLoad provides [seccomp.Load].
|
||||||
seccompLoad(rules []std.NativeRule, flags seccomp.ExportFlag) error
|
seccompLoad(rules []std.NativeRule, flags seccomp.ExportFlag) error
|
||||||
@@ -164,6 +166,7 @@ func (k direct) mountTmpfs(fsname, target string, flags uintptr, size int, perm
|
|||||||
func (direct) ensureFile(name string, perm, pperm os.FileMode) error {
|
func (direct) ensureFile(name string, perm, pperm os.FileMode) error {
|
||||||
return ensureFile(name, perm, pperm)
|
return ensureFile(name, perm, pperm)
|
||||||
}
|
}
|
||||||
|
func (direct) mustLoopback(msg message.Msg) { mustLoopback(msg) }
|
||||||
|
|
||||||
func (direct) seccompLoad(rules []std.NativeRule, flags seccomp.ExportFlag) error {
|
func (direct) seccompLoad(rules []std.NativeRule, flags seccomp.ExportFlag) error {
|
||||||
return seccomp.Load(rules, flags)
|
return seccomp.Load(rules, flags)
|
||||||
|
|||||||
@@ -465,6 +465,8 @@ func (k *kstub) ensureFile(name string, perm, pperm os.FileMode) error {
|
|||||||
stub.CheckArg(k.Stub, "pperm", pperm, 2))
|
stub.CheckArg(k.Stub, "pperm", pperm, 2))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (*kstub) mustLoopback(message.Msg) { /* noop */ }
|
||||||
|
|
||||||
func (k *kstub) seccompLoad(rules []std.NativeRule, flags seccomp.ExportFlag) error {
|
func (k *kstub) seccompLoad(rules []std.NativeRule, flags seccomp.ExportFlag) error {
|
||||||
k.Helper()
|
k.Helper()
|
||||||
return k.Expects("seccompLoad").Error(
|
return k.Expects("seccompLoad").Error(
|
||||||
|
|||||||
@@ -18,7 +18,7 @@ func messageFromError(err error) (m string, ok bool) {
|
|||||||
if m, ok = messagePrefixP[os.PathError]("cannot ", err); ok {
|
if m, ok = messagePrefixP[os.PathError]("cannot ", err); ok {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if m, ok = messagePrefixP[check.AbsoluteError](zeroString, err); ok {
|
if m, ok = messagePrefix[check.AbsoluteError](zeroString, err); ok {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if m, ok = messagePrefix[OpRepeatError](zeroString, err); ok {
|
if m, ok = messagePrefix[OpRepeatError](zeroString, err); ok {
|
||||||
|
|||||||
@@ -37,7 +37,7 @@ func TestMessageFromError(t *testing.T) {
|
|||||||
Err: stub.UniqueError(0xdeadbeef),
|
Err: stub.UniqueError(0xdeadbeef),
|
||||||
}, "cannot mount /sysroot: unique error 3735928559 injected by the test suite", true},
|
}, "cannot mount /sysroot: unique error 3735928559 injected by the test suite", true},
|
||||||
|
|
||||||
{"absolute", &check.AbsoluteError{Pathname: "etc/mtab"},
|
{"absolute", check.AbsoluteError("etc/mtab"),
|
||||||
`path "etc/mtab" is not absolute`, true},
|
`path "etc/mtab" is not absolute`, true},
|
||||||
|
|
||||||
{"repeat", OpRepeatError("autoetc"),
|
{"repeat", OpRepeatError("autoetc"),
|
||||||
|
|||||||
@@ -170,6 +170,10 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
|
|||||||
offsetSetup = int(setupFd + 1)
|
offsetSetup = int(setupFd + 1)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if !params.HostNet {
|
||||||
|
k.mustLoopback(msg)
|
||||||
|
}
|
||||||
|
|
||||||
// write uid/gid map here so parent does not need to set dumpable
|
// write uid/gid map here so parent does not need to set dumpable
|
||||||
if err := k.setDumpable(SUID_DUMP_USER); err != nil {
|
if err := k.setDumpable(SUID_DUMP_USER); err != nil {
|
||||||
k.fatalf(msg, "cannot set SUID_DUMP_USER: %v", err)
|
k.fatalf(msg, "cannot set SUID_DUMP_USER: %v", err)
|
||||||
|
|||||||
@@ -312,7 +312,10 @@ func TestMountOverlayOp(t *testing.T) {
|
|||||||
},
|
},
|
||||||
}},
|
}},
|
||||||
|
|
||||||
{"ephemeral", new(Ops).OverlayEphemeral(check.MustAbs("/nix/store"), check.MustAbs("/mnt-root/nix/.ro-store")), Ops{
|
{"ephemeral", new(Ops).OverlayEphemeral(
|
||||||
|
check.MustAbs("/nix/store"),
|
||||||
|
check.MustAbs("/mnt-root/nix/.ro-store"),
|
||||||
|
), Ops{
|
||||||
&MountOverlayOp{
|
&MountOverlayOp{
|
||||||
Target: check.MustAbs("/nix/store"),
|
Target: check.MustAbs("/nix/store"),
|
||||||
Lower: []*check.Absolute{check.MustAbs("/mnt-root/nix/.ro-store")},
|
Lower: []*check.Absolute{check.MustAbs("/mnt-root/nix/.ro-store")},
|
||||||
@@ -320,7 +323,10 @@ func TestMountOverlayOp(t *testing.T) {
|
|||||||
},
|
},
|
||||||
}},
|
}},
|
||||||
|
|
||||||
{"readonly", new(Ops).OverlayReadonly(check.MustAbs("/nix/store"), check.MustAbs("/mnt-root/nix/.ro-store")), Ops{
|
{"readonly", new(Ops).OverlayReadonly(
|
||||||
|
check.MustAbs("/nix/store"),
|
||||||
|
check.MustAbs("/mnt-root/nix/.ro-store"),
|
||||||
|
), Ops{
|
||||||
&MountOverlayOp{
|
&MountOverlayOp{
|
||||||
Target: check.MustAbs("/nix/store"),
|
Target: check.MustAbs("/nix/store"),
|
||||||
Lower: []*check.Absolute{check.MustAbs("/mnt-root/nix/.ro-store")},
|
Lower: []*check.Absolute{check.MustAbs("/mnt-root/nix/.ro-store")},
|
||||||
|
|||||||
@@ -31,7 +31,7 @@ func (l *SymlinkOp) Valid() bool { return l != nil && l.Target != nil && l.LinkN
|
|||||||
func (l *SymlinkOp) early(_ *setupState, k syscallDispatcher) error {
|
func (l *SymlinkOp) early(_ *setupState, k syscallDispatcher) error {
|
||||||
if l.Dereference {
|
if l.Dereference {
|
||||||
if !path.IsAbs(l.LinkName) {
|
if !path.IsAbs(l.LinkName) {
|
||||||
return &check.AbsoluteError{Pathname: l.LinkName}
|
return check.AbsoluteError(l.LinkName)
|
||||||
}
|
}
|
||||||
if name, err := k.readlink(l.LinkName); err != nil {
|
if name, err := k.readlink(l.LinkName); err != nil {
|
||||||
return err
|
return err
|
||||||
|
|||||||
@@ -23,7 +23,7 @@ func TestSymlinkOp(t *testing.T) {
|
|||||||
Target: check.MustAbs("/etc/mtab"),
|
Target: check.MustAbs("/etc/mtab"),
|
||||||
LinkName: "etc/mtab",
|
LinkName: "etc/mtab",
|
||||||
Dereference: true,
|
Dereference: true,
|
||||||
}, nil, &check.AbsoluteError{Pathname: "etc/mtab"}, nil, nil},
|
}, nil, check.AbsoluteError("etc/mtab"), nil, nil},
|
||||||
|
|
||||||
{"readlink", &Params{ParentPerm: 0755}, &SymlinkOp{
|
{"readlink", &Params{ParentPerm: 0755}, &SymlinkOp{
|
||||||
Target: check.MustAbs("/etc/mtab"),
|
Target: check.MustAbs("/etc/mtab"),
|
||||||
|
|||||||
269
container/netlink.go
Normal file
269
container/netlink.go
Normal file
@@ -0,0 +1,269 @@
|
|||||||
|
package container
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"errors"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
. "syscall"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"hakurei.app/container/std"
|
||||||
|
"hakurei.app/message"
|
||||||
|
)
|
||||||
|
|
||||||
|
// rtnetlink represents a NETLINK_ROUTE socket.
|
||||||
|
type rtnetlink struct {
|
||||||
|
// Sent as part of rtnetlink messages.
|
||||||
|
pid uint32
|
||||||
|
// AF_NETLINK socket.
|
||||||
|
fd int
|
||||||
|
// Whether the socket is open.
|
||||||
|
ok bool
|
||||||
|
// Message sequence number.
|
||||||
|
seq uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
// open creates the underlying NETLINK_ROUTE socket.
|
||||||
|
func (s *rtnetlink) open() (err error) {
|
||||||
|
if s.ok || s.fd < 0 {
|
||||||
|
return os.ErrInvalid
|
||||||
|
}
|
||||||
|
|
||||||
|
s.pid = uint32(Getpid())
|
||||||
|
if s.fd, err = Socket(
|
||||||
|
AF_NETLINK,
|
||||||
|
SOCK_RAW|SOCK_CLOEXEC,
|
||||||
|
NETLINK_ROUTE,
|
||||||
|
); err != nil {
|
||||||
|
return os.NewSyscallError("socket", err)
|
||||||
|
} else if err = Bind(s.fd, &SockaddrNetlink{
|
||||||
|
Family: AF_NETLINK,
|
||||||
|
Pid: s.pid,
|
||||||
|
}); err != nil {
|
||||||
|
_ = s.close()
|
||||||
|
return os.NewSyscallError("bind", err)
|
||||||
|
} else {
|
||||||
|
s.ok = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// close closes the underlying NETLINK_ROUTE socket.
|
||||||
|
func (s *rtnetlink) close() error {
|
||||||
|
if !s.ok {
|
||||||
|
return os.ErrInvalid
|
||||||
|
}
|
||||||
|
|
||||||
|
s.ok = false
|
||||||
|
err := Close(s.fd)
|
||||||
|
s.fd = -1
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// roundtrip sends a netlink message and handles the reply.
|
||||||
|
func (s *rtnetlink) roundtrip(data []byte) error {
|
||||||
|
if !s.ok {
|
||||||
|
return os.ErrInvalid
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func() { s.seq++ }()
|
||||||
|
|
||||||
|
if err := Sendto(s.fd, data, 0, &SockaddrNetlink{
|
||||||
|
Family: AF_NETLINK,
|
||||||
|
}); err != nil {
|
||||||
|
return os.NewSyscallError("sendto", err)
|
||||||
|
}
|
||||||
|
buf := make([]byte, Getpagesize())
|
||||||
|
|
||||||
|
done:
|
||||||
|
for {
|
||||||
|
p := buf
|
||||||
|
if n, _, err := Recvfrom(s.fd, p, 0); err != nil {
|
||||||
|
return os.NewSyscallError("recvfrom", err)
|
||||||
|
} else if n < NLMSG_HDRLEN {
|
||||||
|
return errors.ErrUnsupported
|
||||||
|
} else {
|
||||||
|
p = p[:n]
|
||||||
|
}
|
||||||
|
|
||||||
|
if msgs, err := ParseNetlinkMessage(p); err != nil {
|
||||||
|
return err
|
||||||
|
} else {
|
||||||
|
for _, m := range msgs {
|
||||||
|
if m.Header.Seq != s.seq || m.Header.Pid != s.pid {
|
||||||
|
return errors.ErrUnsupported
|
||||||
|
}
|
||||||
|
if m.Header.Type == NLMSG_DONE {
|
||||||
|
break done
|
||||||
|
}
|
||||||
|
if m.Header.Type == NLMSG_ERROR {
|
||||||
|
if len(m.Data) >= 4 {
|
||||||
|
errno := Errno(-std.ScmpInt(binary.NativeEndian.Uint32(m.Data)))
|
||||||
|
if errno == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return errno
|
||||||
|
}
|
||||||
|
return errors.ErrUnsupported
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// mustRoundtrip calls roundtrip and terminates via msg for a non-nil error.
|
||||||
|
func (s *rtnetlink) mustRoundtrip(msg message.Msg, data []byte) {
|
||||||
|
err := s.roundtrip(data)
|
||||||
|
if err == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if closeErr := Close(s.fd); closeErr != nil {
|
||||||
|
msg.Verbosef("cannot close: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch err.(type) {
|
||||||
|
case *os.SyscallError:
|
||||||
|
msg.GetLogger().Fatalf("cannot %v", err)
|
||||||
|
|
||||||
|
case Errno:
|
||||||
|
msg.GetLogger().Fatalf("RTNETLINK answers: %v", err)
|
||||||
|
|
||||||
|
default:
|
||||||
|
msg.GetLogger().Fatalln("RTNETLINK answers with unexpected message")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// newaddrLo represents a RTM_NEWADDR message with two addresses.
|
||||||
|
type newaddrLo struct {
|
||||||
|
header NlMsghdr
|
||||||
|
data IfAddrmsg
|
||||||
|
|
||||||
|
r0 RtAttr
|
||||||
|
a0 [4]byte // in_addr
|
||||||
|
r1 RtAttr
|
||||||
|
a1 [4]byte // in_addr
|
||||||
|
}
|
||||||
|
|
||||||
|
// sizeofNewaddrLo is the expected size of newaddrLo.
|
||||||
|
const sizeofNewaddrLo = NLMSG_HDRLEN + SizeofIfAddrmsg + (SizeofRtAttr+4)*2
|
||||||
|
|
||||||
|
// newaddrLo returns the address of a populated newaddrLo.
|
||||||
|
func (s *rtnetlink) newaddrLo(lo int) *newaddrLo {
|
||||||
|
return &newaddrLo{NlMsghdr{
|
||||||
|
Len: sizeofNewaddrLo,
|
||||||
|
Type: RTM_NEWADDR,
|
||||||
|
Flags: NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE | NLM_F_EXCL,
|
||||||
|
Seq: s.seq,
|
||||||
|
Pid: s.pid,
|
||||||
|
}, IfAddrmsg{
|
||||||
|
Family: AF_INET,
|
||||||
|
Prefixlen: 8,
|
||||||
|
Flags: IFA_F_PERMANENT,
|
||||||
|
Scope: RT_SCOPE_HOST,
|
||||||
|
Index: uint32(lo),
|
||||||
|
}, RtAttr{
|
||||||
|
Len: uint16(SizeofRtAttr + len(newaddrLo{}.a0)),
|
||||||
|
Type: IFA_LOCAL,
|
||||||
|
}, [4]byte{127, 0, 0, 1}, RtAttr{
|
||||||
|
Len: uint16(SizeofRtAttr + len(newaddrLo{}.a1)),
|
||||||
|
Type: IFA_ADDRESS,
|
||||||
|
}, [4]byte{127, 0, 0, 1}}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (msg *newaddrLo) toWireFormat() []byte {
|
||||||
|
var buf [sizeofNewaddrLo]byte
|
||||||
|
|
||||||
|
*(*uint32)(unsafe.Pointer(&buf[0:4][0])) = msg.header.Len
|
||||||
|
*(*uint16)(unsafe.Pointer(&buf[4:6][0])) = msg.header.Type
|
||||||
|
*(*uint16)(unsafe.Pointer(&buf[6:8][0])) = msg.header.Flags
|
||||||
|
*(*uint32)(unsafe.Pointer(&buf[8:12][0])) = msg.header.Seq
|
||||||
|
*(*uint32)(unsafe.Pointer(&buf[12:16][0])) = msg.header.Pid
|
||||||
|
|
||||||
|
buf[16] = msg.data.Family
|
||||||
|
buf[17] = msg.data.Prefixlen
|
||||||
|
buf[18] = msg.data.Flags
|
||||||
|
buf[19] = msg.data.Scope
|
||||||
|
*(*uint32)(unsafe.Pointer(&buf[20:24][0])) = msg.data.Index
|
||||||
|
|
||||||
|
*(*uint16)(unsafe.Pointer(&buf[24:26][0])) = msg.r0.Len
|
||||||
|
*(*uint16)(unsafe.Pointer(&buf[26:28][0])) = msg.r0.Type
|
||||||
|
copy(buf[28:32], msg.a0[:])
|
||||||
|
*(*uint16)(unsafe.Pointer(&buf[32:34][0])) = msg.r1.Len
|
||||||
|
*(*uint16)(unsafe.Pointer(&buf[34:36][0])) = msg.r1.Type
|
||||||
|
copy(buf[36:40], msg.a1[:])
|
||||||
|
|
||||||
|
return buf[:]
|
||||||
|
}
|
||||||
|
|
||||||
|
// newlinkLo represents a RTM_NEWLINK message.
|
||||||
|
type newlinkLo struct {
|
||||||
|
header NlMsghdr
|
||||||
|
data IfInfomsg
|
||||||
|
}
|
||||||
|
|
||||||
|
// sizeofNewlinkLo is the expected size of newlinkLo.
|
||||||
|
const sizeofNewlinkLo = NLMSG_HDRLEN + SizeofIfInfomsg
|
||||||
|
|
||||||
|
// newlinkLo returns the address of a populated newlinkLo.
|
||||||
|
func (s *rtnetlink) newlinkLo(lo int) *newlinkLo {
|
||||||
|
return &newlinkLo{NlMsghdr{
|
||||||
|
Len: sizeofNewlinkLo,
|
||||||
|
Type: RTM_NEWLINK,
|
||||||
|
Flags: NLM_F_REQUEST | NLM_F_ACK,
|
||||||
|
Seq: s.seq,
|
||||||
|
Pid: s.pid,
|
||||||
|
}, IfInfomsg{
|
||||||
|
Family: AF_UNSPEC,
|
||||||
|
Index: int32(lo),
|
||||||
|
Flags: IFF_UP,
|
||||||
|
Change: IFF_UP,
|
||||||
|
}}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (msg *newlinkLo) toWireFormat() []byte {
|
||||||
|
var buf [sizeofNewlinkLo]byte
|
||||||
|
|
||||||
|
*(*uint32)(unsafe.Pointer(&buf[0:4][0])) = msg.header.Len
|
||||||
|
*(*uint16)(unsafe.Pointer(&buf[4:6][0])) = msg.header.Type
|
||||||
|
*(*uint16)(unsafe.Pointer(&buf[6:8][0])) = msg.header.Flags
|
||||||
|
*(*uint32)(unsafe.Pointer(&buf[8:12][0])) = msg.header.Seq
|
||||||
|
*(*uint32)(unsafe.Pointer(&buf[12:16][0])) = msg.header.Pid
|
||||||
|
|
||||||
|
buf[16] = msg.data.Family
|
||||||
|
*(*uint16)(unsafe.Pointer(&buf[18:20][0])) = msg.data.Type
|
||||||
|
*(*int32)(unsafe.Pointer(&buf[20:24][0])) = msg.data.Index
|
||||||
|
*(*uint32)(unsafe.Pointer(&buf[24:28][0])) = msg.data.Flags
|
||||||
|
*(*uint32)(unsafe.Pointer(&buf[28:32][0])) = msg.data.Change
|
||||||
|
|
||||||
|
return buf[:]
|
||||||
|
}
|
||||||
|
|
||||||
|
// mustLoopback creates the loopback address and brings the lo interface up.
|
||||||
|
// mustLoopback calls a fatal method of the underlying [log.Logger] of m with a
|
||||||
|
// user-facing error message if RTNETLINK behaves unexpectedly.
|
||||||
|
func mustLoopback(msg message.Msg) {
|
||||||
|
log := msg.GetLogger()
|
||||||
|
|
||||||
|
var lo int
|
||||||
|
if ifi, err := net.InterfaceByName("lo"); err != nil {
|
||||||
|
log.Fatalln(err)
|
||||||
|
} else {
|
||||||
|
lo = ifi.Index
|
||||||
|
}
|
||||||
|
|
||||||
|
var s rtnetlink
|
||||||
|
if err := s.open(); err != nil {
|
||||||
|
log.Fatalln(err)
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if err := s.close(); err != nil {
|
||||||
|
msg.Verbosef("cannot close netlink: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
s.mustRoundtrip(msg, s.newaddrLo(lo).toWireFormat())
|
||||||
|
s.mustRoundtrip(msg, s.newlinkLo(lo).toWireFormat())
|
||||||
|
}
|
||||||
72
container/netlink_test.go
Normal file
72
container/netlink_test.go
Normal file
@@ -0,0 +1,72 @@
|
|||||||
|
package container
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestSizeof(t *testing.T) {
|
||||||
|
if got := unsafe.Sizeof(newaddrLo{}); got != sizeofNewaddrLo {
|
||||||
|
t.Fatalf("newaddrLo: sizeof = %#x, want %#x", got, sizeofNewaddrLo)
|
||||||
|
}
|
||||||
|
|
||||||
|
if got := unsafe.Sizeof(newlinkLo{}); got != sizeofNewlinkLo {
|
||||||
|
t.Fatalf("newlinkLo: sizeof = %#x, want %#x", got, sizeofNewlinkLo)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRtnetlinkMessage(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
testCases := []struct {
|
||||||
|
name string
|
||||||
|
msg interface{ toWireFormat() []byte }
|
||||||
|
want []byte
|
||||||
|
}{
|
||||||
|
{"newaddrLo", (&rtnetlink{pid: 1, seq: 0}).newaddrLo(1), []byte{
|
||||||
|
/* Len */ 0x28, 0, 0, 0,
|
||||||
|
/* Type */ 0x14, 0,
|
||||||
|
/* Flags */ 5, 6,
|
||||||
|
/* Seq */ 0, 0, 0, 0,
|
||||||
|
/* Pid */ 1, 0, 0, 0,
|
||||||
|
|
||||||
|
/* Family */ 2,
|
||||||
|
/* Prefixlen */ 8,
|
||||||
|
/* Flags */ 0x80,
|
||||||
|
/* Scope */ 0xfe,
|
||||||
|
/* Index */ 1, 0, 0, 0,
|
||||||
|
|
||||||
|
/* Len */ 8, 0,
|
||||||
|
/* Type */ 2, 0,
|
||||||
|
/* in_addr */ 127, 0, 0, 1,
|
||||||
|
|
||||||
|
/* Len */ 8, 0,
|
||||||
|
/* Type */ 1, 0,
|
||||||
|
/* in_addr */ 127, 0, 0, 1,
|
||||||
|
}},
|
||||||
|
|
||||||
|
{"newlinkLo", (&rtnetlink{pid: 1, seq: 1}).newlinkLo(1), []byte{
|
||||||
|
/* Len */ 0x20, 0, 0, 0,
|
||||||
|
/* Type */ 0x10, 0,
|
||||||
|
/* Flags */ 5, 0,
|
||||||
|
/* Seq */ 1, 0, 0, 0,
|
||||||
|
/* Pid */ 1, 0, 0, 0,
|
||||||
|
|
||||||
|
/* Family */ 0,
|
||||||
|
/* pad */ 0,
|
||||||
|
/* Type */ 0, 0,
|
||||||
|
/* Index */ 1, 0, 0, 0,
|
||||||
|
/* Flags */ 1, 0, 0, 0,
|
||||||
|
/* Change */ 1, 0, 0, 0,
|
||||||
|
}},
|
||||||
|
}
|
||||||
|
for _, tc := range testCases {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
if got := tc.msg.toWireFormat(); string(got) != string(tc.want) {
|
||||||
|
t.Fatalf("toWireFormat: %#v, want %#v", got, tc.want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
2
dist/install.sh
vendored
2
dist/install.sh
vendored
@@ -2,7 +2,7 @@
|
|||||||
cd "$(dirname -- "$0")" || exit 1
|
cd "$(dirname -- "$0")" || exit 1
|
||||||
|
|
||||||
install -vDm0755 "bin/hakurei" "${HAKUREI_INSTALL_PREFIX}/usr/bin/hakurei"
|
install -vDm0755 "bin/hakurei" "${HAKUREI_INSTALL_PREFIX}/usr/bin/hakurei"
|
||||||
install -vDm0755 "bin/hpkg" "${HAKUREI_INSTALL_PREFIX}/usr/bin/hpkg"
|
install -vDm0755 "bin/sharefs" "${HAKUREI_INSTALL_PREFIX}/usr/bin/sharefs"
|
||||||
|
|
||||||
install -vDm4511 "bin/hsu" "${HAKUREI_INSTALL_PREFIX}/usr/bin/hsu"
|
install -vDm4511 "bin/hsu" "${HAKUREI_INSTALL_PREFIX}/usr/bin/hsu"
|
||||||
if [ ! -f "${HAKUREI_INSTALL_PREFIX}/etc/hsurc" ]; then
|
if [ ! -f "${HAKUREI_INSTALL_PREFIX}/etc/hsurc" ]; then
|
||||||
|
|||||||
@@ -108,7 +108,7 @@ func TestSpPulseOp(t *testing.T) {
|
|||||||
call("lookupEnv", stub.ExpectArgs{"PULSE_COOKIE"}, "proc/nonexistent/cookie", nil),
|
call("lookupEnv", stub.ExpectArgs{"PULSE_COOKIE"}, "proc/nonexistent/cookie", nil),
|
||||||
}, nil, nil, &hst.AppError{
|
}, nil, nil, &hst.AppError{
|
||||||
Step: "locate PulseAudio cookie",
|
Step: "locate PulseAudio cookie",
|
||||||
Err: &check.AbsoluteError{Pathname: "proc/nonexistent/cookie"},
|
Err: check.AbsoluteError("proc/nonexistent/cookie"),
|
||||||
}, nil, nil, nil, nil, nil},
|
}, nil, nil, nil, nil, nil},
|
||||||
|
|
||||||
{"cookie loadFile", func(bool, bool) outcomeOp {
|
{"cookie loadFile", func(bool, bool) outcomeOp {
|
||||||
@@ -272,7 +272,7 @@ func TestDiscoverPulseCookie(t *testing.T) {
|
|||||||
call("verbose", stub.ExpectArgs{[]any{(*check.Absolute)(nil)}}, nil, nil),
|
call("verbose", stub.ExpectArgs{[]any{(*check.Absolute)(nil)}}, nil, nil),
|
||||||
}}, &hst.AppError{
|
}}, &hst.AppError{
|
||||||
Step: "locate PulseAudio cookie",
|
Step: "locate PulseAudio cookie",
|
||||||
Err: &check.AbsoluteError{Pathname: "proc/nonexistent/pulse-cookie"},
|
Err: check.AbsoluteError("proc/nonexistent/pulse-cookie"),
|
||||||
}},
|
}},
|
||||||
|
|
||||||
{"success override", fCheckPathname, stub.Expect{Calls: []stub.Call{
|
{"success override", fCheckPathname, stub.Expect{Calls: []stub.Call{
|
||||||
@@ -286,7 +286,7 @@ func TestDiscoverPulseCookie(t *testing.T) {
|
|||||||
call("verbose", stub.ExpectArgs{[]any{(*check.Absolute)(nil)}}, nil, nil),
|
call("verbose", stub.ExpectArgs{[]any{(*check.Absolute)(nil)}}, nil, nil),
|
||||||
}}, &hst.AppError{
|
}}, &hst.AppError{
|
||||||
Step: "locate PulseAudio cookie",
|
Step: "locate PulseAudio cookie",
|
||||||
Err: &check.AbsoluteError{Pathname: "proc/nonexistent/home"},
|
Err: check.AbsoluteError("proc/nonexistent/home"),
|
||||||
}},
|
}},
|
||||||
|
|
||||||
{"home stat", fCheckPathname, stub.Expect{Calls: []stub.Call{
|
{"home stat", fCheckPathname, stub.Expect{Calls: []stub.Call{
|
||||||
@@ -321,7 +321,7 @@ func TestDiscoverPulseCookie(t *testing.T) {
|
|||||||
call("verbose", stub.ExpectArgs{[]any{(*check.Absolute)(nil)}}, nil, nil),
|
call("verbose", stub.ExpectArgs{[]any{(*check.Absolute)(nil)}}, nil, nil),
|
||||||
}}, &hst.AppError{
|
}}, &hst.AppError{
|
||||||
Step: "locate PulseAudio cookie",
|
Step: "locate PulseAudio cookie",
|
||||||
Err: &check.AbsoluteError{Pathname: "proc/nonexistent/xdg"},
|
Err: check.AbsoluteError("proc/nonexistent/xdg"),
|
||||||
}},
|
}},
|
||||||
|
|
||||||
{"xdg stat", fCheckPathname, stub.Expect{Calls: []stub.Call{
|
{"xdg stat", fCheckPathname, stub.Expect{Calls: []stub.Call{
|
||||||
|
|||||||
210
internal/pkg/dir.go
Normal file
210
internal/pkg/dir.go
Normal file
@@ -0,0 +1,210 @@
|
|||||||
|
package pkg
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/sha512"
|
||||||
|
"encoding/binary"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"io/fs"
|
||||||
|
"math"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"hakurei.app/container/check"
|
||||||
|
)
|
||||||
|
|
||||||
|
// FlatEntry is a directory entry to be encoded for [Flatten].
|
||||||
|
type FlatEntry struct {
|
||||||
|
Mode fs.FileMode // file mode bits
|
||||||
|
Path string // pathname of the file
|
||||||
|
Data []byte // file content or symlink destination
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
| mode uint32 | path_sz uint32 |
|
||||||
|
| data_sz uint64 |
|
||||||
|
| path string |
|
||||||
|
| data []byte |
|
||||||
|
*/
|
||||||
|
|
||||||
|
// wordSize is the boundary which binary segments are always aligned to.
|
||||||
|
const wordSize = 8
|
||||||
|
|
||||||
|
// alignSize returns the padded size for aligning sz.
|
||||||
|
func alignSize(sz int) int {
|
||||||
|
return sz + (wordSize-(sz)%wordSize)%wordSize
|
||||||
|
}
|
||||||
|
|
||||||
|
// Encode encodes the entry for transmission or hashing.
|
||||||
|
func (ent *FlatEntry) Encode(w io.Writer) (n int, err error) {
|
||||||
|
pPathSize := alignSize(len(ent.Path))
|
||||||
|
if pPathSize > math.MaxUint32 {
|
||||||
|
return 0, syscall.E2BIG
|
||||||
|
}
|
||||||
|
pDataSize := alignSize(len(ent.Data))
|
||||||
|
|
||||||
|
payload := make([]byte, wordSize*2+pPathSize+pDataSize)
|
||||||
|
binary.LittleEndian.PutUint32(payload, uint32(ent.Mode))
|
||||||
|
binary.LittleEndian.PutUint32(payload[wordSize/2:], uint32(len(ent.Path)))
|
||||||
|
binary.LittleEndian.PutUint64(payload[wordSize:], uint64(len(ent.Data)))
|
||||||
|
copy(payload[wordSize*2:], ent.Path)
|
||||||
|
copy(payload[wordSize*2+pPathSize:], ent.Data)
|
||||||
|
return w.Write(payload)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrInsecurePath is returned by [FlatEntry.Decode] if validation is requested
|
||||||
|
// and a nonlocal path is encountered in the stream.
|
||||||
|
var ErrInsecurePath = errors.New("insecure file path")
|
||||||
|
|
||||||
|
// Decode decodes the entry from its representation produced by Encode.
|
||||||
|
func (ent *FlatEntry) Decode(r io.Reader, validate bool) (n int, err error) {
|
||||||
|
var nr int
|
||||||
|
|
||||||
|
header := make([]byte, wordSize*2)
|
||||||
|
nr, err = r.Read(header)
|
||||||
|
n += nr
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, io.EOF) && n != 0 {
|
||||||
|
err = io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
ent.Mode = fs.FileMode(binary.LittleEndian.Uint32(header))
|
||||||
|
pathSize := int(binary.LittleEndian.Uint32(header[wordSize/2:]))
|
||||||
|
pPathSize := alignSize(pathSize)
|
||||||
|
dataSize := int(binary.LittleEndian.Uint64(header[wordSize:]))
|
||||||
|
pDataSize := alignSize(dataSize)
|
||||||
|
|
||||||
|
buf := make([]byte, pPathSize+pDataSize)
|
||||||
|
nr, err = r.Read(buf)
|
||||||
|
n += nr
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, io.EOF) {
|
||||||
|
if nr != len(buf) {
|
||||||
|
err = io.ErrUnexpectedEOF
|
||||||
|
return
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ent.Path = string(buf[:pathSize])
|
||||||
|
if ent.Mode.IsDir() {
|
||||||
|
ent.Data = nil
|
||||||
|
} else {
|
||||||
|
ent.Data = buf[pPathSize : pPathSize+dataSize]
|
||||||
|
}
|
||||||
|
|
||||||
|
if validate && !filepath.IsLocal(ent.Path) {
|
||||||
|
err = ErrInsecurePath
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DirScanner provides an efficient interface for reading a stream of encoded
|
||||||
|
// [FlatEntry]. Successive calls to the Scan method will step through the
|
||||||
|
// entries in the stream.
|
||||||
|
type DirScanner struct {
|
||||||
|
// Underlying reader to scan [FlatEntry] representations from.
|
||||||
|
r io.Reader
|
||||||
|
|
||||||
|
// First non-EOF I/O error, returned by the Err method.
|
||||||
|
err error
|
||||||
|
|
||||||
|
// Entry to store results in. Its address is returned by the Entry method
|
||||||
|
// and is updated on every call to Scan.
|
||||||
|
ent FlatEntry
|
||||||
|
|
||||||
|
// Validate pathnames during decoding.
|
||||||
|
validate bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDirScanner returns the address of a new instance of [DirScanner] reading
|
||||||
|
// from r. The caller must no longer read from r after this function returns.
|
||||||
|
func NewDirScanner(r io.Reader, validate bool) *DirScanner {
|
||||||
|
return &DirScanner{r: r, validate: validate}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Err returns the first non-EOF I/O error.
|
||||||
|
func (s *DirScanner) Err() error {
|
||||||
|
if errors.Is(s.err, io.EOF) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return s.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Entry returns the address to the [FlatEntry] value storing the last result.
|
||||||
|
func (s *DirScanner) Entry() *FlatEntry { return &s.ent }
|
||||||
|
|
||||||
|
// Scan advances to the next [FlatEntry].
|
||||||
|
func (s *DirScanner) Scan() bool {
|
||||||
|
if s.err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
var n int
|
||||||
|
n, s.err = s.ent.Decode(s.r, s.validate)
|
||||||
|
if errors.Is(s.err, io.EOF) {
|
||||||
|
return n != 0
|
||||||
|
}
|
||||||
|
return s.err == nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Flatten writes a deterministic representation of the contents of fsys to w.
|
||||||
|
// The resulting data can be hashed to produce a deterministic checksum for the
|
||||||
|
// directory.
|
||||||
|
func Flatten(fsys fs.FS, root string, w io.Writer) (n int, err error) {
|
||||||
|
var nr int
|
||||||
|
err = fs.WalkDir(fsys, root, func(path string, d fs.DirEntry, err error) error {
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var fi fs.FileInfo
|
||||||
|
fi, err = d.Info()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
ent := FlatEntry{
|
||||||
|
Path: path,
|
||||||
|
Mode: fi.Mode(),
|
||||||
|
}
|
||||||
|
if ent.Mode.IsRegular() {
|
||||||
|
if ent.Data, err = fs.ReadFile(fsys, path); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else if ent.Mode&fs.ModeSymlink != 0 {
|
||||||
|
var newpath string
|
||||||
|
if newpath, err = fs.ReadLink(fsys, path); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
ent.Data = []byte(newpath)
|
||||||
|
} else if !ent.Mode.IsDir() {
|
||||||
|
return InvalidFileModeError(ent.Mode)
|
||||||
|
}
|
||||||
|
|
||||||
|
nr, err = ent.Encode(w)
|
||||||
|
n += nr
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// HashFS returns a checksum produced by hashing the result of [Flatten].
|
||||||
|
func HashFS(fsys fs.FS, root string) (Checksum, error) {
|
||||||
|
h := sha512.New384()
|
||||||
|
if _, err := Flatten(fsys, root, h); err != nil {
|
||||||
|
return Checksum{}, err
|
||||||
|
}
|
||||||
|
return (Checksum)(h.Sum(nil)), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// HashDir returns a checksum produced by hashing the result of [Flatten].
|
||||||
|
func HashDir(pathname *check.Absolute) (Checksum, error) {
|
||||||
|
return HashFS(os.DirFS(pathname.String()), ".")
|
||||||
|
}
|
||||||
537
internal/pkg/dir_test.go
Normal file
537
internal/pkg/dir_test.go
Normal file
@@ -0,0 +1,537 @@
|
|||||||
|
package pkg_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"io/fs"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
"testing/fstest"
|
||||||
|
|
||||||
|
"hakurei.app/internal/pkg"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestFlatten(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
testCases := []struct {
|
||||||
|
name string
|
||||||
|
fsys fs.FS
|
||||||
|
entries []pkg.FlatEntry
|
||||||
|
sum pkg.Checksum
|
||||||
|
err error
|
||||||
|
}{
|
||||||
|
{"bad type", fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0700},
|
||||||
|
"invalid": {Mode: fs.ModeCharDevice | 0400},
|
||||||
|
}, nil, pkg.Checksum{}, pkg.InvalidFileModeError(
|
||||||
|
fs.ModeCharDevice | 0400,
|
||||||
|
)},
|
||||||
|
|
||||||
|
{"empty", fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0700},
|
||||||
|
"checksum": {Mode: fs.ModeDir | 0700},
|
||||||
|
"identifier": {Mode: fs.ModeDir | 0700},
|
||||||
|
"work": {Mode: fs.ModeDir | 0700},
|
||||||
|
}, []pkg.FlatEntry{
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||||
|
}, pkg.MustDecode("E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C"), nil},
|
||||||
|
|
||||||
|
{"sample cache file", fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0700},
|
||||||
|
|
||||||
|
"checksum": {Mode: fs.ModeDir | 0700},
|
||||||
|
"checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX": {Mode: 0400, Data: []byte{0}},
|
||||||
|
"checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq": {Mode: 0400, Data: []byte{0, 0, 0, 0, 0xad, 0xb, 0, 4, 0xfe, 0xfe, 0, 0, 0xfe, 0xca, 0, 0}},
|
||||||
|
|
||||||
|
"identifier": {Mode: fs.ModeDir | 0700},
|
||||||
|
"identifier/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX")},
|
||||||
|
"identifier/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
|
||||||
|
"identifier/cafebabecafebabecafebabecafebabecafebabecafebabecafebabecafebabe": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
|
||||||
|
"identifier/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
|
||||||
|
|
||||||
|
"work": {Mode: fs.ModeDir | 0700},
|
||||||
|
}, []pkg.FlatEntry{
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||||
|
{Mode: 0400, Path: "checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq", Data: []byte{0, 0, 0, 0, 0xad, 0xb, 0, 4, 0xfe, 0xfe, 0, 0, 0xfe, 0xca, 0, 0}},
|
||||||
|
{Mode: 0400, Path: "checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX", Data: []byte{0}},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq", Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/cafebabecafebabecafebabecafebabecafebabecafebabecafebabecafebabe", Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef", Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX", Data: []byte("../checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX")},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||||
|
}, pkg.MustDecode("St9rlE-mGZ5gXwiv_hzQ_B8bZP-UUvSNmf4nHUZzCMOumb6hKnheZSe0dmnuc4Q2"), nil},
|
||||||
|
|
||||||
|
{"sample http get cure", fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0700},
|
||||||
|
|
||||||
|
"checksum": {Mode: fs.ModeDir | 0700},
|
||||||
|
"checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU": {Mode: 0400, Data: []byte("\x7f\xe1\x69\xa2\xdd\x63\x96\x26\x83\x79\x61\x8b\xf0\x3f\xd5\x16\x9a\x39\x3a\xdb\xcf\xb1\xbc\x8d\x33\xff\x75\xee\x62\x56\xa9\xf0\x27\xac\x13\x94\x69")},
|
||||||
|
|
||||||
|
"identifier": {Mode: fs.ModeDir | 0700},
|
||||||
|
"identifier/NqVORkT6L9HX6Za7kT2zcibY10qFqBaxEjPiYFrBQX-ZFr3yxCzJxbKOP0zVjeWb": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU")},
|
||||||
|
|
||||||
|
"work": {Mode: fs.ModeDir | 0700},
|
||||||
|
}, []pkg.FlatEntry{
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||||
|
{Mode: 0400, Path: "checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU", Data: []byte("\x7f\xe1\x69\xa2\xdd\x63\x96\x26\x83\x79\x61\x8b\xf0\x3f\xd5\x16\x9a\x39\x3a\xdb\xcf\xb1\xbc\x8d\x33\xff\x75\xee\x62\x56\xa9\xf0\x27\xac\x13\x94\x69")},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/NqVORkT6L9HX6Za7kT2zcibY10qFqBaxEjPiYFrBQX-ZFr3yxCzJxbKOP0zVjeWb", Data: []byte("../checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU")},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||||
|
}, pkg.MustDecode("bqtn69RkV5E7V7GhhgCFjcvbxmaqrO8DywamM4Tyjf10F6EJBHjXiIa_tFRtF4iN"), nil},
|
||||||
|
|
||||||
|
{"sample directory step simple", fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0500},
|
||||||
|
|
||||||
|
"check": {Mode: 0400, Data: []byte{0, 0}},
|
||||||
|
|
||||||
|
"lib": {Mode: fs.ModeDir | 0700},
|
||||||
|
"lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
|
||||||
|
|
||||||
|
"lib/pkgconfig": {Mode: fs.ModeDir | 0700},
|
||||||
|
}, []pkg.FlatEntry{
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "."},
|
||||||
|
|
||||||
|
{Mode: 0400, Path: "check", Data: []byte{0, 0}},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "lib"},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "lib/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "lib/pkgconfig"},
|
||||||
|
}, pkg.MustDecode("qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b"), nil},
|
||||||
|
|
||||||
|
{"sample directory step garbage", fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0500},
|
||||||
|
|
||||||
|
"lib": {Mode: fs.ModeDir | 0500},
|
||||||
|
"lib/check": {Mode: 0400, Data: []byte{}},
|
||||||
|
|
||||||
|
"lib/pkgconfig": {Mode: fs.ModeDir | 0500},
|
||||||
|
}, []pkg.FlatEntry{
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "."},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "lib"},
|
||||||
|
{Mode: 0400, Path: "lib/check", Data: []byte{}},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "lib/pkgconfig"},
|
||||||
|
}, pkg.MustDecode("CUx-3hSbTWPsbMfDhgalG4Ni_GmR9TnVX8F99tY_P5GtkYvczg9RrF5zO0jX9XYT"), nil},
|
||||||
|
|
||||||
|
{"sample directory", fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0700},
|
||||||
|
|
||||||
|
"checksum": {Mode: fs.ModeDir | 0700},
|
||||||
|
"checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/check": {Mode: 0400, Data: []byte{0, 0}},
|
||||||
|
"checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib": {Mode: fs.ModeDir | 0700},
|
||||||
|
"checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib/pkgconfig": {Mode: fs.ModeDir | 0700},
|
||||||
|
"checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
|
||||||
|
|
||||||
|
"identifier": {Mode: fs.ModeDir | 0700},
|
||||||
|
"identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b")},
|
||||||
|
"identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b")},
|
||||||
|
|
||||||
|
"work": {Mode: fs.ModeDir | 0700},
|
||||||
|
}, []pkg.FlatEntry{
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b"},
|
||||||
|
{Mode: 0400, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/check", Data: []byte{0, 0}},
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib"},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib/pkgconfig"},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY", Data: []byte("../checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b")},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa", Data: []byte("../checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b")},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||||
|
}, pkg.MustDecode("WVpvsVqVKg9Nsh744x57h51AuWUoUR2nnh8Md-EYBQpk6ziyTuUn6PLtF2e0Eu_d"), nil},
|
||||||
|
|
||||||
|
{"sample tar step unpack", fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0500},
|
||||||
|
|
||||||
|
"checksum": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check": {Mode: 0400, Data: []byte{0, 0}},
|
||||||
|
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
|
||||||
|
|
||||||
|
"identifier": {Mode: fs.ModeDir | 0500},
|
||||||
|
"identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||||
|
"identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||||
|
|
||||||
|
"work": {Mode: fs.ModeDir | 0500},
|
||||||
|
}, []pkg.FlatEntry{
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "."},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum"},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP"},
|
||||||
|
{Mode: 0400, Path: "checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check", Data: []byte{0, 0}},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib"},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig"},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "identifier"},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY", Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa", Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "work"},
|
||||||
|
}, pkg.MustDecode("cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM"), nil},
|
||||||
|
|
||||||
|
{"sample tar", fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0700},
|
||||||
|
|
||||||
|
"checksum": {Mode: fs.ModeDir | 0700},
|
||||||
|
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check": {Mode: 0400, Data: []byte{0, 0}},
|
||||||
|
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
|
||||||
|
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||||
|
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||||
|
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/work": {Mode: fs.ModeDir | 0500},
|
||||||
|
|
||||||
|
"identifier": {Mode: fs.ModeDir | 0700},
|
||||||
|
"identifier/-P_1iw6yVq_letMHncqcExSE0bYcDhYI5OdY6b1wKASf-Corufvj__XTBUq2Qd2a": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
|
||||||
|
"identifier/0_rRxIqbX9LK9L_KDbuafotFz6HFkonNgO9gXhK1asM_Y1Pxn0amg756vRTo6m74": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
|
||||||
|
|
||||||
|
"temp": {Mode: fs.ModeDir | 0700},
|
||||||
|
"work": {Mode: fs.ModeDir | 0700},
|
||||||
|
}, []pkg.FlatEntry{
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM"},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum"},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP"},
|
||||||
|
{Mode: 0400, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check", Data: []byte{0, 0}},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib"},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig"},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier"},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY", Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa", Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/work"},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/-P_1iw6yVq_letMHncqcExSE0bYcDhYI5OdY6b1wKASf-Corufvj__XTBUq2Qd2a", Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/0_rRxIqbX9LK9L_KDbuafotFz6HFkonNgO9gXhK1asM_Y1Pxn0amg756vRTo6m74", Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||||
|
}, pkg.MustDecode("sxbgyX-bPoezbha214n2lbQhiVfTUBkhZ0EX6zI7mmkMdrCdwuMwhMBJphLQsy94"), nil},
|
||||||
|
|
||||||
|
{"sample tar expand step unpack", fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0500},
|
||||||
|
|
||||||
|
"libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
|
||||||
|
}, []pkg.FlatEntry{
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "."},
|
||||||
|
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
|
||||||
|
}, pkg.MustDecode("CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN"), nil},
|
||||||
|
|
||||||
|
{"sample tar expand", fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0700},
|
||||||
|
|
||||||
|
"checksum": {Mode: fs.ModeDir | 0700},
|
||||||
|
"checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
|
||||||
|
|
||||||
|
"identifier": {Mode: fs.ModeDir | 0700},
|
||||||
|
"identifier/-P_1iw6yVq_letMHncqcExSE0bYcDhYI5OdY6b1wKASf-Corufvj__XTBUq2Qd2a": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
|
||||||
|
"identifier/0_rRxIqbX9LK9L_KDbuafotFz6HFkonNgO9gXhK1asM_Y1Pxn0amg756vRTo6m74": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
|
||||||
|
|
||||||
|
"temp": {Mode: fs.ModeDir | 0700},
|
||||||
|
"work": {Mode: fs.ModeDir | 0700},
|
||||||
|
}, []pkg.FlatEntry{
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN"},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/-P_1iw6yVq_letMHncqcExSE0bYcDhYI5OdY6b1wKASf-Corufvj__XTBUq2Qd2a", Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/0_rRxIqbX9LK9L_KDbuafotFz6HFkonNgO9gXhK1asM_Y1Pxn0amg756vRTo6m74", Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||||
|
}, pkg.MustDecode("4I8wx_h7NSJTlG5lbuz-GGEXrOg0GYC3M_503LYEBhv5XGWXfNIdIY9Q3eVSYldX"), nil},
|
||||||
|
|
||||||
|
{"testtool", fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0500},
|
||||||
|
|
||||||
|
"check": {Mode: 0400, Data: []byte{0}},
|
||||||
|
}, []pkg.FlatEntry{
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "."},
|
||||||
|
|
||||||
|
{Mode: 0400, Path: "check", Data: []byte{0}},
|
||||||
|
}, pkg.MustDecode("GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"), nil},
|
||||||
|
|
||||||
|
{"sample exec container", fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0700},
|
||||||
|
|
||||||
|
"checksum": {Mode: fs.ModeDir | 0700},
|
||||||
|
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check": {Mode: 0400, Data: []byte{0}},
|
||||||
|
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb": {Mode: 0400, Data: []byte{}},
|
||||||
|
|
||||||
|
"identifier": {Mode: fs.ModeDir | 0700},
|
||||||
|
"identifier/U2cbgVgEtjfRuvHfE1cQnZ3t8yoexULQyo_VLgvxAVJSsobMcNaFIsuDWtmt7kzK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||||
|
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
||||||
|
"identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
|
|
||||||
|
"temp": {Mode: fs.ModeDir | 0700},
|
||||||
|
"work": {Mode: fs.ModeDir | 0700},
|
||||||
|
}, []pkg.FlatEntry{
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"},
|
||||||
|
{Mode: 0400, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check", Data: []byte{0}},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
|
||||||
|
{Mode: 0400, Path: "checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb", Data: []byte{}},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/U2cbgVgEtjfRuvHfE1cQnZ3t8yoexULQyo_VLgvxAVJSsobMcNaFIsuDWtmt7kzK", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||||
|
}, pkg.MustDecode("UiV6kMz7KrTsc_yphiyQzFLqjRanHxUOwrBMtkKuWo4mOO6WgPFAcoUEeSp7eVIW"), nil},
|
||||||
|
|
||||||
|
{"testtool net", fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0500},
|
||||||
|
|
||||||
|
"check": {Mode: 0400, Data: []byte("net")},
|
||||||
|
}, []pkg.FlatEntry{
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "."},
|
||||||
|
|
||||||
|
{Mode: 0400, Path: "check", Data: []byte("net")},
|
||||||
|
}, pkg.MustDecode("a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W"), nil},
|
||||||
|
|
||||||
|
{"sample exec net container", fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0700},
|
||||||
|
|
||||||
|
"checksum": {Mode: fs.ModeDir | 0700},
|
||||||
|
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb": {Mode: 0400, Data: []byte{}},
|
||||||
|
"checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W/check": {Mode: 0400, Data: []byte("net")},
|
||||||
|
|
||||||
|
"identifier": {Mode: fs.ModeDir | 0700},
|
||||||
|
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
||||||
|
"identifier/QdsJhGgnk5N2xdUNGcndXQxFKifxf1V_2t9X8CQ-pDcg24x6mGJC_BiLfGbs6Qml": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W")},
|
||||||
|
"identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
|
|
||||||
|
"temp": {Mode: fs.ModeDir | 0700},
|
||||||
|
"work": {Mode: fs.ModeDir | 0700},
|
||||||
|
}, []pkg.FlatEntry{
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
|
||||||
|
{Mode: 0400, Path: "checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb", Data: []byte{}},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W"},
|
||||||
|
{Mode: 0400, Path: "checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W/check", Data: []byte("net")},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/QdsJhGgnk5N2xdUNGcndXQxFKifxf1V_2t9X8CQ-pDcg24x6mGJC_BiLfGbs6Qml", Data: []byte("../checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W")},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||||
|
}, pkg.MustDecode("ek4K-0d4iRSArkY2TCs3WK34DbiYeOmhE_4vsJTSu_6roY4ZF3YG6eKRooal-i1o"), nil},
|
||||||
|
|
||||||
|
{"sample exec container overlay root", fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0700},
|
||||||
|
|
||||||
|
"checksum": {Mode: fs.ModeDir | 0700},
|
||||||
|
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check": {Mode: 0400, Data: []byte{0}},
|
||||||
|
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
|
||||||
|
|
||||||
|
"identifier": {Mode: fs.ModeDir | 0700},
|
||||||
|
"identifier/5ey2wpmMpj483YYa7ZZQciYLA2cx3_l167JCqWW4Pd-5DVp81dj9EsBtVTwYptF6": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||||
|
"identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
|
|
||||||
|
"temp": {Mode: fs.ModeDir | 0700},
|
||||||
|
"work": {Mode: fs.ModeDir | 0700},
|
||||||
|
}, []pkg.FlatEntry{
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"},
|
||||||
|
{Mode: 0400, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check", Data: []byte{0}},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/5ey2wpmMpj483YYa7ZZQciYLA2cx3_l167JCqWW4Pd-5DVp81dj9EsBtVTwYptF6", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||||
|
}, pkg.MustDecode("VIqqpf0ip9jcyw63i6E8lCMGUcLivQBe4Bevt3WusNac-1MSy5bzB647qGUBzl-W"), nil},
|
||||||
|
|
||||||
|
{"sample exec container overlay work", fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0700},
|
||||||
|
|
||||||
|
"checksum": {Mode: fs.ModeDir | 0700},
|
||||||
|
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check": {Mode: 0400, Data: []byte{0}},
|
||||||
|
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
|
||||||
|
|
||||||
|
"identifier": {Mode: fs.ModeDir | 0700},
|
||||||
|
"identifier/acaDzHZv40dZaz4cGAXayqbRMgbEOuiuiUijZL8IgDQvyeCNMFE3onBMYfny-kXA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||||
|
"identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
|
|
||||||
|
"temp": {Mode: fs.ModeDir | 0700},
|
||||||
|
"work": {Mode: fs.ModeDir | 0700},
|
||||||
|
}, []pkg.FlatEntry{
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"},
|
||||||
|
{Mode: 0400, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check", Data: []byte{0}},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/acaDzHZv40dZaz4cGAXayqbRMgbEOuiuiUijZL8IgDQvyeCNMFE3onBMYfny-kXA", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||||
|
}, pkg.MustDecode("q8x2zQg4YZbKpPqKlEBj_uxXD9vOBaZ852qOuIsl9QdO73I_UMNpuUoPLtunxUYl"), nil},
|
||||||
|
|
||||||
|
{"sample exec container multiple layers", fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0700},
|
||||||
|
|
||||||
|
"checksum": {Mode: fs.ModeDir | 0700},
|
||||||
|
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check": {Mode: 0400, Data: []byte{0}},
|
||||||
|
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb": {Mode: 0400, Data: []byte{}},
|
||||||
|
"checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK": {Mode: fs.ModeDir | 0500},
|
||||||
|
"checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK/check": {Mode: 0400, Data: []byte("layers")},
|
||||||
|
|
||||||
|
"identifier": {Mode: fs.ModeDir | 0700},
|
||||||
|
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
||||||
|
"identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
|
"identifier/rXLKjjYfGSyoWmuvEJooHkvGJIZaC0IAWnKGvtPZkM15gBxAgW7mIXcxRVNOXAr4": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK")},
|
||||||
|
"identifier/tfjrsVuBuFgzWgwz-yPppFtylYuC1VFWnKhyBiHbWTGkyz8lt7Ee9QXWaIHPXs4x": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||||
|
|
||||||
|
"temp": {Mode: fs.ModeDir | 0700},
|
||||||
|
"work": {Mode: fs.ModeDir | 0700},
|
||||||
|
}, []pkg.FlatEntry{
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"},
|
||||||
|
{Mode: 0400, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check", Data: []byte{0}},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
|
||||||
|
{Mode: 0400, Path: "checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb", Data: []byte{}},
|
||||||
|
{Mode: fs.ModeDir | 0500, Path: "checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK"},
|
||||||
|
{Mode: 0400, Path: "checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK/check", Data: []byte("layers")},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/nfeISfLeFDr1k-g3hpE1oZ440kTqDdfF8TDpoLdbTPqaMMIl95oiqcvqjRkMjubA", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/rXLKjjYfGSyoWmuvEJooHkvGJIZaC0IAWnKGvtPZkM15gBxAgW7mIXcxRVNOXAr4", Data: []byte("../checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK")},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/tfjrsVuBuFgzWgwz-yPppFtylYuC1VFWnKhyBiHbWTGkyz8lt7Ee9QXWaIHPXs4x", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "temp"},
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||||
|
}, pkg.MustDecode("SITnQ6PTV12PAQQjIuLUxkvsXQiC9Gq_HJQlcb4BPL5YnRHnx8lsW7PRM9YMLBsx"), nil},
|
||||||
|
|
||||||
|
{"sample file short", fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0700},
|
||||||
|
|
||||||
|
"checksum": {Mode: fs.ModeDir | 0700},
|
||||||
|
"checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX": {Mode: 0400, Data: []byte{0}},
|
||||||
|
|
||||||
|
"identifier": {Mode: fs.ModeDir | 0700},
|
||||||
|
"identifier/lIx_W4M7tVOcQ8jh08EJOfXf4brRmkEEjvUa7c17vVUzlmtUxlhhrgqmc9aZhjbn": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX")},
|
||||||
|
|
||||||
|
"work": {Mode: fs.ModeDir | 0700},
|
||||||
|
}, []pkg.FlatEntry{
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "."},
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "checksum"},
|
||||||
|
{Mode: 0400, Path: "checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX", Data: []byte{0}},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "identifier"},
|
||||||
|
{Mode: fs.ModeSymlink | 0777, Path: "identifier/lIx_W4M7tVOcQ8jh08EJOfXf4brRmkEEjvUa7c17vVUzlmtUxlhhrgqmc9aZhjbn", Data: []byte("../checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX")},
|
||||||
|
|
||||||
|
{Mode: fs.ModeDir | 0700, Path: "work"},
|
||||||
|
}, pkg.MustDecode("hnrfmJtivNKcgtETsKnU9gP_OwPgpNY3DSUJnmxnmeOODSO-YBvEBiTgieY4AAd7"), nil},
|
||||||
|
}
|
||||||
|
for _, tc := range testCases {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
t.Run("roundtrip", func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
var buf bytes.Buffer
|
||||||
|
if _, err := pkg.Flatten(
|
||||||
|
tc.fsys,
|
||||||
|
".",
|
||||||
|
&buf,
|
||||||
|
); !reflect.DeepEqual(err, tc.err) {
|
||||||
|
t.Fatalf("Flatten: error = %v, want %v", err, tc.err)
|
||||||
|
} else if tc.err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
s := pkg.NewDirScanner(bytes.NewReader(buf.Bytes()), true)
|
||||||
|
var got []pkg.FlatEntry
|
||||||
|
for s.Scan() {
|
||||||
|
got = append(got, *s.Entry())
|
||||||
|
}
|
||||||
|
if err := s.Err(); err != nil {
|
||||||
|
t.Fatalf("Err: error = %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !reflect.DeepEqual(got, tc.entries) {
|
||||||
|
t.Fatalf("Scan: %#v, want %#v", got, tc.entries)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
if tc.err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("hash", func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
if got, err := pkg.HashFS(tc.fsys, "."); err != nil {
|
||||||
|
t.Fatalf("HashFS: error = %v", err)
|
||||||
|
} else if got != tc.sum {
|
||||||
|
t.Fatalf("HashFS: %v", &pkg.ChecksumMismatchError{
|
||||||
|
Got: got,
|
||||||
|
Want: tc.sum,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
378
internal/pkg/exec.go
Normal file
@@ -0,0 +1,378 @@
package pkg
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path"
|
||||||
|
"slices"
|
||||||
|
"strconv"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"hakurei.app/container"
|
||||||
|
"hakurei.app/container/check"
|
||||||
|
"hakurei.app/container/fhs"
|
||||||
|
"hakurei.app/container/std"
|
||||||
|
"hakurei.app/message"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AbsWork is the container pathname [CureContext.GetWorkDir] is mounted on.
|
||||||
|
var AbsWork = fhs.AbsRoot.Append("work/")
|
||||||
|
|
||||||
|
// ExecPath is a slice of [Artifact] and the [check.Absolute] pathname they are
// made available at in the container.
|
||||||
|
type ExecPath struct {
|
||||||
|
// Pathname in the container mount namespace.
|
||||||
|
P *check.Absolute
|
||||||
|
// Artifacts to mount on the pathname, must contain at least one [Artifact].
|
||||||
|
// If there are multiple entries or W is true, P is set up as an overlay
|
||||||
|
// mount, and entries of A must not implement [File].
|
||||||
|
A []Artifact
|
||||||
|
// Whether to make the mount point writable via the temp directory.
|
||||||
|
W bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Path returns a populated [ExecPath].
|
||||||
|
func Path(pathname *check.Absolute, writable bool, a ...Artifact) ExecPath {
|
||||||
|
return ExecPath{pathname, a, writable}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustPath is like [Path], but takes a string pathname via [check.MustAbs].
|
||||||
|
func MustPath(pathname string, writable bool, a ...Artifact) ExecPath {
|
||||||
|
return ExecPath{check.MustAbs(pathname), a, writable}
|
||||||
|
}
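// The snippet below is a hypothetical sketch, not part of this change,
// illustrating how Path and MustPath might be combined to describe mount
// points for an exec artifact; the target pathnames and the rootfs/tool
// artifact values are illustrative assumptions.
func examplePaths(rootfs, tool Artifact) []ExecPath {
	return []ExecPath{
		// a single artifact on a read-only bind mount
		Path(check.MustAbs("/opt"), false, tool),
		// two layers merged into a writable overlay mount
		MustPath("/", true, rootfs, tool),
	}
}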
|
||||||
|
|
||||||
|
const (
|
||||||
|
// ExecTimeoutDefault replaces out of range [NewExec] timeout values.
|
||||||
|
ExecTimeoutDefault = 15 * time.Minute
|
||||||
|
// ExecTimeoutMax is the arbitrary upper bound of [NewExec] timeout.
|
||||||
|
ExecTimeoutMax = 48 * time.Hour
|
||||||
|
)
|
||||||
|
|
||||||
|
// An execArtifact is an [Artifact] that produces its output by running a
// program that is part of another [Artifact] in a [container].
|
||||||
|
//
|
||||||
|
// Methods of execArtifact do not modify any struct field or underlying arrays
// referred to by slices.
|
||||||
|
type execArtifact struct {
|
||||||
|
// Caller-supplied user-facing reporting name, guaranteed to be nonzero
|
||||||
|
// during initialisation.
|
||||||
|
name string
|
||||||
|
// Caller-supplied inner mount points.
|
||||||
|
paths []ExecPath
|
||||||
|
|
||||||
|
// Passed through to [container.Params].
|
||||||
|
dir *check.Absolute
|
||||||
|
// Passed through to [container.Params].
|
||||||
|
env []string
|
||||||
|
// Passed through to [container.Params].
|
||||||
|
path *check.Absolute
|
||||||
|
// Passed through to [container.Params].
|
||||||
|
args []string
|
||||||
|
|
||||||
|
// Duration the initial process is allowed to run. The zero value is
|
||||||
|
// equivalent to [ExecTimeoutDefault]. This value is never encoded in Params
|
||||||
|
// because it cannot affect outcome.
|
||||||
|
timeout time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ fmt.Stringer = new(execArtifact)
|
||||||
|
|
||||||
|
// execNetArtifact is like execArtifact but implements [KnownChecksum] and has
|
||||||
|
// its resulting container keep the host net namespace.
|
||||||
|
type execNetArtifact struct {
|
||||||
|
checksum Checksum
|
||||||
|
|
||||||
|
execArtifact
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ KnownChecksum = new(execNetArtifact)
|
||||||
|
|
||||||
|
// Checksum returns the caller-supplied checksum.
|
||||||
|
func (a *execNetArtifact) Checksum() Checksum { return a.checksum }
|
||||||
|
|
||||||
|
// Kind returns the hardcoded [Kind] constant.
|
||||||
|
func (a *execNetArtifact) Kind() Kind { return KindExecNet }
|
||||||
|
|
||||||
|
// Params is [Checksum] concatenated with [KindExec] params.
|
||||||
|
func (a *execNetArtifact) Params() []byte {
|
||||||
|
return slices.Concat(a.checksum[:], a.execArtifact.Params())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cure cures the [Artifact] in the container described by the caller. The
|
||||||
|
// container retains host networking.
|
||||||
|
func (a *execNetArtifact) Cure(f *FContext) error {
|
||||||
|
return a.cure(f, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewExec returns a new [Artifact] that executes the program path in a
|
||||||
|
// container with specified paths bind mounted read-only in order. A private
|
||||||
|
// instance of /proc and /dev is made available to the container.
|
||||||
|
//
|
||||||
|
// The working and temporary directories are both created and mounted writable
|
||||||
|
// on [AbsWork] and [fhs.AbsTmp] respectively. If one or more paths target
|
||||||
|
// [AbsWork], the final such entry is set up as a writable overlay mount on
// /work for which the upperdir is the host side work directory. In this
// configuration, the W field is ignored, and the program must avoid causing
// whiteout files to be created. Cure fails if upperdir ends up with entries
// other than directories, regular files, or symlinks.
|
||||||
|
//
|
||||||
|
// If checksum is non-nil, the resulting [Artifact] implements [KnownChecksum]
|
||||||
|
// and its container runs in the host net namespace.
|
||||||
|
//
|
||||||
|
// The container is allowed to run for the specified duration before the initial
|
||||||
|
// process and all processes originating from it are terminated. A zero or
// negative timeout value is equivalent to [ExecTimeoutDefault], and a timeout value
|
||||||
|
// greater than [ExecTimeoutMax] is equivalent to [ExecTimeoutMax].
|
||||||
|
//
|
||||||
|
// The user-facing name is not accessible from the container and does not
|
||||||
|
// affect curing outcome. Because of this, it is omitted from parameter data
|
||||||
|
// for computing identifier.
|
||||||
|
func NewExec(
|
||||||
|
name string,
|
||||||
|
checksum *Checksum,
|
||||||
|
timeout time.Duration,
|
||||||
|
|
||||||
|
dir *check.Absolute,
|
||||||
|
env []string,
|
||||||
|
pathname *check.Absolute,
|
||||||
|
args []string,
|
||||||
|
|
||||||
|
paths ...ExecPath,
|
||||||
|
) Artifact {
|
||||||
|
if name == "" {
|
||||||
|
name = "exec-" + path.Base(pathname.String())
|
||||||
|
}
|
||||||
|
if timeout <= 0 {
|
||||||
|
timeout = ExecTimeoutDefault
|
||||||
|
}
|
||||||
|
if timeout > ExecTimeoutMax {
|
||||||
|
timeout = ExecTimeoutMax
|
||||||
|
}
|
||||||
|
a := execArtifact{name, paths, dir, env, pathname, args, timeout}
|
||||||
|
if checksum == nil {
|
||||||
|
return &a
|
||||||
|
}
|
||||||
|
return &execNetArtifact{*checksum, a}
|
||||||
|
}
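// A hypothetical usage sketch, not part of this change: cure by running a
// program provided by another artifact, writing results under /work. The
// tool artifact, program path, and environment below are assumptions for
// illustration only.
func exampleNewExec(tool Artifact) Artifact {
	return NewExec(
		"example", nil, 0, // reporting name, offline, default timeout
		AbsWork,                        // working directory in the container
		[]string{"EXAMPLE_ENV=1"},      // environment
		check.MustAbs("/opt/bin/tool"), // program to execute
		[]string{"tool"},               // arguments, including argv[0]
		MustPath("/opt", false, tool),  // read-only bind of the tool artifact
	)
}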
|
||||||
|
|
||||||
|
// Kind returns the hardcoded [Kind] constant.
|
||||||
|
func (a *execArtifact) Kind() Kind { return KindExec }
|
||||||
|
|
||||||
|
// Params returns paths, working directory, environment, executable pathname
// and args concatenated together.
|
||||||
|
func (a *execArtifact) Params() []byte {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
for _, p := range a.paths {
|
||||||
|
if p.W {
|
||||||
|
buf.WriteByte(1)
|
||||||
|
} else {
|
||||||
|
buf.WriteByte(0)
|
||||||
|
}
|
||||||
|
if p.P != nil {
|
||||||
|
buf.WriteString(p.P.String())
|
||||||
|
} else {
|
||||||
|
buf.WriteString("invalid P\x00")
|
||||||
|
}
|
||||||
|
buf.WriteByte(0)
|
||||||
|
for _, d := range p.A {
|
||||||
|
id := Ident(d)
|
||||||
|
buf.Write(id[:])
|
||||||
|
}
|
||||||
|
buf.WriteByte(0)
|
||||||
|
}
|
||||||
|
buf.WriteByte(0)
|
||||||
|
buf.WriteString(a.dir.String())
|
||||||
|
buf.WriteByte(0)
|
||||||
|
for _, e := range a.env {
|
||||||
|
buf.WriteString(e)
|
||||||
|
}
|
||||||
|
buf.WriteByte(0)
|
||||||
|
buf.WriteString(a.path.String())
|
||||||
|
buf.WriteByte(0)
|
||||||
|
for _, arg := range a.args {
|
||||||
|
buf.WriteString(arg)
|
||||||
|
}
|
||||||
|
return buf.Bytes()
|
||||||
|
}
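// A hypothetical sketch, not part of this change: because the reporting name
// and timeout are excluded from Params, exec artifacts differing only in
// those fields share the same identifier. The tool artifact is an assumption.
func exampleSameIdent(tool Artifact) bool {
	a := NewExec("one", nil, 0,
		AbsWork, nil, check.MustAbs("/opt/bin/tool"), []string{"tool"},
		MustPath("/opt", false, tool))
	b := NewExec("two", nil, time.Minute,
		AbsWork, nil, check.MustAbs("/opt/bin/tool"), []string{"tool"},
		MustPath("/opt", false, tool))
	return Ident(a) == Ident(b) // true
}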
|
||||||
|
|
||||||
|
// Dependencies returns a slice of all artifacts collected from caller-supplied
|
||||||
|
// [ExecPath].
|
||||||
|
func (a *execArtifact) Dependencies() []Artifact {
|
||||||
|
artifacts := make([][]Artifact, 0, len(a.paths))
|
||||||
|
for _, p := range a.paths {
|
||||||
|
artifacts = append(artifacts, p.A)
|
||||||
|
}
|
||||||
|
return slices.Concat(artifacts...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns the caller-supplied reporting name.
|
||||||
|
func (a *execArtifact) String() string { return a.name }
|
||||||
|
|
||||||
|
// Cure cures the [Artifact] in the container described by the caller.
|
||||||
|
func (a *execArtifact) Cure(f *FContext) (err error) {
|
||||||
|
return a.cure(f, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
// execWaitDelay is passed through to [container.Params].
|
||||||
|
execWaitDelay = time.Nanosecond
|
||||||
|
)
|
||||||
|
|
||||||
|
// scanVerbose prefixes program output for a verbose [message.Msg].
|
||||||
|
func scanVerbose(
|
||||||
|
msg message.Msg,
|
||||||
|
done chan<- struct{},
|
||||||
|
prefix string,
|
||||||
|
r io.Reader,
|
||||||
|
) {
|
||||||
|
defer close(done)
|
||||||
|
s := bufio.NewScanner(r)
|
||||||
|
for s.Scan() {
|
||||||
|
msg.Verbose(prefix, s.Text())
|
||||||
|
}
|
||||||
|
if err := s.Err(); err != nil && !errors.Is(err, os.ErrClosed) {
|
||||||
|
msg.Verbose("*"+prefix, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// cure is like Cure but optionally keeps the host net namespace. This is used
// for the [KnownChecksum] variant where networking is allowed.
|
||||||
|
func (a *execArtifact) cure(f *FContext, hostNet bool) (err error) {
|
||||||
|
overlayWorkIndex := -1
|
||||||
|
for i, p := range a.paths {
|
||||||
|
if p.P == nil || len(p.A) == 0 {
|
||||||
|
return os.ErrInvalid
|
||||||
|
}
|
||||||
|
if p.P.Is(AbsWork) {
|
||||||
|
overlayWorkIndex = i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var artifactCount int
|
||||||
|
for _, p := range a.paths {
|
||||||
|
artifactCount += len(p.A)
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(f.Unwrap(), a.timeout)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
z := container.New(ctx, f.GetMessage())
|
||||||
|
z.WaitDelay = execWaitDelay
|
||||||
|
z.SeccompPresets |= std.PresetStrict
|
||||||
|
z.ParentPerm = 0700
|
||||||
|
z.HostNet = hostNet
|
||||||
|
z.Hostname = "cure"
|
||||||
|
if z.HostNet {
|
||||||
|
z.Hostname = "cure-net"
|
||||||
|
}
|
||||||
|
z.Uid, z.Gid = (1<<10)-1, (1<<10)-1
|
||||||
|
if msg := f.GetMessage(); msg.IsVerbose() {
|
||||||
|
var stdout, stderr io.ReadCloser
|
||||||
|
if stdout, err = z.StdoutPipe(); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if stderr, err = z.StderrPipe(); err != nil {
|
||||||
|
_ = stdout.Close()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if err != nil && !errors.As(err, new(*exec.ExitError)) {
|
||||||
|
_ = stdout.Close()
|
||||||
|
_ = stderr.Close()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
stdoutDone, stderrDone := make(chan struct{}), make(chan struct{})
|
||||||
|
go scanVerbose(msg, stdoutDone, "("+a.name+":1)", stdout)
|
||||||
|
go scanVerbose(msg, stderrDone, "("+a.name+":2)", stderr)
|
||||||
|
defer func() { <-stdoutDone; <-stderrDone }()
|
||||||
|
}
|
||||||
|
|
||||||
|
z.Dir, z.Env, z.Path, z.Args = a.dir, a.env, a.path, a.args
|
||||||
|
z.Grow(len(a.paths) + 4)
|
||||||
|
|
||||||
|
temp, work := f.GetTempDir(), f.GetWorkDir()
|
||||||
|
for i, b := range a.paths {
|
||||||
|
layers := make([]*check.Absolute, len(b.A))
|
||||||
|
for j, d := range b.A {
|
||||||
|
layers[j] = f.Pathname(d)
|
||||||
|
}
|
||||||
|
|
||||||
|
if i == overlayWorkIndex {
|
||||||
|
if err = os.MkdirAll(work.String(), 0700); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
tempWork := temp.Append(".work")
|
||||||
|
if err = os.MkdirAll(tempWork.String(), 0700); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
z.Overlay(
|
||||||
|
AbsWork,
|
||||||
|
work,
|
||||||
|
tempWork,
|
||||||
|
layers...,
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if a.paths[i].W {
|
||||||
|
tempUpper, tempWork := temp.Append(
|
||||||
|
".upper", strconv.Itoa(i),
|
||||||
|
), temp.Append(
|
||||||
|
".work", strconv.Itoa(i),
|
||||||
|
)
|
||||||
|
if err = os.MkdirAll(tempUpper.String(), 0700); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err = os.MkdirAll(tempWork.String(), 0700); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
z.Overlay(b.P, tempUpper, tempWork, layers...)
|
||||||
|
} else if len(layers) == 1 {
|
||||||
|
z.Bind(layers[0], b.P, 0)
|
||||||
|
} else {
|
||||||
|
z.OverlayReadonly(b.P, layers...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if overlayWorkIndex < 0 {
|
||||||
|
z.Bind(
|
||||||
|
work,
|
||||||
|
AbsWork,
|
||||||
|
std.BindWritable|std.BindEnsure,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
z.Bind(
|
||||||
|
f.GetTempDir(),
|
||||||
|
fhs.AbsTmp,
|
||||||
|
std.BindWritable|std.BindEnsure,
|
||||||
|
)
|
||||||
|
z.Proc(fhs.AbsProc).Dev(fhs.AbsDev, true)
|
||||||
|
|
||||||
|
if err = z.Start(); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err = z.Serve(); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err = z.Wait(); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// do not allow empty directories to succeed
|
||||||
|
for {
|
||||||
|
err = syscall.Rmdir(work.String())
|
||||||
|
if err != syscall.EINTR {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err != nil && errors.Is(err, syscall.ENOTEMPTY) {
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
297
internal/pkg/exec_test.go
Normal file
@@ -0,0 +1,297 @@
package pkg_test
|
||||||
|
|
||||||
|
//go:generate env CGO_ENABLED=0 go build -tags testtool -o testdata/testtool ./testdata
|
||||||
|
|
||||||
|
import (
|
||||||
|
_ "embed"
|
||||||
|
"encoding/gob"
|
||||||
|
"errors"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"slices"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"hakurei.app/container/check"
|
||||||
|
"hakurei.app/container/stub"
|
||||||
|
"hakurei.app/hst"
|
||||||
|
"hakurei.app/internal/pkg"
|
||||||
|
)
|
||||||
|
|
||||||
|
// testtoolBin is the container test tool binary made available to the
|
||||||
|
// execArtifact for testing its curing environment.
|
||||||
|
//
|
||||||
|
//go:embed testdata/testtool
|
||||||
|
var testtoolBin []byte
|
||||||
|
|
||||||
|
func TestExec(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
wantChecksumOffline := pkg.MustDecode(
|
||||||
|
"GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9",
|
||||||
|
)
|
||||||
|
|
||||||
|
checkWithCache(t, []cacheTestCase{
|
||||||
|
{"offline", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||||
|
c.SetStrict(true)
|
||||||
|
testtool, testtoolDestroy := newTesttool()
|
||||||
|
|
||||||
|
cureMany(t, c, []cureStep{
|
||||||
|
{"container", pkg.NewExec(
|
||||||
|
"", nil, 0,
|
||||||
|
pkg.AbsWork,
|
||||||
|
[]string{"HAKUREI_TEST=1"},
|
||||||
|
check.MustAbs("/opt/bin/testtool"),
|
||||||
|
[]string{"testtool"},
|
||||||
|
|
||||||
|
pkg.MustPath("/file", false, newStubFile(
|
||||||
|
pkg.KindHTTPGet,
|
||||||
|
pkg.ID{0xfe, 0},
|
||||||
|
nil,
|
||||||
|
nil, nil,
|
||||||
|
)),
|
||||||
|
pkg.MustPath("/.hakurei", false, stubArtifact{
|
||||||
|
kind: pkg.KindTar,
|
||||||
|
params: []byte("empty directory"),
|
||||||
|
cure: func(t *pkg.TContext) error {
|
||||||
|
return os.MkdirAll(t.GetWorkDir().String(), 0700)
|
||||||
|
},
|
||||||
|
}),
|
||||||
|
pkg.MustPath("/opt", false, testtool),
|
||||||
|
), ignorePathname, wantChecksumOffline, nil},
|
||||||
|
|
||||||
|
{"error passthrough", pkg.NewExec(
|
||||||
|
"", nil, 0,
|
||||||
|
pkg.AbsWork,
|
||||||
|
[]string{"HAKUREI_TEST=1"},
|
||||||
|
check.MustAbs("/opt/bin/testtool"),
|
||||||
|
[]string{"testtool"},
|
||||||
|
|
||||||
|
pkg.MustPath("/proc/nonexistent", false, stubArtifact{
|
||||||
|
kind: pkg.KindTar,
|
||||||
|
params: []byte("doomed artifact"),
|
||||||
|
cure: func(t *pkg.TContext) error {
|
||||||
|
return stub.UniqueError(0xcafe)
|
||||||
|
},
|
||||||
|
}),
|
||||||
|
), nil, pkg.Checksum{}, errors.Join(stub.UniqueError(0xcafe))},
|
||||||
|
|
||||||
|
{"invalid paths", pkg.NewExec(
|
||||||
|
"", nil, 0,
|
||||||
|
pkg.AbsWork,
|
||||||
|
[]string{"HAKUREI_TEST=1"},
|
||||||
|
check.MustAbs("/opt/bin/testtool"),
|
||||||
|
[]string{"testtool"},
|
||||||
|
|
||||||
|
pkg.ExecPath{},
|
||||||
|
), nil, pkg.Checksum{}, os.ErrInvalid},
|
||||||
|
})
|
||||||
|
|
||||||
|
// check init failure passthrough
|
||||||
|
var exitError *exec.ExitError
|
||||||
|
if _, _, err := c.Cure(pkg.NewExec(
|
||||||
|
"", nil, 0,
|
||||||
|
pkg.AbsWork,
|
||||||
|
nil,
|
||||||
|
check.MustAbs("/opt/bin/testtool"),
|
||||||
|
[]string{"testtool"},
|
||||||
|
)); !errors.As(err, &exitError) ||
|
||||||
|
exitError.ExitCode() != hst.ExitFailure {
|
||||||
|
t.Fatalf("Cure: error = %v, want init exit status 1", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
testtoolDestroy(t, base, c)
|
||||||
|
}, pkg.MustDecode("UiV6kMz7KrTsc_yphiyQzFLqjRanHxUOwrBMtkKuWo4mOO6WgPFAcoUEeSp7eVIW")},
|
||||||
|
|
||||||
|
{"net", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||||
|
c.SetStrict(true)
|
||||||
|
testtool, testtoolDestroy := newTesttool()
|
||||||
|
|
||||||
|
wantChecksum := pkg.MustDecode(
|
||||||
|
"a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W",
|
||||||
|
)
|
||||||
|
cureMany(t, c, []cureStep{
|
||||||
|
{"container", pkg.NewExec(
|
||||||
|
"", &wantChecksum, 0,
|
||||||
|
pkg.AbsWork,
|
||||||
|
[]string{"HAKUREI_TEST=1"},
|
||||||
|
check.MustAbs("/opt/bin/testtool"),
|
||||||
|
[]string{"testtool", "net"},
|
||||||
|
|
||||||
|
pkg.MustPath("/file", false, newStubFile(
|
||||||
|
pkg.KindHTTPGet,
|
||||||
|
pkg.ID{0xfe, 0},
|
||||||
|
nil,
|
||||||
|
nil, nil,
|
||||||
|
)),
|
||||||
|
pkg.MustPath("/.hakurei", false, stubArtifact{
|
||||||
|
kind: pkg.KindTar,
|
||||||
|
params: []byte("empty directory"),
|
||||||
|
cure: func(t *pkg.TContext) error {
|
||||||
|
return os.MkdirAll(t.GetWorkDir().String(), 0700)
|
||||||
|
},
|
||||||
|
}),
|
||||||
|
pkg.MustPath("/opt", false, testtool),
|
||||||
|
), ignorePathname, wantChecksum, nil},
|
||||||
|
})
|
||||||
|
|
||||||
|
testtoolDestroy(t, base, c)
|
||||||
|
}, pkg.MustDecode("ek4K-0d4iRSArkY2TCs3WK34DbiYeOmhE_4vsJTSu_6roY4ZF3YG6eKRooal-i1o")},
|
||||||
|
|
||||||
|
{"overlay root", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||||
|
c.SetStrict(true)
|
||||||
|
testtool, testtoolDestroy := newTesttool()
|
||||||
|
|
||||||
|
cureMany(t, c, []cureStep{
|
||||||
|
{"container", pkg.NewExec(
|
||||||
|
"", nil, 0,
|
||||||
|
pkg.AbsWork,
|
||||||
|
[]string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"},
|
||||||
|
check.MustAbs("/opt/bin/testtool"),
|
||||||
|
[]string{"testtool"},
|
||||||
|
|
||||||
|
pkg.MustPath("/", true, stubArtifact{
|
||||||
|
kind: pkg.KindTar,
|
||||||
|
params: []byte("empty directory"),
|
||||||
|
cure: func(t *pkg.TContext) error {
|
||||||
|
return os.MkdirAll(t.GetWorkDir().String(), 0700)
|
||||||
|
},
|
||||||
|
}),
|
||||||
|
pkg.MustPath("/opt", false, testtool),
|
||||||
|
), ignorePathname, wantChecksumOffline, nil},
|
||||||
|
})
|
||||||
|
|
||||||
|
testtoolDestroy(t, base, c)
|
||||||
|
}, pkg.MustDecode("VIqqpf0ip9jcyw63i6E8lCMGUcLivQBe4Bevt3WusNac-1MSy5bzB647qGUBzl-W")},
|
||||||
|
|
||||||
|
{"overlay work", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||||
|
c.SetStrict(true)
|
||||||
|
testtool, testtoolDestroy := newTesttool()
|
||||||
|
|
||||||
|
cureMany(t, c, []cureStep{
|
||||||
|
{"container", pkg.NewExec(
|
||||||
|
"", nil, 0,
|
||||||
|
pkg.AbsWork,
|
||||||
|
[]string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"},
|
||||||
|
check.MustAbs("/work/bin/testtool"),
|
||||||
|
[]string{"testtool"},
|
||||||
|
|
||||||
|
pkg.MustPath("/", true, stubArtifact{
|
||||||
|
kind: pkg.KindTar,
|
||||||
|
params: []byte("empty directory"),
|
||||||
|
cure: func(t *pkg.TContext) error {
|
||||||
|
return os.MkdirAll(t.GetWorkDir().String(), 0700)
|
||||||
|
},
|
||||||
|
}), pkg.MustPath("/work/", false, stubArtifact{
|
||||||
|
kind: pkg.KindTar,
|
||||||
|
params: []byte("empty directory"),
|
||||||
|
cure: func(t *pkg.TContext) error {
|
||||||
|
return os.MkdirAll(t.GetWorkDir().String(), 0700)
|
||||||
|
},
|
||||||
|
}), pkg.Path(pkg.AbsWork, false /* ignored */, testtool),
|
||||||
|
), ignorePathname, wantChecksumOffline, nil},
|
||||||
|
})
|
||||||
|
|
||||||
|
testtoolDestroy(t, base, c)
|
||||||
|
}, pkg.MustDecode("q8x2zQg4YZbKpPqKlEBj_uxXD9vOBaZ852qOuIsl9QdO73I_UMNpuUoPLtunxUYl")},
|
||||||
|
|
||||||
|
{"multiple layers", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||||
|
c.SetStrict(true)
|
||||||
|
testtool, testtoolDestroy := newTesttool()
|
||||||
|
|
||||||
|
cureMany(t, c, []cureStep{
|
||||||
|
{"container", pkg.NewExec(
|
||||||
|
"", nil, 0,
|
||||||
|
pkg.AbsWork,
|
||||||
|
[]string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"},
|
||||||
|
check.MustAbs("/opt/bin/testtool"),
|
||||||
|
[]string{"testtool", "layers"},
|
||||||
|
|
||||||
|
pkg.MustPath("/", true, stubArtifact{
|
||||||
|
kind: pkg.KindTar,
|
||||||
|
params: []byte("empty directory"),
|
||||||
|
cure: func(t *pkg.TContext) error {
|
||||||
|
return os.MkdirAll(t.GetWorkDir().String(), 0700)
|
||||||
|
},
|
||||||
|
}, stubArtifactF{
|
||||||
|
kind: pkg.KindExec,
|
||||||
|
params: []byte("test sample with dependencies"),
|
||||||
|
|
||||||
|
deps: slices.Repeat([]pkg.Artifact{newStubFile(
|
||||||
|
pkg.KindHTTPGet,
|
||||||
|
pkg.ID{0xfe, 0},
|
||||||
|
nil,
|
||||||
|
nil, nil,
|
||||||
|
), stubArtifact{
|
||||||
|
kind: pkg.KindTar,
|
||||||
|
params: []byte("empty directory"),
|
||||||
|
|
||||||
|
// this is queued and might run instead of the other
|
||||||
|
// one so do not leave it as nil
|
||||||
|
cure: func(t *pkg.TContext) error {
|
||||||
|
return os.MkdirAll(t.GetWorkDir().String(), 0700)
|
||||||
|
},
|
||||||
|
}}, 1<<5 /* concurrent cache hits */), cure: func(f *pkg.FContext) error {
|
||||||
|
work := f.GetWorkDir()
|
||||||
|
if err := os.MkdirAll(work.String(), 0700); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return os.WriteFile(work.Append("check").String(), []byte("layers"), 0400)
|
||||||
|
},
|
||||||
|
}),
|
||||||
|
pkg.MustPath("/opt", false, testtool),
|
||||||
|
), ignorePathname, wantChecksumOffline, nil},
|
||||||
|
})
|
||||||
|
|
||||||
|
testtoolDestroy(t, base, c)
|
||||||
|
}, pkg.MustDecode("SITnQ6PTV12PAQQjIuLUxkvsXQiC9Gq_HJQlcb4BPL5YnRHnx8lsW7PRM9YMLBsx")},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// newTesttool returns an [Artifact] that cures into testtoolBin. The returned
|
||||||
|
// function must be called at the end of the test but not deferred.
|
||||||
|
func newTesttool() (
|
||||||
|
testtool pkg.Artifact,
|
||||||
|
testtoolDestroy func(t *testing.T, base *check.Absolute, c *pkg.Cache),
|
||||||
|
) {
|
||||||
|
// testtoolBin is built during go:generate and is not deterministic
|
||||||
|
testtool = overrideIdent{pkg.ID{0xfe, 0xff}, stubArtifact{
|
||||||
|
kind: pkg.KindTar,
|
||||||
|
cure: func(t *pkg.TContext) error {
|
||||||
|
work := t.GetWorkDir()
|
||||||
|
if err := os.MkdirAll(
|
||||||
|
work.Append("bin").String(),
|
||||||
|
0700,
|
||||||
|
); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if ift, err := net.Interfaces(); err != nil {
|
||||||
|
return err
|
||||||
|
} else {
|
||||||
|
var f *os.File
|
||||||
|
if f, err = os.Create(t.GetWorkDir().Append(
|
||||||
|
"ift",
|
||||||
|
).String()); err != nil {
|
||||||
|
return err
|
||||||
|
} else {
|
||||||
|
err = gob.NewEncoder(f).Encode(ift)
|
||||||
|
closeErr := f.Close()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if closeErr != nil {
|
||||||
|
return closeErr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return os.WriteFile(t.GetWorkDir().Append(
|
||||||
|
"bin",
|
||||||
|
"testtool",
|
||||||
|
).String(), testtoolBin, 0500)
|
||||||
|
},
|
||||||
|
}}
|
||||||
|
testtoolDestroy = newDestroyArtifactFunc(testtool)
|
||||||
|
return
|
||||||
|
}
|
||||||
54
internal/pkg/file.go
Normal file
@@ -0,0 +1,54 @@
package pkg
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/sha512"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A fileArtifact is an [Artifact] that cures into data known ahead of time.
|
||||||
|
type fileArtifact []byte
|
||||||
|
|
||||||
|
var _ KnownChecksum = fileArtifact{}
|
||||||
|
|
||||||
|
// fileArtifactNamed embeds fileArtifact alongside a caller-supplied name.
|
||||||
|
type fileArtifactNamed struct {
|
||||||
|
fileArtifact
|
||||||
|
// Caller-supplied user-facing reporting name.
|
||||||
|
name string
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ fmt.Stringer = fileArtifactNamed{}
|
||||||
|
|
||||||
|
// String returns the caller-supplied reporting name.
|
||||||
|
func (a fileArtifactNamed) String() string { return a.name }
|
||||||
|
|
||||||
|
// NewFile returns a [File] that cures into a caller-supplied byte slice.
|
||||||
|
//
|
||||||
|
// Caller must not modify data after NewFile returns.
|
||||||
|
func NewFile(name string, data []byte) File {
|
||||||
|
f := fileArtifact(data)
|
||||||
|
if name != "" {
|
||||||
|
return fileArtifactNamed{f, name}
|
||||||
|
}
|
||||||
|
return f
|
||||||
|
}
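// A hypothetical usage sketch, not part of this change: NewFile wraps
// in-memory data as a [File], and Cure hands the same bytes back without
// touching the filesystem.
func exampleNewFile(ctx context.Context) ([]byte, error) {
	f := NewFile("hello", []byte("hello, world\n"))
	return f.Cure(ctx)
}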
|
||||||
|
|
||||||
|
// Kind returns the hardcoded [Kind] constant.
|
||||||
|
func (a fileArtifact) Kind() Kind { return KindFile }
|
||||||
|
|
||||||
|
// Params returns the caller-supplied data.
|
||||||
|
func (a fileArtifact) Params() []byte { return a }
|
||||||
|
|
||||||
|
// Dependencies returns a nil slice.
|
||||||
|
func (a fileArtifact) Dependencies() []Artifact { return nil }
|
||||||
|
|
||||||
|
// Checksum computes and returns the checksum of caller-supplied data.
|
||||||
|
func (a fileArtifact) Checksum() Checksum {
|
||||||
|
h := sha512.New384()
|
||||||
|
h.Write(a)
|
||||||
|
return Checksum(h.Sum(nil))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cure returns the caller-supplied data.
|
||||||
|
func (a fileArtifact) Cure(context.Context) ([]byte, error) { return a, nil }
|
||||||
29
internal/pkg/file_test.go
Normal file
@@ -0,0 +1,29 @@
package pkg_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"hakurei.app/container/check"
|
||||||
|
"hakurei.app/internal/pkg"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestFile(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
checkWithCache(t, []cacheTestCase{
|
||||||
|
{"file", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||||
|
c.SetStrict(true)
|
||||||
|
|
||||||
|
cureMany(t, c, []cureStep{
|
||||||
|
{"short", pkg.NewFile("null", []byte{0}), base.Append(
|
||||||
|
"identifier",
|
||||||
|
"lIx_W4M7tVOcQ8jh08EJOfXf4brRmkEEjvUa7c17vVUzlmtUxlhhrgqmc9aZhjbn",
|
||||||
|
), pkg.MustDecode(
|
||||||
|
"vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX",
|
||||||
|
), nil},
|
||||||
|
})
|
||||||
|
}, pkg.MustDecode(
|
||||||
|
"hnrfmJtivNKcgtETsKnU9gP_OwPgpNY3DSUJnmxnmeOODSO-YBvEBiTgieY4AAd7",
|
||||||
|
)},
|
||||||
|
})
|
||||||
|
}
|
||||||
124
internal/pkg/net.go
Normal file
@@ -0,0 +1,124 @@
package pkg
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/sha512"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"path"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// An httpArtifact is an [Artifact] backed by a [http] url string. The method is
|
||||||
|
// hardcoded as [http.MethodGet]. Request body is not allowed because it cannot
|
||||||
|
// be deterministically represented by Params.
|
||||||
|
type httpArtifact struct {
|
||||||
|
// Caller-supplied url string.
|
||||||
|
url string
|
||||||
|
|
||||||
|
// Caller-supplied checksum of the response body. This is validated during
|
||||||
|
// curing and the first call to Cure.
|
||||||
|
checksum Checksum
|
||||||
|
|
||||||
|
// doFunc is the Do method of [http.Client] supplied by the caller.
|
||||||
|
doFunc func(req *http.Request) (*http.Response, error)
|
||||||
|
|
||||||
|
// Response body read to EOF.
|
||||||
|
data []byte
|
||||||
|
|
||||||
|
// Synchronises access to data.
|
||||||
|
mu sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ KnownChecksum = new(httpArtifact)
|
||||||
|
var _ fmt.Stringer = new(httpArtifact)
|
||||||
|
|
||||||
|
// NewHTTPGet returns a new [File] backed by the supplied client. A GET request
|
||||||
|
// is set up for url. If c is nil, [http.DefaultClient] is used instead.
|
||||||
|
func NewHTTPGet(
|
||||||
|
c *http.Client,
|
||||||
|
url string,
|
||||||
|
checksum Checksum,
|
||||||
|
) File {
|
||||||
|
if c == nil {
|
||||||
|
c = http.DefaultClient
|
||||||
|
}
|
||||||
|
return &httpArtifact{url: url, checksum: checksum, doFunc: c.Do}
|
||||||
|
}
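// A hypothetical usage sketch, not part of this change: fetch a file over
// HTTP and have Cure validate it against a pinned checksum. The URL and the
// checksum literal are placeholders, not real values.
func exampleHTTPGet(ctx context.Context) ([]byte, error) {
	f := NewHTTPGet(
		nil, // nil selects http.DefaultClient
		"https://example.org/sample.tar",
		MustDecode("GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"),
	)
	// Cure performs the GET request and returns a ChecksumMismatchError if
	// the response body does not hash to the supplied checksum.
	return f.Cure(ctx)
}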
|
||||||
|
|
||||||
|
// Kind returns the hardcoded [Kind] constant.
|
||||||
|
func (a *httpArtifact) Kind() Kind { return KindHTTPGet }
|
||||||
|
|
||||||
|
// Params returns the backing url string. Context is not represented as it does
|
||||||
|
// not affect [Cache.Cure] outcome.
|
||||||
|
func (a *httpArtifact) Params() []byte { return []byte(a.url) }
|
||||||
|
|
||||||
|
// Dependencies returns a nil slice.
|
||||||
|
func (a *httpArtifact) Dependencies() []Artifact { return nil }
|
||||||
|
|
||||||
|
// Checksum returns the caller-supplied checksum.
|
||||||
|
func (a *httpArtifact) Checksum() Checksum { return a.checksum }
|
||||||
|
|
||||||
|
// String returns [path.Base] over the backing url.
|
||||||
|
func (a *httpArtifact) String() string { return path.Base(a.url) }
|
||||||
|
|
||||||
|
// ResponseStatusError is returned for a response from an [http.Client] with
// a status code other than [http.StatusOK].
|
||||||
|
type ResponseStatusError int
|
||||||
|
|
||||||
|
func (e ResponseStatusError) Error() string {
|
||||||
|
return "the requested URL returned non-OK status: " + http.StatusText(int(e))
|
||||||
|
}
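// A hypothetical sketch, not part of this change: callers can recover the
// original status code from the error value. The errors import is assumed
// here in addition to the imports above.
func exampleIsNotFound(err error) bool {
	var statusErr ResponseStatusError
	return errors.As(err, &statusErr) && int(statusErr) == http.StatusNotFound
}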
|
||||||
|
|
||||||
|
// do sends a GET request for the caller-supplied url on the caller-supplied
// [http.Client], reads its response body to EOF, and returns the resulting bytes.
|
||||||
|
func (a *httpArtifact) do(ctx context.Context) (data []byte, err error) {
|
||||||
|
var req *http.Request
|
||||||
|
req, err = http.NewRequestWithContext(ctx, http.MethodGet, a.url, nil)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var resp *http.Response
|
||||||
|
if resp, err = a.doFunc(req); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
_ = resp.Body.Close()
|
||||||
|
return nil, ResponseStatusError(resp.StatusCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
if data, err = io.ReadAll(resp.Body); err != nil {
|
||||||
|
_ = resp.Body.Close()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
err = resp.Body.Close()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cure completes the http request and returns the resulting response body read
|
||||||
|
// to EOF. Cure does not interact with the filesystem.
|
||||||
|
func (a *httpArtifact) Cure(ctx context.Context) (data []byte, err error) {
|
||||||
|
a.mu.Lock()
|
||||||
|
defer a.mu.Unlock()
|
||||||
|
|
||||||
|
if a.data != nil {
|
||||||
|
// validated by cache or a previous call to Cure
|
||||||
|
return a.data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if data, err = a.do(ctx); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
h := sha512.New384()
|
||||||
|
h.Write(data)
|
||||||
|
if got := (Checksum)(h.Sum(nil)); got != a.checksum {
|
||||||
|
return nil, &ChecksumMismatchError{got, a.checksum}
|
||||||
|
}
|
||||||
|
a.data = data
|
||||||
|
return
|
||||||
|
}
|
||||||
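As a usage illustration only (ctx, msg, base and the expected checksum are assumed to exist elsewhere; the URL is a placeholder, none of this is part of the change itself), the constructor above composes with the cache roughly like this:

	// Sketch: fetch a file artifact through the cache under stated assumptions.
	c, err := pkg.New(ctx, msg, 0, base)
	if err != nil {
		return err
	}
	defer c.Close()

	f := pkg.NewHTTPGet(nil, // nil falls back to http.DefaultClient
		"https://example.org/testdata", // placeholder URL
		expected,                       // assumed pkg.Checksum of the response body
	)
	pathname, checksum, err := c.Cure(f) // body is validated against expected before it is cached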
133
internal/pkg/net_test.go
Normal file
133
internal/pkg/net_test.go
Normal file
@@ -0,0 +1,133 @@
package pkg_test

import (
	"crypto/sha512"
	"net/http"
	"reflect"
	"testing"
	"testing/fstest"

	"hakurei.app/container/check"
	"hakurei.app/internal/pkg"
)

func TestHTTPGet(t *testing.T) {
	t.Parallel()

	const testdata = "\x7f\xe1\x69\xa2\xdd\x63\x96\x26\x83\x79\x61\x8b\xf0\x3f\xd5\x16\x9a\x39\x3a\xdb\xcf\xb1\xbc\x8d\x33\xff\x75\xee\x62\x56\xa9\xf0\x27\xac\x13\x94\x69"

	testdataChecksum := func() pkg.Checksum {
		h := sha512.New384()
		h.Write([]byte(testdata))
		return (pkg.Checksum)(h.Sum(nil))
	}()

	var transport http.Transport
	client := http.Client{Transport: &transport}
	transport.RegisterProtocol("file", http.NewFileTransportFS(fstest.MapFS{
		"testdata": {Data: []byte(testdata), Mode: 0400},
	}))

	checkWithCache(t, []cacheTestCase{
		{"direct", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
			f := pkg.NewHTTPGet(
				&client,
				"file:///testdata",
				testdataChecksum,
			)
			wantIdent := pkg.KindHTTPGet.Ident([]byte("file:///testdata"))
			if got, err := f.Cure(t.Context()); err != nil {
				t.Fatalf("Cure: error = %v", err)
			} else if string(got) != testdata {
				t.Fatalf("Cure: %x, want %x", got, testdata)
			} else if gotIdent := pkg.Ident(f); gotIdent != wantIdent {
				t.Fatalf("Ident: %s, want %s", pkg.Encode(gotIdent), pkg.Encode(wantIdent))
			}

			// check direct validation
			f = pkg.NewHTTPGet(
				&client,
				"file:///testdata",
				pkg.Checksum{},
			)
			wantErrMismatch := &pkg.ChecksumMismatchError{
				Got: testdataChecksum,
			}
			if _, err := f.Cure(t.Context()); !reflect.DeepEqual(err, wantErrMismatch) {
				t.Fatalf("Cure: error = %#v, want %#v", err, wantErrMismatch)
			} else if gotIdent := pkg.Ident(f); gotIdent != wantIdent {
				t.Fatalf("Ident: %s, want %s", pkg.Encode(gotIdent), pkg.Encode(wantIdent))
			}

			// check direct response error
			f = pkg.NewHTTPGet(
				&client,
				"file:///nonexistent",
				pkg.Checksum{},
			)
			wantIdentNonexistent := pkg.KindHTTPGet.Ident([]byte("file:///nonexistent"))
			wantErrNotFound := pkg.ResponseStatusError(http.StatusNotFound)
			if _, err := f.Cure(t.Context()); !reflect.DeepEqual(err, wantErrNotFound) {
				t.Fatalf("Cure: error = %#v, want %#v", err, wantErrNotFound)
			} else if gotIdent := pkg.Ident(f); gotIdent != wantIdentNonexistent {
				t.Fatalf("Ident: %s, want %s", pkg.Encode(gotIdent), pkg.Encode(wantIdentNonexistent))
			}
		}, pkg.MustDecode("E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C")},

		{"cure", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
			f := pkg.NewHTTPGet(
				&client,
				"file:///testdata",
				testdataChecksum,
			)
			wantIdent := pkg.KindHTTPGet.Ident([]byte("file:///testdata"))
			wantPathname := base.Append(
				"identifier",
				pkg.Encode(wantIdent),
			)
			if pathname, checksum, err := c.Cure(f); err != nil {
				t.Fatalf("Cure: error = %v", err)
			} else if !pathname.Is(wantPathname) {
				t.Fatalf("Cure: %q, want %q", pathname, wantPathname)
			} else if checksum != testdataChecksum {
				t.Fatalf("Cure: %x, want %x", checksum, testdataChecksum)
			}

			if got, err := f.Cure(t.Context()); err != nil {
				t.Fatalf("Cure: error = %v", err)
			} else if string(got) != testdata {
				t.Fatalf("Cure: %x, want %x", got, testdata)
			} else if gotIdent := pkg.Ident(f); gotIdent != wantIdent {
				t.Fatalf("Ident: %s, want %s", pkg.Encode(gotIdent), pkg.Encode(wantIdent))
			}

			// check load from cache
			f = pkg.NewHTTPGet(
				&client,
				"file:///testdata",
				testdataChecksum,
			)
			if got, err := f.Cure(t.Context()); err != nil {
				t.Fatalf("Cure: error = %v", err)
			} else if string(got) != testdata {
				t.Fatalf("Cure: %x, want %x", got, testdata)
			} else if gotIdent := pkg.Ident(f); gotIdent != wantIdent {
				t.Fatalf("Ident: %s, want %s", pkg.Encode(gotIdent), pkg.Encode(wantIdent))
			}

			// check error passthrough
			f = pkg.NewHTTPGet(
				&client,
				"file:///nonexistent",
				pkg.Checksum{},
			)
			wantIdentNonexistent := pkg.KindHTTPGet.Ident([]byte("file:///nonexistent"))
			wantErrNotFound := pkg.ResponseStatusError(http.StatusNotFound)
			if _, _, err := c.Cure(f); !reflect.DeepEqual(err, wantErrNotFound) {
				t.Fatalf("Pathname: error = %#v, want %#v", err, wantErrNotFound)
			} else if gotIdent := pkg.Ident(f); gotIdent != wantIdentNonexistent {
				t.Fatalf("Ident: %s, want %s", pkg.Encode(gotIdent), pkg.Encode(wantIdentNonexistent))
			}
		}, pkg.MustDecode("bqtn69RkV5E7V7GhhgCFjcvbxmaqrO8DywamM4Tyjf10F6EJBHjXiIa_tFRtF4iN")},
	})
}
1315
internal/pkg/pkg.go
Normal file
1315
internal/pkg/pkg.go
Normal file
File diff suppressed because it is too large
Load Diff
977
internal/pkg/pkg_test.go
Normal file
977
internal/pkg/pkg_test.go
Normal file
@@ -0,0 +1,977 @@
|
|||||||
|
package pkg_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"archive/tar"
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"crypto/sha512"
|
||||||
|
"encoding/base64"
|
||||||
|
"encoding/binary"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/fs"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"reflect"
|
||||||
|
"syscall"
|
||||||
|
"testing"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"hakurei.app/container"
|
||||||
|
"hakurei.app/container/check"
|
||||||
|
"hakurei.app/container/stub"
|
||||||
|
"hakurei.app/internal/pkg"
|
||||||
|
"hakurei.app/message"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestMain(m *testing.M) { container.TryArgv0(nil); os.Exit(m.Run()) }
|
||||||
|
|
||||||
|
// overrideIdent overrides the ID method of [Artifact].
|
||||||
|
type overrideIdent struct {
|
||||||
|
id pkg.ID
|
||||||
|
pkg.TrivialArtifact
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a overrideIdent) ID() pkg.ID { return a.id }
|
||||||
|
|
||||||
|
// overrideIdentFile overrides the ID method of [File].
|
||||||
|
type overrideIdentFile struct {
|
||||||
|
id pkg.ID
|
||||||
|
pkg.File
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a overrideIdentFile) ID() pkg.ID { return a.id }
|
||||||
|
|
||||||
|
// A knownIdentArtifact implements [pkg.KnownIdent] and [Artifact]
|
||||||
|
type knownIdentArtifact interface {
|
||||||
|
pkg.KnownIdent
|
||||||
|
pkg.TrivialArtifact
|
||||||
|
}
|
||||||
|
|
||||||
|
// A knownIdentFile implements [pkg.KnownIdent] and [File]
|
||||||
|
type knownIdentFile interface {
|
||||||
|
pkg.KnownIdent
|
||||||
|
pkg.File
|
||||||
|
}
|
||||||
|
|
||||||
|
// overrideChecksum overrides the Checksum method of [Artifact].
|
||||||
|
type overrideChecksum struct {
|
||||||
|
checksum pkg.Checksum
|
||||||
|
knownIdentArtifact
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a overrideChecksum) Checksum() pkg.Checksum { return a.checksum }
|
||||||
|
|
||||||
|
// overrideChecksumFile overrides the Checksum method of [File].
|
||||||
|
type overrideChecksumFile struct {
|
||||||
|
checksum pkg.Checksum
|
||||||
|
knownIdentFile
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a overrideChecksumFile) Checksum() pkg.Checksum { return a.checksum }
|
||||||
|
|
||||||
|
// A stubArtifact implements [TrivialArtifact] with hardcoded behaviour.
|
||||||
|
type stubArtifact struct {
|
||||||
|
kind pkg.Kind
|
||||||
|
params []byte
|
||||||
|
deps []pkg.Artifact
|
||||||
|
|
||||||
|
cure func(t *pkg.TContext) error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a stubArtifact) Kind() pkg.Kind { return a.kind }
|
||||||
|
func (a stubArtifact) Params() []byte { return a.params }
|
||||||
|
func (a stubArtifact) Dependencies() []pkg.Artifact { return a.deps }
|
||||||
|
func (a stubArtifact) Cure(t *pkg.TContext) error { return a.cure(t) }
|
||||||
|
|
||||||
|
// A stubArtifactF implements [FloodArtifact] with hardcoded behaviour.
|
||||||
|
type stubArtifactF struct {
|
||||||
|
kind pkg.Kind
|
||||||
|
params []byte
|
||||||
|
deps []pkg.Artifact
|
||||||
|
|
||||||
|
cure func(f *pkg.FContext) error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a stubArtifactF) Kind() pkg.Kind { return a.kind }
|
||||||
|
func (a stubArtifactF) Params() []byte { return a.params }
|
||||||
|
func (a stubArtifactF) Dependencies() []pkg.Artifact { return a.deps }
|
||||||
|
func (a stubArtifactF) Cure(f *pkg.FContext) error { return a.cure(f) }
|
||||||
|
|
||||||
|
// A stubFile implements [File] with hardcoded behaviour.
|
||||||
|
type stubFile struct {
|
||||||
|
data []byte
|
||||||
|
err error
|
||||||
|
|
||||||
|
stubArtifact
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a stubFile) Cure(context.Context) ([]byte, error) { return a.data, a.err }
|
||||||
|
|
||||||
|
// newStubFile returns an implementation of [pkg.File] with hardcoded behaviour.
|
||||||
|
func newStubFile(
|
||||||
|
kind pkg.Kind,
|
||||||
|
id pkg.ID,
|
||||||
|
sum *pkg.Checksum,
|
||||||
|
data []byte,
|
||||||
|
err error,
|
||||||
|
) pkg.File {
|
||||||
|
f := overrideIdentFile{id, stubFile{data, err, stubArtifact{
|
||||||
|
kind,
|
||||||
|
nil,
|
||||||
|
nil,
|
||||||
|
func(*pkg.TContext) error {
|
||||||
|
panic("unreachable")
|
||||||
|
},
|
||||||
|
}}}
|
||||||
|
if sum == nil {
|
||||||
|
return f
|
||||||
|
} else {
|
||||||
|
return overrideChecksumFile{*sum, f}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// destroyArtifact removes all traces of an [Artifact] from the on-disk cache.
|
||||||
|
// Do not use this in a test case without a very good reason to do so.
|
||||||
|
func destroyArtifact(
|
||||||
|
t *testing.T,
|
||||||
|
base *check.Absolute,
|
||||||
|
c *pkg.Cache,
|
||||||
|
a pkg.Artifact,
|
||||||
|
) {
|
||||||
|
if pathname, checksum, err := c.Cure(a); err != nil {
|
||||||
|
t.Fatalf("Cure: error = %v", err)
|
||||||
|
} else if err = os.Remove(pathname.String()); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else {
|
||||||
|
p := base.Append(
|
||||||
|
"checksum",
|
||||||
|
pkg.Encode(checksum),
|
||||||
|
)
|
||||||
|
if err = filepath.WalkDir(p.String(), func(
|
||||||
|
path string,
|
||||||
|
d fs.DirEntry,
|
||||||
|
err error,
|
||||||
|
) error {
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if d.IsDir() {
|
||||||
|
return os.Chmod(path, 0700)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}); err != nil && !errors.Is(err, os.ErrNotExist) {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if err = os.RemoveAll(p.String()); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// newDestroyArtifactFunc returns a function that calls destroyArtifact.
|
||||||
|
func newDestroyArtifactFunc(a pkg.Artifact) func(
|
||||||
|
t *testing.T,
|
||||||
|
base *check.Absolute,
|
||||||
|
c *pkg.Cache,
|
||||||
|
) {
|
||||||
|
return func(
|
||||||
|
t *testing.T,
|
||||||
|
base *check.Absolute,
|
||||||
|
c *pkg.Cache,
|
||||||
|
) {
|
||||||
|
destroyArtifact(t, base, c, a)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIdent(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
testCases := []struct {
|
||||||
|
name string
|
||||||
|
a pkg.Artifact
|
||||||
|
want pkg.ID
|
||||||
|
}{
|
||||||
|
{"tar", stubArtifact{
|
||||||
|
pkg.KindTar,
|
||||||
|
[]byte{pkg.TarGzip, 0, 0, 0, 0, 0, 0, 0},
|
||||||
|
[]pkg.Artifact{
|
||||||
|
overrideIdent{pkg.ID{}, stubArtifact{}},
|
||||||
|
},
|
||||||
|
nil,
|
||||||
|
}, pkg.MustDecode(
|
||||||
|
"HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY",
|
||||||
|
)},
|
||||||
|
}
|
||||||
|
for _, tc := range testCases {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
if got := pkg.Ident(tc.a); got != tc.want {
|
||||||
|
t.Errorf("Ident: %s, want %s",
|
||||||
|
pkg.Encode(got),
|
||||||
|
pkg.Encode(tc.want),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// cacheTestCase is a test case passed to checkWithCache where a new instance
|
||||||
|
// of [pkg.Cache] is prepared for the test case, and is validated and removed
|
||||||
|
// on test completion.
|
||||||
|
type cacheTestCase struct {
|
||||||
|
name string
|
||||||
|
early func(t *testing.T, base *check.Absolute)
|
||||||
|
f func(t *testing.T, base *check.Absolute, c *pkg.Cache)
|
||||||
|
want pkg.Checksum
|
||||||
|
}
|
||||||
|
|
||||||
|
// checkWithCache runs a slice of cacheTestCase.
|
||||||
|
func checkWithCache(t *testing.T, testCases []cacheTestCase) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
for _, tc := range testCases {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
t.Helper()
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
base := check.MustAbs(t.TempDir())
|
||||||
|
if err := os.Chmod(base.String(), 0700); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
t.Cleanup(func() {
|
||||||
|
if err := filepath.WalkDir(base.String(), func(path string, d fs.DirEntry, err error) error {
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if !d.IsDir() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return os.Chmod(path, 0700)
|
||||||
|
}); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
msg := message.New(log.New(os.Stderr, "cache: ", 0))
|
||||||
|
msg.SwapVerbose(testing.Verbose())
|
||||||
|
|
||||||
|
var scrubFunc func() error // scrub after hashing
|
||||||
|
if c, err := pkg.New(t.Context(), msg, 0, base); err != nil {
|
||||||
|
t.Fatalf("New: error = %v", err)
|
||||||
|
} else {
|
||||||
|
t.Cleanup(c.Close)
|
||||||
|
if tc.early != nil {
|
||||||
|
tc.early(t, base)
|
||||||
|
}
|
||||||
|
tc.f(t, base, c)
|
||||||
|
scrubFunc = c.Scrub
|
||||||
|
}
|
||||||
|
|
||||||
|
var restoreTemp bool
|
||||||
|
if _, err := os.Lstat(base.Append("temp").String()); err != nil {
|
||||||
|
if !errors.Is(err, os.ErrNotExist) {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
restoreTemp = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if checksum, err := pkg.HashDir(base); err != nil {
|
||||||
|
t.Fatalf("HashDir: error = %v", err)
|
||||||
|
} else if checksum != tc.want {
|
||||||
|
t.Fatalf("HashDir: %v", &pkg.ChecksumMismatchError{
|
||||||
|
Got: checksum,
|
||||||
|
Want: tc.want,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := scrubFunc(); err != nil {
|
||||||
|
t.Fatal("cache contains inconsistencies\n\n" + err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
if restoreTemp {
|
||||||
|
if err := os.Mkdir(
|
||||||
|
base.Append("temp").String(),
|
||||||
|
0700,
|
||||||
|
); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// validate again to make sure scrub did not condemn anything
|
||||||
|
if checksum, err := pkg.HashDir(base); err != nil {
|
||||||
|
t.Fatalf("HashDir: error = %v", err)
|
||||||
|
} else if checksum != tc.want {
|
||||||
|
t.Fatalf("(scrubbed) HashDir: %v", &pkg.ChecksumMismatchError{
|
||||||
|
Got: checksum,
|
||||||
|
Want: tc.want,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A cureStep contains an [Artifact] to be cured, and the expected outcome.
|
||||||
|
type cureStep struct {
|
||||||
|
name string
|
||||||
|
|
||||||
|
a pkg.Artifact
|
||||||
|
|
||||||
|
pathname *check.Absolute
|
||||||
|
checksum pkg.Checksum
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
// ignorePathname is passed to cureMany to skip the pathname check.
|
||||||
|
var ignorePathname = check.MustAbs("/\x00")
|
||||||
|
|
||||||
|
// cureMany cures many artifacts against a [Cache] and checks their outcomes.
|
||||||
|
func cureMany(t *testing.T, c *pkg.Cache, steps []cureStep) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
for _, step := range steps {
|
||||||
|
t.Log("cure step:", step.name)
|
||||||
|
if pathname, checksum, err := c.Cure(step.a); !reflect.DeepEqual(err, step.err) {
|
||||||
|
t.Fatalf("Cure: error = %v, want %v", err, step.err)
|
||||||
|
} else if step.pathname != ignorePathname && !pathname.Is(step.pathname) {
|
||||||
|
t.Fatalf("Cure: pathname = %q, want %q", pathname, step.pathname)
|
||||||
|
} else if checksum != step.checksum {
|
||||||
|
t.Fatalf("Cure: checksum = %s, want %s", pkg.Encode(checksum), pkg.Encode(step.checksum))
|
||||||
|
} else {
|
||||||
|
v := any(err)
|
||||||
|
if err == nil {
|
||||||
|
v = pathname
|
||||||
|
}
|
||||||
|
t.Log(pkg.Encode(checksum)+":", v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCache(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
const testdata = "" +
|
||||||
|
"\x00\x00\x00\x00" +
|
||||||
|
"\xad\x0b\x00" +
|
||||||
|
"\x04" +
|
||||||
|
"\xfe\xfe\x00\x00" +
|
||||||
|
"\xfe\xca\x00\x00"
|
||||||
|
|
||||||
|
testdataChecksum := func() pkg.Checksum {
|
||||||
|
h := sha512.New384()
|
||||||
|
h.Write([]byte(testdata))
|
||||||
|
return (pkg.Checksum)(h.Sum(nil))
|
||||||
|
}()
|
||||||
|
|
||||||
|
testCases := []cacheTestCase{
|
||||||
|
{"file", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||||
|
c.SetStrict(true)
|
||||||
|
|
||||||
|
identifier := (pkg.ID)(bytes.Repeat([]byte{
|
||||||
|
0x75, 0xe6, 0x9d, 0x6d, 0xe7, 0x9f,
|
||||||
|
}, 8))
|
||||||
|
wantPathname := base.Append(
|
||||||
|
"identifier",
|
||||||
|
"deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef",
|
||||||
|
)
|
||||||
|
identifier0 := (pkg.ID)(bytes.Repeat([]byte{
|
||||||
|
0x71, 0xa7, 0xde, 0x6d, 0xa6, 0xde,
|
||||||
|
}, 8))
|
||||||
|
wantPathname0 := base.Append(
|
||||||
|
"identifier",
|
||||||
|
"cafebabecafebabecafebabecafebabecafebabecafebabecafebabecafebabe",
|
||||||
|
)
|
||||||
|
|
||||||
|
cureMany(t, c, []cureStep{
|
||||||
|
{"initial file", newStubFile(
|
||||||
|
pkg.KindHTTPGet,
|
||||||
|
identifier,
|
||||||
|
&testdataChecksum,
|
||||||
|
[]byte(testdata), nil,
|
||||||
|
), wantPathname, testdataChecksum, nil},
|
||||||
|
|
||||||
|
{"identical content", newStubFile(
|
||||||
|
pkg.KindHTTPGet,
|
||||||
|
identifier0,
|
||||||
|
&testdataChecksum,
|
||||||
|
[]byte(testdata), nil,
|
||||||
|
), wantPathname0, testdataChecksum, nil},
|
||||||
|
|
||||||
|
{"existing entry", newStubFile(
|
||||||
|
pkg.KindHTTPGet,
|
||||||
|
identifier,
|
||||||
|
&testdataChecksum,
|
||||||
|
[]byte(testdata), nil,
|
||||||
|
), wantPathname, testdataChecksum, nil},
|
||||||
|
|
||||||
|
{"checksum mismatch", newStubFile(
|
||||||
|
pkg.KindHTTPGet,
|
||||||
|
pkg.ID{0xff, 0},
|
||||||
|
new(pkg.Checksum),
|
||||||
|
[]byte(testdata), nil,
|
||||||
|
), nil, pkg.Checksum{}, &pkg.ChecksumMismatchError{
|
||||||
|
Got: testdataChecksum,
|
||||||
|
}},
|
||||||
|
|
||||||
|
{"store without validation", newStubFile(
|
||||||
|
pkg.KindHTTPGet,
|
||||||
|
pkg.MustDecode("vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX"),
|
||||||
|
nil,
|
||||||
|
[]byte{0}, nil,
|
||||||
|
), base.Append(
|
||||||
|
"identifier",
|
||||||
|
"vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX",
|
||||||
|
), pkg.Checksum{
|
||||||
|
0xbe, 0xc0, 0x21, 0xb4, 0xf3, 0x68,
|
||||||
|
0xe3, 0x06, 0x91, 0x34, 0xe0, 0x12,
|
||||||
|
0xc2, 0xb4, 0x30, 0x70, 0x83, 0xd3,
|
||||||
|
0xa9, 0xbd, 0xd2, 0x06, 0xe2, 0x4e,
|
||||||
|
0x5f, 0x0d, 0x86, 0xe1, 0x3d, 0x66,
|
||||||
|
0x36, 0x65, 0x59, 0x33, 0xec, 0x2b,
|
||||||
|
0x41, 0x34, 0x65, 0x96, 0x68, 0x17,
|
||||||
|
0xa9, 0xc2, 0x08, 0xa1, 0x17, 0x17,
|
||||||
|
}, nil},
|
||||||
|
|
||||||
|
{"incomplete implementation", struct{ pkg.Artifact }{stubArtifact{
|
||||||
|
kind: pkg.KindExec,
|
||||||
|
params: []byte("artifact overridden to be incomplete"),
|
||||||
|
}}, nil, pkg.Checksum{}, pkg.InvalidArtifactError(pkg.MustDecode(
|
||||||
|
"da4kLKa94g1wN2M0qcKflqgf2-Y2UL36iehhczqsIIW8G0LGvM7S8jjtnBc0ftB0",
|
||||||
|
))},
|
||||||
|
|
||||||
|
{"error passthrough", newStubFile(
|
||||||
|
pkg.KindHTTPGet,
|
||||||
|
pkg.ID{0xff, 1},
|
||||||
|
nil,
|
||||||
|
nil, stub.UniqueError(0xcafe),
|
||||||
|
), nil, pkg.Checksum{}, stub.UniqueError(0xcafe)},
|
||||||
|
|
||||||
|
{"error caching", newStubFile(
|
||||||
|
pkg.KindHTTPGet,
|
||||||
|
pkg.ID{0xff, 1},
|
||||||
|
nil,
|
||||||
|
nil, nil,
|
||||||
|
), nil, pkg.Checksum{}, stub.UniqueError(0xcafe)},
|
||||||
|
|
||||||
|
{"cache hit bad type", overrideChecksum{testdataChecksum, overrideIdent{pkg.ID{0xff, 2}, stubArtifact{
|
||||||
|
kind: pkg.KindTar,
|
||||||
|
}}}, nil, pkg.Checksum{}, pkg.InvalidFileModeError(
|
||||||
|
0400,
|
||||||
|
)},
|
||||||
|
})
|
||||||
|
|
||||||
|
if c0, err := pkg.New(
|
||||||
|
t.Context(),
|
||||||
|
message.New(nil),
|
||||||
|
0, base,
|
||||||
|
); err != nil {
|
||||||
|
t.Fatalf("New: error = %v", err)
|
||||||
|
} else {
|
||||||
|
t.Cleanup(c.Close) // check doubled cancel
|
||||||
|
cureMany(t, c0, []cureStep{
|
||||||
|
{"cache hit ident", overrideIdent{
|
||||||
|
id: identifier,
|
||||||
|
}, wantPathname, testdataChecksum, nil},
|
||||||
|
|
||||||
|
{"cache miss checksum match", newStubFile(
|
||||||
|
pkg.KindHTTPGet,
|
||||||
|
testdataChecksum,
|
||||||
|
nil,
|
||||||
|
[]byte(testdata),
|
||||||
|
nil,
|
||||||
|
), base.Append(
|
||||||
|
"identifier",
|
||||||
|
pkg.Encode(testdataChecksum),
|
||||||
|
), testdataChecksum, nil},
|
||||||
|
})
|
||||||
|
|
||||||
|
// cure after close
|
||||||
|
c.Close()
|
||||||
|
if _, _, err = c.Cure(stubArtifactF{
|
||||||
|
kind: pkg.KindExec,
|
||||||
|
params: []byte("unreachable artifact cured after cancel"),
|
||||||
|
deps: []pkg.Artifact{pkg.NewFile("", []byte("unreachable dependency"))},
|
||||||
|
}); !reflect.DeepEqual(err, context.Canceled) {
|
||||||
|
t.Fatalf("(closed) Cure: error = %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}, pkg.MustDecode("St9rlE-mGZ5gXwiv_hzQ_B8bZP-UUvSNmf4nHUZzCMOumb6hKnheZSe0dmnuc4Q2")},
|
||||||
|
|
||||||
|
{"directory", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||||
|
id := pkg.KindTar.Ident(
|
||||||
|
binary.LittleEndian.AppendUint64(nil, pkg.TarGzip),
|
||||||
|
overrideIdent{testdataChecksum, stubArtifact{}},
|
||||||
|
)
|
||||||
|
makeSample := func(t *pkg.TContext) error {
|
||||||
|
work := t.GetWorkDir()
|
||||||
|
if err := os.Mkdir(work.String(), 0700); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := os.WriteFile(
|
||||||
|
work.Append("check").String(),
|
||||||
|
[]byte{0, 0},
|
||||||
|
0400,
|
||||||
|
); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := os.MkdirAll(work.Append(
|
||||||
|
"lib",
|
||||||
|
"pkgconfig",
|
||||||
|
).String(), 0700); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return os.Symlink(
|
||||||
|
"/proc/nonexistent/libedac.so",
|
||||||
|
work.Append(
|
||||||
|
"lib",
|
||||||
|
"libedac.so",
|
||||||
|
).String(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
wantChecksum := pkg.MustDecode(
|
||||||
|
"qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b",
|
||||||
|
)
|
||||||
|
wantPathname := base.Append(
|
||||||
|
"identifier",
|
||||||
|
pkg.Encode(id),
|
||||||
|
)
|
||||||
|
|
||||||
|
id0 := pkg.KindTar.Ident(
|
||||||
|
binary.LittleEndian.AppendUint64(nil, pkg.TarGzip),
|
||||||
|
overrideIdent{pkg.ID{}, stubArtifact{}},
|
||||||
|
)
|
||||||
|
wantPathname0 := base.Append(
|
||||||
|
"identifier",
|
||||||
|
pkg.Encode(id0),
|
||||||
|
)
|
||||||
|
|
||||||
|
makeGarbage := func(work *check.Absolute, wantErr error) error {
|
||||||
|
if err := os.Mkdir(work.String(), 0700); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
mode := fs.FileMode(0)
|
||||||
|
if wantErr == nil {
|
||||||
|
mode = 0500
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := os.MkdirAll(work.Append(
|
||||||
|
"lib",
|
||||||
|
"pkgconfig",
|
||||||
|
).String(), 0700); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := os.WriteFile(work.Append(
|
||||||
|
"lib",
|
||||||
|
"check",
|
||||||
|
).String(), nil, 0400&mode); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := os.Chmod(work.Append(
|
||||||
|
"lib",
|
||||||
|
"pkgconfig",
|
||||||
|
).String(), 0500&mode); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := os.Chmod(work.Append(
|
||||||
|
"lib",
|
||||||
|
).String(), 0500&mode); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return wantErr
|
||||||
|
}
|
||||||
|
|
||||||
|
cureMany(t, c, []cureStep{
|
||||||
|
{"initial directory", overrideChecksum{wantChecksum, overrideIdent{id, stubArtifact{
|
||||||
|
kind: pkg.KindTar,
|
||||||
|
cure: makeSample,
|
||||||
|
}}}, wantPathname, wantChecksum, nil},
|
||||||
|
|
||||||
|
{"identical identifier", overrideChecksum{wantChecksum, overrideIdent{id, stubArtifact{
|
||||||
|
kind: pkg.KindTar,
|
||||||
|
}}}, wantPathname, wantChecksum, nil},
|
||||||
|
|
||||||
|
{"identical checksum", overrideIdent{id0, stubArtifact{
|
||||||
|
kind: pkg.KindTar,
|
||||||
|
cure: makeSample,
|
||||||
|
}}, wantPathname0, wantChecksum, nil},
|
||||||
|
|
||||||
|
{"cure fault", overrideIdent{pkg.ID{0xff, 0}, stubArtifact{
|
||||||
|
kind: pkg.KindTar,
|
||||||
|
cure: func(t *pkg.TContext) error {
|
||||||
|
return makeGarbage(t.GetWorkDir(), stub.UniqueError(0xcafe))
|
||||||
|
},
|
||||||
|
}}, nil, pkg.Checksum{}, stub.UniqueError(0xcafe)},
|
||||||
|
|
||||||
|
{"checksum mismatch", overrideChecksum{pkg.Checksum{}, overrideIdent{pkg.ID{0xff, 1}, stubArtifact{
|
||||||
|
kind: pkg.KindTar,
|
||||||
|
cure: func(t *pkg.TContext) error {
|
||||||
|
return makeGarbage(t.GetWorkDir(), nil)
|
||||||
|
},
|
||||||
|
}}}, nil, pkg.Checksum{}, &pkg.ChecksumMismatchError{
|
||||||
|
Got: pkg.MustDecode(
|
||||||
|
"CUx-3hSbTWPsbMfDhgalG4Ni_GmR9TnVX8F99tY_P5GtkYvczg9RrF5zO0jX9XYT",
|
||||||
|
),
|
||||||
|
}},
|
||||||
|
|
||||||
|
{"cache hit bad type", newStubFile(
|
||||||
|
pkg.KindHTTPGet,
|
||||||
|
pkg.ID{0xff, 2},
|
||||||
|
&wantChecksum,
|
||||||
|
[]byte(testdata), nil,
|
||||||
|
), nil, pkg.Checksum{}, pkg.InvalidFileModeError(
|
||||||
|
fs.ModeDir | 0500,
|
||||||
|
)},
|
||||||
|
|
||||||
|
{"openFile directory", overrideIdent{pkg.ID{0xff, 3}, stubArtifact{
|
||||||
|
kind: pkg.KindTar,
|
||||||
|
cure: func(t *pkg.TContext) error {
|
||||||
|
r, err := t.Open(overrideChecksumFile{checksum: wantChecksum})
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
_, err = io.ReadAll(r)
|
||||||
|
return err
|
||||||
|
},
|
||||||
|
}}, nil, pkg.Checksum{}, &os.PathError{
|
||||||
|
Op: "read",
|
||||||
|
Path: base.Append(
|
||||||
|
"checksum",
|
||||||
|
pkg.Encode(wantChecksum),
|
||||||
|
).String(),
|
||||||
|
Err: syscall.EISDIR,
|
||||||
|
}},
|
||||||
|
|
||||||
|
{"no output", overrideIdent{pkg.ID{0xff, 4}, stubArtifact{
|
||||||
|
kind: pkg.KindTar,
|
||||||
|
cure: func(t *pkg.TContext) error {
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
}}, nil, pkg.Checksum{}, pkg.NoOutputError{}},
|
||||||
|
|
||||||
|
{"file output", overrideIdent{pkg.ID{0xff, 5}, stubArtifact{
|
||||||
|
kind: pkg.KindTar,
|
||||||
|
cure: func(t *pkg.TContext) error {
|
||||||
|
return os.WriteFile(t.GetWorkDir().String(), []byte{0}, 0400)
|
||||||
|
},
|
||||||
|
}}, nil, pkg.Checksum{}, errors.New("non-file artifact produced regular file")},
|
||||||
|
|
||||||
|
{"symlink output", overrideIdent{pkg.ID{0xff, 6}, stubArtifact{
|
||||||
|
kind: pkg.KindTar,
|
||||||
|
cure: func(t *pkg.TContext) error {
|
||||||
|
return os.Symlink(
|
||||||
|
t.GetWorkDir().String(),
|
||||||
|
t.GetWorkDir().String(),
|
||||||
|
)
|
||||||
|
},
|
||||||
|
}}, nil, pkg.Checksum{}, pkg.InvalidFileModeError(
|
||||||
|
fs.ModeSymlink | 0777,
|
||||||
|
)},
|
||||||
|
})
|
||||||
|
}, pkg.MustDecode("WVpvsVqVKg9Nsh744x57h51AuWUoUR2nnh8Md-EYBQpk6ziyTuUn6PLtF2e0Eu_d")},
|
||||||
|
|
||||||
|
{"pending", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||||
|
c.SetStrict(true)
|
||||||
|
|
||||||
|
wantErr := stub.UniqueError(0xcafe)
|
||||||
|
n, ready := make(chan struct{}), make(chan struct{})
|
||||||
|
go func() {
|
||||||
|
if _, _, err := c.Cure(overrideIdent{pkg.ID{0xff}, stubArtifact{
|
||||||
|
kind: pkg.KindTar,
|
||||||
|
cure: func(t *pkg.TContext) error {
|
||||||
|
close(ready)
|
||||||
|
<-n
|
||||||
|
return wantErr
|
||||||
|
},
|
||||||
|
}}); !reflect.DeepEqual(err, wantErr) {
|
||||||
|
panic(fmt.Sprintf("Cure: error = %v, want %v", err, wantErr))
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
<-ready
|
||||||
|
wCureDone := make(chan struct{})
|
||||||
|
go func() {
|
||||||
|
if _, _, err := c.Cure(overrideIdent{pkg.ID{0xff}, stubArtifact{
|
||||||
|
kind: pkg.KindTar,
|
||||||
|
}}); !reflect.DeepEqual(err, wantErr) {
|
||||||
|
panic(fmt.Sprintf("Cure: error = %v, want %v", err, wantErr))
|
||||||
|
}
|
||||||
|
close(wCureDone)
|
||||||
|
}()
|
||||||
|
|
||||||
|
// check cache activity while a cure is blocking
|
||||||
|
cureMany(t, c, []cureStep{
|
||||||
|
{"error passthrough", newStubFile(
|
||||||
|
pkg.KindHTTPGet,
|
||||||
|
pkg.ID{0xff, 1},
|
||||||
|
nil,
|
||||||
|
nil, stub.UniqueError(0xbad),
|
||||||
|
), nil, pkg.Checksum{}, stub.UniqueError(0xbad)},
|
||||||
|
|
||||||
|
{"file output", overrideIdent{pkg.ID{0xff, 2}, stubArtifact{
|
||||||
|
kind: pkg.KindTar,
|
||||||
|
cure: func(t *pkg.TContext) error {
|
||||||
|
return os.WriteFile(
|
||||||
|
t.GetWorkDir().String(),
|
||||||
|
[]byte{0},
|
||||||
|
0400,
|
||||||
|
)
|
||||||
|
},
|
||||||
|
}}, nil, pkg.Checksum{}, errors.New(
|
||||||
|
"non-file artifact produced regular file",
|
||||||
|
)},
|
||||||
|
})
|
||||||
|
|
||||||
|
wantErrScrub := &pkg.ScrubError{
|
||||||
|
Errs: []error{errors.New("scrub began with pending artifacts")},
|
||||||
|
}
|
||||||
|
if err := c.Scrub(); !reflect.DeepEqual(err, wantErrScrub) {
|
||||||
|
t.Fatalf("Scrub: error = %#v, want %#v", err, wantErrScrub)
|
||||||
|
}
|
||||||
|
|
||||||
|
identPendingVal := reflect.ValueOf(c).Elem().FieldByName("identPending")
|
||||||
|
identPending := reflect.NewAt(
|
||||||
|
identPendingVal.Type(),
|
||||||
|
unsafe.Pointer(identPendingVal.UnsafeAddr()),
|
||||||
|
).Elem().Interface().(map[pkg.ID]<-chan struct{})
|
||||||
|
notify := identPending[pkg.ID{0xff}]
|
||||||
|
go close(n)
|
||||||
|
<-notify
|
||||||
|
<-wCureDone
|
||||||
|
}, pkg.MustDecode("E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C")},
|
||||||
|
|
||||||
|
{"scrub", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||||
|
cureMany(t, c, []cureStep{
|
||||||
|
{"bad measured file", newStubFile(
|
||||||
|
pkg.KindHTTPGet,
|
||||||
|
pkg.Checksum{0xfe, 0},
|
||||||
|
&pkg.Checksum{0xff, 0},
|
||||||
|
[]byte{0}, nil,
|
||||||
|
), base.Append(
|
||||||
|
"identifier",
|
||||||
|
pkg.Encode(pkg.Checksum{0xfe, 0}),
|
||||||
|
), pkg.Checksum{0xff, 0}, nil},
|
||||||
|
})
|
||||||
|
|
||||||
|
for _, p := range [][]string{
|
||||||
|
{"identifier", "invalid"},
|
||||||
|
{"identifier", pkg.Encode(pkg.ID{0xfe, 0xff})},
|
||||||
|
{"checksum", "invalid"},
|
||||||
|
} {
|
||||||
|
if err := os.WriteFile(
|
||||||
|
base.Append(p...).String(),
|
||||||
|
nil,
|
||||||
|
0400,
|
||||||
|
); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, p := range [][]string{
|
||||||
|
{"../nonexistent", "checksum", pkg.Encode(pkg.Checksum{0xff, 0xff})},
|
||||||
|
{"../nonexistent", "identifier", pkg.Encode(pkg.Checksum{0xfe, 0xfe})},
|
||||||
|
} {
|
||||||
|
if err := os.Symlink(
|
||||||
|
p[0],
|
||||||
|
base.Append(p[1:]...).String(),
|
||||||
|
); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
wantErr := &pkg.ScrubError{
|
||||||
|
ChecksumMismatches: []pkg.ChecksumMismatchError{
|
||||||
|
{Got: pkg.MustDecode(
|
||||||
|
"vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX",
|
||||||
|
), Want: pkg.Checksum{0xff, 0}},
|
||||||
|
},
|
||||||
|
DanglingIdentifiers: []pkg.ID{
|
||||||
|
{0xfe, 0},
|
||||||
|
{0xfe, 0xfe},
|
||||||
|
{0xfe, 0xff},
|
||||||
|
},
|
||||||
|
Errs: []error{
|
||||||
|
pkg.InvalidFileModeError(fs.ModeSymlink),
|
||||||
|
base64.CorruptInputError(4),
|
||||||
|
base64.CorruptInputError(8),
|
||||||
|
&os.PathError{
|
||||||
|
Op: "readlink",
|
||||||
|
Path: base.Append("identifier", pkg.Encode(pkg.ID{0xfe, 0xff})).String(),
|
||||||
|
Err: syscall.EINVAL,
|
||||||
|
},
|
||||||
|
base64.CorruptInputError(4),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
if err := c.Scrub(); !reflect.DeepEqual(err, wantErr) {
|
||||||
|
t.Fatalf("Scrub: error =\n%s\nwant\n%s", err, wantErr)
|
||||||
|
}
|
||||||
|
}, pkg.MustDecode("E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C")},
|
||||||
|
}
|
||||||
|
checkWithCache(t, testCases)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestErrors(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
testCases := []struct {
|
||||||
|
name string
|
||||||
|
err error
|
||||||
|
want string
|
||||||
|
}{
|
||||||
|
{"InvalidLookupError", pkg.InvalidLookupError{
|
||||||
|
0xff, 0xf0,
|
||||||
|
}, "attempting to look up non-dependency artifact __AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"},
|
||||||
|
|
||||||
|
{"InvalidArtifactError", pkg.InvalidArtifactError{
|
||||||
|
0xff, 0xfd,
|
||||||
|
}, "artifact __0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA cannot be cured"},
|
||||||
|
|
||||||
|
{"ChecksumMismatchError", &pkg.ChecksumMismatchError{
|
||||||
|
Want: (pkg.Checksum)(bytes.Repeat([]byte{
|
||||||
|
0x75, 0xe6, 0x9d, 0x6d, 0xe7, 0x9f,
|
||||||
|
}, 8)),
|
||||||
|
}, "got AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" +
|
||||||
|
" instead of deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"},
|
||||||
|
|
||||||
|
{"ResponseStatusError", pkg.ResponseStatusError(
|
||||||
|
http.StatusNotAcceptable,
|
||||||
|
), "the requested URL returned non-OK status: Not Acceptable"},
|
||||||
|
|
||||||
|
{"DisallowedTypeflagError", pkg.DisallowedTypeflagError(
|
||||||
|
tar.TypeChar,
|
||||||
|
), "disallowed typeflag '3'"},
|
||||||
|
|
||||||
|
{"InvalidFileModeError", pkg.InvalidFileModeError(
|
||||||
|
fs.ModeSymlink | 0777,
|
||||||
|
), "artifact did not produce a regular file or directory"},
|
||||||
|
|
||||||
|
{"NoOutputError", pkg.NoOutputError{
|
||||||
|
// empty struct
|
||||||
|
}, "artifact cured successfully but did not produce any output"},
|
||||||
|
}
|
||||||
|
for _, tc := range testCases {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
if got := tc.err.Error(); got != tc.want {
|
||||||
|
t.Errorf("Error: %q, want %q", got, tc.want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestScrubError(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
testCases := []struct {
|
||||||
|
name string
|
||||||
|
err pkg.ScrubError
|
||||||
|
want string
|
||||||
|
unwrap []error
|
||||||
|
}{
|
||||||
|
{"full", pkg.ScrubError{
|
||||||
|
ChecksumMismatches: []pkg.ChecksumMismatchError{
|
||||||
|
{Want: pkg.MustDecode("CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
|
||||||
|
},
|
||||||
|
DanglingIdentifiers: []pkg.ID{
|
||||||
|
(pkg.ID)(bytes.Repeat([]byte{0x75, 0xe6, 0x9d, 0x6d, 0xe7, 0x9f}, 8)),
|
||||||
|
(pkg.ID)(bytes.Repeat([]byte{0x71, 0xa7, 0xde, 0x6d, 0xa6, 0xde}, 8)),
|
||||||
|
},
|
||||||
|
Errs: []error{
|
||||||
|
stub.UniqueError(0xcafe),
|
||||||
|
stub.UniqueError(0xbad),
|
||||||
|
stub.UniqueError(0xff),
|
||||||
|
},
|
||||||
|
}, `checksum mismatches:
|
||||||
|
got AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA instead of CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN
|
||||||
|
|
||||||
|
dangling identifiers:
|
||||||
|
deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef
|
||||||
|
cafebabecafebabecafebabecafebabecafebabecafebabecafebabecafebabe
|
||||||
|
|
||||||
|
errors during scrub:
|
||||||
|
unique error 51966 injected by the test suite
|
||||||
|
unique error 2989 injected by the test suite
|
||||||
|
unique error 255 injected by the test suite
|
||||||
|
`, []error{
|
||||||
|
&pkg.ChecksumMismatchError{Want: pkg.MustDecode("CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
|
||||||
|
stub.UniqueError(0xcafe),
|
||||||
|
stub.UniqueError(0xbad),
|
||||||
|
stub.UniqueError(0xff),
|
||||||
|
}},
|
||||||
|
}
|
||||||
|
for _, tc := range testCases {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
if got := tc.err.Error(); got != tc.want {
|
||||||
|
t.Errorf("Error:\n\n%s\n\nwant\n\n%s", got, tc.want)
|
||||||
|
}
|
||||||
|
|
||||||
|
if unwrap := tc.err.Unwrap(); !reflect.DeepEqual(unwrap, tc.unwrap) {
|
||||||
|
t.Errorf("Unwrap: %#v, want %#v", unwrap, tc.unwrap)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNew(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
t.Run("nonexistent", func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
wantErr := &os.PathError{
|
||||||
|
Op: "mkdir",
|
||||||
|
Path: container.Nonexistent,
|
||||||
|
Err: syscall.ENOENT,
|
||||||
|
}
|
||||||
|
if _, err := pkg.New(
|
||||||
|
t.Context(),
|
||||||
|
message.New(nil),
|
||||||
|
0, check.MustAbs(container.Nonexistent),
|
||||||
|
); !reflect.DeepEqual(err, wantErr) {
|
||||||
|
t.Errorf("New: error = %#v, want %#v", err, wantErr)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("permission", func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
tempDir := check.MustAbs(t.TempDir())
|
||||||
|
if err := os.Chmod(tempDir.String(), 0); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else {
|
||||||
|
t.Cleanup(func() {
|
||||||
|
if err = os.Chmod(tempDir.String(), 0700); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
wantErr := &os.PathError{
|
||||||
|
Op: "mkdir",
|
||||||
|
Path: tempDir.Append("cache").String(),
|
||||||
|
Err: syscall.EACCES,
|
||||||
|
}
|
||||||
|
if _, err := pkg.New(
|
||||||
|
t.Context(),
|
||||||
|
message.New(nil),
|
||||||
|
0, tempDir.Append("cache"),
|
||||||
|
); !reflect.DeepEqual(err, wantErr) {
|
||||||
|
t.Errorf("New: error = %#v, want %#v", err, wantErr)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
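A hedged sketch, not part of this change: mirroring the stubArtifact type exercised above, a custom trivial artifact appears to need roughly the following shape to be accepted by Cache.Cure (the type name, kind and output layout here are illustrative placeholders):

	// Illustrative only; assumes the same contract the stub types above rely on.
	type copyArtifact struct{ payload []byte }

	func (a copyArtifact) Kind() pkg.Kind               { return pkg.KindExec } // placeholder kind
	func (a copyArtifact) Params() []byte               { return a.payload }
	func (a copyArtifact) Dependencies() []pkg.Artifact { return nil }
	func (a copyArtifact) Cure(t *pkg.TContext) error {
		// write output under the work directory, as the directory test cases do
		work := t.GetWorkDir()
		if err := os.Mkdir(work.String(), 0700); err != nil {
			return err
		}
		return os.WriteFile(work.Append("payload").String(), a.payload, 0400)
	}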
238
internal/pkg/tar.go
Normal file
238
internal/pkg/tar.go
Normal file
@@ -0,0 +1,238 @@
package pkg

import (
	"archive/tar"
	"compress/bzip2"
	"compress/gzip"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"io/fs"
	"net/http"
	"os"

	"hakurei.app/container/check"
)

const (
	// TarUncompressed denotes an uncompressed tarball.
	TarUncompressed = iota
	// TarGzip denotes a tarball compressed via [gzip].
	TarGzip
	// TarBzip2 denotes a tarball compressed via [bzip2].
	TarBzip2
)

// A tarArtifact is an [Artifact] unpacking a tarball backed by a [File].
type tarArtifact struct {
	// Caller-supplied backing tarball.
	f Artifact
	// Compression on top of the tarball.
	compression uint64
}

// tarArtifactNamed embeds tarArtifact for a [fmt.Stringer] tarball.
type tarArtifactNamed struct {
	tarArtifact
	// Copied from tarArtifact.f.
	name string
}

var _ fmt.Stringer = new(tarArtifactNamed)

// String returns the name of the underlying [Artifact] suffixed with unpack.
func (a *tarArtifactNamed) String() string { return a.name + "-unpack" }

// NewTar returns a new [Artifact] backed by the supplied [Artifact] and
// compression method. The source [Artifact] must be compatible with
// [TContext.Open].
func NewTar(a Artifact, compression uint64) Artifact {
	ta := tarArtifact{a, compression}
	if s, ok := a.(fmt.Stringer); ok {
		if name := s.String(); name != "" {
			return &tarArtifactNamed{ta, name}
		}
	}
	return &ta
}

// NewHTTPGetTar is an abbreviation for NewHTTPGet passed to NewTar.
func NewHTTPGetTar(
	hc *http.Client,
	url string,
	checksum Checksum,
	compression uint64,
) Artifact {
	return NewTar(NewHTTPGet(hc, url, checksum), compression)
}

// Kind returns the hardcoded [Kind] constant.
func (a *tarArtifact) Kind() Kind { return KindTar }

// Params returns compression encoded in little endian.
func (a *tarArtifact) Params() []byte {
	return binary.LittleEndian.AppendUint64(nil, a.compression)
}

// Dependencies returns a slice containing the backing file.
func (a *tarArtifact) Dependencies() []Artifact {
	return []Artifact{a.f}
}

// A DisallowedTypeflagError describes a disallowed typeflag encountered while
// unpacking a tarball.
type DisallowedTypeflagError byte

func (e DisallowedTypeflagError) Error() string {
	return "disallowed typeflag '" + string(e) + "'"
}

// Cure cures the [Artifact], producing a directory located at work.
func (a *tarArtifact) Cure(t *TContext) (err error) {
	temp := t.GetTempDir()
	var tr io.ReadCloser
	if tr, err = t.Open(a.f); err != nil {
		return
	}

	defer func(f io.ReadCloser) {
		closeErr := tr.Close()
		if err == nil {
			err = closeErr
		}

		closeErr = f.Close()
		if err == nil {
			err = closeErr
		}
	}(tr)
	tr = io.NopCloser(tr)

	switch a.compression {
	case TarUncompressed:
		break

	case TarGzip:
		if tr, err = gzip.NewReader(tr); err != nil {
			return
		}
		break

	case TarBzip2:
		tr = io.NopCloser(bzip2.NewReader(tr))
		break

	default:
		return os.ErrInvalid
	}

	type dirTargetPerm struct {
		path *check.Absolute
		mode fs.FileMode
	}
	var madeDirectories []dirTargetPerm

	if err = os.MkdirAll(temp.String(), 0700); err != nil {
		return
	}

	var header *tar.Header
	r := tar.NewReader(tr)
	for header, err = r.Next(); err == nil; header, err = r.Next() {
		typeflag := header.Typeflag
		if typeflag == 0 {
			if len(header.Name) > 0 && header.Name[len(header.Name)-1] == '/' {
				typeflag = tar.TypeDir
			} else {
				typeflag = tar.TypeReg
			}
		}

		pathname := temp.Append(header.Name)
		if typeflag >= '0' && typeflag <= '9' && typeflag != tar.TypeDir {
			if err = os.MkdirAll(pathname.Dir().String(), 0700); err != nil {
				return
			}
		}

		switch typeflag {
		case tar.TypeReg:
			var f *os.File
			if f, err = os.OpenFile(
				pathname.String(),
				os.O_CREATE|os.O_EXCL|os.O_WRONLY,
				header.FileInfo().Mode()&0500,
			); err != nil {
				return
			}
			if _, err = io.Copy(f, r); err != nil {
				_ = f.Close()
				return
			} else if err = f.Close(); err != nil {
				return
			}
			break

		case tar.TypeLink:
			if err = os.Link(header.Linkname, pathname.String()); err != nil {
				return
			}
			break

		case tar.TypeSymlink:
			if err = os.Symlink(header.Linkname, pathname.String()); err != nil {
				return
			}
			break

		case tar.TypeDir:
			madeDirectories = append(madeDirectories, dirTargetPerm{
				path: pathname,
				mode: header.FileInfo().Mode(),
			})
			if err = os.MkdirAll(pathname.String(), 0700); err != nil {
				return
			}
			break

		case tar.TypeXGlobalHeader:
			continue // ignore

		default:
			return DisallowedTypeflagError(typeflag)
		}
	}
	if errors.Is(err, io.EOF) {
		err = nil
	}
	if err == nil {
		for _, e := range madeDirectories {
			if err = os.Chmod(e.path.String(), e.mode&0500); err != nil {
				return
			}
		}
	} else {
		return
	}

	if err = os.Chmod(temp.String(), 0700); err != nil {
		return
	}

	var entries []os.DirEntry
	if entries, err = os.ReadDir(temp.String()); err != nil {
		return
	}

	if len(entries) == 1 && entries[0].IsDir() {
		p := temp.Append(entries[0].Name())
		if err = os.Chmod(p.String(), 0700); err != nil {
			return
		}
		err = os.Rename(p.String(), t.GetWorkDir().String())
	} else {
		err = os.Rename(temp.String(), t.GetWorkDir().String())
	}
	return
}
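A brief usage sketch (the cache c is assumed to already exist, and the URL and checksum names are placeholders; this is not part of the change itself):

	// Fetch a gzip-compressed tarball over HTTP and unpack it into the cache.
	a := pkg.NewHTTPGetTar(nil, // nil selects http.DefaultClient
		"https://example.org/sample.tar.gz", // placeholder URL
		tarballChecksum,                     // assumed pkg.Checksum of the compressed bytes
		pkg.TarGzip,
	)
	dir, sum, err := c.Cure(a) // dir points at the unpacked tree under the cache base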
204
internal/pkg/tar_test.go
Normal file
204
internal/pkg/tar_test.go
Normal file
@@ -0,0 +1,204 @@
|
|||||||
|
package pkg_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"archive/tar"
|
||||||
|
"bytes"
|
||||||
|
"compress/gzip"
|
||||||
|
"crypto/sha512"
|
||||||
|
"errors"
|
||||||
|
"io/fs"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
"testing/fstest"
|
||||||
|
|
||||||
|
"hakurei.app/container/check"
|
||||||
|
"hakurei.app/container/stub"
|
||||||
|
"hakurei.app/internal/pkg"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestTar(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
checkWithCache(t, []cacheTestCase{
|
||||||
|
{"http", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||||
|
checkTarHTTP(t, base, c, fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0700},
|
||||||
|
|
||||||
|
"checksum": {Mode: fs.ModeDir | 0700},
|
||||||
|
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP": {Mode: fs.ModeDir | 0700},
|
||||||
|
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check": {Mode: 0400, Data: []byte{0, 0}},
|
||||||
|
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib": {Mode: fs.ModeDir | 0700},
|
||||||
|
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig": {Mode: fs.ModeDir | 0700},
|
||||||
|
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
|
||||||
|
|
||||||
|
"identifier": {Mode: fs.ModeDir | 0700},
|
||||||
|
"identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||||
|
"identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
|
||||||
|
|
||||||
|
"work": {Mode: fs.ModeDir | 0700},
|
||||||
|
}, pkg.MustDecode(
|
||||||
|
"cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM",
|
||||||
|
))
|
||||||
|
}, pkg.MustDecode("sxbgyX-bPoezbha214n2lbQhiVfTUBkhZ0EX6zI7mmkMdrCdwuMwhMBJphLQsy94")},
|
||||||
|
|
||||||
|
{"http expand", nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
|
||||||
|
checkTarHTTP(t, base, c, fstest.MapFS{
|
||||||
|
".": {Mode: fs.ModeDir | 0700},
|
||||||
|
|
||||||
|
"lib": {Mode: fs.ModeDir | 0700},
|
||||||
|
"lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
|
||||||
|
}, pkg.MustDecode(
|
||||||
|
"CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN",
|
||||||
|
))
|
||||||
|
}, pkg.MustDecode("4I8wx_h7NSJTlG5lbuz-GGEXrOg0GYC3M_503LYEBhv5XGWXfNIdIY9Q3eVSYldX")},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkTarHTTP(
|
||||||
|
t *testing.T,
|
||||||
|
base *check.Absolute,
|
||||||
|
c *pkg.Cache,
|
||||||
|
testdataFsys fs.FS,
|
||||||
|
wantChecksum pkg.Checksum,
|
||||||
|
) {
|
||||||
|
var testdata string
|
||||||
|
{
|
||||||
|
var buf bytes.Buffer
|
||||||
|
w := tar.NewWriter(&buf)
|
||||||
|
if err := w.AddFS(testdataFsys); err != nil {
|
||||||
|
t.Fatalf("AddFS: error = %v", err)
|
||||||
|
}
|
||||||
|
if err := w.Close(); err != nil {
|
||||||
|
t.Fatalf("Close: error = %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var zbuf bytes.Buffer
|
||||||
|
gw := gzip.NewWriter(&zbuf)
|
||||||
|
if _, err := gw.Write(buf.Bytes()); err != nil {
|
||||||
|
t.Fatalf("Write: error = %v", err)
|
||||||
|
}
|
||||||
|
if err := gw.Close(); err != nil {
|
||||||
|
t.Fatalf("Close: error = %v", err)
|
||||||
|
}
|
||||||
|
testdata = zbuf.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
testdataChecksum := func() pkg.Checksum {
|
||||||
|
h := sha512.New384()
|
||||||
|
h.Write([]byte(testdata))
|
||||||
|
return (pkg.Checksum)(h.Sum(nil))
|
||||||
|
}()
|
||||||
|
|
||||||
|
var transport http.Transport
|
||||||
|
client := http.Client{Transport: &transport}
|
||||||
|
transport.RegisterProtocol("file", http.NewFileTransportFS(fstest.MapFS{
|
||||||
|
"testdata": {Data: []byte(testdata), Mode: 0400},
|
||||||
|
}))
|
||||||
|
|
||||||
|
wantIdent := func() pkg.ID {
|
||||||
|
h := sha512.New384()
|
||||||
|
h.Write([]byte{byte(pkg.KindTar), 0, 0, 0, 0, 0, 0, 0})
|
||||||
|
h.Write([]byte{pkg.TarGzip, 0, 0, 0, 0, 0, 0, 0})
|
||||||
|
h.Write([]byte{byte(pkg.KindHTTPGet), 0, 0, 0, 0, 0, 0, 0})
|
||||||
|
httpIdent := pkg.KindHTTPGet.Ident([]byte("file:///testdata"))
|
||||||
|
h.Write(httpIdent[:])
|
||||||
|
return pkg.ID(h.Sum(nil))
|
||||||
|
}()
|
||||||
|
|
||||||
|
a := pkg.NewHTTPGetTar(
|
||||||
|
&client,
|
||||||
|
"file:///testdata",
|
||||||
|
testdataChecksum,
|
||||||
|
pkg.TarGzip,
|
||||||
|
)
|
||||||
|
|
||||||
|
if id := pkg.Ident(a); id != wantIdent {
|
||||||
|
t.Fatalf("Ident: %s, want %s", pkg.Encode(id), pkg.Encode(wantIdent))
|
||||||
|
}
|
||||||
|
|
||||||
|
tarDir := stubArtifact{
|
||||||
|
kind: pkg.KindExec,
|
||||||
|
params: []byte("directory containing a single regular file"),
|
||||||
|
cure: func(t *pkg.TContext) error {
|
||||||
|
work := t.GetWorkDir()
|
||||||
|
if err := os.MkdirAll(work.String(), 0700); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return os.WriteFile(
|
||||||
|
work.Append("sample.tar.gz").String(),
|
||||||
|
[]byte(testdata),
|
||||||
|
0400,
|
||||||
|
)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
tarDirMulti := stubArtifact{
|
||||||
|
kind: pkg.KindExec,
|
||||||
|
params: []byte("directory containing a multiple entries"),
|
||||||
|
cure: func(t *pkg.TContext) error {
|
||||||
|
work := t.GetWorkDir()
|
||||||
|
if err := os.MkdirAll(work.Append(
|
||||||
|
"garbage",
|
||||||
|
).String(), 0700); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return os.WriteFile(
|
||||||
|
work.Append("sample.tar.gz").String(),
|
||||||
|
[]byte(testdata),
|
||||||
|
0400,
|
||||||
|
)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
tarDirType := stubArtifact{
|
||||||
|
kind: pkg.KindExec,
|
||||||
|
params: []byte("directory containing a symbolic link"),
|
||||||
|
cure: func(t *pkg.TContext) error {
|
||||||
|
work := t.GetWorkDir()
|
||||||
|
if err := os.MkdirAll(work.String(), 0700); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return os.Symlink(
|
||||||
|
work.String(),
|
||||||
|
work.Append("sample.tar.gz").String(),
|
||||||
|
)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
// destroy these to avoid including it in flatten test case
|
||||||
|
defer newDestroyArtifactFunc(tarDir)(t, base, c)
|
||||||
|
defer newDestroyArtifactFunc(tarDirMulti)(t, base, c)
|
||||||
|
defer newDestroyArtifactFunc(tarDirType)(t, base, c)
|
||||||
|
|
||||||
|
cureMany(t, c, []cureStep{
|
||||||
|
{"file", a, base.Append(
|
||||||
|
"identifier",
|
||||||
|
pkg.Encode(wantIdent),
|
||||||
|
), wantChecksum, nil},
|
||||||
|
|
||||||
|
{"directory", pkg.NewTar(
|
||||||
|
tarDir,
|
||||||
|
pkg.TarGzip,
|
||||||
|
), ignorePathname, wantChecksum, nil},
|
||||||
|
|
||||||
|
{"multiple entries", pkg.NewTar(
|
||||||
|
tarDirMulti,
|
||||||
|
pkg.TarGzip,
|
||||||
|
), nil, pkg.Checksum{}, errors.New(
|
||||||
|
"input directory does not contain a single regular file",
|
||||||
|
)},
|
||||||
|
|
||||||
|
{"bad type", pkg.NewTar(
|
||||||
|
tarDirType,
|
||||||
|
pkg.TarGzip,
|
||||||
|
), nil, pkg.Checksum{}, errors.New(
|
||||||
|
"input directory does not contain a single regular file",
|
||||||
|
)},
|
||||||
|
|
||||||
|
{"error passthrough", pkg.NewTar(stubArtifact{
|
||||||
|
kind: pkg.KindExec,
|
||||||
|
params: []byte("doomed artifact"),
|
||||||
|
cure: func(t *pkg.TContext) error {
|
||||||
|
return stub.UniqueError(0xcafe)
|
||||||
|
},
|
||||||
|
}, pkg.TarGzip), nil, pkg.Checksum{}, stub.UniqueError(0xcafe)},
|
||||||
|
})
|
||||||
|
}
|
||||||
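Judging by the wantIdent helper in checkTarHTTP above, an artifact identifier appears to be SHA-512/384 over the kind, the params, and each dependency's kind and identifier. The restatement below is an inference from that test, not an authoritative definition of pkg.Ident; it assumes crypto/sha512 and encoding/binary are imported in package pkg_test:

	// Inferred restatement of the wantIdent computation above; illustrative only.
	h := sha512.New384()
	h.Write(binary.LittleEndian.AppendUint64(nil, uint64(pkg.KindTar)))     // kind of the tar artifact
	h.Write(binary.LittleEndian.AppendUint64(nil, pkg.TarGzip))             // its params (compression)
	h.Write(binary.LittleEndian.AppendUint64(nil, uint64(pkg.KindHTTPGet))) // dependency kind
	dep := pkg.KindHTTPGet.Ident([]byte("file:///testdata"))
	h.Write(dep[:]) // dependency identifier
	id := pkg.ID(h.Sum(nil))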
260
internal/pkg/testdata/main.go
vendored
Normal file
260
internal/pkg/testdata/main.go
vendored
Normal file
@@ -0,0 +1,260 @@
//go:build testtool

package main

import (
	"encoding/gob"
	"log"
	"net"
	"os"
	"path"
	"reflect"
	"slices"
	"strings"

	"hakurei.app/container/check"
	"hakurei.app/container/fhs"
	"hakurei.app/container/vfs"
)

func main() {
	log.SetFlags(0)
	log.SetPrefix("testtool: ")

	var hostNet, layers bool
	if len(os.Args) == 2 && os.Args[0] == "testtool" {
		switch os.Args[1] {
		case "net":
			hostNet = true
			log.SetPrefix("testtool(net): ")
			break

		case "layers":
			layers = true
			log.SetPrefix("testtool(layers): ")
			break

		default:
			log.Fatalf("Args: %q", os.Args)
			return
		}
	} else if wantArgs := []string{"testtool"}; !slices.Equal(os.Args, wantArgs) {
		log.Fatalf("Args: %q, want %q", os.Args, wantArgs)
	}

	var overlayRoot bool
	wantEnv := []string{"HAKUREI_TEST=1"}
	if len(os.Environ()) == 2 {
		overlayRoot = true
		if !layers {
			log.SetPrefix("testtool(overlay root): ")
		}
		wantEnv = []string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"}
	}
	if !slices.Equal(wantEnv, os.Environ()) {
		log.Fatalf("Environ: %q, want %q", os.Environ(), wantEnv)
	}

	var overlayWork bool
	const (
		wantExec     = "/opt/bin/testtool"
		wantExecWork = "/work/bin/testtool"
	)
	var iftPath string
	if got, err := os.Executable(); err != nil {
		log.Fatalf("Executable: error = %v", err)
	} else {
		iftPath = path.Join(path.Dir(path.Dir(got)), "ift")

		if got != wantExec {
			switch got {
			case wantExecWork:
				overlayWork = true
				log.SetPrefix("testtool(overlay work): ")

			default:
				log.Fatalf("Executable: %q, want %q", got, wantExec)
			}
		}
	}

	wantHostname := "cure"
	if hostNet {
		wantHostname += "-net"
	}

	if hostname, err := os.Hostname(); err != nil {
		log.Fatalf("Hostname: error = %v", err)
	} else if hostname != wantHostname {
		log.Fatalf("Hostname: %q, want %q", hostname, wantHostname)
	}

	var m *vfs.MountInfo
	if f, err := os.Open(fhs.Proc + "self/mountinfo"); err != nil {
		log.Fatalf("Open: error = %v", err)
	} else {
		err = vfs.NewMountInfoDecoder(f).Decode(&m)
		closeErr := f.Close()
		if err != nil {
			log.Fatalf("Decode: error = %v", err)
		}
		if closeErr != nil {
			log.Fatalf("Close: error = %v", closeErr)
		}
	}

	if ift, err := net.Interfaces(); err != nil {
		log.Fatal(err)
	} else if !hostNet {
		if len(ift) != 1 || ift[0].Name != "lo" {
			log.Fatalln("got interfaces", strings.Join(slices.Collect(func(yield func(ifn string) bool) {
				for _, ifi := range ift {
					if !yield(ifi.Name) {
						break
					}
				}
			}), ", "))
		}
	} else {
		var iftParent []net.Interface

		var r *os.File
		if r, err = os.Open(iftPath); err != nil {
			log.Fatal(err)
		} else {
			err = gob.NewDecoder(r).Decode(&iftParent)
			closeErr := r.Close()
			if err != nil {
				log.Fatal(err)
			}
			if closeErr != nil {
				log.Fatal(closeErr)
			}
		}

		if !reflect.DeepEqual(ift, iftParent) {
			log.Fatalf("Interfaces: %#v, want %#v", ift, iftParent)
		}
	}

	const checksumEmptyDir = "MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"
	ident := "U2cbgVgEtjfRuvHfE1cQnZ3t8yoexULQyo_VLgvxAVJSsobMcNaFIsuDWtmt7kzK"
	log.Println(m)
	next := func() { m = m.Next; log.Println(m) }

	if overlayRoot {
		ident = "5ey2wpmMpj483YYa7ZZQciYLA2cx3_l167JCqWW4Pd-5DVp81dj9EsBtVTwYptF6"

		if m.Root != "/" || m.Target != "/" ||
			m.Source != "overlay" || m.FsType != "overlay" {
			log.Fatal("unexpected root mount entry")
		}
		var lowerdir string
		for _, o := range strings.Split(m.FsOptstr, ",") {
			const lowerdirKey = "lowerdir="
			if strings.HasPrefix(o, lowerdirKey) {
				lowerdir = o[len(lowerdirKey):]
			}
		}
		if !layers {
			if path.Base(lowerdir) != checksumEmptyDir {
				log.Fatal("unexpected artifact checksum")
			}
		} else {
			ident = "tfjrsVuBuFgzWgwz-yPppFtylYuC1VFWnKhyBiHbWTGkyz8lt7Ee9QXWaIHPXs4x"

			lowerdirsEscaped := strings.Split(lowerdir, ":")
			lowerdirs := lowerdirsEscaped[:0]
			// ignore the option separator since it does not appear in ident
			for i, e := range lowerdirsEscaped {
				if len(e) > 0 &&
					e[len(e)-1] == check.SpecialOverlayEscape[0] &&
					(len(e) == 1 || e[len(e)-2] != check.SpecialOverlayEscape[0]) {
					// ignore escaped pathname separator since it does not
					// appear in ident

					e = e[:len(e)-1]
					if len(lowerdirsEscaped)-1 != i {
						lowerdirsEscaped[i+1] = e + lowerdirsEscaped[i+1]
						continue
					}
				}
				lowerdirs = append(lowerdirs, e)
			}

			if len(lowerdirs) != 2 ||
				path.Base(lowerdirs[0]) != "MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU" ||
				path.Base(lowerdirs[1]) != "nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK" {
				log.Fatalf("unexpected lowerdirs %s", strings.Join(lowerdirs, ", "))
			}
		}
	} else {
		if hostNet {
			ident = "QdsJhGgnk5N2xdUNGcndXQxFKifxf1V_2t9X8CQ-pDcg24x6mGJC_BiLfGbs6Qml"
		}

		if m.Root != "/sysroot" || m.Target != "/" {
			log.Fatal("unexpected root mount entry")
		}

		next()
		if path.Base(m.Root) != "OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb" {
			log.Fatal("unexpected file artifact checksum")
		}

		next()
		if path.Base(m.Root) != checksumEmptyDir {
			log.Fatal("unexpected artifact checksum")
		}
	}

	next() // testtool artifact

	next()
	if overlayWork {
		ident = "acaDzHZv40dZaz4cGAXayqbRMgbEOuiuiUijZL8IgDQvyeCNMFE3onBMYfny-kXA"
		if m.Root != "/" || m.Target != "/work" ||
			m.Source != "overlay" || m.FsType != "overlay" {
			log.Fatal("unexpected work mount entry")
		}
	} else {
		if path.Base(m.Root) != ident || m.Target != "/work" {
			log.Fatal("unexpected work mount entry")
		}
	}

	next()
	if path.Base(m.Root) != ident || m.Target != "/tmp" {
		log.Fatal("unexpected temp mount entry")
	}

	next()
	if m.Root != "/" || m.Target != "/proc" || m.Source != "proc" || m.FsType != "proc" {
		log.Fatal("unexpected proc mount entry")
	}

	next()
	if m.Root != "/" || m.Target != "/dev" || m.Source != "devtmpfs" || m.FsType != "tmpfs" {
		log.Fatal("unexpected dev mount entry")
	}

	for i := 0; i < 9; i++ { // private /dev entries
		next()
	}

	if m.Next != nil {
		log.Println("unexpected extra mount entries")
		for m.Next != nil {
			next()
		}
		os.Exit(1)
	}

	checkData := []byte{0}
	if hostNet {
		checkData = []byte("net")
	}
	if err := os.WriteFile("check", checkData, 0400); err != nil {
		log.Fatal(err)
	}
}
@@ -36,7 +36,7 @@ libzstd.so.1 = /usr/lib/libzstd.so.1 (0x7ff71bfd2000)

{"path not absolute", `
libzstd.so.1 => usr/lib/libzstd.so.1 (0x7ff71bfd2000)
-`, &check.AbsoluteError{Pathname: "usr/lib/libzstd.so.1"}},
+`, check.AbsoluteError("usr/lib/libzstd.so.1")},

{"unexpected segments", `
meow libzstd.so.1 => /usr/lib/libzstd.so.1 (0x7ff71bfd2000)
14
options.nix
@@ -1,8 +1,15 @@
packages:
-{ lib, pkgs, ... }:
+{
+  lib,
+  pkgs,
+  config,
+  ...
+}:

let
  inherit (lib) types mkOption mkEnableOption;
+
+  cfg = config.environment.hakurei;
in

{
@@ -43,7 +50,10 @@ in
sharefs = {
  package = mkOption {
    type = types.package;
-    default = packages.${pkgs.stdenv.hostPlatform.system}.sharefs;
+    default = pkgs.linkFarm "sharefs" {
+      "bin/sharefs" = "${cfg.package}/libexec/sharefs";
+      "bin/mount.fuse.sharefs" = "${cfg.package}/libexec/sharefs";
+    };
    description = "The sharefs package to use.";
  };