1
0
forked from rosa/hakurei

46 Commits

Author SHA1 Message Date
Kat
954b60dbab TODO: consider writing tests for the test runner. 2026-04-19 02:35:47 +10:00
Kat
a06e622a84 TODO: actually write tests lol. 2026-04-19 02:35:47 +10:00
Kat
a2af3a0db0 cmd/pkgserver/ui_test: implement skipping from DSL 2026-04-19 02:35:47 +10:00
Kat
4c9d1c9d87 cmd/pkgserver/ui_test: add JSON reporter for go test integration 2026-04-19 02:35:47 +10:00
Kat
178cb4d2d2 cmd/pkgserver/ui_test: implement DSL and runner 2026-04-19 02:35:47 +10:00
Kat
cd3224e4af cmd/pkgserver/ui_test: add DOM reporter 2026-04-19 02:35:47 +10:00
Kat
bae2735a0d cmd/pkgserver/ui_test: add basic CLI reporter 2026-04-19 02:35:47 +10:00
mae
8a38b614c6 cmd/pkgserver: update 2026-04-18 11:32:08 -05:00
mae
3286fff076 cmd/pkgserver: fix gitignore 2026-04-18 11:30:57 -05:00
mae
fd1884a84b cmd/pkgserver: better no results handling 2026-04-18 11:30:57 -05:00
mae
fe6424bd6d cmd/pkgserver: better no results handling 2026-04-18 11:30:57 -05:00
mae
004ac511a9 cmd/pkgserver: finish search implementation 2026-04-18 11:30:57 -05:00
mae
ba17f9d4f3 cmd/pkgserver: remove get endpoint count field 2026-04-18 11:30:56 -05:00
mae
ea62f64b8f cmd/pkgserver: search endpoint 2026-04-18 11:30:56 -05:00
mae
86669363ac cmd/pkgserver: pagination bugfix 2026-04-18 11:30:56 -05:00
6f5b7964f4 cmd/pkgserver: guard sass/ts behind build tag
Packaging nodejs and ruby is an immense burden for the Rosa OS base system, and these files diff poorly.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:30:56 -05:00
mae
a195c3760c cmd/pkgserver: add size 2026-04-18 11:30:56 -05:00
cfe52dce82 cmd/pkgserver: expose size and store pre-encoded ident
This change also handles SIGSEGV correctly in newStatusHandler, and makes serving status fully zero copy.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:30:56 -05:00
8483d8a005 cmd/pkgserver: look up status by name once
This has far less overhead.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:30:56 -05:00
5bc5aed024 cmd/pkgserver: refer to preset in index
This enables referencing back to internal/rosa through an entry obtained via the index.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:30:56 -05:00
9465649d13 cmd/pkgserver: handle unversioned value
This omits the field for an unversioned artifact, and only does so once on startup.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:30:56 -05:00
33c461aa67 cmd/pkgserver: determine disposition route in mux
This removes duplicate checks and uses the more sound check in mux.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:30:56 -05:00
dee0204fc0 cmd/pkgserver: format get error messages
This improves source code readability on smaller displays.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:30:56 -05:00
2f916ed0c0 cmd/pkgserver: constant string in pattern
This resolves patterns at compile time.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:30:56 -05:00
55ce3a2f90 cmd/pkgserver: satisfy handler signature in method
This is somewhat cleaner.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:30:56 -05:00
3f6a07ef59 cmd/pkgserver: log instead of write encoding error
This message is unlikely to be useful to the user, and output may be partially written at this point, causing the error to be even less intelligible.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:30:56 -05:00
02941e7c23 cmd/pkgserver: appropriately mark test helpers
This improves usefulness of test log messages.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:30:56 -05:00
b9601881b7 cmd/pkgserver: do not omit report field
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:30:56 -05:00
58596f0af5 cmd/pkgserver: gracefully shut down on signal
Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:30:56 -05:00
02cde40289 cmd/pkgserver: specify full addr string in flag
This allows greater flexibility.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:30:56 -05:00
5014534884 cmd/pkgserver: make report argument optional
This allows serving metadata only without a populated report. This also removes the out-of-bounds read on args when no arguments are passed.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:30:56 -05:00
13cf99ced4 cmd/pkgserver: embed internal/rosa metadata
This change also cleans up and reduces some unnecessary copies.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:30:56 -05:00
6bfb258fd0 cmd/pkgserver: do not assume default mux
This helps with testing.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:30:56 -05:00
b649645189 cmd/pkgserver: create index without report
This is useful for testing, where report testdata is not available.

Signed-off-by: Ophestra <cat@gensokyo.uk>
2026-04-18 11:30:56 -05:00
mae
3ddba4e21f cmd/pkgserver: add sort orders, change pagination rules 2026-04-18 11:30:56 -05:00
mae
40a906c6c2 cmd/pkgserver: add /status endpoint 2026-04-18 11:30:56 -05:00
mae
06894e2104 cmd/pkgserver: minimum viable frontend 2026-04-18 11:30:56 -05:00
mae
56f0392b86 cmd/pkgserver: api versioning 2026-04-18 11:30:56 -05:00
mae
e2315f6c1a cmd/pkgserver: add get endpoint 2026-04-18 11:30:56 -05:00
mae
e4aee49eb0 cmd/pkgserver: add count endpoint and restructure 2026-04-18 11:30:56 -05:00
mae
6c03cc8b8a cmd/pkgserver: add status endpoint 2026-04-18 11:30:56 -05:00
mae
59ade6a86b cmd/pkgserver: add createPackageIndex 2026-04-18 11:30:56 -05:00
mae
59ab493035 cmd/pkgserver: add command handler 2026-04-18 11:30:56 -05:00
mae
d80a3346e2 cmd/pkgserver: replace favicon 2026-04-18 11:30:56 -05:00
mae
327a34aacb cmd/pkgserver: pagination 2026-04-18 11:30:56 -05:00
mae
ea7c6b3b48 cmd/pkgserver: basic web ui 2026-04-18 11:30:56 -05:00
139 changed files with 2936 additions and 6208 deletions

5
.gitignore vendored
View File

@@ -7,8 +7,9 @@
# go generate # go generate
/cmd/hakurei/LICENSE /cmd/hakurei/LICENSE
/cmd/mbf/internal/pkgserver/ui/static /cmd/pkgserver/ui/static/*.js
/internal/pkg/internal/testtool/testtool /cmd/pkgserver/ui_test/static
/internal/pkg/testdata/testtool
/internal/rosa/hakurei_current.tar.gz /internal/rosa/hakurei_current.tar.gz
# cmd/dist default destination # cmd/dist default destination

5
all.sh
View File

@@ -1,3 +1,6 @@
#!/bin/sh -e #!/bin/sh -e
HAKUREI_DIST_MAKE='' exec "$(dirname -- "$0")/cmd/dist/dist.sh" TOOLCHAIN_VERSION="$(go version)"
cd "$(dirname -- "$0")/"
echo "# Building cmd/dist using ${TOOLCHAIN_VERSION}."
go run -v --tags=dist ./cmd/dist

View File

@@ -4,23 +4,15 @@ import "strings"
const ( const (
// SpecialOverlayEscape is the escape string for overlay mount options. // SpecialOverlayEscape is the escape string for overlay mount options.
//
// Deprecated: This is no longer used and will be removed in 0.5.
SpecialOverlayEscape = `\` SpecialOverlayEscape = `\`
// SpecialOverlayOption is the separator string between overlay mount options. // SpecialOverlayOption is the separator string between overlay mount options.
//
// Deprecated: This is no longer used and will be removed in 0.5.
SpecialOverlayOption = "," SpecialOverlayOption = ","
// SpecialOverlayPath is the separator string between overlay paths. // SpecialOverlayPath is the separator string between overlay paths.
//
// Deprecated: This is no longer used and will be removed in 0.5.
SpecialOverlayPath = ":" SpecialOverlayPath = ":"
) )
// EscapeOverlayDataSegment escapes a string for formatting into the data // EscapeOverlayDataSegment escapes a string for formatting into the data
// argument of an overlay mount system call. // argument of an overlay mount system call.
//
// Deprecated: This is no longer used and will be removed in 0.5.
func EscapeOverlayDataSegment(s string) string { func EscapeOverlayDataSegment(s string) string {
if s == "" { if s == "" {
return "" return ""

10
cmd/dist/dist.sh vendored
View File

@@ -1,10 +0,0 @@
#!/bin/sh -e
TOOLCHAIN_VERSION="$(go version)"
cd "$(dirname -- "$0")/../.."
echo "Building cmd/dist using ${TOOLCHAIN_VERSION}."
FLAGS=''
if test -n "$VERBOSE"; then
FLAGS="$FLAGS -v"
fi
go run $FLAGS --tags=dist ./cmd/dist

40
cmd/dist/main.go vendored
View File

@@ -42,19 +42,14 @@ func mustRun(ctx context.Context, name string, arg ...string) {
var comp []byte var comp []byte
func main() { func main() {
fmt.Println()
log.SetFlags(0) log.SetFlags(0)
log.SetPrefix("") log.SetPrefix("# ")
verbose := os.Getenv("VERBOSE") != ""
runTests := os.Getenv("HAKUREI_DIST_MAKE") == ""
version := getenv("HAKUREI_VERSION", "untagged") version := getenv("HAKUREI_VERSION", "untagged")
prefix := getenv("PREFIX", "/usr") prefix := getenv("PREFIX", "/usr")
destdir := getenv("DESTDIR", "dist") destdir := getenv("DESTDIR", "dist")
if verbose {
log.Println()
}
if err := os.MkdirAll(destdir, 0755); err != nil { if err := os.MkdirAll(destdir, 0755); err != nil {
log.Fatal(err) log.Fatal(err)
} }
@@ -81,17 +76,12 @@ func main() {
ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt) ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt)
defer cancel() defer cancel()
verboseFlag := "-v" log.Println("Building hakurei.")
if !verbose {
verboseFlag = "-buildvcs=false"
}
log.Printf("Building hakurei for %s/%s.", runtime.GOOS, runtime.GOARCH)
mustRun(ctx, "go", "generate", "./...") mustRun(ctx, "go", "generate", "./...")
mustRun( mustRun(
ctx, "go", "build", ctx, "go", "build",
"-trimpath", "-trimpath",
verboseFlag, "-o", s, "-v", "-o", s,
"-ldflags=-s -w "+ "-ldflags=-s -w "+
"-buildid= -linkmode external -extldflags=-static "+ "-buildid= -linkmode external -extldflags=-static "+
"-X hakurei.app/internal/info.buildVersion="+version+" "+ "-X hakurei.app/internal/info.buildVersion="+version+" "+
@@ -100,19 +90,17 @@ func main() {
"-X main.hakureiPath="+prefix+"/bin/hakurei", "-X main.hakureiPath="+prefix+"/bin/hakurei",
"./...", "./...",
) )
log.Println() fmt.Println()
if runTests { log.Println("Testing Hakurei.")
log.Println("##### Testing Hakurei.") mustRun(
mustRun( ctx, "go", "test",
ctx, "go", "test", "-ldflags=-buildid= -linkmode external -extldflags=-static",
"-ldflags=-buildid= -linkmode external -extldflags=-static", "./...",
"./...", )
) fmt.Println()
log.Println()
}
log.Println("##### Creating distribution.") log.Println("Creating distribution.")
const suffix = ".tar.gz" const suffix = ".tar.gz"
distName := "hakurei-" + version + "-" + runtime.GOARCH distName := "hakurei-" + version + "-" + runtime.GOARCH
var f *os.File var f *os.File
@@ -133,7 +121,7 @@ func main() {
}() }()
h := sha512.New() h := sha512.New()
gw, _ := gzip.NewWriterLevel(io.MultiWriter(f, h), gzip.BestCompression) gw := gzip.NewWriter(io.MultiWriter(f, h))
tw := tar.NewWriter(gw) tw := tar.NewWriter(gw)
mustWriteHeader := func(name string, size int64, mode os.FileMode) { mustWriteHeader := func(name string, size int64, mode os.FileMode) {

View File

@@ -7,7 +7,6 @@ import (
"testing" "testing"
"hakurei.app/check" "hakurei.app/check"
"hakurei.app/container"
"hakurei.app/internal/pkg" "hakurei.app/internal/pkg"
"hakurei.app/message" "hakurei.app/message"
) )
@@ -20,15 +19,8 @@ type cache struct {
// Should generally not be used directly. // Should generally not be used directly.
c *pkg.Cache c *pkg.Cache
cures, jobs int cures, jobs int
// Primarily to work around missing landlock LSM. hostAbstract, idle bool
hostAbstract bool
// Set SCHED_IDLE.
idle bool
// Unset [pkg.CSuppressInit].
verboseInit bool
// Loaded artifact of [rosa.QEMU].
qemu pkg.Artifact
base string base string
} }
@@ -39,6 +31,9 @@ func (cache *cache) open() (err error) {
return os.ErrInvalid return os.ErrInvalid
} }
if cache.base == "" {
cache.base = "cache"
}
var base *check.Absolute var base *check.Absolute
if cache.base, err = filepath.Abs(cache.base); err != nil { if cache.base, err = filepath.Abs(cache.base); err != nil {
return return
@@ -53,9 +48,6 @@ func (cache *cache) open() (err error) {
if cache.hostAbstract { if cache.hostAbstract {
flags |= pkg.CHostAbstract flags |= pkg.CHostAbstract
} }
if !cache.verboseInit {
flags |= pkg.CSuppressInit
}
done := make(chan struct{}) done := make(chan struct{})
defer close(done) defer close(done)
@@ -81,39 +73,6 @@ func (cache *cache) open() (err error) {
cache.jobs, cache.jobs,
base, base,
) )
if err != nil {
return
}
done <- struct{}{}
if cache.qemu != nil {
var pathname *check.Absolute
pathname, _, err = cache.c.Cure(cache.qemu)
if err != nil {
cache.c.Close()
return
}
pkg.RegisterArch("riscv64", container.BinfmtEntry{
Offset: 0,
Magic: "\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\xf3\x00",
Mask: "\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff",
Interpreter: pathname.Append(
"system/bin",
"qemu-riscv64",
),
})
pkg.RegisterArch("arm64", container.BinfmtEntry{
Offset: 0,
Magic: "\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\xb7\x00",
Mask: "\xff\xff\xff\xff\xff\xff\xff\xfc\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff",
Interpreter: pathname.Append(
"system/bin",
"qemu-aarch64",
),
})
}
return return
} }

View File

@@ -99,9 +99,10 @@ func cancelIdent(
var ident pkg.ID var ident pkg.ID
if _, err := io.ReadFull(conn, ident[:]); err != nil { if _, err := io.ReadFull(conn, ident[:]); err != nil {
return nil, false, errors.Join(err, conn.Close()) return nil, false, errors.Join(err, conn.Close())
} else if err = conn.Close(); err != nil {
return nil, false, err
} }
ok := cache.Cancel(unique.Make(ident)) return &ident, cache.Cancel(unique.Make(ident)), nil
return &ident, ok, conn.Close()
} }
// serve services connections from a [net.UnixListener]. // serve services connections from a [net.UnixListener].
@@ -193,11 +194,11 @@ func serve(
} }
case specialAbort: case specialAbort:
log.Println("aborting all pending cures")
cm.c.Abort()
if _err := conn.Close(); _err != nil { if _err := conn.Close(); _err != nil {
log.Println(_err) log.Println(_err)
} }
log.Println("aborting all pending cures")
cm.c.Abort()
} }
return return
@@ -305,7 +306,6 @@ func cancelRemote(
ctx context.Context, ctx context.Context,
addr *net.UnixAddr, addr *net.UnixAddr,
a pkg.Artifact, a pkg.Artifact,
wait bool,
) error { ) error {
done, conn, err := dial(ctx, addr) done, conn, err := dial(ctx, addr)
if err != nil { if err != nil {
@@ -324,19 +324,13 @@ func cancelRemote(
} else if n != len(id) { } else if n != len(id) {
return errors.Join(io.ErrShortWrite, conn.Close()) return errors.Join(io.ErrShortWrite, conn.Close())
} }
if wait { return conn.Close()
if _, err = conn.Read(make([]byte, 1)); err == io.EOF {
err = nil
}
}
return errors.Join(err, conn.Close())
} }
// abortRemote aborts all [pkg.Artifact] curing on a daemon. // abortRemote aborts all [pkg.Artifact] curing on a daemon.
func abortRemote( func abortRemote(
ctx context.Context, ctx context.Context,
addr *net.UnixAddr, addr *net.UnixAddr,
wait bool,
) error { ) error {
done, conn, err := dial(ctx, addr) done, conn, err := dial(ctx, addr)
if err != nil { if err != nil {
@@ -345,10 +339,5 @@ func abortRemote(
defer close(done) defer close(done)
err = writeSpecialHeader(conn, specialAbort) err = writeSpecialHeader(conn, specialAbort)
if wait && err == nil {
if _, err = conn.Read(make([]byte, 1)); err == io.EOF {
err = nil
}
}
return errors.Join(err, conn.Close()) return errors.Join(err, conn.Close())
} }

View File

@@ -106,11 +106,11 @@ func TestDaemon(t *testing.T) {
} }
}() }()
if err = cancelRemote(ctx, &addr, pkg.NewFile("nonexistent", nil), true); err != nil { if err = cancelRemote(ctx, &addr, pkg.NewFile("nonexistent", nil)); err != nil {
t.Fatalf("cancelRemote: error = %v", err) t.Fatalf("cancelRemote: error = %v", err)
} }
if err = abortRemote(ctx, &addr, true); err != nil { if err = abortRemote(ctx, &addr); err != nil {
t.Fatalf("abortRemote: error = %v", err) t.Fatalf("abortRemote: error = %v", err)
} }

View File

@@ -17,12 +17,25 @@ func commandInfo(
args []string, args []string,
w io.Writer, w io.Writer,
writeStatus bool, writeStatus bool,
r *rosa.Report, reportPath string,
) (err error) { ) (err error) {
if len(args) == 0 { if len(args) == 0 {
return errors.New("info requires at least 1 argument") return errors.New("info requires at least 1 argument")
} }
var r *rosa.Report
if reportPath != "" {
if r, err = rosa.OpenReport(reportPath); err != nil {
return err
}
defer func() {
if closeErr := r.Close(); err == nil {
err = closeErr
}
}()
defer r.HandleAccess(&err)()
}
// recovered by HandleAccess // recovered by HandleAccess
mustPrintln := func(a ...any) { mustPrintln := func(a ...any) {
if _, _err := fmt.Fprintln(w, a...); _err != nil { if _, _err := fmt.Fprintln(w, a...); _err != nil {

View File

@@ -95,7 +95,7 @@ status : not in report
var ( var (
cm *cache cm *cache
buf strings.Builder buf strings.Builder
r *rosa.Report rp string
) )
if tc.status != nil || tc.report != "" { if tc.status != nil || tc.report != "" {
@@ -108,25 +108,14 @@ status : not in report
} }
if tc.report != "" { if tc.report != "" {
pathname := filepath.Join(t.TempDir(), "report") rp = filepath.Join(t.TempDir(), "report")
err := os.WriteFile( if err := os.WriteFile(
pathname, rp,
unsafe.Slice(unsafe.StringData(tc.report), len(tc.report)), unsafe.Slice(unsafe.StringData(tc.report), len(tc.report)),
0400, 0400,
) ); err != nil {
if err != nil {
t.Fatal(err) t.Fatal(err)
} }
r, err = rosa.OpenReport(pathname)
if err != nil {
t.Fatal(err)
}
defer func() {
if err = r.Close(); err != nil {
t.Fatal(err)
}
}()
} }
if tc.status != nil { if tc.status != nil {
@@ -168,7 +157,7 @@ status : not in report
tc.args, tc.args,
&buf, &buf,
cm != nil, cm != nil,
r, rp,
); !reflect.DeepEqual(err, wantErr) { ); !reflect.DeepEqual(err, wantErr) {
t.Fatalf("commandInfo: error = %v, want %v", err, wantErr) t.Fatalf("commandInfo: error = %v, want %v", err, wantErr)
} }

View File

@@ -1,9 +0,0 @@
// Package ui holds the static web UI.
package ui
import "net/http"
// Register arranges for mux to serve the embedded frontend.
func Register(mux *http.ServeMux) {
mux.Handle("GET /", http.FileServer(http.FS(static)))
}

View File

@@ -1,21 +0,0 @@
//go:build frontend
package ui
import (
"embed"
"io/fs"
)
//go:generate tsc
//go:generate cp index.html style.css favicon.ico static
//go:embed static
var _static embed.FS
var static = func() fs.FS {
if f, err := fs.Sub(_static, "static"); err != nil {
panic(err)
} else {
return f
}
}()

View File

@@ -20,7 +20,6 @@ import (
"io" "io"
"log" "log"
"net" "net"
"net/http"
"os" "os"
"os/signal" "os/signal"
"path/filepath" "path/filepath"
@@ -42,9 +41,6 @@ import (
"hakurei.app/internal/pkg" "hakurei.app/internal/pkg"
"hakurei.app/internal/rosa" "hakurei.app/internal/rosa"
"hakurei.app/message" "hakurei.app/message"
"hakurei.app/cmd/mbf/internal/pkgserver"
"hakurei.app/cmd/mbf/internal/pkgserver/ui"
) )
func main() { func main() {
@@ -63,16 +59,17 @@ func main() {
defer stop() defer stop()
var cm cache var cm cache
defer func() { cm.Close() }() defer func() {
cm.Close()
if r := recover(); r != nil {
fmt.Println(r)
log.Fatal("consider scrubbing the on-disk cache")
}
}()
var ( var (
flagQuiet bool flagQuiet bool
flagQEMU bool
flagArch string
flagCheck bool
flagLTO bool
flagCrossOverride int
addr net.UnixAddr addr net.UnixAddr
) )
@@ -80,38 +77,11 @@ func main() {
msg.SwapVerbose(!flagQuiet) msg.SwapVerbose(!flagQuiet)
cm.ctx, cm.msg = ctx, msg cm.ctx, cm.msg = ctx, msg
cm.base = os.ExpandEnv(cm.base) cm.base = os.ExpandEnv(cm.base)
if cm.base == "" {
cm.base = "cache"
}
addr.Net = "unix" addr.Net = "unix"
addr.Name = os.ExpandEnv(addr.Name) addr.Name = os.ExpandEnv(addr.Name)
if addr.Name == "" { if addr.Name == "" {
addr.Name = filepath.Join(cm.base, "daemon") addr.Name = "daemon"
}
var flags int
if !flagCheck {
flags |= rosa.OptSkipCheck
}
if !flagLTO {
flags |= rosa.OptLLVMNoLTO
}
rosa.DropCaches("", flags)
cross := flagArch != "" && flagArch != runtime.GOARCH
if flagQEMU || cross {
cm.qemu = rosa.Std.Load(rosa.QEMU)
}
if cross {
if flagCrossOverride != -1 {
flags = flagCrossOverride
}
rosa.DropCaches(flagArch, flags)
if !rosa.HasStage0() {
return pkg.UnsupportedArchError(flagArch)
}
} }
return nil return nil
@@ -119,30 +89,6 @@ func main() {
&flagQuiet, &flagQuiet,
"q", command.BoolFlag(false), "q", command.BoolFlag(false),
"Do not print cure messages", "Do not print cure messages",
).Flag(
&flagQEMU,
"register", command.BoolFlag(false),
"Enable additional target architectures",
).Flag(
&flagArch,
"arch", command.StringFlag(runtime.GOARCH),
"Target architecture",
).Flag(
&flagLTO,
"lto", command.BoolFlag(false),
"Enable LTO in stage2 and stage3 LLVM toolchains",
).Flag(
&flagCheck,
"check", command.BoolFlag(true),
"Run test suites",
).Flag(
&flagCrossOverride,
"cross-flags", command.IntFlag(-1),
"Override non-native target preset flags",
).Flag(
&cm.verboseInit,
"v", command.BoolFlag(false),
"Do not suppress verbose output from init",
).Flag( ).Flag(
&cm.cures, &cm.cures,
"cures", command.IntFlag(0), "cures", command.IntFlag(0),
@@ -175,17 +121,7 @@ func main() {
c.NewCommand( c.NewCommand(
"checksum", "Compute checksum of data read from standard input", "checksum", "Compute checksum of data read from standard input",
func([]string) error { func([]string) error {
done := make(chan struct{}) go func() { <-ctx.Done(); os.Exit(1) }()
defer close(done)
go func() {
select {
case <-ctx.Done():
os.Exit(1)
case <-done:
return
}
}()
h := sha512.New384() h := sha512.New384()
if _, err := io.Copy(h, os.Stdin); err != nil { if _, err := io.Copy(h, os.Stdin); err != nil {
return err return err
@@ -219,7 +155,6 @@ func main() {
{ {
var ( var (
flagBind string
flagStatus bool flagStatus bool
flagReport string flagReport string
) )
@@ -227,52 +162,8 @@ func main() {
"info", "info",
"Display out-of-band metadata of an artifact", "Display out-of-band metadata of an artifact",
func(args []string) (err error) { func(args []string) (err error) {
const shutdownTimeout = 15 * time.Second return commandInfo(&cm, args, os.Stdout, flagStatus, flagReport)
var r *rosa.Report
if flagReport != "" {
if r, err = rosa.OpenReport(flagReport); err != nil {
return err
}
defer func() {
if closeErr := r.Close(); err == nil {
err = closeErr
}
}()
defer r.HandleAccess(&err)()
}
if flagBind == "" {
return commandInfo(&cm, args, os.Stdout, flagStatus, r)
}
var mux http.ServeMux
ui.Register(&mux)
if err = pkgserver.Register(ctx, &mux, r); err != nil {
return
}
server := http.Server{Addr: flagBind, Handler: &mux}
go func() {
<-ctx.Done()
cc, cancel := context.WithTimeout(context.Background(), shutdownTimeout)
defer cancel()
if _err := server.Shutdown(cc); _err != nil {
log.Fatal(_err)
}
}()
msg.Verbosef("listening on %q", flagBind)
err = server.ListenAndServe()
if errors.Is(err, http.ErrServerClosed) {
err = nil
}
return
}, },
).Flag(
&flagBind,
"bind", command.StringFlag(""),
"TCP address for the server to listen on",
).Flag( ).Flag(
&flagStatus, &flagStatus,
"status", command.BoolFlag(false), "status", command.BoolFlag(false),
@@ -431,7 +322,7 @@ func main() {
if err = cm.Do(func(cache *pkg.Cache) (err error) { if err = cm.Do(func(cache *pkg.Cache) (err error) {
pathname, _, err = cache.Cure( pathname, _, err = cache.Cure(
(t - 2).Load(rosa.LLVM), (t - 2).Load(rosa.Clang),
) )
return return
}); err != nil { }); err != nil {
@@ -441,7 +332,7 @@ func main() {
if err = cm.Do(func(cache *pkg.Cache) (err error) { if err = cm.Do(func(cache *pkg.Cache) (err error) {
pathname, checksum[0], err = cache.Cure( pathname, checksum[0], err = cache.Cure(
(t - 1).Load(rosa.LLVM), (t - 1).Load(rosa.Clang),
) )
return return
}); err != nil { }); err != nil {
@@ -450,7 +341,7 @@ func main() {
log.Println("stage2:", pathname) log.Println("stage2:", pathname)
if err = cm.Do(func(cache *pkg.Cache) (err error) { if err = cm.Do(func(cache *pkg.Cache) (err error) {
pathname, checksum[1], err = cache.Cure( pathname, checksum[1], err = cache.Cure(
t.Load(rosa.LLVM), t.Load(rosa.Clang),
) )
return return
}); err != nil { }); err != nil {
@@ -506,11 +397,6 @@ func main() {
flagExport string flagExport string
flagRemote bool flagRemote bool
flagNoReply bool flagNoReply bool
flagFaults bool
flagPop bool
flagBoot bool
flagStd bool
) )
c.NewCommand( c.NewCommand(
"cure", "cure",
@@ -524,18 +410,11 @@ func main() {
return fmt.Errorf("unknown artifact %q", args[0]) return fmt.Errorf("unknown artifact %q", args[0])
} }
t := rosa.Std
if flagBoot {
t -= 2
} else if flagStd {
t -= 1
}
switch { switch {
default: default:
var pathname *check.Absolute var pathname *check.Absolute
err := cm.Do(func(cache *pkg.Cache) (err error) { err := cm.Do(func(cache *pkg.Cache) (err error) {
pathname, _, err = cache.Cure(t.Load(p)) pathname, _, err = cache.Cure(rosa.Std.Load(p))
return return
}) })
if err != nil { if err != nil {
@@ -588,7 +467,7 @@ func main() {
return cm.Do(func(cache *pkg.Cache) error { return cm.Do(func(cache *pkg.Cache) error {
return cache.EnterExec( return cache.EnterExec(
ctx, ctx,
t.Load(p), rosa.Std.Load(p),
true, os.Stdin, os.Stdout, os.Stderr, true, os.Stdin, os.Stdout, os.Stderr,
rosa.AbsSystem.Append("bin", "mksh"), rosa.AbsSystem.Append("bin", "mksh"),
"sh", "sh",
@@ -600,7 +479,7 @@ func main() {
if flagNoReply { if flagNoReply {
flags |= remoteNoReply flags |= remoteNoReply
} }
a := t.Load(p) a := rosa.Std.Load(p)
pathname, err := cureRemote(ctx, &addr, a, flags) pathname, err := cureRemote(ctx, &addr, a, flags)
if !flagNoReply && err == nil { if !flagNoReply && err == nil {
log.Println(pathname) log.Println(pathname)
@@ -610,55 +489,12 @@ func main() {
cc, cancel := context.WithDeadline(context.Background(), daemonDeadline()) cc, cancel := context.WithDeadline(context.Background(), daemonDeadline())
defer cancel() defer cancel()
if _err := cancelRemote(cc, &addr, a, false); _err != nil { if _err := cancelRemote(cc, &addr, a); _err != nil {
log.Println(err) log.Println(err)
} }
} }
return err return err
case flagFaults:
var faults []pkg.Fault
if err := cm.Do(func(cache *pkg.Cache) (err error) {
faults, err = cache.ReadFaults(t.Load(p))
return
}); err != nil {
return err
}
for _, fault := range faults {
log.Printf("%s: %s ago", fault.String(), time.Since(fault.Time()))
}
return nil
case flagPop:
var faults []pkg.Fault
if err := cm.Do(func(cache *pkg.Cache) (err error) {
faults, err = cache.ReadFaults(t.Load(p))
return
}); err != nil {
return err
}
if len(faults) == 0 {
return errors.New("no fault entries found")
}
fault := faults[len(faults)-1]
r, err := fault.Open()
if err != nil {
return err
}
if _, err = io.Copy(os.Stdout, r); err != nil {
_ = r.Close()
return err
}
fmt.Println()
if err = r.Close(); err != nil {
return err
}
log.Printf("faulting cure terminated %s ago", time.Since(fault.Time()))
return fault.Destroy()
} }
}, },
).Flag( ).Flag(
@@ -681,52 +517,13 @@ func main() {
&flagNoReply, &flagNoReply,
"no-reply", command.BoolFlag(false), "no-reply", command.BoolFlag(false),
"Do not receive a reply from the daemon", "Do not receive a reply from the daemon",
).Flag(
&flagBoot,
"boot", command.BoolFlag(false),
"Build on the stage0 toolchain",
).Flag(
&flagStd,
"std", command.BoolFlag(false),
"Build on the intermediate toolchain",
).Flag(
&flagFaults,
"faults", command.BoolFlag(false),
"Display fault entries of the specified artifact",
).Flag(
&flagPop,
"pop", command.BoolFlag(false),
"Display and destroy the most recent fault entry",
) )
} }
c.NewCommand(
"clear",
"Remove all fault entries from the cache",
func([]string) error {
return cm.Do(func(*pkg.Cache) error {
pathname := filepath.Join(cm.base, "fault")
dents, err := os.ReadDir(pathname)
if err != nil {
return err
}
for _, dent := range dents {
msg.Verbosef("destroying entry %s", dent.Name())
if err = os.Remove(filepath.Join(pathname, dent.Name())); err != nil {
return err
}
}
log.Printf("destroyed %d fault entries", len(dents))
return nil
})
},
)
c.NewCommand( c.NewCommand(
"abort", "abort",
"Abort all pending cures on the daemon", "Abort all pending cures on the daemon",
func([]string) error { return abortRemote(ctx, &addr, false) }, func([]string) error { return abortRemote(ctx, &addr) },
) )
{ {
@@ -749,7 +546,7 @@ func main() {
presets[i] = p presets[i] = p
} }
base := rosa.LLVM base := rosa.Clang
if !flagWithToolchain { if !flagWithToolchain {
base = rosa.Musl base = rosa.Musl
} }
@@ -821,7 +618,6 @@ func main() {
z.Hostname = "localhost" z.Hostname = "localhost"
z.Uid, z.Gid = (1<<10)-1, (1<<10)-1 z.Uid, z.Gid = (1<<10)-1, (1<<10)-1
z.Stdin, z.Stdout, z.Stderr = os.Stdin, os.Stdout, os.Stderr z.Stdin, z.Stdout, z.Stderr = os.Stdin, os.Stdout, os.Stderr
z.Quiet = !cm.verboseInit
if s, ok := os.LookupEnv("TERM"); ok { if s, ok := os.LookupEnv("TERM"); ok {
z.Env = append(z.Env, "TERM="+s) z.Env = append(z.Env, "TERM="+s)
} }

View File

@@ -1,47 +0,0 @@
package main
import (
"net"
"os"
"testing"
"hakurei.app/internal/rosa"
)
func TestMain(m *testing.M) {
rosa.DropCaches("", rosa.OptLLVMNoLTO)
os.Exit(m.Run())
}
func TestCureAll(t *testing.T) {
t.Parallel()
const env = "ROSA_TEST_DAEMON"
if !testing.Verbose() {
t.Skip("verbose flag not set")
}
pathname, ok := os.LookupEnv(env)
if !ok {
t.Skip(env + " not set")
}
addr := net.UnixAddr{Net: "unix", Name: pathname}
t.Cleanup(func() {
if t.Failed() {
if err := abortRemote(t.Context(), &addr, false); err != nil {
t.Fatal(err)
}
}
})
for i := range rosa.PresetEnd {
p := rosa.PArtifact(i)
t.Run(rosa.GetMetadata(p).Name, func(t *testing.T) {
_, err := cureRemote(t.Context(), &addr, rosa.Std.Load(p), 0)
if err != nil {
t.Error(err)
}
})
}
}

View File

@@ -1,8 +1,6 @@
// Package pkgserver implements the package metadata service backend. package main
package pkgserver
import ( import (
"context"
"encoding/json" "encoding/json"
"log" "log"
"net/http" "net/http"
@@ -10,7 +8,6 @@ import (
"path" "path"
"strconv" "strconv"
"sync" "sync"
"time"
"hakurei.app/internal/info" "hakurei.app/internal/info"
"hakurei.app/internal/rosa" "hakurei.app/internal/rosa"
@@ -161,29 +158,6 @@ func (index *packageIndex) registerAPI(mux *http.ServeMux) {
mux.HandleFunc("GET /status/", index.newStatusHandler(true)) mux.HandleFunc("GET /status/", index.newStatusHandler(true))
} }
// Register arranges for mux to service API requests.
func Register(ctx context.Context, mux *http.ServeMux, report *rosa.Report) error {
var index packageIndex
index.search = make(searchCache)
if err := index.populate(report); err != nil {
return err
}
ticker := time.NewTicker(1 * time.Minute)
go func() {
for {
select {
case <-ctx.Done():
ticker.Stop()
return
case <-ticker.C:
index.search.clean()
}
}
}()
index.registerAPI(mux)
return nil
}
// writeAPIPayload sets headers common to API responses and encodes payload as // writeAPIPayload sets headers common to API responses and encodes payload as
// JSON for the response body. // JSON for the response body.
func writeAPIPayload(w http.ResponseWriter, payload any) { func writeAPIPayload(w http.ResponseWriter, payload any) {

View File

@@ -1,4 +1,4 @@
package pkgserver package main
import ( import (
"net/http" "net/http"
@@ -118,31 +118,33 @@ func TestAPIGet(t *testing.T) {
checkStatus(t, resp, http.StatusOK) checkStatus(t, resp, http.StatusOK)
checkAPIHeader(t, w.Header()) checkAPIHeader(t, w.Header())
checkPayloadFunc(t, resp, func(got *struct { checkPayloadFunc(t, resp, func(got *struct {
Count int `json:"count"`
Values []*metadata `json:"values"` Values []*metadata `json:"values"`
}) bool { }) bool {
return slices.EqualFunc(got.Values, want, func(a, b *metadata) bool { return got.Count == len(want) &&
return (a.Version == b.Version || slices.EqualFunc(got.Values, want, func(a, b *metadata) bool {
a.Version == rosa.Unversioned || return (a.Version == b.Version ||
b.Version == rosa.Unversioned) && a.Version == rosa.Unversioned ||
a.HasReport == b.HasReport && b.Version == rosa.Unversioned) &&
a.Name == b.Name && a.HasReport == b.HasReport &&
a.Description == b.Description && a.Name == b.Name &&
a.Website == b.Website a.Description == b.Description &&
}) a.Website == b.Website
})
}) })
}) })
} }
checkWithSuffix("declarationAscending", "?limit=2&index=1&sort=0", []*metadata{ checkWithSuffix("declarationAscending", "?limit=2&index=0&sort=0", []*metadata{
{
Metadata: rosa.GetMetadata(0),
Version: rosa.Std.Version(0),
},
{ {
Metadata: rosa.GetMetadata(1), Metadata: rosa.GetMetadata(1),
Version: rosa.Std.Version(1), Version: rosa.Std.Version(1),
}, },
{
Metadata: rosa.GetMetadata(2),
Version: rosa.Std.Version(2),
},
}) })
checkWithSuffix("declarationAscending offset", "?limit=3&index=5&sort=0", []*metadata{ checkWithSuffix("declarationAscending offset", "?limit=3&index=5&sort=0", []*metadata{
{ {

View File

@@ -1,4 +1,4 @@
package pkgserver package main
import ( import (
"cmp" "cmp"
@@ -50,7 +50,7 @@ type metadata struct {
} }
// populate deterministically populates packageIndex, optionally with a report. // populate deterministically populates packageIndex, optionally with a report.
func (index *packageIndex) populate(report *rosa.Report) (err error) { func (index *packageIndex) populate(cache *pkg.Cache, report *rosa.Report) (err error) {
if report != nil { if report != nil {
defer report.HandleAccess(&err)() defer report.HandleAccess(&err)()
index.handleAccess = report.HandleAccess index.handleAccess = report.HandleAccess
@@ -58,7 +58,6 @@ func (index *packageIndex) populate(report *rosa.Report) (err error) {
var work [rosa.PresetUnexportedStart]*metadata var work [rosa.PresetUnexportedStart]*metadata
index.names = make(map[string]*metadata) index.names = make(map[string]*metadata)
ir := pkg.NewIR()
for p := range rosa.PresetUnexportedStart { for p := range rosa.PresetUnexportedStart {
m := metadata{ m := metadata{
p: p, p: p,
@@ -73,8 +72,8 @@ func (index *packageIndex) populate(report *rosa.Report) (err error) {
m.Version = "" m.Version = ""
} }
if report != nil { if cache != nil && report != nil {
id := ir.Ident(rosa.Std.Load(p)) id := cache.Ident(rosa.Std.Load(p))
m.ids = pkg.Encode(id.Value()) m.ids = pkg.Encode(id.Value())
m.status, m.Size = report.ArtifactOf(id) m.status, m.Size = report.ArtifactOf(id)
m.HasReport = m.Size >= 0 m.HasReport = m.Size >= 0

115
cmd/pkgserver/main.go Normal file
View File

@@ -0,0 +1,115 @@
package main
import (
"context"
"errors"
"log"
"net/http"
"os"
"os/signal"
"syscall"
"time"
"hakurei.app/check"
"hakurei.app/command"
"hakurei.app/internal/pkg"
"hakurei.app/internal/rosa"
"hakurei.app/message"
)
const shutdownTimeout = 15 * time.Second
// main configures and runs the pkgserver HTTP server, serving the web UI and
// package metadata API. With no positional argument it serves bare metadata;
// with a single report path it additionally opens the package cache and the
// report so artifact status and sizes can be exposed.
func main() {
	log.SetFlags(0)
	log.SetPrefix("pkgserver: ")

	var (
		flagBaseDir string
		flagAddr    string
	)

	// Shut down gracefully on SIGINT, SIGTERM and SIGHUP.
	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
	defer stop()
	msg := message.New(log.Default())

	c := command.New(os.Stderr, log.Printf, "pkgserver", func(args []string) error {
		var (
			cache  *pkg.Cache
			report *rosa.Report
		)
		switch len(args) {
		case 0:
			// No report: serve metadata without artifact status.
		case 1:
			baseDir, err := check.NewAbs(flagBaseDir)
			if err != nil {
				return err
			}
			cache, err = pkg.Open(ctx, msg, 0, 0, 0, baseDir)
			if err != nil {
				return err
			}
			defer cache.Close()

			report, err = rosa.OpenReport(args[0])
			if err != nil {
				return err
			}
		default:
			// Zero arguments are also accepted, so the previous message
			// claiming exactly one argument was required was misleading.
			return errors.New("pkgserver accepts at most 1 argument")
		}

		var index packageIndex
		index.search = make(searchCache)
		if err := index.populate(cache, report); err != nil {
			return err
		}

		// Periodically evict stale search cache entries until shutdown.
		ticker := time.NewTicker(time.Minute)
		go func() {
			defer ticker.Stop()
			for {
				select {
				case <-ctx.Done():
					return
				case <-ticker.C:
					index.search.clean()
				}
			}
		}()

		var mux http.ServeMux
		uiRoutes(&mux)
		testUIRoutes(&mux)
		index.registerAPI(&mux)
		server := http.Server{
			Addr:    flagAddr,
			Handler: &mux,
		}

		// Drain in-flight connections when the signal context is done;
		// ListenAndServe then returns http.ErrServerClosed, handled below.
		go func() {
			<-ctx.Done()
			shutdownCtx, cancel := context.WithTimeout(context.Background(), shutdownTimeout)
			defer cancel()
			if err := server.Shutdown(shutdownCtx); err != nil {
				log.Fatal(err)
			}
		}()
		return server.ListenAndServe()
	}).Flag(
		&flagBaseDir,
		"b", command.StringFlag(""),
		"base directory for cache",
	).Flag(
		&flagAddr,
		"addr", command.StringFlag(":8067"),
		"TCP network address to listen on",
	)

	c.MustParse(os.Args[1:], func(err error) {
		// Graceful shutdown surfaces as ErrServerClosed; treat it as success.
		if errors.Is(err, http.ErrServerClosed) {
			os.Exit(0)
		}
		log.Fatal(err)
	})
}

View File

@@ -1,4 +1,4 @@
package pkgserver package main
import ( import (
"bytes" "bytes"
@@ -15,7 +15,7 @@ func newIndex(t *testing.T) *packageIndex {
t.Helper() t.Helper()
var index packageIndex var index packageIndex
if err := index.populate(nil); err != nil { if err := index.populate(nil, nil); err != nil {
t.Fatalf("populate: error = %v", err) t.Fatalf("populate: error = %v", err)
} }
return &index return &index

View File

@@ -1,4 +1,4 @@
package pkgserver package main
import ( import (
"cmp" "cmp"

35
cmd/pkgserver/test_ui.go Normal file
View File

@@ -0,0 +1,35 @@
//go:build frontend && frontend_test
package main
import (
"embed"
"io/fs"
"net/http"
)
// Always remove any stale ui_test/ui first: if a previous tsc run failed, its
// trailing cleanup rm never executed, so the directory may still exist.
//go:generate sh -c "rm -r ui_test/ui/ 2>/dev/null || true"
//go:generate mkdir ui_test/ui
//go:generate sh -c "cp ui/static/*.ts ui_test/ui/"
//go:generate tsc -p ui_test
//go:generate rm -r ui_test/ui/
//go:generate cp ui_test/lib/ui.css ui_test/static/style.css
//go:generate cp ui_test/lib/ui.html ui_test/static/index.html
//go:generate sh -c "cd ui_test/lib && cp *.svg ../static/"
//go:embed ui_test/static
var _staticTest embed.FS
// staticTest exposes the embedded ui_test/static tree rooted at its own
// directory, so embedded files resolve without the ui_test/static prefix.
var staticTest = func() fs.FS {
	f, err := fs.Sub(_staticTest, "ui_test/static")
	if err != nil {
		// The subtree is embedded in this file, so a failure here can only be
		// a programmer error; fail loudly at startup.
		panic(err)
	}
	return f
}()
// testUIRoutes serves the embedded browser test harness under /test/; this
// variant is only compiled when both the frontend and frontend_test build
// tags are set.
func testUIRoutes(mux *http.ServeMux) {
	mux.Handle("GET /test/", http.StripPrefix("/test", http.FileServer(http.FS(staticTest))))
}

View File

@@ -0,0 +1,7 @@
//go:build !(frontend && frontend_test)
package main
import "net/http"
func testUIRoutes(mux *http.ServeMux) {}

33
cmd/pkgserver/ui.go Normal file
View File

@@ -0,0 +1,33 @@
package main
import "net/http"
// serveWebUI serves the embedded single-page UI with client caching disabled
// and basic hardening headers set.
func serveWebUI(w http.ResponseWriter, r *http.Request) {
	h := w.Header()
	// Distinct keys, so the (random) map iteration order is irrelevant.
	for name, value := range map[string]string{
		"Cache-Control":          "no-cache, no-store, must-revalidate",
		"Pragma":                 "no-cache",
		"Expires":                "0",
		"X-Content-Type-Options": "nosniff",
		"X-XSS-Protection":       "1",
		"X-Frame-Options":        "DENY",
	} {
		h.Set(name, value)
	}
	http.ServeFileFS(w, r, content, "ui/index.html")
}
// serveStaticContent maps the small fixed set of static asset paths to their
// embedded files; any other path is a 404.
func serveStaticContent(w http.ResponseWriter, r *http.Request) {
	var name string
	switch r.URL.Path {
	case "/static/style.css":
		name = "ui/static/style.css"
	case "/favicon.ico":
		name = "ui/static/favicon.ico"
	case "/static/index.js":
		name = "ui/static/index.js"
	default:
		http.NotFound(w, r)
		return
	}
	http.ServeFileFS(w, r, content, name)
}
// uiRoutes registers the web UI handlers: the index page at the bare root
// only ({$} matches nothing further), plus the favicon and /static/ assets.
func uiRoutes(mux *http.ServeMux) {
	mux.HandleFunc("GET /{$}", serveWebUI)
	mux.HandleFunc("GET /favicon.ico", serveStaticContent)
	mux.HandleFunc("GET /static/", serveStaticContent)
}

View File

@@ -3,9 +3,9 @@
<head> <head>
<meta charset="UTF-8"> <meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0"> <meta name="viewport" content="width=device-width, initial-scale=1.0">
<link rel="stylesheet" href="style.css"> <link rel="stylesheet" href="static/style.css">
<title>Hakurei PkgServer</title> <title>Hakurei PkgServer</title>
<script src="index.js"></script> <script src="static/index.js"></script>
</head> </head>
<body> <body>
<h1>Hakurei PkgServer</h1> <h1>Hakurei PkgServer</h1>

View File

Before

Width:  |  Height:  |  Size: 17 KiB

After

Width:  |  Height:  |  Size: 17 KiB

9
cmd/pkgserver/ui_full.go Normal file
View File

@@ -0,0 +1,9 @@
//go:build frontend
package main
import "embed"
//go:generate tsc -p ui
//go:embed ui/*
var content embed.FS

View File

@@ -1,7 +1,7 @@
//go:build !frontend //go:build !frontend
package ui package main
import "testing/fstest" import "testing/fstest"
var static fstest.MapFS var content fstest.MapFS

View File

@@ -0,0 +1,2 @@
// Import all test files to register their test suites.
import "./index_test.js";

View File

@@ -0,0 +1,2 @@
import { suite, test } from "./lib/test.js";
import "./ui/index.js";

View File

@@ -0,0 +1,48 @@
#!/usr/bin/env node
// Many editors have terminal emulators built in, so running tests with NodeJS
// provides faster iteration, especially for those acclimated to test-driven
// development.
import "../all_tests.js";
import { StreamReporter, GLOBAL_REGISTRAR } from "./test.js";
// TypeScript doesn't like process and Deno as their type definitions aren't
// installed, but doesn't seem to complain if they're accessed through
// globalThis.
const process: any = (globalThis as any).process;
const Deno: any = (globalThis as any).Deno;

// getArgs returns the command-line arguments after the program name, whether
// running under NodeJS or Deno; other runtimes get an empty list.
function getArgs(): string[] {
    if (process) {
        // NodeJS argv is [runtime, script, ...args]; drop the first two.
        const [runtime, program, ...args] = process.argv;
        return args;
    }
    if (Deno) return Deno.args;
    return [];
}

// exit terminates the runtime with the given status code; the throw is only
// reachable on a runtime that is neither NodeJS nor Deno.
function exit(code?: number): never {
    if (Deno) Deno.exit(code);
    if (process) process.exit(code);
    throw `exited with code ${code ?? 0}`;
}
const args = getArgs();
let verbose = false;

// Accept at most one argument: -v/--verbose/-verbose enables verbose output,
// and a bare "--" is tolerated as a no-op separator.
if (args.length > 1) {
    console.error("Too many arguments");
    exit(1);
}
if (args.length === 1) {
    if (args[0] === "-v" || args[0] === "--verbose" || args[0] === "-verbose") {
        verbose = true;
    } else if (args[0] !== "--") {
        console.error(`Unknown argument '${args[0]}'`);
        exit(1);
    }
}

// Run every registered suite, writing to stdout; exit nonzero on any failure
// (or when no test succeeded at all — see StreamReporter.succeeded).
let reporter = new StreamReporter({ writeln: console.log }, verbose);
GLOBAL_REGISTRAR.run(reporter);
exit(reporter.succeeded() ? 0 : 1);

View File

@@ -0,0 +1,13 @@
<?xml version="1.0"?>
<!-- See failure-open.svg for an explanation of the view box dimensions. -->
<svg xmlns="http://www.w3.org/2000/svg" width="20" viewBox="-20,-20 160 130">
<!-- This triangle should match success-closed.svg, fill and stroke color notwithstanding. -->
<polygon points="0,0 100,50 0,100" fill="red" stroke="red" stroke-width="15" stroke-linejoin="round"/>
<!--
! y-coordinates go before x-coordinates here to highlight the difference
! (or, lack thereof) between these numbers and the ones in failure-open.svg;
! try a textual diff. Make sure to keep the numbers in sync!
-->
<line y1="30" x1="10" y2="70" x2="50" stroke="white" stroke-width="16"/>
<line y1="30" x1="50" y2="70" x2="10" stroke="white" stroke-width="16"/>
</svg>

After

Width:  |  Height:  |  Size: 788 B

View File

@@ -0,0 +1,35 @@
<?xml version="1.0"?>
<!--
! This view box is a bit weird: the strokes assume they're working in a view
! box that spans from (0,0) to (100,100), and indeed that is convenient for
! conceptualizing the strokes, but the stroke itself has a considerable width
! that gets clipped by restrictive view box dimensions. Hence, the view is
! shifted from (0,0)(100,100) to (-20,-20)(120,120), to make room for the
! clipped stroke, while leaving behind an illusion of working in a view box
! spanning from (0,0) to (100,100).
!
! However, the resulting SVG is too close to the summary text, and CSS
! properties to add padding do not seem to work with `content:` (likely because
! they're anonymous replaced elements); thus, the width of the view is
! increased considerably to provide padding in the SVG itself, while leaving
! the strokes oblivious.
!
! It gets worse: the summary text isn't vertically aligned with the icon! As
! a flexbox cannot be used in a summary to align the marker with the text, the
! simplest and most effective solution is to reduce the height of the view box
! from 140 to 130, thereby removing some of the bottom padding present.
!
! All six SVGs use the same view box (and indeed, they refer to this comment)
! so that they all appear to be the same size and position relative to each
! other on the DOM—indeed, the view box dimensions, alongside the width,
! directly control their placement on the DOM.
!
! TL;DR: CSS is janky, overflow is weird, and SVG is awesome!
-->
<svg xmlns="http://www.w3.org/2000/svg" width="20" viewBox="-20,-20 160 130">
<!-- This triangle should match success-open.svg, fill and stroke color notwithstanding. -->
<polygon points="0,0 100,0 50,100" fill="red" stroke="red" stroke-width="15" stroke-linejoin="round"/>
<!-- See the comment in failure-closed.svg before modifying this. -->
<line x1="30" y1="10" x2="70" y2="50" stroke="white" stroke-width="16"/>
<line x1="30" y1="50" x2="70" y2="10" stroke="white" stroke-width="16"/>
</svg>

View File

@@ -0,0 +1,3 @@
import "../all_tests.js";
import { GoTestReporter, GLOBAL_REGISTRAR } from "./test.js";
GLOBAL_REGISTRAR.run(new GoTestReporter());

View File

@@ -0,0 +1,21 @@
<?xml version="1.0"?>
<!-- See failure-open.svg for an explanation of the view box dimensions. -->
<svg xmlns="http://www.w3.org/2000/svg" width="20" viewBox="-20,-20 160 130">
<!-- This triangle should match success-closed.svg, fill and stroke color notwithstanding. -->
<polygon points="0,0 100,50 0,100" fill="blue" stroke="blue" stroke-width="15" stroke-linejoin="round"/>
<!--
! This path is extremely similar to the one in skip-open.svg; before
! making minor modifications, diff the two to understand how they should
! remain in sync.
-->
<path
d="M 50,50
A 23,23 270,1,1 30,30
l -10,20
m 10,-20
l -20,-10"
fill="none"
stroke="white"
stroke-width="12"
stroke-linejoin="round"/>
</svg>

After

Width:  |  Height:  |  Size: 812 B

View File

@@ -0,0 +1,21 @@
<?xml version="1.0"?>
<!-- See failure-open.svg for an explanation of the view box dimensions. -->
<svg xmlns="http://www.w3.org/2000/svg" width="20" viewBox="-20,-20 160 130">
<!-- This triangle should match success-open.svg, fill and stroke color notwithstanding. -->
<polygon points="0,0 100,0 50,100" fill="blue" stroke="blue" stroke-width="15" stroke-linejoin="round"/>
<!--
! This path is extremely similar to the one in skip-closed.svg; before
! making minor modifications, diff the two to understand how they should
! remain in sync.
-->
<path
d="M 50,50
A 23,23 270,1,1 70,30
l 10,-20
m -10,20
l -20,-10"
fill="none"
stroke="white"
stroke-width="12"
stroke-linejoin="round"/>
</svg>

After

Width:  |  Height:  |  Size: 812 B

View File

@@ -0,0 +1,16 @@
<?xml version="1.0"?>
<!-- See failure-open.svg for an explanation of the view box dimensions. -->
<svg xmlns="http://www.w3.org/2000/svg" width="20" viewBox="-20,-20 160 130">
<style>
.adaptive-stroke {
stroke: black;
}
@media (prefers-color-scheme: dark) {
.adaptive-stroke {
stroke: ghostwhite;
}
}
</style>
<!-- When updating this triangle, also update the other five SVGs. -->
<polygon points="0,0 100,50 0,100" fill="none" class="adaptive-stroke" stroke-width="15" stroke-linejoin="round"/>
</svg>

After

Width:  |  Height:  |  Size: 572 B

View File

@@ -0,0 +1,16 @@
<?xml version="1.0"?>
<!-- See failure-open.svg for an explanation of the view box dimensions. -->
<svg xmlns="http://www.w3.org/2000/svg" width="20" viewBox="-20,-20 160 130">
<style>
.adaptive-stroke {
stroke: black;
}
@media (prefers-color-scheme: dark) {
.adaptive-stroke {
stroke: ghostwhite;
}
}
</style>
<!-- When updating this triangle, also update the other five SVGs. -->
<polygon points="0,0 100,0 50,100" fill="none" class="adaptive-stroke" stroke-width="15" stroke-linejoin="round"/>
</svg>

After

Width:  |  Height:  |  Size: 572 B

View File

@@ -0,0 +1,403 @@
// =============================================================================
// DSL
// A test tree node is either a named group of children or a leaf test.
type TestTree = TestGroup | Test;
// TestGroup is an internal node grouping related tests under a name.
type TestGroup = { name: string; children: TestTree[] };
// Test is a leaf: a name plus the function run with a TestController.
type Test = { name: string; test: (t: TestController) => void };
// TestRegistrar collects named top-level suites and runs them all against a
// Reporter.
export class TestRegistrar {
    #suites: TestGroup[];

    constructor() {
        this.#suites = [];
    }

    // suite registers a top-level group; sibling names must be unique.
    suite(name: string, children: TestTree[]) {
        checkDuplicates(name, children);
        this.#suites.push({ name, children });
    }

    // run announces the full tree to the reporter, executes every test
    // depth-first, then finalizes the reporter.
    run(reporter: Reporter) {
        reporter.register(this.#suites);
        for (const suite of this.#suites) {
            for (const c of suite.children) runTests(reporter, [suite.name], c);
        }
        reporter.finalize();
    }
}
// The process-wide registrar used by the convenience functions below.
export let GLOBAL_REGISTRAR = new TestRegistrar();

// Register a suite in the global registrar.
export function suite(name: string, children: TestTree[]) {
    GLOBAL_REGISTRAR.suite(name, children);
}

// group creates a named subtree of tests; sibling names must be unique.
export function group(name: string, children: TestTree[]): TestTree {
    checkDuplicates(name, children);
    return { name, children };
}

// Aliases for group, for those used to other frameworks' vocabulary.
export const context = group;
export const describe = group;

// test creates a leaf node running the given function.
export function test(name: string, test: (t: TestController) => void): TestTree {
    return { name, test };
}
// checkDuplicates throws a RangeError when two entries under parent share a
// name, rejecting ambiguous test trees at registration time.
function checkDuplicates(parent: string, names: { name: string }[]) {
    const seen = new Set<string>();
    for (const entry of names) {
        const before = seen.size;
        seen.add(entry.name);
        // The set did not grow, so this name was already present.
        if (seen.size === before) {
            throw new RangeError(`duplicate name '${entry.name}' in '${parent}'`);
        }
    }
}
// TestState is the final outcome of a single test.
export type TestState = "success" | "failure" | "skip";

// AbortSentinel is thrown to unwind out of a test body early; the runner
// recognizes it and does not treat it as an error.
class AbortSentinel {}

// TestController is the `t` handed to each test body, mirroring the shape of
// Go's testing.T: a success/failure/skip state plus accumulated log lines.
export class TestController {
    #state: TestState;
    logs: string[];

    constructor() {
        this.#state = "success";
        this.logs = [];
    }

    getState(): TestState {
        return this.#state;
    }

    // fail marks the test failed but lets it keep running.
    fail() {
        this.#state = "failure";
    }

    failed(): boolean {
        return this.#state === "failure";
    }

    // failNow marks the test failed and aborts it immediately.
    failNow(): never {
        this.fail();
        throw new AbortSentinel();
    }

    log(message: string) {
        this.logs.push(message);
    }

    // error logs a message and marks the test failed.
    error(message: string) {
        this.log(message);
        this.fail();
    }

    // fatal logs a message and aborts the test as failed.
    fatal(message: string): never {
        this.log(message);
        this.failNow();
    }

    // skip aborts the test as skipped, optionally logging a message; an
    // earlier failure takes precedence over the skip state.
    skip(message?: string): never {
        if (message != null) this.log(message);
        if (this.#state !== "failure") this.#state = "skip";
        throw new AbortSentinel();
    }

    skipped(): boolean {
        return this.#state === "skip";
    }
}
// =============================================================================
// Execution
// TestResult is the outcome reported for a single completed test.
export interface TestResult {
    state: TestState;
    logs: string[];
}

// runTests walks the tree depth-first, executing each leaf with a fresh
// controller and reporting its result under the full path of names.
function runTests(reporter: Reporter, parents: string[], node: TestTree) {
    const path = [...parents, node.name];
    if ("children" in node) {
        for (const c of node.children) runTests(reporter, path, c);
        return;
    }

    let controller = new TestController();
    try {
        node.test(controller);
    } catch (e) {
        // AbortSentinel is the controlled early exit (failNow/skip); any
        // other exception is unexpected and fails the test with its text.
        if (!(e instanceof AbortSentinel)) {
            controller.error(extractExceptionString(e));
        }
    }
    reporter.update(path, { state: controller.getState(), logs: controller.logs });
}
// extractExceptionString renders a thrown value for the logs, appending the
// stack trace when one is available without duplicating the message.
function extractExceptionString(e: any): string {
    // String() instead of .toString() as null and undefined don't have
    // properties.
    const text = String(e);
    if (!(e instanceof Error) || !e.stack) return text;
    // v8 (Chromium, NodeJS) includes the error message, while Firefox and
    // WebKit do not.
    return e.stack.startsWith(text) ? e.stack : `${text}\n${e.stack}`;
}
// =============================================================================
// Reporting
// Reporter consumes test results: register is called once with the full tree
// before any test runs, update once per completed test, and finalize once at
// the end of the run.
export interface Reporter {
    register(suites: TestGroup[]): void;
    update(path: string[], result: TestResult): void;
    finalize(): void;
}
// NoOpReporter records everything it is told and produces no output; useful
// as a stub and for inspecting a run's results programmatically.
export class NoOpReporter implements Reporter {
    suites: TestGroup[] = [];
    results: ({ path: string[] } & TestResult)[] = [];
    finalized: boolean = false;

    register(suites: TestGroup[]) {
        this.suites = suites;
    }

    update(path: string[], result: TestResult) {
        this.results.push({ path, ...result });
    }

    finalize() {
        this.finalized = true;
    }
}
// Stream is the minimal line-oriented output sink StreamReporter writes to.
export interface Stream {
    writeln(s: string): void;
}

// SEP joins test path segments when rendering a path on one line.
const SEP = " ";
// StreamReporter prints a one-line marker per result as tests run, then a
// grouped summary of failures and skips (and, in verbose mode, successes with
// output) followed by final counts.
export class StreamReporter implements Reporter {
    stream: Stream;
    verbose: boolean;
    // Results bucketed by final state, in arrival order.
    #successes: ({ path: string[] } & TestResult)[];
    #failures: ({ path: string[] } & TestResult)[];
    #skips: ({ path: string[] } & TestResult)[];

    constructor(stream: Stream, verbose: boolean = false) {
        this.stream = stream;
        this.verbose = verbose;
        this.#successes = [];
        this.#failures = [];
        this.#skips = [];
    }

    // succeeded reports whether at least one test passed and none failed;
    // note an all-skip run does not count as success.
    succeeded(): boolean {
        return this.#successes.length > 0 && this.#failures.length === 0;
    }

    register(suites: TestGroup[]) {}

    // update buckets the result by state and prints a progress line;
    // successes are only echoed in verbose mode.
    update(path: string[], result: TestResult) {
        if (path.length === 0) throw new RangeError("path is empty");
        const pathStr = path.join(SEP);
        switch (result.state) {
            case "success":
                this.#successes.push({ path, ...result });
                if (this.verbose) this.stream.writeln(`✅️ ${pathStr}`);
                break;
            case "failure":
                this.#failures.push({ path, ...result });
                this.stream.writeln(`⚠️ ${pathStr}`);
                break;
            case "skip":
                this.#skips.push({ path, ...result });
                this.stream.writeln(`⏭️ ${pathStr}`);
                break;
        }
    }

    // finalize prints the grouped per-state sections, then the final tally.
    finalize() {
        if (this.verbose) this.#displaySection("successes", this.#successes, true);
        this.#displaySection("failures", this.#failures);
        this.#displaySection("skips", this.#skips);
        this.stream.writeln("");
        this.stream.writeln(
            `${this.#successes.length} succeeded, ${this.#failures.length} failed` +
            (this.#skips.length ? `, ${this.#skips.length} skipped` : ""),
        );
    }

    // #displaySection prints one state's results grouped by parent path;
    // with ignoreEmpty, tests that produced no output are omitted.
    #displaySection(name: string, data: ({ path: string[] } & TestResult)[], ignoreEmpty: boolean = false) {
        if (!data.length) return;
        // Transform [{ path: ["a", "b", "c"] }, { path: ["a", "b", "d"] }]
        // into { "a b": ["c", "d"] }.
        let pathMap = new Map<string, ({ name: string } & TestResult)[]>();
        for (const t of data) {
            if (t.path.length === 0) throw new RangeError("path is empty");
            const key = t.path.slice(0, -1).join(SEP);
            if (!pathMap.has(key)) pathMap.set(key, []);
            pathMap.get(key)!.push({ name: t.path.at(-1)!, ...t });
        }
        this.stream.writeln("");
        this.stream.writeln(name.toUpperCase());
        this.stream.writeln("=".repeat(name.length));
        for (let [path, tests] of pathMap) {
            if (ignoreEmpty) tests = tests.filter((t) => t.logs.length);
            if (tests.length === 0) continue;
            if (tests.length === 1) {
                // A lone test is printed on one line with its full path.
                this.#writeOutput(tests[0], path ? `${path}${SEP}` : "", false);
            } else {
                // Multiple tests share a parent-path heading.
                this.stream.writeln(path);
                for (const t of tests) this.#writeOutput(t, " - ", true);
            }
        }
    }

    // #writeOutput prints one test's name and its logs, indenting multi-line
    // output under the name (deeper when nested under a heading).
    #writeOutput(test: { name: string } & TestResult, prefix: string, nested: boolean) {
        let output = "";
        if (test.logs.length) {
            // Individual logs might span multiple lines, so join them together
            // then split it again.
            const logStr = test.logs.join("\n");
            const lines = logStr.split("\n");
            if (lines.length <= 1) {
                output = `: ${logStr}`;
            } else {
                const padding = nested ? "   " : "  ";
                output = ":\n" + lines.map((line) => padding + line).join("\n");
            }
        }
        this.stream.writeln(`${prefix}${test.name}${output}`);
    }
}
// assertGetElementById returns the element with the given ID, throwing when
// the expected element is missing from the DOM.
function assertGetElementById(id: string): HTMLElement {
    const elem = document.getElementById(id);
    if (elem != null) return elem;
    throw new ReferenceError(`element with ID '${id}' missing from DOM`);
}
// DOMReporter renders results live into the test page: a nested tree of
// <details> elements under #root, plus per-state counters in the header.
export class DOMReporter implements Reporter {
    register(suites: TestGroup[]) {}

    // update increments the state counter, then walks (creating as needed)
    // one <details> node per path segment, propagating the state class with
    // failure taking precedence over skip over success, and finally appends
    // the test's log output under the leaf.
    update(path: string[], result: TestResult) {
        if (path.length === 0) throw new RangeError("path is empty");

        // The skip counter text is hidden until the first skip occurs.
        if (result.state === "skip") {
            assertGetElementById("skip-counter-text").hidden = false;
        }
        const counter = assertGetElementById(`${result.state}-counter`);
        counter.innerText = (Number(counter.innerText) + 1).toString();

        let parent = assertGetElementById("root");
        for (const node of path) {
            // Look for an existing <details> child whose <summary> text
            // matches this path segment.
            let child: HTMLDetailsElement | null = null;
            let summary: HTMLElement | null = null;
            let d: Element;
            outer: for (d of parent.children) {
                if (!(d instanceof HTMLDetailsElement)) continue;
                for (const s of d.children) {
                    if (!(s instanceof HTMLElement)) continue;
                    if (!(s.tagName === "SUMMARY" && s.innerText === node)) continue;
                    child = d;
                    summary = s;
                    break outer;
                }
            }
            if (!child) {
                // First time this segment is seen: create its node.
                child = document.createElement("details");
                child.className = "test-node";
                child.ariaRoleDescription = "test";
                summary = document.createElement("summary");
                summary.appendChild(document.createTextNode(node));
                summary.ariaRoleDescription = "test name";
                child.appendChild(summary);
                parent.appendChild(child);
            }
            if (!summary) throw new Error("unreachable as assigned above");

            switch (result.state) {
                case "failure":
                    // Failures auto-expand and override any other state.
                    child.open = true;
                    child.classList.add("failure");
                    child.classList.remove("skip");
                    child.classList.remove("success");
                    // The summary marker does not appear in the AOM, so setting its
                    // alt text is fruitless; label the summary itself instead.
                    summary.setAttribute("aria-labelledby", "failure-description");
                    break;
                case "skip":
                    if (child.classList.contains("failure")) break;
                    child.classList.add("skip");
                    child.classList.remove("success");
                    summary.setAttribute("aria-labelledby", "skip-description");
                    break;
                case "success":
                    if (child.classList.contains("failure") || child.classList.contains("skip")) break;
                    child.classList.add("success");
                    summary.setAttribute("aria-labelledby", "success-description");
                    break;
            }
            parent = child;
        }

        // Append the test's output (or an italic placeholder) under the leaf.
        const p = document.createElement("p");
        p.classList.add("test-desc");
        if (result.logs.length) {
            const pre = document.createElement("pre");
            pre.appendChild(document.createTextNode(result.logs.join("\n")));
            p.appendChild(pre);
        } else {
            p.classList.add("italic");
            p.appendChild(document.createTextNode("No output."));
        }
        parent.appendChild(p);
    }

    finalize() {}
}
// GoNode is the JSON shape of a test tree node as decoded on the Go side.
interface GoNode {
    name: string;
    // Absent for leaf tests.
    subtests?: GoNode[];
}
// Used to display results via `go test`, via some glue code from the Go side.
// Emits newline-delimited JSON on stdout: first the serialized tree, then one
// result object per test, then a literal null as the terminator.
export class GoTestReporter implements Reporter {
    register(suites: TestGroup[]) {
        console.log(JSON.stringify(suites.map(GoTestReporter.serialize)));
    }

    // Convert a test tree into the one expected by the Go code.
    static serialize(node: TestTree): GoNode {
        const children = "children" in node ? node.children : undefined;
        return {
            name: node.name,
            subtests: children?.map(GoTestReporter.serialize),
        };
    }

    update(path: string[], result: TestResult) {
        // Numeric state codes keep the Go-side decoder trivial.
        const state = { success: 0, failure: 1, skip: 2 }[result.state];
        console.log(JSON.stringify({ path, state, logs: result.logs }));
    }

    finalize() {
        console.log("null");
    }
}

View File

@@ -0,0 +1,87 @@
/*
* When updating the theme colors, also update them in success-closed.svg and
* success-open.svg!
*/
:root {
--bg: #d3d3d3;
--fg: black;
}
@media (prefers-color-scheme: dark) {
:root {
--bg: #2c2c2c;
--fg: ghostwhite;
}
}
html {
background-color: var(--bg);
color: var(--fg);
}
h1, p, summary, noscript {
font-family: sans-serif;
}
noscript {
font-size: 16pt;
}
.root {
margin: 1rem 0;
}
details.test-node {
margin-left: 1rem;
padding: 0.2rem 0.5rem;
border-left: 2px dashed var(--fg);
> summary {
cursor: pointer;
}
&.success > summary::marker {
/*
* WebKit only supports color and font-size properties in ::marker [1], and
* its ::-webkit-details-marker only supports hiding the marker entirely
* [2], contrary to mdn's example [3]; thus, set a color as a fallback:
* while it may not be accessible for colorblind individuals, it's better
* than no indication of a test's state for anyone, as that there's no other
* way to include an indication in the marker on WebKit.
*
* [1]: https://developer.mozilla.org/en-US/docs/Web/CSS/Reference/Selectors/::marker#browser_compatibility
* [2]: https://developer.mozilla.org/en-US/docs/Web/HTML/Reference/Elements/summary#default_style
* [3]: https://developer.mozilla.org/en-US/docs/Web/HTML/Reference/Elements/summary#changing_the_summarys_icon
*/
color: var(--fg);
content: url("/test/success-closed.svg") / "success";
}
&.success[open] > summary::marker {
content: url("/test/success-open.svg") / "success";
}
&.failure > summary::marker {
color: red;
content: url("/test/failure-closed.svg") / "failure";
}
&.failure[open] > summary::marker {
content: url("/test/failure-open.svg") / "failure";
}
&.skip > summary::marker {
color: blue;
content: url("/test/skip-closed.svg") / "skip";
}
&.skip[open] > summary::marker {
content: url("/test/skip-open.svg") / "skip";
}
}
p.test-desc {
margin: 0 0 0 1rem;
padding: 2px 0;
> pre {
margin: 0;
}
}
.italic {
font-style: italic;
}

View File

@@ -0,0 +1,39 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<link rel="stylesheet" href="/test/style.css">
<title>PkgServer Tests</title>
</head>
<body>
<noscript>
I hate JavaScript as much as you, but this page runs tests written in
JavaScript to test the functionality of code written in JavaScript, so it
wouldn't make sense for it to work without JavaScript. <strong>Please turn
JavaScript on!</strong>
</noscript>
<h1>PkgServer Tests</h1>
<main>
<p id="counters">
<span id="success-counter">0</span> succeeded, <span id="failure-counter">0</span>
failed<span id="skip-counter-text" hidden>, <span id="skip-counter">0</span> skipped</span>.
</p>
<p hidden id="success-description">Successful test</p>
<p hidden id="failure-description">Failed test</p>
<p hidden id="skip-description">Partially or fully skipped test</p>
<div id="root">
</div>
<script type="module">
import "/test/all_tests.js";
import { DOMReporter, GLOBAL_REGISTRAR } from "/test/lib/test.js";
GLOBAL_REGISTRAR.run(new DOMReporter());
</script>
</main>
</body>
</html>

View File

@@ -0,0 +1,8 @@
{
"compilerOptions": {
"target": "ES2024",
"strict": true,
"alwaysStrict": true,
"outDir": "static"
}
}

View File

@@ -20,14 +20,11 @@
}; };
virtualisation = { virtualisation = {
# Hopefully reduces spurious test failures:
memorySize = if pkgs.stdenv.hostPlatform.is32bit then 2046 else 8192;
diskSize = 6 * 1024; diskSize = 6 * 1024;
qemu.options = [ qemu.options = [
# Increase test performance: # Increase test performance:
"-smp 16" "-smp 8"
]; ];
}; };

View File

@@ -28,7 +28,7 @@ testers.nixosTest {
# For go tests: # For go tests:
(pkgs.writeShellScriptBin "sharefs-workload-hakurei-tests" '' (pkgs.writeShellScriptBin "sharefs-workload-hakurei-tests" ''
cp -r "${self.packages.${system}.hakurei.src}" "/sdcard/hakurei" && cd "/sdcard/hakurei" cp -r "${self.packages.${system}.hakurei.src}" "/sdcard/hakurei" && cd "/sdcard/hakurei"
${fhs}/bin/hakurei-fhs -c 'ROSA_SKIP_BINFMT=1 CC="clang -O3 -Werror" go test ./...' ${fhs}/bin/hakurei-fhs -c 'CC="clang -O3 -Werror" go test ./...'
'') '')
]; ];

View File

@@ -1,46 +0,0 @@
package container
import (
"strings"
"unsafe"
"hakurei.app/check"
)
// escapeBinfmt escapes magic/mask sequences in a [BinfmtEntry].
func escapeBinfmt(buf *strings.Builder, s string) string {
const lowerhex = "0123456789abcdef"
buf.Reset()
for _, c := range unsafe.Slice(unsafe.StringData(s), len(s)) {
switch c {
case 0, '\\', ':':
buf.WriteString(`\x`)
buf.WriteByte(lowerhex[c>>4])
buf.WriteByte(lowerhex[c&0xf])
default:
buf.WriteByte(c)
}
}
return buf.String()
}
// BinfmtEntry is an entry to be registered by the init process.
type BinfmtEntry struct {
	// The offset of the magic/mask in the file, counted in bytes.
	Offset byte
	// The byte sequence binfmt_misc is matching for.
	Magic string
	// An (optional, defaults to all 0xff) mask.
	Mask string
	// The program that should be invoked with the binary as first argument.
	Interpreter *check.Absolute
}

// Valid returns whether e can be registered into the kernel. A nil entry or
// one without an interpreter is never valid; otherwise both the match window
// (offset plus the longer of magic/mask) and the interpreter pathname must be
// under 128 bytes — presumably mirroring kernel binfmt_misc limits (TODO
// confirm against the kernel documentation).
func (e *BinfmtEntry) Valid() bool {
	return e != nil &&
		int(e.Offset)+max(len(e.Magic), len(e.Mask)) < 128 &&
		e.Interpreter != nil && len(e.Interpreter.String()) < 128
}

View File

@@ -1,62 +0,0 @@
package container
import (
"strings"
"testing"
"hakurei.app/fhs"
)
// TestEscapeBinfmt checks escaping of binfmt_misc magic/mask sequences: NUL
// bytes must become \x00 escapes while other non-printable bytes pass through
// unchanged.
func TestEscapeBinfmt(t *testing.T) {
	t.Parallel()

	testCases := []struct {
		name  string
		magic string
		want  string
	}{
		// No special bytes: the input passes through unchanged.
		{"packed DOS applications", "\x0eDEX", "\x0eDEX"},
		{"riscv64 magic",
			"\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\xf3\x00",
			"\x7fELF\x02\x01\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\x02\\x00\xf3\\x00"},
		{"riscv64 mask",
			"\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff",
			"\xff\xff\xff\xff\xff\xff\xff\\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff"},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			got := escapeBinfmt(new(strings.Builder), tc.magic)
			if got != tc.want {
				t.Errorf("escapeBinfmt: %q, want %q", got, tc.want)
			}
		})
	}
}
// TestBinfmtEntry exercises BinfmtEntry.Valid across its rejection cases
// (missing interpreter, 128-byte offset/magic/mask limits) and one valid
// entry.
func TestBinfmtEntry(t *testing.T) {
	t.Parallel()

	testCases := []struct {
		name  string
		e     BinfmtEntry
		valid bool
	}{
		// Zero value is invalid because Interpreter is nil.
		{"zero", BinfmtEntry{}, false},
		{"large offset", BinfmtEntry{Offset: 128}, false},
		{"long magic", BinfmtEntry{Magic: strings.Repeat("\x00", 128)}, false},
		{"long mask", BinfmtEntry{Mask: strings.Repeat("\x00", 128)}, false},
		{"valid", BinfmtEntry{Interpreter: fhs.AbsRoot}, true},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			if tc.e.Valid() != tc.valid {
				t.Errorf("Valid: %v", !tc.valid)
			}
		})
	}
}

View File

@@ -18,7 +18,6 @@ const (
CAP_SETPCAP = 0x8 CAP_SETPCAP = 0x8
CAP_NET_ADMIN = 0xc CAP_NET_ADMIN = 0xc
CAP_DAC_OVERRIDE = 0x1 CAP_DAC_OVERRIDE = 0x1
CAP_SETFCAP = 0x1f
) )
type ( type (

View File

@@ -67,9 +67,6 @@ type (
// Copied to the underlying [exec.Cmd]. // Copied to the underlying [exec.Cmd].
WaitDelay time.Duration WaitDelay time.Duration
// Suppress verbose output of init.
Quiet bool
cmd *exec.Cmd cmd *exec.Cmd
ctx context.Context ctx context.Context
msg message.Msg msg message.Msg
@@ -91,20 +88,12 @@ type (
// Time to wait for processes lingering after the initial process terminates. // Time to wait for processes lingering after the initial process terminates.
AdoptWaitDelay time.Duration AdoptWaitDelay time.Duration
// Map uid/gid 0 in the init process. Requires [FstypeProc] attached to
// [fhs.Proc] in the container filesystem.
InitAsRoot bool
// Mapped Uid in user namespace. // Mapped Uid in user namespace.
Uid int Uid int
// Mapped Gid in user namespace. // Mapped Gid in user namespace.
Gid int Gid int
// Hostname value in UTS namespace. // Hostname value in UTS namespace.
Hostname string Hostname string
// Register binfmt_misc entries.
Binfmt []BinfmtEntry
// Alternative pathname to attach binfmt_misc filesystem. The zero value
// requires [FstypeProc] to be made available at [fhs.Proc].
BinfmtPath *check.Absolute
// Sequential container setup ops. // Sequential container setup ops.
*Ops *Ops
@@ -224,9 +213,6 @@ func (p *Container) Start() error {
if p.cmd.Process != nil { if p.cmd.Process != nil {
return errors.New("container: already started") return errors.New("container: already started")
} }
if !p.InitAsRoot && len(p.Binfmt) > 0 {
return errors.New("container: init as root required, but not enabled")
}
if err := ensureCloseOnExec(); err != nil { if err := ensureCloseOnExec(); err != nil {
return err return err
@@ -297,18 +283,6 @@ func (p *Container) Start() error {
if !p.HostNet { if !p.HostNet {
p.cmd.SysProcAttr.Cloneflags |= CLONE_NEWNET p.cmd.SysProcAttr.Cloneflags |= CLONE_NEWNET
} }
if p.InitAsRoot {
p.cmd.SysProcAttr.AmbientCaps = append(p.cmd.SysProcAttr.AmbientCaps,
// mappings during init as root
CAP_SETFCAP,
)
if !p.SeccompDisable &&
len(p.SeccompRules) == 0 &&
p.SeccompPresets&std.PresetDenyNS != 0 {
return errors.New("container: as root requires late namespace creation")
}
}
// place setup pipe before user supplied extra files, this is later restored by init // place setup pipe before user supplied extra files, this is later restored by init
if r, w, err := os.Pipe(); err != nil { if r, w, err := os.Pipe(); err != nil {
@@ -368,6 +342,8 @@ func (p *Container) Start() error {
Err: ENOSYS, Err: ENOSYS,
Origin: true, Origin: true,
} }
} else {
p.msg.Verbosef("landlock abi version %d", abi)
} }
if rulesetFd, err := rulesetAttr.Create(0); err != nil { if rulesetFd, err := rulesetAttr.Create(0); err != nil {
@@ -377,6 +353,7 @@ func (p *Container) Start() error {
Err: err, Err: err,
} }
} else { } else {
p.msg.Verbosef("enforcing landlock ruleset %s", rulesetAttr)
if err = landlock.RestrictSelf(rulesetFd, 0); err != nil { if err = landlock.RestrictSelf(rulesetFd, 0); err != nil {
_ = Close(rulesetFd) _ = Close(rulesetFd)
return &StartError{ return &StartError{
@@ -433,6 +410,7 @@ func (p *Container) Start() error {
} }
} }
p.msg.Verbose("starting container init")
if err := p.cmd.Start(); err != nil { if err := p.cmd.Start(); err != nil {
return &StartError{ return &StartError{
Step: "start container init", Step: "start container init",
@@ -503,6 +481,7 @@ func (p *Container) Serve() (err error) {
} }
case <-done: case <-done:
p.msg.Verbose("setup payload took", time.Since(t))
return return
} }
}(p.setup[1]) }(p.setup[1])
@@ -512,7 +491,7 @@ func (p *Container) Serve() (err error) {
Getuid(), Getuid(),
Getgid(), Getgid(),
len(p.ExtraFiles), len(p.ExtraFiles),
p.msg.IsVerbose() && !p.Quiet, p.msg.IsVerbose(),
}) })
} }

View File

@@ -16,8 +16,6 @@ import (
"strings" "strings"
"syscall" "syscall"
"testing" "testing"
"time"
"unsafe"
"hakurei.app/check" "hakurei.app/check"
"hakurei.app/command" "hakurei.app/command"
@@ -235,9 +233,6 @@ func earlyMnt(mnt ...*vfs.MountInfoEntry) func(*testing.T, context.Context) []*v
return func(*testing.T, context.Context) []*vfs.MountInfoEntry { return mnt } return func(*testing.T, context.Context) []*vfs.MountInfoEntry { return mnt }
} }
//go:linkname toHost hakurei.app/container.toHost
func toHost(name string) string
var containerTestCases = []struct { var containerTestCases = []struct {
name string name string
filter bool filter bool
@@ -337,15 +332,13 @@ var containerTestCases = []struct {
func(t *testing.T, ctx context.Context) []*vfs.MountInfoEntry { func(t *testing.T, ctx context.Context) []*vfs.MountInfoEntry {
return []*vfs.MountInfoEntry{ return []*vfs.MountInfoEntry{
ent("/", hst.PrivateTmp, "rw", "overlay", "overlay", ent("/", hst.PrivateTmp, "rw", "overlay", "overlay",
"rw"+ "rw,lowerdir="+
",lowerdir+="+ container.InternalToHostOvlEscape(ctx.Value(testVal("lower0")).(*check.Absolute).String())+":"+
toHost(ctx.Value(testVal("lower0")).(*check.Absolute).String())+ container.InternalToHostOvlEscape(ctx.Value(testVal("lower1")).(*check.Absolute).String())+
",lowerdir+="+
toHost(ctx.Value(testVal("lower1")).(*check.Absolute).String())+
",upperdir="+ ",upperdir="+
toHost(ctx.Value(testVal("upper")).(*check.Absolute).String())+ container.InternalToHostOvlEscape(ctx.Value(testVal("upper")).(*check.Absolute).String())+
",workdir="+ ",workdir="+
toHost(ctx.Value(testVal("work")).(*check.Absolute).String())+ container.InternalToHostOvlEscape(ctx.Value(testVal("work")).(*check.Absolute).String())+
",redirect_dir=nofollow,uuid=on,userxattr"), ",redirect_dir=nofollow,uuid=on,userxattr"),
} }
}, },
@@ -395,11 +388,9 @@ var containerTestCases = []struct {
func(t *testing.T, ctx context.Context) []*vfs.MountInfoEntry { func(t *testing.T, ctx context.Context) []*vfs.MountInfoEntry {
return []*vfs.MountInfoEntry{ return []*vfs.MountInfoEntry{
ent("/", hst.PrivateTmp, "rw", "overlay", "overlay", ent("/", hst.PrivateTmp, "rw", "overlay", "overlay",
"ro"+ "ro,lowerdir="+
",lowerdir+="+ container.InternalToHostOvlEscape(ctx.Value(testVal("lower0")).(*check.Absolute).String())+":"+
toHost(ctx.Value(testVal("lower0")).(*check.Absolute).String())+ container.InternalToHostOvlEscape(ctx.Value(testVal("lower1")).(*check.Absolute).String())+
",lowerdir+="+
toHost(ctx.Value(testVal("lower1")).(*check.Absolute).String())+
",redirect_dir=nofollow,userxattr"), ",redirect_dir=nofollow,userxattr"),
} }
}, },
@@ -409,11 +400,39 @@ var containerTestCases = []struct {
func TestContainer(t *testing.T) { func TestContainer(t *testing.T) {
t.Parallel() t.Parallel()
var suffix string t.Run("cancel", testContainerCancel(nil, func(t *testing.T, c *container.Container) {
runTests: wantErr := context.Canceled
wantExitCode := 0
if err := c.Wait(); !reflect.DeepEqual(err, wantErr) {
if m, ok := container.InternalMessageFromError(err); ok {
t.Error(m)
}
t.Errorf("Wait: error = %#v, want %#v", err, wantErr)
}
if ps := c.ProcessState(); ps == nil {
t.Errorf("ProcessState unexpectedly returned nil")
} else if code := ps.ExitCode(); code != wantExitCode {
t.Errorf("ExitCode: %d, want %d", code, wantExitCode)
}
}))
t.Run("forward", testContainerCancel(func(c *container.Container) {
c.ForwardCancel = true
}, func(t *testing.T, c *container.Container) {
var exitError *exec.ExitError
if err := c.Wait(); !errors.As(err, &exitError) {
if m, ok := container.InternalMessageFromError(err); ok {
t.Error(m)
}
t.Errorf("Wait: error = %v", err)
}
if code := exitError.ExitCode(); code != blockExitCodeInterrupt {
t.Errorf("ExitCode: %d, want %d", code, blockExitCodeInterrupt)
}
}))
for i, tc := range containerTestCases { for i, tc := range containerTestCases {
_suffix := suffix t.Run(tc.name, func(t *testing.T) {
t.Run(tc.name+_suffix, func(t *testing.T) {
t.Parallel() t.Parallel()
wantOps, wantOpsCtx := tc.ops(t) wantOps, wantOpsCtx := tc.ops(t)
@@ -437,8 +456,6 @@ runTests:
c.SeccompDisable = !tc.filter c.SeccompDisable = !tc.filter
c.RetainSession = tc.session c.RetainSession = tc.session
c.HostNet = tc.net c.HostNet = tc.net
c.InitAsRoot = _suffix != ""
c.Env = append(c.Env, "HAKUREI_TEST_SUFFIX="+_suffix)
if info.CanDegrade { if info.CanDegrade {
if _, err := landlock.GetABI(); err != nil { if _, err := landlock.GetABI(); err != nil {
if !errors.Is(err, syscall.ENOSYS) { if !errors.Is(err, syscall.ENOSYS) {
@@ -448,9 +465,6 @@ runTests:
t.Log("Landlock LSM is unavailable, enabling HostAbstract") t.Log("Landlock LSM is unavailable, enabling HostAbstract")
} }
} }
if c.InitAsRoot {
c.SeccompPresets &= ^std.PresetDenyNS
}
c. c.
Readonly(check.MustAbs(pathReadonly), 0755). Readonly(check.MustAbs(pathReadonly), 0755).
@@ -519,11 +533,6 @@ runTests:
} }
}) })
} }
if suffix == "" {
suffix = " as root"
goto runTests
}
} }
func ent(root, target, vfsOptstr, fsType, source, fsOptstr string) *vfs.MountInfoEntry { func ent(root, target, vfsOptstr, fsType, source, fsOptstr string) *vfs.MountInfoEntry {
@@ -546,118 +555,49 @@ func hostnameFromTestCase(name string) string {
} }
func testContainerCancel( func testContainerCancel(
t *testing.T,
containerExtra func(c *container.Container), containerExtra func(c *container.Container),
waitCheck func(ps *os.ProcessState, waitErr error), waitCheck func(t *testing.T, c *container.Container),
) { ) func(t *testing.T) {
ctx, cancel := context.WithCancel(t.Context()) return func(t *testing.T) {
c := helperNewContainer(ctx, "block")
c.Stdout, c.Stderr = os.Stdout, os.Stderr
if containerExtra != nil {
containerExtra(c)
}
ready := make(chan struct{})
var waitErr error
r, w, err := os.Pipe()
if err != nil {
t.Fatalf("cannot pipe: %v", err)
}
c.ExtraFiles = append(c.ExtraFiles, w)
go func() {
defer close(ready)
if _, _err := r.Read(make([]byte, 1)); _err != nil {
panic(_err)
}
}()
if err = c.Start(); err != nil {
if m, ok := container.InternalMessageFromError(err); ok {
t.Fatal(m)
} else {
t.Fatalf("cannot start container: %v", err)
}
}
done := make(chan struct{})
go func() {
defer close(done)
waitErr = c.Wait()
_ = r.SetReadDeadline(time.Now())
}()
if err = c.Serve(); err != nil {
if m, ok := container.InternalMessageFromError(err); ok {
t.Error(m)
} else {
t.Errorf("cannot serve setup params: %v", err)
}
}
<-ready
cancel()
<-done
waitCheck(c.ProcessState(), waitErr)
}
func TestForward(t *testing.T) {
t.Parallel()
f := func(ps *os.ProcessState, waitErr error) {
var exitError *exec.ExitError
if !errors.As(waitErr, &exitError) {
if m, ok := container.InternalMessageFromError(waitErr); ok {
t.Error(m)
}
t.Errorf("Wait: error = %v", waitErr)
}
if code := exitError.ExitCode(); code != blockExitCodeInterrupt {
t.Errorf("ExitCode: %d, want %d", code, blockExitCodeInterrupt)
}
}
t.Run("direct", func(t *testing.T) {
t.Parallel() t.Parallel()
testContainerCancel(t, func(c *container.Container) { ctx, cancel := context.WithCancel(t.Context())
c.ForwardCancel = true
}, f)
})
t.Run("as root", func(t *testing.T) {
testContainerCancel(t, func(c *container.Container) {
c.ForwardCancel = true
c.InitAsRoot = true
c.Proc(fhs.AbsProc)
}, f)
})
}
func TestCancel(t *testing.T) { c := helperNewContainer(ctx, "block")
t.Parallel() c.Stdout, c.Stderr = os.Stdout, os.Stderr
if containerExtra != nil {
containerExtra(c)
}
f := func(ps *os.ProcessState, waitErr error) { ready := make(chan struct{})
wantErr := context.Canceled if r, w, err := os.Pipe(); err != nil {
if !reflect.DeepEqual(waitErr, wantErr) { t.Fatalf("cannot pipe: %v", err)
if m, ok := container.InternalMessageFromError(waitErr); ok { } else {
t.Error(m) c.ExtraFiles = append(c.ExtraFiles, w)
go func() {
defer close(ready)
if _, err = r.Read(make([]byte, 1)); err != nil {
panic(err.Error())
}
}()
}
if err := c.Start(); err != nil {
if m, ok := container.InternalMessageFromError(err); ok {
t.Fatal(m)
} else {
t.Fatalf("cannot start container: %v", err)
}
} else if err = c.Serve(); err != nil {
if m, ok := container.InternalMessageFromError(err); ok {
t.Error(m)
} else {
t.Errorf("cannot serve setup params: %v", err)
} }
t.Errorf("Wait: error = %#v, want %#v", waitErr, wantErr)
}
if ps == nil {
t.Errorf("ProcessState unexpectedly returned nil")
} else if code := ps.ExitCode(); code != 0 {
t.Errorf("ExitCode: %d, want %d", code, 0)
} }
<-ready
cancel()
waitCheck(t, c)
} }
t.Run("direct", func(t *testing.T) {
t.Parallel()
testContainerCancel(t, nil, f)
})
t.Run("as root", func(t *testing.T) {
testContainerCancel(t, func(c *container.Container) {
c.InitAsRoot = true
c.Proc(fhs.AbsProc)
}, f)
})
} }
func TestContainerString(t *testing.T) { func TestContainerString(t *testing.T) {
@@ -693,8 +633,6 @@ func init() {
}) })
c.Command("container", command.UsageInternal, func(args []string) error { c.Command("container", command.UsageInternal, func(args []string) error {
asRoot := os.Getenv("HAKUREI_TEST_SUFFIX") == " as root"
if len(args) != 1 { if len(args) != 1 {
return syscall.EINVAL return syscall.EINVAL
} }
@@ -712,66 +650,6 @@ func init() {
return fmt.Errorf("gid: %d, want %d", gid, tc.gid) return fmt.Errorf("gid: %d, want %d", gid, tc.gid)
} }
// no attack surface increase during as root due to no_new_privs
var wantBounding uintptr = 1
asRootNot := " not"
if !asRoot {
wantBounding = 0
asRootNot = ""
}
const (
PR_CAP_AMBIENT = 0x2f
PR_CAP_AMBIENT_IS_SET = 0x1
)
for i := range container.LastCap(nil) + 1 {
r, _, errno := syscall.Syscall(
syscall.SYS_PRCTL,
PR_CAP_AMBIENT,
PR_CAP_AMBIENT_IS_SET,
i,
)
if errno != 0 {
return os.NewSyscallError("prctl", errno)
}
if r != 0 {
return fmt.Errorf("capability %d in ambient set", i)
}
r, _, errno = syscall.Syscall(
syscall.SYS_PRCTL,
syscall.PR_CAPBSET_READ,
i,
0,
)
if errno != 0 {
return os.NewSyscallError("prctl", errno)
}
if r != wantBounding {
return fmt.Errorf("capability %d%s in bounding set", i, asRootNot)
}
}
const _LINUX_CAPABILITY_VERSION_3 = 0x20080522
var capData struct {
effective uint32
permitted uint32
inheritable uint32
}
if _, _, errno := syscall.Syscall(syscall.SYS_CAPGET, uintptr(unsafe.Pointer(&struct {
version uint32
pid int32
}{_LINUX_CAPABILITY_VERSION_3, 0})), uintptr(unsafe.Pointer(&capData)), 0); errno != 0 {
return os.NewSyscallError("capget", errno)
}
if max(capData.effective, capData.permitted, capData.inheritable) != 0 {
return fmt.Errorf(
"effective = %d, permitted = %d, inheritable = %d",
capData.effective, capData.permitted, capData.inheritable,
)
}
wantHost := hostnameFromTestCase(tc.name) wantHost := hostnameFromTestCase(tc.name)
if host, err := os.Hostname(); err != nil { if host, err := os.Hostname(); err != nil {
return fmt.Errorf("cannot get hostname: %v", err) return fmt.Errorf("cannot get hostname: %v", err)
@@ -889,7 +767,7 @@ func TestMain(m *testing.M) {
} }
c.MustParse(os.Args[1:], func(err error) { c.MustParse(os.Args[1:], func(err error) {
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err.Error())
} }
}) })
return return

View File

@@ -65,8 +65,6 @@ type syscallDispatcher interface {
remount(msg message.Msg, target string, flags uintptr) error remount(msg message.Msg, target string, flags uintptr) error
// mountTmpfs provides mountTmpfs. // mountTmpfs provides mountTmpfs.
mountTmpfs(fsname, target string, flags uintptr, size int, perm os.FileMode) error mountTmpfs(fsname, target string, flags uintptr, size int, perm os.FileMode) error
// mountOverlay provides mountOverlay.
mountOverlay(target string, options [][2]string) error
// ensureFile provides ensureFile. // ensureFile provides ensureFile.
ensureFile(name string, perm, pperm os.FileMode) error ensureFile(name string, perm, pperm os.FileMode) error
// mustLoopback provides mustLoopback. // mustLoopback provides mustLoopback.
@@ -171,9 +169,6 @@ func (direct) remount(msg message.Msg, target string, flags uintptr) error {
func (k direct) mountTmpfs(fsname, target string, flags uintptr, size int, perm os.FileMode) error { func (k direct) mountTmpfs(fsname, target string, flags uintptr, size int, perm os.FileMode) error {
return mountTmpfs(k, fsname, target, flags, size, perm) return mountTmpfs(k, fsname, target, flags, size, perm)
} }
func (k direct) mountOverlay(target string, options [][2]string) error {
return mountOverlay(target, options)
}
func (direct) ensureFile(name string, perm, pperm os.FileMode) error { func (direct) ensureFile(name string, perm, pperm os.FileMode) error {
return ensureFile(name, perm, pperm) return ensureFile(name, perm, pperm)
} }

View File

@@ -468,14 +468,6 @@ func (k *kstub) mountTmpfs(fsname, target string, flags uintptr, size int, perm
stub.CheckArg(k.Stub, "perm", perm, 4)) stub.CheckArg(k.Stub, "perm", perm, 4))
} }
func (k *kstub) mountOverlay(target string, options [][2]string) error {
k.Helper()
return k.Expects("mountOverlay").Error(
stub.CheckArg(k.Stub, "target", target, 0),
stub.CheckArgReflect(k.Stub, "options", options, 1),
)
}
func (k *kstub) ensureFile(name string, perm, pperm os.FileMode) error { func (k *kstub) ensureFile(name string, perm, pperm os.FileMode) error {
k.Helper() k.Helper()
return k.Expects("ensureFile").Error( return k.Expects("ensureFile").Error(

View File

@@ -118,10 +118,6 @@ func errnoFallback(op, path string, err error) (syscall.Errno, *os.PathError) {
// mount wraps syscall.Mount for error handling. // mount wraps syscall.Mount for error handling.
func mount(source, target, fstype string, flags uintptr, data string) error { func mount(source, target, fstype string, flags uintptr, data string) error {
if max(len(source), len(target), len(data))+1 > os.Getpagesize() {
return &MountError{source, target, fstype, flags, data, syscall.ENOMEM}
}
err := syscall.Mount(source, target, fstype, flags, data) err := syscall.Mount(source, target, fstype, flags, data)
if err == nil { if err == nil {
return nil return nil

View File

@@ -11,13 +11,11 @@ import (
"path/filepath" "path/filepath"
"slices" "slices"
"strconv" "strconv"
"strings"
"sync" "sync"
"sync/atomic" "sync/atomic"
. "syscall" . "syscall"
"time" "time"
"hakurei.app/check"
"hakurei.app/container/seccomp" "hakurei.app/container/seccomp"
"hakurei.app/ext" "hakurei.app/ext"
"hakurei.app/fhs" "hakurei.app/fhs"
@@ -184,33 +182,23 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
cancel() cancel()
} }
uid, gid := param.Uid, param.Gid
if param.InitAsRoot {
uid, gid = 0, 0
}
// write uid/gid map here so parent does not need to set dumpable // write uid/gid map here so parent does not need to set dumpable
if err := k.setDumpable(ext.SUID_DUMP_USER); err != nil { if err := k.setDumpable(ext.SUID_DUMP_USER); err != nil {
k.fatalf(msg, "cannot set SUID_DUMP_USER: %v", err) k.fatalf(msg, "cannot set SUID_DUMP_USER: %v", err)
} }
if err := k.writeFile( if err := k.writeFile(fhs.Proc+"self/uid_map",
fhs.Proc+"self/uid_map", append([]byte{}, strconv.Itoa(param.Uid)+" "+strconv.Itoa(param.HostUid)+" 1\n"...),
[]byte(strconv.Itoa(uid)+" "+strconv.Itoa(param.HostUid)+" 1\n"), 0); err != nil {
0,
); err != nil {
k.fatalf(msg, "%v", err) k.fatalf(msg, "%v", err)
} }
if err := k.writeFile( if err := k.writeFile(fhs.Proc+"self/setgroups",
fhs.Proc+"self/setgroups",
[]byte("deny\n"), []byte("deny\n"),
0, 0); err != nil && !os.IsNotExist(err) {
); err != nil && !os.IsNotExist(err) {
k.fatalf(msg, "%v", err) k.fatalf(msg, "%v", err)
} }
if err := k.writeFile(fhs.Proc+"self/gid_map", if err := k.writeFile(fhs.Proc+"self/gid_map",
[]byte(strconv.Itoa(gid)+" "+strconv.Itoa(param.HostGid)+" 1\n"), append([]byte{}, strconv.Itoa(param.Gid)+" "+strconv.Itoa(param.HostGid)+" 1\n"...),
0, 0); err != nil {
); err != nil {
k.fatalf(msg, "%v", err) k.fatalf(msg, "%v", err)
} }
if err := k.setDumpable(ext.SUID_DUMP_DISABLE); err != nil { if err := k.setDumpable(ext.SUID_DUMP_DISABLE); err != nil {
@@ -235,23 +223,6 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
state := &setupState{process: make(map[int]WaitStatus), Params: &param.Params, Msg: msg, Context: ctx} state := &setupState{process: make(map[int]WaitStatus), Params: &param.Params, Msg: msg, Context: ctx}
defer cancel() defer cancel()
if err := k.mount(SourceTmpfsRootfs, intermediateHostPath, FstypeTmpfs, MS_NODEV|MS_NOSUID, zeroString); err != nil {
k.fatalf(msg, "cannot mount intermediate root: %v", optionalErrorUnwrap(err))
}
if err := k.chdir(intermediateHostPath); err != nil {
k.fatalf(msg, "cannot enter intermediate host path: %v", err)
}
if len(param.Binfmt) > 0 {
for i, e := range param.Binfmt {
if pathname, err := k.evalSymlinks(e.Interpreter.String()); err != nil {
k.fatal(msg, err)
} else if param.Binfmt[i].Interpreter, err = check.NewAbs(pathname); err != nil {
k.fatal(msg, err)
}
}
}
/* early is called right before pivot_root into intermediate root; /* early is called right before pivot_root into intermediate root;
this step is mostly for gathering information that would otherwise be this step is mostly for gathering information that would otherwise be
difficult to obtain via library functions after pivot_root, and difficult to obtain via library functions after pivot_root, and
@@ -271,6 +242,13 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
} }
} }
if err := k.mount(SourceTmpfsRootfs, intermediateHostPath, FstypeTmpfs, MS_NODEV|MS_NOSUID, zeroString); err != nil {
k.fatalf(msg, "cannot mount intermediate root: %v", optionalErrorUnwrap(err))
}
if err := k.chdir(intermediateHostPath); err != nil {
k.fatalf(msg, "cannot enter intermediate host path: %v", err)
}
if err := k.mkdir(sysrootDir, 0755); err != nil { if err := k.mkdir(sysrootDir, 0755); err != nil {
k.fatalf(msg, "%v", err) k.fatalf(msg, "%v", err)
} }
@@ -307,48 +285,6 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
} }
} }
if len(param.Binfmt) > 0 {
const interpreter = "/interpreter"
if param.BinfmtPath == nil {
param.BinfmtPath = fhs.AbsProcSys.Append("fs/binfmt_misc")
}
binfmt := sysrootPath + param.BinfmtPath.String()
if err := k.mkdirAll(binfmt, 0); err != nil {
k.fatal(msg, err)
}
if err := k.mount(
SourceBinfmtMisc,
binfmt,
FstypeBinfmtMisc,
MS_NOSUID|MS_NOEXEC|MS_NODEV,
zeroString,
); err != nil {
k.fatal(msg, err)
}
var buf strings.Builder
buf.Grow(1920)
register := binfmt + "/register"
for i, e := range param.Binfmt {
if err := k.symlink(hostPath+e.Interpreter.String(), interpreter); err != nil {
k.fatal(msg, err)
} else if err = k.writeFile(register, []byte(":"+
strconv.Itoa(i)+":"+
"M:"+
strconv.Itoa(int(e.Offset))+":"+
escapeBinfmt(&buf, e.Magic)+":"+
escapeBinfmt(&buf, e.Mask)+":"+
interpreter+":"+
"F"), 0); err != nil {
k.fatal(msg, err)
} else if err = k.remove(interpreter); err != nil {
k.fatal(msg, err)
}
}
}
// setup requiring host root complete at this point // setup requiring host root complete at this point
if err := k.mount(hostDir, hostDir, zeroString, MS_SILENT|MS_REC|MS_PRIVATE, zeroString); err != nil { if err := k.mount(hostDir, hostDir, zeroString, MS_SILENT|MS_REC|MS_PRIVATE, zeroString); err != nil {
k.fatalf(msg, "cannot make host root rprivate: %v", optionalErrorUnwrap(err)) k.fatalf(msg, "cannot make host root rprivate: %v", optionalErrorUnwrap(err))
@@ -387,19 +323,11 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
} }
} }
var keepCaps []uintptr
if param.Privileged {
keepCaps = append(keepCaps, CAP_SYS_ADMIN, CAP_SETPCAP)
}
if param.InitAsRoot {
keepCaps = append(keepCaps, CAP_SETFCAP)
}
if err := k.capAmbientClearAll(); err != nil { if err := k.capAmbientClearAll(); err != nil {
k.fatalf(msg, "cannot clear the ambient capability set: %v", err) k.fatalf(msg, "cannot clear the ambient capability set: %v", err)
} }
for i := range lastcap + 1 { for i := uintptr(0); i <= lastcap; i++ {
if slices.Contains(keepCaps, i) { if param.Privileged && i == CAP_SYS_ADMIN {
continue continue
} }
if err := k.capBoundingSetDrop(i); err != nil { if err := k.capBoundingSetDrop(i); err != nil {
@@ -408,23 +336,20 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
} }
var keep [2]uint32 var keep [2]uint32
for _, c := range keepCaps { if param.Privileged {
keep[capToIndex(c)] |= capToMask(c) keep[capToIndex(CAP_SYS_ADMIN)] |= capToMask(CAP_SYS_ADMIN)
}
if err := k.capAmbientRaise(CAP_SYS_ADMIN); err != nil {
k.fatalf(msg, "cannot raise CAP_SYS_ADMIN: %v", err)
}
}
if err := k.capset( if err := k.capset(
&capHeader{_LINUX_CAPABILITY_VERSION_3, 0}, &capHeader{_LINUX_CAPABILITY_VERSION_3, 0},
&[2]capData{{keep[0], keep[0], keep[0]}, {keep[1], keep[1], keep[1]}}, &[2]capData{{0, keep[0], keep[0]}, {0, keep[1], keep[1]}},
); err != nil { ); err != nil {
k.fatalf(msg, "cannot capset: %v", err) k.fatalf(msg, "cannot capset: %v", err)
} }
for _, c := range keepCaps {
if err := k.capAmbientRaise(c); err != nil {
k.fatalf(msg, "cannot raise %#x: %v", c, err)
}
}
if !param.SeccompDisable { if !param.SeccompDisable {
rules := param.SeccompRules rules := param.SeccompRules
if len(rules) == 0 { // non-empty rules slice always overrides presets if len(rules) == 0 { // non-empty rules slice always overrides presets
@@ -549,14 +474,6 @@ func initEntrypoint(k syscallDispatcher, msg message.Msg) {
cmd.ExtraFiles = extraFiles cmd.ExtraFiles = extraFiles
cmd.Dir = param.Dir.String() cmd.Dir = param.Dir.String()
if param.InitAsRoot {
cmd.SysProcAttr = &SysProcAttr{
Cloneflags: CLONE_NEWUSER,
UidMappings: []SysProcIDMap{{ContainerID: param.Uid, HostID: 0, Size: 1}},
GidMappings: []SysProcIDMap{{ContainerID: param.Gid, HostID: 0, Size: 1}},
}
}
msg.Verbosef("starting initial process %s", param.Path) msg.Verbosef("starting initial process %s", param.Path)
if err := k.start(cmd); err != nil { if err := k.start(cmd); err != nil {
k.fatalf(msg, "%v", err) k.fatalf(msg, "%v", err)

View File

@@ -332,8 +332,6 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("fatalf", stub.ExpectArgs{"invalid op at index %d", []any{0}}, nil, nil), call("fatalf", stub.ExpectArgs{"invalid op at index %d", []any{0}}, nil, nil),
/* end early */ /* end early */
@@ -372,8 +370,6 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("fatalf", stub.ExpectArgs{"invalid op at index %d", []any{0}}, nil, nil), call("fatalf", stub.ExpectArgs{"invalid op at index %d", []any{0}}, nil, nil),
/* end early */ /* end early */
@@ -412,8 +408,6 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", stub.UniqueError(61)), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", stub.UniqueError(61)),
call("fatalf", stub.ExpectArgs{"cannot prepare op at index %d: %v", []any{0, stub.UniqueError(61)}}, nil, nil), call("fatalf", stub.ExpectArgs{"cannot prepare op at index %d: %v", []any{0, stub.UniqueError(61)}}, nil, nil),
@@ -453,8 +447,6 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", &os.PathError{Op: "readlink", Path: "/", Err: stub.UniqueError(60)}), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", &os.PathError{Op: "readlink", Path: "/", Err: stub.UniqueError(60)}),
call("fatal", stub.ExpectArgs{[]any{"cannot readlink /: unique error 60 injected by the test suite"}}, nil, nil), call("fatal", stub.ExpectArgs{[]any{"cannot readlink /: unique error 60 injected by the test suite"}}, nil, nil),
@@ -494,6 +486,9 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, stub.UniqueError(58)), call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, stub.UniqueError(58)),
call("fatalf", stub.ExpectArgs{"cannot mount intermediate root: %v", []any{stub.UniqueError(58)}}, nil, nil), call("fatalf", stub.ExpectArgs{"cannot mount intermediate root: %v", []any{stub.UniqueError(58)}}, nil, nil),
}, },
@@ -531,6 +526,9 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
/* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil), call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, stub.UniqueError(56)), call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, stub.UniqueError(56)),
call("fatalf", stub.ExpectArgs{"cannot enter intermediate host path: %v", []any{stub.UniqueError(56)}}, nil, nil), call("fatalf", stub.ExpectArgs{"cannot enter intermediate host path: %v", []any{stub.UniqueError(56)}}, nil, nil),
@@ -569,11 +567,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, stub.UniqueError(54)), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, stub.UniqueError(54)),
call("fatalf", stub.ExpectArgs{"%v", []any{stub.UniqueError(54)}}, nil, nil), call("fatalf", stub.ExpectArgs{"%v", []any{stub.UniqueError(54)}}, nil, nil),
}, },
@@ -611,11 +609,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, stub.UniqueError(52)), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, stub.UniqueError(52)),
call("fatalf", stub.ExpectArgs{"cannot bind sysroot: %v", []any{stub.UniqueError(52)}}, nil, nil), call("fatalf", stub.ExpectArgs{"cannot bind sysroot: %v", []any{stub.UniqueError(52)}}, nil, nil),
@@ -654,11 +652,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, stub.UniqueError(50)), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, stub.UniqueError(50)),
@@ -698,11 +696,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -743,11 +741,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -789,11 +787,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -844,11 +842,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -899,11 +897,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -955,11 +953,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1012,11 +1010,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1071,11 +1069,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1131,11 +1129,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1192,11 +1190,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1254,11 +1252,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1317,11 +1315,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1381,11 +1379,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1446,11 +1444,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1512,11 +1510,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1586,11 +1584,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1624,6 +1622,7 @@ func TestInitEntrypoint(t *testing.T) {
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x5)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x5)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x6)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x6)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x7)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x7)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x8)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x9)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x9)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xa)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xa)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xb)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xb)}, nil, nil),
@@ -1655,9 +1654,8 @@ func TestInitEntrypoint(t *testing.T) {
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x26)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x26)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x27)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x27)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x28)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x28)}, nil, nil),
call("capset", stub.ExpectArgs{&capHeader{_LINUX_CAPABILITY_VERSION_3, 0}, &[2]capData{{0x200100, 0x200100, 0x200100}, {0, 0, 0}}}, nil, nil),
call("capAmbientRaise", stub.ExpectArgs{uintptr(0x15)}, nil, stub.UniqueError(19)), call("capAmbientRaise", stub.ExpectArgs{uintptr(0x15)}, nil, stub.UniqueError(19)),
call("fatalf", stub.ExpectArgs{"cannot raise %#x: %v", []any{uintptr(0x15), stub.UniqueError(19)}}, nil, nil), call("fatalf", stub.ExpectArgs{"cannot raise CAP_SYS_ADMIN: %v", []any{stub.UniqueError(19)}}, nil, nil),
}, },
}, nil}, }, nil},
@@ -1693,11 +1691,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1731,6 +1729,7 @@ func TestInitEntrypoint(t *testing.T) {
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x5)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x5)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x6)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x6)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x7)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x7)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x8)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x9)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x9)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xa)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xa)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xb)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xb)}, nil, nil),
@@ -1762,7 +1761,8 @@ func TestInitEntrypoint(t *testing.T) {
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x26)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x26)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x27)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x27)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x28)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x28)}, nil, nil),
call("capset", stub.ExpectArgs{&capHeader{_LINUX_CAPABILITY_VERSION_3, 0}, &[2]capData{{0x200100, 0x200100, 0x200100}, {0, 0, 0}}}, nil, stub.UniqueError(17)), call("capAmbientRaise", stub.ExpectArgs{uintptr(0x15)}, nil, nil),
call("capset", stub.ExpectArgs{&capHeader{_LINUX_CAPABILITY_VERSION_3, 0}, &[2]capData{{0, 0x200000, 0x200000}, {0, 0, 0}}}, nil, stub.UniqueError(17)),
call("fatalf", stub.ExpectArgs{"cannot capset: %v", []any{stub.UniqueError(17)}}, nil, nil), call("fatalf", stub.ExpectArgs{"cannot capset: %v", []any{stub.UniqueError(17)}}, nil, nil),
}, },
}, nil}, }, nil},
@@ -1799,11 +1799,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -1837,6 +1837,7 @@ func TestInitEntrypoint(t *testing.T) {
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x5)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x5)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x6)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x6)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x7)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x7)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x8)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x9)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x9)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xa)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xa)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xb)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xb)}, nil, nil),
@@ -1868,9 +1869,8 @@ func TestInitEntrypoint(t *testing.T) {
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x26)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x26)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x27)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x27)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x28)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x28)}, nil, nil),
call("capset", stub.ExpectArgs{&capHeader{_LINUX_CAPABILITY_VERSION_3, 0}, &[2]capData{{0x200100, 0x200100, 0x200100}, {0, 0, 0}}}, nil, nil),
call("capAmbientRaise", stub.ExpectArgs{uintptr(0x15)}, nil, nil), call("capAmbientRaise", stub.ExpectArgs{uintptr(0x15)}, nil, nil),
call("capAmbientRaise", stub.ExpectArgs{uintptr(0x8)}, nil, nil), call("capset", stub.ExpectArgs{&capHeader{_LINUX_CAPABILITY_VERSION_3, 0}, &[2]capData{{0, 0x200000, 0x200000}, {0, 0, 0}}}, nil, nil),
call("verbosef", stub.ExpectArgs{"resolving presets %#x", []any{std.FilterPreset(0xf)}}, nil, nil), call("verbosef", stub.ExpectArgs{"resolving presets %#x", []any{std.FilterPreset(0xf)}}, nil, nil),
call("seccompLoad", stub.ExpectArgs{seccomp.Preset(0xf, 0), seccomp.ExportFlag(0)}, nil, stub.UniqueError(15)), call("seccompLoad", stub.ExpectArgs{seccomp.Preset(0xf, 0), seccomp.ExportFlag(0)}, nil, stub.UniqueError(15)),
call("fatalf", stub.ExpectArgs{"cannot load syscall filter: %v", []any{stub.UniqueError(15)}}, nil, nil), call("fatalf", stub.ExpectArgs{"cannot load syscall filter: %v", []any{stub.UniqueError(15)}}, nil, nil),
@@ -1908,11 +1908,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -2032,11 +2032,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(4), nil), call("lastcap", stub.ExpectArgs{}, uintptr(4), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -2132,11 +2132,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(4), nil), call("lastcap", stub.ExpectArgs{}, uintptr(4), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -2232,11 +2232,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(4), nil), call("lastcap", stub.ExpectArgs{}, uintptr(4), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -2323,11 +2323,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(4), nil), call("lastcap", stub.ExpectArgs{}, uintptr(4), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -2418,11 +2418,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(4), nil), call("lastcap", stub.ExpectArgs{}, uintptr(4), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -2520,11 +2520,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -2659,11 +2659,11 @@ func TestInitEntrypoint(t *testing.T) {
call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil), call("sethostname", stub.ExpectArgs{[]byte("hakurei-check")}, nil, nil),
call("lastcap", stub.ExpectArgs{}, uintptr(40), nil), call("lastcap", stub.ExpectArgs{}, uintptr(40), nil),
call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil), call("mount", stub.ExpectArgs{"", "/", "", uintptr(0x8c000), ""}, nil, nil),
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
/* begin early */ /* begin early */
call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil), call("evalSymlinks", stub.ExpectArgs{"/"}, "/", nil),
/* end early */ /* end early */
call("mount", stub.ExpectArgs{"rootfs", "/proc/self/fd", "tmpfs", uintptr(6), ""}, nil, nil),
call("chdir", stub.ExpectArgs{"/proc/self/fd"}, nil, nil),
call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"sysroot", os.FileMode(0755)}, nil, nil),
call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil), call("mount", stub.ExpectArgs{"sysroot", "sysroot", "", uintptr(0xd000), ""}, nil, nil),
call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil), call("mkdir", stub.ExpectArgs{"host", os.FileMode(0755)}, nil, nil),
@@ -2697,6 +2697,7 @@ func TestInitEntrypoint(t *testing.T) {
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x5)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x5)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x6)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x6)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x7)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x7)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x8)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x9)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x9)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xa)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xa)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xb)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0xb)}, nil, nil),
@@ -2728,9 +2729,8 @@ func TestInitEntrypoint(t *testing.T) {
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x26)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x26)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x27)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x27)}, nil, nil),
call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x28)}, nil, nil), call("capBoundingSetDrop", stub.ExpectArgs{uintptr(0x28)}, nil, nil),
call("capset", stub.ExpectArgs{&capHeader{_LINUX_CAPABILITY_VERSION_3, 0}, &[2]capData{{0x200100, 0x200100, 0x200100}, {0, 0, 0}}}, nil, nil),
call("capAmbientRaise", stub.ExpectArgs{uintptr(0x15)}, nil, nil), call("capAmbientRaise", stub.ExpectArgs{uintptr(0x15)}, nil, nil),
call("capAmbientRaise", stub.ExpectArgs{uintptr(0x8)}, nil, nil), call("capset", stub.ExpectArgs{&capHeader{_LINUX_CAPABILITY_VERSION_3, 0}, &[2]capData{{0, 0x200000, 0x200000}, {0, 0, 0}}}, nil, nil),
call("verbosef", stub.ExpectArgs{"resolving presets %#x", []any{std.FilterPreset(0xf)}}, nil, nil), call("verbosef", stub.ExpectArgs{"resolving presets %#x", []any{std.FilterPreset(0xf)}}, nil, nil),
call("seccompLoad", stub.ExpectArgs{seccomp.Preset(0xf, 0), seccomp.ExportFlag(0)}, nil, nil), call("seccompLoad", stub.ExpectArgs{seccomp.Preset(0xf, 0), seccomp.ExportFlag(0)}, nil, nil),
call("verbosef", stub.ExpectArgs{"%d filter rules loaded", []any{73}}, nil, nil), call("verbosef", stub.ExpectArgs{"%d filter rules loaded", []any{73}}, nil, nil),

View File

@@ -4,9 +4,9 @@ import (
"encoding/gob" "encoding/gob"
"fmt" "fmt"
"slices" "slices"
"strings"
"hakurei.app/check" "hakurei.app/check"
"hakurei.app/ext"
"hakurei.app/fhs" "hakurei.app/fhs"
) )
@@ -150,7 +150,7 @@ func (o *MountOverlayOp) early(_ *setupState, k syscallDispatcher) error {
if v, err := k.evalSymlinks(o.Upper.String()); err != nil { if v, err := k.evalSymlinks(o.Upper.String()); err != nil {
return err return err
} else { } else {
o.upper = toHost(v) o.upper = check.EscapeOverlayDataSegment(toHost(v))
} }
} }
@@ -158,7 +158,7 @@ func (o *MountOverlayOp) early(_ *setupState, k syscallDispatcher) error {
if v, err := k.evalSymlinks(o.Work.String()); err != nil { if v, err := k.evalSymlinks(o.Work.String()); err != nil {
return err return err
} else { } else {
o.work = toHost(v) o.work = check.EscapeOverlayDataSegment(toHost(v))
} }
} }
} }
@@ -168,39 +168,12 @@ func (o *MountOverlayOp) early(_ *setupState, k syscallDispatcher) error {
if v, err := k.evalSymlinks(a.String()); err != nil { if v, err := k.evalSymlinks(a.String()); err != nil {
return err return err
} else { } else {
o.lower[i] = toHost(v) o.lower[i] = check.EscapeOverlayDataSegment(toHost(v))
} }
} }
return nil return nil
} }
// mountOverlay sets up an overlay mount via [ext.FS].
func mountOverlay(target string, options [][2]string) error {
fs, err := ext.OpenFS(SourceOverlay, 0)
if err != nil {
return err
}
if err = fs.SetString("source", SourceOverlay); err != nil {
_ = fs.Close()
return err
}
for _, option := range options {
if err = fs.SetString(option[0], option[1]); err != nil {
_ = fs.Close()
return err
}
}
if err = fs.SetFlag(OptionOverlayUserxattr); err != nil {
_ = fs.Close()
return err
}
if err = fs.Mount(target, 0); err != nil {
_ = fs.Close()
return err
}
return fs.Close()
}
func (o *MountOverlayOp) apply(state *setupState, k syscallDispatcher) error { func (o *MountOverlayOp) apply(state *setupState, k syscallDispatcher) error {
target := o.Target.String() target := o.Target.String()
if !o.noPrefix { if !o.noPrefix {
@@ -221,7 +194,7 @@ func (o *MountOverlayOp) apply(state *setupState, k syscallDispatcher) error {
} }
} }
options := make([][2]string, 0, 2+len(o.lower)) options := make([]string, 0, 4)
if o.upper == zeroString && o.work == zeroString { // readonly if o.upper == zeroString && o.work == zeroString { // readonly
if len(o.Lower) < 2 { if len(o.Lower) < 2 {
@@ -232,16 +205,15 @@ func (o *MountOverlayOp) apply(state *setupState, k syscallDispatcher) error {
if len(o.Lower) == 0 { if len(o.Lower) == 0 {
return &OverlayArgumentError{OverlayEmptyLower, zeroString} return &OverlayArgumentError{OverlayEmptyLower, zeroString}
} }
options = append(options, [][2]string{ options = append(options,
{OptionOverlayUpperdir, o.upper}, OptionOverlayUpperdir+"="+o.upper,
{OptionOverlayWorkdir, o.work}, OptionOverlayWorkdir+"="+o.work)
}...)
}
for _, lower := range o.lower {
options = append(options, [2]string{OptionOverlayLowerdir + "+", lower})
} }
options = append(options,
OptionOverlayLowerdir+"="+strings.Join(o.lower, check.SpecialOverlayPath),
OptionOverlayUserxattr)
return k.mountOverlay(target, options) return k.mount(SourceOverlay, target, FstypeOverlay, 0, strings.Join(options, check.SpecialOverlayOption))
} }
func (o *MountOverlayOp) late(*setupState, syscallDispatcher) error { return nil } func (o *MountOverlayOp) late(*setupState, syscallDispatcher) error { return nil }

View File

@@ -97,12 +97,13 @@ func TestMountOverlayOp(t *testing.T) {
call("mkdirAll", stub.ExpectArgs{"/sysroot", os.FileMode(0705)}, nil, nil), call("mkdirAll", stub.ExpectArgs{"/sysroot", os.FileMode(0705)}, nil, nil),
call("mkdirTemp", stub.ExpectArgs{"/", "overlay.upper.*"}, "overlay.upper.32768", nil), call("mkdirTemp", stub.ExpectArgs{"/", "overlay.upper.*"}, "overlay.upper.32768", nil),
call("mkdirTemp", stub.ExpectArgs{"/", "overlay.work.*"}, "overlay.work.32768", nil), call("mkdirTemp", stub.ExpectArgs{"/", "overlay.work.*"}, "overlay.work.32768", nil),
call("mountOverlay", stub.ExpectArgs{"/sysroot", [][2]string{ call("mount", stub.ExpectArgs{"overlay", "/sysroot", "overlay", uintptr(0), "" +
{"upperdir", "overlay.upper.32768"}, "upperdir=overlay.upper.32768," +
{"workdir", "overlay.work.32768"}, "workdir=overlay.work.32768," +
{"lowerdir+", `/host/var/lib/planterette/base/debian:f92c9052`}, "lowerdir=" +
{"lowerdir+", `/host/var/lib/planterette/app/org.chromium.Chromium@debian:f92c9052`}, `/host/var/lib/planterette/base/debian\:f92c9052:` +
}}, nil, nil), `/host/var/lib/planterette/app/org.chromium.Chromium@debian\:f92c9052,` +
"userxattr"}, nil, nil),
}, nil}, }, nil},
{"short lower ro", &Params{ParentPerm: 0755}, &MountOverlayOp{ {"short lower ro", &Params{ParentPerm: 0755}, &MountOverlayOp{
@@ -128,10 +129,11 @@ func TestMountOverlayOp(t *testing.T) {
call("evalSymlinks", stub.ExpectArgs{"/mnt-root/nix/.ro-store0"}, "/mnt-root/nix/.ro-store0", nil), call("evalSymlinks", stub.ExpectArgs{"/mnt-root/nix/.ro-store0"}, "/mnt-root/nix/.ro-store0", nil),
}, nil, []stub.Call{ }, nil, []stub.Call{
call("mkdirAll", stub.ExpectArgs{"/nix/store", os.FileMode(0755)}, nil, nil), call("mkdirAll", stub.ExpectArgs{"/nix/store", os.FileMode(0755)}, nil, nil),
call("mountOverlay", stub.ExpectArgs{"/nix/store", [][2]string{ call("mount", stub.ExpectArgs{"overlay", "/nix/store", "overlay", uintptr(0), "" +
{"lowerdir+", "/host/mnt-root/nix/.ro-store"}, "lowerdir=" +
{"lowerdir+", "/host/mnt-root/nix/.ro-store0"}, "/host/mnt-root/nix/.ro-store:" +
}}, nil, nil), "/host/mnt-root/nix/.ro-store0," +
"userxattr"}, nil, nil),
}, nil}, }, nil},
{"success ro", &Params{ParentPerm: 0755}, &MountOverlayOp{ {"success ro", &Params{ParentPerm: 0755}, &MountOverlayOp{
@@ -145,10 +147,11 @@ func TestMountOverlayOp(t *testing.T) {
call("evalSymlinks", stub.ExpectArgs{"/mnt-root/nix/.ro-store0"}, "/mnt-root/nix/.ro-store0", nil), call("evalSymlinks", stub.ExpectArgs{"/mnt-root/nix/.ro-store0"}, "/mnt-root/nix/.ro-store0", nil),
}, nil, []stub.Call{ }, nil, []stub.Call{
call("mkdirAll", stub.ExpectArgs{"/sysroot/nix/store", os.FileMode(0755)}, nil, nil), call("mkdirAll", stub.ExpectArgs{"/sysroot/nix/store", os.FileMode(0755)}, nil, nil),
call("mountOverlay", stub.ExpectArgs{"/sysroot/nix/store", [][2]string{ call("mount", stub.ExpectArgs{"overlay", "/sysroot/nix/store", "overlay", uintptr(0), "" +
{"lowerdir+", "/host/mnt-root/nix/.ro-store"}, "lowerdir=" +
{"lowerdir+", "/host/mnt-root/nix/.ro-store0"}, "/host/mnt-root/nix/.ro-store:" +
}}, nil, nil), "/host/mnt-root/nix/.ro-store0," +
"userxattr"}, nil, nil),
}, nil}, }, nil},
{"nil lower", &Params{ParentPerm: 0700}, &MountOverlayOp{ {"nil lower", &Params{ParentPerm: 0700}, &MountOverlayOp{
@@ -216,11 +219,7 @@ func TestMountOverlayOp(t *testing.T) {
call("evalSymlinks", stub.ExpectArgs{"/mnt-root/nix/.ro-store"}, "/mnt-root/nix/ro-store", nil), call("evalSymlinks", stub.ExpectArgs{"/mnt-root/nix/.ro-store"}, "/mnt-root/nix/ro-store", nil),
}, nil, []stub.Call{ }, nil, []stub.Call{
call("mkdirAll", stub.ExpectArgs{"/sysroot/nix/store", os.FileMode(0700)}, nil, nil), call("mkdirAll", stub.ExpectArgs{"/sysroot/nix/store", os.FileMode(0700)}, nil, nil),
call("mountOverlay", stub.ExpectArgs{"/sysroot/nix/store", [][2]string{ call("mount", stub.ExpectArgs{"overlay", "/sysroot/nix/store", "overlay", uintptr(0), "upperdir=/host/mnt-root/nix/.rw-store/.upper,workdir=/host/mnt-root/nix/.rw-store/.work,lowerdir=/host/mnt-root/nix/ro-store,userxattr"}, nil, stub.UniqueError(0)),
{"upperdir", "/host/mnt-root/nix/.rw-store/.upper"},
{"workdir", "/host/mnt-root/nix/.rw-store/.work"},
{"lowerdir+", "/host/mnt-root/nix/ro-store"},
}}, nil, stub.UniqueError(0)),
}, stub.UniqueError(0)}, }, stub.UniqueError(0)},
{"success single layer", &Params{ParentPerm: 0700}, &MountOverlayOp{ {"success single layer", &Params{ParentPerm: 0700}, &MountOverlayOp{
@@ -234,11 +233,11 @@ func TestMountOverlayOp(t *testing.T) {
call("evalSymlinks", stub.ExpectArgs{"/mnt-root/nix/.ro-store"}, "/mnt-root/nix/ro-store", nil), call("evalSymlinks", stub.ExpectArgs{"/mnt-root/nix/.ro-store"}, "/mnt-root/nix/ro-store", nil),
}, nil, []stub.Call{ }, nil, []stub.Call{
call("mkdirAll", stub.ExpectArgs{"/sysroot/nix/store", os.FileMode(0700)}, nil, nil), call("mkdirAll", stub.ExpectArgs{"/sysroot/nix/store", os.FileMode(0700)}, nil, nil),
call("mountOverlay", stub.ExpectArgs{"/sysroot/nix/store", [][2]string{ call("mount", stub.ExpectArgs{"overlay", "/sysroot/nix/store", "overlay", uintptr(0), "" +
{"upperdir", "/host/mnt-root/nix/.rw-store/.upper"}, "upperdir=/host/mnt-root/nix/.rw-store/.upper," +
{"workdir", "/host/mnt-root/nix/.rw-store/.work"}, "workdir=/host/mnt-root/nix/.rw-store/.work," +
{"lowerdir+", "/host/mnt-root/nix/ro-store"}, "lowerdir=/host/mnt-root/nix/ro-store," +
}}, nil, nil), "userxattr"}, nil, nil),
}, nil}, }, nil},
{"success", &Params{ParentPerm: 0700}, &MountOverlayOp{ {"success", &Params{ParentPerm: 0700}, &MountOverlayOp{
@@ -262,15 +261,16 @@ func TestMountOverlayOp(t *testing.T) {
call("evalSymlinks", stub.ExpectArgs{"/mnt-root/nix/.ro-store3"}, "/mnt-root/nix/ro-store3", nil), call("evalSymlinks", stub.ExpectArgs{"/mnt-root/nix/.ro-store3"}, "/mnt-root/nix/ro-store3", nil),
}, nil, []stub.Call{ }, nil, []stub.Call{
call("mkdirAll", stub.ExpectArgs{"/sysroot/nix/store", os.FileMode(0700)}, nil, nil), call("mkdirAll", stub.ExpectArgs{"/sysroot/nix/store", os.FileMode(0700)}, nil, nil),
call("mountOverlay", stub.ExpectArgs{"/sysroot/nix/store", [][2]string{ call("mount", stub.ExpectArgs{"overlay", "/sysroot/nix/store", "overlay", uintptr(0), "" +
{"upperdir", "/host/mnt-root/nix/.rw-store/.upper"}, "upperdir=/host/mnt-root/nix/.rw-store/.upper," +
{"workdir", "/host/mnt-root/nix/.rw-store/.work"}, "workdir=/host/mnt-root/nix/.rw-store/.work," +
{"lowerdir+", "/host/mnt-root/nix/ro-store"}, "lowerdir=" +
{"lowerdir+", "/host/mnt-root/nix/ro-store0"}, "/host/mnt-root/nix/ro-store:" +
{"lowerdir+", "/host/mnt-root/nix/ro-store1"}, "/host/mnt-root/nix/ro-store0:" +
{"lowerdir+", "/host/mnt-root/nix/ro-store2"}, "/host/mnt-root/nix/ro-store1:" +
{"lowerdir+", "/host/mnt-root/nix/ro-store3"}, "/host/mnt-root/nix/ro-store2:" +
}}, nil, nil), "/host/mnt-root/nix/ro-store3," +
"userxattr"}, nil, nil),
}, nil}, }, nil},
}) })

View File

@@ -40,9 +40,6 @@ const (
// SourceMqueue is used when mounting mqueue. // SourceMqueue is used when mounting mqueue.
// Note that any source value is allowed when fstype is [FstypeMqueue]. // Note that any source value is allowed when fstype is [FstypeMqueue].
SourceMqueue = "mqueue" SourceMqueue = "mqueue"
// SourceBinfmtMisc is used when mounting binfmt_misc.
// Note that any source value is allowed when fstype is [SourceBinfmtMisc].
SourceBinfmtMisc = "binfmt_misc"
// SourceOverlay is used when mounting overlay. // SourceOverlay is used when mounting overlay.
// Note that any source value is allowed when fstype is [FstypeOverlay]. // Note that any source value is allowed when fstype is [FstypeOverlay].
SourceOverlay = "overlay" SourceOverlay = "overlay"
@@ -73,9 +70,6 @@ const (
// FstypeMqueue represents the mqueue pseudo-filesystem. // FstypeMqueue represents the mqueue pseudo-filesystem.
// This filesystem type is usually mounted on /dev/mqueue. // This filesystem type is usually mounted on /dev/mqueue.
FstypeMqueue = "mqueue" FstypeMqueue = "mqueue"
// FstypeBinfmtMisc represents the binfmt_misc pseudo-filesystem.
// This filesystem type is usually mounted on /proc/sys/fs/binfmt_misc.
FstypeBinfmtMisc = "binfmt_misc"
// FstypeOverlay represents the overlay pseudo-filesystem. // FstypeOverlay represents the overlay pseudo-filesystem.
// This filesystem type can be mounted anywhere in the container filesystem. // This filesystem type can be mounted anywhere in the container filesystem.
FstypeOverlay = "overlay" FstypeOverlay = "overlay"

View File

@@ -10,6 +10,7 @@ import (
"testing" "testing"
"unsafe" "unsafe"
"hakurei.app/check"
"hakurei.app/vfs" "hakurei.app/vfs"
) )
@@ -49,6 +50,9 @@ func TestToHost(t *testing.T) {
} }
} }
// InternalToHostOvlEscape exports toHost passed to [check.EscapeOverlayDataSegment].
func InternalToHostOvlEscape(s string) string { return check.EscapeOverlayDataSegment(toHost(s)) }
func TestCreateFile(t *testing.T) { func TestCreateFile(t *testing.T) {
t.Run("nonexistent", func(t *testing.T) { t.Run("nonexistent", func(t *testing.T) {
t.Run("mkdir", func(t *testing.T) { t.Run("mkdir", func(t *testing.T) {

267
ext/fs.go
View File

@@ -1,267 +0,0 @@
package ext
import (
"os"
"runtime"
"syscall"
"unsafe"
)
// include/uapi/linux/mount.h
/*
* move_mount() flags.
*/
const (
MOVE_MOUNT_F_SYMLINKS = 1 << iota /* Follow symlinks on from path */
MOVE_MOUNT_F_AUTOMOUNTS /* Follow automounts on from path */
MOVE_MOUNT_F_EMPTY_PATH /* Empty from path permitted */
_
MOVE_MOUNT_T_SYMLINKS /* Follow symlinks on to path */
MOVE_MOUNT_T_AUTOMOUNTS /* Follow automounts on to path */
MOVE_MOUNT_T_EMPTY_PATH /* Empty to path permitted */
_
MOVE_MOUNT_SET_GROUP /* Set sharing group instead */
MOVE_MOUNT_BENEATH /* Mount beneath top mount */
)
/*
* fsopen() flags.
*/
const (
FSOPEN_CLOEXEC = 1 << iota
)
/*
* fspick() flags.
*/
const (
FSPICK_CLOEXEC = 1 << iota
FSPICK_SYMLINK_NOFOLLOW
FSPICK_NO_AUTOMOUNT
FSPICK_EMPTY_PATH
)
/*
* The type of fsconfig() call made.
*/
const (
FSCONFIG_SET_FLAG = iota /* Set parameter, supplying no value */
FSCONFIG_SET_STRING /* Set parameter, supplying a string value */
FSCONFIG_SET_BINARY /* Set parameter, supplying a binary blob value */
FSCONFIG_SET_PATH /* Set parameter, supplying an object by path */
FSCONFIG_SET_PATH_EMPTY /* Set parameter, supplying an object by (empty) path */
FSCONFIG_SET_FD /* Set parameter, supplying an object by fd */
FSCONFIG_CMD_CREATE /* Create new or reuse existing superblock */
FSCONFIG_CMD_RECONFIGURE /* Invoke superblock reconfiguration */
FSCONFIG_CMD_CREATE_EXCL /* Create new superblock, fail if reusing existing superblock */
)
/*
* fsmount() flags.
*/
const (
FSMOUNT_CLOEXEC = 1 << iota
)
/*
* Mount attributes.
*/
const (
MOUNT_ATTR_RDONLY = 0x00000001 /* Mount read-only */
MOUNT_ATTR_NOSUID = 0x00000002 /* Ignore suid and sgid bits */
MOUNT_ATTR_NODEV = 0x00000004 /* Disallow access to device special files */
MOUNT_ATTR_NOEXEC = 0x00000008 /* Disallow program execution */
MOUNT_ATTR__ATIME = 0x00000070 /* Setting on how atime should be updated */
MOUNT_ATTR_RELATIME = 0x00000000 /* - Update atime relative to mtime/ctime. */
MOUNT_ATTR_NOATIME = 0x00000010 /* - Do not update access times. */
MOUNT_ATTR_STRICTATIME = 0x00000020 /* - Always perform atime updates */
MOUNT_ATTR_NODIRATIME = 0x00000080 /* Do not update directory access times */
MOUNT_ATTR_IDMAP = 0x00100000 /* Idmap mount to @userns_fd in struct mount_attr. */
MOUNT_ATTR_NOSYMFOLLOW = 0x00200000 /* Do not follow symlinks */
)
// FS provides low-level wrappers around the suite of file-descriptor-based
// mount facilities in Linux.
type FS struct {
fd uintptr
c runtime.Cleanup
}
// newFS allocates a new [FS] for the specified fd.
func newFS(fd uintptr) *FS {
fs := FS{fd: fd}
fs.c = runtime.AddCleanup(&fs, func(fd uintptr) {
_ = syscall.Close(int(fd))
}, fd)
return &fs
}
// Close closes the underlying filesystem context.
func (fs *FS) Close() error {
if fs == nil {
return syscall.EINVAL
}
err := syscall.Close(int(fs.fd))
fs.c.Stop()
return err
}
// OpenFS creates a new filesystem context via fsopen(2) for the filesystem
// named fsname. FSOPEN_CLOEXEC is always included in flags.
func OpenFS(fsname string, flags int) (*FS, error) {
	name, err := syscall.BytePtrFromString(fsname)
	if err != nil {
		return nil, err
	}
	fd, _, errno := syscall.Syscall(
		SYS_FSOPEN,
		uintptr(unsafe.Pointer(name)),
		uintptr(flags|FSOPEN_CLOEXEC),
		0,
	)
	if errno != 0 {
		return nil, os.NewSyscallError("fsopen", errno)
	}
	return newFS(fd), nil
}
// PickFS selects filesystem for reconfiguration via fspick(2), resolving
// pathname relative to dirfd. FSPICK_CLOEXEC is always included in flags.
func PickFS(dirfd int, pathname string, flags int) (*FS, error) {
	p, err := syscall.BytePtrFromString(pathname)
	if err != nil {
		return nil, err
	}
	fd, _, errno := syscall.Syscall(
		SYS_FSPICK,
		uintptr(dirfd),
		uintptr(unsafe.Pointer(p)),
		uintptr(flags|FSPICK_CLOEXEC),
	)
	if errno != 0 {
		return nil, os.NewSyscallError("fspick", errno)
	}
	return newFS(fd), nil
}
// config configures new or existing filesystem context by invoking
// fsconfig(2) with the given command, key, value and aux on fs.fd.
func (fs *FS) config(cmd uint, key *byte, value unsafe.Pointer, aux int) error {
	if _, _, errno := syscall.Syscall6(
		SYS_FSCONFIG,
		fs.fd,
		uintptr(cmd),
		uintptr(unsafe.Pointer(key)),
		uintptr(value),
		uintptr(aux),
		0,
	); errno != 0 {
		return os.NewSyscallError("fsconfig", errno)
	}
	return nil
}
// SetFlag sets the flag parameter named by key. ([FSCONFIG_SET_FLAG])
func (fs *FS) SetFlag(key string) error {
	k, err := syscall.BytePtrFromString(key)
	if err != nil {
		return err
	}
	return fs.config(FSCONFIG_SET_FLAG, k, nil, 0)
}
// SetString sets the string parameter named by key to the value specified by
// value. ([FSCONFIG_SET_STRING])
func (fs *FS) SetString(key, value string) error {
	k, err := syscall.BytePtrFromString(key)
	if err != nil {
		return err
	}
	v, err := syscall.BytePtrFromString(value)
	if err != nil {
		return err
	}
	return fs.config(FSCONFIG_SET_STRING, k, unsafe.Pointer(v), 0)
}
// mount instantiates mount object from filesystem context via fsmount(2).
// FSMOUNT_CLOEXEC is always included in flags.
//
// On success fsfd is the new mount file descriptor; on error fsfd is -1.
// (The previous behavior returned the raw syscall result even on failure,
// handing callers a meaningless value next to a non-nil error.)
func (fs *FS) mount(flags, attrFlags int) (fsfd int, err error) {
	r, _, errno := syscall.Syscall(
		SYS_FSMOUNT,
		fs.fd,
		uintptr(flags|FSMOUNT_CLOEXEC),
		uintptr(attrFlags),
	)
	if errno != 0 {
		return -1, os.NewSyscallError("fsmount", errno)
	}
	return int(r), nil
}
// MoveMount moves or attaches mount object to filesystem via move_mount(2).
// The source is (fromDirfd, fromPathname) and the destination is
// (toDirfd, toPathname); flags are passed through unchanged.
func MoveMount(
	fromDirfd int,
	fromPathname string,
	toDirfd int,
	toPathname string,
	flags int,
) error {
	from, err := syscall.BytePtrFromString(fromPathname)
	if err != nil {
		return err
	}
	to, err := syscall.BytePtrFromString(toPathname)
	if err != nil {
		return err
	}
	if _, _, errno := syscall.Syscall6(
		SYS_MOVE_MOUNT,
		uintptr(fromDirfd),
		uintptr(unsafe.Pointer(from)),
		uintptr(toDirfd),
		uintptr(unsafe.Pointer(to)),
		uintptr(flags),
		0,
	); errno != 0 {
		return os.NewSyscallError("move_mount", errno)
	}
	return nil
}
// Mount attaches the underlying filesystem context to the specified pathname.
// The superblock is created with [FSCONFIG_CMD_CREATE_EXCL], so an existing
// superblock is never silently reused.
//
// Pathname may now also be relative to the current working directory: the
// destination dirfd is AT_FDCWD rather than the former hard-coded -1, which
// only worked for absolute paths (a relative path with dirfd -1 fails with
// EBADF). AT_FDCWD is ignored by the kernel for absolute paths, so existing
// callers are unaffected.
func (fs *FS) Mount(pathname string, attrFlags int) error {
	if err := fs.config(FSCONFIG_CMD_CREATE_EXCL, nil, nil, 0); err != nil {
		return err
	}
	fd, err := fs.mount(0, attrFlags)
	if err != nil {
		return err
	}
	// The empty "from" path with MOVE_MOUNT_F_EMPTY_PATH makes the mount
	// object referred to by fd itself the source of the move.
	err = MoveMount(
		fd, "",
		syscall.AT_FDCWD, pathname,
		MOVE_MOUNT_F_EMPTY_PATH,
	)
	// Always release the mount fd; surface the close error only when the
	// move itself succeeded.
	if closeErr := syscall.Close(fd); err == nil {
		err = closeErr
	}
	return err
}

View File

@@ -42,8 +42,6 @@ var (
AbsDevShm = unsafeAbs(DevShm) AbsDevShm = unsafeAbs(DevShm)
// AbsProc is [Proc] as [check.Absolute]. // AbsProc is [Proc] as [check.Absolute].
AbsProc = unsafeAbs(Proc) AbsProc = unsafeAbs(Proc)
// AbsProcSys is [ProcSys] as [check.Absolute].
AbsProcSys = unsafeAbs(ProcSys)
// AbsProcSelfExe is [ProcSelfExe] as [check.Absolute]. // AbsProcSelfExe is [ProcSelfExe] as [check.Absolute].
AbsProcSelfExe = unsafeAbs(ProcSelfExe) AbsProcSelfExe = unsafeAbs(ProcSelfExe)
// AbsSys is [Sys] as [check.Absolute]. // AbsSys is [Sys] as [check.Absolute].

View File

@@ -64,6 +64,78 @@ func TestFlatten(t *testing.T) {
{Mode: fs.ModeDir | 0700, Path: "work"}, {Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C"), nil}, }, pkg.MustDecode("E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C"), nil},
{"sample cache file", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX": {Mode: 0400, Data: []byte{0}},
"checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq": {Mode: 0400, Data: []byte{0, 0, 0, 0, 0xad, 0xb, 0, 4, 0xfe, 0xfe, 0, 0, 0xfe, 0xca, 0, 0}},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX")},
"identifier/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
"identifier/cafebabecafebabecafebabecafebabecafebabecafebabecafebabecafebabe": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
"identifier/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: 0400, Path: "checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq", Data: []byte{0, 0, 0, 0, 0xad, 0xb, 0, 4, 0xfe, 0xfe, 0, 0, 0xfe, 0xca, 0, 0}},
{Mode: 0400, Path: "checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX", Data: []byte{0}},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq", Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/cafebabecafebabecafebabecafebabecafebabecafebabecafebabecafebabe", Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef", Data: []byte("../checksum/0bSFPu5Tnd-2Jj0Mv6co23PW2t3BmHc7eLFj9TgY3eIBg8zislo7xZYNBqovVLcq")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX", Data: []byte("../checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX")},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("St9rlE-mGZ5gXwiv_hzQ_B8bZP-UUvSNmf4nHUZzCMOumb6hKnheZSe0dmnuc4Q2"), nil},
{"sample http get cure", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU": {Mode: 0400, Data: []byte("\x7f\xe1\x69\xa2\xdd\x63\x96\x26\x83\x79\x61\x8b\xf0\x3f\xd5\x16\x9a\x39\x3a\xdb\xcf\xb1\xbc\x8d\x33\xff\x75\xee\x62\x56\xa9\xf0\x27\xac\x13\x94\x69")},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/oM-2pUlk-mOxK1t3aMWZer69UdOQlAXiAgMrpZ1476VoOqpYVP1aGFS9_HYy-D8_": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU")},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: 0400, Path: "checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU", Data: []byte("\x7f\xe1\x69\xa2\xdd\x63\x96\x26\x83\x79\x61\x8b\xf0\x3f\xd5\x16\x9a\x39\x3a\xdb\xcf\xb1\xbc\x8d\x33\xff\x75\xee\x62\x56\xa9\xf0\x27\xac\x13\x94\x69")},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/oM-2pUlk-mOxK1t3aMWZer69UdOQlAXiAgMrpZ1476VoOqpYVP1aGFS9_HYy-D8_", Data: []byte("../checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU")},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("L_0RFHpr9JUS4Zp14rz2dESSRvfLzpvqsLhR1-YjQt8hYlmEdVl7vI3_-v8UNPKs"), nil},
{"sample directory step simple", fstest.MapFS{
".": {Mode: fs.ModeDir | 0500},
"check": {Mode: 0400, Data: []byte{0, 0}},
"lib": {Mode: fs.ModeDir | 0700},
"lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
"lib/pkgconfig": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0500, Path: "."},
{Mode: 0400, Path: "check", Data: []byte{0, 0}},
{Mode: fs.ModeDir | 0700, Path: "lib"},
{Mode: fs.ModeSymlink | 0777, Path: "lib/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
{Mode: fs.ModeDir | 0700, Path: "lib/pkgconfig"},
}, pkg.MustDecode("qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b"), nil},
{"sample directory step garbage", fstest.MapFS{ {"sample directory step garbage", fstest.MapFS{
".": {Mode: fs.ModeDir | 0500}, ".": {Mode: fs.ModeDir | 0500},
@@ -79,6 +151,421 @@ func TestFlatten(t *testing.T) {
{Mode: fs.ModeDir | 0500, Path: "lib/pkgconfig"}, {Mode: fs.ModeDir | 0500, Path: "lib/pkgconfig"},
}, pkg.MustDecode("CUx-3hSbTWPsbMfDhgalG4Ni_GmR9TnVX8F99tY_P5GtkYvczg9RrF5zO0jX9XYT"), nil}, }, pkg.MustDecode("CUx-3hSbTWPsbMfDhgalG4Ni_GmR9TnVX8F99tY_P5GtkYvczg9RrF5zO0jX9XYT"), nil},
{"sample directory", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b": {Mode: fs.ModeDir | 0500},
"checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/check": {Mode: 0400, Data: []byte{0, 0}},
"checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib": {Mode: fs.ModeDir | 0700},
"checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib/pkgconfig": {Mode: fs.ModeDir | 0700},
"checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b")},
"identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b")},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b"},
{Mode: 0400, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/check", Data: []byte{0, 0}},
{Mode: fs.ModeDir | 0700, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib"},
{Mode: fs.ModeSymlink | 0777, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
{Mode: fs.ModeDir | 0700, Path: "checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b/lib/pkgconfig"},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY", Data: []byte("../checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa", Data: []byte("../checksum/qRN6in76LndiiOZJheHkwyW8UT1N5-f-bXvHfDvwrMw2fSkOoZdh8pWE1qhLk65b")},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("WVpvsVqVKg9Nsh744x57h51AuWUoUR2nnh8Md-EYBQpk6ziyTuUn6PLtF2e0Eu_d"), nil},
{"sample no assume checksum", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/Aubi5EG4_Y8DhL9bQ3Q4HFBhLRF7X5gt9D3CNCQfT-TeBtlRXc7Zi_JYZEMoCC7M": {Mode: fs.ModeDir | 0500},
"checksum/Aubi5EG4_Y8DhL9bQ3Q4HFBhLRF7X5gt9D3CNCQfT-TeBtlRXc7Zi_JYZEMoCC7M/check": {Mode: 0400, Data: []byte{}},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/_wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/Aubi5EG4_Y8DhL9bQ3Q4HFBhLRF7X5gt9D3CNCQfT-TeBtlRXc7Zi_JYZEMoCC7M")},
"identifier/_wEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/Aubi5EG4_Y8DhL9bQ3Q4HFBhLRF7X5gt9D3CNCQfT-TeBtlRXc7Zi_JYZEMoCC7M")},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/Aubi5EG4_Y8DhL9bQ3Q4HFBhLRF7X5gt9D3CNCQfT-TeBtlRXc7Zi_JYZEMoCC7M"},
{Mode: 0400, Path: "checksum/Aubi5EG4_Y8DhL9bQ3Q4HFBhLRF7X5gt9D3CNCQfT-TeBtlRXc7Zi_JYZEMoCC7M/check", Data: []byte{}},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", Data: []byte("../checksum/Aubi5EG4_Y8DhL9bQ3Q4HFBhLRF7X5gt9D3CNCQfT-TeBtlRXc7Zi_JYZEMoCC7M")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_wEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", Data: []byte("../checksum/Aubi5EG4_Y8DhL9bQ3Q4HFBhLRF7X5gt9D3CNCQfT-TeBtlRXc7Zi_JYZEMoCC7M")},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("OC290t23aimNo2Rp2pPwan5GI2KRLRdOwYxXQMD9jw0QROgHnNXWodoWdV0hwu2w"), nil},
{"sample tar step unpack", fstest.MapFS{
".": {Mode: fs.ModeDir | 0500},
"checksum": {Mode: fs.ModeDir | 0500},
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP": {Mode: fs.ModeDir | 0500},
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check": {Mode: 0400, Data: []byte{0, 0}},
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib": {Mode: fs.ModeDir | 0500},
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig": {Mode: fs.ModeDir | 0500},
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
"identifier": {Mode: fs.ModeDir | 0500},
"identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
"identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
"work": {Mode: fs.ModeDir | 0500},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0500, Path: "."},
{Mode: fs.ModeDir | 0500, Path: "checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP"},
{Mode: 0400, Path: "checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check", Data: []byte{0, 0}},
{Mode: fs.ModeDir | 0500, Path: "checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib"},
{Mode: fs.ModeSymlink | 0777, Path: "checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
{Mode: fs.ModeDir | 0500, Path: "checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig"},
{Mode: fs.ModeDir | 0500, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY", Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa", Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
{Mode: fs.ModeDir | 0500, Path: "work"},
}, pkg.MustDecode("cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM"), nil},
{"sample tar", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM": {Mode: fs.ModeDir | 0500},
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum": {Mode: fs.ModeDir | 0500},
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP": {Mode: fs.ModeDir | 0500},
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check": {Mode: 0400, Data: []byte{0, 0}},
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib": {Mode: fs.ModeDir | 0500},
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig": {Mode: fs.ModeDir | 0500},
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier": {Mode: fs.ModeDir | 0500},
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
"checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/work": {Mode: fs.ModeDir | 0500},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/W5S65DEhawz_WKaok5NjUKLmnD9dNl5RPauNJjcOVcB3VM4eGhSaLGmXbL8vZpiw": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
"identifier/rg7F1D5hwv6o4xctjD5zDq4i5MD0mArTsUIWfhUbik8xC6Bsyt3mjXXOm3goojTz": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM"},
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP"},
{Mode: 0400, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check", Data: []byte{0, 0}},
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib"},
{Mode: fs.ModeSymlink | 0777, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig"},
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY", Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
{Mode: fs.ModeSymlink | 0777, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa", Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
{Mode: fs.ModeDir | 0500, Path: "checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM/work"},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/W5S65DEhawz_WKaok5NjUKLmnD9dNl5RPauNJjcOVcB3VM4eGhSaLGmXbL8vZpiw", Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/rg7F1D5hwv6o4xctjD5zDq4i5MD0mArTsUIWfhUbik8xC6Bsyt3mjXXOm3goojTz", Data: []byte("../checksum/cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM")},
{Mode: fs.ModeDir | 0700, Path: "temp"},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("NQTlc466JmSVLIyWklm_u8_g95jEEb98PxJU-kjwxLpfdjwMWJq0G8ze9R4Vo1Vu"), nil},
{"sample tar expand step unpack", fstest.MapFS{
".": {Mode: fs.ModeDir | 0500},
"libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0500, Path: "."},
{Mode: fs.ModeSymlink | 0777, Path: "libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
}, pkg.MustDecode("CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN"), nil},
{"sample tar expand", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN": {Mode: fs.ModeDir | 0500},
"checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/W5S65DEhawz_WKaok5NjUKLmnD9dNl5RPauNJjcOVcB3VM4eGhSaLGmXbL8vZpiw": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
"identifier/_v1blm2h-_KA-dVaawdpLas6MjHc6rbhhFS8JWwx8iJxZGUu8EBbRrhr5AaZ9PJL": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN"},
{Mode: fs.ModeSymlink | 0777, Path: "checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN/libedac.so", Data: []byte("/proc/nonexistent/libedac.so")},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/W5S65DEhawz_WKaok5NjUKLmnD9dNl5RPauNJjcOVcB3VM4eGhSaLGmXbL8vZpiw", Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_v1blm2h-_KA-dVaawdpLas6MjHc6rbhhFS8JWwx8iJxZGUu8EBbRrhr5AaZ9PJL", Data: []byte("../checksum/CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN")},
{Mode: fs.ModeDir | 0700, Path: "temp"},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("hSoSSgCYTNonX3Q8FjvjD1fBl-E-BQyA6OTXro2OadXqbST4tZ-akGXszdeqphRe"), nil},
{"testtool", fstest.MapFS{
".": {Mode: fs.ModeDir | 0500},
"check": {Mode: 0400, Data: []byte{0}},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0500, Path: "."},
{Mode: 0400, Path: "check", Data: []byte{0}},
}, pkg.MustDecode("GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"), nil},
{"sample exec container", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9": {Mode: fs.ModeDir | 0500},
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check": {Mode: 0400, Data: []byte{0}},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb": {Mode: 0400, Data: []byte{}},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
"identifier/dztPS6jRjiZtCF4_p8AzfnxGp6obkhrgFVsxdodbKWUoAEVtDz3MykepJB4kI_ks": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"},
{Mode: 0400, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check", Data: []byte{0}},
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
{Mode: 0400, Path: "checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb", Data: []byte{}},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/dztPS6jRjiZtCF4_p8AzfnxGp6obkhrgFVsxdodbKWUoAEVtDz3MykepJB4kI_ks", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
{Mode: fs.ModeDir | 0700, Path: "temp"},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("Q5DluWQCAeohLoiGRImurwFp3vdz9IfQCoj7Fuhh73s4KQPRHpEQEnHTdNHmB8Fx"), nil},
{"testtool net", fstest.MapFS{
".": {Mode: fs.ModeDir | 0500},
"check": {Mode: 0400, Data: []byte("net")},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0500, Path: "."},
{Mode: 0400, Path: "check", Data: []byte("net")},
}, pkg.MustDecode("a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W"), nil},
{"sample exec net container", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb": {Mode: 0400, Data: []byte{}},
"checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W": {Mode: fs.ModeDir | 0500},
"checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W/check": {Mode: 0400, Data: []byte("net")},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/G8qPxD9puvvoOVV7lrT80eyDeIl3G_CCFoKw12c8mCjMdG1zF7NEPkwYpNubClK3": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W")},
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
{Mode: 0400, Path: "checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb", Data: []byte{}},
{Mode: fs.ModeDir | 0500, Path: "checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W"},
{Mode: 0400, Path: "checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W/check", Data: []byte("net")},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/G8qPxD9puvvoOVV7lrT80eyDeIl3G_CCFoKw12c8mCjMdG1zF7NEPkwYpNubClK3", Data: []byte("../checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
{Mode: fs.ModeDir | 0700, Path: "temp"},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("bPYvvqxpfV7xcC1EptqyKNK1klLJgYHMDkzBcoOyK6j_Aj5hb0mXNPwTwPSK5F6Z"), nil},
{"sample exec container overlay root", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9": {Mode: fs.ModeDir | 0500},
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check": {Mode: 0400, Data: []byte{0}},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/RdMA-mubnrHuu3Ky1wWyxauSYCO0ZH_zCPUj3uDHqkfwv5sGcByoF_g5PjlGiClb": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"},
{Mode: 0400, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check", Data: []byte{0}},
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/RdMA-mubnrHuu3Ky1wWyxauSYCO0ZH_zCPUj3uDHqkfwv5sGcByoF_g5PjlGiClb", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
{Mode: fs.ModeDir | 0700, Path: "temp"},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("PO2DSSCa4yoSgEYRcCSZfQfwow1yRigL3Ry-hI0RDI4aGuFBha-EfXeSJnG_5_Rl"), nil},
{"sample exec container overlay work", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9": {Mode: fs.ModeDir | 0500},
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check": {Mode: 0400, Data: []byte{0}},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/5hlaukCirnXE4W_RSLJFOZN47Z5RiHnacXzdFp_70cLgiJUGR6cSb_HaFftkzi0-": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"},
{Mode: 0400, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check", Data: []byte{0}},
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/5hlaukCirnXE4W_RSLJFOZN47Z5RiHnacXzdFp_70cLgiJUGR6cSb_HaFftkzi0-", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
{Mode: fs.ModeDir | 0700, Path: "temp"},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("iaRt6l_Wm2n-h5UsDewZxQkCmjZjyL8r7wv32QT2kyV55-Lx09Dq4gfg9BiwPnKs"), nil},
{"sample exec container multiple layers", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9": {Mode: fs.ModeDir | 0500},
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check": {Mode: 0400, Data: []byte{0}},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb": {Mode: 0400, Data: []byte{}},
"checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK": {Mode: fs.ModeDir | 0500},
"checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK/check": {Mode: 0400, Data: []byte("layers")},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
"identifier/B-kc5iJMx8GtlCua4dz6BiJHnDAOUfPjgpbKq4e-QEn0_CZkSYs3fOA1ve06qMs2": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK")},
"identifier/p1t_drXr34i-jZNuxDMLaMOdL6tZvQqhavNafGynGqxOZoXAUTSn7kqNh3Ovv3DT": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"},
{Mode: 0400, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check", Data: []byte{0}},
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
{Mode: 0400, Path: "checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb", Data: []byte{}},
{Mode: fs.ModeDir | 0500, Path: "checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK"},
{Mode: 0400, Path: "checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK/check", Data: []byte("layers")},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/B-kc5iJMx8GtlCua4dz6BiJHnDAOUfPjgpbKq4e-QEn0_CZkSYs3fOA1ve06qMs2", Data: []byte("../checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/p1t_drXr34i-jZNuxDMLaMOdL6tZvQqhavNafGynGqxOZoXAUTSn7kqNh3Ovv3DT", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
{Mode: fs.ModeDir | 0700, Path: "temp"},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("O2YzyR7IUGU5J2CADy0hUZ3A5NkP_Vwzs4UadEdn2oMZZVWRtH0xZGJ3HXiimTnZ"), nil},
{"sample exec container layer promotion", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9": {Mode: fs.ModeDir | 0500},
"checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check": {Mode: 0400, Data: []byte{0}},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/kvJIqZo5DKFOxC2ZQ-8_nPaQzEAz9cIm3p6guO-uLqm-xaiPu7oRkSnsu411jd_U": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"identifier/xXTIYcXmgJWNLC91c417RRrNM9cjELwEZHpGvf8Fk_GNP5agRJp_SicD0w9aMeLJ": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: fs.ModeDir | 0500, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9"},
{Mode: 0400, Path: "checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9/check", Data: []byte{0}},
{Mode: fs.ModeDir | 0500, Path: "checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/kvJIqZo5DKFOxC2ZQ-8_nPaQzEAz9cIm3p6guO-uLqm-xaiPu7oRkSnsu411jd_U", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK", Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/xXTIYcXmgJWNLC91c417RRrNM9cjELwEZHpGvf8Fk_GNP5agRJp_SicD0w9aMeLJ", Data: []byte("../checksum/GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9")},
{Mode: fs.ModeDir | 0700, Path: "temp"},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("3EaW6WibLi9gl03_UieiFPaFcPy5p4x3JPxrnLJxGaTI-bh3HU9DK9IMx7c3rrNm"), nil},
{"sample file short", fstest.MapFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX": {Mode: 0400, Data: []byte{0}},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/3376ALA7hIUm2LbzH2fDvRezgzod1eTK_G6XjyOgbM2u-6swvkFaF0BOwSl_juBi": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX")},
"work": {Mode: fs.ModeDir | 0700},
}, []pkg.FlatEntry{
{Mode: fs.ModeDir | 0700, Path: "."},
{Mode: fs.ModeDir | 0700, Path: "checksum"},
{Mode: 0400, Path: "checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX", Data: []byte{0}},
{Mode: fs.ModeDir | 0700, Path: "identifier"},
{Mode: fs.ModeSymlink | 0777, Path: "identifier/3376ALA7hIUm2LbzH2fDvRezgzod1eTK_G6XjyOgbM2u-6swvkFaF0BOwSl_juBi", Data: []byte("../checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX")},
{Mode: fs.ModeDir | 0700, Path: "work"},
}, pkg.MustDecode("iR6H5OIsyOW4EwEgtm9rGzGF6DVtyHLySEtwnFE8bnus9VJcoCbR4JIek7Lw-vwT"), nil},
} }
for _, tc := range testCases { for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {

View File

@@ -9,10 +9,8 @@ import (
"os" "os"
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"runtime"
"slices" "slices"
"strconv" "strconv"
"sync"
"syscall" "syscall"
"time" "time"
"unique" "unique"
@@ -96,32 +94,6 @@ func MustPath(pathname string, writable bool, a ...Artifact) ExecPath {
return ExecPath{check.MustAbs(pathname), a, writable} return ExecPath{check.MustAbs(pathname), a, writable}
} }
var (
binfmt map[string]container.BinfmtEntry
binfmtMu sync.RWMutex
)
// RegisterArch arranges for [KindExec] and [KindExecNet] to support a new
// architecture via a binfmt_misc entry. Each architecture must be registered
// at most once.
func RegisterArch(arch string, e container.BinfmtEntry) {
if arch == "" {
panic(UnsupportedArchError(arch))
}
binfmtMu.Lock()
defer binfmtMu.Unlock()
if binfmt == nil {
binfmt = make(map[string]container.BinfmtEntry)
}
if _, ok := binfmt[arch]; ok {
panic("attempting to register " + strconv.Quote(arch) + " twice")
}
binfmt[arch] = e
}
const ( const (
// ExecTimeoutDefault replaces out of range [NewExec] timeout values. // ExecTimeoutDefault replaces out of range [NewExec] timeout values.
ExecTimeoutDefault = 15 * time.Minute ExecTimeoutDefault = 15 * time.Minute
@@ -138,8 +110,6 @@ type execArtifact struct {
// Caller-supplied user-facing reporting name, guaranteed to be nonzero // Caller-supplied user-facing reporting name, guaranteed to be nonzero
// during initialisation. // during initialisation.
name string name string
// Target architecture.
arch string
// Caller-supplied inner mount points. // Caller-supplied inner mount points.
paths []ExecPath paths []ExecPath
@@ -162,40 +132,28 @@ type execArtifact struct {
var _ fmt.Stringer = new(execArtifact) var _ fmt.Stringer = new(execArtifact)
// execMeasuredArtifact is like execArtifact but implements [KnownChecksum] and // execNetArtifact is like execArtifact but implements [KnownChecksum] and has
// has its resulting container optionally keep the host net namespace. // its resulting container keep the host net namespace.
type execMeasuredArtifact struct { type execNetArtifact struct {
checksum Checksum checksum Checksum
// Whether to keep host net namespace.
hostNet bool
execArtifact execArtifact
} }
var _ KnownChecksum = new(execMeasuredArtifact) var _ KnownChecksum = new(execNetArtifact)
// Checksum returns the caller-supplied checksum. // Checksum returns the caller-supplied checksum.
func (a *execMeasuredArtifact) Checksum() Checksum { return a.checksum } func (a *execNetArtifact) Checksum() Checksum { return a.checksum }
// Kind returns [KindExecNet], or [KindExec] if hostNet is false. // Kind returns the hardcoded [Kind] constant.
func (a *execMeasuredArtifact) Kind() Kind { func (*execNetArtifact) Kind() Kind { return KindExecNet }
if a == nil || a.hostNet {
return KindExecNet
}
return KindExec
}
// Cure cures the [Artifact] in the container described by the caller. The // Cure cures the [Artifact] in the container described by the caller. The
// container optionally retains host networking. // container retains host networking.
func (a *execMeasuredArtifact) Cure(f *FContext) error { func (a *execNetArtifact) Cure(f *FContext) error {
return a.cure(f, a.hostNet) return a.cure(f, true)
} }
// ErrNetChecksum is panicked by [NewExec] if host net namespace is requested
// with a nil checksum.
var ErrNetChecksum = errors.New("attempting to keep net namespace without checksum")
// NewExec returns a new [Artifact] that executes the program path in a // NewExec returns a new [Artifact] that executes the program path in a
// container with specified paths bind mounted read-only in order. A private // container with specified paths bind mounted read-only in order. A private
// instance of /proc and /dev is made available to the container. // instance of /proc and /dev is made available to the container.
@@ -209,7 +167,7 @@ var ErrNetChecksum = errors.New("attempting to keep net namespace without checks
// regular or symlink. // regular or symlink.
// //
// If checksum is non-nil, the resulting [Artifact] implements [KnownChecksum] // If checksum is non-nil, the resulting [Artifact] implements [KnownChecksum]
// and its container optionally runs in the host net namespace. // and its container runs in the host net namespace.
// //
// The container is allowed to run for the specified duration before the initial // The container is allowed to run for the specified duration before the initial
// process and all processes originating from it is terminated. A zero or // process and all processes originating from it is terminated. A zero or
@@ -220,10 +178,10 @@ var ErrNetChecksum = errors.New("attempting to keep net namespace without checks
// container and does not affect curing outcome. Because of this, it is omitted // container and does not affect curing outcome. Because of this, it is omitted
// from parameter data for computing identifier. // from parameter data for computing identifier.
func NewExec( func NewExec(
name, arch string, name string,
checksum *Checksum, checksum *Checksum,
timeout time.Duration, timeout time.Duration,
hostNet, exclusive bool, exclusive bool,
dir *check.Absolute, dir *check.Absolute,
env []string, env []string,
@@ -235,23 +193,17 @@ func NewExec(
if name == "" { if name == "" {
name = "exec-" + filepath.Base(pathname.String()) name = "exec-" + filepath.Base(pathname.String())
} }
if arch == "" {
arch = runtime.GOARCH
}
if timeout <= 0 { if timeout <= 0 {
timeout = ExecTimeoutDefault timeout = ExecTimeoutDefault
} }
if timeout > ExecTimeoutMax { if timeout > ExecTimeoutMax {
timeout = ExecTimeoutMax timeout = ExecTimeoutMax
} }
a := execArtifact{name, arch, paths, dir, env, pathname, args, timeout, exclusive} a := execArtifact{name, paths, dir, env, pathname, args, timeout, exclusive}
if checksum == nil { if checksum == nil {
if hostNet {
panic(ErrNetChecksum)
}
return &a return &a
} }
return &execMeasuredArtifact{*checksum, hostNet, a} return &execNetArtifact{*checksum, a}
} }
// Kind returns the hardcoded [Kind] constant. // Kind returns the hardcoded [Kind] constant.
@@ -259,7 +211,6 @@ func (*execArtifact) Kind() Kind { return KindExec }
// Params writes paths, executable pathname and args. // Params writes paths, executable pathname and args.
func (a *execArtifact) Params(ctx *IContext) { func (a *execArtifact) Params(ctx *IContext) {
ctx.WriteString(a.arch)
ctx.WriteString(a.name) ctx.WriteString(a.name)
ctx.WriteUint32(uint32(len(a.paths))) ctx.WriteUint32(uint32(len(a.paths)))
@@ -306,26 +257,11 @@ func (a *execArtifact) Params(ctx *IContext) {
} }
} }
// UnsupportedArchError describes an unsupported or invalid architecture.
type UnsupportedArchError string
func (e UnsupportedArchError) Error() string {
if e == "" {
return "invalid architecture name"
}
return "unsupported architecture " + string(e)
}
// readExecArtifact interprets IR values and returns the address of execArtifact // readExecArtifact interprets IR values and returns the address of execArtifact
// or execNetArtifact. // or execNetArtifact.
func readExecArtifact(r *IRReader, net bool) Artifact { func readExecArtifact(r *IRReader, net bool) Artifact {
r.DiscardAll() r.DiscardAll()
arch := r.ReadString()
if arch == "" {
panic(UnsupportedArchError(arch))
}
name := r.ReadString() name := r.ReadString()
sz := r.ReadUint32() sz := r.ReadUint32()
@@ -376,17 +312,22 @@ func readExecArtifact(r *IRReader, net bool) Artifact {
exclusive := r.ReadUint32() != 0 exclusive := r.ReadUint32() != 0
checksum, ok := r.Finalise() checksum, ok := r.Finalise()
var checksumP *Checksum
if ok {
checksumP = new(checksum.Value())
}
if net && !ok { var checksumP *Checksum
panic(ErrExpectedChecksum) if net {
if !ok {
panic(ErrExpectedChecksum)
}
checksumVal := checksum.Value()
checksumP = &checksumVal
} else {
if ok {
panic(ErrUnexpectedChecksum)
}
} }
return NewExec( return NewExec(
name, arch, checksumP, timeout, net, exclusive, dir, env, pathname, args, paths..., name, checksumP, timeout, exclusive, dir, env, pathname, args, paths...,
) )
} }
@@ -495,23 +436,11 @@ func (a *execArtifact) makeContainer(
if z.HostNet { if z.HostNet {
z.Hostname = "cure-net" z.Hostname = "cure-net"
} }
z.Quiet = flags&CSuppressInit != 0
z.Uid, z.Gid = (1<<10)-1, (1<<10)-1 z.Uid, z.Gid = (1<<10)-1, (1<<10)-1
z.Dir, z.Path, z.Args = a.dir, a.path, a.args z.Dir, z.Path, z.Args = a.dir, a.path, a.args
z.Env = slices.Concat(a.env, []string{EnvJobs + "=" + strconv.Itoa(jobs)}) z.Env = slices.Concat(a.env, []string{EnvJobs + "=" + strconv.Itoa(jobs)})
z.Grow(len(a.paths) + 4) z.Grow(len(a.paths) + 4)
if a.arch != runtime.GOARCH {
binfmtMu.RLock()
e, ok := binfmt[a.arch]
binfmtMu.RUnlock()
if !ok {
return nil, UnsupportedArchError(a.arch)
}
z.Binfmt = []container.BinfmtEntry{e}
z.InitAsRoot = true
}
for i, b := range a.paths { for i, b := range a.paths {
if i == overlayWorkIndex { if i == overlayWorkIndex {
if err = os.MkdirAll(work.String(), 0700); err != nil { if err = os.MkdirAll(work.String(), 0700); err != nil {
@@ -598,9 +527,9 @@ func (c *Cache) EnterExec(
case *execArtifact: case *execArtifact:
e = f e = f
case *execMeasuredArtifact: case *execNetArtifact:
e = &f.execArtifact e = &f.execArtifact
hostNet = f.hostNet hostNet = true
default: default:
return ErrNotExec return ErrNotExec
@@ -701,6 +630,12 @@ func (a *execArtifact) cure(f *FContext, hostNet bool) (err error) {
_ = stdout.Close() _ = stdout.Close()
return return
} }
defer func() {
if err != nil && !errors.As(err, new(*exec.ExitError)) {
_ = stdout.Close()
_ = stderr.Close()
}
}()
brStdout, brStderr := f.cache.getReader(stdout), f.cache.getReader(stderr) brStdout, brStderr := f.cache.getReader(stdout), f.cache.getReader(stderr)
stdoutDone, stderrDone := make(chan struct{}), make(chan struct{}) stdoutDone, stderrDone := make(chan struct{}), make(chan struct{})
@@ -715,11 +650,6 @@ func (a *execArtifact) cure(f *FContext, hostNet bool) (err error) {
io.TeeReader(brStderr, status), io.TeeReader(brStderr, status),
) )
defer func() { defer func() {
if err != nil && !errors.As(err, new(*exec.ExitError)) {
_ = stdout.Close()
_ = stderr.Close()
}
<-stdoutDone <-stdoutDone
<-stderrDone <-stderrDone
f.cache.putReader(brStdout) f.cache.putReader(brStdout)

View File

@@ -1,70 +1,44 @@
package pkg_test package pkg_test
//go:generate env CGO_ENABLED=0 go build -tags testtool -o testdata/testtool ./testdata
import ( import (
"bytes"
_ "embed" _ "embed"
"encoding/gob" "encoding/gob"
"errors" "errors"
"io/fs"
"net" "net"
"os" "os"
"os/exec" "os/exec"
"path/filepath"
"slices" "slices"
"testing" "testing"
"unique"
"hakurei.app/check" "hakurei.app/check"
"hakurei.app/container"
"hakurei.app/hst" "hakurei.app/hst"
"hakurei.app/internal/info"
"hakurei.app/internal/pkg" "hakurei.app/internal/pkg"
"hakurei.app/internal/stub" "hakurei.app/internal/stub"
"hakurei.app/internal/pkg/internal/testtool/expected"
) )
// testtoolBin is the container test tool binary made available to the // testtoolBin is the container test tool binary made available to the
// execArtifact for testing its curing environment. // execArtifact for testing its curing environment.
// //
//go:generate env CGO_ENABLED=0 go build -tags testtool -o internal/testtool ./internal/testtool //go:embed testdata/testtool
//go:embed internal/testtool/testtool
var testtoolBin []byte var testtoolBin []byte
func init() {
pathname, err := filepath.Abs("internal/testtool/testtool")
if err != nil {
panic(err)
}
pkg.RegisterArch("cafe", container.BinfmtEntry{
Magic: expected.Magic,
Interpreter: check.MustAbs(pathname),
})
}
func TestExec(t *testing.T) { func TestExec(t *testing.T) {
t.Parallel() t.Parallel()
wantOffline := expectsFS{ wantChecksumOffline := pkg.MustDecode(
".": {Mode: fs.ModeDir | 0500}, "GPa4aBakdSJd7Tz7LYj_VJFoojzyZinmVcG3k6M5xI6CZ821J5sXLhLDDuS47gi9",
)
"check": {Mode: 0400, Data: []byte{0}},
}
wantOfflineEncode := pkg.Encode(wantOffline.hash())
failingArtifact := &stubArtifact{
kind: pkg.KindTar,
params: []byte("doomed artifact"),
cure: func(t *pkg.TContext) error {
return stub.UniqueError(0xcafe)
},
}
checkWithCache(t, []cacheTestCase{ checkWithCache(t, []cacheTestCase{
{"offline", pkg.CValidateKnown | checkDestroySubstitutes, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) { {"offline", pkg.CValidateKnown, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
testtool, testtoolDestroy := newTesttool() testtool, testtoolDestroy := newTesttool()
cureMany(t, c, []cureStep{ cureMany(t, c, []cureStep{
{"container", pkg.NewExec( {"container", pkg.NewExec(
"exec-offline", "", new(wantOffline.hash()), 0, false, false, "exec-offline", nil, 0, false,
pkg.AbsWork, pkg.AbsWork,
[]string{"HAKUREI_TEST=1"}, []string{"HAKUREI_TEST=1"},
check.MustAbs("/opt/bin/testtool"), check.MustAbs("/opt/bin/testtool"),
@@ -84,128 +58,67 @@ func TestExec(t *testing.T) {
}, },
}), }),
pkg.MustPath("/opt", false, testtool), pkg.MustPath("/opt", false, testtool),
), ignorePathname, wantOffline, nil}, ), ignorePathname, wantChecksumOffline, nil},
{"substitution", pkg.NewExec(
"exec-offline", "", new(wantOffline.hash()), 0, false, false,
pkg.AbsWork,
[]string{"HAKUREI_TEST=1"},
check.MustAbs("/opt/bin/testtool"),
[]string{"testtool"},
pkg.MustPath("/file", false, newStubFile(
pkg.KindHTTPGet,
pkg.ID{0xfe, 0},
nil,
nil, nil,
)),
// substitution miss fails in testtool due to differing idents
pkg.MustPath("/.hakurei", false, &stubArtifact{
kind: pkg.KindTar,
params: []byte("empty directory (substituted)"),
cure: func(t *pkg.TContext) error {
return os.MkdirAll(t.GetWorkDir().String(), 0700)
},
}),
pkg.MustPath("/opt", false, testtool),
), ignorePathname, wantOffline, nil},
{"error passthrough", pkg.NewExec( {"error passthrough", pkg.NewExec(
"", "", nil, 0, false, true, "", nil, 0, true,
pkg.AbsWork, pkg.AbsWork,
[]string{"HAKUREI_TEST=1"}, []string{"HAKUREI_TEST=1"},
check.MustAbs("/opt/bin/testtool"), check.MustAbs("/opt/bin/testtool"),
[]string{"testtool"}, []string{"testtool"},
pkg.MustPath("/proc/nonexistent", false, failingArtifact), pkg.MustPath("/proc/nonexistent", false, &stubArtifact{
), nil, nil, &pkg.DependencyCureError{ kind: pkg.KindTar,
params: []byte("doomed artifact"),
cure: func(t *pkg.TContext) error {
return stub.UniqueError(0xcafe)
},
}),
), nil, pkg.Checksum{}, &pkg.DependencyCureError{
{ {
A: failingArtifact, Ident: unique.Make(pkg.ID(pkg.MustDecode(
"Sowo6oZRmG6xVtUaxB6bDWZhVsqAJsIJWUp0OPKlE103cY0lodx7dem8J-qQF0Z1",
))),
Err: stub.UniqueError(0xcafe), Err: stub.UniqueError(0xcafe),
}, },
}}, }},
{"invalid paths", pkg.NewExec( {"invalid paths", pkg.NewExec(
"", "", nil, 0, false, false, "", nil, 0, false,
pkg.AbsWork, pkg.AbsWork,
[]string{"HAKUREI_TEST=1"}, []string{"HAKUREI_TEST=1"},
check.MustAbs("/opt/bin/testtool"), check.MustAbs("/opt/bin/testtool"),
[]string{"testtool"}, []string{"testtool"},
pkg.ExecPath{}, pkg.ExecPath{},
), nil, nil, pkg.ErrInvalidPaths}, ), nil, pkg.Checksum{}, pkg.ErrInvalidPaths},
}) })
// check init failure passthrough // check init failure passthrough
initFailureArtifact := pkg.NewExec( var exitError *exec.ExitError
"", "", nil, 0, false, false, if _, _, err := c.Cure(pkg.NewExec(
"", nil, 0, false,
pkg.AbsWork, pkg.AbsWork,
nil, nil,
check.MustAbs("/opt/bin/testtool"), check.MustAbs("/opt/bin/testtool"),
[]string{"testtool"}, []string{"testtool"},
) )); !errors.As(err, &exitError) ||
var exitError *exec.ExitError
if _, _, err := c.Cure(initFailureArtifact); !errors.As(err, &exitError) ||
exitError.ExitCode() != hst.ExitFailure { exitError.ExitCode() != hst.ExitFailure {
t.Fatalf("Cure: error = %v, want init exit status 1", err) t.Fatalf("Cure: error = %v, want init exit status 1", err)
} }
var faultStatus []byte
if faults, err := c.ReadFaults(initFailureArtifact); err != nil {
t.Fatal(err)
} else if len(faults) != 1 {
t.Fatalf("ReadFaults: %v", faults)
} else if faultStatus, err = os.ReadFile(faults[0].String()); err != nil {
t.Fatal(err)
} else if err = faults[0].Destroy(); err != nil {
t.Fatal(err)
} else {
t.Logf("destroyed expected fault at %s", faults[0].Time().UTC())
}
if !bytes.HasPrefix(faultStatus, []byte(
"internal/pkg ",
)) || !bytes.Contains(faultStatus, []byte(
"\ninit: fork/exec /opt/bin/testtool: no such file or directory\n",
)) {
t.Errorf("unexpected status:\n%s", string(faultStatus))
}
destroyStatus(t, base, 2, 1)
testtoolDestroy(t, base, c) testtoolDestroy(t, base, c)
}, expectsFS{ }, pkg.MustDecode("Q5DluWQCAeohLoiGRImurwFp3vdz9IfQCoj7Fuhh73s4KQPRHpEQEnHTdNHmB8Fx")},
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700}, {"net", pkg.CValidateKnown, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
"checksum/" + wantOfflineEncode: {Mode: fs.ModeDir | 0500},
"checksum/" + wantOfflineEncode + "/check": {Mode: 0400, Data: []byte{0}},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb": {Mode: 0400, Data: []byte{}},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/IY91PCtOpCYy21AaIK0c9f8-Z6fb2_2ewoHWkt4dxoLf0GOrWqS8yAGFLV84b1Dw": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/" + wantOfflineEncode)},
"identifier/QwS7SmiatdqryQYgESdGw7Yw2PcpNf0vNfpvUA0t92BTlKiUjfCrXyMW17G2X77X": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
"identifier/" + expected.Offline: {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/" + wantOfflineEncode)},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"substitute": {Mode: fs.ModeDir | 0700},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
{"net", pkg.CValidateKnown | checkDestroySubstitutes, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
testtool, testtoolDestroy := newTesttool() testtool, testtoolDestroy := newTesttool()
wantNet := expectsFS{ wantChecksum := pkg.MustDecode(
".": {Mode: fs.ModeDir | 0500}, "a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W",
)
"check": {Mode: 0400, Data: []byte("net")},
}
cureMany(t, c, []cureStep{ cureMany(t, c, []cureStep{
{"container", pkg.NewExec( {"container", pkg.NewExec(
"exec-net", "", new(wantNet.hash()), 0, true, false, "exec-net", &wantChecksum, 0, false,
pkg.AbsWork, pkg.AbsWork,
[]string{"HAKUREI_TEST=1"}, []string{"HAKUREI_TEST=1"},
check.MustAbs("/opt/bin/testtool"), check.MustAbs("/opt/bin/testtool"),
@@ -225,37 +138,18 @@ func TestExec(t *testing.T) {
}, },
}), }),
pkg.MustPath("/opt", false, testtool), pkg.MustPath("/opt", false, testtool),
), ignorePathname, wantNet, nil}, ), ignorePathname, wantChecksum, nil},
}) })
destroyStatus(t, base, 2, 0)
testtoolDestroy(t, base, c) testtoolDestroy(t, base, c)
}, expectsFS{ }, pkg.MustDecode("bPYvvqxpfV7xcC1EptqyKNK1klLJgYHMDkzBcoOyK6j_Aj5hb0mXNPwTwPSK5F6Z")},
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700}, {"overlay root", pkg.CValidateKnown, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb": {Mode: 0400, Data: []byte{}},
"checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W": {Mode: fs.ModeDir | 0500},
"checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W/check": {Mode: 0400, Data: []byte("net")},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/" + expected.Net: {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/a1F_i9PVQI4qMcoHgTQkORuyWLkC1GLIxOhDt2JpU1NGAxWc5VJzdlfRK-PYBh3W")},
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"substitute": {Mode: fs.ModeDir | 0700},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
{"overlay root", pkg.CValidateKnown | checkDestroySubstitutes, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
testtool, testtoolDestroy := newTesttool() testtool, testtoolDestroy := newTesttool()
cureMany(t, c, []cureStep{ cureMany(t, c, []cureStep{
{"container", pkg.NewExec( {"container", pkg.NewExec(
"exec-overlay-root", "", nil, 0, false, false, "exec-overlay-root", nil, 0, false,
pkg.AbsWork, pkg.AbsWork,
[]string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"}, []string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"},
check.MustAbs("/opt/bin/testtool"), check.MustAbs("/opt/bin/testtool"),
@@ -269,35 +163,18 @@ func TestExec(t *testing.T) {
}, },
}), }),
pkg.MustPath("/opt", false, testtool), pkg.MustPath("/opt", false, testtool),
), ignorePathname, wantOffline, nil}, ), ignorePathname, wantChecksumOffline, nil},
}) })
destroyStatus(t, base, 2, 0)
testtoolDestroy(t, base, c) testtoolDestroy(t, base, c)
}, expectsFS{ }, pkg.MustDecode("PO2DSSCa4yoSgEYRcCSZfQfwow1yRigL3Ry-hI0RDI4aGuFBha-EfXeSJnG_5_Rl")},
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700}, {"overlay work", pkg.CValidateKnown, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
"checksum/" + wantOfflineEncode: {Mode: fs.ModeDir | 0500},
"checksum/" + wantOfflineEncode + "/check": {Mode: 0400, Data: []byte{0}},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/" + expected.OvlRoot: {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/" + wantOfflineEncode)},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"substitute": {Mode: fs.ModeDir | 0700},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
{"overlay work", pkg.CValidateKnown | checkDestroySubstitutes, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
testtool, testtoolDestroy := newTesttool() testtool, testtoolDestroy := newTesttool()
cureMany(t, c, []cureStep{ cureMany(t, c, []cureStep{
{"container", pkg.NewExec( {"container", pkg.NewExec(
"exec-overlay-work", "", nil, 0, false, false, "exec-overlay-work", nil, 0, false,
pkg.AbsWork, pkg.AbsWork,
[]string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"}, []string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"},
check.MustAbs("/work/bin/testtool"), check.MustAbs("/work/bin/testtool"),
@@ -316,35 +193,18 @@ func TestExec(t *testing.T) {
return os.MkdirAll(t.GetWorkDir().String(), 0700) return os.MkdirAll(t.GetWorkDir().String(), 0700)
}, },
}), pkg.Path(pkg.AbsWork, false /* ignored */, testtool), }), pkg.Path(pkg.AbsWork, false /* ignored */, testtool),
), ignorePathname, wantOffline, nil}, ), ignorePathname, wantChecksumOffline, nil},
}) })
destroyStatus(t, base, 2, 0)
testtoolDestroy(t, base, c) testtoolDestroy(t, base, c)
}, expectsFS{ }, pkg.MustDecode("iaRt6l_Wm2n-h5UsDewZxQkCmjZjyL8r7wv32QT2kyV55-Lx09Dq4gfg9BiwPnKs")},
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700}, {"multiple layers", pkg.CValidateKnown, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
"checksum/" + wantOfflineEncode: {Mode: fs.ModeDir | 0500},
"checksum/" + wantOfflineEncode + "/check": {Mode: 0400, Data: []byte{0}},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/" + expected.Work: {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/" + wantOfflineEncode)},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"substitute": {Mode: fs.ModeDir | 0700},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
{"multiple layers", pkg.CValidateKnown | checkDestroySubstitutes, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
testtool, testtoolDestroy := newTesttool() testtool, testtoolDestroy := newTesttool()
cureMany(t, c, []cureStep{ cureMany(t, c, []cureStep{
{"container", pkg.NewExec( {"container", pkg.NewExec(
"exec-multiple-layers", "", nil, 0, false, false, "exec-multiple-layers", nil, 0, false,
pkg.AbsWork, pkg.AbsWork,
[]string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"}, []string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"},
check.MustAbs("/opt/bin/testtool"), check.MustAbs("/opt/bin/testtool"),
@@ -385,40 +245,18 @@ func TestExec(t *testing.T) {
}, },
}), }),
pkg.MustPath("/opt", false, testtool), pkg.MustPath("/opt", false, testtool),
), ignorePathname, wantOffline, nil}, ), ignorePathname, wantChecksumOffline, nil},
}) })
destroyStatus(t, base, 2, 0)
testtoolDestroy(t, base, c) testtoolDestroy(t, base, c)
}, expectsFS{ }, pkg.MustDecode("O2YzyR7IUGU5J2CADy0hUZ3A5NkP_Vwzs4UadEdn2oMZZVWRtH0xZGJ3HXiimTnZ")},
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700}, {"overlay layer promotion", pkg.CValidateKnown, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
"checksum/" + wantOfflineEncode: {Mode: fs.ModeDir | 0500},
"checksum/" + wantOfflineEncode + "/check": {Mode: 0400, Data: []byte{0}},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb": {Mode: 0400, Data: []byte{}},
"checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK": {Mode: fs.ModeDir | 0500},
"checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK/check": {Mode: 0400, Data: []byte("layers")},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/_gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/OLBgp1GsljhM2TJ-sbHjaiH9txEUvgdDTAzHv2P24donTt6_529l-9Ua0vFImLlb")},
"identifier/B-kc5iJMx8GtlCua4dz6BiJHnDAOUfPjgpbKq4e-QEn0_CZkSYs3fOA1ve06qMs2": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK")},
"identifier/" + expected.Layers: {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/" + wantOfflineEncode)},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"substitute": {Mode: fs.ModeDir | 0700},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
{"overlay layer promotion", pkg.CValidateKnown | checkDestroySubstitutes, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
testtool, testtoolDestroy := newTesttool() testtool, testtoolDestroy := newTesttool()
cureMany(t, c, []cureStep{ cureMany(t, c, []cureStep{
{"container", pkg.NewExec( {"container", pkg.NewExec(
"exec-layer-promotion", "", nil, 0, false, true, "exec-layer-promotion", nil, 0, true,
pkg.AbsWork, pkg.AbsWork,
[]string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"}, []string{"HAKUREI_TEST=1", "HAKUREI_ROOT=1"},
check.MustAbs("/opt/bin/testtool"), check.MustAbs("/opt/bin/testtool"),
@@ -438,96 +276,11 @@ func TestExec(t *testing.T) {
}, },
}), }),
pkg.MustPath("/opt", false, testtool), pkg.MustPath("/opt", false, testtool),
), ignorePathname, wantOffline, nil}, ), ignorePathname, wantChecksumOffline, nil},
}) })
destroyStatus(t, base, 2, 0)
testtoolDestroy(t, base, c) testtoolDestroy(t, base, c)
}, expectsFS{ }, pkg.MustDecode("3EaW6WibLi9gl03_UieiFPaFcPy5p4x3JPxrnLJxGaTI-bh3HU9DK9IMx7c3rrNm")},
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/" + wantOfflineEncode: {Mode: fs.ModeDir | 0500},
"checksum/" + wantOfflineEncode + "/check": {Mode: 0400, Data: []byte{0}},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/kvJIqZo5DKFOxC2ZQ-8_nPaQzEAz9cIm3p6guO-uLqm-xaiPu7oRkSnsu411jd_U": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"identifier/" + expected.Promote: {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/" + wantOfflineEncode)},
"substitute": {Mode: fs.ModeDir | 0700},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
{"binfmt", pkg.CValidateKnown | checkDestroySubstitutes, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
if info.CanDegrade && os.Getenv("ROSA_SKIP_BINFMT") != "" {
t.Skip("binfmt_misc test explicitly skipped")
}
cureMany(t, c, []cureStep{
{"container", pkg.NewExec(
"exec-binfmt", "cafe", nil, 0, false, true,
pkg.AbsWork,
[]string{"HAKUREI_TEST=1", "HAKUREI_BINFMT=1"},
check.MustAbs("/opt/bin/sample"),
[]string{"sample"},
pkg.MustPath("/", true, &stubArtifact{
kind: pkg.KindTar,
params: []byte("empty directory"),
cure: func(t *pkg.TContext) error {
return os.MkdirAll(t.GetWorkDir().String(), 0700)
},
}),
pkg.MustPath("/opt", false, overrideIdent{pkg.ID{0xfe, 0xff}, &stubArtifact{
kind: pkg.KindTar,
cure: func(t *pkg.TContext) error {
work := t.GetWorkDir()
if err := os.MkdirAll(
work.Append("bin").String(),
0700,
); err != nil {
return err
}
return os.WriteFile(t.GetWorkDir().Append(
"bin",
"sample",
).String(), []byte(expected.Full), 0500)
},
}}),
), ignorePathname, expectsFS{
".": {Mode: fs.ModeDir | 0500},
"check": {Mode: 0400, Data: []byte("binfmt")},
}, nil},
})
destroyStatus(t, base, 2, 0)
}, expectsFS{
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/5aevg3YpDxjqQZ-pdvXK7YqgkL5JKqcoStYQxeD96kuYar6K2mRQWMHib6NQRnpV": {Mode: fs.ModeDir | 0500},
"checksum/5aevg3YpDxjqQZ-pdvXK7YqgkL5JKqcoStYQxeD96kuYar6K2mRQWMHib6NQRnpV/bin": {Mode: fs.ModeDir | 0700},
"checksum/5aevg3YpDxjqQZ-pdvXK7YqgkL5JKqcoStYQxeD96kuYar6K2mRQWMHib6NQRnpV/bin/sample": {Mode: 0500, Data: []byte("\xca\xfe\xba\xbe\xfd\xfd:3")},
"checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU": {Mode: fs.ModeDir | 0500},
"checksum/UnDo4B5KneEUY5b4vRUk_y9MWgkWuw2N8f8a2XayO686xXur-aZmX2-7n_8tKMe3": {Mode: fs.ModeDir | 0500},
"checksum/UnDo4B5KneEUY5b4vRUk_y9MWgkWuw2N8f8a2XayO686xXur-aZmX2-7n_8tKMe3/check": {Mode: 0400, Data: []byte("binfmt")},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/6VQTJ1lI5BmVuI1YFYJ8ClO3MRORvTTrcWFDcUU-l5Ga8EofxCxGlSTYN-u8dKj_": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/UnDo4B5KneEUY5b4vRUk_y9MWgkWuw2N8f8a2XayO686xXur-aZmX2-7n_8tKMe3")},
"identifier/_v8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/5aevg3YpDxjqQZ-pdvXK7YqgkL5JKqcoStYQxeD96kuYar6K2mRQWMHib6NQRnpV")},
"identifier/vjz1MHPcGBKV7sjcs8jQP3cqxJ1hgPTiQBMCEHP9BGXjGxd-tJmEmXKaStObo5gK": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU")},
"substitute": {Mode: fs.ModeDir | 0700},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
}) })
} }

View File

@@ -1,7 +1,6 @@
package pkg_test package pkg_test
import ( import (
"io/fs"
"testing" "testing"
"hakurei.app/check" "hakurei.app/check"
@@ -11,27 +10,18 @@ import (
func TestFile(t *testing.T) { func TestFile(t *testing.T) {
t.Parallel() t.Parallel()
want := expectsFile{0}
checkWithCache(t, []cacheTestCase{ checkWithCache(t, []cacheTestCase{
{"file", pkg.CValidateKnown, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) { {"file", pkg.CValidateKnown, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
cureMany(t, c, []cureStep{ cureMany(t, c, []cureStep{
{"short", pkg.NewFile("null", []byte{0}), base.Append( {"short", pkg.NewFile("null", []byte{0}), base.Append(
"identifier", "identifier",
"3376ALA7hIUm2LbzH2fDvRezgzod1eTK_G6XjyOgbM2u-6swvkFaF0BOwSl_juBi", "3376ALA7hIUm2LbzH2fDvRezgzod1eTK_G6XjyOgbM2u-6swvkFaF0BOwSl_juBi",
), want, nil}, ), pkg.MustDecode(
"vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX",
), nil},
}) })
}, expectsFS{ }, pkg.MustDecode(
".": {Mode: fs.ModeDir | 0700}, "iR6H5OIsyOW4EwEgtm9rGzGF6DVtyHLySEtwnFE8bnus9VJcoCbR4JIek7Lw-vwT",
)},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/" + pkg.Encode(want.hash()): {Mode: 0400, Data: []byte{0}},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/3376ALA7hIUm2LbzH2fDvRezgzod1eTK_G6XjyOgbM2u-6swvkFaF0BOwSl_juBi": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/vsAhtPNo4waRNOASwrQwcIPTqb3SBuJOXw2G4T1mNmVZM-wrQTRllmgXqcIIoRcX")},
"substitute": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
}) })
} }

View File

@@ -1,9 +0,0 @@
// Package expected contains data shared between test helper and test harness.
package expected
const (
// Magic are magic bytes in the binfmt test case.
Magic = "\xca\xfe\xba\xbe\xfd\xfd"
// Full is the full content of the binfmt test case executable.
Full = Magic + ":3"
)

View File

@@ -1,10 +0,0 @@
package expected
const (
Offline = "q5ktDTq0miP-VvB2blxqXQeaRXCUWgP_KbC18KNtUDtyoaI_h5mHmGuPMArVEBDs"
OvlRoot = "NacZGXwuRkTvcHaG08a22ujJ8qCWN0RSoFlRSR5FSt0ZcBbJ28FRvkYsHEtX7G8i"
Layers = "WBJDrATtX6rIE5yAu8ePX3WmDF0Tt9kFiue0m3cRnyRoVx1my8a67fh3CAW486oP"
Net = "CmYtj2sNB3LHtqiDuck_Lz3MjLLIiwyP8N4NDitQ1Icvv__LVP9p8tm-sHeQaKKp"
Promote = "TX3eCloaQFkV-SZIH6Jg6E5WKH--rcXY1P0jnZKmLFKWrNqnOzd4G9eIBh6i5ywN"
Work = "OuNiLSC68pZhAOr1YQ4WbV1tzASA0nxLEBcK7lO7MqxDY_j8dmP_C612RTuF23Lu"
)

View File

@@ -1,10 +0,0 @@
package expected
const (
Offline = "WapqyoPxbWSnq07dWHt71mHaJXq99pAjJfFlELlJljSiZMhTFqqlzU1_mN86shSj"
OvlRoot = "V9anFOiRvjGfAeBhLl14AL8TKdWZyD0WTPYe4fS9mOBw8iW5Lmarvt6TG6MV8uWm"
Layers = "tKx7JNRoSBdK_7MdzI-nwTNV2wmiPzwYdcd17oLmXKL_iLmUzUiA79qTqdrTasrv"
Net = "aXyDLzBCJ9XltXZIfetEVsEkrqHfcXuD5XE_FcUnYbN3emwL55N6P8LlHzNfGnM5"
Promote = "3k4V16n96Lq04gjFSKmm4sFjyQ883FFBNXgTy9s_DjeTwxT3pg_iacEh8yMb_S4m"
Work = "6Q49MhFWRE3Ne6MycwAotgl1GtoU5WCHqJNWG2byYZCY-zX-IxPrWiKk7bKkNzhE"
)

View File

@@ -1,10 +0,0 @@
package expected
const (
Offline = "Z6yXE5gOJScL3srmnVMWgCXccDiUNZ5snSrf6RkXuU1_U0rX_kGVwsfHUgNG_awd"
OvlRoot = "zYXJHFRLuxvUhuisZEXgGgVvdQd6piMfp5jmtT6jdVjvC2gICXquOq-UTwlrSD5I"
Layers = "_F8EDazHbcLeT0sVSQXRN_kn9IjduqJcDYgzXpsT-hpKU4EBcZ0PISN2zchpqMbm"
Net = "CA_FAaSIYJgapBEHV40doxpH23PdUEy_6s1TZc7wfSPN0XYqwGpMceXXDSabGveO"
Promote = "_3LPrLp--4h9k4GsNNApu9hHtAafq-GUhfU6d4hJKBDKT3bz_szOsvkXxc5sK53d"
Work = "FEgHeiCD_WT4wsfB-9kDH5n6cRWCEYtJmXdKZgmUUukAOoXumH_hLlosXREC-tqq"
)

View File

@@ -76,9 +76,6 @@ type IContext struct {
// Written to by various methods, should be zeroed after [Artifact.Params] // Written to by various methods, should be zeroed after [Artifact.Params]
// returns and must not be exposed directly. // returns and must not be exposed directly.
w io.Writer w io.Writer
// Optional [Artifact] to cureRes cache, replaces [IRKindIdent] with
// checksum values if non-nil.
inputs map[Artifact]cureRes
} }
// irZero is a zero IR word. // irZero is a zero IR word.
@@ -166,15 +163,7 @@ func (i *IContext) WriteIdent(a Artifact) {
defer i.ic.putIdentBuf(buf) defer i.ic.putIdentBuf(buf)
IRKindIdent.encodeHeader(0).put(buf[:]) IRKindIdent.encodeHeader(0).put(buf[:])
if i.inputs != nil { *(*ID)(buf[wordSize:]) = i.ic.Ident(a).Value()
res, ok := i.inputs[a]
if !ok {
panic(InvalidLookupError(i.ic.Ident(a).Value()))
}
*(*ID)(buf[wordSize:]) = res.checksum.Value()
} else {
*(*ID)(buf[wordSize:]) = i.ic.Ident(a).Value()
}
i.mustWrite(buf[:]) i.mustWrite(buf[:])
} }
@@ -218,44 +207,19 @@ func (i *IContext) WriteString(s string) {
// Encode writes a deterministic, efficient representation of a to w and returns // Encode writes a deterministic, efficient representation of a to w and returns
// the first non-nil error encountered while writing to w. // the first non-nil error encountered while writing to w.
func (ic *irCache) Encode(w io.Writer, a Artifact) (err error) { func (ic *irCache) Encode(w io.Writer, a Artifact) (err error) {
return ic.encode(w, a, nil)
}
// encode implements Encode but replaces identifiers with their cured checksums
// for a non-nil ident. Caller must acquire Cache.identMu.
func (ic *irCache) encode(
w io.Writer,
a Artifact,
inputs map[Artifact]cureRes,
) (err error) {
deps := a.Dependencies() deps := a.Dependencies()
idents := make([]*extIdent, len(deps)) idents := make([]*extIdent, len(deps))
if inputs == nil { for i, d := range deps {
for i, d := range deps { dbuf, did := ic.unsafeIdent(d, true)
dbuf, did := ic.unsafeIdent(d, true) if dbuf == nil {
if dbuf == nil { dbuf = ic.getIdentBuf()
dbuf = ic.getIdentBuf()
binary.LittleEndian.PutUint64(dbuf[:], uint64(d.Kind()))
*(*ID)(dbuf[wordSize:]) = did.Value()
} else {
ic.storeIdent(d, dbuf)
}
defer ic.putIdentBuf(dbuf)
idents[i] = dbuf
}
} else {
for i, d := range deps {
res, ok := inputs[d]
if !ok {
return InvalidLookupError(ic.Ident(d).Value())
}
dbuf := ic.getIdentBuf()
binary.LittleEndian.PutUint64(dbuf[:], uint64(d.Kind())) binary.LittleEndian.PutUint64(dbuf[:], uint64(d.Kind()))
*(*ID)(dbuf[wordSize:]) = res.checksum.Value() *(*ID)(dbuf[wordSize:]) = did.Value()
defer ic.putIdentBuf(dbuf) } else {
idents[i] = dbuf ic.storeIdent(d, dbuf)
} }
defer ic.putIdentBuf(dbuf)
idents[i] = dbuf
} }
slices.SortFunc(idents, func(a, b *extIdent) int { slices.SortFunc(idents, func(a, b *extIdent) int {
return bytes.Compare(a[:], b[:]) return bytes.Compare(a[:], b[:])
@@ -280,7 +244,7 @@ func (ic *irCache) encode(
} }
func() { func() {
i := IContext{ic, w, inputs} i := IContext{ic, w}
defer panicToError(&err) defer panicToError(&err)
defer func() { i.ic, i.w = nil, nil }() defer func() { i.ic, i.w = nil, nil }()
@@ -468,12 +432,6 @@ func (e InvalidKindError) Error() string {
// register is not safe for concurrent use. register must not be called after // register is not safe for concurrent use. register must not be called after
// the first instance of [Cache] has been opened. // the first instance of [Cache] has been opened.
func register(k Kind, f IRReadFunc) { func register(k Kind, f IRReadFunc) {
openMu.Lock()
defer openMu.Unlock()
if opened {
panic("attempting to register after open")
}
if _, ok := irArtifact[k]; ok { if _, ok := irArtifact[k]; ok {
panic("attempting to register " + strconv.Itoa(int(k)) + " twice") panic("attempting to register " + strconv.Itoa(int(k)) + " twice")
} }

View File

@@ -3,7 +3,6 @@ package pkg_test
import ( import (
"bytes" "bytes"
"io" "io"
"io/fs"
"reflect" "reflect"
"testing" "testing"
@@ -39,7 +38,7 @@ func TestIRRoundtrip(t *testing.T) {
)}, )},
{"exec offline", pkg.NewExec( {"exec offline", pkg.NewExec(
"exec-offline", "", nil, 0, false, false, "exec-offline", nil, 0, false,
pkg.AbsWork, pkg.AbsWork,
[]string{"HAKUREI_TEST=1"}, []string{"HAKUREI_TEST=1"},
check.MustAbs("/opt/bin/testtool"), check.MustAbs("/opt/bin/testtool"),
@@ -59,9 +58,9 @@ func TestIRRoundtrip(t *testing.T) {
)}, )},
{"exec net", pkg.NewExec( {"exec net", pkg.NewExec(
"exec-net", "", "exec-net",
(*pkg.Checksum)(bytes.Repeat([]byte{0xfc}, len(pkg.Checksum{}))), (*pkg.Checksum)(bytes.Repeat([]byte{0xfc}, len(pkg.Checksum{}))),
0, false, false, 0, false,
pkg.AbsWork, pkg.AbsWork,
[]string{"HAKUREI_TEST=1"}, []string{"HAKUREI_TEST=1"},
check.MustAbs("/opt/bin/testtool"), check.MustAbs("/opt/bin/testtool"),
@@ -80,28 +79,6 @@ func TestIRRoundtrip(t *testing.T) {
)), )),
)}, )},
{"exec measured", pkg.NewExec(
"exec-measured", "",
(*pkg.Checksum)(bytes.Repeat([]byte{0xfd}, len(pkg.Checksum{}))),
0, false, false,
pkg.AbsWork,
[]string{"HAKUREI_TEST=1"},
check.MustAbs("/opt/bin/testtool"),
[]string{"testtool", "measured"},
pkg.MustPath("/file", false, pkg.NewFile("file", []byte(
"stub file",
))), pkg.MustPath("/.hakurei", false, pkg.NewHTTPGetTar(
nil, "file:///hakurei.tar",
pkg.Checksum(bytes.Repeat([]byte{0xfd}, len(pkg.Checksum{}))),
pkg.TarUncompressed,
)), pkg.MustPath("/opt", false, pkg.NewHTTPGetTar(
nil, "file:///testtool.tar.gz",
pkg.Checksum(bytes.Repeat([]byte{0xfd}, len(pkg.Checksum{}))),
pkg.TarGzip,
)),
)},
{"file anonymous", pkg.NewFile("", []byte{0})}, {"file anonymous", pkg.NewFile("", []byte{0})},
{"file", pkg.NewFile("stub", []byte("stub"))}, {"file", pkg.NewFile("stub", []byte("stub"))},
} }
@@ -128,13 +105,9 @@ func TestIRRoundtrip(t *testing.T) {
if err := <-done; err != nil { if err := <-done; err != nil {
t.Fatalf("EncodeAll: error = %v", err) t.Fatalf("EncodeAll: error = %v", err)
} }
}, expectsFS{ }, pkg.MustDecode(
".": {Mode: fs.ModeDir | 0700}, "E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C",
"checksum": {Mode: fs.ModeDir | 0700}, ),
"identifier": {Mode: fs.ModeDir | 0700},
"substitute": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
},
} }
} }
checkWithCache(t, testCasesCache) checkWithCache(t, testCasesCache)

View File

@@ -3,7 +3,6 @@ package pkg_test
import ( import (
"crypto/sha512" "crypto/sha512"
"io" "io"
"io/fs"
"net/http" "net/http"
"reflect" "reflect"
"testing" "testing"
@@ -86,13 +85,7 @@ func TestHTTPGet(t *testing.T) {
if _, err := f.Cure(r); !reflect.DeepEqual(err, wantErrNotFound) { if _, err := f.Cure(r); !reflect.DeepEqual(err, wantErrNotFound) {
t.Fatalf("Cure: error = %#v, want %#v", err, wantErrNotFound) t.Fatalf("Cure: error = %#v, want %#v", err, wantErrNotFound)
} }
}, expectsFS{ }, pkg.MustDecode("E4vEZKhCcL2gPZ2Tt59FS3lDng-d_2SKa2i5G_RbDfwGn6EemptFaGLPUDiOa94C")},
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"identifier": {Mode: fs.ModeDir | 0700},
"substitute": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
{"cure", pkg.CValidateKnown, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) { {"cure", pkg.CValidateKnown, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
r := newRContext(t, c) r := newRContext(t, c)
@@ -151,18 +144,6 @@ func TestHTTPGet(t *testing.T) {
if _, _, err := c.Cure(f); !reflect.DeepEqual(err, wantErrNotFound) { if _, _, err := c.Cure(f); !reflect.DeepEqual(err, wantErrNotFound) {
t.Fatalf("Pathname: error = %#v, want %#v", err, wantErrNotFound) t.Fatalf("Pathname: error = %#v, want %#v", err, wantErrNotFound)
} }
}, expectsFS{ }, pkg.MustDecode("L_0RFHpr9JUS4Zp14rz2dESSRvfLzpvqsLhR1-YjQt8hYlmEdVl7vI3_-v8UNPKs")},
".": {Mode: fs.ModeDir | 0700},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU": {Mode: 0400, Data: []byte("\x7f\xe1\x69\xa2\xdd\x63\x96\x26\x83\x79\x61\x8b\xf0\x3f\xd5\x16\x9a\x39\x3a\xdb\xcf\xb1\xbc\x8d\x33\xff\x75\xee\x62\x56\xa9\xf0\x27\xac\x13\x94\x69")},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/oM-2pUlk-mOxK1t3aMWZer69UdOQlAXiAgMrpZ1476VoOqpYVP1aGFS9_HYy-D8_": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/fLYGIMHgN1louE-JzITJZJo2SDniPu-IHBXubtvQWFO-hXnDVKNuscV7-zlyr5fU")},
"substitute": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
}) })
} }

View File

@@ -4,7 +4,6 @@ package pkg
import ( import (
"bufio" "bufio"
"bytes" "bytes"
"cmp"
"context" "context"
"crypto/sha512" "crypto/sha512"
"encoding/base64" "encoding/base64"
@@ -16,17 +15,14 @@ import (
"io/fs" "io/fs"
"maps" "maps"
"os" "os"
"os/signal"
"path/filepath" "path/filepath"
"runtime" "runtime"
"slices" "slices"
"strconv"
"strings" "strings"
"sync" "sync"
"sync/atomic" "sync/atomic"
"syscall" "syscall"
"testing" "testing"
"time"
"unique" "unique"
"unsafe" "unsafe"
@@ -74,64 +70,6 @@ func MustDecode(s string) (checksum Checksum) {
return return
} }
var (
// extension is a string uniquely identifying a set of custom [Artifact]
// implementations registered by calling [Register].
extension string
// openMu synchronises access to global state for initialisation.
openMu sync.Mutex
// opened is false if [Open] was never called.
opened bool
)
// Extension returns a string uniquely identifying the currently registered set
// of custom [Artifact], or the zero value if none was registered.
func Extension() string { return extension }
// ValidExtension returns whether s is valid for use in a call to SetExtension.
func ValidExtension(s string) bool {
if l := len(s); l == 0 || l > 128 {
return false
}
for _, v := range s {
if v < 'a' || v > 'z' {
return false
}
}
return true
}
// ErrInvalidExtension is returned for a variant identification string for which
// [ValidExtension] returns false.
var ErrInvalidExtension = errors.New("invalid extension variant identification string")
// SetExtension sets the extension variant identification string. SetExtension
// must be called before [Open] if custom [Artifact] implementations had been
// recorded by calling [Register].
//
// The variant identification string must be between 1 and 128 bytes long and
// consists of only bytes between 'a' and 'z'.
//
// SetExtension is not safe for concurrent use. SetExtension is called at most
// once and must not be called after the first instance of Cache has been opened.
func SetExtension(s string) {
openMu.Lock()
defer openMu.Unlock()
if opened {
panic("attempting to set extension after open")
}
if extension != "" {
panic("attempting to set extension twice")
}
if !ValidExtension(s) {
panic(ErrInvalidExtension)
}
extension = s
statusHeader = makeStatusHeader(s)
}
// common holds elements and receives methods shared between different contexts. // common holds elements and receives methods shared between different contexts.
type common struct { type common struct {
// Context specific to this [Artifact]. The toplevel context in [Cache] must // Context specific to this [Artifact]. The toplevel context in [Cache] must
@@ -164,27 +102,19 @@ type TContext struct {
common common
} }
// makeStatusHeader creates the header written to every status file. This should // statusHeader is the header written to all status files in dirStatus.
// not be called directly, its result is stored in statusHeader and will not var statusHeader = func() string {
// change after the first [Cache] is opened.
func makeStatusHeader(extension string) string {
s := programName s := programName
if v := info.Version(); v != info.FallbackVersion { if v := info.Version(); v != info.FallbackVersion {
s += " " + v s += " " + v
} }
if extension != "" {
s += " with " + extension + " extensions"
}
s += " (" + runtime.GOARCH + ")" s += " (" + runtime.GOARCH + ")"
if name, err := os.Hostname(); err == nil { if name, err := os.Hostname(); err == nil {
s += " on " + name s += " on " + name
} }
s += "\n\n" s += "\n\n"
return s return s
} }()
// statusHeader is the header written to all status files in dirStatus.
var statusHeader = makeStatusHeader("")
// prepareStatus initialises the status file once. // prepareStatus initialises the status file once.
func (t *TContext) prepareStatus() error { func (t *TContext) prepareStatus() error {
@@ -250,14 +180,7 @@ func (t *TContext) destroy(errP *error) {
*errP = errors.Join(*errP, err) *errP = errors.Join(*errP, err)
} }
if *errP != nil { if *errP != nil {
*errP = errors.Join(*errP, os.Rename( *errP = errors.Join(*errP, os.Remove(t.statusPath.String()))
t.statusPath.String(), t.cache.base.Append(
dirFault,
t.ids+"."+strconv.FormatUint(uint64(
time.Now().UnixNano(),
), 10),
).String(),
))
} }
t.status = nil t.status = nil
} }
@@ -504,9 +427,6 @@ const (
// KindFile is the kind of [Artifact] returned by [NewFile]. // KindFile is the kind of [Artifact] returned by [NewFile].
KindFile KindFile
// _kindEnd is the total number of kinds and does not denote a kind.
_kindEnd
// KindCustomOffset is the first [Kind] value reserved for implementations // KindCustomOffset is the first [Kind] value reserved for implementations
// not from this package. // not from this package.
KindCustomOffset = 1 << 31 KindCustomOffset = 1 << 31
@@ -518,34 +438,30 @@ const (
) )
const ( const (
// fileLock is the lock file for exclusive access to the cache directory. // fileLock is the file name appended to Cache.base for guaranteeing
// exclusive access to the cache directory.
fileLock = "lock" fileLock = "lock"
// fileVariant is a file holding the variant identification string set by a
// prior call to [SetExtension].
fileVariant = "variant"
// dirSubstitute holds symlinks to artifacts by checksum, named after their // dirIdentifier is the directory name appended to Cache.base for storing
// substitute identifier. // artifacts named after their [ID].
dirSubstitute = "substitute"
// dirIdentifier holds symlinks to artifacts by checksum, named after their
// IR-based identifier.
dirIdentifier = "identifier" dirIdentifier = "identifier"
// dirChecksum holds artifacts named after their [Checksum]. // dirChecksum is the directory name appended to Cache.base for storing
// artifacts named after their [Checksum].
dirChecksum = "checksum" dirChecksum = "checksum"
// dirStatus holds artifact metadata and logs named after their IR-based // dirStatus is the directory name appended to Cache.base for storing
// identifier. For [FloodArtifact], the same file is also available under // artifact metadata and logs named after their [ID].
// its substitute identifier.
dirStatus = "status" dirStatus = "status"
// dirFault holds status files of faulted cures.
dirFault = "fault"
// dirWork holds working pathnames set up during [Cache.Cure]. // dirWork is the directory name appended to Cache.base for working
// pathnames set up during [Cache.Cure].
dirWork = "work" dirWork = "work"
// dirTemp holds scratch space allocated during [Cache.Cure]. // dirTemp is the directory name appended to Cache.base for scratch space
// pathnames allocated during [Cache.Cure].
dirTemp = "temp" dirTemp = "temp"
// dirExecScratch is scratch space set up for the container started by // dirExecScratch is the directory name appended to Cache.base for scratch
// [Cache.EnterExec]. Exclusivity via Cache.inExec. // space setting up the container started by [Cache.EnterExec]. Exclusivity
// via Cache.inExec.
dirExecScratch = "scratch" dirExecScratch = "scratch"
// checksumLinknamePrefix is prepended to the encoded [Checksum] value // checksumLinknamePrefix is prepended to the encoded [Checksum] value
@@ -624,17 +540,6 @@ const (
// impurity due to [KindExecNet] being [KnownChecksum]. This flag exists // impurity due to [KindExecNet] being [KnownChecksum]. This flag exists
// to support kernels without Landlock LSM enabled. // to support kernels without Landlock LSM enabled.
CHostAbstract CHostAbstract
// CPromoteVariant allows [pkg.Open] to promote an unextended on-disk cache
// to the current extension variant. This is a one-way operation.
CPromoteVariant
// CSuppressInit arranges for verbose output of the container init to be
// suppressed regardless of [message.Msg] state.
CSuppressInit
// CIgnoreSubstitutes disables content-based dependency substitution.
CIgnoreSubstitutes
) )
// toplevel holds [context.WithCancel] over caller-supplied context, where all // toplevel holds [context.WithCancel] over caller-supplied context, where all
@@ -690,11 +595,6 @@ type Cache struct {
// Synchronises access to dirChecksum. // Synchronises access to dirChecksum.
checksumMu sync.RWMutex checksumMu sync.RWMutex
// Presence of an alternative in the cache. Keys are not valid identifiers
// and must not be used as such.
substitute map[unique.Handle[ID]]unique.Handle[Checksum]
// Synchronises access to substitute and corresponding filesystem entries.
substituteMu sync.RWMutex
// Identifier to content pair cache. // Identifier to content pair cache.
ident map[unique.Handle[ID]]unique.Handle[Checksum] ident map[unique.Handle[ID]]unique.Handle[Checksum]
// Identifier to error pair for unrecoverably faulted [Artifact]. // Identifier to error pair for unrecoverably faulted [Artifact].
@@ -905,14 +805,11 @@ func (c *Cache) Scrub(checks int) error {
checks = runtime.NumCPU() checks = runtime.NumCPU()
} }
c.substituteMu.Lock()
defer c.substituteMu.Unlock()
c.identMu.Lock() c.identMu.Lock()
defer c.identMu.Unlock() defer c.identMu.Unlock()
c.checksumMu.Lock() c.checksumMu.Lock()
defer c.checksumMu.Unlock() defer c.checksumMu.Unlock()
c.substitute = make(map[unique.Handle[ID]]unique.Handle[Checksum])
c.ident = make(map[unique.Handle[ID]]unique.Handle[Checksum]) c.ident = make(map[unique.Handle[ID]]unique.Handle[Checksum])
c.identErr = make(map[unique.Handle[ID]]error) c.identErr = make(map[unique.Handle[ID]]error)
c.artifact.Clear() c.artifact.Clear()
@@ -1020,52 +917,47 @@ func (c *Cache) Scrub(checks int) error {
wg.Wait() wg.Wait()
} }
for _, suffix := range []string{ dir = c.base.Append(dirIdentifier)
dirSubstitute, if entries, readdirErr := os.ReadDir(dir.String()); readdirErr != nil {
dirIdentifier, addErr(dir, readdirErr)
} { } else {
dir = c.base.Append(suffix) wg.Add(len(entries))
if entries, readdirErr := os.ReadDir(dir.String()); readdirErr != nil { for _, ent := range entries {
addErr(dir, readdirErr) w <- checkEntry{ent, func(ent os.DirEntry, want *Checksum) bool {
} else { got := p.Get().(*Checksum)
wg.Add(len(entries)) defer p.Put(got)
for _, ent := range entries {
w <- checkEntry{ent, func(ent os.DirEntry, want *Checksum) bool {
got := p.Get().(*Checksum)
defer p.Put(got)
pathname := dir.Append(ent.Name()) pathname := dir.Append(ent.Name())
if linkname, err := os.Readlink( if linkname, err := os.Readlink(
pathname.String(), pathname.String(),
); err != nil { ); err != nil {
seMu.Lock() seMu.Lock()
se.Errs[pathname.Handle()] = append(se.Errs[pathname.Handle()], err) se.Errs[pathname.Handle()] = append(se.Errs[pathname.Handle()], err)
se.DanglingIdentifiers = append(se.DanglingIdentifiers, *want) se.DanglingIdentifiers = append(se.DanglingIdentifiers, *want)
seMu.Unlock() seMu.Unlock()
return false return false
} else if err = Decode(got, filepath.Base(linkname)); err != nil { } else if err = Decode(got, filepath.Base(linkname)); err != nil {
seMu.Lock() seMu.Lock()
lnp := dir.Append(linkname) lnp := dir.Append(linkname)
se.Errs[lnp.Handle()] = append(se.Errs[lnp.Handle()], err) se.Errs[lnp.Handle()] = append(se.Errs[lnp.Handle()], err)
se.DanglingIdentifiers = append(se.DanglingIdentifiers, *want) se.DanglingIdentifiers = append(se.DanglingIdentifiers, *want)
seMu.Unlock() seMu.Unlock()
return false return false
} }
if _, err := os.Stat(pathname.String()); err != nil { if _, err := os.Stat(pathname.String()); err != nil {
if !errors.Is(err, os.ErrNotExist) { if !errors.Is(err, os.ErrNotExist) {
addErr(pathname, err) addErr(pathname, err)
}
seMu.Lock()
se.DanglingIdentifiers = append(se.DanglingIdentifiers, *want)
seMu.Unlock()
return false
} }
return true seMu.Lock()
}} se.DanglingIdentifiers = append(se.DanglingIdentifiers, *want)
} seMu.Unlock()
wg.Wait() return false
}
return true
}}
} }
wg.Wait()
} }
dir = c.base.Append(dirStatus) dir = c.base.Append(dirStatus)
@@ -1213,52 +1105,6 @@ func (c *Cache) finaliseIdent(
close(done) close(done)
} }
// zeroChecksum is a zero [Checksum] handle, used for comparison only.
var zeroChecksum unique.Handle[Checksum]
// loadSubstitute returns a checksum corresponding to a substitute identifier,
// or zeroChecksum if an alternative is not available.
func (c *Cache) loadSubstitute(
substitute unique.Handle[ID],
) (unique.Handle[Checksum], error) {
c.substituteMu.RLock()
if checksum, ok := c.substitute[substitute]; ok {
c.substituteMu.RUnlock()
return checksum, nil
}
linkname, err := os.Readlink(c.base.Append(
dirSubstitute,
Encode(substitute.Value()),
).String())
c.substituteMu.RUnlock()
if err != nil {
if !errors.Is(err, os.ErrNotExist) {
return zeroChecksum, err
}
c.substituteMu.Lock()
c.substitute[substitute] = zeroChecksum
c.substituteMu.Unlock()
return zeroChecksum, nil
}
var checksum unique.Handle[Checksum]
buf := c.getIdentBuf()
err = Decode((*Checksum)(buf[:]), filepath.Base(linkname))
if err == nil {
checksum = unique.Make(Checksum(buf[:]))
c.substituteMu.Lock()
c.substitute[substitute] = checksum
c.substituteMu.Unlock()
}
c.putIdentBuf(buf)
return checksum, err
}
// Done returns a channel that is closed when the ongoing cure of an [Artifact] // Done returns a channel that is closed when the ongoing cure of an [Artifact]
// referred to by the specified identifier completes. Done may return nil if // referred to by the specified identifier completes. Done may return nil if
// no ongoing cure of the specified identifier exists. // no ongoing cure of the specified identifier exists.
@@ -1493,8 +1339,8 @@ func (c *Cache) Cure(a Artifact) (
// CureError wraps a non-nil error returned attempting to cure an [Artifact]. // CureError wraps a non-nil error returned attempting to cure an [Artifact].
type CureError struct { type CureError struct {
A Artifact Ident unique.Handle[ID]
Err error Err error
} }
// Unwrap returns the underlying error. // Unwrap returns the underlying error.
@@ -1507,63 +1353,40 @@ func (e *CureError) Error() string { return e.Err.Error() }
type DependencyCureError []*CureError type DependencyCureError []*CureError
// unwrapM recursively expands underlying errors into a caller-supplied map. // unwrapM recursively expands underlying errors into a caller-supplied map.
func (e *DependencyCureError) unwrapM( func (e *DependencyCureError) unwrapM(me map[unique.Handle[ID]]*CureError) {
ctx context.Context,
ir *IRCache,
me map[unique.Handle[ID]]*CureError,
) {
for _, err := range *e { for _, err := range *e {
if ctx.Err() != nil { if _, ok := me[err.Ident]; ok {
break
}
id := ir.Ident(err.A)
if _, ok := me[id]; ok {
continue continue
} }
if _e, ok := err.Err.(*DependencyCureError); ok { if _e, ok := err.Err.(*DependencyCureError); ok {
_e.unwrapM(ctx, ir, me) _e.unwrapM(me)
continue continue
} }
me[id] = err me[err.Ident] = err
} }
} }
// unwrap recursively expands and deduplicates underlying errors. // unwrap recursively expands and deduplicates underlying errors.
func (e *DependencyCureError) unwrap( func (e *DependencyCureError) unwrap() DependencyCureError {
ctx context.Context,
ir *IRCache,
) DependencyCureError {
me := make(map[unique.Handle[ID]]*CureError) me := make(map[unique.Handle[ID]]*CureError)
e.unwrapM(ctx, ir, me) e.unwrapM(me)
type ent struct { errs := slices.AppendSeq(
id unique.Handle[ID] make(DependencyCureError, 0, len(me)),
err *CureError maps.Values(me),
} )
errs := make([]*ent, 0, len(me))
for id, err := range me {
errs = append(errs, &ent{id, err})
}
var identBuf [2]ID var identBuf [2]ID
slices.SortFunc(errs, func(a, b *ent) int { slices.SortFunc(errs, func(a, b *CureError) int {
identBuf[0], identBuf[1] = a.id.Value(), b.id.Value() identBuf[0], identBuf[1] = a.Ident.Value(), b.Ident.Value()
return slices.Compare(identBuf[0][:], identBuf[1][:]) return slices.Compare(identBuf[0][:], identBuf[1][:])
}) })
_errs := make(DependencyCureError, len(errs)) return errs
for i, v := range errs {
_errs[i] = v.err
}
return _errs
} }
// Unwrap returns a deduplicated slice of underlying errors. // Unwrap returns a deduplicated slice of underlying errors.
func (e *DependencyCureError) Unwrap() []error { func (e *DependencyCureError) Unwrap() []error {
ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt) errs := e.unwrap()
defer cancel()
errs := e.unwrap(ctx, NewIR())
_errs := make([]error, len(errs)) _errs := make([]error, len(errs))
for i, err := range errs { for i, err := range errs {
_errs[i] = err _errs[i] = err
@@ -1573,23 +1396,14 @@ func (e *DependencyCureError) Unwrap() []error {
// Error returns a user-facing multiline error message. // Error returns a user-facing multiline error message.
func (e *DependencyCureError) Error() string { func (e *DependencyCureError) Error() string {
ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt) errs := e.unwrap()
defer cancel()
ir := NewIR()
errs := e.unwrap(ctx, ir)
if len(errs) == 0 { if len(errs) == 0 {
return "invalid dependency cure outcome" return "invalid dependency cure outcome"
} }
var buf strings.Builder var buf strings.Builder
buf.WriteString("errors curing dependencies:") buf.WriteString("errors curing dependencies:")
for _, err := range errs { for _, err := range errs {
buf.WriteString("\n\t" + buf.WriteString("\n\t" + Encode(err.Ident.Value()) + ": " + err.Error())
reportName(err.A, ir.Ident(err.A)) + ": " +
err.Error())
}
if ctx.Err() != nil {
buf.WriteString("\nerror resolution cancelled")
} }
return buf.String() return buf.String()
} }
@@ -1759,44 +1573,16 @@ func (c *Cache) cure(a Artifact, curesExempt bool) (
return return
} }
var ( var checksums string
checksums string
substitute unique.Handle[ID]
alternative *check.Absolute
)
defer func() { defer func() {
if err == nil && checksums != "" { if err == nil && checksums != "" {
linkname := checksumLinknamePrefix + checksums
err = os.Symlink( err = os.Symlink(
linkname, checksumLinknamePrefix+checksums,
pathname.String(), pathname.String(),
) )
if err == nil { if err == nil {
err = zeroTimes(pathname.String()) err = zeroTimes(pathname.String())
} }
if err == nil && alternative != nil {
c.substituteMu.Lock()
err = os.Symlink(
linkname,
alternative.String(),
)
if errors.Is(err, os.ErrExist) {
c.msg.Verbosef(
"creating alternative over %s for artifact %s",
Encode(substitute.Value()), ids,
)
err = nil
}
if err == nil {
err = zeroTimes(alternative.String())
}
if err == nil && checksum != zeroChecksum {
c.substitute[substitute] = checksum
}
c.substituteMu.Unlock()
}
} }
}() }()
@@ -1993,64 +1779,11 @@ func (c *Cache) cure(a Artifact, curesExempt bool) (
f.deps[deps[i]] = p f.deps[deps[i]] = p
} }
sh := sha512.New384()
err = c.encode(sh, a, f.deps)
if err != nil {
return
}
buf := c.getIdentBuf()
sh.Sum(buf[wordSize:wordSize])
substitute = unique.Make(ID(buf[wordSize:]))
substitutes := Encode(substitute.Value())
c.putIdentBuf(buf)
alternative = c.base.Append(
dirSubstitute,
substitutes,
)
if c.flags&CIgnoreSubstitutes == 0 {
var substituteChecksum unique.Handle[Checksum]
substituteChecksum, err = c.loadSubstitute(substitute)
if err != nil {
return
}
if substituteChecksum != zeroChecksum {
checksum = substituteChecksum
checksums = Encode(checksum.Value())
checksumPathname = c.base.Append(
dirChecksum,
checksums,
)
if _, err = os.Lstat(c.base.Append(
dirStatus,
substitutes,
).String()); err == nil {
err = os.Symlink(substitutes, c.base.Append(
dirStatus,
ids,
).String())
} else if errors.Is(err, os.ErrNotExist) {
err = nil
}
return
}
}
defer f.destroy(&err) defer f.destroy(&err)
if err = c.enterCure(a, curesExempt); err != nil { if err = c.enterCure(a, curesExempt); err != nil {
return return
} }
err = ca.Cure(&f) err = ca.Cure(&f)
if err == nil && f.status != nil {
err = os.Link(c.base.Append(
dirStatus,
ids,
).String(), c.base.Append(
dirStatus,
substitutes,
).String())
}
c.exitCure(a, curesExempt) c.exitCure(a, curesExempt)
if err != nil { if err != nil {
return return
@@ -2143,7 +1876,7 @@ func (pending *pendingArtifactDep) cure(c *Cache) {
} }
pending.errsMu.Lock() pending.errsMu.Lock()
*pending.errs = append(*pending.errs, &CureError{pending.a, err}) *pending.errs = append(*pending.errs, &CureError{c.Ident(pending.a), err})
pending.errsMu.Unlock() pending.errsMu.Unlock()
} }
@@ -2159,52 +1892,6 @@ func (c *Cache) OpenStatus(a Artifact) (r io.ReadSeekCloser, err error) {
return return
} }
// Fault holds the pathname and termination time of an [Artifact] fault entry.
type Fault struct {
*check.Absolute
t uint64
}
// Time returns the instant in time where the fault occurred.
func (f Fault) Time() time.Time { return time.Unix(0, int64(f.t)) }
// Open opens the underlying entry for reading.
func (f Fault) Open() (io.ReadCloser, error) { return os.Open(f.Absolute.String()) }
// Destroy removes the underlying fault entry.
func (f Fault) Destroy() error { return os.Remove(f.Absolute.String()) }
// ReadFaults returns fault entries for an [Artifact].
func (c *Cache) ReadFaults(a Artifact) (faults []Fault, err error) {
prefix := Encode(c.Ident(a).Value()) + "."
var dents []os.DirEntry
if dents, err = os.ReadDir(c.base.Append(dirFault).String()); err != nil {
return
}
for _, dent := range dents {
name := dent.Name()
if !strings.HasPrefix(name, prefix) {
continue
}
var t uint64
t, err = strconv.ParseUint(name[len(prefix):], 10, 64)
if err != nil {
return
}
faults = append(faults, Fault{c.base.Append(
dirFault,
name,
), t})
}
slices.SortFunc(faults, func(a, b Fault) int {
return cmp.Compare(a.t, b.t)
})
return
}
// Abort cancels all pending cures and waits for them to clean up, but does not // Abort cancels all pending cures and waits for them to clean up, but does not
// close the cache. // close the cache.
func (c *Cache) Abort() { func (c *Cache) Abort() {
@@ -2243,20 +1930,6 @@ func (c *Cache) Close() {
c.unlock() c.unlock()
} }
// UnsupportedVariantError describes an on-disk cache with an extension variant
// identification string that differs from the value returned by [Extension].
type UnsupportedVariantError string
func (e UnsupportedVariantError) Error() string {
return "unsupported variant " + strconv.Quote(string(e))
}
var (
// ErrWouldPromote is returned by [Open] if the [CPromoteVariant] bit is not
// set and the on-disk cache requires variant promotion.
ErrWouldPromote = errors.New("operation would promote unextended cache")
)
// Open returns the address of a newly opened instance of [Cache]. // Open returns the address of a newly opened instance of [Cache].
// //
// Concurrent cures of a [FloodArtifact] dependency graph is limited to the // Concurrent cures of a [FloodArtifact] dependency graph is limited to the
@@ -2288,14 +1961,6 @@ func open(
base *check.Absolute, base *check.Absolute,
lock bool, lock bool,
) (*Cache, error) { ) (*Cache, error) {
openMu.Lock()
defer openMu.Unlock()
opened = true
if extension == "" && len(irArtifact) != int(_kindEnd) {
panic("attempting to open cache with incomplete variant setup")
}
if cures < 1 { if cures < 1 {
cures = runtime.NumCPU() cures = runtime.NumCPU()
} }
@@ -2304,17 +1969,13 @@ func open(
} }
for _, name := range []string{ for _, name := range []string{
dirSubstitute,
dirIdentifier, dirIdentifier,
dirChecksum, dirChecksum,
dirStatus, dirStatus,
dirFault,
dirWork, dirWork,
} { } {
if err := os.MkdirAll( if err := os.MkdirAll(base.Append(name).String(), 0700); err != nil &&
base.Append(name).String(), !errors.Is(err, os.ErrExist) {
0700,
); err != nil && !errors.Is(err, os.ErrExist) {
return nil, err return nil, err
} }
} }
@@ -2331,7 +1992,6 @@ func open(
irCache: zeroIRCache(), irCache: zeroIRCache(),
substitute: make(map[unique.Handle[ID]]unique.Handle[Checksum]),
ident: make(map[unique.Handle[ID]]unique.Handle[Checksum]), ident: make(map[unique.Handle[ID]]unique.Handle[Checksum]),
identErr: make(map[unique.Handle[ID]]error), identErr: make(map[unique.Handle[ID]]error),
identPending: make(map[unique.Handle[ID]]*pendingCure), identPending: make(map[unique.Handle[ID]]*pendingCure),
@@ -2353,45 +2013,6 @@ func open(
c.unlock = func() {} c.unlock = func() {}
} }
variantPath := base.Append(fileVariant).String()
if p, err := os.ReadFile(variantPath); err != nil {
if !errors.Is(err, os.ErrNotExist) {
c.unlock()
return nil, err
}
// nonexistence implies newly created cache, or a cache predating
// variant identification strings, in which case it is silently promoted
if err = os.WriteFile(
variantPath,
[]byte(extension),
0400,
); err != nil {
c.unlock()
return nil, err
}
} else if s := string(p); s == "" {
if extension != "" {
if flags&CPromoteVariant == 0 {
c.unlock()
return nil, ErrWouldPromote
}
if err = os.WriteFile(
variantPath,
[]byte(extension),
0400,
); err != nil {
c.unlock()
return nil, err
}
}
} else if !ValidExtension(s) {
c.unlock()
return nil, ErrInvalidExtension
} else if s != extension {
c.unlock()
return nil, UnsupportedVariantError(s)
}
return &c, nil return &c, nil
} }

File diff suppressed because it is too large Load Diff

View File

@@ -20,31 +20,6 @@ import (
func TestTar(t *testing.T) { func TestTar(t *testing.T) {
t.Parallel() t.Parallel()
want := expectsFS{
".": {Mode: fs.ModeDir | 0500},
"checksum": {Mode: fs.ModeDir | 0500},
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP": {Mode: fs.ModeDir | 0500},
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check": {Mode: 0400, Data: []byte{0, 0}},
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib": {Mode: fs.ModeDir | 0500},
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig": {Mode: fs.ModeDir | 0500},
"checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
"identifier": {Mode: fs.ModeDir | 0500},
"identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
"identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
"work": {Mode: fs.ModeDir | 0500},
}
wantEncode := pkg.Encode(want.hash())
wantExpand := expectsFS{
".": {Mode: fs.ModeDir | 0500},
"libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
}
wantExpandEncode := pkg.Encode(wantExpand.hash())
checkWithCache(t, []cacheTestCase{ checkWithCache(t, []cacheTestCase{
{"http", 0, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) { {"http", 0, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
checkTarHTTP(t, base, c, fstest.MapFS{ checkTarHTTP(t, base, c, fstest.MapFS{
@@ -62,32 +37,10 @@ func TestTar(t *testing.T) {
"identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")}, "identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
"work": {Mode: fs.ModeDir | 0700}, "work": {Mode: fs.ModeDir | 0700},
}, want) }, pkg.MustDecode(
}, expectsFS{ "cTw0h3AmYe7XudSoyEMByduYXqGi-N5ZkTZ0t9K5elsu3i_jNIVF5T08KR1roBFM",
".": {Mode: fs.ModeDir | 0700}, ))
}, pkg.MustDecode("NQTlc466JmSVLIyWklm_u8_g95jEEb98PxJU-kjwxLpfdjwMWJq0G8ze9R4Vo1Vu")},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/" + wantEncode: {Mode: fs.ModeDir | 0500},
"checksum/" + wantEncode + "/checksum": {Mode: fs.ModeDir | 0500},
"checksum/" + wantEncode + "/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP": {Mode: fs.ModeDir | 0500},
"checksum/" + wantEncode + "/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/check": {Mode: 0400, Data: []byte{0, 0}},
"checksum/" + wantEncode + "/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib": {Mode: fs.ModeDir | 0500},
"checksum/" + wantEncode + "/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
"checksum/" + wantEncode + "/checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP/lib/pkgconfig": {Mode: fs.ModeDir | 0500},
"checksum/" + wantEncode + "/identifier": {Mode: fs.ModeDir | 0500},
"checksum/" + wantEncode + "/identifier/HnySzeLQvSBZuTUcvfmLEX_OmH4yJWWH788NxuLuv7kVn8_uPM6Ks4rqFWM2NZJY": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
"checksum/" + wantEncode + "/identifier/Zx5ZG9BAwegNT3zQwCySuI2ktCXxNgxirkGLFjW4FW06PtojYVaCdtEw8yuntPLa": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/1TL00Qb8dcqayX7wTO8WNaraHvY6b-KCsctLDTrb64QBCmxj_-byK1HdIUwMaFEP")},
"checksum/" + wantEncode + "/work": {Mode: fs.ModeDir | 0500},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/W5S65DEhawz_WKaok5NjUKLmnD9dNl5RPauNJjcOVcB3VM4eGhSaLGmXbL8vZpiw": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/" + wantEncode)},
"identifier/rg7F1D5hwv6o4xctjD5zDq4i5MD0mArTsUIWfhUbik8xC6Bsyt3mjXXOm3goojTz": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/" + wantEncode)},
"substitute": {Mode: fs.ModeDir | 0700},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
{"http expand", 0, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) { {"http expand", 0, nil, func(t *testing.T, base *check.Absolute, c *pkg.Cache) {
checkTarHTTP(t, base, c, fstest.MapFS{ checkTarHTTP(t, base, c, fstest.MapFS{
@@ -95,23 +48,10 @@ func TestTar(t *testing.T) {
"lib": {Mode: fs.ModeDir | 0700}, "lib": {Mode: fs.ModeDir | 0700},
"lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")}, "lib/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
}, wantExpand) }, pkg.MustDecode(
}, expectsFS{ "CH3AiUrCCcVOjOYLaMKKK1Da78989JtfHeIsxMzWOQFiN4mrCLDYpoDxLWqJWCUN",
".": {Mode: fs.ModeDir | 0700}, ))
}, pkg.MustDecode("hSoSSgCYTNonX3Q8FjvjD1fBl-E-BQyA6OTXro2OadXqbST4tZ-akGXszdeqphRe")},
"checksum": {Mode: fs.ModeDir | 0700},
"checksum/" + wantExpandEncode: {Mode: fs.ModeDir | 0500},
"checksum/" + wantExpandEncode + "/libedac.so": {Mode: fs.ModeSymlink | 0777, Data: []byte("/proc/nonexistent/libedac.so")},
"identifier": {Mode: fs.ModeDir | 0700},
"identifier/W5S65DEhawz_WKaok5NjUKLmnD9dNl5RPauNJjcOVcB3VM4eGhSaLGmXbL8vZpiw": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/" + wantExpandEncode)},
"identifier/_v1blm2h-_KA-dVaawdpLas6MjHc6rbhhFS8JWwx8iJxZGUu8EBbRrhr5AaZ9PJL": {Mode: fs.ModeSymlink | 0777, Data: []byte("../checksum/" + wantExpandEncode)},
"substitute": {Mode: fs.ModeDir | 0700},
"temp": {Mode: fs.ModeDir | 0700},
"work": {Mode: fs.ModeDir | 0700},
}},
}) })
} }
@@ -120,7 +60,7 @@ func checkTarHTTP(
base *check.Absolute, base *check.Absolute,
c *pkg.Cache, c *pkg.Cache,
testdataFsys fs.FS, testdataFsys fs.FS,
want expectsKnown, wantChecksum pkg.Checksum,
) { ) {
var testdata string var testdata string
{ {
@@ -254,24 +194,24 @@ func checkTarHTTP(
{"file", a, base.Append( {"file", a, base.Append(
"identifier", "identifier",
pkg.Encode(wantIdent), pkg.Encode(wantIdent),
), want, nil}, ), wantChecksum, nil},
{"directory", pkg.NewTar( {"directory", pkg.NewTar(
&tarDir, &tarDir,
pkg.TarGzip, pkg.TarGzip,
), ignorePathname, want, nil}, ), ignorePathname, wantChecksum, nil},
{"multiple entries", pkg.NewTar( {"multiple entries", pkg.NewTar(
&tarDirMulti, &tarDirMulti,
pkg.TarGzip, pkg.TarGzip,
), nil, nil, errors.New( ), nil, pkg.Checksum{}, errors.New(
"input directory does not contain a single regular file", "input directory does not contain a single regular file",
)}, )},
{"bad type", pkg.NewTar( {"bad type", pkg.NewTar(
&tarDirType, &tarDirType,
pkg.TarGzip, pkg.TarGzip,
), nil, nil, errors.New( ), nil, pkg.Checksum{}, errors.New(
"input directory does not contain a single regular file", "input directory does not contain a single regular file",
)}, )},
@@ -281,6 +221,6 @@ func checkTarHTTP(
cure: func(t *pkg.TContext) error { cure: func(t *pkg.TContext) error {
return stub.UniqueError(0xcafe) return stub.UniqueError(0xcafe)
}, },
}, pkg.TarGzip), nil, nil, stub.UniqueError(0xcafe)}, }, pkg.TarGzip), nil, pkg.Checksum{}, stub.UniqueError(0xcafe)},
}) })
} }

View File

@@ -14,29 +14,15 @@ import (
"strconv" "strconv"
"strings" "strings"
"hakurei.app/check"
"hakurei.app/fhs" "hakurei.app/fhs"
"hakurei.app/vfs" "hakurei.app/vfs"
"hakurei.app/internal/pkg/internal/testtool/expected"
) )
func main() { func main() {
log.SetFlags(0) log.SetFlags(0)
log.SetPrefix("testtool: ") log.SetPrefix("testtool: ")
if os.Getenv("HAKUREI_BINFMT") == "1" {
wantArgs := []string{"/interpreter", "/opt/bin/sample"}
if !slices.Equal(os.Args, wantArgs) {
log.Fatalf("Args: %q, want %q", os.Args, wantArgs)
}
if err := os.WriteFile("check", []byte("binfmt"), 0400); err != nil {
log.Fatal(err)
}
return
}
environ := slices.DeleteFunc(slices.Clone(os.Environ()), func(s string) bool { environ := slices.DeleteFunc(slices.Clone(os.Environ()), func(s string) bool {
return s == "CURE_JOBS="+strconv.Itoa(runtime.NumCPU()) return s == "CURE_JOBS="+strconv.Itoa(runtime.NumCPU())
}) })
@@ -162,40 +148,59 @@ func main() {
} }
const checksumEmptyDir = "MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU" const checksumEmptyDir = "MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU"
ident := expected.Offline ident := "dztPS6jRjiZtCF4_p8AzfnxGp6obkhrgFVsxdodbKWUoAEVtDz3MykepJB4kI_ks"
log.Println(m) log.Println(m)
next := func() { m = m.Next; log.Println(m) } next := func() { m = m.Next; log.Println(m) }
if overlayRoot { if overlayRoot {
ident = expected.OvlRoot ident = "RdMA-mubnrHuu3Ky1wWyxauSYCO0ZH_zCPUj3uDHqkfwv5sGcByoF_g5PjlGiClb"
if m.Root != "/" || m.Target != "/" || if m.Root != "/" || m.Target != "/" ||
m.Source != "overlay" || m.FsType != "overlay" { m.Source != "overlay" || m.FsType != "overlay" {
log.Fatal("unexpected root mount entry") log.Fatal("unexpected root mount entry")
} }
var lowerdir []string var lowerdir string
for _, o := range strings.Split(m.FsOptstr, ",") { for _, o := range strings.Split(m.FsOptstr, ",") {
const lowerdirKey = "lowerdir+=" const lowerdirKey = "lowerdir="
if strings.HasPrefix(o, lowerdirKey) { if strings.HasPrefix(o, lowerdirKey) {
lowerdir = append(lowerdir, o[len(lowerdirKey):]) lowerdir = o[len(lowerdirKey):]
} }
} }
if !layers { if !layers {
if len(lowerdir) != 1 || filepath.Base(lowerdir[0]) != checksumEmptyDir { if filepath.Base(lowerdir) != checksumEmptyDir {
log.Fatal("unexpected artifact checksum") log.Fatal("unexpected artifact checksum")
} }
} else { } else {
ident = expected.Layers ident = "p1t_drXr34i-jZNuxDMLaMOdL6tZvQqhavNafGynGqxOZoXAUTSn7kqNh3Ovv3DT"
if len(lowerdir) != 2 || lowerdirsEscaped := strings.Split(lowerdir, ":")
filepath.Base(lowerdir[0]) != "MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU" || lowerdirs := lowerdirsEscaped[:0]
filepath.Base(lowerdir[1]) != "nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK" { // ignore the option separator since it does not appear in ident
log.Fatalf("unexpected lowerdirs %s", strings.Join(lowerdir, ", ")) for i, e := range lowerdirsEscaped {
if len(e) > 0 &&
e[len(e)-1] == check.SpecialOverlayEscape[0] &&
(len(e) == 1 || e[len(e)-2] != check.SpecialOverlayEscape[0]) {
// ignore escaped pathname separator since it does not
// appear in ident
e = e[:len(e)-1]
if len(lowerdirsEscaped) != i {
lowerdirsEscaped[i+1] = e + lowerdirsEscaped[i+1]
continue
}
}
lowerdirs = append(lowerdirs, e)
}
if len(lowerdirs) != 2 ||
filepath.Base(lowerdirs[0]) != "MGWmEfjut2QE2xPJwTsmUzpff4BN_FEnQ7T0j7gvUCCiugJQNwqt9m151fm9D1yU" ||
filepath.Base(lowerdirs[1]) != "nY_CUdiaUM1OL4cPr5TS92FCJ3rCRV7Hm5oVTzAvMXwC03_QnTRfQ5PPs7mOU9fK" {
log.Fatalf("unexpected lowerdirs %s", strings.Join(lowerdirs, ", "))
} }
} }
} else { } else {
if hostNet { if hostNet {
ident = expected.Net ident = "G8qPxD9puvvoOVV7lrT80eyDeIl3G_CCFoKw12c8mCjMdG1zF7NEPkwYpNubClK3"
} }
if m.Root != "/sysroot" || m.Target != "/" { if m.Root != "/sysroot" || m.Target != "/" {
@@ -214,14 +219,14 @@ func main() {
} }
if promote { if promote {
ident = expected.Promote ident = "xXTIYcXmgJWNLC91c417RRrNM9cjELwEZHpGvf8Fk_GNP5agRJp_SicD0w9aMeLJ"
} }
next() // testtool artifact next() // testtool artifact
next() next()
if overlayWork { if overlayWork {
ident = expected.Work ident = "5hlaukCirnXE4W_RSLJFOZN47Z5RiHnacXzdFp_70cLgiJUGR6cSb_HaFftkzi0-"
if m.Root != "/" || m.Target != "/work" || if m.Root != "/" || m.Target != "/work" ||
m.Source != "overlay" || m.FsType != "overlay" { m.Source != "overlay" || m.FsType != "overlay" {
log.Fatal("unexpected work mount entry") log.Fatal("unexpected work mount entry")

View File

@@ -6,7 +6,6 @@ import (
"errors" "errors"
"fmt" "fmt"
"net/http" "net/http"
"runtime"
"strconv" "strconv"
"sync" "sync"
@@ -17,7 +16,9 @@ import (
type PArtifact int type PArtifact int
const ( const (
LLVM PArtifact = iota CompilerRT PArtifact = iota
LLVMRuntimes
Clang
// EarlyInit is the Rosa OS init program. // EarlyInit is the Rosa OS init program.
EarlyInit EarlyInit
@@ -56,8 +57,6 @@ const (
Fakeroot Fakeroot
Findutils Findutils
Flex Flex
FontUtil
Freetype
Fuse Fuse
GMP GMP
GLib GLib
@@ -73,63 +72,35 @@ const (
Gzip Gzip
Hakurei Hakurei
HakureiDist HakureiDist
Hwdata
IPTables IPTables
Kmod Kmod
LIT
LibX11
LibXau LibXau
LibXdmcp
LibXext
LibXfixes
LibXfont2
LibXrandr
LibXrender
LibXxf86vm
Libarchive
Libbsd Libbsd
Libcap Libcap
Libconfig Libclc
LibdisplayInfo
Libdrm Libdrm
Libepoxy
Libev Libev
Libexpat Libexpat
Libffi Libffi
Libfontenc
Libgd Libgd
Libglvnd Libglvnd
Libiconv Libiconv
Libmd Libmd
Libmnl Libmnl
Libnftnl
Libpciaccess Libpciaccess
Libpng Libnftnl
Libpsl Libpsl
Libseccomp Libseccomp
Libtasn1 Libtasn1
Libtirpc
Libtool Libtool
Libucontext Libucontext
Libunistring Libunistring
Libva
LibxcbRenderUtil
LibxcbUtil
LibxcbUtilImage
LibxcbUtilKeysyms
LibxcbUtilWM
Libxcvt
Libxkbfile
Libxml2 Libxml2
Libxshmfence
Libxslt Libxslt
Libxtrans
LMSensors
M4 M4
MPC MPC
MPFR MPFR
Make Make
Mesa
Meson Meson
Mksh Mksh
MuslFts MuslFts
@@ -151,39 +122,28 @@ const (
PerlPodParser PerlPodParser
PerlSGMLS PerlSGMLS
PerlTermReadKey PerlTermReadKey
PerlTestCmd
PerlTextCharWidth PerlTextCharWidth
PerlTextWrapI18N PerlTextWrapI18N
PerlUnicodeLineBreak PerlUnicodeLineBreak
PerlYAMLTiny PerlYAMLTiny
Pixman
PkgConfig PkgConfig
Procps Procps
Python Python
PythonFlitCore
PythonHatchling
PythonIniConfig PythonIniConfig
PythonMako PythonMako
PythonMarkupSafe PythonMarkupSafe
PythonPackaging PythonPackaging
PythonPathspec
PythonPluggy PythonPluggy
PythonPyTest PythonPyTest
PythonPyYAML PythonPyYAML
PythonPycparser
PythonPygments PythonPygments
PythonSetuptools
PythonSetuptoolsSCM
PythonTroveClassifiers
PythonVCSVersioning
PythonWheel
QEMU QEMU
Rdfind Rdfind
Readline Readline
Rsync Rsync
Sed Sed
Setuptools
SPIRVHeaders SPIRVHeaders
SPIRVLLVMTranslator
SPIRVTools SPIRVTools
SquashfsTools SquashfsTools
Strace Strace
@@ -194,44 +154,25 @@ const (
toyboxEarly toyboxEarly
Unzip Unzip
UtilLinux UtilLinux
VIM
Wayland Wayland
WaylandProtocols WaylandProtocols
XCB XCB
XCBProto XCBProto
XDGDBusProxy XDGDBusProxy
XZ XZ
Xkbcomp Xproto
XkeyboardConfig
XorgProto
Xserver
Zlib Zlib
Zstd Zstd
// PresetUnexportedStart is the first unexported preset. // PresetUnexportedStart is the first unexported preset.
PresetUnexportedStart PresetUnexportedStart
stage0Dist = iota - 1 llvmSource = iota - 1
llvmSource
// earlyCompilerRT is an early, standalone compiler-rt installation for the
// standalone runtimes build.
//
// earlyCompilerRT must only be loaded by [LLVM].
earlyCompilerRT
// earlyRuntimes is an early, standalone installation of LLVM runtimes to
// work around the cmake build system leaking the system LLVM installation
// when invoking the newly built toolchain.
//
// earlyRuntimes must only be loaded by [LLVM].
earlyRuntimes
buildcatrust buildcatrust
utilMacros utilMacros
// Musl is a standalone libc that does not depend on the toolchain. // Musl is a standalone libc that does not depend on the toolchain.
Musl Musl
// muslHeaders is a system installation of [Musl] headers.
muslHeaders
// gcc is a hacked-to-pieces GCC toolchain meant for use in intermediate // gcc is a hacked-to-pieces GCC toolchain meant for use in intermediate
// stages only. This preset and its direct output must never be exposed. // stages only. This preset and its direct output must never be exposed.
@@ -376,40 +317,15 @@ var (
} }
// artifactsOnce is for lazy initialisation of artifacts. // artifactsOnce is for lazy initialisation of artifacts.
artifactsOnce [_toolchainEnd][len(artifactsM)]sync.Once artifactsOnce [_toolchainEnd][len(artifactsM)]sync.Once
// arch is the target architecture.
arch = runtime.GOARCH
// presetOpts globally modifies behaviour of presets.
presetOpts int
) )
const (
// OptSkipCheck skips running all test suites.
OptSkipCheck = 1 << iota
// OptLLVMNoLTO disables LTO in all [LLVM] stages.
OptLLVMNoLTO
)
// Arch returns the target architecture.
func Arch() string { return arch }
// Flags returns the current preset flags
func Flags() int { return presetOpts }
// zero zeros the value pointed to by p. // zero zeros the value pointed to by p.
func zero[T any](p *T) { var v T; *p = v } func zero[T any](p *T) { var v T; *p = v }
// DropCaches arranges for all cached [pkg.Artifact] to be freed some time after // DropCaches arranges for all cached [pkg.Artifact] to be freed some time after
// it returns. Must not be used concurrently with any other function from this // it returns. Must not be used concurrently with any other function from this
// package. // package.
func DropCaches(targetArch string, flags int) { func DropCaches() {
if targetArch == "" {
targetArch = runtime.GOARCH
}
arch = targetArch
presetOpts = flags
zero(&artifacts) zero(&artifacts)
zero(&artifactsOnce) zero(&artifactsOnce)
} }

View File

@@ -20,16 +20,13 @@ func TestLoad(t *testing.T) {
} }
func BenchmarkAll(b *testing.B) { func BenchmarkAll(b *testing.B) {
arch, flags := rosa.Arch(), rosa.Flags()
b.Cleanup(func() { rosa.DropCaches(arch, flags) })
for b.Loop() { for b.Loop() {
for i := range rosa.PresetEnd { for i := range rosa.PresetEnd {
rosa.Std.Load(rosa.PArtifact(i)) rosa.Std.Load(rosa.PArtifact(i))
} }
b.StopTimer() b.StopTimer()
rosa.DropCaches("", 0) rosa.DropCaches()
b.StartTimer() b.StartTimer()
} }
} }

View File

@@ -5,6 +5,7 @@ import (
"io" "io"
"net/http" "net/http"
"os" "os"
"runtime"
"time" "time"
"hakurei.app/fhs" "hakurei.app/fhs"
@@ -87,7 +88,7 @@ func (a busyboxBin) Cure(t *pkg.TContext) (err error) {
// the https://busybox.net/downloads/binaries/ binary release. // the https://busybox.net/downloads/binaries/ binary release.
func newBusyboxBin() pkg.Artifact { func newBusyboxBin() pkg.Artifact {
var version, url, checksum string var version, url, checksum string
switch arch { switch runtime.GOARCH {
case "amd64": case "amd64":
version = "1.35.0" version = "1.35.0"
url = "https://busybox.net/downloads/binaries/" + url = "https://busybox.net/downloads/binaries/" +
@@ -100,11 +101,11 @@ func newBusyboxBin() pkg.Artifact {
checksum = "npJjBO7iwhjW6Kx2aXeSxf8kXhVgTCDChOZTTsI8ZfFfa3tbsklxRiidZQdrVERg" checksum = "npJjBO7iwhjW6Kx2aXeSxf8kXhVgTCDChOZTTsI8ZfFfa3tbsklxRiidZQdrVERg"
default: default:
panic("unsupported target " + arch) panic("unsupported target " + runtime.GOARCH)
} }
return pkg.NewExec( return pkg.NewExec(
"busybox-bin-"+version, arch, nil, pkg.ExecTimeoutMax, false, false, "busybox-bin-"+version, nil, pkg.ExecTimeoutMax, false,
fhs.AbsRoot, []string{ fhs.AbsRoot, []string{
"PATH=/system/bin", "PATH=/system/bin",
}, },

View File

@@ -10,8 +10,8 @@ import (
func (t Toolchain) newCMake() (pkg.Artifact, string) { func (t Toolchain) newCMake() (pkg.Artifact, string) {
const ( const (
version = "4.3.2" version = "4.3.1"
checksum = "6QylwRVKletndTSkZTV2YBRwgd_9rUVgav_QW23HpjUgV21AVYZOUOal8tdBDmO7" checksum = "RHpzZiM1kJ5bwLjo9CpXSeHJJg3hTtV9QxBYpQoYwKFtRh5YhGWpShrqZCSOzQN6"
) )
return t.NewPackage("cmake", version, newFromGitHubRelease( return t.NewPackage("cmake", version, newFromGitHubRelease(
"Kitware/CMake", "Kitware/CMake",
@@ -122,18 +122,11 @@ type CMakeHelper struct {
// Path elements joined with source. // Path elements joined with source.
Append []string Append []string
// Value of CMAKE_BUILD_TYPE. The zero value is equivalent to "Release".
BuildType string
// CMake CACHE entries. // CMake CACHE entries.
Cache []KV Cache []KV
// Runs after install. // Runs after install.
Script string Script string
// Replaces the default test command.
Test string
// Whether to skip running tests.
SkipTest bool
// Whether to generate Makefile instead. // Whether to generate Makefile instead.
Make bool Make bool
} }
@@ -166,30 +159,20 @@ func (*CMakeHelper) wantsDir() string { return "/cure/" }
// script generates the cure script. // script generates the cure script.
func (attr *CMakeHelper) script(name string) string { func (attr *CMakeHelper) script(name string) string {
if attr == nil { if attr == nil {
attr = new(CMakeHelper) attr = &CMakeHelper{
Cache: []KV{
{"CMAKE_BUILD_TYPE", "Release"},
},
}
}
if len(attr.Cache) == 0 {
panic("CACHE must be non-empty")
} }
generate := "Ninja" generate := "Ninja"
test := "ninja " + jobsFlagE + " test"
if attr.Make { if attr.Make {
generate = "'Unix Makefiles'" generate = "'Unix Makefiles'"
test = "make " + jobsFlagE + " test"
} }
if attr.Test != "" {
test = attr.Test
}
script := attr.Script
if !attr.SkipTest && presetOpts&OptSkipCheck == 0 {
script += "\n" + test
}
cache := make([]KV, 1, 1+len(attr.Cache))
cache[0] = KV{"CMAKE_BUILD_TYPE", "Release"}
if attr.BuildType != "" {
cache[0][1] = attr.BuildType
}
cache = append(cache, attr.Cache...)
return ` return `
cmake -G ` + generate + ` \ cmake -G ` + generate + ` \
@@ -198,7 +181,7 @@ cmake -G ` + generate + ` \
-DCMAKE_ASM_COMPILER_TARGET="${ROSA_TRIPLE}" \ -DCMAKE_ASM_COMPILER_TARGET="${ROSA_TRIPLE}" \
-DCMAKE_INSTALL_LIBDIR=lib \ -DCMAKE_INSTALL_LIBDIR=lib \
` + strings.Join(slices.Collect(func(yield func(string) bool) { ` + strings.Join(slices.Collect(func(yield func(string) bool) {
for _, v := range cache { for _, v := range attr.Cache {
if !yield("-D" + v[0] + "=" + v[1]) { if !yield("-D" + v[0] + "=" + v[1]) {
return return
} }
@@ -208,5 +191,5 @@ cmake -G ` + generate + ` \
'/usr/src/` + name + `/` + filepath.Join(attr.Append...) + `' '/usr/src/` + name + `/` + filepath.Join(attr.Append...) + `'
cmake --build . --parallel=` + jobsE + ` cmake --build . --parallel=` + jobsE + `
cmake --install . --prefix=/work/system cmake --install . --prefix=/work/system
` + script ` + attr.Script
} }

View File

@@ -4,8 +4,8 @@ import "hakurei.app/internal/pkg"
func (t Toolchain) newCurl() (pkg.Artifact, string) { func (t Toolchain) newCurl() (pkg.Artifact, string) {
const ( const (
version = "8.20.0" version = "8.19.0"
checksum = "xyHXwrngIRGMasuzhn-I5MSCOhktwINbsWt1f_LuR-5jRVvyx_g6U1EQfDLEbr9r" checksum = "YHuVLVVp8q_Y7-JWpID5ReNjq2Zk6t7ArHB6ngQXilp_R5l3cubdxu3UKo-xDByv"
) )
return t.NewPackage("curl", version, newTar( return t.NewPackage("curl", version, newTar(
"https://curl.se/download/curl-"+version+".tar.bz2", "https://curl.se/download/curl-"+version+".tar.bz2",

View File

@@ -1,27 +0,0 @@
package rosa
import "hakurei.app/internal/pkg"
func (t Toolchain) newFreetype() (pkg.Artifact, string) {
const (
version = "2.14.3"
checksum = "-WfLv8fVJNyCHpP_lriiDzOcVbBL9ajdQ3tl8AzIIUa9-8sVpU9irxOmSMgRHWYz"
)
return t.NewPackage("freetype", version, newTar(
"https://download.savannah.gnu.org/releases/freetype/"+
"freetype-"+version+".tar.gz",
checksum,
pkg.TarGzip,
), nil, (*MakeHelper)(nil)), version
}
func init() {
artifactsM[Freetype] = Metadata{
f: Toolchain.newFreetype,
Name: "freetype",
Description: "a freely available software library to render fonts",
Website: "http://www.freetype.org/",
ID: 854,
}
}

View File

@@ -9,8 +9,8 @@ import (
func (t Toolchain) newGit() (pkg.Artifact, string) { func (t Toolchain) newGit() (pkg.Artifact, string) {
const ( const (
version = "2.54.0" version = "2.53.0"
checksum = "7vGKtFOJGqY8DO4e8UMRax7dLgImXKQz5MMalec6MlgYrsarffSJjgOughwRFpSH" checksum = "rlqSTeNgSeVKJA7nvzGqddFH8q3eFEPB4qRZft-4zth8wTHnbTbm7J90kp_obHGm"
) )
return t.NewPackage("git", version, newTar( return t.NewPackage("git", version, newTar(
"https://www.kernel.org/pub/software/scm/git/"+ "https://www.kernel.org/pub/software/scm/git/"+
@@ -20,9 +20,6 @@ func (t Toolchain) newGit() (pkg.Artifact, string) {
), &PackageAttr{ ), &PackageAttr{
ScriptEarly: ` ScriptEarly: `
ln -s ../../system/bin/perl /usr/bin/ || true ln -s ../../system/bin/perl /usr/bin/ || true
# test suite assumes apache
rm -f /system/bin/httpd
`, `,
// uses source tree as scratch space // uses source tree as scratch space
@@ -41,7 +38,6 @@ function disable_test {
fi fi
} }
disable_test t1800-hook
disable_test t5319-multi-pack-index disable_test t5319-multi-pack-index
disable_test t1305-config-include disable_test t1305-config-include
disable_test t3900-i18n-commit disable_test t3900-i18n-commit
@@ -55,14 +51,6 @@ disable_test t9300-fast-import
disable_test t0211-trace2-perf disable_test t0211-trace2-perf
disable_test t1517-outside-repo disable_test t1517-outside-repo
disable_test t2200-add-update disable_test t2200-add-update
disable_test t0027-auto-crlf
disable_test t7513-interpret-trailers
disable_test t7703-repack-geometric
disable_test t7002-mv-sparse-checkout
disable_test t1451-fsck-buffer
disable_test t4104-apply-boundary
disable_test t4200-rerere
disable_test t5515-fetch-merge-logic
`, `,
Check: []string{ Check: []string{
"-C t", "-C t",
@@ -75,9 +63,6 @@ disable_test t5515-fetch-merge-logic
NO_INSTALL_HARDLINKS=1 \ NO_INSTALL_HARDLINKS=1 \
install`, install`,
}, },
// test suite hangs on mksh
Bash,
Diffutils, Diffutils,
Autoconf, Autoconf,
Gettext, Gettext,
@@ -113,7 +98,7 @@ func (t Toolchain) NewViaGit(
return t.New(strings.TrimSuffix( return t.New(strings.TrimSuffix(
path.Base(url), path.Base(url),
".git", ".git",
)+"-src-"+path.Base(rev), THostNet, t.AppendPresets(nil, )+"-src-"+path.Base(rev), 0, t.AppendPresets(nil,
NSSCACert, NSSCACert,
Git, Git,
), &checksum, nil, ` ), &checksum, nil, `

View File

@@ -17,8 +17,9 @@ func (t Toolchain) newSPIRVHeaders() (pkg.Artifact, string) {
"vulkan-sdk-"+version, "vulkan-sdk-"+version,
checksum, checksum,
), nil, &CMakeHelper{ ), nil, &CMakeHelper{
// upstream has no tests Cache: []KV{
SkipTest: true, {"CMAKE_BUILD_TYPE", "Release"},
},
}), version }), version
} }
func init() { func init() {
@@ -63,6 +64,7 @@ func (t Toolchain) newSPIRVTools() (pkg.Artifact, string) {
checksum, checksum,
), nil, &CMakeHelper{ ), nil, &CMakeHelper{
Cache: []KV{ Cache: []KV{
{"CMAKE_BUILD_TYPE", "Release"},
{"SPIRV-Headers_SOURCE_DIR", "/system"}, {"SPIRV-Headers_SOURCE_DIR", "/system"},
}, },
}, },
@@ -84,15 +86,13 @@ func init() {
}, },
ID: 14894, ID: 14894,
latest: (*Versions).getStable,
} }
} }
func (t Toolchain) newGlslang() (pkg.Artifact, string) { func (t Toolchain) newGlslang() (pkg.Artifact, string) {
const ( const (
version = "16.3.0" version = "16.2.0"
checksum = "xyqDf8k3-D0_BXHGi0uLgMglnJ05Rf3j73QgbDs3sGtKNdBIQhY8JfqX1NcNoJQN" checksum = "6_UuF9reLRDaVkgO-9IfB3kMwme3lQZM8LL8YsJwPdUFkrjzxJtf2A9X3w9nFxj2"
) )
return t.NewPackage("glslang", version, newFromGitHub( return t.NewPackage("glslang", version, newFromGitHub(
"KhronosGroup/glslang", "KhronosGroup/glslang",
@@ -104,9 +104,11 @@ func (t Toolchain) newGlslang() (pkg.Artifact, string) {
Chmod: true, Chmod: true,
}, &CMakeHelper{ }, &CMakeHelper{
Cache: []KV{ Cache: []KV{
{"CMAKE_BUILD_TYPE", "Release"},
{"BUILD_SHARED_LIBS", "ON"}, {"BUILD_SHARED_LIBS", "ON"},
{"ALLOW_EXTERNAL_SPIRV_TOOLS", "ON"}, {"ALLOW_EXTERNAL_SPIRV_TOOLS", "ON"},
}, },
Script: "ctest",
}, },
Python, Python,
Bash, Bash,
@@ -126,123 +128,3 @@ func init() {
ID: 205796, ID: 205796,
} }
} }
func (t Toolchain) newSPIRVLLVMTranslator() (pkg.Artifact, string) {
const (
version = "22.1.2"
checksum = "JZAaV5ewYcm-35YA_U2BM2IcsQouZtX1BLZR0zh2vSlfEXMsT5OCtY4Gh5RJkcGy"
)
skipChecks := []string{
// error: line 13: OpTypeCooperativeMatrixKHR Scope is limited to Workgroup and Subgroup
"cooperative_matrix_constant_null.spvasm",
}
switch arch {
case "arm64":
skipChecks = append(skipChecks,
// LLVM ERROR: unsupported calling convention
"DebugInfo/COFF/no-cus.ll",
"DebugInfo/Generic/2009-11-05-DeadGlobalVariable.ll",
"DebugInfo/Generic/2009-11-10-CurrentFn.ll",
"DebugInfo/Generic/2010-01-05-DbgScope.ll",
"DebugInfo/Generic/2010-03-12-llc-crash.ll",
"DebugInfo/Generic/2010-03-24-MemberFn.ll",
"DebugInfo/Generic/2010-04-19-FramePtr.ll",
"DebugInfo/Generic/2010-06-29-InlinedFnLocalVar.ll",
"DebugInfo/Generic/2010-10-01-crash.ll",
"DebugInfo/Generic/PR20038.ll",
"DebugInfo/Generic/constant-pointers.ll",
"DebugInfo/Generic/dead-argument-order.ll",
"DebugInfo/Generic/debug-info-eis-option.ll",
"DebugInfo/Generic/def-line.ll",
"DebugInfo/Generic/discriminator.ll",
"DebugInfo/Generic/dwarf-public-names.ll",
"DebugInfo/Generic/enum.ll",
"DebugInfo/Generic/func-using-decl.ll",
"DebugInfo/Generic/global.ll",
"DebugInfo/Generic/imported-name-inlined.ll",
"DebugInfo/Generic/incorrect-variable-debugloc1.ll",
"DebugInfo/Generic/inline-scopes.ll",
"DebugInfo/Generic/inlined-arguments.ll",
"DebugInfo/Generic/inlined-vars.ll",
"DebugInfo/Generic/linear-dbg-value.ll",
"DebugInfo/Generic/linkage-name-abstract.ll",
"DebugInfo/Generic/member-order.ll",
"DebugInfo/Generic/missing-abstract-variable.ll",
"DebugInfo/Generic/multiline.ll",
"DebugInfo/Generic/namespace_function_definition.ll",
"DebugInfo/Generic/namespace_inline_function_definition.ll",
"DebugInfo/Generic/noscopes.ll",
"DebugInfo/Generic/ptrsize.ll",
"DebugInfo/Generic/restrict.ll",
"DebugInfo/Generic/two-cus-from-same-file.ll",
"DebugInfo/Generic/version.ll",
"DebugInfo/LocalAddressSpace.ll",
"DebugInfo/UnknownBaseType.ll",
"DebugInfo/expr-opcode.ll",
)
}
return t.NewPackage("spirv-llvm-translator", version, newFromGitHub(
"KhronosGroup/SPIRV-LLVM-Translator",
"v"+version, checksum,
), &PackageAttr{
Patches: []KV{
{"remove-early-prefix", `diff --git a/CMakeLists.txt b/CMakeLists.txt
index c000a77e..f18f3fde 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -164,7 +164,7 @@ install(
${LLVM_SPIRV_INCLUDE_DIRS}/LLVMSPIRVOpts.h
${LLVM_SPIRV_INCLUDE_DIRS}/LLVMSPIRVExtensions.inc
DESTINATION
- ${CMAKE_INSTALL_PREFIX}/include/LLVMSPIRVLib
+ include/LLVMSPIRVLib
)
configure_file(LLVMSPIRVLib.pc.in ${CMAKE_BINARY_DIR}/LLVMSPIRVLib.pc @ONLY)
@@ -172,5 +172,5 @@ install(
FILES
${CMAKE_BINARY_DIR}/LLVMSPIRVLib.pc
DESTINATION
- ${CMAKE_INSTALL_PREFIX}/lib${LLVM_LIBDIR_SUFFIX}/pkgconfig
+ lib${LLVM_LIBDIR_SUFFIX}/pkgconfig
)
;`},
},
// litArgs emits shell syntax
ScriptEarly: `
export LIT_OPTS=` + litArgs(true, skipChecks...) + `
`,
}, &CMakeHelper{
Cache: []KV{
{"CMAKE_SKIP_BUILD_RPATH", "ON"},
{"BUILD_SHARED_LIBS", "ON"},
{"LLVM_SPIRV_ENABLE_LIBSPIRV_DIS", "ON"},
{"LLVM_EXTERNAL_SPIRV_HEADERS_SOURCE_DIR", "/system"},
{"LLVM_EXTERNAL_LIT", "/system/bin/lit"},
{"LLVM_INCLUDE_TESTS", "ON"},
},
},
Bash,
LIT,
SPIRVTools,
), version
}
func init() {
artifactsM[SPIRVLLVMTranslator] = Metadata{
f: Toolchain.newSPIRVLLVMTranslator,
Name: "spirv-llvm-translator",
Description: "bi-directional translation between SPIR-V and LLVM IR",
Website: "https://github.com/KhronosGroup/SPIRV-LLVM-Translator",
Dependencies: P{
SPIRVTools,
},
ID: 227273,
}
}

View File

@@ -1,47 +1,11 @@
package rosa package rosa
import ( import (
"slices" "runtime"
"strconv"
"strings"
"hakurei.app/internal/pkg" "hakurei.app/internal/pkg"
) )
// skipGNUTests generates a string for skipping specific tests by number in a
// GNU test suite. This is nontrivial because the test suite does not support
// excluding tests in any way, so ranges for all but the skipped tests have to
// be specified instead.
//
// For example, to skip test 764, ranges around the skipped test must be
// specified:
//
// 1-763 765-
//
// Tests are numbered starting from 1. The resulting string is unquoted.
func skipGNUTests(tests ...int) string {
tests = slices.Clone(tests)
slices.Sort(tests)
var buf strings.Builder
if tests[0] != 1 {
buf.WriteString("1-")
}
for i, n := range tests {
if n != 1 && (i == 0 || tests[i-1] != n-1) {
buf.WriteString(strconv.Itoa(n - 1))
buf.WriteString(" ")
}
if i == len(tests)-1 || tests[i+1] != n+1 {
buf.WriteString(strconv.Itoa(n + 1))
buf.WriteString("-")
}
}
return buf.String()
}
func (t Toolchain) newM4() (pkg.Artifact, string) { func (t Toolchain) newM4() (pkg.Artifact, string) {
const ( const (
version = "1.4.21" version = "1.4.21"
@@ -83,15 +47,7 @@ func (t Toolchain) newBison() (pkg.Artifact, string) {
"https://ftpmirror.gnu.org/gnu/bison/bison-"+version+".tar.gz", "https://ftpmirror.gnu.org/gnu/bison/bison-"+version+".tar.gz",
checksum, checksum,
pkg.TarGzip, pkg.TarGzip,
), nil, &MakeHelper{ ), nil, (*MakeHelper)(nil),
Check: []string{
"TESTSUITEFLAGS=" + jobsFlagE + "' " + skipGNUTests(
// clang miscompiles (SIGILL)
764,
) + "'",
"check",
},
},
M4, M4,
Diffutils, Diffutils,
Sed, Sed,
@@ -111,8 +67,8 @@ func init() {
func (t Toolchain) newSed() (pkg.Artifact, string) { func (t Toolchain) newSed() (pkg.Artifact, string) {
const ( const (
version = "4.10" version = "4.9"
checksum = "TXTRFQJCyflb-bpBRI2S5Y1DpplwvT7-KfXtpqN4AdZgZ5OtI6yStn1-bkhDKx51" checksum = "pe7HWH4PHNYrazOTlUoE1fXmhn2GOPFN_xE62i0llOr3kYGrH1g2_orDz0UtZ9Nt"
) )
return t.NewPackage("sed", version, newTar( return t.NewPackage("sed", version, newTar(
"https://ftpmirror.gnu.org/gnu/sed/sed-"+version+".tar.gz", "https://ftpmirror.gnu.org/gnu/sed/sed-"+version+".tar.gz",
@@ -120,8 +76,6 @@ func (t Toolchain) newSed() (pkg.Artifact, string) {
pkg.TarGzip, pkg.TarGzip,
), nil, (*MakeHelper)(nil), ), nil, (*MakeHelper)(nil),
Diffutils, Diffutils,
KernelHeaders,
), version ), version
} }
func init() { func init() {
@@ -230,9 +184,6 @@ func (t Toolchain) newLibtool() (pkg.Artifact, string) {
checksum, checksum,
pkg.TarGzip, pkg.TarGzip,
), nil, &MakeHelper{ ), nil, &MakeHelper{
// _Z2a2c: symbol not found
SkipCheck: t.isStage0(),
Check: []string{ Check: []string{
"TESTSUITEFLAGS=" + jobsFlagE, "TESTSUITEFLAGS=" + jobsFlagE,
"check", "check",
@@ -423,8 +374,8 @@ func init() {
func (t Toolchain) newCoreutils() (pkg.Artifact, string) { func (t Toolchain) newCoreutils() (pkg.Artifact, string) {
const ( const (
version = "9.11" version = "9.10"
checksum = "t8UMed5wpFEoC56aa42_yidfOAaRGzOfj7MRtQkkqgGbpXiskNA8bd-EmVSQkZie" checksum = "o-B9wssRnZySzJUI1ZJAgw-bZtj1RC67R9po2AcM2OjjS8FQIl16IRHpC6IwO30i"
) )
return t.NewPackage("coreutils", version, newTar( return t.NewPackage("coreutils", version, newTar(
"https://ftpmirror.gnu.org/gnu/coreutils/coreutils-"+version+".tar.gz", "https://ftpmirror.gnu.org/gnu/coreutils/coreutils-"+version+".tar.gz",
@@ -436,13 +387,106 @@ func (t Toolchain) newCoreutils() (pkg.Artifact, string) {
test_disable() { chmod +w "$2" && echo "$1" > "$2"; } test_disable() { chmod +w "$2" && echo "$1" > "$2"; }
test_disable '#!/bin/sh' gnulib-tests/test-c32ispunct.sh test_disable '#!/bin/sh' gnulib-tests/test-c32ispunct.sh
test_disable '#!/bin/sh' tests/split/line-bytes.sh
test_disable '#!/bin/sh' tests/ls/hyperlink.sh test_disable '#!/bin/sh' tests/ls/hyperlink.sh
test_disable '#!/bin/sh' tests/misc/user.sh
test_disable 'int main(){return 0;}' gnulib-tests/test-chown.c test_disable 'int main(){return 0;}' gnulib-tests/test-chown.c
test_disable 'int main(){return 0;}' gnulib-tests/test-fchownat.c test_disable 'int main(){return 0;}' gnulib-tests/test-fchownat.c
test_disable 'int main(){return 0;}' gnulib-tests/test-lchown.c test_disable 'int main(){return 0;}' gnulib-tests/test-lchown.c
`, `,
Patches: []KV{
{"tests-fix-job-control", `From 21d287324aa43aa3a31f39619ade0deac7fd6013 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?P=C3=A1draig=20Brady?= <P@draigBrady.com>
Date: Tue, 24 Feb 2026 15:44:41 +0000
Subject: [PATCH] tests: fix job control triggering test termination
This avoids the test harness being terminated like:
make[1]: *** [Makefile:24419: check-recursive] Hangup
make[3]: *** [Makefile:24668: check-TESTS] Hangup
make: *** [Makefile:24922: check] Hangup
make[2]: *** [Makefile:24920: check-am] Hangup
make[4]: *** [Makefile:24685: tests/misc/usage_vs_refs.log] Error 129
...
This happened sometimes when the tests were being run non interactively.
For example when run like:
setsid make TESTS="tests/timeout/timeout.sh \
tests/tail/overlay-headers.sh" SUBDIRS=. -j2 check
Note the race window can be made bigger by adding a sleep
after tail is stopped in overlay-headers.sh
The race can trigger the kernel to induce its job control
mechanism to prevent stuck processes.
I.e. where it sends SIGHUP + SIGCONT to a process group
when it determines that group may become orphaned,
and there are stopped processes in that group.
* tests/tail/overlay-headers.sh: Use setsid(1) to keep the stopped
tail process in a separate process group, thus avoiding any kernel
job control protection mechanism.
* tests/timeout/timeout.sh: Use setsid(1) to avoid the kernel
checking the main process group when sleep(1) is reparented.
Fixes https://bugs.gnu.org/80477
---
tests/tail/overlay-headers.sh | 8 +++++++-
tests/timeout/timeout.sh | 11 ++++++++---
2 files changed, 15 insertions(+), 4 deletions(-)
diff --git a/tests/tail/overlay-headers.sh b/tests/tail/overlay-headers.sh
index be9b6a7df..1e6da0a3f 100755
--- a/tests/tail/overlay-headers.sh
+++ b/tests/tail/overlay-headers.sh
@@ -20,6 +20,8 @@
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ tail sleep
+setsid true || skip_ 'setsid required to control groups'
+
# Function to count number of lines from tail
# while ignoring transient errors due to resource limits
countlines_ ()
@@ -54,7 +56,11 @@ echo start > file2 || framework_failure_
env sleep 60 & sleep=$!
# Note don't use timeout(1) here as it currently
-# does not propagate SIGCONT
+# does not propagate SIGCONT.
+# Note use setsid here to ensure we're in a separate process group
+# as we're going to STOP this tail process, and this can trigger
+# the kernel to send SIGHUP to a group if other tests have
+# processes that are reparented. (See tests/timeout/timeout.sh).
tail $fastpoll --pid=$sleep -f file1 file2 > out & pid=$!
# Ensure tail is running
diff --git a/tests/timeout/timeout.sh b/tests/timeout/timeout.sh
index 9a395416b..fbb043312 100755
--- a/tests/timeout/timeout.sh
+++ b/tests/timeout/timeout.sh
@@ -56,9 +56,14 @@ returns_ 124 timeout --foreground -s0 -k1 .1 sleep 10 && fail=1
) || fail=1
# Don't be confused when starting off with a child (Bug#9098).
-out=$(sleep .1 & exec timeout .5 sh -c 'sleep 2; echo foo')
-status=$?
-test "$out" = "" && test $status = 124 || fail=1
+# Use setsid to avoid sleep being in the test's process group, as
+# upon reparenting it can trigger an orphaned process group SIGHUP
+# (if there were stopped processes in other tests).
+if setsid true; then
+ out=$(setsid sleep .1 & exec timeout .5 sh -c 'sleep 2; echo foo')
+ status=$?
+ test "$out" = "" && test $status = 124 || fail=1
+fi
# Verify --verbose output
cat > exp <<\EOF
--
2.53.0
`},
},
Flag: TEarly, Flag: TEarly,
}, &MakeHelper{ }, &MakeHelper{
Configure: []KV{ Configure: []KV{
@@ -713,20 +757,15 @@ func init() {
func (t Toolchain) newParallel() (pkg.Artifact, string) { func (t Toolchain) newParallel() (pkg.Artifact, string) {
const ( const (
version = "20260422" version = "20260322"
checksum = "eTsepxgqhXpMEhPd55qh-W5y4vjKn0x9TD2mzbJCNZYtFf4lT4Wzoqr74HGJYBEH" checksum = "gHoPmFkOO62ev4xW59HqyMlodhjp8LvTsBOwsVKHUUdfrt7KwB8koXmSVqQ4VOrB"
) )
return t.NewPackage("parallel", version, newTar( return t.NewPackage("parallel", version, newTar(
"https://ftpmirror.gnu.org/gnu/parallel/parallel-"+version+".tar.bz2", "https://ftpmirror.gnu.org/gnu/parallel/parallel-"+version+".tar.bz2",
checksum, checksum,
pkg.TarBzip2, pkg.TarBzip2,
), &PackageAttr{ ), nil, (*MakeHelper)(nil),
ScriptEarly: `
ln -s ../system/bin/bash /bin/
`,
}, (*MakeHelper)(nil),
Perl, Perl,
Bash,
), version ), version
} }
func init() { func init() {
@@ -842,7 +881,7 @@ func (t Toolchain) newGnuTLS() (pkg.Artifact, string) {
) )
var configureExtra []KV var configureExtra []KV
switch arch { switch runtime.GOARCH {
case "arm64": case "arm64":
configureExtra = []KV{ configureExtra = []KV{
{"disable-hardware-acceleration"}, {"disable-hardware-acceleration"},
@@ -1104,11 +1143,10 @@ func init() {
func (t Toolchain) newMPC() (pkg.Artifact, string) { func (t Toolchain) newMPC() (pkg.Artifact, string) {
const ( const (
version = "1.4.1" version = "1.4.1"
checksum = "ZffaZyWkvIw0iPvRe5EJ7O-VvHtSkbbb3K_7SgPtK810NvGan7nbF0T5-6tozjQN" checksum = "wdXAhplnS89FjVp20m2nC2CmLFQeyQqLpQAfViTy4vPxFdv2WYOTtfBKeIk5_Rec"
) )
return t.NewPackage("mpc", version, newFromGitLab( return t.NewPackage("mpc", version, t.newTagRemote(
"gitlab.inria.fr", "https://gitlab.inria.fr/mpc/mpc.git",
"mpc/mpc",
version, checksum, version, checksum,
), &PackageAttr{ ), &PackageAttr{
// does not find mpc-impl.h otherwise // does not find mpc-impl.h otherwise
@@ -1142,12 +1180,12 @@ func init() {
func (t Toolchain) newGCC() (pkg.Artifact, string) { func (t Toolchain) newGCC() (pkg.Artifact, string) {
const ( const (
version = "16.1.0" version = "15.2.0"
checksum = "4ASoWbxaA2FW7PAB0zzHDPC5XnNhyaAyjtDPpGzceSLeYnEIXsNYZR3PA_Zu5P0K" checksum = "TXJ5WrbXlGLzy1swghQTr4qxgDCyIZFgJry51XEPTBZ8QYbVmFeB4lZbSMtPJ-a1"
) )
var configureExtra []KV var configureExtra []KV
switch arch { switch runtime.GOARCH {
case "amd64", "arm64": case "amd64", "arm64":
configureExtra = append(configureExtra, KV{"with-multilib-list", "''"}) configureExtra = append(configureExtra, KV{"with-multilib-list", "''"})
} }

View File

@@ -1,35 +0,0 @@
package rosa
import (
"slices"
"strconv"
"strings"
"testing"
)
func TestSkipGNUTests(t *testing.T) {
t.Parallel()
testCases := []struct {
tests []int
want string
}{
{[]int{764}, "1-763 765-"},
{[]int{764, 0xcafe, 37, 9}, "1-8 10-36 38-763 765-51965 51967-"},
{[]int{1, 2, 0xbed}, "3-3052 3054-"},
{[]int{3, 4}, "1-2 5-"},
}
for _, tc := range testCases {
t.Run(strings.Join(slices.Collect(func(yield func(string) bool) {
for _, n := range tc.tests {
yield(strconv.Itoa(n))
}
}), ","), func(t *testing.T) {
t.Parallel()
if got := skipGNUTests(tc.tests...); got != tc.want {
t.Errorf("skipGNUTests: %q, want %q", got, tc.want)
}
})
}
}

View File

@@ -1,6 +1,7 @@
package rosa package rosa
import ( import (
"runtime"
"slices" "slices"
"hakurei.app/internal/pkg" "hakurei.app/internal/pkg"
@@ -9,9 +10,9 @@ import (
// newGoBootstrap returns the Go bootstrap toolchain. // newGoBootstrap returns the Go bootstrap toolchain.
func (t Toolchain) newGoBootstrap() pkg.Artifact { func (t Toolchain) newGoBootstrap() pkg.Artifact {
const checksum = "8o9JL_ToiQKadCTb04nvBDkp8O1xiWOolAxVEqaTGodieNe4lOFEjlOxN3bwwe23" const checksum = "8o9JL_ToiQKadCTb04nvBDkp8O1xiWOolAxVEqaTGodieNe4lOFEjlOxN3bwwe23"
return t.New("go1.4-bootstrap", 0, t.AppendPresets(nil, return t.New("go1.4-bootstrap", 0, []pkg.Artifact{
Bash, t.Load(Bash),
), nil, []string{ }, nil, []string{
"CGO_ENABLED=0", "CGO_ENABLED=0",
}, ` }, `
mkdir -p /var/tmp/ /work/system/ mkdir -p /var/tmp/ /work/system/
@@ -34,13 +35,9 @@ func (t Toolchain) newGo(
script string, script string,
extra ...pkg.Artifact, extra ...pkg.Artifact,
) pkg.Artifact { ) pkg.Artifact {
name := "all" return t.New("go"+version, 0, slices.Concat([]pkg.Artifact{
if presetOpts&OptSkipCheck != 0 { t.Load(Bash),
name = "make" }, extra), nil, slices.Concat([]string{
}
return t.New("go"+version, 0, t.AppendPresets(extra,
Bash,
), nil, slices.Concat([]string{
"CC=cc", "CC=cc",
"GOCACHE=/tmp/gocache", "GOCACHE=/tmp/gocache",
"GOROOT_BOOTSTRAP=/system/go", "GOROOT_BOOTSTRAP=/system/go",
@@ -51,7 +48,7 @@ cp -r /usr/src/go /work/system
cd /work/system/go/src cd /work/system/go/src
chmod -R +w .. chmod -R +w ..
`+script+` `+script+`
./`+name+`.bash ./all.bash
mkdir /work/system/bin mkdir /work/system/bin
ln -s \ ln -s \
@@ -72,7 +69,7 @@ func (t Toolchain) newGoLatest() (pkg.Artifact, string) {
finalEnv []string finalEnv []string
) )
switch arch { switch runtime.GOARCH {
case "amd64": case "amd64":
bootstrapExtra = append(bootstrapExtra, t.newGoBootstrap()) bootstrapExtra = append(bootstrapExtra, t.newGoBootstrap())
@@ -82,7 +79,7 @@ func (t Toolchain) newGoLatest() (pkg.Artifact, string) {
finalEnv = append(finalEnv, "CGO_ENABLED=0") finalEnv = append(finalEnv, "CGO_ENABLED=0")
default: default:
panic("unsupported target " + arch) panic("unsupported target " + runtime.GOARCH)
} }
go119 := t.newGo( go119 := t.newGo(
@@ -107,7 +104,7 @@ echo \
[]string{"CGO_ENABLED=0"}, ` []string{"CGO_ENABLED=0"}, `
sed -i \ sed -i \
's,/lib/ld-musl-`+linuxArch()+`.so.1,/system/bin/linker,' \ 's,/lib/ld-musl-`+linuxArch()+`.so.1,/system/bin/linker,' \
cmd/link/internal/`+arch+`/obj.go cmd/link/internal/`+runtime.GOARCH+`/obj.go
rm \ rm \
crypto/tls/handshake_client_test.go \ crypto/tls/handshake_client_test.go \
@@ -125,17 +122,17 @@ echo \
[]string{"CGO_ENABLED=0"}, ` []string{"CGO_ENABLED=0"}, `
sed -i \ sed -i \
's,/lib/ld-musl-`+linuxArch()+`.so.1,/system/bin/linker,' \ 's,/lib/ld-musl-`+linuxArch()+`.so.1,/system/bin/linker,' \
cmd/link/internal/`+arch+`/obj.go cmd/link/internal/`+runtime.GOARCH+`/obj.go
`, go121, `, go121,
) )
go125 := t.newGo( go125 := t.newGo(
"1.25.10", "1.25.7",
"TwKwatkpwal-j9U2sDSRPEdM3YesI4Gm88YgGV59wtU-L85K9gA7UPy9SCxn6PMb", "fyylHdBVRUobnBjYj3NKBaYPUw3kGmo2mEELiZonOYurPfbarNU1x77B99Fjut7Q",
[]string{"CGO_ENABLED=0"}, ` []string{"CGO_ENABLED=0"}, `
sed -i \ sed -i \
's,/lib/ld-musl-`+linuxArch()+`.so.1,/system/bin/linker,' \ 's,/lib/ld-musl-`+linuxArch()+`.so.1,/system/bin/linker,' \
cmd/link/internal/`+arch+`/obj.go cmd/link/internal/`+runtime.GOARCH+`/obj.go
rm \ rm \
os/root_unix_test.go \ os/root_unix_test.go \
@@ -144,8 +141,8 @@ rm \
) )
const ( const (
version = "1.26.3" version = "1.26.2"
checksum = "lEiFocZFnN5fKvZzmwVdqc9pYUjAuhzqZGbuiOqxUP4XdcY8yECisKcqsQ_eNn1N" checksum = "v-6BE89_1g3xYf-9oIYpJKFXlo3xKHYJj2_VGkaUq8ZVkIVQmLwrto-xGG03OISH"
) )
return t.newGo( return t.newGo(
version, version,
@@ -153,15 +150,10 @@ rm \
finalEnv, ` finalEnv, `
sed -i \ sed -i \
's,/lib/ld-musl-`+linuxArch()+`.so.1,/system/bin/linker,' \ 's,/lib/ld-musl-`+linuxArch()+`.so.1,/system/bin/linker,' \
cmd/link/internal/`+arch+`/obj.go cmd/link/internal/`+runtime.GOARCH+`/obj.go
sed -i \
's/cpu.X86.HasAVX512VBMI/& \&\& cpu.X86.HasPOPCNT/' \
internal/runtime/gc/scan/scan_amd64.go
rm \ rm \
os/root_unix_test.go \ os/root_unix_test.go
cmd/cgo/internal/testsanitizers/tsan_test.go \
cmd/cgo/internal/testsanitizers/cshared_test.go
`, go125, `, go125,
), version ), version
} }

View File

@@ -7,8 +7,8 @@ import (
func (t Toolchain) newGLib() (pkg.Artifact, string) { func (t Toolchain) newGLib() (pkg.Artifact, string) {
const ( const (
version = "2.88.1" version = "2.88.0"
checksum = "Rkszn6W4RHjyspyqfXdVAVawdwDJCuS0Zu0f7qot7tbJhnw2fUDoUUJB40m-1MCX" checksum = "T79Cg4z6j-sDZ2yIwvbY4ccRv2-fbwbqgcw59F5NQ6qJT6z4v261vbYp3dHO6Ma3"
) )
return t.NewPackage("glib", version, t.newTagRemote( return t.NewPackage("glib", version, t.newTagRemote(
"https://gitlab.gnome.org/GNOME/glib.git", "https://gitlab.gnome.org/GNOME/glib.git",

View File

@@ -7,8 +7,9 @@ func (t Toolchain) newHakurei(
withHostname bool, withHostname bool,
) pkg.Artifact { ) pkg.Artifact {
hostname := ` hostname := `
echo 'Building test helper (hostname).' echo '# Building test helper (hostname).'
go build -o /bin/hostname /usr/src/hostname/main.go go build -v -o /bin/hostname /usr/src/hostname/main.go
echo
` `
if !withHostname { if !withHostname {
hostname = "" hostname = ""
@@ -63,9 +64,9 @@ func init() {
return t.newHakurei("", ` return t.newHakurei("", `
mkdir -p /work/system/libexec/hakurei/ mkdir -p /work/system/libexec/hakurei/
echo "Building hakurei for $(go env GOOS)/$(go env GOARCH)." echo '# Building hakurei.'
go generate ./... go generate -v ./...
go build -trimpath -tags=rosa -o /work/system/libexec/hakurei -ldflags="-s -w go build -trimpath -v -tags=rosa -o /work/system/libexec/hakurei -ldflags="-s -w
-buildid= -buildid=
-linkmode external -linkmode external
-extldflags=-static -extldflags=-static
@@ -76,7 +77,7 @@ go build -trimpath -tags=rosa -o /work/system/libexec/hakurei -ldflags="-s -w
" ./... " ./...
echo echo
echo '##### Testing hakurei.' echo '# Testing hakurei.'
go test -ldflags='-buildid= -linkmode external -extldflags=-static' ./... go test -ldflags='-buildid= -linkmode external -extldflags=-static' ./...
echo echo
@@ -96,13 +97,9 @@ mkdir -p /work/system/bin/
} }
artifactsM[HakureiDist] = Metadata{ artifactsM[HakureiDist] = Metadata{
f: func(t Toolchain) (pkg.Artifact, string) { f: func(t Toolchain) (pkg.Artifact, string) {
name := "all"
if presetOpts&OptSkipCheck != 0 {
name = "make"
}
return t.newHakurei("-dist", ` return t.newHakurei("-dist", `
export HAKUREI_VERSION export HAKUREI_VERSION
DESTDIR=/work /usr/src/hakurei/`+name+`.sh DESTDIR=/work /usr/src/hakurei/all.sh
`, true), hakureiVersion `, true), hakureiVersion
}, },

View File

@@ -4,13 +4,13 @@ package rosa
import "hakurei.app/internal/pkg" import "hakurei.app/internal/pkg"
const hakureiVersion = "0.4.2" const hakureiVersion = "0.4.0"
// hakureiSource is the source code of a hakurei release. // hakureiSource is the source code of a hakurei release.
var hakureiSource = newTar( var hakureiSource = newTar(
"https://git.gensokyo.uk/rosa/hakurei/archive/"+ "https://git.gensokyo.uk/rosa/hakurei/archive/"+
"v"+hakureiVersion+".tar.gz", "v"+hakureiVersion+".tar.gz",
"jadgaOrxv5ABGvzQ_Rk0aPGz7U8K-427TbMhQNQ32scSizEnlR44Pu7NoWYWVZWq", "wfQ9DqCW0Fw9o91wj-I55waoqzB-UqzzuC0_2h-P-1M78SgZ1WHSPCDJMth6EyC2",
pkg.TarGzip, pkg.TarGzip,
) )

View File

@@ -1,34 +0,0 @@
package rosa
import "hakurei.app/internal/pkg"
func (t Toolchain) newHwdata() (pkg.Artifact, string) {
const (
version = "0.407"
checksum = "6p1XD0CRuzt6hLfjv4ShKBW934BexmoPkRrmwxD4J63fBVCzVBRHyF8pVJdW_Xjm"
)
return t.NewPackage("hwdata", version, newFromGitHub(
"vcrhonek/hwdata",
"v"+version, checksum,
), &PackageAttr{
Writable: true,
EnterSource: true,
}, &MakeHelper{
// awk: fatal: cannot open file `hwdata.spec' for reading: No such file or directory
InPlace: true,
// lspci: Unknown option 'A' (see "lspci --help")
SkipCheck: true,
}), version
}
func init() {
artifactsM[Hwdata] = Metadata{
f: Toolchain.newHwdata,
Name: "hwdata",
Description: "contains various hardware identification and configuration data",
Website: "https://github.com/vcrhonek/hwdata",
ID: 5387,
}
}

View File

@@ -2,12 +2,12 @@ package rosa
import "hakurei.app/internal/pkg" import "hakurei.app/internal/pkg"
const kernelVersion = "6.12.87" const kernelVersion = "6.12.81"
var kernelSource = newTar( var kernelSource = newTar(
"https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/"+ "https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/"+
"snapshot/linux-"+kernelVersion+".tar.gz", "snapshot/linux-"+kernelVersion+".tar.gz",
"QTl5teIy0K5KsOLYGHQ3FbnPCZNRH2bySXVzghiOoHDdM3zAcSPUkmdly85lMzHx", "fBkNwf82DQXh74in6gaF2Jot7Vg-Vlcp9BUtCEipL9mvcM1EXLVFdV7FcrO20Eve",
pkg.TarGzip, pkg.TarGzip,
) )
@@ -29,22 +29,8 @@ func init() {
} }
func (t Toolchain) newKernelHeaders() (pkg.Artifact, string) { func (t Toolchain) newKernelHeaders() (pkg.Artifact, string) {
const checksum = "lCmBNcMeUmXifg0vecKOPy3GAaFcJSmOPnf3wit9xYTDSTsFADPt1xxUFfmTn1fD"
return t.NewPackage("kernel-headers", kernelVersion, kernelSource, &PackageAttr{ return t.NewPackage("kernel-headers", kernelVersion, kernelSource, &PackageAttr{
Flag: TEarly, Flag: TEarly,
KnownChecksum: new(mustDecode(checksum)),
Paths: []pkg.ExecPath{
// updated manually for API changes
pkg.Path(AbsUsrSrc.Append("version.h"), false, pkg.NewFile(
"version.h", []byte(`#define LINUX_VERSION_CODE 396372
#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + ((c) > 255 ? 255 : (c)))
#define LINUX_VERSION_MAJOR 6
#define LINUX_VERSION_PATCHLEVEL 12
#define LINUX_VERSION_SUBLEVEL 84
`),
)),
},
}, &MakeHelper{ }, &MakeHelper{
SkipConfigure: true, SkipConfigure: true,
@@ -57,11 +43,7 @@ func (t Toolchain) newKernelHeaders() (pkg.Artifact, string) {
"INSTALL_HDR_PATH=/work/system", "INSTALL_HDR_PATH=/work/system",
"headers_install", "headers_install",
}, },
Install: ` Install: "# headers installed during make",
cat \
/usr/src/version.h > \
/work/system/include/linux/version.h
`,
}, },
Rsync, Rsync,
), kernelVersion ), kernelVersion

View File

@@ -1,16 +1,16 @@
# #
# Automatically generated file; DO NOT EDIT. # Automatically generated file; DO NOT EDIT.
# Linux/x86 6.12.84 Kernel Configuration # Linux/x86 6.12.80 Kernel Configuration
# #
CONFIG_CC_VERSION_TEXT="clang version 22.1.4" CONFIG_CC_VERSION_TEXT="clang version 22.1.2"
CONFIG_GCC_VERSION=0 CONFIG_GCC_VERSION=0
CONFIG_CC_IS_CLANG=y CONFIG_CC_IS_CLANG=y
CONFIG_CLANG_VERSION=220104 CONFIG_CLANG_VERSION=220102
CONFIG_AS_IS_LLVM=y CONFIG_AS_IS_LLVM=y
CONFIG_AS_VERSION=220104 CONFIG_AS_VERSION=220102
CONFIG_LD_VERSION=0 CONFIG_LD_VERSION=0
CONFIG_LD_IS_LLD=y CONFIG_LD_IS_LLD=y
CONFIG_LLD_VERSION=220104 CONFIG_LLD_VERSION=220102
CONFIG_RUSTC_VERSION=0 CONFIG_RUSTC_VERSION=0
CONFIG_RUSTC_LLVM_VERSION=0 CONFIG_RUSTC_LLVM_VERSION=0
CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y
@@ -3175,8 +3175,14 @@ CONFIG_PATA_ACPI=y
CONFIG_ATA_GENERIC=y CONFIG_ATA_GENERIC=y
CONFIG_PATA_LEGACY=m CONFIG_PATA_LEGACY=m
CONFIG_MD=y CONFIG_MD=y
# CONFIG_BLK_DEV_MD is not set CONFIG_BLK_DEV_MD=m
CONFIG_MD_BITMAP_FILE=y CONFIG_MD_BITMAP_FILE=y
CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=m
CONFIG_MD_RAID1=m
CONFIG_MD_RAID10=m
CONFIG_MD_RAID456=m
CONFIG_MD_CLUSTER=m
CONFIG_BCACHE=m CONFIG_BCACHE=m
# CONFIG_BCACHE_DEBUG is not set # CONFIG_BCACHE_DEBUG is not set
# CONFIG_BCACHE_ASYNC_REGISTRATION is not set # CONFIG_BCACHE_ASYNC_REGISTRATION is not set
@@ -3199,7 +3205,7 @@ CONFIG_DM_ERA=m
CONFIG_DM_CLONE=m CONFIG_DM_CLONE=m
CONFIG_DM_MIRROR=m CONFIG_DM_MIRROR=m
CONFIG_DM_LOG_USERSPACE=m CONFIG_DM_LOG_USERSPACE=m
# CONFIG_DM_RAID is not set CONFIG_DM_RAID=m
CONFIG_DM_ZERO=m CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m CONFIG_DM_MULTIPATH=m
CONFIG_DM_MULTIPATH_QL=m CONFIG_DM_MULTIPATH_QL=m
@@ -11630,7 +11636,10 @@ CONFIG_RANDSTRUCT_NONE=y
CONFIG_XOR_BLOCKS=m CONFIG_XOR_BLOCKS=m
CONFIG_ASYNC_CORE=m CONFIG_ASYNC_CORE=m
CONFIG_ASYNC_MEMCPY=m
CONFIG_ASYNC_XOR=m CONFIG_ASYNC_XOR=m
CONFIG_ASYNC_PQ=m
CONFIG_ASYNC_RAID6_RECOV=m
CONFIG_CRYPTO=y CONFIG_CRYPTO=y
# #
@@ -11916,6 +11925,8 @@ CONFIG_BINARY_PRINTF=y
# #
# Library routines # Library routines
# #
CONFIG_RAID6_PQ=m
CONFIG_RAID6_PQ_BENCHMARK=y
CONFIG_LINEAR_RANGES=y CONFIG_LINEAR_RANGES=y
CONFIG_PACKING=y CONFIG_PACKING=y
CONFIG_BITREVERSE=y CONFIG_BITREVERSE=y
@@ -12460,6 +12471,7 @@ CONFIG_RUNTIME_TESTING_MENU=y
# CONFIG_INTERVAL_TREE_TEST is not set # CONFIG_INTERVAL_TREE_TEST is not set
# CONFIG_PERCPU_TEST is not set # CONFIG_PERCPU_TEST is not set
# CONFIG_ATOMIC64_SELFTEST is not set # CONFIG_ATOMIC64_SELFTEST is not set
# CONFIG_ASYNC_RAID6_TEST is not set
# CONFIG_TEST_HEXDUMP is not set # CONFIG_TEST_HEXDUMP is not set
# CONFIG_TEST_KSTRTOX is not set # CONFIG_TEST_KSTRTOX is not set
# CONFIG_TEST_PRINTF is not set # CONFIG_TEST_PRINTF is not set

View File

@@ -1,16 +1,16 @@
# #
# Automatically generated file; DO NOT EDIT. # Automatically generated file; DO NOT EDIT.
# Linux/arm64 6.12.83 Kernel Configuration # Linux/arm64 6.12.80 Kernel Configuration
# #
CONFIG_CC_VERSION_TEXT="clang version 22.1.4" CONFIG_CC_VERSION_TEXT="clang version 21.1.8"
CONFIG_GCC_VERSION=0 CONFIG_GCC_VERSION=0
CONFIG_CC_IS_CLANG=y CONFIG_CC_IS_CLANG=y
CONFIG_CLANG_VERSION=220104 CONFIG_CLANG_VERSION=210108
CONFIG_AS_IS_LLVM=y CONFIG_AS_IS_LLVM=y
CONFIG_AS_VERSION=220104 CONFIG_AS_VERSION=210108
CONFIG_LD_VERSION=0 CONFIG_LD_VERSION=0
CONFIG_LD_IS_LLD=y CONFIG_LD_IS_LLD=y
CONFIG_LLD_VERSION=220104 CONFIG_LLD_VERSION=210108
CONFIG_RUSTC_VERSION=0 CONFIG_RUSTC_VERSION=0
CONFIG_RUSTC_LLVM_VERSION=0 CONFIG_RUSTC_LLVM_VERSION=0
CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y
@@ -3253,8 +3253,14 @@ CONFIG_PATA_ACPI=y
CONFIG_ATA_GENERIC=y CONFIG_ATA_GENERIC=y
CONFIG_PATA_LEGACY=m CONFIG_PATA_LEGACY=m
CONFIG_MD=y CONFIG_MD=y
# CONFIG_BLK_DEV_MD is not set CONFIG_BLK_DEV_MD=m
CONFIG_MD_BITMAP_FILE=y CONFIG_MD_BITMAP_FILE=y
CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=m
CONFIG_MD_RAID1=m
CONFIG_MD_RAID10=m
CONFIG_MD_RAID456=m
CONFIG_MD_CLUSTER=m
CONFIG_BCACHE=m CONFIG_BCACHE=m
# CONFIG_BCACHE_DEBUG is not set # CONFIG_BCACHE_DEBUG is not set
# CONFIG_BCACHE_ASYNC_REGISTRATION is not set # CONFIG_BCACHE_ASYNC_REGISTRATION is not set
@@ -3277,7 +3283,7 @@ CONFIG_DM_ERA=m
CONFIG_DM_CLONE=m CONFIG_DM_CLONE=m
CONFIG_DM_MIRROR=m CONFIG_DM_MIRROR=m
CONFIG_DM_LOG_USERSPACE=m CONFIG_DM_LOG_USERSPACE=m
# CONFIG_DM_RAID is not set CONFIG_DM_RAID=m
CONFIG_DM_ZERO=m CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m CONFIG_DM_MULTIPATH=m
CONFIG_DM_MULTIPATH_QL=m CONFIG_DM_MULTIPATH_QL=m
@@ -10294,6 +10300,7 @@ CONFIG_ALTERA_MSGDMA=m
# CONFIG_AMBA_PL08X is not set # CONFIG_AMBA_PL08X is not set
CONFIG_APPLE_ADMAC=m CONFIG_APPLE_ADMAC=m
CONFIG_AXI_DMAC=m CONFIG_AXI_DMAC=m
CONFIG_BCM_SBA_RAID=m
CONFIG_DMA_BCM2835=m CONFIG_DMA_BCM2835=m
CONFIG_DMA_SUN6I=m CONFIG_DMA_SUN6I=m
CONFIG_DW_AXI_DMAC=m CONFIG_DW_AXI_DMAC=m
@@ -13285,7 +13292,12 @@ CONFIG_RANDSTRUCT_NONE=y
CONFIG_XOR_BLOCKS=m CONFIG_XOR_BLOCKS=m
CONFIG_ASYNC_CORE=m CONFIG_ASYNC_CORE=m
CONFIG_ASYNC_MEMCPY=m
CONFIG_ASYNC_XOR=m CONFIG_ASYNC_XOR=m
CONFIG_ASYNC_PQ=m
CONFIG_ASYNC_RAID6_RECOV=m
CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA=y
CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA=y
CONFIG_CRYPTO=y CONFIG_CRYPTO=y
# #
@@ -13628,6 +13640,8 @@ CONFIG_BINARY_PRINTF=y
# #
# Library routines # Library routines
# #
CONFIG_RAID6_PQ=m
CONFIG_RAID6_PQ_BENCHMARK=y
CONFIG_LINEAR_RANGES=y CONFIG_LINEAR_RANGES=y
CONFIG_PACKING=y CONFIG_PACKING=y
CONFIG_BITREVERSE=y CONFIG_BITREVERSE=y
@@ -14158,6 +14172,7 @@ CONFIG_RUNTIME_TESTING_MENU=y
# CONFIG_INTERVAL_TREE_TEST is not set # CONFIG_INTERVAL_TREE_TEST is not set
# CONFIG_PERCPU_TEST is not set # CONFIG_PERCPU_TEST is not set
# CONFIG_ATOMIC64_SELFTEST is not set # CONFIG_ATOMIC64_SELFTEST is not set
# CONFIG_ASYNC_RAID6_TEST is not set
# CONFIG_TEST_HEXDUMP is not set # CONFIG_TEST_HEXDUMP is not set
# CONFIG_TEST_KSTRTOX is not set # CONFIG_TEST_KSTRTOX is not set
# CONFIG_TEST_PRINTF is not set # CONFIG_TEST_PRINTF is not set

View File

@@ -1,6 +1,6 @@
# #
# Automatically generated file; DO NOT EDIT. # Automatically generated file; DO NOT EDIT.
# Linux/riscv 6.12.80 Kernel Configuration # Linux/riscv 6.12.77 Kernel Configuration
# #
CONFIG_CC_VERSION_TEXT="clang version 22.1.2" CONFIG_CC_VERSION_TEXT="clang version 22.1.2"
CONFIG_GCC_VERSION=0 CONFIG_GCC_VERSION=0
@@ -37,6 +37,11 @@ CONFIG_BUILD_SALT=""
CONFIG_HAVE_KERNEL_GZIP=y CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_ZSTD=y CONFIG_HAVE_KERNEL_ZSTD=y
# CONFIG_KERNEL_GZIP is not set # CONFIG_KERNEL_GZIP is not set
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
# CONFIG_KERNEL_XZ is not set
# CONFIG_KERNEL_LZO is not set
# CONFIG_KERNEL_LZ4 is not set
CONFIG_KERNEL_ZSTD=y CONFIG_KERNEL_ZSTD=y
CONFIG_DEFAULT_INIT="" CONFIG_DEFAULT_INIT=""
CONFIG_DEFAULT_HOSTNAME="rosa-early" CONFIG_DEFAULT_HOSTNAME="rosa-early"
@@ -2843,8 +2848,14 @@ CONFIG_PATA_ACPI=y
CONFIG_ATA_GENERIC=y CONFIG_ATA_GENERIC=y
CONFIG_PATA_LEGACY=m CONFIG_PATA_LEGACY=m
CONFIG_MD=y CONFIG_MD=y
# CONFIG_BLK_DEV_MD is not set CONFIG_BLK_DEV_MD=m
CONFIG_MD_BITMAP_FILE=y CONFIG_MD_BITMAP_FILE=y
CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=m
CONFIG_MD_RAID1=m
CONFIG_MD_RAID10=m
CONFIG_MD_RAID456=m
CONFIG_MD_CLUSTER=m
CONFIG_BCACHE=m CONFIG_BCACHE=m
# CONFIG_BCACHE_DEBUG is not set # CONFIG_BCACHE_DEBUG is not set
# CONFIG_BCACHE_ASYNC_REGISTRATION is not set # CONFIG_BCACHE_ASYNC_REGISTRATION is not set
@@ -2867,7 +2878,7 @@ CONFIG_DM_ERA=m
CONFIG_DM_CLONE=m CONFIG_DM_CLONE=m
CONFIG_DM_MIRROR=m CONFIG_DM_MIRROR=m
CONFIG_DM_LOG_USERSPACE=m CONFIG_DM_LOG_USERSPACE=m
# CONFIG_DM_RAID is not set CONFIG_DM_RAID=m
CONFIG_DM_ZERO=m CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m CONFIG_DM_MULTIPATH=m
CONFIG_DM_MULTIPATH_QL=m CONFIG_DM_MULTIPATH_QL=m
@@ -10644,7 +10655,10 @@ CONFIG_RANDSTRUCT_NONE=y
CONFIG_XOR_BLOCKS=m CONFIG_XOR_BLOCKS=m
CONFIG_ASYNC_CORE=m CONFIG_ASYNC_CORE=m
CONFIG_ASYNC_MEMCPY=m
CONFIG_ASYNC_XOR=m CONFIG_ASYNC_XOR=m
CONFIG_ASYNC_PQ=m
CONFIG_ASYNC_RAID6_RECOV=m
CONFIG_CRYPTO=y CONFIG_CRYPTO=y
# #
@@ -10904,6 +10918,8 @@ CONFIG_BINARY_PRINTF=y
# #
# Library routines # Library routines
# #
CONFIG_RAID6_PQ=m
CONFIG_RAID6_PQ_BENCHMARK=y
CONFIG_LINEAR_RANGES=y CONFIG_LINEAR_RANGES=y
CONFIG_PACKING=y CONFIG_PACKING=y
CONFIG_BITREVERSE=y CONFIG_BITREVERSE=y
@@ -11392,6 +11408,7 @@ CONFIG_RUNTIME_TESTING_MENU=y
# CONFIG_INTERVAL_TREE_TEST is not set # CONFIG_INTERVAL_TREE_TEST is not set
# CONFIG_PERCPU_TEST is not set # CONFIG_PERCPU_TEST is not set
# CONFIG_ATOMIC64_SELFTEST is not set # CONFIG_ATOMIC64_SELFTEST is not set
# CONFIG_ASYNC_RAID6_TEST is not set
# CONFIG_TEST_HEXDUMP is not set # CONFIG_TEST_HEXDUMP is not set
# CONFIG_TEST_KSTRTOX is not set # CONFIG_TEST_KSTRTOX is not set
# CONFIG_TEST_PRINTF is not set # CONFIG_TEST_PRINTF is not set

View File

@@ -1,100 +0,0 @@
package rosa
import "hakurei.app/internal/pkg"
func (t Toolchain) newLibarchive() (pkg.Artifact, string) {
const (
version = "3.8.7"
checksum = "CUJK4MDQmZmATClgQBH2Wt-7Ts4iiSUlg1J_TVb6-5IK3rVUgVLIMc5k-bnWB9w3"
)
return t.NewPackage("libarchive", version, newFromGitHub(
"libarchive/libarchive",
"v"+version, checksum,
), &PackageAttr{
Paths: []pkg.ExecPath{
pkg.Path(AbsUsrSrc.Append(
"CTestCustom.cmake",
), false, pkg.NewFile("CTestCustom.cmake", []byte(`
list(APPEND CTEST_CUSTOM_TESTS_IGNORE
"libarchive_test_archive_string_conversion_fail_c"
"libarchive_test_archive_string_conversion_fail_latin1"
"libarchive_test_archive_string_update_utf8_koi8"
"libarchive_test_gnutar_filename_encoding_KOI8R_UTF8"
"libarchive_test_gnutar_filename_encoding_KOI8R_CP866"
"libarchive_test_gnutar_filename_encoding_CP1251_UTF8"
"libarchive_test_gnutar_filename_encoding_Russian_Russia"
"libarchive_test_gnutar_filename_encoding_EUCJP_UTF8"
"libarchive_test_gnutar_filename_encoding_EUCJP_CP932"
"libarchive_test_gnutar_filename_encoding_CP932_UTF8"
"libarchive_test_pax_filename_encoding_KOI8R"
"libarchive_test_pax_filename_encoding_CP1251"
"libarchive_test_pax_filename_encoding_EUCJP"
"libarchive_test_pax_filename_encoding_CP932"
"libarchive_test_read_format_cpio_filename_UTF8_eucJP"
"libarchive_test_read_format_cpio_filename_CP866_KOI8R"
"libarchive_test_read_format_cpio_filename_KOI8R_CP866"
"libarchive_test_read_format_cpio_filename_UTF8_KOI8R"
"libarchive_test_read_format_cpio_filename_UTF8_CP866"
"libarchive_test_read_format_cpio_filename_eucJP_CP932"
"libarchive_test_read_format_cpio_filename_UTF8_CP932"
"libarchive_test_read_format_cpio_filename_CP866_CP1251"
"libarchive_test_read_format_cpio_filename_CP866_CP1251_win"
"libarchive_test_read_format_cpio_filename_KOI8R_CP1251"
"libarchive_test_read_format_cpio_filename_UTF8_CP1251"
"libarchive_test_read_format_gtar_filename_CP866_KOI8R"
"libarchive_test_read_format_gtar_filename_KOI8R_CP866"
"libarchive_test_read_format_gtar_filename_eucJP_CP932"
"libarchive_test_read_format_gtar_filename_CP866_CP1251"
"libarchive_test_read_format_gtar_filename_CP866_CP1251_win"
"libarchive_test_read_format_gtar_filename_KOI8R_CP1251"
"libarchive_test_read_format_rar_unicode_CP932"
"libarchive_test_read_format_zip_filename_CP932_eucJP"
"libarchive_test_read_format_zip_filename_UTF8_eucJP"
"libarchive_test_read_format_zip_filename_CP866_KOI8R"
"libarchive_test_read_format_zip_filename_KOI8R_CP866"
"libarchive_test_read_format_zip_filename_UTF8_KOI8R"
"libarchive_test_read_format_zip_filename_UTF8_CP866"
"libarchive_test_read_format_zip_filename_CP932_CP932"
"libarchive_test_read_format_zip_filename_UTF8_CP932"
"libarchive_test_read_format_zip_filename_CP866_CP1251"
"libarchive_test_read_format_zip_filename_CP866_CP1251_win"
"libarchive_test_read_format_zip_filename_KOI8R_CP1251"
"libarchive_test_read_format_zip_filename_UTF8_CP1251"
"libarchive_test_ustar_filename_encoding_KOI8R_UTF8"
"libarchive_test_ustar_filename_encoding_KOI8R_CP866"
"libarchive_test_ustar_filename_encoding_CP1251_UTF8"
"libarchive_test_ustar_filename_encoding_Russian_Russia"
"libarchive_test_ustar_filename_encoding_EUCJP_UTF8"
"libarchive_test_ustar_filename_encoding_EUCJP_CP932"
"libarchive_test_ustar_filename_encoding_CP932_UTF8"
"libarchive_test_zip_filename_encoding_KOI8R"
"libarchive_test_zip_filename_encoding_ru_RU_CP1251"
"libarchive_test_zip_filename_encoding_Russian_Russia"
"libarchive_test_zip_filename_encoding_EUCJP"
"libarchive_test_zip_filename_encoding_CP932"
"libarchive_test_read_format_cab_filename"
"libarchive_test_read_format_lha_filename"
"libarchive_test_read_format_tar_filename"
"libarchive_test_read_format_ustar_filename"
"libarchive_test_read_append_wrong_filter"
)
`))),
},
Writable: true,
ScriptEarly: `
install -Dv /usr/src/CTestCustom.cmake /cure/
`,
}, (*CMakeHelper)(nil)), version
}
func init() {
artifactsM[Libarchive] = Metadata{
f: Toolchain.newLibarchive,
Name: "libarchive",
Description: "multi-format archive and compression library",
Website: "https://www.libarchive.org/",
ID: 1558,
}
}

View File

@@ -4,8 +4,8 @@ import "hakurei.app/internal/pkg"
func (t Toolchain) newLibmd() (pkg.Artifact, string) { func (t Toolchain) newLibmd() (pkg.Artifact, string) {
const ( const (
version = "1.2.0" version = "1.1.0"
checksum = "1rJ6joAO0wwMZvSfnRNkc1MOhywyAq7SM8VmF92NvDtv7Qdl1LRbjm5fg_DFFtGj" checksum = "9apYqPPZm0j5HQT8sCsVIhnVIqRD7XgN7kPIaTwTqnTuUq5waUAMq4M7ev8CODJ1"
) )
return t.NewPackage("libmd", version, t.newTagRemote( return t.NewPackage("libmd", version, t.newTagRemote(
"https://git.hadrons.org/git/libmd.git", "https://git.hadrons.org/git/libmd.git",

View File

@@ -1,50 +0,0 @@
package rosa
import "hakurei.app/internal/pkg"
func (t Toolchain) newLibconfig() (pkg.Artifact, string) {
const (
version = "1.8.2"
checksum = "fD32hjeAZuTz98g6WYHRwsxphrgrEFqxi5Z1jlJemPckPBfxpS3i5HgshAuA6vmT"
)
return t.NewPackage("libconfig", version, newFromGitHub(
"hyperrealm/libconfig",
"v"+version,
checksum,
), &PackageAttr{
Patches: []KV{
{"disable-broken-tests", `diff --git a/tests/tests.c b/tests/tests.c
index eba7eae..f916d2e 100644
--- a/tests/tests.c
+++ b/tests/tests.c
@@ -753,7 +753,6 @@ int main(int argc, char **argv)
int failures;
TT_SUITE_START(LibConfigTests);
- TT_SUITE_TEST(LibConfigTests, ParsingAndFormatting);
TT_SUITE_TEST(LibConfigTests, ParseInvalidFiles);
TT_SUITE_TEST(LibConfigTests, ParseInvalidStrings);
TT_SUITE_TEST(LibConfigTests, BigInt1);
@@ -768,7 +767,6 @@ int main(int argc, char **argv)
TT_SUITE_TEST(LibConfigTests, OverrideSetting);
TT_SUITE_TEST(LibConfigTests, SettingLookups);
TT_SUITE_TEST(LibConfigTests, ReadStream);
- TT_SUITE_TEST(LibConfigTests, BinaryAndHex);
TT_SUITE_RUN(LibConfigTests);
failures = TT_SUITE_NUM_FAILURES(LibConfigTests);
TT_SUITE_END(LibConfigTests);
`},
},
}, (*CMakeHelper)(nil)), version
}
func init() {
artifactsM[Libconfig] = Metadata{
f: Toolchain.newLibconfig,
Name: "libconfig",
Description: "a simple library for processing structured configuration files",
Website: "https://hyperrealm.github.io/libconfig/",
ID: 1580,
}
}

View File

@@ -1,30 +0,0 @@
package rosa
import "hakurei.app/internal/pkg"
func (t Toolchain) newLibdisplayInfo() (pkg.Artifact, string) {
const (
version = "0.3.0"
checksum = "yjOqPUHHYgRtpqGw5RI1n2Q1_hO5j0LiFNMbjcRWV4Nf71XwwoC9fZMlKBDeLchT"
)
return t.NewPackage("libdisplay-info", version, newFromGitLab(
"gitlab.freedesktop.org",
"emersion/libdisplay-info",
version, checksum,
), nil, (*MesonHelper)(nil),
Diffutils,
Hwdata,
), version
}
func init() {
artifactsM[LibdisplayInfo] = Metadata{
f: Toolchain.newLibdisplayInfo,
Name: "libdisplay-info",
Description: "EDID and DisplayID library",
Website: "https://gitlab.freedesktop.org/emersion/libdisplay-info",
ID: 326668,
}
}

Some files were not shown because too many files have changed in this diff Show More